code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
# Explicit submodule import: `import importlib` alone is not guaranteed to
# bind the `importlib.util` attribute (it only works incidentally on some
# CPython versions).
import importlib.util

__all__ = ['mount_gdrive']


def mount_gdrive() -> str:
    """Mount Google Drive storage of the current Google account and return the root path.

    Functionality only available in the Google Colab environment; otherwise,
    it raises a RuntimeError.

    Returns:
        The mounted Drive root directory ("/content/gdrive/My Drive/").

    Raises:
        RuntimeError: when not running inside Google Colab.
    """
    if importlib.util.find_spec("google.colab") is None:
        raise RuntimeError("Cannot mount Google Drive outside of Google Colab.")
    # Imported lazily: google.colab only exists inside the Colab runtime.
    from google.colab import drive
    drive.mount('/content/gdrive', force_remount=True)
    root_dir = "/content/gdrive/My Drive/"
    return root_dir
| [
"importlib.util.find_spec",
"google.colab.drive.mount"
]
| [((452, 502), 'google.colab.drive.mount', 'drive.mount', (['"""/content/gdrive"""'], {'force_remount': '(True)'}), "('/content/gdrive', force_remount=True)\n", (463, 502), False, 'from google.colab import drive\n'), ((280, 320), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""google.colab"""'], {}), "('google.colab')\n", (304, 320), False, 'import importlib\n')] |
import HandRankings as Hand
from deuces.deuces import Card, Evaluator
class GameData:
    """Track match-, hand- and street-level statistics for a heads-up poker bot.

    One instance lives for a whole match.  ``new_hand``, ``get_action`` and
    ``hand_over`` consume tokenized server messages; every token arrives as a
    string (numeric fields are cast with ``int``/``float`` where used).
    """

    def __init__(self, name, opponent_name, stack_size, bb):
        """Initialize per-match counters.

        name / opponent_name: player identifiers as they appear inside
        action strings (e.g. "RAISE:4:<name>").
        stack_size / bb: numeric strings from the server.
        """
        # match stats
        self.name = name
        self.opponent_name = opponent_name
        self.starting_stack_size = int(stack_size)
        self.num_hands = 0
        self.num_wins = 0
        self.num_flop = 0
        self.big_blind = int(bb)
        # self pre-flop stats
        self.pfr = 0
        self.vpip = 0
        self.three_bet = 0
        self.fold_big_bet = 0
        # opponent pre-flop stats
        self.opponent_pfr = 0
        self.opponent_vpip = 0
        self.opponent_three_bet = 0
        self.opponent_fold_pfr = 0
        self.opponent_fold_three_bet = 0
        # self post-flop stats
        self.aggression_factor = False
        self.showdown = 0
        self.c_bet = 0
        self.showdown_win = 0
        self.double_barrel = 0
        self.discarded_card = None
        # opponent post-flop stats
        self.opponent_c_bet = 0
        self.opponent_fold_c_bet = 0
        self.opponent_double_barrel = 0
        # current hand stats
        self.button = True
        self.current_pot_size = 0
        self.current_hand = []
        self.current_hand_strength = 0.0
        self.hand_class = ''
        self.hand_score = 0
        self.current_game_state = ''
        self.board_cards = []
        self.last_actions = []
        self.current_legal_actions = []
        self.has_called = False
        self.opponent_has_called = False
        self.has_two_bet = False
        self.opponent_has_two_bet = False
        self.has_three_bet = False
        self.opponent_has_three_bet = False
        self.has_four_bet = False
        self.opponent_has_four_bet = False
        # street_dict counts, per GETACTION message, how many times each
        # board-card count (0/3/4/5) has been seen this hand.
        self.street_dict = {'0': 0, '3': 0, '4': 0, '5': 0}
        self.discard = False
        self.has_five_bet = False
        self.has_bet_aggressively = False
        self.time_bank = 0.0
        self.opc = 0

    def new_hand(self, data_list):
        """Reset per-hand state from a NEWHAND message.

        data_list[2] is the button flag ("true"/"false"); data_list[3] and
        data_list[4] are our two hole cards.
        """
        self.num_hands += 1
        self.button = data_list[2]
        if "true" in self.button:
            self.button = True
        else:
            self.button = False
        self.current_hand = [data_list[3], data_list[4]]
        self.current_hand_strength = Hand.hand_win_odds(self.current_hand)
        self.current_game_state = 'PREFLOP'
        self.board_cards = []
        self.last_actions = []
        self.current_legal_actions = []
        self.street_dict = {'0': 0, '3': 0, '4': 0, '5': 0}
        self.has_two_bet = False
        self.opponent_has_two_bet = False
        self.has_three_bet = False
        self.opponent_has_three_bet = False
        self.has_four_bet = False
        self.opponent_has_four_bet = False
        self.has_bet_aggressively = False
        self.aggression_factor = False
        self.discarded_card = None

    def get_action(self, data_list):
        """Parse a GETACTION message.

        Updates pot size, street/game state, board cards, the rolling list of
        last actions, pre- and post-flop betting statistics, and the list of
        currently legal actions.
        """
        self.current_pot_size = int(data_list[1])
        self.opc = self.starting_stack_size - self.current_pot_size
        self.time_bank = float(data_list[-1])
        num_board_cards = int(data_list[2])
        self.street_dict[str(num_board_cards)] += 1
        # Street transitions: when a new board-card count appears, clear the
        # per-street betting flags and advance the state machine.
        if self.current_game_state == 'PREFLOP':
            if self.street_dict['3'] > 0 and self.street_dict['4'] == 0:
                self.has_two_bet = False
                self.opponent_has_two_bet = False
                self.has_three_bet = False
                self.opponent_has_three_bet = False
                self.has_four_bet = False
                self.opponent_has_four_bet = False
                self.has_bet_aggressively = False
                self.current_game_state = 'FLOPTURN'
                self.num_flop += 1
        elif self.current_game_state == 'FLOPTURN':
            if self.street_dict['4'] > 0 and self.street_dict['5'] == 0:
                self.has_two_bet = False
                self.opponent_has_two_bet = False
                self.has_three_bet = False
                self.opponent_has_three_bet = False
                self.has_four_bet = False
                self.opponent_has_four_bet = False
                self.has_bet_aggressively = False
                self.current_game_state = 'TURNRIVER'
        elif self.current_game_state == 'TURNRIVER':
            if self.street_dict['5'] > 0:
                self.has_two_bet = False
                self.opponent_has_two_bet = False
                self.has_three_bet = False
                self.opponent_has_three_bet = False
                self.has_four_bet = False
                self.opponent_has_four_bet = False
                self.has_bet_aggressively = False
                self.current_game_state = 'POSTRIVER'
        # Accumulate any newly revealed board cards.
        for i in range(num_board_cards):
            board_card = data_list[3 + i]
            if board_card not in self.board_cards:
                self.board_cards.append(data_list[3 + i])
        if num_board_cards > 0:
            # Re-evaluate our hand against the current board.
            board_cards = []
            for board_card in self.board_cards:
                board_cards.append(Card.new(board_card))
            hand = []
            for card in self.current_hand:
                hand.append(Card.new(card))
            self.hand_score = Evaluator().evaluate(hand, board_cards)
            self.hand_class = Evaluator().class_to_string(Evaluator().get_rank_class(self.hand_score))
        index = 3 + num_board_cards
        num_last_actions = int(data_list[index])
        index += 1
        current_last_actions = []
        for i in range(num_last_actions):
            current_last_actions.append(data_list[index + i])
        self.last_actions.append(current_last_actions)
        if self.discard:
            # A discard was requested last street; swap the replaced hole
            # card in (action format: "DISCARD:<old>:<new>...").
            for action in current_last_actions:
                if 'DISCARD' in action and self.name in action:
                    old_card = action[8:10]
                    new_card = action[11:13]
                    self.current_hand[self.current_hand.index(old_card)] = new_card
                    self.current_hand_strength = Hand.hand_win_odds(self.current_hand)
                    self.discard = False
                    break
        if self.current_game_state == 'PREFLOP':
            # Pot size 4 is taken to mean only the blinds are in — a limped
            # pot (NOTE: assumes big blind of 2; verify against game config).
            if self.current_pot_size == 4:
                if self.button:
                    self.vpip += 1
                    self.has_called = True
                else:
                    self.opponent_vpip += 1
                    self.opponent_has_called = True
            else:
                for action in current_last_actions:
                    if 'RAISE' in action:
                        round_num = self.street_dict['0']
                        if round_num == 1:
                            self.opponent_pfr += 1
                            self.opponent_vpip += 1
                            self.opponent_has_two_bet = True
                        elif round_num == 2:
                            if self.button:
                                if self.name in action:
                                    self.pfr += 1
                                    self.vpip += 1
                                    self.has_two_bet = True
                                else:
                                    self.opponent_pfr += 1
                                    self.opponent_vpip += 1
                                    self.opponent_has_three_bet = True
                            else:
                                if self.name in action:
                                    self.pfr += 1
                                    self.vpip += 1
                                    self.has_three_bet = True
                                else:
                                    self.opponent_pfr += 1
                                    self.opponent_vpip += 1
                                    self.opponent_has_four_bet = True
                        elif round_num == 3:
                            if self.name in action:
                                self.pfr += 1
                                self.vpip += 1
                    elif 'CALL' in action:
                        if self.name in action:
                            self.vpip += 1
                        else:
                            self.opponent_vpip += 1
        elif self.current_game_state == 'FLOPTURN':
            round_num = self.street_dict['3']
            if round_num == 1:
                self.discard = True
            elif round_num == 2:
                for action in current_last_actions:
                    if 'BET' in action:
                        self.opponent_c_bet += 1
                        break
            elif round_num == 3:
                for action in current_last_actions:
                    if 'BET' in action:
                        if self.name in action:
                            self.c_bet += 1
                        else:
                            self.opponent_c_bet += 1
                    elif 'RAISE' in action:
                        if self.name in action:
                            self.has_two_bet = True
                        else:
                            if self.button:
                                self.opponent_has_three_bet = True
                            else:
                                self.opponent_has_two_bet = True
            elif round_num == 4:
                for action in current_last_actions:
                    if 'RAISE' in action:
                        if self.name in action:
                            if self.button:
                                self.has_four_bet = True
                            else:
                                self.has_three_bet = True
                            break
        elif self.current_game_state == 'TURNRIVER':
            round_num = self.street_dict['4']
            if round_num == 1:
                self.discard = True
                for action in current_last_actions:
                    if 'BET' in action:
                        if self.name in action:
                            self.c_bet += 1
                        else:
                            self.opponent_c_bet += 1
                        break
            elif round_num == 2:
                for action in current_last_actions:
                    if 'BET' in action:
                        self.opponent_c_bet += 1
                        break
            elif round_num == 3:
                for action in current_last_actions:
                    if 'BET' in action:
                        if self.name in action:
                            self.c_bet += 1
                        else:
                            self.opponent_c_bet += 1
                    elif 'RAISE' in action:
                        if self.name in action:
                            self.has_two_bet = True
                        else:
                            if self.button:
                                self.opponent_has_three_bet = True
                            else:
                                self.opponent_has_two_bet = True
            elif round_num == 4:
                for action in current_last_actions:
                    if 'RAISE' in action:
                        if self.name in action:
                            if self.button:
                                self.has_four_bet = True
                            else:
                                self.has_three_bet = True
                            break
        elif self.current_game_state == 'POSTRIVER':
            round_num = self.street_dict['5']
            if round_num == 1:
                for action in current_last_actions:
                    if 'BET' in action:
                        if self.name in action:
                            self.double_barrel += 1
                        else:
                            self.opponent_double_barrel += 1
                        break
        index += num_last_actions
        num_legal_actions = int(data_list[index])
        index += 1
        self.current_legal_actions = []
        for i in range(num_legal_actions):
            self.current_legal_actions.append(data_list[index + i])

    def legal_action(self, action):
        """Check whether *action* is currently legal.

        Returns [min, max] for 'BET'/'RAISE' (parsed from "ACTION:min:max"),
        the amount to call for 'CALL' after an opponent raise, True when the
        action is legal with no amount attached, and None when illegal.
        """
        for legal_action in self.current_legal_actions:
            if action in legal_action:
                if action == 'BET' or action == 'RAISE':
                    index = legal_action.index(':') + 1
                    sub = legal_action[index:]
                    index = sub.index(':')
                    return [int(sub[:index]), int(sub[index+1:])]
                if action == 'CALL':
                    for last_action in self.last_actions[-1]:
                        if 'RAISE' in last_action and self.opponent_name in last_action:
                            sub = last_action[last_action.index(':')+1:]
                            return int(sub[:sub.index(':')])
                return True
        return None

    def hand_over(self, data_list):
        """Process a HANDOVER message: update fold/win/showdown statistics."""
        # Bug fix: these tokens arrive as strings (cf. the int() casts in
        # get_action); without the casts, `4 + num_board_cards` raises
        # TypeError and range() rejects the action count.
        num_board_cards = int(data_list[3])
        index = 4 + num_board_cards
        num_last_actions = int(data_list[index])
        # Bug fix: step past the count token itself, mirroring get_action's
        # parsing; previously the count was read back as the first "action"
        # and the final action was dropped.
        index += 1
        current_last_actions = []
        for i in range(num_last_actions):
            current_last_actions.append(data_list[index+i])
        if self.current_game_state == 'PREFLOP':
            for action in current_last_actions:
                if 'FOLD' in action and self.opponent_name in action:
                    if self.button:
                        for last_action in self.last_actions[-1]:
                            if 'RAISE' in last_action and self.name in last_action:
                                self.opponent_fold_pfr += 1
                                if self.has_three_bet and not self.has_four_bet:
                                    self.opponent_fold_three_bet += 1
                        self.num_wins += 1
                    else:
                        for last_action in current_last_actions:
                            if 'RAISE' in last_action and self.name in last_action:
                                self.opponent_fold_pfr += 1
                                if self.has_three_bet and not self.has_four_bet:
                                    self.opponent_fold_three_bet += 1
                        self.num_wins += 1
        elif self.current_game_state == 'FLOPTURN':
            for action in current_last_actions:
                if self.button:
                    if 'FOLD' in action and self.opponent_name in action:
                        for last_action in self.last_actions[-1]:
                            if 'BET' in last_action and self.name in last_action:
                                self.opponent_fold_c_bet += 1
                        self.num_wins += 1
                else:
                    if 'FOLD' in action and self.opponent_name in action:
                        for last_action in current_last_actions:
                            if 'BET' in last_action and self.name in last_action:
                                self.opponent_fold_c_bet += 1
                        self.num_wins += 1
        elif self.current_game_state == 'POSTRIVER':
            for action in current_last_actions:
                if 'WIN' in action:
                    if self.name in action:
                        self.num_wins += 1
                        for last_action in current_last_actions:
                            if 'SHOW' in last_action:
                                self.showdown += 1
                                self.showdown_win += 1
                                break
                    break
| [
"deuces.deuces.Card.new",
"deuces.deuces.Evaluator",
"HandRankings.hand_win_odds"
]
| [((2278, 2315), 'HandRankings.hand_win_odds', 'Hand.hand_win_odds', (['self.current_hand'], {}), '(self.current_hand)\n', (2296, 2315), True, 'import HandRankings as Hand\n'), ((5028, 5048), 'deuces.deuces.Card.new', 'Card.new', (['board_card'], {}), '(board_card)\n', (5036, 5048), False, 'from deuces.deuces import Card, Evaluator\n'), ((5143, 5157), 'deuces.deuces.Card.new', 'Card.new', (['card'], {}), '(card)\n', (5151, 5157), False, 'from deuces.deuces import Card, Evaluator\n'), ((5189, 5200), 'deuces.deuces.Evaluator', 'Evaluator', ([], {}), '()\n', (5198, 5200), False, 'from deuces.deuces import Card, Evaluator\n'), ((5259, 5270), 'deuces.deuces.Evaluator', 'Evaluator', ([], {}), '()\n', (5268, 5270), False, 'from deuces.deuces import Card, Evaluator\n'), ((5990, 6027), 'HandRankings.hand_win_odds', 'Hand.hand_win_odds', (['self.current_hand'], {}), '(self.current_hand)\n', (6008, 6027), True, 'import HandRankings as Hand\n'), ((5287, 5298), 'deuces.deuces.Evaluator', 'Evaluator', ([], {}), '()\n', (5296, 5298), False, 'from deuces.deuces import Card, Evaluator\n')] |
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
class Todo(models.Model):
    """A single to-do item owned by a user, ordered by its deadline."""

    time_add = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=64)
    detail = models.TextField(blank=True)
    deadline = models.DateTimeField(blank=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    done = models.BooleanField(default=False)

    def __str__(self):
        """Identify the item by its title."""
        return self.title

    def seconds_left(self):
        """Return the number of seconds from now until the deadline (negative if past)."""
        remaining = self.deadline - timezone.now()
        return remaining.total_seconds()

    def state(self):
        """Return 'Done', 'Todo' (time remains) or 'Exceeded' (deadline passed)."""
        if self.done:
            return 'Done'
        return 'Todo' if self.seconds_left() > 0 else 'Exceeded'

    class Meta:
        ordering = ['deadline']
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.utils.timezone.now",
"django.db.models.DateTimeField",
"django.db.models.CharField"
]
| [((150, 189), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (170, 189), False, 'from django.db import models\n'), ((202, 233), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (218, 233), False, 'from django.db import models\n'), ((247, 275), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (263, 275), False, 'from django.db import models\n'), ((291, 323), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)'}), '(blank=True)\n', (311, 323), False, 'from django.db import models\n'), ((335, 384), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (352, 384), False, 'from django.db import models\n'), ((396, 430), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (415, 430), False, 'from django.db import models\n'), ((542, 556), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (554, 556), False, 'from django.utils import timezone\n')] |
import os
from trame import change, update_state
from trame.layouts import SinglePageWithDrawer
from trame.html import vtk, vuetify, widgets
from vtkmodules.vtkCommonDataModel import vtkDataObject
from vtkmodules.vtkFiltersCore import vtkContourFilter
from vtkmodules.vtkIOXML import vtkXMLUnstructuredGridReader
from vtkmodules.vtkRenderingAnnotation import vtkCubeAxesActor
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkDataSetMapper,
vtkRenderer,
vtkRenderWindow,
vtkRenderWindowInteractor,
)
# Required for interacter factory initialization
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleSwitch # noqa
# Required for remote rendering factory initialization, not necessary for
# local rendering, but doesn't hurt to include it
import vtkmodules.vtkRenderingOpenGL2 # noqa
# Directory containing this script; used to resolve the bundled data file.
CURRENT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
class Representation:
    """Mesh display modes; values match the VSelect 'representations' items."""

    Points = 0
    Wireframe = 1
    Surface = 2
    SurfaceWithEdges = 3
class LookupTable:
    """Color-map presets; values match the VSelect 'colormaps' items."""

    Rainbow = 0
    Inverted_Rainbow = 1
    Greyscale = 2
    Inverted_Greyscale = 3
# -----------------------------------------------------------------------------
# VTK pipeline
# -----------------------------------------------------------------------------
# Renderer -> render window -> interactor (trackball camera style).
renderer = vtkRenderer()
renderWindow = vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera()
# Read Data
reader = vtkXMLUnstructuredGridReader()
reader.SetFileName(os.path.join(CURRENT_DIRECTORY, "../data/disk_out_ref.vtu"))
reader.Update()
# Extract Array/Field information
# dataset_arrays: one dict per point/cell array (name, index, value range,
# and whether it is point- or cell-associated); also feeds the UI selects.
dataset_arrays = []
fields = [
    (reader.GetOutput().GetPointData(), vtkDataObject.FIELD_ASSOCIATION_POINTS),
    (reader.GetOutput().GetCellData(), vtkDataObject.FIELD_ASSOCIATION_CELLS),
]
for field in fields:
    field_arrays, association = field
    for i in range(field_arrays.GetNumberOfArrays()):
        array = field_arrays.GetArray(i)
        array_range = array.GetRange()
        dataset_arrays.append(
            {
                "text": array.GetName(),
                "value": i,
                "range": list(array_range),
                "type": association,
            }
        )
default_array = dataset_arrays[0]
default_min, default_max = default_array.get("range")
# Mesh
mesh_mapper = vtkDataSetMapper()
mesh_mapper.SetInputConnection(reader.GetOutputPort())
mesh_actor = vtkActor()
mesh_actor.SetMapper(mesh_mapper)
renderer.AddActor(mesh_actor)
# Mesh: Setup default representation to surface
mesh_actor.GetProperty().SetRepresentationToSurface()
mesh_actor.GetProperty().SetPointSize(1)
mesh_actor.GetProperty().EdgeVisibilityOff()
# Mesh: Apply rainbow color map
mesh_lut = mesh_mapper.GetLookupTable()
mesh_lut.SetHueRange(0.666, 0.0)
mesh_lut.SetSaturationRange(1.0, 1.0)
mesh_lut.SetValueRange(1.0, 1.0)
mesh_lut.Build()
# Mesh: Color by default array
mesh_mapper.SelectColorArray(default_array.get("text"))
mesh_mapper.GetLookupTable().SetRange(default_min, default_max)
# Scalar mode must match the array's point/cell association.
if default_array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
    mesh_mapper.SetScalarModeToUsePointFieldData()
else:
    mesh_mapper.SetScalarModeToUseCellFieldData()
mesh_mapper.SetScalarVisibility(True)
mesh_mapper.SetUseLookupTableScalarRange(True)
# Contour
contour = vtkContourFilter()
contour.SetInputConnection(reader.GetOutputPort())
contour_mapper = vtkDataSetMapper()
contour_mapper.SetInputConnection(contour.GetOutputPort())
contour_actor = vtkActor()
contour_actor.SetMapper(contour_mapper)
renderer.AddActor(contour_actor)
# Contour: ContourBy default array
# Default iso-value: midpoint of the default array's range.
contour_value = 0.5 * (default_max + default_min)
contour.SetInputArrayToProcess(
    0, 0, 0, default_array.get("type"), default_array.get("text")
)
contour.SetValue(0, contour_value)
# Contour: Setup default representation to surface
contour_actor.GetProperty().SetRepresentationToSurface()
contour_actor.GetProperty().SetPointSize(1)
contour_actor.GetProperty().EdgeVisibilityOff()
# Contour: Apply rainbow color map
contour_lut = contour_mapper.GetLookupTable()
contour_lut.SetHueRange(0.666, 0.0)
contour_lut.SetSaturationRange(1.0, 1.0)
contour_lut.SetValueRange(1.0, 1.0)
contour_lut.Build()
# Contour: Color by default array
contour_mapper.GetLookupTable().SetRange(default_min, default_max)
contour_mapper.SelectColorArray(default_array.get("text"))
if default_array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
    contour_mapper.SetScalarModeToUsePointFieldData()
else:
    contour_mapper.SetScalarModeToUseCellFieldData()
contour_mapper.SetScalarVisibility(True)
contour_mapper.SetUseLookupTableScalarRange(True)
# Cube Axes
cube_axes = vtkCubeAxesActor()
renderer.AddActor(cube_axes)
# Cube Axes: Boundaries, camera, and styling
cube_axes.SetBounds(mesh_actor.GetBounds())
cube_axes.SetCamera(renderer.GetActiveCamera())
cube_axes.SetXLabelFormat("%6.1f")
cube_axes.SetYLabelFormat("%6.1f")
cube_axes.SetZLabelFormat("%6.1f")
cube_axes.SetFlyModeToOuterEdges()
renderer.ResetCamera()
# -----------------------------------------------------------------------------
# trame Views
# -----------------------------------------------------------------------------
# Two interchangeable views over the same render window; html_view holds
# whichever one is currently active (toggled by the local_vs_remote checkbox).
local_view = vtk.VtkLocalView(renderWindow)
remote_view = vtk.VtkRemoteView(renderWindow, interactive_ratio=(1,))
html_view = local_view
# -----------------------------------------------------------------------------
# Callbacks
# -----------------------------------------------------------------------------
def update_view(**kwargs):
    """Push the current scene to whichever view is active."""
    html_view.update()
# -----------------------------------------------------------------------------
# Toolbar Callbacks
# -----------------------------------------------------------------------------
@change("cube_axes_visibility")
def update_cube_axes_visibility(cube_axes_visibility, **kwargs):
    """Show or hide the cube-axes actor when the toolbar checkbox changes."""
    cube_axes.SetVisibility(cube_axes_visibility)
    update_view()
@change("local_vs_remote")
def update_local_vs_remote(local_vs_remote, **kwargs):
    """Swap between local and remote rendering views and re-mount the layout."""
    # Switch html_view
    global html_view
    if local_vs_remote:
        html_view = local_view
    else:
        html_view = remote_view
    # Update layout
    layout.content.children[0].children[0] = html_view
    layout.flush_content()
    # Update View
    update_view()
# -----------------------------------------------------------------------------
# Representation Callbacks
# -----------------------------------------------------------------------------
def update_representation(actor, mode):
    """Apply one of the Representation modes to *actor*'s display property.

    Unknown modes leave the property untouched.
    """
    prop = actor.GetProperty()
    if mode == Representation.Points:
        prop.SetRepresentationToPoints()
        prop.SetPointSize(5)
        prop.EdgeVisibilityOff()
        return
    if mode == Representation.Wireframe:
        prop.SetRepresentationToWireframe()
        prop.SetPointSize(1)
        prop.EdgeVisibilityOff()
        return
    if mode == Representation.Surface:
        prop.SetRepresentationToSurface()
        prop.SetPointSize(1)
        prop.EdgeVisibilityOff()
        return
    if mode == Representation.SurfaceWithEdges:
        prop.SetRepresentationToSurface()
        prop.SetPointSize(1)
        prop.EdgeVisibilityOn()
@change("mesh_representation")
def update_mesh_representation(mesh_representation, **kwargs):
    """Apply the selected representation mode to the mesh actor."""
    update_representation(mesh_actor, mesh_representation)
    update_view()
@change("contour_representation")
def update_contour_representation(contour_representation, **kwargs):
    """Apply the selected representation mode to the contour actor."""
    update_representation(contour_actor, contour_representation)
    update_view()
# -----------------------------------------------------------------------------
# ColorBy Callbacks
# -----------------------------------------------------------------------------
def color_by_array(actor, array):
    """Color *actor* by the dataset array described by *array*.

    *array* is one of the ``dataset_arrays`` dicts (keys: "text", "value",
    "range", "type").
    """
    _min, _max = array.get("range")
    mapper = actor.GetMapper()
    mapper.SelectColorArray(array.get("text"))
    mapper.GetLookupTable().SetRange(_min, _max)
    # Bug fix: the field-association branch previously mutated the global
    # mesh_mapper regardless of which actor was passed, and then forced
    # point-field data on this mapper unconditionally.  Pick point vs cell
    # field data on *this* actor's mapper, matching the module-level setup.
    if array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
        mapper.SetScalarModeToUsePointFieldData()
    else:
        mapper.SetScalarModeToUseCellFieldData()
    mapper.SetScalarVisibility(True)
    mapper.SetUseLookupTableScalarRange(True)
@change("mesh_color_array_idx")
def update_mesh_color_by_name(mesh_color_array_idx, **kwargs):
    """Re-color the mesh by the array selected in the UI."""
    array = dataset_arrays[mesh_color_array_idx]
    color_by_array(mesh_actor, array)
    update_view()
@change("contour_color_array_idx")
def update_contour_color_by_name(contour_color_array_idx, **kwargs):
    """Re-color the contour by the array selected in the UI."""
    array = dataset_arrays[contour_color_array_idx]
    color_by_array(contour_actor, array)
    update_view()
# -----------------------------------------------------------------------------
# ColorMap Callbacks
# -----------------------------------------------------------------------------
def use_preset(actor, preset):
    """Apply a LookupTable color preset to *actor*'s lookup table.

    Unknown presets leave the hue/saturation/value ranges untouched; the
    table is rebuilt either way.
    """
    # preset -> (hue range, saturation range, value range)
    _PRESET_RANGES = {
        LookupTable.Rainbow: ((0.666, 0.0), (1.0, 1.0), (1.0, 1.0)),
        LookupTable.Inverted_Rainbow: ((0.0, 0.666), (1.0, 1.0), (1.0, 1.0)),
        LookupTable.Greyscale: ((0.0, 0.0), (0.0, 0.0), (0.0, 1.0)),
        LookupTable.Inverted_Greyscale: ((0.0, 0.666), (0.0, 0.0), (1.0, 0.0)),
    }
    lut = actor.GetMapper().GetLookupTable()
    if preset in _PRESET_RANGES:
        hue, sat, val = _PRESET_RANGES[preset]
        lut.SetHueRange(*hue)
        lut.SetSaturationRange(*sat)
        lut.SetValueRange(*val)
    lut.Build()
@change("mesh_color_preset")
def update_mesh_color_preset(mesh_color_preset, **kwargs):
    """Apply the selected colormap preset to the mesh."""
    use_preset(mesh_actor, mesh_color_preset)
    update_view()
@change("contour_color_preset")
def update_contour_color_preset(contour_color_preset, **kwargs):
    """Apply the selected colormap preset to the contour."""
    use_preset(contour_actor, contour_color_preset)
    update_view()
# -----------------------------------------------------------------------------
# Opacity Callbacks
# -----------------------------------------------------------------------------
@change("mesh_opacity")
def update_mesh_opacity(mesh_opacity, **kwargs):
    """Set the mesh actor's opacity from the UI slider."""
    mesh_actor.GetProperty().SetOpacity(mesh_opacity)
    update_view()
@change("contour_opacity")
def update_contour_opacity(contour_opacity, **kwargs):
    """Set the contour actor's opacity from the UI slider."""
    contour_actor.GetProperty().SetOpacity(contour_opacity)
    update_view()
# -----------------------------------------------------------------------------
# Contour Callbacks
# -----------------------------------------------------------------------------
@change("contour_by_array_idx")
def update_contour_by(contour_by_array_idx, **kwargs):
    """Re-target the contour filter to a new array and reset the value slider.

    The iso-value defaults to the midpoint of the array's range; the slider
    step is 1% of that range.
    """
    array = dataset_arrays[contour_by_array_idx]
    contour_min, contour_max = array.get("range")
    contour_step = 0.01 * (contour_max - contour_min)
    contour_value = 0.5 * (contour_max + contour_min)
    contour.SetInputArrayToProcess(0, 0, 0, array.get("type"), array.get("text"))
    contour.SetValue(0, contour_value)
    # Update UI
    update_state("contour_min", contour_min)
    update_state("contour_max", contour_max)
    update_state("contour_value", contour_value)
    update_state("contour_step", contour_step)
    # Update View
    update_view()
@change("contour_value")
def update_contour_value(contour_value, **kwargs):
    """Move the contour iso-value when the slider changes."""
    contour.SetValue(0, float(contour_value))
    update_view()
# -----------------------------------------------------------------------------
# Pipeline Widget Callbacks
# -----------------------------------------------------------------------------
# Selection Change
def actives_change(ids):
    """Show the settings card matching the pipeline node selected in the tree.

    Node id "1" is the mesh, "2" the contour; anything else hides both cards.
    """
    selected = ids[0]
    card = {"1": "mesh", "2": "contour"}.get(selected, "nothing")
    update_state("active_ui", card)
# Visibility Change
def visibility_change(event):
    """Toggle an actor's visibility from a pipeline-tree checkbox event.

    *event* is a dict with "id" (node id) and "visible" (new state).
    """
    _id = event["id"]
    _visibility = event["visible"]
    if _id == "1":  # Mesh
        mesh_actor.SetVisibility(_visibility)
    elif _id == "2":  # Contour
        contour_actor.SetVisibility(_visibility)
    update_view()
# -----------------------------------------------------------------------------
# GUI Toolbar Buttons
# -----------------------------------------------------------------------------
def standard_buttons():
    """Build the toolbar controls: cube-axes, theme and local/remote
    checkboxes, plus a reset-camera button."""
    vuetify.VCheckbox(
        v_model=("cube_axes_visibility", True),
        on_icon="mdi-cube-outline",
        off_icon="mdi-cube-off-outline",
        classes="mx-1",
        hide_details=True,
        dense=True,
    )
    vuetify.VCheckbox(
        v_model="$vuetify.theme.dark",
        on_icon="mdi-lightbulb-off-outline",
        off_icon="mdi-lightbulb-outline",
        classes="mx-1",
        hide_details=True,
        dense=True,
    )
    vuetify.VCheckbox(
        v_model=("local_vs_remote", True),
        on_icon="mdi-lan-disconnect",
        off_icon="mdi-lan-connect",
        classes="mx-1",
        hide_details=True,
        dense=True,
    )
    with vuetify.VBtn(icon=True, click="$refs.view.resetCamera()"):
        vuetify.VIcon("mdi-crop-free")
# -----------------------------------------------------------------------------
# GUI Pipelines Widget
# -----------------------------------------------------------------------------
def pipeline_widget():
    """Build the two-node pipeline tree (Mesh -> Contour) with selection and
    visibility callbacks."""
    widgets.GitTree(
        sources=(
            "pipeline",
            [
                {"id": "1", "parent": "0", "visible": 1, "name": "Mesh"},
                {"id": "2", "parent": "1", "visible": 1, "name": "Contour"},
            ],
        ),
        actives_change=(actives_change, "[$event]"),
        visibility_change=(visibility_change, "[$event]"),
    )
# -----------------------------------------------------------------------------
# GUI Cards
# -----------------------------------------------------------------------------
def ui_card(title, ui_name):
    """Create a titled card shown only while ``active_ui == ui_name``.

    Returns the card's content container, to be used as a context manager.
    """
    with vuetify.VCard(v_show=f"active_ui == '{ui_name}'"):
        vuetify.VCardTitle(
            title,
            classes="grey lighten-1 py-1 grey--text text--darken-3",
            style="user-select: none; cursor: pointer",
            hide_details=True,
            dense=True,
        )
        content = vuetify.VCardText(classes="py-2")
    return content
def mesh_card():
    """Build the mesh settings card: representation, color-by, colormap and
    opacity controls."""
    with ui_card(title="Mesh", ui_name="mesh"):
        vuetify.VSelect(
            v_model=("mesh_representation", Representation.Surface),
            items=(
                "representations",
                [
                    {"text": "Points", "value": 0},
                    {"text": "Wireframe", "value": 1},
                    {"text": "Surface", "value": 2},
                    {"text": "SurfaceWithEdges", "value": 3},
                ],
            ),
            label="Representation",
            hide_details=True,
            dense=True,
            outlined=True,
            classes="pt-1",
        )
        with vuetify.VRow(classes="pt-2", dense=True):
            with vuetify.VCol(cols="6"):
                vuetify.VSelect(
                    label="Color by",
                    v_model=("mesh_color_array_idx", 0),
                    items=("array_list", dataset_arrays),
                    hide_details=True,
                    dense=True,
                    outlined=True,
                    classes="pt-1",
                )
            with vuetify.VCol(cols="6"):
                vuetify.VSelect(
                    label="Colormap",
                    v_model=("mesh_color_preset", LookupTable.Rainbow),
                    items=(
                        "colormaps",
                        [
                            {"text": "Rainbow", "value": 0},
                            {"text": "Inv Rainbow", "value": 1},
                            {"text": "Greyscale", "value": 2},
                            {"text": "Inv Greyscale", "value": 3},
                        ],
                    ),
                    hide_details=True,
                    dense=True,
                    outlined=True,
                    classes="pt-1",
                )
        vuetify.VSlider(
            v_model=("mesh_opacity", 1.0),
            min=0,
            max=1,
            step=0.1,
            label="Opacity",
            classes="mt-1",
            hide_details=True,
            dense=True,
        )
def contour_card():
    """Build the contour settings card: contour-by, iso-value slider,
    representation, color-by, colormap and opacity controls."""
    with ui_card(title="Contour", ui_name="contour"):
        vuetify.VSelect(
            label="Contour by",
            v_model=("contour_by_array_idx", 0),
            items=("array_list", dataset_arrays),
            hide_details=True,
            dense=True,
            outlined=True,
            classes="pt-1",
        )
        vuetify.VSlider(
            v_model=("contour_value", contour_value),
            min=("contour_min", default_min),
            max=("contour_max", default_max),
            step=("contour_step", 0.01 * (default_max - default_min)),
            label="Value",
            classes="my-1",
            hide_details=True,
            dense=True,
        )
        vuetify.VSelect(
            v_model=("contour_representation", Representation.Surface),
            items=(
                "representations",
                [
                    {"text": "Points", "value": 0},
                    {"text": "Wireframe", "value": 1},
                    {"text": "Surface", "value": 2},
                    {"text": "SurfaceWithEdges", "value": 3},
                ],
            ),
            label="Representation",
            hide_details=True,
            dense=True,
            outlined=True,
            classes="pt-1",
        )
        with vuetify.VRow(classes="pt-2", dense=True):
            with vuetify.VCol(cols="6"):
                vuetify.VSelect(
                    label="Color by",
                    v_model=("contour_color_array_idx", 0),
                    items=("array_list", dataset_arrays),
                    hide_details=True,
                    dense=True,
                    outlined=True,
                    classes="pt-1",
                )
            with vuetify.VCol(cols="6"):
                vuetify.VSelect(
                    label="Colormap",
                    v_model=("contour_color_preset", LookupTable.Rainbow),
                    items=(
                        "colormaps",
                        [
                            {"text": "Rainbow", "value": 0},
                            {"text": "Inv Rainbow", "value": 1},
                            {"text": "Greyscale", "value": 2},
                            {"text": "Inv Greyscale", "value": 3},
                        ],
                    ),
                    hide_details=True,
                    dense=True,
                    outlined=True,
                    classes="pt-1",
                )
        vuetify.VSlider(
            v_model=("contour_opacity", 1.0),
            min=0,
            max=1,
            step=0.1,
            label="Opacity",
            classes="mt-1",
            hide_details=True,
            dense=True,
        )
# -----------------------------------------------------------------------------
# GUI
# -----------------------------------------------------------------------------
# Assemble the single-page layout: toolbar, pipeline drawer, and the view.
layout = SinglePageWithDrawer("Viewer", on_ready=update_view)
layout.title.set_text("Viewer")
with layout.toolbar:
    # toolbar components
    vuetify.VSpacer()
    vuetify.VDivider(vertical=True, classes="mx-2")
    standard_buttons()
with layout.drawer as drawer:
    # drawer components
    drawer.width = 325
    pipeline_widget()
    vuetify.VDivider(classes="mb-2")
    mesh_card()
    contour_card()
with layout.content:
    # content components
    vuetify.VContainer(
        fluid=True,
        classes="pa-0 fill-height",
        children=[html_view],
    )
# State use to track active ui card
layout.state = {
    "active_ui": None,
}
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    layout.start()
| [
"trame.layouts.SinglePageWithDrawer",
"trame.html.vuetify.VSelect",
"vtkmodules.vtkFiltersCore.vtkContourFilter",
"trame.html.vuetify.VCardTitle",
"vtkmodules.vtkRenderingCore.vtkRenderWindow",
"trame.html.vtk.VtkLocalView",
"trame.change",
"trame.html.vuetify.VContainer",
"vtkmodules.vtkRenderingCore.vtkActor",
"trame.html.vuetify.VRow",
"trame.update_state",
"vtkmodules.vtkRenderingCore.vtkRenderWindowInteractor",
"trame.html.vuetify.VSlider",
"trame.html.vuetify.VIcon",
"trame.html.vuetify.VCol",
"trame.html.vuetify.VDivider",
"trame.html.vuetify.VCardText",
"trame.html.vuetify.VCheckbox",
"os.path.dirname",
"trame.html.vuetify.VBtn",
"trame.html.vtk.VtkRemoteView",
"trame.html.vuetify.VCard",
"vtkmodules.vtkIOXML.vtkXMLUnstructuredGridReader",
"os.path.join",
"vtkmodules.vtkRenderingCore.vtkDataSetMapper",
"vtkmodules.vtkRenderingCore.vtkRenderer",
"trame.html.widgets.GitTree",
"vtkmodules.vtkRenderingAnnotation.vtkCubeAxesActor",
"trame.html.vuetify.VSpacer"
]
| [((1453, 1466), 'vtkmodules.vtkRenderingCore.vtkRenderer', 'vtkRenderer', ([], {}), '()\n', (1464, 1466), False, 'from vtkmodules.vtkRenderingCore import vtkActor, vtkDataSetMapper, vtkRenderer, vtkRenderWindow, vtkRenderWindowInteractor\n'), ((1482, 1499), 'vtkmodules.vtkRenderingCore.vtkRenderWindow', 'vtkRenderWindow', ([], {}), '()\n', (1497, 1499), False, 'from vtkmodules.vtkRenderingCore import vtkActor, vtkDataSetMapper, vtkRenderer, vtkRenderWindow, vtkRenderWindowInteractor\n'), ((1561, 1588), 'vtkmodules.vtkRenderingCore.vtkRenderWindowInteractor', 'vtkRenderWindowInteractor', ([], {}), '()\n', (1586, 1588), False, 'from vtkmodules.vtkRenderingCore import vtkActor, vtkDataSetMapper, vtkRenderer, vtkRenderWindow, vtkRenderWindowInteractor\n'), ((1743, 1773), 'vtkmodules.vtkIOXML.vtkXMLUnstructuredGridReader', 'vtkXMLUnstructuredGridReader', ([], {}), '()\n', (1771, 1773), False, 'from vtkmodules.vtkIOXML import vtkXMLUnstructuredGridReader\n'), ((2620, 2638), 'vtkmodules.vtkRenderingCore.vtkDataSetMapper', 'vtkDataSetMapper', ([], {}), '()\n', (2636, 2638), False, 'from vtkmodules.vtkRenderingCore import vtkActor, vtkDataSetMapper, vtkRenderer, vtkRenderWindow, vtkRenderWindowInteractor\n'), ((2707, 2717), 'vtkmodules.vtkRenderingCore.vtkActor', 'vtkActor', ([], {}), '()\n', (2715, 2717), False, 'from vtkmodules.vtkRenderingCore import vtkActor, vtkDataSetMapper, vtkRenderer, vtkRenderWindow, vtkRenderWindowInteractor\n'), ((3602, 3620), 'vtkmodules.vtkFiltersCore.vtkContourFilter', 'vtkContourFilter', ([], {}), '()\n', (3618, 3620), False, 'from vtkmodules.vtkFiltersCore import vtkContourFilter\n'), ((3689, 3707), 'vtkmodules.vtkRenderingCore.vtkDataSetMapper', 'vtkDataSetMapper', ([], {}), '()\n', (3705, 3707), False, 'from vtkmodules.vtkRenderingCore import vtkActor, vtkDataSetMapper, vtkRenderer, vtkRenderWindow, vtkRenderWindowInteractor\n'), ((3783, 3793), 'vtkmodules.vtkRenderingCore.vtkActor', 'vtkActor', ([], {}), '()\n', (3791, 3793), False, 
'from vtkmodules.vtkRenderingCore import vtkActor, vtkDataSetMapper, vtkRenderer, vtkRenderWindow, vtkRenderWindowInteractor\n'), ((4966, 4984), 'vtkmodules.vtkRenderingAnnotation.vtkCubeAxesActor', 'vtkCubeAxesActor', ([], {}), '()\n', (4982, 4984), False, 'from vtkmodules.vtkRenderingAnnotation import vtkCubeAxesActor\n'), ((5505, 5535), 'trame.html.vtk.VtkLocalView', 'vtk.VtkLocalView', (['renderWindow'], {}), '(renderWindow)\n', (5521, 5535), False, 'from trame.html import vtk, vuetify, widgets\n'), ((5550, 5605), 'trame.html.vtk.VtkRemoteView', 'vtk.VtkRemoteView', (['renderWindow'], {'interactive_ratio': '(1,)'}), '(renderWindow, interactive_ratio=(1,))\n', (5567, 5605), False, 'from trame.html import vtk, vuetify, widgets\n'), ((6039, 6069), 'trame.change', 'change', (['"""cube_axes_visibility"""'], {}), "('cube_axes_visibility')\n", (6045, 6069), False, 'from trame import change, update_state\n'), ((6206, 6231), 'trame.change', 'change', (['"""local_vs_remote"""'], {}), "('local_vs_remote')\n", (6212, 6231), False, 'from trame import change, update_state\n'), ((7473, 7502), 'trame.change', 'change', (['"""mesh_representation"""'], {}), "('mesh_representation')\n", (7479, 7502), False, 'from trame import change, update_state\n'), ((7646, 7678), 'trame.change', 'change', (['"""contour_representation"""'], {}), "('contour_representation')\n", (7652, 7678), False, 'from trame import change, update_state\n'), ((8531, 8561), 'trame.change', 'change', (['"""mesh_color_array_idx"""'], {}), "('mesh_color_array_idx')\n", (8537, 8561), False, 'from trame import change, update_state\n'), ((8733, 8766), 'trame.change', 'change', (['"""contour_color_array_idx"""'], {}), "('contour_color_array_idx')\n", (8739, 8766), False, 'from trame import change, update_state\n'), ((9857, 9884), 'trame.change', 'change', (['"""mesh_color_preset"""'], {}), "('mesh_color_preset')\n", (9863, 9884), False, 'from trame import change, update_state\n'), ((10011, 10041), 'trame.change', 
'change', (['"""contour_color_preset"""'], {}), "('contour_color_preset')\n", (10017, 10041), False, 'from trame import change, update_state\n'), ((10362, 10384), 'trame.change', 'change', (['"""mesh_opacity"""'], {}), "('mesh_opacity')\n", (10368, 10384), False, 'from trame import change, update_state\n'), ((10509, 10534), 'trame.change', 'change', (['"""contour_opacity"""'], {}), "('contour_opacity')\n", (10515, 10534), False, 'from trame import change, update_state\n'), ((10853, 10883), 'trame.change', 'change', (['"""contour_by_array_idx"""'], {}), "('contour_by_array_idx')\n", (10859, 10883), False, 'from trame import change, update_state\n'), ((11510, 11533), 'trame.change', 'change', (['"""contour_value"""'], {}), "('contour_value')\n", (11516, 11533), False, 'from trame import change, update_state\n'), ((19519, 19571), 'trame.layouts.SinglePageWithDrawer', 'SinglePageWithDrawer', (['"""Viewer"""'], {'on_ready': 'update_view'}), "('Viewer', on_ready=update_view)\n", (19539, 19571), False, 'from trame.layouts import SinglePageWithDrawer\n'), ((861, 886), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (876, 886), False, 'import os\n'), ((1793, 1852), 'os.path.join', 'os.path.join', (['CURRENT_DIRECTORY', '"""../data/disk_out_ref.vtu"""'], {}), "(CURRENT_DIRECTORY, '../data/disk_out_ref.vtu')\n", (1805, 1852), False, 'import os\n'), ((11288, 11328), 'trame.update_state', 'update_state', (['"""contour_min"""', 'contour_min'], {}), "('contour_min', contour_min)\n", (11300, 11328), False, 'from trame import change, update_state\n'), ((11333, 11373), 'trame.update_state', 'update_state', (['"""contour_max"""', 'contour_max'], {}), "('contour_max', contour_max)\n", (11345, 11373), False, 'from trame import change, update_state\n'), ((11378, 11422), 'trame.update_state', 'update_state', (['"""contour_value"""', 'contour_value'], {}), "('contour_value', contour_value)\n", (11390, 11422), False, 'from trame import change, update_state\n'), 
((11427, 11469), 'trame.update_state', 'update_state', (['"""contour_step"""', 'contour_step'], {}), "('contour_step', contour_step)\n", (11439, 11469), False, 'from trame import change, update_state\n'), ((12598, 12772), 'trame.html.vuetify.VCheckbox', 'vuetify.VCheckbox', ([], {'v_model': "('cube_axes_visibility', True)", 'on_icon': '"""mdi-cube-outline"""', 'off_icon': '"""mdi-cube-off-outline"""', 'classes': '"""mx-1"""', 'hide_details': '(True)', 'dense': '(True)'}), "(v_model=('cube_axes_visibility', True), on_icon=\n 'mdi-cube-outline', off_icon='mdi-cube-off-outline', classes='mx-1',\n hide_details=True, dense=True)\n", (12615, 12772), False, 'from trame.html import vtk, vuetify, widgets\n'), ((12823, 12999), 'trame.html.vuetify.VCheckbox', 'vuetify.VCheckbox', ([], {'v_model': '"""$vuetify.theme.dark"""', 'on_icon': '"""mdi-lightbulb-off-outline"""', 'off_icon': '"""mdi-lightbulb-outline"""', 'classes': '"""mx-1"""', 'hide_details': '(True)', 'dense': '(True)'}), "(v_model='$vuetify.theme.dark', on_icon=\n 'mdi-lightbulb-off-outline', off_icon='mdi-lightbulb-outline', classes=\n 'mx-1', hide_details=True, dense=True)\n", (12840, 12999), False, 'from trame.html import vtk, vuetify, widgets\n'), ((13049, 13215), 'trame.html.vuetify.VCheckbox', 'vuetify.VCheckbox', ([], {'v_model': "('local_vs_remote', True)", 'on_icon': '"""mdi-lan-disconnect"""', 'off_icon': '"""mdi-lan-connect"""', 'classes': '"""mx-1"""', 'hide_details': '(True)', 'dense': '(True)'}), "(v_model=('local_vs_remote', True), on_icon=\n 'mdi-lan-disconnect', off_icon='mdi-lan-connect', classes='mx-1',\n hide_details=True, dense=True)\n", (13066, 13215), False, 'from trame.html import vtk, vuetify, widgets\n'), ((13583, 13849), 'trame.html.widgets.GitTree', 'widgets.GitTree', ([], {'sources': "('pipeline', [{'id': '1', 'parent': '0', 'visible': 1, 'name': 'Mesh'}, {\n 'id': '2', 'parent': '1', 'visible': 1, 'name': 'Contour'}])", 'actives_change': "(actives_change, '[$event]')", 
'visibility_change': "(visibility_change, '[$event]')"}), "(sources=('pipeline', [{'id': '1', 'parent': '0', 'visible':\n 1, 'name': 'Mesh'}, {'id': '2', 'parent': '1', 'visible': 1, 'name':\n 'Contour'}]), actives_change=(actives_change, '[$event]'),\n visibility_change=(visibility_change, '[$event]'))\n", (13598, 13849), False, 'from trame.html import vtk, vuetify, widgets\n'), ((19655, 19672), 'trame.html.vuetify.VSpacer', 'vuetify.VSpacer', ([], {}), '()\n', (19670, 19672), False, 'from trame.html import vtk, vuetify, widgets\n'), ((19677, 19724), 'trame.html.vuetify.VDivider', 'vuetify.VDivider', ([], {'vertical': '(True)', 'classes': '"""mx-2"""'}), "(vertical=True, classes='mx-2')\n", (19693, 19724), False, 'from trame.html import vtk, vuetify, widgets\n'), ((19852, 19884), 'trame.html.vuetify.VDivider', 'vuetify.VDivider', ([], {'classes': '"""mb-2"""'}), "(classes='mb-2')\n", (19868, 19884), False, 'from trame.html import vtk, vuetify, widgets\n'), ((19971, 20056), 'trame.html.vuetify.VContainer', 'vuetify.VContainer', ([], {'fluid': '(True)', 'classes': '"""pa-0 fill-height"""', 'children': '[html_view]'}), "(fluid=True, classes='pa-0 fill-height', children=[html_view]\n )\n", (19989, 20056), False, 'from trame.html import vtk, vuetify, widgets\n'), ((11936, 11969), 'trame.update_state', 'update_state', (['"""active_ui"""', '"""mesh"""'], {}), "('active_ui', 'mesh')\n", (11948, 11969), False, 'from trame import change, update_state\n'), ((13271, 13328), 'trame.html.vuetify.VBtn', 'vuetify.VBtn', ([], {'icon': '(True)', 'click': '"""$refs.view.resetCamera()"""'}), "(icon=True, click='$refs.view.resetCamera()')\n", (13283, 13328), False, 'from trame.html import vtk, vuetify, widgets\n'), ((13338, 13368), 'trame.html.vuetify.VIcon', 'vuetify.VIcon', (['"""mdi-crop-free"""'], {}), "('mdi-crop-free')\n", (13351, 13368), False, 'from trame.html import vtk, vuetify, widgets\n'), ((14165, 14214), 'trame.html.vuetify.VCard', 'vuetify.VCard', ([], {'v_show': 
'f"""active_ui == \'{ui_name}\'"""'}), '(v_show=f"active_ui == \'{ui_name}\'")\n', (14178, 14214), False, 'from trame.html import vtk, vuetify, widgets\n'), ((14224, 14391), 'trame.html.vuetify.VCardTitle', 'vuetify.VCardTitle', (['title'], {'classes': '"""grey lighten-1 py-1 grey--text text--darken-3"""', 'style': '"""user-select: none; cursor: pointer"""', 'hide_details': '(True)', 'dense': '(True)'}), "(title, classes=\n 'grey lighten-1 py-1 grey--text text--darken-3', style=\n 'user-select: none; cursor: pointer', hide_details=True, dense=True)\n", (14242, 14391), False, 'from trame.html import vtk, vuetify, widgets\n'), ((14471, 14504), 'trame.html.vuetify.VCardText', 'vuetify.VCardText', ([], {'classes': '"""py-2"""'}), "(classes='py-2')\n", (14488, 14504), False, 'from trame.html import vtk, vuetify, widgets\n'), ((14599, 14945), 'trame.html.vuetify.VSelect', 'vuetify.VSelect', ([], {'v_model': "('mesh_representation', Representation.Surface)", 'items': "('representations', [{'text': 'Points', 'value': 0}, {'text': 'Wireframe',\n 'value': 1}, {'text': 'Surface', 'value': 2}, {'text':\n 'SurfaceWithEdges', 'value': 3}])", 'label': '"""Representation"""', 'hide_details': '(True)', 'dense': '(True)', 'outlined': '(True)', 'classes': '"""pt-1"""'}), "(v_model=('mesh_representation', Representation.Surface),\n items=('representations', [{'text': 'Points', 'value': 0}, {'text':\n 'Wireframe', 'value': 1}, {'text': 'Surface', 'value': 2}, {'text':\n 'SurfaceWithEdges', 'value': 3}]), label='Representation', hide_details\n =True, dense=True, outlined=True, classes='pt-1')\n", (14614, 14945), False, 'from trame.html import vtk, vuetify, widgets\n'), ((16361, 16499), 'trame.html.vuetify.VSlider', 'vuetify.VSlider', ([], {'v_model': "('mesh_opacity', 1.0)", 'min': '(0)', 'max': '(1)', 'step': '(0.1)', 'label': '"""Opacity"""', 'classes': '"""mt-1"""', 'hide_details': '(True)', 'dense': '(True)'}), "(v_model=('mesh_opacity', 1.0), min=0, max=1, step=0.1,\n 
label='Opacity', classes='mt-1', hide_details=True, dense=True)\n", (16376, 16499), False, 'from trame.html import vtk, vuetify, widgets\n'), ((16687, 16867), 'trame.html.vuetify.VSelect', 'vuetify.VSelect', ([], {'label': '"""Contour by"""', 'v_model': "('contour_by_array_idx', 0)", 'items': "('array_list', dataset_arrays)", 'hide_details': '(True)', 'dense': '(True)', 'outlined': '(True)', 'classes': '"""pt-1"""'}), "(label='Contour by', v_model=('contour_by_array_idx', 0),\n items=('array_list', dataset_arrays), hide_details=True, dense=True,\n outlined=True, classes='pt-1')\n", (16702, 16867), False, 'from trame.html import vtk, vuetify, widgets\n'), ((16963, 17223), 'trame.html.vuetify.VSlider', 'vuetify.VSlider', ([], {'v_model': "('contour_value', contour_value)", 'min': "('contour_min', default_min)", 'max': "('contour_max', default_max)", 'step': "('contour_step', 0.01 * (default_max - default_min))", 'label': '"""Value"""', 'classes': '"""my-1"""', 'hide_details': '(True)', 'dense': '(True)'}), "(v_model=('contour_value', contour_value), min=(\n 'contour_min', default_min), max=('contour_max', default_max), step=(\n 'contour_step', 0.01 * (default_max - default_min)), label='Value',\n classes='my-1', hide_details=True, dense=True)\n", (16978, 17223), False, 'from trame.html import vtk, vuetify, widgets\n'), ((17325, 17674), 'trame.html.vuetify.VSelect', 'vuetify.VSelect', ([], {'v_model': "('contour_representation', Representation.Surface)", 'items': "('representations', [{'text': 'Points', 'value': 0}, {'text': 'Wireframe',\n 'value': 1}, {'text': 'Surface', 'value': 2}, {'text':\n 'SurfaceWithEdges', 'value': 3}])", 'label': '"""Representation"""', 'hide_details': '(True)', 'dense': '(True)', 'outlined': '(True)', 'classes': '"""pt-1"""'}), "(v_model=('contour_representation', Representation.Surface),\n items=('representations', [{'text': 'Points', 'value': 0}, {'text':\n 'Wireframe', 'value': 1}, {'text': 'Surface', 'value': 2}, {'text':\n 
'SurfaceWithEdges', 'value': 3}]), label='Representation', hide_details\n =True, dense=True, outlined=True, classes='pt-1')\n", (17340, 17674), False, 'from trame.html import vtk, vuetify, widgets\n'), ((19096, 19237), 'trame.html.vuetify.VSlider', 'vuetify.VSlider', ([], {'v_model': "('contour_opacity', 1.0)", 'min': '(0)', 'max': '(1)', 'step': '(0.1)', 'label': '"""Opacity"""', 'classes': '"""mt-1"""', 'hide_details': '(True)', 'dense': '(True)'}), "(v_model=('contour_opacity', 1.0), min=0, max=1, step=0.1,\n label='Opacity', classes='mt-1', hide_details=True, dense=True)\n", (19111, 19237), False, 'from trame.html import vtk, vuetify, widgets\n'), ((12010, 12046), 'trame.update_state', 'update_state', (['"""active_ui"""', '"""contour"""'], {}), "('active_ui', 'contour')\n", (12022, 12046), False, 'from trame import change, update_state\n'), ((12065, 12101), 'trame.update_state', 'update_state', (['"""active_ui"""', '"""nothing"""'], {}), "('active_ui', 'nothing')\n", (12077, 12101), False, 'from trame import change, update_state\n'), ((15183, 15223), 'trame.html.vuetify.VRow', 'vuetify.VRow', ([], {'classes': '"""pt-2"""', 'dense': '(True)'}), "(classes='pt-2', dense=True)\n", (15195, 15223), False, 'from trame.html import vtk, vuetify, widgets\n'), ((17912, 17952), 'trame.html.vuetify.VRow', 'vuetify.VRow', ([], {'classes': '"""pt-2"""', 'dense': '(True)'}), "(classes='pt-2', dense=True)\n", (17924, 17952), False, 'from trame.html import vtk, vuetify, widgets\n'), ((15242, 15264), 'trame.html.vuetify.VCol', 'vuetify.VCol', ([], {'cols': '"""6"""'}), "(cols='6')\n", (15254, 15264), False, 'from trame.html import vtk, vuetify, widgets\n'), ((15282, 15460), 'trame.html.vuetify.VSelect', 'vuetify.VSelect', ([], {'label': '"""Color by"""', 'v_model': "('mesh_color_array_idx', 0)", 'items': "('array_list', dataset_arrays)", 'hide_details': '(True)', 'dense': '(True)', 'outlined': '(True)', 'classes': '"""pt-1"""'}), "(label='Color by', 
v_model=('mesh_color_array_idx', 0),\n items=('array_list', dataset_arrays), hide_details=True, dense=True,\n outlined=True, classes='pt-1')\n", (15297, 15460), False, 'from trame.html import vtk, vuetify, widgets\n'), ((15629, 15651), 'trame.html.vuetify.VCol', 'vuetify.VCol', ([], {'cols': '"""6"""'}), "(cols='6')\n", (15641, 15651), False, 'from trame.html import vtk, vuetify, widgets\n'), ((15669, 16001), 'trame.html.vuetify.VSelect', 'vuetify.VSelect', ([], {'label': '"""Colormap"""', 'v_model': "('mesh_color_preset', LookupTable.Rainbow)", 'items': "('colormaps', [{'text': 'Rainbow', 'value': 0}, {'text': 'Inv Rainbow',\n 'value': 1}, {'text': 'Greyscale', 'value': 2}, {'text':\n 'Inv Greyscale', 'value': 3}])", 'hide_details': '(True)', 'dense': '(True)', 'outlined': '(True)', 'classes': '"""pt-1"""'}), "(label='Colormap', v_model=('mesh_color_preset', LookupTable\n .Rainbow), items=('colormaps', [{'text': 'Rainbow', 'value': 0}, {\n 'text': 'Inv Rainbow', 'value': 1}, {'text': 'Greyscale', 'value': 2},\n {'text': 'Inv Greyscale', 'value': 3}]), hide_details=True, dense=True,\n outlined=True, classes='pt-1')\n", (15684, 16001), False, 'from trame.html import vtk, vuetify, widgets\n'), ((17971, 17993), 'trame.html.vuetify.VCol', 'vuetify.VCol', ([], {'cols': '"""6"""'}), "(cols='6')\n", (17983, 17993), False, 'from trame.html import vtk, vuetify, widgets\n'), ((18011, 18192), 'trame.html.vuetify.VSelect', 'vuetify.VSelect', ([], {'label': '"""Color by"""', 'v_model': "('contour_color_array_idx', 0)", 'items': "('array_list', dataset_arrays)", 'hide_details': '(True)', 'dense': '(True)', 'outlined': '(True)', 'classes': '"""pt-1"""'}), "(label='Color by', v_model=('contour_color_array_idx', 0),\n items=('array_list', dataset_arrays), hide_details=True, dense=True,\n outlined=True, classes='pt-1')\n", (18026, 18192), False, 'from trame.html import vtk, vuetify, widgets\n'), ((18361, 18383), 'trame.html.vuetify.VCol', 'vuetify.VCol', ([], {'cols': '"""6"""'}), 
"(cols='6')\n", (18373, 18383), False, 'from trame.html import vtk, vuetify, widgets\n'), ((18401, 18735), 'trame.html.vuetify.VSelect', 'vuetify.VSelect', ([], {'label': '"""Colormap"""', 'v_model': "('contour_color_preset', LookupTable.Rainbow)", 'items': "('colormaps', [{'text': 'Rainbow', 'value': 0}, {'text': 'Inv Rainbow',\n 'value': 1}, {'text': 'Greyscale', 'value': 2}, {'text':\n 'Inv Greyscale', 'value': 3}])", 'hide_details': '(True)', 'dense': '(True)', 'outlined': '(True)', 'classes': '"""pt-1"""'}), "(label='Colormap', v_model=('contour_color_preset',\n LookupTable.Rainbow), items=('colormaps', [{'text': 'Rainbow', 'value':\n 0}, {'text': 'Inv Rainbow', 'value': 1}, {'text': 'Greyscale', 'value':\n 2}, {'text': 'Inv Greyscale', 'value': 3}]), hide_details=True, dense=\n True, outlined=True, classes='pt-1')\n", (18416, 18735), False, 'from trame.html import vtk, vuetify, widgets\n')] |
# Copyright 1996-2021 Soft_illusion.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
from std_msgs.msg import Float64
from geometry_msgs.msg import Twist
class LineFollower(Node):
    """ROS2 node that follows a line using three infra-red ground sensors.

    Subscribes to ``right_IR``, ``left_IR`` and ``mid_IR`` (Float64) and
    publishes ``cmd_vel`` (Twist).  Steering is proportional to the
    difference between the right and left sensor readings; the robot stops
    after several consecutive readings with no line detected.
    """
    def __init__(self):
        super().__init__('linefollower_cmdvel')
        # Wire up the three infra-red ground sensor inputs.
        self.subs_right_ir = self.create_subscription(
            Float64, 'right_IR', self.right_infrared_callback, 1)
        self.subs_left_ir = self.create_subscription(
            Float64, 'left_IR', self.left_infrared_callback, 1)
        self.subs_mid_ir = self.create_subscription(
            Float64, 'mid_IR', self.mid_infrared_callback, 1)
        # Velocity command output.
        self.pubs_cmdvel = self.create_publisher(Twist, 'cmd_vel', 1)
        # Vehicle parameters: cruise speed and proportional steering gain.
        self.speed = 0.2
        self.angle_correction = 0.01
        # Controller state.
        self.ground_right, self.ground_mid, self.ground_left = 0, 0, 0
        self.delta = 0
        self.cmd = Twist()
        self.stop = False
        self.count = 0
        self.count_threshold = 10
    def lineFollowingModule(self):
        """Compute one velocity command from the latest readings and publish it."""
        # Constant forward speed with proportional steering on the
        # right/left sensor difference.
        self.cmd.linear.x = self.speed
        self.delta = self.ground_right - self.ground_left
        self.cmd.angular.z = self.angle_correction * self.delta
        # All three sensors reading bright (> 500) means the black line is
        # not seen; count consecutive occurrences before deciding to stop.
        if min(self.ground_right, self.ground_left, self.ground_mid) > 500:
            self.count = self.count + 1
        else:
            self.count = 0
        if self.count > self.count_threshold:
            self.stop = True
        if self.stop:
            self.cmd.linear.x = 0.0
            self.cmd.angular.z = 0.0
        # Publish the command, then clear the stop latch for the next cycle.
        self.pubs_cmdvel.publish(self.cmd)
        self.stop = False
    # Sensor callbacks keep the latest readings; the right-sensor callback
    # also drives one control-loop iteration.
    def right_infrared_callback(self, msg):
        self.ground_right = msg.data
        self.lineFollowingModule()
    def left_infrared_callback(self, msg):
        self.ground_left = msg.data
    def mid_infrared_callback(self, msg):
        self.ground_mid = msg.data
def main(args=None):
    """Entry point: initialize rclpy, spin the node, then shut down cleanly."""
    rclpy.init(args=args)
    node = LineFollower()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
    main()
| [
"geometry_msgs.msg.Twist",
"rclpy.init",
"rclpy.shutdown",
"rclpy.spin"
]
| [((2699, 2720), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (2709, 2720), False, 'import rclpy\n'), ((2750, 2764), 'rclpy.spin', 'rclpy.spin', (['ls'], {}), '(ls)\n', (2760, 2764), False, 'import rclpy\n'), ((2792, 2808), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (2806, 2808), False, 'import rclpy\n'), ((1520, 1527), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (1525, 1527), False, 'from geometry_msgs.msg import Twist\n')] |
from random import sample
from time import sleep
# Mega Sena draw simulator: asks how many games to generate and prints
# each one (6 distinct numbers from 1 to 60) with a small delay.
jogos = []
# Header banner.
print('-' * 20)
print(f'{"MEGA SENA":^20}')
print('-' * 20)
# Keep asking until a positive number of games is entered.
while True:
    n = int(input("\nQuatos jogos você quer que eu sorteie? "))
    if n > 0:
        break
    print('\n[ERRO] Valor fora do intervalo')
print()
print('-=' * 3, end=' ')
print(f'SORTEANDO {n} JOGOS', end=' ')
print('-=' * 3)
# Draw each game and show it as it is generated.
for rodada in range(n):
    jogo = sample(range(1, 61), 6)
    jogos.append(jogo)
    sleep(0.6)
    print(f'Jogo {rodada + 1}: {jogo}')
print('-=' * 5, end=' ')
print('< BOA SORTE >', end=' ')
print('-=' * 3, end='\n\n')
"time.sleep"
]
| [((458, 468), 'time.sleep', 'sleep', (['(0.6)'], {}), '(0.6)\n', (463, 468), False, 'from time import sleep\n')] |
from importlib import import_module
from unittest import TestCase as UnitTestCase
from django.contrib.auth.models import Group
from django.core.management import BaseCommand
from django.conf import settings
from django.test import TestCase
from django.views.generic import TemplateView
try:
from unittest.mock import Mock, patch, MagicMock
except:
from mock import Mock, patch
from django_roles_access.decorator import access_by_role
from django_roles_access.mixin import RolesMixin
from django_roles_access.models import ViewAccess
from tests import views
from django_roles_access.utils import (walk_site_url, get_views_by_app,
view_access_analyzer,
get_view_analyze_report,
check_django_roles_is_used,
analyze_by_role, APP_NAME_FOR_NONE,
NOT_SECURED_DEFAULT, SECURED_DEFAULT,
PUBLIC_DEFAULT, NONE_TYPE_DEFAULT,
DISABLED_DEFAULT, OutputReport)
class MockRegex:
    """Mimic a Django 1.x compiled URL regex; only ``pattern`` is read.

    The pattern is now a defaulted parameter so tests can build variants
    directly instead of mutating ``instance.pattern`` after construction.
    """
    def __init__(self, pattern='^fake-regex-pattern/$'):
        self.pattern = pattern
class MockRegexResolver:
    """Mimic the compiled regex of a Django 1.x URL resolver.

    ``pattern`` is a defaulted parameter for easier test customization.
    """
    def __init__(self, pattern='^fake-resolver/'):
        self.pattern = pattern
class MockRegexResolverNested:
    """Mimic the compiled regex of a nested Django 1.x URL resolver.

    ``pattern`` is a defaulted parameter for easier test customization.
    """
    def __init__(self, pattern='^fake-nested-resolver/'):
        self.pattern = pattern
class MockPattern:
    """Mimic a Django 1.x URL pattern entry (regex, callback, view name).

    ``callback`` and ``name`` are defaulted parameters so tests can build
    variants without mutating attributes after construction (as the suite
    currently does for a second pattern).
    """
    def __init__(self, callback='fake-callback', name='fake-view-name'):
        self.regex = MockRegex()
        self.callback = callback
        self.name = name
class MockResolver:
    """Mimic a Django 1.x URL resolver with one contained pattern.

    ``app_name`` and ``namespace`` are defaulted parameters so tests can
    build variants without post-construction mutation.
    """
    def __init__(self, app_name='fake-app-name', namespace='fake-namespace'):
        self.url_patterns = [MockPattern()]
        self.regex = MockRegexResolver()
        self.app_name = app_name
        self.namespace = namespace
class MockResolverNested:
    """Mimic a Django 1.x URL resolver that nests another resolver.

    ``app_name`` and ``namespace`` are defaulted parameters so tests can
    build variants without post-construction mutation.
    """
    def __init__(self, app_name='fake-app-name', namespace='nested-namespace'):
        self.url_patterns = [MockResolver()]
        self.regex = MockRegexResolverNested()
        self.app_name = app_name
        self.namespace = namespace
class MockPatternDjango2:
    """Mimic a Django 2.x URL pattern entry (pattern string, callback, name).

    All three attributes are defaulted parameters so tests can build
    variants without mutating instances after construction.
    """
    def __init__(self, pattern='^fake-pattern/', callback='fake-callback',
                 name='fake-view-name'):
        self.pattern = pattern
        self.callback = callback
        self.name = name
class MockPatternDjango2None:
    """Django 2.x pattern variant whose default view name is 'fake-view-none'.

    Attributes are defaulted parameters so tests can build variants without
    mutating instances after construction.
    """
    def __init__(self, pattern='^fake-pattern/', callback='fake-callback',
                 name='fake-view-none'):
        self.pattern = pattern
        self.callback = callback
        self.name = name
class MockResolverDjango2:
    """Mimic a Django 2.x URL resolver with one contained pattern.

    ``app_name`` and ``namespace`` are defaulted parameters so tests can
    build variants without post-construction mutation.
    """
    def __init__(self, app_name='fake-app-name', namespace='fake-namespace'):
        self.pattern = '^fake-resolver/'
        self.url_patterns = [MockPatternDjango2()]
        self.app_name = app_name
        self.namespace = namespace
class MockResolverDjango2None:
    """Django 2.x resolver variant with no app name and no namespace.

    ``app_name`` and ``namespace`` default to ``None`` (the case under
    test) but can be overridden for variants.
    """
    def __init__(self, app_name=None, namespace=None):
        self.pattern = '^fake-resolver/'
        self.url_patterns = [MockPatternDjango2None()]
        self.app_name = app_name
        self.namespace = namespace
class MockResolverDjango2None2:
    """Django 2.x resolver that nests a namespace-less resolver.

    ``app_name`` and ``namespace`` are defaulted parameters so tests can
    build variants without post-construction mutation.
    """
    def __init__(self, app_name='fake-app-name', namespace='fake-namespace'):
        self.pattern = '^fake-resolver/'
        self.url_patterns = [MockResolverDjango2None()]
        self.app_name = app_name
        self.namespace = namespace
class MockResolverDjangoNested:
    """Django 2.x resolver that nests another resolver one level deep.

    ``app_name`` and ``namespace`` are defaulted parameters so tests can
    build variants without post-construction mutation.
    """
    def __init__(self, app_name='fake-app-name', namespace='nested-namespace'):
        self.pattern = '^fake-nested-resolver/'
        self.url_patterns = [MockResolverDjango2()]
        self.app_name = app_name
        self.namespace = namespace
class UnitTestWalkSiteURL(UnitTestCase):
    """Unit tests for walk_site_url() against mocked URL objects.

    The assertions show walk_site_url() flattening (possibly nested)
    resolvers into 4-tuples: (url, callback, namespaced view name,
    app name).  Both Django 1.x style (``.regex``) and Django 2.x style
    (``.pattern``) mocks are exercised.
    """
    def setUp(self):
        # One Django 1.x style pattern reused across most tests.
        self.pattern_1 = MockPattern()
        self.data = [self.pattern_1]
    def test_second_param_is_optional_return_a_list(self):
        result = walk_site_url(self.data)
        self.assertIsInstance(result, list)
    def test_first_param_list_of_pattern_and_view(self):
        # A plain pattern yields a 4-tuple with app name None.
        result = walk_site_url(self.data)
        self.assertEqual(result, [('fake-regex-pattern/', 'fake-callback',
                                   'fake-view-name', None)])
    def test_first_param_list_of_patterns_and_views(self):
        pattern_2 = MockPattern()
        pattern_2.regex.pattern = 'fake-regex-pattern-2/'
        pattern_2.callback = 'fake-view-2'
        result = walk_site_url([self.pattern_1, pattern_2])
        self.assertEqual(result, [('fake-regex-pattern/', 'fake-callback',
                                   'fake-view-name', None),
                                  ('fake-regex-pattern-2/', 'fake-view-2',
                                   'fake-view-name', None)])
    def test_param_list_with_pattern_and_resolver_django_1(self):
        # Resolver URLs are prefixed with the resolver pattern and the view
        # name is namespaced; the resolver's app name is propagated.
        expected_result = [
            ('fake-regex-pattern/', 'fake-callback', 'fake-view-name', None),
            ('fake-resolver/fake-regex-pattern/',
             'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
             )]
        resolver = MockResolver()
        result = walk_site_url([self.pattern_1, resolver])
        self.assertEqual(result, expected_result)
    def test_param_list_with_pattern_and_nested_resolver_django_1(self):
        # Nested resolvers accumulate both URL prefixes and namespaces.
        expected_result = [
            ('fake-regex-pattern/', 'fake-callback', 'fake-view-name', None),
            ('fake-nested-resolver/fake-resolver/fake-regex-pattern/',
             'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
             'fake-app-name'
             )
        ]
        resolver = MockResolverNested()
        result = walk_site_url([self.pattern_1, resolver])
        self.assertEqual(result, expected_result)
    def test_param_list_with_pattern_and_resolver_django_2(self):
        # Same expectations using Django 2.x style mocks (.pattern strings).
        expected_result = [
            ('fake-pattern/', 'fake-callback', 'fake-view-name', None),
            ('fake-resolver/fake-pattern/',
             'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
             )
        ]
        resolver = MockResolverDjango2()
        result = walk_site_url([MockPatternDjango2(), resolver])
        self.assertEqual(result, expected_result)
    def test_param_list_with_pattern_and_nested_resolver_django_2(self):
        expected_result = [
            ('fake-pattern/', 'fake-callback', 'fake-view-name', None),
            ('fake-nested-resolver/fake-resolver/fake-pattern/',
             'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
             'fake-app-name'
             )
        ]
        result = walk_site_url([MockPatternDjango2(),
                                MockResolverDjangoNested()])
        self.assertEqual(result, expected_result)
    def test_param_list_with_resolver_get_app_name_and_view_name_django_1(self):
        expected_result = [
            ('fake-resolver/fake-regex-pattern/',
             'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
             ),
            ('fake-nested-resolver/fake-resolver/fake-regex-pattern/',
             'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
             'fake-app-name'
             )
        ]
        result = walk_site_url([MockResolver(), MockResolverNested()])
        self.assertEqual(result, expected_result)
    def test_param_list_with_resolver_get_app_name_and_view_name_django_2(self):
        expected_result = [
            ('fake-resolver/fake-pattern/',
             'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
             ),
            ('fake-nested-resolver/fake-resolver/fake-pattern/',
             'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
             'fake-app-name'
             )
        ]
        resolver = MockResolverDjango2()
        nested_resolver = MockResolverDjangoNested()
        result = walk_site_url([resolver, nested_resolver])
        self.assertEqual(result, expected_result)
    def test_when_url_namespace_is_None(self):
        # A resolver without namespace contributes no prefix to the view
        # name and no app name in the tuple.
        expected_result = [
            ('fake-resolver/fake-resolver/fake-pattern/',
             'fake-callback', 'fake-view-none', None
             )
        ]
        resolver = MockResolverDjango2None2()
        result = walk_site_url([resolver])
        self.assertEqual(result, expected_result)
    # def test_when_view_name_is_None(self):
    #     expected_result = [
    #         ('fake-resolver/fake-pattern/',
    #          'fake-callback', 'fake-view-name', None
    #          )
    #     ]
    #     resolver = MockResolverDjango2None2()
    #     result = walk_site_url([resolver])
    #     print(result)
    #     self.assertEqual(result, expected_result)
class IntegratedTestWalkSiteURL(TestCase):
    """Integration tests: walk_site_url() over the project's real ROOT_URLCONF.

    Each test checks that a known URL entry appears in the flattened result
    as a (url, callback, namespaced view name, app name) tuple.
    """
    def setUp(self):
        # Load the real urlpatterns from the test project's root URLconf.
        self.url = import_module(settings.ROOT_URLCONF).urlpatterns
    def test_found_direct_access_view(self):
        # A top-level URL has no namespace and no app name.
        expected_result = ('direct_access_view/',
                           views.protected_view_by_role,
                           'direct_access_view', None)
        result = walk_site_url(self.url)
        self.assertIn(expected_result, result)
    def test_found_included_view_without_namespace(self):
        # Included without an instance namespace: the app name is used as
        # the view-name prefix.
        expected_result = ('role-included[135]/view_by_role/',
                           views.protected_view_by_role,
                           'django_roles_access:view_protected_by_role',
                           'django_roles_access')
        result = walk_site_url(self.url)
        self.assertIn(expected_result, result)
    def test_found_included_view_with_namespace(self):
        # Included with an explicit instance namespace ('app-ns2').
        expected_result = ('role-included2/view_by_role/',
                           views.protected_view_by_role,
                           'app-ns2:view_protected_by_role',
                           'django_roles_access')
        result = walk_site_url(self.url)
        self.assertIn(expected_result, result)
    def test_found_nested_access_view(self):
        # Nested includes accumulate both URL prefixes and namespaces.
        expected_result = ('nest1/nest2/view_by_role/',
                           views.protected_view_by_role,
                           'nest1_namespace:nest2_namespace:view_'
                           'protected_by_role',
                           'roles-app-name')
        result = walk_site_url(self.url)
        self.assertIn(expected_result, result)
class UnitTestGetViewsByApp(UnitTestCase):
    """
    get_views_by_app receive the result of walk_site_url and is required to
    return a dictionary with keys been installed applications.

    settings.INSTALLED_APPS is patched in each test so the expected keys
    are fully controlled; values are lists of 3-tuples (url, callback,
    view name) grouped by the 4th element (app name) of the input tuples.
    """
    def setUp(self):
        # Minimal walk_site_url() style input: one 4-tuple for 'fake-app-1'.
        self.data = [('a', 'b', 'c', 'fake-app-1')]
    @patch('django_roles_access.utils.settings')
    def test_returns_a_dictionary(
            self, mock_settings
    ):
        mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2']
        result = get_views_by_app(self.data)
        self.assertIsInstance(result, dict)
    @patch('django_roles_access.utils.settings')
    def test_returns_a_dictionary_with_all_installed_apps(
            self, mock_settings
    ):
        # Every installed app appears as a key, even with no views for it.
        mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2']
        result = get_views_by_app(self.data)
        assert 'fake-app-1' in result
        assert 'fake-app-2' in result
    @patch('django_roles_access.utils.settings')
    def test_values_of_returned_dictionary_keys_are_lists(
            self, mock_settings
    ):
        mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2']
        result = get_views_by_app(self.data)
        self.assertIsInstance(result['fake-app-1'], list)
        self.assertIsInstance(result['fake-app-2'], list)
    @patch('django_roles_access.utils.settings')
    def test_receive_list_of_tuples_with_4_element(
            self, mock_settings
    ):
        mock_settings.INSTALLED_APPS = ['fake-app-1']
        result = get_views_by_app(self.data)
        assert 'fake-app-1' in result
    @patch('django_roles_access.utils.settings')
    def test_raise_type_error_if_receive_list_of_tuples_with_3_element(
            self, mock_settings
    ):
        # Input tuples must have exactly 4 elements.
        mock_settings.INSTALLED_APPS = ['fake-app-1']
        data = [('a', 'b', 'c')]
        with self.assertRaises(TypeError):
            get_views_by_app(data)
    @patch('django_roles_access.utils.settings')
    def test_raise_type_error_if_receive_list_of_tuples_with_5_element(
            self, mock_settings
    ):
        mock_settings.INSTALLED_APPS = ['fake-app-1']
        data = [('a', 'b', 'c', 'd', 'e')]
        with self.assertRaises(TypeError):
            get_views_by_app(data)
    @patch('django_roles_access.utils.settings')
    def test_received_data_is_ordered_and_returned_by_application(
            self, mock_settings
    ):
        # Tuples are grouped by their app name (4th element), trimmed to
        # (url, callback, view name) 3-tuples.
        mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2', None]
        data = [('a', 'b', 'c', 'fake-app-1'), ('1', '2', '3', 'fake-app-2'),
                ('a1', 'b2', 'c3', None)]
        expected_result = [('a', 'b', 'c')]
        result = get_views_by_app(data)
        self.assertEqual(expected_result, result['fake-app-1'])
    @patch('django_roles_access.utils.settings')
    def test_can_work_with_no_declared_application_name(
            self, mock_settings
    ):
        # Views with app name None are grouped under APP_NAME_FOR_NONE.
        mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2', None]
        data = [('a', 'b', 'c', 'fake-app-1'), ('1', '2', '3', 'fake-app-2'),
                ('a1', 'b2', 'c3', None)]
        expected_result = [('a1', 'b2', 'c3')]
        result = get_views_by_app(data)
        self.assertEqual(expected_result, result[APP_NAME_FOR_NONE])
    @patch('django_roles_access.utils.settings')
    def test_if_application_is_not_in_installed_apps_will_not_be_in_dict(
            self, mock_settings
    ):
        mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2', None]
        result = get_views_by_app(self.data)
        assert 'fake-app-3' not in result
class IntegratedTestGetViewsByApp(TestCase):
    """Integration tests: get_views_by_app() fed by walk_site_url() over the
    project's real ROOT_URLCONF, checking views land under the right app key.
    """
    def setUp(self):
        # Load the real urlpatterns from the test project's root URLconf.
        self.url = import_module(settings.ROOT_URLCONF).urlpatterns
    def test_not_declared_app_are_recognized_as_undefined_app(self):
        # Views with no app name are grouped under APP_NAME_FOR_NONE.
        expected_result = ('direct_access_view/',
                           views.protected_view_by_role,
                           'direct_access_view')
        result = get_views_by_app(walk_site_url(self.url))
        self.assertIn(expected_result, result[APP_NAME_FOR_NONE])
    def test_views_without_namespace_are_added_with_app_name_in_view_name(self):
        expected_result = ('role-included[135]/view_by_role/',
                           views.protected_view_by_role,
                           'django_roles_access:view_protected_by_role')
        result = get_views_by_app(walk_site_url(self.url))
        self.assertIn(expected_result, result['django_roles_access'])
    def test_view_with_namespace_are_added_with_correct_app_name(self):
        expected_result = ('role-included2/view_by_role/',
                           views.protected_view_by_role,
                           'app-ns2:view_protected_by_role')
        result = get_views_by_app(walk_site_url(self.url))
        self.assertIn(expected_result, result['django_roles_access'])
    def test_nested_namespace_are_added_with_correct_app_name(self):
        expected_result = ('nest1/nest2/view_by_role/',
                           views.protected_view_by_role,
                           'nest1_namespace:nest2_namespace:view_'
                           'protected_by_role')
        result = get_views_by_app(walk_site_url(self.url))
        self.assertIn(expected_result, result['roles-app-name'])
class TestGetViewAnalyzeReport(UnitTestCase):
    """``get_view_analyze_report`` maps each application type to its default
    report text, prefixed with a tab character."""

    def test_report_for_no_application_type(self):
        expected = u'\t' + NONE_TYPE_DEFAULT
        result = get_view_analyze_report(None)
        self.assertEqual(result, expected)

    def test_report_for_application_type_NOT_SECURED(self):
        expected = u'\t' + NOT_SECURED_DEFAULT
        result = get_view_analyze_report('NOT_SECURED')
        # Fix: a duplicated, redundant assertEqual was removed here.
        self.assertEqual(result, expected)

    def test_report_for_application_type_DISABLED(self):
        expected = u'\t' + DISABLED_DEFAULT
        result = get_view_analyze_report('DISABLED')
        self.assertEqual(result, expected)

    def test_report_for_application_type_SECURED(self):
        expected = u'\t' + SECURED_DEFAULT
        result = get_view_analyze_report('SECURED')
        self.assertEqual(result, expected)

    def test_report_for_application_type_PUBLIC(self):
        expected = u'\t' + PUBLIC_DEFAULT
        result = get_view_analyze_report('PUBLIC')
        self.assertEqual(result, expected)
class TestCheckDjangoRolesIsUsed(UnitTestCase):
    """Detection of django-roles-access usage on a view, whether applied via
    the ``access_by_role`` decorator or the ``RolesMixin`` mixin."""

    def test_detect_view_is_decorated(self):
        @access_by_role
        def function():
            pass
        self.assertTrue(check_django_roles_is_used(function))

    def test_detect_view_is_not_decorated(self):
        def function():
            pass
        # Bug fix: pass the view callable itself, not its return value.
        # The original called ``function()`` (which returns None), so the
        # undecorated view was never actually inspected.
        self.assertFalse(check_django_roles_is_used(function))

    def test_detect_view_use_mixin(self):
        class Aview(RolesMixin, TemplateView):
            template_name = 'dummyTemplate.html'
        self.assertTrue(check_django_roles_is_used(Aview))

    def test_detect_view_not_use_mixin(self):
        class Aview(TemplateView):
            template_name = 'dummyTemplate.html'
        self.assertFalse(check_django_roles_is_used(Aview))
@patch('django_roles_access.utils.ViewAccess')
class UnitTestAnalyzeByRoleAccess(UnitTestCase):
    """Unit tests for ``analyze_by_role`` with a mocked ViewAccess.

    The class-level patch injects ``mock_view_access`` into every test
    method as an extra argument.
    """
    def test_detect_access_is_by_role(
            self, mock_view_access
    ):
        # 'br' (by-role) access with zero configured roles is an error.
        expected = u'ERROR: No roles configured to access de view.'
        mock_view_access.type = 'br'
        mock_view_access.roles.count.return_value = 0
        result = analyze_by_role(mock_view_access)
        self.assertEqual(result, expected)
    def test_detect_access_is_not_by_role(
            self, mock_view_access
    ):
        # Non by-role types produce an empty by-role report.
        expected = u''
        mock_view_access.type = 'pu'
        result = analyze_by_role(mock_view_access)
        self.assertEqual(result, expected)
    def test_detect_access_is_not_by_role_with_roles(
            self, mock_view_access
    ):
        # With roles attached, the report lists the role names.
        expected = u'Roles with access: role-1, role-2'
        mock_view_access.type = 'br'
        role_1 = Mock()
        role_1.name = u'role-1'
        role_2 = Mock()
        role_2.name = u'role-2'
        mock_view_access.roles.all.return_value = [role_1, role_2]
        result = analyze_by_role(mock_view_access)
        self.assertEqual(result, expected)
    def test_detect_access_is_not_by_role_without_roles(
            self, mock_view_access
    ):
        # NOTE(review): duplicates test_detect_access_is_by_role above.
        expected = u'ERROR: No roles configured to access de view.'
        mock_view_access.type = 'br'
        mock_view_access.roles.count.return_value = 0
        result = analyze_by_role(mock_view_access)
        self.assertEqual(result, expected)
class IntegratedTestAnalyzeByRoleAccess(TestCase):
    """Integration tests for ``analyze_by_role`` using persisted rows."""

    NO_ROLES_ERROR = u'ERROR: No roles configured to access de view.'

    def _create_access(self, access_type):
        # Persist a ViewAccess row of the requested type for 'any-name'.
        return ViewAccess.objects.create(view='any-name', type=access_type)

    def test_detect_access_is_by_role(self):
        access = self._create_access('br')
        self.assertEqual(analyze_by_role(access), self.NO_ROLES_ERROR)

    def test_detect_access_is_not_by_role(self):
        access = self._create_access('pu')
        self.assertEqual(analyze_by_role(access), u'')

    def test_detect_access_is_by_role_with_roles(self):
        access = self._create_access('br')
        for role_name in ('role-1', 'role-2'):
            group, _ = Group.objects.get_or_create(name=role_name)
            access.roles.add(group)
        access.save()
        self.assertEqual(analyze_by_role(access),
                         u'Roles with access: role-1, role-2')

    def test_detect_access_is_not_by_role_without_roles(self):
        access = self._create_access('br')
        self.assertEqual(analyze_by_role(access), self.NO_ROLES_ERROR)
@patch('django_roles_access.utils.ViewAccess.objects')
class UnitTestViewAnalyzer(UnitTestCase):
    """Unit tests for ``view_access_analyzer``.

    The class-level patch replaces ``ViewAccess.objects`` with
    ``mock_objects``; each test wires ``filter``/``first`` so the analyzer
    either finds a stored ViewAccess for the view or gets ``None``.
    Left byte-identical: several tests assert exact mock call counts and
    call arguments, so statement order is load-bearing.
    """
    def test_view_analyzer_return_a_report(
            self, mock_objects
    ):
        view_access = Mock()
        view_access.type = 'pu'
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = view_access
        result = view_access_analyzer('fake-app-type', 'fake-callback',
                                      'fake-view-name', 'fake-site-active')
        # Python 2 returns ``unicode``; on Python 3 the name lookup fails
        # and the bare except falls through to the ``str`` check.
        try:
            self.assertIsInstance(result, unicode)
        except:
            self.assertIsInstance(result, str)
    def test_view_analyzer_search_view_access_for_the_view(
            self, mock_objects
    ):
        view_access = Mock()
        view_access.type = 'pu'
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = view_access
        view_access_analyzer('fake-app-type', 'fake-callback',
                             'fake-view-name', 'fake-site-active')
        assert mock_objects.first.called
    def test_view_analyzer_search_view_access_for_the_view_once(
            self, mock_objects
    ):
        view_access = Mock()
        view_access.type = 'pu'
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = view_access
        view_access_analyzer('fake-app-type', 'fake-callback',
                             'fake-view-name', 'fake-site-active')
        self.assertEqual(mock_objects.filter.call_count, 1)
    def test_view_analyzer_search_view_access_with_view_name(
            self, mock_objects
    ):
        view_access = Mock()
        view_access.type = 'pu'
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = view_access
        view_access_analyzer('fake-app-type', 'fake-callback',
                             'fake-view-name', 'fake-site-active')
        # The lookup must be keyed on the fully qualified view name.
        mock_objects.filter.assert_called_once_with(view='fake-view-name')
    def test_view_access_type_when_site_active_and_exists_view_access(
            self, mock_objects
    ):
        expected = u'View access is of type Public.'
        view_access = Mock()
        view_access.type = 'pu'
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = view_access
        result = view_access_analyzer('fake-app-type', 'fake-callback',
                                      'fake-view-name', True)
        self.assertEqual(result, expected)
    @patch('django_roles_access.utils.analyze_by_role')
    def test_view_access_type_by_role_call_analyze_by_role(
            self, mock_analyze_by_role, mock_objects
    ):
        # 'br' access must delegate role reporting to analyze_by_role.
        view_access = Mock()
        view_access.type = 'br'
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = view_access
        view_access_analyzer('fake-app-type', 'fake-callback',
                             'fake-view-name', True)
        assert mock_analyze_by_role.called
    @patch('django_roles_access.utils.analyze_by_role')
    def test_view_access_type_by_role_call_analyze_by_role_once(
            self, mock_analyze_by_role, mock_objects
    ):
        view_access = Mock()
        view_access.type = 'br'
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = view_access
        view_access_analyzer('fake-app-type', 'fake-callback',
                             'fake-view-name', True)
        self.assertEqual(mock_analyze_by_role.call_count, 1)
    @patch('django_roles_access.utils.analyze_by_role')
    def test_view_access_type_by_role_call_analyze_by_role_with_view_access(
            self, mock_analyze_by_role, mock_objects
    ):
        view_access = Mock()
        view_access.type = 'br'
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = view_access
        view_access_analyzer('fake-app-type', 'fake-callback',
                             'fake-view-name', True)
        mock_analyze_by_role.assert_called_once_with(view_access)
    def test_no_view_access_object_for_the_view_and_site_active_no_app_type(
            self, mock_objects
    ):
        # No stored ViewAccess and no app type: fall back to default report.
        expected = u'\t' + NONE_TYPE_DEFAULT
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = None
        result = view_access_analyzer(None, 'fake-callback',
                                      'fake-view-name', True)
        self.assertEqual(result, expected)
    def test_no_view_access_object_and_site_active_app_type_NOT_SECURED(
            self, mock_objects
    ):
        expected = u'\t' + NOT_SECURED_DEFAULT
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = None
        result = view_access_analyzer('NOT_SECURED', 'fake-callback',
                                      'fake-view-name', True)
        self.assertEqual(result, expected)
    def test_no_view_access_object_and_site_active_app_type_DISABLED(
            self, mock_objects
    ):
        expected = u'\t' + DISABLED_DEFAULT
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = None
        result = view_access_analyzer('DISABLED', 'fake-callback',
                                      'fake-view-name', True)
        self.assertEqual(result, expected)
    def test_no_view_access_object_and_site_active_app_type_SECURED(
            self, mock_objects
    ):
        expected = u'\t' + SECURED_DEFAULT
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = None
        result = view_access_analyzer('SECURED', 'fake-callback',
                                      'fake-view-name', True)
        self.assertEqual(result, expected)
    def test_no_view_access_object_and_site_active_app_type_PUBLIC(
            self, mock_objects
    ):
        expected = u'\t' + PUBLIC_DEFAULT
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = None
        result = view_access_analyzer('PUBLIC', 'fake-callback',
                                      'fake-view-name', True)
        self.assertEqual(result, expected)
    def test_middleware_not_used_view_access_object_exist_and_dr_tools_used(
            self, mock_objects
    ):
        # Middleware inactive (last arg False), but the view is decorated.
        expected = u'View access is of type Public.'
        @access_by_role
        def function():
            pass
        view_access = Mock()
        view_access.type = 'pu'
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = view_access
        result = view_access_analyzer('fake-app-type', function,
                                      'fake-view-name', False)
        self.assertEqual(result, expected)
    def test_middleware_not_used_view_access_object_exist_and_dr_tools_not_used(
            self, mock_objects
    ):
        # A stored ViewAccess with no tool applied anywhere is an error.
        expected = u'ERROR: View access object exist for the view, but no '
        expected += u'Django role access tool is used: neither decorator, '
        expected += u'mixin, or middleware.'
        def function():
            pass
        view_access = Mock()
        view_access.type = 'pu'
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = view_access
        result = view_access_analyzer('fake-app-type', function,
                                      'fake-view-name', False)
        self.assertEqual(result, expected)
    def test_middleware_not_used_dr_tools_are_used_no_view_access_object(
            self, mock_objects
    ):
        expected = u'\t' + PUBLIC_DEFAULT
        @access_by_role
        def function():
            pass
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = None
        result = view_access_analyzer('PUBLIC', function,
                                      'fake-view-name', False)
        self.assertEqual(result, expected)
    def test_no_django_roles_tools_used_no_application_type(
            self, mock_objects
    ):
        expected = u'No Django roles access tool used. Access to view depends '
        expected += u'on its implementation.'
        def function():
            pass
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = None
        result = view_access_analyzer(None, function,
                                      'fake-view-name', False)
        self.assertEqual(result, expected)
    def test_no_django_roles_tools_used_application_type(
            self, mock_objects
    ):
        expected = u'No Django roles access tool used. Access to view depends '
        expected += u'on its implementation.'
        def function():
            pass
        mock_objects.filter.return_value = mock_objects
        mock_objects.first.return_value = None
        result = view_access_analyzer('Authorized', function,
                                      'fake-view-name', False)
        self.assertEqual(result, expected)
class IntegratedTestViewAnalyzezr(TestCase):
    """Integration tests for ``view_access_analyzer`` using real views and
    persisted ViewAccess rows.

    NOTE(review): class name carries a typo ("Analyzezr"); kept as-is since
    renaming would change the test identifier.
    """
    def test_with_middleware_SECURED_without_view_access_object(self):
        expected = u'\t' + SECURED_DEFAULT
        result = view_access_analyzer(
            'SECURED', views.MiddlewareView.as_view,
            'django_roles_access:middleware_view_class',
            True)
        self.assertEqual(expected, result)
    def test_with_middleware_NOT_SECURED_with_view_access_object(self):
        # NOT_SECURED wins even when a ViewAccess row exists.
        ViewAccess.objects.create(
            view='django_roles_access:middleware_view_class',
            type='br')
        result = view_access_analyzer(
            'NOT_SECURED', views.MiddlewareView.as_view,
            'django_roles_access:middleware_view_class',
            True)
        self.assertEqual(result, u'\t' + NOT_SECURED_DEFAULT)
    def test_with_middleware_DISABLED_with_view_access_object(self):
        ViewAccess.objects.create(
            view='django_roles_access:middleware_view_class',
            type='pu')
        result = view_access_analyzer(
            'DISABLED', views.MiddlewareView.as_view,
            'django_roles_access:middleware_view_class',
            True)
        self.assertEqual(result, u'\t' + DISABLED_DEFAULT)
    def test_with_middleware_with_view_access_object(self):
        # By-role access without any role configured reports an error.
        expected = u'View access is of type By role.'
        expected += u'ERROR: No roles configured to access de view.'
        ViewAccess.objects.create(
            view='django_roles_access:middleware_view_class',
            type='br')
        result = view_access_analyzer(
            'SECURED', views.MiddlewareView.as_view,
            'django_roles_access:middleware_view_class',
            True)
        self.assertEqual(result, expected)
    def test_with_middleware_with_view_access_object_with_roles(self):
        expected = u'View access is of type By role.'
        expected += u'Roles with access: test1, test2'
        g1, created = Group.objects.get_or_create(name='test1')
        g2, created = Group.objects.get_or_create(name='test2')
        view_access = ViewAccess.objects.create(
            view='django_roles_access:middleware_view_class',
            type='br')
        view_access.roles.add(g1)
        view_access.roles.add(g2)
        view_access.save()
        result = view_access_analyzer(
            'SECURED', views.MiddlewareView.as_view,
            'django_roles_access:middleware_view_class',
            True)
        self.assertEqual(result, expected)
    def test_with_middleware_with_view_access_object_authorized(self):
        expected = u'View access is of type Authorized.'
        ViewAccess.objects.create(
            view='django_roles_access:middleware_view_class',
            type='au')
        result = view_access_analyzer(
            'SECURED', views.MiddlewareView.as_view,
            'django_roles_access:middleware_view_class',
            True)
        self.assertEqual(result, expected)
    def test_with_middleware_with_view_access_object_public(self):
        expected = u'View access is of type Public.'
        ViewAccess.objects.create(
            view='django_roles_access:middleware_view_class',
            type='pu')
        result = view_access_analyzer(
            'SECURED', views.MiddlewareView.as_view,
            'django_roles_access:middleware_view_class',
            True)
        self.assertEqual(result, expected)
    def test_without_middleware_with_view_access_object(self):
        # Same matrix as above but for a decorator-protected function view
        # (middleware flag False).
        expected = u'View access is of type By role.'
        expected += u'ERROR: No roles configured to access de view.'
        ViewAccess.objects.create(
            view='django_roles_access:view_protected_by_role',
            type='br')
        result = view_access_analyzer(
            'SECURED', views.protected_view_by_role,
            'django_roles_access:view_protected_by_role',
            False)
        self.assertEqual(result, expected)
    def test_without_middleware_with_view_access_object_with_roles(self):
        expected = u'View access is of type By role.'
        expected += u'Roles with access: test1, test2'
        g1, created = Group.objects.get_or_create(name='test1')
        g2, created = Group.objects.get_or_create(name='test2')
        view_access = ViewAccess.objects.create(
            view='django_roles_access:view_protected_by_role',
            type='br')
        view_access.roles.add(g1)
        view_access.roles.add(g2)
        view_access.save()
        result = view_access_analyzer(
            'SECURED', views.protected_view_by_role,
            'django_roles_access:view_protected_by_role',
            False)
        self.assertEqual(result, expected)
    def test_without_middleware_with_view_access_object_authorized(self):
        expected = u'View access is of type Authorized.'
        ViewAccess.objects.create(
            view='django_roles_access:view_protected_by_role',
            type='au')
        result = view_access_analyzer(
            'SECURED', views.protected_view_by_role,
            'django_roles_access:view_protected_by_role',
            False)
        self.assertEqual(result, expected)
    def test_without_middleware_with_view_access_object_public(self):
        expected = u'View access is of type Public.'
        ViewAccess.objects.create(
            view='django_roles_access:view_protected_by_role',
            type='pu')
        result = view_access_analyzer(
            'SECURED', views.protected_view_by_role,
            'django_roles_access:view_protected_by_role',
            False)
        self.assertEqual(result, expected)
    def test_without_middleware_without_view_access_object_and_view_protected(
            self
    ):
        expected = u'\t' + SECURED_DEFAULT
        result = view_access_analyzer(
            'SECURED', views.protected_view_by_role,
            'django_roles_access:view_protected_by_role',
            False)
        self.assertEqual(result, expected)
    def test_without_middleware_no_view_access_object_and_view_protected_without_app(
            self
    ):
        expected = u'\t' + NONE_TYPE_DEFAULT
        result = view_access_analyzer(
            None, views.protected_view_by_role,
            'django_roles_access:view_protected_by_role',
            False)
        self.assertEqual(result, expected)
    def test_without_middleware_with_view_access_object_and_view_not_protected(
            self
    ):
        # A ViewAccess row for a completely unprotected view is an error.
        expected = u'ERROR: View access object exist for the view, '
        expected += 'but no Django role access tool is used: neither '
        expected += 'decorator, mixin, or middleware.'
        ViewAccess.objects.create(
            view='django_roles_access:middleware_view_func',
            type='pu')
        result = view_access_analyzer(
            None, views.middleware_view,
            'django_roles_access:middleware_view_func',
            False)
        self.assertEqual(result, expected)
class UnitTestOutputReport(UnitTestCase):
    """Unit tests for ``OutputReport`` in both 'console' and 'csv' formats.

    ``setUp`` patches throwaway ``BaseCommand`` attributes so the report
    object writes to mocks instead of a real management-command stdout.
    """

    def setUp(self):
        # Fix: the original swapped the patched attribute names
        # ('style' under patch_mock_stdout and vice versa). Behavior was
        # unaffected because both are plain mocks, but the names now match.
        self.patch_mock_stdout = patch.object(BaseCommand(), 'stdout')
        self.patch_mock_style = patch.object(BaseCommand(), 'style')
        self.mock_stdout = self.patch_mock_stdout.start()
        self.mock_style = self.patch_mock_style.start()
        self._output = OutputReport(self.mock_stdout, self.mock_style)

    def tearDown(self):
        self.patch_mock_stdout.stop()
        self.patch_mock_style.stop()

    def test_initial_with_parameter(self):
        assert self._output.stdout == self.mock_stdout
        assert self._output.style == self.mock_style

    def test_internal_attributes_are_initialize(self):
        assert hasattr(self._output, '_row') and self._output._row == u''
        assert hasattr(self._output, '_format') and self._output._format == \
            'console'

    def test_initial_without_parameter(self):
        # Constructing without stdout/style must fail.
        with self.assertRaises(TypeError):
            OutputReport()

    def test_default_output_format_is_correct_type(self):
        assert self._output._format == 'console'

    def test_set_format(self):
        self._output.set_format('csv')
        assert self._output._format == 'csv'

    def test_add_to_row(self):
        self._output.add_to_row('text')
        self._output.add_to_row('other')
        self.assertIn('text', self._output._row)
        self.assertIn('other', self._output._row)

    def test_write_method_write_to_stdout(self):
        self._output.write(u'some text')
        assert self.mock_stdout.write.called

    def test_write_method_use_stdout_write_once(self):
        self._output.write(u'some text')
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_write_method_use_SUCCESS_style_for_styling_output(self):
        self._output.write(u'some text')
        self.mock_stdout.write.assert_called_once_with(
            self.mock_style.SUCCESS())

    def test_write_method_use_SUCCESS_style_for_output(self):
        self._output.write(u'some text')
        assert self.mock_style.SUCCESS.called

    def test_write_method_use_style_with_received_argument(self):
        self._output.write(u'some text')
        self.mock_style.SUCCESS.assert_called_once_with(u'some text')

    def test_console_format_write_correct_header_to_stdout_with_SUCCESS_style(
            self
    ):
        expected = u'Start checking views access.\n'
        expected += u'Start gathering information.'
        self._output.write_header()
        self.mock_style.SUCCESS.assert_called_once_with(expected)
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    @patch('django_roles_access.utils.timezone')
    def test_cvs_format_write_correct_header(
            self, mock_timezone
    ):
        mock_timezone.now.return_value = 'fake-date'
        self._output.set_format('csv')
        self._output.write_header()
        self.mock_style.SUCCESS.assert_called_once_with(u'Reported: fake-date')
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_console_format_write_correct_middleware_status_and_end_of_header(
            self
    ):
        expected = u'Django roles access middleware is active: False.\n'
        self._output.write_middleware_status(False)
        self.mock_style.SUCCESS.assert_called_once_with(expected)
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_console_format_write_correct_end_of_head(
            self
    ):
        expected = u'Finish gathering information.'
        self._output.write_end_of_head()
        self.mock_style.SUCCESS.assert_called_once_with(expected)
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_cvs_format_write_correct_correct_middleware_status(
            self
    ):
        expected = u'Django roles access middleware is active: False.\n'
        self._output.set_format('csv')
        self._output.write_middleware_status(False)
        self.mock_style.SUCCESS.assert_called_once_with(expected)
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_cvs_format_write_correct_csv_columns(
            self
    ):
        expected = u'App Name,Type,View Name,Url,Status,Status description'
        self._output.set_format('csv')
        self._output.write_end_of_head()
        self.mock_style.SUCCESS.assert_called_once_with(expected)
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_console_format_process_app_data_to_stdout_with_SUCCESS_style(self):
        app_name = u'fake-app-name'
        app_type = u'fake-app-type'
        view_list = ['fake-view']
        expected = u'\tAnalyzing: {}\n'.format(app_name)
        expected += u'\t\t{} is {} type.'.format(app_name, app_type)
        self._output.process_application_data(app_name, app_type, view_list)
        self.mock_style.SUCCESS.assert_called_once_with(expected)
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_console_format_process_app_data_without_type(self):
        app_name = u'fake-app-name'
        app_type = None
        view_list = ['fake-view']
        expected = u'\tAnalyzing: {}\n'.format(app_name)
        expected += u'\t\t{} has no type.'.format(app_name)
        self._output.process_application_data(app_name, app_type, view_list)
        self.mock_style.SUCCESS.assert_called_once_with(expected)
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_console_format_process_app_data_without_views(self):
        app_name = u'fake-app-name'
        app_type = u'fake-app-type'
        view_list = []
        expected = u'\tAnalyzing: {}\n'.format(app_name)
        expected += u'\t\t{} is {} type.'.format(app_name, app_type)
        expected += u'\t\t{} does not have configured views.'.format(app_name)
        self._output.process_application_data(app_name, app_type, view_list)
        self.mock_style.SUCCESS.assert_called_once_with(expected)
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_cvs_format_process_application_data_to_string(self):
        app_name = u'fake-app-name'
        app_type = u'fake-app-type'
        view_list = ['fake-view-list']
        # Fix: dropped a spurious third argument (view_list) that the
        # two-placeholder format string never consumed.
        expected = u'{},{},'.format(app_name, app_type)
        self._output.set_format('csv')
        self._output.process_application_data(app_name, app_type, view_list)
        self.assertEqual(expected, self._output._row)

    def test_cvs_format_process_application_data_without_type_to_string(self):
        app_name = u'fake-app-name'
        app_type = None
        view_list = ['fake-view-list']
        # Fix: removed a no-op .format() call on a literal without
        # placeholders; the expected value is unchanged.
        expected = u'fake-app-name,no type,'
        self._output.set_format('csv')
        self._output.process_application_data(app_name, app_type, view_list)
        self.assertEqual(expected, self._output._row)

    def test_cvs_format_process_application_data_without_views(self):
        app_name = u'fake-app-name'
        app_type = u'fake-app-type'
        view_list = []
        # Fix: removed a no-op .format() call on a placeholder-free literal.
        expected = u'fake-app-name,fake-app-type,,,,,'
        self._output.set_format('csv')
        self._output.process_application_data(app_name, app_type, view_list)
        self.assertEqual(expected, self._output._row)

    def test_console_format_process_view_data_to_stdout_with_SUCCESS_style(
            self
    ):
        view_name = u'fake-view-name'
        url = '/fake-url/'
        expected = u'\n\t\tAnalysis for view: {}'.format(view_name)
        expected += u'\n\t\tView url: {}'.format(url)
        self._output.process_view_data(view_name, url)
        self.mock_style.SUCCESS.assert_called_once_with(expected)
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_cvs_format_process_view_data(self):
        view_name = u'fake-view-name'
        url = '/fake-url/'
        expected = u'{},{}'.format(view_name, url)
        self._output.set_format('csv')
        self._output.process_view_data(view_name, url)
        self.assertIn(expected, self._output._row)

    # View_access_analyzer output.
    def test_console_format_write_vaa_to_stdout(self):
        self._output.write_view_access_analyzer(u'some text')
        assert self.mock_stdout.write.called

    def test_console_format_use_stdout_write_once_with_vaa(self):
        self._output.write_view_access_analyzer(u'some text')
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_console_format_use_SUCCESS_style_for_styling_output_of_vaa(self):
        self._output.write_view_access_analyzer(u'some text')
        self.mock_stdout.write.assert_called_once_with(
            self.mock_style.SUCCESS())

    def test_console_format_use_SUCCESS_style_for_output_of_vaa(self):
        self._output.write_view_access_analyzer(u'some text')
        assert self.mock_style.SUCCESS.called

    def test_console_format_use_style_with_vaa_result(self):
        self._output.write_view_access_analyzer(u'some text')
        self.mock_style.SUCCESS.assert_called_once_with(u'\t\tsome text')

    def test_console_format_use_ERROR_style_for_output_if_error_in_vaa(self):
        self._output.write_view_access_analyzer('ERROR: fake report')
        assert self.mock_style.ERROR.called

    def test_console_format_use_ERROR_style_with_the_error_in_vaa(self):
        self._output.write_view_access_analyzer('ERROR: fake report')
        self.mock_style.ERROR.assert_called_once_with('\t\t' +
                                                      'ERROR: fake report')

    def test_console_format_use_WARNING_style_for_output_if_warning_in_vaa(self):
        self._output.write_view_access_analyzer('WARNING: fake report')
        assert self.mock_style.WARNING.called

    def test_console_format_use_WARNING_style_with_the_warning_in_vaa(self):
        self._output.write_view_access_analyzer('WARNING: fake report')
        self.mock_style.WARNING.assert_called_once_with(
            '\t\t' + 'WARNING: fake report')

    def test_csv_format_write_view_access_analyzer_with_Normal_to_stdout(self):
        self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
        self._output._format = 'csv'
        self._output.write_view_access_analyzer(u'fake-report')
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_csv_format_write_view_access_analyzer_with_Normal_to_style(self):
        self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
        expected = u'fake-app,fake-type,fake-view,fake-url,Normal,fake-report\n'
        self._output._format = 'csv'
        self._output.write_view_access_analyzer(u'fake-report')
        self.mock_style.SUCCESS.assert_called_once_with(expected)

    def test_csv_format_write_view_access_analyzer_with_WARNING_to_stdout(self):
        self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
        self._output._format = 'csv'
        self._output.write_view_access_analyzer('WARNING: fake-report')
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_csv_format_write_view_access_analyzer_with_WARNING_with_style(
            self
    ):
        self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
        expected = u'fake-app,fake-type,fake-view,fake-url,Warning,' \
                   u'fake-report\n'
        self._output._format = 'csv'
        self._output.write_view_access_analyzer('WARNING: fake-report')
        self.mock_style.WARNING.assert_called_once_with(expected)

    def test_csv_format_write_view_access_analyzer_with_ERROR_to_stdout(self):
        self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
        self._output._format = 'csv'
        self._output.write_view_access_analyzer('ERROR: fake-report')
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_csv_format_write_view_access_analyzer_with_ERROR_with_style(self):
        self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
        expected = u'fake-app,fake-type,fake-view,fake-url,Error,fake-report\n'
        self._output._format = 'csv'
        self._output.write_view_access_analyzer('ERROR: fake-report')
        self.mock_style.ERROR.assert_called_once_with(expected)

    def test_csv_format_write_view_access_analyzer_reset_OutputFormater_row(
            self
    ):
        self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
        self._output._format = 'csv'
        self._output.write_view_access_analyzer('fake-report')
        # After a csv row is emitted, _row keeps only app name and type.
        self.assertEqual(self._output._row, u'fake-app,fake-type,')

    def test_console_format_close_application_data_to_stdout_with_SUCCESS_style(
            self
    ):
        expected = u'\tFinish analyzing fake-app-name.'
        self._output.close_application_data('fake-app-name')
        self.mock_style.SUCCESS.assert_called_once_with(expected)
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_cvs_format_close_application_data_to_string(self):
        expected = u''
        self._output.set_format('csv')
        self._output.close_application_data('fake-app-name')
        self.assertEqual(self._output._row, expected)

    def test_console_format_write_footer_to_stdout_with_SUCCESS_style(self):
        expected = u'End checking view access.'
        self._output.write_footer()
        self.mock_style.SUCCESS.assert_called_once_with(expected)
        self.assertEqual(self.mock_stdout.write.call_count, 1)

    def test_cvs_format_write_footer_to_string(self):
        expected = u'\n'
        self._output.set_format('csv')
        self._output.write_footer()
        self.assertEqual(self._output._row, expected)
| [
"mock.patch",
"django_roles_access.models.ViewAccess.objects.create",
"importlib.import_module",
"django_roles_access.utils.analyze_by_role",
"django_roles_access.utils.view_access_analyzer",
"mock.Mock",
"django_roles_access.utils.walk_site_url",
"django_roles_access.utils.check_django_roles_is_used",
"django.contrib.auth.models.Group.objects.get_or_create",
"django_roles_access.utils.get_views_by_app",
"django_roles_access.utils.get_view_analyze_report",
"django_roles_access.utils.OutputReport",
"django.core.management.BaseCommand"
]
| [((16946, 16991), 'mock.patch', 'patch', (['"""django_roles_access.utils.ViewAccess"""'], {}), "('django_roles_access.utils.ViewAccess')\n", (16951, 16991), False, 'from mock import Mock, patch\n'), ((19821, 19874), 'mock.patch', 'patch', (['"""django_roles_access.utils.ViewAccess.objects"""'], {}), "('django_roles_access.utils.ViewAccess.objects')\n", (19826, 19874), False, 'from mock import Mock, patch\n'), ((10161, 10204), 'mock.patch', 'patch', (['"""django_roles_access.utils.settings"""'], {}), "('django_roles_access.utils.settings')\n", (10166, 10204), False, 'from mock import Mock, patch\n'), ((10442, 10485), 'mock.patch', 'patch', (['"""django_roles_access.utils.settings"""'], {}), "('django_roles_access.utils.settings')\n", (10447, 10485), False, 'from mock import Mock, patch\n'), ((10779, 10822), 'mock.patch', 'patch', (['"""django_roles_access.utils.settings"""'], {}), "('django_roles_access.utils.settings')\n", (10784, 10822), False, 'from mock import Mock, patch\n'), ((11156, 11199), 'mock.patch', 'patch', (['"""django_roles_access.utils.settings"""'], {}), "('django_roles_access.utils.settings')\n", (11161, 11199), False, 'from mock import Mock, patch\n'), ((11434, 11477), 'mock.patch', 'patch', (['"""django_roles_access.utils.settings"""'], {}), "('django_roles_access.utils.settings')\n", (11439, 11477), False, 'from mock import Mock, patch\n'), ((11760, 11803), 'mock.patch', 'patch', (['"""django_roles_access.utils.settings"""'], {}), "('django_roles_access.utils.settings')\n", (11765, 11803), False, 'from mock import Mock, patch\n'), ((12096, 12139), 'mock.patch', 'patch', (['"""django_roles_access.utils.settings"""'], {}), "('django_roles_access.utils.settings')\n", (12101, 12139), False, 'from mock import Mock, patch\n'), ((12594, 12637), 'mock.patch', 'patch', (['"""django_roles_access.utils.settings"""'], {}), "('django_roles_access.utils.settings')\n", (12599, 12637), False, 'from mock import Mock, patch\n'), ((13090, 13133), 'mock.patch', 
'patch', (['"""django_roles_access.utils.settings"""'], {}), "('django_roles_access.utils.settings')\n", (13095, 13133), False, 'from mock import Mock, patch\n'), ((22346, 22396), 'mock.patch', 'patch', (['"""django_roles_access.utils.analyze_by_role"""'], {}), "('django_roles_access.utils.analyze_by_role')\n", (22351, 22396), False, 'from mock import Mock, patch\n'), ((22853, 22903), 'mock.patch', 'patch', (['"""django_roles_access.utils.analyze_by_role"""'], {}), "('django_roles_access.utils.analyze_by_role')\n", (22858, 22903), False, 'from mock import Mock, patch\n'), ((23383, 23433), 'mock.patch', 'patch', (['"""django_roles_access.utils.analyze_by_role"""'], {}), "('django_roles_access.utils.analyze_by_role')\n", (23388, 23433), False, 'from mock import Mock, patch\n'), ((38468, 38511), 'mock.patch', 'patch', (['"""django_roles_access.utils.timezone"""'], {}), "('django_roles_access.utils.timezone')\n", (38473, 38511), False, 'from mock import Mock, patch\n'), ((3489, 3513), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['self.data'], {}), '(self.data)\n', (3502, 3513), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((3633, 3657), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['self.data'], {}), '(self.data)\n', (3646, 3657), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((4006, 4048), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['[self.pattern_1, pattern_2]'], {}), '([self.pattern_1, pattern_2])\n', (4019, 4048), 
False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((4689, 4730), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['[self.pattern_1, resolver]'], {}), '([self.pattern_1, resolver])\n', (4702, 4730), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((5223, 5264), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['[self.pattern_1, resolver]'], {}), '([self.pattern_1, resolver])\n', (5236, 5264), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((7464, 7506), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['[resolver, nested_resolver]'], {}), '([resolver, nested_resolver])\n', (7477, 7506), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((7832, 7857), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['[resolver]'], {}), '([resolver])\n', (7845, 7857), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, 
NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((8643, 8666), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['self.url'], {}), '(self.url)\n', (8656, 8666), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((9033, 9056), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['self.url'], {}), '(self.url)\n', (9046, 9056), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((9404, 9427), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['self.url'], {}), '(self.url)\n', (9417, 9427), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((9811, 9834), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['self.url'], {}), '(self.url)\n', (9824, 9834), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((10364, 10391), 'django_roles_access.utils.get_views_by_app', 'get_views_by_app', (['self.data'], {}), '(self.data)\n', (10380, 10391), False, 'from django_roles_access.utils import 
walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((10669, 10696), 'django_roles_access.utils.get_views_by_app', 'get_views_by_app', (['self.data'], {}), '(self.data)\n', (10685, 10696), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((11006, 11033), 'django_roles_access.utils.get_views_by_app', 'get_views_by_app', (['self.data'], {}), '(self.data)\n', (11022, 11033), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((11362, 11389), 'django_roles_access.utils.get_views_by_app', 'get_views_by_app', (['self.data'], {}), '(self.data)\n', (11378, 11389), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((12501, 12523), 'django_roles_access.utils.get_views_by_app', 'get_views_by_app', (['data'], {}), '(data)\n', (12517, 12523), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((12992, 
13014), 'django_roles_access.utils.get_views_by_app', 'get_views_by_app', (['data'], {}), '(data)\n', (13008, 13014), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((13338, 13365), 'django_roles_access.utils.get_views_by_app', 'get_views_by_app', (['self.data'], {}), '(self.data)\n', (13354, 13365), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((15267, 15296), 'django_roles_access.utils.get_view_analyze_report', 'get_view_analyze_report', (['None'], {}), '(None)\n', (15290, 15296), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((15465, 15503), 'django_roles_access.utils.get_view_analyze_report', 'get_view_analyze_report', (['"""NOT_SECURED"""'], {}), "('NOT_SECURED')\n", (15488, 15503), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((15709, 15744), 'django_roles_access.utils.get_view_analyze_report', 'get_view_analyze_report', (['"""DISABLED"""'], {}), "('DISABLED')\n", (15732, 15744), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, 
view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((15905, 15939), 'django_roles_access.utils.get_view_analyze_report', 'get_view_analyze_report', (['"""SECURED"""'], {}), "('SECURED')\n", (15928, 15939), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((16098, 16131), 'django_roles_access.utils.get_view_analyze_report', 'get_view_analyze_report', (['"""PUBLIC"""'], {}), "('PUBLIC')\n", (16121, 16131), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((17299, 17332), 'django_roles_access.utils.analyze_by_role', 'analyze_by_role', (['mock_view_access'], {}), '(mock_view_access)\n', (17314, 17332), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((17539, 17572), 'django_roles_access.utils.analyze_by_role', 'analyze_by_role', (['mock_view_access'], {}), '(mock_view_access)\n', (17554, 17572), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, 
DISABLED_DEFAULT, OutputReport\n'), ((17823, 17829), 'mock.Mock', 'Mock', ([], {}), '()\n', (17827, 17829), False, 'from mock import Mock, patch\n'), ((17879, 17885), 'mock.Mock', 'Mock', ([], {}), '()\n', (17883, 17885), False, 'from mock import Mock, patch\n'), ((18002, 18035), 'django_roles_access.utils.analyze_by_role', 'analyze_by_role', (['mock_view_access'], {}), '(mock_view_access)\n', (18017, 18035), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((18355, 18388), 'django_roles_access.utils.analyze_by_role', 'analyze_by_role', (['mock_view_access'], {}), '(mock_view_access)\n', (18370, 18388), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((18621, 18674), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""any-name"""', 'type': '"""br"""'}), "(view='any-name', type='br')\n", (18646, 18674), False, 'from django_roles_access.models import ViewAccess\n'), ((18692, 18720), 'django_roles_access.utils.analyze_by_role', 'analyze_by_role', (['view_access'], {}), '(view_access)\n', (18707, 18720), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((18859, 18912), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': 
'"""any-name"""', 'type': '"""pu"""'}), "(view='any-name', type='pu')\n", (18884, 18912), False, 'from django_roles_access.models import ViewAccess\n'), ((18930, 18958), 'django_roles_access.utils.analyze_by_role', 'analyze_by_role', (['view_access'], {}), '(view_access)\n', (18945, 18958), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((19137, 19190), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""any-name"""', 'type': '"""br"""'}), "(view='any-name', type='br')\n", (19162, 19190), False, 'from django_roles_access.models import ViewAccess\n'), ((19217, 19259), 'django.contrib.auth.models.Group.objects.get_or_create', 'Group.objects.get_or_create', ([], {'name': '"""role-1"""'}), "(name='role-1')\n", (19244, 19259), False, 'from django.contrib.auth.models import Group\n'), ((19286, 19328), 'django.contrib.auth.models.Group.objects.get_or_create', 'Group.objects.get_or_create', ([], {'name': '"""role-2"""'}), "(name='role-2')\n", (19313, 19328), False, 'from django.contrib.auth.models import Group\n'), ((19449, 19477), 'django_roles_access.utils.analyze_by_role', 'analyze_by_role', (['view_access'], {}), '(view_access)\n', (19464, 19477), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((19675, 19728), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""any-name"""', 'type': '"""br"""'}), "(view='any-name', type='br')\n", (19700, 19728), False, 'from 
django_roles_access.models import ViewAccess\n'), ((19746, 19774), 'django_roles_access.utils.analyze_by_role', 'analyze_by_role', (['view_access'], {}), '(view_access)\n', (19761, 19774), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((20022, 20028), 'mock.Mock', 'Mock', ([], {}), '()\n', (20026, 20028), False, 'from mock import Mock, patch\n'), ((20188, 20284), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""fake-app-type"""', '"""fake-callback"""', '"""fake-view-name"""', '"""fake-site-active"""'], {}), "('fake-app-type', 'fake-callback', 'fake-view-name',\n 'fake-site-active')\n", (20208, 20284), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((20567, 20573), 'mock.Mock', 'Mock', ([], {}), '()\n', (20571, 20573), False, 'from mock import Mock, patch\n'), ((20724, 20820), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""fake-app-type"""', '"""fake-callback"""', '"""fake-view-name"""', '"""fake-site-active"""'], {}), "('fake-app-type', 'fake-callback', 'fake-view-name',\n 'fake-site-active')\n", (20744, 20820), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((21013, 21019), 'mock.Mock', 'Mock', ([], {}), '()\n', (21017, 21019), False, 'from mock import Mock, 
patch\n'), ((21170, 21266), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""fake-app-type"""', '"""fake-callback"""', '"""fake-view-name"""', '"""fake-site-active"""'], {}), "('fake-app-type', 'fake-callback', 'fake-view-name',\n 'fake-site-active')\n", (21190, 21266), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((21475, 21481), 'mock.Mock', 'Mock', ([], {}), '()\n', (21479, 21481), False, 'from mock import Mock, patch\n'), ((21632, 21728), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""fake-app-type"""', '"""fake-callback"""', '"""fake-view-name"""', '"""fake-site-active"""'], {}), "('fake-app-type', 'fake-callback', 'fake-view-name',\n 'fake-site-active')\n", (21652, 21728), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((22014, 22020), 'mock.Mock', 'Mock', ([], {}), '()\n', (22018, 22020), False, 'from mock import Mock, patch\n'), ((22180, 22258), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""fake-app-type"""', '"""fake-callback"""', '"""fake-view-name"""', '(True)'], {}), "('fake-app-type', 'fake-callback', 'fake-view-name', True)\n", (22200, 22258), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((22539, 22545), 
'mock.Mock', 'Mock', ([], {}), '()\n', (22543, 22545), False, 'from mock import Mock, patch\n'), ((22696, 22774), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""fake-app-type"""', '"""fake-callback"""', '"""fake-view-name"""', '(True)'], {}), "('fake-app-type', 'fake-callback', 'fake-view-name', True)\n", (22716, 22774), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((23051, 23057), 'mock.Mock', 'Mock', ([], {}), '()\n', (23055, 23057), False, 'from mock import Mock, patch\n'), ((23208, 23286), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""fake-app-type"""', '"""fake-callback"""', '"""fake-view-name"""', '(True)'], {}), "('fake-app-type', 'fake-callback', 'fake-view-name', True)\n", (23228, 23286), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((23593, 23599), 'mock.Mock', 'Mock', ([], {}), '()\n', (23597, 23599), False, 'from mock import Mock, patch\n'), ((23750, 23828), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""fake-app-type"""', '"""fake-callback"""', '"""fake-view-name"""', '(True)'], {}), "('fake-app-type', 'fake-callback', 'fake-view-name', True)\n", (23770, 23828), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), 
((24205, 24272), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['None', '"""fake-callback"""', '"""fake-view-name"""', '(True)'], {}), "(None, 'fake-callback', 'fake-view-name', True)\n", (24225, 24272), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((24633, 24709), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""NOT_SECURED"""', '"""fake-callback"""', '"""fake-view-name"""', '(True)'], {}), "('NOT_SECURED', 'fake-callback', 'fake-view-name', True)\n", (24653, 24709), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((25064, 25137), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""DISABLED"""', '"""fake-callback"""', '"""fake-view-name"""', '(True)'], {}), "('DISABLED', 'fake-callback', 'fake-view-name', True)\n", (25084, 25137), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((25490, 25562), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""SECURED"""', '"""fake-callback"""', '"""fake-view-name"""', '(True)'], {}), "('SECURED', 'fake-callback', 'fake-view-name', True)\n", (25510, 25562), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, 
get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((25913, 25984), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""PUBLIC"""', '"""fake-callback"""', '"""fake-view-name"""', '(True)'], {}), "('PUBLIC', 'fake-callback', 'fake-view-name', True)\n", (25933, 25984), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((26323, 26329), 'mock.Mock', 'Mock', ([], {}), '()\n', (26327, 26329), False, 'from mock import Mock, patch\n'), ((26489, 26561), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""fake-app-type"""', 'function', '"""fake-view-name"""', '(False)'], {}), "('fake-app-type', function, 'fake-view-name', False)\n", (26509, 26561), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((27025, 27031), 'mock.Mock', 'Mock', ([], {}), '()\n', (27029, 27031), False, 'from mock import Mock, patch\n'), ((27191, 27263), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""fake-app-type"""', 'function', '"""fake-view-name"""', '(False)'], {}), "('fake-app-type', function, 'fake-view-name', False)\n", (27211, 27263), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, 
NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((27686, 27751), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""PUBLIC"""', 'function', '"""fake-view-name"""', '(False)'], {}), "('PUBLIC', function, 'fake-view-name', False)\n", (27706, 27751), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((28221, 28282), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['None', 'function', '"""fake-view-name"""', '(False)'], {}), "(None, function, 'fake-view-name', False)\n", (28241, 28282), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((28749, 28818), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""Authorized"""', 'function', '"""fake-view-name"""', '(False)'], {}), "('Authorized', function, 'fake-view-name', False)\n", (28769, 28818), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((29079, 29195), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""SECURED"""', 'views.MiddlewareView.as_view', '"""django_roles_access:middleware_view_class"""', '(True)'], {}), "('SECURED', views.MiddlewareView.as_view,\n 'django_roles_access:middleware_view_class', True)\n", (29099, 29195), False, 'from django_roles_access.utils 
import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((29353, 29443), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""django_roles_access:middleware_view_class"""', 'type': '"""br"""'}), "(view='django_roles_access:middleware_view_class',\n type='br')\n", (29378, 29443), False, 'from django_roles_access.models import ViewAccess\n'), ((29482, 29602), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""NOT_SECURED"""', 'views.MiddlewareView.as_view', '"""django_roles_access:middleware_view_class"""', '(True)'], {}), "('NOT_SECURED', views.MiddlewareView.as_view,\n 'django_roles_access:middleware_view_class', True)\n", (29502, 29602), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((29776, 29866), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""django_roles_access:middleware_view_class"""', 'type': '"""pu"""'}), "(view='django_roles_access:middleware_view_class',\n type='pu')\n", (29801, 29866), False, 'from django_roles_access.models import ViewAccess\n'), ((29905, 30022), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""DISABLED"""', 'views.MiddlewareView.as_view', '"""django_roles_access:middleware_view_class"""', '(True)'], {}), "('DISABLED', views.MiddlewareView.as_view,\n 'django_roles_access:middleware_view_class', True)\n", (29925, 30022), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, 
get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((30307, 30397), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""django_roles_access:middleware_view_class"""', 'type': '"""br"""'}), "(view='django_roles_access:middleware_view_class',\n type='br')\n", (30332, 30397), False, 'from django_roles_access.models import ViewAccess\n'), ((30436, 30552), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""SECURED"""', 'views.MiddlewareView.as_view', '"""django_roles_access:middleware_view_class"""', '(True)'], {}), "('SECURED', views.MiddlewareView.as_view,\n 'django_roles_access:middleware_view_class', True)\n", (30456, 30552), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((30832, 30873), 'django.contrib.auth.models.Group.objects.get_or_create', 'Group.objects.get_or_create', ([], {'name': '"""test1"""'}), "(name='test1')\n", (30859, 30873), False, 'from django.contrib.auth.models import Group\n'), ((30896, 30937), 'django.contrib.auth.models.Group.objects.get_or_create', 'Group.objects.get_or_create', ([], {'name': '"""test2"""'}), "(name='test2')\n", (30923, 30937), False, 'from django.contrib.auth.models import Group\n'), ((30960, 31050), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""django_roles_access:middleware_view_class"""', 'type': '"""br"""'}), "(view='django_roles_access:middleware_view_class',\n type='br')\n", (30985, 31050), False, 'from django_roles_access.models import ViewAccess\n'), ((31184, 31300), 
'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""SECURED"""', 'views.MiddlewareView.as_view', '"""django_roles_access:middleware_view_class"""', '(True)'], {}), "('SECURED', views.MiddlewareView.as_view,\n 'django_roles_access:middleware_view_class', True)\n", (31204, 31300), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((31514, 31604), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""django_roles_access:middleware_view_class"""', 'type': '"""au"""'}), "(view='django_roles_access:middleware_view_class',\n type='au')\n", (31539, 31604), False, 'from django_roles_access.models import ViewAccess\n'), ((31643, 31759), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""SECURED"""', 'views.MiddlewareView.as_view', '"""django_roles_access:middleware_view_class"""', '(True)'], {}), "('SECURED', views.MiddlewareView.as_view,\n 'django_roles_access:middleware_view_class', True)\n", (31663, 31759), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((31965, 32055), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""django_roles_access:middleware_view_class"""', 'type': '"""pu"""'}), "(view='django_roles_access:middleware_view_class',\n type='pu')\n", (31990, 32055), False, 'from django_roles_access.models import ViewAccess\n'), ((32094, 32210), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', 
(['"""SECURED"""', 'views.MiddlewareView.as_view', '"""django_roles_access:middleware_view_class"""', '(True)'], {}), "('SECURED', views.MiddlewareView.as_view,\n 'django_roles_access:middleware_view_class', True)\n", (32114, 32210), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((32482, 32573), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""django_roles_access:view_protected_by_role"""', 'type': '"""br"""'}), "(view='django_roles_access:view_protected_by_role',\n type='br')\n", (32507, 32573), False, 'from django_roles_access.models import ViewAccess\n'), ((32612, 32730), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""SECURED"""', 'views.protected_view_by_role', '"""django_roles_access:view_protected_by_role"""', '(False)'], {}), "('SECURED', views.protected_view_by_role,\n 'django_roles_access:view_protected_by_role', False)\n", (32632, 32730), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((33013, 33054), 'django.contrib.auth.models.Group.objects.get_or_create', 'Group.objects.get_or_create', ([], {'name': '"""test1"""'}), "(name='test1')\n", (33040, 33054), False, 'from django.contrib.auth.models import Group\n'), ((33077, 33118), 'django.contrib.auth.models.Group.objects.get_or_create', 'Group.objects.get_or_create', ([], {'name': '"""test2"""'}), "(name='test2')\n", (33104, 33118), False, 'from django.contrib.auth.models import Group\n'), ((33141, 33232), 
'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""django_roles_access:view_protected_by_role"""', 'type': '"""br"""'}), "(view='django_roles_access:view_protected_by_role',\n type='br')\n", (33166, 33232), False, 'from django_roles_access.models import ViewAccess\n'), ((33366, 33484), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""SECURED"""', 'views.protected_view_by_role', '"""django_roles_access:view_protected_by_role"""', '(False)'], {}), "('SECURED', views.protected_view_by_role,\n 'django_roles_access:view_protected_by_role', False)\n", (33386, 33484), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((33701, 33792), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""django_roles_access:view_protected_by_role"""', 'type': '"""au"""'}), "(view='django_roles_access:view_protected_by_role',\n type='au')\n", (33726, 33792), False, 'from django_roles_access.models import ViewAccess\n'), ((33831, 33949), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""SECURED"""', 'views.protected_view_by_role', '"""django_roles_access:view_protected_by_role"""', '(False)'], {}), "('SECURED', views.protected_view_by_role,\n 'django_roles_access:view_protected_by_role', False)\n", (33851, 33949), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((34158, 34249), 'django_roles_access.models.ViewAccess.objects.create', 
'ViewAccess.objects.create', ([], {'view': '"""django_roles_access:view_protected_by_role"""', 'type': '"""pu"""'}), "(view='django_roles_access:view_protected_by_role',\n type='pu')\n", (34183, 34249), False, 'from django_roles_access.models import ViewAccess\n'), ((34288, 34406), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""SECURED"""', 'views.protected_view_by_role', '"""django_roles_access:view_protected_by_role"""', '(False)'], {}), "('SECURED', views.protected_view_by_role,\n 'django_roles_access:view_protected_by_role', False)\n", (34308, 34406), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((34647, 34765), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['"""SECURED"""', 'views.protected_view_by_role', '"""django_roles_access:view_protected_by_role"""', '(False)'], {}), "('SECURED', views.protected_view_by_role,\n 'django_roles_access:view_protected_by_role', False)\n", (34667, 34765), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((35016, 35129), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['None', 'views.protected_view_by_role', '"""django_roles_access:view_protected_by_role"""', '(False)'], {}), "(None, views.protected_view_by_role,\n 'django_roles_access:view_protected_by_role', False)\n", (35036, 35129), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, 
analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((35514, 35603), 'django_roles_access.models.ViewAccess.objects.create', 'ViewAccess.objects.create', ([], {'view': '"""django_roles_access:middleware_view_func"""', 'type': '"""pu"""'}), "(view='django_roles_access:middleware_view_func',\n type='pu')\n", (35539, 35603), False, 'from django_roles_access.models import ViewAccess\n'), ((35642, 35746), 'django_roles_access.utils.view_access_analyzer', 'view_access_analyzer', (['None', 'views.middleware_view', '"""django_roles_access:middleware_view_func"""', '(False)'], {}), "(None, views.middleware_view,\n 'django_roles_access:middleware_view_func', False)\n", (35662, 35746), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((36166, 36213), 'django_roles_access.utils.OutputReport', 'OutputReport', (['self.mock_stdout', 'self.mock_style'], {}), '(self.mock_stdout, self.mock_style)\n', (36178, 36213), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((8369, 8405), 'importlib.import_module', 'import_module', (['settings.ROOT_URLCONF'], {}), '(settings.ROOT_URLCONF)\n', (8382, 8405), False, 'from importlib import import_module\n'), ((11731, 11753), 'django_roles_access.utils.get_views_by_app', 'get_views_by_app', (['data'], {}), '(data)\n', (11747, 11753), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, 
check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((12067, 12089), 'django_roles_access.utils.get_views_by_app', 'get_views_by_app', (['data'], {}), '(data)\n', (12083, 12089), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((13496, 13532), 'importlib.import_module', 'import_module', (['settings.ROOT_URLCONF'], {}), '(settings.ROOT_URLCONF)\n', (13509, 13532), False, 'from importlib import import_module\n'), ((13809, 13832), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['self.url'], {}), '(self.url)\n', (13822, 13832), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((14209, 14232), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['self.url'], {}), '(self.url)\n', (14222, 14232), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((14588, 14611), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['self.url'], {}), '(self.url)\n', (14601, 14611), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, 
PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((15015, 15038), 'django_roles_access.utils.walk_site_url', 'walk_site_url', (['self.url'], {}), '(self.url)\n', (15028, 15038), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((16360, 16396), 'django_roles_access.utils.check_django_roles_is_used', 'check_django_roles_is_used', (['function'], {}), '(function)\n', (16386, 16396), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((16717, 16750), 'django_roles_access.utils.check_django_roles_is_used', 'check_django_roles_is_used', (['Aview'], {}), '(Aview)\n', (16743, 16750), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((16908, 16941), 'django_roles_access.utils.check_django_roles_is_used', 'check_django_roles_is_used', (['Aview'], {}), '(Aview)\n', (16934, 16941), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n'), ((35935, 35948), 'django.core.management.BaseCommand', 'BaseCommand', ([], {}), '()\n', (35946, 35948), False, 'from django.core.management import 
BaseCommand\n'), ((36004, 36017), 'django.core.management.BaseCommand', 'BaseCommand', ([], {}), '()\n', (36015, 36017), False, 'from django.core.management import BaseCommand\n'), ((36806, 36820), 'django_roles_access.utils.OutputReport', 'OutputReport', ([], {}), '()\n', (36818, 36820), False, 'from django_roles_access.utils import walk_site_url, get_views_by_app, view_access_analyzer, get_view_analyze_report, check_django_roles_is_used, analyze_by_role, APP_NAME_FOR_NONE, NOT_SECURED_DEFAULT, SECURED_DEFAULT, PUBLIC_DEFAULT, NONE_TYPE_DEFAULT, DISABLED_DEFAULT, OutputReport\n')] |
import libcst as cst
import libcst.matchers as m
from fixit import CstLintRule
from fixit import InvalidTestCase as Invalid
from fixit import ValidTestCase as Valid
class UseFstringRule(CstLintRule):
    """Lint rule that flags printf-style ``%`` formatting and ``str.format()``
    on string literals, recommending f-strings instead."""

    MESSAGE: str = (
        "As mentioned in the [Contributing Guidelines]"
        + "(https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md), "
        + "please do not use printf style formatting or `str.format()`. "
        + "Use [f-string](https://realpython.com/python-f-strings/) instead to be "
        + "more readable and efficient."
    )

    VALID = [
        Valid("assigned='string'; f'testing {assigned}'"),
        Valid("'simple string'"),
        Valid("'concatenated' + 'string'"),
        Valid("b'bytes %s' % 'string'.encode('utf-8')"),
    ]

    INVALID = [
        Invalid("'hello, {name}'.format(name='you')"),
        Invalid("'hello, %s' % 'you'"),
        Invalid("r'raw string value=%s' % val"),
    ]

    def visit_Call(self, node: cst.Call) -> None:
        # Report any `<string literal>.format(...)` call.
        literal_format_call = m.Call(
            func=m.Attribute(value=m.SimpleString(), attr=m.Name(value="format"))
        )
        if m.matches(node, literal_format_call):
            self.report(node)

    def visit_BinaryOperation(self, node: cst.BinaryOperation) -> None:
        # Report `<string literal> % ...` printf-style formatting.
        percent_format = m.BinaryOperation(
            left=m.SimpleString(), operator=m.Modulo()
        )
        if not m.matches(node, percent_format):
            return
        # SimpleString can be bytes and fstring don't support bytes.
        # https://www.python.org/dev/peps/pep-0498/#no-binary-f-strings
        left_value = cst.ensure_type(node.left, cst.SimpleString).evaluated_value
        if isinstance(left_value, str):
            self.report(node)
| [
"libcst.matchers.SimpleString",
"libcst.ensure_type",
"fixit.ValidTestCase",
"libcst.matchers.Name",
"libcst.matchers.Modulo",
"fixit.InvalidTestCase"
]
| [((592, 641), 'fixit.ValidTestCase', 'Valid', (['"""assigned=\'string\'; f\'testing {assigned}\'"""'], {}), '("assigned=\'string\'; f\'testing {assigned}\'")\n', (597, 641), True, 'from fixit import ValidTestCase as Valid\n'), ((651, 675), 'fixit.ValidTestCase', 'Valid', (['"""\'simple string\'"""'], {}), '("\'simple string\'")\n', (656, 675), True, 'from fixit import ValidTestCase as Valid\n'), ((685, 719), 'fixit.ValidTestCase', 'Valid', (['"""\'concatenated\' + \'string\'"""'], {}), '("\'concatenated\' + \'string\'")\n', (690, 719), True, 'from fixit import ValidTestCase as Valid\n'), ((729, 776), 'fixit.ValidTestCase', 'Valid', (['"""b\'bytes %s\' % \'string\'.encode(\'utf-8\')"""'], {}), '("b\'bytes %s\' % \'string\'.encode(\'utf-8\')")\n', (734, 776), True, 'from fixit import ValidTestCase as Valid\n'), ((809, 854), 'fixit.InvalidTestCase', 'Invalid', (['"""\'hello, {name}\'.format(name=\'you\')"""'], {}), '("\'hello, {name}\'.format(name=\'you\')")\n', (816, 854), True, 'from fixit import InvalidTestCase as Invalid\n'), ((864, 894), 'fixit.InvalidTestCase', 'Invalid', (['"""\'hello, %s\' % \'you\'"""'], {}), '("\'hello, %s\' % \'you\'")\n', (871, 894), True, 'from fixit import InvalidTestCase as Invalid\n'), ((904, 943), 'fixit.InvalidTestCase', 'Invalid', (['"""r\'raw string value=%s\' % val"""'], {}), '("r\'raw string value=%s\' % val")\n', (911, 943), True, 'from fixit import InvalidTestCase as Invalid\n'), ((1604, 1648), 'libcst.ensure_type', 'cst.ensure_type', (['node.left', 'cst.SimpleString'], {}), '(node.left, cst.SimpleString)\n', (1619, 1648), True, 'import libcst as cst\n'), ((1358, 1374), 'libcst.matchers.SimpleString', 'm.SimpleString', ([], {}), '()\n', (1372, 1374), True, 'import libcst.matchers as m\n'), ((1385, 1395), 'libcst.matchers.Modulo', 'm.Modulo', ([], {}), '()\n', (1393, 1395), True, 'import libcst.matchers as m\n'), ((1101, 1117), 'libcst.matchers.SimpleString', 'm.SimpleString', ([], {}), '()\n', (1115, 1117), True, 'import 
libcst.matchers as m\n'), ((1124, 1146), 'libcst.matchers.Name', 'm.Name', ([], {'value': '"""format"""'}), "(value='format')\n", (1130, 1146), True, 'import libcst.matchers as m\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: source_nbs/13_model_fn.ipynb (unless otherwise specified).
__all__ = ['variable_summaries', 'filter_loss', 'BertMultiTaskBody', 'BertMultiTaskTop', 'BertMultiTask']
# Cell
from typing import Dict, Tuple
from inspect import signature
import tensorflow as tf
import transformers
from .modeling import MultiModalBertModel
from .params import BaseParams
from .top import (Classification, MultiLabelClassification, PreTrain,
Seq2Seq, SequenceLabel, MaskLM)
from .utils import get_embedding_table_from_model, get_transformer_main_model
def variable_summaries(var, name):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

    Records mean, standard deviation, max, min and a histogram of ``var``
    under the name scope ``name``.
    """
    with tf.compat.v1.name_scope(name):
        mean_value = tf.reduce_mean(input_tensor=var)
        tf.compat.v1.summary.scalar('mean', mean_value)
        with tf.compat.v1.name_scope('stddev'):
            squared_dev = tf.square(var - mean_value)
            stddev_value = tf.sqrt(tf.reduce_mean(input_tensor=squared_dev))
        tf.compat.v1.summary.scalar('stddev', stddev_value)
        tf.compat.v1.summary.scalar('max', tf.reduce_max(input_tensor=var))
        tf.compat.v1.summary.scalar('min', tf.reduce_min(input_tensor=var))
        tf.compat.v1.summary.histogram('histogram', var)
@tf.function
def filter_loss(loss, features, problem):
    """Return ``loss`` unchanged, or 0.0 when the batch contains no rows for
    ``problem`` (i.e. its loss multiplier averages to zero)."""
    multiplier = features['%s_loss_multiplier' % problem]
    if tf.reduce_mean(input_tensor=multiplier) == 0:
        filtered_loss = 0.0
    else:
        filtered_loss = loss
    return filtered_loss
class BertMultiTaskBody(tf.keras.Model):
    """Model to extract bert features and dispatch corresponding rows to each problem_chunk.

    for each problem chunk, we extract corresponding features
    and hidden features for that problem. The reason behind this
    is to save computation for downstream processing.
    For example, we have a batch of two instances and they're from
    problem a and b respectively:
    Input:
    [{'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0},
    {'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}]
    Output:
    {
    'a': {'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0}
    'b': {'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}
    }
    """

    def __init__(self, params: BaseParams, name='BertMultiTaskBody'):
        # params: run configuration (problem list, pooled-layer size, flags).
        super(BertMultiTaskBody, self).__init__(name=name)
        self.params = params
        # Shared transformer backbone for all problems.
        self.bert = MultiModalBertModel(params=self.params)
        # Optional projection of the pooled output to a custom hidden size.
        if self.params.custom_pooled_hidden_size:
            self.custom_pooled_layer = tf.keras.layers.Dense(
                self.params.custom_pooled_hidden_size, activation=tf.keras.activations.selu)
        else:
            self.custom_pooled_layer = None

    @tf.function
    def get_features_for_problem(self, features, hidden_feature, problem, mode):
        # get features with ind == 1
        # In PREDICT mode there are no loss multipliers, so every row is
        # passed to every problem unchanged.
        if mode == tf.estimator.ModeKeys.PREDICT:
            feature_this_round = features
            hidden_feature_this_round = hidden_feature
        else:
            multiplier_name = '%s_loss_multiplier' % problem
            # Indices of batch rows that belong to this problem
            # (multiplier != 0).
            record_ind = tf.where(tf.cast(
                tf.squeeze(features[multiplier_name]), tf.bool))

            hidden_feature_this_round = {}
            for hidden_feature_name in hidden_feature:
                if hidden_feature_name != 'embed_table':
                    # Gather the selected rows; squeeze drops the extra axis
                    # introduced by tf.where's [k, 1] index shape.
                    hidden_feature_this_round[hidden_feature_name] = tf.squeeze(tf.gather(
                        hidden_feature[hidden_feature_name], record_ind, axis=0
                    ), axis=1)
                    # Restore the static shape lost by the dynamic gather so
                    # downstream layers can rely on it.
                    hidden_feature_this_round[hidden_feature_name].set_shape(
                        hidden_feature[hidden_feature_name].shape.as_list())
                else:
                    # The embedding table is batch-independent; keep as-is.
                    hidden_feature_this_round[hidden_feature_name] = hidden_feature[hidden_feature_name]

            feature_this_round = {}
            for features_name in features:
                feature_this_round[features_name] = tf.gather_nd(
                    features[features_name],
                    record_ind)
        return feature_this_round, hidden_feature_this_round

    def call(self, inputs: Dict[str, tf.Tensor],
             mode: str) -> Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]]:
        # Run the transformer backbone (training flag enables dropout etc.).
        _ = self.bert(inputs, mode == tf.estimator.ModeKeys.TRAIN)

        # extract bert hidden features
        inputs['model_input_mask'] = self.bert.get_input_mask()
        inputs['model_token_type_ids'] = self.bert.get_token_type_ids()

        hidden_feature = {}
        for logit_type in ['seq', 'pooled', 'all', 'embed', 'embed_table']:
            if logit_type == 'seq':
                # tensor, [batch_size, seq_length, hidden_size]
                hidden_feature[logit_type] = self.bert.get_sequence_output()
            elif logit_type == 'pooled':
                # tensor, [batch_size, hidden_size]
                hidden_feature[logit_type] = self.bert.get_pooled_output()
                if self.custom_pooled_layer:
                    hidden_feature[logit_type] = self.custom_pooled_layer(
                        hidden_feature[logit_type])
            elif logit_type == 'all':
                # list, num_hidden_layers * [batch_size, seq_length, hidden_size]
                hidden_feature[logit_type] = self.bert.get_all_encoder_layers()
            elif logit_type == 'embed':
                # for res connection
                hidden_feature[logit_type] = self.bert.get_embedding_output()
            elif logit_type == 'embed_table':
                hidden_feature[logit_type] = self.bert.get_embedding_table()

        # for each problem chunk, we extract corresponding features
        # and hidden features for that problem. The reason behind this
        # is to save computation for downstream processing.
        # For example, we have a batch of two instances and they're from
        # problem a and b respectively:
        # Input:
        # [{'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0},
        # {'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}]
        # Output:
        # {
        # 'a': {'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0}
        # 'b': {'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}
        # }
        features = inputs
        return_feature = {}
        return_hidden_feature = {}

        for problem_dict in self.params.run_problem_list:
            for problem in problem_dict:
                if self.params.task_transformer:
                    # hidden_feature = task_tranformer_hidden_feature[problem]
                    raise NotImplementedError

                if len(self.params.run_problem_list) > 1:
                    feature_this_round, hidden_feature_this_round = self.get_features_for_problem(
                        features, hidden_feature, problem, mode)
                else:
                    # Single problem: no row dispatch needed.
                    feature_this_round, hidden_feature_this_round = features, hidden_feature

                if self.params.label_transfer and self.params.grid_transformer:
                    raise ValueError(
                        'Label Transfer and grid transformer cannot be enabled in the same time.'
                    )

                if self.params.grid_transformer:
                    raise NotImplementedError

                return_hidden_feature[problem] = hidden_feature_this_round
                return_feature[problem] = feature_this_round
        return return_feature, return_hidden_feature
# Cell
class BertMultiTaskTop(tf.keras.Model):
    """Model to create top layer, aka classification layer, for each problem.

    Instantiates one problem-specific head per configured problem and, at
    call time, routes each problem's dispatched features through its head.
    """

    def __init__(self, params: BaseParams, name='BertMultiTaskTop', input_embeddings: tf.Tensor = None):
        # input_embeddings: backbone embedding layer, passed to heads (e.g.
        # MaskLM) whose constructor declares an `input_embeddings` parameter.
        super(BertMultiTaskTop, self).__init__(name=name)
        self.params = params

        # Built-in mapping from problem type to its head class; user-supplied
        # heads in params.top_layer override these entries.
        problem_type_layer = {
            'seq_tag': SequenceLabel,
            'cls': Classification,
            'seq2seq_tag': Seq2Seq,
            'seq2seq_text': Seq2Seq,
            'multi_cls': MultiLabelClassification,
            'pretrain': PreTrain,
            'masklm': MaskLM
        }
        problem_type_layer.update(self.params.top_layer)
        self.top_layer_dict = {}
        for problem_dict in self.params.run_problem_list:
            for problem in problem_dict:
                problem_type = self.params.problem_type[problem]

                # some layers has different signatures, assign inputs accordingly
                layer_signature_name = signature(
                    problem_type_layer[problem_type].__init__).parameters.keys()
                inputs_kwargs = {
                    'params': self.params,
                    'problem_name': problem
                }
                for signature_name in layer_signature_name:
                    if signature_name == 'input_embeddings':
                        inputs_kwargs.update(
                            {signature_name: input_embeddings})

                self.top_layer_dict[problem] = problem_type_layer[problem_type](
                    **inputs_kwargs)

    def call(self,
             inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],
             mode: str) -> Dict[str, tf.Tensor]:
        features, hidden_feature = inputs
        return_dict = {}
        for problem_dict in self.params.run_problem_list:
            for problem in problem_dict:
                feature_this_round = features[problem]
                hidden_feature_this_round = hidden_feature[problem]
                problem_type = self.params.problem_type[problem]

                # if pretrain, return pretrain logit
                # NOTE(review): this early return skips any remaining
                # problems in the loop — presumably pretrain is meant to be
                # the only configured problem; confirm with callers.
                if problem_type == 'pretrain':
                    pretrain = self.top_layer_dict[problem]
                    return_dict[problem] = pretrain(
                        (feature_this_round, hidden_feature_this_round), mode)
                    return return_dict

                if self.params.label_transfer and self.params.grid_transformer:
                    raise ValueError(
                        'Label Transfer and grid transformer cannot be enabled in the same time.'
                    )

                with tf.name_scope(problem):
                    layer = self.top_layer_dict[problem]
                    return_dict[problem] = layer(
                        (feature_this_round, hidden_feature_this_round), mode)

        if self.params.augument_mask_lm and mode == tf.estimator.ModeKeys.TRAIN:
            raise NotImplementedError
            # try:
            #     mask_lm_top = MaskLM(self.params)
            #     return_dict['augument_mask_lm'] = \
            #         mask_lm_top(features,
            #                     hidden_feature, mode, 'dummy')
            # except ValueError:
            #     pass

        return return_dict
# Cell
class BertMultiTask(tf.keras.Model):
    def __init__(self, params: BaseParams, name='BertMultiTask') -> None:
        """Assemble the full model: shared transformer body + per-problem heads."""
        super(BertMultiTask, self).__init__(name=name)
        self.params = params
        # initialize body model, aka transformers
        self.body = BertMultiTaskBody(params=self.params)
        # mlm might need word embedding from bert
        # build sub-model
        # Calling the helper forces the backbone sub-model to be built so
        # its embedding layer exists before the top layers are created.
        _ = get_embedding_table_from_model(self.body.bert.bert_model)
        main_model = get_transformer_main_model(self.body.bert.bert_model)
        # input_embeddings = self.body.bert.bert_model.bert.embeddings
        input_embeddings = main_model.embeddings
        self.top = BertMultiTaskTop(
            params=self.params, input_embeddings=input_embeddings)
def call(self, inputs, mode=tf.estimator.ModeKeys.TRAIN):
feature_per_problem, hidden_feature_per_problem = self.body(
inputs, mode)
pred_per_problem = self.top(
(feature_per_problem, hidden_feature_per_problem), mode)
return pred_per_problem
def compile(self):
super(BertMultiTask, self).compile()
logger = tf.get_logger()
logger.info('Initial lr: {}'.format(self.params.lr))
logger.info('Train steps: {}'.format(self.params.train_steps))
logger.info('Warmup steps: {}'.format(self.params.num_warmup_steps))
self.optimizer, self.lr_scheduler = transformers.optimization_tf.create_optimizer(
init_lr=self.params.lr,
num_train_steps=self.params.train_steps,
num_warmup_steps=self.params.num_warmup_steps,
weight_decay_rate=0.01
)
self.mean_acc = tf.keras.metrics.Mean(name='mean_acc')
def train_step(self, data):
with tf.GradientTape() as tape:
# Forward pass
_ = self(data, mode=tf.estimator.ModeKeys.TRAIN)
# gather losses from all problems
loss_dict = {'{}_loss'.format(problem_name): tf.reduce_sum(top_layer.losses) for problem_name,
top_layer in self.top.top_layer_dict.items()}
# metric_dict = {'{}_metric'.format(problem_name): tf.reduce_mean(top_layer.metrics) for problem_name,
# top_layer in self.top.top_layer_dict.items()}
metric_dict = {m.name: m.result() for m in self.metrics}
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(self.losses, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
self.mean_acc.update_state(
[v for n, v in metric_dict.items() if n != 'mean_acc'])
return_dict = metric_dict
return_dict.update(loss_dict)
return_dict[self.mean_acc.name] = self.mean_acc.result()
# Return a dict mapping metric names to current value.
# Note that it will include the loss (tracked in self.metrics).
return return_dict
def test_step(self, data):
"""The logic for one evaluation step.
This method can be overridden to support custom evaluation logic.
This method is called by `Model.make_test_function`.
This function should contain the mathemetical logic for one step of
evaluation.
This typically includes the forward pass, loss calculation, and metrics
updates.
Configuration details for *how* this logic is run (e.g. `tf.function` and
`tf.distribute.Strategy` settings), should be left to
`Model.make_test_function`, which can also be overridden.
Arguments:
data: A nested structure of `Tensor`s.
Returns:
A `dict` containing values that will be passed to
`tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the
values of the `Model`'s metrics are returned.
"""
y_pred = self(data, mode=tf.estimator.ModeKeys.EVAL)
# Updates stateful loss metrics.
self.compiled_loss(
None, y_pred, None, regularization_losses=self.losses)
self.compiled_metrics.update_state(None, y_pred, None)
# get metrics to calculate mean
m_list = []
for metric in self.metrics:
if 'mean_acc' in metric.name:
continue
if 'acc' in metric.name:
m_list.append(metric.result())
if 'f1' in metric.name:
m_list.append(metric.result())
self.mean_acc.update_state(
m_list)
return {m.name: m.result() for m in self.metrics}
def predict_step(self, data):
return self(data, mode=tf.estimator.ModeKeys.PREDICT)
| [
"tensorflow.compat.v1.name_scope",
"tensorflow.reduce_min",
"tensorflow.compat.v1.summary.histogram",
"tensorflow.reduce_sum",
"tensorflow.keras.metrics.Mean",
"tensorflow.compat.v1.summary.scalar",
"inspect.signature",
"tensorflow.reduce_max",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"tensorflow.name_scope",
"tensorflow.get_logger",
"tensorflow.gather",
"tensorflow.square",
"tensorflow.reduce_mean",
"transformers.optimization_tf.create_optimizer",
"tensorflow.gather_nd",
"tensorflow.squeeze"
]
| [((722, 751), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name'], {}), '(name)\n', (745, 751), True, 'import tensorflow as tf\n'), ((768, 800), 'tensorflow.reduce_mean', 'tf.reduce_mean', ([], {'input_tensor': 'var'}), '(input_tensor=var)\n', (782, 800), True, 'import tensorflow as tf\n'), ((809, 850), 'tensorflow.compat.v1.summary.scalar', 'tf.compat.v1.summary.scalar', (['"""mean"""', 'mean'], {}), "('mean', mean)\n", (836, 850), True, 'import tensorflow as tf\n'), ((1005, 1050), 'tensorflow.compat.v1.summary.scalar', 'tf.compat.v1.summary.scalar', (['"""stddev"""', 'stddev'], {}), "('stddev', stddev)\n", (1032, 1050), True, 'import tensorflow as tf\n'), ((1211, 1259), 'tensorflow.compat.v1.summary.histogram', 'tf.compat.v1.summary.histogram', (['"""histogram"""', 'var'], {}), "('histogram', var)\n", (1241, 1259), True, 'import tensorflow as tf\n'), ((1325, 1394), 'tensorflow.reduce_mean', 'tf.reduce_mean', ([], {'input_tensor': "features['%s_loss_multiplier' % problem]"}), "(input_tensor=features['%s_loss_multiplier' % problem])\n", (1339, 1394), True, 'import tensorflow as tf\n'), ((12062, 12077), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (12075, 12077), True, 'import tensorflow as tf\n'), ((12331, 12521), 'transformers.optimization_tf.create_optimizer', 'transformers.optimization_tf.create_optimizer', ([], {'init_lr': 'self.params.lr', 'num_train_steps': 'self.params.train_steps', 'num_warmup_steps': 'self.params.num_warmup_steps', 'weight_decay_rate': '(0.01)'}), '(init_lr=self.params.lr,\n num_train_steps=self.params.train_steps, num_warmup_steps=self.params.\n num_warmup_steps, weight_decay_rate=0.01)\n', (12376, 12521), False, 'import transformers\n'), ((12595, 12633), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""mean_acc"""'}), "(name='mean_acc')\n", (12616, 12633), True, 'import tensorflow as tf\n'), ((864, 897), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', 
(['"""stddev"""'], {}), "('stddev')\n", (887, 897), True, 'import tensorflow as tf\n'), ((1094, 1125), 'tensorflow.reduce_max', 'tf.reduce_max', ([], {'input_tensor': 'var'}), '(input_tensor=var)\n', (1107, 1125), True, 'import tensorflow as tf\n'), ((1170, 1201), 'tensorflow.reduce_min', 'tf.reduce_min', ([], {'input_tensor': 'var'}), '(input_tensor=var)\n', (1183, 1201), True, 'import tensorflow as tf\n'), ((2577, 2680), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.params.custom_pooled_hidden_size'], {'activation': 'tf.keras.activations.selu'}), '(self.params.custom_pooled_hidden_size, activation=tf.\n keras.activations.selu)\n', (2598, 2680), True, 'import tensorflow as tf\n'), ((12681, 12698), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (12696, 12698), True, 'import tensorflow as tf\n'), ((3990, 4039), 'tensorflow.gather_nd', 'tf.gather_nd', (['features[features_name]', 'record_ind'], {}), '(features[features_name], record_ind)\n', (4002, 4039), True, 'import tensorflow as tf\n'), ((12899, 12930), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['top_layer.losses'], {}), '(top_layer.losses)\n', (12912, 12930), True, 'import tensorflow as tf\n'), ((3169, 3206), 'tensorflow.squeeze', 'tf.squeeze', (['features[multiplier_name]'], {}), '(features[multiplier_name])\n', (3179, 3206), True, 'import tensorflow as tf\n'), ((10273, 10295), 'tensorflow.name_scope', 'tf.name_scope', (['problem'], {}), '(problem)\n', (10286, 10295), True, 'import tensorflow as tf\n'), ((973, 994), 'tensorflow.square', 'tf.square', (['(var - mean)'], {}), '(var - mean)\n', (982, 994), True, 'import tensorflow as tf\n'), ((3454, 3520), 'tensorflow.gather', 'tf.gather', (['hidden_feature[hidden_feature_name]', 'record_ind'], {'axis': '(0)'}), '(hidden_feature[hidden_feature_name], record_ind, axis=0)\n', (3463, 3520), True, 'import tensorflow as tf\n'), ((8581, 8633), 'inspect.signature', 'signature', (['problem_type_layer[problem_type].__init__'], {}), 
'(problem_type_layer[problem_type].__init__)\n', (8590, 8633), False, 'from inspect import signature\n')] |
import os.path
import tron.Misc
from tron import g, hub
from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder
from tron.Hub.Nub.SocketActorNub import SocketActorNub
from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder
name = 'hal'


def start(poller):
    """Build the 'hal' actor nub from site configuration and register it."""
    actor_cfg = tron.Misc.cfg.get(g.location, 'actors', doFlush=True)[name]

    # Make sure any stale instance is gone before creating a fresh one.
    stop()

    init_cmds = ('ping', 'status', 'version')
    safe_cmd_names = ['ping', 'version', 'status']
    safe_cmds = r'^\s*({0})\s*$'.format('|'.join(safe_cmd_names))

    decoder = ASCIIReplyDecoder(cidFirst=True, debug=1)
    encoder = ASCIICmdEncoder(sendCommander=True, useCID=False, debug=1)
    nub = SocketActorNub(
        poller,
        actor_cfg['host'],
        actor_cfg['port'],
        name=name,
        encoder=encoder,
        decoder=decoder,
        grabCID=True,  # the actor spontaneously generates a line we can eat.
        initCmds=init_cmds,
        safeCmds=safe_cmds,
        needsAuth=True,
        logDir=os.path.join(g.logDir, name),
        debug=3)
    hub.addActor(nub)
def stop():
    """Deregister the 'hal' actor from the hub if it is currently running."""
    actor = hub.findActor(name)
    if not actor:
        return
    hub.dropActor(actor)
    del actor
| [
"tron.Hub.Reply.Decoders.ASCIIReplyDecoder.ASCIIReplyDecoder",
"tron.hub.dropActor",
"tron.Hub.Command.Encoders.ASCIICmdEncoder.ASCIICmdEncoder",
"tron.hub.addActor",
"tron.hub.findActor"
]
| [((540, 581), 'tron.Hub.Reply.Decoders.ASCIIReplyDecoder.ASCIIReplyDecoder', 'ASCIIReplyDecoder', ([], {'cidFirst': '(True)', 'debug': '(1)'}), '(cidFirst=True, debug=1)\n', (557, 581), False, 'from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder\n'), ((590, 648), 'tron.Hub.Command.Encoders.ASCIICmdEncoder.ASCIICmdEncoder', 'ASCIICmdEncoder', ([], {'sendCommander': '(True)', 'useCID': '(False)', 'debug': '(1)'}), '(sendCommander=True, useCID=False, debug=1)\n', (605, 648), False, 'from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder\n'), ((1012, 1029), 'tron.hub.addActor', 'hub.addActor', (['nub'], {}), '(nub)\n', (1024, 1029), False, 'from tron import g, hub\n'), ((1052, 1071), 'tron.hub.findActor', 'hub.findActor', (['name'], {}), '(name)\n', (1065, 1071), False, 'from tron import g, hub\n'), ((1090, 1106), 'tron.hub.dropActor', 'hub.dropActor', (['n'], {}), '(n)\n', (1103, 1106), False, 'from tron import g, hub\n')] |
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import time
import datetime
from lib.common.out import *
from lib.common.objects import File
from lib.core.database import Database
from lib.core.investigation import __project__
class Session(object):
    """State of a single analysis session (one file and/or MISP event)."""
    def __init__(self):
        # Sequential identifier assigned by Sessions.new().
        self.id = None
        # This will be assigned with the File object of the file currently
        # being analyzed.
        self.file = None
        # Timestamp of the creation of the session.
        # datetime.now() is the idiomatic equivalent of the previous
        # fromtimestamp(time.time()) round-trip.
        self.created_at = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # MISP event associated to the object
        self.misp_event = None
class Sessions(object):
    """Registry of analysis sessions plus a pointer to the active one."""
    def __init__(self):
        # Currently active session (None when no session is open).
        self.current = None
        # History of every session opened during this run.
        self.sessions = []
        # Store the results of the last "find" command.
        self.find = None
    def close(self):
        """Deactivate the current session; it stays in the history list."""
        self.current = None
    def is_set(self):
        """Return True if a session is currently open."""
        # Check if the session has been opened or not.
        if self.current:
            return True
        else:
            return False
    def switch(self, session):
        """Make *session* the active one and notify the user."""
        self.current = session
        print_info("Switched to session #{0} on {1}".format(self.current.id, self.current.file.path))
    def new(self, path=None, misp_event=None):
        """Open a new session on a file path and/or a MISP event.

        Requires an open investigation (project). Carries over the MISP
        event (resp. file) from the current session when only the other
        argument is given, and de-duplicates sessions on the same file.
        """
        if path is None and misp_event is None:
            print_error("You have to open a session on a path or on a misp event.")
            return
        if __project__.name:
            pass
        else:
            print_error("You must open an investigation to store files")
            return
        session = Session()
        total = len(self.sessions)
        session.id = total + 1
        if path is not None:
            # Keep the MISP event from the currently open session, if any.
            if self.is_set() and self.current.misp_event:
                session.misp_event = self.current.misp_event
            # Open a section on the given file.
            session.file = File(path)
            # Try to lookup the file in the database. If it is already present
            # we get file name and
            row = Database().find(key='sha256', value=session.file.sha256)
            if row:
                session.file.name = row[0].name
                session.file.tags = ', '.join(tag.to_dict()['tag'] for tag in row[0].tag)
            print_info("Session opened on {0}".format(path))
        if misp_event is not None:
            # Keep the file from the currently open session, if any.
            if self.is_set() and self.current.file:
                session.file = self.current.file
            refresh = False
            if self.current is not None and self.current.misp_event is not None \
                    and self.current.misp_event.event_id == misp_event.event_id:
                refresh = True
            session.misp_event = misp_event
            if refresh:
                print_info("Session on MISP event {0} refreshed.".format(misp_event.event_id))
            else:
                print_info("Session opened on MISP event {0}.".format(misp_event.event_id))
        if session.file is not None:
            # Loop through all existing sessions and check whether there's another
            # session open on the same file and delete it. This is to avoid
            # duplicates in sessions.
            # NOTE: in the future we might want to remove this if sessions have
            # unique attributes (for example, an history just for each of them).
            for entry in self.sessions:
                if entry.file is not None and entry.file.sha256 == session.file.sha256:
                    self.sessions.remove(entry)
        # Add new session to the list.
        self.sessions.append(session)
        # Mark the new session as the current one.
        self.current = session
# Global sessions registry shared across the application.
__sessions__ = Sessions()
| [
"time.time",
"lib.core.database.Database",
"lib.common.objects.File"
]
| [((1970, 1980), 'lib.common.objects.File', 'File', (['path'], {}), '(path)\n', (1974, 1980), False, 'from lib.common.objects import File\n'), ((610, 621), 'time.time', 'time.time', ([], {}), '()\n', (619, 621), False, 'import time\n'), ((2113, 2123), 'lib.core.database.Database', 'Database', ([], {}), '()\n', (2121, 2123), False, 'from lib.core.database import Database\n')] |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
Test error handler.
Usage:
pytest tests/ut/datavisual
"""
from unittest.mock import patch
from werkzeug.exceptions import MethodNotAllowed, NotFound
from ...backend.datavisual.conftest import TRAIN_ROUTES
from ..mock import MockLogger
from ....utils.tools import get_url
from mindinsight.datavisual.processors import scalars_processor
from mindinsight.datavisual.processors.scalars_processor import ScalarsProcessor
class TestErrorHandler:
    """Test train visual api."""

    def _query_scalar_metadata(self, client):
        """Issue the scalar-metadata request with fixed train_ids/tag."""
        params = dict(train_ids="aa", tag="bb")
        url = get_url(TRAIN_ROUTES['scalar_metadata'], params)
        return client.get(url)

    @patch.object(ScalarsProcessor, 'get_metadata_list')
    def test_handle_http_exception_error_not_found(self, mock_scalar_processor, client):
        """A NotFound raised by the processor maps to a 404 error payload."""
        scalars_processor.logger = MockLogger
        message = 'Test Message'

        def raise_not_found(train_ids, tag):
            raise NotFound("%s" % message)

        mock_scalar_processor.side_effect = raise_not_found
        response = self._query_scalar_metadata(client)
        assert response.status_code == 404
        body = response.get_json()
        assert body['error_code'] == '50545001'
        assert body['error_msg'] == '404 Not Found.'

    @patch.object(ScalarsProcessor, 'get_metadata_list')
    def test_handle_http_exception_error_method_not_allowed(self, mock_scalar_processor, client):
        """A MethodNotAllowed raised by the processor maps to a 405 payload."""
        scalars_processor.logger = MockLogger
        message = 'Test Message'

        def raise_method_not_allowed(train_ids, tag):
            raise MethodNotAllowed("%s" % message)

        mock_scalar_processor.side_effect = raise_method_not_allowed
        response = self._query_scalar_metadata(client)
        assert response.status_code == 405
        body = response.get_json()
        assert body['error_code'] == '50545002'
        assert body['error_msg'] == '405 Method Not Allowed.'

    @patch.object(ScalarsProcessor, 'get_metadata_list')
    def test_handle_http_exception_error_method_other_errors(self, mock_scalar_processor, client):
        """Any unexpected exception maps to the generic 500 system error."""
        scalars_processor.logger = MockLogger
        message = 'Test Message'

        def raise_key_error(train_ids, tag):
            raise KeyError("%s" % message)

        mock_scalar_processor.side_effect = raise_key_error
        response = self._query_scalar_metadata(client)
        assert response.status_code == 500
        body = response.get_json()
        assert body['error_code'] == '50540000'
        assert body['error_msg'] == 'System error.'
| [
"werkzeug.exceptions.MethodNotAllowed",
"werkzeug.exceptions.NotFound",
"unittest.mock.patch.object"
]
| [((1172, 1223), 'unittest.mock.patch.object', 'patch.object', (['ScalarsProcessor', '"""get_metadata_list"""'], {}), "(ScalarsProcessor, 'get_metadata_list')\n", (1184, 1223), False, 'from unittest.mock import patch\n'), ((2030, 2081), 'unittest.mock.patch.object', 'patch.object', (['ScalarsProcessor', '"""get_metadata_list"""'], {}), "(ScalarsProcessor, 'get_metadata_list')\n", (2042, 2081), False, 'from unittest.mock import patch\n'), ((2933, 2984), 'unittest.mock.patch.object', 'patch.object', (['ScalarsProcessor', '"""get_metadata_list"""'], {}), "(ScalarsProcessor, 'get_metadata_list')\n", (2945, 2984), False, 'from unittest.mock import patch\n'), ((1532, 1553), 'werkzeug.exceptions.NotFound', 'NotFound', (["('%s' % text)"], {}), "('%s' % text)\n", (1540, 1553), False, 'from werkzeug.exceptions import MethodNotAllowed, NotFound\n'), ((2418, 2447), 'werkzeug.exceptions.MethodNotAllowed', 'MethodNotAllowed', (["('%s' % text)"], {}), "('%s' % text)\n", (2434, 2447), False, 'from werkzeug.exceptions import MethodNotAllowed, NotFound\n')] |
import dash_bootstrap_components as dbc
import dash_html_components as html
DBC_DOCS = (
    "https://dash-bootstrap-components.opensource.faculty.ai/docs/components/"
)


def make_subheading(label, link):
    """Return an H2 heading with a docs-link book icon and an attached tooltip.

    *label* is the visible heading text; *link* is the path appended to
    ``DBC_DOCS`` for the icon's hyperlink.
    """
    slug = label.replace(" ", "")
    tooltip_id = f"tooltip_target_{slug}"
    doc_link = html.A(
        html.I(className="fas fa-book fa-xs ml-2"),
        href=f"{DBC_DOCS}{link}",
        target="_blank",
        id=tooltip_id,
    )
    heading = html.H2(html.Span([label, doc_link]))
    tooltip = dbc.Tooltip(
        f"See {label} documentation", target=tooltip_id
    )
    return html.Div([heading, tooltip], className="mt-3")
| [
"dash_html_components.I",
"dash_bootstrap_components.Tooltip"
]
| [((657, 731), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', (['f"""See {label} documentation"""'], {'target': 'f"""tooltip_target_{slug}"""'}), "(f'See {label} documentation', target=f'tooltip_target_{slug}')\n", (668, 731), True, 'import dash_bootstrap_components as dbc\n'), ((365, 407), 'dash_html_components.I', 'html.I', ([], {'className': '"""fas fa-book fa-xs ml-2"""'}), "(className='fas fa-book fa-xs ml-2')\n", (371, 407), True, 'import dash_html_components as html\n')] |
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from extensions.ops.range import Range
from mo.front.extractor import FrontExtractorOp
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.graph.graph import Node
class ArangeExt(FrontExtractorOp):
    """Extracts the MXNet ``_arange`` symbol into a Range operation."""
    op = '_arange'
    enabled = True
    @classmethod
    def extract(cls, node: Node):
        """Read start/stop/repeat/step/dtype from the MXNet symbol attributes
        and attach them to *node* as Range parameters.

        Returns ``cls.enabled`` so the extractor framework knows it ran.
        """
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        Range.update_node_stat(node, {
            'start': attrs.int('start', 0),
            'stop': attrs.int('stop', 0),
            'repeat': attrs.int('repeat', 1),
            'step': attrs.float('step', 1),
            # BUGFIX: the key was previously queried as 'dtype ' (trailing
            # space), which could never match an MXNet attribute and always
            # fell back to the float32 default.
            'dtype': np.dtype(attrs.str('dtype', 'float32'))
        })
        return cls.enabled
| [
"mo.front.mxnet.extractors.utils.get_mxnet_layer_attrs"
]
| [((935, 974), 'mo.front.mxnet.extractors.utils.get_mxnet_layer_attrs', 'get_mxnet_layer_attrs', (['node.symbol_dict'], {}), '(node.symbol_dict)\n', (956, 974), False, 'from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs\n')] |
import cv2
from PIL import Image
import argparse
from pathlib import Path
from multiprocessing import Process, Pipe,Value,Array
import torch
from config import get_config
from mtcnn import MTCNN
from Learner_trans_tf import face_learner
from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
from sklearn.model_selection import KFold
import os
import glob
import shutil
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import datetime
if __name__ == '__main__':
    # Command-line configuration for the k-fold face-verification experiment.
    parser = argparse.ArgumentParser(description='for face verification')
    parser.add_argument("-ds", "--dataset_dir", help="where to get data", default="noonan", type=str)
    parser.add_argument('-sd','--stored_result_dir',help='where to store data as np arrays',
                        default="results/trans/", type=str)
    parser.add_argument("-k", "--kfold", help="returns the number of splitting iterations in the cross-validator.",
                        default=10, type=int)
    parser.add_argument("-e", "--epochs", help="training epochs", default=20, type=int)
    parser.add_argument("-n", "--names_considered", help="names for different types considered, separated by commas",
                        default="normal,noonan,others", type=str)
    parser.add_argument("-g", "--gpu_id", help="gpu id to use", default="", type=str)
    parser.add_argument("-s", "--use_shuffled_kfold", help="whether to use shuffled kfold.", action="store_true")
    parser.add_argument("-rs", "--random_seed", help="random seed used for k-fold split.", default=6, type=int)
    parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true")
    parser.add_argument("-a", "--additional_data_dir", help="where to get the additional data",
                        default="", type=str)
    parser.add_argument("-ta", "--additional_test_or_train", help="use additional data in only train, or test, or both",
                        default="", type=str)
    parser.add_argument("-as", "--stylegan_data_dir", help="where to get the additional data",
                        default="", type=str)
    parser.add_argument("-ts", "--stylegan_test_or_train", help="use stylegan data in only train, or test, or both",
                        default="", type=str)
    parser.add_argument("-tf", "--transfer", help="how many layer(s) used for transfer learning, "
                        "but 0 means retraining the whole network.", default=0, type=int)
    parser.add_argument("-ac", "--arch", help="types of model used for encoder", default="mobile", type=str)
    args = parser.parse_args()
    # Echo every parsed argument so the run configuration appears in the log.
    for arg in vars(args):
        print(arg+':', getattr(args, arg))
    emore_dir = 'faces_emore'
    conf = get_config(True, args)
    conf.emore_folder = conf.data_path/emore_dir
    mtcnn = MTCNN()
    print('mtcnn loaded')
    names_considered = args.names_considered.strip().split(',')
    # Experiment name encodes the dataset plus every non-default setting.
    exp_name = args.dataset_dir[:4]
    if args.additional_data_dir:
        if 'LAG' in args.additional_data_dir:
            exp_name += '_lag'
        elif 'literature' in args.additional_data_dir:
            exp_name += '_ltr'
    if args.kfold != 10:
        exp_name += ('_k' + str(args.kfold))
    if args.epochs != 20:
        exp_name += ('_e' + str(args.epochs))
    if args.transfer != 0 and args.transfer != 1:
        exp_name += ('_td' + str(args.transfer))
    if args.use_shuffled_kfold:
        exp_name += ('_s' + str(args.random_seed))
    print(exp_name)
    # prepare folders
    raw_dir = 'raw_112'
    verify_type = 'trans'
    if args.use_shuffled_kfold:
        verify_type += '_shuffled'
    # train_dir = conf.facebank_path/args.dataset_dir/verify_type/'train'
    train_dir = conf.emore_folder/'imgs'
    test_dir = conf.emore_folder/'test'
    conf.facebank_path = train_dir
    # Start each run from empty train/test directories, one subfolder per class.
    if os.path.exists(train_dir):
        shutil.rmtree(train_dir)
    if os.path.exists(test_dir):
        shutil.rmtree(test_dir)
    os.mkdir(train_dir)
    os.mkdir(test_dir)
    for name in names_considered:
        os.makedirs(str(train_dir) + '/' + name, exist_ok=True)
        os.makedirs(str(test_dir) + '/' + name, exist_ok=True)
    if args.stylegan_data_dir:
        #e.g. smile_refine_mtcnn_112_divi
        full_stylegan_dir = str(conf.data_path/'facebank'/'stylegan'/args.stylegan_data_dir)
        stylegan_folders = os.listdir(full_stylegan_dir)
    if args.additional_data_dir:
        full_additional_dir = str(conf.data_path/'facebank'/args.additional_data_dir)
    # init kfold
    if args.use_shuffled_kfold:
        kf = KFold(n_splits=args.kfold, shuffle=True, random_state=args.random_seed)
    else:
        kf = KFold(n_splits=args.kfold, shuffle=False, random_state=None)
    # collect and split raw data
    # data_dict maps class name -> array of image-folder paths;
    # idx_gen maps class name -> KFold split generator over those folders.
    data_dict = {}
    idx_gen = {}
    for name in names_considered:
        tmp_list = glob.glob(str(conf.data_path/'facebank'/args.dataset_dir/raw_dir) +
                             '/' + name + '*')
        if 'innm' in args.stylegan_data_dir:
            tmp_list = tmp_list + glob.glob(str(full_stylegan_dir) + '/' + name + '*')
            stylegan_folders = []
        print(str(conf.data_path/'facebank'/args.dataset_dir/raw_dir))
        data_dict[name] = np.array(tmp_list)
        idx_gen[name] = kf.split(data_dict[name])
    if 'literature' in args.additional_data_dir:
        data_dict['ltr'] = np.array(glob.glob(str(full_additional_dir) + '/*'))
        idx_gen['ltr'] = kf.split(data_dict['ltr'])
    score_names = []
    scores = []
    wrong_names = []
    # Timestamped output directory (second resolution) for this run's arrays/plots.
    args.stored_result_path = args.stored_result_dir + os.sep + str(datetime.datetime.now())[:19]
    if not os.path.exists(args.stored_result_path):
        os.mkdir(args.stored_result_path)
    # for fold_idx, (train_index, test_index) in enumerate(kf.split(data_dict[names_considered[0]])):
for fold_idx in range(args.kfold):
train_set = {}
test_set = {}
for name in names_considered:
(train_index, test_index) = next(idx_gen[name])
train_set[name], test_set[name] = data_dict[name][train_index], data_dict[name][test_index]
if 'ltr' in data_dict.keys():
(train_index, test_index) = next(idx_gen['ltr'])
train_set['ltr'], test_set['ltr'] = data_dict['ltr'][train_index], data_dict['ltr'][test_index]
if 'train' in args.additional_test_or_train:
train_set['noonan'] = np.concatenate((train_set['noonan'], train_set['ltr']))
if 'test' in args.additional_test_or_train:
test_set['noonan'] = np.concatenate((test_set['noonan'], test_set['ltr']))
# remove previous data
prev = glob.glob(str(train_dir) + '/*/*')
for p in prev:
os.remove(p)
prev = glob.glob(str(test_dir) + '/*/*')
for p in prev:
os.remove(p)
# save trains to conf.facebank_path/args.dataset_dir/'train' and
# tests to conf.data_path/'facebank'/args.dataset_dir/'test'
# count unbalanced data
train_count = {}
test_count = {}
for name in names_considered:
train_count[name] = 0
for i in range(len(train_set[name])):
img_folder = str(train_set[name][i])
for img in os.listdir(img_folder):
shutil.copy(img_folder + os.sep + str(img),
os.path.join(str(train_dir), name, str(img)))
train_count[name] += 1
# addition data from stylegan
if 'interp' not in data_dict.keys():
folder = os.path.basename(train_set[name][i])
if args.stylegan_data_dir and ('train' in args.stylegan_test_or_train) and (folder in stylegan_folders):
for img in os.listdir(full_stylegan_dir + os.sep + folder):
shutil.copy(os.path.join(full_stylegan_dir, folder, str(img)),
os.path.join(str(train_dir), name, str(img)))
# ('/'.join(train_set[name][i].strip().split('/')[:-2]) +
# '/' + verify_type + '/train/' + name + os.sep + img))
train_count[name] += 1
# test
for i in range(len(test_set[name])):
test_count[name] = 0
img_folder = str(test_set[name][i])
for img in os.listdir(img_folder):
shutil.copy(img_folder + os.sep + str(img),
os.path.join(str(test_dir), name, str(img)))
test_count[name] += 1
# addition data from stylegan
if 'interp' not in data_dict.keys():
folder = os.path.basename(test_set[name][i])
if args.stylegan_data_dir and ('test' in args.stylegan_test_or_train) and (folder in stylegan_folders):
# and
# (folder not in ['noonan7','noonan19','noonan23','normal9','normal20','normal23'])):
for img in os.listdir(full_stylegan_dir + os.sep + folder):
shutil.copy(os.path.join(full_stylegan_dir, folder, str(img)),
os.path.join(str(test_dir), name, str(img)))
test_count[name] += 1
print(train_count, test_count)
# deal with unbalanced data
"""
if train_count['normal'] // train_count['noonan'] > 1:
aug_num = train_count['normal'] // train_count['noonan'] - 1
for img in os.listdir(os.path.join(str(train_dir), 'noonan')):
for aug_idx in range(aug_num):
aug_img = img[:img.rfind('.')] + '_' + str(aug_idx) + img[img.rfind('.'):]
shutil.copy(os.path.join(str(train_dir), 'noonan', img),
os.path.join(str(train_dir), 'noonan', aug_img))
"""
if 'fake' in args.additional_data_dir:
fake_dict = {'noonan':'normal', 'normal':'noonan'}
full_additional_dir = conf.data_path/'facebank'/'noonan+normal'/args.additional_data_dir
add_data = glob.glob(str(full_additional_dir) + os.sep + '*.png')
print('additional:', args.additional_data_dir, len(add_data))
for name in names_considered:
for img_f in add_data:
if name in img_f.strip().split(os.sep)[-1]:
# print('source:', img_f)
# print('copy to:', img_f.replace(str(full_additional_dir),
# str(train_dir) + os.sep + fake_dict[name]))
# print('copy to:', img_f.replace(args.additional_data_dir,
# verify_type + '/train/' + name))
shutil.copy(img_f, os.path.join(str(train_dir), fake_dict[name], os.path.basename(img_f)))
print(fold_idx)
print('datasets ready')
conf_train = get_config(True, args)
conf_train.emore_folder = conf.data_path/emore_dir
conf_train.stored_result_dir = args.stored_result_path
learner = face_learner(conf=conf_train, transfer=args.transfer, ext=exp_name+'_'+str(fold_idx))
# conf, inference=False, transfer=0
if args.transfer != 0:
learner.load_state(conf.save_path, False, True)
print('learner loaded')
learner.train(conf_train, args.epochs)
print('learner retrained.')
learner.save_state()
print('Model is saved')
# prepare_facebank
targets, names, names_idx = prepare_facebank(conf, learner.model, mtcnn, tta = args.tta)
print('names_classes:', names)
noonan_idx = names_idx['noonan']
print('facebank updated')
for path in test_dir.iterdir():
if path.is_file():
continue
# print(path)
for fil in path.iterdir():
# print(fil)
orig_name = ''.join([i for i in fil.name.strip().split('.')[0].split('_')[0] if not i.isdigit()])
for name in names_idx.keys():
if name in orig_name:
score_names.append(names_idx[name])
"""
if orig_name not in names_considered:
print("Un-considered name:", fil.name)
continue
"""
frame = cv2.imread(str(fil))
image = Image.fromarray(frame)
faces = [image,]
distance = learner.binfer(conf, faces, targets, args.tta)
label = score_names[-1]
score = np.exp(distance.dot(-1))
pred = np.argmax(score, 1)
if pred != label:
wrong_names.append(orig_name)
scores.append(score)
    # Aggregate per-image labels/scores across all folds and normalize scores
    # so each row sums to 1 before computing ROC/PR metrics.
    score_names = np.array(score_names)
    wrong_names = np.array(wrong_names)
    score_np = np.squeeze(np.array(scores))
    n_classes = score_np.shape[1]
    score_names = label_binarize(score_names, classes=range(n_classes))
    score_sum = np.zeros([score_np.shape[0], 1])
    for i in range(n_classes):
        score_sum += score_np[:, i, None] # keep the dimension
    relative_scores = (score_np / score_sum)
    total_scores = relative_scores.ravel()
    total_names = score_names.ravel()
    # Persist labels, scores and misclassified names for later analysis.
    name_path = os.path.join(args.stored_result_path, 'wrong_names.npy')
    save_label_score(name_path, wrong_names)
    label_path = os.path.join(args.stored_result_path, 'labels_trans.npy')
    save_label_score(label_path, score_names)
    score_path = os.path.join(args.stored_result_path, 'scores_trans.npy')
    save_label_score(score_path, relative_scores)
    print('saved!')
    # Compute ROC curve and ROC area for noonan
    fpr, tpr, _ = roc_curve(total_names, total_scores) #scores_np[:, noonan_idx]
    roc_auc = auc(fpr, tpr)
    # For PR curve
    precision, recall, _ = precision_recall_curve(total_names, total_scores)
    average_precision = average_precision_score(total_names, total_scores)
    # plots
    plt.figure()
    colors = list(mcolors.TABLEAU_COLORS)
    lw = 2
    plt.plot(fpr, tpr, color='darkorange',
             lw=lw, label='ROC curve (area = %0.4f)' % roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC_{}'.format(exp_name))
    plt.legend(loc="lower right")
    plt.savefig(args.stored_result_path + os.sep + '/fp_tp_{}.png'.format(exp_name))
    plt.close()
    # plt.show()
    plt.figure()
    plt.step(recall, precision, where='post')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('Average precision score ({}): AP={:0.4f}'.format(exp_name, average_precision))
    plt.savefig(args.stored_result_path + os.sep + '/pr_{}.png'.format(exp_name))
    plt.close()
| [
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"numpy.array",
"sklearn.metrics.roc_curve",
"sklearn.model_selection.KFold",
"utils.prepare_facebank",
"os.remove",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"os.mkdir",
"numpy.concatenate",
"matplotlib.pyplot.ylim",
"mtcnn.MTCNN",
"sklearn.metrics.average_precision_score",
"sklearn.metrics.precision_recall_curve",
"numpy.argmax",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.step",
"PIL.Image.fromarray",
"utils.save_label_score",
"os.path.join",
"config.get_config",
"numpy.zeros",
"matplotlib.pyplot.figure",
"datetime.datetime.now",
"os.path.basename",
"shutil.rmtree"
]
| [((651, 711), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""for face verification"""'}), "(description='for face verification')\n", (674, 711), False, 'import argparse\n'), ((2874, 2896), 'config.get_config', 'get_config', (['(True)', 'args'], {}), '(True, args)\n', (2884, 2896), False, 'from config import get_config\n'), ((2959, 2966), 'mtcnn.MTCNN', 'MTCNN', ([], {}), '()\n', (2964, 2966), False, 'from mtcnn import MTCNN\n'), ((3979, 4004), 'os.path.exists', 'os.path.exists', (['train_dir'], {}), '(train_dir)\n', (3993, 4004), False, 'import os\n'), ((4046, 4070), 'os.path.exists', 'os.path.exists', (['test_dir'], {}), '(test_dir)\n', (4060, 4070), False, 'import os\n'), ((4108, 4127), 'os.mkdir', 'os.mkdir', (['train_dir'], {}), '(train_dir)\n', (4116, 4127), False, 'import os\n'), ((4132, 4150), 'os.mkdir', 'os.mkdir', (['test_dir'], {}), '(test_dir)\n', (4140, 4150), False, 'import os\n'), ((13241, 13262), 'numpy.array', 'np.array', (['score_names'], {}), '(score_names)\n', (13249, 13262), True, 'import numpy as np\n'), ((13281, 13302), 'numpy.array', 'np.array', (['wrong_names'], {}), '(wrong_names)\n', (13289, 13302), True, 'import numpy as np\n'), ((13470, 13502), 'numpy.zeros', 'np.zeros', (['[score_np.shape[0], 1]'], {}), '([score_np.shape[0], 1])\n', (13478, 13502), True, 'import numpy as np\n'), ((13742, 13798), 'os.path.join', 'os.path.join', (['args.stored_result_path', '"""wrong_names.npy"""'], {}), "(args.stored_result_path, 'wrong_names.npy')\n", (13754, 13798), False, 'import os\n'), ((13803, 13843), 'utils.save_label_score', 'save_label_score', (['name_path', 'wrong_names'], {}), '(name_path, wrong_names)\n', (13819, 13843), False, 'from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize\n'), ((13861, 13918), 'os.path.join', 'os.path.join', (['args.stored_result_path', '"""labels_trans.npy"""'], {}), "(args.stored_result_path, 'labels_trans.npy')\n", (13873, 13918), 
False, 'import os\n'), ((13923, 13964), 'utils.save_label_score', 'save_label_score', (['label_path', 'score_names'], {}), '(label_path, score_names)\n', (13939, 13964), False, 'from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize\n'), ((13982, 14039), 'os.path.join', 'os.path.join', (['args.stored_result_path', '"""scores_trans.npy"""'], {}), "(args.stored_result_path, 'scores_trans.npy')\n", (13994, 14039), False, 'import os\n'), ((14044, 14089), 'utils.save_label_score', 'save_label_score', (['score_path', 'relative_scores'], {}), '(score_path, relative_scores)\n', (14060, 14089), False, 'from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize\n'), ((14181, 14217), 'sklearn.metrics.roc_curve', 'roc_curve', (['total_names', 'total_scores'], {}), '(total_names, total_scores)\n', (14190, 14217), False, 'from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score\n'), ((14259, 14272), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (14262, 14272), False, 'from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score\n'), ((14320, 14369), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['total_names', 'total_scores'], {}), '(total_names, total_scores)\n', (14342, 14369), False, 'from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score\n'), ((14394, 14444), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['total_names', 'total_scores'], {}), '(total_names, total_scores)\n', (14417, 14444), False, 'from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score\n'), ((14462, 14474), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14472, 14474), True, 'import matplotlib.pyplot as plt\n'), ((14532, 14626), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'color': 
'"""darkorange"""', 'lw': 'lw', 'label': "('ROC curve (area = %0.4f)' % roc_auc)"}), "(fpr, tpr, color='darkorange', lw=lw, label=\n 'ROC curve (area = %0.4f)' % roc_auc)\n", (14540, 14626), True, 'import matplotlib.pyplot as plt\n'), ((14639, 14700), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (14647, 14700), True, 'import matplotlib.pyplot as plt\n'), ((14705, 14725), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (14713, 14725), True, 'import matplotlib.pyplot as plt\n'), ((14730, 14751), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (14738, 14751), True, 'import matplotlib.pyplot as plt\n'), ((14756, 14789), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (14766, 14789), True, 'import matplotlib.pyplot as plt\n'), ((14794, 14826), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (14804, 14826), True, 'import matplotlib.pyplot as plt\n'), ((14872, 14901), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (14882, 14901), True, 'import matplotlib.pyplot as plt\n'), ((14991, 15002), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15000, 15002), True, 'import matplotlib.pyplot as plt\n'), ((15025, 15037), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15035, 15037), True, 'import matplotlib.pyplot as plt\n'), ((15042, 15083), 'matplotlib.pyplot.step', 'plt.step', (['recall', 'precision'], {'where': '"""post"""'}), "(recall, precision, where='post')\n", (15050, 15083), True, 'import matplotlib.pyplot as plt\n'), ((15088, 15108), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (15098, 15108), True, 'import matplotlib.pyplot as plt\n'), 
((15113, 15136), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (15123, 15136), True, 'import matplotlib.pyplot as plt\n'), ((15141, 15162), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (15149, 15162), True, 'import matplotlib.pyplot as plt\n'), ((15167, 15187), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (15175, 15187), True, 'import matplotlib.pyplot as plt\n'), ((15368, 15379), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15377, 15379), True, 'import matplotlib.pyplot as plt\n'), ((4014, 4038), 'shutil.rmtree', 'shutil.rmtree', (['train_dir'], {}), '(train_dir)\n', (4027, 4038), False, 'import shutil\n'), ((4080, 4103), 'shutil.rmtree', 'shutil.rmtree', (['test_dir'], {}), '(test_dir)\n', (4093, 4103), False, 'import shutil\n'), ((4507, 4536), 'os.listdir', 'os.listdir', (['full_stylegan_dir'], {}), '(full_stylegan_dir)\n', (4517, 4536), False, 'import os\n'), ((4719, 4790), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'args.kfold', 'shuffle': '(True)', 'random_state': 'args.random_seed'}), '(n_splits=args.kfold, shuffle=True, random_state=args.random_seed)\n', (4724, 4790), False, 'from sklearn.model_selection import KFold\n'), ((4814, 4874), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'args.kfold', 'shuffle': '(False)', 'random_state': 'None'}), '(n_splits=args.kfold, shuffle=False, random_state=None)\n', (4819, 4874), False, 'from sklearn.model_selection import KFold\n'), ((5392, 5410), 'numpy.array', 'np.array', (['tmp_list'], {}), '(tmp_list)\n', (5400, 5410), True, 'import numpy as np\n'), ((5813, 5852), 'os.path.exists', 'os.path.exists', (['args.stored_result_path'], {}), '(args.stored_result_path)\n', (5827, 5852), False, 'import os\n'), ((5862, 5895), 'os.mkdir', 'os.mkdir', (['args.stored_result_path'], {}), '(args.stored_result_path)\n', (5870, 5895), False, 'import os\n'), ((11323, 11345), 
'config.get_config', 'get_config', (['(True)', 'args'], {}), '(True, args)\n', (11333, 11345), False, 'from config import get_config\n'), ((11951, 12009), 'utils.prepare_facebank', 'prepare_facebank', (['conf', 'learner.model', 'mtcnn'], {'tta': 'args.tta'}), '(conf, learner.model, mtcnn, tta=args.tta)\n', (11967, 12009), False, 'from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize\n'), ((13329, 13345), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (13337, 13345), True, 'import numpy as np\n'), ((6909, 6921), 'os.remove', 'os.remove', (['p'], {}), '(p)\n', (6918, 6921), False, 'import os\n'), ((7006, 7018), 'os.remove', 'os.remove', (['p'], {}), '(p)\n', (7015, 7018), False, 'import os\n'), ((5772, 5795), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5793, 5795), False, 'import datetime\n'), ((6588, 6643), 'numpy.concatenate', 'np.concatenate', (["(train_set['noonan'], train_set['ltr'])"], {}), "((train_set['noonan'], train_set['ltr']))\n", (6602, 6643), True, 'import numpy as np\n'), ((6737, 6790), 'numpy.concatenate', 'np.concatenate', (["(test_set['noonan'], test_set['ltr'])"], {}), "((test_set['noonan'], test_set['ltr']))\n", (6751, 6790), True, 'import numpy as np\n'), ((7447, 7469), 'os.listdir', 'os.listdir', (['img_folder'], {}), '(img_folder)\n', (7457, 7469), False, 'import os\n'), ((8642, 8664), 'os.listdir', 'os.listdir', (['img_folder'], {}), '(img_folder)\n', (8652, 8664), False, 'import os\n'), ((12839, 12861), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (12854, 12861), False, 'from PIL import Image\n'), ((13081, 13100), 'numpy.argmax', 'np.argmax', (['score', '(1)'], {}), '(score, 1)\n', (13090, 13100), True, 'import numpy as np\n'), ((7784, 7820), 'os.path.basename', 'os.path.basename', (['train_set[name][i]'], {}), '(train_set[name][i])\n', (7800, 7820), False, 'import os\n'), ((8978, 9013), 'os.path.basename', 'os.path.basename', 
(['test_set[name][i]'], {}), '(test_set[name][i])\n', (8994, 9013), False, 'import os\n'), ((7981, 8028), 'os.listdir', 'os.listdir', (['(full_stylegan_dir + os.sep + folder)'], {}), '(full_stylegan_dir + os.sep + folder)\n', (7991, 8028), False, 'import os\n'), ((9314, 9361), 'os.listdir', 'os.listdir', (['(full_stylegan_dir + os.sep + folder)'], {}), '(full_stylegan_dir + os.sep + folder)\n', (9324, 9361), False, 'import os\n'), ((11217, 11240), 'os.path.basename', 'os.path.basename', (['img_f'], {}), '(img_f)\n', (11233, 11240), False, 'import os\n')] |
from jinja2 import nodes
from jinja2.ext import Extension
class FragmentCacheExtension(Extension):
# a set of names that trigger the extension.
tags = set(['cache'])
def __init__(self, environment):
super(FragmentCacheExtension, self).__init__(environment)
# add the defaults to the environment
environment.extend(
fragment_cache_prefix='fragment',
fragment_cache=None
)
def parse(self, parser):
# the first token is the token that started the tag. In our case
# we only listen to ``'cache'`` so this will be a name token with
# `cache` as value. We get the line number so that we can give
# that line number to the nodes we create by hand.
lineno = next(parser.stream).lineno
# now we parse a single expression that is used as cache key.
args = [parser.parse_expression()]
# if there is a comma, the user provided a timeout. If not use
# None as second parameter.
if parser.stream.skip_if('comma'):
args.append(parser.parse_expression())
else:
args.append(nodes.Const(None))
# now we parse the body of the cache block up to `endcache` and
# drop the needle (which would always be `endcache` in that case)
body = parser.parse_statements(['name:endcache'], drop_needle=True)
# now return a `CallBlock` node that calls our _cache_support
# helper method on this extension.
return nodes.CallBlock(self.call_method('_cache_support', args),
[], [], body).set_lineno(lineno)
def _cache_support(self, name, timeout, caller):
"""Helper callback."""
key = self.environment.fragment_cache_prefix + name
# try to load the block from the cache
# if there is no fragment in the cache, render it and store
# it in the cache.
rv = self.environment.fragment_cache.get(key)
if rv is not None:
return rv
rv = caller()
self.environment.fragment_cache.add(key, rv, timeout)
return rv
| [
"jinja2.nodes.Const"
]
| [((1151, 1168), 'jinja2.nodes.Const', 'nodes.Const', (['None'], {}), '(None)\n', (1162, 1168), False, 'from jinja2 import nodes\n')] |
# api.py
# Part of PyBBIO
# github.com/alexanderhiam/PyBBIO
# MIT License
#
# Beaglebone platform API file.
from bbio.platform.platform import detect_platform
PLATFORM = detect_platform()
if "3.8" in PLATFORM:
from bone_3_8.adc import analog_init, analog_cleanup
from bone_3_8.pwm import pwm_init, pwm_cleanup
from serial_port import serial_cleanup
elif "3.2" in PLATFORM:
from bone_3_2.adc import analog_init, analog_cleanup
from bone_3_2.pwm import pwm_init, pwm_cleanup
from serial_port import serial_cleanup
def platform_init():
analog_init()
pwm_init()
def platform_cleanup():
analog_cleanup()
pwm_cleanup()
serial_cleanup()
| [
"bone_3_2.pwm.pwm_cleanup",
"bbio.platform.platform.detect_platform",
"serial_port.serial_cleanup",
"bone_3_2.adc.analog_cleanup",
"bone_3_2.pwm.pwm_init",
"bone_3_2.adc.analog_init"
]
| [((175, 192), 'bbio.platform.platform.detect_platform', 'detect_platform', ([], {}), '()\n', (190, 192), False, 'from bbio.platform.platform import detect_platform\n'), ((559, 572), 'bone_3_2.adc.analog_init', 'analog_init', ([], {}), '()\n', (570, 572), False, 'from bone_3_2.adc import analog_init, analog_cleanup\n'), ((575, 585), 'bone_3_2.pwm.pwm_init', 'pwm_init', ([], {}), '()\n', (583, 585), False, 'from bone_3_2.pwm import pwm_init, pwm_cleanup\n'), ((613, 629), 'bone_3_2.adc.analog_cleanup', 'analog_cleanup', ([], {}), '()\n', (627, 629), False, 'from bone_3_2.adc import analog_init, analog_cleanup\n'), ((632, 645), 'bone_3_2.pwm.pwm_cleanup', 'pwm_cleanup', ([], {}), '()\n', (643, 645), False, 'from bone_3_2.pwm import pwm_init, pwm_cleanup\n'), ((648, 664), 'serial_port.serial_cleanup', 'serial_cleanup', ([], {}), '()\n', (662, 664), False, 'from serial_port import serial_cleanup\n')] |
import re
import sys
from urllib.parse import quote as _uriquote
import requests
from . import __version__, errors, utils
from .converters import _county_types, _leaderboard_types, _vpn_types, _not_none
from . import checks
from .cog import request_cog
GET='get'
POST='post'
class HTTPClient:
__CSRF_token_regex = re.compile("const csrfToken[ ]{0,1}=[ ]{0,1}[\"|'](.{36})[\"|']")
__Username_regex = re.compile("const username[ ]{0,1}=[ ]{0,1}[\"|'](.{1,16})[\"|']")
def __init__(self, session=None):
self._state = None
self.authenticated = False
self.__session = requests.Session()
self.static_session = requests.Session()
self.connect_sid = None
self._CSRF_token = None
self.username = None
self.user_agent = f'Tryhackme: (https://github.com/GnarLito/thm-api-py {__version__}) Python/{sys.version_info[0]}.{sys.version_info[1]} requests/{requests.__version__}'
if session is not None:
self.static_login(session)
def close(self):
if self.__session:
self.__session.close()
def static_login(self, session):
self.connect_sid = session
cookie = requests.cookies.create_cookie('connect.sid', session, domain='tryhackme.com')
self.__session.cookies.set_cookie(cookie)
try:
self.request(RouteList.get_unseen_notifications())
self.authenticated = True
self._CSRF_token = self.retrieve_CSRF_token()
self.username = self.retrieve_username()
except Exception as e:
print("session Issue:", e)
def retrieve_CSRF_token(self):
if not self.authenticated:
return None
try:
page = self.request(RouteList.get_profile_page())
return self._HTTPClient__CSRF_token_regex.search(page).group(1)
except AttributeError:
self.authenticated = False
return None
def retrieve_username(self):
if not self.authenticated:
return None
try:
page = self.request(RouteList.get_profile_page())
return self._HTTPClient__Username_regex.search(page).group(1)
except AttributeError:
self.authenticated = False
return None
def request(self, route, **kwargs):
session = self.__session
endpoint = route.url
method = route.method
settings = kwargs.pop('settings', {})
headers = {
'User-Agent': self.user_agent
}
if 'json' in kwargs:
headers['Content-Type'] = 'application/json'
kwargs['data'] = utils.to_json(kwargs.pop('json'))
if "static" in settings:
session = self.static_session
if "CSRF" in settings:
headers['CSRF-Token'] = self._CSRF_token
kwargs["data"]["_CSRF"] = self._CSRF_token
# TODO: retries, Pagenator
try:
with session.request(method, endpoint, **kwargs) as r:
data = utils.response_to_json_or_text(r)
# * valid return
if 300 > r.status_code >= 200:
# $ if return url is login then no auth
if r.url.split('/')[-1] == "login":
raise errors.Unauthorized(request=r, route=route, data=data)
return data
# $ no auth
if r.status_code in {401, 403}:
raise errors.Unauthorized(request=r, route=route, data=data)
# $ endpoint not found
if 404 == r.status_code:
raise errors.NotFound(request=r, route=route, data=data)
# $ server side issue's
if r.status_code in {500, 502}:
raise errors.ServerError(request=r, route=route, data=data)
except Exception as e:
raise e
class Route:
# TODO: add post payload capabilities
BASE = "https://www.tryhackme.com"
def __init__(self, method=GET, path='', **parameters):
self.method = method
self._path = path
self.path = path
url = self.BASE + self.path
options = parameters.pop("options", None)
if parameters:
try:
self.path = self.path.format(**{k: _uriquote(v) if isinstance(v, str) else v for k, v in parameters.items()})
self.url = self.BASE + self.path
except Exception as e:
raise errors.NotValidUrlParameters(e)
else:
self.url = url
if options:
if "?" not in self.url:
self.url + "?" + "&".join([f"{i}={options[i]}" for i in options.keys() if options[i] != None])
else:
self.url + "&" + "&".join([f"{i}={options[i]}" for i in options.keys() if options[i] != None])
self.bucket = f"{method} {path}"
class RouteList:
def get_profile_page(**parameters): return Route(path="/profile", **parameters)
# * normal site calls
def get_server_time( **parameters): return Route(path="/api/server-time", **parameters)
def get_site_stats( **parameters): return Route(path="/api/site-stats", **parameters)
def get_practise_rooms( **parameters): return Route(path="/api/practice-rooms", **parameters)
def get_series( **parameters): return Route(path="/api/series?show={show}", **parameters)
def get_glossary_terms( **parameters): return Route(path="/glossary/all-terms", **parameters)
# * Leaderboards
def get_leaderboards( **parameters): return Route(path="/api/leaderboards", **parameters)
def get_koth_leaderboards(**parameters): return Route(path="/api/leaderboards/koth", **parameters)
# * networks
def get_networks( **parameters): return Route(path="/api/networks", **parameters)
def get_network( **parameters): return Route(path="/api/room/network?code={network_code}", **parameters)
def get_network_cost( **parameters): return Route(path="/api/room/cost?code={network_code}", **parameters)
# * account
def get_subscription_cost(**parameters): return Route(path="/account/subscription/cost", **parameters)
# * paths
def get_path( **parameters): return Route(path="/paths/single/{path_code}", **parameters)
def get_public_paths( **parameters): return Route(path="/paths/public", **parameters)
def get_path_summary( **parameters): return Route(path="/paths/summary", **parameters)
# * modules
def get_modules_summary(**parameters): return Route(path="/modules/summary", **parameters)
def get_module( **parameters): return Route(path="/modules/data/{module_code}",**parameters)
# * games
def get_machine_pool( **parameters): return Route(path="/games/koth/get/machine-pool", **parameters)
def get_game_detail( **parameters): return Route(path="/games/koth/data/{game_code}", **parameters)
def get_recent_games( **parameters): return Route(path="/games/koth/recent/games", **parameters)
def get_user_games( **parameters): return Route(path="/games/koth/user/games", **parameters)
def get_game_tickets_won(**parameters): return Route(path="/games/tickets/won?username={username}", **parameters)
def post_join_koth( **parameters): return Route(method=POST, path="/games/koth/new", **parameters)
def post_new_koth( **parameters): return Route(method=POST, path="/games/koth/join-public", **parameters) # ? might be different for premium users
# * VPN
def get_available_vpns(**parameters): return Route(path="/vpn/get-available-vpns", **parameters)
def get_vpn_info( **parameters): return Route(path="/vpn/my-data", **parameters)
# * VM
def get_machine_running( **parameters): return Route(path="/api/vm/running", **parameters)
def post_renew_machine( **parameters): return Route(method=POST, path="/api/vm/renew", **parameters)
def post_terminate_machine( **parameters): return Route(method=POST, path="/api/vm/terminate", **parameters)
# * user -badge
def get_own_badges( **parameters): return Route(path="/api/badges/mine", **parameters)
def get_user_badges(**parameters): return Route(path="/api/badges/get/{username}", **parameters)
def get_all_badges( **parameters): return Route(path="/api/badges/get", **parameters)
# * user -team
def get_team_info(**parameters): return Route(path="/api/team/is-member", **parameters)
# * user -notifications
def get_unseen_notifications(**parameters): return Route(path="/notifications/has-unseen", **parameters)
def get_all_notifications( **parameters): return Route(path="/notifications/get", **parameters)
# * user -messages
def get_unseen_messages( **parameters): return Route(path="/message/has-unseen", **parameters)
def get_all_group_messages(**parameters): return Route(path="/message/group/get-all", **parameters)
def get_group_messages( **parameters): return Route(path="/message/group/get/{group_id}", **parameters)
# * user -room
def get_user_completed_rooms_count( **parameters): return Route(path="/api/no-completed-rooms-public/{username}", **parameters)
def get_user_completed_rooms( **parameters): return Route(path="/api/all-completed-rooms?username={username}", **parameters)
def get_user_created_rooms( **parameters): return Route(path="/api/created-rooms/{username}", **parameters)
# * user
def get_user_rank( **parameters): return Route(path="/api/user/rank/{username}", **parameters)
def get_user_activty(**parameters): return Route(path="/api/user/activity-events?username={username}", **parameters)
def get_all_friends( **parameters): return Route(path="/api/friend/all", **parameters)
def get_discord_user(**parameters): return Route(path="/api/discord/user/{username}", **parameters) # ? rename to user profile
def get_user_exist( **parameters): return Route(path="/api/user/exist/{username}", **parameters)
def search_user( **parameters): return Route(path="/api/similar-users/{username}", **parameters)
# * room
def get_new_rooms( **parameters): return Route(path="/api/new-rooms", **parameters)
def get_recommended_rooms( **parameters): return Route(path="/recommend/last-room?type=json", **parameters)
def get_questions_answered( **parameters): return Route(path="/api/questions-answered", **parameters)
def get_joined_rooms( **parameters): return Route(path="/api/my-rooms", **parameters)
def get_room_percetages( **parameters): return Route(method=POST, path="/api/room-percentages", **parameters) # ? is a post but it gets stuff
def get_room_scoreboard( **parameters): return Route(path="/api/room/scoreboard?code={room_code}", **parameters)
def get_room_votes( **parameters): return Route(path="/api/room/votes?code={room_code}", **parameters)
def get_room_details( **parameters): return Route(path="/api/room/details?codes={room_code}", **parameters) # ? list posibility
def get_room_tasks( **parameters): return Route(path="/api/tasks/{room_code}", **parameters)
def post_room_answer( **parameters): return Route(method=POST, path="/api/{room_code}/answer", **parameters)
def post_deploy_machine( **parameters): return Route(method=POST, path="/material/deploy", **parameters)
def post_reset_room_progress(**parameters): return Route(method=POST, path="/api/reset-progress", **parameters)
def post_leave_room( **parameters): return Route(method=POST, path="/api/room/leave", **parameters)
class HTTP(request_cog, HTTPClient):
# * normal site calls
def get_server_time(self, **attrs):
return self.request(RouteList.get_server_time(), **attrs)
def get_site_stats(self, **attrs):
return self.request(RouteList.get_site_stats(), **attrs)
def get_practise_rooms(self, **attrs):
return self.request(RouteList.get_practise_rooms(), **attrs)
def get_serie(self, show, serie_code, **attrs):
return self.request(RouteList.get_series(show=show, options={"name": serie_code}), **attrs)
def get_series(self, show, **attrs):
return self.request(RouteList.get_series(show=show), **attrs)
def get_glossary_terms(self, **attrs):
return self.request(RouteList.get_glossary_terms(), **attrs)
# * Leaderboards
def get_leaderboards(self, country: _county_types, type:_leaderboard_types, **attrs):
return self.request(RouteList.get_leaderboards(country=country.to_lower_case(), type=type), **attrs)
def get_koth_leaderboards(self, country: _county_types, type:_leaderboard_types, **attrs):
return self.request(RouteList.get_koth_leaderboards(country=country.to_lower_case(), type=type), **attrs)
# * networks
def get_network(self, network_code, **attrs):
return self.request(RouteList.get_network(network_code=network_code), **attrs)
def get_networks(self, **attrs):
return self.request(RouteList.get_networks(),**attrs)
def get_network_cost(self, network_code, **attrs):
return self.request(RouteList.get_networks(network_code=network_code), **attrs)
# * account
@checks.is_authenticated()
def get_subscription_cost(self, **attrs):
return self.request(RouteList.get_subscription_cost(), **attrs)
# * paths
def get_path(self, path_code, **attrs):
return self.request(RouteList.get_path(path_code=path_code), **attrs)
def get_public_paths(self, **attrs):
return self.request(RouteList.get_public_paths(), **attrs)
def get_path_summary(self, **attrs):
return self.request(RouteList.get_path_summary(), **attrs)
# * modules
def get_modules_summary(self, **attrs):
return self.request(RouteList.get_modules_summary(), **attrs)
def get_module(self, module_code, **attrs):
return self.request(RouteList.get_module(module_code), **attrs)
# * games
def get_machine_pool(self, **attrs):
return self.request(RouteList.get_machine_pool(), **attrs)
def get_game_detail(self, game_code, **attrs):
return self.request(RouteList.get_game_detail(game_code=game_code), **attrs)
def get_recent_games(self, **attrs):
return self.request(RouteList.get_recent_games(), **attrs)
def get_user_games(self, **attrs):
return self.request(RouteList.get_user_games(), **attrs)
def get_game_tickets_won(self, username, **attrs):
return self.request(RouteList.get_game_tickets_won(username=username), **attrs)
@checks.set_header_CSRF()
def post_join_koth(self, **attrs):
return self.request(RouteList.post_join_koth(), **attrs)
@checks.set_header_CSRF()
def post_new_koth(self, **attrs):
return self.request(RouteList.post_new_koth(), **attrs)
# * VPN
@checks.is_authenticated()
def get_available_vpns(self, type : _vpn_types, **attrs):
return self.request(RouteList.get_available_vpns(options={"type": type}), **attrs)
@checks.is_authenticated()
def get_vpn_info(self, **attrs):
return self.request(RouteList.get_vpn_info(), **attrs)
# * VM
def get_machine_running(self, **attrs):
return self.request(RouteList.get_machine_running(), **attrs)
@checks.set_header_CSRF()
def post_renew_machine(self, room_code, **attrs):
return self.request(RouteList.post_renew_machine(), json={"code": room_code}, **attrs)
@checks.set_header_CSRF()
def post_terminate_machine(self, room_code, **attrs):
return self.request(RouteList.post_terminate_machine(), json={"code": room_code}, **attrs)
# * user -badge
@checks.is_authenticated()
def get_own_badges(self, **attrs):
return self.request(RouteList.get_own_badges(), **attrs)
def get_user_badges(self, username, **attrs):
return self.request(RouteList.get_user_badges(username=username), **attrs)
def get_all_badges(self, **attrs):
return self.request(RouteList.get_all_badges(), **attrs)
# * user -team
@checks.is_authenticated()
def get_team_info(self, **attrs):
return self.request(RouteList.get_team_info(), **attrs)
# * user -notifications
@checks.is_authenticated()
def get_unseen_notifications(self, **attrs):
return self.request(RouteList.get_unseen_notifications(), **attrs)
@checks.is_authenticated()
def get_all_notifications(self, **attrs):
return self.request(RouteList.get_all_notifications(), **attrs)
# * user -messages
@checks.is_authenticated()
def get_unseen_messages(self, **attrs):
return self.request(RouteList.get_unseen_messages(), **attrs)
@checks.is_authenticated()
def get_all_group_messages(self, **attrs):
return self.request(RouteList.get_all_group_messages(), **attrs)
@checks.is_authenticated()
def get_group_messages(self, group_id, **attrs):
return self.request(RouteList.get_group_messages(group_id), **attrs)
# * user -room
def get_user_completed_rooms_count(self, username, **attrs):
return self.request(RouteList.get_user_completed_rooms_count(username=username), **attrs)
def get_user_completed_rooms(self, username, limit:int=10, page:int=1, **attrs):
return self.request(RouteList.get_user_completed_rooms(username=username, options={"limit": limit, "page": page}), **attrs)
def get_user_created_rooms(self, username, limit:int=10, page:int=1, **attrs):
return self.request(RouteList.get_user_created_rooms(username=username, options={"limit": limit, "page": page}), **attrs)
# * user
def get_user_rank(self, username : _not_none, **attrs):
return self.request(RouteList.get_user_rank(username=username), **attrs)
def get_user_activty(self, username : _not_none, **attrs):
return self.request(RouteList.get_user_activty(username=username), **attrs)
@checks.is_authenticated()
def get_all_friends(self, **attrs):
return self.request(RouteList.get_all_friends(), **attrs)
def get_discord_user(self, username : _not_none, **attrs):
return self.request(RouteList.get_discord_user(username=username), **attrs)
def get_user_exist(self, username : _not_none, **attrs):
return self.request(RouteList.get_user_exist(username=username), **attrs)
def search_user(self, username : _not_none, **attrs):
return self.request(RouteList.search_user(username=username), **attrs)
# * room
def get_new_rooms(self, **attrs):
return self.request(RouteList.get_new_rooms(), **attrs)
@checks.is_authenticated()
def get_recommended_rooms(self, **attrs):
return self.request(RouteList.get_recommended_rooms(), **attrs)
def get_questions_answered(self, **attrs):
return self.request(RouteList.get_questions_answered(), **attrs)
@checks.is_authenticated()
def get_joined_rooms(self, **attrs):
return self.request(RouteList.get_joined_rooms(), **attrs)
@checks.is_authenticated()
def get_room_percentages(self, room_codes, **attrs):
return self.request(RouteList.get_room_percetages(), json={"rooms": room_codes}, **attrs)
@checks.is_authenticated()
def get_room_scoreboard(self, room_code, **attrs):
return self.request(RouteList.get_room_scoreboard(room_code=room_code), **attrs)
    def get_room_votes(self, room_code, **attrs):
        # Vote counts for a room.
        return self.request(RouteList.get_room_votes(room_code=room_code), **attrs)
    def get_room_details(self, room_code, loadWriteUps: bool=True, loadCreators: bool=True, loadUser: bool=True, **attrs):
        # Room metadata; the API nests the payload under the room code, so we
        # unwrap it here (empty dict when the code is unknown).
        return self.request(RouteList.get_room_details(room_code=room_code, options={"loadWriteUps": loadWriteUps, "loadCreators": loadCreators, "loadUser": loadUser}), **attrs).get(room_code, {})
    def get_room_tasks(self, room_code, **attrs):
        # Task/question list of a room.
        return self.request(RouteList.get_room_tasks(room_code=room_code), **attrs)
    @checks.set_header_CSRF()
    @checks.is_authenticated()
    def post_room_answer(self, room_code, taskNo: int, questionNo: int, answer: str, **attrs):
        # Submit an answer for a room question (requires auth + CSRF header).
        return self.request(RouteList.post_room_answer(room_code=room_code), json={"taskNo": taskNo, "questionNo": questionNo, "answer": answer}, **attrs)
    @checks.set_header_CSRF()
    @checks.is_authenticated()
    def post_deploy_machine(self, room_code, uploadId, **attrs):
        # Deploy a machine in a room (requires auth + CSRF header).
        return self.request(RouteList.post_deploy_machine(), json={"roomCode": room_code, "id": uploadId}, **attrs)
    @checks.set_header_CSRF()
    @checks.is_authenticated()
    def post_reset_room_progress(self, room_code, **attrs):
        # Reset all progress in a room (requires auth + CSRF header).
        return self.request(RouteList.post_reset_room_progress(), json={"code": room_code}, **attrs)
    @checks.set_header_CSRF()
    @checks.is_authenticated()
    def post_leave_room(self, room_code, **attrs):
        # Leave a room (requires auth + CSRF header).
        return self.request(RouteList.post_leave_room(), json={"code": room_code}, **attrs)
| [
"requests.cookies.create_cookie",
"urllib.parse.quote",
"requests.Session",
"re.compile"
]
| [((323, 388), 're.compile', 're.compile', (['"""const csrfToken[ ]{0,1}=[ ]{0,1}["|\'](.{36})["|\']"""'], {}), '(\'const csrfToken[ ]{0,1}=[ ]{0,1}["|\\\'](.{36})["|\\\']\')\n', (333, 388), False, 'import re\n'), ((414, 480), 're.compile', 're.compile', (['"""const username[ ]{0,1}=[ ]{0,1}["|\'](.{1,16})["|\']"""'], {}), '(\'const username[ ]{0,1}=[ ]{0,1}["|\\\'](.{1,16})["|\\\']\')\n', (424, 480), False, 'import re\n'), ((611, 629), 'requests.Session', 'requests.Session', ([], {}), '()\n', (627, 629), False, 'import requests\n'), ((660, 678), 'requests.Session', 'requests.Session', ([], {}), '()\n', (676, 678), False, 'import requests\n'), ((1221, 1299), 'requests.cookies.create_cookie', 'requests.cookies.create_cookie', (['"""connect.sid"""', 'session'], {'domain': '"""tryhackme.com"""'}), "('connect.sid', session, domain='tryhackme.com')\n", (1251, 1299), False, 'import requests\n'), ((4518, 4530), 'urllib.parse.quote', '_uriquote', (['v'], {}), '(v)\n', (4527, 4530), True, 'from urllib.parse import quote as _uriquote\n')] |
import numpy as np
DEFAULT_FILE_PATH = "utils/datasets/glove.6B.50d.txt"


def loadWordVectors(tokens, filepath=DEFAULT_FILE_PATH, dimensions=50):
    """Read pretrained GloVe vectors for the requested vocabulary.

    tokens     -- mapping of word -> row index into the returned matrix
    filepath   -- path to a GloVe text file ("word v1 v2 ... vD" per line)
    dimensions -- expected vector dimensionality

    Returns a (len(tokens), dimensions) float array; rows for words that do
    not appear in the file stay zero.  Raises RuntimeError when a line's
    vector length does not match `dimensions`.
    """
    wordVectors = np.zeros((len(tokens), dimensions))
    # GloVe distributions are UTF-8 text; be explicit so this also works on
    # platforms whose default locale encoding differs.
    with open(filepath, encoding="utf-8") as ifs:
        for line in ifs:
            line = line.strip()
            if not line:
                continue
            row = line.split()
            token = row[0]
            # Only keep vectors for words we actually need.
            if token not in tokens:
                continue
            data = [float(x) for x in row[1:]]
            if len(data) != dimensions:
                raise RuntimeError("wrong number of dimensions")
            wordVectors[tokens[token]] = np.asarray(data)
    return wordVectors
| [
"numpy.asarray"
]
| [((692, 708), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (702, 708), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import subprocess
import sys
import json
# Pod name -> list of service names that the check expects to be healthy.
SERVICES = {
    'control': [
        'control',
        'nodemgr',
        'named',
        'dns',
    ],
    'config-database': [
        'nodemgr',
        'zookeeper',
        'rabbitmq',
        'cassandra',
    ],
    'webui': [
        'web',
        'job',
    ],
    'config': [
        'svc-monitor',
        'nodemgr',
        'device-manager',
        'api',
        'schema',
    ],
}

# Nagios-style exit codes.
WARNING = 1
CRITICAL = 2
def get_contrail_status_txt(services):
    """Run `contrail-status` and parse its plain-text output.

    Returns {pod_group: [{service_name: "status text"}, ...]} for the
    groups listed in `services`.  Exits with CRITICAL if the command fails.
    """
    try:
        output = subprocess.check_output("export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status", shell=True).decode('UTF-8')
    except subprocess.CalledProcessError as err:
        message = ('CRITICAL: Could not get contrail-status.'
                   ' return code: {} cmd: {} output: {}'.
                   format(err.returncode, err.cmd, err.output))
        print(message)
        sys.exit(CRITICAL)
    statuses = dict()
    group = None
    # First line is a header; group sections look like "== <pod> ==" and a
    # blank line ends the current section.
    for line in output.splitlines()[1:]:
        words = line.split()
        if len(words) == 4 and words[0] == '==' and words[3] == '==':
            group = words[2]
            continue
        if len(words) == 0:
            group = None
            continue
        if group and len(words) >= 2 and group in services:
            # "service:instance status..." -> {service: "status..."}
            srv = words[0].split(':')[0]
            statuses.setdefault(group, list()).append(
                {srv: ' '.join(words[1:])})
    return statuses
def get_contrail_status_json(services):
    """Run `contrail-status --format json` and return its "pods" mapping.

    `services` is unused here (kept for symmetry with the txt variant).
    Exits with CRITICAL if the command fails.
    """
    try:
        output = json.loads(subprocess.check_output("export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status --format json", shell=True).decode('UTF-8'))
    except subprocess.CalledProcessError as err:
        message = ('CRITICAL: Could not get contrail-status.'
                   ' return code: {} cmd: {} output: {}'.
                   format(err.returncode, err.cmd, err.output))
        print(message)
        sys.exit(CRITICAL)
    statuses = output["pods"]
    return statuses
def check_contrail_status(services, version=None):
    """Nagios-style check over contrail-status output.

    Exits WARNING when an expected pod/service is missing from the output,
    CRITICAL when a service is neither 'active' nor 'backup', and 0
    otherwise.  Releases newer than build 1912 expose JSON output;
    earlier ones only the text format.
    """
    if version > 1912:
        statuses = get_contrail_status_json(services)
    else:
        statuses = get_contrail_status_txt(services)
    for group in services:
        if group not in statuses:
            message = ('WARNING: POD {} is absent in the contrail-status'
                       .format(group))
            print(message)
            sys.exit(WARNING)
        for srv in services[group]:
            if not any(srv in key for key in statuses[group]):
                message = ('WARNING: {} is absent in the contrail-status'
                           .format(srv))
                print(message)
                sys.exit(WARNING)
            status = next(stat[srv] for stat in statuses[group] if srv in stat)
            if status not in ['active', 'backup']:
                message = ('CRITICAL: {} is not ready. Reason: {}'
                           .format(srv, status))
                print(message)
                sys.exit(CRITICAL)
    print('Contrail status OK')
    sys.exit()  # no argument -> exit status 0 (OK)
if __name__ == '__main__':
    # First CLI argument is the Contrail release: "5.0"/"5.1" map to the
    # integer builds 500/510; otherwise an integer build number (e.g. 1912).
    cver = sys.argv[1]
    if '.' in str(cver):
        if cver == '5.0':
            version = 500
        elif cver == '5.1':
            version = 510
        else:
            print("CRITICAL: invalid version: {}".format(cver))
            sys.exit(CRITICAL)
    elif not cver.isdigit():
        print("CRITICAL: invalid version: {}".format(cver))
        sys.exit(CRITICAL)
    else:
        version = int(cver)
    check_contrail_status(SERVICES, version=version)
| [
"subprocess.check_output",
"sys.exit"
]
| [((3148, 3158), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3156, 3158), False, 'import sys\n'), ((972, 990), 'sys.exit', 'sys.exit', (['CRITICAL'], {}), '(CRITICAL)\n', (980, 990), False, 'import sys\n'), ((2025, 2043), 'sys.exit', 'sys.exit', (['CRITICAL'], {}), '(CRITICAL)\n', (2033, 2043), False, 'import sys\n'), ((2502, 2519), 'sys.exit', 'sys.exit', (['WARNING'], {}), '(WARNING)\n', (2510, 2519), False, 'import sys\n'), ((3548, 3566), 'sys.exit', 'sys.exit', (['CRITICAL'], {}), '(CRITICAL)\n', (3556, 3566), False, 'import sys\n'), ((557, 701), 'subprocess.check_output', 'subprocess.check_output', (['"""export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status"""'], {'shell': '(True)'}), "(\n 'export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status'\n , shell=True)\n", (580, 701), False, 'import subprocess\n'), ((2781, 2798), 'sys.exit', 'sys.exit', (['WARNING'], {}), '(WARNING)\n', (2789, 2798), False, 'import sys\n'), ((3093, 3111), 'sys.exit', 'sys.exit', (['CRITICAL'], {}), '(CRITICAL)\n', (3101, 3111), False, 'import sys\n'), ((3432, 3450), 'sys.exit', 'sys.exit', (['CRITICAL'], {}), '(CRITICAL)\n', (3440, 3450), False, 'import sys\n'), ((1595, 1753), 'subprocess.check_output', 'subprocess.check_output', (['"""export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status --format json"""'], {'shell': '(True)'}), "(\n 'export CONTRAIL_STATUS_CONTAINER_NAME=contrail-status-controller-nrpe ; sudo -E contrail-status --format json'\n , shell=True)\n", (1618, 1753), False, 'import subprocess\n')] |
import argparse
import numpy as np
from .._helpers import read, reader_map
from ._helpers import _get_version_text
def info(argv=None):
    """CLI entry point: read a mesh file, print its summary, and warn about
    inconsistencies.

    Checks performed:
      * cells referencing nonexistent points (index out of range),
      * points that are not referenced by any cell.
    """
    # Parse command line arguments.
    parser = _get_info_parser()
    args = parser.parse_args(argv)
    # read mesh data
    mesh = read(args.infile, file_format=args.input_format)
    print(mesh)
    # check if the cell arrays are consistent with the points
    is_consistent = True
    for cells in mesh.cells:
        # Valid point indices are 0 .. num_points-1, so an index *equal* to
        # num_points is out of range too (the original `>` missed that case).
        if np.any(cells.data >= mesh.points.shape[0]):
            print("\nATTENTION: Inconsistent mesh. Cells refer to nonexistent points.")
            is_consistent = False
            break
    # check if there are redundant points
    if is_consistent:
        point_is_used = np.zeros(mesh.points.shape[0], dtype=bool)
        for cells in mesh.cells:
            point_is_used[cells.data] = True
        if np.any(~point_is_used):
            print("ATTENTION: Some points are not part of any cell.")
print("ATTENTION: Some points are not part of any cell.")
def _get_info_parser():
    """Build the argument parser for the ``info`` subcommand."""
    parser = argparse.ArgumentParser(
        description="Print mesh info.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument("infile", type=str, help="mesh file to be read from")
    supported_formats = sorted(reader_map.keys())
    parser.add_argument(
        "--input-format",
        "-i",
        type=str,
        choices=supported_formats,
        help="input file format",
        default=None,
    )
    parser.add_argument(
        "--version",
        "-v",
        action="version",
        version=_get_version_text(),
        help="display version information",
    )
    return parser
| [
"numpy.any",
"numpy.zeros",
"argparse.ArgumentParser"
]
| [((1006, 1113), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Print mesh info."""', 'formatter_class': 'argparse.RawTextHelpFormatter'}), "(description='Print mesh info.', formatter_class=\n argparse.RawTextHelpFormatter)\n", (1029, 1113), False, 'import argparse\n'), ((469, 510), 'numpy.any', 'np.any', (['(cells.data > mesh.points.shape[0])'], {}), '(cells.data > mesh.points.shape[0])\n', (475, 510), True, 'import numpy as np\n'), ((741, 783), 'numpy.zeros', 'np.zeros', (['mesh.points.shape[0]'], {'dtype': 'bool'}), '(mesh.points.shape[0], dtype=bool)\n', (749, 783), True, 'import numpy as np\n'), ((873, 895), 'numpy.any', 'np.any', (['(~point_is_used)'], {}), '(~point_is_used)\n', (879, 895), True, 'import numpy as np\n')] |
#!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
'''
Creates a top-level, referenceable asset USD file from one or more
'variant' files, each of which can contain arbitrary scene description.
When supplying multiple files, one must also provide the name for a
variantSet that will be constructed to switch between the files.
The asset file will place the variant files behind a "payload", which will
enable consumers to defer loading and processing of the data when composed
onto a UsdStage.
The names of the created variations will be taken directly from the basename
of their corresponding input file.
'''
from __future__ import print_function
from pxr import Tf, Kind, Sdf, Usd
# ToDo:
# - handle multiple variantSets
# - layer multiple kinds of files (e.g. shading.usd over geom.usd)
# - allow output filename to be independently specifiable? (Breaks with Pixar
# convention)
# - allow variant names to be specified independently of variant file names
# - Compute and present (per-variant) UsdGeomModelAPI.extentsHint
# - Compute and author UsdModelAPI::SetPayloadAssetDependencies()
def CreateModelStage(assetName,
                     assetIdentifier=None,
                     kind=Kind.Tokens.component,
                     filesToReference=None,
                     variantSetName=None,
                     defaultVariantSelection=None):
    """Create and return a Usd.Stage for a referenceable model asset.

    assetName -- valid identifier; names the root prim and the output file
        "<assetName>.usd".
    assetIdentifier -- value for assetInfo's identifier; defaults to the
        output file name.
    kind -- model kind; must be registered under Kind.Tokens.model.
    filesToReference -- files to place behind payloads; more than one
        requires variantSetName.
    variantSetName -- name of the variantSet that switches between files.
    defaultVariantSelection -- initial selection; defaults to the variant
        of the first file.

    Returns None (after printing a message) when a precondition fails.
    """
    # Preconditions....
    if not Tf.IsValidIdentifier(assetName):
        print("assetName '%s' must be a valid identifier. Aborting." %
              assetName)
        return None
    if variantSetName and not Tf.IsValidIdentifier(variantSetName):
        print("variantSetName '%s' must be a valid identifier. Aborting." %
              variantSetName)
        return None
    if filesToReference and len(filesToReference) > 1 and not variantSetName:
        # For now, we only allow multiple files to reference if we're switching
        # them with a variantSet.  We can relax this restriction when we can
        # make internal payload arcs (bug #119960)
        print("Cannot create multiple-file-reference without a variantSet. Aborting")
        return None
    if not Kind.Registry.IsA(kind, Kind.Tokens.model):
        print("kind '%s' is not a valid model kind, which must be one of:" %
              kind)
        print(Kind.Registry.GetAllKinds())
        return None
    # Create the root file for the stage, and make it ASCII text.
    # We need some nicer sugar for this.
    fileName = assetName + ".usd"
    rootLayer = Sdf.Layer.CreateNew(fileName, args = {'format':'usda'})
    stage = Usd.Stage.Open(rootLayer)
    # Name the root prim after the asset.  Don't give it a type, since we
    # want that to come from referenced files.  Make it be the "default prim"
    # so that we can reference the resulting file without specifiying a
    # prim path
    rootPath = Sdf.Path.absoluteRootPath
    modelRootPrim = stage.DefinePrim(rootPath.AppendChild(assetName))
    stage.SetDefaultPrim(modelRootPrim)
    modelAPI = Usd.ModelAPI(modelRootPrim)
    modelAPI.SetKind(kind)
    # See http://openusd.org/docs/api/class_usd_model_a_p_i.html#details
    # for more on assetInfo
    modelAPI.SetAssetName(assetName)
    modelAPI.SetAssetIdentifier(assetIdentifier or fileName)
    # Add a class named after the asset, and make the asset inherit from it.
    # This is not necessary for a valid asset, and the class-naming is a Pixar
    # convention.  But always having a class associated with each asset is
    # extremely useful for non-destructively editing many referenced or
    # instanced assets of the same type.
    classPrim = stage.CreateClassPrim(rootPath.AppendChild("_class_"+assetName))
    modelRootPrim.GetInherits().AddInherit(classPrim.GetPath())
    if not filesToReference:
        # weird edge case... we're done
        return stage
    elif len(filesToReference) == 1 and not variantSetName:
        # The other, more plausible edge case: we're just wrapping
        # some other file (e.g. alembic) in order to give it a payload
        # and other proper USD trappings - no variants
        modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(filesToReference[0]))
        return stage
    # OK, we're making a variantSet, and we are going to vary the payload
    # in each variant
    varSet = modelRootPrim.GetVariantSet(variantSetName)
    for variantFile in filesToReference:
        import os
        # Variant name is the file's basename without extension.
        variantName = os.path.splitext(os.path.basename(variantFile))[0]
        # If we didn't specify a default selection, choose the first one
        if not defaultVariantSelection:
            defaultVariantSelection = variantName
        varSet.AddVariant(variantName)
        varSet.SetVariantSelection(variantName)
        # The context object makes all edits "go inside" the variant we
        # just created.
        with varSet.GetVariantEditContext():
            modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(variantFile))
    # Now put the variantSet into the state we want it to be in by default
    varSet.SetVariantSelection(defaultVariantSelection)
    return stage
if __name__ == "__main__":
    import argparse, os, sys
    descr = __doc__.strip()
    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
                                     description=descr)
    parser.add_argument('assetName')
    parser.add_argument('variantFiles', nargs='+')
    parser.add_argument(
        '-k', '--kind', default='component', action='store', metavar='kind',
        help="Model kind, one of: component, group, or assembly")
    parser.add_argument(
        '-v', '--variantSet', default='', action='store', metavar='variantSet',
        help="Variantset to create to modulate variantFiles. Can be elided "
        "if only one file is supplied")
    parser.add_argument(
        '-i', '--identifier', default='', action='store', metavar='identifier',
        help="The identifier you would expect your Ar asset-resolver plugin "
        "to resolve to the (installed) assetName.usd file this script creates. "
        " If unspecified, defaults to assetName.usd")
    parser.add_argument(
        '-d', '--defaultVariantSelection', default='', action='store',
        metavar='defaultVariantSelection',
        help="This variant will be selected by default when the asset is "
        "added to a composition. If unspecified, will be the variant for "
        "'variantFile1'")
    args = parser.parse_args()
    if not args.assetName or args.assetName == '':
        parser.error("No assetName specified")
    stage = CreateModelStage(args.assetName,
                             assetIdentifier=args.identifier,
                             kind=args.kind,
                             filesToReference=args.variantFiles,
                             variantSetName=args.variantSet,
                             defaultVariantSelection=args.defaultVariantSelection)
    # Exit 0 when the stage was written, 1 when a precondition failed.
    if stage:
        stage.GetRootLayer().Save()
        exit(0)
    else:
        exit(1)
| [
"pxr.Sdf.Layer.CreateNew",
"pxr.Tf.IsValidIdentifier",
"pxr.Sdf.Payload",
"pxr.Usd.ModelAPI",
"pxr.Kind.Registry.IsA",
"pxr.Usd.Stage.Open",
"os.path.basename",
"pxr.Kind.Registry.GetAllKinds"
]
| [((3531, 3585), 'pxr.Sdf.Layer.CreateNew', 'Sdf.Layer.CreateNew', (['fileName'], {'args': "{'format': 'usda'}"}), "(fileName, args={'format': 'usda'})\n", (3550, 3585), False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((3599, 3624), 'pxr.Usd.Stage.Open', 'Usd.Stage.Open', (['rootLayer'], {}), '(rootLayer)\n', (3613, 3624), False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((4033, 4060), 'pxr.Usd.ModelAPI', 'Usd.ModelAPI', (['modelRootPrim'], {}), '(modelRootPrim)\n', (4045, 4060), False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((2425, 2456), 'pxr.Tf.IsValidIdentifier', 'Tf.IsValidIdentifier', (['assetName'], {}), '(assetName)\n', (2445, 2456), False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((3167, 3209), 'pxr.Kind.Registry.IsA', 'Kind.Registry.IsA', (['kind', 'Kind.Tokens.model'], {}), '(kind, Kind.Tokens.model)\n', (3184, 3209), False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((2602, 2638), 'pxr.Tf.IsValidIdentifier', 'Tf.IsValidIdentifier', (['variantSetName'], {}), '(variantSetName)\n', (2622, 2638), False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((3320, 3347), 'pxr.Kind.Registry.GetAllKinds', 'Kind.Registry.GetAllKinds', ([], {}), '()\n', (3345, 3347), False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((6265, 6294), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (6281, 6294), False, 'import os\n'), ((5176, 5208), 'pxr.Sdf.Payload', 'Sdf.Payload', (['filesToReference[0]'], {}), '(filesToReference[0])\n', (5187, 5208), False, 'from pxr import Tf, Kind, Sdf, Usd\n'), ((5483, 5512), 'os.path.basename', 'os.path.basename', (['variantFile'], {}), '(variantFile)\n', (5499, 5512), False, 'import os\n'), ((5959, 5983), 'pxr.Sdf.Payload', 'Sdf.Payload', (['variantFile'], {}), '(variantFile)\n', (5970, 5983), False, 'from pxr import Tf, Kind, Sdf, Usd\n')] |
from __future__ import print_function
import json
import os
from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from wagtail.wagtailcore.models import Page, Site
from v1.models import HomePage, BrowseFilterablePage
def run():
    """Bootstrap initial Wagtail data: an admin user, the CFGov homepage as
    site root, a content site on port 8000, and the Events/Archive browse
    pages.

    Each object is only created when missing, so re-running is safe.
    """
    print('Running script \'scripts.initial_data\' ...')
    admin_user = None
    site_root = None
    events = None
    admin_user = User.objects.filter(username='admin')
    if not admin_user:
        # Password comes from the environment so it never lands in source.
        admin_user = User(username='admin',
                          password=make_password(os.environ.get('WAGTAIL_ADMIN_PW')),
                          is_superuser=True, is_active=True, is_staff=True)
        admin_user.save()
    else:
        admin_user = admin_user[0]
    # Creates a new site root `CFGov`
    site_root = HomePage.objects.filter(title='CFGOV')
    if not site_root:
        root = Page.objects.first()
        site_root = HomePage(title='CFGOV', slug='home-page', depth=2, owner=admin_user)
        site_root.live = True
        root.add_child(instance=site_root)
        latest = site_root.save_revision(user=admin_user, submitted_for_moderation=False)
        latest.save()
    else:
        site_root = site_root[0]
    # Setting new site root
    if not Site.objects.filter(hostname='content.localhost').exists():
        site = Site.objects.first()
        site.port = 8000
        site.root_page_id = site_root.id
        site.save()
        content_site = Site(hostname='content.localhost', port=8000, root_page_id=site_root.id)
        content_site.save()
        # Clean Up: drop Wagtail's default welcome page (id=2) now that CFGov
        # is the site root.  `.first()` returns None instead of raising
        # IndexError when the page was already removed (the original
        # `filter(id=2)[0]` crashed in that case).
        old_site_root = Page.objects.filter(id=2).first()
        if old_site_root:
            old_site_root.delete()
    # Events Browse Page required for event `import-data` command
    if not BrowseFilterablePage.objects.filter(title='Events').exists():
        events = BrowseFilterablePage(title='Events', slug='events', owner=admin_user)
        site_root.add_child(instance=events)
        revision = events.save_revision(
            user=admin_user,
            submitted_for_moderation=False,
        )
        revision.publish()
    # Archived Events Browse Filterable Page
    if not BrowseFilterablePage.objects.filter(title='Archive').exists():
        archived_events = BrowseFilterablePage(title='Archive', slug='archive', owner=admin_user)
        if not events:
            events = BrowseFilterablePage.objects.get(title='Events')
        events.add_child(instance=archived_events)
        revision = archived_events.save_revision(
            user=admin_user,
            submitted_for_moderation=False,
        )
        revision.publish()
| [
"wagtail.wagtailcore.models.Site",
"v1.models.BrowseFilterablePage.objects.filter",
"v1.models.BrowseFilterablePage",
"os.environ.get",
"wagtail.wagtailcore.models.Site.objects.first",
"django.contrib.auth.models.User.objects.filter",
"v1.models.HomePage.objects.filter",
"wagtail.wagtailcore.models.Page.objects.first",
"wagtail.wagtailcore.models.Site.objects.filter",
"v1.models.HomePage",
"v1.models.BrowseFilterablePage.objects.get",
"wagtail.wagtailcore.models.Page.objects.filter"
]
| [((446, 483), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'username': '"""admin"""'}), "(username='admin')\n", (465, 483), False, 'from django.contrib.auth.models import User\n'), ((843, 881), 'v1.models.HomePage.objects.filter', 'HomePage.objects.filter', ([], {'title': '"""CFGOV"""'}), "(title='CFGOV')\n", (866, 881), False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((919, 939), 'wagtail.wagtailcore.models.Page.objects.first', 'Page.objects.first', ([], {}), '()\n', (937, 939), False, 'from wagtail.wagtailcore.models import Page, Site\n'), ((960, 1028), 'v1.models.HomePage', 'HomePage', ([], {'title': '"""CFGOV"""', 'slug': '"""home-page"""', 'depth': '(2)', 'owner': 'admin_user'}), "(title='CFGOV', slug='home-page', depth=2, owner=admin_user)\n", (968, 1028), False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((1372, 1392), 'wagtail.wagtailcore.models.Site.objects.first', 'Site.objects.first', ([], {}), '()\n', (1390, 1392), False, 'from wagtail.wagtailcore.models import Page, Site\n'), ((1502, 1574), 'wagtail.wagtailcore.models.Site', 'Site', ([], {'hostname': '"""content.localhost"""', 'port': '(8000)', 'root_page_id': 'site_root.id'}), "(hostname='content.localhost', port=8000, root_page_id=site_root.id)\n", (1506, 1574), False, 'from wagtail.wagtailcore.models import Page, Site\n'), ((1894, 1963), 'v1.models.BrowseFilterablePage', 'BrowseFilterablePage', ([], {'title': '"""Events"""', 'slug': '"""events"""', 'owner': 'admin_user'}), "(title='Events', slug='events', owner=admin_user)\n", (1914, 1963), False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((2306, 2377), 'v1.models.BrowseFilterablePage', 'BrowseFilterablePage', ([], {'title': '"""Archive"""', 'slug': '"""archive"""', 'owner': 'admin_user'}), "(title='Archive', slug='archive', owner=admin_user)\n", (2326, 2377), False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((1647, 1672), 
'wagtail.wagtailcore.models.Page.objects.filter', 'Page.objects.filter', ([], {'id': '(2)'}), '(id=2)\n', (1666, 1672), False, 'from wagtail.wagtailcore.models import Page, Site\n'), ((2422, 2470), 'v1.models.BrowseFilterablePage.objects.get', 'BrowseFilterablePage.objects.get', ([], {'title': '"""Events"""'}), "(title='Events')\n", (2454, 2470), False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((1297, 1346), 'wagtail.wagtailcore.models.Site.objects.filter', 'Site.objects.filter', ([], {'hostname': '"""content.localhost"""'}), "(hostname='content.localhost')\n", (1316, 1346), False, 'from wagtail.wagtailcore.models import Page, Site\n'), ((1815, 1866), 'v1.models.BrowseFilterablePage.objects.filter', 'BrowseFilterablePage.objects.filter', ([], {'title': '"""Events"""'}), "(title='Events')\n", (1850, 1866), False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((2217, 2269), 'v1.models.BrowseFilterablePage.objects.filter', 'BrowseFilterablePage.objects.filter', ([], {'title': '"""Archive"""'}), "(title='Archive')\n", (2252, 2269), False, 'from v1.models import HomePage, BrowseFilterablePage\n'), ((602, 636), 'os.environ.get', 'os.environ.get', (['"""WAGTAIL_ADMIN_PW"""'], {}), "('WAGTAIL_ADMIN_PW')\n", (616, 636), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from zvt.contract.api import df_to_db
from zvt.contract.recorder import Recorder
from zvt.domain.meta.stockhk_meta import Stockhk
from zvt.recorders.em import em_api
class EMStockhkRecorder(Recorder):
    """Record the tradable HK stock list from EastMoney ("em") into Stockhk,
    flagging whether each stock is tradable via southbound connect."""
    provider = "em"
    data_schema = Stockhk

    def run(self):
        # Southbound-connect tradables first; mark them south=True.
        df_south = em_api.get_tradable_list(entity_type="stockhk", hk_south=True)
        df_south = df_south.set_index("code", drop=False)
        df_south["south"] = True
        # Full HK list; anything not already in the southbound set is south=False.
        df = em_api.get_tradable_list(entity_type="stockhk")
        df = df.set_index("code", drop=False)
        df_other = df.loc[~df.index.isin(df_south.index)].copy()
        df_other["south"] = False
        # Persist both partitions under this recorder's provider/schema.
        df_to_db(df=df_south, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
        df_to_db(df=df_other, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
if __name__ == "__main__":
    # Ad-hoc entry point: fetch and store the HK list once.
    recorder = EMStockhkRecorder()
    recorder.run()


# the __all__ is generated
__all__ = ["EMStockhkRecorder"]
| [
"zvt.recorders.em.em_api.get_tradable_list",
"zvt.contract.api.df_to_db"
]
| [((313, 375), 'zvt.recorders.em.em_api.get_tradable_list', 'em_api.get_tradable_list', ([], {'entity_type': '"""stockhk"""', 'hk_south': '(True)'}), "(entity_type='stockhk', hk_south=True)\n", (337, 375), False, 'from zvt.recorders.em import em_api\n'), ((481, 528), 'zvt.recorders.em.em_api.get_tradable_list', 'em_api.get_tradable_list', ([], {'entity_type': '"""stockhk"""'}), "(entity_type='stockhk')\n", (505, 528), False, 'from zvt.recorders.em import em_api\n'), ((682, 793), 'zvt.contract.api.df_to_db', 'df_to_db', ([], {'df': 'df_south', 'data_schema': 'self.data_schema', 'provider': 'self.provider', 'force_update': 'self.force_update'}), '(df=df_south, data_schema=self.data_schema, provider=self.provider,\n force_update=self.force_update)\n', (690, 793), False, 'from zvt.contract.api import df_to_db\n'), ((798, 909), 'zvt.contract.api.df_to_db', 'df_to_db', ([], {'df': 'df_other', 'data_schema': 'self.data_schema', 'provider': 'self.provider', 'force_update': 'self.force_update'}), '(df=df_other, data_schema=self.data_schema, provider=self.provider,\n force_update=self.force_update)\n', (806, 909), False, 'from zvt.contract.api import df_to_db\n')] |
# -*- test-case-name: twisted.names.test.test_rootresolve -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Resolver implementation for querying successive authoritative servers to
lookup a record, starting from the root nameservers.
@author: <NAME>
todo::
robustify it
break discoverAuthority into several smaller functions
documentation
"""
from twisted.internet import defer
from twisted.names import dns
from twisted.names import common
def retry(t, p, *args):
    """Issue p.query(*args) using successive timeouts from `t`.

    Each TimeoutError consumes the next timeout from `t` and retries; once
    `t` is exhausted the failure is propagated to the caller.  Other
    failure types pass through `failure.trap` untouched.
    """
    assert t, "Timeout is required"
    t = list(t)  # private copy the errback can pop from
    def errback(failure):
        failure.trap(defer.TimeoutError)
        if not t:
            return failure
        return p.query(timeout=t.pop(0), *args
            ).addErrback(errback
            )
    return p.query(timeout=t.pop(0), *args
        ).addErrback(errback
        )
class _DummyController:
def messageReceived(self, *args):
pass
class Resolver(common.ResolverBase):
    """Resolver that answers lookups by first discovering the authoritative
    server for the name (starting from `hints`) and then querying it."""
    def __init__(self, hints):
        common.ResolverBase.__init__(self)
        self.hints = hints  # root nameserver addresses to start from
    def _lookup(self, name, cls, type, timeout):
        # Discover the authority for `name`, then fire the real query at it.
        d = discoverAuthority(name, self.hints
            ).addCallback(self.discoveredAuthority, name, cls, type, timeout
            )
        return d
    def discoveredAuthority(self, auth, name, cls, type, timeout):
        # Imported lazily -- presumably to avoid an import cycle with
        # twisted.names.client; confirm before moving to module level.
        from twisted.names import client
        q = dns.Query(name, type, cls)
        r = client.Resolver(servers=[(auth, dns.PORT)])
        d = r.queryUDP([q], timeout)
        d.addCallback(r.filterAnswers)
        return d
def lookupNameservers(host, atServer, p=None):
    """Ask `atServer` for the NS records of `host`, retrying with
    increasing timeouts.  Creates a throwaway protocol when none is given."""
    # print 'Nameserver lookup for', host, 'at', atServer, 'with', p
    if p is None:
        p = dns.DNSDatagramProtocol(_DummyController())
        p.noisy = False
    return retry(
        (1, 3, 11, 45),            # Timeouts
        p,                         # Protocol instance
        (atServer, dns.PORT),      # Server to query
        [dns.Query(host, dns.NS, dns.IN)] # Question to ask
        )
def lookupAddress(host, atServer, p=None):
    """Ask `atServer` for the A records of `host`, retrying with
    increasing timeouts.  Creates a throwaway protocol when none is given."""
    # print 'Address lookup for', host, 'at', atServer, 'with', p
    if p is None:
        p = dns.DNSDatagramProtocol(_DummyController())
        p.noisy = False
    return retry(
        (1, 3, 11, 45),            # Timeouts
        p,                         # Protocol instance
        (atServer, dns.PORT),      # Server to query
        [dns.Query(host, dns.A, dns.IN)] # Question to ask
        )
def extractAuthority(msg, cache):
    """From a DNS response, pick an address for a delegated nameserver.

    Returns (address, nameservers): `address` is the dotted-quad IP of one
    of the NS targets (from `cache` or from glue in this message), or None
    when no address could be found; `nameservers` is the list of NS
    records.  As a side effect, every A record seen is cached into `cache`
    (name -> dotted quad).
    """
    records = msg.answers + msg.authority + msg.additional
    nameservers = [r for r in records if r.type == dns.NS]
    # print 'Records for', soFar, ':', records
    # print 'NS for', soFar, ':', nameservers
    if not nameservers:
        return None, nameservers
    if not records:
        # NOTE(review): unreachable -- if `records` were empty,
        # `nameservers` would be empty too and we'd have returned above.
        raise IOError("No records")
    for r in records:
        if r.type == dns.A:
            cache[str(r.name)] = r.payload.dottedQuad()
    for r in records:
        if r.type == dns.NS:
            if str(r.payload.name) in cache:
                return cache[str(r.payload.name)], nameservers
            # NOTE(review): glue match compares addr.name against r.name
            # (the delegated zone) rather than r.payload.name (the NS
            # target host) -- looks suspicious; confirm before changing.
            for addr in records:
                if addr.type == dns.A and addr.name == r.name:
                    return addr.payload.dottedQuad(), nameservers
    return None, nameservers
def discoverAuthority(host, roots, cache=None, p=None):
    """Walk from the root servers down `host`'s labels to find the address
    of a nameserver authoritative for `host`.

    Written in the pre-inlineCallbacks deferredGenerator style: each
    yielded waitForDeferred pauses the generator until the lookup
    completes; the final plain value yielded is the authority's address.
    """
    if cache is None:
        cache = {}
    rootAuths = list(roots)
    # Walk the labels from the TLD downward, e.g. "com." then "example.com."
    parts = host.rstrip('.').split('.')
    parts.reverse()
    authority = rootAuths.pop()
    soFar = ''
    for part in parts:
        soFar = part + '.' + soFar
        # print '///////', soFar, authority, p
        msg = defer.waitForDeferred(lookupNameservers(soFar, authority, p))
        yield msg
        msg = msg.getResult()
        newAuth, nameservers = extractAuthority(msg, cache)
        if newAuth is not None:
            # print "newAuth is not None"
            authority = newAuth
        else:
            if nameservers:
                r = str(nameservers[0].payload.name)
                # print 'Recursively discovering authority for', r
                authority = defer.waitForDeferred(discoverAuthority(r, roots, cache, p))
                yield authority
                authority = authority.getResult()
                # print 'Discovered to be', authority, 'for', r
##            else:
##                # print 'Doing address lookup for', soFar, 'at', authority
##                msg = defer.waitForDeferred(lookupAddress(soFar, authority, p))
##                yield msg
##                msg = msg.getResult()
##                records = msg.answers + msg.authority + msg.additional
##                addresses = [r for r in records if r.type == dns.A]
##                if addresses:
##                    authority = addresses[0].payload.dottedQuad()
##                else:
##                    raise IOError("Resolution error")
    # print "Yielding authority", authority
    yield authority

discoverAuthority = defer.deferredGenerator(discoverAuthority)
def makePlaceholder(deferred, name):
    """Return a stand-in for method `name`: calling it schedules the call
    to run on the eventual result of `deferred` and returns the deferred."""
    def placeholder(*args, **kw):
        def invoke(result):
            return getattr(result, name)(*args, **kw)
        deferred.addCallback(invoke)
        return deferred
    return placeholder
class DeferredResolver:
    """Proxy that queues lookup calls until the real resolver exists.

    When `resolverDeferred` fires, the instance mutates itself into the
    real resolver (swapping __dict__ and __class__) and fires the queued
    placeholder deferreds with it.
    """
    def __init__(self, resolverDeferred):
        self.waiting = []
        resolverDeferred.addCallback(self.gotRealResolver)
    def gotRealResolver(self, resolver):
        # Become the real resolver in place, then replay queued calls.
        w = self.waiting
        self.__dict__ = resolver.__dict__
        self.__class__ = resolver.__class__
        for d in w:
            d.callback(resolver)
    def __getattr__(self, name):
        # Only resolver-ish methods get a placeholder; everything else
        # raises so attribute errors surface normally.
        if name.startswith('lookup') or name in ('getHostByName', 'query'):
            self.waiting.append(defer.Deferred())
            return makePlaceholder(self.waiting[-1], name)
        raise AttributeError(name)
def bootstrap(resolver):
    """Lookup the root nameserver addresses using the given resolver

    Return a Resolver which will eventually become a C{root.Resolver}
    instance that has references to all the root servers that we were able
    to look up.
    """
    def identity(result):
        return result

    # Root servers are named a.root-servers.net ... m.root-servers.net.
    lookups = []
    for index in range(13):
        hostname = '%s.root-servers.net' % chr(ord('a') + index)
        lookups.append(resolver.getHostByName(hostname).addCallback(identity))

    done = defer.DeferredList(lookups)
    # Keep only the successful lookups (flag is True) and build the Resolver.
    done.addCallback(lambda outcomes: Resolver([addr for ok, addr in outcomes if ok]))
    return DeferredResolver(done)
| [
"twisted.names.client.Resolver",
"twisted.internet.defer.deferredGenerator",
"twisted.internet.defer.Deferred",
"twisted.internet.defer.DeferredList",
"twisted.names.common.ResolverBase.__init__",
"twisted.names.dns.Query"
]
| [((5016, 5058), 'twisted.internet.defer.deferredGenerator', 'defer.deferredGenerator', (['discoverAuthority'], {}), '(discoverAuthority)\n', (5039, 5058), False, 'from twisted.internet import defer\n'), ((6368, 6389), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', (['L'], {}), '(L)\n', (6386, 6389), False, 'from twisted.internet import defer\n'), ((1015, 1049), 'twisted.names.common.ResolverBase.__init__', 'common.ResolverBase.__init__', (['self'], {}), '(self)\n', (1043, 1049), False, 'from twisted.names import common\n'), ((1403, 1429), 'twisted.names.dns.Query', 'dns.Query', (['name', 'type', 'cls'], {}), '(name, type, cls)\n', (1412, 1429), False, 'from twisted.names import dns\n'), ((1442, 1485), 'twisted.names.client.Resolver', 'client.Resolver', ([], {'servers': '[(auth, dns.PORT)]'}), '(servers=[(auth, dns.PORT)])\n', (1457, 1485), False, 'from twisted.names import client\n'), ((2002, 2033), 'twisted.names.dns.Query', 'dns.Query', (['host', 'dns.NS', 'dns.IN'], {}), '(host, dns.NS, dns.IN)\n', (2011, 2033), False, 'from twisted.names import dns\n'), ((2477, 2507), 'twisted.names.dns.Query', 'dns.Query', (['host', 'dns.A', 'dns.IN'], {}), '(host, dns.A, dns.IN)\n', (2486, 2507), False, 'from twisted.names import dns\n'), ((5748, 5764), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (5762, 5764), False, 'from twisted.internet import defer\n')] |
# ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import os, sys

# Make the repository root importable so `ids` and `common_flags` resolve
# when this script is run directly from its subdirectory.
rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.insert(1, rootpath)
import tensorflow as tf
import numpy as np
from PIL import Image
from absl import app
from absl import flags
from common_flags import FLAGS
from ids.voc2012 import get_colormap as get_voc2012_colormap
from ids.cityscapes_fine import get_colormap as get_cityscapes_colormap
# Command-line options: where to read the label images, where to write the
# palettized copies, which palette to use, and the dataset's ignore label.
flags.DEFINE_string("input_dir", None, "input dir path")
flags.DEFINE_string("output_dir", None, "output dir path")
flags.DEFINE_string("colormap", "voc2012", "colormap name")
flags.DEFINE_integer("ignore_label", 255, "ignore label")
def apply_colormap_to_dir(input_dir, output_dir=None, colormap=None):
    """Apply *colormap* as a PNG palette to every single-channel image in *input_dir*.

    Only images in "L" or "P" mode are converted; other files are skipped.
    Results are written under the same file names into *output_dir*
    (created if missing), always in PNG format.

    Args:
        input_dir: Directory containing the label/index images to convert.
        output_dir: Destination directory for the palettized images.
        colormap: Palette array; converted to uint8 before use.

    Raises:
        ValueError: If ``output_dir`` or ``colormap`` is None.
    """
    # Fail early with clear messages instead of the AttributeError/TypeError
    # the old defaults produced (`None.astype`, `os.path.exists(None)`).
    if colormap is None:
        raise ValueError("colormap must not be None")
    if output_dir is None:
        raise ValueError("output_dir must not be None")

    colormap = colormap.astype(np.uint8)
    counter = 0

    # Race-free "create if missing" (the old exists()+mkdir() could race).
    os.makedirs(output_dir, exist_ok=True)

    for filename in tf.io.gfile.listdir(input_dir):
        input_path = os.path.join(input_dir, filename)
        output_path = os.path.join(output_dir, filename)

        # Context manager closes the file handle even for skipped images.
        with Image.open(input_path) as img:
            if img.mode != "L" and img.mode != "P":
                continue

            paletted = img.convert("P")
            paletted.putpalette(colormap)
            paletted.save(output_path, format="PNG")

        counter += 1

    tf.print("Processed {}".format(counter))
def main(argv):
    """Script entry point: pick the requested palette and recolor input_dir."""
    loaders = {
        "voc2012": get_voc2012_colormap,
        "cityscapes": get_cityscapes_colormap,
    }

    colormap_name = FLAGS.colormap.lower()
    if colormap_name not in loaders:
        raise ValueError(f"Not support colormap = {colormap_name}")

    colormap = loaders[colormap_name]()

    # When the ignore label is 0, drop the palette entry for label 0.
    if FLAGS.ignore_label == 0:
        colormap = colormap[1:]

    apply_colormap_to_dir(FLAGS.input_dir, FLAGS.output_dir, colormap=colormap)
if __name__ == "__main__":
    app.run(main)  # absl parses the flags, then invokes main(argv)
| [
"os.path.exists",
"sys.path.insert",
"PIL.Image.open",
"absl.flags.DEFINE_integer",
"os.path.join",
"ids.voc2012.get_colormap",
"absl.app.run",
"tensorflow.io.gfile.listdir",
"os.path.dirname",
"os.mkdir",
"absl.flags.DEFINE_string",
"ids.cityscapes_fine.get_colormap"
]
| [((326, 354), 'sys.path.insert', 'sys.path.insert', (['(1)', 'rootpath'], {}), '(1, rootpath)\n', (341, 354), False, 'import os, sys\n'), ((634, 690), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""input_dir"""', 'None', '"""input dir path"""'], {}), "('input_dir', None, 'input dir path')\n", (653, 690), False, 'from absl import flags\n'), ((691, 749), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_dir"""', 'None', '"""output dir path"""'], {}), "('output_dir', None, 'output dir path')\n", (710, 749), False, 'from absl import flags\n'), ((750, 809), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""colormap"""', '"""voc2012"""', '"""colormap name"""'], {}), "('colormap', 'voc2012', 'colormap name')\n", (769, 809), False, 'from absl import flags\n'), ((810, 867), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""ignore_label"""', '(255)', '"""ignore label"""'], {}), "('ignore_label', 255, 'ignore label')\n", (830, 867), False, 'from absl import flags\n'), ((1089, 1119), 'tensorflow.io.gfile.listdir', 'tf.io.gfile.listdir', (['input_dir'], {}), '(input_dir)\n', (1108, 1119), True, 'import tensorflow as tf\n'), ((2042, 2055), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (2049, 2055), False, 'from absl import app\n'), ((275, 300), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (290, 300), False, 'import os, sys\n'), ((1011, 1037), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1025, 1037), False, 'import os, sys\n'), ((1047, 1067), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (1055, 1067), False, 'import os, sys\n'), ((1143, 1176), 'os.path.join', 'os.path.join', (['input_dir', 'filename'], {}), '(input_dir, filename)\n', (1155, 1176), False, 'import os, sys\n'), ((1199, 1233), 'os.path.join', 'os.path.join', (['output_dir', 'filename'], {}), '(output_dir, filename)\n', (1211, 1233), False, 'import os, sys\n'), ((1249, 1271), 
'PIL.Image.open', 'Image.open', (['input_path'], {}), '(input_path)\n', (1259, 1271), False, 'from PIL import Image\n'), ((1676, 1698), 'ids.voc2012.get_colormap', 'get_voc2012_colormap', ([], {}), '()\n', (1696, 1698), True, 'from ids.voc2012 import get_colormap as get_voc2012_colormap\n'), ((1758, 1783), 'ids.cityscapes_fine.get_colormap', 'get_cityscapes_colormap', ([], {}), '()\n', (1781, 1783), True, 'from ids.cityscapes_fine import get_colormap as get_cityscapes_colormap\n')] |
import sqlite3

# Open (or create) the catalogue database; the module-level cursor `c`
# is used by get_dir_data() below for its INSERTs.
conn = sqlite3.connect('example.db')
c = conn.cursor()
import os
import hashlib
import time
def get_file_md5(filePath):
    """Return the hex MD5 digest of the file at *filePath*.

    Reads in fixed-size binary chunks so large files are not loaded into
    memory at once, and closes the file handle deterministically (the old
    version leaked the handle and slurped the whole file).
    """
    h = hashlib.md5()
    with open(filePath, "rb") as f:
        # 64 KiB chunks; iter() stops on the empty bytes sentinel at EOF.
        for chunk in iter(lambda: f.read(65536), b""):
            h.update(chunk)
    return h.hexdigest()
def get_file_sha256(filePath):
    """Return the hex SHA-256 digest of the file at *filePath*.

    Reads in fixed-size binary chunks so large files are not loaded into
    memory at once, and closes the file handle deterministically (the old
    version leaked the handle and slurped the whole file).
    """
    h = hashlib.sha256()
    with open(filePath, "rb") as f:
        # 64 KiB chunks; iter() stops on the empty bytes sentinel at EOF.
        for chunk in iter(lambda: f.read(65536), b""):
            h.update(chunk)
    return h.hexdigest()
def get_dir_data(dir_path):
    """Catalogue every regular file directly inside *dir_path* into the DB.

    For each file, its absolute path, name, size, creation time and MD5
    digest are inserted into the ``location``, ``files`` and ``file_info``
    tables via the module-level cursor ``c``.  Subdirectories are not
    descended into.

    Args:
        dir_path: Directory whose immediate files should be catalogued.
    """
    dir_path = os.path.realpath(dir_path)

    id_location = 0
    id_file = 0

    # next(os.walk(...))[2] yields only the file names of the top directory.
    for dir_file in next(os.walk(dir_path))[2]:
        # BUG FIX: os.walk yields bare names; resolve them against dir_path
        # instead of the current working directory, otherwise hashing/statting
        # fails (or hits the wrong files) whenever dir_path is not the CWD.
        file_path = os.path.realpath(os.path.join(dir_path, dir_file))

        file_name = dir_file
        file_md5 = get_file_md5(file_path)
        # NOTE(review): sha256 is computed but never stored below — kept to
        # preserve behavior; confirm whether it should go into file_info.
        file_sha256 = get_file_sha256(file_path)
        file_size = os.path.getsize(file_path)
        file_time = time.gmtime(os.path.getctime(file_path))
        file_formatted_time = time.strftime("%Y-%m-%d %I:%M:%S %p", file_time)

        location_values = (id_location, file_path)
        c.execute("INSERT INTO location VALUES (?, ?)", location_values)

        files_values = (id_location, id_file)
        c.execute("INSERT INTO files VALUES (?, ?)", files_values)

        file_info_values = (id_file, file_name, file_size, file_formatted_time, file_md5)
        c.execute("INSERT INTO file_info VALUES (?, ?, ?, ?, ?)", file_info_values)

        id_location += 1
        id_file += 1
# Index the current working directory into the database.
get_dir_data('./')
# Save (commit) the changes
conn.commit()
conn.close()
"os.path.getsize",
"hashlib.sha256",
"hashlib.md5",
"sqlite3.connect",
"os.path.getctime",
"time.strftime",
"os.path.realpath",
"os.walk"
]
| [((22, 51), 'sqlite3.connect', 'sqlite3.connect', (['"""example.db"""'], {}), "('example.db')\n", (37, 51), False, 'import sqlite3\n'), ((144, 157), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (155, 157), False, 'import hashlib\n'), ((255, 271), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (269, 271), False, 'import hashlib\n'), ((374, 400), 'os.path.realpath', 'os.path.realpath', (['dir_path'], {}), '(dir_path)\n', (390, 400), False, 'import os\n'), ((665, 690), 'os.path.getsize', 'os.path.getsize', (['dir_file'], {}), '(dir_file)\n', (680, 690), False, 'import os\n'), ((772, 820), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %I:%M:%S %p"""', 'file_time'], {}), "('%Y-%m-%d %I:%M:%S %p', file_time)\n", (785, 820), False, 'import time\n'), ((835, 861), 'os.path.realpath', 'os.path.realpath', (['dir_file'], {}), '(dir_file)\n', (851, 861), False, 'import os\n'), ((527, 544), 'os.walk', 'os.walk', (['dir_path'], {}), '(dir_path)\n', (534, 544), False, 'import os\n'), ((720, 746), 'os.path.getctime', 'os.path.getctime', (['dir_file'], {}), '(dir_file)\n', (736, 746), False, 'import os\n')] |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    # Signature of the optional `cls` response hook accepted by every
    # operation: (pipeline response, deserialized body, dict — these
    # operations pass an empty dict) -> Any.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class GremlinResourcesOperations(object):
"""GremlinResourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cosmosdb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list_gremlin_databases(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.GremlinDatabaseListResult"]
        """Lists the Gremlin databases under an existing Azure Cosmos DB database account.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either GremlinDatabaseListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.GremlinDatabaseListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.GremlinDatabaseListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the templated URL,
            # later pages use the service-provided continuation link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_gremlin_databases.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand ItemPaged a (next-link, items) pair.
            deserialized = self._deserialize('GremlinDatabaseListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # NOTE(review): the continuation link is returned as None here,
            # i.e. this result type appears to be single-page — confirm
            # against the GremlinDatabaseListResult model.
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 is an error.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_gremlin_databases.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases'}  # type: ignore
def get_gremlin_database(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.GremlinDatabaseGetResults"
"""Gets the Gremlin databases under an existing Azure Cosmos DB database account with the provided
name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GremlinDatabaseGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.GremlinDatabaseGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinDatabaseGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get_gremlin_database.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GremlinDatabaseGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_gremlin_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
    def _create_update_gremlin_database_initial(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        database_name,  # type: str
        create_update_gremlin_database_parameters,  # type: "models.GremlinDatabaseCreateUpdateParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["models.GremlinDatabaseGetResults"]
        """Issue the initial PUT of the create/update long-running operation.

        Returns the deserialized resource when the service answers 200; on
        202 the body is not deserialized and None is returned (the public
        ``begin_`` wrapper polls for completion).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.GremlinDatabaseGetResults"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_update_gremlin_database_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the model into the request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(create_update_gremlin_database_parameters, 'GremlinDatabaseCreateUpdateParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('GremlinDatabaseGetResults', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_update_gremlin_database_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'}  # type: ignore
    def begin_create_update_gremlin_database(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        database_name,  # type: str
        create_update_gremlin_database_parameters,  # type: "models.GremlinDatabaseCreateUpdateParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["models.GremlinDatabaseGetResults"]
        """Create or update an Azure Cosmos DB Gremlin database.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param database_name: Cosmos DB database name.
        :type database_name: str
        :param create_update_gremlin_database_parameters: The parameters to provide for the current
         Gremlin database.
        :type create_update_gremlin_database_parameters: ~azure.mgmt.cosmosdb.models.GremlinDatabaseCreateUpdateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either GremlinDatabaseGetResults or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.GremlinDatabaseGetResults]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.GremlinDatabaseGetResults"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial PUT when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._create_update_gremlin_database_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                database_name=database_name,
                create_update_gremlin_database_parameters=create_update_gremlin_database_parameters,
                cls=lambda x,y,z: x,  # capture the raw pipeline response for the poller
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling reports completion.
            deserialized = self._deserialize('GremlinDatabaseGetResults', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> ARM polling strategy; False -> no polling;
        # any other value is treated as a caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_update_gremlin_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'}  # type: ignore
def _delete_gremlin_database_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
# Construct URL
url = self._delete_gremlin_database_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_gremlin_database_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'} # type: ignore
    def begin_delete_gremlin_database(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        database_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes an existing Azure Cosmos DB Gremlin database.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param database_name: Cosmos DB database name.
        :type database_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the DELETE when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._delete_gremlin_database_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                database_name=database_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # DELETE has no body to deserialize; only invoke the optional hook.
            if cls:
                return cls(pipeline_response, None, {})

        # polling=True -> ARM polling strategy; False -> no polling;
        # any other value is treated as a caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete_gremlin_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}'}  # type: ignore
    def get_gremlin_database_throughput(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        database_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.ThroughputSettingsGetResults"
        """Gets the RUs per second of the Gremlin database under an existing Azure Cosmos DB database
        account with the provided name.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param database_name: Cosmos DB database name.
        :type database_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ThroughputSettingsGetResults, or the result of cls(response)
        :rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ThroughputSettingsGetResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL (the template targets the database's default
        # throughputSettings child resource).
        url = self.get_gremlin_database_throughput.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than HTTP 200 is mapped to a typed ARM error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_gremlin_database_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default'}  # type: ignore
    def _update_gremlin_database_throughput_initial(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        database_name,  # type: str
        update_throughput_parameters,  # type: "models.ThroughputSettingsUpdateParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["models.ThroughputSettingsGetResults"]
        """Issue the initial PUT of the throughput-update long-running operation.

        Returns the deserialized throughput settings when the service answers
        200; on 202 the body is not deserialized and None is returned (the
        public ``begin_`` wrapper polls for completion).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._update_gremlin_database_throughput_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the model into the request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(update_throughput_parameters, 'ThroughputSettingsUpdateParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _update_gremlin_database_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default'}  # type: ignore
    def begin_update_gremlin_database_throughput(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        update_throughput_parameters, # type: "models.ThroughputSettingsUpdateParameters"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
        """Update RUs per second of an Azure Cosmos DB Gremlin database.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param database_name: Cosmos DB database name.
        :type database_name: str
        :param update_throughput_parameters: The RUs per second of the parameters to provide for the
         current Gremlin database.
        :type update_throughput_parameters: ~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # NOTE(review): raw_result is bound only when cont_token is None; a falsy
        # non-None token (e.g. "") would skip this branch yet still reach the
        # `else` below and hit an unbound raw_result — confirm callers never pass "".
        if cont_token is None:
            # Kick off the operation with a single PUT; cls passes the raw
            # pipeline response through so the poller can inspect it.
            raw_result = self._update_gremlin_database_throughput_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                database_name=database_name,
                update_throughput_parameters=update_throughput_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Strip request-scoped kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Deserializes the terminal response once the long-running operation completes.
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of issuing a new request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_gremlin_database_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default'} # type: ignore
    def _migrate_gremlin_database_to_autoscale_initial(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Optional["models.ThroughputSettingsGetResults"]
        """Send the initial migrate-to-autoscale POST request (no polling).

        Returns the deserialized throughput settings on a 200 response; a 202
        (Accepted) response carries no body, so None is returned and the
        caller's poller tracks completion.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Allow callers to extend/override the default status-code-to-exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"
        # Construct URL
        url = self._migrate_gremlin_database_to_autoscale_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        # Only a 200 carries a response body; a 202 leaves deserialized as None.
        if response.status_code == 200:
            deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _migrate_gremlin_database_to_autoscale_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
    def begin_migrate_gremlin_database_to_autoscale(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
        """Migrate an Azure Cosmos DB Gremlin database from manual throughput to autoscale.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param database_name: Cosmos DB database name.
        :type database_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Kick off the operation; cls passes the raw pipeline response
            # through so the poller can inspect it.
            raw_result = self._migrate_gremlin_database_to_autoscale_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                database_name=database_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Strip request-scoped kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Deserializes the terminal response once the long-running operation completes.
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of issuing a new request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_migrate_gremlin_database_to_autoscale.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
    def _migrate_gremlin_database_to_manual_throughput_initial(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Optional["models.ThroughputSettingsGetResults"]
        """Send the initial migrate-to-manual-throughput POST request (no polling).

        Returns the deserialized throughput settings on a 200 response; a 202
        (Accepted) response carries no body, so None is returned and the
        caller's poller tracks completion.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Allow callers to extend/override the default status-code-to-exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"
        # Construct URL
        url = self._migrate_gremlin_database_to_manual_throughput_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        # Only a 200 carries a response body; a 202 leaves deserialized as None.
        if response.status_code == 200:
            deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _migrate_gremlin_database_to_manual_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
    def begin_migrate_gremlin_database_to_manual_throughput(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
        """Migrate an Azure Cosmos DB Gremlin database from autoscale to manual throughput.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param database_name: Cosmos DB database name.
        :type database_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Kick off the operation; cls passes the raw pipeline response
            # through so the poller can inspect it.
            raw_result = self._migrate_gremlin_database_to_manual_throughput_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                database_name=database_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Strip request-scoped kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Deserializes the terminal response once the long-running operation completes.
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of issuing a new request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_migrate_gremlin_database_to_manual_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
    def list_gremlin_graphs(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["models.GremlinGraphListResult"]
        """Lists the Gremlin graph under an existing Azure Cosmos DB database account.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param database_name: Cosmos DB database name.
        :type database_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either GremlinGraphListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.GremlinGraphListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinGraphListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Allow callers to extend/override the default status-code-to-exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"
        # Builds the GET request: full templated URL for the first page, or the
        # opaque next_link URL as-is for subsequent pages.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_gremlin_graphs.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
                    'databaseName': self._serialize.url("database_name", database_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Turns one response into (continuation token, items). This operation
        # returns no next link, so the continuation token is always None.
        def extract_data(pipeline_response):
            deserialized = self._deserialize('GremlinGraphListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)
        # Fetches a single page; raises on any status other than 200.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_gremlin_graphs.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs'} # type: ignore
    def get_gremlin_graph(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        graph_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "models.GremlinGraphGetResults"
        """Gets the Gremlin graph under an existing Azure Cosmos DB database account.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param database_name: Cosmos DB database name.
        :type database_name: str
        :param graph_name: Cosmos DB graph name.
        :type graph_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GremlinGraphGetResults, or the result of cls(response)
        :rtype: ~azure.mgmt.cosmosdb.models.GremlinGraphGetResults
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinGraphGetResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Allow callers to extend/override the default status-code-to-exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"
        # Construct URL
        url = self.get_gremlin_graph.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'graphName': self._serialize.url("graph_name", graph_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Single synchronous GET; 200 is the only success status for this operation.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('GremlinGraphGetResults', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_gremlin_graph.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
    def _create_update_gremlin_graph_initial(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        graph_name, # type: str
        create_update_gremlin_graph_parameters, # type: "models.GremlinGraphCreateUpdateParameters"
        **kwargs # type: Any
    ):
        # type: (...) -> Optional["models.GremlinGraphGetResults"]
        """Send the initial create/update PUT request for a Gremlin graph (no polling).

        Returns the deserialized graph on a 200 response; a 202 (Accepted)
        response carries no body, so None is returned and the caller's poller
        tracks completion.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.GremlinGraphGetResults"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Allow callers to extend/override the default status-code-to-exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_update_gremlin_graph_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'graphName': self._serialize.url("graph_name", graph_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body from the model before issuing the PUT.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(create_update_gremlin_graph_parameters, 'GremlinGraphCreateUpdateParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        # Only a 200 carries a response body; a 202 leaves deserialized as None.
        if response.status_code == 200:
            deserialized = self._deserialize('GremlinGraphGetResults', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_update_gremlin_graph_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
    def begin_create_update_gremlin_graph(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        graph_name, # type: str
        create_update_gremlin_graph_parameters, # type: "models.GremlinGraphCreateUpdateParameters"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["models.GremlinGraphGetResults"]
        """Create or update an Azure Cosmos DB Gremlin graph.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param database_name: Cosmos DB database name.
        :type database_name: str
        :param graph_name: Cosmos DB graph name.
        :type graph_name: str
        :param create_update_gremlin_graph_parameters: The parameters to provide for the current
         Gremlin graph.
        :type create_update_gremlin_graph_parameters: ~azure.mgmt.cosmosdb.models.GremlinGraphCreateUpdateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either GremlinGraphGetResults or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.GremlinGraphGetResults]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["models.GremlinGraphGetResults"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Kick off the operation; cls passes the raw pipeline response
            # through so the poller can inspect it.
            raw_result = self._create_update_gremlin_graph_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                database_name=database_name,
                graph_name=graph_name,
                create_update_gremlin_graph_parameters=create_update_gremlin_graph_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Strip request-scoped kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Deserializes the terminal response once the long-running operation completes.
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('GremlinGraphGetResults', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of issuing a new request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_update_gremlin_graph.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
    def _delete_gremlin_graph_initial(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        graph_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Send the initial DELETE request for a Gremlin graph (no polling).

        202 (Accepted) and 204 (No Content) are the only success statuses;
        neither carries a response body, so nothing is deserialized.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Allow callers to extend/override the default status-code-to-exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        # Construct URL
        url = self._delete_gremlin_graph_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'graphName': self._serialize.url("graph_name", graph_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_gremlin_graph_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
    def begin_delete_gremlin_graph(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        graph_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes an existing Azure Cosmos DB Gremlin graph.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param database_name: Cosmos DB database name.
        :type database_name: str
        :param graph_name: Cosmos DB graph name.
        :type graph_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Kick off the operation; cls passes the raw pipeline response
            # through so the poller can inspect it.
            raw_result = self._delete_gremlin_graph_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                database_name=database_name,
                graph_name=graph_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Strip request-scoped kwargs so they are not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # A delete has no body to deserialize; only the optional cls hook runs
        # on the terminal response (otherwise the poller's result is None).
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of issuing a new request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete_gremlin_graph.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}'} # type: ignore
    def get_gremlin_graph_throughput(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        database_name, # type: str
        graph_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "models.ThroughputSettingsGetResults"
        """Gets the Gremlin graph throughput under an existing Azure Cosmos DB database account with the
        provided name.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param database_name: Cosmos DB database name.
        :type database_name: str
        :param graph_name: Cosmos DB graph name.
        :type graph_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ThroughputSettingsGetResults, or the result of cls(response)
        :rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Allow callers to extend/override the default status-code-to-exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"
        # Construct URL
        url = self.get_gremlin_graph_throughput.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'graphName': self._serialize.url("graph_name", graph_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Single synchronous GET; 200 is the only success status for this operation.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_gremlin_graph_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default'} # type: ignore
def _update_gremlin_graph_throughput_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
update_throughput_parameters, # type: "models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_gremlin_graph_throughput_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(update_throughput_parameters, 'ThroughputSettingsUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_gremlin_graph_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default'} # type: ignore
def begin_update_gremlin_graph_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
update_throughput_parameters, # type: "models.ThroughputSettingsUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Update RUs per second of an Azure Cosmos DB Gremlin graph.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:param update_throughput_parameters: The RUs per second of the parameters to provide for the
current Gremlin graph.
:type update_throughput_parameters: ~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_gremlin_graph_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
update_throughput_parameters=update_throughput_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_gremlin_graph_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default'} # type: ignore
def _migrate_gremlin_graph_to_autoscale_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._migrate_gremlin_graph_to_autoscale_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_gremlin_graph_to_autoscale_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
def begin_migrate_gremlin_graph_to_autoscale(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Migrate an Azure Cosmos DB Gremlin graph from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._migrate_gremlin_graph_to_autoscale_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_gremlin_graph_to_autoscale.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToAutoscale'} # type: ignore
def _migrate_gremlin_graph_to_manual_throughput_initial(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.ThroughputSettingsGetResults"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._migrate_gremlin_graph_to_manual_throughput_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'graphName': self._serialize.url("graph_name", graph_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_gremlin_graph_to_manual_throughput_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
def begin_migrate_gremlin_graph_to_manual_throughput(
self,
resource_group_name, # type: str
account_name, # type: str
database_name, # type: str
graph_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ThroughputSettingsGetResults"]
"""Migrate an Azure Cosmos DB Gremlin graph from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param graph_name: Cosmos DB graph name.
:type graph_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._migrate_gremlin_graph_to_manual_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
graph_name=graph_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_gremlin_graph_to_manual_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName}/throughputSettings/default/migrateToManualThroughput'} # type: ignore
| [
"azure.mgmt.core.polling.arm_polling.ARMPolling",
"azure.core.exceptions.map_error",
"azure.core.polling.LROPoller",
"azure.core.exceptions.HttpResponseError",
"azure.core.polling.NoPolling",
"azure.core.polling.LROPoller.from_continuation_token",
"azure.core.paging.ItemPaged",
"typing.TypeVar"
]
| [((1181, 1193), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (1188, 1193), False, 'from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union\n'), ((5708, 5741), 'azure.core.paging.ItemPaged', 'ItemPaged', (['get_next', 'extract_data'], {}), '(get_next, extract_data)\n', (5717, 5741), False, 'from azure.core.paging import ItemPaged\n'), ((48562, 48595), 'azure.core.paging.ItemPaged', 'ItemPaged', (['get_next', 'extract_data'], {}), '(get_next, extract_data)\n', (48571, 48595), False, 'from azure.core.paging import ItemPaged\n'), ((8702, 8790), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (8711, 8790), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((8804, 8869), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (8821, 8869), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((11870, 11958), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (11879, 11958), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((11972, 12037), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (11989, 12037), False, 'from azure.core.exceptions 
import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((15583, 15614), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', (['lro_delay'], {}), '(lro_delay, **kwargs)\n', (15593, 15614), False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((15757, 15931), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', ([], {'polling_method': 'polling_method', 'continuation_token': 'cont_token', 'client': 'self._client', 'deserialization_callback': 'get_long_running_output'}), '(polling_method=polling_method,\n continuation_token=cont_token, client=self._client,\n deserialization_callback=get_long_running_output)\n', (15790, 15931), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((16035, 16111), 'azure.core.polling.LROPoller', 'LROPoller', (['self._client', 'raw_result', 'get_long_running_output', 'polling_method'], {}), '(self._client, raw_result, get_long_running_output, polling_method)\n', (16044, 16111), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((18176, 18264), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (18185, 18264), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((18278, 18343), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (18295, 18343), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((20948, 20979), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 
'ARMPolling', (['lro_delay'], {}), '(lro_delay, **kwargs)\n', (20958, 20979), False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((21122, 21296), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', ([], {'polling_method': 'polling_method', 'continuation_token': 'cont_token', 'client': 'self._client', 'deserialization_callback': 'get_long_running_output'}), '(polling_method=polling_method,\n continuation_token=cont_token, client=self._client,\n deserialization_callback=get_long_running_output)\n', (21155, 21296), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((21400, 21476), 'azure.core.polling.LROPoller', 'LROPoller', (['self._client', 'raw_result', 'get_long_running_output', 'polling_method'], {}), '(self._client, raw_result, get_long_running_output, polling_method)\n', (21409, 21476), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((24492, 24580), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (24501, 24580), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((24594, 24659), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (24611, 24659), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((27683, 27771), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (27692, 
27771), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((27785, 27850), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (27802, 27850), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((31412, 31443), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', (['lro_delay'], {}), '(lro_delay, **kwargs)\n', (31422, 31443), False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((31586, 31760), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', ([], {'polling_method': 'polling_method', 'continuation_token': 'cont_token', 'client': 'self._client', 'deserialization_callback': 'get_long_running_output'}), '(polling_method=polling_method,\n continuation_token=cont_token, client=self._client,\n deserialization_callback=get_long_running_output)\n', (31619, 31760), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((31864, 31940), 'azure.core.polling.LROPoller', 'LROPoller', (['self._client', 'raw_result', 'get_long_running_output', 'polling_method'], {}), '(self._client, raw_result, get_long_running_output, polling_method)\n', (31873, 31940), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((34270, 34358), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (34279, 34358), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((34372, 34437), 
'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (34389, 34437), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((37637, 37668), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', (['lro_delay'], {}), '(lro_delay, **kwargs)\n', (37647, 37668), False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((37811, 37985), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', ([], {'polling_method': 'polling_method', 'continuation_token': 'cont_token', 'client': 'self._client', 'deserialization_callback': 'get_long_running_output'}), '(polling_method=polling_method,\n continuation_token=cont_token, client=self._client,\n deserialization_callback=get_long_running_output)\n', (37844, 37985), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((38089, 38165), 'azure.core.polling.LROPoller', 'LROPoller', (['self._client', 'raw_result', 'get_long_running_output', 'polling_method'], {}), '(self._client, raw_result, get_long_running_output, polling_method)\n', (38098, 38165), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((40533, 40621), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (40542, 40621), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((40635, 40700), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', 
(40652, 40700), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((43931, 43962), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', (['lro_delay'], {}), '(lro_delay, **kwargs)\n', (43941, 43962), False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((44105, 44279), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', ([], {'polling_method': 'polling_method', 'continuation_token': 'cont_token', 'client': 'self._client', 'deserialization_callback': 'get_long_running_output'}), '(polling_method=polling_method,\n continuation_token=cont_token, client=self._client,\n deserialization_callback=get_long_running_output)\n', (44138, 44279), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((44383, 44459), 'azure.core.polling.LROPoller', 'LROPoller', (['self._client', 'raw_result', 'get_long_running_output', 'polling_method'], {}), '(self._client, raw_result, get_long_running_output, polling_method)\n', (44392, 44459), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((51713, 51801), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (51722, 51801), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((51815, 51880), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (51832, 51880), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((54982, 55070), 
'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (54991, 55070), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((55084, 55149), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (55101, 55149), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((58811, 58842), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', (['lro_delay'], {}), '(lro_delay, **kwargs)\n', (58821, 58842), False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((58985, 59159), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', ([], {'polling_method': 'polling_method', 'continuation_token': 'cont_token', 'client': 'self._client', 'deserialization_callback': 'get_long_running_output'}), '(polling_method=polling_method,\n continuation_token=cont_token, client=self._client,\n deserialization_callback=get_long_running_output)\n', (59018, 59159), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((59263, 59339), 'azure.core.polling.LROPoller', 'LROPoller', (['self._client', 'raw_result', 'get_long_running_output', 'polling_method'], {}), '(self._client, raw_result, get_long_running_output, polling_method)\n', (59272, 59339), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((61526, 61614), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, 
response=response, error_map=\n error_map)\n', (61535, 61614), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((61628, 61693), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (61645, 61693), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((64456, 64487), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', (['lro_delay'], {}), '(lro_delay, **kwargs)\n', (64466, 64487), False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((64630, 64804), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', ([], {'polling_method': 'polling_method', 'continuation_token': 'cont_token', 'client': 'self._client', 'deserialization_callback': 'get_long_running_output'}), '(polling_method=polling_method,\n continuation_token=cont_token, client=self._client,\n deserialization_callback=get_long_running_output)\n', (64663, 64804), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((64908, 64984), 'azure.core.polling.LROPoller', 'LROPoller', (['self._client', 'raw_result', 'get_long_running_output', 'polling_method'], {}), '(self._client, raw_result, get_long_running_output, polling_method)\n', (64917, 64984), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((68187, 68275), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (68196, 68275), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, 
ResourceNotFoundError, map_error\n'), ((68289, 68354), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (68306, 68354), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((71500, 71588), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (71509, 71588), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((71602, 71667), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (71619, 71667), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((75384, 75415), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', (['lro_delay'], {}), '(lro_delay, **kwargs)\n', (75394, 75415), False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((75558, 75732), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', ([], {'polling_method': 'polling_method', 'continuation_token': 'cont_token', 'client': 'self._client', 'deserialization_callback': 'get_long_running_output'}), '(polling_method=polling_method,\n continuation_token=cont_token, client=self._client,\n deserialization_callback=get_long_running_output)\n', (75591, 75732), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((75836, 75912), 'azure.core.polling.LROPoller', 'LROPoller', (['self._client', 'raw_result', 
'get_long_running_output', 'polling_method'], {}), '(self._client, raw_result, get_long_running_output, polling_method)\n', (75845, 75912), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((78364, 78452), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (78373, 78452), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((78466, 78531), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (78483, 78531), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((81889, 81920), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', (['lro_delay'], {}), '(lro_delay, **kwargs)\n', (81899, 81920), False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((82063, 82237), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', ([], {'polling_method': 'polling_method', 'continuation_token': 'cont_token', 'client': 'self._client', 'deserialization_callback': 'get_long_running_output'}), '(polling_method=polling_method,\n continuation_token=cont_token, client=self._client,\n deserialization_callback=get_long_running_output)\n', (82096, 82237), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((82341, 82417), 'azure.core.polling.LROPoller', 'LROPoller', (['self._client', 'raw_result', 'get_long_running_output', 'polling_method'], {}), '(self._client, raw_result, get_long_running_output, polling_method)\n', (82350, 82417), False, 'from azure.core.polling import 
LROPoller, NoPolling, PollingMethod\n'), ((84907, 84995), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (84916, 84995), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((85009, 85074), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (85026, 85074), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((88463, 88494), 'azure.mgmt.core.polling.arm_polling.ARMPolling', 'ARMPolling', (['lro_delay'], {}), '(lro_delay, **kwargs)\n', (88473, 88494), False, 'from azure.mgmt.core.polling.arm_polling import ARMPolling\n'), ((88637, 88811), 'azure.core.polling.LROPoller.from_continuation_token', 'LROPoller.from_continuation_token', ([], {'polling_method': 'polling_method', 'continuation_token': 'cont_token', 'client': 'self._client', 'deserialization_callback': 'get_long_running_output'}), '(polling_method=polling_method,\n continuation_token=cont_token, client=self._client,\n deserialization_callback=get_long_running_output)\n', (88670, 88811), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((88915, 88991), 'azure.core.polling.LROPoller', 'LROPoller', (['self._client', 'raw_result', 'get_long_running_output', 'polling_method'], {}), '(self._client, raw_result, get_long_running_output, polling_method)\n', (88924, 88991), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((5482, 5570), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 
'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (5491, 5570), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((5588, 5653), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], {'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (5605, 5653), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((15664, 15675), 'azure.core.polling.NoPolling', 'NoPolling', ([], {}), '()\n', (15673, 15675), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((21029, 21040), 'azure.core.polling.NoPolling', 'NoPolling', ([], {}), '()\n', (21038, 21040), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((31493, 31504), 'azure.core.polling.NoPolling', 'NoPolling', ([], {}), '()\n', (31502, 31504), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((37718, 37729), 'azure.core.polling.NoPolling', 'NoPolling', ([], {}), '()\n', (37727, 37729), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((44012, 44023), 'azure.core.polling.NoPolling', 'NoPolling', ([], {}), '()\n', (44021, 44023), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((48336, 48424), 'azure.core.exceptions.map_error', 'map_error', ([], {'status_code': 'response.status_code', 'response': 'response', 'error_map': 'error_map'}), '(status_code=response.status_code, response=response, error_map=\n error_map)\n', (48345, 48424), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((48442, 48507), 'azure.core.exceptions.HttpResponseError', 'HttpResponseError', ([], 
{'response': 'response', 'error_format': 'ARMErrorFormat'}), '(response=response, error_format=ARMErrorFormat)\n', (48459, 48507), False, 'from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\n'), ((58892, 58903), 'azure.core.polling.NoPolling', 'NoPolling', ([], {}), '()\n', (58901, 58903), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((64537, 64548), 'azure.core.polling.NoPolling', 'NoPolling', ([], {}), '()\n', (64546, 64548), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((75465, 75476), 'azure.core.polling.NoPolling', 'NoPolling', ([], {}), '()\n', (75474, 75476), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((81970, 81981), 'azure.core.polling.NoPolling', 'NoPolling', ([], {}), '()\n', (81979, 81981), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n'), ((88544, 88555), 'azure.core.polling.NoPolling', 'NoPolling', ([], {}), '()\n', (88553, 88555), False, 'from azure.core.polling import LROPoller, NoPolling, PollingMethod\n')] |
"""Support for MQTT discovery."""
import asyncio
import logging
from hatasmota.discovery import (
TasmotaDiscovery,
get_device_config as tasmota_get_device_config,
get_entities_for_platform as tasmota_get_entities_for_platform,
get_entity as tasmota_get_entity,
has_entities_with_platform as tasmota_has_entities_with_platform,
unique_id_from_hash,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN
# Module-level logger for the Tasmota discovery integration.
_LOGGER = logging.getLogger(__name__)
# Entity platforms this integration currently forwards discovery to.
SUPPORTED_PLATFORMS = [
    "switch",
]
# hass.data key: dict of discovery hashes that have already been processed.
ALREADY_DISCOVERED = "tasmota_discovered_components"
# hass.data key: set of "<platform>.tasmota" entries whose setup is complete.
CONFIG_ENTRY_IS_SETUP = "tasmota_config_entry_is_setup"
# hass.data key: asyncio.Lock serializing config-entry platform setup.
DATA_CONFIG_ENTRY_LOCK = "tasmota_config_entry_lock"
# Dispatcher signal fired when discovery data for a device is received.
TASMOTA_DISCOVERY_DEVICE = "tasmota_discovery_device"
# Dispatcher signal template for a newly discovered entity (per platform).
TASMOTA_DISCOVERY_ENTITY_NEW = "tasmota_discovery_entity_new_{}"
# Dispatcher signal template for updates to an already-discovered entity.
TASMOTA_DISCOVERY_ENTITY_UPDATED = "tasmota_discovery_entity_updated_{}_{}_{}_{}"
def clear_discovery_hash(hass, discovery_hash):
    """Remove a discovery hash from the ALREADY_DISCOVERED registry."""
    discovered = hass.data[ALREADY_DISCOVERED]
    del discovered[discovery_hash]
def set_discovery_hash(hass, discovery_hash):
    """Register a discovery hash in the ALREADY_DISCOVERED registry."""
    discovered = hass.data[ALREADY_DISCOVERED]
    discovered[discovery_hash] = {}
async def async_start(
    hass: HomeAssistantType, discovery_topic, config_entry, tasmota_mqtt
) -> bool:
    """Start MQTT Discovery.

    Subscribes to the Tasmota discovery topic and wires discovered devices
    and entities into Home Assistant via dispatcher signals.

    NOTE(review): annotated ``-> bool`` but no value is ever returned
    (implicitly None) -- confirm the intended contract with callers.
    """
    async def _load_platform(platform):
        """Load a Tasmota platform if not already done."""
        # Serialize setup so the same platform is never forwarded twice.
        async with hass.data[DATA_CONFIG_ENTRY_LOCK]:
            config_entries_key = f"{platform}.tasmota"
            if config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]:
                await hass.config_entries.async_forward_entry_setup(
                    config_entry, platform
                )
                hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key)
    async def _discover_entity(tasmota_entity_config, discovery_hash, platform):
        """Handle adding or updating a discovered entity."""
        if not tasmota_entity_config:
            # Entity disabled: remove any previously registered entity.
            entity_registry = await hass.helpers.entity_registry.async_get_registry()
            unique_id = unique_id_from_hash(discovery_hash)
            entity_id = entity_registry.async_get_entity_id(platform, DOMAIN, unique_id)
            if entity_id:
                _LOGGER.debug("Removing entity: %s %s", platform, discovery_hash)
                entity_registry.async_remove(entity_id)
            return
        if discovery_hash in hass.data[ALREADY_DISCOVERED]:
            # Entity known already: push the new config to the live entity.
            _LOGGER.debug(
                "Entity already added, sending update: %s %s",
                platform,
                discovery_hash,
            )
            async_dispatcher_send(
                hass,
                TASMOTA_DISCOVERY_ENTITY_UPDATED.format(*discovery_hash),
                tasmota_entity_config,
            )
        else:
            # First sighting: record the hash, then signal the platform to
            # create the entity.
            _LOGGER.debug("Adding new entity: %s %s", platform, discovery_hash)
            tasmota_entity = tasmota_get_entity(tasmota_entity_config, tasmota_mqtt)
            hass.data[ALREADY_DISCOVERED][discovery_hash] = None
            async_dispatcher_send(
                hass,
                TASMOTA_DISCOVERY_ENTITY_NEW.format(platform),
                tasmota_entity,
                discovery_hash,
            )
    async def async_device_discovered(payload, mac):
        """Process the received message."""
        # Lazily create the discovered-hash registry on first message.
        if ALREADY_DISCOVERED not in hass.data:
            hass.data[ALREADY_DISCOVERED] = {}
        _LOGGER.debug("Received discovery data for tasmota device: %s", mac)
        tasmota_device_config = tasmota_get_device_config(payload)
        async_dispatcher_send(
            hass, TASMOTA_DISCOVERY_DEVICE, tasmota_device_config, mac
        )
        # Empty payload: no entity data to process (presumably the device's
        # discovery config was cleared -- the device signal above suffices).
        if not payload:
            return
        # First pass: make sure each needed platform is set up ...
        for platform in SUPPORTED_PLATFORMS:
            if not tasmota_has_entities_with_platform(payload, platform):
                continue
            await _load_platform(platform)
        # ... then add or update the individual entities.
        for platform in SUPPORTED_PLATFORMS:
            tasmota_entities = tasmota_get_entities_for_platform(payload, platform)
            for (tasmota_entity_config, discovery_hash) in tasmota_entities:
                await _discover_entity(tasmota_entity_config, discovery_hash, platform)
    hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
    hass.data[CONFIG_ENTRY_IS_SETUP] = set()
    tasmota_discovery = TasmotaDiscovery(discovery_topic, tasmota_mqtt)
    await tasmota_discovery.start_discovery(async_device_discovered, None)
| [
"logging.getLogger",
"hatasmota.discovery.has_entities_with_platform",
"hatasmota.discovery.get_device_config",
"asyncio.Lock",
"hatasmota.discovery.get_entity",
"homeassistant.helpers.dispatcher.async_dispatcher_send",
"hatasmota.discovery.unique_id_from_hash",
"hatasmota.discovery.TasmotaDiscovery",
"hatasmota.discovery.get_entities_for_platform"
]
| [((541, 568), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (558, 568), False, 'import logging\n'), ((4432, 4446), 'asyncio.Lock', 'asyncio.Lock', ([], {}), '()\n', (4444, 4446), False, 'import asyncio\n'), ((4517, 4564), 'hatasmota.discovery.TasmotaDiscovery', 'TasmotaDiscovery', (['discovery_topic', 'tasmota_mqtt'], {}), '(discovery_topic, tasmota_mqtt)\n', (4533, 4564), False, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n'), ((3717, 3751), 'hatasmota.discovery.get_device_config', 'tasmota_get_device_config', (['payload'], {}), '(payload)\n', (3742, 3751), True, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n'), ((3760, 3845), 'homeassistant.helpers.dispatcher.async_dispatcher_send', 'async_dispatcher_send', (['hass', 'TASMOTA_DISCOVERY_DEVICE', 'tasmota_device_config', 'mac'], {}), '(hass, TASMOTA_DISCOVERY_DEVICE, tasmota_device_config,\n mac)\n', (3781, 3845), False, 'from homeassistant.helpers.dispatcher import async_dispatcher_send\n'), ((2254, 2289), 'hatasmota.discovery.unique_id_from_hash', 'unique_id_from_hash', (['discovery_hash'], {}), '(discovery_hash)\n', (2273, 2289), False, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n'), ((3092, 3147), 'hatasmota.discovery.get_entity', 'tasmota_get_entity', 
(['tasmota_entity_config', 'tasmota_mqtt'], {}), '(tasmota_entity_config, tasmota_mqtt)\n', (3110, 3147), True, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n'), ((4173, 4225), 'hatasmota.discovery.get_entities_for_platform', 'tasmota_get_entities_for_platform', (['payload', 'platform'], {}), '(payload, platform)\n', (4206, 4225), True, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n'), ((3973, 4026), 'hatasmota.discovery.has_entities_with_platform', 'tasmota_has_entities_with_platform', (['payload', 'platform'], {}), '(payload, platform)\n', (4007, 4026), True, 'from hatasmota.discovery import TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash\n')] |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX InfraValidator executor definition."""
import contextlib
import functools
import os
import signal
import threading
import time
from typing import Any, Dict, List, Optional
from absl import logging
from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import request_builder
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator import types as iv_types
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.dsl.components.base import base_executor
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import BLESSING_KEY
from tfx.types.standard_component_specs import EXAMPLES_KEY
from tfx.types.standard_component_specs import MODEL_KEY
from tfx.types.standard_component_specs import REQUEST_SPEC_KEY
from tfx.types.standard_component_specs import SERVING_SPEC_KEY
from tfx.types.standard_component_specs import VALIDATION_SPEC_KEY
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils.model_paths import tf_serving_flavor
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
from tensorflow_serving.apis import regression_pb2
# Default for ValidationSpec.num_tries when unset.
_DEFAULT_NUM_TRIES = 5
# Interval between model-loaded polls, in seconds.
_DEFAULT_POLLING_INTERVAL_SEC = 1
# Default for ValidationSpec.max_loading_time_seconds when unset.
_DEFAULT_MAX_LOADING_TIME_SEC = 300
# Model name used when ServingSpec.model_name is not given.
_DEFAULT_MODEL_NAME = 'infra-validation-model'
# Proto message keys for oneof block.
_TENSORFLOW_SERVING = 'tensorflow_serving'
_LOCAL_DOCKER = 'local_docker'
_KUBERNETES = 'kubernetes'
# Artifact property keys
_BLESSED_KEY = 'blessed'
_MODEL_FLAG_KEY = 'has_model'
# Filename of infra blessing artifact on succeed.
_BLESSED_FILENAME = 'INFRA_BLESSED'
# Filename of infra blessing artifact on fail.
_NOT_BLESSED_FILENAME = 'INFRA_NOT_BLESSED'
def _create_model_server_runner(
    model_path: str,
    serving_binary: serving_bins.ServingBinary,
    serving_spec: infra_validator_pb2.ServingSpec):
  """Create a ModelServerRunner from a model, a ServingBinary and a ServingSpec.

  Args:
    model_path: An IV-flavored model path. (See model_path_utils.py)
    serving_binary: One of ServingBinary instances parsed from the
      `serving_spec`.
    serving_spec: A ServingSpec instance of this infra validation.

  Returns:
    A ModelServerRunner.

  Raises:
    NotImplementedError: If the serving platform is not supported.
  """
  platform = serving_spec.WhichOneof('serving_platform')
  # Pick the runner class for the configured platform, then construct it with
  # a single shared call.
  if platform == 'local_docker':
    runner_cls = local_docker_runner.LocalDockerRunner
  elif platform == 'kubernetes':
    runner_cls = kubernetes_runner.KubernetesRunner
  else:
    raise NotImplementedError('Invalid serving_platform {}'.format(platform))
  return runner_cls(
      model_path=model_path,
      serving_binary=serving_binary,
      serving_spec=serving_spec)
def _convert_to_prediction_log(request: iv_types.Request):
  """Wrap an infra validation request into a TF-Serving PredictionLog.

  Args:
    request: A classification, regression or predict request proto.

  Returns:
    A `PredictionLog` with the matching oneof log field populated.

  Raises:
    NotImplementedError: If the request type is not supported.
  """
  conversions = (
      (classification_pb2.ClassificationRequest, 'classify_log',
       prediction_log_pb2.ClassifyLog),
      (regression_pb2.RegressionRequest, 'regress_log',
       prediction_log_pb2.RegressLog),
      (predict_pb2.PredictRequest, 'predict_log',
       prediction_log_pb2.PredictLog),
  )
  # Probe supported request types in the same order as the original
  # if/elif chain.
  for request_cls, log_field, log_cls in conversions:
    if isinstance(request, request_cls):
      return prediction_log_pb2.PredictionLog(
          **{log_field: log_cls(request=request)})
  raise NotImplementedError(
      f'Cannot convert {type(request)} to PredictionLog')
def _mark_blessed(blessing: types.Artifact) -> None:
  """Record a passing infra validation on the blessing artifact.

  Writes an empty INFRA_BLESSED marker file under the artifact URI and sets
  the `blessed` custom property to 1.
  """
  logging.info('Model passed infra validation.')
  marker_path = os.path.join(blessing.uri, _BLESSED_FILENAME)
  io_utils.write_string_file(marker_path, '')
  blessing.set_int_custom_property(_BLESSED_KEY, 1)
def _mark_not_blessed(blessing: types.Artifact) -> None:
  """Record a failing infra validation on the blessing artifact.

  Writes an empty INFRA_NOT_BLESSED marker file under the artifact URI and
  sets the `blessed` custom property to 0.
  """
  logging.info('Model failed infra validation.')
  marker_path = os.path.join(blessing.uri, _NOT_BLESSED_FILENAME)
  io_utils.write_string_file(marker_path, '')
  blessing.set_int_custom_property(_BLESSED_KEY, 0)
class Executor(base_executor.BaseExecutor):
  """TFX infra validator executor.

  Starts a model server (local Docker or Kubernetes) with the model under
  validation, waits for the model to load, optionally issues test requests,
  and records the outcome on an `InfraBlessing` artifact.
  """
  def __init__(self,
               context: Optional[base_executor.BaseExecutor.Context] = None):
    super(Executor, self).__init__(context)
    # Cleanup callbacks registered via _AddCleanup; executed by _Cleanup().
    self._cleanups = []
  def _AddCleanup(self, function, *args, **kwargs):
    """Register `function(*args, **kwargs)` to be invoked during cleanup."""
    self._cleanups.append(functools.partial(function, *args, **kwargs))
  def _Cleanup(self):
    """Run all registered cleanup callbacks; log failures instead of raising."""
    for cleanup in self._cleanups:
      try:
        cleanup()
      except:  # pylint: disable=broad-except, bare-except
        # Cleanup is best-effort: one failing callback must not stop the rest.
        logging.warning('Error occurred during cleanup.', exc_info=True)
  def Do(self, input_dict: Dict[str, List[types.Artifact]],
         output_dict: Dict[str, List[types.Artifact]],
         exec_properties: Dict[str, Any]) -> None:
    """Contract for running InfraValidator Executor.

    Args:
      input_dict:
        - `model`: Single `Model` artifact that we're validating.
        - `examples`: `Examples` artifacts to be used for test requests.
      output_dict:
        - `blessing`: Single `InfraBlessing` artifact containing the validated
          result and optionally validated model if warmup requests are appended.
          Artifact URI includes an empty file with the name either of
          INFRA_BLESSED or INFRA_NOT_BLESSED.
      exec_properties:
        - `serving_spec`: Serialized `ServingSpec` configuration.
        - `validation_spec`: Serialized `ValidationSpec` configuration.
        - `request_spec`: Serialized `RequestSpec` configuration.
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    model = artifact_utils.get_single_instance(input_dict[MODEL_KEY])
    blessing = artifact_utils.get_single_instance(output_dict[BLESSING_KEY])
    # Examples are optional; without them validation runs in LOAD_ONLY mode.
    if input_dict.get(EXAMPLES_KEY):
      examples = artifact_utils.get_single_instance(input_dict[EXAMPLES_KEY])
    else:
      examples = None
    serving_spec = infra_validator_pb2.ServingSpec()
    proto_utils.json_to_proto(exec_properties[SERVING_SPEC_KEY], serving_spec)
    if not serving_spec.model_name:
      serving_spec.model_name = _DEFAULT_MODEL_NAME
    validation_spec = infra_validator_pb2.ValidationSpec()
    if exec_properties.get(VALIDATION_SPEC_KEY):
      proto_utils.json_to_proto(exec_properties[VALIDATION_SPEC_KEY],
                                validation_spec)
    # Backfill defaults for any unset validation knobs.
    if not validation_spec.num_tries:
      validation_spec.num_tries = _DEFAULT_NUM_TRIES
    if not validation_spec.max_loading_time_seconds:
      validation_spec.max_loading_time_seconds = _DEFAULT_MAX_LOADING_TIME_SEC
    if exec_properties.get(REQUEST_SPEC_KEY):
      request_spec = infra_validator_pb2.RequestSpec()
      proto_utils.json_to_proto(exec_properties[REQUEST_SPEC_KEY],
                                request_spec)
    else:
      request_spec = None
    with self._InstallGracefulShutdownHandler():
      self._Do(
          model=model,
          examples=examples,
          blessing=blessing,
          serving_spec=serving_spec,
          validation_spec=validation_spec,
          request_spec=request_spec,
      )
  @contextlib.contextmanager
  def _InstallGracefulShutdownHandler(self):
    # pylint: disable=g-doc-return-or-yield
    """Install graceful shutdown behavior.

    Caveat: InfraValidator currently only recognizes SIGTERM signal as a
    graceful shutdown. Furthermore, SIGTERM can be handled only if the executor
    is running on the MainThread (the thread that runs the python interpreter)
    due to the limitation of Python API.

    When the executor is running on Kubernetes, SIGTERM is a standard way to
    signal the graceful shutdown. Python default behavior for receiving SIGTERM
    is to terminate the process without raising any exception. By registering a
    handler that raises on signal, we can effectively transform the signal to an
    exception, and we can reuse our cleanup code inside "except" or "finally"
    block during the grace period.

    When the executor is run by the local Beam DirectRunner, the executor thread
    is one of the worker threads (not a MainThread) therefore SIGTERM cannot
    be recognized. If either of MainThread or worker thread receives SIGTERM,
    executor will die immediately without grace period.

    Even if the executor fails to shutdown gracefully, external resources that
    are created by model server runner can be cleaned up if the platform
    supports such mechanism (e.g. activeDeadlineSeconds in Kubernetes).
    """
    def _handler(signum, frame):
      del frame  # Unused.
      raise error_types.GracefulShutdown('Got signal {}.'.format(signum))
    try:
      old_handler = signal.signal(signal.SIGTERM, _handler)
    except ValueError:
      # If current thread is not a MainThread, it is not allowed to register
      # the signal handler (ValueError raised).
      logging.info('Unable to register signal handler for non-MainThread '
                   '(name=%s). SIGTERM will not be handled.',
                   threading.current_thread().name)
      old_handler = None
    try:
      yield
    finally:
      self._Cleanup()
      # Restore the previous SIGTERM handler if we replaced it.
      if old_handler:
        signal.signal(signal.SIGTERM, old_handler)
  def _Do(
      self,
      model: types.Artifact,
      examples: Optional[types.Artifact],
      blessing: types.Artifact,
      serving_spec: infra_validator_pb2.ServingSpec,
      validation_spec: infra_validator_pb2.ValidationSpec,
      request_spec: Optional[infra_validator_pb2.RequestSpec],
  ):
    """Run validation for every serving binary and mark the blessing."""
    if examples and request_spec:
      logging.info('InfraValidator will be run in LOAD_AND_QUERY mode.')
      requests = request_builder.build_requests(
          model_name=serving_spec.model_name,
          model=model,
          examples=examples,
          request_spec=request_spec)
    else:
      logging.info('InfraValidator will be run in LOAD_ONLY mode.')
      requests = []
    model_path = self._PrepareModelPath(model, serving_spec)
    # TODO(jjong): Make logic parallel.
    all_passed = True
    for serving_binary in serving_bins.parse_serving_binaries(serving_spec):
      all_passed &= self._ValidateWithRetry(
          model_path=model_path,
          serving_binary=serving_binary,
          serving_spec=serving_spec,
          validation_spec=validation_spec,
          requests=requests)
    if all_passed:
      _mark_blessed(blessing)
      if requests and request_spec.make_warmup:
        self._CreateWarmupModel(blessing, model_path, warmup_requests=requests)
    else:
      _mark_not_blessed(blessing)
  def _CreateWarmupModel(self, blessing: types.Artifact, model_path: str,
                         warmup_requests: List[iv_types.Request]):
    """Copy the model under the blessing URI and attach warmup requests."""
    output_model_path = path_utils.stamped_model_path(blessing.uri)
    io_utils.copy_dir(src=model_path, dst=output_model_path)
    # Write each request as a PredictionLog record into the model's warmup
    # file.
    io_utils.write_tfrecord_file(
        path_utils.warmup_file_path(output_model_path),
        *[_convert_to_prediction_log(r) for r in warmup_requests])
    blessing.set_int_custom_property(_MODEL_FLAG_KEY, 1)
  def _PrepareModelPath(self, model: types.Artifact,
                        serving_spec: infra_validator_pb2.ServingSpec) -> str:
    """Return a model path laid out as required by the serving binary."""
    model_path = path_utils.serving_model_path(
        model.uri, path_utils.is_old_model_artifact(model))
    serving_binary = serving_spec.WhichOneof('serving_binary')
    if serving_binary == _TENSORFLOW_SERVING:
      # TensorFlow Serving requires model to be stored in its own directory
      # structure flavor. If current model_path does not conform to the flavor,
      # we need to make a copy to the temporary path.
      try:
        # Check whether current model_path conforms to the tensorflow serving
        # model path flavor. (Parsed without exception)
        tf_serving_flavor.parse_model_path(
            model_path,
            expected_model_name=serving_spec.model_name)
      except ValueError:
        # Copy the model to comply with the tensorflow serving model path
        # flavor.
        temp_model_path = tf_serving_flavor.make_model_path(
            model_base_path=self._get_tmp_dir(),
            model_name=serving_spec.model_name,
            version=int(time.time()))
        io_utils.copy_dir(src=model_path, dst=temp_model_path)
        # Remove the temporary copy once the executor finishes.
        self._AddCleanup(io_utils.delete_dir, self._context.get_tmp_path())
        return temp_model_path
    return model_path
  def _ValidateWithRetry(
      self, model_path: str,
      serving_binary: serving_bins.ServingBinary,
      serving_spec: infra_validator_pb2.ServingSpec,
      validation_spec: infra_validator_pb2.ValidationSpec,
      requests: List[iv_types.Request]):
    """Run _ValidateOnce up to `num_tries` times; True iff one attempt passes."""
    for i in range(validation_spec.num_tries):
      logging.info('Starting infra validation (attempt %d/%d).', i + 1,
                   validation_spec.num_tries)
      try:
        self._ValidateOnce(
            model_path=model_path,
            serving_binary=serving_binary,
            serving_spec=serving_spec,
            validation_spec=validation_spec,
            requests=requests)
      except error_types.GracefulShutdown:
        # GracefulShutdown means infra validation aborted. No more retry and
        # escalate the error.
        raise
      except Exception as e:  # pylint: disable=broad-except
        # Other exceptions indicates validation failure. Log the error and
        # retry.
        logging.exception('Infra validation (attempt %d/%d) failed.', i + 1,
                          validation_spec.num_tries)
        if isinstance(e, error_types.DeadlineExceeded):
          logging.info('Consider increasing the value of '
                       'ValidationSpec.max_loading_time_seconds.')
      else:
        # If validation has passed without any exception, succeeded.
        return True
    # Every trial has failed. Marking model as not blessed.
    return False
  def _ValidateOnce(
      self, model_path: str,
      serving_binary: serving_bins.ServingBinary,
      serving_spec: infra_validator_pb2.ServingSpec,
      validation_spec: infra_validator_pb2.ValidationSpec,
      requests: List[iv_types.Request]):
    """Start a model server, wait for the model to load, optionally query it."""
    deadline = time.time() + validation_spec.max_loading_time_seconds
    runner = _create_model_server_runner(
        model_path=model_path,
        serving_binary=serving_binary,
        serving_spec=serving_spec)
    try:
      logging.info('Starting %r.', runner)
      runner.Start()
      # Check model is successfully loaded.
      runner.WaitUntilRunning(deadline)
      client = serving_binary.MakeClient(runner.GetEndpoint())
      client.WaitUntilModelLoaded(
          deadline, polling_interval_sec=_DEFAULT_POLLING_INTERVAL_SEC)
      # Check model can be successfully queried.
      if requests:
        client.SendRequests(requests)
    finally:
      # Always stop the server, even if validation failed midway.
      logging.info('Stopping %r.', runner)
      runner.Stop()
| [
"tfx.proto.infra_validator_pb2.ValidationSpec",
"tensorflow_serving.apis.prediction_log_pb2.RegressLog",
"absl.logging.exception",
"absl.logging.info",
"tfx.components.infra_validator.model_server_runners.kubernetes_runner.KubernetesRunner",
"tfx.proto.infra_validator_pb2.RequestSpec",
"tfx.components.infra_validator.model_server_runners.local_docker_runner.LocalDockerRunner",
"tfx.types.artifact_utils.get_single_instance",
"tfx.utils.proto_utils.json_to_proto",
"tfx.utils.io_utils.copy_dir",
"tfx.proto.infra_validator_pb2.ServingSpec",
"tfx.components.infra_validator.serving_bins.parse_serving_binaries",
"absl.logging.warning",
"tfx.components.infra_validator.request_builder.build_requests",
"time.time",
"tensorflow_serving.apis.prediction_log_pb2.PredictLog",
"tfx.utils.path_utils.stamped_model_path",
"signal.signal",
"threading.current_thread",
"os.path.join",
"tfx.utils.model_paths.tf_serving_flavor.parse_model_path",
"tensorflow_serving.apis.prediction_log_pb2.ClassifyLog",
"functools.partial",
"tfx.utils.path_utils.is_old_model_artifact",
"tfx.utils.path_utils.warmup_file_path"
]
| [((4482, 4528), 'absl.logging.info', 'logging.info', (['"""Model passed infra validation."""'], {}), "('Model passed infra validation.')\n", (4494, 4528), False, 'from absl import logging\n'), ((4729, 4775), 'absl.logging.info', 'logging.info', (['"""Model failed infra validation."""'], {}), "('Model failed infra validation.')\n", (4741, 4775), False, 'from absl import logging\n'), ((3239, 3362), 'tfx.components.infra_validator.model_server_runners.local_docker_runner.LocalDockerRunner', 'local_docker_runner.LocalDockerRunner', ([], {'model_path': 'model_path', 'serving_binary': 'serving_binary', 'serving_spec': 'serving_spec'}), '(model_path=model_path, serving_binary\n =serving_binary, serving_spec=serving_spec)\n', (3276, 3362), False, 'from tfx.components.infra_validator.model_server_runners import local_docker_runner\n'), ((4565, 4610), 'os.path.join', 'os.path.join', (['blessing.uri', '_BLESSED_FILENAME'], {}), '(blessing.uri, _BLESSED_FILENAME)\n', (4577, 4610), False, 'import os\n'), ((4812, 4861), 'os.path.join', 'os.path.join', (['blessing.uri', '_NOT_BLESSED_FILENAME'], {}), '(blessing.uri, _NOT_BLESSED_FILENAME)\n', (4824, 4861), False, 'import os\n'), ((6509, 6566), 'tfx.types.artifact_utils.get_single_instance', 'artifact_utils.get_single_instance', (['input_dict[MODEL_KEY]'], {}), '(input_dict[MODEL_KEY])\n', (6543, 6566), False, 'from tfx.types import artifact_utils\n'), ((6582, 6643), 'tfx.types.artifact_utils.get_single_instance', 'artifact_utils.get_single_instance', (['output_dict[BLESSING_KEY]'], {}), '(output_dict[BLESSING_KEY])\n', (6616, 6643), False, 'from tfx.types import artifact_utils\n'), ((6812, 6845), 'tfx.proto.infra_validator_pb2.ServingSpec', 'infra_validator_pb2.ServingSpec', ([], {}), '()\n', (6843, 6845), False, 'from tfx.proto import infra_validator_pb2\n'), ((6850, 6924), 'tfx.utils.proto_utils.json_to_proto', 'proto_utils.json_to_proto', (['exec_properties[SERVING_SPEC_KEY]', 'serving_spec'], {}), 
'(exec_properties[SERVING_SPEC_KEY], serving_spec)\n', (6875, 6924), False, 'from tfx.utils import proto_utils\n'), ((7036, 7072), 'tfx.proto.infra_validator_pb2.ValidationSpec', 'infra_validator_pb2.ValidationSpec', ([], {}), '()\n', (7070, 7072), False, 'from tfx.proto import infra_validator_pb2\n'), ((10925, 10974), 'tfx.components.infra_validator.serving_bins.parse_serving_binaries', 'serving_bins.parse_serving_binaries', (['serving_spec'], {}), '(serving_spec)\n', (10960, 10974), False, 'from tfx.components.infra_validator import serving_bins\n'), ((11592, 11635), 'tfx.utils.path_utils.stamped_model_path', 'path_utils.stamped_model_path', (['blessing.uri'], {}), '(blessing.uri)\n', (11621, 11635), False, 'from tfx.utils import path_utils\n'), ((11640, 11696), 'tfx.utils.io_utils.copy_dir', 'io_utils.copy_dir', ([], {'src': 'model_path', 'dst': 'output_model_path'}), '(src=model_path, dst=output_model_path)\n', (11657, 11696), False, 'from tfx.utils import io_utils\n'), ((3432, 3552), 'tfx.components.infra_validator.model_server_runners.kubernetes_runner.KubernetesRunner', 'kubernetes_runner.KubernetesRunner', ([], {'model_path': 'model_path', 'serving_binary': 'serving_binary', 'serving_spec': 'serving_spec'}), '(model_path=model_path, serving_binary=\n serving_binary, serving_spec=serving_spec)\n', (3466, 3552), False, 'from tfx.components.infra_validator.model_server_runners import kubernetes_runner\n'), ((5250, 5294), 'functools.partial', 'functools.partial', (['function', '*args'], {}), '(function, *args, **kwargs)\n', (5267, 5294), False, 'import functools\n'), ((6699, 6759), 'tfx.types.artifact_utils.get_single_instance', 'artifact_utils.get_single_instance', (['input_dict[EXAMPLES_KEY]'], {}), '(input_dict[EXAMPLES_KEY])\n', (6733, 6759), False, 'from tfx.types import artifact_utils\n'), ((7128, 7213), 'tfx.utils.proto_utils.json_to_proto', 'proto_utils.json_to_proto', (['exec_properties[VALIDATION_SPEC_KEY]', 'validation_spec'], {}), 
'(exec_properties[VALIDATION_SPEC_KEY], validation_spec\n )\n', (7153, 7213), False, 'from tfx.utils import proto_utils\n'), ((7532, 7565), 'tfx.proto.infra_validator_pb2.RequestSpec', 'infra_validator_pb2.RequestSpec', ([], {}), '()\n', (7563, 7565), False, 'from tfx.proto import infra_validator_pb2\n'), ((7572, 7646), 'tfx.utils.proto_utils.json_to_proto', 'proto_utils.json_to_proto', (['exec_properties[REQUEST_SPEC_KEY]', 'request_spec'], {}), '(exec_properties[REQUEST_SPEC_KEY], request_spec)\n', (7597, 7646), False, 'from tfx.utils import proto_utils\n'), ((9546, 9585), 'signal.signal', 'signal.signal', (['signal.SIGTERM', '_handler'], {}), '(signal.SIGTERM, _handler)\n', (9559, 9585), False, 'import signal\n'), ((10426, 10492), 'absl.logging.info', 'logging.info', (['"""InfraValidator will be run in LOAD_AND_QUERY mode."""'], {}), "('InfraValidator will be run in LOAD_AND_QUERY mode.')\n", (10438, 10492), False, 'from absl import logging\n'), ((10510, 10640), 'tfx.components.infra_validator.request_builder.build_requests', 'request_builder.build_requests', ([], {'model_name': 'serving_spec.model_name', 'model': 'model', 'examples': 'examples', 'request_spec': 'request_spec'}), '(model_name=serving_spec.model_name, model=\n model, examples=examples, request_spec=request_spec)\n', (10540, 10640), False, 'from tfx.components.infra_validator import request_builder\n'), ((10693, 10754), 'absl.logging.info', 'logging.info', (['"""InfraValidator will be run in LOAD_ONLY mode."""'], {}), "('InfraValidator will be run in LOAD_ONLY mode.')\n", (10705, 10754), False, 'from absl import logging\n'), ((11739, 11785), 'tfx.utils.path_utils.warmup_file_path', 'path_utils.warmup_file_path', (['output_model_path'], {}), '(output_model_path)\n', (11766, 11785), False, 'from tfx.utils import path_utils\n'), ((12111, 12150), 'tfx.utils.path_utils.is_old_model_artifact', 'path_utils.is_old_model_artifact', (['model'], {}), '(model)\n', (12143, 12150), False, 'from tfx.utils import 
path_utils\n'), ((13560, 13656), 'absl.logging.info', 'logging.info', (['"""Starting infra validation (attempt %d/%d)."""', '(i + 1)', 'validation_spec.num_tries'], {}), "('Starting infra validation (attempt %d/%d).', i + 1,\n validation_spec.num_tries)\n", (13572, 13656), False, 'from absl import logging\n'), ((14982, 14993), 'time.time', 'time.time', ([], {}), '()\n', (14991, 14993), False, 'import time\n'), ((15200, 15236), 'absl.logging.info', 'logging.info', (['"""Starting %r."""', 'runner'], {}), "('Starting %r.', runner)\n", (15212, 15236), False, 'from absl import logging\n'), ((15639, 15675), 'absl.logging.info', 'logging.info', (['"""Stopping %r."""', 'runner'], {}), "('Stopping %r.', runner)\n", (15651, 15675), False, 'from absl import logging\n'), ((3933, 3980), 'tensorflow_serving.apis.prediction_log_pb2.ClassifyLog', 'prediction_log_pb2.ClassifyLog', ([], {'request': 'request'}), '(request=request)\n', (3963, 3980), False, 'from tensorflow_serving.apis import prediction_log_pb2\n'), ((10035, 10077), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'old_handler'], {}), '(signal.SIGTERM, old_handler)\n', (10048, 10077), False, 'import signal\n'), ((12624, 12720), 'tfx.utils.model_paths.tf_serving_flavor.parse_model_path', 'tf_serving_flavor.parse_model_path', (['model_path'], {'expected_model_name': 'serving_spec.model_name'}), '(model_path, expected_model_name=\n serving_spec.model_name)\n', (12658, 12720), False, 'from tfx.utils.model_paths import tf_serving_flavor\n'), ((4109, 4155), 'tensorflow_serving.apis.prediction_log_pb2.RegressLog', 'prediction_log_pb2.RegressLog', ([], {'request': 'request'}), '(request=request)\n', (4138, 4155), False, 'from tensorflow_serving.apis import prediction_log_pb2\n'), ((5450, 5514), 'absl.logging.warning', 'logging.warning', (['"""Error occurred during cleanup."""'], {'exc_info': '(True)'}), "('Error occurred during cleanup.', exc_info=True)\n", (5465, 5514), False, 'from absl import logging\n'), ((13062, 
13116), 'tfx.utils.io_utils.copy_dir', 'io_utils.copy_dir', ([], {'src': 'model_path', 'dst': 'temp_model_path'}), '(src=model_path, dst=temp_model_path)\n', (13079, 13116), False, 'from tfx.utils import io_utils\n'), ((14229, 14328), 'absl.logging.exception', 'logging.exception', (['"""Infra validation (attempt %d/%d) failed."""', '(i + 1)', 'validation_spec.num_tries'], {}), "('Infra validation (attempt %d/%d) failed.', i + 1,\n validation_spec.num_tries)\n", (14246, 14328), False, 'from absl import logging\n'), ((4278, 4324), 'tensorflow_serving.apis.prediction_log_pb2.PredictLog', 'prediction_log_pb2.PredictLog', ([], {'request': 'request'}), '(request=request)\n', (4307, 4324), False, 'from tensorflow_serving.apis import prediction_log_pb2\n'), ((9890, 9916), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (9914, 9916), False, 'import threading\n'), ((14417, 14516), 'absl.logging.info', 'logging.info', (['"""Consider increasing the value of ValidationSpec.max_loading_time_seconds."""'], {}), "(\n 'Consider increasing the value of ValidationSpec.max_loading_time_seconds.'\n )\n", (14429, 14516), False, 'from absl import logging\n'), ((13040, 13051), 'time.time', 'time.time', ([], {}), '()\n', (13049, 13051), False, 'import time\n')] |
from django.db import models
from products.models import Product
from utils.models import Utility
class Inventory(Utility):
inventory_number = models.CharField(unique=True, max_length=100, blank=True, null=True)
supplier = models.CharField(max_length=100, blank=True, null=True)
user = models.ForeignKey('auth.User', on_delete=models.SET_NULL, blank=True, null=True)
is_adjusment = models.BooleanField(default=False)
def __str__(self):
return self.inventory_number
class InventoryItem(Utility):
inventory = models.ForeignKey(Inventory, on_delete=models.CASCADE)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
quantity = models.PositiveIntegerField(default=1)
def __str__(self):
return self.product.name
| [
"django.db.models.PositiveIntegerField",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.ForeignKey"
]
| [((150, 218), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(unique=True, max_length=100, blank=True, null=True)\n', (166, 218), False, 'from django.db import models\n'), ((234, 289), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (250, 289), False, 'from django.db import models\n'), ((301, 386), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""auth.User"""'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)'}), "('auth.User', on_delete=models.SET_NULL, blank=True, null=True\n )\n", (318, 386), False, 'from django.db import models\n'), ((401, 435), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (420, 435), False, 'from django.db import models\n'), ((545, 599), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Inventory'], {'on_delete': 'models.CASCADE'}), '(Inventory, on_delete=models.CASCADE)\n', (562, 599), False, 'from django.db import models\n'), ((614, 666), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Product'], {'on_delete': 'models.CASCADE'}), '(Product, on_delete=models.CASCADE)\n', (631, 666), False, 'from django.db import models\n'), ((682, 720), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(1)'}), '(default=1)\n', (709, 720), False, 'from django.db import models\n')] |
from django.shortcuts import render
from hierarchical_app.models import Folder
# Create your views here.
def index_view(request):
return render(request, 'index.html', {'welcome': "Welcome to Kens Hierarchical Data and You assessment", 'folders': Folder.objects.all()})
| [
"hierarchical_app.models.Folder.objects.all"
]
| [((252, 272), 'hierarchical_app.models.Folder.objects.all', 'Folder.objects.all', ([], {}), '()\n', (270, 272), False, 'from hierarchical_app.models import Folder\n')] |
import sys
import os
import argparse
import logging
import json
import time
import subprocess
from shutil import copyfile
import numpy as np
from sklearn import metrics
from easydict import EasyDict as edict
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.nn import DataParallel
from vit_pytorch import ViT
from tensorboardX import SummaryWriter
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
from data.dataset import ImageDataset # noqa
from model.classifier import Classifier # noqa
from utils.misc import lr_schedule # noqa
from model.utils import get_optimizer # noqa
parser = argparse.ArgumentParser(description='Train model')
parser.add_argument('cfg_path', default=None, metavar='CFG_PATH', type=str,
help="Path to the config file in yaml format")
parser.add_argument('save_path', default=None, metavar='SAVE_PATH', type=str,
help="Path to the saved models")
parser.add_argument('--num_workers', default=8, type=int, help="Number of "
"workers for each data loader")
parser.add_argument('--device_ids', default='0,1,2,3', type=str,
help="GPU indices ""comma separated, e.g. '0,1' ")
parser.add_argument('--pre_train', default=None, type=str, help="If get"
"parameters from pretrained model")
parser.add_argument('--resume', default=0, type=int, help="If resume from "
"previous run")
parser.add_argument('--logtofile', default=False, type=bool, help="Save log "
"in save_path/log.txt if set True")
parser.add_argument('--verbose', default=False, type=bool, help="Detail info")
def get_loss(output, target, index, device, cfg):
if cfg.criterion == 'BCE':
for num_class in cfg.num_classes:
assert num_class == 1
target = target[:, index].view(-1)
pos_weight = torch.from_numpy(
np.array(cfg.pos_weight,
dtype=np.float32)).to(device).type_as(target)
if cfg.batch_weight:
if target.sum() == 0:
loss = torch.tensor(0., requires_grad=True).to(device)
else:
weight = (target.size()[0] - target.sum()) / target.sum()
loss = F.binary_cross_entropy_with_logits(
output[index].view(-1), target, pos_weight=weight)
else:
loss = F.binary_cross_entropy_with_logits(
output[index].view(-1), target, pos_weight=pos_weight[index])
label = torch.sigmoid(output[index].view(-1)).ge(0.5).float()
acc = (target == label).float().sum() / len(label)
else:
raise Exception('Unknown criterion : {}'.format(cfg.criterion))
return (loss, acc)
def train_epoch(summary, summary_dev, cfg, args, model, dataloader,
dataloader_dev, optimizer, summary_writer, best_dict,
dev_header):
torch.set_grad_enabled(True)
model.train()
device_ids = list(map(int, args.device_ids.split(',')))
device = torch.device('cuda:{}'.format(device_ids[0]))
steps = len(dataloader)
dataiter = iter(dataloader)
label_header = dataloader.dataset._label_header
num_tasks = len(cfg.num_classes)
time_now = time.time()
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
for step in range(steps):
image, target = next(dataiter)
image = image.to(device)
target = target.to(device)
# output, logit_map = model(image)
output = model(image)
output = [torch.unsqueeze(i, 1) for i in output.T]
# different number of tasks
loss = 0
for t in range(num_tasks):
loss_t, acc_t = get_loss(output, target, t, device, cfg)
loss += loss_t
loss_sum[t] += loss_t.item()
acc_sum[t] += acc_t.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
summary['step'] += 1
if summary['step'] % cfg.log_every == 0:
time_spent = time.time() - time_now
time_now = time.time()
loss_sum /= cfg.log_every
acc_sum /= cfg.log_every
loss_str = ' '.join(map(lambda x: '{:.5f}'.format(x), loss_sum))
acc_str = ' '.join(map(lambda x: '{:.3f}'.format(x), acc_sum))
logging.info(
'{}, Train, Epoch : {}, Step : {}, Loss : {}, '
'Acc : {}, Run Time : {:.2f} sec'
.format(time.strftime("%Y-%m-%d %H:%M:%S"),
summary['epoch'] + 1, summary['step'], loss_str,
acc_str, time_spent))
for t in range(num_tasks):
summary_writer.add_scalar(
'train/loss_{}'.format(label_header[t]), loss_sum[t],
summary['step'])
summary_writer.add_scalar(
'train/acc_{}'.format(label_header[t]), acc_sum[t],
summary['step'])
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
if summary['step'] % cfg.test_every == 0:
time_now = time.time()
summary_dev, predlist, true_list = test_epoch(
summary_dev, cfg, args, model, dataloader_dev)
time_spent = time.time() - time_now
auclist = []
for i in range(len(cfg.num_classes)):
y_pred = predlist[i]
y_true = true_list[i]
fpr, tpr, thresholds = metrics.roc_curve(
y_true, y_pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
auclist.append(auc)
summary_dev['auc'] = np.array(auclist)
loss_dev_str = ' '.join(map(lambda x: '{:.5f}'.format(x),
summary_dev['loss']))
acc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['acc']))
auc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['auc']))
logging.info(
'{}, Dev, Step : {}, Loss : {}, Acc : {}, Auc : {},'
'Mean auc: {:.3f} ''Run Time : {:.2f} sec' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
summary_dev['auc'].mean(),
time_spent))
for t in range(len(cfg.num_classes)):
summary_writer.add_scalar(
'dev/loss_{}'.format(dev_header[t]),
summary_dev['loss'][t], summary['step'])
summary_writer.add_scalar(
'dev/acc_{}'.format(dev_header[t]), summary_dev['acc'][t],
summary['step'])
summary_writer.add_scalar(
'dev/auc_{}'.format(dev_header[t]), summary_dev['auc'][t],
summary['step'])
save_best = False
mean_acc = summary_dev['acc'][cfg.save_index].mean()
if mean_acc >= best_dict['acc_dev_best']:
best_dict['acc_dev_best'] = mean_acc
if cfg.best_target == 'acc':
save_best = True
mean_auc = summary_dev['auc'][cfg.save_index].mean()
if mean_auc >= best_dict['auc_dev_best']:
best_dict['auc_dev_best'] = mean_auc
if cfg.best_target == 'auc':
save_best = True
mean_loss = summary_dev['loss'][cfg.save_index].mean()
if mean_loss <= best_dict['loss_dev_best']:
best_dict['loss_dev_best'] = mean_loss
if cfg.best_target == 'loss':
save_best = True
if save_best:
torch.save(
{'epoch': summary['epoch'],
'step': summary['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path, 'best{}.ckpt'.format(
best_dict['best_idx']))
)
best_dict['best_idx'] += 1
if best_dict['best_idx'] > cfg.save_top_k:
best_dict['best_idx'] = 1
logging.info(
'{}, Best, Step : {}, Loss : {}, Acc : {},Auc :{},'
'Best Auc : {:.3f}' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
best_dict['auc_dev_best']))
model.train()
torch.set_grad_enabled(True)
summary['epoch'] += 1
return summary, best_dict
def test_epoch(summary, cfg, args, model, dataloader):
torch.set_grad_enabled(False)
model.eval()
device_ids = list(map(int, args.device_ids.split(',')))
device = torch.device('cuda:{}'.format(device_ids[0]))
steps = len(dataloader)
dataiter = iter(dataloader)
num_tasks = len(cfg.num_classes)
loss_sum = np.zeros(num_tasks)
acc_sum = np.zeros(num_tasks)
predlist = list(x for x in range(len(cfg.num_classes)))
true_list = list(x for x in range(len(cfg.num_classes)))
for step in range(steps):
image, target = next(dataiter)
image = image.to(device)
target = target.to(device)
output = model(image)
output = [torch.unsqueeze(i, 1) for i in output.T]
# different number of tasks
for t in range(len(cfg.num_classes)):
loss_t, acc_t = get_loss(output, target, t, device, cfg)
# AUC
output_tensor = torch.sigmoid(
output[t].view(-1)).cpu().detach().numpy()
target_tensor = target[:, t].view(-1).cpu().detach().numpy()
if step == 0:
predlist[t] = output_tensor
true_list[t] = target_tensor
else:
predlist[t] = np.append(predlist[t], output_tensor)
true_list[t] = np.append(true_list[t], target_tensor)
loss_sum[t] += loss_t.item()
acc_sum[t] += acc_t.item()
summary['loss'] = loss_sum / steps
summary['acc'] = acc_sum / steps
return summary, predlist, true_list
def run(args):
with open(args.cfg_path) as f:
cfg = edict(json.load(f))
if args.verbose is True:
print(json.dumps(cfg, indent=4))
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
if args.logtofile is True:
logging.basicConfig(filename=args.save_path + '/log.txt',
filemode="w", level=logging.INFO)
else:
logging.basicConfig(level=logging.INFO)
if not args.resume:
with open(os.path.join(args.save_path, 'cfg.json'), 'w') as f:
json.dump(cfg, f, indent=1)
device_ids = list(map(int, args.device_ids.split(',')))
num_devices = torch.cuda.device_count()
if num_devices < len(device_ids):
raise Exception(
'#available gpu : {} < --device_ids : {}'
.format(num_devices, len(device_ids)))
device = torch.device('cuda:{}'.format(device_ids[0]))
# model = Classifier(cfg)
model = ViT(
cfg = cfg,
image_size=cfg.width,
patch_size=32,
num_classes=5,
dim=1024,
depth=6,
heads=8,
mlp_dim=512,
dropout=0.3,
emb_dropout=0.3,
channels=3
)
if args.verbose is True:
from torchsummary import summary
if cfg.fix_ratio:
h, w = cfg.long_side, cfg.long_side
else:
h, w = cfg.height, cfg.width
summary(model.to(device), (3, h, w))
model = DataParallel(model, device_ids=device_ids).to(device).train()
if args.pre_train is not None:
if os.path.exists(args.pre_train):
ckpt = torch.load(args.pre_train, map_location=device)
model.module.load_state_dict(ckpt)
optimizer = get_optimizer(model.parameters(), cfg)
src_folder = os.path.dirname(os.path.abspath(__file__)) + '/../'
dst_folder = os.path.join(args.save_path, 'classification')
rc, size = subprocess.getstatusoutput('du --max-depth=0 %s | cut -f1'
% src_folder)
if rc != 0:
raise Exception('Copy folder error : {}'.format(rc))
rc, err_msg = subprocess.getstatusoutput('cp -R %s %s' % (src_folder,
dst_folder))
if rc != 0:
raise Exception('copy folder error : {}'.format(err_msg))
copyfile(cfg.train_csv, os.path.join(args.save_path, 'train.csv'))
copyfile(cfg.dev_csv, os.path.join(args.save_path, 'dev.csv'))
dataloader_train = DataLoader(
ImageDataset(cfg.train_csv, cfg, mode='train'),
batch_size=cfg.train_batch_size, num_workers=args.num_workers,
drop_last=True, shuffle=True)
dataloader_dev = DataLoader(
ImageDataset(cfg.dev_csv, cfg, mode='dev'),
batch_size=cfg.dev_batch_size, num_workers=args.num_workers,
drop_last=False, shuffle=False)
dev_header = dataloader_dev.dataset._label_header
summary_train = {'epoch': 0, 'step': 0}
summary_dev = {'loss': float('inf'), 'acc': 0.0}
summary_writer = SummaryWriter(args.save_path)
epoch_start = 0
best_dict = {
"acc_dev_best": 0.0,
"auc_dev_best": 0.0,
"loss_dev_best": float('inf'),
"fused_dev_best": 0.0,
"best_idx": 1}
if args.resume:
ckpt_path = os.path.join(args.save_path, 'train.ckpt')
ckpt = torch.load(ckpt_path, map_location=device)
model.module.load_state_dict(ckpt['state_dict'])
summary_train = {'epoch': ckpt['epoch'], 'step': ckpt['step']}
best_dict['acc_dev_best'] = ckpt['acc_dev_best']
best_dict['loss_dev_best'] = ckpt['loss_dev_best']
best_dict['auc_dev_best'] = ckpt['auc_dev_best']
epoch_start = ckpt['epoch']
for epoch in range(epoch_start, cfg.epoch):
lr = lr_schedule(cfg.lr, cfg.lr_factor, summary_train['epoch'],
cfg.lr_epochs)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
summary_train, best_dict = train_epoch(
summary_train, summary_dev, cfg, args, model,
dataloader_train, dataloader_dev, optimizer,
summary_writer, best_dict, dev_header)
time_now = time.time()
summary_dev, predlist, true_list = test_epoch(
summary_dev, cfg, args, model, dataloader_dev)
time_spent = time.time() - time_now
auclist = []
for i in range(len(cfg.num_classes)):
y_pred = predlist[i]
y_true = true_list[i]
fpr, tpr, thresholds = metrics.roc_curve(
y_true, y_pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
auclist.append(auc)
summary_dev['auc'] = np.array(auclist)
loss_dev_str = ' '.join(map(lambda x: '{:.5f}'.format(x),
summary_dev['loss']))
acc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['acc']))
auc_dev_str = ' '.join(map(lambda x: '{:.3f}'.format(x),
summary_dev['auc']))
logging.info(
'{}, Dev, Step : {}, Loss : {}, Acc : {}, Auc : {},'
'Mean auc: {:.3f} ''Run Time : {:.2f} sec' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary_train['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
summary_dev['auc'].mean(),
time_spent))
for t in range(len(cfg.num_classes)):
summary_writer.add_scalar(
'dev/loss_{}'.format(dev_header[t]), summary_dev['loss'][t],
summary_train['step'])
summary_writer.add_scalar(
'dev/acc_{}'.format(dev_header[t]), summary_dev['acc'][t],
summary_train['step'])
summary_writer.add_scalar(
'dev/auc_{}'.format(dev_header[t]), summary_dev['auc'][t],
summary_train['step'])
save_best = False
mean_acc = summary_dev['acc'][cfg.save_index].mean()
if mean_acc >= best_dict['acc_dev_best']:
best_dict['acc_dev_best'] = mean_acc
if cfg.best_target == 'acc':
save_best = True
mean_auc = summary_dev['auc'][cfg.save_index].mean()
if mean_auc >= best_dict['auc_dev_best']:
best_dict['auc_dev_best'] = mean_auc
if cfg.best_target == 'auc':
save_best = True
mean_loss = summary_dev['loss'][cfg.save_index].mean()
if mean_loss <= best_dict['loss_dev_best']:
best_dict['loss_dev_best'] = mean_loss
if cfg.best_target == 'loss':
save_best = True
if save_best:
torch.save(
{'epoch': summary_train['epoch'],
'step': summary_train['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path,
'best{}.ckpt'.format(best_dict['best_idx']))
)
best_dict['best_idx'] += 1
if best_dict['best_idx'] > cfg.save_top_k:
best_dict['best_idx'] = 1
logging.info(
'{}, Best, Step : {}, Loss : {}, Acc : {},'
'Auc :{},Best Auc : {:.3f}' .format(
time.strftime("%Y-%m-%d %H:%M:%S"),
summary_train['step'],
loss_dev_str,
acc_dev_str,
auc_dev_str,
best_dict['auc_dev_best']))
torch.save({'epoch': summary_train['epoch'],
'step': summary_train['step'],
'acc_dev_best': best_dict['acc_dev_best'],
'auc_dev_best': best_dict['auc_dev_best'],
'loss_dev_best': best_dict['loss_dev_best'],
'state_dict': model.module.state_dict()},
os.path.join(args.save_path, 'train.ckpt'))
summary_writer.close()
def main():
args = parser.parse_args()
if args.verbose is True:
print('Using the specified args:')
print(args)
run(args)
if __name__ == '__main__':
main()
| [
"sklearn.metrics.auc",
"torch.cuda.device_count",
"numpy.array",
"sklearn.metrics.roc_curve",
"os.path.exists",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"torch.unsqueeze",
"json.dumps",
"os.mkdir",
"subprocess.getstatusoutput",
"utils.misc.lr_schedule",
"time.time",
"data.dataset.ImageDataset",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"logging.basicConfig",
"torch.load",
"time.strftime",
"os.path.join",
"torch.nn.DataParallel",
"vit_pytorch.ViT",
"json.load",
"numpy.append",
"numpy.zeros",
"torch.tensor",
"os.path.abspath",
"torch.set_grad_enabled",
"json.dump"
]
| [((466, 486), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (483, 486), False, 'import torch\n'), ((487, 516), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(0)'], {}), '(0)\n', (513, 516), False, 'import torch\n'), ((711, 761), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train model"""'}), "(description='Train model')\n", (734, 761), False, 'import argparse\n'), ((3011, 3039), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (3033, 3039), False, 'import torch\n'), ((3342, 3353), 'time.time', 'time.time', ([], {}), '()\n', (3351, 3353), False, 'import time\n'), ((3369, 3388), 'numpy.zeros', 'np.zeros', (['num_tasks'], {}), '(num_tasks)\n', (3377, 3388), True, 'import numpy as np\n'), ((3403, 3422), 'numpy.zeros', 'np.zeros', (['num_tasks'], {}), '(num_tasks)\n', (3411, 3422), True, 'import numpy as np\n'), ((9269, 9298), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (9291, 9298), False, 'import torch\n'), ((9548, 9567), 'numpy.zeros', 'np.zeros', (['num_tasks'], {}), '(num_tasks)\n', (9556, 9567), True, 'import numpy as np\n'), ((9582, 9601), 'numpy.zeros', 'np.zeros', (['num_tasks'], {}), '(num_tasks)\n', (9590, 9601), True, 'import numpy as np\n'), ((11437, 11462), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (11460, 11462), False, 'import torch\n'), ((11733, 11884), 'vit_pytorch.ViT', 'ViT', ([], {'cfg': 'cfg', 'image_size': 'cfg.width', 'patch_size': '(32)', 'num_classes': '(5)', 'dim': '(1024)', 'depth': '(6)', 'heads': '(8)', 'mlp_dim': '(512)', 'dropout': '(0.3)', 'emb_dropout': '(0.3)', 'channels': '(3)'}), '(cfg=cfg, image_size=cfg.width, patch_size=32, num_classes=5, dim=1024,\n depth=6, heads=8, mlp_dim=512, dropout=0.3, emb_dropout=0.3, channels=3)\n', (11736, 11884), False, 'from vit_pytorch import ViT\n'), ((12629, 12675), 'os.path.join', 'os.path.join', (['args.save_path', 
'"""classification"""'], {}), "(args.save_path, 'classification')\n", (12641, 12675), False, 'import os\n'), ((12691, 12763), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (["('du --max-depth=0 %s | cut -f1' % src_folder)"], {}), "('du --max-depth=0 %s | cut -f1' % src_folder)\n", (12717, 12763), False, 'import subprocess\n'), ((12901, 12969), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (["('cp -R %s %s' % (src_folder, dst_folder))"], {}), "('cp -R %s %s' % (src_folder, dst_folder))\n", (12927, 12969), False, 'import subprocess\n'), ((13821, 13850), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['args.save_path'], {}), '(args.save_path)\n', (13834, 13850), False, 'from tensorboardX import SummaryWriter\n'), ((9122, 9150), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (9144, 9150), False, 'import torch\n'), ((10940, 10970), 'os.path.exists', 'os.path.exists', (['args.save_path'], {}), '(args.save_path)\n', (10954, 10970), False, 'import os\n'), ((10980, 11004), 'os.mkdir', 'os.mkdir', (['args.save_path'], {}), '(args.save_path)\n', (10988, 11004), False, 'import os\n'), ((11044, 11139), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': "(args.save_path + '/log.txt')", 'filemode': '"""w"""', 'level': 'logging.INFO'}), "(filename=args.save_path + '/log.txt', filemode='w',\n level=logging.INFO)\n", (11063, 11139), False, 'import logging\n'), ((11182, 11221), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (11201, 11221), False, 'import logging\n'), ((12341, 12371), 'os.path.exists', 'os.path.exists', (['args.pre_train'], {}), '(args.pre_train)\n', (12355, 12371), False, 'import os\n'), ((13143, 13184), 'os.path.join', 'os.path.join', (['args.save_path', '"""train.csv"""'], {}), "(args.save_path, 'train.csv')\n", (13155, 13184), False, 'import os\n'), ((13212, 13251), 'os.path.join', 'os.path.join', (['args.save_path', 
'"""dev.csv"""'], {}), "(args.save_path, 'dev.csv')\n", (13224, 13251), False, 'import os\n'), ((13297, 13343), 'data.dataset.ImageDataset', 'ImageDataset', (['cfg.train_csv', 'cfg'], {'mode': '"""train"""'}), "(cfg.train_csv, cfg, mode='train')\n", (13309, 13343), False, 'from data.dataset import ImageDataset\n'), ((13495, 13537), 'data.dataset.ImageDataset', 'ImageDataset', (['cfg.dev_csv', 'cfg'], {'mode': '"""dev"""'}), "(cfg.dev_csv, cfg, mode='dev')\n", (13507, 13537), False, 'from data.dataset import ImageDataset\n'), ((14081, 14123), 'os.path.join', 'os.path.join', (['args.save_path', '"""train.ckpt"""'], {}), "(args.save_path, 'train.ckpt')\n", (14093, 14123), False, 'import os\n'), ((14139, 14181), 'torch.load', 'torch.load', (['ckpt_path'], {'map_location': 'device'}), '(ckpt_path, map_location=device)\n', (14149, 14181), False, 'import torch\n'), ((14581, 14654), 'utils.misc.lr_schedule', 'lr_schedule', (['cfg.lr', 'cfg.lr_factor', "summary_train['epoch']", 'cfg.lr_epochs'], {}), "(cfg.lr, cfg.lr_factor, summary_train['epoch'], cfg.lr_epochs)\n", (14592, 14654), False, 'from utils.misc import lr_schedule\n'), ((15001, 15012), 'time.time', 'time.time', ([], {}), '()\n', (15010, 15012), False, 'import time\n'), ((15506, 15523), 'numpy.array', 'np.array', (['auclist'], {}), '(auclist)\n', (15514, 15523), True, 'import numpy as np\n'), ((428, 453), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (443, 453), False, 'import os\n'), ((3651, 3672), 'torch.unsqueeze', 'torch.unsqueeze', (['i', '(1)'], {}), '(i, 1)\n', (3666, 3672), False, 'import torch\n'), ((4188, 4199), 'time.time', 'time.time', ([], {}), '()\n', (4197, 4199), False, 'import time\n'), ((5118, 5137), 'numpy.zeros', 'np.zeros', (['num_tasks'], {}), '(num_tasks)\n', (5126, 5137), True, 'import numpy as np\n'), ((5160, 5179), 'numpy.zeros', 'np.zeros', (['num_tasks'], {}), '(num_tasks)\n', (5168, 5179), True, 'import numpy as np\n'), ((5254, 5265), 'time.time', 
'time.time', ([], {}), '()\n', (5263, 5265), False, 'import time\n'), ((5807, 5824), 'numpy.array', 'np.array', (['auclist'], {}), '(auclist)\n', (5815, 5824), True, 'import numpy as np\n'), ((9909, 9930), 'torch.unsqueeze', 'torch.unsqueeze', (['i', '(1)'], {}), '(i, 1)\n', (9924, 9930), False, 'import torch\n'), ((10836, 10848), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10845, 10848), False, 'import json\n'), ((11330, 11357), 'json.dump', 'json.dump', (['cfg', 'f'], {'indent': '(1)'}), '(cfg, f, indent=1)\n', (11339, 11357), False, 'import json\n'), ((12392, 12439), 'torch.load', 'torch.load', (['args.pre_train'], {'map_location': 'device'}), '(args.pre_train, map_location=device)\n', (12402, 12439), False, 'import torch\n'), ((12576, 12601), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (12591, 12601), False, 'import os\n'), ((15148, 15159), 'time.time', 'time.time', ([], {}), '()\n', (15157, 15159), False, 'import time\n'), ((15341, 15387), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y_true', 'y_pred'], {'pos_label': '(1)'}), '(y_true, y_pred, pos_label=1)\n', (15358, 15387), False, 'from sklearn import metrics\n'), ((15423, 15444), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (15434, 15444), False, 'from sklearn import metrics\n'), ((18958, 19000), 'os.path.join', 'os.path.join', (['args.save_path', '"""train.ckpt"""'], {}), "(args.save_path, 'train.ckpt')\n", (18970, 19000), False, 'import os\n'), ((4142, 4153), 'time.time', 'time.time', ([], {}), '()\n', (4151, 4153), False, 'import time\n'), ((5413, 5424), 'time.time', 'time.time', ([], {}), '()\n', (5422, 5424), False, 'import time\n'), ((5626, 5672), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y_true', 'y_pred'], {'pos_label': '(1)'}), '(y_true, y_pred, pos_label=1)\n', (5643, 5672), False, 'from sklearn import metrics\n'), ((5716, 5737), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (5727, 
5737), False, 'from sklearn import metrics\n'), ((10458, 10495), 'numpy.append', 'np.append', (['predlist[t]', 'output_tensor'], {}), '(predlist[t], output_tensor)\n', (10467, 10495), True, 'import numpy as np\n'), ((10527, 10565), 'numpy.append', 'np.append', (['true_list[t]', 'target_tensor'], {}), '(true_list[t], target_tensor)\n', (10536, 10565), True, 'import numpy as np\n'), ((10901, 10926), 'json.dumps', 'json.dumps', (['cfg'], {'indent': '(4)'}), '(cfg, indent=4)\n', (10911, 10926), False, 'import json\n'), ((11265, 11305), 'os.path.join', 'os.path.join', (['args.save_path', '"""cfg.json"""'], {}), "(args.save_path, 'cfg.json')\n", (11277, 11305), False, 'import os\n'), ((16059, 16093), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (16072, 16093), False, 'import time\n'), ((4593, 4627), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (4606, 4627), False, 'import time\n'), ((6400, 6434), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (6413, 6434), False, 'import time\n'), ((12233, 12275), 'torch.nn.DataParallel', 'DataParallel', (['model'], {'device_ids': 'device_ids'}), '(model, device_ids=device_ids)\n', (12245, 12275), False, 'from torch.nn import DataParallel\n'), ((18355, 18389), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (18368, 18389), False, 'import time\n'), ((2185, 2222), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'requires_grad': '(True)'}), '(0.0, requires_grad=True)\n', (2197, 2222), False, 'import torch\n'), ((8851, 8885), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (8864, 8885), False, 'import time\n'), ((2007, 2049), 'numpy.array', 'np.array', (['cfg.pos_weight'], {'dtype': 'np.float32'}), '(cfg.pos_weight, dtype=np.float32)\n', (2015, 2049), True, 'import numpy as np\n')] |
import unittest
from hf_src.main import soma
class TestSoma(unittest.TestCase):
    """Unit tests for :func:`hf_src.main.soma`."""

    def test_retorno_soma_15_30(self):
        # soma is expected to return the arithmetic sum of its arguments.
        self.assertEqual(soma(15, 30), 45)
| [
"hf_src.main.soma"
]
| [((146, 158), 'hf_src.main.soma', 'soma', (['(15)', '(30)'], {}), '(15, 30)\n', (150, 158), False, 'from hf_src.main import soma\n')] |
import copy
import json
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic.utils.keyio import KeyJar
from oic.utils.keyio import KeyBundle
__author__ = 'roland'
import logging
logger = logging.getLogger(__name__)
class OIDCError(Exception):
    """Base exception raised for OIDC test-setup failures."""
    pass
def flow2sequence(operations, item):
    """Resolve the named flow *item* into its ordered list of phase objects.

    :param operations: module/object exposing ``FLOWS`` and ``PHASES`` mappings
    :param item: key of the flow to resolve in ``operations.FLOWS``
    :return: list of phase objects, in the flow's declared order
    """
    phase_table = operations.PHASES
    resolved = []
    for phase_name in operations.FLOWS[item]["sequence"]:
        resolved.append(phase_table[phase_name])
    return resolved
class OIDCTestSetup(object):
    """Builds and configures an OIDC client for a scripted conformance test run.

    Wraps client construction (keys, registration info, preferences) and
    translates named test flows into concrete request/response sequences.
    """

    def __init__(self, client_cls, config, test_defs):
        """
        :param client_cls: Class used to instantiate the OIDC client
        :param config: Imported configuration module (must expose CLIENT and
            VERIFY_SSL)
        :param test_defs: Module/object exposing FLOWS and PHASES definitions
        :return:
        """
        self.client_cls = client_cls
        self.config = config
        self.test_features = []
        # create_client also populates self.test_features as a side effect.
        self.client = self.create_client(**config.CLIENT)
        self.test_defs = test_defs

    def create_client(self, **kwargs):
        """
        Instantiate a _client instance

        :param kwargs: Keyword arguments.
            Keys are ["srv_discovery_url", "client_info", "client_registration",
            "provider_info", "keys"], plus "behaviour", "allow", "preferences",
            "key_export_url".
        :return: _client instance
        """
        # Keys that remain in _key_set at the end describe the features the
        # test run exercises (e.g. dynamic discovery vs. static registration).
        _key_set = set(kwargs.keys())
        args = {}

        _client = self.client_cls(client_authn_method=CLIENT_AUTHN_METHOD,
                                  behaviour=kwargs["behaviour"],
                                  verify_ssl=self.config.VERIFY_SSL, **args)

        # The behaviour parameter is not significant for the election process
        _key_set.discard("behaviour")
        # "allow" is optional: only record the feature if it was supplied.
        try:
            setattr(_client, "allow", kwargs["allow"])
        except KeyError:
            pass
        else:
            _key_set.discard("allow")

        # Build the client's JWKS from the configured key files, if any.
        try:
            jwks = self.construct_jwks(_client, kwargs["keys"])
        except KeyError:
            pass
        else:
            # export JWKS so the provider can fetch it via jwks_uri
            f = open("export/jwk.json", "w")
            f.write(json.dumps(jwks))
            f.close()
            _client.jwks_uri = self.config.CLIENT["key_export_url"]

        self.test_features = _key_set

        try:
            _client.client_prefs = copy.copy(kwargs["preferences"])
        except KeyError:
            pass
        else:
            _key_set.discard("preferences")

        # Static client info vs. full pre-registered credentials: exactly one
        # of these is expected to be configured.
        if "client_info" in _key_set:
            _client.redirect_uris = self.config.CLIENT[
                "client_info"]["redirect_uris"]
        elif "client_registration" in _key_set:
            reg_info = self.config.CLIENT["client_registration"]
            _client.redirect_uris = reg_info["redirect_uris"]
            _client.client_id = reg_info["client_id"]
            _client.client_secret = reg_info["client_secret"]

        return _client

    @staticmethod
    def construct_jwks(_client, key_conf):
        """
        Construct the jwks from the configured key files.

        Side effects: populates _client.keyjar and _client.kid with the
        generated key IDs.

        :param _client: client instance whose keyjar/kid are updated in place
        :param key_conf: mapping of key type -> {"key": <DER file path>}
        :return: JWKS dict (only non-symmetric keys are exported)
        """
        if _client.keyjar is None:
            _client.keyjar = KeyJar()

        kbl = []
        kid_template = "a%d"
        kid = 0
        for typ, info in key_conf.items():
            kb = KeyBundle(source="file://%s" % info["key"], fileformat="der",
                           keytype=typ)

            # Assign sequential key IDs ("a0", "a1", ...) and register the
            # preferred kid per use/key-type on the client.
            for k in kb.keys():
                k.serialize()
                k.kid = kid_template % kid
                kid += 1
                _client.kid[k.use][k.kty] = k.kid
            _client.keyjar.add_kb("", kb)

            kbl.append(kb)

        jwks = {"keys": []}
        for kb in kbl:
            # ignore simple (symmetric) keys — they must not be published
            jwks["keys"].extend([k.to_dict()
                                 for k in kb.keys() if k.kty != 'oct'])

        return jwks

    def make_sequence(self, flow):
        """
        Translate a flow name into a sequence of request/responses.

        :param flow: Which test flow to use
        :return: test sequence and test definitions
        """
        sequence = flow2sequence(self.test_defs, flow)

        # Defaults; individual flows may override any of the keys below.
        res = {"sequence": sequence,
               "tests": {"pre": [], "post": []},
               "flow": [flow],
               "block": [],
               "mode": "",
               "expect_exception": False}

        _flow = self.test_defs.FLOWS[flow]

        for param in ["tests", "block", "mode", "expect_exception"]:
            try:
                res[param] = _flow[param]
            except KeyError:
                pass

        return res

    def add_init(self, test_spec):
        """
        Add _client registration and provider info gathering if necessary

        Mutates test_spec in place (prepends phases to its "sequence" and
        "flow" lists) and also returns it.

        :param test_spec: dict produced by make_sequence
        :return: the (possibly extended) test_spec
        """
        _seq = test_spec["sequence"]
        _flow = test_spec["flow"]

        if "client_info" in self.test_features and \
                "registration" not in test_spec["block"]:
            _register = True
            # May not be the first item in the sequence
            for sq in _seq:
                try:
                    if sq[0].request == "RegistrationRequest":
                        _register = False
                except TypeError:
                    pass

            if _register:
                _ext = self.test_defs.PHASES["oic-registration"]
                _seq.insert(0, _ext)
                _flow.insert(0, "oic-registration")

        if "srv_discovery_url" in self.test_features:
            op_spec = self.test_defs.PHASES["provider-discovery"]
            if op_spec not in _seq:
                _seq.insert(0, op_spec)
                _flow.insert(0, "provider-discovery")

        return test_spec
def request_and_return(conv, url, response=None, method="GET", body=None,
                       body_type="json", state="", http_args=None,
                       **kwargs):
    """Send an HTTP request through the conversation's client and parse the reply.

    :param conv: conversation object; its position/last_response/last_content
        attributes are updated and the parsed reply is appended to
        conv.protocol_response
    :param url: The URL to which the request should be sent
    :param response: Response type
    :param method: Which HTTP method to use
    :param body: A message body if any
    :param body_type: The format of the body of the return message
    :param http_args: Arguments for the HTTP _client
    :return: A cls or ErrorResponse instance or the HTTP response
        instance if no response body was expected.
    """
    if http_args is None:
        http_args = {}

    client = conv._client
    raw_response = client.http_request(url, method, data=body, **http_args)

    # Record where the conversation is and what came back over the wire.
    conv.position = url
    conv.last_response = raw_response
    conv.last_content = raw_response.content

    # Parsing needs a keyjar; fall back to the conversation's own.
    kwargs.setdefault("keyjar", conv.keyjar)

    parsed = client.parse_request_response(raw_response, response, body_type,
                                           state, **kwargs)

    conv.protocol_response.append((parsed, raw_response.content))
    return parsed
def test_summation(conv, sid):
status = 0
for item in conv.test_output:
if item["status"] > status:
status = item["status"]
if status == 0:
status = 1
info = {
"id": sid,
"status": status,
"tests": conv.test_output
}
return info | [
"logging.getLogger",
"oic.utils.keyio.KeyJar",
"json.dumps",
"oic.utils.keyio.KeyBundle",
"copy.copy"
]
| [((202, 229), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (219, 229), False, 'import logging\n'), ((2044, 2076), 'copy.copy', 'copy.copy', (["kwargs['preferences']"], {}), "(kwargs['preferences'])\n", (2053, 2076), False, 'import copy\n'), ((2812, 2820), 'oic.utils.keyio.KeyJar', 'KeyJar', ([], {}), '()\n', (2818, 2820), False, 'from oic.utils.keyio import KeyJar\n'), ((2944, 3018), 'oic.utils.keyio.KeyBundle', 'KeyBundle', ([], {'source': "('file://%s' % info['key'])", 'fileformat': '"""der"""', 'keytype': 'typ'}), "(source='file://%s' % info['key'], fileformat='der', keytype=typ)\n", (2953, 3018), False, 'from oic.utils.keyio import KeyBundle\n'), ((1848, 1864), 'json.dumps', 'json.dumps', (['jwks'], {}), '(jwks)\n', (1858, 1864), False, 'import json\n')] |
from YoshiViz import Gui

if __name__ == '__main__':
    # Launch the YoshiViz GUI (file-director entry point).
    gui = Gui.Gui()
    """
    report_generator.\
        generate_pdf_report(fileDirectory, repositoryName, tempCommunityType)
    """
    # NOTE(review): repositoryName and tempCommunityType are never defined in
    # this script, so the print below raises NameError at runtime — they were
    # presumably meant to come from the commented-out report_generator call
    # above. Confirm where these values should be produced.
    print('the type of', repositoryName, 'is', tempCommunityType, '\n"check .\YoshiViz\output"')
| [
"YoshiViz.Gui.Gui"
]
| [((83, 92), 'YoshiViz.Gui.Gui', 'Gui.Gui', ([], {}), '()\n', (90, 92), False, 'from YoshiViz import Gui\n')] |
''' Wrap an __init__ function so that I don't have to assign all the
parameters to a self. variable. '''
# https://stackoverflow.com/questions/5048329/python-decorator-for-automatic-binding-init-arguments
import inspect
from functools import wraps
def lazy_init(init):
    ''' Create an annotation to assign all the parameters to a self.
    variable.

    Both positional and keyword arguments are bound onto ``self`` before the
    wrapped ``__init__`` runs, so the constructor body can stay empty.
    '''
    arg_names = inspect.getfullargspec(init)[0]

    # pylint: disable=E1101
    @wraps(init)
    def new_init(self, *args, **kwargs):
        # Bind positionals to their declared parameter names, skipping 'self'.
        for name, value in zip(arg_names[1:], args):
            setattr(self, name, value)
        # Fix: also accept and bind keyword arguments; the original wrapper
        # only took *args, so any keyword call raised TypeError.
        for name, value in kwargs.items():
            setattr(self, name, value)
        init(self, *args, **kwargs)
    return new_init
| [
"inspect.getfullargspec",
"functools.wraps"
]
| [((441, 452), 'functools.wraps', 'wraps', (['init'], {}), '(init)\n', (446, 452), False, 'from functools import wraps\n'), ((375, 403), 'inspect.getfullargspec', 'inspect.getfullargspec', (['init'], {}), '(init)\n', (397, 403), False, 'import inspect\n')] |
# def register_feed():
import os
import cv2

# Capture a single named snapshot from the default webcam:
# SPACE saves the current frame as "<name>.jpg", ESC quits.
path = '/UserImage'
cam = cv2.VideoCapture(0)

name=input("Name: ")
cv2.namedWindow("test")

# Counts how many frames were saved in this session.
img_counter = 0

while True:
    ret, frame = cam.read()
    if not ret:
        # Camera unavailable or stream ended.
        print("failed to grab frame")
        break
    else:
        cv2.imshow("test", frame)

        k = cv2.waitKey(1)
        if k%256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        elif k%256 == 32:
            # SPACE pressed: save the current frame under the entered name.
            # Note: repeated saves overwrite the same "<name>.jpg" file.
            # img_name = "opencv_frame_{}.png".format(img_counter)
            cv2.imwrite(name + ".jpg", frame)
            # print("{} written!".format(img_name))
            print("Image Captured! Proceed...")
            img_counter += 1

# Release the camera handle and close the preview window.
cam.release()

cv2.destroyAllWindows()
"cv2.imwrite",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.waitKey",
"cv2.namedWindow"
]
| [((70, 89), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (86, 89), False, 'import cv2\n'), ((112, 135), 'cv2.namedWindow', 'cv2.namedWindow', (['"""test"""'], {}), "('test')\n", (127, 135), False, 'import cv2\n'), ((758, 781), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (779, 781), False, 'import cv2\n'), ((280, 305), 'cv2.imshow', 'cv2.imshow', (['"""test"""', 'frame'], {}), "('test', frame)\n", (290, 305), False, 'import cv2\n'), ((319, 333), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (330, 333), False, 'import cv2\n'), ((579, 612), 'cv2.imwrite', 'cv2.imwrite', (["(name + '.jpg')", 'frame'], {}), "(name + '.jpg', frame)\n", (590, 612), False, 'import cv2\n')] |
# -*- encoding: utf-8 -*-
'''
@Author : lance
@Email : <EMAIL>
'''
import time
from model_cx.inceptionresnet import inceptionresnet
from model_cx.vgg19two import vgg19_all_lr
from model_cx.inceptionv3 import inceptionv3
from model_cx.densenet import densenet
from model_cx.nasnet import nasnet
from model_cx.merge import merge
from model_cx.bcnn import bilinearnet
from model_cx.resnet import ResNet50
from model_cx.mobilenetv2 import mobilenetv2
from model_cx.senet import senet
if __name__=="__main__":

    # Shared training hyper-parameters for every architecture below.
    classes = 1
    epochs = 100
    steps_per_epoch = 113
    validation_steps = 48
    shape=(224,224)

    print("开始训练...")
    start = time.time()
    #
    # try:
    #     print("densenet")
    #     densenet(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    # try:
    #     print("bcnn")
    #     bilinearnet(classes, epochs, steps_per_epoch, validation_steps, shape)
    #
    # except Exception as e:
    #     print(e)
    # try:
    #     print("resnet")
    #     ResNet50(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    # Each model is trained in its own try/except so one failure does not
    # abort the remaining runs.
    try:
        print("merge")
        merge(classes, epochs, steps_per_epoch, validation_steps, shape)
    except Exception as e:
        print(e)
    # try:
    #     print("ince_res")
    #     inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
    #     # inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    # try:
    #     print("mobilenetv2")
    #     mobilenetv2(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    # try:
    #     print("inceptionv3")
    #     inceptionv3(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
    #     # inceptionv3(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    try:
        print("nasnet")
        nasnet(classes, epochs, steps_per_epoch, validation_steps, shape)
    except Exception as e:
        print(e)
    try:
        print("vgg19two")
        vgg19_all_lr(classes, epochs, steps_per_epoch, validation_steps, shape)
    except Exception as e:
        print(e)
    # NOTE(review): this branch is labelled "senet" but calls vgg19_all_lr at
    # (100, 100); the imported senet() is never used — confirm the intent.
    try:
        print("senet")
        vgg19_all_lr(classes, epochs, steps_per_epoch, validation_steps, (100,100))
    except Exception as e:
        print(e)

    # Total wall-clock time in hours.
    end = time.time()
    print("ETA:", (end - start) / 3600)
"time.time",
"model_cx.vgg19two.vgg19_all_lr",
"model_cx.nasnet.nasnet",
"model_cx.merge.merge"
]
| [((687, 698), 'time.time', 'time.time', ([], {}), '()\n', (696, 698), False, 'import time\n'), ((2581, 2592), 'time.time', 'time.time', ([], {}), '()\n', (2590, 2592), False, 'import time\n'), ((1263, 1327), 'model_cx.merge.merge', 'merge', (['classes', 'epochs', 'steps_per_epoch', 'validation_steps', 'shape'], {}), '(classes, epochs, steps_per_epoch, validation_steps, shape)\n', (1268, 1327), False, 'from model_cx.merge import merge\n'), ((2129, 2194), 'model_cx.nasnet.nasnet', 'nasnet', (['classes', 'epochs', 'steps_per_epoch', 'validation_steps', 'shape'], {}), '(classes, epochs, steps_per_epoch, validation_steps, shape)\n', (2135, 2194), False, 'from model_cx.nasnet import nasnet\n'), ((2287, 2358), 'model_cx.vgg19two.vgg19_all_lr', 'vgg19_all_lr', (['classes', 'epochs', 'steps_per_epoch', 'validation_steps', 'shape'], {}), '(classes, epochs, steps_per_epoch, validation_steps, shape)\n', (2299, 2358), False, 'from model_cx.vgg19two import vgg19_all_lr\n'), ((2448, 2524), 'model_cx.vgg19two.vgg19_all_lr', 'vgg19_all_lr', (['classes', 'epochs', 'steps_per_epoch', 'validation_steps', '(100, 100)'], {}), '(classes, epochs, steps_per_epoch, validation_steps, (100, 100))\n', (2460, 2524), False, 'from model_cx.vgg19two import vgg19_all_lr\n')] |
"""Coordinate changes in state space models."""
import abc
try:
# cached_property is only available in Python >=3.8
from functools import cached_property
except ImportError:
from cached_property import cached_property
import numpy as np
import scipy.special # for vectorised factorial
from probnum import config, linops, randvars
def apply_precon(precon, rv):
    """Apply a preconditioner to a Normal random variable.

    Public helper (needed by some integrator implementations) but not exposed
    through the 'randprocs' namespace (i.e. not imported in any __init__.py).
    """
    # There is no way of checking whether `rv` already has its Cholesky factor
    # computed. Square-root filtering needs the preconditioned factor, so we
    # always update the Cholesky factor here, which costs extra for
    # non-square-root algorithms. See Issues #319 and #329; once they are
    # resolved, this helper should become superfluous.
    mean = precon @ rv.mean
    cov_cholesky = precon @ rv.cov_cholesky  # valid because precon is diagonal
    cov = cov_cholesky @ cov_cholesky.T
    return randvars.Normal(mean, cov, cov_cholesky=cov_cholesky)
class Preconditioner(abc.ABC):
    """Coordinate change transformations as preconditioners in state space models.

    For some models, this makes the filtering and smoothing steps more numerically
    stable.
    """

    @abc.abstractmethod
    def __call__(self, step) -> np.ndarray:
        # Returns the preconditioning matrix for the given step size.
        # if more than step is needed, add them into the signature in the future
        raise NotImplementedError

    @cached_property
    def inverse(self) -> "Preconditioner":
        # The preconditioner that undoes this coordinate change.
        raise NotImplementedError
class NordsieckLikeCoordinates(Preconditioner):
    """Nordsieck-like coordinates.

    Similar to Nordsieck coordinates (which store the Taylor coefficients instead of the
    derivatives), but better for ODE filtering and smoothing. Used in integrator-transitions, e.g. in
    :class:`IntegratedWienerTransition`.
    """

    def __init__(self, powers, scales, dimension):
        # Storing powers and scales directly makes building the inverse cheap,
        # because they are easier to read off than to invert.
        self.powers = powers
        self.scales = scales
        self.dimension = dimension

    @classmethod
    def from_order(cls, order, dimension):
        """Convenience constructor used at initialisation time."""
        exponents = np.arange(order, -1, -1)
        return cls(
            powers=exponents + 0.5,
            scales=scipy.special.factorial(exponents),
            dimension=dimension,
        )

    def __call__(self, step):
        factors = np.abs(step) ** self.powers / self.scales
        if config.matrix_free:
            identity = linops.Identity(self.dimension)
            diagonal = linops.Scaling(factors=factors)
            return linops.Kronecker(A=identity, B=diagonal)
        return np.kron(np.eye(self.dimension), np.diag(factors))

    @cached_property
    def inverse(self) -> "NordsieckLikeCoordinates":
        """The coordinate change that undoes this one."""
        return NordsieckLikeCoordinates(
            powers=-self.powers,
            scales=1.0 / self.scales,
            dimension=self.dimension,
        )
| [
"numpy.abs",
"numpy.eye",
"probnum.randvars.Normal",
"probnum.linops.Scaling",
"numpy.diag",
"probnum.linops.Identity",
"numpy.arange"
]
| [((1145, 1210), 'probnum.randvars.Normal', 'randvars.Normal', (['new_mean', 'new_cov'], {'cov_cholesky': 'new_cov_cholesky'}), '(new_mean, new_cov, cov_cholesky=new_cov_cholesky)\n', (1160, 1210), False, 'from probnum import config, linops, randvars\n'), ((2482, 2506), 'numpy.arange', 'np.arange', (['order', '(-1)', '(-1)'], {}), '(order, -1, -1)\n', (2491, 2506), True, 'import numpy as np\n'), ((2991, 3013), 'numpy.eye', 'np.eye', (['self.dimension'], {}), '(self.dimension)\n', (2997, 3013), True, 'import numpy as np\n'), ((3015, 3038), 'numpy.diag', 'np.diag', (['scaling_vector'], {}), '(scaling_vector)\n', (3022, 3038), True, 'import numpy as np\n'), ((2735, 2747), 'numpy.abs', 'np.abs', (['step'], {}), '(step)\n', (2741, 2747), True, 'import numpy as np\n'), ((2863, 2894), 'probnum.linops.Identity', 'linops.Identity', (['self.dimension'], {}), '(self.dimension)\n', (2878, 2894), False, 'from probnum import config, linops, randvars\n'), ((2914, 2952), 'probnum.linops.Scaling', 'linops.Scaling', ([], {'factors': 'scaling_vector'}), '(factors=scaling_vector)\n', (2928, 2952), False, 'from probnum import config, linops, randvars\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
import time
import pandas as pd
import numpy as np
import csv
batch_size = 128
NUM_EPOCHS = 30
LR = 0.001
TIME_STEP = 4
class CCRNN(nn.Module):
    """Hybrid CNN + bidirectional-LSTM classifier over 8x4 board encodings.

    The input is expected as (batch, 8, 4): the 4x4 board stacked on top of
    its transpose. Three small convolutions read the board rows, while the
    LSTM reads all eight rows as a sequence; their features are concatenated
    and mapped to 4 move logits.
    """

    def __init__(self):
        super(CCRNN, self).__init__()
        # Attribute names and registration order are kept identical to the
        # original so saved state_dicts remain compatible.
        self.ccLSTM = nn.LSTM(
            input_size=4,
            hidden_size=128,
            num_layers=4,
            bidirectional=True,
            batch_first=True
        )

        self.ccCNN22 = nn.Conv2d(
            in_channels=1,
            out_channels=1,
            kernel_size=2,
            stride=2,
            padding=0
        )

        self.ccCNN14 = nn.Conv2d(
            in_channels=1,
            out_channels=1,
            kernel_size=(1, 4),
            stride=1,
            padding=0
        )

        self.ccCNN41 = nn.Conv2d(
            in_channels=1,
            out_channels=1,
            kernel_size=(4, 1),
            stride=1,
            padding=0
        )

        self.CNN22toFC = nn.Linear(4, 64)
        self.CNN41toFC = nn.Linear(4, 32)
        self.CNN14toFC = nn.Linear(4, 32)
        self.LSTMtoFC = nn.Linear(256, 128)
        self.FCtoOut = nn.Linear(256, 4)

    def forward(self, x):
        """Map a (batch, 8, 4) tensor to (batch, 4) move logits."""
        lstm_seq, _ = self.ccLSTM(x, None)
        lstm_feat = self.LSTMtoFC(lstm_seq[:, -1, :])

        # Treat the first four rows as a single-channel 4x4 image.
        board = torch.unsqueeze(x[:, 0:4, :], 1)
        square_feat = self.CNN22toFC(self.ccCNN22(board).view(-1, 4))
        # NOTE(review): the 4x1 conv output feeds CNN14toFC and the 1x4 conv
        # output feeds CNN41toFC — an apparent crossing preserved from the
        # original implementation; confirm whether it was intended.
        col_feat = self.CNN14toFC(self.ccCNN41(board).view(-1, 4))
        row_feat = self.CNN41toFC(self.ccCNN14(board).view(-1, 4))

        combined = torch.cat((square_feat, row_feat, col_feat, lstm_feat), 1)
        return self.FCtoOut(combined)
#------------------ load the data -----------------------------
csv_data = pd.read_csv('./drive/My Drive/DATA.csv')
csv_data = csv_data.values
A = csv_data.shape[0]
board_data = csv_data[:,0:16]
# X = np.log2(X)
# NOTE(review): this FloatTensor is immediately overwritten by the np.int64
# line below — dead assignment.
X = torch.FloatTensor(board_data)
X = np.int64(board_data)

# concatenate each 4x4 board with its transpose -> (N, 8, 4)
X = np.reshape(X, (-1,4,4))
XT = X.transpose(0,2,1)
X = np.concatenate((X,XT),axis=1)
print(X.shape)

direction_data = csv_data[:,16]
Y = np.int64(direction_data)

#-------------------------------------------------------
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2,shuffle=False)
X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)
Y_train = torch.LongTensor(Y_train)
Y_test = torch.LongTensor(Y_test)

train_dataset = torch.utils.data.TensorDataset(X_train,Y_train)
# test_dataset = torch.utils.data.TensorDataset(X_test,Y_test)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True
                                           )
# test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
#                                           batch_size=batch_size,
#                                           shuffle=False
#                                           )
# NOTE(review): everything from here down to the model creation repeats the
# hyper-parameters and data loading above verbatim — presumably a copy/paste
# duplicate; confirm it can be removed.
batch_size = 128
NUM_EPOCHS = 30
LR = 0.001
TIME_STEP = 4

#------------------ load the data -----------------------------
csv_data = pd.read_csv('./drive/My Drive/DATA.csv')
csv_data = csv_data.values
A = csv_data.shape[0]
board_data = csv_data[:,0:16]
# X = np.log2(X)
X = torch.FloatTensor(board_data)
X = np.int64(board_data)

# concatenate each 4x4 board with its transpose -> (N, 8, 4)
X = np.reshape(X, (-1,4,4))
XT = X.transpose(0,2,1)
X = np.concatenate((X,XT),axis=1)
print(X.shape)

direction_data = csv_data[:,16]
Y = np.int64(direction_data)

model = CCRNN()
model = model.cuda()
optimizer = optim.Adam(model.parameters(), lr = 0.001)
def train(epoch):
    """Run one training epoch over train_loader and checkpoint the model.

    Uses the module-level globals: train_loader, model, optimizer.
    Inputs are scaled by 1/11 (max tile exponent) before the forward pass.
    """
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = Variable(data).cuda(), Variable(target).cuda()
        data = data/11.0
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 50 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\t Loss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    # Fix: the original saved `self.model`, but `self` is undefined in this
    # free function, so every epoch ended with a NameError.
    torch.save(model, 'rnn_model_' + str(epoch) + '.pkl')
if __name__ == '__main__':
    # Entry point: run the full training schedule, one epoch at a time.
    for epoch in range(0, NUM_EPOCHS):
        train(epoch)
"numpy.int64",
"numpy.reshape",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"torch.LongTensor",
"torch.nn.LSTM",
"torch.unsqueeze",
"torch.utils.data.TensorDataset",
"torch.nn.Conv2d",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.nn.functional.cross_entropy",
"torch.autograd.Variable",
"torch.FloatTensor",
"torch.cat"
]
| [((2241, 2281), 'pandas.read_csv', 'pd.read_csv', (['"""./drive/My Drive/DATA.csv"""'], {}), "('./drive/My Drive/DATA.csv')\n", (2252, 2281), True, 'import pandas as pd\n'), ((2387, 2416), 'torch.FloatTensor', 'torch.FloatTensor', (['board_data'], {}), '(board_data)\n', (2404, 2416), False, 'import torch\n'), ((2422, 2442), 'numpy.int64', 'np.int64', (['board_data'], {}), '(board_data)\n', (2430, 2442), True, 'import numpy as np\n'), ((2459, 2484), 'numpy.reshape', 'np.reshape', (['X', '(-1, 4, 4)'], {}), '(X, (-1, 4, 4))\n', (2469, 2484), True, 'import numpy as np\n'), ((2515, 2546), 'numpy.concatenate', 'np.concatenate', (['(X, XT)'], {'axis': '(1)'}), '((X, XT), axis=1)\n', (2529, 2546), True, 'import numpy as np\n'), ((2601, 2625), 'numpy.int64', 'np.int64', (['direction_data'], {}), '(direction_data)\n', (2609, 2625), True, 'import numpy as np\n'), ((2728, 2780), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'shuffle': '(False)'}), '(X, Y, test_size=0.2, shuffle=False)\n', (2744, 2780), False, 'from sklearn.model_selection import train_test_split\n'), ((2791, 2817), 'torch.FloatTensor', 'torch.FloatTensor', (['X_train'], {}), '(X_train)\n', (2808, 2817), False, 'import torch\n'), ((2828, 2853), 'torch.FloatTensor', 'torch.FloatTensor', (['X_test'], {}), '(X_test)\n', (2845, 2853), False, 'import torch\n'), ((2865, 2890), 'torch.LongTensor', 'torch.LongTensor', (['Y_train'], {}), '(Y_train)\n', (2881, 2890), False, 'import torch\n'), ((2901, 2925), 'torch.LongTensor', 'torch.LongTensor', (['Y_test'], {}), '(Y_test)\n', (2917, 2925), False, 'import torch\n'), ((2945, 2993), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['X_train', 'Y_train'], {}), '(X_train, Y_train)\n', (2975, 2993), False, 'import torch\n'), ((3075, 3166), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(True)'}), 
'(dataset=train_dataset, batch_size=batch_size,\n shuffle=True)\n', (3102, 3166), False, 'import torch\n'), ((3506, 3546), 'pandas.read_csv', 'pd.read_csv', (['"""./drive/My Drive/DATA.csv"""'], {}), "('./drive/My Drive/DATA.csv')\n", (3517, 3546), True, 'import pandas as pd\n'), ((3652, 3681), 'torch.FloatTensor', 'torch.FloatTensor', (['board_data'], {}), '(board_data)\n', (3669, 3681), False, 'import torch\n'), ((3687, 3707), 'numpy.int64', 'np.int64', (['board_data'], {}), '(board_data)\n', (3695, 3707), True, 'import numpy as np\n'), ((3724, 3749), 'numpy.reshape', 'np.reshape', (['X', '(-1, 4, 4)'], {}), '(X, (-1, 4, 4))\n', (3734, 3749), True, 'import numpy as np\n'), ((3780, 3811), 'numpy.concatenate', 'np.concatenate', (['(X, XT)'], {'axis': '(1)'}), '((X, XT), axis=1)\n', (3794, 3811), True, 'import numpy as np\n'), ((3866, 3890), 'numpy.int64', 'np.int64', (['direction_data'], {}), '(direction_data)\n', (3874, 3890), True, 'import numpy as np\n'), ((490, 584), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': '(4)', 'hidden_size': '(128)', 'num_layers': '(4)', 'bidirectional': '(True)', 'batch_first': '(True)'}), '(input_size=4, hidden_size=128, num_layers=4, bidirectional=True,\n batch_first=True)\n', (497, 584), True, 'import torch.nn as nn\n'), ((685, 761), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(1)', 'kernel_size': '(2)', 'stride': '(2)', 'padding': '(0)'}), '(in_channels=1, out_channels=1, kernel_size=2, stride=2, padding=0)\n', (694, 761), True, 'import torch.nn as nn\n'), ((864, 949), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(1)', 'kernel_size': '(1, 4)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=1, out_channels=1, kernel_size=(1, 4), stride=1,\n padding=0)\n', (873, 949), True, 'import torch.nn as nn\n'), ((1048, 1133), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(1)', 'kernel_size': '(4, 1)', 'stride': '(1)', 'padding': '(0)'}), 
'(in_channels=1, out_channels=1, kernel_size=(4, 1), stride=1,\n padding=0)\n', (1057, 1133), True, 'import torch.nn as nn\n'), ((1234, 1250), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(64)'], {}), '(4, 64)\n', (1243, 1250), True, 'import torch.nn as nn\n'), ((1277, 1293), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(32)'], {}), '(4, 32)\n', (1286, 1293), True, 'import torch.nn as nn\n'), ((1320, 1336), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(32)'], {}), '(4, 32)\n', (1329, 1336), True, 'import torch.nn as nn\n'), ((1362, 1381), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (1371, 1381), True, 'import torch.nn as nn\n'), ((1406, 1423), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(4)'], {}), '(256, 4)\n', (1415, 1423), True, 'import torch.nn as nn\n'), ((1524, 1556), 'torch.unsqueeze', 'torch.unsqueeze', (['x[:, 0:4, :]', '(1)'], {}), '(x[:, 0:4, :], 1)\n', (1539, 1556), False, 'import torch\n'), ((2055, 2112), 'torch.cat', 'torch.cat', (['(CNN22toFC, CNN41toFC, CNN14toFC, LSTMtoFC)', '(1)'], {}), '((CNN22toFC, CNN41toFC, CNN14toFC, LSTMtoFC), 1)\n', (2064, 2112), False, 'import torch\n'), ((4250, 4281), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {}), '(output, target)\n', (4265, 4281), True, 'import torch.nn.functional as F\n'), ((4100, 4114), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (4108, 4114), False, 'from torch.autograd import Variable\n'), ((4123, 4139), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (4131, 4139), False, 'from torch.autograd import Variable\n')] |
import json, io, os, os.path
from Catalog.Schema import DBSchema, DBSchemaEncoder, DBSchemaDecoder
from Query.Plan import PlanBuilder
from Storage.StorageEngine import StorageEngine
class Database:
  """
  A top-level database engine class.

  For now, this primarily maintains a simple catalog,
  mapping relation names to schema objects.

  Also, it provides the ability to construct query
  plan objects, as well as wrapping the storage layer methods.
  """

  # Text encoding and filename used for the on-disk catalog checkpoint.
  checkpointEncoding = "latin1"
  checkpointFile = "db.catalog"

  def __init__(self, **kwargs):
    # Either copy an existing Database ("other") or build one from scratch,
    # forwarding storage-related kwargs to the StorageEngine.
    other = kwargs.get("other", None)
    if other:
      self.fromOther(other)

    else:
      storageArgs = {k:v for (k,v) in kwargs.items() \
                       if k in ["pageSize", "poolSize", "dataDir", "indexDir"]}

      self.relationMap = kwargs.get("relations", {})
      self.defaultPageSize = kwargs.get("pageSize", io.DEFAULT_BUFFER_SIZE)
      self.storage = kwargs.get("storage", StorageEngine(**storageArgs))

      # Reload an existing catalog unless this construction is itself part
      # of a restore (guards against infinite recursion via unpack()).
      checkpointFound = os.path.exists(os.path.join(self.storage.fileMgr.dataDir, Database.checkpointFile))
      restoring = "restore" in kwargs
      if not restoring and checkpointFound:
        self.restore()

  def fromOther(self, other):
    """Shallow-copy the catalog and storage handles from another Database."""
    self.relationMap = other.relationMap
    self.defaultPageSize = other.defaultPageSize
    self.storage = other.storage

  def close(self):
    """Shut down the underlying storage engine."""
    if self.storage:
      self.storage.close()

  # Database internal components
  def storageEngine(self):
    return self.storage

  def bufferPool(self):
    return self.storage.bufferPool if self.storage else None

  def fileManager(self):
    return self.storage.fileMgr if self.storage else None

  # User API

  # Catalog methods
  def relations(self):
    """Return a view of all relation names in the catalog."""
    return self.relationMap.keys()

  def hasRelation(self, relationName):
    return relationName in self.relationMap

  def relationSchema(self, relationName):
    # Returns None implicitly for unknown relations.
    if relationName in self.relationMap:
      return self.relationMap[relationName]

  # DDL statements
  def createRelation(self, relationName, relationFields):
    """Create a relation and persist the updated catalog; raises ValueError
    if the name is already taken."""
    if relationName not in self.relationMap:
      schema = DBSchema(relationName, relationFields)
      self.relationMap[relationName] = schema
      self.storage.createRelation(relationName, schema)
      self.checkpoint()
    else:
      raise ValueError("Relation '" + relationName + "' already exists")

  def removeRelation(self, relationName):
    """Drop a relation and persist the updated catalog; raises ValueError
    if the relation does not exist."""
    if relationName in self.relationMap:
      del self.relationMap[relationName]
      self.storage.removeRelation(relationName)
      self.checkpoint()
    else:
      raise ValueError("No relation '" + relationName + "' found in database")

  # DML statements

  # Returns a tuple id for the newly inserted data.
  def insertTuple(self, relationName, tupleData):
    if relationName in self.relationMap:
      return self.storage.insertTuple(relationName, tupleData)
    else:
      raise ValueError("Unknown relation '" + relationName + "' while inserting a tuple")

  def deleteTuple(self, tupleId):
    self.storage.deleteTuple(tupleId)

  def updateTuple(self, tupleId, tupleData):
    self.storage.updateTuple(tupleId, tupleData)

  # Queries

  # Returns an empty query builder that can access the current database.
  def query(self):
    return PlanBuilder(db=self)

  # Returns an iterable for query results, after initializing the given plan.
  def processQuery(self, queryPlan):
    return queryPlan.prepare(self)

  # Save the database internals to the data directory.
  def checkpoint(self):
    if self.storage:
      dbcPath = os.path.join(self.storage.fileMgr.dataDir, Database.checkpointFile)
      with open(dbcPath, 'w', encoding=Database.checkpointEncoding) as f:
        f.write(self.pack())

  # Load relations and schema from an existing data directory.
  def restore(self):
    if self.storage:
      dbcPath = os.path.join(self.storage.fileMgr.dataDir, Database.checkpointFile)
      with open(dbcPath, 'r', encoding=Database.checkpointEncoding) as f:
        other = Database.unpack(f.read(), self.storage)
        self.fromOther(other)

  # Database schema catalog serialization
  def pack(self):
    # Serialises [relationMap, defaultPageSize]; returns None if the map is
    # unset.
    if self.relationMap is not None:
      return json.dumps([self.relationMap, self.defaultPageSize], cls=DBSchemaEncoder)

  @classmethod
  def unpack(cls, buffer, storageEngine):
    # Counterpart to pack(); "restore=True" prevents __init__ from
    # re-triggering restore().
    (relationMap, pageSize) = json.loads(buffer, cls=DBSchemaDecoder)
    return cls(relations=relationMap, pageSize=pageSize, storage=storageEngine, restore=True)
if __name__ == "__main__":
  # Run the module's doctests when executed directly.
  import doctest
  doctest.testmod()
| [
"json.loads",
"json.dumps",
"os.path.join",
"doctest.testmod",
"Storage.StorageEngine.StorageEngine",
"Query.Plan.PlanBuilder",
"Catalog.Schema.DBSchema"
]
| [((4600, 4617), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (4615, 4617), False, 'import doctest\n'), ((3331, 3351), 'Query.Plan.PlanBuilder', 'PlanBuilder', ([], {'db': 'self'}), '(db=self)\n', (3342, 3351), False, 'from Query.Plan import PlanBuilder\n'), ((4415, 4454), 'json.loads', 'json.loads', (['buffer'], {'cls': 'DBSchemaDecoder'}), '(buffer, cls=DBSchemaDecoder)\n', (4425, 4454), False, 'import json, io, os, os.path\n'), ((2185, 2223), 'Catalog.Schema.DBSchema', 'DBSchema', (['relationName', 'relationFields'], {}), '(relationName, relationFields)\n', (2193, 2223), False, 'from Catalog.Schema import DBSchema, DBSchemaEncoder, DBSchemaDecoder\n'), ((3621, 3688), 'os.path.join', 'os.path.join', (['self.storage.fileMgr.dataDir', 'Database.checkpointFile'], {}), '(self.storage.fileMgr.dataDir, Database.checkpointFile)\n', (3633, 3688), False, 'import json, io, os, os.path\n'), ((3914, 3981), 'os.path.join', 'os.path.join', (['self.storage.fileMgr.dataDir', 'Database.checkpointFile'], {}), '(self.storage.fileMgr.dataDir, Database.checkpointFile)\n', (3926, 3981), False, 'import json, io, os, os.path\n'), ((4253, 4326), 'json.dumps', 'json.dumps', (['[self.relationMap, self.defaultPageSize]'], {'cls': 'DBSchemaEncoder'}), '([self.relationMap, self.defaultPageSize], cls=DBSchemaEncoder)\n', (4263, 4326), False, 'import json, io, os, os.path\n'), ((995, 1023), 'Storage.StorageEngine.StorageEngine', 'StorageEngine', ([], {}), '(**storageArgs)\n', (1008, 1023), False, 'from Storage.StorageEngine import StorageEngine\n'), ((1071, 1138), 'os.path.join', 'os.path.join', (['self.storage.fileMgr.dataDir', 'Database.checkpointFile'], {}), '(self.storage.fileMgr.dataDir, Database.checkpointFile)\n', (1083, 1138), False, 'import json, io, os, os.path\n')] |
import unittest
from routes import Mapper
class TestMapperStr(unittest.TestCase):
    """Verify that str(Mapper) renders the route table in the documented layout."""

    def test_str(self):
        mapper = Mapper()
        mapper.connect('/{controller}/{action}')
        mapper.connect('entries', '/entries', controller='entry', action='index')
        mapper.connect('entry', '/entries/{id}', controller='entry', action='show')
        expected = """\
Route name            Methods Path
                              /{controller}/{action}
entries                       /entries
entry                         /entries/{id}"""
        # Compare line by line; the rendered table may pad lines with trailing spaces.
        rendered_lines = str(mapper).splitlines()
        for want, got in zip(expected.splitlines(), rendered_lines):
            assert want == got.rstrip()
| [
"routes.Mapper"
]
| [((124, 132), 'routes.Mapper', 'Mapper', ([], {}), '()\n', (130, 132), False, 'from routes import Mapper\n')] |
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from functools import wraps
from typing import List
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext as _
from drf_yasg.utils import swagger_auto_schema
from pydantic.tools import parse_obj_as
from rest_framework import serializers, status, views
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet, mixins
from backend.account.permissions import RolePermission, role_perm_class
from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ
from backend.apps.group import tasks # noqa
from backend.apps.group.models import Group
from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ
from backend.apps.template.models import PermTemplatePolicyAuthorized
from backend.audit.audit import audit_context_setter, view_audit_decorator
from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean
from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz
from backend.biz.policy_tag import ConditionTagBean, ConditionTagBiz
from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker
from backend.biz.template import TemplateBiz
from backend.common.error_codes import error_codes
from backend.common.filters import NoCheckModelFilterBackend
from backend.common.serializers import SystemQuerySLZ
from backend.common.time import PERMANENT_SECONDS
from backend.service.constants import PermissionCodeEnum, RoleType, SubjectType
from backend.service.models import Subject
from backend.trans.group import GroupTrans
from .audit import (
GroupCreateAuditProvider,
GroupDeleteAuditProvider,
GroupMemberCreateAuditProvider,
GroupMemberDeleteAuditProvider,
GroupMemberRenewAuditProvider,
GroupPolicyDeleteAuditProvider,
GroupPolicyUpdateAuditProvider,
GroupTemplateCreateAuditProvider,
GroupTransferAuditProvider,
GroupUpdateAuditProvider,
)
from .constants import OperateEnum
from .filters import GroupFilter, GroupTemplateSystemFilter
from .serializers import (
GroupAddMemberSLZ,
GroupAuthoriedConditionSLZ,
GroupAuthorizationSLZ,
GroupCreateSLZ,
GroupDeleteMemberSLZ,
GroupIdSLZ,
GroupMemberUpdateExpiredAtSLZ,
GroupPolicyUpdateSLZ,
GroupSLZ,
GroupTemplateDetailSchemaSLZ,
GroupTemplateDetailSLZ,
GroupTemplateSchemaSLZ,
GroupTemplateSLZ,
GroupTransferSLZ,
GroupUpdateSLZ,
MemberSLZ,
SearchMemberSLZ,
)
permission_logger = logging.getLogger("permission")
def check_readonly_group(operation):
    """Decorator factory: block the wrapped view action when the target group is read-only.

    ``operation`` is the human-readable operation label interpolated into the
    FORBIDDEN error message.
    """

    def decorate(func):
        @wraps(func)
        def wrapper(view, request, *args, **kwargs):
            group = view.get_object()
            if group.readonly:
                # Read-only groups reject every mutating operation.
                raise error_codes.FORBIDDEN.format(
                    message=_("只读用户组({})无法进行({})操作!").format(group.id, operation), replace=True
                )
            return func(view, request, *args, **kwargs)

        return wrapper

    return decorate
class GroupQueryMixin:
    """Mixin: limit the queryset to groups visible to the requesting role."""

    def get_queryset(self):
        req = self.request
        role_query = RoleListQuery(req.role, req.user)
        return role_query.query_group()
class GroupPermissionMixin:
    """Mixin: deny object access unless the request's role manages the group."""

    def check_object_permissions(self, request, obj):
        allowed = RoleObjectRelationChecker(request.role).check_group(obj)
        if not allowed:
            self.permission_denied(request, message=f"{request.role.type} role can not access group {obj.id}")
class GroupViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, GenericViewSet):
    """CRUD endpoints for user groups, scoped to the requesting role."""

    permission_classes = [RolePermission]
    action_permission = {
        "create": PermissionCodeEnum.MANAGE_GROUP.value,
        "update": PermissionCodeEnum.MANAGE_GROUP.value,
        "destroy": PermissionCodeEnum.MANAGE_GROUP.value,
    }

    queryset = Group.objects.all()
    serializer_class = GroupSLZ
    filterset_class = GroupFilter
    lookup_field = "id"

    group_biz = GroupBiz()
    group_check_biz = GroupCheckBiz()
    role_biz = RoleBiz()

    group_trans = GroupTrans()

    @swagger_auto_schema(
        operation_description="创建用户组",
        request_body=GroupCreateSLZ(label="用户组"),
        responses={status.HTTP_201_CREATED: GroupIdSLZ(label="用户组ID")},
        tags=["group"],
    )
    @view_audit_decorator(GroupCreateAuditProvider)
    def create(self, request, *args, **kwargs):
        """
        Create a user group (optionally granting templates via a long task).
        """
        serializer = GroupCreateSLZ(data=request.data)
        serializer.is_valid(raise_exception=True)

        user_id = request.user.username
        data = serializer.validated_data

        # Group name must be unique within the role
        self.group_check_biz.check_role_group_name_unique(request.role.id, data["name"])

        # Check whether the role's group count would exceed the limit
        number_of_new_group = 1  # This API creates exactly one group (no batch), so the increment is 1
        self.group_check_biz.check_role_group_limit(request.role, number_of_new_group)

        # Members must fall inside the role's authorization scope
        members = parse_obj_as(List[Subject], data["members"])
        self.group_check_biz.check_role_subject_scope(request.role, members)

        group = self.group_biz.create_and_add_members(
            request.role.id, data["name"], data["description"], user_id, members, data["expired_at"]
        )

        # Use a long-running task to authorize multiple templates at once
        if data["templates"]:
            templates = self.group_trans.from_group_grant_data(data["templates"])
            self.group_biz.grant(request.role, group, templates)

        # Record the audit context
        audit_context_setter(group=group)

        return Response({"id": group.id}, status=status.HTTP_201_CREATED)

    def get_queryset(self):
        """Return groups visible to the current role (optionally filtered by role_id)."""
        request = self.request

        role = request.role
        username = request.user.username
        filter_role_id = request.query_params.get("role_id")

        # If the current role is STAFF and a role_id filter was supplied
        if role.type == RoleType.STAFF.value and filter_role_id:
            # Check whether the user falls inside that role's authorization scope
            filter_role = self.role_biz.get_role_scope_include_user(filter_role_id, username)
            if not filter_role:
                return Group.objects.none()

            # Return the filtered role's group list
            return RoleListQuery(filter_role, request.user).query_group()

        return RoleListQuery(role, request.user).query_group()

    @swagger_auto_schema(
        operation_description="用户组列表",
        responses={status.HTTP_200_OK: GroupSLZ(label="用户组", many=True)},
        tags=["group"],
    )
    def list(self, request, *args, **kwargs):
        return super().list(request, *args, **kwargs)

    @swagger_auto_schema(
        operation_description="用户组详情",
        responses={status.HTTP_200_OK: GroupSLZ(label="用户组")},
        tags=["group"],
    )
    def retrieve(self, request, *args, **kwargs):
        return super().retrieve(request, *args, **kwargs)

    @swagger_auto_schema(
        operation_description="修改用户组",
        request_body=GroupUpdateSLZ(label="用户组"),
        responses={status.HTTP_200_OK: GroupUpdateSLZ(label="用户组")},
        tags=["group"],
    )
    @view_audit_decorator(GroupUpdateAuditProvider)
    @check_readonly_group(operation=OperateEnum.GROUP_UPDATE.label)
    def update(self, request, *args, **kwargs):
        """Update a group's name/description (read-only groups are rejected by the decorator)."""
        group = self.get_object()
        serializer = GroupUpdateSLZ(group, data=request.data)
        serializer.is_valid(raise_exception=True)

        user_id = request.user.username
        data = serializer.validated_data

        # Group name must be unique within the role
        self.group_check_biz.check_role_group_name_unique(request.role.id, data["name"], group.id)

        group = self.group_biz.update(group, data["name"], data["description"], user_id)

        # Record the audit context
        audit_context_setter(group=group)

        return Response(serializer.data)

    @swagger_auto_schema(
        operation_description="删除用户组",
        responses={status.HTTP_200_OK: serializers.Serializer()},
        tags=["group"],
    )
    @view_audit_decorator(GroupDeleteAuditProvider)
    @check_readonly_group(operation=OperateEnum.GROUP_DELETE.label)
    def destroy(self, request, *args, **kwargs):
        """Delete a group (read-only groups are rejected by the decorator)."""
        group = self.get_object()
        self.group_biz.delete(group.id)

        # Record the audit context
        audit_context_setter(group=group)

        return Response({})
class GroupMemberViewSet(GroupPermissionMixin, GenericViewSet):
    """List, add and remove the members of a user group."""

    permission_classes = [RolePermission]
    action_permission = {
        "list": PermissionCodeEnum.MANAGE_GROUP.value,
        "create": PermissionCodeEnum.MANAGE_GROUP.value,
        "destroy": PermissionCodeEnum.MANAGE_GROUP.value,
    }

    queryset = Group.objects.all()
    lookup_field = "id"

    biz = GroupBiz()
    group_check_biz = GroupCheckBiz()

    @swagger_auto_schema(
        operation_description="用户组成员列表",
        query_serializer=SearchMemberSLZ(label="keyword"),
        responses={status.HTTP_200_OK: MemberSLZ(label="成员")},
        tags=["group"],
    )
    def list(self, request, *args, **kwargs):
        """List group members, either by keyword search or paged."""
        group = get_object_or_404(self.queryset, pk=kwargs["id"])

        # Permission check: the role must be able to access this group
        checker = RoleObjectRelationChecker(request.role)
        if not checker.check_group(group):
            raise error_codes.FORBIDDEN.format(message=_("用户组({})不在当前用户身份可访问的范围内").format(group.id), replace=True)

        if request.query_params.get("keyword"):
            slz = SearchMemberSLZ(data=request.query_params)
            slz.is_valid(raise_exception=True)
            keyword = slz.validated_data["keyword"].lower()

            group_members = self.biz.search_member_by_keyword(group.id, keyword)

            return Response({"results": [one.dict() for one in group_members]})

        pagination = LimitOffsetPagination()
        limit = pagination.get_limit(request)
        offset = pagination.get_offset(request)

        count, group_members = self.biz.list_paging_group_member(group.id, limit, offset)
        return Response({"count": count, "results": [one.dict() for one in group_members]})

    @swagger_auto_schema(
        operation_description="用户组添加成员",
        request_body=GroupAddMemberSLZ(label="成员"),
        responses={status.HTTP_200_OK: serializers.Serializer()},
        tags=["group"],
    )
    @view_audit_decorator(GroupMemberCreateAuditProvider)
    @check_readonly_group(operation=OperateEnum.GROUP_MEMBER_CREATE.label)
    def create(self, request, *args, **kwargs):
        """Add members (with an expiry time) to the group."""
        serializer = GroupAddMemberSLZ(data=request.data)
        serializer.is_valid(raise_exception=True)

        group = self.get_object()
        data = serializer.validated_data

        members_data = data["members"]
        expired_at = data["expired_at"]
        # Convert member dicts into Subject objects and de-duplicate them
        members = list(set(parse_obj_as(List[Subject], members_data)))

        # Members must fall inside the role's authorization scope
        self.group_check_biz.check_role_subject_scope(request.role, members)
        self.group_check_biz.check_member_count(group.id, len(members))

        permission_logger.info("group %s add members %s by user %s", group.id, members, request.user.username)

        # Add the members
        self.biz.add_members(group.id, members, expired_at)

        # Record the audit context
        audit_context_setter(group=group, members=[m.dict() for m in members])

        return Response({}, status=status.HTTP_201_CREATED)

    @swagger_auto_schema(
        operation_description="用户组删除成员",
        request_body=GroupDeleteMemberSLZ(label="成员"),
        responses={status.HTTP_200_OK: serializers.Serializer()},
        tags=["group"],
    )
    @view_audit_decorator(GroupMemberDeleteAuditProvider)
    @check_readonly_group(operation=OperateEnum.GROUP_MEMBER_DELETE.label)
    def destroy(self, request, *args, **kwargs):
        """Remove the given members from the group."""
        serializer = GroupDeleteMemberSLZ(data=request.data)
        serializer.is_valid(raise_exception=True)

        group = self.get_object()
        data = serializer.validated_data

        permission_logger.info(
            "group %s delete members %s by user %s", group.id, data["members"], request.user.username
        )

        self.biz.remove_members(str(group.id), parse_obj_as(List[Subject], data["members"]))

        # Record the audit context
        audit_context_setter(group=group, members=data["members"])

        return Response({})
class GroupMemberUpdateExpiredAtViewSet(GroupPermissionMixin, GenericViewSet):
    """Renew (extend the expiry of) existing group members."""

    permission_classes = [role_perm_class(PermissionCodeEnum.MANAGE_GROUP.value)]

    queryset = Group.objects.all()
    lookup_field = "id"

    # service
    group_biz = GroupBiz()

    @swagger_auto_schema(
        operation_description="用户组成员续期",
        request_body=GroupMemberUpdateExpiredAtSLZ(label="成员"),
        responses={status.HTTP_200_OK: serializers.Serializer()},
        tags=["group"],
    )
    @view_audit_decorator(GroupMemberRenewAuditProvider)
    @check_readonly_group(operation=OperateEnum.GROUP_MEMBER_RENEW.label)
    def create(self, request, *args, **kwargs):
        """Update the expiry time of the listed group members."""
        serializer = GroupMemberUpdateExpiredAtSLZ(data=request.data)
        serializer.is_valid(raise_exception=True)

        group = self.get_object()
        data = serializer.validated_data

        permission_logger.info(
            "group %s update members %s expired_at by user %s", group.id, data["members"], request.user.username
        )

        for m in data["members"]:
            # The bean expects `policy_expired_at`; rename the serializer field in place
            m["policy_expired_at"] = m.pop("expired_at")

        self.group_biz.update_members_expired_at(
            group.id, parse_obj_as(List[GroupMemberExpiredAtBean], data["members"])
        )

        # Record the audit context
        audit_context_setter(group=group, members=data["members"])

        return Response({})
class GroupTemplateViewSet(GroupPermissionMixin, GenericViewSet):
    """Read-only views over the permission templates authorized to a group."""

    permission_classes = [RolePermission]
    action_permission = {"create": PermissionCodeEnum.MANAGE_GROUP.value}

    pagination_class = None  # Remove the limit/offset parameters from swagger
    queryset = Group.objects.all()
    filterset_class = GroupTemplateSystemFilter
    filter_backends = [NoCheckModelFilterBackend]
    lookup_field = "id"

    template_biz = TemplateBiz()

    @swagger_auto_schema(
        operation_description="用户组拥有的权限模板列表",
        responses={status.HTTP_200_OK: GroupTemplateSchemaSLZ(label="权限模板", many=True)},
        tags=["group"],
    )
    def list(self, request, *args, **kwargs):
        """List the permission templates granted to the group."""
        group = get_object_or_404(self.queryset, pk=kwargs["id"])
        subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
        # defer("_data"): the serialized policy payload is not needed for listing
        queryset = PermTemplatePolicyAuthorized.objects.filter_by_subject(subject).defer("_data")
        queryset = self.filter_queryset(queryset)
        return Response(GroupTemplateSLZ(queryset, many=True).data)

    @swagger_auto_schema(
        operation_description="用户组权限模板授权信息",
        responses={status.HTTP_200_OK: GroupTemplateDetailSchemaSLZ(label="授权信息")},
        tags=["group"],
    )
    def retrieve(self, request, *args, **kwargs):
        """Return the group's authorization details for one template."""
        group = get_object_or_404(self.queryset, pk=kwargs["id"])
        template_id = kwargs["template_id"]
        subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
        authorized_template = PermTemplatePolicyAuthorized.objects.get_by_subject_template(subject, int(template_id))
        return Response(GroupTemplateDetailSLZ(authorized_template).data)
class GroupPolicyViewSet(GroupPermissionMixin, GenericViewSet):
    """Manage a group's policies: grant, list, delete and update."""

    permission_classes = [RolePermission]
    action_permission = {
        "create": PermissionCodeEnum.MANAGE_GROUP.value,
        "destroy": PermissionCodeEnum.MANAGE_GROUP.value,
        "update": PermissionCodeEnum.MANAGE_GROUP.value,
    }

    pagination_class = None  # Remove the limit/offset parameters from swagger
    queryset = Group.objects.all()
    lookup_field = "id"

    policy_query_biz = PolicyQueryBiz()
    policy_operation_biz = PolicyOperationBiz()
    group_biz = GroupBiz()

    group_trans = GroupTrans()

    @swagger_auto_schema(
        operation_description="用户组添加权限",
        request_body=GroupAuthorizationSLZ(label="授权信息"),
        responses={status.HTTP_201_CREATED: serializers.Serializer()},
        tags=["group"],
    )
    @view_audit_decorator(GroupTemplateCreateAuditProvider)
    @check_readonly_group(operation=OperateEnum.GROUP_POLICY_CREATE.label)
    def create(self, request, *args, **kwargs):
        """Grant the group the permissions described by the request templates."""
        serializer = GroupAuthorizationSLZ(data=request.data)
        serializer.is_valid(raise_exception=True)

        group = self.get_object()
        data = serializer.validated_data

        templates = self.group_trans.from_group_grant_data(data["templates"])
        self.group_biz.grant(request.role, group, templates)

        # Record the audit context
        audit_context_setter(
            group=group,
            templates=[{"system_id": t["system_id"], "template_id": t["template_id"]} for t in data["templates"]],
        )

        return Response({}, status=status.HTTP_201_CREATED)

    @swagger_auto_schema(
        operation_description="用户组自定义权限列表",
        query_serializer=SystemQuerySLZ,
        responses={status.HTTP_200_OK: PolicySLZ(label="策略", many=True)},
        tags=["group"],
    )
    def list(self, request, *args, **kwargs):
        """List the group's custom policies within one system."""
        slz = SystemQuerySLZ(data=request.query_params)
        slz.is_valid(raise_exception=True)

        system_id = slz.validated_data["system_id"]

        group = get_object_or_404(self.queryset, pk=kwargs["id"])

        subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
        policies = self.policy_query_biz.list_by_subject(system_id, subject)

        # ResourceNameAutoUpdate: refresh policies whose resources were renamed
        updated_policies = self.policy_operation_biz.update_due_to_renamed_resource(system_id, subject, policies)

        return Response([p.dict() for p in updated_policies])

    @swagger_auto_schema(
        operation_description="用户组删除自定义权限",
        request_body=PolicyDeleteSLZ(label="ids"),
        responses={status.HTTP_200_OK: serializers.Serializer()},
        tags=["group"],
    )
    @view_audit_decorator(GroupPolicyDeleteAuditProvider)
    @check_readonly_group(operation=OperateEnum.GROUP_POLICY_DELETE.label)
    def destroy(self, request, *args, **kwargs):
        """Delete the listed custom policies from the group."""
        slz = PolicyDeleteSLZ(data=request.data)
        slz.is_valid(raise_exception=True)

        system_id = slz.validated_data["system_id"]
        ids = slz.validated_data["ids"]

        group = self.get_object()
        subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
        permission_logger.info(
            "subject type=%s, id=%s policy deleted by user %s", subject.type, subject.id, request.user.username
        )

        # Fetch the policies before deletion so the audit record can include them
        policy_list = self.policy_query_biz.query_policy_list_by_policy_ids(system_id, subject, ids)

        # Delete the policies
        self.policy_operation_biz.delete_by_ids(system_id, subject, ids)

        # Record the audit context
        audit_context_setter(group=group, system_id=system_id, policies=policy_list.policies)

        return Response()

    @swagger_auto_schema(
        operation_description="用户组权限修改",
        request_body=GroupPolicyUpdateSLZ(label="修改策略"),
        responses={status.HTTP_200_OK: serializers.Serializer()},
        tags=["group"],
    )
    @view_audit_decorator(GroupPolicyUpdateAuditProvider)
    @check_readonly_group(operation=OperateEnum.GROUP_POLICY_UPDATE.label)
    def update(self, request, *args, **kwargs):
        """Replace the group's policies for the given actions (permanent expiry)."""
        group = self.get_object()

        slz = GroupPolicyUpdateSLZ(data=request.data)
        slz.is_valid(raise_exception=True)

        data = slz.validated_data
        system_id = data["system_id"]
        template_id = data["template_id"]

        policies = [PolicyBean(expired_at=PERMANENT_SECONDS, **action) for action in data["actions"]]
        self.group_biz.update_policies(request.role, group.id, system_id, template_id, policies)

        # Record the audit context
        audit_context_setter(group=group, system_id=system_id, template_id=template_id, policies=policies)

        return Response({})
class GroupSystemViewSet(GenericViewSet):
    """List the systems in which a group currently holds permissions."""

    pagination_class = None  # hide the limit/offset parameters in swagger
    queryset = Group.objects.all()
    lookup_field = "id"

    biz = GroupBiz()

    @swagger_auto_schema(
        operation_description="用户组有权限的所有系统列表",
        responses={status.HTTP_200_OK: PolicySystemSLZ(label="系统", many=True)},
        tags=["group"],
    )
    def list(self, request, *args, **kwargs):
        """Return per-system policy counters for the group."""
        target_group = self.get_object()
        system_counters = self.biz.list_system_counter(target_group.id)
        return Response([counter.dict() for counter in system_counters])
class GroupTransferView(views.APIView):
    """
    Transfer user groups to another role.
    """

    permission_classes = [role_perm_class(PermissionCodeEnum.TRANSFER_GROUP.value)]

    role_biz = RoleBiz()

    @swagger_auto_schema(
        operation_description="用户组批量转出",
        request_body=GroupTransferSLZ(label="用户转移"),
        responses={status.HTTP_200_OK: serializers.Serializer()},
        tags=["group"],
    )
    @view_audit_decorator(GroupTransferAuditProvider)
    def post(self, request, *args, **kwargs):
        """Move the listed groups under the target role."""
        slz = GroupTransferSLZ(data=request.data, context={"role": request.role})
        slz.is_valid(raise_exception=True)

        group_ids = slz.validated_data["group_ids"]
        role_id = slz.validated_data["role_id"]

        self.role_biz.transfer_groups_role(group_ids, role_id)

        # Record the audit context
        audit_context_setter(group_ids=group_ids, role_id=role_id)

        return Response({})
class GroupTemplateConditionCompareView(GroupPermissionMixin, GenericViewSet):
    """Diff submitted conditions against a group's template-authorized conditions."""

    condition_biz = ConditionTagBiz()
    template_biz = TemplateBiz()

    queryset = Group.objects.all()
    lookup_field = "id"

    @swagger_auto_schema(
        operation_description="权限模板操作条件对比",
        request_body=GroupAuthoriedConditionSLZ(label="操作条件"),
        responses={status.HTTP_200_OK: ConditionTagSLZ(label="条件差异", many=True)},
        tags=["group"],
    )
    def create(self, request, *args, **kwargs):
        """Compare the submitted conditions with those already authorized by the template."""
        serializer = GroupAuthoriedConditionSLZ(data=request.data)
        serializer.is_valid(raise_exception=True)

        data = serializer.validated_data

        group = self.get_object()

        action_id = data["action_id"]
        resource_group_id = data["resource_group_id"]
        related_resource_type = data["related_resource_type"]
        new_condition = parse_obj_as(List[ConditionTagBean], related_resource_type["condition"])

        # Locate the matching action and resource-type condition in the template data
        template_id = kwargs["template_id"]
        subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))
        authorized_template = PermTemplatePolicyAuthorized.objects.get_by_subject_template(subject, int(template_id))
        for action in authorized_template.data["actions"]:
            policy = PolicyBean.parse_obj(action)

            # Find the target action
            if policy.action_id == action_id:
                # The action's condition for the given resource type
                related_resource_type = policy.get_related_resource_type(
                    resource_group_id, related_resource_type["system_id"], related_resource_type["type"]
                )
                old_condition = related_resource_type.condition if related_resource_type else []

                # Compare the group's existing conditions against the submitted ones
                conditions = self.condition_biz.compare_and_tag(
                    new_condition, parse_obj_as(List[ConditionTagBean], old_condition), is_template=True
                )

                return Response([c.dict() for c in conditions])

        # No matching action in the template: the request is invalid
        raise error_codes.VALIDATE_ERROR.format(_("模板: {} 没有操作: {} 的权限").format(template_id, action_id))
class GroupCustomPolicyConditionCompareView(GroupPermissionMixin, GenericViewSet):
    """Diff submitted conditions against a group's custom-policy conditions."""

    policy_biz = PolicyQueryBiz()
    condition_biz = ConditionTagBiz()

    queryset = Group.objects.all()
    lookup_field = "id"

    @swagger_auto_schema(
        operation_description="条件差异对比",
        request_body=ConditionCompareSLZ(label="资源条件"),
        responses={status.HTTP_200_OK: ConditionTagSLZ(label="条件差异", many=True)},
        tags=["group"],
    )
    def create(self, request, *args, **kwargs):
        """Compare the submitted conditions with the policy's stored conditions."""
        serializer = ConditionCompareSLZ(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data

        group = self.get_object()
        subject = Subject(type=SubjectType.GROUP.value, id=str(group.id))

        # 1. Query the policy's stored condition for the resource type
        related_resource_type = data["related_resource_type"]
        old_condition = self.policy_biz.get_policy_resource_type_conditions(
            subject,
            data["policy_id"],
            data["resource_group_id"],
            related_resource_type["system_id"],
            related_resource_type["type"],
        )

        # 2. Compare and tag the differences
        conditions = self.condition_biz.compare_and_tag(
            parse_obj_as(List[ConditionTagBean], related_resource_type["condition"]),
            parse_obj_as(List[ConditionTagBean], old_condition),
            is_template=True,
        )

        return Response([c.dict() for c in conditions])
| [
"logging.getLogger",
"backend.biz.group.GroupCheckBiz",
"backend.apps.group.models.Group.objects.all",
"backend.apps.group.models.Group.objects.none",
"backend.biz.role.RoleBiz",
"backend.account.permissions.role_perm_class",
"backend.apps.template.models.PermTemplatePolicyAuthorized.objects.filter_by_subject",
"backend.apps.policy.serializers.PolicySLZ",
"backend.biz.role.RoleListQuery",
"backend.common.serializers.SystemQuerySLZ",
"backend.biz.policy.PolicyOperationBiz",
"backend.biz.policy.PolicyQueryBiz",
"backend.biz.role.RoleObjectRelationChecker",
"django.utils.translation.gettext",
"rest_framework.pagination.LimitOffsetPagination",
"backend.biz.policy.PolicyBean",
"django.shortcuts.get_object_or_404",
"backend.apps.policy.serializers.PolicySystemSLZ",
"functools.wraps",
"backend.trans.group.GroupTrans",
"backend.audit.audit.audit_context_setter",
"backend.biz.group.GroupBiz",
"rest_framework.serializers.Serializer",
"backend.apps.policy.serializers.PolicyDeleteSLZ",
"backend.biz.template.TemplateBiz",
"backend.apps.application.serializers.ConditionTagSLZ",
"backend.audit.audit.view_audit_decorator",
"pydantic.tools.parse_obj_as",
"rest_framework.response.Response",
"backend.biz.policy.PolicyBean.parse_obj",
"backend.biz.policy_tag.ConditionTagBiz",
"backend.apps.application.serializers.ConditionCompareSLZ"
]
| [((3317, 3348), 'logging.getLogger', 'logging.getLogger', (['"""permission"""'], {}), "('permission')\n", (3334, 3348), False, 'import logging\n'), ((4682, 4701), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ([], {}), '()\n', (4699, 4701), False, 'from backend.apps.group.models import Group\n'), ((4809, 4819), 'backend.biz.group.GroupBiz', 'GroupBiz', ([], {}), '()\n', (4817, 4819), False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((4842, 4857), 'backend.biz.group.GroupCheckBiz', 'GroupCheckBiz', ([], {}), '()\n', (4855, 4857), False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((4873, 4882), 'backend.biz.role.RoleBiz', 'RoleBiz', ([], {}), '()\n', (4880, 4882), False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((4902, 4914), 'backend.trans.group.GroupTrans', 'GroupTrans', ([], {}), '()\n', (4912, 4914), False, 'from backend.trans.group import GroupTrans\n'), ((5138, 5184), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', (['GroupCreateAuditProvider'], {}), '(GroupCreateAuditProvider)\n', (5158, 5184), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((7838, 7884), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', (['GroupUpdateAuditProvider'], {}), '(GroupUpdateAuditProvider)\n', (7858, 7884), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((8711, 8757), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', (['GroupDeleteAuditProvider'], {}), '(GroupDeleteAuditProvider)\n', (8731, 8757), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((9367, 9386), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ([], {}), '()\n', (9384, 9386), False, 'from backend.apps.group.models import Group\n'), ((9422, 9432), 
'backend.biz.group.GroupBiz', 'GroupBiz', ([], {}), '()\n', (9430, 9432), False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((9455, 9470), 'backend.biz.group.GroupCheckBiz', 'GroupCheckBiz', ([], {}), '()\n', (9468, 9470), False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((10959, 11011), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', (['GroupMemberCreateAuditProvider'], {}), '(GroupMemberCreateAuditProvider)\n', (10979, 11011), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((12252, 12304), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', (['GroupMemberDeleteAuditProvider'], {}), '(GroupMemberDeleteAuditProvider)\n', (12272, 12304), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((13150, 13169), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ([], {}), '()\n', (13167, 13169), False, 'from backend.apps.group.models import Group\n'), ((13225, 13235), 'backend.biz.group.GroupBiz', 'GroupBiz', ([], {}), '()\n', (13233, 13235), False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((13469, 13520), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', (['GroupMemberRenewAuditProvider'], {}), '(GroupMemberRenewAuditProvider)\n', (13489, 13520), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((14605, 14624), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ([], {}), '()\n', (14622, 14624), False, 'from backend.apps.group.models import Group\n'), ((14767, 14780), 'backend.biz.template.TemplateBiz', 'TemplateBiz', ([], {}), '()\n', (14778, 14780), False, 'from backend.biz.template import TemplateBiz\n'), ((16375, 16394), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ([], {}), '()\n', (16392, 16394), False, 'from 
backend.apps.group.models import Group\n'), ((16443, 16459), 'backend.biz.policy.PolicyQueryBiz', 'PolicyQueryBiz', ([], {}), '()\n', (16457, 16459), False, 'from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz\n'), ((16487, 16507), 'backend.biz.policy.PolicyOperationBiz', 'PolicyOperationBiz', ([], {}), '()\n', (16505, 16507), False, 'from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz\n'), ((16524, 16534), 'backend.biz.group.GroupBiz', 'GroupBiz', ([], {}), '()\n', (16532, 16534), False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((16554, 16566), 'backend.trans.group.GroupTrans', 'GroupTrans', ([], {}), '()\n', (16564, 16566), False, 'from backend.trans.group import GroupTrans\n'), ((16799, 16853), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', (['GroupTemplateCreateAuditProvider'], {}), '(GroupTemplateCreateAuditProvider)\n', (16819, 16853), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((18632, 18684), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', (['GroupPolicyDeleteAuditProvider'], {}), '(GroupPolicyDeleteAuditProvider)\n', (18652, 18684), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((19814, 19866), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', (['GroupPolicyUpdateAuditProvider'], {}), '(GroupPolicyUpdateAuditProvider)\n', (19834, 19866), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((20709, 20728), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ([], {}), '()\n', (20726, 20728), False, 'from backend.apps.group.models import Group\n'), ((20764, 20774), 'backend.biz.group.GroupBiz', 'GroupBiz', ([], {}), '()\n', (20772, 20774), False, 'from backend.biz.group import GroupBiz, GroupCheckBiz, GroupMemberExpiredAtBean\n'), ((21316, 21325), 
'backend.biz.role.RoleBiz', 'RoleBiz', ([], {}), '()\n', (21323, 21325), False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((21548, 21596), 'backend.audit.audit.view_audit_decorator', 'view_audit_decorator', (['GroupTransferAuditProvider'], {}), '(GroupTransferAuditProvider)\n', (21568, 21596), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((22131, 22148), 'backend.biz.policy_tag.ConditionTagBiz', 'ConditionTagBiz', ([], {}), '()\n', (22146, 22148), False, 'from backend.biz.policy_tag import ConditionTagBean, ConditionTagBiz\n'), ((22168, 22181), 'backend.biz.template.TemplateBiz', 'TemplateBiz', ([], {}), '()\n', (22179, 22181), False, 'from backend.biz.template import TemplateBiz\n'), ((22198, 22217), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ([], {}), '()\n', (22215, 22217), False, 'from backend.apps.group.models import Group\n'), ((24256, 24272), 'backend.biz.policy.PolicyQueryBiz', 'PolicyQueryBiz', ([], {}), '()\n', (24270, 24272), False, 'from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz\n'), ((24293, 24310), 'backend.biz.policy_tag.ConditionTagBiz', 'ConditionTagBiz', ([], {}), '()\n', (24308, 24310), False, 'from backend.biz.policy_tag import ConditionTagBean, ConditionTagBiz\n'), ((24327, 24346), 'backend.apps.group.models.Group.objects.all', 'Group.objects.all', ([], {}), '()\n', (24344, 24346), False, 'from backend.apps.group.models import Group\n'), ((3440, 3451), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (3445, 3451), False, 'from functools import wraps\n'), ((5792, 5836), 'pydantic.tools.parse_obj_as', 'parse_obj_as', (['List[Subject]', "data['members']"], {}), "(List[Subject], data['members'])\n", (5804, 5836), False, 'from pydantic.tools import parse_obj_as\n'), ((6313, 6346), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', ([], {'group': 'group'}), '(group=group)\n', (6333, 
6346), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((6363, 6421), 'rest_framework.response.Response', 'Response', (["{'id': group.id}"], {'status': 'status.HTTP_201_CREATED'}), "({'id': group.id}, status=status.HTTP_201_CREATED)\n", (6371, 6421), False, 'from rest_framework.response import Response\n'), ((8468, 8501), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', ([], {'group': 'group'}), '(group=group)\n', (8488, 8501), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((8518, 8543), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (8526, 8543), False, 'from rest_framework.response import Response\n'), ((8977, 9010), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', ([], {'group': 'group'}), '(group=group)\n', (8997, 9010), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((9027, 9039), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (9035, 9039), False, 'from rest_framework.response import Response\n'), ((9753, 9802), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['self.queryset'], {'pk': "kwargs['id']"}), "(self.queryset, pk=kwargs['id'])\n", (9770, 9802), False, 'from django.shortcuts import get_object_or_404\n'), ((9837, 9876), 'backend.biz.role.RoleObjectRelationChecker', 'RoleObjectRelationChecker', (['request.role'], {}), '(request.role)\n', (9862, 9876), False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((10437, 10460), 'rest_framework.pagination.LimitOffsetPagination', 'LimitOffsetPagination', ([], {}), '()\n', (10458, 10460), False, 'from rest_framework.pagination import LimitOffsetPagination\n'), ((11983, 12027), 'rest_framework.response.Response', 'Response', (['{}'], {'status': 'status.HTTP_201_CREATED'}), '({}, status=status.HTTP_201_CREATED)\n', (11991, 12027), 
False, 'from rest_framework.response import Response\n'), ((12882, 12940), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', ([], {'group': 'group', 'members': "data['members']"}), "(group=group, members=data['members'])\n", (12902, 12940), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((12957, 12969), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (12965, 12969), False, 'from rest_framework.response import Response\n'), ((13078, 13132), 'backend.account.permissions.role_perm_class', 'role_perm_class', (['PermissionCodeEnum.MANAGE_GROUP.value'], {}), '(PermissionCodeEnum.MANAGE_GROUP.value)\n', (13093, 13132), False, 'from backend.account.permissions import RolePermission, role_perm_class\n'), ((14259, 14317), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', ([], {'group': 'group', 'members': "data['members']"}), "(group=group, members=data['members'])\n", (14279, 14317), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((14334, 14346), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (14342, 14346), False, 'from rest_framework.response import Response\n'), ((15035, 15084), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['self.queryset'], {'pk': "kwargs['id']"}), "(self.queryset, pk=kwargs['id'])\n", (15052, 15084), False, 'from django.shortcuts import get_object_or_404\n'), ((15628, 15677), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['self.queryset'], {'pk': "kwargs['id']"}), "(self.queryset, pk=kwargs['id'])\n", (15645, 15677), False, 'from django.shortcuts import get_object_or_404\n'), ((17332, 17472), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', ([], {'group': 'group', 'templates': "[{'system_id': t['system_id'], 'template_id': t['template_id']} for t in\n data['templates']]"}), "(group=group, templates=[{'system_id': t['system_id'],\n 
'template_id': t['template_id']} for t in data['templates']])\n", (17352, 17472), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((17520, 17564), 'rest_framework.response.Response', 'Response', (['{}'], {'status': 'status.HTTP_201_CREATED'}), '({}, status=status.HTTP_201_CREATED)\n', (17528, 17564), False, 'from rest_framework.response import Response\n'), ((17841, 17882), 'backend.common.serializers.SystemQuerySLZ', 'SystemQuerySLZ', ([], {'data': 'request.query_params'}), '(data=request.query_params)\n', (17855, 17882), False, 'from backend.common.serializers import SystemQuerySLZ\n'), ((17995, 18044), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['self.queryset'], {'pk': "kwargs['id']"}), "(self.queryset, pk=kwargs['id'])\n", (18012, 18044), False, 'from django.shortcuts import get_object_or_404\n'), ((18823, 18857), 'backend.apps.policy.serializers.PolicyDeleteSLZ', 'PolicyDeleteSLZ', ([], {'data': 'request.data'}), '(data=request.data)\n', (18838, 18857), False, 'from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ\n'), ((19475, 19565), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', ([], {'group': 'group', 'system_id': 'system_id', 'policies': 'policy_list.policies'}), '(group=group, system_id=system_id, policies=policy_list\n .policies)\n', (19495, 19565), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((19577, 19587), 'rest_framework.response.Response', 'Response', ([], {}), '()\n', (19585, 19587), False, 'from rest_framework.response import Response\n'), ((20464, 20567), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', ([], {'group': 'group', 'system_id': 'system_id', 'template_id': 'template_id', 'policies': 'policies'}), '(group=group, system_id=system_id, template_id=\n template_id, policies=policies)\n', (20484, 20567), False, 'from backend.audit.audit import audit_context_setter, 
view_audit_decorator\n'), ((20579, 20591), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (20587, 20591), False, 'from rest_framework.response import Response\n'), ((21242, 21298), 'backend.account.permissions.role_perm_class', 'role_perm_class', (['PermissionCodeEnum.TRANSFER_GROUP.value'], {}), '(PermissionCodeEnum.TRANSFER_GROUP.value)\n', (21257, 21298), False, 'from backend.account.permissions import RolePermission, role_perm_class\n'), ((21942, 22000), 'backend.audit.audit.audit_context_setter', 'audit_context_setter', ([], {'group_ids': 'group_ids', 'role_id': 'role_id'}), '(group_ids=group_ids, role_id=role_id)\n', (21962, 22000), False, 'from backend.audit.audit import audit_context_setter, view_audit_decorator\n'), ((22017, 22029), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (22025, 22029), False, 'from rest_framework.response import Response\n'), ((22909, 22981), 'pydantic.tools.parse_obj_as', 'parse_obj_as', (['List[ConditionTagBean]', "related_resource_type['condition']"], {}), "(List[ConditionTagBean], related_resource_type['condition'])\n", (22921, 22981), False, 'from pydantic.tools import parse_obj_as\n'), ((24675, 24713), 'backend.apps.application.serializers.ConditionCompareSLZ', 'ConditionCompareSLZ', ([], {'data': 'request.data'}), '(data=request.data)\n', (24694, 24713), False, 'from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ\n'), ((12809, 12853), 'pydantic.tools.parse_obj_as', 'parse_obj_as', (['List[Subject]', "data['members']"], {}), "(List[Subject], data['members'])\n", (12821, 12853), False, 'from pydantic.tools import parse_obj_as\n'), ((14160, 14221), 'pydantic.tools.parse_obj_as', 'parse_obj_as', (['List[GroupMemberExpiredAtBean]', "data['members']"], {}), "(List[GroupMemberExpiredAtBean], data['members'])\n", (14172, 14221), False, 'from pydantic.tools import parse_obj_as\n'), ((18501, 18529), 'backend.apps.policy.serializers.PolicyDeleteSLZ', 
'PolicyDeleteSLZ', ([], {'label': '"""ids"""'}), "(label='ids')\n", (18516, 18529), False, 'from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ\n'), ((20258, 20308), 'backend.biz.policy.PolicyBean', 'PolicyBean', ([], {'expired_at': 'PERMANENT_SECONDS'}), '(expired_at=PERMANENT_SECONDS, **action)\n', (20268, 20308), False, 'from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz\n'), ((23332, 23360), 'backend.biz.policy.PolicyBean.parse_obj', 'PolicyBean.parse_obj', (['action'], {}), '(action)\n', (23352, 23360), False, 'from backend.biz.policy import PolicyBean, PolicyOperationBiz, PolicyQueryBiz\n'), ((25369, 25441), 'pydantic.tools.parse_obj_as', 'parse_obj_as', (['List[ConditionTagBean]', "related_resource_type['condition']"], {}), "(List[ConditionTagBean], related_resource_type['condition'])\n", (25381, 25441), False, 'from pydantic.tools import parse_obj_as\n'), ((25455, 25506), 'pydantic.tools.parse_obj_as', 'parse_obj_as', (['List[ConditionTagBean]', 'old_condition'], {}), '(List[ConditionTagBean], old_condition)\n', (25467, 25506), False, 'from pydantic.tools import parse_obj_as\n'), ((24459, 24492), 'backend.apps.application.serializers.ConditionCompareSLZ', 'ConditionCompareSLZ', ([], {'label': '"""资源条件"""'}), "(label='资源条件')\n", (24478, 24492), False, 'from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ\n'), ((4007, 4048), 'backend.biz.role.RoleListQuery', 'RoleListQuery', (['request.role', 'request.user'], {}), '(request.role, request.user)\n', (4020, 4048), False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((6896, 6916), 'backend.apps.group.models.Group.objects.none', 'Group.objects.none', ([], {}), '()\n', (6914, 6916), False, 'from backend.apps.group.models import Group\n'), ((7033, 7066), 'backend.biz.role.RoleListQuery', 'RoleListQuery', (['role', 'request.user'], {}), '(role, request.user)\n', (7046, 7066), False, 
'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((8649, 8673), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ([], {}), '()\n', (8671, 8673), False, 'from rest_framework import serializers, status, views\n'), ((11462, 11503), 'pydantic.tools.parse_obj_as', 'parse_obj_as', (['List[Subject]', 'members_data'], {}), '(List[Subject], members_data)\n', (11474, 11503), False, 'from pydantic.tools import parse_obj_as\n'), ((10897, 10921), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ([], {}), '()\n', (10919, 10921), False, 'from rest_framework import serializers, status, views\n'), ((12190, 12214), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ([], {}), '()\n', (12212, 12214), False, 'from rest_framework import serializers, status, views\n'), ((13407, 13431), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ([], {}), '()\n', (13429, 13431), False, 'from rest_framework import serializers, status, views\n'), ((15178, 15241), 'backend.apps.template.models.PermTemplatePolicyAuthorized.objects.filter_by_subject', 'PermTemplatePolicyAuthorized.objects.filter_by_subject', (['subject'], {}), '(subject)\n', (15232, 15241), False, 'from backend.apps.template.models import PermTemplatePolicyAuthorized\n'), ((16737, 16761), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ([], {}), '()\n', (16759, 16761), False, 'from rest_framework import serializers, status, views\n'), ((17716, 17748), 'backend.apps.policy.serializers.PolicySLZ', 'PolicySLZ', ([], {'label': '"""策略"""', 'many': '(True)'}), "(label='策略', many=True)\n", (17725, 17748), False, 'from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ\n'), ((18570, 18594), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ([], {}), '()\n', (18592, 18594), False, 'from rest_framework import serializers, status, views\n'), ((19752, 19776), 
'rest_framework.serializers.Serializer', 'serializers.Serializer', ([], {}), '()\n', (19774, 19776), False, 'from rest_framework import serializers, status, views\n'), ((20888, 20926), 'backend.apps.policy.serializers.PolicySystemSLZ', 'PolicySystemSLZ', ([], {'label': '"""系统"""', 'many': '(True)'}), "(label='系统', many=True)\n", (20903, 20926), False, 'from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicySLZ, PolicySystemSLZ\n'), ((21486, 21510), 'rest_framework.serializers.Serializer', 'serializers.Serializer', ([], {}), '()\n', (21508, 21510), False, 'from rest_framework import serializers, status, views\n'), ((22415, 22455), 'backend.apps.application.serializers.ConditionTagSLZ', 'ConditionTagSLZ', ([], {'label': '"""条件差异"""', 'many': '(True)'}), "(label='条件差异', many=True)\n", (22430, 22455), False, 'from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ\n'), ((24533, 24573), 'backend.apps.application.serializers.ConditionTagSLZ', 'ConditionTagSLZ', ([], {'label': '"""条件差异"""', 'many': '(True)'}), "(label='条件差异', many=True)\n", (24548, 24573), False, 'from backend.apps.application.serializers import ConditionCompareSLZ, ConditionTagSLZ\n'), ((4162, 4201), 'backend.biz.role.RoleObjectRelationChecker', 'RoleObjectRelationChecker', (['request.role'], {}), '(request.role)\n', (4187, 4201), False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((6962, 7002), 'backend.biz.role.RoleListQuery', 'RoleListQuery', (['filter_role', 'request.user'], {}), '(filter_role, request.user)\n', (6975, 7002), False, 'from backend.biz.role import RoleBiz, RoleListQuery, RoleObjectRelationChecker\n'), ((23895, 23946), 'pydantic.tools.parse_obj_as', 'parse_obj_as', (['List[ConditionTagBean]', 'old_condition'], {}), '(List[ConditionTagBean], old_condition)\n', (23907, 23946), False, 'from pydantic.tools import parse_obj_as\n'), ((24097, 24121), 'django.utils.translation.gettext', '_', (['"""模板: {} 没有操作: {} 
的权限"""'], {}), "('模板: {} 没有操作: {} 的权限')\n", (24098, 24121), True, 'from django.utils.translation import gettext as _\n'), ((9975, 10002), 'django.utils.translation.gettext', '_', (['"""用户组({})不在当前用户身份可访问的范围内"""'], {}), "('用户组({})不在当前用户身份可访问的范围内')\n", (9976, 10002), True, 'from django.utils.translation import gettext as _\n'), ((3687, 3712), 'django.utils.translation.gettext', '_', (['"""只读用户组({})无法进行({})操作!"""'], {}), "('只读用户组({})无法进行({})操作!')\n", (3688, 3712), True, 'from django.utils.translation import gettext as _\n')] |
#!/usr/bin/python3
from PIL import Image
from numpy import complex, array
from tqdm import tqdm
import colorsys
W=512
#W=142
def mandelbrot(x, y):
def get_colors(i):
color = 255 * array(colorsys.hsv_to_rgb(i / 255.0, 1.0, 0.5))
return tuple(color.astype(int))
c, cc = 0, complex(x, y)
for i in range(1, 1000):
if abs(c) > 2:
return get_colors(i)
c = c * c + cc
return 0,0,0
if __name__ == "__main__":
img = Image.new("RGB", (W, int(W / 2)))
pixels = img.load()
for x in tqdm(range(img.size[0])):
for y in tqdm(range(img.size[1])):
xx = (x - (0.75 * W)) / (W / 4)
yy = (y - (W / 4)) / (W / 4)
pixels[x, y] = mandelbrot(xx, yy)
img.show()
img.save("mandelbrot.jpg")
| [
"numpy.complex",
"colorsys.hsv_to_rgb"
]
| [((300, 313), 'numpy.complex', 'complex', (['x', 'y'], {}), '(x, y)\n', (307, 313), False, 'from numpy import complex, array\n'), ((202, 242), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['(i / 255.0)', '(1.0)', '(0.5)'], {}), '(i / 255.0, 1.0, 0.5)\n', (221, 242), False, 'import colorsys\n')] |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 2006-2010 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
standalone = True
import unittestX as unittest
import journal
debug = journal.debug( "Broadened_E_Q_Kernel_TestCase" )
warning = journal.warning( "Broadened_E_Q_Kernel_TestCase" )
import mcni
from mccomposite import mccompositebp
from mccomponents import mccomponentsbp
class TestCase(unittest.TestCase):
def test(self):
E_Q = "Q*Q/3."
S_Q = "1"
sigma_Q = "Q/2."
Qmin = 0; Qmax = 10
absorption_coefficient = scattering_coefficient = 1.
kernel = mccomponentsbp.create_Broadened_E_Q_Kernel(
E_Q, S_Q, sigma_Q,
Qmin, Qmax,
absorption_coefficient,
scattering_coefficient,
)
ei = 500 # meV
from mcni.utils import conversion
vil = conversion.e2v(ei)
vi = (0,0,vil)
import numpy.linalg as nl
import numpy as np
for i in range(10):
event = mcni.neutron(
r = (0,0,0), v = vi,
prob = 1, time = 0 )
kernel.scatter( event );
vf = np.array(event.state.velocity)
diffv = vi - vf
Q = conversion.v2k(nl.norm(diffv))
ef = conversion.v2e(nl.norm(vf))
E = ei - ef
# print E, Q, event
E1 = eval(E_Q)
continue
return
pass # end of TestCase
def main():
unittest.main()
return
if __name__ == "__main__":
main()
# version
__id__ = "$Id: TestCase.py 696 2010-11-09 06:23:06Z linjiao $"
# End of file
| [
"journal.debug",
"unittestX.main",
"mcni.utils.conversion.e2v",
"numpy.array",
"mcni.neutron",
"numpy.linalg.norm",
"journal.warning",
"mccomponents.mccomponentsbp.create_Broadened_E_Q_Kernel"
]
| [((445, 491), 'journal.debug', 'journal.debug', (['"""Broadened_E_Q_Kernel_TestCase"""'], {}), "('Broadened_E_Q_Kernel_TestCase')\n", (458, 491), False, 'import journal\n'), ((504, 552), 'journal.warning', 'journal.warning', (['"""Broadened_E_Q_Kernel_TestCase"""'], {}), "('Broadened_E_Q_Kernel_TestCase')\n", (519, 552), False, 'import journal\n'), ((1769, 1784), 'unittestX.main', 'unittest.main', ([], {}), '()\n', (1782, 1784), True, 'import unittestX as unittest\n'), ((877, 1002), 'mccomponents.mccomponentsbp.create_Broadened_E_Q_Kernel', 'mccomponentsbp.create_Broadened_E_Q_Kernel', (['E_Q', 'S_Q', 'sigma_Q', 'Qmin', 'Qmax', 'absorption_coefficient', 'scattering_coefficient'], {}), '(E_Q, S_Q, sigma_Q, Qmin, Qmax,\n absorption_coefficient, scattering_coefficient)\n', (919, 1002), False, 'from mccomponents import mccomponentsbp\n'), ((1142, 1160), 'mcni.utils.conversion.e2v', 'conversion.e2v', (['ei'], {}), '(ei)\n', (1156, 1160), False, 'from mcni.utils import conversion\n'), ((1294, 1341), 'mcni.neutron', 'mcni.neutron', ([], {'r': '(0, 0, 0)', 'v': 'vi', 'prob': '(1)', 'time': '(0)'}), '(r=(0, 0, 0), v=vi, prob=1, time=0)\n', (1306, 1341), False, 'import mcni\n'), ((1438, 1468), 'numpy.array', 'np.array', (['event.state.velocity'], {}), '(event.state.velocity)\n', (1446, 1468), True, 'import numpy as np\n'), ((1528, 1542), 'numpy.linalg.norm', 'nl.norm', (['diffv'], {}), '(diffv)\n', (1535, 1542), True, 'import numpy.linalg as nl\n'), ((1576, 1587), 'numpy.linalg.norm', 'nl.norm', (['vf'], {}), '(vf)\n', (1583, 1587), True, 'import numpy.linalg as nl\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 18:45:34 2020
@author: kakdemi
"""
import pandas as pd
#importing generators
all_generators = pd.read_excel('generators2.xlsx', sheet_name='NEISO generators (dispatch)')
#getting all oil generators
all_oil = all_generators[all_generators['typ']=='oil'].copy()
#getting all generators in every zone
CT_oil = all_oil[all_oil['zone']=='CT'].copy()
ME_oil = all_oil[all_oil['zone']=='ME'].copy()
NEMA_oil = all_oil[all_oil['zone']=='NEMA'].copy()
NH_oil = all_oil[all_oil['zone']=='NH'].copy()
RI_oil = all_oil[all_oil['zone']=='RI'].copy()
SEMA_oil = all_oil[all_oil['zone']=='SEMA'].copy()
VT_oil = all_oil[all_oil['zone']=='VT'].copy()
WCMA_oil = all_oil[all_oil['zone']=='WCMA'].copy()
#defining zones
zones = ['CT','ME','NEMA','NH','RI','SEMA','VT','WCMA']
#getting all slack generators
all_slack = all_generators[all_generators['typ']=='slack'].copy()
#getting generators other than slack and oil
all_other = all_generators[(all_generators['typ']!='oil') & (all_generators['typ']!='slack')].copy()
#defining a function to downsample oil generators
def oil_downsampler(zone):
#copying the oil generators in that zone and sorting wrt to their seg1 heat rate
Selected_line_oil = globals()[zone+'_oil'].copy()
sorted_df = Selected_line_oil.sort_values(by=['seg1'])
sorted_df_reset = sorted_df.reset_index(drop=True)
#creating 3 chunks wrt their heatrates
heat_rate = list(sorted_df_reset.loc[:,'seg1'])
num = int(len(heat_rate)/3)
First_plant = sorted_df_reset.iloc[:num,:].copy()
Second_plant = sorted_df_reset.iloc[num:num*2,:].copy()
Third_plant = sorted_df_reset.iloc[num*2:,:].copy()
#finding the relevant parameters for the downsampled oil plants
First_cap = First_plant.loc[:,'netcap'].sum()
Second_cap = Second_plant.loc[:,'netcap'].sum()
Third_cap = Third_plant.loc[:,'netcap'].sum()
netcap = [First_cap, Second_cap, Third_cap]
ramp_1 = First_cap
ramp_2 = Second_cap
ramp_3 = Third_cap
ramp = [ramp_1, ramp_2, ramp_3]
First_min_cap = First_cap*0.35
Second_min_cap = Second_cap*0.35
Third_min_cap = Third_cap*0.35
min_cap = [First_min_cap, Second_min_cap, Third_min_cap]
Min_u = [1, 1, 1]
Min_d = [1, 1, 1]
zones = [zone, zone, zone]
types = ['oil', 'oil', 'oil']
seg_1_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg1']
seg_1_1_new = seg_1_1.sum()/First_plant.loc[:,'netcap'].sum()
seg_1_2 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg2']
seg_1_2_new = seg_1_2.sum()/First_plant.loc[:,'netcap'].sum()
seg_1_3 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'seg3']
seg_1_3_new = seg_1_3.sum()/First_plant.loc[:,'netcap'].sum()
seg_2_1 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg1']
seg_2_1_new = seg_2_1.sum()/Second_plant.loc[:,'netcap'].sum()
seg_2_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg2']
seg_2_2_new = seg_2_2.sum()/Second_plant.loc[:,'netcap'].sum()
seg_2_3 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'seg3']
seg_2_3_new = seg_2_3.sum()/Second_plant.loc[:,'netcap'].sum()
seg_3_1 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg1']
seg_3_1_new = seg_3_1.sum()/Third_plant.loc[:,'netcap'].sum()
seg_3_2 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg2']
seg_3_2_new = seg_3_2.sum()/Third_plant.loc[:,'netcap'].sum()
seg_3_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'seg3']
seg_3_3_new = seg_3_3.sum()/Third_plant.loc[:,'netcap'].sum()
seg_1 = [seg_1_1_new, seg_2_1_new, seg_3_1_new]
seg_2 = [seg_1_2_new, seg_2_2_new, seg_3_2_new]
seg_3 = [seg_1_3_new, seg_2_3_new, seg_3_3_new]
var_om_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'var_om']
var_om_1_new = var_om_1.sum()/First_plant.loc[:,'netcap'].sum()
var_om_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'var_om']
var_om_2_new = var_om_2.sum()/Second_plant.loc[:,'netcap'].sum()
var_om_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'var_om']
var_om_3_new = var_om_3.sum()/Third_plant.loc[:,'netcap'].sum()
var_om = [var_om_1_new, var_om_2_new, var_om_3_new]
no_load_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'no_load']
no_load_1_new = no_load_1.sum()/First_plant.loc[:,'netcap'].sum()
no_load_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'no_load']
no_load_2_new = no_load_2.sum()/Second_plant.loc[:,'netcap'].sum()
no_load_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'no_load']
no_load_3_new = no_load_3.sum()/Third_plant.loc[:,'netcap'].sum()
no_load = [no_load_1_new, no_load_2_new, no_load_3_new]
st_cost_1 = First_plant.loc[:,'netcap'] * First_plant.loc[:,'st_cost']
st_cost_1_new = st_cost_1.sum()/First_plant.loc[:,'netcap'].sum()
st_cost_2 = Second_plant.loc[:,'netcap'] * Second_plant.loc[:,'st_cost']
st_cost_2_new = st_cost_2.sum()/Second_plant.loc[:,'netcap'].sum()
st_cost_3 = Third_plant.loc[:,'netcap'] * Third_plant.loc[:,'st_cost']
st_cost_3_new = st_cost_3.sum()/Third_plant.loc[:,'netcap'].sum()
st_cost = [st_cost_1_new, st_cost_2_new, st_cost_3_new]
name = [zone+'_agg_oil_1', zone+'_agg_oil_2', zone+'_agg_oil_3']
#creating a dataframe that includes downsampled oil generators
list_labels = list(WCMA_oil.columns)
list_columns = [name, types, zones, netcap, seg_1, seg_2, seg_3, min_cap, ramp, Min_u,
Min_d, var_om, no_load, st_cost]
zipped_list = list(zip(list_labels, list_columns))
gen_df = dict(zipped_list)
df_oils = pd.DataFrame(gen_df)
return df_oils
#downsampling oil generators in every zone by using the defined function
for z in zones:
globals()[z+'_agg_oil_df'] = oil_downsampler(z)
#adding downsampled oil generators to create a complete list of generators
final_generators = pd.concat([all_other, CT_agg_oil_df, ME_agg_oil_df, NEMA_agg_oil_df,
NH_agg_oil_df, RI_agg_oil_df, SEMA_agg_oil_df, VT_agg_oil_df,
WCMA_agg_oil_df, all_slack], ignore_index=True)
#exporting the generators as an Excel file
final_generators.to_excel('generators.xlsx', sheet_name='NEISO generators (dispatch)', index=False)
| [
"pandas.DataFrame",
"pandas.concat",
"pandas.read_excel"
]
| [((147, 222), 'pandas.read_excel', 'pd.read_excel', (['"""generators2.xlsx"""'], {'sheet_name': '"""NEISO generators (dispatch)"""'}), "('generators2.xlsx', sheet_name='NEISO generators (dispatch)')\n", (160, 222), True, 'import pandas as pd\n'), ((5938, 6124), 'pandas.concat', 'pd.concat', (['[all_other, CT_agg_oil_df, ME_agg_oil_df, NEMA_agg_oil_df, NH_agg_oil_df,\n RI_agg_oil_df, SEMA_agg_oil_df, VT_agg_oil_df, WCMA_agg_oil_df, all_slack]'], {'ignore_index': '(True)'}), '([all_other, CT_agg_oil_df, ME_agg_oil_df, NEMA_agg_oil_df,\n NH_agg_oil_df, RI_agg_oil_df, SEMA_agg_oil_df, VT_agg_oil_df,\n WCMA_agg_oil_df, all_slack], ignore_index=True)\n', (5947, 6124), True, 'import pandas as pd\n'), ((5647, 5667), 'pandas.DataFrame', 'pd.DataFrame', (['gen_df'], {}), '(gen_df)\n', (5659, 5667), True, 'import pandas as pd\n')] |
from fastapi import APIRouter, status, Body, HTTPException
from fastapi.encoders import jsonable_encoder
from starlette.responses import JSONResponse
from app.models.common import *
from app.models.clickup import *
from app.database.crud.clickup import *
router = APIRouter()
@router.get("/", response_description="Clickup integrations are retrieved.")
async def get_clickup_integrations():
clickups = await retrieve_clickups()
return (
ResponseModel(clickups, "Clickup integrations data retrieved successfully")
if len(clickups) > 0
else ResponseModel(clickups, "Empty list returned")
)
@router.post(
"/", response_description="Clickup integrations data added into the database."
)
async def add_clickup_a_integration(clickup: ClickupModel = Body(...)):
clickup = jsonable_encoder(clickup)
new_clickup = await add_new_clickup(clickup)
return ResponseModel(
new_clickup,
"clickup integration created successfully.",
status.HTTP_201_CREATED,
)
@router.get("/{id}/", response_description="Clickup data retrieved.")
async def find_clickup_integration(id):
clickup = await retrieve_clickup(id)
return (
ResponseModel(clickup, "Clickup integrations data retrieved successfully")
if clickup
else ErrorResponseModel(
"An error occured.", status.HTTP_404_NOT_FOUND, "Integration doesn't exist."
)
)
@router.put(
"/{id}/", response_description="Clickup integrations data updated in the database."
)
async def update_a_clickup_integration(
id: str, clickup: UpdateClickupModel = Body(...)
):
clickup = jsonable_encoder(clickup)
updated_clickup = await update_clickup_data(id, clickup)
return (
ResponseModel({"id": id}, "Clickup integration updated successfully")
if updated_clickup
else ErrorResponseModel(
"An error occurred",
status.HTTP_404_NOT_FOUND,
"There was an error updating the Clickup integration.",
)
)
@router.delete("/{id}/", response_description="Delete the integration")
async def delete_clickup_integration(id: str):
deleted_clickup = await delete_integration(id)
return (
ResponseModel(
"Integration with ID: {} removed".format(id),
"Integration deleted successfully",
)
if deleted_clickup
else ErrorResponseModel(
"An error occured",
status.HTTP_404_NOT_FOUND,
"Integration with id {0} doesn't exist".format(id),
)
)
| [
"fastapi.APIRouter",
"fastapi.encoders.jsonable_encoder",
"fastapi.Body"
]
| [((266, 277), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (275, 277), False, 'from fastapi import APIRouter, status, Body, HTTPException\n'), ((789, 798), 'fastapi.Body', 'Body', (['...'], {}), '(...)\n', (793, 798), False, 'from fastapi import APIRouter, status, Body, HTTPException\n'), ((815, 840), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', (['clickup'], {}), '(clickup)\n', (831, 840), False, 'from fastapi.encoders import jsonable_encoder\n'), ((1623, 1632), 'fastapi.Body', 'Body', (['...'], {}), '(...)\n', (1627, 1632), False, 'from fastapi import APIRouter, status, Body, HTTPException\n'), ((1650, 1675), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', (['clickup'], {}), '(clickup)\n', (1666, 1675), False, 'from fastapi.encoders import jsonable_encoder\n')] |
# coding=utf-8
import dateutil.parser
import flask
import json
import os
import time
import urllib
import yaml
EPISODES = yaml.load(open("episodes.yaml").read())
app = flask.Flask(__name__,
static_path="/assets",
static_folder="assets")
app.jinja_env.filters["strftime"] = \
lambda str, fmt: dateutil.parser.parse(str).strftime(fmt)
app.jinja_env.filters["quote_plus"] = lambda u: urllib.quote_plus(u)
ASSETS = os.path.join(app.root_path, "assets")
@app.route("/favicon.ico")
def favicon():
return flask.send_from_directory(
ASSETS,
"favicon.ico",
mimetype="image/icon")
@app.route("/")
def home():
return flask.render_template("pages/home.html",
playlist=os.environ["PLAYLIST"],
episodes=EPISODES,
autoplay=not app.debug)
@app.route("/episod/<int:number>")
def episode(number):
if number < 1:
return "not found"
elif number > len(EPISODES):
return "coming soon"
else:
episode = EPISODES[len(EPISODES) - number]
template = "pages/episode/%s.html" % (
"youtube" if "yt" in episode else "facebook"
)
return flask.render_template(template,
number=number,
episode=episode,
episodes=EPISODES)
| [
"flask.render_template",
"flask.send_from_directory",
"urllib.quote_plus",
"flask.Flask",
"os.path.join"
]
| [((172, 240), 'flask.Flask', 'flask.Flask', (['__name__'], {'static_path': '"""/assets"""', 'static_folder': '"""assets"""'}), "(__name__, static_path='/assets', static_folder='assets')\n", (183, 240), False, 'import flask\n'), ((458, 495), 'os.path.join', 'os.path.join', (['app.root_path', '"""assets"""'], {}), "(app.root_path, 'assets')\n", (470, 495), False, 'import os\n'), ((426, 446), 'urllib.quote_plus', 'urllib.quote_plus', (['u'], {}), '(u)\n', (443, 446), False, 'import urllib\n'), ((549, 620), 'flask.send_from_directory', 'flask.send_from_directory', (['ASSETS', '"""favicon.ico"""'], {'mimetype': '"""image/icon"""'}), "(ASSETS, 'favicon.ico', mimetype='image/icon')\n", (574, 620), False, 'import flask\n'), ((687, 807), 'flask.render_template', 'flask.render_template', (['"""pages/home.html"""'], {'playlist': "os.environ['PLAYLIST']", 'episodes': 'EPISODES', 'autoplay': '(not app.debug)'}), "('pages/home.html', playlist=os.environ['PLAYLIST'],\n episodes=EPISODES, autoplay=not app.debug)\n", (708, 807), False, 'import flask\n'), ((1184, 1271), 'flask.render_template', 'flask.render_template', (['template'], {'number': 'number', 'episode': 'episode', 'episodes': 'EPISODES'}), '(template, number=number, episode=episode, episodes=\n EPISODES)\n', (1205, 1271), False, 'import flask\n')] |
# Generated by Django 2.2.14 on 2020-11-08 05:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('processes', '0131_auto_20201107_2316'),
]
operations = [
migrations.RunSQL(
"UPDATE processes_workflow SET run_environment_id = scheduling_run_environment_id WHERE run_environment_id IS NULL;",
reverse_sql='',
),
]
| [
"django.db.migrations.RunSQL"
]
| [((242, 403), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""UPDATE processes_workflow SET run_environment_id = scheduling_run_environment_id WHERE run_environment_id IS NULL;"""'], {'reverse_sql': '""""""'}), "(\n 'UPDATE processes_workflow SET run_environment_id = scheduling_run_environment_id WHERE run_environment_id IS NULL;'\n , reverse_sql='')\n", (259, 403), False, 'from django.db import migrations\n')] |
from __future__ import absolute_import, unicode_literals
from dcs.celeryconf import app
import time
from django.core.mail import EmailMessage
@app.task(bind=True, ignore_result=False, max_retries=3)
def demo_task1(self):
result = {
'val1': 1,
'val2': 2,
'val3': 3,
}
print("hellp")
from_email = '<EMAIL>'
to_list = ['<EMAIL>',]
sendemail = EmailMessage("Message received!!!", "Hello test", str(from_email), to_list)
sendemail.send()
return result
| [
"dcs.celeryconf.app.task"
]
| [((145, 200), 'dcs.celeryconf.app.task', 'app.task', ([], {'bind': '(True)', 'ignore_result': '(False)', 'max_retries': '(3)'}), '(bind=True, ignore_result=False, max_retries=3)\n', (153, 200), False, 'from dcs.celeryconf import app\n')] |
from typing import Any, List, Callable
from fastapi import APIRouter, HTTPException, status, BackgroundTasks
from app import schemas
from app.core import docker_client
import json
from copy import deepcopy
router = APIRouter()
@router.get("/images", response_model=schemas.DockerImageRespond)
def get_docker_image() -> Any:
images_list = docker_client.images.list(all=True)
return {
'images': [{'id': image.short_id, 'tags': image.tags} for image in images_list if image.tags]
}
@router.get("/volumes", response_model=schemas.DockerVolumeRespond)
def get_docker_volume() -> Any:
volumes_list = docker_client.volumes.list()
return {
'volumes': [{'id': volume.short_id, 'name': volume.name} for volume in volumes_list]
}
| [
"app.core.docker_client.images.list",
"fastapi.APIRouter",
"app.core.docker_client.volumes.list"
]
| [((219, 230), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (228, 230), False, 'from fastapi import APIRouter, HTTPException, status, BackgroundTasks\n'), ((348, 383), 'app.core.docker_client.images.list', 'docker_client.images.list', ([], {'all': '(True)'}), '(all=True)\n', (373, 383), False, 'from app.core import docker_client\n'), ((623, 651), 'app.core.docker_client.volumes.list', 'docker_client.volumes.list', ([], {}), '()\n', (649, 651), False, 'from app.core import docker_client\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import unicode_literals
import json
import os
import os.path
import subprocess
import sys
import unittest
import pytest
from ...__main__ import TESTING_TOOLS_ROOT
# Directory the tests are launched from; used only to shorten the argv
# echoed by _run_adapter's debug print.
CWD = os.getcwd()
# Root of the on-disk fixture projects used by the discovery tests.
DATA_DIR = os.path.join(os.path.dirname(__file__), '.data')
# The adapter entry-point script exercised by these functional tests.
SCRIPT = os.path.join(TESTING_TOOLS_ROOT, 'run_adapter.py')
def resolve_testroot(name):
    """Return ``(project_root, tests_dir)`` paths for the named fixture project."""
    project_root = os.path.join(DATA_DIR, name)
    tests_dir = os.path.join(project_root, 'tests')
    return project_root, tests_dir
def run_adapter(cmd, tool, *cliargs):
    """Run the adapter once with stdio hidden; on failure retry with stdio shown.

    The quiet first attempt keeps normal test output clean.  If it fails, the
    command is re-run with pytest's stdout/stderr visible; if that fails too,
    the captured output is printed and None is (implicitly) returned.
    """
    try:
        return _run_adapter(cmd, tool, *cliargs)
    except subprocess.CalledProcessError:
        pass
    # Re-run pytest but print out stdout & stderr this time.
    try:
        return _run_adapter(cmd, tool, *cliargs, hidestdio=False)
    except subprocess.CalledProcessError as exc:
        print(exc.output)
def _run_adapter(cmd, tool, *cliargs, **kwargs):
    """Invoke the adapter script in a subprocess and return its stdout.

    With ``hidestdio=False`` the '--no-hide-stdio' flag is passed to the
    adapter and the child's stderr is folded into its stdout.  Raises
    subprocess.CalledProcessError on a non-zero exit.
    """
    hidestdio = kwargs.pop('hidestdio', True)
    assert not kwargs  # no other keyword arguments are supported
    run_kwargs = {}
    argv = [sys.executable, SCRIPT, cmd, tool, '--'] + list(cliargs)
    if not hidestdio:
        # Index 4 is the '--' separator, so the flag lands among the
        # adapter's own arguments rather than the tool's.
        argv.insert(4, '--no-hide-stdio')
        run_kwargs['stderr'] = subprocess.STDOUT
    argv.append('--cache-clear')
    shown = ' '.join(arg.rpartition(CWD + '/')[-1] for arg in argv)
    print('running {!r}'.format(shown))
    return subprocess.check_output(argv,
                                   universal_newlines=True,
                                   **run_kwargs)
def fix_path(nodeid):
    """Return *nodeid* with '/' swapped for the platform path separator."""
    sep = os.path.sep
    return sep.join(nodeid.split('/'))
def fix_test_order(tests):
    """Return *tests* with each file's tests sorted by ID (Python < 3.6 only).

    On Python 3.6+ the incoming order is already deterministic, so the list
    is returned unchanged.
    """
    if sys.version_info >= (3, 6):
        return tests
    ordered = []
    pending = []
    current_file = None
    for entry in tests:
        if (current_file or '???') not in entry['id']:
            # Crossed into a new source file: flush the previous file's
            # group in sorted order.
            ordered.extend(sorted(pending, key=lambda t: t['id']))
            pending = []
            current_file = entry['id'].partition('.py::')[0] + '.py'
        pending.append(entry)
    ordered.extend(sorted(pending, key=lambda t: t['id']))
    return ordered
def fix_source(tests, testid, srcfile, lineno):
    """Rewrite *testid*'s 'source' entry in place to ``srcfile:lineno``.

    If *srcfile* is falsy, the test's existing source file is kept and only
    the line number is replaced.  Raises KeyError when *testid* is not found.
    """
    testid = fix_path(testid)
    found = next((t for t in tests if t['id'] == testid), None)
    if found is None:
        raise KeyError('test {!r} not found'.format(testid))
    if not srcfile:
        srcfile = found['source'].rpartition(':')[0]
    found['source'] = fix_path('{}:{}'.format(srcfile, lineno))
@pytest.mark.functional
class PytestTests(unittest.TestCase):
    """Functional tests for the adapter's 'discover' command.

    Each test launches the real adapter script in a subprocess against a
    fixture project under DATA_DIR and compares the JSON it prints with the
    expected discovery results.
    """
    def complex(self, testroot):
        # Expected discovery output for the 'complex' fixture project,
        # with the placeholder root filled in (shallow copy of COMPLEX).
        results = COMPLEX.copy()
        results['root'] = testroot
        return [results]
    def test_discover_simple(self):
        """Discovery on the 'simple' fixture yields one file with one test."""
        projroot, testroot = resolve_testroot('simple')
        out = run_adapter('discover', 'pytest',
                          '--rootdir', projroot,
                          testroot)
        result = json.loads(out)
        self.maxDiff = None
        self.assertEqual(result, [{
            'root': projroot,
            'rootid': '.',
            'parents': [
                {'id': fix_path('./tests'),
                 'kind': 'folder',
                 'name': 'tests',
                 'parentid': '.',
                 },
                {'id': fix_path('./tests/test_spam.py'),
                 'kind': 'file',
                 'name': 'test_spam.py',
                 'parentid': fix_path('./tests'),
                 },
                ],
            'tests': [
                {'id': fix_path('./tests/test_spam.py::test_simple'),
                 'name': 'test_simple',
                 'source': fix_path('./tests/test_spam.py:2'),
                 'markers': [],
                 'parentid': fix_path('./tests/test_spam.py'),
                 },
                ],
            }])
    def test_discover_complex_default(self):
        """Discovery on the 'complex' fixture (no doctests) matches COMPLEX."""
        projroot, testroot = resolve_testroot('complex')
        expected = self.complex(projroot)
        expected[0]['tests'] = fix_test_order(expected[0]['tests'])
        if sys.version_info < (3,):
            # On Python 2 the source line reported for these decorated
            # unittest methods apparently differs, so the expected line
            # numbers are zeroed out before comparing.
            decorated = [
                './tests/test_unittest.py::MyTests::test_skipped',
                './tests/test_unittest.py::MyTests::test_maybe_skipped',
                './tests/test_unittest.py::MyTests::test_maybe_not_skipped',
                ]
            for testid in decorated:
                fix_source(expected[0]['tests'], testid, None, 0)
        out = run_adapter('discover', 'pytest',
                          '--rootdir', projroot,
                          testroot)
        result = json.loads(out)
        result[0]['tests'] = fix_test_order(result[0]['tests'])
        self.maxDiff = None
        self.assertEqual(result, expected)
    def test_discover_complex_doctest(self):
        """Discovery with --doctest-modules adds doctest entries to COMPLEX."""
        projroot, _ = resolve_testroot('complex')
        expected = self.complex(projroot)
        # add in doctests from test suite
        expected[0]['parents'].insert(3, {
            'id': fix_path('./tests/test_doctest.py'),
            'kind': 'file',
            'name': 'test_doctest.py',
            'parentid': fix_path('./tests'),
            })
        expected[0]['tests'].insert(2, {
            'id': fix_path('./tests/test_doctest.py::tests.test_doctest'),
            'name': 'tests.test_doctest',
            'source': fix_path('./tests/test_doctest.py:1'),
            'markers': [],
            'parentid': fix_path('./tests/test_doctest.py'),
            })
        # add in doctests from non-test module
        expected[0]['parents'].insert(0, {
            'id': fix_path('./mod.py'),
            'kind': 'file',
            'name': 'mod.py',
            'parentid': '.',
            })
        # Doctests found in mod.py are prepended to the expected tests.
        expected[0]['tests'] = [
            {'id': fix_path('./mod.py::mod'),
             'name': 'mod',
             'source': fix_path('./mod.py:1'),
             'markers': [],
             'parentid': fix_path('./mod.py'),
             },
            {'id': fix_path('./mod.py::mod.Spam'),
             'name': 'mod.Spam',
             'source': fix_path('./mod.py:33'),
             'markers': [],
             'parentid': fix_path('./mod.py'),
             },
            {'id': fix_path('./mod.py::mod.Spam.eggs'),
             'name': 'mod.Spam.eggs',
             'source': fix_path('./mod.py:43'),
             'markers': [],
             'parentid': fix_path('./mod.py'),
             },
            {'id': fix_path('./mod.py::mod.square'),
             'name': 'mod.square',
             'source': fix_path('./mod.py:18'),
             'markers': [],
             'parentid': fix_path('./mod.py'),
             },
            ] + expected[0]['tests']
        expected[0]['tests'] = fix_test_order(expected[0]['tests'])
        if sys.version_info < (3,):
            # Same Python-2 line-number adjustment as in
            # test_discover_complex_default.
            decorated = [
                './tests/test_unittest.py::MyTests::test_skipped',
                './tests/test_unittest.py::MyTests::test_maybe_skipped',
                './tests/test_unittest.py::MyTests::test_maybe_not_skipped',
                ]
            for testid in decorated:
                fix_source(expected[0]['tests'], testid, None, 0)
        out = run_adapter('discover', 'pytest',
                          '--rootdir', projroot,
                          '--doctest-modules',
                          projroot)
        result = json.loads(out)
        result[0]['tests'] = fix_test_order(result[0]['tests'])
        self.maxDiff = None
        self.assertEqual(result, expected)
    def test_discover_not_found(self):
        """Discovery on a project with no tests yields an empty result list."""
        projroot, testroot = resolve_testroot('notests')
        out = run_adapter('discover', 'pytest',
                          '--rootdir', projroot,
                          testroot)
        result = json.loads(out)
        self.maxDiff = None
        self.assertEqual(result, [])
        # TODO: Expect the following instead?
        #self.assertEqual(result, [{
        #    'root': projroot,
        #    'rootid': '.',
        #    'parents': [],
        #    'tests': [],
        #    }])
COMPLEX = {
'root': None,
'rootid': '.',
'parents': [
#
{'id': fix_path('./tests'),
'kind': 'folder',
'name': 'tests',
'parentid': '.',
},
# +++
{'id': fix_path('./tests/test_42-43.py'),
'kind': 'file',
'name': 'test_42-43.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_42.py'),
'kind': 'file',
'name': 'test_42.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_doctest.txt'),
'kind': 'file',
'name': 'test_doctest.txt',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_foo.py'),
'kind': 'file',
'name': 'test_foo.py',
'parentid': fix_path('./tests'),
},
# +++
{'id': fix_path('./tests/test_mixed.py'),
'kind': 'file',
'name': 'test_mixed.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests'),
'kind': 'suite',
'name': 'MyTests',
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::TestMySuite'),
'kind': 'suite',
'name': 'TestMySuite',
'parentid': fix_path('./tests/test_mixed.py'),
},
# +++
{'id': fix_path('./tests/test_pytest.py'),
'kind': 'file',
'name': 'test_pytest.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_pytest.py::TestEggs'),
'kind': 'suite',
'name': 'TestEggs',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam'),
'kind': 'suite',
'name': 'TestParam',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py::TestParam'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll'),
'kind': 'suite',
'name': 'TestParamAll',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
'kind': 'function',
'name': 'test_spam_13',
'parentid': fix_path('./tests/test_pytest.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam'),
'kind': 'suite',
'name': 'TestSpam',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam'),
'kind': 'suite',
'name': 'TestHam',
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs'),
'kind': 'suite',
'name': 'TestEggs',
'parentid': fix_path('./tests/test_pytest.py::TestSpam::TestHam'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param'),
'kind': 'function',
'name': 'test_fixture_param',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_01'),
'kind': 'function',
'name': 'test_param_01',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_11'),
'kind': 'function',
'name': 'test_param_11',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers'),
'kind': 'function',
'name': 'test_param_13_markers',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
'kind': 'function',
'name': 'test_param_13_repeat',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
'kind': 'function',
'name': 'test_param_13_skipped',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13'),
'kind': 'function',
'name': 'test_param_23_13',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises'),
'kind': 'function',
'name': 'test_param_23_raises',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33'),
'kind': 'function',
'name': 'test_param_33',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids'),
'kind': 'function',
'name': 'test_param_33_ids',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture'),
'kind': 'function',
'name': 'test_param_fixture',
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
'kind': 'function',
'name': 'test_param_mark_fixture',
'parentid': fix_path('./tests/test_pytest.py'),
},
# +++
{'id': fix_path('./tests/test_pytest_param.py'),
'kind': 'file',
'name': 'test_pytest_param.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll'),
'kind': 'suite',
'name': 'TestParamAll',
'parentid': fix_path('./tests/test_pytest_param.py'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
'kind': 'function',
'name': 'test_spam_13',
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13'),
'kind': 'function',
'name': 'test_param_13',
'parentid': fix_path('./tests/test_pytest_param.py'),
},
# +++
{'id': fix_path('./tests/test_unittest.py'),
'kind': 'file',
'name': 'test_unittest.py',
'parentid': fix_path('./tests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests'),
'kind': 'suite',
'name': 'MyTests',
'parentid': fix_path('./tests/test_unittest.py'),
},
{'id': fix_path('./tests/test_unittest.py::OtherTests'),
'kind': 'suite',
'name': 'OtherTests',
'parentid': fix_path('./tests/test_unittest.py'),
},
##
{'id': fix_path('./tests/v'),
'kind': 'folder',
'name': 'v',
'parentid': fix_path('./tests'),
},
## +++
{'id': fix_path('./tests/v/test_eggs.py'),
'kind': 'file',
'name': 'test_eggs.py',
'parentid': fix_path('./tests/v'),
},
{'id': fix_path('./tests/v/test_eggs.py::TestSimple'),
'kind': 'suite',
'name': 'TestSimple',
'parentid': fix_path('./tests/v/test_eggs.py'),
},
## +++
{'id': fix_path('./tests/v/test_ham.py'),
'kind': 'file',
'name': 'test_ham.py',
'parentid': fix_path('./tests/v'),
},
## +++
{'id': fix_path('./tests/v/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/v'),
},
##
{'id': fix_path('./tests/w'),
'kind': 'folder',
'name': 'w',
'parentid': fix_path('./tests'),
},
## +++
{'id': fix_path('./tests/w/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/w'),
},
## +++
{'id': fix_path('./tests/w/test_spam_ex.py'),
'kind': 'file',
'name': 'test_spam_ex.py',
'parentid': fix_path('./tests/w'),
},
##
{'id': fix_path('./tests/x'),
'kind': 'folder',
'name': 'x',
'parentid': fix_path('./tests'),
},
###
{'id': fix_path('./tests/x/y'),
'kind': 'folder',
'name': 'y',
'parentid': fix_path('./tests/x'),
},
####
{'id': fix_path('./tests/x/y/z'),
'kind': 'folder',
'name': 'z',
'parentid': fix_path('./tests/x/y'),
},
#####
{'id': fix_path('./tests/x/y/z/a'),
'kind': 'folder',
'name': 'a',
'parentid': fix_path('./tests/x/y/z'),
},
##### +++
{'id': fix_path('./tests/x/y/z/a/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/x/y/z/a'),
},
#####
{'id': fix_path('./tests/x/y/z/b'),
'kind': 'folder',
'name': 'b',
'parentid': fix_path('./tests/x/y/z'),
},
##### +++
{'id': fix_path('./tests/x/y/z/b/test_spam.py'),
'kind': 'file',
'name': 'test_spam.py',
'parentid': fix_path('./tests/x/y/z/b'),
},
#### +++
{'id': fix_path('./tests/x/y/z/test_ham.py'),
'kind': 'file',
'name': 'test_ham.py',
'parentid': fix_path('./tests/x/y/z'),
},
],
'tests': [
##########
{'id': fix_path('./tests/test_42-43.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_42-43.py:2'),
'markers': [],
'parentid': fix_path('./tests/test_42-43.py'),
},
#####
{'id': fix_path('./tests/test_42.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_42.py:2'),
'markers': [],
'parentid': fix_path('./tests/test_42.py'),
},
#####
{'id': fix_path('./tests/test_doctest.txt::test_doctest.txt'),
'name': 'test_doctest.txt',
'source': fix_path('./tests/test_doctest.txt:1'),
'markers': [],
'parentid': fix_path('./tests/test_doctest.txt'),
},
#####
{'id': fix_path('./tests/test_foo.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_foo.py:3'),
'markers': [],
'parentid': fix_path('./tests/test_foo.py'),
},
#####
{'id': fix_path('./tests/test_mixed.py::test_top_level'),
'name': 'test_top_level',
'source': fix_path('./tests/test_mixed.py:5'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_mixed.py:9'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_mixed.py'),
},
{'id': fix_path('./tests/test_mixed.py::TestMySuite::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_mixed.py:16'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py::TestMySuite'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_mixed.py:22'),
'markers': [],
'parentid': fix_path('./tests/test_mixed.py::MyTests'),
},
{'id': fix_path('./tests/test_mixed.py::MyTests::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_mixed.py:25'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_mixed.py::MyTests'),
},
#####
{'id': fix_path('./tests/test_pytest.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:6'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_failure'),
'name': 'test_failure',
'source': fix_path('./tests/test_pytest.py:10'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_runtime_skipped'),
'name': 'test_runtime_skipped',
'source': fix_path('./tests/test_pytest.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_runtime_failed'),
'name': 'test_runtime_failed',
'source': fix_path('./tests/test_pytest.py:18'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_raises'),
'name': 'test_raises',
'source': fix_path('./tests/test_pytest.py:22'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_pytest.py:26'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_maybe_skipped'),
'name': 'test_maybe_skipped',
'source': fix_path('./tests/test_pytest.py:31'),
'markers': ['skip-if'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_known_failure'),
'name': 'test_known_failure',
'source': fix_path('./tests/test_pytest.py:36'),
'markers': ['expected-failure'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_warned'),
'name': 'test_warned',
'source': fix_path('./tests/test_pytest.py:41'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_custom_marker'),
'name': 'test_custom_marker',
'source': fix_path('./tests/test_pytest.py:46'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_multiple_markers'),
'name': 'test_multiple_markers',
'source': fix_path('./tests/test_pytest.py:51'),
'markers': ['expected-failure', 'skip', 'skip-if'],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_1'),
'name': 'test_dynamic_1',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_2'),
'name': 'test_dynamic_2',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_dynamic_3'),
'name': 'test_dynamic_3',
'source': fix_path('./tests/test_pytest.py:62'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:70'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_pytest.py:73'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::TestSpam'),
},
{'id': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:81'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestSpam::TestHam::TestEggs'),
},
{'id': fix_path('./tests/test_pytest.py::TestEggs::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:93'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestEggs'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_01[]'),
'name': 'test_param_01[]',
'source': fix_path('./tests/test_pytest.py:103'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_01'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_11[x0]'),
'name': 'test_param_11[x0]',
'source': fix_path('./tests/test_pytest.py:108'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_11'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:113'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x0]'),
'name': 'test_param_13_repeat[x0]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x1]'),
'name': 'test_param_13_repeat[x1]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_repeat[x2]'),
'name': 'test_param_13_repeat[x2]',
'source': fix_path('./tests/test_pytest.py:118'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_repeat'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[1-1-1]'),
'name': 'test_param_33[1-1-1]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[3-4-5]'),
'name': 'test_param_33[3-4-5]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33[0-0-0]'),
'name': 'test_param_33[0-0-0]',
'source': fix_path('./tests/test_pytest.py:123'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v1]'),
'name': 'test_param_33_ids[v1]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v2]'),
'name': 'test_param_33_ids[v2]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_33_ids[v3]'),
'name': 'test_param_33_ids[v3]',
'source': fix_path('./tests/test_pytest.py:128'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_33_ids'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z0]'),
'name': 'test_param_23_13[1-1-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z1]'),
'name': 'test_param_23_13[1-1-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[1-1-z2]'),
'name': 'test_param_23_13[1-1-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z0]'),
'name': 'test_param_23_13[3-4-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z1]'),
'name': 'test_param_23_13[3-4-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[3-4-z2]'),
'name': 'test_param_23_13[3-4-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z0]'),
'name': 'test_param_23_13[0-0-z0]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z1]'),
'name': 'test_param_23_13[0-0-z1]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_13[0-0-z2]'),
'name': 'test_param_23_13[0-0-z2]',
'source': fix_path('./tests/test_pytest.py:134'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[x0]'),
'name': 'test_param_13_markers[x0]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[???]'),
'name': 'test_param_13_markers[???]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_markers[2]'),
'name': 'test_param_13_markers[2]',
'source': fix_path('./tests/test_pytest.py:140'),
'markers': ['expected-failure'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_markers'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x0]'),
'name': 'test_param_13_skipped[x0]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x1]'),
'name': 'test_param_13_skipped[x1]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_13_skipped[x2]'),
'name': 'test_param_13_skipped[x2]',
'source': fix_path('./tests/test_pytest.py:149'),
'markers': ['skip'],
'parentid': fix_path('./tests/test_pytest.py::test_param_13_skipped'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[1-None]'),
'name': 'test_param_23_raises[1-None]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[1.0-None]'),
'name': 'test_param_23_raises[1.0-None]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_23_raises[2-catch2]'),
'name': 'test_param_23_raises[2-catch2]',
'source': fix_path('./tests/test_pytest.py:155'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_23_raises'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_pytest.py:164'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParam::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:167'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParam::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest.py:175'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x0]'),
'name': 'test_spam_13[x0]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x1]'),
'name': 'test_spam_13[x1]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13[x2]'),
'name': 'test_spam_13[x2]',
'source': fix_path('./tests/test_pytest.py:178'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture'),
'name': 'test_fixture',
'source': fix_path('./tests/test_pytest.py:192'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_mark_fixture'),
'name': 'test_mark_fixture',
'source': fix_path('./tests/test_pytest.py:196'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x0]'),
'name': 'test_param_fixture[x0]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x1]'),
'name': 'test_param_fixture[x1]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_fixture[x2]'),
'name': 'test_param_fixture[x2]',
'source': fix_path('./tests/test_pytest.py:201'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x0]'),
'name': 'test_param_mark_fixture[x0]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x1]'),
'name': 'test_param_mark_fixture[x1]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_param_mark_fixture[x2]'),
'name': 'test_param_mark_fixture[x2]',
'source': fix_path('./tests/test_pytest.py:207'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_param_mark_fixture'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param[spam]'),
'name': 'test_fixture_param[spam]',
'source': fix_path('./tests/test_pytest.py:216'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_fixture_param'),
},
{'id': fix_path('./tests/test_pytest.py::test_fixture_param[eggs]'),
'name': 'test_fixture_param[eggs]',
'source': fix_path('./tests/test_pytest.py:216'),
'markers': [],
'parentid': fix_path('./tests/test_pytest.py::test_fixture_param'),
},
######
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:8'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x0]'),
'name': 'test_param_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x1]'),
'name': 'test_param_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13[x2]'),
'name': 'test_param_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:14'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_param_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x0]'),
'name': 'test_spam_13[x0]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x1]'),
'name': 'test_spam_13[x1]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
{'id': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13[x2]'),
'name': 'test_spam_13[x2]',
'source': fix_path('./tests/test_pytest_param.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_pytest_param.py::TestParamAll::test_spam_13'),
},
######
{'id': fix_path('./tests/test_unittest.py::MyTests::test_dynamic_'),
'name': 'test_dynamic_',
'source': fix_path('./tests/test_unittest.py:54'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_failure'),
'name': 'test_failure',
'source': fix_path('./tests/test_unittest.py:34'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_known_failure'),
'name': 'test_known_failure',
'source': fix_path('./tests/test_unittest.py:37'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_maybe_not_skipped'),
'name': 'test_maybe_not_skipped',
'source': fix_path('./tests/test_unittest.py:17'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_maybe_skipped'),
'name': 'test_maybe_skipped',
'source': fix_path('./tests/test_unittest.py:13'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_unittest.py:6'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_skipped'),
'name': 'test_skipped',
'source': fix_path('./tests/test_unittest.py:9'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_skipped_inside'),
'name': 'test_skipped_inside',
'source': fix_path('./tests/test_unittest.py:21'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_with_nested_subtests'),
'name': 'test_with_nested_subtests',
'source': fix_path('./tests/test_unittest.py:46'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::MyTests::test_with_subtests'),
'name': 'test_with_subtests',
'source': fix_path('./tests/test_unittest.py:41'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::MyTests'),
},
{'id': fix_path('./tests/test_unittest.py::OtherTests::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/test_unittest.py:61'),
'markers': [],
'parentid': fix_path('./tests/test_unittest.py::OtherTests'),
},
###########
{'id': fix_path('./tests/v/test_eggs.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_eggs.py'),
},
{'id': fix_path('./tests/v/test_eggs.py::TestSimple::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:8'),
'markers': [],
'parentid': fix_path('./tests/v/test_eggs.py::TestSimple'),
},
######
{'id': fix_path('./tests/v/test_ham.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_ham.py'),
},
{'id': fix_path('./tests/v/test_ham.py::test_not_hard'),
'name': 'test_not_hard',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_ham.py'),
},
######
{'id': fix_path('./tests/v/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/v/spam.py:2'),
'markers': [],
'parentid': fix_path('./tests/v/test_spam.py'),
},
{'id': fix_path('./tests/v/test_spam.py::test_simpler'),
'name': 'test_simpler',
'source': fix_path('./tests/v/test_spam.py:4'),
'markers': [],
'parentid': fix_path('./tests/v/test_spam.py'),
},
###########
{'id': fix_path('./tests/w/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/w/test_spam.py:4'),
'markers': [],
'parentid': fix_path('./tests/w/test_spam.py'),
},
{'id': fix_path('./tests/w/test_spam_ex.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/w/test_spam_ex.py:4'),
'markers': [],
'parentid': fix_path('./tests/w/test_spam_ex.py'),
},
###########
{'id': fix_path('./tests/x/y/z/test_ham.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/test_ham.py:2'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/test_ham.py'),
},
######
{'id': fix_path('./tests/x/y/z/a/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/a/test_spam.py:11'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/a/test_spam.py'),
},
{'id': fix_path('./tests/x/y/z/b/test_spam.py::test_simple'),
'name': 'test_simple',
'source': fix_path('./tests/x/y/z/b/test_spam.py:7'),
'markers': [],
'parentid': fix_path('./tests/x/y/z/b/test_spam.py'),
},
],
}
| [
"subprocess.check_output",
"json.loads",
"os.path.join",
"os.getcwd",
"os.path.dirname"
]
| [((285, 296), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (294, 296), False, 'import os\n'), ((366, 416), 'os.path.join', 'os.path.join', (['TESTING_TOOLS_ROOT', '"""run_adapter.py"""'], {}), "(TESTING_TOOLS_ROOT, 'run_adapter.py')\n", (378, 416), False, 'import os\n'), ((321, 346), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (336, 346), False, 'import os\n'), ((462, 490), 'os.path.join', 'os.path.join', (['DATA_DIR', 'name'], {}), '(DATA_DIR, name)\n', (474, 490), False, 'import os\n'), ((1357, 1419), 'subprocess.check_output', 'subprocess.check_output', (['argv'], {'universal_newlines': '(True)'}), '(argv, universal_newlines=True, **kwds)\n', (1380, 1419), False, 'import subprocess\n'), ((512, 543), 'os.path.join', 'os.path.join', (['projroot', '"""tests"""'], {}), "(projroot, 'tests')\n", (524, 543), False, 'import os\n'), ((2807, 2822), 'json.loads', 'json.loads', (['out'], {}), '(out)\n', (2817, 2822), False, 'import json\n'), ((4483, 4498), 'json.loads', 'json.loads', (['out'], {}), '(out)\n', (4493, 4498), False, 'import json\n'), ((7241, 7256), 'json.loads', 'json.loads', (['out'], {}), '(out)\n', (7251, 7256), False, 'import json\n'), ((7641, 7656), 'json.loads', 'json.loads', (['out'], {}), '(out)\n', (7651, 7656), False, 'import json\n')] |
'''
Wavelet kernel
slice allows kernel operation on feature subset
active_dims is iterable of feature dimensions to extract
input_dim must equal dimension defined by active_dims
'''
import numpy as np
import tensorflow as tf
from .. import util
from . import kernel
from .kernel_extras import *
class WaveletSlice(kernel.Kernel):
    """Wavelet kernel restricted to a feature subset (slice).

    `active_dims` is an iterable of feature dimensions to extract (via the
    `dim_slice` helpers); `input_dim` must equal the number of dimensions
    selected. With `input_scaling=True` every input dimension gets its own
    shift/scale parameter (ARD-style); otherwise a single scalar pair is
    shared across dimensions.
    """

    def __init__(self, input_dim, active_dims=None, shift=0, scale=0.01,
                 white=0.01, input_scaling=False):
        if input_scaling:
            # Per-dimension trainable shift and (log-)scale parameters.
            self.shift = tf.Variable(shift * tf.ones([input_dim]))
            self.scale = tf.Variable(scale * tf.ones([input_dim]))
        else:
            # Single shared shift/scale, broadcast over all dimensions.
            self.shift = tf.Variable([shift], dtype=tf.float32)
            self.scale = tf.Variable([scale], dtype=tf.float32)

        self.input_dim = input_dim
        self.active_dims = active_dims
        self.white = white  # white-noise (jitter) magnitude

    def kernel(self, points1, points2=None):
        """Return the kernel matrix K(points1, points2) plus white noise."""
        if points2 is None:
            points2 = points1
            # Symmetric case: diagonal jitter plus a small constant offset.
            white_noise = (self.white * util.eye(tf.shape(points1)[0]) +
                           0.1 * self.white * tf.ones([tf.shape(points1)[0], tf.shape(points1)[0]]))
        else:
            # Cross-covariance case: small constant offset only.
            white_noise = 0.01 * self.white * tf.ones([tf.shape(points1)[0], tf.shape(points2)[0]])

        points1, points2 = dim_slice(self, points1, points2)

        def h(x):
            # Mexican hat (Ricker) mother wavelet.
            # Zhang wavelet alternative: tf.cos(1.75*x)*tf.exp(-0.5*x**2)
            return (1 - x**2) * tf.exp(-0.5 * x**2)

        # Product of per-dimension wavelet responses, then outer product.
        kern1 = tf.reduce_prod(h((points1 - self.shift) / tf.exp(self.scale)), axis=1)
        kern2 = tf.reduce_prod(h((points2 - self.shift) / tf.exp(self.scale)), axis=1)
        kern = tf.einsum('i,j->ij', kern1, kern2)
        return kern + white_noise

    def diag_kernel(self, points):
        """Return the diagonal of K(points, points) plus white noise."""
        def h(x):
            # BUG FIX: use the same mother wavelet as kernel() (Mexican hat).
            # The original used the Zhang wavelet (cos(1.75*x)*exp(-0.5*x**2))
            # here, so diag_kernel did not match the diagonal of the full
            # kernel matrix produced by kernel().
            return (1 - x**2) * tf.exp(-0.5 * x**2)

        points = dim_slice_diag(self, points)
        kern = tf.reduce_prod(h((points - self.shift) / tf.exp(self.scale)), axis=1) ** 2
        return kern + self.white

    def get_params(self):
        """Return the trainable variables of this kernel."""
        return [self.shift, self.scale]
| [
"tensorflow.reduce_prod",
"tensorflow.shape",
"tensorflow.Variable",
"tensorflow.ones",
"tensorflow.einsum",
"tensorflow.exp",
"tensorflow.cos"
]
| [((1704, 1738), 'tensorflow.einsum', 'tf.einsum', (['"""i,j->ij"""', 'kern1', 'kern2'], {}), "('i,j->ij', kern1, kern2)\n", (1713, 1738), True, 'import tensorflow as tf\n'), ((660, 698), 'tensorflow.Variable', 'tf.Variable', (['[shift]'], {'dtype': 'tf.float32'}), '([shift], dtype=tf.float32)\n', (671, 698), True, 'import tensorflow as tf\n'), ((724, 762), 'tensorflow.Variable', 'tf.Variable', (['[scale]'], {'dtype': 'tf.float32'}), '([scale], dtype=tf.float32)\n', (735, 762), True, 'import tensorflow as tf\n'), ((1628, 1657), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['kern1'], {'axis': '(1)'}), '(kern1, axis=1)\n', (1642, 1657), True, 'import tensorflow as tf\n'), ((1659, 1688), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['kern2'], {'axis': '(1)'}), '(kern2, axis=1)\n', (1673, 1688), True, 'import tensorflow as tf\n'), ((1472, 1493), 'tensorflow.exp', 'tf.exp', (['(-0.5 * x ** 2)'], {}), '(-0.5 * x ** 2)\n', (1478, 1493), True, 'import tensorflow as tf\n'), ((1874, 1890), 'tensorflow.cos', 'tf.cos', (['(1.75 * x)'], {}), '(1.75 * x)\n', (1880, 1890), True, 'import tensorflow as tf\n'), ((1889, 1910), 'tensorflow.exp', 'tf.exp', (['(-0.5 * x ** 2)'], {}), '(-0.5 * x ** 2)\n', (1895, 1910), True, 'import tensorflow as tf\n'), ((532, 552), 'tensorflow.ones', 'tf.ones', (['[input_dim]'], {}), '([input_dim])\n', (539, 552), True, 'import tensorflow as tf\n'), ((599, 619), 'tensorflow.ones', 'tf.ones', (['[input_dim]'], {}), '([input_dim])\n', (606, 619), True, 'import tensorflow as tf\n'), ((1539, 1557), 'tensorflow.exp', 'tf.exp', (['self.scale'], {}), '(self.scale)\n', (1545, 1557), True, 'import tensorflow as tf\n'), ((1585, 1603), 'tensorflow.exp', 'tf.exp', (['self.scale'], {}), '(self.scale)\n', (1591, 1603), True, 'import tensorflow as tf\n'), ((2089, 2107), 'tensorflow.exp', 'tf.exp', (['self.scale'], {}), '(self.scale)\n', (2095, 2107), True, 'import tensorflow as tf\n'), ((1017, 1034), 'tensorflow.shape', 'tf.shape', (['points1'], {}), '(points1)\n', 
(1025, 1034), True, 'import tensorflow as tf\n'), ((1202, 1219), 'tensorflow.shape', 'tf.shape', (['points1'], {}), '(points1)\n', (1210, 1219), True, 'import tensorflow as tf\n'), ((1224, 1241), 'tensorflow.shape', 'tf.shape', (['points2'], {}), '(points2)\n', (1232, 1241), True, 'import tensorflow as tf\n'), ((1086, 1103), 'tensorflow.shape', 'tf.shape', (['points1'], {}), '(points1)\n', (1094, 1103), True, 'import tensorflow as tf\n'), ((1108, 1125), 'tensorflow.shape', 'tf.shape', (['points1'], {}), '(points1)\n', (1116, 1125), True, 'import tensorflow as tf\n')] |
#!/usr/bin/python3
#program to parse png images and change images
# cmd: python3 transform.py
# you must have local input/ and output/ directories
#
# name: <NAME>
# date: 12/27/20
# cmdline: python transform.py cmd show image='city.png' --ulx=1 --uly=2 --brx=0 --bry=9
# python transform.py show city.png
# python transform.py blur city.png
from image import Image
import numpy as np
import time, os, argparse, string
#from tkinter import *
import imghdr
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
def adjust_brightness(image, factor):
    """Return a copy of *image* with every channel value scaled by *factor*.

    factor > 1 brightens the image, 0 < factor < 1 darkens it.
    """
    x_pixels, y_pixels, num_channels = image.array.shape
    new_im = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels)
    # Vectorized: scale the whole array in one numpy operation instead of a
    # triple python loop -- same values, dramatically faster (the original
    # code carried this form as a comment labelled "this is faster").
    new_im.array = image.array * factor
    return new_im
def adjust_contrast(image, factor, mid=0.5):
    """Return a copy of *image* with contrast adjusted around midpoint *mid*.

    Each value's distance from *mid* is scaled by *factor*: factor > 1
    increases contrast, factor < 1 decreases it.
    """
    x_pixels, y_pixels, num_channels = image.array.shape
    new_im = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels)
    # Vectorized form of the original per-pixel loop (identical result,
    # much faster -- the original carried this line as a comment).
    new_im.array = (image.array - mid) * factor + mid
    return new_im
def blur(image, k_size):
    """Load the image file named *image* and return a box-blurred copy.

    *k_size* is the side length of the square averaging window: k_size=3
    averages each pixel with its 8 immediate neighbours (including
    diagonals).
    """
    im = Image(filename=image)
    x_pixels, y_pixels, num_channels = im.array.shape
    new_im = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels)
    neighbor_range = k_size // 2  # neighbours considered on each side
    for x in range(x_pixels):
        for y in range(y_pixels):
            for c in range(num_channels):
                total = 0
                for x_i in range(max(0, x - neighbor_range), min(new_im.x_pixels - 1, x + neighbor_range) + 1):
                    for y_i in range(max(0, y - neighbor_range), min(new_im.y_pixels - 1, y + neighbor_range) + 1):
                        # BUG FIX: read from the loaded image `im`. The
                        # original indexed `image`, which in this function is
                        # the filename string, so the loop raised
                        # AttributeError at runtime.
                        total += im.array[x_i, y_i, c]
                # Average over the full kernel area (edge pixels divide by
                # the same k_size**2 as interior pixels -- original behavior).
                new_im.array[x, y, c] = total / (k_size ** 2)
    return new_im
def apply_kernel(image, kernel):
    """Convolve *image* with a square 2D *kernel* and return a new Image.

    Example -- the sobel x kernel (horizontal edge detector):
        [1 0 -1]
        [2 0 -2]
        [1 0 -1]
    """
    x_pixels, y_pixels, num_channels = image.array.shape  # (R, G, B) channels
    result = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels)
    reach = kernel.shape[0] // 2  # neighbours on each side (1 for a 3x3 kernel)
    for x in range(x_pixels):
        x_lo, x_hi = max(0, x - reach), min(x_pixels - 1, x + reach)
        for y in range(y_pixels):
            y_lo, y_hi = max(0, y - reach), min(y_pixels - 1, y + reach)
            for c in range(num_channels):
                acc = 0
                for xi in range(x_lo, x_hi + 1):
                    for yi in range(y_lo, y_hi + 1):
                        # Translate image coordinates into kernel coordinates.
                        weight = kernel[xi + reach - x, yi + reach - y]
                        acc += image.array[xi, yi, c] * weight
                result.array[x, y, c] = acc
    return result
def combine_images(image1, image2):
    """Combine two same-sized images pixelwise via sqrt(a**2 + b**2).

    Used e.g. to merge sobel-x and sobel-y edge maps into one edge image.
    """
    x_pixels, y_pixels, num_channels = image1.array.shape
    merged = Image(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels)
    for x in range(x_pixels):
        for y in range(y_pixels):
            for c in range(num_channels):
                a = image1.array[x, y, c]
                b = image2.array[x, y, c]
                merged.array[x, y, c] = (a ** 2 + b ** 2) ** 0.5
    return merged
def show_image(in_image):
    """Display the image file *in_image*, which is looked up under ./input/."""
    img = mpimg.imread('input/' + in_image)
    plt.imshow(img)
    plt.show()
def check_env(in_image):
    """Validate the runtime environment, terminating the process on failure.

    Requires a writeable ./output/ directory, an existing ./input/
    directory, and -- when *in_image* is given -- an existing PNG file
    ./input/<in_image>.
    """
    out_dir = './output/'
    if not os.path.isdir(out_dir):
        print('local ./output dir must exist, cannot continue...')
        print(quit)
        quit()
    if not os.access(out_dir, os.W_OK):
        print('local ./output dir must be writeable, cannot continue...')
        print(quit)
        quit()
    in_dir = './input/'
    if not os.path.isdir(in_dir):
        print('local ./input dir must exist, cannot continue...')
        print(quit)
        quit()
    if in_image:
        thefile = 'input/' + in_image
        print('file path: ' + thefile)
        if not os.path.isfile(thefile):
            print(f'local ./input file {in_image} must exist, cannot continue...')
            print(quit)
            quit()
        # NOTE(review): imghdr is deprecated (removed in Python 3.13) --
        # consider sniffing the PNG magic bytes directly when upgrading.
        if imghdr.what(thefile) != 'png':
            print('wrong image file type, cannot continue...')
            print(quit)
            quit()
def cmd():
    """Placeholder command handler; currently only announces itself."""
    print("routine cmd")
def arg_init():
    """Build the CLI parser, parse sys.argv, echo the image name, and return the args.

    Positional (required) arguments have no leading dashes; optional flags
    use the -- prefix.
    """
    argp = argparse.ArgumentParser(description='Process an image.')
    # Required positionals.
    argp.add_argument("cmd", help="command to this program", type=str)
    argp.add_argument("image", help="input image name for the command", type=str)
    # Optional crop-coordinate flags.
    argp.add_argument("--ulx", action='store_true', help="upperleft x in image")
    argp.add_argument("--uly", action='store_true', help="upperleft y in image")
    argp.add_argument("--brx", action='store_true', help="bottomright x in image")
    argp.add_argument("--bry", action='store_true', help="bottomright y in image")
    # Verbosity flags are mutually exclusive: verbose or quiet, not both.
    verbosity = argp.add_mutually_exclusive_group()
    verbosity.add_argument('--v', action='store_true', help="add more text output")
    verbosity.add_argument('--q', action='store_true', help="minimal output")
    parsed = argp.parse_args()
    print(parsed.image)
    #if args.cmd != "show" and args.cmd != "blur":
    return parsed
#def show_image(filename):
if __name__ == '__main__':
    args = arg_init()
    check_env(args.image)
    lake = Image(filename='lake.png')
    city = Image(filename='city.png')
    start_time = time.time()
    # Worked examples from development, left disabled for reference:
    # brightened_im = adjust_brightness(lake, 1.7)
    # brightened_im.write_image('brightened.png')
    # darkened_im = adjust_brightness(lake, 0.3)
    # darkened_im.write_image('darkened.png')
    # incr_contrast = adjust_contrast(lake, 2, 0.5)
    # incr_contrast.write_image('incr_contrast.png')
    # decr_contrast = adjust_contrast(lake, 0.5, 0.5)
    # decr_contrast.write_image('decr_contrast.png')
    # blur_3 = blur(city, 3)
    # blur_3.write_image('blur_k3.png')
    # blur_15 = blur(city, 15)
    # blur_15.write_image('blur_k15.png')
    # sobel_x = apply_kernel(city, np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]))
    # sobel_x.write_image('edge_x.png')
    # sobel_y = apply_kernel(city, np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]))
    # sobel_y.write_image('edge_y.png')
    # sobel_xy = combine_images(sobel_x, sobel_y)
    # sobel_xy.write_image('edge_xy.png')
    if args.cmd == "show" and args.image:
        show_image(args.image)
    if args.cmd == "blur" and args.image:
        blur_15 = blur(args.image, 15)
        blur_15.write_image(args.image + 'blur_k15.png')
        # BUG FIX: the original called show_image(blur_k15.png), a NameError
        # (bare identifier instead of a filename string). Show the file that
        # was just written instead.
        # NOTE(review): show_image() reads from ./input/ while write_image()
        # presumably targets ./output/ -- confirm the path before relying on
        # this preview.
        show_image(args.image + 'blur_k15.png')
    if args.v:
        print(f'total execution duration: {time.time() - start_time}s')
| [
"matplotlib.pyplot.imshow",
"argparse.ArgumentParser",
"matplotlib.image.imread",
"os.access",
"os.path.isfile",
"os.path.isdir",
"imghdr.what",
"time.time",
"image.Image",
"matplotlib.pyplot.show"
]
| [((679, 749), 'image.Image', 'Image', ([], {'x_pixels': 'x_pixels', 'y_pixels': 'y_pixels', 'num_channels': 'num_channels'}), '(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels)\n', (684, 749), False, 'from image import Image\n'), ((1212, 1282), 'image.Image', 'Image', ([], {'x_pixels': 'x_pixels', 'y_pixels': 'y_pixels', 'num_channels': 'num_channels'}), '(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels)\n', (1217, 1282), False, 'from image import Image\n'), ((1795, 1816), 'image.Image', 'Image', ([], {'filename': 'image'}), '(filename=image)\n', (1800, 1816), False, 'from image import Image\n'), ((1882, 1952), 'image.Image', 'Image', ([], {'x_pixels': 'x_pixels', 'y_pixels': 'y_pixels', 'num_channels': 'num_channels'}), '(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels)\n', (1887, 1952), False, 'from image import Image\n'), ((2898, 2968), 'image.Image', 'Image', ([], {'x_pixels': 'x_pixels', 'y_pixels': 'y_pixels', 'num_channels': 'num_channels'}), '(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels)\n', (2903, 2968), False, 'from image import Image\n'), ((4131, 4201), 'image.Image', 'Image', ([], {'x_pixels': 'x_pixels', 'y_pixels': 'y_pixels', 'num_channels': 'num_channels'}), '(x_pixels=x_pixels, y_pixels=y_pixels, num_channels=num_channels)\n', (4136, 4201), False, 'from image import Image\n'), ((4518, 4547), 'matplotlib.image.imread', 'mpimg.imread', (['(path + in_image)'], {}), '(path + in_image)\n', (4530, 4547), True, 'import matplotlib.image as mpimg\n'), ((4558, 4573), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4568, 4573), True, 'import matplotlib.pyplot as plt\n'), ((4576, 4586), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4584, 4586), True, 'import matplotlib.pyplot as plt\n'), ((4751, 4770), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (4764, 4770), False, 'import time, os, argparse, string\n'), ((4920, 4944), 'os.access', 
'os.access', (['path', 'os.W_OK'], {}), '(path, os.W_OK)\n', (4929, 4944), False, 'import time, os, argparse, string\n'), ((5091, 5110), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (5104, 5110), False, 'import time, os, argparse, string\n'), ((5780, 5836), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process an image."""'}), "(description='Process an image.')\n", (5803, 5836), False, 'import time, os, argparse, string\n'), ((6736, 6762), 'image.Image', 'Image', ([], {'filename': '"""lake.png"""'}), "(filename='lake.png')\n", (6741, 6762), False, 'from image import Image\n'), ((6774, 6800), 'image.Image', 'Image', ([], {'filename': '"""city.png"""'}), "(filename='city.png')\n", (6779, 6800), False, 'from image import Image\n'), ((6816, 6827), 'time.time', 'time.time', ([], {}), '()\n', (6825, 6827), False, 'import time, os, argparse, string\n'), ((5337, 5360), 'os.path.isfile', 'os.path.isfile', (['thefile'], {}), '(thefile)\n', (5351, 5360), False, 'import time, os, argparse, string\n'), ((5496, 5516), 'imghdr.what', 'imghdr.what', (['thefile'], {}), '(thefile)\n', (5507, 5516), False, 'import imghdr\n'), ((8048, 8059), 'time.time', 'time.time', ([], {}), '()\n', (8057, 8059), False, 'import time, os, argparse, string\n')] |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright 2019-2020 ARM Limited or its affiliates
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import sys, argparse, os
from suit_tool import __version__
from suit_tool import keygen
from suit_tool import get_pubkey
import json
import re
def str_to_component(s):
    """Parse a comma-separated ``key=value`` component description into a dict.

    Recognized input keys: ``file`` (string), ``inst`` (mapped to
    ``install-id``, a list of strings), and ``uri`` (string). Commas only
    split entries when followed by another ``key=`` pair, so values may
    contain commas.
    """
    # Map each accepted input key to its output key and value converter.
    # NOTE(review): 'inst' values are parsed with eval() on command-line
    # input; consider ast.literal_eval if the input is not fully trusted.
    converters = {
        'file': ('file', lambda x: str(x.strip('"'))),
        # 'desc': ('component-description', lambda x: str(x.strip('"'))),
        'inst': ('install-id', lambda x: [str(y) for y in eval(x)]),
        'uri': ('uri', lambda x: str(x.strip('"'))),
    }
    component = {}
    for entry in re.split(r''',\s*(?=["']?[a-zA-Z0-9_-]+["']?=)''', s):
        key, value = re.split(r'=', entry, maxsplit=1)
        out_key, convert = converters[key]
        component[out_key] = convert(value)
    return component
class MainArgumentParser(object):
    """Command-line front end for suit-tool.

    Builds an argparse parser with one sub-command per manifest operation
    (create, sign, parse, pubkey, keygen, sever) and exposes the parsed
    options via :meth:`parse_args`.
    """

    def __init__(self):
        # Build the parser once; parse_args() reuses it.
        self.parser = self._make_parser()

    def _make_parser(self):
        """Construct and return the top-level parser plus all sub-command parsers."""
        parser = argparse.ArgumentParser(description = 'Create or transform a manifest.'
            ' Use {} [command] -h for help on each command.'.format(sys.argv[0]))

        # Add all top-level commands
        parser.add_argument('-l', '--log-level', choices=['debug','info','warning','exception'], default='info',
            help='Set the verbosity level of console output.')
        parser.add_argument('--version', action='version', version=__version__,
            help='display the version'
        )
        # Every invocation must name exactly one sub-command.
        subparsers = parser.add_subparsers(dest="action")
        subparsers.required = True

        # 'create': build a new manifest from a JSON description file.
        create_parser = subparsers.add_parser('create', help='Create a new manifest')
        # create_parser.add_argument('-v', '--manifest-version', choices=['1'], default='1')
        create_parser.add_argument('-i', '--input-file', metavar='FILE', type=argparse.FileType('r'),
            help='An input file describing the update. The file must be formated as JSON. The overal structure is described in README.')
        create_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)
        create_parser.add_argument('-f', '--format', metavar='FMT', choices=['suit', 'suit-debug', 'json'], default='suit')
        create_parser.add_argument('-s', '--severable', action='store_true', help='Convert large elements to severable fields.')
        create_parser.add_argument('-c', '--add-component', action='append', type=str_to_component, dest='components', default=[])

        # 'sign': sign an existing manifest with a private key.
        sign_parser = subparsers.add_parser('sign', help='Sign a manifest')
        sign_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
        sign_parser.add_argument('-k', '--private-key', metavar='FILE', type=argparse.FileType('rb'), required=True)
        sign_parser.add_argument('-i', '--key-id', metavar='ID', type=str)
        sign_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)

        # 'parse': decode a manifest, optionally emitting JSON.
        parse_parser = subparsers.add_parser('parse', help='Parse a manifest')
        parse_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
        parse_parser.add_argument('-j', '--json-output', default=False, action='store_true', dest='json')

        # 'pubkey': derive the public key from a private key file.
        get_pubkey_parser = subparsers.add_parser('pubkey', help='Get the public key for a supplied private key.')
        get_pubkey_parser.add_argument('-k', '--private-key', metavar='FILE', type=argparse.FileType('rb'), required=True)
        get_pubkey_parser.add_argument('-f', '--output-format', choices=get_pubkey.OutputFormaters.keys(), default='pem')
        get_pubkey_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), default=sys.stdout)

        # 'keygen': generate a (development-only) signing key.
        keygen_parser = subparsers.add_parser('keygen', help='Create a signing key. Not for production use')
        keygen_parser.add_argument('-t', '--type', choices=keygen.KeyGenerators.keys(),
            default='secp256r1', help='The type of the key to generate')
        keygen_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), default=sys.stdout)
        keygen_parser.add_argument('-f', '--output-format', choices=keygen.OutputFormaters.keys(), default='pem')
        keygen_parser.add_argument('-l', '--levels', help='The number of hss-lms levels', type=int, default=2)

        # 'sever': strip severable elements from a manifest.
        sever_parser = subparsers.add_parser('sever', help='Remove one or more severable elements from the manifest, if present.')
        sever_parser.add_argument('-m', '--manifest', metavar='FILE', type=argparse.FileType('rb'), required=True)
        sever_parser.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('wb'), required=True)
        sever_parser.add_argument('-e', '--element', action='append', type=str, dest='elements', default=[])
        sever_parser.add_argument('-a', '--all', action='store_true', default=False)

        return parser

    def parse_args(self, args=None):
        """Parse *args* (defaults to sys.argv) and store the result on self.

        Returns self so the call can be chained.
        """
        self.options = self.parser.parse_args(args)
        return self
| [
"re.split",
"argparse.FileType",
"suit_tool.keygen.KeyGenerators.keys",
"suit_tool.keygen.OutputFormaters.keys",
"suit_tool.get_pubkey.OutputFormaters.keys"
]
| [((1319, 1347), 're.split', 're.split', (['"""="""', 'e'], {'maxsplit': '(1)'}), "('=', e, maxsplit=1)\n", (1327, 1347), False, 'import re\n'), ((2418, 2440), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (2435, 2440), False, 'import sys, argparse, os\n'), ((2658, 2681), 'argparse.FileType', 'argparse.FileType', (['"""wb"""'], {}), "('wb')\n", (2675, 2681), False, 'import sys, argparse, os\n'), ((3234, 3257), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (3251, 3257), False, 'import sys, argparse, os\n'), ((3351, 3374), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (3368, 3374), False, 'import sys, argparse, os\n'), ((3543, 3566), 'argparse.FileType', 'argparse.FileType', (['"""wb"""'], {}), "('wb')\n", (3560, 3566), False, 'import sys, argparse, os\n'), ((3739, 3762), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (3756, 3762), False, 'import sys, argparse, os\n'), ((4085, 4108), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (4102, 4108), False, 'import sys, argparse, os\n'), ((4197, 4230), 'suit_tool.get_pubkey.OutputFormaters.keys', 'get_pubkey.OutputFormaters.keys', ([], {}), '()\n', (4228, 4230), False, 'from suit_tool import get_pubkey\n'), ((4330, 4353), 'argparse.FileType', 'argparse.FileType', (['"""wb"""'], {}), "('wb')\n", (4347, 4353), False, 'import sys, argparse, os\n'), ((4545, 4572), 'suit_tool.keygen.KeyGenerators.keys', 'keygen.KeyGenerators.keys', ([], {}), '()\n', (4570, 4572), False, 'from suit_tool import keygen\n'), ((4726, 4749), 'argparse.FileType', 'argparse.FileType', (['"""wb"""'], {}), "('wb')\n", (4743, 4749), False, 'import sys, argparse, os\n'), ((4839, 4868), 'suit_tool.keygen.OutputFormaters.keys', 'keygen.OutputFormaters.keys', ([], {}), '()\n', (4866, 4868), False, 'from suit_tool import keygen\n'), ((5203, 5226), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (5220, 
5226), False, 'import sys, argparse, os\n'), ((5321, 5344), 'argparse.FileType', 'argparse.FileType', (['"""wb"""'], {}), "('wb')\n", (5338, 5344), False, 'import sys, argparse, os\n'), ((1357, 1408), 're.split', 're.split', (['""",\\\\s*(?=["\']?[a-zA-Z0-9_-]+["\']?=)"""', 's'], {}), '(\',\\\\s*(?=["\\\']?[a-zA-Z0-9_-]+["\\\']?=)\', s)\n', (1365, 1408), False, 'import re\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 11:48:59 2020
@author: mazal
"""
"""
=========================================
Support functions of pydicom (Not sourced)
=========================================
Purpose: Create support functions for the pydicom project
"""
"""
Test mode 1 | Basics
testMode = True
reportMode = False
Test mode 2 | Function Report
testMode = False
reportMode = True
Commissioning mode
testMode = False
reportMode = False
"""
testMode = False
reportMode = False
"""
=========================================
Function 1: Aleatory Sampling
=========================================
Purpose: Build a random (aleatory) sample from a Kaggle competition train dataset, given a sampling size
Raw code reference (see Tester.py): Test 5
"""
def trainDatasetSampler(samplingSize,testMode,reportMode):
    """Build an aleatory (random) sample of the Kaggle OSIC train dataset.

    Patient IDs are drawn at random until the accumulated share of DICOM
    files reaches ``samplingSize`` percent of the train population. The
    selected ID trees are copied into the sampling destination, and the
    complete test dataset is copied alongside. Two bookkeeping CSVs
    ('populationDataset.csv', 'samplingDataset.csv') are written to the
    outcome folder.

    Parameters
    ----------
    samplingSize : numeric
        Target sample size as a percentage (0-100) of the train population,
        measured in DICOM-file count.
    testMode : bool
        If True, print progress while the ID trees are copied.
    reportMode : bool
        If True (and testMode is False), print a summary report.

    Returns
    -------
    tuple
        (Population_DataFrame, samplingDataset_DataFrame, testMode, reportMode)
    """
    # BUG FIX: the original implementation overwrote the samplingSize argument
    # with a hard-coded 5, so the parameter was always ignored; it is honored now.
    # Phase 1: source paths of the train/test data and destination paths of the sampling
    import os
    import pandas as pd
    path_source = 'Y:/Kaggle_OSIC/2-Data/train/'
    path_source_test = 'Y:/Kaggle_OSIC/2-Data/test/'
    path_destination = 'Y:/Kaggle_OSIC/4-Data (Sampling)/train/'
    path_destination_test = 'Y:/Kaggle_OSIC/4-Data (Sampling)/test/'
    path_destination_outcome = 'Y:/Kaggle_OSIC/4-Data (Sampling)/outcome/'
    # Phase 2: build the population dataset: one row per ID with its DICOM file
    # list, file count, and share (%) of the whole train population.
    os.chdir(path_source)
    ID_list = os.listdir(path_source)
    DICOMFile_list = []
    DICOMFileNumber_list = []
    for ID_item in ID_list:
        path_ID = path_source + ID_item + '/'
        DICOMFile_list_unitary = os.listdir(path_ID)
        DICOMFile_list.append(DICOMFile_list_unitary)
        DICOMFileNumber_list.append(len(DICOMFile_list_unitary))
    Population_Dictionary = {'ID':ID_list,'NumberDicomFiles':DICOMFileNumber_list,'DicomFIles':DICOMFile_list}
    Population_DataFrame = pd.DataFrame(data = Population_Dictionary)
    TotalNumberDicomFiles = sum(Population_DataFrame.NumberDicomFiles)
    DICOMFilePercentage_list = [round(n / TotalNumberDicomFiles * 100, 6)
                                for n in Population_DataFrame.NumberDicomFiles]
    Population_Percentage_DataFrame = pd.DataFrame(data={'Percentage':DICOMFilePercentage_list})
    Population_DataFrame = pd.concat([Population_DataFrame, Population_Percentage_DataFrame],axis=1, sort=False)
    filename_population = 'populationDataset.csv'
    Population_DataFrame.to_csv(path_destination_outcome+filename_population)
    # Phase 3: randomly draw distinct IDs until the accumulated percentage
    # reaches the requested sampling size.
    import random
    Population_DataFrame_IndexToSample=[]
    Population_DataFrame_IDToSample=[]
    Population_DataFrame_PercentageToSample=[]
    samplingSizeGoal = 0
    while (samplingSizeGoal <= samplingSize):
        randomNumberTermination = len(Population_DataFrame.ID)
        randomNumber = random.randrange(0,randomNumberTermination,1)
        if (randomNumber not in Population_DataFrame_IndexToSample):
            Population_DataFrame_IndexToSample.append(randomNumber)
            Population_DataFrame_IDToSample.append(Population_DataFrame.ID[randomNumber])
            Percentage_unitary = Population_DataFrame.Percentage[randomNumber]
            Population_DataFrame_PercentageToSample.append(Percentage_unitary)
            samplingSizeGoal = samplingSizeGoal + Percentage_unitary
    samplingDataset_Dictionary = {'Index':Population_DataFrame_IndexToSample,'ID':Population_DataFrame_IDToSample,'Percentage':Population_DataFrame_PercentageToSample}
    samplingDataset_DataFrame = pd.DataFrame(data=samplingDataset_Dictionary)
    filename_sampling = 'samplingDataset.csv'
    samplingDataset_DataFrame.to_csv(path_destination_outcome+filename_sampling)
    # Phase 4: copy the sampled ID trees into the sampling train destination (tree-copy task).
    from distutils.dir_util import create_tree
    from distutils.dir_util import remove_tree
    from distutils.dir_util import copy_tree
    remove_tree(path_destination)
    create_tree(path_destination,[])
    if testMode == True:
        print("=========================================")
        print("Building the Sampling Dataset given the Train Dataset of Kaggle for competition")
        print("=========================================")
    for k in Population_DataFrame_IDToSample:
        path_source_unitary = path_source + k + '/'
        path_destination_unitary = path_destination + k + '/'
        create_tree(path_destination_unitary,[])
        copy_tree(path_source_unitary,path_destination_unitary)
        if testMode == True: print("ID tree copied: ",k)
    # Phase 5: copy the complete test dataset (tree-copy task).
    remove_tree(path_destination_test)
    create_tree(path_destination_test,[])
    if testMode == True:
        print("=========================================")
        print("Building the Test Dataset given the Test Dataset of Kaggle for competition")
        print("=========================================")
    IDList_test = os.listdir(path_source_test)
    for l in IDList_test:
        # BUG FIX: copy the test trees from the *test* source directory; the
        # original code listed path_source_test but copied from the train source.
        path_source_unitary = path_source_test + l + '/'
        path_destination_unitary = path_destination_test + l + '/'
        create_tree(path_destination_unitary,[])
        copy_tree(path_source_unitary,path_destination_unitary)
        if testMode == True: print("ID tree copied: ",l)
    # Report mode: print a summary of inputs, outputs and outcomes.
    if (testMode == False and reportMode == True):
        from datetime import date
        reportDate = date.today()
        print("=========================================")
        print("Function Report | Date:",reportDate.year,'/',reportDate.month,'/',reportDate.day,'/' )
        print("=========================================")
        print("Function: trainDatasetSampler(samplingSize,testMode)")
        print("=========================================")
        print("(1) Inputs")
        print("=========================================")
        print("-Sampling Size :", samplingSize, "%")
        print("-Test Mode : False")
        print("=========================================")
        print("(2) Outputs")
        print("=========================================")
        print("-Type of sample: Aleatory based on IDs")
        print("-Train dataset percentage to sample (base): ", round(abs(samplingSize),6),"%")
        print("-Train dataset percentage to sample (adjustment): ", round(abs(samplingSizeGoal-samplingSize),6),"%")
        print("-Train dataset percentage to sample (fitted): ", round(samplingSizeGoal,6),"%")
        print("-Population of Train dataset (just information) available in file: ", filename_population)
        print("-Sample of Train dataset (just information) available in file: ", filename_sampling)
        print("=========================================")
        print("(2) Outcomes:")
        print("=========================================")
        print("Being the outcome expressed under the variable result, outcomes are as follows:")
        print("result[0] -> Dataframe for Population")
        print("result[1] -> Dataframe for Sample")
        print("result[2] -> Test Mode")
        print("result[3] -> Rerport Mode")
        print("=========================================")
    return Population_DataFrame, samplingDataset_DataFrame, testMode, reportMode
# Self-test harness for Function 1 (runs only when testMode is True).
if testMode == True:
    samplingSize = 5
    resultFunction1 = trainDatasetSampler(samplingSize,testMode,reportMode)
    print("=========================================")
    print("Population dataset:")
    print("=========================================")
    print(resultFunction1[0])
    print("=========================================")
    # BUG FIX: this header mislabeled the sampling dataset as "Population dataset:";
    # per the function report, result[1] is the Dataframe for the Sample.
    print("Sampling dataset:")
    print("=========================================")
    print(resultFunction1[1])
    print("=========================================")
    print("Test result Function 1: Success")
    print("=========================================")
"""
=========================================
Function 2: Submission Builder
=========================================
Purpose: Build a submission CSV file
Raw code reference (see Tester.py): Test 8
"""
def SubmissionBuilder(ProductType,filename,testMode):
    """Build a Kaggle submission CSV file from a raw outcome file.

    Reads the raw outcome CSV from the product's outcome folder, maps its
    per-patient '<Patient>_FVC' / '<Patient>_CON' columns onto the
    'sample_submission.csv' template, and writes a numbered
    'submission_<n>.csv' into the outcome 'submissions/' folder.

    Parameters
    ----------
    ProductType : str
        One of 'population', 'prototype' or 'sampling'; selects the data root.
    filename : str
        Name of the raw outcome CSV located in the product's outcome folder.
    testMode : bool
        Passed through unchanged in the result tuple.

    Returns
    -------
    tuple
        (submissionFile_DataFrame, output_filename, testMode)
    """
    import os
    import pandas as pd
    # Resolve the product-type root path ('population' is the default).
    path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    if ProductType == 'prototype':
        path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
    if ProductType == 'sampling':
        path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
    path_outcome = path_ProductType + 'outcome/'
    # Get raw data as a DataFrame.
    # BUG FIX: the raw file name was hard-coded ('submissionRawFile_2020_09_19.csv'),
    # so the 'filename' argument was ignored; it is honored now.
    os.chdir(path_outcome)
    rawFile_DataFrame = pd.read_csv(filename)
    # Get the submission file template as a DataFrame.
    os.chdir(path_ProductType)
    submissionFile_DataFrame = pd.read_csv('sample_submission.csv')
    # Extract the test-patient IDs from the first five Patient_Week entries
    # by stripping the 4-character week suffix.
    submissionNumber_range = len(rawFile_DataFrame.index)
    IDcases_List = submissionFile_DataFrame.Patient_Week.copy()
    IDcases_List = IDcases_List[0:5]
    for i in range(0, len(IDcases_List)):
        IDcases_List[i] = IDcases_List[i][:-4]
    def _collect(suffix):
        # Flatten the raw predictions row-major: for each raw-file row, one
        # rounded value per test ID, matching the submission-template order.
        values = []
        for k in range(0, submissionNumber_range):
            for j in IDcases_List:
                datum = rawFile_DataFrame[str(j) + str(suffix)][k]
                values.append(round(datum, 0))
        return values
    # Fill the FVC and Confidence columns of the submission template.
    submissionFile_DataFrame['FVC'] = _collect('_FVC')
    submissionFile_DataFrame['Confidence'] = _collect('_CON')
    # Save file: create the 'submissions/' directory on first use (EAFP).
    path_destination = path_outcome+'submissions/'
    try:
        os.chdir(path_destination)
    except FileNotFoundError:
        from distutils.dir_util import mkpath
        mkpath(path_destination)
        os.chdir(path_destination)
    # Correlative output name: one more than the number of existing files.
    submissionList = os.listdir(path_destination)
    number = len(submissionList)
    filename = 'submission_'+str(number+1)+'.csv'
    submissionFile_DataFrame.to_csv(filename, index=False)
    return submissionFile_DataFrame, filename, testMode
# Self-test harness for Function 2 (runs only when testMode is True).
if testMode == True:
    ProductType = 'population'
    filename = 'submissionRawFile_2020_09_19.csv'
    resultFunction2 = SubmissionBuilder(ProductType,filename,testMode)
    print("=========================================")
    print("Product Type:")
    print("=========================================")
    print(ProductType)
    print("=========================================")
    print("Submission File saved as:")
    print("=========================================")
    print(resultFunction2[1])
    print("=========================================")
    print("Test result Function 2: Success")
    print("=========================================")
"""
=========================================
Function 3: Dataset builder (Stacking solution case) to process with ML models
=========================================
Purpose: Build an input dataset to be processed with an stacking solution
Raw code reference (see Tester.py): Test 15
"""
def stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode):
    """Build the stacking-model input dataset (adjusted train dataset).

    Merges the Kaggle train/test CSVs with a submission-input template,
    transfers the test patients' known instances into the template, optionally
    joins pydicom-derived columns, and writes the result to
    'train_adjusted.csv' (or 'train_adjusted_pydicom.csv').

    Parameters
    ----------
    ProductType : str
        'population', 'prototype' or 'sampling'; selects the data root path.
    PydicomMode : bool
        If True, merge the pydicom-derived columns (indexType1_Exhalation,
        indexType1_Inhalation, ImageType) from 'train_pydicom.csv'.
    reportMode : bool
        If True, print a report of the run.
    testMode : bool
        Not used inside the function body; kept for interface symmetry with
        the other functions in this module.

    Returns
    -------
    tuple
        (train_dataset_adjusted, path_output, filename_output)
    """
    # Set Product Type and its corresponding path
    if ProductType == 'population':
        path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    if ProductType == 'prototype':
        path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
    if ProductType == 'sampling':
        path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
    # Set working directory
    import os
    os.chdir(path_ProductType)
    # Get train dataset and test dataset
    import pandas as pd
    filename_trainDataset = 'train.csv'
    train_dataset = pd.read_csv(path_ProductType+filename_trainDataset)
    filename_testDataset = 'test.csv'
    test_dataset = pd.read_csv(path_ProductType+filename_testDataset)
    # Get submission dataset (template)
    import numpy as np
    # NOTE(review): the template is always read from the prototype resources
    # folder, regardless of ProductType — confirm this is intended.
    path_resources = 'Y:/Kaggle_OSIC/3-Data (Prototype)/resources/'
    if (PydicomMode == False):
        filename_submissionDataset = 'submissionInputDataset.csv'
    else:
        filename_submissionDataset = 'submissionInputDataset_pydicom.csv'
    submission_dataset = pd.read_csv(path_resources+filename_submissionDataset)
    # Placeholder string 'iNaN' marks missing cells until the final replace below.
    submission_dataset = submission_dataset.replace(np.nan,'iNaN')
    # Adjust train dataset | Phase 1: Get ID list of the test dataset
    IDList = list(test_dataset.Patient)
    # Adjust train dataset | Phase 2: Get submission instances from train dataset
    instancesPopulation = len(train_dataset.Patient)
    indexList = []
    for i in IDList:
        for j in range(0,instancesPopulation):
            if i == train_dataset.Patient[j]:
                indexToInclude = train_dataset.index[j]
                indexList = indexList + [indexToInclude]
    # Adjust train dataset | Phase 3: Create an adjusted train dataset | a. Remove test instances from train dataset and reset index
    train_dataset_adjusted = train_dataset.drop(indexList)
    # NOTE(review): missing parentheses/assignment — the next line is a no-op;
    # the actual reset_index happens near the end of this function.
    train_dataset_adjusted.reset_index
    # Adjust train dataset | Phase 3: Create an adjusted train dataset | b. Get Transferring data from train dataset
    instanceToTrasferList_index = []
    for k in range(0,instancesPopulation):
        for l in IDList:
            if train_dataset.Patient[k] == l:
                instanceToTransfer_Index = train_dataset.index[k]
                instanceToTrasferList_index = instanceToTrasferList_index + [instanceToTransfer_Index]
    train_dataset_instancesToTransfer = train_dataset.take(instanceToTrasferList_index)
    # NOTE(review): the next line is a no-op attribute access.
    train_dataset_instancesToTransfer.index
    train_dataset_instancesToTransfer = train_dataset_instancesToTransfer.reset_index()
    # NOTE(review): drop() returns a copy that is discarded here; the 'index'
    # column therefore remains in train_dataset_instancesToTransfer.
    train_dataset_instancesToTransfer.drop(columns='index')
    # Adjust train dataset | Phase 3: Create an adjusted train dataset | c. Update the submission dataset with the transferring data in b.
    # NOTE(review): the chained assignments below (e.g. submission_dataset.FVC[m] = ...)
    # may raise pandas' SettingWithCopyWarning; .loc[] would be the safer idiom — confirm.
    submission_dataset_range = len(submission_dataset.Patient)
    train_dataset_instancesToTransfer_range = len(train_dataset_instancesToTransfer.Patient)
    Patient_List = []
    Week_List = []
    FVC_List = []
    Percent_List = []
    Age_List = []
    Sex_List = []
    SmokingStatus_List = []
    for m in range (0,submission_dataset_range):
        timesCopy = 0
        if(submission_dataset.Patient[m] in IDList):
            referenceWeek = submission_dataset.Weeks[m]
            for n in range (0,train_dataset_instancesToTransfer_range):
                if(train_dataset_instancesToTransfer.Patient[n] == submission_dataset.Patient[m] and train_dataset_instancesToTransfer.Weeks[n] == referenceWeek):
                    if (timesCopy == 0):
                        submission_dataset.FVC[m] = train_dataset_instancesToTransfer.FVC[n]
                        submission_dataset.Percent[m] = train_dataset_instancesToTransfer.Percent[n]
                        submission_dataset.Age[m] = train_dataset_instancesToTransfer.Age[n]
                        submission_dataset.Sex[m] = train_dataset_instancesToTransfer.Sex[n]
                        submission_dataset.SmokingStatus[m] = train_dataset_instancesToTransfer.SmokingStatus[n]
                        timesCopy = timesCopy + 1
                    else:
                        # Additional instances to include
                        Patient_List = Patient_List + [train_dataset_instancesToTransfer.Patient[n]]
                        Week_List = Week_List + [train_dataset_instancesToTransfer.Weeks[n]]
                        FVC_List = FVC_List + [train_dataset_instancesToTransfer.FVC[n]]
                        Percent_List = Percent_List + [train_dataset_instancesToTransfer.Percent[n]]
                        Age_List = Age_List + [train_dataset_instancesToTransfer.Age[n]]
                        Sex_List = Sex_List + [train_dataset_instancesToTransfer.Sex[n]]
                        SmokingStatus_List = SmokingStatus_List + [train_dataset_instancesToTransfer.SmokingStatus[n]]
    # Adjust train dataset | Phase 3: Create an adjusted train dataset | d. Add common values to submission dataset given those from the test dataset (Features: Age, Sex, SmokingStatus)
    submission_dataset_range = len(submission_dataset.Patient)
    for o in range(0,submission_dataset_range):
        if(submission_dataset.Patient[o] in IDList):
            for p in range(0,train_dataset_instancesToTransfer_range):
                if(submission_dataset.Patient[o] == train_dataset_instancesToTransfer.Patient[p]):
                    submission_dataset.Age[o] = train_dataset_instancesToTransfer.Age[p]
                    submission_dataset.Sex[o] = train_dataset_instancesToTransfer.Sex[p]
                    submission_dataset.SmokingStatus[o] = train_dataset_instancesToTransfer.SmokingStatus[p]
                    # Scenario to replace NaN values: Average FVC for a given Patient
                    averageFVC = train_dataset_instancesToTransfer.FVC[train_dataset_instancesToTransfer.Patient == train_dataset_instancesToTransfer.Patient[p]].mean()
                    submission_dataset.FVC[o] = averageFVC
    # Adjust train dataset | Phase 4: Create an adjusted train dataset | e. Concatenate the submission dataset (and additional instance) and the adjusted train dataset
    additionalDictionary = {submission_dataset.columns[0]:Patient_List,
                            submission_dataset.columns[1]:Week_List,
                            submission_dataset.columns[2]:FVC_List,
                            submission_dataset.columns[3]:Percent_List,
                            submission_dataset.columns[4]:Age_List,
                            submission_dataset.columns[5]:Sex_List,
                            submission_dataset.columns[6]:SmokingStatus_List}
    additional_dataset = pd.DataFrame(data=additionalDictionary)
    frames = [train_dataset_adjusted,submission_dataset,additional_dataset]
    train_dataset_adjusted = pd.concat(frames)
    train_dataset_adjusted = train_dataset_adjusted.reset_index()
    train_dataset_adjusted = train_dataset_adjusted.drop(columns='index')
    # Adjust train dataset with pydicom train dataset) | Phase 1: Get pydicom train dataset
    if(PydicomMode == True):
        filename_pydicom = 'train_pydicom.csv'
        path_ProductType_pydicom = path_ProductType + 'outcome/'
        train_dataset_pydicom = pd.read_csv(path_ProductType_pydicom + filename_pydicom)
    # Adjust train dataset with pydicom train dataset) | Phase 2: Include values from train_adjusted_pydicom.py into adjusted train dataset
    if(PydicomMode == True):
        instancesToInclude_List = list(train_dataset_pydicom.Patient)
        # NOTE(review): 'i' here leaks from an earlier loop; the value is
        # overwritten on the first iteration below, so this line is effectively dead.
        InstanceToInclude_Patient = i
        newIndex = len(train_dataset_adjusted.Patient)
        for i in instancesToInclude_List:
            # Get instance to transfer
            InstanceToInclude_Patient = i
            InstanceToInclude_Week = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].Weeks)[0]
            InstanceToInclude_indexType1_Exhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Exhalation)[0]
            InstanceToInclude_indexType1_Inhalation = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].indexType1_Inhalation)[0]
            InstanceToInclude_ImageType = list(train_dataset_pydicom[train_dataset_pydicom.Patient == i].ImageType)[0]
            # Put instance into train_dataset_adjusted DataFrame
            if (0 in list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Weeks)):
                # Get index
                indexToComplete = list(train_dataset_adjusted[train_dataset_adjusted.Weeks == 0].Patient[train_dataset_adjusted.Patient == i].index)
                # Complete instance
                train_dataset_adjusted.indexType1_Exhalation[indexToComplete] = InstanceToInclude_indexType1_Exhalation
                train_dataset_adjusted.indexType1_Inhalation[indexToComplete] = InstanceToInclude_indexType1_Inhalation
                train_dataset_adjusted.ImageType[indexToComplete] = str(InstanceToInclude_ImageType)
            else:
                # Add new instance
                ## Get repeatable instances
                repeatableInstance1 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].FVC)[0]
                repeatableInstance2 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Percent)[0]
                repeatableInstance3 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Age)[0]
                repeatableInstance4 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].Sex)[0]
                repeatableInstance5 = list(train_dataset_adjusted[train_dataset_adjusted.Patient == i].SmokingStatus)[0]
                ## Get Dictionary
                DictionaryToInclude = {}
                DictionaryToInclude['Patient'] = InstanceToInclude_Patient
                DictionaryToInclude['Weeks'] = InstanceToInclude_Week
                DictionaryToInclude['FVC'] = repeatableInstance1
                DictionaryToInclude['Percent'] = repeatableInstance2
                DictionaryToInclude['Age'] = repeatableInstance3
                DictionaryToInclude['Sex'] = repeatableInstance4
                DictionaryToInclude['SmokingStatus'] = repeatableInstance5
                DictionaryToInclude['indexType1_Exhalation'] = InstanceToInclude_indexType1_Exhalation
                DictionaryToInclude['indexType1_Inhalation'] = InstanceToInclude_indexType1_Inhalation
                DictionaryToInclude['ImageType'] = str(InstanceToInclude_ImageType)
                ## Get DataFrame
                DataFrameToInclude = pd.DataFrame(data = DictionaryToInclude, index=[newIndex])
                newIndex = newIndex + 1
                ## Concatenate DataFrame
                train_dataset_adjusted = pd.concat([train_dataset_adjusted, DataFrameToInclude])
    # nan filling: restore real NaN where the 'iNaN' placeholder remained
    train_dataset_adjusted = train_dataset_adjusted.replace('iNaN',np.nan)
    # Specifying dtype
    # NOTE(review): astype() returns a new object; the results below are
    # discarded, so these lines do not actually change the dtypes.
    train_dataset_adjusted.astype({'Patient': 'O'}).dtypes
    train_dataset_adjusted.astype({'Weeks': 'float64'}).dtypes
    train_dataset_adjusted.astype({'Percent': 'float64'}).dtypes
    train_dataset_adjusted.astype({'Age': 'float64'}).dtypes
    train_dataset_adjusted.astype({'Sex': 'O'}).dtypes
    train_dataset_adjusted.astype({'SmokingStatus': 'O'}).dtypes
    train_dataset_adjusted.astype({'FVC': 'float64'}).dtypes
    if(PydicomMode == True):
        train_dataset_adjusted.astype({'indexType1_Exhalation': 'float64'}).dtypes
        train_dataset_adjusted.astype({'indexType1_Inhalation': 'float64'}).dtypes
        train_dataset_adjusted.astype({'ImageType': 'O'}).dtypes
    # Get CSV file
    path_output = path_ProductType +'outcome/'
    if(PydicomMode == False):
        filename_output = 'train_adjusted.csv'
    else:
        filename_output = 'train_adjusted_pydicom.csv'
    train_dataset_adjusted.to_csv(path_output+filename_output)
    # Function Result
    resultFunction = train_dataset_adjusted,path_output,filename_output
    # Report Mode
    if reportMode == True:
        print("=========================================")
        print("Function Report")
        print("=========================================")
        print("DataFrame")
        print("=========================================")
        print(resultFunction[0])
        print("=========================================")
        print("Product Type: ", ProductType)
        print("=========================================")
        print("Pydicom Mode: ", PydicomMode)
        print("=========================================")
        print("Location of Input File:", resultFunction[1])
        print("=========================================")
        print("Input File saved as:", resultFunction[2])
        print("=========================================")
        print("Data type of the dataset")
        print("=========================================")
        print(resultFunction[0].dtypes)
        print("=========================================")
        print("Test result Function 3: Success")
        print("=========================================")
    return resultFunction
# Self-test harness for Function 3 (runs only when testMode is True).
if testMode == True:
    ProductType = 'prototype'
    PydicomMode = True
    reportMode = False
    resultFunction3 = stacking_Dataset_Builder(ProductType, PydicomMode, reportMode, testMode)
    print("=========================================")
    print("Function Report")
    print("=========================================")
    print("DataFrame")
    print("=========================================")
    print(resultFunction3[0])
    print("=========================================")
    print("=========================================")
    print("Product Type: ", ProductType)
    print("=========================================")
    print("Pydicom Mode: ", PydicomMode)
    print("=========================================")
    print("Location of Input File:", resultFunction3[1])
    print("=========================================")
    print("Input File saved as:", resultFunction3[2])
    print("=========================================")
    print("Data type of the dataset")
    print("=========================================")
    print(resultFunction3[0].dtypes)
    print("=========================================")
    print("Test result Function 3: Success")
    print("=========================================")
"""
=========================================
Function 4: Submission dataset builder (Stacking solution case) after ML outcome
=========================================
Purpose: Build a submission CSV file (Stacking solution case)
Raw code reference (see Tester.py): Test 17
About the Shape Parameter: It amounts to c = 0.12607421874999922 for every instance in the oject of concern. c value has been computed
deeming the following data fitting scope: (1) Data: FVC predictions; (2) Probability density function as follows (staistical function
in scipy renowend as scipy.stats.loglaplace): loglaplace.pdf(x, c, loc=0, scale=1).
"""
def Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_DataFrame,pydicomMode,testMode):
    """Build Kaggle submission files for each stacked ML model.

    Reads the model results ('result.csv' / 'result_pydicom.csv'), extracts
    the test-patient instances, computes the Laplace-log-likelihood
    confidence metric per model, and writes one numbered submission CSV per
    model into '<product>/submission/'.

    Parameters
    ----------
    ProductType : str
        'population', 'prototype' or 'sampling'; selects the data root path.
    shapeParameter_DataFrame : pandas.DataFrame or []
        Per-ID, per-model log-laplace shape parameter 'c'. When the empty
        list is passed, c = 0.126074 is used for every model and ID.
    pydicomMode : bool
        If True, use the pydicom result file and shift the column offset of
        the model-prediction columns.
    testMode : bool
        Not used inside the function body.

    Returns
    -------
    tuple
        (submission_dataset, shapeParameter_DataFrame,
         standardDeviationClipped_DataFrame)
    """
    # Set Product Type and its corresponding path
    if ProductType == 'population':
        path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
    if ProductType == 'prototype':
        path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
    if ProductType == 'sampling':
        path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
    # Set working directory
    import os
    os.chdir(path_ProductType + 'outcome/')
    # Get result data and test dataset
    import pandas as pd
    if(pydicomMode == True):
        filename_resultDataset = 'result_pydicom.csv'
    else:
        filename_resultDataset = 'result.csv'
    result_dataset = pd.read_csv(path_ProductType+'outcome/'+filename_resultDataset)
    filename_testDataset = 'test.csv'
    test_dataset = pd.read_csv(path_ProductType+filename_testDataset)
    # Get submission instances | Phase 1: Index
    IDList = list(test_dataset.Patient)
    IDList_index_dictionary = {}
    for i in IDList:
        itemToInclude = result_dataset.Patient[result_dataset.Patient==i].index
        IDList_index_dictionary[i] = itemToInclude
    # Get submission instances | Phase 2: Extract submission instances from result dataset
    IDList_index = []
    IDList_columns = ['Patient', 'Weeks', 'Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
    for j in IDList: IDList_index = IDList_index + list(IDList_index_dictionary[j])
    submission_dataset = result_dataset.loc[IDList_index]
    # Get submission instances | Phase 3: Extract duplicated instances
    submission_dataset = submission_dataset.drop_duplicates(subset=['Patient','Weeks'])
    # Get submission instances | Phase 4: Sort submission instances by Weeks (ascending) and reset index
    submission_dataset = submission_dataset.sort_values(by=['Weeks','Patient'])
    submission_dataset = submission_dataset.reset_index()
    submission_dataset = submission_dataset.drop(columns=['Unnamed: 0','index'])
    # Get confidence measure | Phase 1: Get shape Parameter DataFrame by default
    ## When shapeParameter_DataFrame==[], parameter c = 0.126074 is assigned by default per model and ID
    # NOTE(review): comparing a DataFrame against [] with == may raise in
    # pandas; isinstance(shapeParameter_DataFrame, list) would be safer — confirm.
    if (shapeParameter_DataFrame == []):
        shapeParameter_dictionary = {}
        shapeParameter = 0.126074
        MLModelList = IDList_columns[2:]
        for l in MLModelList:
            keyShapeParameter = 'c Parameter_'+l
            shapeParameter_dictionary[keyShapeParameter] = [shapeParameter,shapeParameter,shapeParameter,shapeParameter,shapeParameter]
        shapeParameter_DataFrame = pd.DataFrame(data = shapeParameter_dictionary, index = IDList)
    # Get confidence measure | Phase 2: Get standard-deviation-clipped per instance
    ## Metric - Part 1: standard_deviation_clipped = max(standard_deviation, 70)
    ## Build a DataFrame with standard-deviation-clipped values given an ID and a ML Model: standardDeviationClipped_DataFrame
    standardDeviationClipped_DataFrame = shapeParameter_DataFrame.copy()
    columnLabels = list(standardDeviationClipped_DataFrame.columns)
    columnLabels_SDC_dictionary = {}
    for i in columnLabels:
        # Rename 'c Parameter_<model>' columns to 'SD_Clipped_<model>'.
        columnLabels_item ='SD_Clipped'+i[11:]
        columnLabels_SDC_dictionary[i]=columnLabels_item
    standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.rename(columns=columnLabels_SDC_dictionary)
    import numpy as np
    standardDeviationClipped_DataFrame = standardDeviationClipped_DataFrame.replace(3,np.nan)
    ID_List = list(standardDeviationClipped_DataFrame.index)
    SDModel_List = list(standardDeviationClipped_DataFrame.columns)
    CParameter_List = list(shapeParameter_DataFrame.columns)
    numy = 0
    from scipy.stats import loglaplace
    for j in ID_List:
        for k in SDModel_List:
            itemToInclude = CParameter_List[numy]
            c = shapeParameter_DataFrame[itemToInclude][j]
            # Standard deviation of a log-laplace(c, loc=0, scale=100) distribution.
            sd_LL = loglaplace.std(c, loc=0, scale=100)
            standardDeviationClipped_DataFrame[k][j] = max(70,sd_LL) # j: index is ID | k: SD_Clipped_(ML Model)
            numy = numy + 1
        numy = 0
    # Get confidence measure | Phase 3: Get metric axe per model: |FVC_true - FVC_predicted|
    ## Metric - Part 1: |FVC_true - FVC_pred|
    if(pydicomMode == True):
        variableNumber = 10
    else:
        variableNumber = 7
    MLModelList = list(submission_dataset.columns[variableNumber:])
    metric_dictionary = {}
    for j in MLModelList:
        metric_differential = abs(submission_dataset.FVC - submission_dataset[j])
        metric_differential = list(metric_differential)
        keyToInclude = 'metric_'+j
        metric_dictionary[keyToInclude] = metric_differential
    metric_DataFrame = pd.DataFrame(data=metric_dictionary)
    # Get confidence measure | Phase 4: Get metric axe per model: min(|FVC_true - FVC_predicted|, 1000)
    ## metric per instance
    ## Metric - Part 2: min(|FVC_true - FVC_pred|,1000)
    metricLabels = list(metric_DataFrame.columns)
    instancesNumber = len(submission_dataset.index)
    for i in metricLabels:
        j = 0
        while (j<instancesNumber):
            metric_DataFrame[i][j] = min(metric_DataFrame[i][j],1000)
            j = j+1
    submission_dataset = submission_dataset.join(metric_DataFrame)
    # Get confidence measure | Phase 5: Get metric axe per model: (-1 * differential * 2^0.5 / SDC ) - ln(2^0.5 * SCD)
    ## metric per instance
    ## differential = min(|FVC_true - FVC_predicted|, 1000)
    ## SDC: Standard Deviation Clipped
    ## Metric - Part 2: min(|FVC_true - FVC_pred|,1000)
    IDList = list(test_dataset.Patient)
    SDModel_List = list(standardDeviationClipped_DataFrame.columns)
    SDModel_index_List = list(standardDeviationClipped_DataFrame.index)
    metric_lists = list(metric_DataFrame.columns)
    metric_index_lists = list(metric_DataFrame.index)
    submission_dataset_index_List = list(submission_dataset.index)
    instancesNumber = len(submission_dataset_index_List)
    indexPerID_dictionary = {}
    ### Step 1: Get index per ID to compute
    for i in IDList:
        listToInclude = list(submission_dataset.Patient[submission_dataset.Patient == i].index)
        indexPerID_dictionary[i] = listToInclude
    indexPerID_DataFrame = pd.DataFrame(data=indexPerID_dictionary)
    ### Step 3: Compute metric
    import math
    from math import log1p
    for k in IDList:
        for i in metric_lists:
            for j in list(indexPerID_DataFrame[k]):
                differential = submission_dataset[i][j]
                # Map 'metric_<model>' to its 'SD_Clipped_<model>' column.
                SDC_Label = 'SD_Clipped_' + i[7:]
                SDC = standardDeviationClipped_DataFrame[SDC_Label][k]
                # NOTE(review): log1p(x) computes ln(1+x), not ln(x); confirm the
                # intended Laplace-log-likelihood term is ln(2^0.5 * SDC).
                metric_part1 = -1* 2**0.5 * differential / SDC
                metric_part2 = -1 * math.log1p(2**0.5 * SDC)
                metric = metric_part1 + metric_part2
                submission_dataset[i][j] = metric
    # Result function specification
    resultFunction = submission_dataset,shapeParameter_DataFrame,standardDeviationClipped_DataFrame
    # Get submission files | Phase 1: Get submission file template
    filename = 'sample_submission.csv'
    submissionFile = pd.read_csv(path_ProductType+filename)
    ## Get submission files | Phase 2: Create directory
    try:
        path_output = path_ProductType + 'submission/'
        os.chdir(path_output)
    except FileNotFoundError:
        # NOTE(review): this relies on 'import distutils.ccompiler' loading
        # distutils.dir_util as a side effect; an explicit
        # 'from distutils.dir_util import mkpath' would be safer — confirm.
        import distutils.ccompiler
        path_output = path_ProductType + 'submission/'
        distutils.dir_util.mkpath(path_output)
    ## Get submission files | Phase 3: Get correlative
    files_list = os.listdir(path_output)
    try:
        # NOTE(review): max() on file names is lexicographic ('9.csv' > '10.csv'),
        # so the correlative may not be the numeric maximum — verify.
        maxNumber = max(files_list)
        maxNumber = maxNumber[:-4]
        maxNumber = int(maxNumber)
        nextNumber = maxNumber+1
    except ValueError:
        nextNumber = 0
    ## Get submission files | Phase 4: Get models to include and their corresponding metrics
    ModelToInclude = IDList_columns[2:]
    ## Get submission files | Phase 5: Build Files
    for i in ModelToInclude:
        filename = 'sample_submission.csv'
        submissionFile = pd.read_csv(path_ProductType+filename)
        submissionFile_columns = list(submissionFile.columns)
        fvc_array = np.array(submission_dataset[i])
        confidence_array = np.array(submission_dataset['metric_'+i])
        submissionFile['FVC'] = fvc_array
        submissionFile['Confidence'] = confidence_array
        filename_output = str(nextNumber)+'.csv'
        path_output = path_ProductType +'submission/'
        submissionFile.to_csv(path_output+filename_output,columns=submissionFile_columns,index=False)
        nextNumber = nextNumber + 1
    return resultFunction
# Self-test harness for Function 4 (runs only when testMode is True).
# Set example=True to exercise an explicit shape-parameter DataFrame instead
# of the [] default (which makes the function use c = 0.126074 everywhere).
if testMode == True:
    # Set Product type
    ProductType = 'prototype'
    # ShapeParameter_Dataframe
    example = False
    if (example == True):
        import pandas as pd
        shapeParameter_IDList = ['ID00419637202311204720264','ID00421637202311550012437','ID00422637202311677017371','ID00423637202312137826377','ID00426637202313170790466']
        c_List1 = [3,3,3,3,3]
        c_List2 = [3,3,3,3,3]
        c_List3 = [3,3,3,3,3]
        c_List4 = [3,3,3,3,3]
        shapeParameter_dictionary = {'Random Forest':c_List1, 'Lasso':c_List2, 'Gradient Boosting':c_List3, 'Stacking Regressor':c_List4}
        shapeParameter_DataFrame = pd.DataFrame(data = shapeParameter_dictionary, index = shapeParameter_IDList)
    else:
        shapeParameter_DataFrame = []
    # Set Pydicom mode
    pydicomMode = True
    resultFunction4 = Stacking_Submission_Dataset_Builder(ProductType,shapeParameter_DataFrame,pydicomMode,testMode)
    print("=========================================")
    print("Shape Parameter - Laplace Log Likelihood:")
    print("=========================================")
    print(resultFunction4[1])
    print("Standard Deviation Clipped - Laplace Log Likelihood:")
    print("=========================================")
    print(resultFunction4[2])
    print("=========================================")
    print("Test result Function 4: Success")
    print("=========================================")
"""
=========================================
Function 5: Get parameters given a must-usage of a log-laplace distribution (i.e. Laplace Log Likelihood)
=========================================
Purpose: Get shape parameter visualization for loglaplace
Raw code reference (see Tester.py): Test 17
"""
def shapeParameter_visualizer(ProductType,testMode):
import numpy as np
from scipy.stats import loglaplace
import matplotlib.pyplot as plt
fig, ax = plt.subplots(4, 5, sharex=False, sharey=False, figsize=(32, 24))
## Get IDs to test
import os
import pandas as pd
## Set Product Type and its corresponding path
if ProductType == 'population':
path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
if ProductType == 'prototype':
path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
if ProductType == 'sampling':
path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
## Get probabilities from predicted values grouping by ID and Model
path = path_ProductType + 'outcome/'
filename = 'result.csv'
y_pred = pd.read_csv(path+filename)
## Get IDs to test
path = path_ProductType
filename = 'test.csv'
test_dataset = pd.read_csv(path+filename)
ID_List = list(test_dataset.Patient)
## Get models
model_List = ['Random Forest', 'Lasso', 'Gradient Boosting', 'Stacking Regressor']
## Grouping task
k = 0
l = 0
for i in ID_List:
k = 0
for j in model_List:
# Data Fit task
#r = y_pred[y_pred.Patient==i][j]/sum(y_pred[y_pred.Patient==i][j])
r = y_pred[y_pred.Patient==i][j]
r = np.array(r)
c1, loc1, scale1 = loglaplace.fit(r,floc=0,fscale=1)
c = c1
# # Calculate a few first moments
# mean, var, skew, kurt = loglaplace.stats(c, moments='mvsk')
# Display the probability density function (pdf):
x = np.linspace(loglaplace.ppf(0.01, c), loglaplace.ppf(0.99, c), num=100)
ax[k,l].plot(x, loglaplace.pdf(x, c),'r-', lw=5, alpha=0.6, label='loglaplace pdf')
# Freeze the distribution and display the frozen pdf:
rv = loglaplace(c)
ax[k,l].plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# Generate random numbers:
r = loglaplace.rvs(c1, loc=0, scale=1, size=1000)
# And compare the histogram:
#ax[k,l].hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax[k,l].legend(loc='best', frameon=False)
# Set limits
#ax[k,l].set_xlim(0,0.1)
#ax[k,l].set_ylim(0,4)
ax[k,l].set_xlabel('x')
ax[k,l].set_ylabel('f(x,c)')
# Check Accuracy
vals = loglaplace.ppf([0.001, 0.5, 0.999], c)
accuracy = np.allclose([0.001, 0.5, 0.999], loglaplace.cdf(vals, c))
# Returns True if two arrays are element-wise equal within a tolerance.
if(accuracy == True):
accuracy = 'Equal case'
else:
accuracy = 'Unequal case'
# Set title
title = str('Probability density function for loglaplace'+'\n'+i + '\n' + j + ' | Accuracy:'+accuracy)
ax[k,l].set_title(title)
k = k + 1
l = l + 1
plt.tight_layout()
plt.show()
resultFunction = c
return resultFunction
if testMode == True:
# Set Product type
ProductType = 'prototype'
# ShapeParameter_Dataframe
resultFunction5 = shapeParameter_visualizer(ProductType, testMode = True)
print("=========================================")
print("Shape Parameter - Laplace Log Likelihood:")
print("=========================================")
print(resultFunction5)
print("=========================================")
print("Test result Function 4: Success")
print("=========================================")
# """
# =========================================
# Function : Dataset builder 2 (Stacking solution case) to process with ML models
# =========================================
# Purpose: Build an input dataset to be processed with an stacking solution but including Pydicom image-processing solution
# Raw code reference (see Tester.py): 15
# """
# def stacking_Dataset_Builder_PydicomSolution(productType, testMode):
# # Set Product Type and its corresponding path
# if ProductType == 'population':
# path_ProductType = 'Y:/Kaggle_OSIC/2-Data/'
# if ProductType == 'prototype':
# path_ProductType = 'Y:/Kaggle_OSIC/3-Data (Prototype)/'
# if ProductType == 'sampling':
# path_ProductType = 'Y:/Kaggle_OSIC/4-Data (Sampling)/'
| [
"distutils.dir_util.mkpath",
"distutils.dir_util.create_tree",
"distutils.dir_util.copy_tree",
"math.log1p",
"pandas.read_csv",
"numpy.array",
"scipy.stats.loglaplace",
"os.listdir",
"scipy.stats.loglaplace.ppf",
"pandas.DataFrame",
"random.randrange",
"scipy.stats.loglaplace.std",
"scipy.stats.loglaplace.cdf",
"datetime.date.today",
"scipy.stats.loglaplace.pdf",
"matplotlib.pyplot.show",
"scipy.stats.loglaplace.fit",
"os.chdir",
"scipy.stats.loglaplace.rvs",
"distutils.dir_util.remove_tree",
"matplotlib.pyplot.tight_layout",
"pandas.concat",
"matplotlib.pyplot.subplots"
]
| [((1713, 1734), 'os.chdir', 'os.chdir', (['path_source'], {}), '(path_source)\n', (1721, 1734), False, 'import os\n'), ((1756, 1779), 'os.listdir', 'os.listdir', (['path_source'], {}), '(path_source)\n', (1766, 1779), False, 'import os\n'), ((2403, 2443), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'Population_Dictionary'}), '(data=Population_Dictionary)\n', (2415, 2443), True, 'import pandas as pd\n'), ((2942, 2993), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'Population_Percentage_Dictionary'}), '(data=Population_Percentage_Dictionary)\n', (2954, 2993), True, 'import pandas as pd\n'), ((3028, 3118), 'pandas.concat', 'pd.concat', (['[Population_DataFrame, Population_Percentage_DataFrame]'], {'axis': '(1)', 'sort': '(False)'}), '([Population_DataFrame, Population_Percentage_DataFrame], axis=1,\n sort=False)\n', (3037, 3118), True, 'import pandas as pd\n'), ((4653, 4698), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'samplingDataset_Dictionary'}), '(data=samplingDataset_Dictionary)\n', (4665, 4698), True, 'import pandas as pd\n'), ((5141, 5170), 'distutils.dir_util.remove_tree', 'remove_tree', (['path_destination'], {}), '(path_destination)\n', (5152, 5170), False, 'from distutils.dir_util import remove_tree\n'), ((5176, 5209), 'distutils.dir_util.create_tree', 'create_tree', (['path_destination', '[]'], {}), '(path_destination, [])\n', (5187, 5209), False, 'from distutils.dir_util import create_tree\n'), ((6107, 6141), 'distutils.dir_util.remove_tree', 'remove_tree', (['path_destination_test'], {}), '(path_destination_test)\n', (6118, 6141), False, 'from distutils.dir_util import remove_tree\n'), ((6147, 6185), 'distutils.dir_util.create_tree', 'create_tree', (['path_destination_test', '[]'], {}), '(path_destination_test, [])\n', (6158, 6185), False, 'from distutils.dir_util import create_tree\n'), ((6455, 6483), 'os.listdir', 'os.listdir', (['path_source_test'], {}), '(path_source_test)\n', (6465, 6483), False, 'import os\n'), ((10323, 10345), 
'os.chdir', 'os.chdir', (['path_outcome'], {}), '(path_outcome)\n', (10331, 10345), False, 'import os\n'), ((10371, 10418), 'pandas.read_csv', 'pd.read_csv', (['"""submissionRawFile_2020_09_19.csv"""'], {}), "('submissionRawFile_2020_09_19.csv')\n", (10382, 10418), True, 'import pandas as pd\n'), ((10481, 10507), 'os.chdir', 'os.chdir', (['path_ProductType'], {}), '(path_ProductType)\n', (10489, 10507), False, 'import os\n'), ((10540, 10576), 'pandas.read_csv', 'pd.read_csv', (['"""sample_submission.csv"""'], {}), "('sample_submission.csv')\n", (10551, 10576), True, 'import pandas as pd\n'), ((12487, 12515), 'os.listdir', 'os.listdir', (['path_destination'], {}), '(path_destination)\n', (12497, 12515), False, 'import os\n'), ((14228, 14254), 'os.chdir', 'os.chdir', (['path_ProductType'], {}), '(path_ProductType)\n', (14236, 14254), False, 'import os\n'), ((14390, 14443), 'pandas.read_csv', 'pd.read_csv', (['(path_ProductType + filename_trainDataset)'], {}), '(path_ProductType + filename_trainDataset)\n', (14401, 14443), True, 'import pandas as pd\n'), ((14501, 14553), 'pandas.read_csv', 'pd.read_csv', (['(path_ProductType + filename_testDataset)'], {}), '(path_ProductType + filename_testDataset)\n', (14512, 14553), True, 'import pandas as pd\n'), ((14914, 14970), 'pandas.read_csv', 'pd.read_csv', (['(path_resources + filename_submissionDataset)'], {}), '(path_resources + filename_submissionDataset)\n', (14925, 14970), True, 'import pandas as pd\n'), ((20686, 20725), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'additionalDictionary'}), '(data=additionalDictionary)\n', (20698, 20725), True, 'import pandas as pd\n'), ((20845, 20862), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (20854, 20862), True, 'import pandas as pd\n'), ((30038, 30077), 'os.chdir', 'os.chdir', (["(path_ProductType + 'outcome/')"], {}), "(path_ProductType + 'outcome/')\n", (30046, 30077), False, 'import os\n'), ((30320, 30387), 'pandas.read_csv', 'pd.read_csv', 
(["(path_ProductType + 'outcome/' + filename_resultDataset)"], {}), "(path_ProductType + 'outcome/' + filename_resultDataset)\n", (30331, 30387), True, 'import pandas as pd\n'), ((30443, 30495), 'pandas.read_csv', 'pd.read_csv', (['(path_ProductType + filename_testDataset)'], {}), '(path_ProductType + filename_testDataset)\n', (30454, 30495), True, 'import pandas as pd\n'), ((36230, 36270), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'indexPerID_dictionary'}), '(data=indexPerID_dictionary)\n', (36242, 36270), True, 'import pandas as pd\n'), ((37262, 37302), 'pandas.read_csv', 'pd.read_csv', (['(path_ProductType + filename)'], {}), '(path_ProductType + filename)\n', (37273, 37302), True, 'import pandas as pd\n'), ((37716, 37739), 'os.listdir', 'os.listdir', (['path_output'], {}), '(path_output)\n', (37726, 37739), False, 'import os\n'), ((40874, 40938), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(5)'], {'sharex': '(False)', 'sharey': '(False)', 'figsize': '(32, 24)'}), '(4, 5, sharex=False, sharey=False, figsize=(32, 24))\n', (40886, 40938), True, 'import matplotlib.pyplot as plt\n'), ((41538, 41566), 'pandas.read_csv', 'pd.read_csv', (['(path + filename)'], {}), '(path + filename)\n', (41549, 41566), True, 'import pandas as pd\n'), ((41667, 41695), 'pandas.read_csv', 'pd.read_csv', (['(path + filename)'], {}), '(path + filename)\n', (41678, 41695), True, 'import pandas as pd\n'), ((44060, 44078), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (44076, 44078), True, 'import matplotlib.pyplot as plt\n'), ((44084, 44094), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (44092, 44094), True, 'import matplotlib.pyplot as plt\n'), ((2004, 2023), 'os.listdir', 'os.listdir', (['path_ID'], {}), '(path_ID)\n', (2014, 2023), False, 'import os\n'), ((3711, 3758), 'random.randrange', 'random.randrange', (['(0)', 'randomNumberTermination', '(1)'], {}), '(0, randomNumberTermination, 1)\n', (3727, 3758), False, 'import random\n'), 
((5644, 5685), 'distutils.dir_util.create_tree', 'create_tree', (['path_destination_unitary', '[]'], {}), '(path_destination_unitary, [])\n', (5655, 5685), False, 'from distutils.dir_util import create_tree\n'), ((5694, 5750), 'distutils.dir_util.copy_tree', 'copy_tree', (['path_source_unitary', 'path_destination_unitary'], {}), '(path_source_unitary, path_destination_unitary)\n', (5703, 5750), False, 'from distutils.dir_util import copy_tree\n'), ((6654, 6695), 'distutils.dir_util.create_tree', 'create_tree', (['path_destination_unitary', '[]'], {}), '(path_destination_unitary, [])\n', (6665, 6695), False, 'from distutils.dir_util import create_tree\n'), ((6704, 6760), 'distutils.dir_util.copy_tree', 'copy_tree', (['path_source_unitary', 'path_destination_unitary'], {}), '(path_source_unitary, path_destination_unitary)\n', (6713, 6760), False, 'from distutils.dir_util import copy_tree\n'), ((6933, 6945), 'datetime.date.today', 'date.today', ([], {}), '()\n', (6943, 6945), False, 'from datetime import date\n'), ((12169, 12195), 'os.chdir', 'os.chdir', (['path_destination'], {}), '(path_destination)\n', (12177, 12195), False, 'import os\n'), ((12394, 12418), 'distutils.dir_util.mkpath', 'mkpath', (['path_destination'], {}), '(path_destination)\n', (12400, 12418), False, 'from distutils.dir_util import mkpath\n'), ((12428, 12454), 'os.chdir', 'os.chdir', (['path_destination'], {}), '(path_destination)\n', (12436, 12454), False, 'import os\n'), ((21287, 21343), 'pandas.read_csv', 'pd.read_csv', (['(path_ProductType_pydicom + filename_pydicom)'], {}), '(path_ProductType_pydicom + filename_pydicom)\n', (21298, 21343), True, 'import pandas as pd\n'), ((32296, 32354), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'shapeParameter_dictionary', 'index': 'IDList'}), '(data=shapeParameter_dictionary, index=IDList)\n', (32308, 32354), True, 'import pandas as pd\n'), ((34569, 34605), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'metric_dictionary'}), 
'(data=metric_dictionary)\n', (34581, 34605), True, 'import pandas as pd\n'), ((37439, 37460), 'os.chdir', 'os.chdir', (['path_output'], {}), '(path_output)\n', (37447, 37460), False, 'import os\n'), ((38249, 38289), 'pandas.read_csv', 'pd.read_csv', (['(path_ProductType + filename)'], {}), '(path_ProductType + filename)\n', (38260, 38289), True, 'import pandas as pd\n'), ((38382, 38413), 'numpy.array', 'np.array', (['submission_dataset[i]'], {}), '(submission_dataset[i])\n', (38390, 38413), True, 'import numpy as np\n'), ((38442, 38485), 'numpy.array', 'np.array', (["submission_dataset['metric_' + i]"], {}), "(submission_dataset['metric_' + i])\n", (38450, 38485), True, 'import numpy as np\n'), ((39557, 39630), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'shapeParameter_dictionary', 'index': 'shapeParameter_IDList'}), '(data=shapeParameter_dictionary, index=shapeParameter_IDList)\n', (39569, 39630), True, 'import pandas as pd\n'), ((33697, 33732), 'scipy.stats.loglaplace.std', 'loglaplace.std', (['c'], {'loc': '(0)', 'scale': '(100)'}), '(c, loc=0, scale=100)\n', (33711, 33732), False, 'from scipy.stats import loglaplace\n'), ((42182, 42193), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (42190, 42193), True, 'import numpy as np\n'), ((42240, 42275), 'scipy.stats.loglaplace.fit', 'loglaplace.fit', (['r'], {'floc': '(0)', 'fscale': '(1)'}), '(r, floc=0, fscale=1)\n', (42254, 42275), False, 'from scipy.stats import loglaplace\n'), ((42806, 42819), 'scipy.stats.loglaplace', 'loglaplace', (['c'], {}), '(c)\n', (42816, 42819), False, 'from scipy.stats import loglaplace\n'), ((42975, 43020), 'scipy.stats.loglaplace.rvs', 'loglaplace.rvs', (['c1'], {'loc': '(0)', 'scale': '(1)', 'size': '(1000)'}), '(c1, loc=0, scale=1, size=1000)\n', (42989, 43020), False, 'from scipy.stats import loglaplace\n'), ((43443, 43481), 'scipy.stats.loglaplace.ppf', 'loglaplace.ppf', (['[0.001, 0.5, 0.999]', 'c'], {}), '([0.001, 0.5, 0.999], c)\n', (43457, 43481), False, 'from 
scipy.stats import loglaplace\n'), ((24892, 24948), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'DictionaryToInclude', 'index': '[newIndex]'}), '(data=DictionaryToInclude, index=[newIndex])\n', (24904, 24948), True, 'import pandas as pd\n'), ((25093, 25148), 'pandas.concat', 'pd.concat', (['[train_dataset_adjusted, DataFrameToInclude]'], {}), '([train_dataset_adjusted, DataFrameToInclude])\n', (25102, 25148), True, 'import pandas as pd\n'), ((42559, 42582), 'scipy.stats.loglaplace.ppf', 'loglaplace.ppf', (['(0.01)', 'c'], {}), '(0.01, c)\n', (42573, 42582), False, 'from scipy.stats import loglaplace\n'), ((42584, 42607), 'scipy.stats.loglaplace.ppf', 'loglaplace.ppf', (['(0.99)', 'c'], {}), '(0.99, c)\n', (42598, 42607), False, 'from scipy.stats import loglaplace\n'), ((42647, 42667), 'scipy.stats.loglaplace.pdf', 'loglaplace.pdf', (['x', 'c'], {}), '(x, c)\n', (42661, 42667), False, 'from scipy.stats import loglaplace\n'), ((43539, 43562), 'scipy.stats.loglaplace.cdf', 'loglaplace.cdf', (['vals', 'c'], {}), '(vals, c)\n', (43553, 43562), False, 'from scipy.stats import loglaplace\n'), ((36804, 36830), 'math.log1p', 'math.log1p', (['(2 ** 0.5 * SDC)'], {}), '(2 ** 0.5 * SDC)\n', (36814, 36830), False, 'import math\n')] |
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.conf import settings
MAX_LEN_AUTHORS_FIELD = 512
CITATION_FORMAT_FLAVORS = ['html', 'ris', 'bibtex', 'biblatex']
DEFAULT_KEYWORDS = ['surface', 'topography']
class UnknownCitationFormat(Exception):
def __init__(self, flavor):
self._flavor = flavor
def __str__(self):
return f"Unknown citation format flavor '{self._flavor}'."
class Publication(models.Model):
LICENSE_CHOICES = [(k, settings.CC_LICENSE_INFOS[k]['option_name'])
for k in ['cc0-1.0', 'ccby-4.0', 'ccbysa-4.0']]
short_url = models.CharField(max_length=10, unique=True, null=True)
surface = models.OneToOneField("manager.Surface", on_delete=models.PROTECT, related_name='publication')
original_surface = models.ForeignKey("manager.Surface", on_delete=models.SET_NULL,
null=True, related_name='derived_publications')
publisher = models.ForeignKey("users.User", on_delete=models.PROTECT)
publisher_orcid_id = models.CharField(max_length=19, default='') # 16 digits including 3 dashes
version = models.PositiveIntegerField(default=1)
datetime = models.DateTimeField(auto_now_add=True)
license = models.CharField(max_length=12, choices=LICENSE_CHOICES, blank=False, default='')
authors = models.CharField(max_length=MAX_LEN_AUTHORS_FIELD)
container = models.FileField(max_length=50, default='')
def get_absolute_url(self):
return reverse('publication:go', args=[self.short_url])
def get_full_url(self, request):
return request.build_absolute_uri(self.get_absolute_url())
def get_citation(self, flavor, request):
if flavor not in CITATION_FORMAT_FLAVORS:
raise UnknownCitationFormat(flavor)
method_name = '_get_citation_as_'+flavor
return getattr(self, method_name)(request)
def _get_citation_as_html(self, request):
s = '{authors}. ({year}). contact.engineering. <em>{surface.name} (Version {version})</em>.'
s += ' <a href="{publication_url}">{publication_url}</a>'
s = s.format(
authors=self.authors,
year=self.datetime.year,
version=self.version,
surface=self.surface,
publication_url=self.get_full_url(request),
)
return mark_safe(s)
def _get_citation_as_ris(self, request):
# see http://refdb.sourceforge.net/manual-0.9.6/sect1-ris-format.html
# or https://en.wikipedia.org/wiki/RIS_(file_format)
# or https://web.archive.org/web/20120526103719/http://refman.com/support/risformat_intro.asp
# https://web.archive.org/web/20120717122530/http://refman.com/support/direct%20export.zip
s = ""
def add(key, value):
nonlocal s
s += f"{key} - {value}\n"
# Electronic citation / Website
add('TY', 'ELEC')
# Title
add('TI', f"{self.surface.name} (Version {self.version})")
# Authors
for author in self.authors.split(','):
add('AU', author.strip())
# Publication Year
add('PY', format(self.datetime, '%Y/%m/%d/'))
# URL
add('UR', self.get_full_url(request))
# Name of Database
add('DB', 'contact.engineering')
# Notes
add('N1', self.surface.description)
# add keywords, defaults ones and tags
for kw in DEFAULT_KEYWORDS:
add('KW', kw)
for t in self.surface.tags.all():
add('KW', t.name)
# End of record, must be empty and last tag
add('ER', '')
return s.strip()
def _get_citation_as_bibtex(self, request):
title = f"{self.surface.name} (Version {self.version})"
shortname = f"{self.surface.name}_v{self.version}".lower().replace(' ','_')
keywords = ",".join(DEFAULT_KEYWORDS)
if self.surface.tags.count()>0:
keywords += ","+",".join(t.name for t in self.surface.tags.all())
s = """
@misc{{
{shortname},
title = {{{title}}},
author = {{{author}}},
year = {{{year}}},
note = {{{note}}},
keywords = {{{keywords}}},
howpublished = {{{publication_url}}},
}}
""".format(title=title,
author=self.authors.replace(', ', ' and '),
year=self.datetime.year,
note=self.surface.description,
publication_url=self.get_full_url(request),
keywords=keywords,
shortname=shortname,
)
return s.strip()
def _get_citation_as_biblatex(self, request):
shortname = f"{self.surface.name}_v{self.version}".lower().replace(' ','_')
keywords = ",".join(DEFAULT_KEYWORDS)
if self.surface.tags.count()>0:
keywords += ","+",".join(t.name for t in self.surface.tags.all())
s = """
@online{{
{shortname},
title = {{{title}}},
version = {{{version}}},
author = {{{author}}},
year = {{{year}}},
month = {{{month}}},
date = {{{date}}},
note = {{{note}}},
keywords = {{{keywords}}},
url = {{{url}}},
urldate = {{{urldate}}}
}}
""".format(title=self.surface.name,
version=self.version,
author=self.authors.replace(', ', ' and '),
year=self.datetime.year,
month=self.datetime.month,
date=format(self.datetime, "%Y-%m-%d"),
note=self.surface.description,
url=self.get_full_url(request),
urldate=format(timezone.now(), "%Y-%m-%d"),
keywords=keywords,
shortname=shortname,
)
return s.strip()
@property
def storage_prefix(self):
"""Return prefix used for storage.
https://docs.djangoproject.com/en/2.2/ref/models/fields/#django.db.models.FileField.upload_to
Looks like a relative path to a directory.
If storage is on filesystem, the prefix should correspond
to a real directory.
"""
return "publications/{}/".format(self.short_url)
@property
def container_storage_path(self):
"""Return relative path of container in storage."""
return f"{self.storage_prefix}container.zip"
| [
"django.db.models.OneToOneField",
"django.db.models.ForeignKey",
"django.db.models.FileField",
"django.utils.timezone.now",
"django.db.models.PositiveIntegerField",
"django.utils.safestring.mark_safe",
"django.urls.reverse",
"django.db.models.DateTimeField",
"django.db.models.CharField"
]
| [((704, 759), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'unique': '(True)', 'null': '(True)'}), '(max_length=10, unique=True, null=True)\n', (720, 759), False, 'from django.db import models\n'), ((774, 871), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""manager.Surface"""'], {'on_delete': 'models.PROTECT', 'related_name': '"""publication"""'}), "('manager.Surface', on_delete=models.PROTECT,\n related_name='publication')\n", (794, 871), False, 'from django.db import models\n'), ((891, 1006), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""manager.Surface"""'], {'on_delete': 'models.SET_NULL', 'null': '(True)', 'related_name': '"""derived_publications"""'}), "('manager.Surface', on_delete=models.SET_NULL, null=True,\n related_name='derived_publications')\n", (908, 1006), False, 'from django.db import models\n'), ((1060, 1117), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""users.User"""'], {'on_delete': 'models.PROTECT'}), "('users.User', on_delete=models.PROTECT)\n", (1077, 1117), False, 'from django.db import models\n'), ((1143, 1186), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(19)', 'default': '""""""'}), "(max_length=19, default='')\n", (1159, 1186), False, 'from django.db import models\n'), ((1233, 1271), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(1)'}), '(default=1)\n', (1260, 1271), False, 'from django.db import models\n'), ((1287, 1326), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1307, 1326), False, 'from django.db import models\n'), ((1341, 1426), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(12)', 'choices': 'LICENSE_CHOICES', 'blank': '(False)', 'default': '""""""'}), "(max_length=12, choices=LICENSE_CHOICES, blank=False,\n default='')\n", (1357, 1426), False, 'from django.db import models\n'), ((1437, 
1487), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 'MAX_LEN_AUTHORS_FIELD'}), '(max_length=MAX_LEN_AUTHORS_FIELD)\n', (1453, 1487), False, 'from django.db import models\n'), ((1504, 1547), 'django.db.models.FileField', 'models.FileField', ([], {'max_length': '(50)', 'default': '""""""'}), "(max_length=50, default='')\n", (1520, 1547), False, 'from django.db import models\n'), ((1596, 1644), 'django.urls.reverse', 'reverse', (['"""publication:go"""'], {'args': '[self.short_url]'}), "('publication:go', args=[self.short_url])\n", (1603, 1644), False, 'from django.urls import reverse\n'), ((2450, 2462), 'django.utils.safestring.mark_safe', 'mark_safe', (['s'], {}), '(s)\n', (2459, 2462), False, 'from django.utils.safestring import mark_safe\n'), ((5935, 5949), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (5947, 5949), False, 'from django.utils import timezone\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-24 19:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendor', '0002_store_image'),
]
operations = [
migrations.AddField(
model_name='store',
name='password',
field=models.CharField(default=1, max_length=30),
preserve_default=False,
),
]
| [
"django.db.models.CharField"
]
| [((392, 434), 'django.db.models.CharField', 'models.CharField', ([], {'default': '(1)', 'max_length': '(30)'}), '(default=1, max_length=30)\n', (408, 434), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/python
import praw
reddit = praw.Reddit('mob-secondbot')
subreddit = reddit.subreddit("learnpython")
for submission in subreddit.hot(limit=5):
print("Title: ", submission.title)
print("Text: ", submission.selftext)
print("Score: ", submission.score)
print("---------------------------------\n")
| [
"praw.Reddit"
]
| [((40, 68), 'praw.Reddit', 'praw.Reddit', (['"""mob-secondbot"""'], {}), "('mob-secondbot')\n", (51, 68), False, 'import praw\n')] |
#!/usr/bin/env python3
import sys
import re
import numpy as np
from PIL import Image
moves = { 'e': (2, 0), 'se': (1, 2), 'sw': (-1, 2), 'w': (-2, 0), 'nw': (-1, -2), 'ne': (1, -2) }
# Save (x, y): True/False in tiles. True = black, False = white.
tiles = {}
for line in open(sys.argv[1]).read().splitlines():
pos = np.array((0, 0))
for d in re.findall(r'e|se|sw|w|nw|ne', line):
pos += moves[d]
t = tuple(pos)
if t in tiles:
tiles[t] = not tiles[t]
else:
tiles[t] = True
# Part 1
print('black:', sum(val == True for val in tiles.values()))
# -- Part 2 --
# take a chance on how wide it needs to be
width = 300
heigth = 300
board = np.zeros(width * heigth, dtype=np.int8)
board = board.reshape(heigth, width)
# Fill in tiles, move to center
for key, value in tiles.items():
x, y = key
x += width // 2
y += heigth // 2
board[y][x] = value
def black_neighbours(y, x, b):
num = 0
for m in moves.values():
num += b[(y + m[1], x + m[0])]
return num
def game():
board_copy = np.copy(board)
w, h = board.shape
# Don't do outer edge (to avoid special cases)
for y in range(2, h - 2):
for x in range(2, w - 2):
tile = board_copy[(y, x)]
n = black_neighbours(y, x, board_copy)
if tile:
# black
if n == 0 or n > 2:
board[(y, x)] = False
else:
# white
if n == 2:
board[(y, x)] = True
def save_image(day):
colours = [(0, 0, 0), (255, 255, 255)]
im = Image.new('RGB', (width, heigth))
for y in range(heigth):
for x in range(width):
c = colours[board[y][x]]
im.putpixel((x, y), c)
im.save('img%03d.png' % (day))
save_image(0)
for day in range(1, 101):
game()
save_image(day)
print('Day %d: %d' % (day, len(np.where(board == True)[0])))
ys, xs = np.where(board)
print(min(ys), max(ys), min(xs), max(xs))
| [
"numpy.copy",
"numpy.where",
"PIL.Image.new",
"numpy.array",
"numpy.zeros",
"re.findall"
]
| [((683, 722), 'numpy.zeros', 'np.zeros', (['(width * heigth)'], {'dtype': 'np.int8'}), '(width * heigth, dtype=np.int8)\n', (691, 722), True, 'import numpy as np\n'), ((1959, 1974), 'numpy.where', 'np.where', (['board'], {}), '(board)\n', (1967, 1974), True, 'import numpy as np\n'), ((324, 340), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (332, 340), True, 'import numpy as np\n'), ((354, 389), 're.findall', 're.findall', (['"""e|se|sw|w|nw|ne"""', 'line'], {}), "('e|se|sw|w|nw|ne', line)\n", (364, 389), False, 'import re\n'), ((1063, 1077), 'numpy.copy', 'np.copy', (['board'], {}), '(board)\n', (1070, 1077), True, 'import numpy as np\n'), ((1612, 1645), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, heigth)'], {}), "('RGB', (width, heigth))\n", (1621, 1645), False, 'from PIL import Image\n'), ((1919, 1942), 'numpy.where', 'np.where', (['(board == True)'], {}), '(board == True)\n', (1927, 1942), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import print_function
import argparse
import gzip
import json
import os
import shutil
import six
from six.moves import zip
_OUTPUT_DIR = 'output'
_OUTPUT_GRAPH_DIR = os.path.join(_OUTPUT_DIR, 'graph')
class Process(object):
def __init__(self):
self.pid = None
self.name = None
self.labels = None
self.types = {}
self.strings = {}
self.stackframes = {}
self.allocators = None
self.version = None
class Entry(object):
def __init__(self):
self.count = None
self.size = None
self.type = None
self.stackframe = None
class GraphDump(object):
def __init__(self):
self.pid = None
self.name = None
self.labels = None
self.heap = None
self.root = ''
self.leaks = ''
self.leak_stackframes = 0
self.leak_objects = 0
def OpenTraceFile(file_path, mode):
if file_path.endswith('.gz'):
return gzip.open(file_path, mode + 'b')
return open(file_path, mode + 't')
def FindMemoryDumps(filename):
processes = {}
with OpenTraceFile(filename, 'r') as f:
data = json.loads(f.read().decode('utf-8'))
for event in data['traceEvents']:
pid = event['pid']
if pid not in processes:
processes[pid] = Process()
processes[pid].pid = pid
process = processes[pid]
# Retrieve process informations.
if event['ph'] == 'M':
if event['name'] == 'process_name' and 'name' in event['args']:
process.name = event['args']['name']
if event['name'] == 'process_labels' and 'labels' in event['args']:
process.labels = event['args']['labels']
if event['name'] == 'typeNames':
process.types = {}
for type_id, t in six.iteritems(event['args']['typeNames']):
process.types[int(type_id)] = t
if event['name'] == 'stackFrames':
process.stackframes = {}
for stack_id, s in six.iteritems(event['args']['stackFrames']):
new_stackframe = {}
new_stackframe['name'] = s['name']
if 'parent' in s:
new_stackframe['parent'] = int(s['parent'])
process.stackframes[int(stack_id)] = new_stackframe
# Look for a detailed memory dump event.
if not ((event['name'] == 'periodic_interval' or
event['name'] == 'explicitly_triggered') and
event['args']['dumps']['level_of_detail'] == 'detailed'):
continue
# Check for a memory dump V1.
if u'heaps' in event['args']['dumps']:
# Get the first memory dump.
if not process.allocators:
process.version = 1
process.allocators = event['args']['dumps']['heaps']
# Check for a memory dump V2.
# See format: [chromium] src/base/trace_event/heap_profiler_event_writer.h
if u'heaps_v2' in event['args']['dumps']:
# Memory dump format V2 is dumping information incrementally. Update
# the cumulated indexes.
maps = event['args']['dumps']['heaps_v2']['maps']
for string in maps['strings']:
process.strings[string['id']] = string['string']
for node in maps['nodes']:
node_v1 = {}
node_v1['name'] = process.strings[node['name_sid']]
if 'parent' in node:
node_v1['parent'] = node['parent']
process.stackframes[node['id']] = node_v1
for t in maps['types']:
process.types[t['id']] = process.strings[t['name_sid']]
# Get the first memory dump.
if not process.allocators:
dump = event['args']['dumps']
process.version = 2
process.allocators = dump['heaps_v2']['allocators']
# Remove processes with incomplete memory dump.
for pid, process in processes.items():
if not (process.allocators and process.stackframes and process.types):
del processes[pid]
return processes
def ResolveMemoryDumpFields(entries, stackframes, types):
  """Resolve, in place, the numeric ids held by each entry.

  Each entry's ``stackframe`` id becomes a tuple of sanitized frame names
  (innermost frame first, following the parent chain), and its ``type`` id
  becomes the resolved type name from ``types``.
  """
  def _StackTrace(frame_id):
    # Walk the parent chain iteratively, stripping CR/LF from frame names.
    names = []
    frame = stackframes[frame_id]
    while True:
      names.append(frame['name'].replace('\r', '').replace('\n', ''))
      parent_id = frame.get('parent')
      if parent_id is None:
        break
      frame = stackframes[parent_id]
    return tuple(names)
  for entry in entries:
    # An id absent from the map (e.g. -1 / 18446744073709551615L when no
    # stack was recorded) resolves to an empty list.
    if entry.stackframe in stackframes:
      entry.stackframe = _StackTrace(entry.stackframe)
    else:
      entry.stackframe = []
    entry.type = types[entry.type]
def IncrementHeapEntry(stack, count, size, typename, root):
  """Accumulate (count, size) for `typename` on the node addressed by `stack`.

  `stack` addresses the node from the leaf end: the LAST element names the
  child of `root` to descend into.  Missing intermediate nodes are created
  on the way down; only the final node's totals are updated.
  """
  node = root
  remaining = list(stack)
  while remaining:
    child_name = remaining.pop()
    children = node['children']
    if child_name not in children:
      children[child_name] = {
          'count': 0,
          'size': 0,
          'children': {},
          'count_by_type': {},
      }
    node = children[child_name]
  node['count'] += count
  node['size'] += size
  by_type = node['count_by_type']
  by_type[typename] = by_type.get(typename, 0) + count
def CanonicalHeapEntries(root):
  """Recursively convert cumulative totals into self-only totals.

  On input every node's count/size/count_by_type include its children's
  contributions; on return each node holds only what it allocated itself.
  """
  children = list(root['children'].values())
  root['count'] -= sum(child['count'] for child in children)
  root['size'] -= sum(child['size'] for child in children)
  for typename, total in root['count_by_type'].items():
    child_total = sum(
        child['count_by_type'].get(typename, 0) for child in children)
    root['count_by_type'][typename] = total - child_total
  for child in children:
    CanonicalHeapEntries(child)
def FindLeaks(root, stack, leaks, threshold, size_threshold):
  """Depth-first scan appending every node exceeding BOTH thresholds.

  `stack` is the frame path leading to `root`, innermost frame first.
  Children are visited before the node itself, so leaks are reported
  leaf-first.
  """
  for frame, child in root['children'].items():
    FindLeaks(child, [frame] + stack, leaks, threshold, size_threshold)
  big_enough = root['count'] > threshold and root['size'] > size_threshold
  if big_enough:
    leaks.append({
        'count': root['count'],
        'size': root['size'],
        'count_by_type': root['count_by_type'],
        'stackframes': stack,
    })
def DumpTree(root, frame, output, threshold, size_threshold):
  """Serialize `root` and its children as a JSON-like tree into `output`.

  Every node is emitted; the 'size' and 'count' attributes are only
  included for nodes whose count exceeds `threshold` AND whose size
  exceeds `size_threshold`.
  """
  output.write('\n{ \"name\": \"%s\",' % frame)
  # Bug fix: the second comparison previously re-tested root['count']
  # against size_threshold; test the node's *size*, consistent with the
  # threshold pair used by FindLeaks().
  if root['count'] > threshold and root['size'] > size_threshold:
    output.write(' \"size\": \"%s\",' % root['size'])
    output.write(' \"count\": \"%s\",' % root['count'])
  output.write(' \"children\": [')
  is_first = True
  for child_frame, child in root['children'].items():
    if is_first:
      is_first = False
    else:
      output.write(',')
    DumpTree(child, child_frame, output, threshold, size_threshold)
  output.write(']')
  output.write('}')
def GetEntries(heap, process):
  """
  Returns all entries in a heap, after filtering out unknown entries, and doing
  some post processing to extract the relevant fields.

  Returns [] when `process` is None (no start trace).  `Entry` is the
  per-allocation record type defined elsewhere in this file.
  """
  if not process:
    return []
  entries = []
  if process.version == 1:
    for raw_entry in process.allocators[heap]['entries']:
      # Cumulative sizes and types are skipped. see:
      # https://chromium.googlesource.com/chromium/src/+/a990af190304be5bf38b120799c594df5a293518/base/trace_event/heap_profiler_heap_dump_writer.cc#294
      if 'type' not in raw_entry or not raw_entry['bt']:
        continue
      entry = Entry()
      # V1 stores counts/sizes as hex strings.
      entry.count = int(raw_entry['count'], 16)
      entry.size = int(raw_entry['size'], 16)
      entry.type = int(raw_entry['type'])
      entry.stackframe = int(raw_entry['bt'])
      entries.append(entry)
  elif process.version == 2:
    # V2 stores parallel arrays; walk them in lockstep without
    # materializing an intermediate list.
    raw_entries = zip(process.allocators[heap]['counts'],
                      process.allocators[heap]['sizes'],
                      process.allocators[heap]['types'],
                      process.allocators[heap]['nodes'])
    for raw_count, raw_size, raw_type, raw_stackframe in raw_entries:
      entry = Entry()
      entry.count = raw_count
      entry.size = raw_size
      entry.type = raw_type
      entry.stackframe = raw_stackframe
      entries.append(entry)
  # Resolve fields by looking into indexes
  ResolveMemoryDumpFields(entries, process.stackframes, process.types)
  return entries
def FilterProcesses(processes, filter_by_name, filter_by_labels):
  """Return the subset of `processes` matching the optional filters.

  A process is kept when its name equals `filter_by_name` (if given) and
  its labels contain `filter_by_labels` (if given).
  """
  kept = {}
  for pid, process in processes.items():
    name_ok = not filter_by_name or process.name == filter_by_name
    labels_ok = (not filter_by_labels or
                 (process.labels and filter_by_labels in process.labels))
    if name_ok and labels_ok:
      kept[pid] = process
  return kept
def FindRelevantProcesses(start_trace, end_trace,
                          filter_by_name,
                          filter_by_labels,
                          match_by_labels):
  """Build the list of (start_process, end_process) pairs to diff.

  With no start trace, pairs are (None, end_process).  With
  `match_by_labels`, processes are matched by name (and labels, except for
  the singleton 'Browser'/'GPU' processes); otherwise they are matched by
  PID.  Returns a list of 2-tuples.
  """
  # Retrieve the processes and the associated memory dump.
  end_processes = FindMemoryDumps(end_trace)
  end_processes = FilterProcesses(end_processes, filter_by_name,
                                  filter_by_labels)
  start_processes = None
  if start_trace:
    start_processes = FindMemoryDumps(start_trace)
    start_processes = FilterProcesses(start_processes, filter_by_name,
                                      filter_by_labels)
  # Build a sequence of pair of processes to be compared.
  processes = []
  if not start_processes:
    # Only keep end-processes.
    for _, end_process in six.iteritems(end_processes):
      processes.append((None, end_process))
  elif match_by_labels:
    # Processes are paired based on name/labels.  Note: the inner loop has
    # no break, so when several start processes match, the LAST one wins.
    for _, end_process in six.iteritems(end_processes):
      matching_start_process = None
      for _, start_process in six.iteritems(start_processes):
        if (start_process.name == end_process.name and
            (start_process.name in ['Browser', 'GPU'] or
             start_process.labels == end_process.labels)):
          matching_start_process = start_process
      if matching_start_process:
        processes.append((matching_start_process, end_process))
  else:
    # Processes are paired based on their PID.
    relevant_pids = set(end_processes.keys()) & set(start_processes.keys())
    for pid in relevant_pids:
      start_process = start_processes[pid]
      end_process = end_processes[pid]
      processes.append((start_process, end_process))
  return processes
def BuildGraphDumps(processes, threshold, size_threshold):
  """
  Build graph for a sequence of pair of processes.
  If start_process is None, counts objects in end_trace.
  Otherwise, counts objects present in end_trace, but not in start_process.

  Returns a list of GraphDump objects (one per heap per process pair),
  each carrying the diffed allocation tree and the leaks found in it.
  """
  graph_dumps = []
  for (start_process, end_process) in processes:
    pid = end_process.pid
    name = end_process.name if end_process.name else ''
    labels = end_process.labels if end_process.labels else ''
    print('Process[%d] %s: %s' % (pid, name, labels))
    for heap in end_process.allocators:
      start_entries = GetEntries(heap, start_process)
      end_entries = GetEntries(heap, end_process)
      graph = GraphDump()
      graph.pid = pid
      graph.name = name
      graph.labels = labels
      graph.heap = heap
      graph_dumps.append(graph)
      # Do the math: diffing start and end memory dumps.
      # Start entries are subtracted, end entries are added, so the tree
      # holds what appeared between the two traces.
      root = {}
      root['count'] = 0
      root['size'] = 0
      root['children'] = {}
      root['count_by_type'] = {}
      for entry in start_entries:
        if entry.type:
          IncrementHeapEntry(entry.stackframe, - entry.count, - entry.size,
                             entry.type, root)
      for entry in end_entries:
        if entry.type:
          IncrementHeapEntry(entry.stackframe, entry.count, entry.size,
                             entry.type, root)
      # Convert cumulative totals into per-node totals.
      CanonicalHeapEntries(root)
      graph.root = root
      # Find leaks
      leaks = []
      FindLeaks(root, [], leaks, threshold, size_threshold)
      leaks.sort(reverse=True, key=lambda k: k['size'])
      if leaks:
        print(' %s: %d potential leaks found.' % (heap, len(leaks)))
      graph.leaks = leaks
      graph.leak_stackframes = len(leaks)
      # NOTE(review): assumes GraphDump initializes leak_objects to 0 —
      # verify against the class definition earlier in this file.
      for leak in leaks:
        graph.leak_objects += leak['count']
  return graph_dumps
def WritePotentialLeaks(graph_dumps):
  """Write one JSON file per graph that has potential leaks.

  Files are named process_<pid>_<heap>-leaks.json under _OUTPUT_DIR.
  """
  for graph in graph_dumps:
    if not graph.leaks:
      continue
    leak_path = os.path.join(
        _OUTPUT_DIR, 'process_%d_%s-leaks.json' % (graph.pid, graph.heap))
    with open(leak_path, 'w') as leak_file:
      json.dump(graph.leaks, leak_file)
def WriteGrahDumps(graph_dumps, threshold, size_threshold):
  """Dump each graph's remaining-objects tree and record its file name.

  NOTE(review): the name looks like a typo for 'WriteGraphDumps'; it is
  kept because callers use this spelling.
  """
  for graph in graph_dumps:
    # Dump the remaining allocated objects tree.
    if not graph.root:
      continue
    objects_name = 'process_%d_%s-objects.json' % (graph.pid, graph.heap)
    objects_path = os.path.join(_OUTPUT_GRAPH_DIR, objects_name)
    with open(objects_path, 'w') as objects_file:
      DumpTree(graph.root, '.', objects_file, threshold, size_threshold)
    # Replace the in-memory tree with the name of the file it went to.
    graph.root = objects_name
def WriteIndex(graph_dumps):
  """Write index.json summarizing every graph dump for the HTML viewer."""
  summary = [
      {'pid': graph.pid,
       'heap': graph.heap,
       'name': graph.name,
       'labels': graph.labels,
       'objects': graph.root,
       'potential leaks': graph.leak_stackframes,
       'objects leaked': graph.leak_objects,
      }
      for graph in graph_dumps]
  index_path = os.path.join(_OUTPUT_GRAPH_DIR, 'index.json')
  with open(index_path, 'w') as index_file:
    json.dump(summary, index_file)
def WriteHTML():
  """Copy the viewer page and the D3 library next to the graph dumps."""
  here = os.path.dirname(os.path.abspath(__file__))
  # Copy the HTML page.
  shutil.copyfile(
      os.path.join(here, 'diff_heap_profiler.html'),
      os.path.join(_OUTPUT_GRAPH_DIR, 'index.html'))
  # Copy the D3 library file.
  d3_source = os.path.join(here,
                           os.path.pardir,
                           os.path.pardir,
                           os.path.pardir,
                           'tracing',
                           'third_party',
                           'd3',
                           'd3.min.js')
  shutil.copyfile(d3_source, os.path.join(_OUTPUT_GRAPH_DIR, 'd3.min.js'))
def Main():
  """Command line driver: parse options, diff the traces, write reports."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--flame-graph',
      action='store_true',
      help='Output a flame graph based on stackframe allocations')
  parser.add_argument(
      '--threshold',
      type=int,
      default=0,
      help='Objects threshold for being a potential memory leak')
  parser.add_argument(
      '--size-threshold',
      type=int,
      default=0,
      help='Size threshold for being a potential memory leak')
  parser.add_argument(
      '--filter-by-name',
      type=str,
      help='Only keep processes with name (i.e. Browser, Renderer, ...)')
  parser.add_argument(
      '--filter-by-labels',
      type=str,
      help='Only keep processes with matching labels')
  parser.add_argument(
      '--match-by-labels',
      action='store_true',
      help='Match processes between runs by labels')
  parser.add_argument(
      'trace',
      nargs='+',
      help='Trace files to be processed')
  options = parser.parse_args()
  # When neither threshold is given, default the object-count threshold.
  if options.threshold == 0 and options.size_threshold == 0:
    options.threshold = 1000
  # One trace: report its contents.  Two traces: diff the first (start)
  # against the second (end); any extra traces are ignored.
  if len(options.trace) == 1:
    end_trace = options.trace[0]
    start_trace = None
  else:
    start_trace = options.trace[0]
    end_trace = options.trace[1]
  if not os.path.exists(_OUTPUT_DIR):
    os.makedirs(_OUTPUT_DIR)
  # Find relevant processes to be processed.
  processes = FindRelevantProcesses(start_trace, end_trace,
                                    options.filter_by_name,
                                    options.filter_by_labels,
                                    options.match_by_labels)
  graph_dumps = BuildGraphDumps(processes, options.threshold,
                                options.size_threshold)
  WritePotentialLeaks(graph_dumps)
  # The flame graph output (object trees + HTML viewer) is opt-in.
  if options.flame_graph:
    if not os.path.exists(_OUTPUT_GRAPH_DIR):
      os.makedirs(_OUTPUT_GRAPH_DIR)
    WriteGrahDumps(graph_dumps, options.threshold, options.size_threshold)
    WriteIndex(graph_dumps)
    WriteHTML()
# Command line entry point.
if __name__ == '__main__':
  Main()
| [
"os.path.exists",
"argparse.ArgumentParser",
"gzip.open",
"six.itervalues",
"os.makedirs",
"os.path.join",
"shutil.copyfile",
"os.path.abspath",
"six.iteritems",
"six.moves.zip",
"json.dump"
]
| [((407, 441), 'os.path.join', 'os.path.join', (['_OUTPUT_DIR', '"""graph"""'], {}), "(_OUTPUT_DIR, 'graph')\n", (419, 441), False, 'import os\n'), ((5509, 5541), 'six.itervalues', 'six.itervalues', (["root['children']"], {}), "(root['children'])\n", (5523, 5541), False, 'import six\n'), ((5977, 6009), 'six.itervalues', 'six.itervalues', (["root['children']"], {}), "(root['children'])\n", (5991, 6009), False, 'import six\n'), ((8659, 8683), 'six.iteritems', 'six.iteritems', (['processes'], {}), '(processes)\n', (8672, 8683), False, 'import six\n'), ((13288, 13333), 'os.path.join', 'os.path.join', (['_OUTPUT_GRAPH_DIR', '"""index.json"""'], {}), "(_OUTPUT_GRAPH_DIR, 'index.json')\n", (13300, 13333), False, 'import os\n'), ((13875, 13920), 'os.path.join', 'os.path.join', (['_OUTPUT_GRAPH_DIR', '"""index.html"""'], {}), "(_OUTPUT_GRAPH_DIR, 'index.html')\n", (13887, 13920), False, 'import os\n'), ((13923, 13959), 'shutil.copyfile', 'shutil.copyfile', (['source', 'destination'], {}), '(source, destination)\n', (13938, 13959), False, 'import shutil\n'), ((14336, 14380), 'os.path.join', 'os.path.join', (['_OUTPUT_GRAPH_DIR', '"""d3.min.js"""'], {}), "(_OUTPUT_GRAPH_DIR, 'd3.min.js')\n", (14348, 14380), False, 'import os\n'), ((14383, 14419), 'shutil.copyfile', 'shutil.copyfile', (['source', 'destination'], {}), '(source, destination)\n', (14398, 14419), False, 'import shutil\n'), ((14445, 14470), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14468, 14470), False, 'import argparse\n'), ((1120, 1152), 'gzip.open', 'gzip.open', (['file_path', "(mode + 'b')"], {}), "(file_path, mode + 'b')\n", (1129, 1152), False, 'import gzip\n'), ((5757, 5789), 'six.itervalues', 'six.itervalues', (["root['children']"], {}), "(root['children'])\n", (5771, 5789), False, 'import six\n'), ((9728, 9756), 'six.iteritems', 'six.iteritems', (['end_processes'], {}), '(end_processes)\n', (9741, 9756), False, 'import six\n'), ((13030, 13071), 'os.path.join', 
'os.path.join', (['_OUTPUT_GRAPH_DIR', 'filename'], {}), '(_OUTPUT_GRAPH_DIR, filename)\n', (13042, 13071), False, 'import os\n'), ((13383, 13628), 'json.dump', 'json.dump', (["[{'pid': graph.pid, 'heap': graph.heap, 'name': graph.name, 'labels': graph\n .labels, 'objects': graph.root, 'potential leaks': graph.\n leak_stackframes, 'objects leaked': graph.leak_objects} for graph in\n graph_dumps]", 'output'], {}), "([{'pid': graph.pid, 'heap': graph.heap, 'name': graph.name,\n 'labels': graph.labels, 'objects': graph.root, 'potential leaks': graph\n .leak_stackframes, 'objects leaked': graph.leak_objects} for graph in\n graph_dumps], output)\n", (13392, 13628), False, 'import json\n'), ((15683, 15710), 'os.path.exists', 'os.path.exists', (['_OUTPUT_DIR'], {}), '(_OUTPUT_DIR)\n', (15697, 15710), False, 'import os\n'), ((15716, 15740), 'os.makedirs', 'os.makedirs', (['_OUTPUT_DIR'], {}), '(_OUTPUT_DIR)\n', (15727, 15740), False, 'import os\n'), ((9901, 9929), 'six.iteritems', 'six.iteritems', (['end_processes'], {}), '(end_processes)\n', (9914, 9929), False, 'import six\n'), ((12675, 12710), 'os.path.join', 'os.path.join', (['_OUTPUT_DIR', 'filename'], {}), '(_OUTPUT_DIR, filename)\n', (12687, 12710), False, 'import os\n'), ((13780, 13805), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (13795, 13805), False, 'import os\n'), ((14031, 14056), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (14046, 14056), False, 'import os\n'), ((16223, 16256), 'os.path.exists', 'os.path.exists', (['_OUTPUT_GRAPH_DIR'], {}), '(_OUTPUT_GRAPH_DIR)\n', (16237, 16256), False, 'import os\n'), ((16264, 16294), 'os.makedirs', 'os.makedirs', (['_OUTPUT_GRAPH_DIR'], {}), '(_OUTPUT_GRAPH_DIR)\n', (16275, 16294), False, 'import os\n'), ((1932, 1973), 'six.iteritems', 'six.iteritems', (["event['args']['typeNames']"], {}), "(event['args']['typeNames'])\n", (1945, 1973), False, 'import six\n'), ((2119, 2162), 'six.iteritems', 'six.iteritems', 
(["event['args']['stackFrames']"], {}), "(event['args']['stackFrames'])\n", (2132, 2162), False, 'import six\n'), ((7949, 8097), 'six.moves.zip', 'zip', (["process.allocators[heap]['counts']", "process.allocators[heap]['sizes']", "process.allocators[heap]['types']", "process.allocators[heap]['nodes']"], {}), "(process.allocators[heap]['counts'], process.allocators[heap]['sizes'],\n process.allocators[heap]['types'], process.allocators[heap]['nodes'])\n", (7952, 8097), False, 'from six.moves import zip\n'), ((9997, 10027), 'six.iteritems', 'six.iteritems', (['start_processes'], {}), '(start_processes)\n', (10010, 10027), False, 'import six\n'), ((12768, 12798), 'json.dump', 'json.dump', (['graph.leaks', 'output'], {}), '(graph.leaks, output)\n', (12777, 12798), False, 'import json\n')] |
"""Write raw files to BIDS format.
example usage: $ mne_bids raw_to_bids --subject_id sub01 --task rest
--raw data.edf --bids_root new_path
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import mne_bids
from mne_bids import write_raw_bids, BIDSPath
from mne_bids.read import _read_raw
def run():
    """Run the raw_to_bids command.

    Parses the command line, reads the raw file and writes it out in BIDS
    format via :func:`mne_bids.write_raw_bids`.
    """
    from mne.commands.utils import get_optparser

    parser = get_optparser(__file__, usage="usage: %prog options args",
                          prog_prefix='mne_bids',
                          version=mne_bids.__version__)

    parser.add_option('--subject_id', dest='subject_id',
                      help=('subject name in BIDS compatible format '
                            '(01, 02, etc.)'))
    parser.add_option('--task', dest='task',
                      help='name of the task the data is based on')
    parser.add_option('--raw', dest='raw_fname',
                      help='path to the raw MEG file')
    parser.add_option('--bids_root', dest='bids_root',
                      help='The path of the BIDS compatible folder.')
    parser.add_option('--session_id', dest='session_id',
                      help='session name in BIDS compatible format')
    parser.add_option('--run', dest='run',
                      help='run number for this dataset')
    parser.add_option('--acq', dest='acq',
                      help='acquisition parameter for this dataset')
    parser.add_option('--events_data', dest='events_data',
                      help='events file (events.tsv)')
    parser.add_option('--event_id', dest='event_id',
                      help='event id dict', metavar='eid')
    parser.add_option('--hpi', dest='hpi',
                      help='path to the MEG marker points')
    parser.add_option('--electrode', dest='electrode',
                      help='path to head-native digitizer points')
    parser.add_option('--hsp', dest='hsp',
                      help='path to headshape points')
    parser.add_option('--config', dest='config',
                      help='path to the configuration file')
    parser.add_option('--overwrite', dest='overwrite',
                      help="whether to overwrite existing data (BOOLEAN)")
    parser.add_option('--line_freq', dest='line_freq',
                      help="The frequency of the line noise in Hz "
                           "(e.g. 50 or 60). If unknown, pass None")

    opt, args = parser.parse_args()

    if len(args) > 0:
        parser.print_help()
        parser.error('Do not specify arguments without flags. Found: "{}".\n'
                     .format(args))

    if not all([opt.subject_id, opt.task, opt.raw_fname, opt.bids_root]):
        parser.print_help()
        # Bug fix: added the missing space so the message no longer reads
        # "...at least thefollowing...".
        parser.error('Arguments missing. You need to specify at least the '
                     'following: --subject_id, --task, --raw, --bids_root.')

    bids_path = BIDSPath(
        subject=opt.subject_id, session=opt.session_id, run=opt.run,
        acquisition=opt.acq, task=opt.task, root=opt.bids_root)

    # Only FIF files may carry MaxShield (IAS) data that needs this flag.
    allow_maxshield = False
    if opt.raw_fname.endswith('.fif'):
        allow_maxshield = True

    raw = _read_raw(opt.raw_fname, hpi=opt.hpi, electrode=opt.electrode,
                    hsp=opt.hsp, config=opt.config,
                    allow_maxshield=allow_maxshield)
    if opt.line_freq is not None:
        # Bug fix: optparse hands us a string, but MNE expects a numeric
        # (or None) power line frequency in raw.info — cast before storing.
        line_freq = None if opt.line_freq == "None" else float(opt.line_freq)
        raw.info['line_freq'] = line_freq
    write_raw_bids(raw, bids_path, event_id=opt.event_id,
                   events_data=opt.events_data, overwrite=opt.overwrite,
                   verbose=True)
# Run the CLI entry point when this module is executed as a script.
if __name__ == '__main__':
    run()
| [
"mne.commands.utils.get_optparser",
"mne_bids.read._read_raw",
"mne_bids.write_raw_bids",
"mne_bids.BIDSPath"
]
| [((444, 561), 'mne.commands.utils.get_optparser', 'get_optparser', (['__file__'], {'usage': '"""usage: %prog options args"""', 'prog_prefix': '"""mne_bids"""', 'version': 'mne_bids.__version__'}), "(__file__, usage='usage: %prog options args', prog_prefix=\n 'mne_bids', version=mne_bids.__version__)\n", (457, 561), False, 'from mne.commands.utils import get_optparser\n'), ((2922, 3051), 'mne_bids.BIDSPath', 'BIDSPath', ([], {'subject': 'opt.subject_id', 'session': 'opt.session_id', 'run': 'opt.run', 'acquisition': 'opt.acq', 'task': 'opt.task', 'root': 'opt.bids_root'}), '(subject=opt.subject_id, session=opt.session_id, run=opt.run,\n acquisition=opt.acq, task=opt.task, root=opt.bids_root)\n', (2930, 3051), False, 'from mne_bids import write_raw_bids, BIDSPath\n'), ((3175, 3306), 'mne_bids.read._read_raw', '_read_raw', (['opt.raw_fname'], {'hpi': 'opt.hpi', 'electrode': 'opt.electrode', 'hsp': 'opt.hsp', 'config': 'opt.config', 'allow_maxshield': 'allow_maxshield'}), '(opt.raw_fname, hpi=opt.hpi, electrode=opt.electrode, hsp=opt.hsp,\n config=opt.config, allow_maxshield=allow_maxshield)\n', (3184, 3306), False, 'from mne_bids.read import _read_raw\n'), ((3494, 3620), 'mne_bids.write_raw_bids', 'write_raw_bids', (['raw', 'bids_path'], {'event_id': 'opt.event_id', 'events_data': 'opt.events_data', 'overwrite': 'opt.overwrite', 'verbose': '(True)'}), '(raw, bids_path, event_id=opt.event_id, events_data=opt.\n events_data, overwrite=opt.overwrite, verbose=True)\n', (3508, 3620), False, 'from mne_bids import write_raw_bids, BIDSPath\n')] |
# -*- coding: utf-8 -*-
"""
Python Markdown
A Python implementation of <NAME>'s Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by <NAME> (http://www.dwerg.net/).
Maintained for a few years by <NAME> (http://www.freewisdom.org).
Currently maintained by <NAME> (https://github.com/waylan),
<NAME> (https://github.com/mitya57) and <NAME> (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 <NAME> (v. 0.2-1.6b)
Copyright 2004 <NAME> (the original version)
License: BSD (see LICENSE.md for details).
Python-Markdown Regression Tests
================================
Tests of the various APIs with the python markdown lib.
"""
from __future__ import unicode_literals
import unittest
import sys
import os
import markdown
import warnings
from markdown.__main__ import parse_options
from logging import DEBUG, WARNING, CRITICAL
import yaml
import tempfile
from io import BytesIO
from xml.etree.ElementTree import ProcessingInstruction
# True when running under Python 3.
PY3 = sys.version_info[0] == 3
if not PY3:
    # Python 2 shim: emulate the Python 3 ``bytes(str, encoding)`` call
    # signature used by the tests below.
    def bytes(string, encoding):
        return string.encode(encoding)
class TestMarkdownBasics(unittest.TestCase):
    """ Basic conversion behavior and extension loading of Markdown. """

    def setUp(self):
        """ Build a fresh converter for each test. """
        self.md = markdown.Markdown()

    def testBlankInput(self):
        """ Converting an empty string yields an empty string. """
        self.assertEqual('', self.md.convert(''))

    def testWhitespaceOnly(self):
        """ Whitespace-only input also yields an empty string. """
        self.assertEqual('', self.md.convert(' '))

    def testSimpleInput(self):
        """ Plain text becomes a single paragraph. """
        self.assertEqual('<p>foo</p>', self.md.convert('foo'))

    def testInstanceExtension(self):
        """ Extensions can be passed as class instances. """
        from markdown.extensions.footnotes import FootnoteExtension
        extension = FootnoteExtension()
        markdown.Markdown(extensions=[extension])

    def testEntryPointExtension(self):
        """ Extensions can be named by their entry point. """
        markdown.Markdown(extensions=['footnotes'])

    def testDotNotationExtension(self):
        """ Extensions can be named by module path (`path.to.module`). """
        markdown.Markdown(extensions=['markdown.extensions.footnotes'])

    def testDotNotationExtensionWithClass(self):
        """ Extensions can be named as `path.to.module:Class`. """
        markdown.Markdown(extensions=['markdown.extensions.footnotes:FootnoteExtension'])
class TestConvertFile(unittest.TestCase):
    """ Tests of ConvertFile (markdown.markdownFromFile). """
    def setUp(self):
        # Swap sys.stdin/stdout for in-memory byte streams so the
        # stdin/stdout code path can be exercised without a terminal.
        self.saved = sys.stdin, sys.stdout
        sys.stdin = BytesIO(bytes('foo', encoding='utf-8'))
        sys.stdout = BytesIO()
    def tearDown(self):
        # Restore the real stdin/stdout captured in setUp.
        sys.stdin, sys.stdout = self.saved
    def getTempFiles(self, src):
        """ Return the file names for two temp files. """
        # NOTE(review): the temp files are never deleted; presumably
        # acceptable for a test run — confirm if this suite is long-lived.
        infd, infile = tempfile.mkstemp(suffix='.txt')
        with os.fdopen(infd, 'w') as fp:
            fp.write(src)
        outfd, outfile = tempfile.mkstemp(suffix='.html')
        return infile, outfile, outfd
    def testFileNames(self):
        # Conversion addressed by file name; read back via the still-open fd.
        infile, outfile, outfd = self.getTempFiles('foo')
        markdown.markdownFromFile(input=infile, output=outfile)
        with os.fdopen(outfd, 'r') as fp:
            output = fp.read()
        self.assertEqual(output, '<p>foo</p>')
    def testFileObjects(self):
        # Conversion addressed by already-open binary file objects.
        infile = BytesIO(bytes('foo', encoding='utf-8'))
        outfile = BytesIO()
        markdown.markdownFromFile(input=infile, output=outfile)
        outfile.seek(0)
        self.assertEqual(outfile.read().decode('utf-8'), '<p>foo</p>')
    def testStdinStdout(self):
        # With no arguments, stdin is read and stdout written (as patched
        # in setUp).
        markdown.markdownFromFile()
        sys.stdout.seek(0)
        self.assertEqual(sys.stdout.read().decode('utf-8'), '<p>foo</p>')
class TestBlockParser(unittest.TestCase):
    """ Tests of the BlockParser class. """
    def setUp(self):
        """ Create instance of BlockParser. """
        # Take the parser off a full Markdown instance so it is wired up
        # with the default block processors.
        self.parser = markdown.Markdown().parser
    def testParseChunk(self):
        """ Test BlockParser.parseChunk. """
        root = markdown.util.etree.Element("div")
        text = 'foo'
        self.parser.parseChunk(root, text)
        self.assertEqual(
            markdown.serializers.to_xhtml_string(root),
            "<div><p>foo</p></div>"
        )
    def testParseDocument(self):
        """ Test BlockParser.parseDocument. """
        # Header, paragraph, and a 4-space-indented code block.
        lines = ['#foo', '', 'bar', '', '    baz']
        tree = self.parser.parseDocument(lines)
        self.assertIsInstance(tree, markdown.util.etree.ElementTree)
        self.assertIs(markdown.util.etree.iselement(tree.getroot()), True)
        self.assertEqual(
            markdown.serializers.to_xhtml_string(tree.getroot()),
            "<div><h1>foo</h1><p>bar</p><pre><code>baz\n</code></pre></div>"
        )
class TestBlockParserState(unittest.TestCase):
    """ Tests of the State class for BlockParser. """
    def setUp(self):
        self.state = markdown.blockparser.State()
    def testBlankState(self):
        """ Test State when empty. """
        self.assertEqual(self.state, [])
    # NOTE(review): method name typo — should be 'testSetState'; renaming
    # would change the test IDs, so it is kept as-is.
    def testSetSate(self):
        """ Test State.set(). """
        self.state.set('a_state')
        self.assertEqual(self.state, ['a_state'])
        self.state.set('state2')
        self.assertEqual(self.state, ['a_state', 'state2'])
    # NOTE(review): method name typo — should be 'testIsState'.
    def testIsSate(self):
        """ Test State.isstate(). """
        # isstate() only matches the most recently set state.
        self.assertEqual(self.state.isstate('anything'), False)
        self.state.set('a_state')
        self.assertEqual(self.state.isstate('a_state'), True)
        self.state.set('state2')
        self.assertEqual(self.state.isstate('state2'), True)
        self.assertEqual(self.state.isstate('a_state'), False)
        self.assertEqual(self.state.isstate('missing'), False)
    def testReset(self):
        """ Test State.reset(). """
        # reset() pops a single state off the stack.
        self.state.set('a_state')
        self.state.reset()
        self.assertEqual(self.state, [])
        self.state.set('state1')
        self.state.set('state2')
        self.state.reset()
        self.assertEqual(self.state, ['state1'])
class TestHtmlStash(unittest.TestCase):
    """ Exercise Markdown's HtmlStash placeholder bookkeeping. """

    def setUp(self):
        # Start every test with one stored block so placeholder 0 exists.
        self.stash = markdown.util.HtmlStash()
        self.placeholder = self.stash.store('foo')

    def testSimpleStore(self):
        """ A single store yields placeholder 0 and records the block. """
        self.assertEqual(self.stash.get_placeholder(0), self.placeholder)
        self.assertEqual(1, self.stash.html_counter)
        self.assertEqual(['foo'], self.stash.rawHtmlBlocks)

    def testStoreMore(self):
        """ Additional stores get consecutive placeholders. """
        second = self.stash.store('bar')
        self.assertEqual(self.stash.get_placeholder(1), second)
        self.assertEqual(2, self.stash.html_counter)
        self.assertEqual(['foo', 'bar'], self.stash.rawHtmlBlocks)

    def testReset(self):
        """ reset() drops the counter and every stored block. """
        self.stash.reset()
        self.assertEqual(0, self.stash.html_counter)
        self.assertEqual([], self.stash.rawHtmlBlocks)
class Item(object):
    """ Minimal stand-in for a Registry item used by the tests below.

    It compares equal to — and reprs as — its wrapped ``data`` value, so
    registry contents can be checked against plain values.
    """

    def __init__(self, data):
        self.data = data

    def __repr__(self):
        return repr(self.data)

    def __eq__(self, other):
        # Equality against the raw value, not against another Item.
        return self.data == other
class RegistryTests(unittest.TestCase):
    """ Test the processor registry (markdown.util.Registry).

    Items are registered with a name and a priority; iteration order is
    highest priority first.  Several tests also pin the deprecated
    dict-style mutation API, asserting the exact DeprecationWarning count.
    """
    def testCreateRegistry(self):
        r = markdown.util.Registry()
        r.register(Item('a'), 'a', 20)
        self.assertEqual(len(r), 1)
        self.assertIsInstance(r, markdown.util.Registry)
    def testRegisterWithoutPriority(self):
        r = markdown.util.Registry()
        with self.assertRaises(TypeError):
            r.register(Item('a'))
    def testSortRegistry(self):
        # Iteration yields items sorted by priority, highest first.
        r = markdown.util.Registry()
        r.register(Item('a'), 'a', 20)
        r.register(Item('b'), 'b', 21)
        r.register(Item('c'), 'c', 20.5)
        self.assertEqual(len(r), 3)
        self.assertEqual(list(r), ['b', 'c', 'a'])
    def testIsSorted(self):
        # Pins the lazy-sort behavior of the private _is_sorted flag:
        # registering invalidates it; iteration/lookup/repr re-sort.
        r = markdown.util.Registry()
        self.assertIs(r._is_sorted, False)
        r.register(Item('a'), 'a', 20)
        list(r)
        self.assertIs(r._is_sorted, True)
        r.register(Item('b'), 'b', 21)
        self.assertIs(r._is_sorted, False)
        r['a']
        self.assertIs(r._is_sorted, True)
        r._is_sorted = False
        r.get_index_for_name('a')
        self.assertIs(r._is_sorted, True)
        r._is_sorted = False
        repr(r)
        self.assertIs(r._is_sorted, True)
    def testDeregister(self):
        r = markdown.util.Registry()
        r.register(Item('a'), 'a', 20)
        r.register(Item('b'), 'b', 30)
        r.register(Item('c'), 'c', 40)
        self.assertEqual(len(r), 3)
        r.deregister('b')
        self.assertEqual(len(r), 2)
        r.deregister('c', strict=False)
        self.assertEqual(len(r), 1)
        # deregister non-existant item with strict=False
        r.deregister('d', strict=False)
        self.assertEqual(len(r), 1)
        with self.assertRaises(ValueError):
            # deregister non-existant item with strict=True
            r.deregister('e')
        self.assertEqual(list(r), ['a'])
    def testRegistryContains(self):
        # Membership works by name or by the registered item itself.
        r = markdown.util.Registry()
        item = Item('a')
        r.register(item, 'a', 20)
        self.assertIs('a' in r, True)
        self.assertIn(item, r)
        self.assertNotIn('b', r)
    def testRegistryIter(self):
        r = markdown.util.Registry()
        r.register(Item('a'), 'a', 20)
        r.register(Item('b'), 'b', 30)
        self.assertEqual(list(r), ['b', 'a'])
    def testRegistryGetItemByIndex(self):
        # Integer indexing addresses the priority-sorted sequence.
        r = markdown.util.Registry()
        r.register(Item('a'), 'a', 20)
        r.register(Item('b'), 'b', 30)
        self.assertEqual(r[0], 'b')
        self.assertEqual(r[1], 'a')
        with self.assertRaises(IndexError):
            r[3]
    def testRegistryGetItemByItem(self):
        r = markdown.util.Registry()
        r.register(Item('a'), 'a', 20)
        r.register(Item('b'), 'b', 30)
        self.assertEqual(r['a'], 'a')
        self.assertEqual(r['b'], 'b')
        with self.assertRaises(KeyError):
            r['c']
    def testRegistrySetItem(self):
        r = markdown.util.Registry()
        with self.assertRaises(TypeError):
            r[0] = 'a'
        # TODO: restore this when deprecated __setitem__ is removed.
        # with self.assertRaises(TypeError):
        #     r['a'] = 'a'
        # TODO: remove this when deprecated __setitem__ is removed.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            r['a'] = Item('a')
            self.assertEqual(list(r), ['a'])
            r['b'] = Item('b')
            self.assertEqual(list(r), ['a', 'b'])
            r['a'] = Item('a1')
            self.assertEqual(list(r), ['a1', 'b'])
            # Check the warnings
            self.assertEqual(len(w), 3)
            self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
    def testRegistryDelItem(self):
        r = markdown.util.Registry()
        r.register(Item('a'), 'a', 20)
        with self.assertRaises(TypeError):
            del r[0]
        # TODO: restore this when deprecated __del__ is removed.
        # with self.assertRaises(TypeError):
        #     del r['a']
        # TODO: remove this when deprecated __del__ is removed.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            r.register(Item('b'), 'b', 15)
            r.register(Item('c'), 'c', 10)
            del r['b']
            self.assertEqual(list(r), ['a', 'c'])
            del r['a']
            self.assertEqual(list(r), ['c'])
            with self.assertRaises(TypeError):
                del r['badname']
            del r['c']
            self.assertEqual(list(r), [])
            # Check the warnings
            self.assertEqual(len(w), 3)
            self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
    def testRegistrySlice(self):
        # Slicing returns a new Registry over the sorted sequence.
        r = markdown.util.Registry()
        r.register(Item('a'), 'a', 20)
        r.register(Item('b'), 'b', 30)
        r.register(Item('c'), 'c', 40)
        slc = r[1:]
        self.assertEqual(len(slc), 2)
        self.assertIsInstance(slc, markdown.util.Registry)
        self.assertEqual(list(slc), ['b', 'a'])
    def testGetIndexForName(self):
        r = markdown.util.Registry()
        r.register(Item('a'), 'a', 20)
        r.register(Item('b'), 'b', 30)
        self.assertEqual(r.get_index_for_name('a'), 1)
        self.assertEqual(r.get_index_for_name('b'), 0)
        with self.assertRaises(ValueError):
            r.get_index_for_name('c')
    # NOTE(review): method name typo — should be 'testRegisterDuplicate';
    # renaming would change the test ID, so it is kept as-is.
    def testRegisterDupplicate(self):
        # Re-registering an existing name replaces the item and priority.
        r = markdown.util.Registry()
        r.register(Item('a'), 'a', 20)
        r.register(Item('b1'), 'b', 10)
        self.assertEqual(list(r), ['a', 'b1'])
        self.assertEqual(len(r), 2)
        r.register(Item('b2'), 'b', 30)
        self.assertEqual(len(r), 2)
        self.assertEqual(list(r), ['b2', 'a'])
    def testRegistryDeprecatedAdd(self):
        # Pins the deprecated add() API with its relative-position strings.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            r = markdown.util.Registry()
            # Add first item
            r.add('c', Item('c'), '_begin')
            self.assertEqual(list(r), ['c'])
            # Added to beginning
            r.add('b', Item('b'), '_begin')
            self.assertEqual(list(r), ['b', 'c'])
            # Add before first item
            r.add('a', Item('a'), '<b')
            self.assertEqual(list(r), ['a', 'b', 'c'])
            # Add before non-first item
            r.add('a1', Item('a1'), '<b')
            self.assertEqual(list(r), ['a', 'a1', 'b', 'c'])
            # Add after non-last item
            r.add('b1', Item('b1'), '>b')
            self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c'])
            # Add after last item
            r.add('d', Item('d'), '>c')
            self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c', 'd'])
            # Add to end
            r.add('e', Item('e'), '_end')
            self.assertEqual(list(r), ['a', 'a1', 'b', 'b1', 'c', 'd', 'e'])
            with self.assertRaises(ValueError):
                r.add('f', Item('f'), 'badlocation')
            # Check the warnings
            self.assertEqual(len(w), 7)
            self.assertTrue(all(issubclass(x.category, DeprecationWarning) for x in w))
class TestErrors(unittest.TestCase):
    """ Test Error Reporting. """
    def setUp(self):
        # Set warnings to be raised as errors
        warnings.simplefilter('error')
    def tearDown(self):
        # Reset warning behavior back to default
        warnings.simplefilter('default')
    def testNonUnicodeSource(self):
        """ Test failure on non-unicode source text (Python 2 only). """
        # `PY3` is a module-level flag defined earlier in this file.
        if not PY3:
            source = "foo".encode('utf-16')
            self.assertRaises(UnicodeDecodeError, markdown.markdown, source)
    def testBadOutputFormat(self):
        """ Test failure on bad output_format. """
        self.assertRaises(KeyError, markdown.Markdown, output_format='invalid')
    def testLoadExtensionFailure(self):
        """ Test failure of an extension to load. """
        self.assertRaises(
            ImportError,
            markdown.Markdown, extensions=['non_existant_ext']
        )
    def testLoadBadExtension(self):
        """ Test loading of an Extension with no makeExtension function. """
        self.assertRaises(AttributeError, markdown.Markdown, extensions=['markdown.util'])
    def testNonExtension(self):
        """ Test loading a non Extension object as an extension. """
        self.assertRaises(TypeError, markdown.Markdown, extensions=[object])
    def testDotNotationExtensionWithBadClass(self):
        """ Test Extension loading with non-existent class name (`path.to.module:Class`). """
        self.assertRaises(
            AttributeError,
            markdown.Markdown,
            extensions=['markdown.extensions.footnotes:MissingExtension']
        )
    def testBaseExtention(self):
        """ Test that the base Extension class will raise NotImplementedError. """
        self.assertRaises(
            NotImplementedError,
            markdown.Markdown, extensions=[markdown.extensions.Extension()]
        )
class testETreeComments(unittest.TestCase):
    """
    Test that ElementTree Comments work.
    These tests should only be a concern when using cElementTree with third
    party serializers (including markdown's (x)html serializer). While markdown
    doesn't use ElementTree.Comment itself, we should certainly support any
    third party extensions which may. Therefore, these tests are included to
    ensure such support is maintained.
    """
    def setUp(self):
        # Create comment node
        self.comment = markdown.util.etree.Comment('foo')
        # cElementTree exposes the comment factory result as `test_comment`;
        # fall back to the factory itself when that attribute is missing.
        if hasattr(markdown.util.etree, 'test_comment'):
            self.test_comment = markdown.util.etree.test_comment
        else:
            self.test_comment = markdown.util.etree.Comment
    def testCommentIsComment(self):
        """ Test that an ElementTree Comment passes the `is Comment` test. """
        # NOTE(review): this reads `test_comment` directly instead of the
        # `self.test_comment` fallback prepared in setUp — on an etree without
        # that attribute this would raise AttributeError. Confirm intent.
        self.assertIs(self.comment.tag, markdown.util.etree.test_comment)
    def testCommentIsBlockLevel(self):
        """ Test that an ElementTree Comment is recognized as BlockLevel. """
        md = markdown.Markdown()
        self.assertIs(md.is_block_level(self.comment.tag), False)
    def testCommentSerialization(self):
        """ Test that an ElementTree Comment serializes properly. """
        self.assertEqual(
            markdown.serializers.to_html_string(self.comment),
            '<!--foo-->'
        )
    def testCommentPrettify(self):
        """ Test that an ElementTree Comment is prettified properly. """
        pretty = markdown.treeprocessors.PrettifyTreeprocessor(markdown.Markdown())
        pretty.run(self.comment)
        self.assertEqual(
            markdown.serializers.to_html_string(self.comment),
            '<!--foo-->\n'
        )
class testElementTailTests(unittest.TestCase):
    """ Tests for element tail handling by the prettifier. """

    def setUp(self):
        md_instance = markdown.Markdown()
        self.pretty = markdown.treeprocessors.PrettifyTreeprocessor(md_instance)

    def testBrTailNoNewline(self):
        """ The prettifier must give the last <br> in a tree a newline tail. """
        root = markdown.util.etree.Element('root')
        br = markdown.util.etree.SubElement(root, 'br')
        # A freshly created element carries no tail text.
        self.assertIsNone(br.tail)
        self.pretty.run(root)
        self.assertEqual(br.tail, "\n")
class testSerializers(unittest.TestCase):
    """ Test the html and xhtml serializers.

    NOTE(review): the expected-output literals below contain raw `<`, `&`, `"`
    where the serializers emit escaped entities — this looks like an HTML-entity
    decoding artifact from a copy/extraction step, not the intended fixtures.
    Verify against the upstream test file before relying on these assertions.
    """
    def testHtml(self):
        """ Test HTML serialization. """
        el = markdown.util.etree.Element('div')
        el.set('id', 'foo<&">')
        p = markdown.util.etree.SubElement(el, 'p')
        p.text = 'foo <&escaped>'
        p.set('hidden', 'hidden')
        markdown.util.etree.SubElement(el, 'hr')
        # A subelement with tag None acts as a transparent text container.
        non_element = markdown.util.etree.SubElement(el, None)
        non_element.text = 'non-element text'
        script = markdown.util.etree.SubElement(non_element, 'script')
        script.text = '<&"test\nescaping">'
        el.tail = "tail text"
        self.assertEqual(
            markdown.serializers.to_html_string(el),
            '<div id="foo<&">">'
            '<p hidden>foo <&escaped></p>'
            '<hr>'
            'non-element text'
            '<script><&"test\nescaping"></script>'
            '</div>tail text'
        )
    def testXhtml(self):
        """ Test XHTML serialization. """
        el = markdown.util.etree.Element('div')
        el.set('id', 'foo<&">')
        p = markdown.util.etree.SubElement(el, 'p')
        p.text = 'foo<&escaped>'
        p.set('hidden', 'hidden')
        markdown.util.etree.SubElement(el, 'hr')
        non_element = markdown.util.etree.SubElement(el, None)
        non_element.text = 'non-element text'
        script = markdown.util.etree.SubElement(non_element, 'script')
        script.text = '<&"test\nescaping">'
        el.tail = "tail text"
        self.assertEqual(
            markdown.serializers.to_xhtml_string(el),
            '<div id="foo<&">">'
            '<p hidden="hidden">foo<&escaped></p>'
            '<hr />'
            'non-element text'
            '<script><&"test\nescaping"></script>'
            '</div>tail text'
        )
    def testMixedCaseTags(self):
        """ Test preservation of tag case. """
        el = markdown.util.etree.Element('MixedCase')
        el.text = 'not valid '
        em = markdown.util.etree.SubElement(el, 'EMPHASIS')
        em.text = 'html'
        markdown.util.etree.SubElement(el, 'HR')
        self.assertEqual(
            markdown.serializers.to_xhtml_string(el),
            '<MixedCase>not valid <EMPHASIS>html</EMPHASIS><HR /></MixedCase>'
        )
    def testProsessingInstruction(self):
        """ Test serialization of ProcessingInstruction. """
        pi = ProcessingInstruction('foo', text='<&"test\nescaping">')
        self.assertIs(pi.tag, ProcessingInstruction)
        self.assertEqual(
            markdown.serializers.to_xhtml_string(pi),
            '<?foo <&"test\nescaping">?>'
        )
    def testQNameTag(self):
        """ Test serialization of QName tag. """
        div = markdown.util.etree.Element('div')
        qname = markdown.util.etree.QName('http://www.w3.org/1998/Math/MathML', 'math')
        math = markdown.util.etree.SubElement(div, qname)
        math.set('display', 'block')
        sem = markdown.util.etree.SubElement(math, 'semantics')
        msup = markdown.util.etree.SubElement(sem, 'msup')
        mi = markdown.util.etree.SubElement(msup, 'mi')
        mi.text = 'x'
        mn = markdown.util.etree.SubElement(msup, 'mn')
        mn.text = '2'
        ann = markdown.util.etree.SubElement(sem, 'annotations')
        ann.text = 'x^2'
        self.assertEqual(
            markdown.serializers.to_xhtml_string(div),
            '<div>'
            '<math display="block" xmlns="http://www.w3.org/1998/Math/MathML">'
            '<semantics>'
            '<msup>'
            '<mi>x</mi>'
            '<mn>2</mn>'
            '</msup>'
            '<annotations>x^2</annotations>'
            '</semantics>'
            '</math>'
            '</div>'
        )
    def testQNameAttribute(self):
        """ Test serialization of QName attribute. """
        div = markdown.util.etree.Element('div')
        div.set(markdown.util.etree.QName('foo'), markdown.util.etree.QName('bar'))
        self.assertEqual(
            markdown.serializers.to_xhtml_string(div),
            '<div foo="bar"></div>'
        )
    def testBadQNameTag(self):
        """ Test serialization of QName with no tag. """
        qname = markdown.util.etree.QName('http://www.w3.org/1998/Math/MathML')
        el = markdown.util.etree.Element(qname)
        self.assertRaises(ValueError, markdown.serializers.to_xhtml_string, el)
    def testQNameEscaping(self):
        """ Test QName escaping. """
        qname = markdown.util.etree.QName('<&"test\nescaping">', 'div')
        el = markdown.util.etree.Element(qname)
        self.assertEqual(
            markdown.serializers.to_xhtml_string(el),
            '<div xmlns="<&"test escaping">"></div>'
        )
    def testQNamePreEscaping(self):
        """ Test QName that is already partially escaped. """
        qname = markdown.util.etree.QName('<&"test escaping">', 'div')
        el = markdown.util.etree.Element(qname)
        self.assertEqual(
            markdown.serializers.to_xhtml_string(el),
            '<div xmlns="<&"test escaping">"></div>'
        )
    def buildExtension(self):
        """ Build an extension which registers fakeSerializer. """
        def fakeSerializer(elem):
            # Ignore input and return hardcoded output
            return '<div><p>foo</p></div>'
        class registerFakeSerializer(markdown.extensions.Extension):
            def extendMarkdown(self, md):
                md.output_formats['fake'] = fakeSerializer
        return registerFakeSerializer()
    def testRegisterSerializer(self):
        """ A custom output_format registered by an extension is used;
        the enclosing doc-level <div> is stripped from the result. """
        self.assertEqual(
            markdown.markdown(
                'baz', extensions=[self.buildExtension()], output_format='fake'
            ),
            '<p>foo</p>'
        )
    def testXHTMLOutput(self):
        """ xhtml output renders hard breaks as self-closing <br />. """
        self.assertEqual(
            markdown.markdown('foo \nbar', output_format='xhtml'),
            '<p>foo<br />\nbar</p>'
        )
    def testHTMLOutput(self):
        """ html output renders hard breaks as void <br>. """
        self.assertEqual(
            markdown.markdown('foo \nbar', output_format='html'),
            '<p>foo<br>\nbar</p>'
        )
class testAtomicString(unittest.TestCase):
    """ Test that AtomicStrings are honored (not parsed). """
    def setUp(self):
        md = markdown.Markdown()
        # The 'inline' treeprocessor runs the inline patterns over a tree.
        self.inlineprocessor = md.treeprocessors['inline']
    def testString(self):
        """ Test that a regular string is parsed. """
        tree = markdown.util.etree.Element('div')
        p = markdown.util.etree.SubElement(tree, 'p')
        p.text = 'some *text*'
        new = self.inlineprocessor.run(tree)
        self.assertEqual(
            markdown.serializers.to_html_string(new),
            '<div><p>some <em>text</em></p></div>'
        )
    def testSimpleAtomicString(self):
        """ Test that a simple AtomicString is not parsed. """
        tree = markdown.util.etree.Element('div')
        p = markdown.util.etree.SubElement(tree, 'p')
        p.text = markdown.util.AtomicString('some *text*')
        new = self.inlineprocessor.run(tree)
        self.assertEqual(
            markdown.serializers.to_html_string(new),
            '<div><p>some *text*</p></div>'
        )
    def testNestedAtomicString(self):
        """ Test that a nested AtomicString is not parsed. """
        tree = markdown.util.etree.Element('div')
        p = markdown.util.etree.SubElement(tree, 'p')
        p.text = markdown.util.AtomicString('*some* ')
        span1 = markdown.util.etree.SubElement(p, 'span')
        span1.text = markdown.util.AtomicString('*more* ')
        span2 = markdown.util.etree.SubElement(span1, 'span')
        span2.text = markdown.util.AtomicString('*text* ')
        span3 = markdown.util.etree.SubElement(span2, 'span')
        span3.text = markdown.util.AtomicString('*here*')
        # Tails must be atomic too — they are parsed separately from text.
        span3.tail = markdown.util.AtomicString(' *to*')
        span2.tail = markdown.util.AtomicString(' *test*')
        span1.tail = markdown.util.AtomicString(' *with*')
        new = self.inlineprocessor.run(tree)
        self.assertEqual(
            markdown.serializers.to_html_string(new),
            '<div><p>*some* <span>*more* <span>*text* <span>*here*</span> '
            '*to*</span> *test*</span> *with*</p></div>'
        )
class TestConfigParsing(unittest.TestCase):
    """ Tests for `markdown.util.parseBoolValue`. """

    def assertParses(self, value, result):
        """ Assert that *value* parses (non-strict) to exactly *result*. """
        parsed = markdown.util.parseBoolValue(value, False)
        self.assertIs(parsed, result)

    def testBooleansParsing(self):
        for value, expected in (
            (True, True),
            ('novalue', None),
            ('yES', True),
            ('FALSE', False),
            (0., False),
            ('none', False),
        ):
            self.assertParses(value, expected)

    def testPreserveNone(self):
        # With preserve_none=True both the string 'None' and None stay None.
        for value in ('None', None):
            self.assertIsNone(markdown.util.parseBoolValue(value, preserve_none=True))

    def testInvalidBooleansParsing(self):
        with self.assertRaises(ValueError):
            markdown.util.parseBoolValue('novalue')
class TestCliOptionParsing(unittest.TestCase):
    """ Test parsing of Command Line Interface Options. """
    def setUp(self):
        # Expected defaults produced by parse_options with no arguments.
        self.default_options = {
            'input': None,
            'output': None,
            'encoding': None,
            'output_format': 'xhtml',
            'lazy_ol': True,
            'extensions': [],
            'extension_configs': {},
        }
        self.tempfile = ''
    def tearDown(self):
        # Remove any config file created by create_config_file.
        if os.path.isfile(self.tempfile):
            os.remove(self.tempfile)
    def testNoOptions(self):
        options, logging_level = parse_options([])
        self.assertEqual(options, self.default_options)
        self.assertEqual(logging_level, CRITICAL)
    def testQuietOption(self):
        options, logging_level = parse_options(['-q'])
        self.assertGreater(logging_level, CRITICAL)
    def testVerboseOption(self):
        options, logging_level = parse_options(['-v'])
        self.assertEqual(logging_level, WARNING)
    def testNoisyOption(self):
        options, logging_level = parse_options(['--noisy'])
        self.assertEqual(logging_level, DEBUG)
    def testInputFileOption(self):
        options, logging_level = parse_options(['foo.txt'])
        self.default_options['input'] = 'foo.txt'
        self.assertEqual(options, self.default_options)
    def testOutputFileOption(self):
        options, logging_level = parse_options(['-f', 'foo.html'])
        self.default_options['output'] = 'foo.html'
        self.assertEqual(options, self.default_options)
    def testInputAndOutputFileOptions(self):
        options, logging_level = parse_options(['-f', 'foo.html', 'foo.txt'])
        self.default_options['output'] = 'foo.html'
        self.default_options['input'] = 'foo.txt'
        self.assertEqual(options, self.default_options)
    def testEncodingOption(self):
        options, logging_level = parse_options(['-e', 'utf-8'])
        self.default_options['encoding'] = 'utf-8'
        self.assertEqual(options, self.default_options)
    def testOutputFormatOption(self):
        options, logging_level = parse_options(['-o', 'html'])
        self.default_options['output_format'] = 'html'
        self.assertEqual(options, self.default_options)
    def testNoLazyOlOption(self):
        options, logging_level = parse_options(['-n'])
        self.default_options['lazy_ol'] = False
        self.assertEqual(options, self.default_options)
    def testExtensionOption(self):
        options, logging_level = parse_options(['-x', 'markdown.extensions.footnotes'])
        self.default_options['extensions'] = ['markdown.extensions.footnotes']
        self.assertEqual(options, self.default_options)
    def testMultipleExtensionOptions(self):
        options, logging_level = parse_options([
            '-x', 'markdown.extensions.footnotes',
            '-x', 'markdown.extensions.smarty'
        ])
        self.default_options['extensions'] = [
            'markdown.extensions.footnotes',
            'markdown.extensions.smarty'
        ]
        self.assertEqual(options, self.default_options)
    def create_config_file(self, config):
        """ Helper to create temp config files. """
        if not isinstance(config, markdown.util.string_type):
            # convert to string
            config = yaml.dump(config)
        fd, self.tempfile = tempfile.mkstemp('.yml')
        with os.fdopen(fd, 'w') as fp:
            fp.write(config)
    def testExtensionConfigOption(self):
        config = {
            'markdown.extensions.wikilinks': {
                'base_url': 'http://example.com/',
                'end_url': '.html',
                'html_class': 'test',
            },
            'markdown.extensions.footnotes:FootnotesExtension': {
                'PLACE_MARKER': '~~~footnotes~~~'
            }
        }
        self.create_config_file(config)
        options, logging_level = parse_options(['-c', self.tempfile])
        self.default_options['extension_configs'] = config
        self.assertEqual(options, self.default_options)
    def testBoolExtensionConfigOption(self):
        # FIX: was named `textBoolExtensionConfigOption` — the `text` prefix
        # meant unittest discovery never collected or ran this test.
        config = {
            'markdown.extensions.toc': {
                'title': 'Some Title',
                'anchorlink': True,
                'permalink': True
            }
        }
        self.create_config_file(config)
        options, logging_level = parse_options(['-c', self.tempfile])
        self.default_options['extension_configs'] = config
        self.assertEqual(options, self.default_options)
    def testExtensionConfigOptionAsJSON(self):
        config = {
            'markdown.extensions.wikilinks': {
                'base_url': 'http://example.com/',
                'end_url': '.html',
                'html_class': 'test',
            },
            'markdown.extensions.footnotes:FootnotesExtension': {
                'PLACE_MARKER': '~~~footnotes~~~'
            }
        }
        import json
        self.create_config_file(json.dumps(config))
        options, logging_level = parse_options(['-c', self.tempfile])
        self.default_options['extension_configs'] = config
        self.assertEqual(options, self.default_options)
    def testExtensionConfigOptionMissingFile(self):
        self.assertRaises(IOError, parse_options, ['-c', 'missing_file.yaml'])
    def testExtensionConfigOptionBadFormat(self):
        # INI-style text is not valid YAML, so loading must fail.
        config = """
[footnotes]
PLACE_MARKER= ~~~footnotes~~~
"""
        self.create_config_file(config)
        self.assertRaises(yaml.YAMLError, parse_options, ['-c', self.tempfile])
class TestEscapeAppend(unittest.TestCase):
    """ Tests escape character append. """

    def testAppend(self):
        """ Escapes appended to one instance must not leak into another. """
        first = markdown.Markdown()
        first.ESCAPED_CHARS.append('|')
        self.assertTrue('|' in first.ESCAPED_CHARS)
        # A fresh instance gets its own copy of the escape list.
        second = markdown.Markdown()
        self.assertTrue('|' not in second.ESCAPED_CHARS)
class TestBlockAppend(unittest.TestCase):
    """ Tests block-level element append. """
    def testBlockAppend(self):
        """ Test that appended block-level elements are only in the current instance. """
        md = markdown.Markdown()
        md.block_level_elements.append('test')
        self.assertEqual('test' in md.block_level_elements, True)
        md2 = markdown.Markdown()
        self.assertEqual('test' not in md2.block_level_elements, True)
class TestAncestorExclusion(unittest.TestCase):
    """ Tests exclusion of tags in ancestor list. """
    class AncestorExample(markdown.inlinepatterns.SimpleTagInlineProcessor):
        """ Inline processor that refuses to match inside an <a> ancestor. """
        ANCESTOR_EXCLUDES = ('a',)
        def handleMatch(self, m, data):
            """ Wrap the captured text (group 2) in the configured tag. """
            el = markdown.util.etree.Element(self.tag)
            el.text = m.group(2)
            return el, m.start(0), m.end(0)
    class AncestorExtension(markdown.Extension):
        def __init__(self, *args, **kwargs):
            """Initialize."""
            self.config = {}
        def extendMarkdown(self, md):
            """Modify inline patterns."""
            # +text+ -> <strong>text</strong>, registered at priority 0.
            pattern = r'(\+)([^\+]+)\1'
            md.inlinePatterns.register(TestAncestorExclusion.AncestorExample(pattern, 'strong'), 'ancestor-test', 0)
    def setUp(self):
        """Setup markdown object."""
        self.md = markdown.Markdown(extensions=[TestAncestorExclusion.AncestorExtension()])
    def test_ancestors(self):
        """ Test that an extension can exclude parent tags. """
        test = """
Some +test+ and a [+link+](http://test.com)
"""
        result = """<p>Some <strong>test</strong> and a <a href="http://test.com">+link+</a></p>"""
        self.md.reset()
        self.assertEqual(self.md.convert(test), result)
    def test_ancestors_tail(self):
        """ Test that an extension can exclude parent tags when dealing with a tail. """
        test = """
[***+em+*+strong+**](http://test.com)
"""
        result = """<p><a href="http://test.com"><strong><em>+em+</em>+strong+</strong></a></p>"""
        self.md.reset()
        self.assertEqual(self.md.convert(test), result)
class TestGeneralDeprecations(unittest.TestCase):
    """Test general deprecations."""
    def test_version_deprecation(self):
        """Test that version is deprecated."""
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # Trigger a warning.
            version = markdown.version
            # Verify some things
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
            # The deprecated attribute still returns the real value.
            self.assertEqual(version, markdown.__version__)
    def test_version_info_deprecation(self):
        """Test that version info is deprecated."""
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # Trigger a warning.
            version_info = markdown.version_info
            # Verify some things
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
            # The deprecated attribute still returns the real value.
            self.assertEqual(version_info, markdown.__version_info__)
    def test_deprecation_wrapper_dir(self):
        """Tests the `__dir__` attribute of the class as it replaces the module's."""
        dir_attr = dir(markdown)
        # Deprecated names are hidden from dir(); dunder forms are exposed.
        self.assertNotIn('version', dir_attr)
        self.assertIn('__version__', dir_attr)
        self.assertNotIn('version_info', dir_attr)
        self.assertIn('__version_info__', dir_attr)
| [
"io.BytesIO",
"markdown.blockparser.State",
"markdown.extensions.Extension",
"markdown.util.AtomicString",
"markdown.serializers.to_xhtml_string",
"os.remove",
"markdown.util.etree.Element",
"json.dumps",
"warnings.simplefilter",
"markdown.util.etree.Comment",
"markdown.serializers.to_html_string",
"xml.etree.ElementTree.ProcessingInstruction",
"yaml.dump",
"markdown.util.HtmlStash",
"os.path.isfile",
"markdown.util.etree.SubElement",
"markdown.extensions.footnotes.FootnoteExtension",
"os.fdopen",
"markdown.util.Registry",
"tempfile.mkstemp",
"sys.stdout.seek",
"sys.stdout.read",
"markdown.markdown",
"markdown.Markdown",
"markdown.util.parseBoolValue",
"markdown.markdownFromFile",
"warnings.catch_warnings",
"markdown.util.etree.QName",
"markdown.__main__.parse_options"
]
| [((1426, 1445), 'markdown.Markdown', 'markdown.Markdown', ([], {}), '()\n', (1443, 1445), False, 'import markdown\n'), ((2160, 2203), 'markdown.Markdown', 'markdown.Markdown', ([], {'extensions': "['footnotes']"}), "(extensions=['footnotes'])\n", (2177, 2203), False, 'import markdown\n'), ((2322, 2385), 'markdown.Markdown', 'markdown.Markdown', ([], {'extensions': "['markdown.extensions.footnotes']"}), "(extensions=['markdown.extensions.footnotes'])\n", (2339, 2385), False, 'import markdown\n'), ((2525, 2611), 'markdown.Markdown', 'markdown.Markdown', ([], {'extensions': "['markdown.extensions.footnotes:FootnoteExtension']"}), "(extensions=[\n 'markdown.extensions.footnotes:FootnoteExtension'])\n", (2542, 2611), False, 'import markdown\n'), ((2831, 2840), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (2838, 2840), False, 'from io import BytesIO\n'), ((3024, 3055), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".txt"""'}), "(suffix='.txt')\n", (3040, 3055), False, 'import tempfile\n'), ((3148, 3180), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".html"""'}), "(suffix='.html')\n", (3164, 3180), False, 'import tempfile\n'), ((3315, 3370), 'markdown.markdownFromFile', 'markdown.markdownFromFile', ([], {'input': 'infile', 'output': 'outfile'}), '(input=infile, output=outfile)\n', (3340, 3370), False, 'import markdown\n'), ((3598, 3607), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3605, 3607), False, 'from io import BytesIO\n'), ((3616, 3671), 'markdown.markdownFromFile', 'markdown.markdownFromFile', ([], {'input': 'infile', 'output': 'outfile'}), '(input=infile, output=outfile)\n', (3641, 3671), False, 'import markdown\n'), ((3807, 3834), 'markdown.markdownFromFile', 'markdown.markdownFromFile', ([], {}), '()\n', (3832, 3834), False, 'import markdown\n'), ((3843, 3861), 'sys.stdout.seek', 'sys.stdout.seek', (['(0)'], {}), '(0)\n', (3858, 3861), False, 'import sys\n'), ((4234, 4268), 'markdown.util.etree.Element', 'markdown.util.etree.Element', 
(['"""div"""'], {}), "('div')\n", (4261, 4268), False, 'import markdown\n'), ((5111, 5139), 'markdown.blockparser.State', 'markdown.blockparser.State', ([], {}), '()\n', (5137, 5139), False, 'import markdown\n'), ((6365, 6390), 'markdown.util.HtmlStash', 'markdown.util.HtmlStash', ([], {}), '()\n', (6388, 6390), False, 'import markdown\n'), ((7639, 7663), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (7661, 7663), False, 'import markdown\n'), ((7852, 7876), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (7874, 7876), False, 'import markdown\n'), ((7999, 8023), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (8021, 8023), False, 'import markdown\n'), ((8271, 8295), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (8293, 8295), False, 'import markdown\n'), ((8810, 8834), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (8832, 8834), False, 'import markdown\n'), ((9484, 9508), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (9506, 9508), False, 'import markdown\n'), ((9715, 9739), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (9737, 9739), False, 'import markdown\n'), ((9919, 9943), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (9941, 9943), False, 'import markdown\n'), ((10209, 10233), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (10231, 10233), False, 'import markdown\n'), ((10497, 10521), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (10519, 10521), False, 'import markdown\n'), ((11348, 11372), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (11370, 11372), False, 'import markdown\n'), ((12356, 12380), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (12378, 12380), False, 'import markdown\n'), ((12711, 12735), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (12733, 12735), 
False, 'import markdown\n'), ((13057, 13081), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (13079, 13081), False, 'import markdown\n'), ((14919, 14949), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""'], {}), "('error')\n", (14940, 14949), False, 'import warnings\n'), ((15032, 15064), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""'], {}), "('default')\n", (15053, 15064), False, 'import warnings\n'), ((17172, 17206), 'markdown.util.etree.Comment', 'markdown.util.etree.Comment', (['"""foo"""'], {}), "('foo')\n", (17199, 17206), False, 'import markdown\n'), ((17724, 17743), 'markdown.Markdown', 'markdown.Markdown', ([], {}), '()\n', (17741, 17743), False, 'import markdown\n'), ((18702, 18737), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['"""root"""'], {}), "('root')\n", (18729, 18737), False, 'import markdown\n'), ((18751, 18793), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['root', '"""br"""'], {}), "(root, 'br')\n", (18781, 18793), False, 'import markdown\n'), ((19076, 19110), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['"""div"""'], {}), "('div')\n", (19103, 19110), False, 'import markdown\n'), ((19155, 19194), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['el', '"""p"""'], {}), "(el, 'p')\n", (19185, 19194), False, 'import markdown\n'), ((19271, 19311), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['el', '"""hr"""'], {}), "(el, 'hr')\n", (19301, 19311), False, 'import markdown\n'), ((19334, 19374), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['el', 'None'], {}), '(el, None)\n', (19364, 19374), False, 'import markdown\n'), ((19438, 19491), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['non_element', '"""script"""'], {}), "(non_element, 'script')\n", (19468, 19491), False, 'import markdown\n'), ((19969, 20003), 'markdown.util.etree.Element', 
'markdown.util.etree.Element', (['"""div"""'], {}), "('div')\n", (19996, 20003), False, 'import markdown\n'), ((20048, 20087), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['el', '"""p"""'], {}), "(el, 'p')\n", (20078, 20087), False, 'import markdown\n'), ((20163, 20203), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['el', '"""hr"""'], {}), "(el, 'hr')\n", (20193, 20203), False, 'import markdown\n'), ((20226, 20266), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['el', 'None'], {}), '(el, None)\n', (20256, 20266), False, 'import markdown\n'), ((20330, 20383), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['non_element', '"""script"""'], {}), "(non_element, 'script')\n", (20360, 20383), False, 'import markdown\n'), ((20885, 20925), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['"""MixedCase"""'], {}), "('MixedCase')\n", (20912, 20925), False, 'import markdown\n'), ((20970, 21016), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['el', '"""EMPHASIS"""'], {}), "(el, 'EMPHASIS')\n", (21000, 21016), False, 'import markdown\n'), ((21050, 21090), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['el', '"""HR"""'], {}), "(el, 'HR')\n", (21080, 21090), False, 'import markdown\n'), ((21376, 21435), 'xml.etree.ElementTree.ProcessingInstruction', 'ProcessingInstruction', (['"""foo"""'], {'text': '"""<&"test\nescaping">"""'}), '(\'foo\', text="""<&"test\nescaping">""")\n', (21397, 21435), False, 'from xml.etree.ElementTree import ProcessingInstruction\n'), ((21720, 21754), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['"""div"""'], {}), "('div')\n", (21747, 21754), False, 'import markdown\n'), ((21771, 21842), 'markdown.util.etree.QName', 'markdown.util.etree.QName', (['"""http://www.w3.org/1998/Math/MathML"""', '"""math"""'], {}), "('http://www.w3.org/1998/Math/MathML', 'math')\n", (21796, 21842), False, 
'import markdown\n'), ((21858, 21900), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['div', 'qname'], {}), '(div, qname)\n', (21888, 21900), False, 'import markdown\n'), ((21952, 22001), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['math', '"""semantics"""'], {}), "(math, 'semantics')\n", (21982, 22001), False, 'import markdown\n'), ((22017, 22060), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['sem', '"""msup"""'], {}), "(sem, 'msup')\n", (22047, 22060), False, 'import markdown\n'), ((22074, 22116), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['msup', '"""mi"""'], {}), "(msup, 'mi')\n", (22104, 22116), False, 'import markdown\n'), ((22152, 22194), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['msup', '"""mn"""'], {}), "(msup, 'mn')\n", (22182, 22194), False, 'import markdown\n'), ((22231, 22281), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['sem', '"""annotations"""'], {}), "(sem, 'annotations')\n", (22261, 22281), False, 'import markdown\n'), ((22836, 22870), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['"""div"""'], {}), "('div')\n", (22863, 22870), False, 'import markdown\n'), ((23187, 23250), 'markdown.util.etree.QName', 'markdown.util.etree.QName', (['"""http://www.w3.org/1998/Math/MathML"""'], {}), "('http://www.w3.org/1998/Math/MathML')\n", (23212, 23250), False, 'import markdown\n'), ((23264, 23298), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['qname'], {}), '(qname)\n', (23291, 23298), False, 'import markdown\n'), ((23466, 23524), 'markdown.util.etree.QName', 'markdown.util.etree.QName', (['"""<&"test\nescaping">"""', '"""div"""'], {}), '("""<&"test\nescaping">""", \'div\')\n', (23491, 23524), False, 'import markdown\n'), ((23535, 23569), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['qname'], {}), '(qname)\n', (23562, 23569), False, 'import markdown\n'), 
((23852, 23920), 'markdown.util.etree.QName', 'markdown.util.etree.QName', (['"""<&"test escaping">"""', '"""div"""'], {}), '(\'<&"test escaping">\', \'div\')\n', (23877, 23920), False, 'import markdown\n'), ((23934, 23968), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['qname'], {}), '(qname)\n', (23961, 23968), False, 'import markdown\n'), ((25286, 25305), 'markdown.Markdown', 'markdown.Markdown', ([], {}), '()\n', (25303, 25305), False, 'import markdown\n'), ((25461, 25495), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['"""div"""'], {}), "('div')\n", (25488, 25495), False, 'import markdown\n'), ((25508, 25549), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['tree', '"""p"""'], {}), "(tree, 'p')\n", (25538, 25549), False, 'import markdown\n'), ((25884, 25918), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['"""div"""'], {}), "('div')\n", (25911, 25918), False, 'import markdown\n'), ((25931, 25972), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['tree', '"""p"""'], {}), "(tree, 'p')\n", (25961, 25972), False, 'import markdown\n'), ((25990, 26031), 'markdown.util.AtomicString', 'markdown.util.AtomicString', (['"""some *text*"""'], {}), "('some *text*')\n", (26016, 26031), False, 'import markdown\n'), ((26328, 26362), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['"""div"""'], {}), "('div')\n", (26355, 26362), False, 'import markdown\n'), ((26375, 26416), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['tree', '"""p"""'], {}), "(tree, 'p')\n", (26405, 26416), False, 'import markdown\n'), ((26434, 26471), 'markdown.util.AtomicString', 'markdown.util.AtomicString', (['"""*some* """'], {}), "('*some* ')\n", (26460, 26471), False, 'import markdown\n'), ((26488, 26529), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['p', '"""span"""'], {}), "(p, 'span')\n", (26518, 26529), False, 'import markdown\n'), ((26551, 
26588), 'markdown.util.AtomicString', 'markdown.util.AtomicString', (['"""*more* """'], {}), "('*more* ')\n", (26577, 26588), False, 'import markdown\n'), ((26605, 26650), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['span1', '"""span"""'], {}), "(span1, 'span')\n", (26635, 26650), False, 'import markdown\n'), ((26672, 26709), 'markdown.util.AtomicString', 'markdown.util.AtomicString', (['"""*text* """'], {}), "('*text* ')\n", (26698, 26709), False, 'import markdown\n'), ((26726, 26771), 'markdown.util.etree.SubElement', 'markdown.util.etree.SubElement', (['span2', '"""span"""'], {}), "(span2, 'span')\n", (26756, 26771), False, 'import markdown\n'), ((26793, 26829), 'markdown.util.AtomicString', 'markdown.util.AtomicString', (['"""*here*"""'], {}), "('*here*')\n", (26819, 26829), False, 'import markdown\n'), ((26851, 26886), 'markdown.util.AtomicString', 'markdown.util.AtomicString', (['""" *to*"""'], {}), "(' *to*')\n", (26877, 26886), False, 'import markdown\n'), ((26908, 26945), 'markdown.util.AtomicString', 'markdown.util.AtomicString', (['""" *test*"""'], {}), "(' *test*')\n", (26934, 26945), False, 'import markdown\n'), ((26967, 27004), 'markdown.util.AtomicString', 'markdown.util.AtomicString', (['""" *with*"""'], {}), "(' *with*')\n", (26993, 27004), False, 'import markdown\n'), ((28489, 28518), 'os.path.isfile', 'os.path.isfile', (['self.tempfile'], {}), '(self.tempfile)\n', (28503, 28518), False, 'import os\n'), ((28620, 28637), 'markdown.__main__.parse_options', 'parse_options', (['[]'], {}), '([])\n', (28633, 28637), False, 'from markdown.__main__ import parse_options\n'), ((28809, 28830), 'markdown.__main__.parse_options', 'parse_options', (["['-q']"], {}), "(['-q'])\n", (28822, 28830), False, 'from markdown.__main__ import parse_options\n'), ((28950, 28971), 'markdown.__main__.parse_options', 'parse_options', (["['-v']"], {}), "(['-v'])\n", (28963, 28971), False, 'from markdown.__main__ import parse_options\n'), ((29086, 
29112), 'markdown.__main__.parse_options', 'parse_options', (["['--noisy']"], {}), "(['--noisy'])\n", (29099, 29112), False, 'from markdown.__main__ import parse_options\n'), ((29229, 29255), 'markdown.__main__.parse_options', 'parse_options', (["['foo.txt']"], {}), "(['foo.txt'])\n", (29242, 29255), False, 'from markdown.__main__ import parse_options\n'), ((29432, 29465), 'markdown.__main__.parse_options', 'parse_options', (["['-f', 'foo.html']"], {}), "(['-f', 'foo.html'])\n", (29445, 29465), False, 'from markdown.__main__ import parse_options\n'), ((29653, 29697), 'markdown.__main__.parse_options', 'parse_options', (["['-f', 'foo.html', 'foo.txt']"], {}), "(['-f', 'foo.html', 'foo.txt'])\n", (29666, 29697), False, 'from markdown.__main__ import parse_options\n'), ((29924, 29954), 'markdown.__main__.parse_options', 'parse_options', (["['-e', 'utf-8']"], {}), "(['-e', 'utf-8'])\n", (29937, 29954), False, 'from markdown.__main__ import parse_options\n'), ((30134, 30163), 'markdown.__main__.parse_options', 'parse_options', (["['-o', 'html']"], {}), "(['-o', 'html'])\n", (30147, 30163), False, 'from markdown.__main__ import parse_options\n'), ((30343, 30364), 'markdown.__main__.parse_options', 'parse_options', (["['-n']"], {}), "(['-n'])\n", (30356, 30364), False, 'from markdown.__main__ import parse_options\n'), ((30538, 30592), 'markdown.__main__.parse_options', 'parse_options', (["['-x', 'markdown.extensions.footnotes']"], {}), "(['-x', 'markdown.extensions.footnotes'])\n", (30551, 30592), False, 'from markdown.__main__ import parse_options\n'), ((30806, 30900), 'markdown.__main__.parse_options', 'parse_options', (["['-x', 'markdown.extensions.footnotes', '-x', 'markdown.extensions.smarty']"], {}), "(['-x', 'markdown.extensions.footnotes', '-x',\n 'markdown.extensions.smarty'])\n", (30819, 30900), False, 'from markdown.__main__ import parse_options\n'), ((31386, 31410), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".yml"""'], {}), "('.yml')\n", (31402, 31410), 
False, 'import tempfile\n'), ((31940, 31976), 'markdown.__main__.parse_options', 'parse_options', (["['-c', self.tempfile]"], {}), "(['-c', self.tempfile])\n", (31953, 31976), False, 'from markdown.__main__ import parse_options\n'), ((32404, 32440), 'markdown.__main__.parse_options', 'parse_options', (["['-c', self.tempfile]"], {}), "(['-c', self.tempfile])\n", (32417, 32440), False, 'from markdown.__main__ import parse_options\n'), ((33055, 33091), 'markdown.__main__.parse_options', 'parse_options', (["['-c', self.tempfile]"], {}), "(['-c', self.tempfile])\n", (33068, 33091), False, 'from markdown.__main__ import parse_options\n'), ((33782, 33801), 'markdown.Markdown', 'markdown.Markdown', ([], {}), '()\n', (33799, 33801), False, 'import markdown\n'), ((33909, 33928), 'markdown.Markdown', 'markdown.Markdown', ([], {}), '()\n', (33926, 33928), False, 'import markdown\n'), ((34194, 34213), 'markdown.Markdown', 'markdown.Markdown', ([], {}), '()\n', (34211, 34213), False, 'import markdown\n'), ((34341, 34360), 'markdown.Markdown', 'markdown.Markdown', ([], {}), '()\n', (34358, 34360), False, 'import markdown\n'), ((3069, 3089), 'os.fdopen', 'os.fdopen', (['infd', '"""w"""'], {}), "(infd, 'w')\n", (3078, 3089), False, 'import os\n'), ((3384, 3405), 'os.fdopen', 'os.fdopen', (['outfd', '"""r"""'], {}), "(outfd, 'r')\n", (3393, 3405), False, 'import os\n'), ((4116, 4135), 'markdown.Markdown', 'markdown.Markdown', ([], {}), '()\n', (4133, 4135), False, 'import markdown\n'), ((4371, 4413), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', (['root'], {}), '(root)\n', (4407, 4413), False, 'import markdown\n'), ((10810, 10846), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (10833, 10846), False, 'import warnings\n'), ((10865, 10896), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (10886, 10896), False, 'import warnings\n'), ((11688, 11724), 
'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (11711, 11724), False, 'import warnings\n'), ((11743, 11774), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (11764, 11774), False, 'import warnings\n'), ((13422, 13458), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (13445, 13458), False, 'import warnings\n'), ((13477, 13508), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (13498, 13508), False, 'import warnings\n'), ((13526, 13550), 'markdown.util.Registry', 'markdown.util.Registry', ([], {}), '()\n', (13548, 13550), False, 'import markdown\n'), ((17959, 18008), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', (['self.comment'], {}), '(self.comment)\n', (17994, 18008), False, 'import markdown\n'), ((18217, 18236), 'markdown.Markdown', 'markdown.Markdown', ([], {}), '()\n', (18234, 18236), False, 'import markdown\n'), ((18309, 18358), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', (['self.comment'], {}), '(self.comment)\n', (18344, 18358), False, 'import markdown\n'), ((18566, 18585), 'markdown.Markdown', 'markdown.Markdown', ([], {}), '()\n', (18583, 18585), False, 'import markdown\n'), ((19604, 19643), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', (['el'], {}), '(el)\n', (19639, 19643), False, 'import markdown\n'), ((20496, 20536), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', (['el'], {}), '(el)\n', (20532, 20536), False, 'import markdown\n'), ((21129, 21169), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', (['el'], {}), '(el)\n', (21165, 21169), False, 'import markdown\n'), ((21524, 21564), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', (['pi'], {}), '(pi)\n', (21560, 21564), False, 
'import markdown\n'), ((22345, 22386), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', (['div'], {}), '(div)\n', (22381, 22386), False, 'import markdown\n'), ((22887, 22919), 'markdown.util.etree.QName', 'markdown.util.etree.QName', (['"""foo"""'], {}), "('foo')\n", (22912, 22919), False, 'import markdown\n'), ((22921, 22953), 'markdown.util.etree.QName', 'markdown.util.etree.QName', (['"""bar"""'], {}), "('bar')\n", (22946, 22953), False, 'import markdown\n'), ((22993, 23034), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', (['div'], {}), '(div)\n', (23029, 23034), False, 'import markdown\n'), ((23608, 23648), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', (['el'], {}), '(el)\n', (23644, 23648), False, 'import markdown\n'), ((24007, 24047), 'markdown.serializers.to_xhtml_string', 'markdown.serializers.to_xhtml_string', (['el'], {}), '(el)\n', (24043, 24047), False, 'import markdown\n'), ((24874, 24928), 'markdown.markdown', 'markdown.markdown', (['"""foo \nbar"""'], {'output_format': '"""xhtml"""'}), "('foo \\nbar', output_format='xhtml')\n", (24891, 24928), False, 'import markdown\n'), ((25045, 25098), 'markdown.markdown', 'markdown.markdown', (['"""foo \nbar"""'], {'output_format': '"""html"""'}), "('foo \\nbar', output_format='html')\n", (25062, 25098), False, 'import markdown\n'), ((25664, 25704), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', (['new'], {}), '(new)\n', (25699, 25704), False, 'import markdown\n'), ((26115, 26155), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', (['new'], {}), '(new)\n', (26150, 26155), False, 'import markdown\n'), ((27088, 27128), 'markdown.serializers.to_html_string', 'markdown.serializers.to_html_string', (['new'], {}), '(new)\n', (27123, 27128), False, 'import markdown\n'), ((27384, 27426), 'markdown.util.parseBoolValue', 'markdown.util.parseBoolValue', (['value', 
'(False)'], {}), '(value, False)\n', (27412, 27426), False, 'import markdown\n'), ((27771, 27827), 'markdown.util.parseBoolValue', 'markdown.util.parseBoolValue', (['"""None"""'], {'preserve_none': '(True)'}), "('None', preserve_none=True)\n", (27799, 27827), False, 'import markdown\n'), ((27855, 27909), 'markdown.util.parseBoolValue', 'markdown.util.parseBoolValue', (['None'], {'preserve_none': '(True)'}), '(None, preserve_none=True)\n', (27883, 27909), False, 'import markdown\n'), ((28532, 28556), 'os.remove', 'os.remove', (['self.tempfile'], {}), '(self.tempfile)\n', (28541, 28556), False, 'import os\n'), ((31340, 31357), 'yaml.dump', 'yaml.dump', (['config'], {}), '(config)\n', (31349, 31357), False, 'import yaml\n'), ((31424, 31442), 'os.fdopen', 'os.fdopen', (['fd', '"""w"""'], {}), "(fd, 'w')\n", (31433, 31442), False, 'import os\n'), ((33002, 33020), 'json.dumps', 'json.dumps', (['config'], {}), '(config)\n', (33012, 33020), False, 'import json\n'), ((34773, 34810), 'markdown.util.etree.Element', 'markdown.util.etree.Element', (['self.tag'], {}), '(self.tag)\n', (34800, 34810), False, 'import markdown\n'), ((36334, 36370), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (36357, 36370), False, 'import warnings\n'), ((36446, 36477), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (36467, 36477), False, 'import warnings\n'), ((36871, 36907), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (36894, 36907), False, 'import warnings\n'), ((36983, 37014), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (37004, 37014), False, 'import warnings\n'), ((2030, 2049), 'markdown.extensions.footnotes.FootnoteExtension', 'FootnoteExtension', ([], {}), '()\n', (2047, 2049), False, 'from markdown.extensions.footnotes import FootnoteExtension\n'), ((3887, 3904), 'sys.stdout.read', 
'sys.stdout.read', ([], {}), '()\n', (3902, 3904), False, 'import sys\n'), ((16602, 16633), 'markdown.extensions.Extension', 'markdown.extensions.Extension', ([], {}), '()\n', (16631, 16633), False, 'import markdown\n')] |
import os
class StressedNetConfig:
    """Configuration for stressed-net training.

    Holds the environmental-constraint and stress hyper-parameters, each
    validated to lie in the range [0.0 - 1.0), plus the folder where models
    are saved (created on demand).
    """

    def __init__(self,
                 synaptic_environmental_constraint=0.8,
                 group_environmental_constraint=0.6,
                 stress_factor=0.8,
                 save_folder=os.path.expanduser("~/.nervous/models/")):
        self._synaptic_environmental_constraint = synaptic_environmental_constraint
        self._group_environmental_constraint = group_environmental_constraint
        self._stress_factor = stress_factor
        self._save_folder = save_folder
        self._sanitize()

    def _sanitize(self):
        """Validate all parameters and ensure the save folder exists.

        Raises:
            ValueError: if any constraint or the stress factor is outside
                the range [0.0 - 1.0).
        """
        # BUG FIX: the original checks read `1. < value <= 0.`, a chained
        # comparison that is always False, so validation never fired.
        # Each value must lie in [0.0 - 1.0), matching the error messages.
        if not 0.0 <= self._group_environmental_constraint < 1.0:
            raise ValueError("Group environmental constraint has to be in the range [0. - 1.)")
        if not 0.0 <= self._synaptic_environmental_constraint < 1.0:
            raise ValueError("Synaptic environmental constraint has to be in the range [0. - 1.)")
        if not 0.0 <= self._stress_factor < 1.0:
            raise ValueError("Stress factor has to be in the range [0. - 1.)")
        if not os.path.exists(self._save_folder):
            os.makedirs(self._save_folder)

    @property
    def synaptic_environmental_constraint(self):
        """Synaptic environmental constraint, in [0.0 - 1.0)."""
        return self._synaptic_environmental_constraint

    @synaptic_environmental_constraint.setter
    def synaptic_environmental_constraint(self, value):
        self._synaptic_environmental_constraint = value
        self._sanitize()

    @property
    def group_environmental_constraint(self):
        """Group environmental constraint, in [0.0 - 1.0)."""
        return self._group_environmental_constraint

    @group_environmental_constraint.setter
    def group_environmental_constraint(self, value):
        self._group_environmental_constraint = value
        self._sanitize()

    @property
    def stress_factor(self):
        """Stress factor, in [0.0 - 1.0)."""
        return self._stress_factor

    @stress_factor.setter
    def stress_factor(self, value):
        self._stress_factor = value
        self._sanitize()

    @property
    def save_folder(self):
        """Directory where models are stored; created if missing."""
        return self._save_folder

    @save_folder.setter
    def save_folder(self, value):
        self._save_folder = value
        self._sanitize()

    def __getitem__(self, item):
        # Dict-style access to the underscore-prefixed attributes,
        # e.g. config["_stress_factor"]; "self" is deliberately rejected.
        if item == "self":
            raise ValueError("Hahaha")
        return self.__dict__[item]
| [
"os.makedirs",
"os.path.exists",
"os.path.expanduser"
]
| [((235, 275), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.nervous/models/"""'], {}), "('~/.nervous/models/')\n", (253, 275), False, 'import os\n'), ((1030, 1063), 'os.path.exists', 'os.path.exists', (['self._save_folder'], {}), '(self._save_folder)\n', (1044, 1063), False, 'import os\n'), ((1077, 1107), 'os.makedirs', 'os.makedirs', (['self._save_folder'], {}), '(self._save_folder)\n', (1088, 1107), False, 'import os\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FTRL"""
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.common.parameter import Parameter
from mindspore.common import Tensor
import mindspore.common.dtype as mstype
from mindspore._checkparam import Validator as validator
from mindspore._checkparam import Rel
from .optimizer import Optimizer, apply_decay, grad_scale
ftrl_opt = C.MultitypeFuncGraph("ftrl_opt")


@ftrl_opt.register("Function", "Tensor", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt(opt, learning_rate, l1, l2, lr_power, linear, gradient, weight, moment):
    """Run the ApplyFtrl primitive on one weight and report success."""
    return F.depend(True, opt(weight, moment, linear, gradient, learning_rate, l1, l2, lr_power))
def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale=1.0, weight_decay=0.0,
                 prim_name=None):
    """Validate the types and value ranges of the FTRL hyper-parameters."""
    # Float parameters: (name, value, bound, relation to the bound),
    # checked in the same order as before.
    float_params = (
        ("initial_accum", initial_accum, 0.0, Rel.GE),
        ("learning_rate", learning_rate, 0.0, Rel.GT),
        ("lr_power", lr_power, 0.0, Rel.LE),
        ("l1", l1, 0.0, Rel.GE),
        ("l2", l2, 0.0, Rel.GE),
    )
    for arg_name, arg_value, bound, rel in float_params:
        validator.check_value_type(arg_name, arg_value, [float], prim_name)
        validator.check_number(arg_name, arg_value, bound, rel, prim_name)

    validator.check_value_type("use_locking", use_locking, [bool], prim_name)

    validator.check_value_type("loss_scale", loss_scale, [float], prim_name)
    validator.check_number("loss_scale", loss_scale, 1.0, Rel.GE, prim_name)

    validator.check_value_type("weight_decay", weight_decay, [float], prim_name)
    validator.check_number("weight_decay", weight_decay, 0.0, Rel.GE, prim_name)
class FTRL(Optimizer):
    """
    Implement the FTRL algorithm with ApplyFtrl Operator.

    FTRL is an online convex optimization algorithm that adaptively chooses its regularization function
    based on the loss functions. Refer to paper `Adaptive Bound Optimization for Online Convex Optimization
    <https://arxiv.org/abs/1002.4908>`_. Refer to paper `Ad Click Prediction: a View from the Trenches
    <https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf>`_ for engineering document.

    Args:
        params (list[Parameter]): A list of parameter, which will be updated. The element in `params`
            should be Parameter.
        initial_accum (float): The starting value for accumulators, must be zero or positive values. Default: 0.1.
        learning_rate (float): The learning rate value, should be positive. Default: 0.001.
        lr_power (float): Learning rate power controls how the learning rate decreases during training, must be less
            than or equal to zero. Use fixed learning rate if lr_power is zero. Default: -0.5.
        l1 (float): l1 regularization strength, must be greater than or equal to zero. Default: 0.0.
        l2 (float): l2 regularization strength, must be greater than or equal to zero. Default: 0.0.
        use_locking (bool): If True use locks for update operation. Default: False.
        loss_scale (float): Value for the loss scale. It should be equal to or greater than 1.0. Default: 1.0.
        wegith_decay (float): Weight decay value to multiply weight, must be zero or positive value. Default: 0.0.

    Inputs:
        - **grads** (tuple[Tensor]) - The gradients of `params` in optimizer, the shape is as same as the `params`
          in optimizer.

    Outputs:
        tuple[Parameter], the updated parameters, the shape is the same as `params`.

    Examples:
        >>> net = Net()
        >>> loss = nn.SoftmaxCrossEntropyWithLogits()
        >>> opt = nn.FTRL(net.trainable_params())
        >>> model = Model(net, loss_fn=loss, optimizer=opt, metrics=None)
    """
    def __init__(self, params, initial_accum=0.1, learning_rate=0.001, lr_power=-0.5, l1=0.0, l2=0.0,
                 use_locking=False, loss_scale=1.0, weight_decay=0.0):
        super(FTRL, self).__init__(learning_rate, params)
        # Validate hyper-parameter types and ranges up front.
        _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale, weight_decay,
                     self.cls_name)
        # Per-parameter accumulator and linear slots consumed by ApplyFtrl.
        self.moments = self.parameters.clone(prefix="moments", init=initial_accum)
        self.linear = self.parameters.clone(prefix="linear", init='zeros')
        self.l1 = l1
        self.l2 = l2
        self.lr_power = lr_power
        # Gradients are multiplied by 1/loss_scale to undo loss scaling.
        self.reciprocal_scale = 1.0 / loss_scale
        self.weight_decay = weight_decay
        # One True flag per parameter: weight decay applies to all of them.
        self.decay_tf = tuple((lambda: True)() for x in self.parameters)
        self.hyper_map = C.HyperMap()
        self.opt = P.ApplyFtrl(use_locking=use_locking)
        self.one = Tensor(1, mstype.int32)

    def construct(self, grads):
        """Apply one FTRL update step to every parameter; returns the success flags."""
        params = self.parameters
        moments = self.moments
        linear = self.linear
        if self.weight_decay > 0.0:
            # Fold weight decay into the gradients before scaling.
            grads = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_tf, params, grads)
        if self.reciprocal_scale != 1.0:
            # Undo the loss scaling applied during mixed-precision training.
            grads = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), grads)
        lr = self.learning_rate
        # Run the fused ApplyFtrl kernel over each (linear, grad, param, moment) tuple.
        success = self.hyper_map(F.partial(ftrl_opt, self.opt, lr, self.l1, self.l2, self.lr_power),
                                 linear, grads, params, moments)
        return success
| [
"mindspore.ops.composite.HyperMap",
"mindspore.ops.composite.MultitypeFuncGraph",
"mindspore._checkparam.Validator.check_value_type",
"mindspore.common.Tensor",
"mindspore.ops.functional.partial",
"mindspore._checkparam.Validator.check_number",
"mindspore.ops.operations.ApplyFtrl"
]
| [((1043, 1075), 'mindspore.ops.composite.MultitypeFuncGraph', 'C.MultitypeFuncGraph', (['"""ftrl_opt"""'], {}), "('ftrl_opt')\n", (1063, 1075), True, 'from mindspore.ops import functional as F, composite as C, operations as P\n'), ((1655, 1733), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', (['"""initial_accum"""', 'initial_accum', '[float]', 'prim_name'], {}), "('initial_accum', initial_accum, [float], prim_name)\n", (1681, 1733), True, 'from mindspore._checkparam import Validator as validator\n'), ((1738, 1816), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', (['"""initial_accum"""', 'initial_accum', '(0.0)', 'Rel.GE', 'prim_name'], {}), "('initial_accum', initial_accum, 0.0, Rel.GE, prim_name)\n", (1760, 1816), True, 'from mindspore._checkparam import Validator as validator\n'), ((1822, 1900), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', (['"""learning_rate"""', 'learning_rate', '[float]', 'prim_name'], {}), "('learning_rate', learning_rate, [float], prim_name)\n", (1848, 1900), True, 'from mindspore._checkparam import Validator as validator\n'), ((1905, 1983), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', (['"""learning_rate"""', 'learning_rate', '(0.0)', 'Rel.GT', 'prim_name'], {}), "('learning_rate', learning_rate, 0.0, Rel.GT, prim_name)\n", (1927, 1983), True, 'from mindspore._checkparam import Validator as validator\n'), ((1989, 2057), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', (['"""lr_power"""', 'lr_power', '[float]', 'prim_name'], {}), "('lr_power', lr_power, [float], prim_name)\n", (2015, 2057), True, 'from mindspore._checkparam import Validator as validator\n'), ((2062, 2130), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', (['"""lr_power"""', 'lr_power', '(0.0)', 'Rel.LE', 'prim_name'], {}), "('lr_power', lr_power, 0.0, Rel.LE, prim_name)\n", (2084, 2130), 
True, 'from mindspore._checkparam import Validator as validator\n'), ((2136, 2192), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', (['"""l1"""', 'l1', '[float]', 'prim_name'], {}), "('l1', l1, [float], prim_name)\n", (2162, 2192), True, 'from mindspore._checkparam import Validator as validator\n'), ((2197, 2253), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', (['"""l1"""', 'l1', '(0.0)', 'Rel.GE', 'prim_name'], {}), "('l1', l1, 0.0, Rel.GE, prim_name)\n", (2219, 2253), True, 'from mindspore._checkparam import Validator as validator\n'), ((2259, 2315), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', (['"""l2"""', 'l2', '[float]', 'prim_name'], {}), "('l2', l2, [float], prim_name)\n", (2285, 2315), True, 'from mindspore._checkparam import Validator as validator\n'), ((2320, 2376), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', (['"""l2"""', 'l2', '(0.0)', 'Rel.GE', 'prim_name'], {}), "('l2', l2, 0.0, Rel.GE, prim_name)\n", (2342, 2376), True, 'from mindspore._checkparam import Validator as validator\n'), ((2382, 2455), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', (['"""use_locking"""', 'use_locking', '[bool]', 'prim_name'], {}), "('use_locking', use_locking, [bool], prim_name)\n", (2408, 2455), True, 'from mindspore._checkparam import Validator as validator\n'), ((2461, 2533), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', (['"""loss_scale"""', 'loss_scale', '[float]', 'prim_name'], {}), "('loss_scale', loss_scale, [float], prim_name)\n", (2487, 2533), True, 'from mindspore._checkparam import Validator as validator\n'), ((2538, 2610), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', (['"""loss_scale"""', 'loss_scale', '(1.0)', 'Rel.GE', 'prim_name'], {}), "('loss_scale', loss_scale, 1.0, Rel.GE, prim_name)\n", (2560, 2610), True, 'from 
mindspore._checkparam import Validator as validator\n'), ((2616, 2692), 'mindspore._checkparam.Validator.check_value_type', 'validator.check_value_type', (['"""weight_decay"""', 'weight_decay', '[float]', 'prim_name'], {}), "('weight_decay', weight_decay, [float], prim_name)\n", (2642, 2692), True, 'from mindspore._checkparam import Validator as validator\n'), ((2697, 2773), 'mindspore._checkparam.Validator.check_number', 'validator.check_number', (['"""weight_decay"""', 'weight_decay', '(0.0)', 'Rel.GE', 'prim_name'], {}), "('weight_decay', weight_decay, 0.0, Rel.GE, prim_name)\n", (2719, 2773), True, 'from mindspore._checkparam import Validator as validator\n'), ((5633, 5645), 'mindspore.ops.composite.HyperMap', 'C.HyperMap', ([], {}), '()\n', (5643, 5645), True, 'from mindspore.ops import functional as F, composite as C, operations as P\n'), ((5665, 5701), 'mindspore.ops.operations.ApplyFtrl', 'P.ApplyFtrl', ([], {'use_locking': 'use_locking'}), '(use_locking=use_locking)\n', (5676, 5701), True, 'from mindspore.ops import functional as F, composite as C, operations as P\n'), ((5721, 5744), 'mindspore.common.Tensor', 'Tensor', (['(1)', 'mstype.int32'], {}), '(1, mstype.int32)\n', (5727, 5744), False, 'from mindspore.common import Tensor\n'), ((6209, 6275), 'mindspore.ops.functional.partial', 'F.partial', (['ftrl_opt', 'self.opt', 'lr', 'self.l1', 'self.l2', 'self.lr_power'], {}), '(ftrl_opt, self.opt, lr, self.l1, self.l2, self.lr_power)\n', (6218, 6275), True, 'from mindspore.ops import functional as F, composite as C, operations as P\n'), ((5942, 5983), 'mindspore.ops.functional.partial', 'F.partial', (['apply_decay', 'self.weight_decay'], {}), '(apply_decay, self.weight_decay)\n', (5951, 5983), True, 'from mindspore.ops import functional as F, composite as C, operations as P\n'), ((6091, 6135), 'mindspore.ops.functional.partial', 'F.partial', (['grad_scale', 'self.reciprocal_scale'], {}), '(grad_scale, self.reciprocal_scale)\n', (6100, 6135), True, 'from 
mindspore.ops import functional as F, composite as C, operations as P\n')] |
'''
Author: huangbaochen<<EMAIL>>
Date: 2021-12-11 20:04:19
LastEditTime: 2021-12-11 21:46:16
LastEditors: huangbaochen<<EMAIL>>
Description: 测试Try单子
No MERCY
'''
import pytest
from fppy.try_monad import Try, Success, Fail
from fppy.option import Just, Nothing
@pytest.mark.try_monad
def test_try_apply():
    """Try.apply and the Try constructor both wrap a value in Success."""
    expected = Success(1)
    assert Try.apply(1) == expected
    assert Try(1) == expected
@pytest.mark.try_monad
def test_try_unapply():
    """unapply unwraps Success into Just and Fail into Nothing; wrong types raise."""
    assert Success.unapply(Success(1)) == Just(1)
    assert Fail.unapply(Fail(TypeError(), 1)) == Nothing()

    # Fail.unapply rejects anything that is not a Fail.
    for bad_value in (1, Success(1)):
        with pytest.raises(TypeError):
            Fail.unapply(bad_value)

    # Success.unapply rejects anything that is not a Success.
    with pytest.raises(TypeError):
        Success.unapply(1)
    with pytest.raises(TypeError):
        Success.unapply(Fail(Exception(), 1))
@pytest.mark.try_monad
def test_try_monad_map():
    """map transforms a Success and turns a raising function into a Fail.

    CONSISTENCY FIX: this test was the only one in the module missing the
    ``@pytest.mark.try_monad`` marker, so ``-m try_monad`` runs skipped it.
    """
    assert Success(1).map(lambda x: x + 1) == Success(2)
    assert Success(1).map(lambda x: x / 0) ==\
        Fail(ZeroDivisionError('division by zero'), 1)
    # Mapping over a Fail leaves the failure untouched.
    assert Fail(ZeroDivisionError('division by zero'), 1)\
        .map(lambda x: x + 1) ==\
        Fail(ZeroDivisionError('division by zero'), 1)
@pytest.mark.try_monad
def test_try_monad_flat_map():
    """flat_map chains computations and requires the callback to return a Try."""
    assert Success(1).flat_map(lambda x: Success(2)) == Success(2)

    # flat_map over a Fail short-circuits and keeps the failure.
    failed = Fail(ZeroDivisionError('division by zero'), 1)
    assert failed.flat_map(lambda x: Success(1)) ==\
        Fail(ZeroDivisionError('division by zero'), 1)

    # A callback returning a plain value (not a Try) is a type error.
    with pytest.raises(TypeError):
        Success(1).flat_map(lambda x: x + 1)
@pytest.mark.try_monad
def test_try_monad_eq():
    """Fail equality compares both the exception args and the original input."""
    reference = Fail(ZeroDivisionError('division by zero'), 1)
    # Same exception message, same input: equal.
    assert Fail(ZeroDivisionError('division by zero'), 1) == reference
    # Different exception message: not equal.
    assert Fail(ZeroDivisionError('division by'), 1) != reference
    # Different input value: not equal.
    assert Fail(ZeroDivisionError('division by zero'), 0) != reference
@pytest.mark.try_monad
def test_try_monad_get():
    """get returns the wrapped value on Success and the exception on Fail."""
    recovered_error = Fail(ZeroDivisionError('division by zero'), 1).get()
    assert recovered_error.args == ZeroDivisionError('division by zero').args
    assert Success(1).get() == 1
    # pylint: disable=no-member
    assert Try("s").get() == "s"
@pytest.mark.try_monad
def test_try_monad_get_or_else():
    """get_or_else falls back to the default only on Fail."""
    fallback = Fail(ZeroDivisionError('division by zero'), 1).get_or_else(2)
    assert fallback == 2
    assert Success(1).get_or_else(2) == 1
@pytest.mark.try_monad
def test_try_monad_get_error_input():
    """get_error_input exposes the value that triggered the failure."""
    failed = Fail(ZeroDivisionError('division by zero'), 1)
    assert failed.get_error_input() == 1
| [
"fppy.try_monad.Try",
"fppy.try_monad.Fail.unapply",
"fppy.try_monad.Success",
"pytest.raises",
"fppy.option.Nothing",
"fppy.try_monad.Try.apply",
"fppy.try_monad.Success.unapply",
"fppy.option.Just"
]
| [((318, 330), 'fppy.try_monad.Try.apply', 'Try.apply', (['(1)'], {}), '(1)\n', (327, 330), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((334, 344), 'fppy.try_monad.Success', 'Success', (['(1)'], {}), '(1)\n', (341, 344), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((356, 362), 'fppy.try_monad.Try', 'Try', (['(1)'], {}), '(1)\n', (359, 362), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((366, 376), 'fppy.try_monad.Success', 'Success', (['(1)'], {}), '(1)\n', (373, 376), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((467, 474), 'fppy.option.Just', 'Just', (['(1)'], {}), '(1)\n', (471, 474), False, 'from fppy.option import Just, Nothing\n'), ((524, 533), 'fppy.option.Nothing', 'Nothing', ([], {}), '()\n', (531, 533), False, 'from fppy.option import Just, Nothing\n'), ((544, 568), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (557, 568), False, 'import pytest\n'), ((578, 593), 'fppy.try_monad.Fail.unapply', 'Fail.unapply', (['(1)'], {}), '(1)\n', (590, 593), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((604, 628), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (617, 628), False, 'import pytest\n'), ((673, 697), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (686, 697), False, 'import pytest\n'), ((707, 725), 'fppy.try_monad.Success.unapply', 'Success.unapply', (['(1)'], {}), '(1)\n', (722, 725), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((736, 760), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (749, 760), False, 'import pytest\n'), ((881, 891), 'fppy.try_monad.Success', 'Success', (['(2)'], {}), '(2)\n', (888, 891), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((1255, 1265), 'fppy.try_monad.Success', 'Success', (['(2)'], {}), '(2)\n', (1262, 1265), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((1434, 1458), 'pytest.raises', 'pytest.raises', 
(['TypeError'], {}), '(TypeError)\n', (1447, 1458), False, 'import pytest\n'), ((452, 462), 'fppy.try_monad.Success', 'Success', (['(1)'], {}), '(1)\n', (459, 462), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((651, 661), 'fppy.try_monad.Success', 'Success', (['(1)'], {}), '(1)\n', (658, 661), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((846, 856), 'fppy.try_monad.Success', 'Success', (['(1)'], {}), '(1)\n', (853, 856), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((903, 913), 'fppy.try_monad.Success', 'Success', (['(1)'], {}), '(1)\n', (910, 913), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((1210, 1220), 'fppy.try_monad.Success', 'Success', (['(1)'], {}), '(1)\n', (1217, 1220), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((1240, 1250), 'fppy.try_monad.Success', 'Success', (['(2)'], {}), '(2)\n', (1247, 1250), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((1353, 1363), 'fppy.try_monad.Success', 'Success', (['(1)'], {}), '(1)\n', (1360, 1363), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((1468, 1478), 'fppy.try_monad.Success', 'Success', (['(1)'], {}), '(1)\n', (1475, 1478), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((2098, 2108), 'fppy.try_monad.Success', 'Success', (['(1)'], {}), '(1)\n', (2105, 2108), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((2164, 2172), 'fppy.try_monad.Try', 'Try', (['"""s"""'], {}), "('s')\n", (2167, 2172), False, 'from fppy.try_monad import Try, Success, Fail\n'), ((2345, 2355), 'fppy.try_monad.Success', 'Success', (['(1)'], {}), '(1)\n', (2352, 2355), False, 'from fppy.try_monad import Try, Success, Fail\n')] |
import csv
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType

# Start (or reuse) a local Spark session for this assignment.
spark = SparkSession.builder.appName("Assignment4").getOrCreate()
sc = spark.sparkContext

# load data to dataframe
# NOTE(review): column positions below (x[0]..x[7]) follow the CSV header
# order — presumably x[1] is country, x[3]/x[6] are name fields, x[4] is
# Income, x[5] is profession and x[7] is a TRUE/FALSE string flag; confirm
# against fake_data.csv before changing any index.
path = 'fake_data.csv'
df = spark.read.format('csv').option('header','true').load(path)

# cast income as an integer
df = df.withColumn('Income', df['Income'].cast(IntegerType()))

# Question 1
# Country with the most distinct values in column 0: group (x[1], x[0])
# pairs by country, count unique x[0] values, take the top country.
print('*' * 30)
print('\nQuestion 1\n')
print(df.rdd.map(lambda x: (x[1], x[0])).groupByKey().mapValues(lambda vals: len(set(vals))).sortBy(lambda a: a[1], ascending = False).take(1))
print('\n\n')

# Question 2
# Average income restricted to rows whose country is the USA.
print('*' * 30)
print('\nQuestion 2\n')
print(df.rdd.filter(lambda v: v[1] == 'United States of America').map(lambda x: (x[1], x[4])).groupByKey().mapValues(lambda x: sum(x) / len(x)).collect())
print('\n\n')

# Question 3
# Count rows with income above 100000 where the x[7] flag is the string 'FALSE'.
print('*' * 30)
print('\nQuestion 3\n')
print(df.rdd.filter(lambda v: v[4] > 100000).filter(lambda v: v[7] == 'FALSE').count())
print('\n\n')

# Question 4
# Top 10 US rows by income, projected to (x[3], x[6], income, x[5]).
print('*' * 30)
print('\nQuestion 4\n')
print(df.rdd.filter(lambda v: v[1] == 'United States of America').sortBy(lambda x: x[4], ascending = False).map(lambda x: (x[3], x[6], x[4], x[5])).take(10))
print('\n\n')

# Question 5
# Number of distinct x[5] values: groupBy yields one element per group,
# so count() returns the number of groups.
print('*' * 30)
print('\nQuestion 5\n')
print(df.rdd.groupBy(lambda x: x[5]).count())
print('\n\n')

# Question 6
# Count 'Writer' rows earning less than 100000.
print('*' * 30)
print('\nQuestion 6\n')
print(df.rdd.filter(lambda v: v[5] == 'Writer').filter(lambda x: x[4] < 100000).count())
print('\n\n')
| [
"pyspark.sql.types.IntegerType",
"pyspark.sql.SparkSession.builder.appName"
]
| [((100, 143), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['"""Assignment4"""'], {}), "('Assignment4')\n", (128, 143), False, 'from pyspark.sql import SparkSession\n'), ((372, 385), 'pyspark.sql.types.IntegerType', 'IntegerType', ([], {}), '()\n', (383, 385), False, 'from pyspark.sql.types import IntegerType\n')] |
import unittest
from appium import webdriver
class MSiteDefaultBrowserAndroidUITests(unittest.TestCase):
    """UI smoke test that opens a URL in Android's stock browser via Appium."""

    def setUp(self):
        """Start an Appium session against the stock Android browser.

        NOTE(review): the stock browser package is absent on Android >= 6.0,
        so the chosen AVD must still ship it — confirm the emulator image.
        """
        capabilities = {
            'platformName': 'Android',
            'deviceName': 'Android Emulator',
            'appPackage': 'com.android.browser',
            'appActivity': 'com.android.browser.BrowserActivity',
            'avd': 'samsung_galaxy_s6_6.0',
        }
        self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', capabilities)

    def test_open_url(self):
        """Navigate the browser to the target URL."""
        self.driver.get('http://targeturl.com')

    def tearDown(self):
        """End the Appium session."""
        self.driver.quit()
if __name__ == '__main__':
    # Build and run the suite explicitly so verbosity can be controlled.
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(MSiteDefaultBrowserAndroidUITests)
    unittest.TextTestRunner(verbosity=2).run(suite)
| [
"unittest.TextTestRunner",
"unittest.TestLoader",
"appium.webdriver.Remote"
]
| [((491, 553), 'appium.webdriver.Remote', 'webdriver.Remote', (['"""http://127.0.0.1:4723/wd/hub"""', 'desired_caps'], {}), "('http://127.0.0.1:4723/wd/hub', desired_caps)\n", (507, 553), False, 'from appium import webdriver\n'), ((724, 745), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (743, 745), False, 'import unittest\n'), ((807, 843), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (830, 843), False, 'import unittest\n')] |
'''Functional tests for CPG'''
from .. import CircuitPlayground
from .. import __version__ as CircuitPlaygroundVersion
import time
def funcTest(timestamps: bool = False) -> None:
    """Run a standard battery of functional tests against an attached CPG."""
    cpg = CircuitPlayground()
    mode = 'with' if timestamps else 'without'
    _printFuncTestHeadingWithDeliLine(
        f'cpg_scpi v{CircuitPlaygroundVersion}\n'
        f'RUNNING SOME FUNCTIONAL-TESTS WITH THE CPG {mode} timestamps ...\n')
    # Disabled checks (enable as needed):
    # test_led(cpg)
    # test_buttonAny(cpg, timestamps)
    # test_switch(cpg, timestamps)
    for check in (test_temp, test_light, test_acc, test_touch):
        check(cpg, timestamps)
    _printFuncTestHeadingWithDeliLine('DONE WITH FUNCTIONAL-TESTS')
    _printFuncTestDeliLine()
def _printCountdown(start: int = 3) -> None:
for i in range(start, 0, -1):
print(i, end=" ", flush=True)
time.sleep(1)
print('', flush=True)
def _printFuncTestDeliLine() -> None:
    """Print a fixed-width delimiter line of 'X' characters."""
    print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
def _printFuncTestHeadingWithDeliLine(heading) -> None:
    """Print a delimiter line followed by the given heading text."""
    _printFuncTestDeliLine()
    print(heading)
def test_buttonAny(cpg, timestamps) -> None:
    """Poll the combined-button state ten times and print each sample."""
    if timestamps:
        heading = '| count | timestamp | any button |'
        row_fmt = '| {:5} | {:12.3f} | {!s:10} |'
    else:
        heading = '| count | any button |'
        row_fmt = '| {:5} | {!s:10} |'
    _printFuncTestHeadingWithDeliLine('Button-Test: Press left or right button...')
    print(heading)
    _printCountdown(3)
    for remaining in range(10, 0, -1):
        if timestamps:
            row = (remaining, *cpg.buttonAny_wts())
        else:
            row = (remaining, cpg.buttonAny())
        print(row_fmt.format(*row))
        cpg.wait(0.5)
def test_switch(cpg, timestamps) -> None:
    """Poll the slider-switch position ten times and print each sample."""
    if timestamps:
        heading = '| count | timestamp | switch |'
        row_fmt = '| {:5} | {:12.3f} | {!s:6} |'
    else:
        heading = '| count | switch |'
        row_fmt = '| {:5} | {!s:6} |'
    _printFuncTestHeadingWithDeliLine('Switch-Test: Change slider switch position...')
    print(heading)
    _printCountdown(3)
    for remaining in range(10, 0, -1):
        if timestamps:
            row = (remaining, *cpg.switch_wts())
        else:
            row = (remaining, cpg.switch())
        print(row_fmt.format(*row))
        cpg.wait(0.5)
def test_temp(cpg, timestamps) -> None:
    """Sample the temperature sensor twenty times and print each reading."""
    if timestamps:
        heading = '| count | timestamp | temp °C |'
        row_fmt = '| {:5} | {:12.3f} | {:7.2f} |'
    else:
        heading = '| count | temp °C |'
        row_fmt = '| {:5} | {:7.2f} |'
    _printFuncTestHeadingWithDeliLine('Temp-Sensor-Test ...')
    print(heading)
    _printCountdown(3)
    for remaining in range(20, 0, -1):
        if timestamps:
            row = (remaining, *cpg.temp_wts())
        else:
            row = (remaining, cpg.temp())
        print(row_fmt.format(*row))
        cpg.wait(0.5)
def test_light(cpg, timestamps) -> None:
    """Sample the light sensor twenty times and print each reading."""
    if timestamps:
        heading = '| count | timestamp | light |'
        row_fmt = '| {:5} | {:12.3f} | {:5} |'
    else:
        heading = '| count | light |'
        row_fmt = '| {:5} | {:5} |'
    _printFuncTestHeadingWithDeliLine('Light-Sensor-Test: Move hand over light sensor...')
    print(heading)
    _printCountdown(3)
    for remaining in range(20, 0, -1):
        if timestamps:
            row = (remaining, *cpg.light_wts())
        else:
            row = (remaining, cpg.light())
        print(row_fmt.format(*row))
        cpg.wait(0.5)
def test_acc(cpg, timestamps) -> None:
    """Sample the accelerometer 60 times and print the x/y/z readings."""
    if timestamps:
        heading = '| count | timestamp | x m/s^2 | y m/s^2 | z m/s^2 |'
        row_fmt = '| {:5} | {:12.3f} | {:7.2f} | {:7.2f} | {:7.2f} |'
        sample = cpg.acc_wts
    else:
        heading = '| count | x m/s^2 | y m/s^2 | z m/s^2 |'
        row_fmt = '| {:5} | {:7.2f} | {:7.2f} | {:7.2f} |'
        sample = cpg.acc
    _printFuncTestHeadingWithDeliLine('Accelerometer-Test: Tilt the CPG board...')
    print(heading)
    _printCountdown(3)
    for remaining in range(60, 0, -1):
        print(row_fmt.format(remaining, *sample()))
        cpg.wait(0.2)
def test_touch(cpg, timestamps) -> None:
    """Sample the capacitive touch pads 30 times; print value and bitmask."""
    if timestamps:
        heading = '| count | timestamp | touch | binary |'
        row_fmt = '| {0:5} | {1:12.3f} | {2:5} | {2:08b} |'
    else:
        heading = '| count | touch | binary |'
        row_fmt = '| {0:5} | {1:5} | {1:08b} |'
    _printFuncTestHeadingWithDeliLine('Touch-Sensor-Test: Touch capacitive sensor pads...')
    print(heading)
    _printCountdown(3)
    for remaining in range(30, 0, -1):
        if timestamps:
            row = (remaining, *cpg.touch_wts())
        else:
            row = (remaining, cpg.touch())
        print(row_fmt.format(*row))
        cpg.wait(0.5)
def test_led(cpg) -> None:
    """Flash LEDs and run a short chasing light."""
    _printFuncTestHeadingWithDeliLine('LED-Test: Flash LEDs and run a short chasing light...')
    print('flashing LEDs...')
    test_ledDemo(cpg)
    # Walk a single lit LED up and back down via bit shifts.
    pattern = 1
    for _ in range(10):
        cpg.led(pattern)
        cpg.wait(0.2)
        pattern <<= 1
    for _ in range(10):
        pattern >>= 1
        cpg.led(pattern)
        cpg.wait(0.2)
    print('flashing LEDs...')
    test_ledDemo(cpg)
def test_ledDemo(cpg) -> None:
    """Run the built-in LED demo three times with short pauses."""
    for _ in range(3):
        cpg.ledDemo()
        cpg.wait(0.2)
def testAccSpeed(cpg, iterations: int = 100) -> None:
    """Measure and report the average duration of one accelerometer read."""
    print(f'Testing acc measurement speed with {iterations} iterations. Please wait ...')
    import timeit
    # timeit.timeit with a callable is equivalent to Timer(stmt=..., setup='pass').
    elapsed = timeit.timeit(lambda: cpg.acc(), number=iterations)
    print(f'Total time: {elapsed:.1f} seconds.')
    print(f'On average {(elapsed*1000/iterations):.1f} ms per measurement.')
def testLightSpeed(cpg, iterations: int = 100) -> None:
    """Measure and report the average duration of one light-sensor read."""
    print(f'Testing light measurement speed with {iterations} iterations. Please wait ...')
    import timeit
    # timeit.timeit with a callable is equivalent to Timer(stmt=..., setup='pass').
    elapsed = timeit.timeit(lambda: cpg.light(), number=iterations)
    print(f'Total time: {elapsed:.1f} seconds.')
    print(f'On average {(elapsed*1000/iterations):.1f} ms per measurement.')
def _testResponseWaitTime(cpg, iterations: int = 10000) -> None:
    """Check that the wait time for unexpected extra responses is long enough."""
    print(f'Testing Response-Wait-Time with {iterations} iterations ...')
    for attempt in range(iterations):
        if attempt % 100 == 0:
            print('try-count', attempt)
        try:
            # Request acc values but expect zero response lines; a timely
            # CPG reply makes _query raise, which is the normal outcome.
            cpg._query('MEAS:ACC?', 0)
        except Exception:
            continue
        # Still here: no exception, so the CPG response was too late.
        print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
        print('ERROR in testResponseWaitTime(): CPG-Response was too late.')
        print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
| [
"time.sleep"
]
| [((989, 1002), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (999, 1002), False, 'import time\n')] |
from django.db import models
class Account(models.Model):
    """Account record holding a class number, a short name, and a numeric password."""
    # NOTE(review): field names break snake_case (clsNb, Name, pw); renaming
    # them would change the database schema, so they are only documented here.
    clsNb = models.IntegerField()  # presumably a class/course number — TODO confirm
    Name = models.CharField(max_length=10)  # display name, at most 10 characters
    pw = models.IntegerField()  # NOTE(review): plain integer password — insecure; consider hashing
    def __str__(self):
        # Shown in the Django admin and anywhere the object is stringified.
        return self.Name
"django.db.models.CharField",
"django.db.models.IntegerField"
]
| [((72, 93), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (91, 93), False, 'from django.db import models\n'), ((105, 136), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (121, 136), False, 'from django.db import models\n'), ((146, 167), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (165, 167), False, 'from django.db import models\n')] |
from math import sqrt
import torch
from torch_geometric.utils import geodesic_distance
def test_geodesic_distance():
    """Exercise geodesic_distance on a 2x2 square mesh made of two triangles."""
    pos = torch.Tensor([[0, 0, 0], [2, 0, 0], [0, 2, 0], [2, 2, 0]])
    face = torch.tensor([[0, 1, 3], [0, 2, 3]]).t()
    diag = sqrt(2)  # normalized diagonal distance across the square

    # Default call: normalized pairwise distances between all vertices.
    out = geodesic_distance(pos, face)
    expected = [
        [0, 1, 1, diag],
        [1, 0, diag, 1],
        [1, diag, 0, 1],
        [diag, 1, 1, 0],
    ]
    assert torch.allclose(out, torch.tensor(expected))
    # Parallel execution must agree with the serial result.
    assert torch.allclose(out, geodesic_distance(pos, face, num_workers=-1))

    # Unnormalized distances are twice as large for this 2x2 mesh.
    out = geodesic_distance(pos, face, norm=False)
    expected = [
        [0, 2, 2, 2 * diag],
        [2, 0, 2 * diag, 2],
        [2, 2 * diag, 0, 2],
        [2 * diag, 2, 2, 0],
    ]
    assert torch.allclose(out, torch.tensor(expected))

    # Explicit source/destination pairs.
    src = torch.tensor([0, 0, 0, 0])
    dest = torch.tensor([0, 1, 2, 3])
    out = geodesic_distance(pos, face, src=src, dest=dest)
    assert torch.allclose(out, torch.tensor([0, 1, 1, diag]))

    # A single source broadcasts against every vertex.
    out = geodesic_distance(pos, face, src=src[0:1])
    assert torch.allclose(out, torch.tensor([0, 1, 1, diag]))

    # dest alone pairs each dest vertex with itself -> all zeros.
    out = geodesic_distance(pos, face, dest=dest)
    assert torch.allclose(out, torch.Tensor([0, 0, 0, 0]))
| [
"torch_geometric.utils.geodesic_distance",
"torch.tensor",
"torch.Tensor",
"math.sqrt"
]
| [((130, 188), 'torch.Tensor', 'torch.Tensor', (['[[0, 0, 0], [2, 0, 0], [0, 2, 0], [2, 2, 0]]'], {}), '([[0, 0, 0], [2, 0, 0], [0, 2, 0], [2, 2, 0]])\n', (142, 188), False, 'import torch\n'), ((252, 280), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', (['pos', 'face'], {}), '(pos, face)\n', (269, 280), False, 'from torch_geometric.utils import geodesic_distance\n'), ((559, 599), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', (['pos', 'face'], {'norm': '(False)'}), '(pos, face, norm=False)\n', (576, 599), False, 'from torch_geometric.utils import geodesic_distance\n'), ((817, 843), 'torch.tensor', 'torch.tensor', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (829, 843), False, 'import torch\n'), ((855, 881), 'torch.tensor', 'torch.tensor', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (867, 881), False, 'import torch\n'), ((892, 940), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', (['pos', 'face'], {'src': 'src', 'dest': 'dest'}), '(pos, face, src=src, dest=dest)\n', (909, 940), False, 'from torch_geometric.utils import geodesic_distance\n'), ((1041, 1083), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', (['pos', 'face'], {'src': 'src[0:1]'}), '(pos, face, src=src[0:1])\n', (1058, 1083), False, 'from torch_geometric.utils import geodesic_distance\n'), ((1184, 1223), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', (['pos', 'face'], {'dest': 'dest'}), '(pos, face, dest=dest)\n', (1201, 1223), False, 'from torch_geometric.utils import geodesic_distance\n'), ((447, 469), 'torch.tensor', 'torch.tensor', (['expected'], {}), '(expected)\n', (459, 469), False, 'import torch\n'), ((502, 546), 'torch_geometric.utils.geodesic_distance', 'geodesic_distance', (['pos', 'face'], {'num_workers': '(-1)'}), '(pos, face, num_workers=-1)\n', (519, 546), False, 'from torch_geometric.utils import geodesic_distance\n'), ((782, 804), 'torch.tensor', 'torch.tensor', (['expected'], {}), '(expected)\n', (794, 
804), False, 'import torch\n'), ((966, 973), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (970, 973), False, 'from math import sqrt\n'), ((1006, 1028), 'torch.tensor', 'torch.tensor', (['expected'], {}), '(expected)\n', (1018, 1028), False, 'import torch\n'), ((1109, 1116), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (1113, 1116), False, 'from math import sqrt\n'), ((1149, 1171), 'torch.tensor', 'torch.tensor', (['expected'], {}), '(expected)\n', (1161, 1171), False, 'import torch\n'), ((1283, 1305), 'torch.Tensor', 'torch.Tensor', (['expected'], {}), '(expected)\n', (1295, 1305), False, 'import torch\n'), ((200, 236), 'torch.tensor', 'torch.tensor', (['[[0, 1, 3], [0, 2, 3]]'], {}), '([[0, 1, 3], [0, 2, 3]])\n', (212, 236), False, 'import torch\n'), ((316, 323), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (320, 323), False, 'from math import sqrt\n'), ((341, 348), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (345, 348), False, 'from math import sqrt\n'), ((366, 373), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (370, 373), False, 'from math import sqrt\n'), ((391, 398), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (395, 398), False, 'from math import sqrt\n'), ((639, 646), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (643, 646), False, 'from math import sqrt\n'), ((668, 675), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (672, 675), False, 'from math import sqrt\n'), ((697, 704), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (701, 704), False, 'from math import sqrt\n'), ((726, 733), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (730, 733), False, 'from math import sqrt\n')] |
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for representing files stored in GridFS."""
import datetime
import io
import math
import os
from bson.int64 import Int64
from bson.son import SON
from bson.binary import Binary
from bson.objectid import ObjectId
from pymongo import ASCENDING
from pymongo.collection import Collection
from pymongo.cursor import Cursor
from pymongo.errors import (ConfigurationError,
CursorNotFound,
DuplicateKeyError,
InvalidOperation,
OperationFailure)
from pymongo.read_preferences import ReadPreference
from gridfs.errors import CorruptGridFile, FileExists, NoFile
# os.SEEK_SET/SEEK_CUR/SEEK_END have existed since Python 2.5, and this
# module is Python 3-only (f-strings, argument-less super()), so the old
# AttributeError fallback to the literals 0/1/2 was dead code.
_SEEK_SET = os.SEEK_SET
_SEEK_CUR = os.SEEK_CUR
_SEEK_END = os.SEEK_END
EMPTY = b""
NEWLN = b"\n"
# Default chunk size, in bytes.  (This was previously a stray string literal,
# which is a no-op expression, not a docstring — constants cannot carry one.)
# Slightly under a power of 2, to work well with server's record allocations.
DEFAULT_CHUNK_SIZE = 255 * 1024
# Index specs that GridIn ensures on fs.chunks and fs.files respectively.
_C_INDEX = SON([("files_id", ASCENDING), ("n", ASCENDING)])
_F_INDEX = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)])
def _grid_in_property(field_name, docstring, read_only=False,
closed_only=False):
"""Create a GridIn property."""
def getter(self):
if closed_only and not self._closed:
raise AttributeError("can only get %r on a closed file" %
field_name)
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
def setter(self, value):
if self._closed:
self._coll.files.update_one({"_id": self._file["_id"]},
{"$set": {field_name: value}})
self._file[field_name] = value
if read_only:
docstring += "\n\nThis attribute is read-only."
elif closed_only:
docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
"can only be read after :meth:`close` "
"has been called.")
if not read_only and not closed_only:
return property(getter, setter, doc=docstring)
return property(getter, doc=docstring)
def _grid_out_property(field_name, docstring):
"""Create a GridOut property."""
def getter(self):
self._ensure_file()
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
docstring += "\n\nThis attribute is read-only."
return property(getter, doc=docstring)
def _clear_entity_type_registry(entity, **kwargs):
"""Clear the given database/collection object's type registry."""
codecopts = entity.codec_options.with_options(type_registry=None)
return entity.with_options(codec_options=codecopts, **kwargs)
def _disallow_transactions(session):
if session and session.in_transaction:
raise InvalidOperation(
'GridFS does not support multi-document transactions')
class GridIn(object):
    """Class to write data to GridFS.
    """
    def __init__(self, root_collection, session=None, **kwargs):
        """Write a file to GridFS
        Application developers should generally not need to
        instantiate this class directly - instead see the methods
        provided by :class:`~gridfs.GridFS`.
        Raises :class:`TypeError` if `root_collection` is not an
        instance of :class:`~pymongo.collection.Collection`.
        Any of the file level options specified in the `GridFS Spec
        <http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
        keyword arguments. Any additional keyword arguments will be
        set as additional fields on the file document. Valid keyword
        arguments include:
        - ``"_id"``: unique ID for this file (default:
          :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
          not have already been used for another file
        - ``"filename"``: human name for the file
        - ``"contentType"`` or ``"content_type"``: valid mime-type
          for the file
        - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
          chunks, in bytes (default: 255 kb)
        - ``"encoding"``: encoding used for this file. Any :class:`str`
          that is written to the file will be converted to :class:`bytes`.
        :Parameters:
          - `root_collection`: root collection to write to
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession` to use for all
            commands
          - `**kwargs` (optional): file level options (see above)
        .. versionchanged:: 4.0
           Removed the `disable_md5` parameter. See
           :ref:`removed-gridfs-checksum` for details.
        .. versionchanged:: 3.7
           Added the `disable_md5` parameter.
        .. versionchanged:: 3.6
           Added ``session`` parameter.
        .. versionchanged:: 3.0
           `root_collection` must use an acknowledged
           :attr:`~pymongo.collection.Collection.write_concern`
        """
        if not isinstance(root_collection, Collection):
            raise TypeError("root_collection must be an "
                            "instance of Collection")
        if not root_collection.write_concern.acknowledged:
            raise ConfigurationError('root_collection must use '
                                     'acknowledged write_concern')
        _disallow_transactions(session)
        # Handle alternative naming
        if "content_type" in kwargs:
            kwargs["contentType"] = kwargs.pop("content_type")
        if "chunk_size" in kwargs:
            kwargs["chunkSize"] = kwargs.pop("chunk_size")
        coll = _clear_entity_type_registry(
            root_collection, read_preference=ReadPreference.PRIMARY)
        # Defaults
        kwargs["_id"] = kwargs.get("_id", ObjectId())
        kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)
        # Use object.__setattr__ to bypass this class's custom __setattr__,
        # which would otherwise record these names in the file document.
        object.__setattr__(self, "_session", session)
        object.__setattr__(self, "_coll", coll)
        object.__setattr__(self, "_chunks", coll.chunks)
        object.__setattr__(self, "_file", kwargs)
        object.__setattr__(self, "_buffer", io.BytesIO())
        object.__setattr__(self, "_position", 0)
        object.__setattr__(self, "_chunk_number", 0)
        object.__setattr__(self, "_closed", False)
        object.__setattr__(self, "_ensured_index", False)
    def __create_index(self, collection, index_key, unique):
        """Create ``index_key`` on ``collection`` unless it already exists."""
        doc = collection.find_one(projection={"_id": 1}, session=self._session)
        if doc is None:
            try:
                index_keys = [index_spec['key'] for index_spec in
                              collection.list_indexes(session=self._session)]
            except OperationFailure:
                index_keys = []
            if index_key not in index_keys:
                collection.create_index(
                    index_key.items(), unique=unique, session=self._session)
    def __ensure_indexes(self):
        """Lazily create the GridFS files/chunks indexes exactly once."""
        if not object.__getattribute__(self, "_ensured_index"):
            _disallow_transactions(self._session)
            self.__create_index(self._coll.files, _F_INDEX, False)
            self.__create_index(self._coll.chunks, _C_INDEX, True)
            object.__setattr__(self, "_ensured_index", True)
    def abort(self):
        """Remove all chunks/files that may have been uploaded and close.
        """
        self._coll.chunks.delete_many(
            {"files_id": self._file['_id']}, session=self._session)
        self._coll.files.delete_one(
            {"_id": self._file['_id']}, session=self._session)
        object.__setattr__(self, "_closed", True)
    @property
    def closed(self):
        """Is this file closed?
        """
        return self._closed
    _id = _grid_in_property("_id", "The ``'_id'`` value for this file.",
                            read_only=True)
    filename = _grid_in_property("filename", "Name of this file.")
    name = _grid_in_property("filename", "Alias for `filename`.")
    content_type = _grid_in_property("contentType", "Mime-type for this file.")
    length = _grid_in_property("length", "Length (in bytes) of this file.",
                               closed_only=True)
    chunk_size = _grid_in_property("chunkSize", "Chunk size for this file.",
                                   read_only=True)
    upload_date = _grid_in_property("uploadDate",
                                    "Date that this file was uploaded.",
                                    closed_only=True)
    md5 = _grid_in_property("md5", "MD5 of the contents of this file "
                            "if an md5 sum was created.",
                            closed_only=True)
    def __getattr__(self, name):
        if name in self._file:
            return self._file[name]
        raise AttributeError("GridIn object has no attribute '%s'" % name)
    def __setattr__(self, name, value):
        # For properties of this instance like _buffer, or descriptors set on
        # the class like filename, use regular __setattr__
        if name in self.__dict__ or name in self.__class__.__dict__:
            object.__setattr__(self, name, value)
        else:
            # All other attributes are part of the document in db.fs.files.
            # Store them to be sent to server on close() or if closed, send
            # them now.
            self._file[name] = value
            if self._closed:
                self._coll.files.update_one({"_id": self._file["_id"]},
                                            {"$set": {name: value}})
    def __flush_data(self, data):
        """Flush `data` to a chunk.
        """
        self.__ensure_indexes()
        if not data:
            return
        assert(len(data) <= self.chunk_size)
        chunk = {"files_id": self._file["_id"],
                 "n": self._chunk_number,
                 "data": Binary(data)}
        try:
            self._chunks.insert_one(chunk, session=self._session)
        except DuplicateKeyError:
            # A chunk with this (files_id, n) already exists: duplicate file.
            self._raise_file_exists(self._file['_id'])
        self._chunk_number += 1
        self._position += len(data)
    def __flush_buffer(self):
        """Flush the buffer contents out to a chunk.
        """
        self.__flush_data(self._buffer.getvalue())
        self._buffer.close()
        self._buffer = io.BytesIO()
    def __flush(self):
        """Flush the file to the database.
        """
        try:
            self.__flush_buffer()
            # The GridFS spec says length SHOULD be an Int64.
            self._file["length"] = Int64(self._position)
            self._file["uploadDate"] = datetime.datetime.utcnow()
            return self._coll.files.insert_one(
                self._file, session=self._session)
        except DuplicateKeyError:
            self._raise_file_exists(self._id)
    def _raise_file_exists(self, file_id):
        """Raise a FileExists exception for the given file_id."""
        raise FileExists("file with _id %r already exists" % file_id)
    def close(self):
        """Flush the file and close it.
        A closed file cannot be written any more. Calling
        :meth:`close` more than once is allowed.
        """
        if not self._closed:
            self.__flush()
            object.__setattr__(self, "_closed", True)
    def read(self, size=-1):
        raise io.UnsupportedOperation('read')
    def readable(self):
        return False
    def seekable(self):
        return False
    def write(self, data):
        """Write data to the file. There is no return value.
        `data` can be either a string of bytes or a file-like object
        (implementing :meth:`read`). If the file has an
        :attr:`encoding` attribute, `data` can also be a
        :class:`str` instance, which will be encoded as
        :attr:`encoding` before being written.
        Due to buffering, the data may not actually be written to the
        database until the :meth:`close` method is called. Raises
        :class:`ValueError` if this file is already closed. Raises
        :class:`TypeError` if `data` is not an instance of
        :class:`bytes`, a file-like object, or an instance of :class:`str`.
        Unicode data is only allowed if the file has an :attr:`encoding`
        attribute.
        :Parameters:
          - `data`: string of bytes or file-like object to be written
            to the file
        """
        if self._closed:
            raise ValueError("cannot write to a closed file")
        try:
            # file-like
            read = data.read
        except AttributeError:
            # string
            if not isinstance(data, (str, bytes)):
                raise TypeError("can only write strings or file-like objects")
            if isinstance(data, str):
                try:
                    data = data.encode(self.encoding)
                except AttributeError:
                    raise TypeError("must specify an encoding for file in "
                                    "order to write str")
            read = io.BytesIO(data).read
        if self._buffer.tell() > 0:
            # Make sure to flush only when _buffer is complete
            space = self.chunk_size - self._buffer.tell()
            if space:
                try:
                    to_write = read(space)
                except BaseException:  # same as the old bare except, made explicit
                    self.abort()
                    raise
                self._buffer.write(to_write)
                if len(to_write) < space:
                    return # EOF or incomplete
            self.__flush_buffer()
        to_write = read(self.chunk_size)
        while to_write and len(to_write) == self.chunk_size:
            self.__flush_data(to_write)
            to_write = read(self.chunk_size)
        self._buffer.write(to_write)
    def writelines(self, sequence):
        """Write a sequence of strings to the file.
        Does not add separators.
        """
        for line in sequence:
            self.write(line)
    def writable(self):
        """GridIn objects are writable; part of the :class:`io` interface.

        Added because :class:`GridOut` implements ``writable`` while this
        class only had the misspelled ``writeable``.
        """
        return True
    def writeable(self):
        # Historical misspelling kept for backward compatibility with
        # existing callers; prefer the io-style name ``writable``.
        return True
    def __enter__(self):
        """Support for the context manager protocol.
        """
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Support for the context manager protocol.
        Close the file and allow exceptions to propagate.
        """
        self.close()
        # propagate exceptions
        return False
class GridOut(io.IOBase):
"""Class to read data out of GridFS.
"""
def __init__(self, root_collection, file_id=None, file_document=None,
session=None):
"""Read a file from GridFS
Application developers should generally not need to
instantiate this class directly - instead see the methods
provided by :class:`~gridfs.GridFS`.
Either `file_id` or `file_document` must be specified,
`file_document` will be given priority if present. Raises
:class:`TypeError` if `root_collection` is not an instance of
:class:`~pymongo.collection.Collection`.
:Parameters:
- `root_collection`: root collection to read from
- `file_id` (optional): value of ``"_id"`` for the file to read
- `file_document` (optional): file document from
`root_collection.files`
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` to use for all
commands
.. versionchanged:: 3.8
For better performance and to better follow the GridFS spec,
:class:`GridOut` now uses a single cursor to read all the chunks in
the file.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.0
Creating a GridOut does not immediately retrieve the file metadata
from the server. Metadata is fetched when first needed.
"""
if not isinstance(root_collection, Collection):
raise TypeError("root_collection must be an "
"instance of Collection")
_disallow_transactions(session)
root_collection = _clear_entity_type_registry(root_collection)
super().__init__()
self.__chunks = root_collection.chunks
self.__files = root_collection.files
self.__file_id = file_id
self.__buffer = EMPTY
self.__chunk_iter = None
self.__position = 0
self._file = file_document
self._session = session
_id = _grid_out_property("_id", "The ``'_id'`` value for this file.")
filename = _grid_out_property("filename", "Name of this file.")
name = _grid_out_property("filename", "Alias for `filename`.")
content_type = _grid_out_property("contentType", "Mime-type for this file.")
length = _grid_out_property("length", "Length (in bytes) of this file.")
chunk_size = _grid_out_property("chunkSize", "Chunk size for this file.")
upload_date = _grid_out_property("uploadDate",
"Date that this file was first uploaded.")
aliases = _grid_out_property("aliases", "List of aliases for this file.")
metadata = _grid_out_property("metadata", "Metadata attached to this file.")
md5 = _grid_out_property("md5", "MD5 of the contents of this file "
"if an md5 sum was created.")
def _ensure_file(self):
if not self._file:
_disallow_transactions(self._session)
self._file = self.__files.find_one({"_id": self.__file_id},
session=self._session)
if not self._file:
raise NoFile("no file in gridfs collection %r with _id %r" %
(self.__files, self.__file_id))
def __getattr__(self, name):
self._ensure_file()
if name in self._file:
return self._file[name]
raise AttributeError("GridOut object has no attribute '%s'" % name)
def readable(self):
return True
def readchunk(self):
"""Reads a chunk at a time. If the current position is within a
chunk the remainder of the chunk is returned.
"""
received = len(self.__buffer)
chunk_data = EMPTY
chunk_size = int(self.chunk_size)
if received > 0:
chunk_data = self.__buffer
elif self.__position < int(self.length):
chunk_number = int((received + self.__position) / chunk_size)
if self.__chunk_iter is None:
self.__chunk_iter = _GridOutChunkIterator(
self, self.__chunks, self._session, chunk_number)
chunk = self.__chunk_iter.next()
chunk_data = chunk["data"][self.__position % chunk_size:]
if not chunk_data:
raise CorruptGridFile("truncated chunk")
self.__position += len(chunk_data)
self.__buffer = EMPTY
return chunk_data
def read(self, size=-1):
"""Read at most `size` bytes from the file (less if there
isn't enough data).
The bytes are returned as an instance of :class:`str` (:class:`bytes`
in python 3). If `size` is negative or omitted all data is read.
:Parameters:
- `size` (optional): the number of bytes to read
.. versionchanged:: 3.8
This method now only checks for extra chunks after reading the
entire file. Previously, this method would check for extra chunks
on every call.
"""
self._ensure_file()
remainder = int(self.length) - self.__position
if size < 0 or size > remainder:
size = remainder
if size == 0:
return EMPTY
received = 0
data = io.BytesIO()
while received < size:
chunk_data = self.readchunk()
received += len(chunk_data)
data.write(chunk_data)
# Detect extra chunks after reading the entire file.
if size == remainder and self.__chunk_iter:
try:
self.__chunk_iter.next()
except StopIteration:
pass
self.__position -= received - size
# Return 'size' bytes and store the rest.
data.seek(size)
self.__buffer = data.read()
data.seek(0)
return data.read(size)
def readline(self, size=-1):
"""Read one line or up to `size` bytes from the file.
:Parameters:
- `size` (optional): the maximum number of bytes to read
"""
remainder = int(self.length) - self.__position
if size < 0 or size > remainder:
size = remainder
if size == 0:
return EMPTY
received = 0
data = io.BytesIO()
while received < size:
chunk_data = self.readchunk()
pos = chunk_data.find(NEWLN, 0, size)
if pos != -1:
size = received + pos + 1
received += len(chunk_data)
data.write(chunk_data)
if pos != -1:
break
self.__position -= received - size
# Return 'size' bytes and store the rest.
data.seek(size)
self.__buffer = data.read()
data.seek(0)
return data.read(size)
def tell(self):
"""Return the current position of this file.
"""
return self.__position
def seek(self, pos, whence=_SEEK_SET):
"""Set the current position of this file.
:Parameters:
- `pos`: the position (or offset if using relative
positioning) to seek to
- `whence` (optional): where to seek
from. :attr:`os.SEEK_SET` (``0``) for absolute file
positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative
to the current position, :attr:`os.SEEK_END` (``2``) to
seek relative to the file's end.
"""
if whence == _SEEK_SET:
new_pos = pos
elif whence == _SEEK_CUR:
new_pos = self.__position + pos
elif whence == _SEEK_END:
new_pos = int(self.length) + pos
else:
raise IOError(22, "Invalid value for `whence`")
if new_pos < 0:
raise IOError(22, "Invalid value for `pos` - must be positive")
# Optimization, continue using the same buffer and chunk iterator.
if new_pos == self.__position:
return
self.__position = new_pos
self.__buffer = EMPTY
if self.__chunk_iter:
self.__chunk_iter.close()
self.__chunk_iter = None
def seekable(self):
return True
    def __iter__(self):
        """Return an iterator over all of this file's data.
        The iterator will return lines (delimited by ``b'\\n'``) of
        :class:`bytes`. This can be useful when serving files
        using a webserver that handles such an iterator efficiently.
        .. versionchanged:: 3.8
           The iterator now raises :class:`CorruptGridFile` when encountering
           any truncated, missing, or extra chunk in a file. The previous
           behavior was to only raise :class:`CorruptGridFile` on a missing
           chunk.
        .. versionchanged:: 4.0
           The iterator now iterates over *lines* in the file, instead
           of chunks, to conform to the base class :py:class:`io.IOBase`.
           Use :meth:`GridOut.readchunk` to read chunk by chunk instead
           of line by line.
        """
        # Returning self lets the io.IOBase machinery drive line iteration
        # through this class's readline().
        return self
def close(self):
"""Make GridOut more generically file-like."""
if self.__chunk_iter:
self.__chunk_iter.close()
self.__chunk_iter = None
super().close()
    def write(self, value):
        """Always raise :class:`io.UnsupportedOperation`: GridOut is read-only."""
        raise io.UnsupportedOperation('write')
    def writelines(self, lines):
        """Always raise :class:`io.UnsupportedOperation`: GridOut is read-only."""
        raise io.UnsupportedOperation('writelines')
    def writable(self):
        # GridOut is strictly read-only (write()/writelines() raise).
        return False
    def __enter__(self):
        """Makes it possible to use :class:`GridOut` files
        with the context manager protocol.
        """
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Makes it possible to use :class:`GridOut` files
        with the context manager protocol.
        """
        self.close()
        # Returning False propagates any in-flight exception to the caller.
        return False
    def fileno(self):
        # No OS-level file descriptor backs a GridOut.
        raise io.UnsupportedOperation('fileno')
    def flush(self):
        # GridOut is read-only, so flush does nothing.
        pass
    def isatty(self):
        # A GridFS file is never an interactive terminal.
        return False
    def truncate(self, size=None):
        """Always raise :class:`io.UnsupportedOperation`: GridOut is read-only."""
        # See https://docs.python.org/3/library/io.html#io.IOBase.writable
        # for why truncate has to raise.
        raise io.UnsupportedOperation('truncate')
    # Override IOBase.__del__ otherwise it will lead to __getattr__ on
    # __IOBase_closed which calls _ensure_file and potentially performs I/O.
    # We cannot do I/O in __del__ since it can lead to a deadlock.
    def __del__(self):
        # Intentionally a no-op; see the rationale above.
        pass
class _GridOutChunkIterator(object):
    """Iterates over a file's chunks using a single cursor.
    Raises CorruptGridFile when encountering any truncated, missing, or extra
    chunk in a file.
    """
    def __init__(self, grid_out, chunks, session, next_chunk):
        # Identity and geometry of the file whose chunks we stream.
        self._id = grid_out._id
        self._chunk_size = int(grid_out.chunk_size)
        self._length = int(grid_out.length)
        # `chunks` is the GridFS chunks collection; `next_chunk` is the
        # zero-based index of the first chunk to fetch.
        self._chunks = chunks
        self._session = session
        self._next_chunk = next_chunk
        # Total number of chunks implied by the file length and chunk size.
        self._num_chunks = math.ceil(float(self._length) / self._chunk_size)
        # The server cursor is created lazily on first read.
        self._cursor = None
    def expected_chunk_length(self, chunk_n):
        """Return how many bytes chunk number `chunk_n` must contain."""
        # Every chunk is full-sized except (possibly) the last one.
        if chunk_n < self._num_chunks - 1:
            return self._chunk_size
        return self._length - (self._chunk_size * (self._num_chunks - 1))
    def __iter__(self):
        return self
    def _create_cursor(self):
        # Query this file's chunks in order; when resuming (retry or a
        # nonzero starting chunk) only ask for the chunks still needed.
        filter = {"files_id": self._id}
        if self._next_chunk > 0:
            filter["n"] = {"$gte": self._next_chunk}
        _disallow_transactions(self._session)
        self._cursor = self._chunks.find(filter, sort=[("n", 1)],
                                         session=self._session)
    def _next_with_retry(self):
        """Return the next chunk and retry once on CursorNotFound.
        We retry on CursorNotFound to maintain backwards compatibility in
        cases where two calls to read occur more than 10 minutes apart (the
        server's default cursor timeout).
        """
        if self._cursor is None:
            self._create_cursor()
        try:
            return self._cursor.next()
        except CursorNotFound:
            # The server reaped the idle cursor; rebuild it (starting from
            # self._next_chunk) and try exactly once more.
            self._cursor.close()
            self._create_cursor()
            return self._cursor.next()
    def next(self):
        """Return the next chunk document, validating its `n` and data length."""
        try:
            chunk = self._next_with_retry()
        except StopIteration:
            if self._next_chunk >= self._num_chunks:
                # Clean end of file: all expected chunks were consumed.
                raise
            raise CorruptGridFile("no chunk #%d" % self._next_chunk)
        if chunk["n"] != self._next_chunk:
            self.close()
            raise CorruptGridFile(
                "Missing chunk: expected chunk #%d but found "
                "chunk with n=%d" % (self._next_chunk, chunk["n"]))
        if chunk["n"] >= self._num_chunks:
            # According to spec, ignore extra chunks if they are empty.
            if len(chunk["data"]):
                self.close()
                raise CorruptGridFile(
                    "Extra chunk found: expected %d chunks but found "
                    "chunk with n=%d" % (self._num_chunks, chunk["n"]))
        expected_length = self.expected_chunk_length(chunk["n"])
        if len(chunk["data"]) != expected_length:
            self.close()
            raise CorruptGridFile(
                "truncated chunk #%d: expected chunk length to be %d but "
                "found chunk with length %d" % (
                    chunk["n"], expected_length, len(chunk["data"])))
        self._next_chunk += 1
        return chunk
    __next__ = next
    def close(self):
        # Idempotent: safe to call repeatedly and from error paths above.
        if self._cursor:
            self._cursor.close()
            self._cursor = None
class GridOutIterator(object):
    """Iterate over a :class:`GridOut`, yielding one chunk of bytes at a time."""
    def __init__(self, grid_out, chunks, session):
        # Start streaming from the first chunk (n=0).
        self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0)
    def __iter__(self):
        return self
    def next(self):
        # Each chunk document stores its payload under the "data" key.
        return bytes(self.__chunk_iter.next()["data"])
    __next__ = next
class GridOutCursor(Cursor):
    """A cursor / iterator for returning GridOut objects as the result
    of an arbitrary query against the GridFS files collection.
    """
    def __init__(self, collection, filter=None, skip=0, limit=0,
                 no_cursor_timeout=False, sort=None, batch_size=0,
                 session=None):
        """Create a new cursor, similar to the normal
        :class:`~pymongo.cursor.Cursor`.
        Should not be called directly by application developers - see
        the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead.
        .. versionadded 2.7
        .. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
        """
        _disallow_transactions(session)
        collection = _clear_entity_type_registry(collection)
        # Hold on to the base "fs" collection to create GridOut objects later.
        self.__root_collection = collection
        # Delegate the actual querying to Cursor, targeting the "files"
        # subcollection of the GridFS bucket.
        super(GridOutCursor, self).__init__(
            collection.files, filter, skip=skip, limit=limit,
            no_cursor_timeout=no_cursor_timeout, sort=sort,
            batch_size=batch_size, session=session)
    def next(self):
        """Get next GridOut object from cursor.
        """
        _disallow_transactions(self.session)
        # Work around "super is not iterable" issue in Python 3.x
        next_file = super(GridOutCursor, self).next()
        # Wrap the raw files document in a lazily-reading GridOut.
        return GridOut(self.__root_collection, file_document=next_file,
                       session=self.session)
    __next__ = next
    def add_option(self, *args, **kwargs):
        # Cursor option toggling is not meaningful for GridOut results.
        raise NotImplementedError("Method does not exist for GridOutCursor")
    def remove_option(self, *args, **kwargs):
        raise NotImplementedError("Method does not exist for GridOutCursor")
    def _clone_base(self, session):
        """Creates an empty GridOutCursor for information to be copied into.
        """
        return GridOutCursor(self.__root_collection, session=session)
| [
"bson.int64.Int64",
"gridfs.errors.CorruptGridFile",
"gridfs.errors.FileExists",
"bson.binary.Binary",
"datetime.datetime.utcnow",
"bson.objectid.ObjectId",
"io.BytesIO",
"io.UnsupportedOperation",
"pymongo.errors.ConfigurationError",
"bson.son.SON",
"pymongo.errors.InvalidOperation",
"gridfs.errors.NoFile"
]
| [((1622, 1670), 'bson.son.SON', 'SON', (["[('files_id', ASCENDING), ('n', ASCENDING)]"], {}), "([('files_id', ASCENDING), ('n', ASCENDING)])\n", (1625, 1670), False, 'from bson.son import SON\n'), ((1682, 1739), 'bson.son.SON', 'SON', (["[('filename', ASCENDING), ('uploadDate', ASCENDING)]"], {}), "([('filename', ASCENDING), ('uploadDate', ASCENDING)])\n", (1685, 1739), False, 'from bson.son import SON\n'), ((3663, 3734), 'pymongo.errors.InvalidOperation', 'InvalidOperation', (['"""GridFS does not support multi-document transactions"""'], {}), "('GridFS does not support multi-document transactions')\n", (3679, 3734), False, 'from pymongo.errors import ConfigurationError, CursorNotFound, DuplicateKeyError, InvalidOperation, OperationFailure\n'), ((11169, 11181), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (11179, 11181), False, 'import io\n'), ((11797, 11852), 'gridfs.errors.FileExists', 'FileExists', (["('file with _id %r already exists' % file_id)"], {}), "('file with _id %r already exists' % file_id)\n", (11807, 11852), False, 'from gridfs.errors import CorruptGridFile, FileExists, NoFile\n'), ((12189, 12220), 'io.UnsupportedOperation', 'io.UnsupportedOperation', (['"""read"""'], {}), "('read')\n", (12212, 12220), False, 'import io\n'), ((20599, 20611), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (20609, 20611), False, 'import io\n'), ((21600, 21612), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (21610, 21612), False, 'import io\n'), ((24626, 24658), 'io.UnsupportedOperation', 'io.UnsupportedOperation', (['"""write"""'], {}), "('write')\n", (24649, 24658), False, 'import io\n'), ((24707, 24744), 'io.UnsupportedOperation', 'io.UnsupportedOperation', (['"""writelines"""'], {}), "('writelines')\n", (24730, 24744), False, 'import io\n'), ((25196, 25229), 'io.UnsupportedOperation', 'io.UnsupportedOperation', (['"""fileno"""'], {}), "('fileno')\n", (25219, 25229), False, 'import io\n'), ((25530, 25565), 'io.UnsupportedOperation', 'io.UnsupportedOperation', 
(['"""truncate"""'], {}), "('truncate')\n", (25553, 25565), False, 'import io\n'), ((6098, 6171), 'pymongo.errors.ConfigurationError', 'ConfigurationError', (['"""root_collection must use acknowledged write_concern"""'], {}), "('root_collection must use acknowledged write_concern')\n", (6116, 6171), False, 'from pymongo.errors import ConfigurationError, CursorNotFound, DuplicateKeyError, InvalidOperation, OperationFailure\n'), ((6659, 6669), 'bson.objectid.ObjectId', 'ObjectId', ([], {}), '()\n', (6667, 6669), False, 'from bson.objectid import ObjectId\n'), ((6998, 7010), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (7008, 7010), False, 'import io\n'), ((10719, 10731), 'bson.binary.Binary', 'Binary', (['data'], {}), '(data)\n', (10725, 10731), False, 'from bson.binary import Binary\n'), ((11405, 11426), 'bson.int64.Int64', 'Int64', (['self._position'], {}), '(self._position)\n', (11410, 11426), False, 'from bson.int64 import Int64\n'), ((11466, 11492), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (11490, 11492), False, 'import datetime\n'), ((27900, 28021), 'gridfs.errors.CorruptGridFile', 'CorruptGridFile', (["('Missing chunk: expected chunk #%d but found chunk with n=%d' % (self.\n _next_chunk, chunk['n']))"], {}), "(\n 'Missing chunk: expected chunk #%d but found chunk with n=%d' % (self.\n _next_chunk, chunk['n']))\n", (27915, 28021), False, 'from gridfs.errors import CorruptGridFile, FileExists, NoFile\n'), ((18480, 18571), 'gridfs.errors.NoFile', 'NoFile', (["('no file in gridfs collection %r with _id %r' % (self.__files, self.__file_id)\n )"], {}), "('no file in gridfs collection %r with _id %r' % (self.__files, self.\n __file_id))\n", (18486, 18571), False, 'from gridfs.errors import CorruptGridFile, FileExists, NoFile\n'), ((27762, 27812), 'gridfs.errors.CorruptGridFile', 'CorruptGridFile', (["('no chunk #%d' % self._next_chunk)"], {}), "('no chunk #%d' % self._next_chunk)\n", (27777, 27812), False, 'from gridfs.errors import 
CorruptGridFile, FileExists, NoFile\n'), ((28250, 28375), 'gridfs.errors.CorruptGridFile', 'CorruptGridFile', (["('Extra chunk found: expected %d chunks but found chunk with n=%d' % (self.\n _num_chunks, chunk['n']))"], {}), "(\n 'Extra chunk found: expected %d chunks but found chunk with n=%d' % (\n self._num_chunks, chunk['n']))\n", (28265, 28375), False, 'from gridfs.errors import CorruptGridFile, FileExists, NoFile\n'), ((13888, 13904), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (13898, 13904), False, 'import io\n'), ((19646, 19680), 'gridfs.errors.CorruptGridFile', 'CorruptGridFile', (['"""truncated chunk"""'], {}), "('truncated chunk')\n", (19661, 19680), False, 'from gridfs.errors import CorruptGridFile, FileExists, NoFile\n')] |
"""This module tests Stanford NLP processors."""
import os
import unittest
from texar.torch import HParams
from forte.pipeline import Pipeline
from forte.data.readers import StringReader
from forte.processors.stanfordnlp_processor import StandfordNLPProcessor
from ft.onto.base_ontology import Token, Sentence
class TestStanfordNLPProcessor(unittest.TestCase):
    """Smoke test for the forte StanfordNLP processor wrapper."""

    def setUp(self):
        pipeline = Pipeline()
        pipeline.set_reader(StringReader())
        models_path = os.getcwd()
        config = HParams(
            {
                "processors": "tokenize",
                "lang": "en",
                # Language code for the language to build the Pipeline
                "use_gpu": False,
            },
            StandfordNLPProcessor.default_hparams(),
        )
        pipeline.add_processor(StandfordNLPProcessor(models_path),
                               config=config)
        pipeline.initialize()
        self.stanford_nlp = pipeline

    # TODO
    @unittest.skip(
        "We need to test this without needing to download models everytime"
    )
    def test_stanford_processor(self):
        sentences = ["This tool is called Forte.",
                     "The goal of this project to help you build NLP "
                     "pipelines.",
                     "NLP has never been made this easy before."]
        document = ' '.join(sentences)
        pack = self.stanford_nlp.process(document)
        print(pack)
| [
"forte.processors.stanfordnlp_processor.StandfordNLPProcessor",
"os.getcwd",
"forte.processors.stanfordnlp_processor.StandfordNLPProcessor.default_hparams",
"forte.pipeline.Pipeline",
"forte.data.readers.StringReader",
"unittest.skip"
]
| [((938, 1025), 'unittest.skip', 'unittest.skip', (['"""We need to test this without needing to download models everytime"""'], {}), "(\n 'We need to test this without needing to download models everytime')\n", (951, 1025), False, 'import unittest\n'), ((414, 424), 'forte.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (422, 424), False, 'from forte.pipeline import Pipeline\n'), ((500, 511), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (509, 511), False, 'import os\n'), ((462, 476), 'forte.data.readers.StringReader', 'StringReader', ([], {}), '()\n', (474, 476), False, 'from forte.data.readers import StringReader\n'), ((710, 749), 'forte.processors.stanfordnlp_processor.StandfordNLPProcessor.default_hparams', 'StandfordNLPProcessor.default_hparams', ([], {}), '()\n', (747, 749), False, 'from forte.processors.stanfordnlp_processor import StandfordNLPProcessor\n'), ((791, 825), 'forte.processors.stanfordnlp_processor.StandfordNLPProcessor', 'StandfordNLPProcessor', (['models_path'], {}), '(models_path)\n', (812, 825), False, 'from forte.processors.stanfordnlp_processor import StandfordNLPProcessor\n')] |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Serve current folder files in a HTTP webserver.
"""
import socketserver
from threading import Thread
from http.server import SimpleHTTPRequestHandler
PORT = 8000


def start_http_server(port=PORT):
    """Serve the current directory over HTTP on a background thread.

    The server opts into address reuse so it can be restarted immediately
    after a previous instance exited (avoids "Address already in use"
    while the old socket lingers in TIME_WAIT).

    :param port: TCP port to bind; ``0`` picks a free ephemeral port.
    :return: the started ``Thread``. The underlying server is attached as
        ``thread.httpd`` so callers can stop it via ``thread.httpd.shutdown()``
        followed by ``thread.httpd.server_close()``.
    """
    class _ReusableTCPServer(socketserver.TCPServer):
        # Sets SO_REUSEADDR before bind().
        allow_reuse_address = True

    httpd = _ReusableTCPServer(("", port), SimpleHTTPRequestHandler)
    thread = Thread(target=httpd.serve_forever)
    thread.httpd = httpd  # expose the server for graceful shutdown
    thread.start()
    return thread


if __name__ == '__main__':
    thread = start_http_server()
    thread.join()
| [
"threading.Thread",
"socketserver.TCPServer"
]
| [((264, 324), 'socketserver.TCPServer', 'socketserver.TCPServer', (["('', port)", 'SimpleHTTPRequestHandler'], {}), "(('', port), SimpleHTTPRequestHandler)\n", (286, 324), False, 'import socketserver\n'), ((342, 376), 'threading.Thread', 'Thread', ([], {'target': 'httpd.serve_forever'}), '(target=httpd.serve_forever)\n', (348, 376), False, 'from threading import Thread\n')] |
# coding: utf-8
class AppTestCompile:
    """App-level tests for the builtin compile() (run under PyPy's apptest
    framework, which provides `raises` and `skip` as globals)."""
    def test_simple(self):
        # NOTE(review): `import sys` below appears unused in this test.
        import sys
        co = compile('1+2', '?', 'eval')
        assert eval(co) == 3
        co = compile(memoryview(b'1+2'), '?', 'eval')
        assert eval(co) == 3
        exc = raises(ValueError, compile, chr(0), '?', 'eval')
        assert str(exc.value) == "source code string cannot contain null bytes"
        compile("from __future__ import with_statement", "<test>", "exec")
        raises(SyntaxError, compile, '-', '?', 'eval')
        raises(SyntaxError, compile, '"\\xt"', '?', 'eval')
        raises(ValueError, compile, '1+2', '?', 'maybenot')
        raises(ValueError, compile, "\n", "<string>", "exec", 0xff)
        raises(TypeError, compile, '1+2', 12, 34)
    def test_error_message(self):
        # Source-encoding declarations and BOM handling.
        # NOTE(review): `import re` below appears unused in this test.
        import re
        compile('# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
        compile(b'\xef\xbb\xbf\n', 'dummy', 'exec')
        compile(b'\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec')
        exc = raises(SyntaxError, compile,
            b'# -*- coding: fake -*-\n', 'dummy', 'exec')
        assert 'fake' in str(exc.value)
        exc = raises(SyntaxError, compile,
            b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
        assert 'iso-8859-15' in str(exc.value)
        assert 'BOM' in str(exc.value)
        exc = raises(SyntaxError, compile,
            b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec')
        assert 'fake' in str(exc.value)
        assert 'BOM' in str(exc.value)
    def test_unicode(self):
        try:
            compile(u'-', '?', 'eval')
        except SyntaxError as e:
            assert e.lineno == 1
    def test_unicode_encoding(self):
        code = "# -*- coding: utf-8 -*-\npass\n"
        compile(code, "tmp", "exec")
    def test_bytes(self):
        code = b"# -*- coding: utf-8 -*-\npass\n"
        compile(code, "tmp", "exec")
        c = compile(b"# coding: latin1\nfoo = 'caf\xe9'\n", "<string>", "exec")
        ns = {}
        exec(c, ns)
        assert ns['foo'] == 'café'
        assert eval(b"# coding: latin1\n'caf\xe9'\n") == 'café'
    def test_memoryview(self):
        m = memoryview(b'2 + 1')
        co = compile(m, 'baz', 'eval')
        assert eval(co) == 3
        assert eval(m) == 3
        ns = {}
        exec(memoryview(b'r = 2 + 1'), ns)
        assert ns['r'] == 3
    def test_recompile_ast(self):
        import _ast
        # raise exception when node type doesn't match with compile mode
        co1 = compile('print(1)', '<string>', 'exec', _ast.PyCF_ONLY_AST)
        raises(TypeError, compile, co1, '<ast>', 'eval')
        co2 = compile('1+1', '<string>', 'eval', _ast.PyCF_ONLY_AST)
        tree = compile(co2, '<ast>', 'eval')
        assert compile(co2, '<ast>', 'eval', _ast.PyCF_ONLY_AST) is co2
    def test_leading_newlines(self):
        src = """
def fn(): pass
"""
        co = compile(src, 'mymod', 'exec')
        firstlineno = co.co_firstlineno
        assert firstlineno == 2
    def test_null_bytes(self):
        raises(ValueError, compile, '\x00', 'mymod', 'exec', 0)
        src = "#abc\x00def\n"
        raises(ValueError, compile, src, 'mymod', 'exec')
        raises(ValueError, compile, src, 'mymod', 'exec', 0)
    def test_null_bytes_flag(self):
        # PyPy extension: PyCF_ACCEPT_NULL_BYTES turns the null-byte error
        # into a SyntaxError (or accepts the bytes when not at top level).
        try:
            from _ast import PyCF_ACCEPT_NULL_BYTES
        except ImportError:
            skip('PyPy only (requires _ast.PyCF_ACCEPT_NULL_BYTES)')
        raises(SyntaxError, compile, '\x00', 'mymod', 'exec',
               PyCF_ACCEPT_NULL_BYTES)
        src = "#abc\x00def\n"
        compile(src, 'mymod', 'exec', PyCF_ACCEPT_NULL_BYTES)    # works
    def test_compile_regression(self):
        """Clone of the part of the original test that was failing."""
        import ast
        codestr = '''def f():
        """doc"""
        try:
            assert False
        except AssertionError:
            return (True, f.__doc__)
        else:
            return (False, f.__doc__)
        '''
        def f(): """doc"""
        values = [(-1, __debug__, f.__doc__),
                  (0, True, 'doc'),
                  (1, False, 'doc'),
                  (2, False, None)]
        for optval, debugval, docstring in values:
            # test both direct compilation and compilation via AST
            codeobjs = []
            codeobjs.append(
                    compile(codestr, "<test>", "exec", optimize=optval))
            tree = ast.parse(codestr)
            codeobjs.append(compile(tree, "<test>", "exec", optimize=optval))
            for i, code in enumerate(codeobjs):
                print(optval, debugval, docstring, i)
                ns = {}
                exec(code, ns)
                rv = ns['f']()
                assert rv == (debugval, docstring)
    def test_assert_remove(self):
        """Test removal of the asserts with optimize=1."""
        import ast
        code = """def f():
        assert False
        """
        tree = ast.parse(code)
        for to_compile in [code, tree]:
            compiled = compile(to_compile, "<test>", "exec", optimize=1)
            ns = {}
            exec(compiled, ns)
            ns['f']()
    def test_docstring_remove(self):
        """Test removal of docstrings with optimize=2."""
        import ast
        import marshal
        code = """
'module_doc'
def f():
    'func_doc'
class C:
    'class_doc'
"""
        tree = ast.parse(code)
        for to_compile in [code, tree]:
            compiled = compile(to_compile, "<test>", "exec", optimize=2)
            ns = {}
            exec(compiled, ns)
            assert '__doc__' not in ns
            assert ns['f'].__doc__ is None
            assert ns['C'].__doc__ is None
            # Check that the docstrings are gone from the bytecode and not just
            # inaccessible.
            marshalled = str(marshal.dumps(compiled))
            assert 'module_doc' not in marshalled
            assert 'func_doc' not in marshalled
            assert 'class_doc' not in marshalled
class TestOptimizeO:
    """Test interaction of -O flag and optimize parameter of compile."""
    def setup_method(self, method):
        space = self.space
        # Save the real flag so teardown_method can restore it.
        self._sys_debug = space.sys.debug
        # imitate -O
        space.sys.debug = False
    def teardown_method(self, method):
        # Restore the interpreter's original debug flag.
        self.space.sys.debug = self._sys_debug
    def test_O_optmize_0(self):
        """Test that assert is not ignored if -O flag is set but optimize=0."""
        # (Method name keeps the historical "optmize" typo; renaming would
        # change the test id.)
        space = self.space
        w_res = space.appexec([], """():
            assert False  # check that our -O imitation hack works
            try:
                exec(compile('assert False', '', 'exec', optimize=0))
            except AssertionError:
                return True
            else:
                return False
        """)
        assert space.unwrap(w_res)
    def test_O_optimize__1(self):
        """Test that assert is ignored with -O and optimize=-1."""
        space = self.space
        space.appexec([], """():
            exec(compile('assert False', '', 'exec', optimize=-1))
        """)
# TODO: Check the value of __debug__ inside of the compiled block!
# According to the documentation, it should follow the optimize flag.
# However, cpython3.5.0a0 behaves the same way as PyPy (__debug__ follows
# -O, -OO flags of the interpreter).
| [
"ast.parse",
"marshal.dumps"
]
| [((4989, 5004), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (4998, 5004), False, 'import ast\n'), ((5432, 5447), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (5441, 5447), False, 'import ast\n'), ((4463, 4481), 'ast.parse', 'ast.parse', (['codestr'], {}), '(codestr)\n', (4472, 4481), False, 'import ast\n'), ((5876, 5899), 'marshal.dumps', 'marshal.dumps', (['compiled'], {}), '(compiled)\n', (5889, 5899), False, 'import marshal\n')] |
# Standard library
from datetime import date, timedelta

# Third-party
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import yfinance as yf
from IPython.display import Markdown, display
from plotly.subplots import make_subplots
def plot_and_get_info(ticker, start=None, end=None, ma='yes'):
    """Plot price/volume history for ``ticker`` and display summary stats.

    Draws an interactive price chart (optionally with a 7-day moving
    average), prints the total return over the window, and -- when
    yfinance can supply company metadata -- displays a table of
    fundamental statistics.

    :param ticker: Yahoo Finance ticker symbol, e.g. ``"AAPL"``.
    :param start: optional window start date (used together with ``end``).
    :param end: optional window end date (used together with ``start``).
    :param ma: if ``'yes'`` (default), overlay the 7-day close moving average.
    """
    ticker_obj = yf.Ticker(ticker)
    ticker_hist = ticker_obj.history(period='max')
    if start and end:
        start_date, end_date = start, end
    else:
        start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
    frame = ticker_hist.loc[start_date:end_date]
    closing_prices = frame['Close']
    volume = frame['Volume']

    fig = make_subplots(rows=2, cols=1,
                        shared_xaxes=True,
                        vertical_spacing=0.03, row_heights=[0.8, 0.2])
    fig.add_trace(go.Scatter(x=closing_prices.index, y=closing_prices,
                             mode='lines', name='Close'), row=1, col=1)
    if ma == 'yes':
        closing_prices_ma = frame['Close'].rolling(7).mean()
        fig.add_trace(go.Scatter(x=closing_prices_ma.index, y=closing_prices_ma,
                                 mode='lines', name='7D Close Moving Average'),
                      row=1, col=1)
    fig.add_trace(go.Bar(x=closing_prices.index, y=volume, name='Volume'),
                  row=2, col=1)
    fig.update_xaxes(rangeslider_visible=True, rangeslider_thickness=0.1,
                     row=2, col=1)
    fig.update_yaxes(title_text="Price", row=1, col=1)
    fig.update_layout(title=ticker, height=600,
        xaxis=dict(
            rangeselector=dict(
                buttons=list([
                    dict(count=7, label="1w", step="day", stepmode="backward"),
                    dict(count=1, label="1m", step="month", stepmode="backward"),
                    dict(count=3, label="3m", step="month", stepmode="backward"),
                    dict(count=6, label="6m", step="month", stepmode="backward"),
                    dict(count=1, label="YTD", step="year", stepmode="todate"),
                    dict(count=1, label="1y", step="year", stepmode="backward"),
                    dict(step="all")
                ])
            ),
            type="date"
        )
    )
    fig.show()

    def printmd(string):
        display(Markdown(string))

    start_price, end_price = frame.iloc[0]['Close'], frame.iloc[-1]['Close']
    printmd('Given Timeframe:')
    printmd("Return: {:.2f}%".format((end_price - start_price)/start_price*100))

    # Company metadata is best-effort: yfinance's `.info` scrape can fail
    # outright or omit individual fields, so fetch failures skip the table
    # and missing fields become None instead of aborting everything
    # (previously a blanket `except: pass` hid all errors here).
    try:
        ticker_info = ticker_obj.info
    except Exception:
        return

    def scaled(value, divisor, suffix):
        # Format large counts as e.g. '1.23B'; pass through None/strings.
        try:
            return str(round(value / divisor, 2)) + suffix
        except TypeError:
            return value

    def pct(value):
        # Format a 0-1 ratio as a percent string; pass through None/strings.
        try:
            return str(round(value * 100, 2)) + '%'
        except TypeError:
            return value

    def rnd(value):
        # round() that tolerates None/str values in the yfinance payload.
        try:
            return round(value, 2)
        except TypeError:
            return value

    print()
    summary = ticker_info.get('longBusinessSummary')
    if summary:
        printmd('Business Summary: ' + summary)

    # Order must match the index labels of the DataFrame below.
    stock_info = [
        scaled(ticker_info.get('marketCap'), 1000000000, 'B'),
        ticker_info.get('longName'),
        ticker_info.get('sector'),
        ticker_info.get('industry'),
        ticker_info.get('country'),
        rnd(ticker_info.get('beta')),
        scaled(ticker_info.get('volume'), 1000000, 'M'),
        scaled(ticker_info.get('averageDailyVolume10Day'), 1000000, 'M'),
        rnd(ticker_info.get('priceToSalesTrailing12Months')),
        rnd(ticker_info.get('forwardPE')),
        ticker_info.get('pegRatio'),
        ticker_info.get('forwardEps'),
        ticker_info.get('trailingEps'),
        scaled(ticker_info.get('sharesOutstanding'), 1000000, 'M'),
        pct(ticker_info.get('heldPercentInstitutions')),
        pct(ticker_info.get('heldPercentInsiders')),
        scaled(ticker_info.get('sharesShort'), 1000000, 'M'),
        pct(ticker_info.get('sharesPercentSharesOut')),
        scaled(ticker_info.get('floatShares'), 1000000, 'M'),
        pct(ticker_info.get('shortPercentOfFloat')),
    ]
    stock_info_df = pd.DataFrame(
        stock_info,
        index=['Market Cap', 'Name', 'Sector', 'Industry', 'Country',
               'Beta', 'Day Volume (Most recent)', 'Avg 10D Volume',
               'P/S Trailing 12mo', 'Forward P/E', 'PEG Ratio',
               'Forward EPS', 'Trailing EPS', 'Shares Outstanding',
               'Institutions % of Oustanding', 'Insiders % of Oustanding',
               'Shares Short (Prev Mo)', 'Short % of Outstanding (Prev Mo)',
               'Shares Float', 'Short % of Float (Prev Mo)'],
        columns=['Info'])
    print()
    display(stock_info_df)
def compare_charts(tickers=None, start=None, end=None, ma='yes'):
    """Plot min-max normalized closing prices of several tickers together.

    Also reports each ticker's return over the window and the pairwise
    correlation of the last 90 days of normalized closes.

    :param tickers: iterable of at least two Yahoo Finance ticker symbols.
    :param start: optional window start date (used together with ``end``).
    :param end: optional window end date (used together with ``start``).
    :param ma: if ``'yes'`` (default), overlay 7-day moving averages.
    :raises Exception: if fewer than two tickers are supplied.
    """
    # Avoid the old mutable-default argument (`tickers=[]`); None means
    # "no tickers" and still triggers the length check below.
    tickers = list(tickers) if tickers else []
    if len(tickers) <= 1:
        raise Exception("Please enter at least two tickers to compare")

    def normalize_data(column):
        # Min-max scale a price series to [0, 1] so different price levels
        # share one axis. A constant series would divide by zero, so it is
        # mapped to all zeros instead. (Also no longer shadows min/max.)
        low = column.min()
        high = column.max()
        span = high - low
        if span == 0:
            return column - low
        return (column - low) / span

    def printmd(string):
        display(Markdown(string))

    start_end_prices = {}
    closing_90_days = []
    fig = go.Figure()
    for ticker in tickers:
        ticker_obj = yf.Ticker(ticker)
        ticker_hist = ticker_obj.history(period='max')
        if start and end:
            start_date, end_date = start, end
        else:
            start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
        frame = ticker_hist.loc[start_date:end_date].copy()
        frame['Norm Close'] = normalize_data(frame['Close'])
        closing_prices = frame['Norm Close']
        start_end_prices[ticker] = {'start_price': frame.iloc[0]['Close'],
                                    'end_price': frame.iloc[-1]['Close']}
        closing_90_days.append(closing_prices.iloc[-90:].to_frame().rename(
            columns={'Norm Close': ticker}))
        fig.add_trace(go.Scatter(x=closing_prices.index, y=closing_prices,
                                 mode='lines', name=ticker + ' Norm Close'))
        if ma == 'yes':
            closing_prices_ma = frame['Norm Close'].rolling(7).mean()
            fig.add_trace(go.Scatter(x=closing_prices_ma.index,
                                     y=closing_prices_ma, mode='lines',
                                     name=ticker + '7D Close Moving Average'))

    fig.update_layout(title=', '.join(tickers) + ' Comparison',
                      yaxis_title='Norm Price')
    fig.update_layout(height=600,
        xaxis=dict(
            rangeselector=dict(
                buttons=list([
                    dict(count=7, label="1w", step="day", stepmode="backward"),
                    dict(count=1, label="1m", step="month", stepmode="backward"),
                    dict(count=3, label="3m", step="month", stepmode="backward"),
                    dict(count=6, label="6m", step="month", stepmode="backward"),
                    dict(count=1, label="YTD", step="year", stepmode="todate"),
                    dict(count=1, label="1y", step="year", stepmode="backward"),
                    dict(step="all")
                ])
            ),
            rangeslider=dict(
                visible=True, thickness=0.1
            ),
            type="date"
        )
    )
    fig.show()

    printmd('Given Timeframe:')
    for ticker in tickers:
        start_price, end_price = (start_end_prices[ticker]['start_price'],
                                  start_end_prices[ticker]['end_price'])
        printmd(ticker + " Return: {:.2f}%".format(
            (end_price - start_price)/start_price*100))

    if len(tickers) > 2:
        # Three or more tickers: show a full correlation matrix + heatmap.
        concat_closing_90_days = pd.concat(closing_90_days, axis=1)
        print('\n')
        printmd("Last 90 Days Close Pearson Correlation Matrix: ")
        display(concat_closing_90_days.corr())
        fig2 = px.imshow(concat_closing_90_days.corr(), color_continuous_scale='blues',
                         title='Last 90 Days Close Pearson Correlation Heatmap',
                         width=500, height=400)
        fig2.show()
    else:
        # Exactly two tickers: show a scatter plot and the single coefficient.
        fig2 = go.Figure()
        fig2.add_trace(go.Scatter(x=closing_90_days[0].loc[:, tickers[0]],
                                  y=closing_90_days[1].loc[:, tickers[1]],
                                  mode='markers', name='Norm Close'))
        fig2.update_layout(title=', '.join(tickers) + ' Last 90 Days Correlation',
                           xaxis_title=tickers[0], yaxis_title=tickers[1],
                           width=1000, height=500)
        fig2.show()
        printmd("Pearson Correlation: " + str(round(
            closing_90_days[0].loc[:, tickers[0]].corr(
                closing_90_days[1].loc[:, tickers[1]]), 3)))
    print()
def plot_buysell_points(ticker, tradesdf, crypto='no'):
    """Chart a ticker's closing prices with annotated buy/sell trades.

    Buys are marked "BB" and sells "SS"; hovering an annotation shows the
    price, quantity, total and date (plus gain figures for sells).
    """
    trade_history = tradesdf[tradesdf['Symbol'] == ticker].reset_index(drop=True)
    if crypto == 'yes':
        ticker += '-USD'
    ticker_obj = yf.Ticker(ticker)
    ticker_hist = ticker_obj.history(period='max')
    if len(ticker_hist) == 0:
        return

    # Chart from ~150 days before the first trade through today.
    first_trade = pd.to_datetime(trade_history.loc[0, 'Date'])
    start_date = (first_trade - timedelta(150)).strftime("%Y-%m-%d")
    today_date = date.today().strftime("%Y-%m-%d")
    closing_prices = ticker_hist.loc[start_date:today_date]['Close']

    fig = go.Figure()
    fig.add_trace(go.Scatter(x=closing_prices.index, y=closing_prices,
                             mode='lines', name='Close'))

    for _, trade in trade_history.iterrows():
        trade_date = trade['Date']
        price = trade['Avg_Price']
        quantity = trade['Quantity']
        total = trade['Total']
        side = trade['Side']
        gain = trade['Gain']
        perc_gain = trade['% Gain']
        if side == 'buy':
            fig.add_annotation(x=trade_date, y=price, text='BB',
                               showarrow=True, arrowhead=1,
                               ax=-0.5, ay=-30, arrowsize=1.5, align='left',
                               hovertext=f'B, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}')
        if side == 'sell':
            fig.add_annotation(x=trade_date, y=price, text='SS',
                               showarrow=True, arrowhead=1,
                               ax=20, ay=-30, arrowsize=1.5, align='right',
                               hovertext=f'S, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}, G: {gain}, %G: {perc_gain}')

    fig.update_layout(title=ticker, yaxis_title='Price')
    fig.show()
| [
"plotly.graph_objects.Bar",
"plotly.subplots.make_subplots",
"IPython.display.Markdown",
"datetime.timedelta",
"plotly.graph_objects.Figure",
"plotly.graph_objects.Scatter",
"pandas.DataFrame",
"datetime.date.today",
"yfinance.Ticker",
"pandas.concat",
"pandas.to_datetime"
]
| [((361, 378), 'yfinance.Ticker', 'yf.Ticker', (['ticker'], {}), '(ticker)\n', (370, 378), True, 'import yfinance as yf\n'), ((711, 810), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'shared_xaxes': '(True)', 'vertical_spacing': '(0.03)', 'row_heights': '[0.8, 0.2]'}), '(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.03,\n row_heights=[0.8, 0.2])\n', (724, 810), False, 'from plotly.subplots import make_subplots\n'), ((6513, 6524), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (6522, 6524), True, 'import plotly.graph_objects as go\n'), ((10582, 10599), 'yfinance.Ticker', 'yf.Ticker', (['ticker'], {}), '(ticker)\n', (10591, 10599), True, 'import yfinance as yf\n'), ((10953, 10964), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (10962, 10964), True, 'import plotly.graph_objects as go\n'), ((869, 954), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'closing_prices.index', 'y': 'closing_prices', 'mode': '"""lines"""', 'name': '"""Close"""'}), "(x=closing_prices.index, y=closing_prices, mode='lines', name='Close'\n )\n", (879, 954), True, 'import plotly.graph_objects as go\n'), ((1233, 1288), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'closing_prices.index', 'y': 'volume', 'name': '"""Volume"""'}), "(x=closing_prices.index, y=volume, name='Volume')\n", (1239, 1288), True, 'import plotly.graph_objects as go\n'), ((5237, 5690), 'pandas.DataFrame', 'pd.DataFrame', (['stock_info'], {'index': "['Market Cap', 'Name', 'Sector', 'Industry', 'Country', 'Beta',\n 'Day Volume (Most recent)', 'Avg 10D Volume', 'P/S Trailing 12mo',\n 'Forward P/E', 'PEG Ratio', 'Forward EPS', 'Trailing EPS',\n 'Shares Outstanding', 'Institutions % of Oustanding',\n 'Insiders % of Oustanding', 'Shares Short (Prev Mo)',\n 'Short % of Outstanding (Prev Mo)', 'Shares Float',\n 'Short % of Float (Prev Mo)']", 'columns': "['Info']"}), "(stock_info, index=['Market Cap', 'Name', 'Sector', 'Industry',\n 'Country', 
'Beta', 'Day Volume (Most recent)', 'Avg 10D Volume',\n 'P/S Trailing 12mo', 'Forward P/E', 'PEG Ratio', 'Forward EPS',\n 'Trailing EPS', 'Shares Outstanding', 'Institutions % of Oustanding',\n 'Insiders % of Oustanding', 'Shares Short (Prev Mo)',\n 'Short % of Outstanding (Prev Mo)', 'Shares Float',\n 'Short % of Float (Prev Mo)'], columns=['Info'])\n", (5249, 5690), True, 'import pandas as pd\n'), ((6576, 6593), 'yfinance.Ticker', 'yf.Ticker', (['ticker'], {}), '(ticker)\n', (6585, 6593), True, 'import yfinance as yf\n'), ((9430, 9464), 'pandas.concat', 'pd.concat', (['closing_90_days'], {'axis': '(1)'}), '(closing_90_days, axis=1)\n', (9439, 9464), True, 'import pandas as pd\n'), ((9854, 9865), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (9863, 9865), True, 'import plotly.graph_objects as go\n'), ((10984, 11069), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'closing_prices.index', 'y': 'closing_prices', 'mode': '"""lines"""', 'name': '"""Close"""'}), "(x=closing_prices.index, y=closing_prices, mode='lines', name='Close'\n )\n", (10994, 11069), True, 'import plotly.graph_objects as go\n'), ((1082, 1190), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'closing_prices_ma.index', 'y': 'closing_prices_ma', 'mode': '"""lines"""', 'name': '"""7D Close Moving Average"""'}), "(x=closing_prices_ma.index, y=closing_prices_ma, mode='lines',\n name='7D Close Moving Average')\n", (1092, 1190), True, 'import plotly.graph_objects as go\n'), ((2912, 2928), 'IPython.display.Markdown', 'Markdown', (['string'], {}), '(string)\n', (2920, 2928), False, 'from IPython.display import Markdown\n'), ((6430, 6446), 'IPython.display.Markdown', 'Markdown', (['string'], {}), '(string)\n', (6438, 6446), False, 'from IPython.display import Markdown\n'), ((7233, 7333), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'closing_prices.index', 'y': 'closing_prices', 'mode': '"""lines"""', 'name': "(ticker + ' Norm Close')"}), "(x=closing_prices.index, 
y=closing_prices, mode='lines', name=\n ticker + ' Norm Close')\n", (7243, 7333), True, 'import plotly.graph_objects as go\n'), ((9890, 10022), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'closing_90_days[0].loc[:, tickers[0]]', 'y': 'closing_90_days[1].loc[:, tickers[1]]', 'mode': '"""markers"""', 'name': '"""Norm Close"""'}), "(x=closing_90_days[0].loc[:, tickers[0]], y=closing_90_days[1].\n loc[:, tickers[1]], mode='markers', name='Norm Close')\n", (9900, 10022), True, 'import plotly.graph_objects as go\n'), ((10819, 10831), 'datetime.date.today', 'date.today', ([], {}), '()\n', (10829, 10831), False, 'from datetime import date, timedelta\n'), ((7459, 7576), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'closing_prices_ma.index', 'y': 'closing_prices_ma', 'mode': '"""lines"""', 'name': "(ticker + '7D Close Moving Average')"}), "(x=closing_prices_ma.index, y=closing_prices_ma, mode='lines',\n name=ticker + '7D Close Moving Average')\n", (7469, 7576), True, 'import plotly.graph_objects as go\n'), ((10718, 10762), 'pandas.to_datetime', 'pd.to_datetime', (["trade_history.loc[0, 'Date']"], {}), "(trade_history.loc[0, 'Date'])\n", (10732, 10762), True, 'import pandas as pd\n'), ((10765, 10779), 'datetime.timedelta', 'timedelta', (['(150)'], {}), '(150)\n', (10774, 10779), False, 'from datetime import date, timedelta\n')] |
#!/usr/bin/env python
"""Tests for `bids_statsmodels_design_synthesizer` package."""
import pytest
import subprocess as sp
from pathlib import Path
SYNTHESIZER = "aggregate_stats_design.py"
from bids_statsmodels_design_synthesizer import aggregate_stats_design as synth_mod
# from bids_statsmodels_design_synthesizer import Path(SYNTHESIZER).stem as synth_mod
# Canonical argument set exercised by every test below; keys match the
# synthesizer's CLI flags (lower-cased, underscores -> hyphens) and its
# Python API. Paths point at the bundled ds000003 example dataset.
EXAMPLE_USER_ARGS = {
    "OUTPUT_TSV": "aggregated_design.tsv",
    "MODEL": "data/ds000003/models/model-001_smdl.json",
    "EVENTS_TSV": "data/ds000003/sub-01/func/sub-01_task-rhymejudgment_events.tsv",
    "DURATION": 320,  # NOTE(review): presumably scan duration in seconds -- confirm
}
def test_cli_help():
    """The CLI must exit non-zero for both -h and an unrecognised flag."""
    for flag in ("-h", "--non-existent"):
        with pytest.raises(sp.CalledProcessError):
            sp.check_output([SYNTHESIZER, flag])
def test_design_aggregation_function():
    """Smoke-test the Python API: run the aggregation entry point directly
    with the example arguments (no subprocess involved)."""
    synth_mod.main(EXAMPLE_USER_ARGS)
def test_minimal_cli_functionality():
    """
    We roughly want to implement the equivalent of the following:
    from bids.analysis import Analysis
    from bids.layout import BIDSLayout
    layout = BIDSLayout("data/ds000003")
    analysis = Analysis(model="data/ds000003/models/model-001_smdl.json",layout=layout)
    analysis.setup()
    more specifically we want to reimplement this line
    https://github.com/bids-standard/pybids/blob/b6cd0f6787230ce976a374fbd5fce650865752a3/bids/analysis/analysis.py#L282
    """
    # Build "--flag=value" pairs from the example arguments (keys are
    # lower-cased and underscores become hyphens to match the CLI).
    arg_list = " ".join(
        f"""--{k.lower().replace("_", "-")}={v}""" for k, v in EXAMPLE_USER_ARGS.items()
    )
    cmd = f"{SYNTHESIZER} {arg_list}"
    # check_output raises CalledProcessError (failing the test) on a
    # non-zero exit; the captured output itself is not inspected.
    sp.check_output(cmd.split())
@pytest.mark.xfail(reason="Container not setup for boutiques yet")
def test_minimal_cli_functionality_using_boutiques():
    """This might be nice to do. boutiques sets /bin/sh as the entrypoint for the contain to /bin/sh so this should be tweaked to have the conda env and the pip installed package working correctly"""
    boutiques_dir = Path(__file__).parent.parent / "boutiques"
    argv = [
        "bosh",
        "exec",
        "launch",
        f"{boutiques_dir}/bids-app-bids-statsmodels-design-synthesizer.json",
        f"{boutiques_dir}/invocation.json",
    ]
    sp.check_output(argv)
| [
"subprocess.check_output",
"pathlib.Path",
"pytest.mark.xfail",
"pytest.raises",
"bids_statsmodels_design_synthesizer.aggregate_stats_design.main"
]
| [((1737, 1802), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Container not setup for boutiques yet"""'}), "(reason='Container not setup for boutiques yet')\n", (1754, 1802), False, 'import pytest\n'), ((896, 929), 'bids_statsmodels_design_synthesizer.aggregate_stats_design.main', 'synth_mod.main', (['EXAMPLE_USER_ARGS'], {}), '(EXAMPLE_USER_ARGS)\n', (910, 929), True, 'from bids_statsmodels_design_synthesizer import aggregate_stats_design as synth_mod\n'), ((645, 681), 'pytest.raises', 'pytest.raises', (['sp.CalledProcessError'], {}), '(sp.CalledProcessError)\n', (658, 681), False, 'import pytest\n'), ((700, 736), 'subprocess.check_output', 'sp.check_output', (["[SYNTHESIZER, '-h']"], {}), "([SYNTHESIZER, '-h'])\n", (715, 736), True, 'import subprocess as sp\n'), ((746, 782), 'pytest.raises', 'pytest.raises', (['sp.CalledProcessError'], {}), '(sp.CalledProcessError)\n', (759, 782), False, 'import pytest\n'), ((801, 849), 'subprocess.check_output', 'sp.check_output', (["[SYNTHESIZER, '--non-existent']"], {}), "([SYNTHESIZER, '--non-existent'])\n", (816, 849), True, 'import subprocess as sp\n'), ((1473, 1487), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1477, 1487), False, 'from pathlib import Path\n'), ((2077, 2091), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2081, 2091), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
# coding: utf-8
__author__ = 'whoami'
"""
@version: 1.0
@author: whoami
@license: Apache Licence 2.0
@contact: <EMAIL>
@site: http://www.itweet.cn
@software: PyCharm Community Edition
@file: plugin_api.py
@time: 2015-11-28 下午1:52
"""
from linux import cpu,disk,iostats,loadavg,memory,netstats,swap
def get_load_info():
    """Return the result of the loadavg plugin's monitor() call."""
    metrics = loadavg.monitor()
    return metrics
def get_cpu_status():
    """Return the result of the cpu plugin's monitor() call."""
    metrics = cpu.monitor()
    return metrics
def get_memory_info():
    """Return the result of the memory plugin's monitor() call."""
    metrics = memory.monitor()
    return metrics
def get_swap_info():
    """Return the result of the swap plugin's monitor() call."""
    metrics = swap.monitor()
    return metrics
def get_disk_info():
    """Return the result of the disk plugin's monitor() call."""
    metrics = disk.monitor()
    return metrics
def get_network_info():
    """Return the result of the netstats plugin's monitor() call."""
    metrics = netstats.monitor()
    return metrics
def get_iostats_info():
    """Return the result of the iostats plugin's monitor() call."""
    metrics = iostats.monitor()
    return metrics
| [
"linux.cpu.monitor",
"linux.iostats.monitor",
"linux.swap.monitor",
"linux.netstats.monitor",
"linux.memory.monitor",
"linux.loadavg.monitor",
"linux.disk.monitor"
]
| [((355, 372), 'linux.loadavg.monitor', 'loadavg.monitor', ([], {}), '()\n', (370, 372), False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((407, 420), 'linux.cpu.monitor', 'cpu.monitor', ([], {}), '()\n', (418, 420), False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((456, 472), 'linux.memory.monitor', 'memory.monitor', ([], {}), '()\n', (470, 472), False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((506, 520), 'linux.swap.monitor', 'swap.monitor', ([], {}), '()\n', (518, 520), False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((554, 568), 'linux.disk.monitor', 'disk.monitor', ([], {}), '()\n', (566, 568), False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((605, 623), 'linux.netstats.monitor', 'netstats.monitor', ([], {}), '()\n', (621, 623), False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n'), ((660, 677), 'linux.iostats.monitor', 'iostats.monitor', ([], {}), '()\n', (675, 677), False, 'from linux import cpu, disk, iostats, loadavg, memory, netstats, swap\n')] |
import numpy as np
import eyekit
import algorithms
import core
# Load the sample fixation data and the passage texts.
data = eyekit.io.load(core.FIXATIONS / 'sample.json')
passages = eyekit.io.load(core.DATA / 'passages.json')
original_sequence = data['trial_5']['fixations']
# Participant fixation coordinates as an integer (x, y) array.
fixation_XY = np.array([fixation.xy for fixation in original_sequence], dtype=int)
# Center coordinates of every word in passage 1B (non-alphabetical tokens included).
word_XY = np.array([word.center for word in passages['1B'].words(alphabetical_only=False)], dtype=int)
# Synthetic "expected" sequence: one fixation per word, starting at 0, 100,
# 200, ... with each fixation ending 100 units after it starts.
start_times = np.array([i*100 for i in range(len(word_XY))], dtype=int)
expected_sequence = eyekit.FixationSequence(np.column_stack([word_XY, start_times, start_times+100]))
diagram = eyekit.vis.Image(1920, 1080)
diagram.draw_text_block(passages['1B'], mask_text=True)
# Expected sequence in red, the participant's original sequence in blue.
diagram.draw_fixation_sequence(expected_sequence, color='#E32823', fixation_radius=6)
diagram.draw_fixation_sequence(original_sequence, color='#205E84', fixation_radius=6)
# Dynamic time warping maps each original fixation onto one or more words;
# draw a dashed line for every fixation-to-word assignment.
_, warping_path = algorithms.dynamic_time_warping(fixation_XY, word_XY)
for fixation, mapped_words in zip(original_sequence, warping_path):
	for word_i in mapped_words:
		word_x, word_y = word_XY[word_i]
		diagram.draw_line(fixation.xy, (word_x, word_y), color='black', stroke_width=0.5, dashed=True)
fig = eyekit.vis.Figure()
fig.add_image(diagram)
fig.set_crop_margin(2)
fig.set_padding(vertical=2, horizontal=3, edge=1)
fig.set_enumeration(False)
fig.save(core.VISUALS / 'illustration_warp.pdf', width=83)
# fig.save(core.FIGS / 'fig02_single_column.eps', width=83)
| [
"eyekit.vis.Image",
"eyekit.vis.Figure",
"numpy.column_stack",
"algorithms.dynamic_time_warping",
"eyekit.io.load",
"numpy.array"
]
| [((71, 117), 'eyekit.io.load', 'eyekit.io.load', (["(core.FIXATIONS / 'sample.json')"], {}), "(core.FIXATIONS / 'sample.json')\n", (85, 117), False, 'import eyekit\n'), ((129, 172), 'eyekit.io.load', 'eyekit.io.load', (["(core.DATA / 'passages.json')"], {}), "(core.DATA / 'passages.json')\n", (143, 172), False, 'import eyekit\n'), ((238, 306), 'numpy.array', 'np.array', (['[fixation.xy for fixation in original_sequence]'], {'dtype': 'int'}), '([fixation.xy for fixation in original_sequence], dtype=int)\n', (246, 306), True, 'import numpy as np\n'), ((596, 624), 'eyekit.vis.Image', 'eyekit.vis.Image', (['(1920)', '(1080)'], {}), '(1920, 1080)\n', (612, 624), False, 'import eyekit\n'), ((872, 925), 'algorithms.dynamic_time_warping', 'algorithms.dynamic_time_warping', (['fixation_XY', 'word_XY'], {}), '(fixation_XY, word_XY)\n', (903, 925), False, 'import algorithms\n'), ((1163, 1182), 'eyekit.vis.Figure', 'eyekit.vis.Figure', ([], {}), '()\n', (1180, 1182), False, 'import eyekit\n'), ((527, 585), 'numpy.column_stack', 'np.column_stack', (['[word_XY, start_times, start_times + 100]'], {}), '([word_XY, start_times, start_times + 100])\n', (542, 585), True, 'import numpy as np\n')] |
#!/usr/bin/python3
'''
This script follows formulas put forth in Kislyuk et al. (2011) to calculate genome
fluidity of a pangenome dataset. Variance and standard error are estimated as total
variance containing both the variance due to subsampling all possible combinations
(without replacement) of N genomes from the total pool of genomes and the variance
due to the limited number of sampled genomes (variance of the pangenome)(Kislyuk et al. 2011).
However, the script has a default max number of subsamples set to 250,000 for each N genomes.
This can be altered with the -max_sub / --max_subsamples flag or turned off with the --max_off flag.
Turning the max_off will force calculations to be done on all possible subsample combinations
of N genomes. For samples of N genomes that were stopped at the max number of subsamples the subsamples
are sampled WITH replacement and variance is calculated with a degree of freedom = 1 (i.e. n - 1).
Results are a text file of fluidity, variance, and standard error for all N genome samples
and a figure of pangenome fluidity with shaded regions showing total standard error with a
exponential regression fit.
Notes
1. This will only work if you have at least 5 isolates to make up your pangenome.
2. If you have 5 isolates your graph will probably not look pretty as it's difficult
to fit with such a low number of samples.
'''
import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing import Pool
from itertools import combinations
from collections import OrderedDict
from collections.abc import Iterable
from scipy.optimize import curve_fit, differential_evolution
rundir = os.getcwd()
class MyFormatter(argparse.RawTextHelpFormatter):
    """Raw-text help formatter with the help column widened to position 48."""
    def __init__(self, prog):
        super().__init__(prog, max_help_position=48)
# ---- command-line interface --------------------------------------------
parser = argparse.ArgumentParser(
    usage='./%(prog)s [options] -i orthogroups -o output_folder',
    description = ''' Performs multiple bootstraps and calculates genome fluidity 
    from a pangenome dataset (orthogroups).''',
    epilog = """Written by <NAME> (2019)""",
    formatter_class = MyFormatter)
parser.add_argument(
    '-i',
    '--input',
    required = True,
    help = 'Orthogroups file, see format in READ.me',
    metavar=''
)
parser.add_argument(
    '-o',
    '--out',
    required = True,
    help = 'Output folder',
    metavar=''
)
parser.add_argument(
    '-c',
    '--cpus',
    type=int,
    default=1,
    help = 'Number of cores to use for multiprocessing [default: 1]',
    metavar=''
)
parser.add_argument(
    '-max_sub',
    '--max_subsamples',
    type=int,
    default=250000,
    help = 'Max number of subsamples to run on N genomes sampled. [default: 250000]',
    metavar=''
)
parser.add_argument(
    '--max_off',
    action='store_true',
    help = 'Turn off the max subsamples. This will cause the script sample ALL possible combinations'\
    'for N genomes',
)
parser.add_argument(
    '-p',
    '--prefix',
    help = 'Prefix to append to the result files (such as Genus, species, etc.)',
    metavar=''
)
args=parser.parse_args()
# ---- derived paths: ensure the output folder exists and resolve file names --
if not os.path.isdir(args.out):
    os.makedirs(os.path.join(args.out))
result_dir = os.path.abspath(os.path.join(rundir, args.out))
if args.input:
    input_file = os.path.abspath(args.input)
else:
    print('ERROR: No orthogroups file was provided please provide on, -i or --input')
    sys.exit()
# Result files are prefixed with args.prefix when given, otherwise named
# Pangenome_fluidity.{txt,png}.
if args.prefix:
    fluid_results = os.path.abspath(os.path.join(result_dir, args.prefix+'_fluidity.txt'))
    fluid_fig = os.path.abspath(os.path.join(result_dir, args.prefix+'_fluidity.png'))
else:
    fluid_results = os.path.abspath(os.path.join(result_dir, 'Pangenome_fluidity.txt'))
    fluid_fig = os.path.abspath(os.path.join(result_dir, 'Pangenome_fluidity.png'))
def create_ortho_dictionary(ortho_file):
    """Parse an orthogroups file into {cluster: [unique isolate names]}.

    Each line is '<cluster><sep><gene> <gene> ...' where the separator is
    ':', a tab, or a single space. The isolate name is the part of each
    gene identifier before the first underscore.
    """
    print('Creating ortholog dictionary')
    clusters = OrderedDict()  # {Protein Cluster : list of isolates represented in cluster}
    with open(ortho_file, 'r') as handle:
        for record in [raw.strip() for raw in sorted(handle)]:
            if ':' in record:
                cluster, genes = record.split(':')
            elif '\t' in record:
                cluster, genes = record.split('\t', 1)
            else:
                cluster, genes = record.split(' ', 1)
            isolates = [gene.split('_')[0] for gene in re.findall(r'[^\s]+', genes)]
            clusters[cluster] = list(set(isolates))
    return clusters
def create_pair_dictionary(ortho_dictionary, isolates=None):
    """Compute, for every unordered pair of isolates, the ratio of clusters
    unique to one member of the pair over all clusters the pair appears in.

    Parameters
    ----------
    ortho_dictionary : dict
        {cluster : list of isolates represented in that cluster}.
    isolates : list, optional
        Isolate names to pair up. Defaults to the module-level iso_list
        built in __main__ (kept for backward compatibility).

    Returns
    -------
    dict
        {(isolate1, isolate2) : unique/total ratio}, keys as sorted tuples.
    """
    print('Creating dictionary of paired ratio values')
    if isolates is None:
        isolates = iso_list  # module-level list built in __main__
    pair_dict = {}  # {(Isolate1, Isolate2) : ratio of sum(unique clusters)/sum(all clusters)}
    # combinations() yields each unordered pair exactly once, in the same
    # first-encounter order as the original O(n^2) double loop (which
    # visited every pair twice and re-checked membership in dict.keys()).
    for duo in combinations(isolates, 2):
        pair = tuple(sorted(duo))
        shared = unique_k = unique_l = 0
        for members in ortho_dictionary.values():
            in_k = pair[0] in members
            in_l = pair[1] in members
            if in_k and in_l:
                shared += 1
            elif in_k:
                unique_k += 1
            elif in_l:
                unique_l += 1
            # clusters containing neither isolate are ignored
        unique_pair = unique_k + unique_l
        all_pair = (unique_k + shared) + (unique_l + shared)
        pair_dict[pair] = unique_pair / all_pair
    return pair_dict
def compute_fluidity_all_genomes(isolates=None, pairs=None):
    """Compute pangenome fluidity and its jackknife variance from all genomes.

    Fluidity is (2 / (N*(N-1))) * sum of all pairwise unique/total ratios
    (Kislyuk et al. 2011). The variance is the leave-one-out jackknife
    estimate over the N subsets of N-1 genomes.

    Parameters
    ----------
    isolates : list, optional
        Genome names; defaults to the module-level iso_list.
    pairs : dict, optional
        {(iso1, iso2): ratio} with sorted tuple keys; defaults to the
        module-level pair_dict.

    Returns
    -------
    tuple of (float, float)
        (pangenome_fluidity, fluidity_variance)
    """
    if isolates is None:
        isolates = iso_list
    if pairs is None:
        pairs = pair_dict
    N = len(isolates)
    # Average of all C(N,2) pairwise ratios, expressed via the 2/(N(N-1)) factor.
    pangenome_fluidity = (2 / (N * (N - 1))) * sum(pairs.values())
    fluidity_i_list = []
    # Leave-one-out jackknife: recompute fluidity on every (N-1)-genome subset.
    for sample in combinations(isolates, N - 1):
        jack_pairs = tuple(combinations(sample, 2))
        jack_sample_fluidity = [pairs[tuple(sorted(p))] for p in jack_pairs]
        fluidity_i_list.append((2 / ((N - 1) * (N - 2))) * sum(jack_sample_fluidity))
    fluidity_i_mean = np.mean(fluidity_i_list)
    fluidity_variance = ((N - 1) / N) * sum((i - fluidity_i_mean) ** 2 for i in fluidity_i_list)
    return pangenome_fluidity, fluidity_variance
def subsample_multiprocess(combo_list, pairs=None):
    """Compute the genome fluidity of every subsample in combo_list.

    Runs inside multiprocessing workers, so by default it reads the
    module-level pair_dict that each worker process inherits.

    Parameters
    ----------
    combo_list : list of tuples
        Each tuple is one subsample of genome names; all tuples have the
        same length N.
    pairs : dict, optional
        {(iso1, iso2): ratio} lookup with sorted tuple keys; defaults to
        the module-level pair_dict.

    Returns
    -------
    list of float
        One fluidity value per subsample, in input order.
    """
    if pairs is None:
        pairs = pair_dict
    N = len(combo_list[0])  # number of genomes per subsample
    coef = 2 / (N * (N - 1))  # loop-invariant fluidity normalisation factor
    return [
        coef * sum(pairs[tuple(sorted(p))] for p in combinations(sample, 2))
        for sample in combo_list
    ]
def genome_subsamples_fluidities(perm_list):
    '''
    Compute fluidities from all possible combinations of genomes from 3 to N randomly sampled genomes
    (N is the max number of genomes in sample, so only sampled once). Has a cut off of max subsamples
    at which point variances are calculated as sample variances (n-1) instead of full population
    variances.

    N values that hit the subsample cap are appended to perm_list (mutated
    in place) so the caller knows to use ddof=1 for them.
    '''
    sub_fluid_dict = {} # {N genomes sampled : [list of fluidities from subsamples]}
    for N in range(3, iso_num + 1):
        sub_fluid_dict[N] = []
        N_combos = list(combinations(iso_list, N))
        if args.max_off:
            combos = N_combos
        else:
            if len(N_combos) > args.max_subsamples:
                # Too many combinations: sample WITH replacement up to the cap
                # and record N so variance is later computed as sample variance.
                combos = random.choices(N_combos, k=args.max_subsamples)
                perm_list.append(N)
            else:
                combos = N_combos
        print('Performing fluidity calculations on {} subsample combinations of {} genomes'.format(len(combos),N))
        if not len(N_combos) == 1:
            # Split the workload into roughly one chunk per CPU and farm the
            # chunks out to worker processes.
            # NOTE(review): chunk rounds to 0 when len(combos) < cpus/2, which
            # makes range()'s step invalid -- confirm expected input sizes.
            chunk = round(len(combos)/args.cpus)
            split_combos = [combos[i:i + chunk] for i in range(0, len(combos), chunk)]
            pool = Pool(processes=args.cpus)
            results = pool.imap(subsample_multiprocess, split_combos)
            pool.close()
            pool.join()
            sub_fluid_dict[N].append(results)
        else:
            # Only one possible combination (N == iso_num): compute in-process.
            last_run = subsample_multiprocess(N_combos)
            sub_fluid_dict[N].append(last_run)
        sub_fluid_dict[N]=list(flatten(sub_fluid_dict[N]))
        print(len(sub_fluid_dict[N]))
    return sub_fluid_dict
def flatten(lis):
    """Recursively yield the atoms of an arbitrarily nested iterable.

    Strings count as atoms and are not iterated character by character.
    """
    for element in lis:
        if isinstance(element, Iterable) and not isinstance(element, str):
            yield from flatten(element)
        else:
            yield element
def exponential(x, a, b, c):
    """Growth model a * exp(b * x) + c used when fitting the error curves."""
    growth = np.exp(b * x)
    return a * growth + c
def neg_exponential(x, a, b, c):
    """Decay model a * exp(-b * x) + c used when fitting the error curves."""
    decay = np.exp(-b * x)
    return a * decay + c
def sumOfSquaredError(parameterTuple, x_values, y_curve_values, func):
    """Cost function for the genetic algorithm: SSE of func against the curve."""
    warnings.filterwarnings("ignore")  # do not print warnings by genetic algorithm
    residuals = y_curve_values - func(x_values, *parameterTuple)
    return np.sum(residuals ** 2.0)
def generate_Initial_Parameters(x_values, y_curve_values, func):
    """Seed (a, b, c) parameters for curve_fit via a differential-evolution search.

    Each parameter is searched over [-maxXY, maxXY], where maxXY is the
    largest value in either the x data or the y curve data. The search is
    seeded so results are repeatable.

    Returns
    -------
    numpy.ndarray
        Best (a, b, c) found for *func* by scipy's differential evolution.
    """
    # The original also computed min(x)/min(y) but never used them; only the
    # overall maximum defines the (symmetric) search bounds.
    maxXY = max(max(x_values), max(y_curve_values))
    parameterBounds = [[-maxXY, maxXY]] * 3  # identical bounds for a, b and c
    # "seed" the numpy random number generator for repeatable results
    result = differential_evolution(sumOfSquaredError, parameterBounds, args=(x_values, y_curve_values, func), seed=3)
    return result.x
def create_fluidity_results(figure_output, results_output):
    """Plot fluidity with its total standard-error band and write the table.

    Reads the module-level globals set in __main__ (iso_num,
    permutation_list, sub_fluid_dict, pan_variance, pan_fluidity).
    Exponential (or negative-exponential) curves are fitted to the upper
    and lower error bounds to smooth the shaded region of the figure.
    """
    total_variance = []
    for i in range(3, iso_num + 1):
        # Runs that hit the subsample cap were drawn WITH replacement, so use
        # sample variance (ddof=1); otherwise use population variance.
        if i in permutation_list:
            total_variance.append(np.var(sub_fluid_dict[i], ddof = 1) + pan_variance)
        else:
            total_variance.append(np.var(sub_fluid_dict[i]) + pan_variance)
    total_variance = np.array(total_variance)
    total_stderr = np.array([x**(1/2) for x in total_variance])
    y_fluidity_values = np.array([pan_fluidity for i in range(3, iso_num + 1)])
    x_labels = np.array([i for i in range(3, iso_num + 1)])
    stderr_bottom = np.array([(pan_fluidity - v) for v in total_stderr])
    stderr_top = np.array([(pan_fluidity + v) for v in total_stderr])
    fig, ax = plt.subplots()
    try: # Still had problems sometimes with fitting curves, this solution works best for now
        geneticParameters_top = generate_Initial_Parameters(x_labels, stderr_top, exponential)
        geneticParameters_bottom = generate_Initial_Parameters(x_labels, stderr_bottom, exponential)
        popt_t, pcov = curve_fit(exponential, x_labels, stderr_top, geneticParameters_top, maxfev=10000)
        popt_b, pcov = curve_fit(exponential, x_labels, stderr_bottom, geneticParameters_bottom, maxfev=10000)
        # A fitted curve with <= 3 distinct values is treated as degenerate and
        # refitted with the negative-exponential model instead.
        if len(set(exponential(x_labels, *popt_t))) > 3 and len(set(exponential(x_labels, *popt_b))) > 3:
            plt.fill_between(x_labels, exponential(x_labels, *popt_t), exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
            top_curve = exponential(x_labels, *popt_t)
            bottom_curve = exponential(x_labels, *popt_b)
        if len(set(exponential(x_labels, *popt_t))) <= 3:
            geneticParameters_top = generate_Initial_Parameters(x_labels, stderr_top, neg_exponential)
            popt_t, pcov = curve_fit(neg_exponential, x_labels, stderr_top, geneticParameters_top, maxfev=10000)
            plt.fill_between(x_labels, neg_exponential(x_labels, *popt_t), exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
            top_curve = neg_exponential(x_labels, *popt_t)
            bottom_curve = exponential(x_labels, *popt_b)
        else:
            pass
        if len(set(exponential(x_labels, *popt_b))) <= 3:
            geneticParameters_bottom = generate_Initial_Parameters(x_labels, stderr_bottom, neg_exponential)
            popt_b, pcov = curve_fit(neg_exponential, x_labels, stderr_bottom, geneticParameters_bottom, maxfev=10000)
            plt.fill_between(x_labels, exponential(x_labels, *popt_t), neg_exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
            top_curve = exponential(x_labels, *popt_t)
            bottom_curve = neg_exponential(x_labels, *popt_b)
        else:
            pass
    # NOTE(review): this bare except silently swallows any fitting failure,
    # leaving top_curve/bottom_curve undefined so the results loop below
    # would raise NameError -- confirm whether that is acceptable.
    except:
        pass
    ax.set_axisbelow(True)
    plt.minorticks_on()
    plt.grid(which='minor', axis='y', color='white', linestyle='--', alpha=0.3)
    ax.yaxis.grid(True, linestyle='-', linewidth='1', which='major', color='white')
    ax.xaxis.grid(True, linestyle='-', linewidth='1', which='major', color='white', alpha=0.5)
    ax.tick_params(axis='x', which='minor', bottom=False)
    ax.set_facecolor('gainsboro')
    plt.plot(x_labels, y_fluidity_values, ls='--', lw=1, color='black') # plot y-values of fluidity
    plt.xticks(np.arange(x_labels[0], x_labels[len(x_labels)-1]+1, 1.0)) # make sure x interval is 1
    plt.xlim(x_labels[0], x_labels[len(x_labels)-1]) # adjust x limit so it starts with 3 at 0
    max_y = max(stderr_top)
    min_y = min(stderr_bottom)
    plt.ylim((min_y - min_y*0.15), (max_y + max_y*0.15))
    plt.xlabel('Number of genomes sampled')
    plt.ylabel('Fluidity, '+u'\u03C6')
    plt.tight_layout()
    plt.savefig(figure_output)
    with open(results_output, 'w') as results: # print out fluidity results
        results.write('Genomes_Sampled\tFluidity\tTotal_Variance\tTotal_Stderr\tExponential_top\tExponential_bottom\n')
        r_out = []
        for i in range(0, iso_num-2):
            r_out.append([str(i+3), str(pan_fluidity), str(total_variance[i]), str(total_stderr[i]),
            str(top_curve[i]), str(bottom_curve[i])])
        for line in r_out:
            results.write('\t'.join(line) + '\n')
if __name__ == "__main__":
    # Parse orthogroups, then keep only the "core" isolates: those present in
    # the largest cluster(s), i.e. clusters containing iso_num isolates.
    ortho_dict = create_ortho_dictionary(input_file)
    iso_num = max([len(v) for v in ortho_dict.values()])
    iso_list = list(set(itertools.chain.from_iterable([v for v in ortho_dict.values() if len(v) == iso_num])))
    # Pairwise ratios feed both the full-pangenome fluidity and the subsamples.
    pair_dict = create_pair_dictionary(ortho_dict)
    pan_results = compute_fluidity_all_genomes()
    pan_fluidity = pan_results[0]
    pan_variance = pan_results[1]
    # permutation_list collects the N values whose subsampling hit the cap.
    permutation_list = []
    sub_fluid_dict = genome_subsamples_fluidities(permutation_list)
    create_fluidity_results(fluid_fig, fluid_results)
| [
"matplotlib.pyplot.grid",
"scipy.optimize.differential_evolution",
"matplotlib.pyplot.ylabel",
"numpy.array",
"random.choices",
"sys.exit",
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.minorticks_on",
"numpy.exp",
"os.path.isdir",
"re.finditer",
"matplotlib.pyplot.ylim",
"collections.OrderedDict",
"matplotlib.pyplot.savefig",
"warnings.filterwarnings",
"scipy.optimize.curve_fit",
"os.path.join",
"os.getcwd",
"itertools.combinations",
"numpy.sum",
"multiprocessing.Pool",
"matplotlib.pyplot.tight_layout",
"os.path.abspath",
"matplotlib.pyplot.subplots",
"numpy.var"
]
| [((1749, 1760), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1758, 1760), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((1921, 2213), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""./%(prog)s [options] -i orthogroups -o output_folder"""', 'description': '""" Performs multiple bootstraps and calculates genome fluidity \n from a pangenome dataset (orthogroups)."""', 'epilog': '"""Written by <NAME> (2019)"""', 'formatter_class': 'MyFormatter'}), '(usage=\n \'./%(prog)s [options] -i orthogroups -o output_folder\', description=\n """ Performs multiple bootstraps and calculates genome fluidity \n from a pangenome dataset (orthogroups)."""\n , epilog=\'Written by <NAME> (2019)\', formatter_class=MyFormatter)\n', (1944, 2213), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((3207, 3230), 'os.path.isdir', 'os.path.isdir', (['args.out'], {}), '(args.out)\n', (3220, 3230), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((3301, 3331), 'os.path.join', 'os.path.join', (['rundir', 'args.out'], {}), '(rundir, args.out)\n', (3313, 3331), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((3366, 3393), 'os.path.abspath', 'os.path.abspath', (['args.input'], {}), '(args.input)\n', (3381, 3393), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((3490, 3500), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3498, 3500), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((4093, 4106), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4104, 4106), False, 'from collections import OrderedDict\n'), ((7049, 7073), 'numpy.mean', 'np.mean', (['fluidity_i_list'], {}), '(fluidity_i_list)\n', (7056, 7073), True, 'import numpy as np\n'), ((9904, 9937), 'warnings.filterwarnings', 'warnings.filterwarnings', 
(['"""ignore"""'], {}), "('ignore')\n", (9927, 9937), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((10036, 10073), 'numpy.sum', 'np.sum', (['((y_curve_values - val) ** 2.0)'], {}), '((y_curve_values - val) ** 2.0)\n', (10042, 10073), True, 'import numpy as np\n'), ((10618, 10727), 'scipy.optimize.differential_evolution', 'differential_evolution', (['sumOfSquaredError', 'parameterBounds'], {'args': '(x_values, y_curve_values, func)', 'seed': '(3)'}), '(sumOfSquaredError, parameterBounds, args=(x_values,\n y_curve_values, func), seed=3)\n', (10640, 10727), False, 'from scipy.optimize import curve_fit, differential_evolution\n'), ((11095, 11119), 'numpy.array', 'np.array', (['total_variance'], {}), '(total_variance)\n', (11103, 11119), True, 'import numpy as np\n'), ((11139, 11189), 'numpy.array', 'np.array', (['[(x ** (1 / 2)) for x in total_variance]'], {}), '([(x ** (1 / 2)) for x in total_variance])\n', (11147, 11189), True, 'import numpy as np\n'), ((11344, 11396), 'numpy.array', 'np.array', (['[(pan_fluidity - v) for v in total_stderr]'], {}), '([(pan_fluidity - v) for v in total_stderr])\n', (11352, 11396), True, 'import numpy as np\n'), ((11414, 11466), 'numpy.array', 'np.array', (['[(pan_fluidity + v) for v in total_stderr]'], {}), '([(pan_fluidity + v) for v in total_stderr])\n', (11422, 11466), True, 'import numpy as np\n'), ((11481, 11495), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11493, 11495), True, 'import matplotlib.pyplot as plt\n'), ((13537, 13556), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (13554, 13556), True, 'import matplotlib.pyplot as plt\n'), ((13561, 13636), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""minor"""', 'axis': '"""y"""', 'color': '"""white"""', 'linestyle': '"""--"""', 'alpha': '(0.3)'}), "(which='minor', axis='y', color='white', linestyle='--', alpha=0.3)\n", (13569, 13636), True, 'import matplotlib.pyplot as plt\n'), 
((13912, 13979), 'matplotlib.pyplot.plot', 'plt.plot', (['x_labels', 'y_fluidity_values'], {'ls': '"""--"""', 'lw': '(1)', 'color': '"""black"""'}), "(x_labels, y_fluidity_values, ls='--', lw=1, color='black')\n", (13920, 13979), True, 'import matplotlib.pyplot as plt\n'), ((14267, 14319), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(min_y - min_y * 0.15)', '(max_y + max_y * 0.15)'], {}), '(min_y - min_y * 0.15, max_y + max_y * 0.15)\n', (14275, 14319), True, 'import matplotlib.pyplot as plt\n'), ((14324, 14363), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of genomes sampled"""'], {}), "('Number of genomes sampled')\n", (14334, 14363), True, 'import matplotlib.pyplot as plt\n'), ((14368, 14399), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('Fluidity, ' + u'φ')"], {}), "('Fluidity, ' + u'φ')\n", (14378, 14399), True, 'import matplotlib.pyplot as plt\n'), ((14407, 14425), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14423, 14425), True, 'import matplotlib.pyplot as plt\n'), ((14430, 14456), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_output'], {}), '(figure_output)\n', (14441, 14456), True, 'import matplotlib.pyplot as plt\n'), ((3248, 3270), 'os.path.join', 'os.path.join', (['args.out'], {}), '(args.out)\n', (3260, 3270), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((3554, 3609), 'os.path.join', 'os.path.join', (['result_dir', "(args.prefix + '_fluidity.txt')"], {}), "(result_dir, args.prefix + '_fluidity.txt')\n", (3566, 3609), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((3641, 3696), 'os.path.join', 'os.path.join', (['result_dir', "(args.prefix + '_fluidity.png')"], {}), "(result_dir, args.prefix + '_fluidity.png')\n", (3653, 3696), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((3738, 3788), 'os.path.join', 'os.path.join', (['result_dir', '"""Pangenome_fluidity.txt"""'], {}), 
"(result_dir, 'Pangenome_fluidity.txt')\n", (3750, 3788), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((3822, 3872), 'os.path.join', 'os.path.join', (['result_dir', '"""Pangenome_fluidity.png"""'], {}), "(result_dir, 'Pangenome_fluidity.png')\n", (3834, 3872), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((6545, 6574), 'itertools.combinations', 'combinations', (['iso_list', '(N - 1)'], {}), '(iso_list, N - 1)\n', (6557, 6574), False, 'from itertools import combinations\n'), ((11809, 11895), 'scipy.optimize.curve_fit', 'curve_fit', (['exponential', 'x_labels', 'stderr_top', 'geneticParameters_top'], {'maxfev': '(10000)'}), '(exponential, x_labels, stderr_top, geneticParameters_top, maxfev=\n 10000)\n', (11818, 11895), False, 'from scipy.optimize import curve_fit, differential_evolution\n'), ((11914, 12005), 'scipy.optimize.curve_fit', 'curve_fit', (['exponential', 'x_labels', 'stderr_bottom', 'geneticParameters_bottom'], {'maxfev': '(10000)'}), '(exponential, x_labels, stderr_bottom, geneticParameters_bottom,\n maxfev=10000)\n', (11923, 12005), False, 'from scipy.optimize import curve_fit, differential_evolution\n'), ((4588, 4619), 're.finditer', 're.finditer', (['"""([^\\\\s]+)"""', 'genes'], {}), "('([^\\\\s]+)', genes)\n", (4599, 4619), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((6716, 6739), 'itertools.combinations', 'combinations', (['sample', '(2)'], {}), '(sample, 2)\n', (6728, 6739), False, 'from itertools import combinations\n'), ((7659, 7682), 'itertools.combinations', 'combinations', (['sample', '(2)'], {}), '(sample, 2)\n', (7671, 7682), False, 'from itertools import combinations\n'), ((8461, 8486), 'itertools.combinations', 'combinations', (['iso_list', 'N'], {}), '(iso_list, N)\n', (8473, 8486), False, 'from itertools import combinations\n'), ((9075, 9100), 'multiprocessing.Pool', 'Pool', ([], {'processes': 
'args.cpus'}), '(processes=args.cpus)\n', (9079, 9100), False, 'from multiprocessing import Pool\n'), ((9742, 9755), 'numpy.exp', 'np.exp', (['(b * x)'], {}), '(b * x)\n', (9748, 9755), True, 'import numpy as np\n'), ((9809, 9823), 'numpy.exp', 'np.exp', (['(-b * x)'], {}), '(-b * x)\n', (9815, 9823), True, 'import numpy as np\n'), ((12541, 12630), 'scipy.optimize.curve_fit', 'curve_fit', (['neg_exponential', 'x_labels', 'stderr_top', 'geneticParameters_top'], {'maxfev': '(10000)'}), '(neg_exponential, x_labels, stderr_top, geneticParameters_top,\n maxfev=10000)\n', (12550, 12630), False, 'from scipy.optimize import curve_fit, differential_evolution\n'), ((13105, 13200), 'scipy.optimize.curve_fit', 'curve_fit', (['neg_exponential', 'x_labels', 'stderr_bottom', 'geneticParameters_bottom'], {'maxfev': '(10000)'}), '(neg_exponential, x_labels, stderr_bottom,\n geneticParameters_bottom, maxfev=10000)\n', (13114, 13200), False, 'from scipy.optimize import curve_fit, differential_evolution\n'), ((8634, 8681), 'random.choices', 'random.choices', (['N_combos'], {'k': 'args.max_subsamples'}), '(N_combos, k=args.max_subsamples)\n', (8648, 8681), False, 'import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess\n'), ((10932, 10965), 'numpy.var', 'np.var', (['sub_fluid_dict[i]'], {'ddof': '(1)'}), '(sub_fluid_dict[i], ddof=1)\n', (10938, 10965), True, 'import numpy as np\n'), ((11032, 11057), 'numpy.var', 'np.var', (['sub_fluid_dict[i]'], {}), '(sub_fluid_dict[i])\n', (11038, 11057), True, 'import numpy as np\n')] |
#
# This module provides the Instance class that encapsulate some complex server instances related operations
#
from __future__ import print_function
from json import loads
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from cinderclient import client as cinder_client
from osvolbackup.server import ServerInstance, ServerNotFound
from osvolbackup.osauth import get_session, VERSION
from osvolbackup.verbose import vprint
from time import time, sleep
class BackupGroup(object):
    """A group of Cinder volume backups belonging to one server instance.

    Backups are located by the naming convention ``osvb_<instance id>`` and
    grouped by the ``backup_time`` value stored (as JSON) in each backup's
    description field.
    """

    # Maximum seconds to wait per GiB of volume when polling for a state change.
    max_secs_gbi = 300
    # Seconds between consecutive status polls.
    poll_delay = 10

    def __init__(self, serverName):
        """Collect all backups for *serverName*.

        If the server no longer exists, *serverName* is assumed to already
        be the original instance id.

        :raises BackupNotFound: when no backup matches the server
        """
        self.selected_metadata = None
        self.selected_backups = []
        self.selected_volumes = []
        session = self.session = get_session()
        self.neutron = neutron_client.Client(session=session)
        self.nova = nova_client.Client(VERSION, session=session)
        self.cinder = cinder_client.Client(VERSION, session=session)
        try:
            server = ServerInstance(serverName)
        except ServerNotFound:
            name = 'osvb_'+serverName
        else:
            name = 'osvb_'+server.instance.id
        self.backup_list = self.cinder.backups.list(search_opts={"name": name})
        self.volume_map = {}
        if len(self.backup_list) == 0:
            raise BackupNotFound(serverName)
        # Load metadata from the backup description field
        self.backup_meta_data = backup_meta_data = {}
        for backup in self.backup_list:
            meta_data = loads(backup.description)
            backup_meta_data[backup.id] = meta_data
            self.volume_map[backup.id] = {"id": backup.volume_id, "size": backup.size}
        self.available_backups = sorted(set([b['backup_time'] for b in backup_meta_data.values()]))

    def select_by_tag(self, tag):
        """Select the backup set identified by *tag*.

        Only ``last`` (the most recent backup time) is currently supported.

        :raises BackupTooMany: for any unsupported tag
        """
        if tag == 'last':
            selected_backup_timestamp = self.available_backups[-1]
        else:
            raise BackupTooMany(tag)
        # Get volumes associated with the selected backup.
        # BUGFIX: use .items() instead of the Python2-only .iteritems() so
        # this also runs on Python 3 (.items() works on both).
        for backup_id, backup_meta in self.backup_meta_data.items():
            if backup_meta['backup_time'] == selected_backup_timestamp:
                self.selected_backups.append(backup_id)
                self.selected_volumes.append(self.volume_map[backup_id])
                self.selected_metadata = backup_meta

    def get_volumes(self):
        """Return the volume descriptors of the currently selected backup."""
        return self.selected_volumes

    def restore(self, server=None, network=None, to_project=None, skip_vm=False):
        """Restore the selected backup into fresh volumes and, unless
        *skip_vm* is set, boot a new server from them.

        :param network: optional "<net name>=<ipv4>" NIC override
        :param to_project: project to restore into (None = current project)
        :param skip_vm: when True only the volumes are restored
        """
        # flavor = self.nova.flavors.find(name=self.selected_metadata['flavor'])
        new_volume_list = self._create_volumes(self.selected_volumes, to_project)
        # Restore the volumes
        block_device_mapping = {}
        for i, backup_id in enumerate(self.selected_backups):
            vol_index = self.backup_meta_data[backup_id]['vol_index']
            new_volume_id = new_volume_list[i].id
            vprint("Restoring from backup", backup_id, "to volume", new_volume_id)
            dev_name = "vd" + chr(ord('a') + vol_index)
            block_device_mapping[dev_name] = new_volume_id
            restore = self.cinder.restores.restore(backup_id=backup_id, volume_id=new_volume_id)
            restored_volume = self.cinder.volumes.get(restore.volume_id)
            self._wait_for(restored_volume, ('restoring-backup',), 'available')
            # We need to get again to refresh the metadata
            restored_volume = self.cinder.volumes.get(restore.volume_id)
            if vol_index == 0:
                # The boot volume carries the saved server name, flavor and
                # network layout in its metadata.
                if not skip_vm:
                    name = restored_volume.metadata['osvb_name']
                    flavor = restored_volume.metadata['osvb_flavor']
                    flavor = self.nova.flavors.find(name=flavor)  # name to id
                saved_networks = loads(restored_volume.metadata['osvb_network'])
        if not skip_vm:
            nics = []
            if network is not None:
                net_name, net_ip = network.split("=")
                net_id = self.neutron.list_networks(name=net_name)['networks'][0]['id']
                nic_info = {'net-id': net_id, 'v4-fixed-ip': net_ip}
                nics.append(nic_info)
            else:
                # Recreate the NICs that were recorded with the backup.
                # BUGFIX: .items() instead of the Python2-only .iteritems().
                for network_name, network_ips in saved_networks.items():
                    nic_info = {}
                    nic_info['net-id'] = self.neutron.list_networks(name=network_name)['networks'][0]['id']
                    nic_info['v4-fixed-ip'] = network_ips[0]
                    nics.append(nic_info)
            target_session = get_session(to_project)
            target_nova = nova_client.Client(VERSION, session=target_session)
            server = target_nova.servers.create(
                name=name, image=None, flavor=flavor, block_device_mapping=block_device_mapping, nics=nics
            )
            print("Server was restored into instance", server.id)

    def _create_volumes(self, volume_list, to_project):
        """Create empty target volumes (one per descriptor in *volume_list*)
        in *to_project* and wait until each becomes available."""
        vprint("Creating volumes for the instance restore")
        target_session = get_session(to_project)
        target_cinder = cinder_client.Client(VERSION, session=target_session)
        vol_list = []
        for volume in volume_list:
            vprint("Creating %dG volume" % volume['size'])
            new_volume = target_cinder.volumes.create(volume['size'])
            self._wait_for(new_volume, ('creating',), 'available')
            vol_list.append(new_volume)
        return vol_list

    # Borrowed from https://github.com/Akrog/cinderback/blob/master/cinderback.py
    def _wait_for(self, resource, allowed_states, expected_states=None, timeout=None):
        """Waits for a resource to come to a specific state.

        :param resource: Resource we want to wait for
        :param allowed_states: iterator with allowed intermediary states
        :param expected_states: states we expect to have at the end, if None
                                is supplied then anything is good.
        :param timeout: explicit timeout in seconds; when None, a deadline of
                        ``max_secs_gbi`` seconds per GiB of the resource is used.
        :return: The most updated resource
        """
        if timeout:
            deadline = time() + timeout
        else:
            deadline = time() + (self.max_secs_gbi * resource.size)
        while resource.status in allowed_states:
            sleep(self.poll_delay)
            if deadline <= time():
                # NOTE(review): no project TimeoutError class is defined in
                # this module; on Python 3 this resolves to the builtin
                # TimeoutError, which rejects the ``what`` keyword, so this
                # raise would itself fail -- consider adding a
                # BackupException subclass named TimeoutError.
                raise TimeoutError(what=resource)
            resource = resource.manager.get(resource.id)
        if expected_states and resource.status not in expected_states:
            raise UnexpectedStatus(what=resource, intermediate=allowed_states, final=expected_states)
        return resource
class BackupException(Exception):
    """Base class for osvolbackup errors; remembers the offending object."""

    def __init__(self, what, *args, **kwargs):
        """Store *what* (the object the error concerns) and init Exception."""
        self.what = what
        super(BackupException, self).__init__(*args, **kwargs)

    def __str__(self):
        """Render as ``<ClassName>: <what>``."""
        cls_name = self.__class__.__name__
        return u'%s: %s' % (cls_name, self.what)
class UnexpectedStatus(BackupException):
    """Raised when a polled resource settles in a state we did not expect."""

    def __init__(self, what, intermediate='', final='', *args, **kwargs):
        """Record the allowed intermediate and expected final states."""
        self.intermediate = intermediate
        self.final = final
        super(UnexpectedStatus, self).__init__(what, *args, **kwargs)

    def __str__(self):
        """Show the resource status plus the expected states, if known."""
        if not (self.intermediate or self.final):
            steps = ''
        else:
            steps = (' [intermediate: %s, final: %s]' % (self.intermediate, self.final))
        return (u'%s: Status is %s%s' %
                (self.__class__.__name__, self.what.status, steps))
class BackupNotFound(BackupException):
    """Raised when no Cinder backup exists for the requested server."""
    pass
class BackupTooMany(BackupException):
    """Raised by select_by_tag() for an unsupported selection tag."""
    pass
| [
"json.loads",
"osvolbackup.verbose.vprint",
"cinderclient.client.Client",
"osvolbackup.server.ServerInstance",
"neutronclient.v2_0.client.Client",
"time.sleep",
"osvolbackup.osauth.get_session",
"novaclient.client.Client",
"time.time"
]
| [((757, 770), 'osvolbackup.osauth.get_session', 'get_session', ([], {}), '()\n', (768, 770), False, 'from osvolbackup.osauth import get_session, VERSION\n'), ((794, 832), 'neutronclient.v2_0.client.Client', 'neutron_client.Client', ([], {'session': 'session'}), '(session=session)\n', (815, 832), True, 'from neutronclient.v2_0 import client as neutron_client\n'), ((853, 897), 'novaclient.client.Client', 'nova_client.Client', (['VERSION'], {'session': 'session'}), '(VERSION, session=session)\n', (871, 897), True, 'from novaclient import client as nova_client\n'), ((920, 966), 'cinderclient.client.Client', 'cinder_client.Client', (['VERSION'], {'session': 'session'}), '(VERSION, session=session)\n', (940, 966), True, 'from cinderclient import client as cinder_client\n'), ((5002, 5053), 'osvolbackup.verbose.vprint', 'vprint', (['"""Creating volumes for the instance restore"""'], {}), "('Creating volumes for the instance restore')\n", (5008, 5053), False, 'from osvolbackup.verbose import vprint\n'), ((5079, 5102), 'osvolbackup.osauth.get_session', 'get_session', (['to_project'], {}), '(to_project)\n', (5090, 5102), False, 'from osvolbackup.osauth import get_session, VERSION\n'), ((5127, 5180), 'cinderclient.client.Client', 'cinder_client.Client', (['VERSION'], {'session': 'target_session'}), '(VERSION, session=target_session)\n', (5147, 5180), True, 'from cinderclient import client as cinder_client\n'), ((1001, 1027), 'osvolbackup.server.ServerInstance', 'ServerInstance', (['serverName'], {}), '(serverName)\n', (1015, 1027), False, 'from osvolbackup.server import ServerInstance, ServerNotFound\n'), ((1528, 1553), 'json.loads', 'loads', (['backup.description'], {}), '(backup.description)\n', (1533, 1553), False, 'from json import loads\n'), ((2930, 3000), 'osvolbackup.verbose.vprint', 'vprint', (['"""Restoring from backup"""', 'backup_id', '"""to volume"""', 'new_volume_id'], {}), "('Restoring from backup', backup_id, 'to volume', new_volume_id)\n", (2936, 3000), 
False, 'from osvolbackup.verbose import vprint\n'), ((4562, 4585), 'osvolbackup.osauth.get_session', 'get_session', (['to_project'], {}), '(to_project)\n', (4573, 4585), False, 'from osvolbackup.osauth import get_session, VERSION\n'), ((4612, 4663), 'novaclient.client.Client', 'nova_client.Client', (['VERSION'], {'session': 'target_session'}), '(VERSION, session=target_session)\n', (4630, 4663), True, 'from novaclient import client as nova_client\n'), ((5250, 5296), 'osvolbackup.verbose.vprint', 'vprint', (["('Creating %dG volume' % volume['size'])"], {}), "('Creating %dG volume' % volume['size'])\n", (5256, 5296), False, 'from osvolbackup.verbose import vprint\n'), ((6330, 6352), 'time.sleep', 'sleep', (['self.poll_delay'], {}), '(self.poll_delay)\n', (6335, 6352), False, 'from time import time, sleep\n'), ((6170, 6176), 'time.time', 'time', ([], {}), '()\n', (6174, 6176), False, 'from time import time, sleep\n'), ((6224, 6230), 'time.time', 'time', ([], {}), '()\n', (6228, 6230), False, 'from time import time, sleep\n'), ((6380, 6386), 'time.time', 'time', ([], {}), '()\n', (6384, 6386), False, 'from time import time, sleep\n'), ((3814, 3861), 'json.loads', 'loads', (["restored_volume.metadata['osvb_network']"], {}), "(restored_volume.metadata['osvb_network'])\n", (3819, 3861), False, 'from json import loads\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tools to create profiles (i.e. 1D "slices" from 2D images)."""
import numpy as np
import scipy.ndimage
from astropy import units as u
from astropy.convolution import Box1DKernel, Gaussian1DKernel
from astropy.coordinates import Angle
from astropy.table import Table
from .core import Estimator
__all__ = ["ImageProfile", "ImageProfileEstimator"]
# TODO: implement measuring profile along arbitrary directions
# TODO: think better about error handling. e.g. MC based methods
class ImageProfileEstimator(Estimator):
    """Estimate profile from image.

    Parameters
    ----------
    x_edges : `~astropy.coordinates.Angle`
        Coordinate edges to define a custom measument grid (optional).
    method : ['sum', 'mean']
        Compute sum or mean within profile bins.
    axis : ['lon', 'lat', 'radial']
        Along which axis to estimate the profile.
    center : `~astropy.coordinates.SkyCoord`
        Center coordinate for the radial profile option.

    Examples
    --------
    This example shows how to compute a counts profile for the Fermi galactic
    center region::

        import matplotlib.pyplot as plt
        from gammapy.maps import ImageProfileEstimator
        from gammapy.maps import Map
        from astropy import units as u

        # load example data
        filename = '$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts.fits.gz'
        fermi_cts = Map.read(filename)

        # set up profile estimator and run
        p = ImageProfileEstimator(axis='lon', method='sum')
        profile = p.run(fermi_cts)

        # smooth profile and plot
        smoothed = profile.smooth(kernel='gauss')
        smoothed.peek()
        plt.show()
    """

    tag = "ImageProfileEstimator"

    def __init__(self, x_edges=None, method="sum", axis="lon", center=None):
        self._x_edges = x_edges

        if method not in ["sum", "mean"]:
            raise ValueError("Not a valid method, choose either 'sum' or 'mean'")

        if axis not in ["lon", "lat", "radial"]:
            raise ValueError("Not a valid axis, choose 'lon', 'lat' or 'radial'")

        # BUGFIX: radial profiles require a center coordinate. The previous
        # check tested ``method == "radial"``, which could never be true
        # (method is validated to 'sum'/'mean' above), so a missing center
        # only failed much later inside the coordinate computation.
        if axis == "radial" and center is None:
            raise ValueError("Please provide center coordinate for radial profiles")

        self.parameters = {"method": method, "axis": axis, "center": center}

    def _get_x_edges(self, image):
        """Return coordinate bin edges, derived from the image geometry
        unless custom edges were supplied at construction."""
        if self._x_edges is not None:
            return self._x_edges

        p = self.parameters
        coordinates = image.geom.get_coord(mode="edges").skycoord

        if p["axis"] == "lat":
            x_edges = coordinates[:, 0].data.lat
        elif p["axis"] == "lon":
            lon = coordinates[0, :].data.lon
            x_edges = lon.wrap_at("180d")
        elif p["axis"] == "radial":
            # Radial bins run from the center out to the most distant image
            # corner, in steps of the mean pixel scale.
            rad_step = image.geom.pixel_scales.mean()
            corners = [0, 0, -1, -1], [0, -1, 0, -1]
            rad_max = coordinates[corners].separation(p["center"]).max()
            x_edges = Angle(np.arange(0, rad_max.deg, rad_step.deg), unit="deg")

        return x_edges

    def _estimate_profile(self, image, image_err, mask):
        """Compute the per-bin profile values and (when possible) errors."""
        p = self.parameters
        labels = self._label_image(image, mask)

        profile_err = None

        index = np.arange(1, len(self._get_x_edges(image)))

        if p["method"] == "sum":
            profile = scipy.ndimage.sum(image.data, labels.data, index)

            if image.unit.is_equivalent("counts"):
                # Poisson errors for counts-like data.
                profile_err = np.sqrt(profile)
            elif image_err:
                # gaussian error propagation
                err_sum = scipy.ndimage.sum(image_err.data ** 2, labels.data, index)
                profile_err = np.sqrt(err_sum)

        elif p["method"] == "mean":
            # gaussian error propagation
            profile = scipy.ndimage.mean(image.data, labels.data, index)

            if image_err:
                N = scipy.ndimage.sum(~np.isnan(image_err.data), labels.data, index)
                err_sum = scipy.ndimage.sum(image_err.data ** 2, labels.data, index)
                profile_err = np.sqrt(err_sum) / N

        return profile, profile_err

    def _label_image(self, image, mask=None):
        """Return a label map assigning each pixel to its profile bin
        (label 0 means excluded)."""
        p = self.parameters

        coordinates = image.geom.get_coord().skycoord
        x_edges = self._get_x_edges(image)

        if p["axis"] == "lon":
            lon = coordinates.data.lon.wrap_at("180d")
            data = np.digitize(lon.degree, x_edges.deg)
        elif p["axis"] == "lat":
            lat = coordinates.data.lat
            data = np.digitize(lat.degree, x_edges.deg)
        elif p["axis"] == "radial":
            separation = coordinates.separation(p["center"])
            data = np.digitize(separation.degree, x_edges.deg)

        if mask is not None:
            # assign masked values to background
            data[mask.data] = 0

        return image.copy(data=data)

    def run(self, image, image_err=None, mask=None):
        """Run image profile estimator.

        Parameters
        ----------
        image : `~gammapy.maps.Map`
            Input image to run profile estimator on.
        image_err : `~gammapy.maps.Map`
            Input error image to run profile estimator on.
        mask : `~gammapy.maps.Map`
            Optional mask to exclude regions from the measurement.

        Returns
        -------
        profile : `ImageProfile`
            Result image profile object.
        """
        p = self.parameters

        if image.unit.is_equivalent("count"):
            # Derive Poisson errors directly from the counts.
            image_err = image.copy(data=np.sqrt(image.data))

        profile, profile_err = self._estimate_profile(image, image_err, mask)

        result = Table()
        x_edges = self._get_x_edges(image)
        result["x_min"] = x_edges[:-1]
        result["x_max"] = x_edges[1:]
        result["x_ref"] = (x_edges[:-1] + x_edges[1:]) / 2
        result["profile"] = profile * image.unit

        if profile_err is not None:
            result["profile_err"] = profile_err * image.unit

        result.meta["PROFILE_TYPE"] = p["axis"]

        return ImageProfile(result)
class ImageProfile:
    """Image profile class.

    The image profile data is stored in `~astropy.table.Table` object, with the
    following columns:

        * `x_ref` Coordinate bin center (required).
        * `x_min` Coordinate bin minimum (optional).
        * `x_max` Coordinate bin maximum (optional).
        * `profile` Image profile data (required).
        * `profile_err` Image profile data error (optional).

    Parameters
    ----------
    table : `~astropy.table.Table`
        Table instance with the columns specified as above.
    """

    def __init__(self, table):
        self.table = table

    def smooth(self, kernel="box", radius="0.1 deg", **kwargs):
        r"""Smooth profile with error propagation.

        Smoothing is described by a convolution:

        .. math::
            x_j = \sum_i x_{(j - i)} h_i

        Where :math:`h_i` are the coefficients of the convolution kernel.

        The corresponding error on :math:`x_j` is then estimated using Gaussian
        error propagation, neglecting correlations between the individual
        :math:`x_{(j - i)}`:

        .. math::
            \Delta x_j = \sqrt{\sum_i \Delta x^{2}_{(j - i)} h^{2}_i}

        Parameters
        ----------
        kernel : {'gauss', 'box'}
            Kernel shape
        radius : `~astropy.units.Quantity`, str or float
            Smoothing width given as quantity or float. If a float is given it
            is interpreted as smoothing width in pixels. If an (angular) quantity
            is given it is converted to pixels using `xref[1] - x_ref[0]`.
        kwargs : dict
            Keyword arguments passed to `~scipy.ndimage.uniform_filter`
            ('box') and `~scipy.ndimage.gaussian_filter` ('gauss').

        Returns
        -------
        profile : `ImageProfile`
            Smoothed image profile.
        """
        table = self.table.copy()
        profile = table["profile"]

        radius = u.Quantity(radius)
        # Convert the angular radius to a width in bins.
        radius = np.abs(radius / np.diff(self.x_ref))[0]
        width = 2 * radius.value + 1

        if kernel == "box":
            smoothed = scipy.ndimage.uniform_filter(
                profile.astype("float"), width, **kwargs
            )
            # renormalize data
            if table["profile"].unit.is_equivalent("count"):
                smoothed *= int(width)
                smoothed_err = np.sqrt(smoothed)
            elif "profile_err" in table.colnames:
                profile_err = table["profile_err"]
                # use gaussian error propagation
                box = Box1DKernel(width)
                err_sum = scipy.ndimage.convolve(profile_err ** 2, box.array ** 2)
                smoothed_err = np.sqrt(err_sum)
        elif kernel == "gauss":
            smoothed = scipy.ndimage.gaussian_filter(
                profile.astype("float"), width, **kwargs
            )
            # use gaussian error propagation
            if "profile_err" in table.colnames:
                profile_err = table["profile_err"]
                gauss = Gaussian1DKernel(width)
                err_sum = scipy.ndimage.convolve(profile_err ** 2, gauss.array ** 2)
                smoothed_err = np.sqrt(err_sum)
        else:
            raise ValueError("Not valid kernel choose either 'box' or 'gauss'")

        table["profile"] = smoothed * self.table["profile"].unit

        if "profile_err" in table.colnames:
            table["profile_err"] = smoothed_err * self.table["profile"].unit

        return self.__class__(table)

    def plot(self, ax=None, **kwargs):
        """Plot image profile.

        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            Axes object
        **kwargs : dict
            Keyword arguments passed to `~matplotlib.axes.Axes.plot`

        Returns
        -------
        ax : `~matplotlib.axes.Axes`
            Axes object
        """
        import matplotlib.pyplot as plt

        if ax is None:
            ax = plt.gca()

        y = self.table["profile"].data
        x = self.x_ref.value
        ax.plot(x, y, **kwargs)
        # BUGFIX: the x label used to be hard-coded to "lon". Use the profile
        # type recorded by ImageProfileEstimator (PROFILE_TYPE metadata),
        # keeping "lon" as the fallback for tables without that entry.
        ax.set_xlabel(self.table.meta.get("PROFILE_TYPE", "lon"))
        ax.set_ylabel("profile")
        ax.set_xlim(x.max(), x.min())
        return ax

    def plot_err(self, ax=None, **kwargs):
        """Plot image profile error as band.

        Parameters
        ----------
        ax : `~matplotlib.axes.Axes`
            Axes object
        **kwargs : dict
            Keyword arguments passed to plt.fill_between()

        Returns
        -------
        ax : `~matplotlib.axes.Axes`
            Axes object
        """
        import matplotlib.pyplot as plt

        if ax is None:
            ax = plt.gca()

        y = self.table["profile"].data
        ymin = y - self.table["profile_err"].data
        ymax = y + self.table["profile_err"].data
        x = self.x_ref.value

        # plotting defaults
        kwargs.setdefault("alpha", 0.5)

        ax.fill_between(x, ymin, ymax, **kwargs)
        ax.set_xlabel("x (deg)")
        ax.set_ylabel("profile")
        return ax

    @property
    def x_ref(self):
        """Reference x coordinates."""
        return self.table["x_ref"].quantity

    @property
    def x_min(self):
        """Min. x coordinates."""
        return self.table["x_min"].quantity

    @property
    def x_max(self):
        """Max. x coordinates."""
        return self.table["x_max"].quantity

    @property
    def profile(self):
        """Image profile quantity."""
        return self.table["profile"].quantity

    @property
    def profile_err(self):
        """Image profile error quantity."""
        try:
            return self.table["profile_err"].quantity
        except KeyError:
            return None

    def peek(self, figsize=(8, 4.5), **kwargs):
        """Show image profile and error.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments passed to `ImageProfile.plot_profile()`

        Returns
        -------
        ax : `~matplotlib.axes.Axes`
            Axes object
        """
        import matplotlib.pyplot as plt

        fig = plt.figure(figsize=figsize)
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
        ax = self.plot(ax, **kwargs)

        if "profile_err" in self.table.colnames:
            # Reuse the line color for the error band.
            ax = self.plot_err(ax, color=kwargs.get("c"))

        return ax

    def normalize(self, mode="peak"):
        """Normalize profile to peak value or integral.

        Parameters
        ----------
        mode : ['integral', 'peak']
            Normalize image profile so that it integrates to unity ('integral')
            or the maximum value corresponds to one ('peak').

        Returns
        -------
        profile : `ImageProfile`
            Normalized image profile.
        """
        table = self.table.copy()
        profile = self.table["profile"]

        if mode == "peak":
            norm = np.nanmax(profile)
        elif mode == "integral":
            norm = np.nansum(profile)
        else:
            raise ValueError(f"Invalid normalization mode: {mode!r}")

        table["profile"] /= norm

        if "profile_err" in table.colnames:
            table["profile_err"] /= norm
        return self.__class__(table)
| [
"numpy.sqrt",
"astropy.table.Table",
"numpy.arange",
"numpy.digitize",
"matplotlib.pyplot.gca",
"numpy.diff",
"astropy.convolution.Gaussian1DKernel",
"matplotlib.pyplot.figure",
"numpy.isnan",
"numpy.nanmax",
"astropy.convolution.Box1DKernel",
"numpy.nansum",
"astropy.units.Quantity"
]
| [((5684, 5691), 'astropy.table.Table', 'Table', ([], {}), '()\n', (5689, 5691), False, 'from astropy.table import Table\n'), ((8050, 8068), 'astropy.units.Quantity', 'u.Quantity', (['radius'], {}), '(radius)\n', (8060, 8068), True, 'from astropy import units as u\n'), ((12194, 12221), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (12204, 12221), True, 'import matplotlib.pyplot as plt\n'), ((4433, 4469), 'numpy.digitize', 'np.digitize', (['lon.degree', 'x_edges.deg'], {}), '(lon.degree, x_edges.deg)\n', (4444, 4469), True, 'import numpy as np\n'), ((10069, 10078), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10076, 10078), True, 'import matplotlib.pyplot as plt\n'), ((10757, 10766), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10764, 10766), True, 'import matplotlib.pyplot as plt\n'), ((12982, 13000), 'numpy.nanmax', 'np.nanmax', (['profile'], {}), '(profile)\n', (12991, 13000), True, 'import numpy as np\n'), ((3497, 3513), 'numpy.sqrt', 'np.sqrt', (['profile'], {}), '(profile)\n', (3504, 3513), True, 'import numpy as np\n'), ((4562, 4598), 'numpy.digitize', 'np.digitize', (['lat.degree', 'x_edges.deg'], {}), '(lat.degree, x_edges.deg)\n', (4573, 4598), True, 'import numpy as np\n'), ((8478, 8495), 'numpy.sqrt', 'np.sqrt', (['smoothed'], {}), '(smoothed)\n', (8485, 8495), True, 'import numpy as np\n'), ((13053, 13071), 'numpy.nansum', 'np.nansum', (['profile'], {}), '(profile)\n', (13062, 13071), True, 'import numpy as np\n'), ((3702, 3718), 'numpy.sqrt', 'np.sqrt', (['err_sum'], {}), '(err_sum)\n', (3709, 3718), True, 'import numpy as np\n'), ((4716, 4759), 'numpy.digitize', 'np.digitize', (['separation.degree', 'x_edges.deg'], {}), '(separation.degree, x_edges.deg)\n', (4727, 4759), True, 'import numpy as np\n'), ((5566, 5585), 'numpy.sqrt', 'np.sqrt', (['image.data'], {}), '(image.data)\n', (5573, 5585), True, 'import numpy as np\n'), ((8102, 8121), 'numpy.diff', 'np.diff', (['self.x_ref'], 
{}), '(self.x_ref)\n', (8109, 8121), True, 'import numpy as np\n'), ((8668, 8686), 'astropy.convolution.Box1DKernel', 'Box1DKernel', (['width'], {}), '(width)\n', (8679, 8686), False, 'from astropy.convolution import Box1DKernel, Gaussian1DKernel\n'), ((8801, 8817), 'numpy.sqrt', 'np.sqrt', (['err_sum'], {}), '(err_sum)\n', (8808, 8817), True, 'import numpy as np\n'), ((9143, 9166), 'astropy.convolution.Gaussian1DKernel', 'Gaussian1DKernel', (['width'], {}), '(width)\n', (9159, 9166), False, 'from astropy.convolution import Box1DKernel, Gaussian1DKernel\n'), ((9283, 9299), 'numpy.sqrt', 'np.sqrt', (['err_sum'], {}), '(err_sum)\n', (9290, 9299), True, 'import numpy as np\n'), ((3009, 3048), 'numpy.arange', 'np.arange', (['(0)', 'rad_max.deg', 'rad_step.deg'], {}), '(0, rad_max.deg, rad_step.deg)\n', (3018, 3048), True, 'import numpy as np\n'), ((4096, 4112), 'numpy.sqrt', 'np.sqrt', (['err_sum'], {}), '(err_sum)\n', (4103, 4112), True, 'import numpy as np\n'), ((3935, 3959), 'numpy.isnan', 'np.isnan', (['image_err.data'], {}), '(image_err.data)\n', (3943, 3959), True, 'import numpy as np\n')] |
"""versatileimagefield Field mixins."""
import os
import re
from .datastructures import FilterLibrary
from .registry import autodiscover, versatileimagefield_registry
from .settings import (
cache,
VERSATILEIMAGEFIELD_CREATE_ON_DEMAND,
VERSATILEIMAGEFIELD_SIZED_DIRNAME,
VERSATILEIMAGEFIELD_FILTERED_DIRNAME
)
from .validators import validate_ppoi
# Populate the registries before building the file-name patterns below.
autodiscover()

# Regex fragment matching any registered filter name, e.g. '__<key>__'.
filter_regex_snippet = r'__({registered_filters})__'.format(
    registered_filters='|'.join([
        key
        for key, filter_cls in versatileimagefield_registry._filter_registry.items()
    ])
)
# Regex fragment matching any registered sizer key followed by its
# '<width>x<height>' dimensions (and an optional numeric suffix).
sizer_regex_snippet = r'-({registered_sizers})-(\d+)x(\d+)(?:-\d+)?'.format(
    registered_sizers='|'.join([
        sizer_cls.get_filename_key_regex()
        for key, sizer_cls in versatileimagefield_registry._sizedimage_registry.items()
    ])
)
# Compiled patterns used to recognize generated renditions by the tag that
# sits between the original basename and the file extension.
filter_regex = re.compile(filter_regex_snippet + '$')
sizer_regex = re.compile(sizer_regex_snippet + '$')
filter_and_sizer_regex = re.compile(
    filter_regex_snippet + sizer_regex_snippet + '$'
)
class VersatileImageMixIn(object):
    """A mix-in that provides the filtering/sizing API."""

    def __init__(self, *args, **kwargs):
        """Construct PPOI and create_on_demand."""
        self._create_on_demand = VERSATILEIMAGEFIELD_CREATE_ON_DEMAND
        super(VersatileImageMixIn, self).__init__(*args, **kwargs)
        # Setting initial ppoi
        if self.field.ppoi_field:
            # Read the initial PPOI from the companion model field,
            # defaulting to the image center when it is missing.
            instance_ppoi_value = getattr(
                self.instance,
                self.field.ppoi_field,
                (0.5, 0.5)
            )
            self.ppoi = instance_ppoi_value
        else:
            self.ppoi = (0.5, 0.5)

    @property
    def url(self):
        """
        Return the appropriate URL.

        URL is constructed based on these field conditions:
            * If empty (not `self.name`) and a placeholder is defined, the
              URL to the placeholder is returned.
            * Otherwise, defaults to vanilla ImageFieldFile behavior.
        """
        if not self.name and self.field.placeholder_image_name:
            return self.storage.url(self.field.placeholder_image_name)
        return super(VersatileImageMixIn, self).url

    @property
    def create_on_demand(self):
        """create_on_demand getter."""
        return self._create_on_demand

    @create_on_demand.setter
    def create_on_demand(self, value):
        # create_on_demand setter: rebuilds the filter/sizer helpers so
        # they pick up the new setting.
        if not isinstance(value, bool):
            raise ValueError(
                "`create_on_demand` must be a boolean"
            )
        else:
            self._create_on_demand = value
            self.build_filters_and_sizers(self.ppoi, value)

    @property
    def ppoi(self):
        """Primary Point of Interest (ppoi) getter."""
        return self._ppoi_value

    @ppoi.setter
    def ppoi(self, value):
        """Primary Point of Interest (ppoi) setter."""
        ppoi = validate_ppoi(
            value,
            return_converted_tuple=True
        )
        # validate_ppoi returns False for invalid values; only valid PPOIs
        # update the field and trigger a filter/sizer rebuild.
        if ppoi is not False:
            self._ppoi_value = ppoi
            self.build_filters_and_sizers(ppoi, self.create_on_demand)

    def build_filters_and_sizers(self, ppoi_value, create_on_demand):
        """Build the filters and sizers for a field."""
        name = self.name
        # Fall back to the placeholder image so filters/sizers keep working
        # for empty fields that define a placeholder.
        if not name and self.field.placeholder_image_name:
            name = self.field.placeholder_image_name
        self.filters = FilterLibrary(
            name,
            self.storage,
            versatileimagefield_registry,
            ppoi_value,
            create_on_demand
        )
        # Attach one attribute per registered sizer class so sized
        # renditions are reachable directly from the field file.
        for (
            attr_name,
            sizedimage_cls
        ) in versatileimagefield_registry._sizedimage_registry.items():
            setattr(
                self,
                attr_name,
                sizedimage_cls(
                    path_to_image=name,
                    storage=self.storage,
                    create_on_demand=create_on_demand,
                    ppoi=ppoi_value
                )
            )

    def get_filtered_root_folder(self):
        """Return the location where filtered images are stored."""
        folder, filename = os.path.split(self.name)
        return os.path.join(folder, VERSATILEIMAGEFIELD_FILTERED_DIRNAME, '')

    def get_sized_root_folder(self):
        """Return the location where sized images are stored."""
        folder, filename = os.path.split(self.name)
        return os.path.join(VERSATILEIMAGEFIELD_SIZED_DIRNAME, folder, '')

    def get_filtered_sized_root_folder(self):
        """Return the location where filtered + sized images are stored."""
        sized_root_folder = self.get_sized_root_folder()
        return os.path.join(
            sized_root_folder,
            VERSATILEIMAGEFIELD_FILTERED_DIRNAME
        )

    def delete_matching_files_from_storage(self, root_folder, regex):
        """
        Delete files in `root_folder` which match `regex` before file ext.

        Example values:
            * root_folder = 'foo/'
            * self.name = 'bar.jpg'
            * regex = re.compile('-baz')

        Result:
            * foo/bar-baz.jpg <- Deleted
            * foo/bar-biz.jpg <- Not deleted
        """
        if not self.name:  # pragma: no cover
            return
        try:
            directory_list, file_list = self.storage.listdir(root_folder)
        except OSError:  # pragma: no cover
            pass
        else:
            folder, filename = os.path.split(self.name)
            basename, ext = os.path.splitext(filename)
            for f in file_list:
                if not f.startswith(basename) or not f.endswith(ext):  # pragma: no cover
                    continue
                # `tag` is whatever was inserted between the original
                # basename and the extension by a filter/sizer.
                tag = f[len(basename):-len(ext)]
                assert f == basename + tag + ext
                if regex.match(tag) is not None:
                    file_location = os.path.join(root_folder, f)
                    self.storage.delete(file_location)
                    # Also drop the cached URL of the deleted rendition.
                    cache.delete(
                        self.storage.url(file_location)
                    )
                    print(
                        "Deleted {file} (created from: {original})".format(
                            file=os.path.join(root_folder, f),
                            original=self.name
                        )
                    )

    def delete_filtered_images(self):
        """Delete all filtered images created from `self.name`."""
        self.delete_matching_files_from_storage(
            self.get_filtered_root_folder(),
            filter_regex
        )

    def delete_sized_images(self):
        """Delete all sized images created from `self.name`."""
        self.delete_matching_files_from_storage(
            self.get_sized_root_folder(),
            sizer_regex
        )

    def delete_filtered_sized_images(self):
        """Delete all filtered sized images created from `self.name`."""
        self.delete_matching_files_from_storage(
            self.get_filtered_sized_root_folder(),
            filter_and_sizer_regex
        )

    def delete_all_created_images(self):
        """Delete all images created from `self.name`."""
        self.delete_filtered_images()
        self.delete_sized_images()
        self.delete_filtered_sized_images()
| [
"os.path.split",
"os.path.splitext",
"os.path.join",
"re.compile"
]
| [((848, 886), 're.compile', 're.compile', (["(filter_regex_snippet + '$')"], {}), "(filter_regex_snippet + '$')\n", (858, 886), False, 'import re\n'), ((901, 938), 're.compile', 're.compile', (["(sizer_regex_snippet + '$')"], {}), "(sizer_regex_snippet + '$')\n", (911, 938), False, 'import re\n'), ((964, 1024), 're.compile', 're.compile', (["(filter_regex_snippet + sizer_regex_snippet + '$')"], {}), "(filter_regex_snippet + sizer_regex_snippet + '$')\n", (974, 1024), False, 'import re\n'), ((4144, 4168), 'os.path.split', 'os.path.split', (['self.name'], {}), '(self.name)\n', (4157, 4168), False, 'import os\n'), ((4184, 4246), 'os.path.join', 'os.path.join', (['folder', 'VERSATILEIMAGEFIELD_FILTERED_DIRNAME', '""""""'], {}), "(folder, VERSATILEIMAGEFIELD_FILTERED_DIRNAME, '')\n", (4196, 4246), False, 'import os\n'), ((4377, 4401), 'os.path.split', 'os.path.split', (['self.name'], {}), '(self.name)\n', (4390, 4401), False, 'import os\n'), ((4417, 4476), 'os.path.join', 'os.path.join', (['VERSATILEIMAGEFIELD_SIZED_DIRNAME', 'folder', '""""""'], {}), "(VERSATILEIMAGEFIELD_SIZED_DIRNAME, folder, '')\n", (4429, 4476), False, 'import os\n'), ((4672, 4741), 'os.path.join', 'os.path.join', (['sized_root_folder', 'VERSATILEIMAGEFIELD_FILTERED_DIRNAME'], {}), '(sized_root_folder, VERSATILEIMAGEFIELD_FILTERED_DIRNAME)\n', (4684, 4741), False, 'import os\n'), ((5458, 5482), 'os.path.split', 'os.path.split', (['self.name'], {}), '(self.name)\n', (5471, 5482), False, 'import os\n'), ((5511, 5537), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (5527, 5537), False, 'import os\n'), ((5873, 5901), 'os.path.join', 'os.path.join', (['root_folder', 'f'], {}), '(root_folder, f)\n', (5885, 5901), False, 'import os\n'), ((6205, 6233), 'os.path.join', 'os.path.join', (['root_folder', 'f'], {}), '(root_folder, f)\n', (6217, 6233), False, 'import os\n')] |
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs federated training with differential privacy on various tasks."""
import functools
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
from utils import task_utils
from utils import training_utils
from utils import utils_impl
from utils.optimizers import optimizer_utils
# Each record_hparam_flags() context captures the flags defined inside it so
# that _write_hparam_flags() can later log them as hyperparameters.
with utils_impl.record_hparam_flags() as optimizer_flags:
  # Defining optimizer flags
  optimizer_utils.define_optimizer_flags('client')
  optimizer_utils.define_optimizer_flags('server')

with utils_impl.record_hparam_flags() as shared_flags:
  # Federated training hyperparameters
  flags.DEFINE_integer('client_epochs_per_round', 1,
                       'Number of epochs in the client to take per round.')
  flags.DEFINE_integer('client_batch_size', 20, 'Batch size on the clients.')
  flags.DEFINE_integer('clients_per_round', 10,
                       'How many clients to sample per round.')
  flags.DEFINE_integer('client_datasets_random_seed', 1,
                       'Random seed for client sampling.')
  flags.DEFINE_integer(
      'max_elements_per_client', None, 'Maximum number of '
      'elements for each training client. If set to None, all '
      'available examples are used.')

  # Training loop configuration
  flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
  flags.DEFINE_string(
      'experiment_name', None, 'The name of this experiment. Will be append to '
      '--root_output_dir to separate experiment results.')
  flags.DEFINE_string('root_output_dir', '/tmp/fed_opt/',
                      'Root directory for writing experiment output.')
  flags.DEFINE_integer(
      'rounds_per_eval', 1,
      'How often to evaluate the global model on the validation dataset.')
  flags.DEFINE_integer(
      'num_validation_examples', -1, 'The number of validation'
      'examples to use. If set to -1, all available examples '
      'are used.')
  flags.DEFINE_integer('rounds_per_checkpoint', 50,
                       'How often to checkpoint the global model.')

with utils_impl.record_hparam_flags() as dp_flags:
  # Differential privacy flags
  flags.DEFINE_float(
      'clip', None, 'Clip value for fixed clipping or initial clip for '
      'adaptive clipping. If None, no clipping is used.')
  flags.DEFINE_float('noise_multiplier', None,
                     'Noise multiplier. If None, non-DP aggregator is used.')
  flags.DEFINE_float(
      'adaptive_clip_learning_rate', None, 'Adaptive clip learning rate. If '
      'None, clip adaptation is not used.')
  flags.DEFINE_float('target_unclipped_quantile', 0.5,
                     'Target unclipped quantile.')
  flags.DEFINE_boolean('uniform_weighting', False,
                       'Whether to weigh clients uniformly.')

# Task specification
with utils_impl.record_hparam_flags() as task_flags:
  task_utils.define_task_flags()

FLAGS = flags.FLAGS
def _write_hparam_flags():
  """Collects pertinent hyperparameter flags and writes them to a CSV."""
  hparam_dict = utils_impl.lookup_flag_values(shared_flags)

  # Merge optimizer flags, keeping only those used by the chosen optimizers.
  opt_flag_dict = utils_impl.lookup_flag_values(optimizer_flags)
  for prefix in ('client', 'server'):
    opt_flag_dict = optimizer_utils.remove_unused_flags(prefix, opt_flag_dict)
  hparam_dict.update(opt_flag_dict)

  # Merge task flags.
  hparam_dict.update(utils_impl.lookup_flag_values(task_flags))
  training_utils.write_hparams_to_csv(hparam_dict, FLAGS.root_output_dir,
                                      FLAGS.experiment_name)
def main(argv):
  """Runs (optionally differentially private) federated training.

  Builds optimizers, the task and its datasets, a model-update aggregator
  (plain mean, clipped mean, or DP-Gaussian depending on flags), then drives
  the federated averaging loop with periodic evaluation, and finally
  evaluates on the centralized test set.

  Args:
    argv: Command-line arguments; no positional arguments are expected.

  Raises:
    app.UsageError: If positional command-line arguments are supplied.
    ValueError: If the DP/clipping flag combination is inconsistent.
  """
  if len(argv) > 1:
    raise app.UsageError('Expected no command-line arguments, '
                     'got: {}'.format(argv))

  client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')
  server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')

  train_client_spec = tff.simulation.baselines.ClientSpec(
      num_epochs=FLAGS.client_epochs_per_round,
      batch_size=FLAGS.client_batch_size,
      max_elements=FLAGS.max_elements_per_client)
  task = task_utils.create_task_from_flags(train_client_spec)

  logging.info('Trainable weights:')
  for weight in task.model_fn().trainable_variables:
    logging.info('name: %s shape: %s', weight.name, weight.shape)

  # Select how client updates are weighted in the server aggregation.
  if FLAGS.uniform_weighting:
    client_weighting = tff.learning.ClientWeighting.UNIFORM
  elif FLAGS.task == 'shakespeare_character' or FLAGS.task == 'stackoverflow_word':
    # Language tasks weight clients by their number of tokens.
    def client_weighting(local_outputs):
      return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)
  else:
    client_weighting = None

  if FLAGS.noise_multiplier is None:
    # Non-DP path: plain (weighted or unweighted) mean, optionally clipped.
    if FLAGS.uniform_weighting:
      aggregation_factory = tff.aggregators.UnweightedMeanFactory()
    else:
      aggregation_factory = tff.aggregators.MeanFactory()
    if FLAGS.clip is not None:
      if FLAGS.clip <= 0:
        raise ValueError('clip must be positive if clipping is enabled.')
      if FLAGS.adaptive_clip_learning_rate is None:
        clip = FLAGS.clip
      else:
        if FLAGS.adaptive_clip_learning_rate <= 0:
          raise ValueError('adaptive_clip_learning_rate must be positive if '
                           'adaptive clipping is enabled.')
        # Adaptive clipping: track a quantile of update norms without noise.
        clip = tff.aggregators.PrivateQuantileEstimationProcess.no_noise(
            initial_estimate=FLAGS.clip,
            target_quantile=FLAGS.target_unclipped_quantile,
            learning_rate=FLAGS.adaptive_clip_learning_rate)
      aggregation_factory = tff.aggregators.clipping_factory(
          clip, aggregation_factory)
  else:
    # DP path: requires uniform weighting and positive noise/clip settings.
    if not FLAGS.uniform_weighting:
      raise ValueError(
          'Differential privacy is only implemented for uniform weighting.')
    if FLAGS.noise_multiplier <= 0:
      raise ValueError('noise_multiplier must be positive if DP is enabled.')
    if FLAGS.clip is None or FLAGS.clip <= 0:
      raise ValueError('clip must be positive if DP is enabled.')
    if FLAGS.adaptive_clip_learning_rate is None:
      aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(
          noise_multiplier=FLAGS.noise_multiplier,
          clients_per_round=FLAGS.clients_per_round,
          clip=FLAGS.clip)
    else:
      if FLAGS.adaptive_clip_learning_rate <= 0:
        raise ValueError('adaptive_clip_learning_rate must be positive if '
                         'adaptive clipping is enabled.')
      aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive(
          noise_multiplier=FLAGS.noise_multiplier,
          clients_per_round=FLAGS.clients_per_round,
          initial_l2_norm_clip=FLAGS.clip,
          target_unclipped_quantile=FLAGS.target_unclipped_quantile,
          learning_rate=FLAGS.adaptive_clip_learning_rate)

  iterative_process = tff.learning.build_federated_averaging_process(
      model_fn=task.model_fn,
      server_optimizer_fn=server_optimizer_fn,
      client_weighting=client_weighting,
      client_optimizer_fn=client_optimizer_fn,
      model_update_aggregation_factory=aggregation_factory)
  train_data = task.datasets.train_data.preprocess(
      task.datasets.train_preprocess_fn)
  # Fuse dataset construction into the iterative process so clients are
  # selected by id.
  training_process = (
      tff.simulation.compose_dataset_computation_with_iterative_process(
          train_data.dataset_computation, iterative_process))

  training_selection_fn = functools.partial(
      tff.simulation.build_uniform_sampling_fn(
          train_data.client_ids, random_seed=FLAGS.client_datasets_random_seed),
      size=FLAGS.clients_per_round)

  test_data = task.datasets.get_centralized_test_data()
  validation_data = test_data.take(FLAGS.num_validation_examples)
  federated_eval = tff.learning.build_federated_evaluation(task.model_fn)
  evaluation_selection_fn = lambda round_num: [validation_data]

  def evaluation_fn(state, evaluation_data):
    # Evaluates the current global model weights on the supplied datasets.
    return federated_eval(state.model, evaluation_data)

  program_state_manager, metrics_managers = training_utils.create_managers(
      FLAGS.root_output_dir, FLAGS.experiment_name)
  _write_hparam_flags()
  state = tff.simulation.run_training_process(
      training_process=training_process,
      training_selection_fn=training_selection_fn,
      total_rounds=FLAGS.total_rounds,
      evaluation_fn=evaluation_fn,
      evaluation_selection_fn=evaluation_selection_fn,
      rounds_per_evaluation=FLAGS.rounds_per_eval,
      program_state_manager=program_state_manager,
      rounds_per_saving_program_state=FLAGS.rounds_per_checkpoint,
      metrics_managers=metrics_managers)

  # Final evaluation on the full centralized test set, released as one extra
  # round beyond training.
  test_metrics = federated_eval(state.model, [test_data])
  for metrics_manager in metrics_managers:
    metrics_manager.release(test_metrics, FLAGS.total_rounds + 1)
if __name__ == '__main__':
  # absl's app.run parses the command-line flags before invoking main.
  app.run(main)
| [
"utils.task_utils.define_task_flags",
"absl.logging.info",
"tensorflow_federated.simulation.run_training_process",
"tensorflow_federated.aggregators.clipping_factory",
"tensorflow_federated.aggregators.PrivateQuantileEstimationProcess.no_noise",
"tensorflow_federated.aggregators.MeanFactory",
"absl.flags.DEFINE_float",
"absl.flags.DEFINE_boolean",
"absl.app.run",
"utils.optimizers.optimizer_utils.define_optimizer_flags",
"tensorflow_federated.simulation.compose_dataset_computation_with_iterative_process",
"utils.utils_impl.record_hparam_flags",
"utils.optimizers.optimizer_utils.create_optimizer_fn_from_flags",
"tensorflow_federated.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive",
"tensorflow_federated.learning.build_federated_averaging_process",
"tensorflow_federated.aggregators.UnweightedMeanFactory",
"tensorflow_federated.simulation.baselines.ClientSpec",
"tensorflow_federated.simulation.build_uniform_sampling_fn",
"utils.optimizers.optimizer_utils.remove_unused_flags",
"utils.utils_impl.lookup_flag_values",
"tensorflow_federated.learning.build_federated_evaluation",
"absl.flags.DEFINE_string",
"utils.task_utils.create_task_from_flags",
"tensorflow_federated.aggregators.DifferentiallyPrivateFactory.gaussian_fixed",
"utils.training_utils.create_managers",
"absl.flags.DEFINE_integer",
"utils.training_utils.write_hparams_to_csv",
"tensorflow.squeeze"
]
| [((941, 973), 'utils.utils_impl.record_hparam_flags', 'utils_impl.record_hparam_flags', ([], {}), '()\n', (971, 973), False, 'from utils import utils_impl\n'), ((1025, 1073), 'utils.optimizers.optimizer_utils.define_optimizer_flags', 'optimizer_utils.define_optimizer_flags', (['"""client"""'], {}), "('client')\n", (1063, 1073), False, 'from utils.optimizers import optimizer_utils\n'), ((1076, 1124), 'utils.optimizers.optimizer_utils.define_optimizer_flags', 'optimizer_utils.define_optimizer_flags', (['"""server"""'], {}), "('server')\n", (1114, 1124), False, 'from utils.optimizers import optimizer_utils\n'), ((1131, 1163), 'utils.utils_impl.record_hparam_flags', 'utils_impl.record_hparam_flags', ([], {}), '()\n', (1161, 1163), False, 'from utils import utils_impl\n'), ((1222, 1329), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""client_epochs_per_round"""', '(1)', '"""Number of epochs in the client to take per round."""'], {}), "('client_epochs_per_round', 1,\n 'Number of epochs in the client to take per round.')\n", (1242, 1329), False, 'from absl import flags\n'), ((1351, 1426), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""client_batch_size"""', '(20)', '"""Batch size on the clients."""'], {}), "('client_batch_size', 20, 'Batch size on the clients.')\n", (1371, 1426), False, 'from absl import flags\n'), ((1429, 1519), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""clients_per_round"""', '(10)', '"""How many clients to sample per round."""'], {}), "('clients_per_round', 10,\n 'How many clients to sample per round.')\n", (1449, 1519), False, 'from absl import flags\n'), ((1541, 1635), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""client_datasets_random_seed"""', '(1)', '"""Random seed for client sampling."""'], {}), "('client_datasets_random_seed', 1,\n 'Random seed for client sampling.')\n", (1561, 1635), False, 'from absl import flags\n'), ((1657, 1824), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', 
(['"""max_elements_per_client"""', 'None', '"""Maximum number of elements for each training client. If set to None, all available examples are used."""'], {}), "('max_elements_per_client', None,\n 'Maximum number of elements for each training client. If set to None, all available examples are used.'\n )\n", (1677, 1824), False, 'from absl import flags\n'), ((1876, 1953), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""total_rounds"""', '(200)', '"""Number of total training rounds."""'], {}), "('total_rounds', 200, 'Number of total training rounds.')\n", (1896, 1953), False, 'from absl import flags\n'), ((1956, 2109), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""experiment_name"""', 'None', '"""The name of this experiment. Will be append to --root_output_dir to separate experiment results."""'], {}), "('experiment_name', None,\n 'The name of this experiment. Will be append to --root_output_dir to separate experiment results.'\n )\n", (1975, 2109), False, 'from absl import flags\n'), ((2119, 2227), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""root_output_dir"""', '"""/tmp/fed_opt/"""', '"""Root directory for writing experiment output."""'], {}), "('root_output_dir', '/tmp/fed_opt/',\n 'Root directory for writing experiment output.')\n", (2138, 2227), False, 'from absl import flags\n'), ((2248, 2363), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""rounds_per_eval"""', '(1)', '"""How often to evaluate the global model on the validation dataset."""'], {}), "('rounds_per_eval', 1,\n 'How often to evaluate the global model on the validation dataset.')\n", (2268, 2363), False, 'from absl import flags\n'), ((2375, 2526), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_validation_examples"""', '(-1)', '"""The number of validationexamples to use. If set to -1, all available examples are used."""'], {}), "('num_validation_examples', -1,\n 'The number of validationexamples to use. 
If set to -1, all available examples are used.'\n )\n", (2395, 2526), False, 'from absl import flags\n'), ((2545, 2643), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""rounds_per_checkpoint"""', '(50)', '"""How often to checkpoint the global model."""'], {}), "('rounds_per_checkpoint', 50,\n 'How often to checkpoint the global model.')\n", (2565, 2643), False, 'from absl import flags\n'), ((2669, 2701), 'utils.utils_impl.record_hparam_flags', 'utils_impl.record_hparam_flags', ([], {}), '()\n', (2699, 2701), False, 'from utils import utils_impl\n'), ((2748, 2891), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""clip"""', 'None', '"""Clip value for fixed clipping or initial clip for adaptive clipping. If None, no clipping is used."""'], {}), "('clip', None,\n 'Clip value for fixed clipping or initial clip for adaptive clipping. If None, no clipping is used.'\n )\n", (2766, 2891), False, 'from absl import flags\n'), ((2901, 3006), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""noise_multiplier"""', 'None', '"""Noise multiplier. If None, non-DP aggregator is used."""'], {}), "('noise_multiplier', None,\n 'Noise multiplier. If None, non-DP aggregator is used.')\n", (2919, 3006), False, 'from absl import flags\n'), ((3026, 3155), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""adaptive_clip_learning_rate"""', 'None', '"""Adaptive clip learning rate. If None, clip adaptation is not used."""'], {}), "('adaptive_clip_learning_rate', None,\n 'Adaptive clip learning rate. 
If None, clip adaptation is not used.')\n", (3044, 3155), False, 'from absl import flags\n'), ((3170, 3256), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""target_unclipped_quantile"""', '(0.5)', '"""Target unclipped quantile."""'], {}), "('target_unclipped_quantile', 0.5,\n 'Target unclipped quantile.')\n", (3188, 3256), False, 'from absl import flags\n'), ((3276, 3367), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""uniform_weighting"""', '(False)', '"""Whether to weigh clients uniformly."""'], {}), "('uniform_weighting', False,\n 'Whether to weigh clients uniformly.')\n", (3296, 3367), False, 'from absl import flags\n'), ((3414, 3446), 'utils.utils_impl.record_hparam_flags', 'utils_impl.record_hparam_flags', ([], {}), '()\n', (3444, 3446), False, 'from utils import utils_impl\n'), ((3464, 3494), 'utils.task_utils.define_task_flags', 'task_utils.define_task_flags', ([], {}), '()\n', (3492, 3494), False, 'from utils import task_utils\n'), ((3634, 3677), 'utils.utils_impl.lookup_flag_values', 'utils_impl.lookup_flag_values', (['shared_flags'], {}), '(shared_flags)\n', (3663, 3677), False, 'from utils import utils_impl\n'), ((3769, 3815), 'utils.utils_impl.lookup_flag_values', 'utils_impl.lookup_flag_values', (['optimizer_flags'], {}), '(optimizer_flags)\n', (3798, 3815), False, 'from utils import utils_impl\n'), ((3834, 3894), 'utils.optimizers.optimizer_utils.remove_unused_flags', 'optimizer_utils.remove_unused_flags', (['"""client"""', 'opt_flag_dict'], {}), "('client', opt_flag_dict)\n", (3869, 3894), False, 'from utils.optimizers import optimizer_utils\n'), ((3913, 3973), 'utils.optimizers.optimizer_utils.remove_unused_flags', 'optimizer_utils.remove_unused_flags', (['"""server"""', 'opt_flag_dict'], {}), "('server', opt_flag_dict)\n", (3948, 3973), False, 'from utils.optimizers import optimizer_utils\n'), ((4057, 4098), 'utils.utils_impl.lookup_flag_values', 'utils_impl.lookup_flag_values', (['task_flags'], {}), '(task_flags)\n', (4086, 
4098), False, 'from utils import utils_impl\n'), ((4138, 4236), 'utils.training_utils.write_hparams_to_csv', 'training_utils.write_hparams_to_csv', (['hparam_dict', 'FLAGS.root_output_dir', 'FLAGS.experiment_name'], {}), '(hparam_dict, FLAGS.root_output_dir,\n FLAGS.experiment_name)\n', (4173, 4236), False, 'from utils import training_utils\n'), ((4447, 4503), 'utils.optimizers.optimizer_utils.create_optimizer_fn_from_flags', 'optimizer_utils.create_optimizer_fn_from_flags', (['"""client"""'], {}), "('client')\n", (4493, 4503), False, 'from utils.optimizers import optimizer_utils\n'), ((4528, 4584), 'utils.optimizers.optimizer_utils.create_optimizer_fn_from_flags', 'optimizer_utils.create_optimizer_fn_from_flags', (['"""server"""'], {}), "('server')\n", (4574, 4584), False, 'from utils.optimizers import optimizer_utils\n'), ((4608, 4774), 'tensorflow_federated.simulation.baselines.ClientSpec', 'tff.simulation.baselines.ClientSpec', ([], {'num_epochs': 'FLAGS.client_epochs_per_round', 'batch_size': 'FLAGS.client_batch_size', 'max_elements': 'FLAGS.max_elements_per_client'}), '(num_epochs=FLAGS.\n client_epochs_per_round, batch_size=FLAGS.client_batch_size,\n max_elements=FLAGS.max_elements_per_client)\n', (4643, 4774), True, 'import tensorflow_federated as tff\n'), ((4794, 4846), 'utils.task_utils.create_task_from_flags', 'task_utils.create_task_from_flags', (['train_client_spec'], {}), '(train_client_spec)\n', (4827, 4846), False, 'from utils import task_utils\n'), ((4850, 4884), 'absl.logging.info', 'logging.info', (['"""Trainable weights:"""'], {}), "('Trainable weights:')\n", (4862, 4884), False, 'from absl import logging\n'), ((7508, 7762), 'tensorflow_federated.learning.build_federated_averaging_process', 'tff.learning.build_federated_averaging_process', ([], {'model_fn': 'task.model_fn', 'server_optimizer_fn': 'server_optimizer_fn', 'client_weighting': 'client_weighting', 'client_optimizer_fn': 'client_optimizer_fn', 'model_update_aggregation_factory': 
'aggregation_factory'}), '(model_fn=task.model_fn,\n server_optimizer_fn=server_optimizer_fn, client_weighting=\n client_weighting, client_optimizer_fn=client_optimizer_fn,\n model_update_aggregation_factory=aggregation_factory)\n', (7554, 7762), True, 'import tensorflow_federated as tff\n'), ((7903, 8024), 'tensorflow_federated.simulation.compose_dataset_computation_with_iterative_process', 'tff.simulation.compose_dataset_computation_with_iterative_process', (['train_data.dataset_computation', 'iterative_process'], {}), '(train_data\n .dataset_computation, iterative_process)\n', (7968, 8024), True, 'import tensorflow_federated as tff\n'), ((8385, 8439), 'tensorflow_federated.learning.build_federated_evaluation', 'tff.learning.build_federated_evaluation', (['task.model_fn'], {}), '(task.model_fn)\n', (8424, 8439), True, 'import tensorflow_federated as tff\n'), ((8651, 8727), 'utils.training_utils.create_managers', 'training_utils.create_managers', (['FLAGS.root_output_dir', 'FLAGS.experiment_name'], {}), '(FLAGS.root_output_dir, FLAGS.experiment_name)\n', (8681, 8727), False, 'from utils import training_utils\n'), ((8769, 9207), 'tensorflow_federated.simulation.run_training_process', 'tff.simulation.run_training_process', ([], {'training_process': 'training_process', 'training_selection_fn': 'training_selection_fn', 'total_rounds': 'FLAGS.total_rounds', 'evaluation_fn': 'evaluation_fn', 'evaluation_selection_fn': 'evaluation_selection_fn', 'rounds_per_evaluation': 'FLAGS.rounds_per_eval', 'program_state_manager': 'program_state_manager', 'rounds_per_saving_program_state': 'FLAGS.rounds_per_checkpoint', 'metrics_managers': 'metrics_managers'}), '(training_process=training_process,\n training_selection_fn=training_selection_fn, total_rounds=FLAGS.\n total_rounds, evaluation_fn=evaluation_fn, evaluation_selection_fn=\n evaluation_selection_fn, rounds_per_evaluation=FLAGS.rounds_per_eval,\n program_state_manager=program_state_manager,\n 
rounds_per_saving_program_state=FLAGS.rounds_per_checkpoint,\n metrics_managers=metrics_managers)\n', (8804, 9207), True, 'import tensorflow_federated as tff\n'), ((9436, 9449), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (9443, 9449), False, 'from absl import app\n'), ((4942, 5004), 'absl.logging.info', 'logging.info', (['"""name: %s shape: %s"""', 'weight.name', 'weight.shape'], {}), "('name: %s shape: %s', weight.name, weight.shape)\n", (4954, 5004), False, 'from absl import logging\n'), ((8084, 8199), 'tensorflow_federated.simulation.build_uniform_sampling_fn', 'tff.simulation.build_uniform_sampling_fn', (['train_data.client_ids'], {'random_seed': 'FLAGS.client_datasets_random_seed'}), '(train_data.client_ids, random_seed\n =FLAGS.client_datasets_random_seed)\n', (8124, 8199), True, 'import tensorflow_federated as tff\n'), ((5430, 5469), 'tensorflow_federated.aggregators.UnweightedMeanFactory', 'tff.aggregators.UnweightedMeanFactory', ([], {}), '()\n', (5467, 5469), True, 'import tensorflow_federated as tff\n'), ((5508, 5537), 'tensorflow_federated.aggregators.MeanFactory', 'tff.aggregators.MeanFactory', ([], {}), '()\n', (5535, 5537), True, 'import tensorflow_federated as tff\n'), ((6213, 6272), 'tensorflow_federated.aggregators.clipping_factory', 'tff.aggregators.clipping_factory', (['clip', 'aggregation_factory'], {}), '(clip, aggregation_factory)\n', (6245, 6272), True, 'import tensorflow_federated as tff\n'), ((6733, 6902), 'tensorflow_federated.aggregators.DifferentiallyPrivateFactory.gaussian_fixed', 'tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed', ([], {'noise_multiplier': 'FLAGS.noise_multiplier', 'clients_per_round': 'FLAGS.clients_per_round', 'clip': 'FLAGS.clip'}), '(noise_multiplier\n =FLAGS.noise_multiplier, clients_per_round=FLAGS.clients_per_round,\n clip=FLAGS.clip)\n', (6792, 6902), True, 'import tensorflow_federated as tff\n'), ((7146, 7447), 
'tensorflow_federated.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive', 'tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive', ([], {'noise_multiplier': 'FLAGS.noise_multiplier', 'clients_per_round': 'FLAGS.clients_per_round', 'initial_l2_norm_clip': 'FLAGS.clip', 'target_unclipped_quantile': 'FLAGS.target_unclipped_quantile', 'learning_rate': 'FLAGS.adaptive_clip_learning_rate'}), '(noise_multiplier\n =FLAGS.noise_multiplier, clients_per_round=FLAGS.clients_per_round,\n initial_l2_norm_clip=FLAGS.clip, target_unclipped_quantile=FLAGS.\n target_unclipped_quantile, learning_rate=FLAGS.adaptive_clip_learning_rate)\n', (7208, 7447), True, 'import tensorflow_federated as tff\n'), ((5963, 6156), 'tensorflow_federated.aggregators.PrivateQuantileEstimationProcess.no_noise', 'tff.aggregators.PrivateQuantileEstimationProcess.no_noise', ([], {'initial_estimate': 'FLAGS.clip', 'target_quantile': 'FLAGS.target_unclipped_quantile', 'learning_rate': 'FLAGS.adaptive_clip_learning_rate'}), '(initial_estimate=\n FLAGS.clip, target_quantile=FLAGS.target_unclipped_quantile,\n learning_rate=FLAGS.adaptive_clip_learning_rate)\n', (6020, 6156), True, 'import tensorflow_federated as tff\n'), ((5243, 5282), 'tensorflow.squeeze', 'tf.squeeze', (["local_outputs['num_tokens']"], {}), "(local_outputs['num_tokens'])\n", (5253, 5282), True, 'import tensorflow as tf\n')] |
import open3d as o3d
import numpy as np

# Point cloud converted from Waymo data to KITTI-style .bin format.
# NOTE(review): hard-coded absolute path — adjust for your machine.
pc_load_pathname = '/home/caizhongang/github/waymo_kitti_converter/007283-000.bin'
# The .bin file is a flat float32 buffer; reshaped here to (N, 3) xyz points.
# NOTE(review): assumes 3 floats per point (no intensity channel) — confirm
# against the converter's output layout.
pc = np.fromfile(pc_load_pathname, dtype=np.float32).reshape(-1, 3)

pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pc)

# Unit coordinate frame at the origin, drawn for orientation reference.
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0,0,0])

visual = [pcd, axis]
o3d.visualization.draw_geometries(visual)
| [
"numpy.fromfile",
"open3d.visualization.draw_geometries",
"open3d.geometry.PointCloud",
"open3d.geometry.TriangleMesh.create_coordinate_frame",
"open3d.utility.Vector3dVector"
]
| [((199, 224), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (222, 224), True, 'import open3d as o3d\n'), ((239, 269), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pc'], {}), '(pc)\n', (265, 269), True, 'import open3d as o3d\n'), ((278, 353), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {'size': '(1)', 'origin': '[0, 0, 0]'}), '(size=1, origin=[0, 0, 0])\n', (327, 353), True, 'import open3d as o3d\n'), ((375, 416), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['visual'], {}), '(visual)\n', (408, 416), True, 'import open3d as o3d\n'), ((129, 176), 'numpy.fromfile', 'np.fromfile', (['pc_load_pathname'], {'dtype': 'np.float32'}), '(pc_load_pathname, dtype=np.float32)\n', (140, 176), True, 'import numpy as np\n')] |
"""node-path implementation for OpenGLContext
"""
from vrml.vrml97 import nodepath, nodetypes
from vrml.cache import CACHE
from OpenGLContext import quaternion
from OpenGL.GL import glMultMatrixf
class _NodePath( object ):
    """OpenGLContext-specific node-path class.

    Extends the vrml node-path with transform(), which applies the
    path's cumulative transform matrix via OpenGL, and quaternion(),
    which summarises the rotations along the path.
    """
    __slots__ = ()
    def transform( self, mode=None, translate=1, scale=1, rotate=1 ):
        """Apply the cumulative transform of every Transforming node via OpenGL.

        Does _not_ push/pop the matrix stack; do that yourself beforehand
        if you need to restore the current matrix.  This method is useful
        primarily for stored paths to, for instance, bindable nodes,
        where you want to transform down to the node quickly without a
        full scenegraph traversal.
        """
        composed = self.transformMatrix(
            translate=translate, scale=scale, rotate=rotate
        )
        glMultMatrixf( composed )
    def quaternion( self ):
        """Get summary quaternion for all rotations in stack"""
        result = quaternion.Quaternion()
        for node in self:
            if isinstance( node, nodetypes.Transforming ) and hasattr( node, "orientation" ):
                result = result * quaternion.fromXYZR( *node.orientation )
        return result
class NodePath( _NodePath, nodepath.NodePath ):
    """Strong-reference node path with the OpenGLContext extensions."""
    pass
class WeakNodePath( _NodePath, nodepath.WeakNodePath ):
    """Weak-reference node path with the OpenGLContext extensions."""
    pass
| [
"OpenGLContext.quaternion.Quaternion",
"OpenGL.GL.glMultMatrixf",
"OpenGLContext.quaternion.fromXYZR"
]
| [((1074, 1095), 'OpenGL.GL.glMultMatrixf', 'glMultMatrixf', (['matrix'], {}), '(matrix)\n', (1087, 1095), False, 'from OpenGL.GL import glMultMatrixf\n'), ((1435, 1458), 'OpenGLContext.quaternion.Quaternion', 'quaternion.Quaternion', ([], {}), '()\n', (1456, 1458), False, 'from OpenGLContext import quaternion\n'), ((1506, 1544), 'OpenGLContext.quaternion.fromXYZR', 'quaternion.fromXYZR', (['*node.orientation'], {}), '(*node.orientation)\n', (1525, 1544), False, 'from OpenGLContext import quaternion\n')] |
import random
from math import sqrt

# Sum of the integers 0..100.
total = 0  # renamed from `sum` to avoid shadowing the builtin
for x in range(101):
    total += x
print(total)

'''
range(101)          0-100, 101 numbers in total
range(1, 101)       1-100
range(1, 101, 2)    odd numbers in 1-100, step 2
range(100, 0, -2)   even numbers from 100 down, step -2
'''

# Sum of the even numbers from 100 down to 2.
total = 0
for x in range(100, 0, -2):
    total += x
print(total)

# Guessing game: a random answer in [0, 100].
answer = random.randint(0, 100)
count = 0
while True:
    count += 1
    number = int(input("Please enter the number: "))
    if number < answer:
        print("more larger")
    elif number > answer:
        print("more smaller")
    else:
        print("right")
        # Bug fix: without this break the loop never ended and the
        # summary line below was unreachable.
        break
# Bug fix: the format string was 'you got d% times ...', which is an
# invalid %-format; '%d' was intended.
print('you got %d times to get right answer' % count)

# 9x9 multiplication table.
for i in range(1, 10):
    for j in range(1, i + 1):
        print('%d*%d=%d' % (i, j, i * j), end='\t')
    print()

# Read a positive integer and test whether it is prime.
num = int(input('请输入一个正整数: '))
end = int(sqrt(num))
is_prime = True
# Only divisors up to sqrt(num) need testing: any factor below sqrt(num)
# pairs with a corresponding factor above it.
for x in range(2, end + 1):
    if num % x == 0:
        is_prime = False
        break
if is_prime and num != 1:
    print('%d是素数' % num)
else:
    print('%d不是素数' % num)
| [
"math.sqrt",
"random.randint"
]
| [((301, 323), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (315, 323), False, 'import random\n'), ((789, 798), 'math.sqrt', 'sqrt', (['num'], {}), '(num)\n', (793, 798), False, 'from math import sqrt\n')] |
#!/usr/bin/env python
"""
Compute diffusion coefficient from MSD data.
Time interval, DT, is obtained from in.pmd in the same directory.
Usage:
msd2diff.py [options] MSD_FILE
Options:
-h, --help Show this message and exit.
-o, --offset OFFSET
Offset of given data. [default: 0]
--plot Plot a fitted graph. [default: False]
"""
from __future__ import print_function
import os,sys
from docopt import docopt
import numpy as np
__author__ = "<NAME>"
__version__ = "191212"
def read_out_msd(fname='out.msd',offset=0,specorder=None,spc=None):
    """Read time-vs-MSD data from a pmd ``out.msd`` file.

    The time step between rows is derived from the ``in.pmd`` file
    located in the same directory as FNAME.

    Args:
        fname: Path to the out.msd file.
        offset: Number of leading lines whose data row defines the
            origin; reported times/MSD values are relative to the last
            skipped row.
            NOTE(review): offset is compared against the raw line index
            (comment lines included), not the data-row count — confirm
            this is the intended semantics.
        specorder: Species names corresponding to the data columns.
            Defaults to an empty list.  (Changed from a mutable ``[]``
            default, which is shared across calls.)
        spc: Species whose MSD column to read; if absent from
            specorder, the first data column is used.

    Returns:
        (ts, msds): numpy arrays of times [fs] and MSD values [Ang^2],
        both relative to the offset row.

    Raises:
        RuntimeError: If the companion in.pmd file cannot be read.
    """
    if specorder is None:
        specorder = []
    if specorder == [] or spc not in specorder:
        index = 1
    else:
        index = specorder.index(spc) +1
    with open(fname,'r') as f:
        lines = f.readlines()
    try:
        dname = os.path.dirname(fname)
        dt = dt_from_inpmd(fname=dname+'/in.pmd')
    except Exception as e:
        # Chain the original exception so the root cause is preserved.
        raise RuntimeError('Failed to read in.pmd.') from e
    ts = []
    msds = []
    n0 = 0
    msd0 = 0.0
    for il,line in enumerate(lines):
        if line[0] == '#':
            continue
        data = line.split()
        if il < offset:
            # Remember the last skipped row; it becomes the origin.
            n0 = int(data[0])
            msd0 = float(data[index])
            continue
        n = int(data[0])
        msd = float(data[index])
        ts.append((n-n0)*dt)
        msds.append(msd-msd0)
    return np.array(ts),np.array(msds)

def dt_from_inpmd(fname='in.pmd'):
    """Return the output time interval [fs] implied by an in.pmd file.

    Reads the ``time_interval``, ``num_iteration`` and ``num_out_pos``
    (or ``num_out_pmd``) entries and returns
    ``time_interval * num_iteration / num_out_pos``.
    """
    with open(fname,'r') as f:
        lines = f.readlines()
    for line in lines:
        if 'time_interval' in line:
            time_interval = abs(float(line.split()[1]))
        elif 'num_iteration' in line:
            num_iteration = int(line.split()[1])
        elif 'num_out_pos' in line or 'num_out_pmd' in line:
            num_out_pos = int(line.split()[1])
    return time_interval*num_iteration/num_out_pos
def msd2D(ts,msds,fac,dim=3):
    """
    Compute diffusion coefficient from time [fs] vs MSD [Ang^2] data
    by solving least square problem using numpy.
    Return diffusion coefficient multiplied by FAC.
    """
    # Design matrix with a column of times and a column of ones (intercept).
    design = np.array([ts, np.ones(len(ts))]).T
    xvar = np.var(design[:,0])
    coeffs,res,_,_ = np.linalg.lstsq(design,msds,rcond=None)
    # Slope -> diffusion coefficient via MSD = 2*dim*D*t; both slope and
    # intercept are rescaled by FAC.
    a = coeffs[0] *fac /(2.0*dim)
    b = coeffs[1] *fac
    # Standard error of the slope from the residual sum of squares.
    std = np.sqrt(res[0]/len(ts)/xvar) *fac /(2.0*dim)
    return a,b,std
if __name__ == "__main__":

    # Parse command-line options per the module docstring via docopt.
    args = docopt(__doc__)
    fname = args['MSD_FILE']
    offset = int(args['--offset'])
    plot = args['--plot']

    ts,msds = read_out_msd(fname,offset)
    #...Assuming input MSD unit in A^2/fs and output in cm^2/s
    fac = 1.0e-16 /1.0e-15
    #...Least square
    a,b,std = msd2D(ts,msds,fac)
    print(' Diffusion coefficient = {0:12.4e}'.format(a)+
          ' +/- {0:12.4e} [cm^2/s]'.format(std))

    if plot:
        # Imported lazily so plotting deps are only needed with --plot.
        import matplotlib.pyplot as plt
        import seaborn as sns
        sns.set(context='talk',style='ticks')
        #...Original time unit == fs
        unit = 'fs'
        tfac = 1.0
        if ts[-1] > 1.0e+5: #...if max t > 100ps, time unit in ps
            unit = 'ps'
            tfac = 1.0e-3
        plt.xlabel('Time ({0:s})'.format(unit))
        plt.ylabel('MSD (A^2/{0:s})'.format(unit))
        # Fitted line evaluated at the data times (undo the FAC scaling).
        fvals = np.array([ (t*a+b)/fac for t in ts ])
        plt.plot(ts*tfac,msds/tfac,'b-',label='MSD data')
        plt.plot(ts*tfac,fvals/tfac,'r-',label='Fitted curve')
        plt.savefig("graph_msd2D.png", format='png',
                    dpi=300, bbox_inches='tight')
        print(' Wrote graph_msd2D.png')
| [
"seaborn.set",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"os.path.dirname",
"numpy.array",
"numpy.linalg.lstsq",
"docopt.docopt",
"numpy.var"
]
| [((2111, 2126), 'numpy.var', 'np.var', (['A[:, 0]'], {}), '(A[:, 0])\n', (2117, 2126), True, 'import numpy as np\n'), ((2142, 2178), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'msds'], {'rcond': 'None'}), '(A, msds, rcond=None)\n', (2157, 2178), True, 'import numpy as np\n'), ((2436, 2451), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (2442, 2451), False, 'from docopt import docopt\n'), ((780, 802), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (795, 802), False, 'import os, sys\n'), ((1339, 1351), 'numpy.array', 'np.array', (['ts'], {}), '(ts)\n', (1347, 1351), True, 'import numpy as np\n'), ((1352, 1366), 'numpy.array', 'np.array', (['msds'], {}), '(msds)\n', (1360, 1366), True, 'import numpy as np\n'), ((2927, 2965), 'seaborn.set', 'sns.set', ([], {'context': '"""talk"""', 'style': '"""ticks"""'}), "(context='talk', style='ticks')\n", (2934, 2965), True, 'import seaborn as sns\n'), ((3272, 3315), 'numpy.array', 'np.array', (['[((t * a + b) / fac) for t in ts]'], {}), '([((t * a + b) / fac) for t in ts])\n', (3280, 3315), True, 'import numpy as np\n'), ((3318, 3374), 'matplotlib.pyplot.plot', 'plt.plot', (['(ts * tfac)', '(msds / tfac)', '"""b-"""'], {'label': '"""MSD data"""'}), "(ts * tfac, msds / tfac, 'b-', label='MSD data')\n", (3326, 3374), True, 'import matplotlib.pyplot as plt\n'), ((3376, 3437), 'matplotlib.pyplot.plot', 'plt.plot', (['(ts * tfac)', '(fvals / tfac)', '"""r-"""'], {'label': '"""Fitted curve"""'}), "(ts * tfac, fvals / tfac, 'r-', label='Fitted curve')\n", (3384, 3437), True, 'import matplotlib.pyplot as plt\n'), ((3439, 3513), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""graph_msd2D.png"""'], {'format': '"""png"""', 'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('graph_msd2D.png', format='png', dpi=300, bbox_inches='tight')\n", (3450, 3513), True, 'import matplotlib.pyplot as plt\n')] |
import pytest
import numbers
import numpy as np
from numpy.testing import assert_raises
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises_regex
from skopt.space import LogN, Normalize
@pytest.mark.fast_test
def test_logn2_integer():
transformer = LogN(2)
for X in range(2, 31):
X_orig = transformer.inverse_transform(transformer.transform(X))
assert_array_equal(int(np.round(X_orig)), X)
@pytest.mark.fast_test
def test_logn10_integer():
transformer = LogN(2)
for X in range(2, 31):
X_orig = transformer.inverse_transform(transformer.transform(X))
assert_array_equal(int(np.round(X_orig)), X)
@pytest.mark.fast_test
def test_normalize_integer():
transformer = Normalize(1, 20, is_int=True)
assert transformer.transform(19.8) == 1.0
assert transformer.transform(20.2) == 1.0
assert transformer.transform(1.2) == 0.0
assert transformer.transform(0.9) == 0.0
assert_raises(ValueError, transformer.transform, 20.6)
assert_raises(ValueError, transformer.transform, 0.4)
assert transformer.inverse_transform(0.99) == 20
assert transformer.inverse_transform(0.01) == 1
assert_raises(ValueError, transformer.inverse_transform, 1. + 1e-8)
assert_raises(ValueError, transformer.transform, 0. - 1e-8)
@pytest.mark.fast_test
def test_normalize():
transformer = Normalize(1, 20, is_int=False)
assert transformer.transform(20.) == 1.0
assert transformer.transform(1.) == 0.0
assert_raises(ValueError, transformer.transform, 20. + 1e-7)
assert_raises(ValueError, transformer.transform, 1.0 - 1e-7)
assert_raises(ValueError, transformer.inverse_transform, 1. + 1e-8)
assert_raises(ValueError, transformer.transform, 0. - 1e-8)
| [
"numpy.round",
"skopt.space.LogN",
"numpy.testing.assert_raises",
"skopt.space.Normalize"
]
| [((328, 335), 'skopt.space.LogN', 'LogN', (['(2)'], {}), '(2)\n', (332, 335), False, 'from skopt.space import LogN, Normalize\n'), ((559, 566), 'skopt.space.LogN', 'LogN', (['(2)'], {}), '(2)\n', (563, 566), False, 'from skopt.space import LogN, Normalize\n'), ((793, 822), 'skopt.space.Normalize', 'Normalize', (['(1)', '(20)'], {'is_int': '(True)'}), '(1, 20, is_int=True)\n', (802, 822), False, 'from skopt.space import LogN, Normalize\n'), ((1009, 1063), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'transformer.transform', '(20.6)'], {}), '(ValueError, transformer.transform, 20.6)\n', (1022, 1063), False, 'from numpy.testing import assert_raises\n'), ((1068, 1121), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'transformer.transform', '(0.4)'], {}), '(ValueError, transformer.transform, 0.4)\n', (1081, 1121), False, 'from numpy.testing import assert_raises\n'), ((1232, 1301), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'transformer.inverse_transform', '(1.0 + 1e-08)'], {}), '(ValueError, transformer.inverse_transform, 1.0 + 1e-08)\n', (1245, 1301), False, 'from numpy.testing import assert_raises\n'), ((1304, 1365), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'transformer.transform', '(0.0 - 1e-08)'], {}), '(ValueError, transformer.transform, 0.0 - 1e-08)\n', (1317, 1365), False, 'from numpy.testing import assert_raises\n'), ((1429, 1459), 'skopt.space.Normalize', 'Normalize', (['(1)', '(20)'], {'is_int': '(False)'}), '(1, 20, is_int=False)\n', (1438, 1459), False, 'from skopt.space import LogN, Normalize\n'), ((1553, 1615), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'transformer.transform', '(20.0 + 1e-07)'], {}), '(ValueError, transformer.transform, 20.0 + 1e-07)\n', (1566, 1615), False, 'from numpy.testing import assert_raises\n'), ((1618, 1679), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'transformer.transform', '(1.0 - 1e-07)'], {}), 
'(ValueError, transformer.transform, 1.0 - 1e-07)\n', (1631, 1679), False, 'from numpy.testing import assert_raises\n'), ((1683, 1752), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'transformer.inverse_transform', '(1.0 + 1e-08)'], {}), '(ValueError, transformer.inverse_transform, 1.0 + 1e-08)\n', (1696, 1752), False, 'from numpy.testing import assert_raises\n'), ((1755, 1816), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'transformer.transform', '(0.0 - 1e-08)'], {}), '(ValueError, transformer.transform, 0.0 - 1e-08)\n', (1768, 1816), False, 'from numpy.testing import assert_raises\n'), ((467, 483), 'numpy.round', 'np.round', (['X_orig'], {}), '(X_orig)\n', (475, 483), True, 'import numpy as np\n'), ((698, 714), 'numpy.round', 'np.round', (['X_orig'], {}), '(X_orig)\n', (706, 714), True, 'import numpy as np\n')] |
import tensorflow as tf
FLIPPING_TENSOR = tf.constant([1.0, -1.0, 1.0])
@tf.function
def sample_data(points, labels, num_point):
if tf.random.uniform(shape=()) >= 0.5:
return points * FLIPPING_TENSOR, labels
return points, labels
mock_data = tf.constant([
[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]
])
mock_labels = tf.constant([
[1.], [0.], [1.]
])
sampling_lambda = lambda x, y: sample_data(x, y, 512)
train_data = tf.data.Dataset.from_tensors((mock_data, mock_labels)) \
.map(sampling_lambda) \
.unbatch() \
.batch(1) \
.repeat(5)
for x, y in train_data:
print(x) | [
"tensorflow.random.uniform",
"tensorflow.data.Dataset.from_tensors",
"tensorflow.constant"
]
| [((43, 72), 'tensorflow.constant', 'tf.constant', (['[1.0, -1.0, 1.0]'], {}), '([1.0, -1.0, 1.0])\n', (54, 72), True, 'import tensorflow as tf\n'), ((263, 327), 'tensorflow.constant', 'tf.constant', (['[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]'], {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])\n', (274, 327), True, 'import tensorflow as tf\n'), ((348, 382), 'tensorflow.constant', 'tf.constant', (['[[1.0], [0.0], [1.0]]'], {}), '([[1.0], [0.0], [1.0]])\n', (359, 382), True, 'import tensorflow as tf\n'), ((138, 165), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '()'}), '(shape=())\n', (155, 165), True, 'import tensorflow as tf\n'), ((455, 509), 'tensorflow.data.Dataset.from_tensors', 'tf.data.Dataset.from_tensors', (['(mock_data, mock_labels)'], {}), '((mock_data, mock_labels))\n', (483, 509), True, 'import tensorflow as tf\n')] |
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return 'This is the app index page.'
| [
"flask.Flask"
]
| [((30, 45), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (35, 45), False, 'from flask import Flask\n')] |
# Generated by Django 3.1.6 on 2021-02-16 11:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schedule', '0026_event'),
]
operations = [
migrations.AlterField(
model_name='group',
name='students',
field=models.ManyToManyField(blank=True, to='schedule.Student', verbose_name='Учні'),
),
migrations.AlterField(
model_name='teacher',
name='subjects',
field=models.ManyToManyField(blank=True, to='schedule.Subject', verbose_name='Предмети'),
),
]
| [
"django.db.models.ManyToManyField"
]
| [((325, 403), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'to': '"""schedule.Student"""', 'verbose_name': '"""Учні"""'}), "(blank=True, to='schedule.Student', verbose_name='Учні')\n", (347, 403), False, 'from django.db import migrations, models\n'), ((528, 615), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'to': '"""schedule.Subject"""', 'verbose_name': '"""Предмети"""'}), "(blank=True, to='schedule.Subject', verbose_name=\n 'Предмети')\n", (550, 615), False, 'from django.db import migrations, models\n')] |
from django.test import SimpleTestCase as TestCase
from corehq.apps.app_manager.models import _parse_xml
import os
class XMLParsingTest(TestCase):
def testUnicodeError(self):
"""Tests a bug found in Unicode processing of a form"""
file_path = os.path.join(os.path.dirname(__file__), "data", "unicode_error_form.xhtml")
with open(file_path, "rb") as f:
xml_data = f.read()
try:
_parse_xml(xml_data) # this should not raise an error
except:
self.fail("Parsing normal string data shouldn't fail!")
try:
_parse_xml(unicode(xml_data))
except:
self.fail("Parsing unicode data shouldn't fail!")
| [
"os.path.dirname",
"corehq.apps.app_manager.models._parse_xml"
]
| [((282, 307), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (297, 307), False, 'import os\n'), ((443, 463), 'corehq.apps.app_manager.models._parse_xml', '_parse_xml', (['xml_data'], {}), '(xml_data)\n', (453, 463), False, 'from corehq.apps.app_manager.models import _parse_xml\n')] |
import unittest
from app.models import News
# News = news.News
class NewsTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Movie class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_news = News('abc-news','ABC NEWS','Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com.','http://www.abc.net.au/news','business','au')
def test_instance(self):
self.assertTrue(isinstance(self.new_news,News))
def test_init(self):
self.assertEqual(self.new_news.id,'abc-news')
self.assertEqual(self.new_news.name,'ABC NEWS')
self.assertEqual(self.new_news.description,'Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com.')
self.assertEqual(self.new_news.url,'http://www.abc.net.au/news')
self.assertEqual(self.new_news.country,'au')
# if __name__ == '__main__':
# unittest.main()
| [
"app.models.News"
]
| [((295, 492), 'app.models.News', 'News', (['"""abc-news"""', '"""ABC NEWS"""', '"""Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com."""', '"""http://www.abc.net.au/news"""', '"""business"""', '"""au"""'], {}), "('abc-news', 'ABC NEWS',\n 'Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com.'\n , 'http://www.abc.net.au/news', 'business', 'au')\n", (299, 492), False, 'from app.models import News\n')] |
from sanic import Blueprint, Request, HTTPResponse, response
from sanic.models.handler_types import RouteHandler
from functools import wraps
from inspect import isawaitable
from typing import Callable, Dict, Any, Union, Awaitable, List, Optional
ACCEPTED_WISH_VERS = ['wish.alpha.v1']
WishHandler = Callable[..., Union[Dict[str, Any], Awaitable[Dict[str, Any]]]]
def wish_endpoint(bp: Blueprint, uri: str, *, methods: Optional[List[str]] = None) -> Callable[[WishHandler], RouteHandler]:
if methods is None:
methods = ['POST']
def decorator(fn: WishHandler) -> RouteHandler:
@wraps(fn)
async def wrapped(req: Request, *args: Any, **kwargs: Any) -> HTTPResponse:
v = req.headers.get('X-Wish-Version', '(none)')
if v not in ACCEPTED_WISH_VERS:
return response.json({
'error': 'WISH_VERSION_MISMATCH',
'error_msg': f'前端版本 {v} 不是最新',
})
retval_ = fn(req, *args, **kwargs)
retval = (await retval_) if isawaitable(retval_) else retval_
return response.json({
'error': None, # may be overridden by retval
**retval,
})
return bp.route(uri, methods)(wrapped) # type: ignore
return decorator | [
"sanic.response.json",
"inspect.isawaitable",
"functools.wraps"
]
| [((604, 613), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (609, 613), False, 'from functools import wraps\n'), ((1107, 1147), 'sanic.response.json', 'response.json', (["{'error': None, **retval}"], {}), "({'error': None, **retval})\n", (1120, 1147), False, 'from sanic import Blueprint, Request, HTTPResponse, response\n'), ((825, 910), 'sanic.response.json', 'response.json', (["{'error': 'WISH_VERSION_MISMATCH', 'error_msg': f'前端版本 {v} 不是最新'}"], {}), "({'error': 'WISH_VERSION_MISMATCH', 'error_msg': f'前端版本 {v} 不是最新'}\n )\n", (838, 910), False, 'from sanic import Blueprint, Request, HTTPResponse, response\n'), ((1053, 1073), 'inspect.isawaitable', 'isawaitable', (['retval_'], {}), '(retval_)\n', (1064, 1073), False, 'from inspect import isawaitable\n')] |
import argparse
import boto3
import json
import logging
import os
from progressbar import ProgressBar
import sys
"""
Collects IAM Policies
Evaluates policies looking for badness (*.*, Effect:Allow + NotAction)
Need to add more tests/use cases
"""
def get_policies(profile):
session = boto3.session.Session(profile_name=profile)
myiam = session.client('iam')
marker = None
allPolicies = []
passcount = 1
while True:
pbar = ProgressBar('Collecting Policies')
print("Policy Collection, Pass Number: {}".format(passcount))
passcount += 1
if marker:
response_iterator = myiam.list_policies(OnlyAttached=True,
Marker=marker)
else:
response_iterator = myiam.list_policies(OnlyAttached=True)
for p in pbar(response_iterator['Policies']):
polVers = myiam.get_policy_version(
PolicyArn=p['Arn'], VersionId=p['DefaultVersionId'])
mypol = {'Policy': p, 'PolicyVersion': polVers['PolicyVersion']}
allPolicies.append(mypol)
pfl = open(os.path.join('policies/', p['PolicyName']+'.json'), 'w')
pfl.write(json.dumps(mypol, default=str, indent=4))
pfl.close()
ae = myiam.list_entities_for_policy(PolicyArn=p['Arn'])
pfl = open(os.path.join('attachedentities/',
p['PolicyName']+'.json'), 'w')
pfl.write(json.dumps(ae, default=str, indent=4))
pfl.close()
try:
marker = response_iterator['Marker']
except KeyError:
break
print("\nTotal Policies: {}".format(len(allPolicies)))
pbar = ProgressBar('\tChecking for Dangerous Policies')
for p in pbar(allPolicies):
# This section looks for bad/dangerous patterns
# Pattern 1: Allow *.*
# AWSLambdaRole {
# 'Version': '2012-10-17',
# 'Statement': [
# {'Effect': 'Allow',
# 'Action': '*',
# 'Resource': ['*']
# }
# ]
# }
try:
q = p['PolicyVersion']['Document']['Statement'][0]
except Exception as e:
print("Problem parsing this policy: {}".format(p))
logging.debug("Problem parsing this policy: {}".format(p))
print(e)
continue
try:
if (q['Effect'] == "Allow" and '*' in q['Resource']
and '*' in q['Action']):
print("Review Dangerous Policy: {} -> {}".format(
p['Policy']['PolicyName'],
p['PolicyVersion']['Document']))
except Exception as e:
pass
# Pattern 2: Allow: *, NotAction
# {'Version': '2012-10-17',
# 'Statement': [
# {
# 'Effect': 'Allow',
# 'NotAction': ['iam:*', 'organizations:*', 'account:*'],
# 'Resource': '*'
# },
# {
# 'Effect': 'Allow',
# 'Action': [ 'iam:CreateServiceLinkedRole',
# 'iam:DeleteServiceLinkedRole',
# 'iam:ListRoles',
# 'organizations:DescribeOrganization',
# 'account:ListRegions'
# ],
# 'Resource': '*'
# }
# ]}
# This policy blacklists all 'iam:*', 'organizations:*', and
# 'accounts:*' with the NotAction. Then it grants specific
# access in the next stanza ('iam:ListRoles', etc)
# The fatal flaw is that it grants access to everything else,
# like lambda or ec2 because of the "Allow" in the first stanza.
# This user can create an EC2 instance, attach an admin role to
# it, and login and give themselves access to Admin. Instance
# privilege escalation.
try:
if (q['NotAction'] and q['Effect'] == 'Allow'
and q['Resource'] == '*'):
print("Review Suspect Policy: {} -> {}".format(
p['Policy']['PolicyName'],
p['PolicyVersion']['Document']))
except Exception as e:
pass
return
def check_args_creds(args):
# handle profiles / authentication / credentials
workingCreds = False
global logging
global workingProfiles
workingProfiles = []
if not args.profile:
logging.info("Using AWS Default Profile")
if (not check_profile("default")):
logging.error("Default credentials not working.")
print("Default credentials not working.")
quit()
else:
workingProfiles.append("default")
workingCreds = True
if args.profile and args.profile is not None:
logging.info("Using " + args.profile + " Profile")
if (not check_profile(args.profile)):
logging.error("Profile " + args.profile + " not working")
exit(1)
else:
logging.info("Profile " + args.profile + " working")
workingProfiles.append(args.profile)
workingCreds = True
return args.profile
def check_profile(profile):
global logging
try:
if(profile == "default"):
client = boto3.session.Session()
else:
logging.info("Testing profile: " + profile)
client = boto3.session.Session(profile_name=profile)
except Exception as e:
logging.error("Error connecting: ")
logging.error(e)
return False
try:
iam = client.client('iam')
response = iam.list_users()
except Exception as e:
logging.error("Error listing users: ")
logging.error(e)
return False
if len(response['Users']) == 0:
logging.info("No users")
if len(response) > 0:
usercnt = len(response['Users'])
if(usercnt > 1):
userresp = " Users"
else:
userresp = " User"
logging.info(str(usercnt) + userresp)
return True
def setup_args(parser):
parser.add_argument("-p", "--profile",
help="AWS Profile")
parser.add_argument("-l", "--log",
help="Log Level")
def main():
global logging
parser = argparse.ArgumentParser()
setup_args(parser)
global args
args = parser.parse_args()
if args.log and args.log.upper() == "DEBUG":
loglevel = "DEBUG"
else:
loglevel = "INFO"
logging.basicConfig(filename='policyAssessment.log',
format='%(levelname)s:%(message)s',
level=loglevel)
profile = check_args_creds(args)
get_policies(profile)
if __name__ == "__main__":
# execute only if run as a script
main()
| [
"logging.basicConfig",
"boto3.session.Session",
"argparse.ArgumentParser",
"json.dumps",
"os.path.join",
"logging.info",
"logging.error",
"progressbar.ProgressBar"
]
| [((293, 336), 'boto3.session.Session', 'boto3.session.Session', ([], {'profile_name': 'profile'}), '(profile_name=profile)\n', (314, 336), False, 'import boto3\n'), ((1735, 1783), 'progressbar.ProgressBar', 'ProgressBar', (['"""\tChecking for Dangerous Policies"""'], {}), "('\\tChecking for Dangerous Policies')\n", (1746, 1783), False, 'from progressbar import ProgressBar\n'), ((6365, 6390), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6388, 6390), False, 'import argparse\n'), ((6577, 6686), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""policyAssessment.log"""', 'format': '"""%(levelname)s:%(message)s"""', 'level': 'loglevel'}), "(filename='policyAssessment.log', format=\n '%(levelname)s:%(message)s', level=loglevel)\n", (6596, 6686), False, 'import logging\n'), ((459, 493), 'progressbar.ProgressBar', 'ProgressBar', (['"""Collecting Policies"""'], {}), "('Collecting Policies')\n", (470, 493), False, 'from progressbar import ProgressBar\n'), ((4495, 4536), 'logging.info', 'logging.info', (['"""Using AWS Default Profile"""'], {}), "('Using AWS Default Profile')\n", (4507, 4536), False, 'import logging\n'), ((4865, 4915), 'logging.info', 'logging.info', (["('Using ' + args.profile + ' Profile')"], {}), "('Using ' + args.profile + ' Profile')\n", (4877, 4915), False, 'import logging\n'), ((5869, 5893), 'logging.info', 'logging.info', (['"""No users"""'], {}), "('No users')\n", (5881, 5893), False, 'import logging\n'), ((4592, 4641), 'logging.error', 'logging.error', (['"""Default credentials not working."""'], {}), "('Default credentials not working.')\n", (4605, 4641), False, 'import logging\n'), ((4974, 5031), 'logging.error', 'logging.error', (["('Profile ' + args.profile + ' not working')"], {}), "('Profile ' + args.profile + ' not working')\n", (4987, 5031), False, 'import logging\n'), ((5078, 5130), 'logging.info', 'logging.info', (["('Profile ' + args.profile + ' working')"], {}), "('Profile ' + args.profile + 
' working')\n", (5090, 5130), False, 'import logging\n'), ((5349, 5372), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (5370, 5372), False, 'import boto3\n'), ((5399, 5442), 'logging.info', 'logging.info', (["('Testing profile: ' + profile)"], {}), "('Testing profile: ' + profile)\n", (5411, 5442), False, 'import logging\n'), ((5464, 5507), 'boto3.session.Session', 'boto3.session.Session', ([], {'profile_name': 'profile'}), '(profile_name=profile)\n', (5485, 5507), False, 'import boto3\n'), ((5543, 5578), 'logging.error', 'logging.error', (['"""Error connecting: """'], {}), "('Error connecting: ')\n", (5556, 5578), False, 'import logging\n'), ((5587, 5603), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (5600, 5603), False, 'import logging\n'), ((5740, 5778), 'logging.error', 'logging.error', (['"""Error listing users: """'], {}), "('Error listing users: ')\n", (5753, 5778), False, 'import logging\n'), ((5787, 5803), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (5800, 5803), False, 'import logging\n'), ((1138, 1190), 'os.path.join', 'os.path.join', (['"""policies/"""', "(p['PolicyName'] + '.json')"], {}), "('policies/', p['PolicyName'] + '.json')\n", (1150, 1190), False, 'import os\n'), ((1217, 1257), 'json.dumps', 'json.dumps', (['mypol'], {'default': 'str', 'indent': '(4)'}), '(mypol, default=str, indent=4)\n', (1227, 1257), False, 'import json\n'), ((1374, 1434), 'os.path.join', 'os.path.join', (['"""attachedentities/"""', "(p['PolicyName'] + '.json')"], {}), "('attachedentities/', p['PolicyName'] + '.json')\n", (1386, 1434), False, 'import os\n'), ((1497, 1534), 'json.dumps', 'json.dumps', (['ae'], {'default': 'str', 'indent': '(4)'}), '(ae, default=str, indent=4)\n', (1507, 1534), False, 'import json\n')] |
import json
import os
import re
from testinfra.utils.ansible_runner import AnsibleRunner
import util
testinfra_hosts = AnsibleRunner(os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('agent-integrations')
def _get_key_value(tag_list):
for key, value in (pair.split(':', 1) for pair in tag_list):
yield key, value
def _component_data(json_data, type_name, external_id_assert_fn, tags_assert_fn):
for message in json_data["messages"]:
p = message["message"]["TopologyElement"]["payload"]
if "TopologyComponent" in p and \
p["TopologyComponent"]["typeName"] == type_name and \
external_id_assert_fn(p["TopologyComponent"]["externalId"]):
data = json.loads(p["TopologyComponent"]["data"])
if tags_assert_fn(dict(_get_key_value(data["tags"]))):
return data
return None
def test_nagios_mysql(host):
def assert_topology():
topo_url = "http://localhost:7070/api/topic/sts_topo_process_agents?limit=1500"
data = host.check_output('curl "{}"'.format(topo_url))
json_data = json.loads(data)
with open("./topic-nagios-topo-process-agents.json", 'w') as f:
json.dump(json_data, f, indent=4)
external_id_pattern = re.compile(r"urn:container:/agent-integrations:.*")
components = [
{
"assertion": "Should find the nagios container",
"type": "container",
"external_id": lambda e_id: external_id_pattern.findall(e_id),
"tags": lambda t: t["container_name"] == "ubuntu_nagios_1"
},
{
"assertion": "Should find the mysql container",
"type": "container",
"external_id": lambda e_id: external_id_pattern.findall(e_id),
"tags": lambda t: t["container_name"] == "ubuntu_mysql_1"
}
]
for c in components:
print("Running assertion for: " + c["assertion"])
assert _component_data(
json_data=json_data,
type_name=c["type"],
external_id_assert_fn=c["external_id"],
tags_assert_fn=c["tags"],
) is not None
util.wait_until(assert_topology, 30, 3)
def test_container_metrics(host):
url = "http://localhost:7070/api/topic/sts_multi_metrics?limit=1000"
def wait_for_metrics():
data = host.check_output("curl \"%s\"" % url)
json_data = json.loads(data)
with open("./topic-nagios-sts-multi-metrics.json", 'w') as f:
json.dump(json_data, f, indent=4)
def get_keys(m_host):
return set(
''.join(message["message"]["MultiMetric"]["values"].keys())
for message in json_data["messages"]
if message["message"]["MultiMetric"]["name"] == "convertedMetric" and
message["message"]["MultiMetric"]["host"] == m_host
)
expected = {'nagios.http.size', 'nagios.ping.pl', 'nagios.http.time', 'nagios.current_load.load15',
'nagios.swap_usage.swap', 'nagios.host.pl', 'nagios.root_partition', 'nagios.current_users.users',
'nagios.current_load.load1', 'nagios.host.rta', 'nagios.ping.rta', 'nagios.current_load.load5',
'nagios.total_processes.procs'}
assert all([expectedMetric for expectedMetric in expected if expectedMetric in get_keys("agent-integrations")])
util.wait_until(wait_for_metrics, 180, 3)
| [
"json.loads",
"re.compile",
"testinfra.utils.ansible_runner.AnsibleRunner",
"util.wait_until",
"json.dump"
]
| [((2256, 2295), 'util.wait_until', 'util.wait_until', (['assert_topology', '(30)', '(3)'], {}), '(assert_topology, 30, 3)\n', (2271, 2295), False, 'import util\n'), ((3514, 3555), 'util.wait_until', 'util.wait_until', (['wait_for_metrics', '(180)', '(3)'], {}), '(wait_for_metrics, 180, 3)\n', (3529, 3555), False, 'import util\n'), ((122, 174), 'testinfra.utils.ansible_runner.AnsibleRunner', 'AnsibleRunner', (["os.environ['MOLECULE_INVENTORY_FILE']"], {}), "(os.environ['MOLECULE_INVENTORY_FILE'])\n", (135, 174), False, 'from testinfra.utils.ansible_runner import AnsibleRunner\n'), ((1107, 1123), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1117, 1123), False, 'import json\n'), ((1273, 1323), 're.compile', 're.compile', (['"""urn:container:/agent-integrations:.*"""'], {}), "('urn:container:/agent-integrations:.*')\n", (1283, 1323), False, 'import re\n'), ((2508, 2524), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2518, 2524), False, 'import json\n'), ((724, 766), 'json.loads', 'json.loads', (["p['TopologyComponent']['data']"], {}), "(p['TopologyComponent']['data'])\n", (734, 766), False, 'import json\n'), ((1208, 1241), 'json.dump', 'json.dump', (['json_data', 'f'], {'indent': '(4)'}), '(json_data, f, indent=4)\n', (1217, 1241), False, 'import json\n'), ((2607, 2640), 'json.dump', 'json.dump', (['json_data', 'f'], {'indent': '(4)'}), '(json_data, f, indent=4)\n', (2616, 2640), False, 'import json\n')] |
import os
import sys
from contextlib import contextmanager
from invoke import UnexpectedExit
def git_commit(c, addstr, msg):
try:
c.run("git config --get user.email")
c.run("git config --get user.name")
except UnexpectedExit:
c.run('git config --local user.email "<EMAIL>"')
c.run('git config --local user.name "CI/CD"')
c.run(f'git add {addstr} && git commit -m "{msg}"')
@contextmanager
def cd_into(dirpath):
wd = os.getcwd()
os.chdir(dirpath)
sys.path.insert(0, str(dirpath))
yield
os.chdir(wd)
sys.path.pop(0)
| [
"os.chdir",
"sys.path.pop",
"os.getcwd"
]
| [((469, 480), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (478, 480), False, 'import os\n'), ((485, 502), 'os.chdir', 'os.chdir', (['dirpath'], {}), '(dirpath)\n', (493, 502), False, 'import os\n'), ((554, 566), 'os.chdir', 'os.chdir', (['wd'], {}), '(wd)\n', (562, 566), False, 'import os\n'), ((571, 586), 'sys.path.pop', 'sys.path.pop', (['(0)'], {}), '(0)\n', (583, 586), False, 'import sys\n')] |
from scipy.io import wavfile
import numpy as np
import pingouin as pg
import pandas as pd
_,data = wavfile.read('wav//ed//mp3baked.wav')
_,data1 = wavfile.read('wav//ing//ingeating.wav')
i= data.shape[0]-1
j = data1.shape[0]-1
index_1 = -1
index_2 = -1
try:
data.shape[1]
except IndexError:
data = data.reshape(data.shape[0],1)
try:
data1.shape[1]
except IndexError:
data1 = data1.reshape(data1.shape[0],1)
while True:
if data[i,0] !=0 and index_1==-1:
index_1 = i
pass
if data1[j,0] !=0 and index_2==-1:
index_2 = j
pass
if index_1!=-1 and index_2!=-1:
break
i-=1
j-=1
data = data[-index_1:,:]
data1 = data1[-index_2:,:]
data = data[-2000:,:]
data1= data1[-2000:,:]
x =pg.corr(x=data[:,0],y=data1[:,0])
print(x)
# print(data.tostring())
# print(data1.tostring())
# data = data[:,:]
# data1 = data1[:,:]
# data = data.reshape(data.shape[0],1)
# data1 = data1.reshape(data1.shape[0],1)
# data = data[-10000:,:]
# data1 = data1[-10000:,:]
# print(data1.shape[1])
# df = pd.DataFrame(data,data1)
# print(df.head())
# print(data1.shape)
# data = data[-5000:,:]
# data1 = data1[-5000:,:]
# #
# x =pg.corr(x=data[:,0],y=data1[:,0])
# print(x)
| [
"scipy.io.wavfile.read",
"pingouin.corr"
]
| [((106, 143), 'scipy.io.wavfile.read', 'wavfile.read', (['"""wav//ed//mp3baked.wav"""'], {}), "('wav//ed//mp3baked.wav')\n", (118, 143), False, 'from scipy.io import wavfile\n'), ((155, 194), 'scipy.io.wavfile.read', 'wavfile.read', (['"""wav//ing//ingeating.wav"""'], {}), "('wav//ing//ingeating.wav')\n", (167, 194), False, 'from scipy.io import wavfile\n'), ((800, 836), 'pingouin.corr', 'pg.corr', ([], {'x': 'data[:, 0]', 'y': 'data1[:, 0]'}), '(x=data[:, 0], y=data1[:, 0])\n', (807, 836), True, 'import pingouin as pg\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.