import scipy.integrate
from manimlib.imports import *
from from_3b1b.old.eoc.chapter1 import Thumbnail as Chapter1Thumbnail
from from_3b1b.old.eoc.chapter2 import Car, MoveCar, ShowSpeedometer, \
IncrementNumber, GraphCarTrajectory, SecantLineToTangentLine, \
VELOCITY_COLOR, TIME_COLOR, DISTANCE_COLOR
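#Model for the car's journey: v_func and s_func are the actual velocity and
#distance curves (note d/dt[4t^2 - t^3/3] = 8t - t^2 = t(8-t)), while
#v_rate_func and s_rate_func are the corresponding animation rate functions,
#with s_rate_func being the cumulative integral of v_rate_func rescaled to
#run from 0 to 1.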
def v_rate_func(t):
return 4*t - 4*(t**2)
def s_rate_func(t):
return 3*(t**2) - 2*(t**3)
def v_func(t):
return t*(8-t)
def s_func(t):
return 4*t**2 - (t**3)/3.
class Chapter8OpeningQuote(OpeningQuote, PiCreatureScene):
CONFIG = {
"quote" : [
" One should never try to prove anything that \\\\ is not ",
"almost obvious", ". "
],
"quote_arg_separator" : "",
"highlighted_quote_terms" : {
"almost obvious" : BLUE,
},
"author" : "Alexander Grothendieck"
}
def construct(self):
self.remove(self.pi_creature)
OpeningQuote.construct(self)
words_copy = self.quote.get_part_by_tex("obvious").copy()
author = self.author
author.save_state()
formula = self.get_formula()
formula.next_to(author, DOWN, MED_LARGE_BUFF)
formula.to_edge(LEFT)
self.revert_to_original_skipping_status()
self.play(FadeIn(self.pi_creature))
self.play(
author.next_to, self.pi_creature.get_corner(UP+LEFT), UP,
self.pi_creature.change_mode, "raise_right_hand"
)
self.wait(3)
self.play(
author.restore,
self.pi_creature.change_mode, "plain"
)
self.play(
words_copy.next_to, self.pi_creature,
LEFT, MED_SMALL_BUFF, UP,
self.pi_creature.change_mode, "thinking"
)
self.wait(2)
self.play(
Write(formula),
self.pi_creature.change_mode, "confused"
)
self.wait()
def get_formula(self):
result = TexMobject(
"{d(\\sin(\\theta)) \\over \\,", "d\\theta}", "=",
"\\lim_{", "h", " \\to 0}",
"{\\sin(\\theta+", "h", ") - \\sin(\\theta) \\over", " h}", "=",
"\\lim_{", "h", " \\to 0}",
"{\\big[ \\sin(\\theta)\\cos(", "h", ") + ",
"\\sin(", "h", ")\\cos(\\theta)\\big] - \\sin(\\theta) \\over", "h}",
"= \\dots"
)
result.set_color_by_tex("h", GREEN, substring = False)
result.set_color_by_tex("d\\theta", GREEN)
result.set_width(FRAME_WIDTH - 2*MED_SMALL_BUFF)
return result
class ThisVideo(TeacherStudentsScene):
def construct(self):
series = VideoSeries()
series.to_edge(UP)
this_video = series[7]
this_video.save_state()
next_video = series[8]
deriv, integral, v_t, dt, equals, v_T = formula = TexMobject(
"\\frac{d}{dT}",
"\\int_0^T", "v(t)", "\\,dt",
"=", "v(T)"
)
formula.set_color_by_tex("v", VELOCITY_COLOR)
formula.next_to(self.teacher.get_corner(UP+LEFT), UP, MED_LARGE_BUFF)
self.play(FadeIn(series, lag_ratio = 0.5))
self.play(
this_video.shift, this_video.get_height()*DOWN/2,
this_video.set_color, YELLOW,
self.teacher.change_mode, "raise_right_hand",
)
self.play(Write(VGroup(integral, v_t, dt)))
self.change_student_modes(*["erm"]*3)
self.wait()
self.play(Write(VGroup(deriv, equals, v_T)), )
self.change_student_modes(*["confused"]*3)
self.wait(3)
self.play(
this_video.restore,
next_video.shift, next_video.get_height()*DOWN/2,
next_video.set_color, YELLOW,
integral[0].copy().next_to, next_video, DOWN, MED_LARGE_BUFF,
FadeOut(formula),
*it.chain(*[
[pi.change_mode, "plain", pi.look_at, next_video]
for pi in self.pi_creatures
])
)
self.wait(2)
class InCarRestrictedView(ShowSpeedometer):
CONFIG = {
"speedometer_title_text" : "Your view",
}
def construct(self):
car = Car()
car.move_to(self.point_A)
self.car = car
car.randy.save_state()
Transform(car.randy, Randolph()).update(1)
car.randy.next_to(car, RIGHT, MED_LARGE_BUFF)
car.randy.look_at(car)
window = car[1][6].copy()
window.is_subpath = False
window.set_fill(BLACK, opacity = 0.75)
window.set_stroke(width = 0)
square = Square(stroke_color = WHITE)
square.replace(VGroup(self.speedometer, self.speedometer_title))
square.scale_in_place(1.5)
square.pointwise_become_partial(square, 0.25, 0.75)
time_label = TextMobject("Time (in seconds):", "0")
time_label.shift(2*UP)
dots = VGroup(*list(map(Dot, [self.point_A, self.point_B])))
line = Line(*dots, buff = 0)
line.set_color(DISTANCE_COLOR)
brace = Brace(line, DOWN)
brace_text = brace.get_text("Distance traveled?")
#Sit in car
self.add(car)
self.play(Blink(car.randy))
self.play(car.randy.restore, Animation(car))
self.play(ShowCreation(window, run_time = 2))
self.wait()
#Show speedometer
self.introduce_added_mobjects()
self.play(ShowCreation(square))
self.wait()
#Travel
self.play(FadeIn(time_label))
self.play(
MoveCar(car, self.point_B, rate_func = s_rate_func),
IncrementNumber(time_label[1], run_time = 8),
MaintainPositionRelativeTo(window, car),
*self.get_added_movement_anims(
rate_func = v_rate_func,
radians = -(16.0/70)*4*np.pi/3
),
run_time = 8
)
eight = TexMobject("8").move_to(time_label[1])
self.play(Transform(
time_label[1], eight,
rate_func = squish_rate_func(smooth, 0, 0.5)
))
self.wait()
#Ask about distance
self.play(*list(map(ShowCreation, dots)))
self.play(ShowCreation(line))
self.play(
GrowFromCenter(brace),
Write(brace_text)
)
self.wait(2)
class GraphDistanceVsTime(GraphCarTrajectory):
CONFIG = {
"y_min" : 0,
"y_max" : 100,
"y_axis_height" : 6,
"y_tick_frequency" : 10,
"y_labeled_nums" : list(range(10, 100, 10)),
"y_axis_label" : "Distance (in meters)",
"x_min" : -1,
"x_max" : 9,
"x_axis_width" : 9,
"x_tick_frequency" : 1,
"x_leftmost_tick" : None, #Change if different from x_min
"x_labeled_nums" : list(range(1, 9)),
"x_axis_label" : "$t$",
"time_of_journey" : 8,
"care_movement_rate_func" : s_rate_func,
"num_graph_anchor_points" : 100
}
def construct(self):
self.setup_axes()
graph = self.get_graph(
s_func,
color = DISTANCE_COLOR,
x_min = 0,
x_max = 8,
)
origin = self.coords_to_point(0, 0)
graph_label = self.get_graph_label(
graph, "s(t)", color = DISTANCE_COLOR
)
self.introduce_graph(graph, origin)
class PlotVelocity(GraphScene):
CONFIG = {
"x_min" : -1,
"x_max" : 9,
"x_axis_width" : 9,
"x_tick_frequency" : 1,
"x_labeled_nums" : list(range(1, 9)),
"x_axis_label" : "$t$",
"y_min" : 0,
"y_max" : 25,
"y_axis_height" : 6,
"y_tick_frequency" : 5,
"y_labeled_nums" : list(range(5, 30, 5)),
"y_axis_label" : "Velocity in $\\frac{\\text{meters}}{\\text{second}}$",
"num_graph_anchor_points" : 50,
}
def construct(self):
self.setup_axes()
self.add_speedometer()
self.plot_points()
self.draw_curve()
def add_speedometer(self):
speedometer = Speedometer()
speedometer.next_to(self.y_axis_label_mob, RIGHT, LARGE_BUFF)
speedometer.to_edge(UP)
self.play(DrawBorderThenFill(
speedometer,
lag_ratio = 0.5,
rate_func=linear,
))
self.speedometer = speedometer
def plot_points(self):
times = list(range(0, 9))
points = [
self.coords_to_point(t, v_func(t))
for t in times
]
dots = VGroup(*[Dot(p, radius = 0.07) for p in points])
dots.set_color(VELOCITY_COLOR)
pre_dots = VGroup()
dot_intro_anims = []
for time, dot in zip(times, dots):
pre_dot = dot.copy()
self.speedometer.move_needle_to_velocity(v_func(time))
pre_dot.move_to(self.speedometer.get_needle_tip())
pre_dot.set_fill(opacity = 0)
pre_dots.add(pre_dot)
dot_intro_anims += [
ApplyMethod(
pre_dot.set_fill, YELLOW, 1,
run_time = 0.1,
),
ReplacementTransform(
pre_dot, dot,
run_time = 0.9,
)
]
self.speedometer.move_needle_to_velocity(0)
self.play(
Succession(
*dot_intro_anims, rate_func=linear
),
ApplyMethod(
self.speedometer.move_needle_to_velocity,
v_func(4),
rate_func = squish_rate_func(
lambda t : 1-v_rate_func(t),
0, 0.95,
)
),
run_time = 5
)
self.wait()
def draw_curve(self):
graph, label = self.get_v_graph_and_label()
self.revert_to_original_skipping_status()
self.play(ShowCreation(graph, run_time = 3))
self.play(Write(label))
self.wait()
##
def get_v_graph_and_label(self):
graph = self.get_graph(
v_func,
x_min = 0,
x_max = 8,
color = VELOCITY_COLOR
)
graph_label = TexMobject("v(t)", "=t(8-t)")
graph_label.set_color_by_tex("v(t)", VELOCITY_COLOR)
graph_label.next_to(
graph.point_from_proportion(7./8.),
UP+RIGHT
)
self.v_graph = graph
self.v_graph_label = graph_label
return graph, graph_label
class Chapter2Wrapper(Scene):
CONFIG = {
"title" : "Chapter 2: The paradox of the derivative",
}
def construct(self):
title = TextMobject(self.title)
title.to_edge(UP)
rect = Rectangle(width = 16, height = 9, color = WHITE)
rect.set_height(1.5*FRAME_Y_RADIUS)
rect.next_to(title, DOWN)
self.add(title)
self.play(ShowCreation(rect))
self.wait(3)
class GivenDistanceWhatIsVelocity(GraphCarTrajectory):
def construct(self):
self.force_skipping()
self.setup_axes()
graph = self.graph_sigmoid_trajectory_function()
origin = self.coords_to_point(0, 0)
self.introduce_graph(graph, origin)
self.comment_on_slope(graph, origin)
self.revert_to_original_skipping_status()
self.show_velocity_graph()
class DerivativeOfDistance(SecantLineToTangentLine):
def construct(self):
self.setup_axes()
self.remove(self.y_axis_label_mob, self.x_axis_label_mob)
self.add_derivative_definition(self.y_axis_label_mob)
self.add_graph()
self.draw_axes()
self.show_tangent_line()
class AskAboutAntiderivative(PlotVelocity):
def construct(self):
self.setup_axes()
self.add_v_graph()
self.write_s_formula()
self.write_antiderivative()
def add_v_graph(self):
graph, label = self.get_v_graph_and_label()
self.play(ShowCreation(graph))
self.play(Write(label))
self.graph = graph
self.graph_label = label
def write_s_formula(self):
ds_dt = TexMobject("ds", "\\over\\,", "dt")
ds_dt.set_color_by_tex("ds", DISTANCE_COLOR)
ds_dt.set_color_by_tex("dt", TIME_COLOR)
ds_dt.next_to(self.graph_label, UP, LARGE_BUFF)
v_t = self.graph_label.get_part_by_tex("v(t)")
arrow = Arrow(
ds_dt.get_bottom(), v_t.get_top(),
color = WHITE,
)
self.play(
Write(ds_dt, run_time = 2),
ShowCreation(arrow)
)
self.wait()
def write_antiderivative(self):
randy = Randolph()
randy.to_corner(DOWN+LEFT)
randy.shift(2*RIGHT)
words = TexMobject(
"{d(", "???", ") \\over \\,", "dt}", "=", "t(8-t)"
)
words.set_color_by_tex("t(8-t)", VELOCITY_COLOR)
words.set_color_by_tex("???", DISTANCE_COLOR)
words.set_color_by_tex("dt", TIME_COLOR)
words.scale(0.7)
self.play(FadeIn(randy))
self.play(PiCreatureSays(
randy, words,
target_mode = "confused",
bubble_kwargs = {"height" : 3, "width" : 4},
))
self.play(Blink(randy))
self.wait()
class Antiderivative(PiCreatureScene):
def construct(self):
functions = self.get_functions("t^2", "2t")
alt_functions = self.get_functions("???", "t(8-t)")
top_arc, bottom_arc = arcs = self.get_arcs(functions)
derivative, antiderivative = self.get_arc_labels(arcs)
group = VGroup(functions, arcs, derivative, antiderivative)
self.add(functions, top_arc, derivative)
self.wait()
self.play(
ShowCreation(bottom_arc),
Write(antiderivative),
self.pi_creature.change_mode, "raise_right_hand"
)
self.wait(2)
for pair in reversed(list(zip(functions, alt_functions))):
self.play(
Transform(*pair),
self.pi_creature.change_mode, "pondering"
)
self.wait(2)
self.pi_creature_says(
"But first!",
target_mode = "surprised",
look_at_arg = 50*OUT,
added_anims = [group.to_edge, LEFT],
run_time = 1,
)
self.wait()
def get_functions(self, left_tex, right_tex):
left = TexMobject(left_tex)
left.shift(2*LEFT)
left.set_color(DISTANCE_COLOR)
right = TexMobject(right_tex)
right.shift(2*RIGHT)
right.set_color(VELOCITY_COLOR)
result = VGroup(left, right)
result.shift(UP)
return result
def get_arcs(self, functions):
f1, f2 = functions
top_line = Line(f1.get_corner(UP+RIGHT), f2.get_corner(UP+LEFT))
bottom_line = Line(f1.get_corner(DOWN+RIGHT), f2.get_corner(DOWN+LEFT))
top_arc = Arc(start_angle = 5*np.pi/6, angle = -2*np.pi/3)
bottom_arc = top_arc.copy()
bottom_arc.rotate(np.pi)
arcs = VGroup(top_arc, bottom_arc)
arcs.set_width(top_line.get_width())
for arc in arcs:
arc.add_tip()
top_arc.next_to(top_line, UP)
bottom_arc.next_to(bottom_line, DOWN)
bottom_arc.set_color(MAROON_B)
return arcs
def get_arc_labels(self, arcs):
top_arc, bottom_arc = arcs
derivative = TextMobject("Derivative")
derivative.next_to(top_arc, UP)
antiderivative = TextMobject("``Antiderivative''")
antiderivative.next_to(bottom_arc, DOWN)
antiderivative.set_color(bottom_arc.get_color())
return VGroup(derivative, antiderivative)
class AreaUnderVGraph(PlotVelocity):
def construct(self):
self.setup_axes()
self.add(*self.get_v_graph_and_label())
self.show_rects()
def show_rects(self):
rect_list = self.get_riemann_rectangles_list(
self.v_graph, 7,
max_dx = 1.0,
x_min = 0,
x_max = 8,
)
flat_graph = self.get_graph(lambda t : 0)
rects = self.get_riemann_rectangles(
flat_graph, x_min = 0, x_max = 8, dx = 1.0
)
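#Before each Transform to a finer approximation, match submobject counts and
#hide every other rectangle in the current set, so each wide rectangle
#appears to split into two thinner ones.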
for new_rects in rect_list:
new_rects.set_fill(opacity = 0.8)
rects.align_submobjects(new_rects)
for alt_rect in rects[::2]:
alt_rect.set_fill(opacity = 0)
self.play(Transform(
rects, new_rects,
run_time = 2,
lag_ratio = 0.5
))
self.wait()
class ConstantVelocityCar(Scene):
def construct(self):
car = Car()
car.move_to(5*LEFT + 3*DOWN)
self.add(car)
self.wait()
self.play(MoveCar(
car, 7*RIGHT+3*DOWN,
run_time = 5,
rate_func=linear,
))
self.wait()
class ConstantVelocityPlot(PlotVelocity):
CONFIG = {
"x_axis_label" : "Time",
"units_of_area_color" : BLUE_E,
}
def construct(self):
self.setup_axes()
self.x_axis_label_mob.shift(DOWN)
self.draw_graph()
self.show_product()
self.comment_on_area_weirdness()
self.note_units()
def draw_graph(self):
graph = self.get_graph(
lambda t : 10,
x_min = 0,
x_max = 8,
color = VELOCITY_COLOR
)
self.play(ShowCreation(graph, rate_func=linear, run_time = 3))
self.wait()
self.graph = graph
def show_product(self):
rect = Rectangle(
stroke_width = 0,
fill_color = DISTANCE_COLOR,
fill_opacity = 0.5
)
rect.replace(
VGroup(self.graph, VectorizedPoint(self.graph_origin)),
stretch = True
)
right_brace = Brace(rect, RIGHT)
top_brace = Brace(rect, UP)
v_label = right_brace.get_text(
"$10 \\frac{\\text{meters}}{\\text{second}}$",
)
v_label.set_color(VELOCITY_COLOR)
t_label = top_brace.get_text(
"8 seconds"
)
t_label.set_color(TIME_COLOR)
s_label = TexMobject("10", "\\times", "8", "\\text{ meters}")
s_label.set_color_by_tex("10", VELOCITY_COLOR)
s_label.set_color_by_tex("8", TIME_COLOR)
s_label.move_to(rect)
self.play(
GrowFromCenter(right_brace),
Write(v_label),
)
self.play(
GrowFromCenter(top_brace),
Write(t_label),
)
self.play(
FadeIn(rect),
Write(s_label),
Animation(self.graph)
)
self.wait(2)
self.area_rect = rect
self.s_label = s_label
def comment_on_area_weirdness(self):
randy = Randolph()
randy.to_corner(DOWN+LEFT)
bubble = randy.get_bubble(
"Distance \\\\ is area?",
bubble_class = ThoughtBubble,
height = 3,
width = 4,
fill_opacity = 1,
)
bubble.content.scale_in_place(0.8)
bubble.content.shift(SMALL_BUFF*UP)
VGroup(bubble[-1], bubble.content).shift(1.5*LEFT)
self.play(FadeIn(randy))
self.play(randy.change_mode, "pondering")
self.play(
self.area_rect.set_color, YELLOW,
*list(map(Animation, self.get_mobjects())),
rate_func = there_and_back
)
self.play(Blink(randy))
self.play(
randy.change_mode, "confused",
randy.look_at, randy.bubble,
ShowCreation(bubble),
Write(bubble.content),
)
self.wait()
self.play(Blink(randy))
self.wait()
self.play(
randy.change_mode, "pondering",
FadeOut(bubble),
FadeOut(bubble.content),
)
self.randy = randy
def note_units(self):
x_line, y_line = lines = VGroup(*[
axis.copy()
for axis in (self.x_axis, self.y_axis)
])
lines.set_color(TIME_COLOR)
square = Square(
stroke_color = BLACK,
stroke_width = 1,
fill_color = self.units_of_area_color,
fill_opacity = 1,
)
square.replace(
VGroup(*[
VectorizedPoint(self.coords_to_point(i, i))
for i in (0, 1)
]),
stretch = True
)
units_of_area = VGroup(*[
square.copy().move_to(
self.coords_to_point(x, y),
DOWN+LEFT
)
for x in range(8)
for y in range(10)
])
self.play(ShowCreation(x_line))
self.play(Indicate(self.x_axis_label_mob))
self.play(FadeOut(x_line))
self.play(
ShowCreation(y_line),
self.randy.look_at, self.y_axis_label_mob
)
self.play(Indicate(self.y_axis_label_mob))
self.play(FadeOut(y_line))
for FadeClass in FadeIn, FadeOut:
self.play(
FadeClass(
units_of_area,
lag_ratio = 0.5,
run_time = 3
),
Animation(self.s_label),
self.randy.look_at, self.area_rect
)
self.play(Blink(self.randy))
self.wait()
class PiecewiseConstantCar(Scene):
def construct(self):
car = Car()
start_point = 5*LEFT
car.move_to(start_point)
self.add(car)
self.wait()
for shift in 2, 6, 12:
car.randy.rotate_in_place(np.pi/8)
anim = MoveCar(
car, start_point+shift*RIGHT,
rate_func=linear
)
anim.target_mobject[0].rotate_in_place(-np.pi/8)
# for mob in anim.starting_mobject, anim.mobject:
# mob.randy.rotate_in_place(np.pi/6)
self.play(anim)
self.wait()
class PiecewiseConstantPlot(PlotVelocity):
CONFIG = {
"y_axis_label" : "",
"min_graph_proportion" : 0.1,
"max_graph_proportion" : 0.8,
"num_riemann_approximations" : 7,
"riemann_rect_fill_opacity" : 0.75,
"tick_size" : 0.2,
}
def construct(self):
self.setup_graph()
self.always_changing()
self.show_piecewise_constant_graph()
self.compute_distance_on_each_interval()
self.approximate_original_curve()
self.revert_to_specific_approximation()
self.show_specific_rectangle()
self.show_v_dt_for_all_rectangles()
self.write_integral_symbol()
self.roles_of_dt()
self.what_does_sum_approach()
self.label_integral()
def setup_graph(self):
self.setup_axes()
self.add(*self.get_v_graph_and_label())
def always_changing(self):
dot = Dot()
arrow = Arrow(LEFT, RIGHT)
words = TextMobject("Always changing")
group = VGroup(dot, arrow, words)
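#Updater: as alpha runs from 0 to 1, slide the dot along the velocity curve
#and keep the arrow pointing at it from the normal direction (tangent angle
#plus pi/2), with the "Always changing" label riding at the arrow's tail.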
def update_group(group, alpha):
dot, arrow, words = group
prop = interpolate(
self.min_graph_proportion,
self.max_graph_proportion,
alpha
)
graph_point = self.v_graph.point_from_proportion(prop)
dot.move_to(graph_point)
x_val = self.x_axis.point_to_number(graph_point)
angle = self.angle_of_tangent(x_val, self.v_graph)
angle += np.pi/2
vect = rotate_vector(RIGHT, angle)
arrow.rotate(angle - arrow.get_angle() + np.pi)
arrow.shift(
graph_point + MED_SMALL_BUFF*vect - arrow.get_end()
)
words.next_to(arrow.get_start(), UP)
return group
update_group(group, 0)
self.play(
Write(words),
ShowCreation(arrow),
DrawBorderThenFill(dot),
run_time = 1
)
self.play(UpdateFromAlphaFunc(
group, update_group,
rate_func = there_and_back,
run_time = 5
))
self.wait()
self.play(FadeOut(group))
def show_piecewise_constant_graph(self):
pw_constant_graph = self.get_pw_constant_graph()
alt_lines = [
line.copy().set_color(YELLOW)
for line in pw_constant_graph[:4]
]
for line in alt_lines:
line.start_dot = Dot(line.get_start())
line.end_dot = Dot(line.get_end())
VGroup(line.start_dot, line.end_dot).set_color(line.get_color())
line = alt_lines[0]
faders = [self.v_graph, self.v_graph_label]
for mob in faders:
mob.save_state()
mob.generate_target()
mob.target.fade(0.7)
self.play(*list(map(MoveToTarget, faders)))
self.play(ShowCreation(pw_constant_graph, run_time = 2))
self.wait()
self.play(ShowCreation(line))
self.wait()
for new_line in alt_lines[1:]:
for mob in line.end_dot, new_line.start_dot, new_line:
self.play(Transform(
line, mob,
run_time = 1./3
))
self.remove(line)
self.add(new_line)
self.wait(2)
line = new_line
self.play(FadeOut(line))
self.pw_constant_graph = pw_constant_graph
def compute_distance_on_each_interval(self):
rect_list = self.get_riemann_rectangles_list(
self.v_graph, self.num_riemann_approximations,
max_dx = 1,
x_min = 0,
x_max = 8,
)
for rects in rect_list:
rects.set_fill(opacity = self.riemann_rect_fill_opacity)
flat_rects = self.get_riemann_rectangles(
self.get_graph(lambda t : 0),
x_min = 0, x_max = 8, dx = 1
)
rects = rect_list[0]
rect = rects[1]
flat_rects.submobjects[1] = rect.copy()
right_brace = Brace(rect, RIGHT)
top_brace = Brace(rect, UP)
right_brace.label = right_brace.get_text("$7\\frac{\\text{m}}{\\text{s}}$")
top_brace.label = top_brace.get_text("$1$s")
self.play(FadeIn(rect))
for brace in right_brace, top_brace:
self.play(
GrowFromCenter(brace),
Write(brace.label, run_time = 1),
)
brace.add(brace.label)
self.wait()
self.play(
ReplacementTransform(
flat_rects, rects,
run_time = 2,
lag_ratio = 0.5,
),
Animation(right_brace)
)
self.play(*list(map(FadeOut, [top_brace, right_brace])))
self.wait()
self.rects = rects
self.rect_list = rect_list
def approximate_original_curve(self):
rects = self.rects
self.play(
FadeOut(self.pw_constant_graph),
*[
m.restore
for m in (self.v_graph, self.v_graph_label)
]+[Animation(self.rects)]
)
for new_rects in self.rect_list[1:]:
self.transform_between_riemann_rects(rects, new_rects)
self.wait()
def revert_to_specific_approximation(self):
rects = self.rects
rects.save_state()
target_rects = self.rect_list[2]
target_rects.set_fill(opacity = 1)
ticks = self.get_ticks(target_rects)
tick_pair = VGroup(*ticks[4:6])
brace = Brace(tick_pair, DOWN, buff = 0)
dt_label = brace.get_text("$dt$", buff = SMALL_BUFF)
example_text = TextMobject(
"For example, \\\\",
"$dt$", "$=0.25$"
)
example_text.to_corner(UP+RIGHT)
example_text.set_color_by_tex("dt", YELLOW)
self.play(ReplacementTransform(
rects, target_rects,
run_time = 2,
lag_ratio = 0.5
))
rects.restore()
self.wait()
self.play(
ShowCreation(ticks),
FadeOut(self.x_axis.numbers)
)
self.play(
GrowFromCenter(brace),
Write(dt_label)
)
self.wait()
self.play(
FadeIn(
example_text,
run_time = 2,
lag_ratio = 0.5,
),
ReplacementTransform(
dt_label.copy(),
example_text.get_part_by_tex("dt")
)
)
self.wait()
self.rects = rects = target_rects
self.ticks = ticks
self.dt_brace = brace
self.dt_label = dt_label
self.dt_example_text = example_text
def show_specific_rectangle(self):
rects = self.rects
rect = rects[4].copy()
rect_top = Line(
rect.get_corner(UP+LEFT),
rect.get_corner(UP+RIGHT),
color = self.v_graph.get_color()
)
t_vals = [1, 1.25]
t_labels = VGroup(*[
TexMobject("t=%s"%str(t))
for t in t_vals
])
t_labels.scale(0.7)
t_labels.next_to(rect, DOWN)
for vect, label in zip([LEFT, RIGHT], t_labels):
label.shift(1.5*vect)
label.add(Arrow(
label.get_edge_center(-vect),
rect.get_corner(DOWN+vect),
buff = SMALL_BUFF,
tip_length = 0.15,
color = WHITE
))
v_lines = VGroup()
h_lines = VGroup()
height_labels = VGroup()
for t in t_vals:
v_line = self.get_vertical_line_to_graph(
t, self.v_graph,
color = YELLOW
)
y_axis_point = self.graph_origin[0]*RIGHT
y_axis_point += v_line.get_end()[1]*UP
h_line = DashedLine(v_line.get_end(), y_axis_point)
label = TexMobject("%.1f"%v_func(t))
label.scale(0.5)
label.next_to(h_line, LEFT, SMALL_BUFF)
v_lines.add(v_line)
h_lines.add(h_line)
height_labels.add(label)
circle = Circle(radius = 0.25, color = WHITE)
circle.move_to(rect.get_top())
self.play(
rects.set_fill, None, 0.25,
Animation(rect)
)
self.wait()
for label in t_labels:
self.play(FadeIn(label))
self.wait()
for v_line, h_line, label in zip(v_lines, h_lines, height_labels):
self.play(ShowCreation(v_line))
self.play(ShowCreation(h_line))
self.play(Write(label, run_time = 1))
self.wait()
self.wait()
t_label_copy = t_labels[0].copy()
self.play(
t_label_copy.scale, 1./0.7,
t_label_copy.next_to, self.v_graph_label, DOWN+LEFT, 0
)
self.wait()
self.play(FadeOut(t_label_copy))
self.wait()
self.play(ShowCreation(circle))
self.play(ShowCreation(rect_top))
self.play(FadeOut(circle))
rect.add(rect_top)
self.wait()
for x in range(2):
self.play(
rect.stretch_to_fit_height, v_lines[1].get_height(),
rect.move_to, rect.get_bottom(), DOWN,
Animation(v_lines),
run_time = 4,
rate_func = there_and_back
)
self.play(*list(map(FadeOut, [
group[1]
for group in (v_lines, h_lines, height_labels)
])))
self.play(
v_lines[0].set_color, RED,
rate_func = there_and_back,
)
self.wait()
area = TextMobject(
"7$\\frac{\\text{m}}{\\text{s}}$",
"$\\times$",
"0.25s",
"=",
"1.75m"
)
area.next_to(rect, RIGHT, LARGE_BUFF)
arrow = Arrow(
area.get_left(), rect.get_center(),
buff = 0,
color = WHITE
)
area.shift(SMALL_BUFF*RIGHT)
self.play(
Write(area),
ShowCreation(arrow)
)
self.wait(2)
self.play(*list(map(FadeOut, [
area, arrow,
v_lines[0], h_lines[0], height_labels[0],
rect, t_labels
])))
def show_v_dt_for_all_rectangles(self):
dt_brace_group = VGroup(self.dt_brace, self.dt_label)
rects_subset = self.rects[10:20]
last_rect = None
for rect in rects_subset:
brace = Brace(rect, LEFT, buff = 0)
v_t = TexMobject("v(t)")
v_t.next_to(brace, LEFT, SMALL_BUFF)
anims = [
rect.set_fill, None, 1,
dt_brace_group.next_to, rect, DOWN, SMALL_BUFF
]
if last_rect is not None:
anims += [
last_rect.set_fill, None, 0.25,
ReplacementTransform(last_brace, brace),
ReplacementTransform(last_v_t, v_t),
]
else:
anims += [
GrowFromCenter(brace),
Write(v_t)
]
self.play(*anims)
self.wait()
last_rect = rect
last_brace = brace
last_v_t = v_t
self.v_t = last_v_t
self.v_t_brace = last_brace
def write_integral_symbol(self):
integral = TexMobject(
"\\int", "^8", "_0", "v(t)", "\\,dt"
)
integral.to_corner(UP+RIGHT)
int_copy = integral.get_part_by_tex("int").copy()
bounds = list(map(integral.get_part_by_tex, ["0", "8"]))
sum_word = TextMobject("``Sum''")
sum_word.next_to(integral, DOWN, MED_LARGE_BUFF, LEFT)
alt_sum_word = sum_word.copy()
int_symbol = TexMobject("\\int")
int_symbol.replace(alt_sum_word[1], dim_to_match = 1)
alt_sum_word.submobjects[1] = int_symbol
self.play(FadeOut(self.dt_example_text))
self.play(Write(integral.get_part_by_tex("int")))
self.wait()
self.play(Transform(int_copy, int_symbol))
self.play(Write(alt_sum_word), Animation(int_copy))
self.remove(int_copy)
self.play(ReplacementTransform(alt_sum_word, sum_word))
self.wait()
for bound in bounds:
self.play(Write(bound))
self.wait()
for bound, num in zip(bounds, [0, 8]):
bound_copy = bound.copy()
point = self.coords_to_point(num, 0)
self.play(
bound_copy.scale, 1.5,
bound_copy.next_to, point, DOWN, MED_LARGE_BUFF
)
self.play(ApplyWave(self.ticks, direction = UP))
self.wait()
for mob, tex in (self.v_t, "v(t)"), (self.dt_label, "dt"):
self.play(ReplacementTransform(
mob.copy().set_color(YELLOW),
integral.get_part_by_tex(tex),
run_time = 2
))
self.wait()
self.integral = integral
self.sum_word = sum_word
def roles_of_dt(self):
rects = self.rects
next_rects = self.rect_list[3]
morty = Mortimer().flip()
morty.to_corner(DOWN+LEFT)
int_dt = self.integral.get_part_by_tex("dt")
dt_copy = int_dt.copy()
self.play(FadeIn(morty))
self.play(
morty.change_mode, "raise_right_hand",
morty.look, UP+RIGHT,
dt_copy.next_to, morty.get_corner(UP+RIGHT), UP,
dt_copy.set_color, YELLOW
)
self.play(Blink(morty))
self.play(
ReplacementTransform(
dt_copy.copy(), int_dt,
run_time = 2
),
morty.look_at, int_dt
)
self.wait(2)
self.play(
ReplacementTransform(dt_copy.copy(), self.dt_label),
morty.look_at, self.dt_label
)
self.play(*[
ApplyMethod(
tick.shift, tick.get_height()*UP/2,
run_time = 2,
rate_func = squish_rate_func(
there_and_back,
alpha, alpha+0.2,
)
)
for tick, alpha in zip(
self.ticks,
np.linspace(0, 0.8, len(self.ticks))
)
])
self.wait()
#Shrink dt just a bit
self.play(
morty.change_mode, "pondering",
rects.set_fill, None, 0.75,
*list(map(FadeOut, [
dt_copy, self.v_t, self.v_t_brace
]))
)
rects.align_submobjects(next_rects)
for every_other_rect in rects[::2]:
every_other_rect.set_fill(opacity = 0)
self.play(
self.dt_brace.stretch, 0.5, 0,
self.dt_brace.move_to, self.dt_brace, LEFT,
ReplacementTransform(
rects, next_rects,
run_time = 2,
lag_ratio = 0.5
),
Transform(
self.ticks, self.get_ticks(next_rects),
run_time = 2,
lag_ratio = 0.5,
),
)
self.rects = rects = next_rects
self.wait()
self.play(Blink(morty))
self.play(*[
ApplyFunction(
lambda r : r.shift(0.2*UP).set_fill(None, 1),
rect,
run_time = 2,
rate_func = squish_rate_func(
there_and_back,
alpha, alpha+0.2,
)
)
for rect, alpha in zip(
rects,
np.linspace(0, 0.8, len(rects))
)
]+[
morty.change_mode, "thinking",
])
self.wait()
self.morty = morty
def what_does_sum_approach(self):
morty = self.morty
rects = self.rects
cross = TexMobject("\\times")
cross.replace(self.sum_word, stretch = True)
cross.set_color(RED)
brace = Brace(self.integral, DOWN)
dt_to_0 = brace.get_text("$dt \\to 0$")
distance_words = TextMobject(
"Area", "= Distance traveled"
)
distance_words.next_to(rects, UP)
arrow = Arrow(
distance_words[0].get_bottom(),
rects.get_center(),
color = WHITE
)
self.play(PiCreatureSays(
morty, "Why not $\\Sigma$?",
target_mode = "sassy"
))
self.play(Blink(morty))
self.wait()
self.play(Write(cross))
self.wait()
self.play(
RemovePiCreatureBubble(morty, target_mode = "plain"),
*list(map(FadeOut, [
cross, self.sum_word, self.ticks,
self.dt_brace, self.dt_label,
]))
)
self.play(FadeIn(brace), FadeIn(dt_to_0))
for new_rects in self.rect_list[4:]:
rects.align_submobjects(new_rects)
for every_other_rect in rects[::2]:
every_other_rect.set_fill(opacity = 0)
self.play(
Transform(
rects, new_rects,
run_time = 2,
lag_ratio = 0.5
),
morty.look_at, rects,
)
self.wait()
self.play(
Write(distance_words),
ShowCreation(arrow),
morty.change_mode, "pondering",
morty.look_at, distance_words,
)
self.wait()
self.play(Blink(morty))
self.wait()
self.area_arrow = arrow
def label_integral(self):
words = TextMobject("``Integral of $v(t)$''")
words.to_edge(UP)
arrow = Arrow(
words.get_right(),
self.integral.get_left()
)
self.play(Indicate(self.integral))
self.play(Write(words, run_time = 2))
self.play(ShowCreation(arrow))
self.wait()
self.play(*[
ApplyFunction(
lambda r : r.shift(0.2*UP).set_fill(None, 1),
rect,
run_time = 3,
rate_func = squish_rate_func(
there_and_back,
alpha, alpha+0.2,
)
)
for rect, alpha in zip(
self.rects,
np.linspace(0, 0.8, len(self.rects))
)
]+[
Animation(self.area_arrow),
self.morty.change_mode, "happy",
self.morty.look_at, self.rects,
])
self.wait()
#####
def get_pw_constant_graph(self):
result = VGroup()
for left_x in range(8):
xs = [left_x, left_x+1]
y = self.v_graph.underlying_function(left_x)
line = Line(*[
self.coords_to_point(x, y)
for x in xs
])
line.set_color(self.v_graph.get_color())
result.add(line)
return result
def get_ticks(self, rects):
ticks = VGroup(*[
Line(
point+self.tick_size*UP/2,
point+self.tick_size*DOWN/2
)
for t in np.linspace(0, 8, len(rects)+1)
for point in [self.coords_to_point(t, 0)]
])
ticks.set_color(YELLOW)
return ticks
class DontKnowHowToHandleNonConstant(TeacherStudentsScene):
def construct(self):
self.play(*[
ApplyMethod(pi.change, "maybe", UP)
for pi in self.get_pi_creatures()
])
self.wait(3)
class CarJourneyApproximation(Scene):
CONFIG = {
"n_jumps" : 5,
"bottom_words" : "Approximated motion (5 jumps)",
}
def construct(self):
points = [5*LEFT + v for v in (UP, 2*DOWN)]
cars = [Car().move_to(point) for point in points]
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
words = [
TextMobject("Real motion (smooth)").shift(3*UP),
TextMobject(self.bottom_words).shift(0.5*DOWN),
]
words[1].set_color(GREEN)
self.add(h_line, *cars + words)
self.wait()
self.play(*[
MoveCar(
car, point+10*RIGHT,
run_time = 5,
rate_func = rf
)
for car, point, rf in zip(cars, points, [
s_rate_func,
self.get_approximated_rate_func(self.n_jumps)
])
])
self.wait()
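#Rate function for the "jumpy" car: sample the true velocity rate function at
#n constant steps, then numerically integrate with scipy.integrate.quad and
#normalize, giving a piecewise-linear position rate function from 0 to 1.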
def get_approximated_rate_func(self, n):
new_v_rate_func = lambda t : v_rate_func(np.floor(t*n)/n)
max_integral, err = scipy.integrate.quad(
v_rate_func, 0, 1
)
def result(t):
integral, err = scipy.integrate.quad(new_v_rate_func, 0, t)
return integral/max_integral
return result
class LessWrongCarJourneyApproximation(CarJourneyApproximation):
CONFIG = {
"n_jumps" : 20,
"bottom_words" : "Better approximation (20 jumps)",
}
class TellMeThatsNotSurprising(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"Tell me that's \\\\ not surprising!",
target_mode = "hooray",
run_time = 1
)
self.wait(3)
class HowDoesThisHelp(TeacherStudentsScene):
def construct(self):
self.student_says(
"How does this help\\textinterrobang",
target_mode = "angry",
run_time = 1
)
self.change_student_modes(
"confused", "angry", "confused",
)
self.wait(2)
self.teacher_says(
"You're right.",
target_mode = "shruggie",
run_time = 1
)
self.change_student_modes(*["sassy"]*3)
self.wait(2)
class AreaUnderACurve(GraphScene):
CONFIG = {
"y_max" : 4,
"y_min" : 0,
"num_iterations" : 7
}
def construct(self):
self.setup_axes()
graph = self.get_graph(self.func)
rect_list = self.get_riemann_rectangles_list(
graph, self.num_iterations
)
VGroup(*rect_list).set_fill(opacity = 0.8)
rects = rect_list[0]
self.play(ShowCreation(graph))
self.play(Write(rects))
for new_rects in rect_list[1:]:
rects.align_submobjects(new_rects)
for every_other_rect in rects[::2]:
every_other_rect.set_fill(opacity = 0)
self.play(Transform(
rects, new_rects,
run_time = 2,
lag_ratio = 0.5
))
self.wait()
def func(self, x):
return np.sin(x) + 1
class AltAreaUnderCurve(AreaUnderACurve):
CONFIG = {
"graph_origin" : 2*DOWN,
"x_min" : -3,
"x_max" : 3,
"x_axis_width" : 12,
"y_max" : 2,
"y_axis_height" : 4,
}
def func(self, x):
return np.exp(-x**2)
class Chapter1Wrapper(Chapter2Wrapper):
CONFIG = {
"title" : "Essence of calculus, chapter 1",
}
class AreaIsDerivative(PlotVelocity, ReconfigurableScene):
CONFIG = {
"y_axis_label" : "",
"num_rects" : 400,
"dT" : 0.25,
"variable_point_label" : "T",
"area_opacity" : 0.8,
}
def setup(self):
PlotVelocity.setup(self)
ReconfigurableScene.setup(self)
self.setup_axes()
self.add(*self.get_v_graph_and_label())
self.x_axis_label_mob.shift(MED_LARGE_BUFF*DOWN)
self.v_graph_label.shift(MED_LARGE_BUFF*DOWN)
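#Mobjects appended to this list (e.g. arrows) are replayed as static
#Animations during change_area_bounds, keeping them drawn on top of the
#regenerated area.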
self.submobjects = []
def construct(self):
self.introduce_variable_area()
self.write_integral()
self.nudge_input()
self.show_rectangle_approximation()
def introduce_variable_area(self):
area = self.area = self.get_area(0, 6)
x_nums = self.x_axis.numbers
self.play(Write(area, run_time = 2))
self.play(FadeOut(self.x_axis.numbers))
self.add_T_label(6)
self.change_area_bounds(
new_t_max = 4,
rate_func = there_and_back,
run_time = 2
)
self.wait()
def write_integral(self):
integral = TexMobject("\\int", "^T", "_0", "v(t)", "\\,dt")
integral.to_corner(UP+RIGHT)
integral.shift(2*LEFT)
top_T = integral.get_part_by_tex("T")
moving_T = self.T_label_group[0]
s_T = TexMobject("s(T)", "= ")
s_T.set_color_by_tex("s", DISTANCE_COLOR)
s_T.next_to(integral, LEFT)
int_arrow, s_arrow = [
Arrow(
mob.get_left(), self.area.get_center(),
color = WHITE
)
for mob in (integral, s_T)
]
distance_word = TextMobject("Distance")
distance_word.move_to(self.area)
self.play(Write(integral))
self.play(ShowCreation(int_arrow))
self.submobjects.append(int_arrow)
self.wait()
self.change_area_bounds(
new_t_max = 8,
rate_func = there_and_back,
run_time = 3,
)
self.play(Indicate(top_T))
self.play(ReplacementTransform(
top_T.copy(), moving_T
))
self.change_area_bounds(
new_t_max = 3,
rate_func = there_and_back,
run_time = 3
)
self.wait()
self.play(Write(distance_word, run_time = 2))
self.play(
ReplacementTransform(int_arrow, s_arrow),
FadeIn(s_T)
)
self.wait()
self.play(FadeOut(distance_word))
self.change_area_bounds(new_t_max = 0, run_time = 2)
self.change_area_bounds(
new_t_max = 8,
rate_func=linear,
run_time = 7.9,
)
self.wait()
self.change_area_bounds(new_t_max = 5)
self.wait()
def nudge_input(self):
dark_area = self.area.copy()
dark_area.set_fill(BLACK, opacity = 0.5)
curr_T = self.x_axis.point_to_number(self.area.get_right())
new_T = curr_T + self.dT
rect = Rectangle(
stroke_width = 0,
fill_color = YELLOW,
fill_opacity = 0.75
)
rect.replace(
VGroup(
VectorizedPoint(self.coords_to_point(new_T, 0)),
self.right_v_line,
),
stretch = True
)
dT_brace = Brace(rect, DOWN, buff = 0)
dT_label = dT_brace.get_text("$dT$", buff = SMALL_BUFF)
dT_label_group = VGroup(dT_label, dT_brace)
ds_label = TexMobject("ds")
ds_label.next_to(rect, RIGHT, LARGE_BUFF, UP)
ds_label.set_color(DISTANCE_COLOR)
ds_arrow = Arrow(ds_label.get_left(), rect.get_left())
ds_arrow.set_color(WHITE)
v_brace = Brace(rect, LEFT, buff = SMALL_BUFF)
v_T_label = v_brace.get_text("$v(T)$", buff = SMALL_BUFF)
self.change_area_bounds(new_t_max = new_T)
self.play(
FadeIn(dark_area),
*list(map(Animation, self.submobjects))
)
self.play(
FadeOut(self.T_label_group),
FadeIn(dT_label_group)
)
self.wait()
self.play(Write(ds_label))
self.play(ShowCreation(ds_arrow))
self.wait(2)
self.play(GrowFromCenter(v_brace))
self.play(ReplacementTransform(
self.v_graph_label.get_part_by_tex("v").copy(),
v_T_label,
run_time = 2
))
self.wait()
self.play(Indicate(dT_label))
self.wait()
self.rect = rect
self.dT_label_group = dT_label_group
self.v_T_label_group = VGroup(v_T_label, v_brace)
self.dark_area = dark_area
self.ds_label = ds_label
self.ds_arrow = ds_arrow
def show_rectangle_approximation(self):
formula1 = TexMobject("ds", "=", "v(T)", "dT")
formula2 = TexMobject("{ds", "\\over\\,", "dT}", "=", "v(T)")
for formula in formula1, formula2:
formula.next_to(self.v_graph_label, UP, LARGE_BUFF)
formula.set_color_by_tex("ds", DISTANCE_COLOR)
self.play(
DrawBorderThenFill(self.rect),
Animation(self.ds_arrow)
)
self.wait()
self.play(*[
ReplacementTransform(
mob, formula1.get_part_by_tex(tex),
run_time = 2
)
for mob, tex in [
(self.ds_label, "ds"),
(self.ds_arrow, "="),
(self.v_T_label_group[0].copy(), "v(T)"),
(self.dT_label_group[0].copy(), "dT"),
]
])
self.wait()
self.transition_to_alt_config(
dT = self.dT/5.0,
transformation_kwargs = {"run_time" : 2},
)
self.wait()
self.play(*[
ReplacementTransform(
formula1.get_part_by_tex(tex),
formula2.get_part_by_tex(tex),
)
for tex in ("ds", "=", "v(T)", "dT")
] + [
Write(formula2.get_part_by_tex("over"))
])
self.wait()
####
def add_T_label(self, x_val, **kwargs):
triangle = RegularPolygon(n=3, start_angle = np.pi/2)
triangle.set_height(MED_SMALL_BUFF)
triangle.move_to(self.coords_to_point(x_val, 0), UP)
triangle.set_fill(WHITE, 1)
triangle.set_stroke(width = 0)
T_label = TexMobject(self.variable_point_label)
T_label.next_to(triangle, DOWN)
v_line = self.get_vertical_line_to_graph(
x_val, self.v_graph,
color = YELLOW
)
self.play(
DrawBorderThenFill(triangle),
ShowCreation(v_line),
Write(T_label, run_time = 1),
**kwargs
)
self.T_label_group = VGroup(T_label, triangle)
self.right_v_line = v_line
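#The shaded "area" is just a large number of very thin Riemann rectangles;
#clamping the width to at least 0.01 avoids a degenerate area when
#t_min == t_max.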
def get_area(self, t_min, t_max):
numerator = max(t_max - t_min, 0.01)
dx = float(numerator) / self.num_rects
return self.get_riemann_rectangles(
self.v_graph,
x_min = t_min,
x_max = t_max,
dx = dx,
stroke_width = 0,
).set_fill(opacity = self.area_opacity)
def change_area_bounds(self, new_t_min = None, new_t_max = None, **kwargs):
curr_t_min = self.x_axis.point_to_number(self.area.get_left())
curr_t_max = self.x_axis.point_to_number(self.area.get_right())
if new_t_min is None:
new_t_min = curr_t_min
if new_t_max is None:
new_t_max = curr_t_max
group = VGroup(self.area, self.right_v_line, self.T_label_group)
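#Updater: interpolate the bounds each frame, rebuild the area and vertical
#line, and apply the change instantly via Transform(...).update(1); the T
#label tracks the line's foot and fades out as t_max approaches 0.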
def update_group(group, alpha):
area, v_line, T_label = group
t_min = interpolate(curr_t_min, new_t_min, alpha)
t_max = interpolate(curr_t_max, new_t_max, alpha)
new_area = self.get_area(t_min, t_max)
new_v_line = self.get_vertical_line_to_graph(
t_max, self.v_graph
)
new_v_line.set_color(v_line.get_color())
T_label.move_to(new_v_line.get_bottom(), UP)
#Fade close to 0
T_label[0].set_fill(opacity = min(1, t_max))
Transform(area, new_area).update(1)
Transform(v_line, new_v_line).update(1)
return group
self.play(
UpdateFromAlphaFunc(group, update_group),
*list(map(Animation, self.submobjects)),
**kwargs
)
class DirectInterpretationOfDsDt(TeacherStudentsScene):
def construct(self):
equation = TexMobject("{ds", "\\over\\,", "dT}", "(T)", "=", "v(T)")
ds, over, dt, of_T, equals, v = equation
equation.next_to(self.get_pi_creatures(), UP, LARGE_BUFF)
equation.shift(RIGHT)
v.set_color(VELOCITY_COLOR)
s_words = TextMobject("Tiny change in", "distance")
s_words.next_to(ds, UP+LEFT, LARGE_BUFF)
s_words.shift_onto_screen()
s_arrow = Arrow(s_words[1].get_bottom(), ds.get_left())
s_words.add(s_arrow)
s_words.set_color(DISTANCE_COLOR)
t_words = TextMobject("Tiny change in", "time")
t_words.next_to(dt, DOWN+LEFT)
t_words.to_edge(LEFT)
t_arrow = Arrow(t_words[1].get_top(), dt.get_left())
t_words.add(t_arrow)
t_words.set_color(TIME_COLOR)
self.add(ds, over, dt, of_T)
for words, part in (s_words, ds), (t_words, dt):
self.play(
FadeIn(
words,
run_time = 2,
lag_ratio = 0.5,
),
self.students[1].change_mode, "raise_right_hand"
)
self.play(part.set_color, words.get_color())
self.wait()
self.play(Write(VGroup(equals, v)))
self.change_student_modes(*["pondering"]*3)
self.wait(3)
class FindAntiderivative(Antiderivative):
def construct(self):
self.introduce()
self.first_part()
self.second_part()
self.combine()
self.add_plus_C()
def introduce(self):
q_marks, rhs = functions = self.get_functions("???", "t(8-t)")
expanded_rhs = TexMobject("8t - t^2")
expanded_rhs.move_to(rhs, LEFT)
expanded_rhs.set_color(rhs.get_color())
self.v_part1 = VGroup(*expanded_rhs[:2])
self.v_part2 = VGroup(*expanded_rhs[2:])
for part in self.v_part1, self.v_part2:
part.save_state()
top_arc, bottom_arc = arcs = self.get_arcs(functions)
derivative, antiderivative = words = self.get_arc_labels(arcs)
self.add(functions)
self.play(*list(map(ShowCreation, arcs)))
for word in words:
self.play(FadeIn(word, lag_ratio = 0.5))
self.wait()
self.change_mode("confused")
self.wait(2)
self.play(*[
ReplacementTransform(
rhs[i], expanded_rhs[j],
run_time = 2,
path_arc = np.pi
)
for i, j in enumerate([1, 4, 0, 2, 3, 4])
]+[
self.pi_creature.change_mode, "hesitant"
])
self.wait()
self.q_marks = q_marks
self.arcs = arcs
self.words = words
def first_part(self):
four_t_squared, two_t = self.get_functions("4t^2", "2t")
four = four_t_squared[0]
four.shift(UP)
four.set_fill(opacity = 0)
t_squared = VGroup(*four_t_squared[1:])
two_t.move_to(self.v_part1, LEFT)
self.play(self.v_part2.to_corner, UP+RIGHT)
self.play(
self.pi_creature.change, "plain", self.v_part1
)
self.play(ApplyWave(
self.q_marks,
direction = UP,
amplitude = SMALL_BUFF
))
self.wait(2)
self.play(
FadeOut(self.q_marks),
FadeIn(t_squared),
self.v_part1.shift, DOWN+RIGHT,
)
self.play(*[
ReplacementTransform(
t_squared[i].copy(), two_t[1-i],
run_time = 2,
path_arc = -np.pi/6.
)
for i in (0, 1)
])
self.change_mode("thinking")
self.wait()
self.play(four.set_fill, YELLOW, 1)
self.play(four.shift, DOWN)
self.play(FadeOut(two_t))
self.play(self.v_part1.restore)
self.play(four.set_color, DISTANCE_COLOR)
self.wait(2)
self.s_part1 = four_t_squared
def second_part(self):
self.arcs_copy = self.arcs.copy()
self.words_copy = self.words.copy()
part1_group = VGroup(
self.s_part1, self.v_part1,
self.arcs_copy, self.words_copy
)
neg_third_t_cubed, three_t_squared = self.get_functions(
"- \\frac{1}{3} t^3", "3t^2"
)
three_t_squared.move_to(self.v_part1, LEFT)
neg = neg_third_t_cubed[0]
third = VGroup(*neg_third_t_cubed[1:4])
t_cubed = VGroup(*neg_third_t_cubed[4:])
three = three_t_squared[0]
t_squared = VGroup(*three_t_squared[1:])
self.play(
part1_group.scale, 0.5,
part1_group.to_corner, UP+LEFT,
self.pi_creature.change_mode, "plain"
)
self.play(
self.v_part2.restore,
self.v_part2.shift, LEFT
)
self.play(FadeIn(self.q_marks))
self.wait()
self.play(
FadeOut(self.q_marks),
FadeIn(t_cubed),
self.v_part2.shift, DOWN+RIGHT
)
self.play(*[
ReplacementTransform(
t_cubed[i].copy(), three_t_squared[j],
path_arc = -np.pi/6,
run_time = 2,
)
for i, j in [(0, 1), (1, 0), (1, 2)]
])
self.wait()
self.play(FadeIn(third))
self.play(FadeOut(three))
self.wait(2)
self.play(Write(neg))
self.play(
FadeOut(t_squared),
self.v_part2.shift, UP+LEFT
)
self.wait(2)
self.s_part2 = neg_third_t_cubed
def combine(self):
self.play(
self.v_part1.restore,
self.v_part2.restore,
self.s_part1.scale, 2,
self.s_part1.next_to, self.s_part2, LEFT,
FadeOut(self.arcs_copy),
FadeOut(self.words_copy),
run_time = 2,
)
self.change_mode("happy")
self.wait(2)
def add_plus_C(self):
s_group = VGroup(self.s_part1, self.s_part2)
plus_Cs = [
TexMobject("+%d"%d)
for d in range(1, 8)
]
for plus_C in plus_Cs:
plus_C.set_color(YELLOW)
plus_C.move_to(s_group, RIGHT)
plus_C = plus_Cs[0]
self.change_mode("sassy")
self.wait()
self.play(
s_group.next_to, plus_C.copy(), LEFT,
GrowFromCenter(plus_C),
)
self.wait()
for new_plus_C in plus_Cs[1:]:
self.play(Transform(plus_C, new_plus_C))
self.wait()
class GraphSPlusC(GraphDistanceVsTime):
CONFIG = {
"y_axis_label" : "Distance"
}
def construct(self):
self.setup_axes()
graph = self.get_graph(
s_func,
color = DISTANCE_COLOR,
x_min = 0,
x_max = 8,
)
tangent = self.get_secant_slope_group(
6, graph, dx = 0.01
).secant_line
v_line = self.get_vertical_line_to_graph(
6, graph, line_class = DashedLine
)
v_line.scale_in_place(2)
v_line.set_color(WHITE)
graph_label, plus_C = full_label = TexMobject(
"s(t) = 4t^2 - \\frac{1}{3}t^3", "+C"
)
plus_C.set_color(YELLOW)
full_label.next_to(graph.points[-1], DOWN)
full_label.to_edge(RIGHT)
self.play(ShowCreation(graph))
self.play(FadeIn(graph_label))
self.wait()
self.play(
graph.shift, UP,
run_time = 2,
rate_func = there_and_back
)
self.play(ShowCreation(tangent))
graph.add(tangent)
self.play(ShowCreation(v_line))
self.play(
graph.shift, 2*DOWN,
run_time = 4,
rate_func = there_and_back,
)
self.play(Write(plus_C))
self.play(
graph.shift, 2*UP,
rate_func = there_and_back,
run_time = 4,
)
self.wait()
class LowerBound(AreaIsDerivative):
CONFIG = {
"graph_origin" : 2.5*DOWN + 6*LEFT
}
def construct(self):
self.add_integral_and_area()
self.mention_lower_bound()
self.drag_right_endpoint_to_zero()
self.write_antiderivative_difference()
self.show_alternate_antiderivative_difference()
self.add_constant_to_antiderivative()
def add_integral_and_area(self):
self.area = self.get_area(0, 6)
self.integral = self.get_integral("0", "T")
self.remove(self.x_axis.numbers)
self.add(self.area, self.integral)
self.add_T_label(6, run_time = 0)
def mention_lower_bound(self):
lower_bound = self.integral.get_part_by_tex("0")
circle = Circle(color = YELLOW)
circle.replace(lower_bound)
circle.scale_in_place(3)
zero_label = lower_bound.copy()
self.play(ShowCreation(circle))
self.play(Indicate(lower_bound))
self.play(
zero_label.scale, 1.5,
zero_label.next_to, self.graph_origin, DOWN, MED_LARGE_BUFF,
FadeOut(circle)
)
self.wait()
self.zero_label = zero_label
def drag_right_endpoint_to_zero(self):
zero_integral = self.get_integral("0", "0")
zero_integral[1].set_color(YELLOW)
zero_int_bounds = list(reversed(
zero_integral.get_parts_by_tex("0")
))
for bound in zero_int_bounds:
circle = Circle(color = YELLOW)
circle.replace(bound)
circle.scale_in_place(3)
bound.circle = circle
self.integral.save_state()
equals_zero = TexMobject("=0")
equals_zero.next_to(zero_integral, RIGHT)
equals_zero.set_color(GREEN)
self.change_area_bounds(0, 0, run_time = 3)
self.play(ReplacementTransform(
self.zero_label.copy(), equals_zero
))
self.play(Transform(self.integral, zero_integral))
self.wait(2)
for bound in zero_int_bounds:
self.play(ShowCreation(bound.circle))
self.play(FadeOut(bound.circle))
self.play(*[
ReplacementTransform(
bound.copy(), VGroup(equals_zero[1])
)
for bound in zero_int_bounds
])
self.wait(2)
self.change_area_bounds(0, 5)
self.play(
self.integral.restore,
FadeOut(equals_zero)
)
self.zero_integral = zero_integral
def write_antiderivative_difference(self):
antideriv_diff = self.get_antiderivative_difference("0", "T")
equals, at_T, minus, at_zero = antideriv_diff
antideriv_diff_at_eight = self.get_antiderivative_difference("0", "8")
at_eight = antideriv_diff_at_eight.left_part
integral_at_eight = self.get_integral("0", "8")
for part in at_T, at_zero, at_eight:
part.brace = Brace(part, DOWN, buff = SMALL_BUFF)
part.brace.save_state()
antideriv_text = at_T.brace.get_text("Antiderivative", buff = SMALL_BUFF)
antideriv_text.set_color(MAROON_B)
value_at_eight = at_eight.brace.get_text(
"%.2f"%s_func(8)
)
happens_to_be_zero = at_zero.brace.get_text("""
Happens to
equal 0
""")
big_brace = Brace(VGroup(at_T, at_zero))
cancel_text = big_brace.get_text("Cancels when $T=0$")
self.play(*list(map(Write, [equals, at_T])))
self.play(
GrowFromCenter(at_T.brace),
Write(antideriv_text, run_time = 2)
)
self.change_area_bounds(0, 5.5, rate_func = there_and_back)
self.wait()
self.play(
ReplacementTransform(at_T.copy(), at_zero),
Write(minus)
)
self.wait()
self.play(
ReplacementTransform(at_T.brace, big_brace),
ReplacementTransform(antideriv_text, cancel_text)
)
self.change_area_bounds(0, 0, run_time = 4)
self.wait()
self.play(
ReplacementTransform(big_brace, at_zero.brace),
ReplacementTransform(cancel_text, happens_to_be_zero),
)
self.wait(2)
self.change_area_bounds(0, 8, run_time = 2)
self.play(
Transform(self.integral, integral_at_eight),
Transform(antideriv_diff, antideriv_diff_at_eight),
MaintainPositionRelativeTo(at_zero.brace, at_zero),
MaintainPositionRelativeTo(happens_to_be_zero, at_zero.brace),
)
self.play(
GrowFromCenter(at_eight.brace),
Write(value_at_eight)
)
self.wait(2)
self.play(*list(map(FadeOut, [
at_eight.brace, value_at_eight,
at_zero.brace, happens_to_be_zero,
])))
self.antideriv_diff = antideriv_diff
def show_alternate_antiderivative_difference(self):
new_integral = self.get_integral("1", "7")
new_antideriv_diff = self.get_antiderivative_difference("1", "7")
numbers = [
TexMobject("%d"%d).next_to(
self.coords_to_point(d, 0),
DOWN, MED_LARGE_BUFF
)
for d in (1, 7)
]
tex_mobs = [new_integral]+new_antideriv_diff[1::2]+numbers
for tex_mob in tex_mobs:
tex_mob.set_color_by_tex("1", RED)
tex_mob.set_color_by_tex("7", GREEN)
tex_mob.set_color_by_tex("\\frac{1}{3}", WHITE)
self.change_area_bounds(1, 7, run_time = 2)
self.play(
self.T_label_group[0].set_fill, None, 0,
*list(map(FadeIn, numbers))
)
self.play(
Transform(self.integral, new_integral),
Transform(self.antideriv_diff, new_antideriv_diff),
)
self.wait(3)
for part in self.antideriv_diff[1::2]:
self.play(Indicate(part, scale_factor = 1.1))
self.wait()
def add_constant_to_antiderivative(self):
antideriv_diff = self.antideriv_diff
plus_fives = VGroup(*[TexMobject("+5") for i in range(2)])
plus_fives.set_color(YELLOW)
for five, part in zip(plus_fives, antideriv_diff[1::2]):
five.next_to(part, DOWN)
group = VGroup(
plus_fives[0],
antideriv_diff[2].copy(),
plus_fives[1]
)
self.play(Write(plus_fives, run_time = 2))
self.wait(2)
self.play(
group.arrange,
group.next_to, antideriv_diff, DOWN, MED_LARGE_BUFF
)
self.wait()
self.play(FadeOut(group, run_time = 2))
self.wait()
#####
def get_integral(self, lower_bound, upper_bound):
result = TexMobject(
"\\int", "^"+upper_bound, "_"+lower_bound,
"t(8-t)", "\\,dt"
)
result.next_to(self.graph_origin, RIGHT, MED_LARGE_BUFF)
result.to_edge(UP)
return result
def get_antiderivative_difference(self, lower_bound, upper_bound):
strings = []
for bound in upper_bound, lower_bound:
try:
d = int(bound)
strings.append("(%d)"%d)
except:
strings.append(bound)
parts = []
for s in strings:
part = TexMobject(
"\\left(",
"4", s, "^2", "-", "\\frac{1}{3}", s, "^3"
"\\right))"
)
part.set_color_by_tex(s, YELLOW, substring = False)
parts.append(part)
result = VGroup(
TexMobject("="), parts[0],
TexMobject("-"), parts[1],
)
result.left_part, result.right_part = parts
result.arrange(RIGHT)
result.scale(0.9)
result.next_to(self.integral, RIGHT)
return result
class FundamentalTheorem(GraphScene):
CONFIG = {
"lower_bound" : 1,
"upper_bound" : 7,
"lower_bound_color" : RED,
"upper_bound_color" : GREEN,
"n_riemann_iterations" : 6,
}
def construct(self):
self.add_graph_and_integral()
self.show_f_dx_sum()
self.show_rects_approaching_area()
self.write_antiderivative()
self.write_fundamental_theorem_of_calculus()
self.show_integral_considering_continuum()
self.show_antiderivative_considering_bounds()
def add_graph_and_integral(self):
self.setup_axes()
integral = TexMobject("\\int", "^b", "_a", "f(x)", "\\,dx")
integral.next_to(ORIGIN, LEFT)
integral.to_edge(UP)
integral.set_color_by_tex("a", self.lower_bound_color)
integral.set_color_by_tex("b", self.upper_bound_color)
graph = self.get_graph(
lambda x : -0.01*x*(x-3)*(x-6)*(x-12) + 3,
)
self.add(integral, graph)
self.graph = graph
self.integral = integral
self.bound_labels = VGroup()
self.v_lines = VGroup()
for bound, tex in (self.lower_bound, "a"), (self.upper_bound, "b"):
label = integral.get_part_by_tex(tex).copy()
label.scale(1.5)
label.next_to(self.coords_to_point(bound, 0), DOWN)
v_line = self.get_vertical_line_to_graph(
bound, graph, color = label.get_color()
)
self.bound_labels.add(label)
self.v_lines.add(v_line)
self.add(label, v_line)
def show_f_dx_sum(self):
kwargs = {
"x_min" : self.lower_bound,
"x_max" : self.upper_bound,
"fill_opacity" : 0.75,
"stroke_width" : 0.25,
}
low_opacity = 0.25
start_rect_index = 3
num_shown_sum_steps = 5
last_rect_index = start_rect_index + num_shown_sum_steps + 1
self.rect_list = self.get_riemann_rectangles_list(
self.graph, self.n_riemann_iterations, **kwargs
)
rects = self.rects = self.rect_list[0]
rects.save_state()
start_rect = rects[start_rect_index]
f_brace = Brace(start_rect, LEFT, buff = 0)
dx_brace = Brace(start_rect, DOWN, buff = 0)
f_brace.label = f_brace.get_text("$f(x)$")
dx_brace.label = dx_brace.get_text("$dx$")
flat_rects = self.get_riemann_rectangles(
self.get_graph(lambda x : 0), dx = 0.5, **kwargs
)
self.transform_between_riemann_rects(
flat_rects, rects,
replace_mobject_with_target_in_scene = True,
)
self.play(*[
ApplyMethod(
rect.set_fill, None,
1 if rect is start_rect else low_opacity
)
for rect in rects
])
self.play(*it.chain(
list(map(GrowFromCenter, [f_brace, dx_brace])),
list(map(Write, [f_brace.label, dx_brace.label])),
))
self.wait()
for i in range(start_rect_index+1, last_rect_index):
self.play(
rects[i-1].set_fill, None, low_opacity,
rects[i].set_fill, None, 1,
f_brace.set_height, rects[i].get_height(),
f_brace.next_to, rects[i], LEFT, 0,
dx_brace.next_to, rects[i], DOWN, 0,
*[
MaintainPositionRelativeTo(brace.label, brace)
for brace in (f_brace, dx_brace)
]
)
self.wait()
self.play(*it.chain(
list(map(FadeOut, [
f_brace, dx_brace,
f_brace.label, dx_brace.label
])),
[rects.set_fill, None, kwargs["fill_opacity"]]
))
def show_rects_approaching_area(self):
for new_rects in self.rect_list:
self.transform_between_riemann_rects(
self.rects, new_rects
)
def write_antiderivative(self):
deriv = TexMobject(
"{d", "F", "\\over\\,", "dx}", "(x)", "=", "f(x)"
)
deriv_F = deriv.get_part_by_tex("F")
deriv.next_to(self.integral, DOWN, MED_LARGE_BUFF)
rhs = TexMobject(*"=F(b)-F(a)")
rhs.set_color_by_tex("a", self.lower_bound_color)
rhs.set_color_by_tex("b", self.upper_bound_color)
rhs.next_to(self.integral, RIGHT)
self.play(Write(deriv))
self.wait(2)
self.play(*it.chain(
[
ReplacementTransform(deriv_F.copy(), part)
for part in rhs.get_parts_by_tex("F")
],
[
Write(VGroup(*rhs.get_parts_by_tex(tex)))
for tex in "=()-"
]
))
for tex in "b", "a":
self.play(ReplacementTransform(
self.integral.get_part_by_tex(tex).copy(),
rhs.get_part_by_tex(tex)
))
self.wait()
self.wait(2)
self.deriv = deriv
self.rhs = rhs
def write_fundamental_theorem_of_calculus(self):
words = TextMobject("""
Fundamental
theorem of
calculus
""")
words.to_edge(RIGHT)
self.play(Write(words))
self.wait()
def show_integral_considering_continuum(self):
self.play(*[
ApplyMethod(mob.set_fill, None, 0.2)
for mob in (self.deriv, self.rhs)
])
self.play(
self.rects.restore,
run_time = 3,
rate_func = there_and_back
)
self.wait()
for x in range(2):
self.play(*[
ApplyFunction(
lambda m : m.shift(MED_SMALL_BUFF*UP).set_fill(opacity = 1),
rect,
run_time = 3,
rate_func = squish_rate_func(
there_and_back,
alpha, alpha+0.2
)
)
for rect, alpha in zip(
self.rects,
np.linspace(0, 0.8, len(self.rects))
)
])
self.wait()
def show_antiderivative_considering_bounds(self):
self.play(
self.integral.set_fill, None, 0.5,
self.deriv.set_fill, None, 1,
self.rhs.set_fill, None, 1,
)
for label, line in reversed(list(zip(self.bound_labels, self.v_lines))):
new_line = line.copy().set_color(YELLOW)
label.save_state()
self.play(label.set_color, YELLOW)
self.play(ShowCreation(new_line))
self.play(ShowCreation(line))
self.remove(new_line)
self.play(label.restore)
self.wait()
self.play(self.integral.set_fill, None, 1)
self.wait(3)
class LetsRecap(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"Let's recap",
target_mode = "hesitant",
)
self.change_student_modes(*["happy"]*3)
self.wait(3)
class NegativeArea(GraphScene):
CONFIG = {
"x_axis_label" : "Time",
"y_axis_label" : "Velocity",
"graph_origin" : 1.5*DOWN + 5*LEFT,
"y_min" : -3,
"y_max" : 7,
"small_dx" : 0.01,
"sample_input" : 5,
}
def construct(self):
self.setup_axes()
self.add_graph_and_area()
self.write_negative_area()
self.show_negative_point()
self.show_car_going_backwards()
self.write_v_dt()
self.show_rectangle()
self.write_signed_area()
def add_graph_and_area(self):
graph = self.get_graph(
lambda x : -0.02*(x+1)*(x-3)*(x-7)*(x-10),
x_min = 0,
x_max = 8,
color = VELOCITY_COLOR
)
area = self.get_riemann_rectangles(
graph,
x_min = 0,
x_max = 8,
dx = self.small_dx,
start_color = BLUE_D,
end_color = BLUE_D,
fill_opacity = 0.75,
stroke_width = 0,
)
self.play(
ShowCreation(graph),
FadeIn(
area,
run_time = 2,
lag_ratio = 0.5,
)
)
self.graph = graph
self.area = area
def write_negative_area(self):
words = TextMobject("Negative area")
words.set_color(RED)
words.next_to(
self.coords_to_point(7, -2),
RIGHT,
)
arrow = Arrow(words, self.coords_to_point(
self.sample_input, -1,
))
self.play(
Write(words, run_time = 2),
ShowCreation(arrow)
)
self.wait(2)
self.play(*list(map(FadeOut, [self.area, arrow])))
self.negative_area_words = words
def show_negative_point(self):
v_line = self.get_vertical_line_to_graph(
self.sample_input, self.graph,
color = RED
)
self.play(ShowCreation(v_line))
self.wait()
self.v_line = v_line
def show_car_going_backwards(self):
car = Car()
start_point = 3*RIGHT + 2*UP
end_point = start_point + LEFT
nudged_end_point = end_point + MED_SMALL_BUFF*LEFT
car.move_to(start_point)
arrow = Arrow(RIGHT, LEFT, color = RED)
arrow.next_to(car, UP+LEFT)
arrow.shift(MED_LARGE_BUFF*RIGHT)
self.play(FadeIn(car))
self.play(ShowCreation(arrow))
self.play(MoveCar(
car, end_point,
moving_forward = False,
run_time = 3
))
self.wait()
ghost_car = car.copy().fade()
right_nose_line = self.get_car_nose_line(car)
self.play(ShowCreation(right_nose_line))
self.add(ghost_car)
self.play(MoveCar(
car, nudged_end_point,
moving_forward = False
))
left_nose_line = self.get_car_nose_line(car)
self.play(ShowCreation(left_nose_line))
self.nose_lines = VGroup(left_nose_line, right_nose_line)
self.car = car
self.ghost_car = ghost_car
def write_v_dt(self):
brace = Brace(self.nose_lines, DOWN, buff = 0)
equation = TexMobject("ds", "=", "v(t)", "dt")
equation.next_to(brace, DOWN, SMALL_BUFF, LEFT)
equation.set_color_by_tex("ds", DISTANCE_COLOR)
equation.set_color_by_tex("dt", TIME_COLOR)
negative = TextMobject("Negative")
negative.set_color(RED)
negative.next_to(equation.get_corner(UP+RIGHT), UP, LARGE_BUFF)
ds_arrow, v_arrow = arrows = VGroup(*[
Arrow(
negative.get_bottom(),
equation.get_part_by_tex(tex).get_top(),
color = RED,
)
for tex in ("ds", "v(t)")
])
self.play(
GrowFromCenter(brace),
Write(equation)
)
self.wait()
self.play(FadeIn(negative))
self.play(ShowCreation(v_arrow))
self.wait(2)
self.play(ReplacementTransform(
v_arrow.copy(),
ds_arrow
))
self.wait(2)
self.ds_equation = equation
self.negative_word = negative
self.negative_word_arrows = arrows
def show_rectangle(self):
rect_list = self.get_riemann_rectangles_list(
self.graph, x_min = 0, x_max = 8,
n_iterations = 6,
start_color = BLUE_D,
end_color = BLUE_D,
fill_opacity = 0.75,
)
rects = rect_list[0]
rect = rects[len(rects)*self.sample_input//8]
dt_brace = Brace(rect, UP, buff = 0)
v_brace = Brace(rect, LEFT, buff = 0)
dt_label = dt_brace.get_text("$dt$", buff = SMALL_BUFF)
dt_label.set_color(YELLOW)
v_label = v_brace.get_text("$v(t)$", buff = SMALL_BUFF)
v_label.add_background_rectangle()
self.play(FadeOut(self.v_line), FadeIn(rect))
self.play(
GrowFromCenter(dt_brace),
GrowFromCenter(v_brace),
Write(dt_label),
Write(v_label),
)
self.wait(2)
self.play(*it.chain(
[FadeIn(r) for r in rects if r is not rect],
list(map(FadeOut, [
dt_brace, v_brace, dt_label, v_label
]))
))
self.wait()
for new_rects in rect_list[1:]:
self.transform_between_riemann_rects(rects, new_rects)
self.wait()
def write_signed_area(self):
words = TextMobject("``Signed area''")
words.next_to(self.coords_to_point(self.sample_input, 0), UP)
symbols = VGroup(*[
TexMobject(sym).move_to(self.coords_to_point(*coords))
for sym, coords in [
("+", (1, 2)),
("-", (5, -1)),
("+", (7.6, 0.5)),
]
])
self.play(Write(words))
self.play(Write(symbols))
self.wait()
####
def get_car_nose_line(self, car):
line = DashedLine(car.get_top(), car.get_bottom())
line.move_to(car.get_right())
return line
class NextVideo(TeacherStudentsScene):
def construct(self):
series = VideoSeries()
series.to_edge(UP)
next_video = series[8]
integral = TexMobject("\\int")
integral.next_to(next_video, DOWN, LARGE_BUFF)
self.play(FadeIn(series, lag_ratio = 0.5))
self.play(
next_video.set_color, YELLOW,
next_video.shift, next_video.get_height()*DOWN/2,
self.teacher.change_mode, "raise_right_hand"
)
self.play(Write(integral))
self.wait(5)
class Chapter8PatreonThanks(PatreonThanks):
CONFIG = {
"specific_patrons" : [
"Ali Yahya",
"CrypticSwarm",
"Kaustuv DeBiswas",
"Kathryn Schmiedicke",
"Karan Bhargava",
"Ankit Agarwal",
"Yu Jun",
"Dave Nicponski",
"Damion Kistler",
"Juan Benet",
"Othman Alikhan",
"Markus Persson",
"Dan Buchoff",
"Derek Dai",
"Joseph John Cox",
"Luc Ritchie",
"Robert Teed",
"Jason Hise",
"Meshal Alshammari",
"Bernd Sing",
"Nils Schneider",
"James Thornton",
"Mustafa Mahdi",
"Jonathan Eppele",
"Mathew Bramson",
"Jerry Ling",
"Mark Govea",
"Vecht",
"Shimin Kuang",
"Rish Kundalia",
"Achille Brighton",
"Ripta Pasay",
]
}
class Thumbnail(Chapter1Thumbnail):
CONFIG = {
"x_axis_label" : "",
"y_axis_label" : "",
"graph_origin" : 1.5*DOWN + 4*LEFT,
"y_axis_height" : 5,
"x_max" : 5,
"x_axis_width" : 11,
}
def construct(self):
self.setup_axes()
self.remove(*self.x_axis.numbers)
self.remove(*self.y_axis.numbers)
graph = self.get_graph(self.func)
rects = self.get_riemann_rectangles(
graph,
x_min = 0,
x_max = 4,
dx = 0.25,
)
words = TextMobject("Integrals")
words.set_width(8)
words.to_edge(UP)
self.add(graph, rects, words)
| 30.77982 | 83 | 0.536657 | ["MIT"] | wofeicaoge/manim | from_3b1b/old/eoc/chapter8.py | 85,414 | Python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import requests
import six
from six.moves.urllib import parse as urlparse
class _RequestObjectProxy(object):
"""A wrapper around a requests.Request that gives some extra information.
    This will be important both for matching and so that when it's saved into
    the request_history users will be able to access these properties.
"""
def __init__(self, request, **kwargs):
self._request = request
self._matcher = None
self._url_parts_ = None
self._qs = None
# All of these params should always exist but we use a default
# to make the test setup easier.
self._timeout = kwargs.pop('timeout', None)
self._allow_redirects = kwargs.pop('allow_redirects', None)
self._verify = kwargs.pop('verify', None)
self._stream = kwargs.pop('stream', None)
self._cert = kwargs.pop('cert', None)
self._proxies = copy.deepcopy(kwargs.pop('proxies', {}))
# FIXME(jamielennox): This is part of bug #1584008 and should default
# to True (or simply removed) in a major version bump.
self._case_sensitive = kwargs.pop('case_sensitive', False)
def __getattr__(self, name):
return getattr(self._request, name)
@property
def _url_parts(self):
if self._url_parts_ is None:
url = self._request.url
if not self._case_sensitive:
url = url.lower()
self._url_parts_ = urlparse.urlparse(url)
return self._url_parts_
@property
def scheme(self):
return self._url_parts.scheme
@property
def netloc(self):
return self._url_parts.netloc
@property
def hostname(self):
try:
return self.netloc.split(':')[0]
except IndexError:
return ''
@property
def port(self):
components = self.netloc.split(':')
try:
return int(components[1])
except (IndexError, ValueError):
pass
if self.scheme == 'https':
return 443
if self.scheme == 'http':
return 80
# The default return shouldn't matter too much because if you are
# wanting to test this value you really should be explicitly setting it
# somewhere. 0 at least is a boolean False and an int.
return 0
@property
def path(self):
return self._url_parts.path
@property
def query(self):
return self._url_parts.query
@property
def qs(self):
if self._qs is None:
self._qs = urlparse.parse_qs(self.query)
return self._qs
@property
def timeout(self):
return self._timeout
@property
def allow_redirects(self):
return self._allow_redirects
@property
def verify(self):
return self._verify
@property
def stream(self):
return self._stream
@property
def cert(self):
return self._cert
@property
def proxies(self):
return self._proxies
@classmethod
def _create(cls, *args, **kwargs):
return cls(requests.Request(*args, **kwargs).prepare())
@property
def text(self):
body = self.body
if isinstance(body, six.binary_type):
body = body.decode('utf-8')
return body
def json(self, **kwargs):
return json.loads(self.text, **kwargs)
@property
def matcher(self):
"""The matcher that this request was handled by.
The matcher object is handled by a weakref. It will return the matcher
object if it is still available - so if the mock is still in place. If
the matcher is not available it will return None.
"""
return self._matcher()
def __str__(self):
return "{0.method} {0.url}".format(self._request)
| 27.110429 | 79 | 0.630686 | ["MIT"] | Guillaume-Fernandez/PhishInspector | venv/lib/python3.6/site-packages/requests_mock/request.py | 4,419 | Python
from django.urls import path
from meetings.api.views import (MeetingViewset)
from rest_framework import routers
router = routers.DefaultRouter(trailing_slash='/?')
router.register('meetings', MeetingViewset)
urlpatterns = router.urls
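# Illustrative note (an assumption, not from the original file): with the registration
# above, DefaultRouter generates list and detail routes for the viewset, roughly
#   meetings/      -> list/create
#   meetings/{pk}/ -> retrieve/update/destroy
# and trailing_slash='/?' makes the trailing slash optional in the matched patterns.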
| 21.636364 | 51 | 0.806723 | ["MIT"] | MaySoMusician/owaranai-meter | app/meetings/api/urls.py | 238 | Python
'''
Acdfg class will have the class definitions for loading
and creating acdfg objects
'''
from __future__ import print_function
try:
from enum import Enum
except ImportError:
from enum34 import Enum
#import proto_acdfg
from protobuf.proto_acdfg_pb2 import Acdfg as ProtoAcdfg
import logging
class NodeType(Enum):
regular_node = 1
data_node = 2
method_node = 3
class EdgeType(Enum):
control_edge = 1
def_edge = 2
use_edge = 3
transitive_edge = 4
exceptional_edge = 5
class Node:
def __init__(self, node_type, key):
self.node_type = node_type
self.id = key
assert isinstance(key, int)
# assert isinstance(node_type, NodeType)
# def __init__(self, key):
# self.node_type = NodeType.regular_node
# self.id = key
# assert isinstance(key, int)
def get_type(self):
return self.node_type
def get_id(self):
return self.id
def get_node_type_str(self):
if (self.node_type == NodeType.regular_node):
return "regular node"
elif (self.node_type == NodeType.data_node):
return "data node"
elif (self.node_type == NodeType.method_node):
return "method node"
else:
assert False, ' Unhandled node type'
class DataNode(Node):
DATA_VAR = 0
DATA_CONST = 1
def __init__(self, key, name, data_type, data_type_type):
Node.__init__(self, NodeType.data_node, key)
self.name = name
self.data_type = data_type
if ("DATA_VAR" == ProtoAcdfg.DataNode.DataType.Name(data_type_type)):
self.data_type_type = DataNode.DATA_VAR
elif ("DATA_CONST" == ProtoAcdfg.DataNode.DataType.Name(data_type_type)):
self.data_type_type = DataNode.DATA_CONST
else:
logging.error("Cannot determine the type %s for data node" % (str(data_type_type)))
raise Exception("Cannot determine the type %s for data node" % (str(data_type_type)))
logging.debug('DataNode: (%s,%s,%s,%s)' % (str(key), str(name),
str(data_type),
str(data_type_type)))
def get_name(self):
return self.name
def get_data_type(self):
return self.data_type
def get_data_type_type(self):
return self.data_type_type
class MethodNode(Node):
def __init__(self, key, name, receiver, arg_list):
Node.__init__(self, NodeType.method_node, key)
self.name = name
self.receiver = receiver
self.arg_list = arg_list
for a in arg_list:
assert isinstance(a, DataNode)
if receiver:
assert isinstance(receiver, DataNode)
logging.debug(type(name))
assert isinstance(name, str) or isinstance(name, unicode)
logging.debug('Method Node: %s,%s' % (str(key), str(name)))
def get_name(self):
return self.name
def get_receiver(self):
return self.receiver
def get_args(self):
return self.arg_list
class Edge:
def __init__(self, edge_type, key, src, tgt):
self.edge_type = edge_type
self.id = key
self.src = src
self.tgt = tgt
assert isinstance(src, Node)
assert isinstance(tgt, Node)
def get_id(self):
return self.id
def get_edge_type(self):
return self.edge_type
class DefEdge(Edge):
def __init__(self, key, src, tgt):
Edge.__init__(self, EdgeType.def_edge, key, src, tgt)
assert isinstance(tgt, DataNode)
class UseEdge(Edge):
def __init__(self, key, src, tgt):
Edge.__init__(self, EdgeType.use_edge, key, src, tgt)
assert isinstance(src, DataNode)
class ControlEdge(Edge):
def __init__(self, key, src, tgt):
Edge.__init__(self, EdgeType.control_edge, key, src, tgt)
class TransitiveEdge(Edge):
def __init__(self, key, src, tgt):
Edge.__init__(self, EdgeType.transitive_edge, key, src, tgt)
class ExceptionEdge(Edge):
def __init__(self, key, src, tgt):
Edge.__init__(self, EdgeType.exceptional_edge, key, src, tgt)
class Acdfg:
def __init__(self, acdfg_protobuf_obj):
self.acdfg_protobuf = acdfg_protobuf_obj
self.all_nodes = {}
self.data_nodes = {}
self.method_nodes = {}
self.regular_nodes = {}
self.all_edges = {}
def add_node(self, node):
assert isinstance(node, Node), \
'Only node objects can be added through add_node'
key = node.get_id()
assert key not in self.all_nodes, \
'key %d for node already present'%key
self.all_nodes[key] = node
if isinstance(node, DataNode):
self.data_nodes[key] = node
elif isinstance(node, MethodNode):
self.method_nodes[key] = node
else:
self.regular_nodes[key] = node
def get_data_nodes(self):
return self.data_nodes
def get_method_nodes(self):
return self.method_nodes
def add_edge(self, edge):
assert isinstance(edge, Edge)
key = edge.get_id()
assert key not in self.all_edges, 'key %d for edge already present'%key
self.all_edges[key] = edge
def get_node_from_id(self, id):
if id in self.data_nodes:
return self.data_nodes[id]
elif id in self.method_nodes:
return self.method_nodes[id]
elif id in self.regular_nodes:
return self.regular_nodes[id]
else:
assert False, 'ID: %d not found'%(id)
def get_node_obj_from_ids(acdfg_obj, proto_edge):
src = acdfg_obj.get_node_from_id(getattr(proto_edge, 'from'))
tgt = acdfg_obj.get_node_from_id(proto_edge.to)
return src, tgt
def read_acdfg(filename):
try:
f = open(filename, 'rb')
acdfg = ProtoAcdfg() # create a new acdfg
# acdfg.parse_from_bytes(f.read())
acdfg.ParseFromString(f.read())
acdfg_obj = Acdfg(acdfg)
for dNode in acdfg.data_node:
data_node_obj = DataNode(int ( getattr(dNode,'id') ),
dNode.name,
getattr(dNode,'type'),
dNode.data_type)
acdfg_obj.add_node(data_node_obj)
for mNode in acdfg.method_node:
arg_ids = mNode.argument
arg_list = [acdfg_obj.get_node_from_id(j) for j in arg_ids]
if mNode.invokee:
rcv = acdfg_obj.get_node_from_id(mNode.invokee)
else:
rcv = None
method_node_obj = MethodNode(int(mNode.id), mNode.name, rcv, arg_list)
acdfg_obj.add_node(method_node_obj)
for rNode in acdfg.misc_node:
misc_node_obj = Node(NodeType.regular_node,int(rNode.id))
acdfg_obj.add_node(misc_node_obj)
for ctrl_edge in acdfg.control_edge:
src, tgt = get_node_obj_from_ids(acdfg_obj, ctrl_edge)
cedge_obj = ControlEdge(ctrl_edge.id, src, tgt)
acdfg_obj.add_edge(cedge_obj)
for dedge in acdfg.def_edge:
src, tgt = get_node_obj_from_ids(acdfg_obj, dedge)
            dedge_obj = DefEdge(dedge.id, src, tgt)
acdfg_obj.add_edge(dedge_obj)
for uedge in acdfg.use_edge:
src, tgt = get_node_obj_from_ids(acdfg_obj, uedge)
uedge_obj = UseEdge(uedge.id, src, tgt)
acdfg_obj.add_edge(uedge_obj)
for tedge in acdfg.trans_edge:
src, tgt = get_node_obj_from_ids(acdfg_obj, tedge)
tedge_obj = TransitiveEdge(tedge.id, src, tgt)
acdfg_obj.add_edge(tedge_obj)
f.close()
return acdfg_obj
except IOError:
print('Could not open: ', filename, 'for reading in binary mode.')
assert False
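# Hedged usage sketch (not in the original module); the file name below is hypothetical
# and must point at a serialized Acdfg protobuf produced by the extraction pipeline:
#   acdfg = read_acdfg('example_method.acdfg.bin')
#   print(len(acdfg.get_method_nodes()), 'method nodes,',
#         len(acdfg.get_data_nodes()), 'data nodes')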
| 30.705426 | 97 | 0.611083 | ["Apache-2.0"] | LesleyLai/biggroum | python/fixrgraph/annotator/acdfgClass.py | 7,922 | Python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .rule_data_source import RuleDataSource
class RuleManagementEventDataSource(RuleDataSource):
"""A rule management event data source. The discriminator fields is always
RuleManagementEventDataSource in this case.
:param resource_uri: the resource identifier of the resource the rule
monitors.
:type resource_uri: str
:param odatatype: Polymorphic Discriminator
:type odatatype: str
:param event_name: the event name.
:type event_name: str
:param event_source: the event source.
:type event_source: str
:param level: the level.
:type level: str
:param operation_name: The name of the operation that should be checked
for. If no name is provided, any operation will match.
:type operation_name: str
:param resource_group_name: the resource group name.
:type resource_group_name: str
:param resource_provider_name: the resource provider name.
:type resource_provider_name: str
:param status: The status of the operation that should be checked for. If
no status is provided, any status will match.
:type status: str
:param sub_status: the substatus.
:type sub_status: str
:param claims: the claims.
:type claims: :class:`RuleManagementEventClaimsDataSource
<azure.mgmt.monitor.models.RuleManagementEventClaimsDataSource>`
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'resource_uri': {'key': 'resourceUri', 'type': 'str'},
'odatatype': {'key': 'odata\\.type', 'type': 'str'},
'event_name': {'key': 'eventName', 'type': 'str'},
'event_source': {'key': 'eventSource', 'type': 'str'},
'level': {'key': 'level', 'type': 'str'},
'operation_name': {'key': 'operationName', 'type': 'str'},
'resource_group_name': {'key': 'resourceGroupName', 'type': 'str'},
'resource_provider_name': {'key': 'resourceProviderName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'sub_status': {'key': 'subStatus', 'type': 'str'},
'claims': {'key': 'claims', 'type': 'RuleManagementEventClaimsDataSource'},
}
def __init__(self, resource_uri=None, event_name=None, event_source=None, level=None, operation_name=None, resource_group_name=None, resource_provider_name=None, status=None, sub_status=None, claims=None):
super(RuleManagementEventDataSource, self).__init__(resource_uri=resource_uri)
self.event_name = event_name
self.event_source = event_source
self.level = level
self.operation_name = operation_name
self.resource_group_name = resource_group_name
self.resource_provider_name = resource_provider_name
self.status = status
self.sub_status = sub_status
self.claims = claims
self.odatatype = 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'
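# Hedged construction sketch (not part of the generated SDK file); every value is a
# placeholder chosen only to illustrate the keyword arguments:
#   source = RuleManagementEventDataSource(
#       resource_uri='<resource id>', event_source='Administrative',
#       level='Error', status='Failed')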
| 44.103896 | 209 | 0.659305 | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | EnjoyLifeFund/macSierra-py36-pkgs | azure/mgmt/monitor/models/rule_management_event_data_source.py | 3,396 | Python
from argparse import Namespace
from .simulation_action import Action, SimulationAction
from .plot_action import PlotAction
from .gram_charlier_action import GramCharlierAction
from .cornish_fisher_action import CornishFisherAction
from .mc_combination_action import MCCombinationAction
from .print_benchmark_action import PrintBenchmarkAction
from .goals_action import GoalsAction
class ActionFactory:
@classmethod
def create_action(cls, cli_args: Namespace) -> Action:
if cli_args.simulate:
return SimulationAction(cli_args)
if cli_args.goals or cli_args.invariants:
return GoalsAction(cli_args)
if cli_args.plot:
return PlotAction(cli_args)
if cli_args.gram_charlier:
return GramCharlierAction(cli_args)
if cli_args.cornish_fisher:
return CornishFisherAction(cli_args)
if cli_args.mc_comb is not None:
return MCCombinationAction(cli_args)
return PrintBenchmarkAction(cli_args)
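# Hedged dispatch example (not part of the original file). The Namespace fields mirror
# only the flags inspected above; a real parsed CLI carries more options:
#   args = Namespace(simulate=False, goals=True, invariants=False, plot=False,
#                    gram_charlier=False, cornish_fisher=False, mc_comb=None)
#   action = ActionFactory.create_action(args)   # -> GoalsAction(args)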
| 35.137931 | 58 | 0.747792 | ["MIT"] | daneshvar-amrollahi/polar | cli/actions/factory.py | 1,019 | Python
import argparse
from datetime import datetime
from json import decoder
from os import path, mkdir, remove
from os.path import isfile
from threading import Thread
from time import sleep
try:
from progress.bar import Bar
import requests
import termcolor
except ImportError:
print("You are missing modules. Run \"python3 -m pip install -r requirements.txt\" to "
"install them.")
exit(0)
# print a message with a time stamp
def status(message):
print("{0} {1}".format(datetime.now(), message))
# clean any temp files created during runtime
def cleanup():
if isfile("runfile"):
remove("runfile")
# main loop
def main():
status("Fetching latest paste IDs...")
# fetch latest 100 paste IDs
fetch_limit = 100
current_request = requests.get("https://scrape.pastebin.com/api_scraping.php?limit={0}".format(fetch_limit))
current_json = []
try:
current_json = current_request.json()
except decoder.JSONDecodeError:
status(termcolor.colored("Unable to fetch latest paste IDs. Make sure your IP is whitelisted at "
"https://pastebin.com/doc_scraping_api", "red"))
cleanup()
exit(0)
status("Paste IDs fetched. Processing...")
# clean up fetched ids
cleaned_json = []
for entry in current_json:
if entry["key"] not in paste_ids:
cleaned_json.append(entry)
# create a progress bar and start downloading pastes if we have new ones
    if len(cleaned_json) != 0:
with Bar("Processing", max=len(cleaned_json), fill=">") as bar:
for entry in cleaned_json:
# download the raw paste data
entry_request = requests.get("https://scrape.pastebin.com/api_scrape_item.php?i={0}"
.format(entry["key"]))
entry_content = entry_request.text
path_file = path.join("files", "{0}.txt".format(entry["key"]))
paste_ids.append(entry["key"])
# if we have a provided keyword list, check for keywords
if keywords is not None:
for keyword in keywords:
if keyword.upper() in entry_content.upper():
bar.suffix = "%(index)d/%(max)d " + termcolor.colored("[KEYWORD] Paste \'{0}\' contains "
"keyword \'{1}\'".format(entry["key"],
keyword),
"green")
if args.noSorting is False:
path_file = path.join("files", keyword, "{0}.txt".format(entry["key"]))
with open(path_file, "w+", encoding='utf-8') as entry_file:
entry_file.write(entry_content)
break
else:
with open(path_file, "w+", encoding='utf-8') as entry_file:
entry_file.write(entry_content)
bar.suffix = "%(index)d/%(max)d Saving paste \'{0}\'".format(entry["key"])
bar.next()
bar.finish()
# otherwise, just say that we didn't have any new content
else:
status("No new pastes found, skipping downloads...")
if args.infinite is False:
if not isfile("runfile"):
print()
status("Runfile no longer found, exiting...")
exit(0)
skipped_pastes = fetch_limit - len(cleaned_json)
if skipped_pastes != 0:
status("Skipped {0} previously fetched pastes".format(skipped_pastes))
status("Cleaning up internal ID list...")
while len(paste_ids) > max_id_list_size:
paste_ids.pop(0)
# start 60 second loop
status("Hibernating for 60 seconds...")
with Bar("Hibernating", max=60, fill=">", suffix="") as bar:
for i in range(60):
sleep(1)
bar.next()
bar.finish()
print()
Thread(main()).start()
if __name__ == '__main__':
AUTHOR = "SYRAPT0R"
COPYRIGHT = "2019-2022"
VERSION = "0.5.3"
# parse arguments
keywords = None
parser = argparse.ArgumentParser(description="A script to scrape pastebin.com with optional keyword search")
parser.add_argument("-k", "--keywords", help="A file containing keywords for the search")
parser.add_argument("-i", "--infinite", help="Whether to run in infinite mode (Default: false)",
action="store_true", default=False)
parser.add_argument("-nS", "--noSorting", help="Whether to sort keyword pastes into subdirectories",
action="store_true", default=False)
args = parser.parse_args()
status("STARTING PASTA SCRAPER {0}, (c) {1} {2}".format(VERSION, COPYRIGHT, AUTHOR))
print()
# make sure file directories exists
if not path.isdir("files"):
status(termcolor.colored("No file directory found, creating...", "yellow"))
mkdir("files")
if args.keywords is not None:
try:
with open(args.keywords, "r") as f:
keywords = f.readlines()
except IOError:
status(termcolor.colored("Unable to load specified keyword file. Aborting...", "red"))
exit(0)
keywords = [keyword.strip() for keyword in keywords]
# create subdirectories if required
if args.noSorting is False:
for keyword in keywords:
current_path = path.join("files", keyword)
if not path.isdir(current_path):
status(termcolor.colored("Creating directory {0}".format(current_path), "yellow"))
mkdir(current_path)
status("Loaded {0} keywords".format(len(keywords)))
# create paste ID index
paste_ids = []
max_id_list_size = 200
# create non infinite file if needed
if args.infinite is False:
status("Creating run file...")
f = open("runfile", "w+")
f.close()
else:
status("Running in infinite mode...")
# preparation done, enter main loop
status("Entering main loop...")
print()
main()
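# Hedged invocation examples (not part of the original script); the keyword file is a
# plain text file with one search term per line:
#   python3 scrape.py -k keywords.txt        # stops once the runfile is deleted
#   python3 scrape.py -k keywords.txt -i     # infinite mode, runs until interrupted
#   python3 scrape.py -k keywords.txt -nS    # keyword hits stay unsorted in files/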
| 32.774359 | 120 | 0.560476 | ["MIT"] | Syrapt0r/PastaScraper | scrape.py | 6,391 | Python
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Parsplice(CMakePackage):
"""ParSplice code implements the Parallel Trajectory Splicing algorithm"""
homepage = "https://gitlab.com/exaalt/parsplice"
url = "https://gitlab.com/api/v4/projects/exaalt%2Fparsplice/repository/archive.tar.gz?sha=v1.1"
git = "https://gitlab.com/exaalt/parsplice.git"
tags = ['ecp', 'ecp-apps']
version('develop', branch='master')
version('1.1', '3a72340d49d731a076e8942f2ae2f4e9')
depends_on("[email protected]:", type='build')
depends_on("berkeley-db")
depends_on("nauty")
depends_on("boost")
depends_on("mpi")
depends_on("eigen@3:")
depends_on("lammps+lib@20170901:")
def cmake_args(self):
options = ['-DBUILD_SHARED_LIBS=ON']
return options
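# Hedged usage note (not part of the original recipe): with this package file on the
# Spack repo path, the build would typically be requested as, e.g.,
#   spack install parsplice@1.1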
| 28.882353 | 105 | 0.685336 | ["ECL-2.0", "Apache-2.0", "MIT"] | 1nf1n1t3l00p/spack | var/spack/repos/builtin/packages/parsplice/package.py | 982 | Python
class QuizBrain:
def __init__(self, questions):
self.question_no = 0
self.score = 0
self.questions = questions
self.current_question = None
def has_more_questions(self):
"""To check if the quiz has more questions"""
return self.question_no < len(self.questions)
def next_question(self):
"""Get the next question by incrementing the question number"""
self.current_question = self.questions[self.question_no]
self.question_no += 1
q_text = self.current_question.question_text
return f"Q.{self.question_no}: {q_text}"
def check_answer(self, user_answer):
"""Check the user answer against the correct answer and maintain the score"""
correct_answer = self.current_question.correct_answer
if user_answer.lower() == correct_answer.lower():
self.score += 1
return True
else:
return False
def get_score(self):
"""Get the number of correct answers, wrong answers and score percentage."""
wrong = self.question_no - self.score
score_percent = int(self.score / self.question_no * 100)
return (self.score, wrong, score_percent)
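# Hedged usage sketch (not part of the original file). The real project builds Question
# objects elsewhere; SimpleNamespace merely stands in for that class here.
if __name__ == '__main__':
    from types import SimpleNamespace
    questions = [SimpleNamespace(question_text='2 + 2 = 4?', correct_answer='True')]
    quiz = QuizBrain(questions)
    while quiz.has_more_questions():
        print(quiz.next_question())
        print('Correct!' if quiz.check_answer('True') else 'Wrong!')
    print(quiz.get_score())   # -> (1, 0, 100)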
| 33.315789 | 85 | 0.627172 | ["MIT"] | ashutoshkrris/GUI-Quiz-Tkinter | quiz_brain.py | 1,266 | Python
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import render, setup
class DefaultTests(SimpleTestCase):
"""
Literal string arguments to the default filter are always treated as
safe strings, regardless of the auto-escaping state.
Note: we have to use {"a": ""} here, otherwise the invalid template
variable string interferes with the test result.
"""
@setup({'default01': '{{ a|default:"x<" }}'})
def test_default01(self):
output = render('default01', {"a": ""})
self.assertEqual(output, "x<")
@setup({'default02': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'})
def test_default02(self):
output = render('default02', {"a": ""})
self.assertEqual(output, "x<")
@setup({'default03': '{{ a|default:"x<" }}'})
def test_default03(self):
output = render('default03', {"a": mark_safe("x>")})
self.assertEqual(output, "x>")
@setup({'default04': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'})
def test_default04(self):
output = render('default04', {"a": mark_safe("x>")})
self.assertEqual(output, "x>")
class DefaultIfNoneTests(SimpleTestCase):
@setup({'default_if_none01': '{{ a|default:"x<" }}'})
def test_default_if_none01(self):
output = render('default_if_none01', {"a": None})
self.assertEqual(output, "x<")
@setup({'default_if_none02': '{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}'})
def test_default_if_none02(self):
output = render('default_if_none02', {"a": None})
self.assertEqual(output, "x<")
| 34.9375 | 96 | 0.619559 | ["BSD-3-Clause"] | DasAllFolks/django | tests/template_tests/filter_tests/test_default.py | 1,677 | Python
"""General-purpose training script for image-to-image translation.
This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
It first creates model, dataset, and visualizer given the option.
It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models.
The script supports continue/resume training. Use '--continue_train' to resume your previous training.
Example:
Train a CycleGAN model:
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Train a pix2pix model:
python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/train_options.py for more training options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import time
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer
from util import util
if __name__ == '__main__':
# Create dataset with fixed sample patches
opt_eval = TrainOptions().parse()
opt_eval.num_train, opt_eval.num_test, opt_eval.eval_mode = 100, 100, True
dataset_eval = create_dataset(opt_eval) # create a dataset given opt.eval_mode and other options
# Intialize training dataset and model
opt = TrainOptions().parse() # get training options
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
dataset_size = len(dataset) # get the number of images in the dataset.
print('The number of training images = %d' % dataset_size)
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
total_iters = 0 # the total number of training iterations
# Evaluate metrics before running the model
metrics_log_file = model.save_dir + '/' + opt.name + '_metrics.txt'
util.eval_error_metrics(0, model, dataset_eval, log_filename=metrics_log_file)
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
for i, data in enumerate(dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
visualizer.reset()
total_iters += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data) # unpack data from dataset and apply preprocessing
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % opt.update_html_freq == 0
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
t_comp = (time.time() - iter_start_time) / opt.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
if opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks('latest')
model.save_networks(epoch)
util.eval_error_metrics(epoch, model, dataset_eval, log_filename=metrics_log_file)
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
model.update_learning_rate() # update learning rates at the end of every epoch.
| 59.423913 | 180 | 0.690324 | ["MIT"] | supri-a/TXM2SEM | train.py | 5,467 | Python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
import contextlib
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass, field
from omegaconf import MISSING, II, open_dict
from typing import Any
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.tasks import FairseqTask
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer
@dataclass
class Wav2Vec2AsrConfig(FairseqDataclass):
w2v_path: str = field(
default=MISSING, metadata={"help": "path to wav2vec 2.0 model"}
)
no_pretrained_weights: bool = field(
default=False, metadata={"help": "if true, does not load pretrained weights"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={"help": "dropout after transformer and before final projection"},
)
dropout: float = field(
default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"}
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside wav2vec 2.0 model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside wav2vec 2.0 model"
},
)
# masking
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
default=10, metadata={"help": "repeat the mask indices multiple times"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask (normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
freeze_finetune_updates: int = field(
default=0, metadata={"help": "dont finetune wav2vec for this many updates"}
)
feature_grad_mult: float = field(
default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"}
)
layerdrop: float = field(
default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"}
)
normalize: bool = II("task.normalize")
data: str = II("task.data")
# this holds the loaded wav2vec args
w2v_args: Any = None
@dataclass
class Wav2Vec2CtcConfig(Wav2Vec2AsrConfig):
pass
@register_model("wav2vec_ctc", dataclass=Wav2Vec2CtcConfig)
class Wav2VecCtc(BaseFairseqModel):
def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__()
self.cfg = cfg
self.w2v_encoder = w2v_encoder
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask):
"""Build a new model instance."""
w2v_encoder = Wav2VecEncoder(cfg, task.target_dictionary)
return cls(cfg, w2v_encoder)
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output["encoder_out"]
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def forward(self, **kwargs):
x = self.w2v_encoder(**kwargs)
return x
@dataclass
class Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig):
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"})
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
@register_model("wav2vec_seq2seq", dataclass=Wav2Vec2Seq2SeqConfig)
class Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask):
"""Build a new model instance."""
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
return emb
decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim)
encoder = cls.build_encoder(cfg)
decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens)
return Wav2Vec2Seq2SeqModel(encoder, decoder)
@classmethod
def build_encoder(cls, cfg: Wav2Vec2AsrConfig):
return Wav2VecEncoder(cfg)
@classmethod
def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens):
return TransformerDecoder(cfg, tgt_dict, embed_tokens)
def forward(self, **kwargs):
encoder_out = self.encoder(tbc=False, **kwargs)
decoder_out = self.decoder(encoder_out=encoder_out, **kwargs)
return decoder_out
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
class Wav2VecEncoder(FairseqEncoder):
def __init__(self, cfg: Wav2Vec2AsrConfig, tgt_dict=None):
self.apply_mask = cfg.apply_mask
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for both pre-training and here"
)
w2v_args.task.data = cfg.data
task = tasks.setup_task(w2v_args.task)
model = task.build_model(w2v_args.model)
if state is not None and not cfg.no_pretrained_weights:
model.load_state_dict(state["model"], strict=True)
model.remove_pretraining_modules()
super().__init__(task.source_dictionary)
d = w2v_args.model.encoder_embed_dim
self.w2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
if tgt_dict is not None:
self.proj = Linear(d, len(tgt_dict))
elif getattr(cfg, "decoder_embed_dim", d) != d:
self.proj = Linear(d, cfg.decoder_embed_dim)
else:
self.proj = None
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, source, padding_mask, tbc=True, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
"mask": self.apply_mask and self.training,
}
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
x, padding_mask = self.w2v_model.extract_features(**w2v_args)
if tbc:
# B x T x C -> T x B x C
x = x.transpose(0, 1)
x = self.final_dropout(x)
if self.proj:
x = self.proj(x)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask, # B x T
"padding_mask": padding_mask,
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
cfg: Wav2Vec2Seq2SeqConfig,
dictionary,
embed_tokens,
no_encoder_attn=False,
):
super().__init__(dictionary)
self.dropout = cfg.decoder_dropout
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder_embed_dim
self.output_embed_dim = cfg.decoder_embed_dim
self.layerdrop = cfg.decoder_layerdrop
padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
cfg.max_target_positions,
embed_dim,
padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
# TODO: update this when transformer gets converted to dataclass configs
transformer_cfg = copy.deepcopy(cfg)
with open_dict(transformer_cfg):
transformer_cfg.dropout = transformer_cfg.decoder_dropout
transformer_cfg.attention_dropout = (
transformer_cfg.decoder_attention_dropout
)
transformer_cfg.activation_dropout = (
transformer_cfg.decoder_activation_dropout
)
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerDecoderLayer(transformer_cfg, no_encoder_attn)
for _ in range(transformer_cfg.decoder_layers)
]
)
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(
torch.Tensor(len(dictionary), self.output_embed_dim)
)
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
if transformer_cfg.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
prev_output_tokens = prev_output_tokens.long()
x, extra = self.extract_features(
prev_output_tokens, encoder_out, incremental_state
)
x = self.output_layer(x)
return x, extra
def extract_features(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for layer in self.layers:
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, attn, _ = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["encoder_padding_mask"]
if encoder_out is not None
else None,
incremental_state,
self_attn_mask=self.buffered_future_mask(x)
if incremental_state is None
else None,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {"attn": attn, "inner_states": inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
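# Hedged illustration (not part of the original fairseq file): the two helpers above
# only wrap nn.Embedding / nn.Linear with the initialisation scheme used across
# fairseq, e.g. Embedding(num_embeddings=1000, embedding_dim=512, padding_idx=1) or
# Linear(512, 1000, bias=True); the sizes are arbitrary examples, not project values.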
| 34.521886 | 90 | 0.631913 | ["MIT"] | 227514/Supervised-Simultaneous-MT | fairseq/models/wav2vec/wav2vec2_asr.py | 20,506 | Python
"""
Django settings for webappexample project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from dotenv import load_dotenv, find_dotenv
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*dn4z%$4b6-d1+epmb=hd1m3g#$*1*%&%x+4m_8*cvakee%=7q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'auth0login'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webappexample.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webappexample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
ENV_FILE = find_dotenv()
if ENV_FILE:
load_dotenv(ENV_FILE)
# SOCIAL AUTH AUTH0 BACKEND CONFIG
SOCIAL_AUTH_TRAILING_SLASH = False
SOCIAL_AUTH_AUTH0_KEY = os.environ.get('AUTH0_CLIENT_ID')
SOCIAL_AUTH_AUTH0_SECRET = os.environ.get('AUTH0_CLIENT_SECRET')
SOCIAL_AUTH_AUTH0_SCOPE = [
'openid',
'profile'
]
SOCIAL_AUTH_AUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN')
AUDIENCE = None
if os.environ.get('AUTH0_AUDIENCE'):
AUDIENCE = os.environ.get('AUTH0_AUDIENCE')
else:
if SOCIAL_AUTH_AUTH0_DOMAIN:
AUDIENCE = 'https://' + SOCIAL_AUTH_AUTH0_DOMAIN + '/userinfo'
if AUDIENCE:
SOCIAL_AUTH_AUTH0_AUTH_EXTRA_ARGUMENTS = {'audience': AUDIENCE}
AUTHENTICATION_BACKENDS = {
'auth0login.auth0backend.Auth0',
'django.contrib.auth.backends.ModelBackend'
}
LOGIN_URL = '/login/auth0'
LOGIN_REDIRECT_URL = '/dashboard'
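# Hedged example of the .env file consumed by load_dotenv above (not part of the
# original settings); every value is a placeholder:
#   AUTH0_DOMAIN=your-tenant.auth0.com
#   AUTH0_CLIENT_ID=<client id>
#   AUTH0_CLIENT_SECRET=<client secret>
#   AUTH0_AUDIENCE=          # optional; falls back to https://<domain>/userinfo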
| 26.096774 | 91 | 0.710507 | ["MIT"] | alexisluque/auth0-django-web-app | 01-Login/webappexample/settings.py | 4,045 | Python
"""
Author: thangbk2209
Project: Autoscaling
Created: 3/15/19 16:48
Purpose:
"""
import random
import os
import matplotlib
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import tensorflow as tf
from config import *
def draw_time_series(data, title, x_label, y_label, file_name):
plt.plot(data)
plt.title(title)
plt.ylabel(y_label)
plt.xlabel(x_label)
# plt.legend([/], loc='upper left')
plt.savefig(file_name + '.png')
plt.show()
plt.close()
def get_scaler(scaler_method):
if scaler_method == 'min_max_scaler':
return MinMaxScaler(feature_range=(0, 1))
if scaler_method == 'standard_scaler':
return StandardScaler()
else:
        print(f'|-> ERROR: scaler method {scaler_method} is not supported')
def get_activation(activation_name):
if activation_name == 'sigmoid':
return tf.nn.sigmoid
elif activation_name == 'relu':
return tf.nn.relu
elif activation_name == 'tanh':
return tf.nn.tanh
elif activation_name == 'elu':
return tf.nn.elu
else:
print(">>> Can not apply your activation <<<")
def get_optimizer(optimizer_name, lr):
if optimizer_name == 'momentum':
return tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
elif optimizer_name == 'adam':
return tf.train.AdamOptimizer(learning_rate=lr)
elif optimizer_name == 'rmsprop':
return tf.train.RMSPropOptimizer(learning_rate=lr)
else:
print(">>> Can not apply your optimizer <<<")
def early_stopping_decision(array, patience):
value = array[len(array) - patience - 1]
arr = array[len(array) - patience:]
check = 0
for val in arr:
if(val > value):
check += 1
if(check == patience):
return False
else:
return True
def draw_train_loss(loss_train, loss_valid, save_path):
plt.plot(loss_train)
plt.plot(loss_valid)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(save_path)
plt.close()
def average(arr):
return sum(arr) / len(arr)
def create_name(**kwargs):
    key = list(kwargs.keys())  # collect all keys in the kwargs dict
name = []
for _key in key:
value = str(kwargs[_key]).replace('[', '')
value = value.replace(']', '')
_name = f'{_key}_{value}'
name.append(_name)
return '-'.join(name)
def generate_units_size(network_size, layer_size):
assert network_size > 0, 'Network size invalid'
assert layer_size > 0, 'Layer size invalid'
num_units = []
for i in range(network_size):
# num_units.append(random.choice(range(1, layer_size, 1)))
num_units.append(int(layer_size))
if layer_size != 2:
layer_size /= 2
return num_units
def compute_scale_fitness_value(upper_prediction, lower_prediction, real_value):
rate_real_value_in_prediction_interval = 0
num_sample = len(upper_prediction)
for i in range(num_sample):
_real_value = real_value[i][0]
lower_border = lower_prediction[i]
higher_border = upper_prediction[i]
if _real_value <= higher_border and _real_value >= lower_border:
rate_real_value_in_prediction_interval += 1 / num_sample
return rate_real_value_in_prediction_interval
def gen_folder_in_path(path):
path_component = path.split('/')
path_infor = ''
for _path_component in path_component:
path_infor += f'/{_path_component}'
if not os.path.exists(path_infor):
os.mkdir(path_infor)
assert os.path.exists(path_infor), f'Can not generate folder in path {path}'
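# Hedged examples for the helpers above (not part of the original module):
#   create_name(scaler='min_max_scaler', batch_size=8)
#       -> 'scaler_min_max_scaler-batch_size_8'
#   generate_units_size(network_size=3, layer_size=16)
#       -> [16, 8, 4]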
| 26.244755 | 80 | 0.656541 | ["MIT"] | vutriancode/mfea_autoscaling | lib/includes/utility.py | 3,753 | Python
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
import logging
from fvcore.common.config import CfgNode as _CfgNode
from detectron2.utils.file_io import PathManager
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
"""
@classmethod
def _open_cfg(cls, filename):
return PathManager.open(filename, "r")
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
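# Typical usage sketch (illustrative helper, not part of detectron2; the
# config file path is supplied by the caller): obtain the default config,
# merge a project config into it (triggering the version upgrade logic in
# CfgNode.merge_from_file above if needed), then freeze it. freeze() comes
# from the underlying yacs/fvcore CfgNode.
def _example_load_config(config_file: str) -> CfgNode:
    cfg = get_cfg()
    cfg.merge_from_file(config_file)
    cfg.freeze()
    return cfg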
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
::
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
        @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
def check_docstring(func):
if func.__module__.startswith("detectron2."):
assert (
func.__doc__ is not None and "experimental" in func.__doc__.lower()
), f"configurable {func} should be marked experimental"
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
check_docstring(init_func)
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
check_docstring(orig_func)
@functools.wraps(orig_func)
def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
return wrapped
return wrapper
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
if inspect.isfunction(from_config_func):
name = from_config_func.__name__
else:
name = f"{from_config_func.__self__}.from_config"
raise TypeError(f"{name} must take 'cfg' as the first argument!")
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
return ret
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
if len(args) and isinstance(args[0], _CfgNode):
return True
if isinstance(kwargs.pop("cfg", None), _CfgNode):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False
| 35.019455 | 98 | 0.62 | [
"Apache-2.0"
] | 873900204/datasets | detectron2/config/config.py | 9,000 | Python |
#!/usr/bin/env python3
""" This script builds the ASKE deliverable reports as PDFs by combining the
markdown files, using pandoc.
Usage:
./build_report.py <report_name>
"""
import os, sys
from glob import glob
import subprocess as sp
def transform_line(line):
# Transform headers - numbered headings are not supported in Jekyll,
# and we want them for the LaTeX output.
if line.startswith("#"):
header_level = line.split()[0]
line = line.replace(header_level, header_level[:-1])
if line.split()[1][0].isdigit():
line = "# " + " ".join(line.split()[2:])
# Skip captions intended for web
if line.startswith("**Figure"):
line=""
# Transform math expression delimiters
line = line.replace("$$", "$")
# Recursively include markdown files
if "include_relative" in line:
filename = line.split()[2]
with open(filename, "r") as f:
line = "\n" + "".join([transform_line(line) for line in f])
return line
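# Illustrative examples of the transformations above:
#   transform_line("## 1 Introduction\n")  -> "# Introduction"
#   transform_line("where $$x^2$$ holds")  -> "where $x^2$ holds"
# Numbered Jekyll headings lose their number and one '#', and display-math
# delimiters are converted to inline LaTeX dollars.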
def build_report(report_name):
""" Apply appropriate transformations to markdown files
so that they can be compiled properly into a PDF report via
LaTeX. """
with open("index.md", "r") as f:
lines = [transform_line(line) for line in f]
with open("index_transformed.md", "w") as f:
f.writelines(lines)
sp.call(
[
"pandoc",
"--template",
"../pandoc_report_template.tex",
"--pdf-engine",
"lualatex",
"-V",
f"reportname={report_name}",
"-N",
"-f",
"markdown+tex_math_dollars",
"index_transformed.md",
"-o",
f"{report_name}.tex",
]
)
sp.call(["latexmk","-lualatex",f"{report_name}.tex"])
os.remove("index_transformed.md")
if __name__ == "__main__":
report_name = sys.argv[1]
if report_name.endswith("/"):
report_name = report_name[:-1]
cwd = os.getcwd()
os.chdir(report_name)
build_report(report_name)
os.rename(report_name+".pdf", "../"+report_name+".pdf")
os.chdir(cwd)
| 27.564103 | 76 | 0.583256 | [
"Apache-2.0"
] | mikiec84/automates | documentation/deliverable_reports/build_report.py | 2,150 | Python |
#
# PySNMP MIB module Juniper-MPLS-CONF (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-MPLS-CONF
# Produced by pysmi-0.3.4 at Wed May 1 14:03:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint")
juniAgents, = mibBuilder.importSymbols("Juniper-Agents", "juniAgents")
AgentCapabilities, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, TimeTicks, Counter32, NotificationType, Integer32, Bits, ObjectIdentity, Gauge32, Counter64, Unsigned32, MibIdentifier, iso, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "TimeTicks", "Counter32", "NotificationType", "Integer32", "Bits", "ObjectIdentity", "Gauge32", "Counter64", "Unsigned32", "MibIdentifier", "iso", "IpAddress")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
juniMplsAgent = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51))
juniMplsAgent.setRevisions(('2004-06-11 21:36', '2003-01-24 18:34', '2002-11-04 15:47', '2001-12-05 21:41',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: juniMplsAgent.setRevisionsDescriptions(('Added agent capabilities definitions for MPLS-LSR-STD-MIB.', 'Replaced Unisphere names with Juniper names. Added IP TTL Propagate object to the MPLS scalar group.', 'Added RowStatus support to the minor layer and the tunnel profile groups.', 'The initial release of this management information module.',))
if mibBuilder.loadTexts: juniMplsAgent.setLastUpdated('200406231509Z')
if mibBuilder.loadTexts: juniMplsAgent.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: juniMplsAgent.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford, MA 01886-3146 USA Tel: +1 978 589 5800 E-mail: [email protected]')
if mibBuilder.loadTexts: juniMplsAgent.setDescription('The agent capabilities definitions for the MultiProtocol Label Switching (MPLS) component of the SNMP agent in the Juniper E-series family of products.')
juniMplsAgentV1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV1 = juniMplsAgentV1.setProductRelease('Version 1 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. This version of the MPLS component was supported in\n JUNOSe 4.0 system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV1 = juniMplsAgentV1.setStatus('obsolete')
if mibBuilder.loadTexts: juniMplsAgentV1.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe. These capabilities became obsolete when new RowStatus objects were added to the tables in juniMplsMinorLayerConfGroup and juniMplsTunnelProfileConfGroup.')
juniMplsAgentV2 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV2 = juniMplsAgentV2.setProductRelease('Version 2 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. This version of the MPLS component was supported in\n JUNOSe 4.1 and subsequent 4.x system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV2 = juniMplsAgentV2.setStatus('obsolete')
if mibBuilder.loadTexts: juniMplsAgentV2.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe. These capabilities became obsolete when the IP TTL Propagate object was added to the MPLS scalar group.')
juniMplsAgentV3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 3))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV3 = juniMplsAgentV3.setProductRelease('Version 3 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. This version of the MPLS component is supported in\n JUNOSe 5.0 and subsequent system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV3 = juniMplsAgentV3.setStatus('obsolete')
if mibBuilder.loadTexts: juniMplsAgentV3.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe. These capabilities became obsolete when some of the objects in that MIB became obsolete.')
juniMplsAgentV4 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 4))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV4 = juniMplsAgentV4.setProductRelease('Version 4 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. This version of the MPLS component is supported in\n JUNOSe 6.0 and subsequent system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV4 = juniMplsAgentV4.setStatus('obsolete')
if mibBuilder.loadTexts: juniMplsAgentV4.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe. These capabilities became obsolete when the MPLS-LSR-STD-MIB support is added.')
juniMplsAgentV5 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 5))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV5 = juniMplsAgentV5.setProductRelease('Version 5 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. This version of the MPLS component is supported in\n JUNOSe 6.1 and subsequent system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV5 = juniMplsAgentV5.setStatus('obsolete')
if mibBuilder.loadTexts: juniMplsAgentV5.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe.')
juniMplsAgentV6 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 51, 6))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV6 = juniMplsAgentV6.setProductRelease('Version 6 of the MultiProtocol Label Switching (MPLS) component of the\n JUNOSe SNMP agent. This version of the MPLS component is supported in\n JUNOSe 7.1 and subsequent system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniMplsAgentV6 = juniMplsAgentV6.setStatus('current')
if mibBuilder.loadTexts: juniMplsAgentV6.setDescription('The MIB supported by the SNMP agent for the MPLS application in JUNOSe.')
mibBuilder.exportSymbols("Juniper-MPLS-CONF", juniMplsAgentV1=juniMplsAgentV1, juniMplsAgentV6=juniMplsAgentV6, juniMplsAgentV3=juniMplsAgentV3, juniMplsAgentV2=juniMplsAgentV2, juniMplsAgentV5=juniMplsAgentV5, PYSNMP_MODULE_ID=juniMplsAgent, juniMplsAgentV4=juniMplsAgentV4, juniMplsAgent=juniMplsAgent)
| 120.918033 | 477 | 0.759219 | [
"Apache-2.0"
] | agustinhenze/mibs.snmplabs.com | pysnmp-with-texts/Juniper-MPLS-CONF.py | 7,376 | Python |
# -*- coding:utf-8; python-indent:2; indent-tabs-mode:nil -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for optimizing pytd syntax trees.
pytd files come from various sources, and are typically redundant (duplicate
functions, different signatures saying the same thing, overlong type
disjunctions). The Visitors in this file remove various forms of these
redundancies.
"""
import collections
import logging
from pytype import utils
from pytype.pytd import abc_hierarchy
from pytype.pytd import booleq
from pytype.pytd import mro
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd import type_match
from pytype.pytd import visitors
import six
log = logging.getLogger(__name__)
class RenameUnknowns(visitors.Visitor):
"""Give unknowns that map to the same set of concrete types the same name."""
def __init__(self, mapping):
super(RenameUnknowns, self).__init__()
self.name_to_cls = {name: hash(cls) for name, cls in mapping.items()}
self.cls_to_canonical_name = {
cls: name for name, cls in self.name_to_cls.items()}
def VisitClassType(self, node):
if node.name.startswith("~unknown"):
return pytd.ClassType(
self.cls_to_canonical_name[self.name_to_cls[node.name]], None)
else:
return node
class RemoveDuplicates(visitors.Visitor):
"""Remove duplicate function signatures.
For example, this transforms
def f(x: int) -> float
def f(x: int) -> float
to
def f(x: int) -> float
In order to be removed, a signature has to be exactly identical to an
existing one.
"""
def VisitFunction(self, node):
# We remove duplicates, but keep existing entries in the same order.
return node.Replace(
signatures=tuple(pytd_utils.OrderedSet(node.signatures)))
class RemoveRedundantSignatures(visitors.Visitor):
"""Remove duplicate function signatures.
For example, this transforms
def f(x: int) -> float
def f(x: int or float) -> float
to
def f(x: int or float) -> float
In order to be removed, a signature has to be "contained" (a subclass of)
an existing one.
"""
def __init__(self, hierarchy):
super(RemoveRedundantSignatures, self).__init__()
self.match = type_match.TypeMatch(hierarchy.GetSuperClasses(),
any_also_is_bottom=False)
self.subst = {}
def EnterClass(self, cls):
# Preserve the identify of each type parameter, and don't
# allow them to match against anything by themselves.
self.subst = {p.type_param: pytd.NamedType("$" + p.name)
for p in cls.template}
def LeaveClass(self, _):
self.subst = {}
def VisitFunction(self, node):
new_signatures = []
matches = set()
# We keep track of which signature matched which other signatures, purely
# for optimization - that way we don't have to query the reverse direction.
for i, s1 in enumerate(node.signatures):
for j, s2 in enumerate(node.signatures):
if i != j and (j, i) not in matches:
if s1.exceptions or s2.exceptions:
# We don't support matching of exceptions.
continue
if s1.template:
# type_match doesn't support polymorphic functions on the
# left side yet.
continue
if self.match.match(s1, s2, self.subst) == booleq.TRUE:
matches.add((i, j))
break
else:
new_signatures.append(s1)
return node.Replace(signatures=tuple(new_signatures))
class SimplifyUnions(visitors.Visitor):
"""Remove duplicate or redundant entries in union types.
For example, this transforms
a: int or int
b: int or ?
c: int or (int or float)
to
a: int
b: ?
c: int or float
"""
def VisitUnionType(self, union):
return pytd_utils.JoinTypes(union.type_list)
class _ReturnsAndExceptions(object):
"""Mutable class for collecting return types and exceptions of functions.
The collecting is stable: Items are kept in the order in which they were
encountered.
Attributes:
return_types: Return types seen so far.
exceptions: Exceptions seen so far.
"""
def __init__(self):
self.return_types = []
self.exceptions = []
def Update(self, signature):
"""Add the return types / exceptions of a signature to this instance."""
if signature.return_type not in self.return_types:
self.return_types.append(signature.return_type)
self.exceptions.extend(exception
for exception in signature.exceptions
if exception not in self.exceptions)
class CombineReturnsAndExceptions(visitors.Visitor):
"""Group function signatures that only differ in exceptions or return values.
For example, this transforms
def f(x: int) -> float:
raise OverflowError()
def f(x: int) -> int:
raise IndexError()
to
def f(x: int) -> float or int:
raise IndexError()
raise OverflowError()
"""
def _GroupByArguments(self, signatures):
"""Groups signatures by arguments.
Arguments:
signatures: A list of function signatures (Signature instances).
Returns:
A dictionary mapping signatures (without return and exceptions) to
a tuple of return values and exceptions.
"""
groups = collections.OrderedDict() # Signature -> ReturnsAndExceptions
for sig in signatures:
stripped_signature = sig.Replace(return_type=None, exceptions=None)
ret = groups.get(stripped_signature)
if not ret:
ret = _ReturnsAndExceptions()
groups[stripped_signature] = ret
ret.Update(sig)
return groups
def VisitFunction(self, f):
"""Merge signatures of a function.
This groups signatures by arguments and then for each group creates a
single signature that joins the return values / exceptions using "or".
Arguments:
f: A pytd.Function instance
Returns:
Function with simplified / combined signatures.
"""
groups = self._GroupByArguments(f.signatures)
new_signatures = []
for stripped_signature, ret_exc in groups.items():
ret = pytd_utils.JoinTypes(ret_exc.return_types)
exc = tuple(ret_exc.exceptions)
new_signatures.append(
stripped_signature.Replace(return_type=ret, exceptions=exc)
)
return f.Replace(signatures=tuple(new_signatures))
class CombineContainers(visitors.Visitor):
"""Change unions of containers to containers of unions.
For example, this transforms
list[int] or list[float]
to
list[int or float]
.
"""
_CONTAINER_NAMES = {
pytd.TupleType: ("__builtin__.tuple", "typing.Tuple"),
pytd.CallableType: ("typing.Callable",),
}
def _key(self, t):
if isinstance(t, (pytd.CallableType, pytd.TupleType)):
return (t.base_type, len(t.parameters))
else:
return t.base_type
def _should_merge(self, pytd_type, union):
"""Determine whether pytd_type values in the union should be merged.
If the union contains the homogeneous flavor of pytd_type (e.g.,
GenericType(base_type=tuple) when pytd_type is TupleType), or pytd_type
values of different lengths, we want to turn all of the pytd_type values
into homogeneous ones so that they can be merged into a single container.
Args:
pytd_type: The pytd type, either TupleType or CallableType.
union: a pytd.UnionType
Returns:
True if the pytd_type values should be merged, False otherwise.
"""
names = self._CONTAINER_NAMES[pytd_type]
length = None
for t in union.type_list:
if isinstance(t, pytd_type):
if length is None:
length = len(t.parameters)
elif length != len(t.parameters):
return True
elif (isinstance(t, pytd.GenericType) and
t.base_type.name in names):
return True
return False
def VisitUnionType(self, union):
"""Push unions down into containers.
This collects similar container types in unions and merges them into
single instances with the union type pushed down to the element_type level.
Arguments:
union: A pytd.Union instance. Might appear in a parameter, a return type,
a constant type, etc.
Returns:
A simplified pytd.Union.
"""
if not any(isinstance(t, pytd.GenericType) for t in union.type_list):
# Optimization: If we're not going to change anything, return original.
return union
union = pytd_utils.JoinTypes(union.type_list) # flatten
if not isinstance(union, pytd.UnionType):
union = pytd.UnionType((union,))
merge_tuples = self._should_merge(pytd.TupleType, union)
merge_callables = self._should_merge(pytd.CallableType, union)
if merge_tuples or merge_callables:
type_list = []
for t in union.type_list:
if merge_tuples and isinstance(t, pytd.TupleType):
t = pytd.GenericType(base_type=t.base_type,
parameters=(pytd.UnionType(t.parameters),))
elif merge_callables and isinstance(t, pytd.CallableType):
t = pytd.GenericType(base_type=t.base_type,
parameters=(pytd.AnythingType(), t.ret))
type_list.append(t)
union = union.Replace(type_list=tuple(type_list))
collect = {}
has_redundant_base_types = False
for t in union.type_list:
if isinstance(t, pytd.GenericType):
key = self._key(t)
if key in collect:
has_redundant_base_types = True
collect[key] = tuple(
pytd_utils.JoinTypes([p1, p2])
for p1, p2 in zip(collect[key], t.parameters))
else:
collect[key] = t.parameters
if not has_redundant_base_types:
return union
result = pytd.NothingType()
done = set()
for t in union.type_list:
if isinstance(t, pytd.GenericType):
key = self._key(t)
if key in done:
continue # already added
parameters = collect[key]
add = t.Replace(parameters=tuple(p.Visit(CombineContainers())
for p in parameters))
done.add(key)
else:
add = t
result = pytd_utils.JoinTypes([result, add])
return result
class Factorize(visitors.Visitor):
"""Opposite of ExpandSignatures. Factorizes cartesian products of functions.
For example, this transforms
def f(x: int, y: int)
def f(x: int, y: float)
def f(x: float, y: int)
def f(x: float, y: float)
to
def f(x: int or float, y: int or float)
"""
def _GroupByOmittedArg(self, signatures, i):
"""Group functions that are identical if you ignore one of the arguments.
Arguments:
signatures: A list of function signatures
i: The index of the argument to ignore during comparison.
Returns:
A list of tuples (signature, types). "signature" is a signature with
argument i omitted, "types" is the list of types that argument was
found to have. signatures that don't have argument i are represented
as (original, None).
"""
groups = collections.OrderedDict()
for sig in signatures:
if i >= len(sig.params):
# We can't omit argument i, because this signature has too few
# arguments. Represent this signature as (original, None).
groups[sig] = None
continue
if sig.params[i].mutated_type is not None:
# We can't group mutable parameters. Leave this signature alone.
groups[sig] = None
continue
# Set type of parameter i to None
params = list(sig.params)
param_i = params[i]
params[i] = param_i.Replace(type=None)
stripped_signature = sig.Replace(params=tuple(params))
existing = groups.get(stripped_signature)
if existing:
existing.append(param_i.type)
else:
groups[stripped_signature] = [param_i.type]
return groups.items()
def VisitFunction(self, f):
"""Shrink a function, by factorizing cartesian products of arguments.
Greedily groups signatures, looking at the arguments from left to right.
This algorithm is *not* optimal. But it does the right thing for the
typical cases.
Arguments:
f: An instance of pytd.Function. If this function has more
than one signature, we will try to combine some of these signatures by
introducing union types.
Returns:
A new, potentially optimized, instance of pytd.Function.
"""
max_argument_count = max(len(s.params) for s in f.signatures)
signatures = f.signatures
for i in six.moves.xrange(max_argument_count):
new_sigs = []
for sig, types in self._GroupByOmittedArg(signatures, i):
if types:
# One or more options for argument <i>:
new_params = list(sig.params)
new_params[i] = sig.params[i].Replace(
type=pytd_utils.JoinTypes(types))
sig = sig.Replace(params=tuple(new_params))
new_sigs.append(sig)
else:
# Signature doesn't have argument <i>, so we store the original:
new_sigs.append(sig)
signatures = new_sigs
return f.Replace(signatures=tuple(signatures))
class ApplyOptionalArguments(visitors.Visitor):
"""Removes functions that are instances of a more specific case.
For example, this reduces
def f(x: int, ...) # [1]
def f(x: int, y: int) # [2]
to just
def f(x: int, ...)
Because "..." makes it possible to pass any additional arguments to [1],
it encompasses both declarations, hence we can omit [2].
"""
def _HasShorterVersion(self, sig, optional_arg_sigs):
"""Find a shorter signature with optional arguments for a longer signature.
Arguments:
sig: The function signature we'd like to shorten
optional_arg_sigs: A set of function signatures with optional arguments
that will be matched against sig.
Returns:
True if there is a shorter signature that generalizes sig, but is not
identical to sig.
"""
param_count = len(sig.params)
if not sig.has_optional:
param_count += 1 # also consider f(x, y, ...) for f(x, y)
for i in six.moves.xrange(param_count):
if sig.params[0:i] in optional_arg_sigs:
return True
return False
def VisitFunction(self, f):
"""Remove all signatures that have a shorter version.
We use signatures with optional argument (has_opt=True) as template
and then match all signatures against those templates, removing those
that match.
Arguments:
f: An instance of pytd.Function
Returns:
A potentially simplified instance of pytd.Function.
"""
# Set of signatures that can replace longer ones. Only used for matching,
# hence we can use an unordered data structure.
optional_arg_sigs = frozenset(s.params
for s in f.signatures
if s.has_optional)
new_signatures = (s for s in f.signatures
if not self._HasShorterVersion(s, optional_arg_sigs))
return f.Replace(signatures=tuple(new_signatures))
class SuperClassHierarchy(object):
"""Utility class for optimizations working with superclasses."""
def __init__(self, superclasses):
self._superclasses = superclasses
self._subclasses = utils.invert_dict(self._superclasses)
def GetSuperClasses(self):
return self._superclasses
def _CollectSuperclasses(self, type_name, collect):
"""Recursively collect super classes for a type.
Arguments:
type_name: A string, the type's name.
collect: A set() of strings, modified to contain all superclasses.
"""
collect.add(type_name)
superclasses = [name
for name in self._superclasses.get(type_name, [])]
# The superclasses might have superclasses of their own, so recurse.
for superclass in superclasses:
self._CollectSuperclasses(superclass, collect)
def ExpandSuperClasses(self, t):
"""Generate a list of all (known) superclasses for a type.
Arguments:
t: A type name. E.g. "int".
Returns:
A set of types. This set includes t as well as all its superclasses. For
example, this will return "bool", "int" and "object" for "bool".
"""
superclasses = set()
self._CollectSuperclasses(t, superclasses)
return superclasses
def ExpandSubClasses(self, t):
"""Generate a set of all (known) subclasses for a type.
Arguments:
t: A type. E.g. NamedType("int").
Returns:
A set of types. This set includes t as well as all its subclasses. For
example, this will return "int" and "bool" for "int".
"""
queue = [t]
seen = set()
while queue:
item = queue.pop()
if item not in seen:
seen.add(item)
queue.extend(self._subclasses[item])
return seen
def HasSubClassInSet(self, cls, known):
"""Queries whether a subclass of a type is present in a given set."""
return any(sub in known
for sub in self._subclasses[cls])
def HasSuperClassInSet(self, cls, known):
"""Queries whether a superclass of a type is present in a given set."""
return any(sub in known
for sub in self._superclasses[cls])
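# Illustrative sketch (mapping made up): with superclasses
# {"bool": ["int"], "int": ["object"], "object": []},
# SuperClassHierarchy(...).ExpandSuperClasses("bool") walks the mapping
# transitively and returns {"bool", "int", "object"}. The visitors below use
# this to decide which members of a union are redundant.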
class SimplifyUnionsWithSuperclasses(visitors.Visitor):
"""Simplify Unions with superclasses.
E.g., this changes
int or bool
to
int
since bool is a subclass of int.
(Interpreting types as "sets of values", this simplification is sound since
A union B = A, if B is a subset of A.)
"""
def __init__(self, hierarchy):
super(SimplifyUnionsWithSuperclasses, self).__init__()
self.hierarchy = hierarchy
def VisitUnionType(self, union):
c = collections.Counter()
for t in set(union.type_list):
# TODO(rechen): How can we make this work with GenericType?
if isinstance(t, pytd.GENERIC_BASE_TYPE):
c += collections.Counter(self.hierarchy.ExpandSubClasses(str(t)))
    # Below, c[str(t)] can be zero - that's the default for non-existent items
# in collections.Counter. It'll happen for types that are not
# instances of GENERIC_BASE_TYPE, like container types.
new_type_list = [t for t in union.type_list if c[str(t)] <= 1]
return pytd_utils.JoinTypes(new_type_list)
class FindCommonSuperClasses(visitors.Visitor):
"""Find common super classes. Optionally also uses abstract base classes.
E.g., this changes
def f(x: list or tuple, y: frozenset or set) -> int or float
to
def f(x: Sequence, y: Set) -> Real
"""
def __init__(self, hierarchy):
super(FindCommonSuperClasses, self).__init__()
self.hierarchy = hierarchy
def VisitUnionType(self, union):
"""Given a union type, try to find a simplification by using superclasses.
This is a lossy optimization that tries to map a list of types to a common
base type. For example, int and bool are both base classes of int, so it
would convert "int or bool" to "int".
Arguments:
union: A union type.
Returns:
A simplified type, if available.
"""
intersection = self.hierarchy.ExpandSuperClasses(str(union.type_list[0]))
for t in union.type_list[1:]:
intersection.intersection_update(
self.hierarchy.ExpandSuperClasses(str(t)))
# Remove "redundant" superclasses, by removing everything from the tree
# that's not a leaf. I.e., we don't need "object" if we have more
# specialized types.
new_type_list = tuple(
pytd.NamedType(cls) for cls in intersection
if not self.hierarchy.HasSubClassInSet(cls, intersection))
if not new_type_list:
return union # if types don't intersect, leave them alone
return pytd_utils.JoinTypes(new_type_list)
class CollapseLongUnions(visitors.Visitor):
"""Shortens long unions to object (or "?").
Poor man's version of FindCommonSuperClasses. Shorten types like
"str or unicode or int or float or list" to just "object" or "?".
Additionally, if the union already contains at least one "object", we also
potentially replace the entire union with just "object".
Attributes:
max_length: The maximum number of types to allow in a union. If there are
more types than this, it is shortened.
"""
def __init__(self, max_length=7):
assert isinstance(max_length, six.integer_types)
super(CollapseLongUnions, self).__init__()
self.generic_type = pytd.AnythingType()
self.max_length = max_length
def VisitUnionType(self, union):
if len(union.type_list) > self.max_length:
return self.generic_type
elif self.generic_type in union.type_list:
return self.generic_type
else:
return union
class AdjustGenericType(visitors.Visitor):
"""Changes the generic type from "object" to "Any"."""
def __init__(self):
super(AdjustGenericType, self).__init__()
self.old_generic_type = pytd.ClassType("__builtin__.object")
self.new_generic_type = pytd.AnythingType()
def VisitClassType(self, t):
if t == self.old_generic_type:
return self.new_generic_type
else:
return t
class AdjustReturnAndConstantGenericType(visitors.Visitor):
"""Changes "object" to "Any" in return and constant types."""
def VisitSignature(self, sig):
return sig.Replace(return_type=sig.return_type.Visit(AdjustGenericType()))
def VisitConstant(self, c):
return c.Replace(type=c.type.Visit(AdjustGenericType()))
class AddInheritedMethods(visitors.Visitor):
"""Copy methods and constants from base classes into their derived classes.
E.g. this changes
class Bar:
[methods and constants of Bar]
class Foo(Bar):
[methods and constants of Foo]
to
class Bar:
[methods and constants of Bar]
class Foo(Bar):
[methods and constants of Bar]
[methods and constants of Foo]
.
This is not an optimization by itself, but it can help with other
optimizations (like signature merging), and is also useful as preprocessor
for type matching.
"""
def VisitLateType(self, _):
raise NotImplementedError("Can't use AddInheritedMethods with LateType.")
def VisitClass(self, cls):
"""Add superclass methods and constants to this Class."""
if any(base for base in cls.parents if isinstance(base, pytd.NamedType)):
raise AssertionError("AddInheritedMethods needs a resolved AST")
# Filter out only the types we can reason about.
# TODO(kramm): Do we want handle UnionTypes and GenericTypes at some point?
bases = [base.cls
for base in cls.parents
if isinstance(base, pytd.ClassType)]
# Don't pull in methods that are named the same as existing methods in
# this class, local methods override parent class methods.
names = {m.name for m in cls.methods} | {c.name for c in cls.constants}
# TODO(kramm): This should do full-blown MRO.
adjust_self = visitors.AdjustSelf(force=True)
adjust_self.class_types.append(visitors.ClassAsType(cls))
new_methods = list(cls.methods)
for base in bases:
for m in base.methods:
if m.name not in names:
new_methods.append(m.Visit(adjust_self))
new_constants = list(cls.constants)
for base in bases:
for c in base.constants:
if c.name not in names:
new_constants.append(c)
return cls.Replace(methods=tuple(new_methods),
constants=tuple(new_constants))
class RemoveInheritedMethods(visitors.Visitor):
"""Removes methods from classes if they also exist in their superclass.
E.g. this changes
class A:
def f(self, y: int) -> bool
class B(A):
def f(self, y: int) -> bool
to
class A:
def f(self, y: int) -> bool
class B(A):
pass
.
"""
def __init__(self):
super(RemoveInheritedMethods, self).__init__()
self.class_to_stripped_signatures = {}
def _StrippedSignatures(self, t):
"""Given a class, list method name + signature without "self".
Args:
t: A pytd.TYPE.
Returns:
A set of name + signature tuples, with the self parameter of the
signature removed.
"""
if not isinstance(t, pytd.ClassType):
# For union types, generic types etc., inheritance is more complicated.
# Be conservative and default to not removing methods inherited from
# those.
return {}
stripped_signatures = {}
for method in t.cls.methods:
for sig in method.signatures:
if (sig.params and
sig.params[0].name == "self" and
isinstance(sig.params[0].type, pytd.ClassType)):
stripped_signatures[method.name] = (
sig.Replace(params=sig.params[1:]), method.is_abstract)
return stripped_signatures
def _FindNameAndSig(self, classes, name, sig):
"""Find a tuple(name, signature) in all methods of a type/class."""
if classes:
t = classes[0]
classes = classes[1:]
if t not in self.class_to_stripped_signatures:
self.class_to_stripped_signatures[t] = self._StrippedSignatures(t)
if name in self.class_to_stripped_signatures[t]:
return sig == self.class_to_stripped_signatures[t][name]
return self._FindNameAndSig(classes, name, sig)
return False
def _MaybeRemoveSignature(self, name, sig, is_abstract):
"""Visit a Signature and return None if we can remove it."""
if (not sig.params or
sig.params[0].name != "self" or
not isinstance(sig.params[0].type, pytd.ClassType)):
return sig # Not a method
cls = sig.params[0].type.cls
if cls is None:
# TODO(kramm): Remove once pytype stops generating ClassType(name, None).
return sig
try:
if self._FindNameAndSig(
mro.GetBasesInMRO(cls), name,
(sig.Replace(params=sig.params[1:]), is_abstract)):
return None # remove (see VisitFunction)
except mro.MROError:
return sig
return sig
def _MaybeDeleteFunction(self, f):
"""Visit a Function and return None if we can remove it."""
signatures = tuple(self._MaybeRemoveSignature(f.name, sig, f.is_abstract)
for sig in f.signatures)
if any(signatures):
if signatures.count(None):
return f.Replace(
signatures=tuple(s for s in signatures if s is not None))
else:
return f # unchanged
else:
return None # delete function
def VisitClass(self, cls):
methods = tuple(self._MaybeDeleteFunction(m) for m in cls.methods)
if methods.count(None):
return cls.Replace(methods=tuple(m for m in methods if m is not None))
else:
return cls # unchanged
class PullInMethodClasses(visitors.Visitor):
"""Simplifies classes with only a __call__ function to just a method.
This transforms
class Foo:
m: Bar
class Bar:
def __call__(self: Foo, ...)
to
class Foo:
def m(self, ...)
.
"""
def __init__(self):
super(PullInMethodClasses, self).__init__()
self._module = None
self._total_count = collections.defaultdict(int)
self._processed_count = collections.defaultdict(int)
def _MaybeLookup(self, t):
if isinstance(t, pytd.NamedType):
try:
return self._module.Lookup(t.name)
except KeyError:
return None
elif isinstance(t, pytd.ClassType):
return t.cls
else:
return None
def _HasSelf(self, sig):
"""True if a signature has a self parameter.
This only checks for the name, since the type can be too many different
things (type of the method, type of the parent class, object, unknown etc.)
and doesn't carry over to the simplified version, anyway.
Arguments:
sig: Function signature (instance of pytd.Signature)
Returns:
True if the signature has "self".
"""
return sig.params and sig.params[0].name == "self"
def _LookupIfSimpleCall(self, t):
"""Looks up the type if it has only one method, "__call__"."""
if not isinstance(t, (pytd.NamedType, pytd.ClassType)):
# We only do this for simple types.
return None
cls = self._MaybeLookup(t)
if not isinstance(cls, pytd.Class):
# This is not a class or it doesn't exist, so assume it's not a method.
return None
if [f.name for f in cls.methods] != ["__call__"]:
return None
method, = cls.methods
return cls if all(self._HasSelf(sig) for sig in method.signatures) else None
def _CanDelete(self, cls):
"""Checks whether this class can be deleted.
    Returns whether all occurrences of this class as a type were due to
constants we removed.
Arguments:
cls: A pytd.Class.
Returns:
True if we can delete this class.
"""
if not self._processed_count[cls.name]:
# Leave standalone classes alone. E.g. the pytd files in
# pytd/builtins/ defines classes not used by anything else.
return False
return self._processed_count[cls.name] == self._total_count[cls.name]
def EnterTypeDeclUnit(self, module):
    # Since modules are hierarchical, we enter TypeDeclUnits multiple times,
# but we only want to record the top-level one.
if not self._module:
self._module = module
def VisitTypeDeclUnit(self, unit):
return unit.Replace(classes=tuple(c for c in unit.classes
if not self._CanDelete(c)))
def VisitClassType(self, t):
self._total_count[t.name] += 1
return t
def VisitNamedType(self, t):
self._total_count[t.name] += 1
return t
def VisitClass(self, cls):
"""Visit a class, and change constants to methods where possible."""
new_constants = []
new_methods = list(cls.methods)
adjust_self = visitors.AdjustSelf(force=True)
adjust_self.class_types.append(visitors.ClassAsType(cls))
for const in cls.constants:
c = self._LookupIfSimpleCall(const.type)
if c:
signatures = c.methods[0].signatures
self._processed_count[c.name] += 1
new_method = pytd.Function(const.name, signatures, c.methods[0].kind)
new_methods.append(new_method.Visit(adjust_self))
else:
new_constants.append(const) # keep
return cls.Replace(constants=tuple(new_constants),
methods=tuple(new_methods))
class AbsorbMutableParameters(visitors.Visitor):
"""Converts mutable parameters to unions. This is lossy.
For example, this will change
def f(x: list[int]):
x = list[int or float]
to
def f(x: list[int] or list[int or float])
.
(Use optimize.CombineContainers to then change x to list[int or float].)
This also works for methods - it will then potentially change the type of
"self". The resulting AST is temporary and needs careful handling.
"""
def VisitParameter(self, p):
if p.mutated_type is None:
return p
else:
return p.Replace(type=pytd_utils.JoinTypes([p.type, p.mutated_type]),
mutated_type=None)
class SimplifyContainers(visitors.Visitor):
"""Simplifies containers whose type parameters are all Any.
For example, this will change
def f() -> List[any]
to
def f() -> list
Note that we don't simplify TupleType or CallableType, since they have
variable-length parameters, and the parameter length is meaningful even when
the parameters are all Any.
"""
def _Simplify(self, t):
if all(isinstance(p, pytd.AnythingType) for p in t.parameters):
return t.base_type
else:
return t
def VisitGenericType(self, t):
return self._Simplify(t)
class TypeParameterScope(visitors.Visitor):
"""Common superclass for optimizations that track type parameters."""
def __init__(self):
super(TypeParameterScope, self).__init__()
self.type_params_stack = [{}]
def EnterClass(self, cls):
new = self.type_params_stack[-1].copy()
new.update({t.type_param: cls for t in cls.template})
self.type_params_stack.append(new)
def EnterSignature(self, sig):
new = self.type_params_stack[-1].copy()
new.update({t.type_param: sig for t in sig.template})
self.type_params_stack.append(new)
def IsClassTypeParameter(self, type_param):
class_or_sig = self.type_params_stack[-1].get(type_param)
return isinstance(class_or_sig, pytd.Class)
def IsFunctionTypeParameter(self, type_param):
class_or_sig = self.type_params_stack[-1].get(type_param)
return isinstance(class_or_sig, pytd.Signature)
def LeaveClass(self, _):
self.type_params_stack.pop()
def LeaveSignature(self, _):
self.type_params_stack.pop()
class MergeTypeParameters(TypeParameterScope):
"""Remove all function type parameters in a union with a class type param.
For example, this will change
class A(typing.Generic(T)):
def append(self, T or T2) -> T2
to
class A(typing.Generic(T)):
def append(self, T) -> T
.
Use this visitor after using AbsorbMutableParameters.
As another example, the combination of AbsorbMutableParameters and
MergeTypeParameters transforms
class list(typing.Generic(T)):
def append(self, v: T2) -> NoneType:
self = T or T2
to
class list(typing.Generic(T')):
def append(self, V:T') -> NoneType
by creating a *new* template variable T' that propagates the
mutations to the outermost level (in this example, T' = T or T2)
"""
def __init__(self):
super(MergeTypeParameters, self).__init__()
self.type_param_union = None
def _AppendNew(self, l1, l2):
"""Appends all items to l1 that are not in l2."""
# l1 and l2 are small (2-3 elements), so just use two loops.
for e2 in l2:
if not any(e1 is e2 for e1 in l1):
l1.append(e2)
def EnterSignature(self, node):
# Necessary because TypeParameterScope also defines this function
super(MergeTypeParameters, self).EnterSignature(node)
assert self.type_param_union is None
self.type_param_union = collections.defaultdict(list)
def LeaveSignature(self, node):
# Necessary because TypeParameterScope also defines this function
super(MergeTypeParameters, self).LeaveSignature(node)
self.type_param_union = None
def VisitUnionType(self, u):
type_params = [t for t in u.type_list if isinstance(t, pytd.TypeParameter)]
for t in type_params:
if self.IsFunctionTypeParameter(t):
self._AppendNew(self.type_param_union[t.name], type_params)
return u
def _AllContaining(self, type_param, seen=None):
"""Gets all type parameters that are in a union with the passed one."""
seen = seen or set()
result = [type_param]
for other in self.type_param_union[type_param.name]:
if other in seen:
continue # break cycles
seen.add(other)
self._AppendNew(result, self._AllContaining(other, seen) or [other])
return result
def _ReplaceByOuterIfNecessary(self, item, substitutions):
"""Potentially replace a function type param with a class type param.
Args:
item: A pytd.TemplateItem
substitutions: A dictionary to update with what we replaced.
Returns:
Either [item] or [].
"""
containing_union = self._AllContaining(item.type_param)
if not containing_union:
return [item]
class_type_parameters = [type_param
for type_param in containing_union
if self.IsClassTypeParameter(type_param)]
if class_type_parameters:
substitutions[item.type_param] = pytd_utils.JoinTypes(
class_type_parameters)
return []
else:
# It's a function type parameter that appears in a union with other
# function type parameters.
# TODO(kramm): We could merge those, too.
return [item]
def VisitSignature(self, sig):
new_template = []
substitutions = {k: k for k in self.type_params_stack[-1]}
for item in sig.template:
new_template += self._ReplaceByOuterIfNecessary(item, substitutions)
if sig.template == new_template:
return sig # Nothing changed.
else:
return sig.Replace(template=tuple(new_template)).Visit(
visitors.ReplaceTypeParameters(substitutions)).Visit(SimplifyUnions())
def Optimize(node,
builtins=None,
lossy=False,
use_abcs=False,
max_union=7,
remove_mutable=False,
can_do_lookup=True):
"""Optimize a PYTD tree.
Tries to shrink a PYTD tree by applying various optimizations.
Arguments:
node: A pytd node to be optimized. It won't be modified - this function
will return a new node.
builtins: Definitions of all of the external types in node.
lossy: Allow optimizations that change the meaning of the pytd.
use_abcs: Use abstract base classes to represent unions like
e.g. "float or int" as "Real".
max_union: How many types we allow in a union before we simplify
it to just "object".
remove_mutable: Whether to simplify mutable parameters to normal
parameters.
can_do_lookup: True: We're either allowed to try to resolve NamedType
instances in the AST, or the AST is already resolved. False: Skip any
optimizations that would require NamedTypes to be resolved.
Returns:
An optimized node.
"""
node = node.Visit(RemoveDuplicates())
node = node.Visit(SimplifyUnions())
node = node.Visit(CombineReturnsAndExceptions())
node = node.Visit(Factorize())
node = node.Visit(ApplyOptionalArguments())
node = node.Visit(CombineContainers())
node = node.Visit(SimplifyContainers())
if builtins:
superclasses = builtins.Visit(visitors.ExtractSuperClassesByName())
superclasses.update(node.Visit(visitors.ExtractSuperClassesByName()))
if use_abcs:
superclasses.update(abc_hierarchy.GetSuperClasses())
hierarchy = SuperClassHierarchy(superclasses)
node = node.Visit(SimplifyUnionsWithSuperclasses(hierarchy))
if lossy:
node = node.Visit(FindCommonSuperClasses(hierarchy))
if max_union:
node = node.Visit(CollapseLongUnions(max_union))
node = node.Visit(AdjustReturnAndConstantGenericType())
if remove_mutable:
node = node.Visit(AbsorbMutableParameters())
node = node.Visit(CombineContainers())
node = node.Visit(MergeTypeParameters())
node = node.Visit(visitors.AdjustSelf())
node = node.Visit(SimplifyContainers())
if builtins and can_do_lookup:
node = visitors.LookupClasses(node, builtins, ignore_late_types=True)
node = node.Visit(RemoveInheritedMethods())
node = node.Visit(RemoveRedundantSignatures(hierarchy))
return node
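# Illustrative wrapper (variable names are made up): both arguments are
# assumed to be resolved pytd.TypeDeclUnit ASTs, e.g. as produced by pytype's
# pyi parser.
def _example_optimize(ast, builtins_ast):
  return Optimize(ast, builtins=builtins_ast, lossy=False, max_union=7)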
| 33.007563 | 80 | 0.675705 | [
"Apache-2.0"
] | Jason-Cooke/pytype | pytype/pytd/optimize.py | 39,279 | Python |
"""
--- Ångström ---
Read, manipulate and analyze molecular trajectory files.
"""
from .read import read_xyz_traj
from .write import write_xyz_traj
from angstrom.geometry import get_molecule_center
from angstrom import Molecule
import numpy as np
import os
class Trajectory:
"""
Reading and analyzing trajectories in xyz format.
"""
def __init__(self, atoms=None, coordinates=None, read=None, molecule=None):
"""
Create a trajectory object.
Parameters
----------
atoms : list or None
List of elements of the molecule for each frame.
coordinates : list or None
List of atomic positions of the molecule for each frame.
read : str or None
File name to read molecule file (formats: xyz).
molecule : Molecule
Create a Trajectory with 1 frame from a Molecule object.
"""
self.name = 'Trajectory'
if atoms is not None and coordinates is not None:
self.atoms = atoms
self.coordinates = coordinates
elif read is not None:
self.read(read)
elif molecule is not None:
self.atoms = np.array([molecule.atoms])
self.coordinates = np.array([molecule.coordinates])
self.name = molecule.name
else:
self.atoms = []
self.coordinates = []
self.current_frame = 0
def __repr__(self):
"""
Returns basic trajectory info.
"""
return "<Trajectory frames: %i | atoms: %i | dimensions: %i>" % tuple(np.shape(self.coordinates))
def __len__(self):
"""
Returns number of frames.
"""
return len(self.atoms)
def __add__(self, traj):
"""
Trajectory addition for joining the coordinates and elements into a new Trajectory object.
Parameters
----------
traj : Trajectory
Trajectory object to be added
Returns
-------
Trajectory
Joined Trajectory object.
"""
new_traj = Trajectory(atoms=np.append(self.atoms, traj.atoms, axis=0),
coordinates=np.append(self.coordinates, traj.coordinates, axis=0))
return new_traj
def __getitem__(self, i):
"""
Indexing method.
Returns a Molecule object for given index (frame).
Returns a Trajectory object if used as slicing.
"""
if isinstance(i, slice):
indices = range(len(self))[i.start:i.stop:i.step]
if len(indices) == 0:
return []
else:
new_traj = Trajectory(molecule=self[indices[0]])
for j in indices[1:]:
new_traj.append(self[j])
return new_traj
else:
return Molecule(atoms=self.atoms[i], coordinates=self.coordinates[i])
def __iter__(self):
"""
Initialize iterator, reset frame index.
"""
self.current_frame = 0
return self
def __next__(self):
"""
Returns the next frame in Trajectory as a Molecule object.
"""
if self.current_frame >= len(self):
raise StopIteration
next_mol = self[self.current_frame]
self.current_frame += 1
return next_mol
def append(self, mol):
"""
Append molecule to trajectory.
The number of atoms in the molecule must match that of the trajectory.
Parameters
----------
mol : Molecule
Molecule object to be added
Returns
-------
None
Added to Trajectory object.
"""
if len(mol.atoms) != self.atoms.shape[1]:
raise Exception('Trajectory cannot have different number of atoms per frame')
self.atoms = np.append(self.atoms, [mol.atoms], axis=0)
self.coordinates = np.append(self.coordinates, [mol.coordinates], axis=0)
def read(self, filename):
"""
Read xyz formatted trajectory file.
Parameters
----------
filename : str
Trajectory file name.
Returns
-------
None
Assigns 'coordinates', 'atoms', and 'headers' attributes.
"""
self.name = os.path.splitext(os.path.basename(filename))[0]
traj = read_xyz_traj(filename)
self.atoms, self.coordinates, self.headers = traj['atoms'], traj['coordinates'], traj['headers']
def write(self, filename):
"""
Write xyz formatted trajectory file.
Parameters
----------
filename : str
Trajectory file name (formats: xyz).
Returns
-------
None
Writes molecule information to given file name.
"""
with open(filename, 'w') as traj_file:
if hasattr(self, 'headers'):
write_xyz_traj(traj_file, self.atoms, self.coordinates, headers=self.headers)
else:
write_xyz_traj(traj_file, self.atoms, self.coordinates)
def get_center(self, mass=True):
"""
Get coordinates of molecule center at each frame.
Parameters
----------
mass : bool
Calculate center of mass (True) or geometric center (False).
Returns
-------
ndarray
Molecule center coordinates for each frame.
"""
centers = np.empty((len(self.atoms), 3))
for f, (frame_atoms, frame_coors) in enumerate(zip(self.atoms, self.coordinates)):
centers[f] = get_molecule_center(frame_atoms, frame_coors, mass=mass)
return centers
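# Illustrative usage (coordinates are made up): build a two-frame trajectory
# for a CO molecule and compute the geometric center of each frame.
def _example_trajectory():
    atoms = np.array([['C', 'O'], ['C', 'O']])
    coordinates = np.array([[[0.0, 0.0, 0.0], [1.2, 0.0, 0.0]],
                            [[0.0, 0.0, 0.0], [1.3, 0.0, 0.0]]])
    traj = Trajectory(atoms=atoms, coordinates=coordinates)
    return traj.get_center(mass=False)  # one (x, y, z) row per frame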
| 28.542289 | 105 | 0.56092 | [
"BSD-3-Clause"
] | kbsezginel/angstrom | angstrom/trajectory/trajectory.py | 5,739 | Python |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import sys
import numpy
import awkward as ak
def convert_to_array(layout, args, kwargs):
out = ak.operations.convert.to_numpy(layout, allow_missing=False)
if args == () and kwargs == {}:
return out
else:
return numpy.array(out, *args, **kwargs)
implemented = {}
def array_function(func, types, args, kwargs):
function = implemented.get(func)
if function is None:
return NotImplemented
else:
return function(*args, **kwargs)
def implements(numpy_function):
def decorator(function):
implemented[getattr(numpy, numpy_function)] = function
return function
return decorator
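# Hypothetical registration sketch (not one of the real overloads, which live
# elsewhere in awkward): a function decorated like this is stored in
# `implemented` and dispatched by array_function() above when NumPy routes a
# call through __array_function__.
# @implements("concatenate")
# def _concatenate(arrays, axis=0):
#     return ak.concatenate(arrays, axis=axis)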
def array_ufunc(ufunc, method, inputs, kwargs):
if method != "__call__" or len(inputs) == 0 or "out" in kwargs:
return NotImplemented
behavior = ak._util.behaviorof(*inputs)
nextinputs = []
for x in inputs:
cast_fcn = ak._util.custom_cast(x, behavior)
if cast_fcn is not None:
x = cast_fcn(x)
nextinputs.append(
ak.operations.convert.to_layout(x, allow_record=True, allow_other=True)
)
inputs = nextinputs
def adjust(custom, inputs, kwargs):
args = [
ak._util.wrap(x, behavior)
if isinstance(x, (ak.layout.Content, ak.layout.Record))
else x
for x in inputs
]
out = custom(*args, **kwargs)
if not isinstance(out, tuple):
out = (out,)
return tuple(
x.layout if isinstance(x, (ak.highlevel.Array, ak.highlevel.Record)) else x
for x in out
)
def adjust_apply_ufunc(apply_ufunc, ufunc, method, inputs, kwargs):
nextinputs = [
ak._util.wrap(x, behavior)
if isinstance(x, (ak.layout.Content, ak.layout.Record))
else x
for x in inputs
]
out = apply_ufunc(ufunc, method, nextinputs, kwargs)
if out is NotImplemented:
return None
else:
if not isinstance(out, tuple):
out = (out,)
out = tuple(
x.layout
if isinstance(x, (ak.highlevel.Array, ak.highlevel.Record))
else x
for x in out
)
return lambda: out
def is_fully_regular(layout):
if (
isinstance(layout, ak.layout.RegularArray)
and layout.parameter("__record__") is None
and layout.parameter("__array__") is None
):
if isinstance(layout.content, ak.layout.NumpyArray):
return True
elif isinstance(layout.content, ak.layout.RegularArray):
return is_fully_regular(layout.content)
else:
return False
else:
return False
def deregulate(layout):
if not is_fully_regular(layout):
return layout
else:
shape = [len(layout)]
node = layout
while isinstance(node, ak.layout.RegularArray):
shape.append(node.size)
node = node.content
nparray = ak.nplike.of(node).asarray(node)
nparray = nparray.reshape(tuple(shape) + nparray.shape[1:])
return ak.layout.NumpyArray(
nparray,
node.identities,
node.parameters,
)
def getfunction(inputs):
signature = [ufunc]
for x in inputs:
if isinstance(x, ak.layout.Content):
record = x.parameter("__record__")
array = x.parameter("__array__")
if record is not None:
signature.append(record)
elif array is not None:
signature.append(array)
elif isinstance(x, ak.layout.NumpyArray):
signature.append(ak.nplike.of(x).asarray(x).dtype.type)
else:
signature.append(None)
else:
signature.append(type(x))
custom = ak._util.overload(behavior, signature)
if custom is not None:
return lambda: adjust(custom, inputs, kwargs)
if ufunc is numpy.matmul:
custom_matmul = getfunction_matmul(inputs)
if custom_matmul is not None:
return custom_matmul
inputs = [deregulate(x) for x in inputs]
if all(
isinstance(x, ak.layout.NumpyArray)
or not isinstance(x, (ak.layout.Content, ak.partition.PartitionedArray))
for x in inputs
):
nplike = ak.nplike.of(*inputs)
result = getattr(ufunc, method)(
*[nplike.asarray(x) for x in inputs], **kwargs
)
return lambda: (ak.operations.convert.from_numpy(result, highlevel=False),)
for x in inputs:
if isinstance(x, ak.layout.Content):
chained_behavior = ak._util.Behavior(ak.behavior, behavior)
apply_ufunc = chained_behavior[numpy.ufunc, x.parameter("__array__")]
if apply_ufunc is not None:
out = adjust_apply_ufunc(apply_ufunc, ufunc, method, inputs, kwargs)
if out is not None:
return out
apply_ufunc = chained_behavior[numpy.ufunc, x.parameter("__record__")]
if apply_ufunc is not None:
out = adjust_apply_ufunc(apply_ufunc, ufunc, method, inputs, kwargs)
if out is not None:
return out
if all(
x.parameter("__array__") is not None
or x.parameter("__record__") is not None
for x in inputs
if isinstance(x, ak.layout.Content)
):
custom_types = []
for x in inputs:
if isinstance(x, ak.layout.Content):
if x.parameter("__array__") is not None:
custom_types.append(x.parameter("__array__"))
elif x.parameter("__record__") is not None:
custom_types.append(x.parameter("__record__"))
else:
custom_types.append(type(x).__name__)
else:
custom_types.append(type(x).__name__)
raise ValueError(
"no overloads for custom types: {0}({1})".format(
ufunc.__name__,
", ".join(custom_types),
)
+ ak._util.exception_suffix(__file__)
)
return None
out = ak._util.broadcast_and_apply(
inputs, getfunction, behavior, allow_records=False, pass_depth=False
)
assert isinstance(out, tuple) and len(out) == 1
return ak._util.wrap(out[0], behavior)
def matmul_for_numba(lefts, rights, dtype):
total_outer = 0
total_inner = 0
total_content = 0
for A, B in zip(lefts, rights):
first = -1
for Ai in A:
if first == -1:
first = len(Ai)
elif first != len(Ai):
raise ValueError(
"one of the left matrices in np.matmul is not rectangular"
)
if first == -1:
first = 0
rowsA = len(A)
colsA = first
first = -1
for Bi in B:
if first == -1:
first = len(Bi)
elif first != len(Bi):
raise ValueError(
"one of the right matrices in np.matmul is not rectangular"
)
if first == -1:
first = 0
rowsB = len(B)
colsB = first
if colsA != rowsB:
raise ValueError(
u"one of the pairs of matrices in np.matmul do not match shape: "
u"(n \u00d7 k) @ (k \u00d7 m)"
)
total_outer += 1
total_inner += rowsA
total_content += rowsA * colsB
outer = numpy.empty(total_outer + 1, numpy.int64)
inner = numpy.empty(total_inner + 1, numpy.int64)
content = numpy.zeros(total_content, dtype)
outer[0] = 0
inner[0] = 0
outer_i = 1
inner_i = 1
content_i = 0
for A, B in zip(lefts, rights):
rows = len(A)
cols = 0
if len(B) > 0:
cols = len(B[0])
mids = 0
if len(A) > 0:
mids = len(A[0])
for i in range(rows):
for j in range(cols):
for v in range(mids):
pos = content_i + i * cols + j
content[pos] += A[i][v] * B[v][j]
outer[outer_i] = outer[outer_i - 1] + rows
outer_i += 1
for _ in range(rows):
inner[inner_i] = inner[inner_i - 1] + cols
inner_i += 1
content_i += rows * cols
return outer, inner, content
matmul_for_numba.numbafied = None
def getfunction_matmul(inputs):
inputs = [
ak._util.recursively_apply(
x, (lambda _: _), pass_depth=False, numpy_to_regular=True
)
for x in inputs
]
if len(inputs) == 2 and all(
isinstance(x, ak._util.listtypes)
and isinstance(x.content, ak._util.listtypes)
and isinstance(x.content.content, ak.layout.NumpyArray)
for x in inputs
):
ak._connect._numba.register_and_check()
import numba
if matmul_for_numba.numbafied is None:
matmul_for_numba.numbafied = numba.njit(matmul_for_numba)
lefts = ak.highlevel.Array(inputs[0])
rights = ak.highlevel.Array(inputs[1])
dtype = numpy.asarray(lefts[0:0, 0:0, 0:0] + rights[0:0, 0:0, 0:0]).dtype
outer, inner, content = matmul_for_numba.numbafied(lefts, rights, dtype)
return lambda: (
ak.layout.ListOffsetArray64(
ak.layout.Index64(outer),
ak.layout.ListOffsetArray64(
ak.layout.Index64(inner),
ak.layout.NumpyArray(content),
),
),
)
else:
return None
try:
NDArrayOperatorsMixin = numpy.lib.mixins.NDArrayOperatorsMixin
except AttributeError:
from numpy.core import umath as um
def _disables_array_ufunc(obj):
try:
return obj.__array_ufunc__ is None
except AttributeError:
return False
def _binary_method(ufunc, name):
def func(self, other):
if _disables_array_ufunc(other):
return NotImplemented
return ufunc(self, other)
func.__name__ = "__{}__".format(name)
return func
def _reflected_binary_method(ufunc, name):
def func(self, other):
if _disables_array_ufunc(other):
return NotImplemented
return ufunc(other, self)
func.__name__ = "__r{}__".format(name)
return func
def _inplace_binary_method(ufunc, name):
def func(self, other):
return ufunc(self, other, out=(self,))
func.__name__ = "__i{}__".format(name)
return func
def _numeric_methods(ufunc, name):
return (
_binary_method(ufunc, name),
_reflected_binary_method(ufunc, name),
_inplace_binary_method(ufunc, name),
)
def _unary_method(ufunc, name):
def func(self):
return ufunc(self)
func.__name__ = "__{}__".format(name)
return func
class NDArrayOperatorsMixin(object):
__lt__ = _binary_method(um.less, "lt")
__le__ = _binary_method(um.less_equal, "le")
__eq__ = _binary_method(um.equal, "eq")
__ne__ = _binary_method(um.not_equal, "ne")
__gt__ = _binary_method(um.greater, "gt")
__ge__ = _binary_method(um.greater_equal, "ge")
__add__, __radd__, __iadd__ = _numeric_methods(um.add, "add")
__sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, "sub")
__mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, "mul")
__matmul__, __rmatmul__, __imatmul__ = _numeric_methods(um.matmul, "matmul")
if sys.version_info.major < 3:
__div__, __rdiv__, __idiv__ = _numeric_methods(um.divide, "div")
__truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
um.true_divide, "truediv"
)
__floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
um.floor_divide, "floordiv"
)
__mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, "mod")
if hasattr(um, "divmod"):
__divmod__ = _binary_method(um.divmod, "divmod")
__rdivmod__ = _reflected_binary_method(um.divmod, "divmod")
__pow__, __rpow__, __ipow__ = _numeric_methods(um.power, "pow")
__lshift__, __rlshift__, __ilshift__ = _numeric_methods(um.left_shift, "lshift")
__rshift__, __rrshift__, __irshift__ = _numeric_methods(
um.right_shift, "rshift"
)
__and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, "and")
__xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, "xor")
__or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, "or")
__neg__ = _unary_method(um.negative, "neg")
if hasattr(um, "positive"):
__pos__ = _unary_method(um.positive, "pos")
__abs__ = _unary_method(um.absolute, "abs")
__invert__ = _unary_method(um.invert, "invert")
| 32.475059 | 88 | 0.557636 | [
"BSD-3-Clause"
] | drahnreb/awkward-1.0 | src/awkward/_connect/_numpy.py | 13,672 | Python |
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
import base64
import gzip
import hashlib
import hmac
import json
import os
import requests
import secrets
import sys
import uuid
APP_NAME = "com.amazon.avod.thirdpartyclient"
APP_VERSION = "296016847"
DEVICE_NAME = "walleye/google/Pixel 2"
MANUFACTURER = "Google"
OS_VERSION = "google/walleye/walleye:8.1.0/OPM1.171019.021/4565141:user/release-keys"
def pkcs7_pad(data):
padsize = 16 - len(data) % 16
return data + bytes([padsize]) * padsize
def pkcs7_unpad(data):
offset = data[-1]
return data[:-offset]
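# Quick illustrative check of the two helpers above: pad to a 16-byte boundary,
# then strip the padding again.
assert len(pkcs7_pad(b"hello")) % 16 == 0
assert pkcs7_unpad(pkcs7_pad(b"hello")) == b"hello"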
def get_headers(domain):
return {
"Accept-Charset": "utf-8",
"User-Agent": "Dalvik/2.1.0 (Linux; U; Android 10; Pixel 2 Build/OPM1.171019.021)",
"x-amzn-identity-auth-domain": f"api.amazon.{domain}",
"x-amzn-requestid": str(uuid.uuid4()),
}
def generate_frc(device_id):
cookies = json.dumps({
"ApplicationName": APP_NAME,
"ApplicationVersion": APP_VERSION,
"DeviceLanguage": "en",
"DeviceName": DEVICE_NAME,
"DeviceOSVersion": OS_VERSION,
"IpAddress": requests.get('https://api.ipify.org').text,
"ScreenHeightPixels": "1920",
"ScreenWidthPixels": "1280",
"TimeZone": "00:00",
})
compressed = gzip.compress(cookies.encode())
key = PBKDF2(device_id, b"AES/CBC/PKCS7Padding")
iv = secrets.token_bytes(16)
cipher = AES.new(key, AES.MODE_CBC, iv)
ciphertext = cipher.encrypt(pkcs7_pad(compressed))
hmac_ = hmac.new(PBKDF2(device_id, b"HmacSHA256"), iv + ciphertext, hashlib.sha256).digest()
return base64.b64encode(b"\0" + hmac_[:8] + iv + ciphertext).decode()
def login(email, password, device_id, domain = "com"):
body = {
"auth_data": {
"use_global_authentication": "true",
"user_id_password": {
"password" : password,
"user_id": email,
},
},
"registration_data": {
"domain": "DeviceLegacy",
"device_type": "A43PXU4ZN2AL1",
"device_serial": device_id,
"app_name": APP_NAME,
"app_version": APP_VERSION,
"device_model": DEVICE_NAME,
"os_version": OS_VERSION,
"software_version": "130050002"
},
"requested_token_type": ["bearer","mac_dms","store_authentication_cookie","website_cookies"],
"cookies": {
"domain": f"amazon.{domain}",
"website_cookies": []
},
"user_context_map": {
"frc": generate_frc(device_id)
},
"device_metadata": {
"device_os_family": "android",
"device_type": "A43PXU4ZN2AL1",
"device_serial": device_id,
"mac_address": secrets.token_hex(64).upper(),
"manufacturer": MANUFACTURER,
"model": DEVICE_NAME,
"os_version": "30",
"android_id": "f1c56f6030b048a7",
"product": DEVICE_NAME
},
"requested_extensions": ["device_info","customer_info"]
}
response_json = requests.post(f"https://api.amazon.{domain}/auth/register", headers=get_headers(domain), json=body).json()
try:
return {
"domain": domain,
"access_token": response_json["response"]["success"]["tokens"]["bearer"]["access_token"],
"refresh_token": response_json["response"]["success"]["tokens"]["bearer"]["refresh_token"]
}
except:
print(json.dumps(response_json))
return None
def refresh(tokens):
body = {
"app_name": APP_NAME,
"app_version": APP_VERSION,
"source_token_type": "refresh_token",
"source_token": tokens["refresh_token"],
"requested_token_type": "access_token"
}
response_json = requests.post(f"https://api.amazon.com/auth/token", headers=get_headers(tokens["domain"]), json=body).json()
try:
tokens["access_token"] = response_json["access_token"]
except:
print(json.dumps(response_json))
return tokens
if __name__ == "__main__":
script_path = os.path.dirname(os.path.realpath(sys.argv[0]))
device_id_path = os.path.join(script_path, "device_id")
tokens_path = os.path.join(script_path, "tokens")
if os.path.isfile(device_id_path):
with open(device_id_path, "r") as f:
device_id = f.read()
else:
with open(device_id_path, "w") as f:
device_id = secrets.token_hex(16)
f.write(device_id)
arg_count = len(sys.argv)
if arg_count != 4:
print("usage: amazon_auth.py <email> <password> <domain>")
print("domains: com, co.uk, co.jp, de")
exit()
tokens = login(sys.argv[1], sys.argv[2], device_id, sys.argv[3])
    if tokens is None:
print("Could not login!")
else:
print(json.dumps(tokens))
with open(tokens_path, "w") as f:
f.write(json.dumps(tokens))
| 31.578616 | 128 | 0.601872 | [
"MIT"
] | Sapphicality/comix | amazon_auth.py | 5,021 | Python |
# Generated by Django 3.0.4 on 2020-03-26 03:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ViralScreener', '0008_auto_20200326_0338'),
]
operations = [
migrations.AlterField(
model_name='employeescreeningresponses',
name='DateTime',
field=models.DateTimeField(),
),
]
| 21.315789 | 53 | 0.619753 | [
"MIT"
] | memtech3/Viral-Screener | mysite/ViralScreener/migrations/0009_auto_20200326_0339.py | 405 | Python |
# Write a program where the user can enter seven numeric values
# and store them in a single list that keeps the even and odd values separated.
# At the end, show the even and odd values in ascending order.
lista_unic = [[], []]
print('-=' * 20)
for c in range(0, 7):
    nums = int(input(f'Enter value #{c+1}: '))
    if nums % 2 == 0:
lista_unic[0].append(nums)
else:
lista_unic[1].append(nums)
print('-=-' * 30)
lista_unic[0].sort()
lista_unic[1].sort()
print(f'The even values were: {lista_unic[0]}')
print(f'The odd values were: {lista_unic[1]}')
print('-=-' * 30)
| 34.222222 | 85 | 0.662338 | [
"MIT"
] | GustavoVieiraa/CursoemVideo---Python-3 | CursoemVideo - Python 3/aula 18/ex085.py | 623 | Python |
# Generated by Django 3.2.4 on 2021-06-05 20:41
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| 63.977778 | 329 | 0.664467 | [
"MIT"
] | aleattene/django-for-professionals | accounts/migrations/0001_initial.py | 2,879 | Python |
import numpy as np
import pytest
import xarray as xr
import cf_xarray as cfxr
from . import requires_shapely
@pytest.fixture
def geometry_ds():
from shapely.geometry import MultiPoint, Point
    # empty/fill workaround to avoid a numpy deprecation warning due to the array interface of shapely geometries.
geoms = np.empty(4, dtype=object)
geoms[:] = [
MultiPoint([(1.0, 2.0), (2.0, 3.0)]),
Point(3.0, 4.0),
Point(4.0, 5.0),
Point(3.0, 4.0),
]
ds = xr.Dataset(
{
"data": xr.DataArray(range(len(geoms)), dims=("index",)),
"time": xr.DataArray([0, 0, 0, 1], dims=("index",)),
}
)
shp_ds = ds.assign(geometry=xr.DataArray(geoms, dims=("index",)))
cf_ds = ds.assign(
x=xr.DataArray([1.0, 2.0, 3.0, 4.0, 3.0], dims=("node",), attrs={"axis": "X"}),
y=xr.DataArray([2.0, 3.0, 4.0, 5.0, 4.0], dims=("node",), attrs={"axis": "Y"}),
node_count=xr.DataArray([2, 1, 1, 1], dims=("index",)),
crd_x=xr.DataArray([1.0, 3.0, 4.0, 3.0], dims=("index",), attrs={"nodes": "x"}),
crd_y=xr.DataArray([2.0, 4.0, 5.0, 4.0], dims=("index",), attrs={"nodes": "y"}),
geometry_container=xr.DataArray(
attrs={
"geometry_type": "point",
"node_count": "node_count",
"node_coordinates": "x y",
"coordinates": "crd_x crd_y",
}
),
)
cf_ds = cf_ds.set_coords(["x", "y", "crd_x", "crd_y"])
return cf_ds, shp_ds
@requires_shapely
def test_shapely_to_cf(geometry_ds):
from shapely.geometry import Point
expected, in_ds = geometry_ds
out = xr.merge([in_ds.drop_vars("geometry"), cfxr.shapely_to_cf(in_ds.geometry)])
xr.testing.assert_identical(out, expected)
out = xr.merge(
[
in_ds.drop_vars("geometry").isel(index=slice(1, None)),
cfxr.shapely_to_cf(in_ds.geometry.isel(index=slice(1, None))),
]
)
expected = expected.isel(index=slice(1, None), node=slice(2, None)).drop_vars(
"node_count"
)
del expected.geometry_container.attrs["node_count"]
xr.testing.assert_identical(out, expected)
out = xr.merge(
[
in_ds.drop_vars("geometry").isel(index=slice(1, None)),
cfxr.shapely_to_cf(
in_ds.geometry.isel(index=slice(1, None)),
grid_mapping="longitude_latitude",
),
]
)
np.testing.assert_array_equal(out.lon, expected.crd_x)
assert "longitude" in out.cf
assert "latitude" in out.cf
out = cfxr.shapely_to_cf([Point(2, 3)])
assert set(out.dims) == {"features", "node"}
@requires_shapely
def test_shapely_to_cf_errors():
from shapely.geometry import LineString, Point
geoms = [LineString([[1, 2], [2, 3]]), LineString([[2, 3, 4], [4, 3, 2]])]
with pytest.raises(NotImplementedError, match="Only point geometries conversion"):
cfxr.shapely_to_cf(geoms)
geoms.append(Point(1, 2))
with pytest.raises(ValueError, match="Mixed geometry types are not supported"):
cfxr.shapely_to_cf(geoms)
with pytest.raises(
NotImplementedError, match="Only grid mapping longitude_latitude"
):
cfxr.shapely_to_cf([Point(4, 5)], grid_mapping="albers_conical_equal_area")
@requires_shapely
def test_cf_to_shapely(geometry_ds):
in_ds, exp = geometry_ds
xr.testing.assert_identical(
cfxr.cf_to_shapely(in_ds).drop_vars(["crd_x", "crd_y"]), exp.geometry
)
in_ds = in_ds.isel(index=slice(1, None), node=slice(2, None)).drop_vars(
"node_count"
)
del in_ds.geometry_container.attrs["node_count"]
out = cfxr.cf_to_shapely(in_ds)
assert out.dims == ("index",)
@requires_shapely
def test_cf_to_shapely_errors(geometry_ds):
in_ds, expected = geometry_ds
in_ds.geometry_container.attrs["geometry_type"] = "line"
with pytest.raises(NotImplementedError, match="Only point geometries conversion"):
cfxr.cf_to_shapely(in_ds)
in_ds.geometry_container.attrs["geometry_type"] = "punkt"
with pytest.raises(ValueError, match="Valid CF geometry types are "):
cfxr.cf_to_shapely(in_ds)
@requires_shapely
def test_reshape_unique_geometries(geometry_ds):
_, in_ds = geometry_ds
out = cfxr.geometry.reshape_unique_geometries(in_ds)
assert out.geometry.dims == ("features",)
assert out.data.dims == ("features", "index")
np.testing.assert_array_equal(
out.geometry, in_ds.geometry.values[np.array([1, 2, 0])]
)
in_ds["index"] = in_ds.time
in_ds = in_ds.drop_vars("time").rename(index="time")
out = cfxr.geometry.reshape_unique_geometries(in_ds)
assert out.geometry.dims == ("features",)
assert out.data.dims == ("features", "time")
np.testing.assert_array_equal(out.time, [0, 1])
geoms = in_ds.geometry.expand_dims(n=[1, 2])
in_ds = in_ds.assign(geometry=geoms)
with pytest.raises(ValueError, match="The geometry variable must be 1D"):
cfxr.geometry.reshape_unique_geometries(in_ds)
| 32.100629 | 113 | 0.631074 | [
"Apache-2.0"
] | dcherian/cf-xarray | cf_xarray/tests/test_geometry.py | 5,104 | Python |
"""
list of movies that feed into fresh_tomatoes.py file
"""
import fresh_tomatoes
from get_movie_list import get_movie_list
def main():
"""
Main entry point for the script.
"""
# Read in the movies from the json file
movie_list = get_movie_list("src/data/movies.json")
# Generate the html file and display in a browser window
fresh_tomatoes.open_movies_page(movie_list)
main()
| 22.944444 | 60 | 0.714286 | [
"MIT"
] | golgistudio/udacity-movie-trailer | src/entertainment_center.py | 413 | Python |
from bson.objectid import ObjectId
from faker import Faker
from flask import url_for
from dawdle import create_app
from dawdle.models.board import Board, BoardType
from dawdle.models.user import User
class TestBase:
@classmethod
def setup_class(cls):
cls.fake = Faker()
cls.app = create_app(testing=True)
cls.app.app_context().push()
cls.client = cls.app.test_client()
cls.password = cls.fake.password()
cls.user = cls.create_user(password=cls.password)
cls.login()
@classmethod
def teardown_class(cls):
cls.clear_db()
@classmethod
def create_user(cls, **kwargs):
user = User()
user.active = kwargs.get('active', True)
user.email = kwargs.get('email', cls.fake.email())
user.initials = kwargs.get(
'initials',
cls.fake.pystr(min_chars=1, max_chars=4),
).upper()
user.name = kwargs.get('name', cls.fake.name())
user.password = User.encrypt_password(
kwargs.get('password', cls.fake.password()),
)
return user.save()
@classmethod
def create_boards(cls, owner_id, min_boards=1, max_boards=1):
num = cls.fake.pyint(min_boards, max_boards)
boards = []
for n in range(num):
boards.append(cls.create_board(owner_id=owner_id))
return boards
@classmethod
def create_board(cls, **kwargs):
board = Board()
board.created_by = kwargs.get('created_by', ObjectId())
board.name = kwargs.get(
'name',
cls.fake.pystr(min_chars=1, max_chars=256),
)
board.owner_id = kwargs.get('owner_id', ObjectId())
board.type = kwargs.get('type', BoardType.PERSONAL.id)
return board.save()
@classmethod
def as_new_user(cls):
password = cls.fake.password()
user = cls.create_user(password=password)
cls.login(email=user.email, password=password)
return user, password
@classmethod
def login(cls, **kwargs):
email = kwargs.get('email', cls.user.email)
password = kwargs.get('password', cls.password)
data = {'email': email, 'password': password}
cls.client.post(url_for('auth.login_POST'), data=data)
cls.logged_in = cls.user.email == email and cls.password == password
@classmethod
def logout(cls):
cls.client.get(url_for('auth.logout_GET'))
cls.logged_in = False
@classmethod
def clear_db(cls):
Board.objects.delete()
User.objects.delete()
def setup_method(self):
if not self.logged_in:
self.login()
| 29.428571 | 76 | 0.612024 | [
"MIT"
] | simba999/dawdle | tests/test_base.py | 2,678 | Python |
import pandas as pd
import numpy as np
import itertools as it
from collections import defaultdict
from collections import Counter
from six.moves import map as imap
def dict_subset(d, fields):
# return a subset of the provided dict containing only the
# fields specified in fields
return {k: d[k] for k in d if k in fields and d[k] is not None}
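# Illustrative example of the helper above (values are made up):
#   dict_subset({'a': 1, 'b': None, 'c': 3}, ['a', 'b']) -> {'a': 1}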
class MessageFieldCounter:
"""
Count occurrences of values in a stream of messages for a specified set of fields
Usage:
messages = [
{'a': 'apple', 'b': 'boat'},
{'a': 'pear', 'b': 'boat'},
{'a': 'apple', 'b': 'boat'},
]
fields = ['a', 'b']
mfc = MessageFieldCounter(messages, fields)
# this class is designed to pass through a long stream of messages
# so we have to pull them through in order to count them
    for msg in mfc:
        pass
    print(mfc.most_common('a'))
>>> [('apple', 2)]
"""
def __init__(self, messages, fields):
self.fields = set(fields)
self.messages = messages
self.counters = defaultdict(Counter)
def __iter__(self):
return self.process()
def process(self):
for msg in self.messages:
for key in self.fields:
value = msg.get(key, None)
if value is not None:
self.counters[key][value] += 1
yield msg
def most_common(self, field, n=1):
return self.counters[field].most_common(n)
class MessageStats():
"""
    Extract a set of stats from a stream of messages.
numeric_fields: list of field names to compute numeric stats (eg min, max, avg)
frequency_fields: list of field names to compute frequency of values
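    Usage (illustrative sketch; messages and field names are made up):
        messages = [
            {'speed': 5, 'type': 'A'},
            {'speed': 7, 'type': 'A'},
        ]
        stats = MessageStats(messages, numeric_fields=['speed'], frequency_fields=['type'])
        stats.field_stats('speed')   # min 5, max 7, first 5, last 7, count 2
        stats.field_stats('type')    # most_common 'A', most_common_count 2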
"""
NUMERIC_STATS = ['min', 'max', 'first', 'last', 'count']
FREQUENCY_STATS = ['most_common', 'most_common_count']
def __init__(self, messages, numeric_fields, frequency_fields):
self._numeric_fields = numeric_fields
self._frequency_fields = frequency_fields
self.counter = MessageFieldCounter(messages, frequency_fields)
messages = self.counter.process()
messages = imap(dict_subset, messages, it.repeat(numeric_fields))
# DataFrame won't take an iterator, but it will take a generator
messages = (m for m in messages)
self.df = pd.DataFrame(messages)
@property
def numeric_fields(self):
return self._numeric_fields
@property
def frequency_fields(self):
return self._frequency_fields
@property
def frequency_counter(self):
return self.counter
@property
def data_frame(self):
return self.df
def numeric_stats(self, field):
def first(col):
idx = col.first_valid_index()
return col[idx] if idx is not None else None
def last(col):
idx = col.last_valid_index()
return col[idx] if idx is not None else None
assert field in self.numeric_fields
if field in self.df:
col = self.df[field]
return dict(
min=np.nanmin(col),
max=np.nanmax(col),
first=first(col),
last=last(col),
count=np.count_nonzero(~np.isnan(col)),
)
else:
return {}
def frequency_stats(self, field):
assert field in self.frequency_fields
stat = self.frequency_counter.most_common(field)
if stat:
value, count = stat[0]
return dict(
most_common=value,
most_common_count=count
)
else:
return {}
def field_stats(self, field):
stats = {}
if field in self.numeric_fields:
stats.update(self.numeric_stats(field))
if field in self.frequency_fields:
stats.update(self.frequency_stats(field))
return stats | 29.954887 | 85 | 0.592369 | [
"Apache-2.0"
] | GlobalFishingWatch/pipe-segment | pipe_segment/stats/stats.py | 3,984 | Python |
"""[Lambda Expressions]
Lambda expressions are simply another way to create functions: anonymous functions.
    lambda [parameter list]: expression
        "lambda" is the keyword
        the parameter list is optional
        the ":" is required, even for zero arguments
        the expression is evaluated and returned when the lambda function is called
            (think of it as "the body" of the function)
    the lambda expression itself returns a function object
        that evaluates and returns the expression when it is called
Examples
lambda x: x**2
lambda x, y: x + y
lambda : 'hello'
lambda s: s[::-1].upper()
type(lambda x: x**2) -> function
Note that these expressions are function objects, but are not "named"
-> anonymous Functions
lambdas, or anonymous functions, are NOT equivalent to closures
Assigning a Lambda to a Variable name
my_func = lambda x: x**2
    type(my_func) -> function
my_func(3) -> 9
my_func(4) -> 16
# identical to:
def my_func(x):
return x**2
type(my_func) -> function
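A common practical use is passing a lambda as a short "key" function, e.g. when sorting
    pairs = [(2, 'b'), (1, 'c'), (3, 'a')]
    sorted(pairs, key=lambda p: p[1]) -> [(3, 'a'), (2, 'b'), (1, 'c')]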
""" | 19.966102 | 161 | 0.633277 | [
"Unlicense"
] | minefarmer/deep-Dive-1 | .history/my_classes/FirstClassFunctions/LambdaExpressions_20210704152007.py | 1,178 | Python |
import json
from flask import Flask, request, Response, send_from_directory, redirect, flash
from flask_login import LoginManager, current_user, login_user
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
from ..models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
def create_user_routes(server):
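    # Registers the Flask-Login user loader and the login route on the given
    # server's Flask app.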
@server.login.user_loader
def load_user(id):
return User.query.get(int(id))
@server.app.route(f'/{server.api_version}/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
nxt = request.args.get('next')
if form.validate_on_submit():
user = User()
login_user(user)
flash('Logged in successfully.')
return redirect(nxt or '/')
| 31.727273 | 80 | 0.698185 | [
"MIT"
] | Arianardo/augur-group25 | augur/routes/user.py | 1,047 | Python |
"""
Distance and Area objects to allow for sensible and convenient calculation
and conversions. Here are some tests.
"""
import unittest
from djmodels.contrib.gis.measure import A, Area, D, Distance
class DistanceTest(unittest.TestCase):
"Testing the Distance object"
def testInit(self):
"Testing initialization from valid units"
d = Distance(m=100)
self.assertEqual(d.m, 100)
d1, d2, d3 = D(m=100), D(meter=100), D(metre=100)
for d in (d1, d2, d3):
self.assertEqual(d.m, 100)
d = D(nm=100)
self.assertEqual(d.m, 185200)
y1, y2, y3 = D(yd=100), D(yard=100), D(Yard=100)
for d in (y1, y2, y3):
self.assertEqual(d.yd, 100)
mm1, mm2 = D(millimeter=1000), D(MiLLiMeTeR=1000)
for d in (mm1, mm2):
self.assertEqual(d.m, 1.0)
self.assertEqual(d.mm, 1000.0)
def testInitInvalid(self):
"Testing initialization from invalid units"
with self.assertRaises(AttributeError):
D(banana=100)
def testAccess(self):
"Testing access in different units"
d = D(m=100)
self.assertEqual(d.km, 0.1)
self.assertAlmostEqual(d.ft, 328.084, 3)
def testAccessInvalid(self):
"Testing access in invalid units"
d = D(m=100)
self.assertFalse(hasattr(d, 'banana'))
def testAddition(self):
"Test addition & subtraction"
d1 = D(m=100)
d2 = D(m=200)
d3 = d1 + d2
self.assertEqual(d3.m, 300)
d3 += d1
self.assertEqual(d3.m, 400)
d4 = d1 - d2
self.assertEqual(d4.m, -100)
d4 -= d1
self.assertEqual(d4.m, -200)
with self.assertRaises(TypeError):
d1 + 1
with self.assertRaises(TypeError):
d1 - 1
with self.assertRaises(TypeError):
d1 += 1
with self.assertRaises(TypeError):
d1 -= 1
def testMultiplication(self):
"Test multiplication & division"
d1 = D(m=100)
d3 = d1 * 2
self.assertEqual(d3.m, 200)
d3 = 2 * d1
self.assertEqual(d3.m, 200)
d3 *= 5
self.assertEqual(d3.m, 1000)
d4 = d1 / 2
self.assertEqual(d4.m, 50)
d4 /= 5
self.assertEqual(d4.m, 10)
d5 = d1 / D(m=2)
self.assertEqual(d5, 50)
a5 = d1 * D(m=10)
self.assertIsInstance(a5, Area)
self.assertEqual(a5.sq_m, 100 * 10)
with self.assertRaises(TypeError):
d1 *= D(m=1)
with self.assertRaises(TypeError):
d1 /= D(m=1)
def testUnitConversions(self):
"Testing default units during maths"
d1 = D(m=100)
d2 = D(km=1)
d3 = d1 + d2
self.assertEqual(d3._default_unit, 'm')
d4 = d2 + d1
self.assertEqual(d4._default_unit, 'km')
d5 = d1 * 2
self.assertEqual(d5._default_unit, 'm')
d6 = d1 / 2
self.assertEqual(d6._default_unit, 'm')
def testComparisons(self):
"Testing comparisons"
d1 = D(m=100)
d2 = D(km=1)
d3 = D(km=0)
self.assertGreater(d2, d1)
self.assertEqual(d1, d1)
self.assertLess(d1, d2)
self.assertFalse(d3)
def testUnitsStr(self):
"Testing conversion to strings"
d1 = D(m=100)
d2 = D(km=3.5)
self.assertEqual(str(d1), '100.0 m')
self.assertEqual(str(d2), '3.5 km')
self.assertEqual(repr(d1), 'Distance(m=100.0)')
self.assertEqual(repr(d2), 'Distance(km=3.5)')
def testUnitAttName(self):
"Testing the `unit_attname` class method"
unit_tuple = [('Yard', 'yd'), ('Nautical Mile', 'nm'), ('German legal metre', 'german_m'),
('Indian yard', 'indian_yd'), ('Chain (Sears)', 'chain_sears'), ('Chain', 'chain')]
for nm, att in unit_tuple:
with self.subTest(nm=nm):
self.assertEqual(att, D.unit_attname(nm))
class AreaTest(unittest.TestCase):
"Testing the Area object"
def testInit(self):
"Testing initialization from valid units"
a = Area(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_mi=100)
self.assertEqual(a.sq_m, 258998811.0336)
def testInitInvaliA(self):
"Testing initialization from invalid units"
with self.assertRaises(AttributeError):
A(banana=100)
def testAccess(self):
"Testing access in different units"
a = A(sq_m=100)
self.assertEqual(a.sq_km, 0.0001)
self.assertAlmostEqual(a.sq_ft, 1076.391, 3)
def testAccessInvaliA(self):
"Testing access in invalid units"
a = A(sq_m=100)
self.assertFalse(hasattr(a, 'banana'))
def testAddition(self):
"Test addition & subtraction"
a1 = A(sq_m=100)
a2 = A(sq_m=200)
a3 = a1 + a2
self.assertEqual(a3.sq_m, 300)
a3 += a1
self.assertEqual(a3.sq_m, 400)
a4 = a1 - a2
self.assertEqual(a4.sq_m, -100)
a4 -= a1
self.assertEqual(a4.sq_m, -200)
with self.assertRaises(TypeError):
a1 + 1
with self.assertRaises(TypeError):
a1 - 1
with self.assertRaises(TypeError):
a1 += 1
with self.assertRaises(TypeError):
a1 -= 1
def testMultiplication(self):
"Test multiplication & division"
a1 = A(sq_m=100)
a3 = a1 * 2
self.assertEqual(a3.sq_m, 200)
a3 = 2 * a1
self.assertEqual(a3.sq_m, 200)
a3 *= 5
self.assertEqual(a3.sq_m, 1000)
a4 = a1 / 2
self.assertEqual(a4.sq_m, 50)
a4 /= 5
self.assertEqual(a4.sq_m, 10)
with self.assertRaises(TypeError):
a1 * A(sq_m=1)
with self.assertRaises(TypeError):
a1 *= A(sq_m=1)
with self.assertRaises(TypeError):
a1 / A(sq_m=1)
with self.assertRaises(TypeError):
a1 /= A(sq_m=1)
def testUnitConversions(self):
"Testing default units during maths"
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = a1 + a2
self.assertEqual(a3._default_unit, 'sq_m')
a4 = a2 + a1
self.assertEqual(a4._default_unit, 'sq_km')
a5 = a1 * 2
self.assertEqual(a5._default_unit, 'sq_m')
a6 = a1 / 2
self.assertEqual(a6._default_unit, 'sq_m')
def testComparisons(self):
"Testing comparisons"
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = A(sq_km=0)
self.assertGreater(a2, a1)
self.assertEqual(a1, a1)
self.assertLess(a1, a2)
self.assertFalse(a3)
def testUnitsStr(self):
"Testing conversion to strings"
a1 = A(sq_m=100)
a2 = A(sq_km=3.5)
self.assertEqual(str(a1), '100.0 sq_m')
self.assertEqual(str(a2), '3.5 sq_km')
self.assertEqual(repr(a1), 'Area(sq_m=100.0)')
self.assertEqual(repr(a2), 'Area(sq_km=3.5)')
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(DistanceTest))
s.addTest(unittest.makeSuite(AreaTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__ == "__main__":
run()
| 26.204225 | 105 | 0.555496 | [
"BSD-3-Clause"
] | iMerica/dj-models | tests/gis_tests/test_measure.py | 7,442 | Python |
import keras
import numpy as np
from keras.datasets import mnist
(x_train,y_train),(x_test,y_test)=mnist.load_data()
x_train = x_train.reshape(60000,784)
x_test = x_test.reshape(10000,784)
x_train = x_train/255.0
x_test = x_test/255.0
from keras.utils import to_categorical
y_train = to_categorical(y_train,num_classes = 10)
y_test = to_categorical(y_test,num_classes = 10)
from keras.layers import Input, Dense, Activation
from keras.models import Model
img_input = Input(shape=(784,))
x = Dense(units = 30, activation = "relu")(img_input)
y = Dense(units = 10, activation = "sigmoid")(x)
model= Model(inputs = img_input, outputs=y)
stringlist = []
model.summary(print_fn=lambda x: stringlist.append(x))
short_model_summary = "\n".join(stringlist)
print(short_model_summary)
#print(model.summary)
model.compile(optimizer="adam",loss="categorical_crossentropy",metrics=["accuracy"])
model.fit(x_train,y_train, batch_size=150,epochs=4, validation_split=0.2)
print(model.metrics_names)
model.evaluate(x_test,y_test, batch_size = 128)
preds=model.predict(x_test,batch_size = 125)
preds = preds.argmax(axis = 1)
y_test = y_test.argmax(axis = 1)
print(preds[:10])
print(y_test[:10])
from sklearn.metrics import classification_report
print(classification_report(y_test, preds)) | 29.488889 | 85 | 0.747551 | [
"MIT"
] | Aadesh-1404/Deep-Neural-Networks-Model---Keras-Tensorflow- | MNIST dataset - FCNN/FCNN.py | 1,327 | Python |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="choroplethmapbox.hoverlabel.font",
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| 27.944444 | 66 | 0.620278 | [
"MIT"
] | 1abner1/plotly.py | packages/python/plotly/plotly/validators/choroplethmapbox/hoverlabel/font/_color.py | 503 | Python |
# console converter - USD to BGN
# Write a program for converting US dollars (USD) into Bulgarian levs (BGN).
# Round the result to 2 digits after the decimal point. Use a fixed exchange rate between the dollar and the lev: 1 USD = 1.79549 BGN.
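# Example: 100 USD -> 100 * 1.79549 = 179.549, which rounds to 179.55 BGN.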
USD = float(input())
BGN = round(USD * 1.79549, 2)
print(BGN)
| 38.5 | 134 | 0.724026 | [
"MIT"
] | karolinanikolova/SoftUni-Software-Engineering | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/01_First-Steps-in-Coding/00.Book-Exercise-2.1-11-USD-to-BGN.py | 308 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import DesktopVirtualizationAPIClientConfiguration
from .operations import Operations
from .operations import WorkspacesOperations
from .operations import ScalingPlansOperations
from .operations import ApplicationGroupsOperations
from .operations import StartMenuItemsOperations
from .operations import ApplicationsOperations
from .operations import DesktopsOperations
from .operations import HostPoolsOperations
from .operations import UserSessionsOperations
from .operations import SessionHostsOperations
from .operations import MsixPackagesOperations
from .operations import MsixImagesOperations
from . import models
class DesktopVirtualizationAPIClient(object):
"""DesktopVirtualizationAPIClient.
:ivar operations: Operations operations
:vartype operations: desktop_virtualization_api_client.operations.Operations
:ivar workspaces: WorkspacesOperations operations
:vartype workspaces: desktop_virtualization_api_client.operations.WorkspacesOperations
:ivar scaling_plans: ScalingPlansOperations operations
:vartype scaling_plans: desktop_virtualization_api_client.operations.ScalingPlansOperations
:ivar application_groups: ApplicationGroupsOperations operations
:vartype application_groups: desktop_virtualization_api_client.operations.ApplicationGroupsOperations
:ivar start_menu_items: StartMenuItemsOperations operations
:vartype start_menu_items: desktop_virtualization_api_client.operations.StartMenuItemsOperations
:ivar applications: ApplicationsOperations operations
:vartype applications: desktop_virtualization_api_client.operations.ApplicationsOperations
:ivar desktops: DesktopsOperations operations
:vartype desktops: desktop_virtualization_api_client.operations.DesktopsOperations
:ivar host_pools: HostPoolsOperations operations
:vartype host_pools: desktop_virtualization_api_client.operations.HostPoolsOperations
:ivar user_sessions: UserSessionsOperations operations
:vartype user_sessions: desktop_virtualization_api_client.operations.UserSessionsOperations
:ivar session_hosts: SessionHostsOperations operations
:vartype session_hosts: desktop_virtualization_api_client.operations.SessionHostsOperations
:ivar msix_packages: MsixPackagesOperations operations
:vartype msix_packages: desktop_virtualization_api_client.operations.MsixPackagesOperations
:ivar msix_images: MsixImagesOperations operations
:vartype msix_images: desktop_virtualization_api_client.operations.MsixImagesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = DesktopVirtualizationAPIClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.workspaces = WorkspacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.scaling_plans = ScalingPlansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.application_groups = ApplicationGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.start_menu_items = StartMenuItemsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.applications = ApplicationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.desktops = DesktopsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.host_pools = HostPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_sessions = UserSessionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.session_hosts = SessionHostsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.msix_packages = MsixPackagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.msix_images = MsixImagesOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> DesktopVirtualizationAPIClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
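# Illustrative usage sketch (not part of the generated client); it assumes the
# azure-identity package is available, uses a placeholder subscription id, and
# the list_by_subscription operation name follows the usual Azure SDK convention:
#   from azure.identity import DefaultAzureCredential
#   client = DesktopVirtualizationAPIClient(
#       credential=DefaultAzureCredential(),
#       subscription_id="<subscription-id>",
#   )
#   for workspace in client.workspaces.list_by_subscription():
#       print(workspace.name)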
| 49.472 | 105 | 0.747413 | [
"MIT"
] | Caoxuyang/azure-cli-extensions | src/desktopvirtualization/azext_desktopvirtualization/vendored_sdks/desktopvirtualization/_desktop_virtualization_api_client.py | 6,184 | Python |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility for creating release candidates and promoting release candidates to a final release.
Usage: release.py
The utility is interactive; you will be prompted for basic release information and guided through the process.
This utility assumes you already have local a kafka git folder and that you
have added remotes corresponding to both:
(i) the github apache kafka mirror and
(ii) the apache kafka git repo.
"""
from __future__ import print_function
import datetime
from getpass import getpass
import json
import os
import subprocess
import sys
import tempfile
PROJECT_NAME = "kafka"
CAPITALIZED_PROJECT_NAME = "kafka".upper()
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# Location of the local git repository
REPO_HOME = os.environ.get("%s_HOME" % CAPITALIZED_PROJECT_NAME, SCRIPT_DIR)
# Remote name, which points to Github by default
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache-github")
PREFS_FILE = os.path.join(SCRIPT_DIR, '.release-settings.json')
delete_gitrefs = False
work_dir = None
def fail(msg):
if work_dir:
cmd("Cleaning up work directory", "rm -rf %s" % work_dir)
if delete_gitrefs:
try:
cmd("Resetting repository working state to branch %s" % starting_branch, "git reset --hard HEAD && git checkout %s" % starting_branch, shell=True)
cmd("Deleting git branches %s" % release_version, "git branch -D %s" % release_version, shell=True)
cmd("Deleting git tag %s" %rc_tag , "git tag -d %s" % rc_tag, shell=True)
except subprocess.CalledProcessError:
print("Failed when trying to clean up git references added by this script. You may need to clean up branches/tags yourself before retrying.")
print("Expected git branch: " + release_version)
print("Expected git tag: " + rc_tag)
print(msg)
sys.exit(1)
def print_output(output):
if output is None or len(output) == 0:
return
for line in output.split('\n'):
print(">", line)
def cmd(action, cmd, *args, **kwargs):
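    # Print the action and run the command (list or string); on failure, print
    # the captured output and abort via fail() unless allow_failure=True.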
if isinstance(cmd, basestring) and not kwargs.get("shell", False):
cmd = cmd.split()
allow_failure = kwargs.pop("allow_failure", False)
stdin_log = ""
if "stdin" in kwargs and isinstance(kwargs["stdin"], basestring):
stdin_log = "--> " + kwargs["stdin"]
stdin = tempfile.TemporaryFile()
stdin.write(kwargs["stdin"])
stdin.seek(0)
kwargs["stdin"] = stdin
print(action, cmd, stdin_log)
try:
output = subprocess.check_output(cmd, *args, stderr=subprocess.STDOUT, **kwargs)
print_output(output)
except subprocess.CalledProcessError as e:
print_output(e.output)
if allow_failure:
return
print("*************************************************")
print("*** First command failure occurred here. ***")
print("*** Will now try to clean up working state. ***")
print("*************************************************")
fail("")
def cmd_output(cmd, *args, **kwargs):
if isinstance(cmd, basestring):
cmd = cmd.split()
return subprocess.check_output(cmd, *args, stderr=subprocess.STDOUT, **kwargs)
def replace(path, pattern, replacement):
updated = []
with open(path, 'r') as f:
for line in f:
updated.append((replacement + '\n') if line.startswith(pattern) else line)
with open(path, 'w') as f:
for line in updated:
f.write(line)
def user_ok(msg):
ok = raw_input(msg)
return ok.lower() == 'y'
def sftp_mkdir(dir):
basedir, dirname = os.path.split(dir)
if not basedir:
basedir = "."
try:
cmd_str = """
cd %s
-mkdir %s
""" % (basedir, dirname)
cmd("Creating '%s' in '%s' in your Apache home directory if it does not exist (errors are ok if the directory already exists)" % (dirname, basedir), "sftp -b - %[email protected]" % apache_id, stdin=cmd_str, allow_failure=True)
except subprocess.CalledProcessError:
# This is ok. The command fails if the directory already exists
pass
def get_pref(prefs, name, request_fn):
"Get a preference from existing preference dictionary or invoke a function that can collect it from the user"
val = prefs.get(name)
if not val:
val = request_fn()
prefs[name] = val
return val
# Load saved preferences
prefs = {}
if os.path.exists(PREFS_FILE):
with open(PREFS_FILE, 'r') as prefs_fp:
prefs = json.load(prefs_fp)
if not user_ok("""Requirements:
1. Updated docs to reference the new release version where appropriate.
2. JDK7 and JDK8 compilers and libraries
3. Your Apache ID, already configured with SSH keys on id.apache.org and SSH keys available in this shell session
4. All issues in the target release resolved with valid resolutions (if not, this script will report the problematic JIRAs)
5. A GPG key used for signing the release. This key should have been added to public Apache servers and the KEYS file on the Kafka site
6. Standard toolset installed -- git, gpg, gradle, sftp, etc.
7. ~/.gradle/gradle.properties configured with the signing properties described in the release process wiki, i.e.
mavenUrl=https://repository.apache.org/service/local/staging/deploy/maven2
mavenUsername=your-apache-id
mavenPassword=your-apache-passwd
signing.keyId=your-gpgkeyId
signing.password=your-gpg-passphrase
      signing.secretKeyRingFile=/Users/your-id/.gnupg/secring.gpg (if you are using GPG 2.1 and beyond, then this file will no longer exist, and you have to manually create it from the new private key directory with "gpg --export-secret-keys -o ~/.gnupg/secring.gpg")
8. ~/.m2/settings.xml configured for pgp signing and uploading to apache release maven, i.e.,
<server>
<id>apache.releases.https</id>
<username>your-apache-id</username>
<password>your-apache-passwd</password>
</server>
<server>
<id>your-gpgkeyId</id>
<passphrase>your-gpg-passphase</passphrase>
</server>
<profile>
<id>gpg-signing</id>
<properties>
<gpg.keyname>your-gpgkeyId</gpg.keyname>
<gpg.passphraseServerId>your-gpgkeyId</gpg.passphraseServerId>
</properties>
</profile>
9. You may also need to update some gnupgp configs:
~/.gnupg/gpg-agent.conf
allow-loopback-pinentry
~/.gnupg/gpg.conf
use-agent
pinentry-mode loopback
echo RELOADAGENT | gpg-connect-agent
If any of these are missing, see https://cwiki.apache.org/confluence/display/KAFKA/Release+Process for instructions on setting them up.
Some of these may be used from these previous settings loaded from %s:
%s
Do you have all of these set up? (y/n): """ % (PREFS_FILE, json.dumps(prefs, indent=2))):
fail("Please try again once you have all the prerequisites ready.")
starting_branch = cmd_output('git rev-parse --abbrev-ref HEAD')
cmd("Verifying that you have no unstaged git changes", 'git diff --exit-code --quiet')
cmd("Verifying that you have no staged git changes", 'git diff --cached --exit-code --quiet')
release_version = raw_input("Release version (without any RC info, e.g. 0.10.2.0): ")
try:
release_version_parts = release_version.split('.')
if len(release_version_parts) != 4:
fail("Invalid release version, should have 4 version number components")
# Validate each part is a number
[int(x) for x in release_version_parts]
except ValueError:
fail("Invalid release version, should be a dotted version number")
rc = raw_input("Release candidate number: ")
dev_branch = '.'.join(release_version_parts[:3])
docs_version = ''.join(release_version_parts[:3])
# Validate that the release doesn't already exist and that the local tags are up to date with upstream
cmd("Fetching tags from upstream", 'git fetch --tags %s' % PUSH_REMOTE_NAME)
tags = cmd_output('git tag').split()
if release_version in tags:
fail("The specified version has already been tagged and released.")
# TODO promotion
if not rc:
fail("Automatic Promotion is not yet supported.")
# Find the latest RC and make sure they want to promote that one
rc_tag = sorted([t for t in tags if t.startswith(release_version + '-rc')])[-1]
if not user_ok("Found %s as latest RC for this release. Is this correct? (y/n): "):
fail("This script couldn't determine which RC tag to promote, you'll need to fix up the RC tags and re-run the script.")
sys.exit(0)
# Prereq checks
apache_id = get_pref(prefs, 'apache_id', lambda: raw_input("Enter your apache username: "))
jdk7_java_home = get_pref(prefs, 'jdk7', lambda: raw_input("Enter the path for JAVA_HOME for a JDK7 compiler (blank to use default JAVA_HOME): "))
jdk7_env = dict(os.environ) if jdk7_java_home.strip() else None
if jdk7_env is not None: jdk7_env['JAVA_HOME'] = jdk7_java_home
if "1.7.0" not in cmd_output("java -version", env=jdk7_env):
fail("You must be able to build artifacts with JDK7 for Scala 2.10 and 2.11 artifacts")
jdk8_java_home = get_pref(prefs, 'jdk8', lambda: raw_input("Enter the path for JAVA_HOME for a JDK8 compiler (blank to use default JAVA_HOME): "))
jdk8_env = dict(os.environ) if jdk8_java_home.strip() else None
if jdk8_env is not None: jdk8_env['JAVA_HOME'] = jdk8_java_home
if "1.8.0" not in cmd_output("java -version", env=jdk8_env):
fail("You must be able to build artifacts with JDK8 for Scala 2.12 artifacts")
def select_gpg_key():
print("Here are the available GPG keys:")
available_keys = cmd_output("gpg --list-secret-keys")
print(available_keys)
key_name = raw_input("Which user name (enter the user name without email address): ")
if key_name not in available_keys:
fail("Couldn't find the requested key.")
return key_name
key_name = get_pref(prefs, 'gpg-key', select_gpg_key)
gpg_passphrase = get_pref(prefs, 'gpg-pass', lambda: getpass("Passphrase for this GPG key: "))
# Do a quick validation so we can fail fast if the password is incorrect
with tempfile.NamedTemporaryFile() as gpg_test_tempfile:
gpg_test_tempfile.write("abcdefg")
cmd("Testing GPG key & passphrase", ["gpg", "--batch", "--pinentry-mode", "loopback", "--passphrase-fd", "0", "-u", key_name, "--armor", "--output", gpg_test_tempfile.name + ".asc", "--detach-sig", gpg_test_tempfile.name], stdin=gpg_passphrase)
# Save preferences
print("Saving preferences to %s" % PREFS_FILE)
with open(PREFS_FILE, 'w') as prefs_fp:
    json.dump(prefs, prefs_fp)
# Generate RC
try:
int(rc)
except ValueError:
fail("Invalid release candidate number: %s" % rc)
rc_tag = release_version + '-rc' + rc
delete_gitrefs = True # Since we are about to start creating new git refs, enable cleanup function on failure to try to delete them
cmd("Checking out current development branch", "git checkout -b %s %s" % (release_version, PUSH_REMOTE_NAME + "/" + dev_branch))
print("Updating version numbers")
replace("gradle.properties", "version", "version=%s" % release_version)
replace("tests/kafkatest/__init__.py", "__version__", "__version__ = '%s'" % release_version)
cmd("update streams quickstart pom", ["sed", "-i", ".orig"," s/-SNAPSHOT//", "streams/quickstart/pom.xml"])
cmd("update streams quickstart java pom", ["sed", "-i", ".orig", "s/-SNAPSHOT//", "streams/quickstart/java/pom.xml"])
cmd("update streams quickstart java pom", ["sed", "-i", ".orig", "s/-SNAPSHOT//", "streams/quickstart/java/src/main/resources/archetype-resources/pom.xml"])
cmd("remove backup pom.xml", "rm streams/quickstart/pom.xml.orig")
cmd("remove backup java pom.xml", "rm streams/quickstart/java/pom.xml.orig")
cmd("remove backup java pom.xml", "rm streams/quickstart/java/src/main/resources/archetype-resources/pom.xml.orig")
# Command in explicit list due to messages with spaces
cmd("Commiting version number updates", ["git", "commit", "-a", "-m", "Bump version to %s" % release_version])
# Command in explicit list due to messages with spaces
cmd("Tagging release candidate %s" % rc_tag, ["git", "tag", "-a", rc_tag, "-m", rc_tag])
rc_githash = cmd_output("git show-ref --hash " + rc_tag)
cmd("Switching back to your starting branch", "git checkout %s" % starting_branch)
# Note that we don't use tempfile here because mkdtemp causes problems with sftp and being able to determine the absolute path to a file.
# Instead we rely on a fixed path and, if it already exists, we fail and ask the user to clean it up first.
work_dir = os.path.join(REPO_HOME, ".release_work_dir")
if os.path.exists(work_dir):
fail("A previous attempt at a release left dirty state in the work directory. Clean up %s before proceeding. (This attempt will try to cleanup, simply retrying may be sufficient now...)" % work_dir)
os.makedirs(work_dir)
print("Temporary build working director:", work_dir)
kafka_dir = os.path.join(work_dir, 'kafka')
streams_quickstart_dir = os.path.join(kafka_dir, 'streams/quickstart')
print("Streams quickstart dir", streams_quickstart_dir)
cmd("Creating staging area for release artifacts", "mkdir kafka-" + rc_tag, cwd=work_dir)
artifacts_dir = os.path.join(work_dir, "kafka-" + rc_tag)
cmd("Cloning clean copy of repo", "git clone %s kafka" % REPO_HOME, cwd=work_dir)
cmd("Checking out RC tag", "git checkout -b %s %s" % (release_version, rc_tag), cwd=kafka_dir)
current_year = datetime.datetime.now().year
cmd("Verifying the correct year in NOTICE", "grep %s NOTICE" % current_year, cwd=kafka_dir)
with open(os.path.join(artifacts_dir, "RELEASE_NOTES.html"), 'w') as f:
print("Generating release notes")
try:
subprocess.check_call(["./release_notes.py", release_version], stdout=f)
except subprocess.CalledProcessError as e:
print_output(e.output)
print("*************************************************")
print("*** First command failure occurred here. ***")
print("*** Will now try to clean up working state. ***")
print("*************************************************")
fail("")
params = { 'release_version': release_version,
'rc_tag': rc_tag,
'artifacts_dir': artifacts_dir
}
cmd("Creating source archive", "git archive --format tar.gz --prefix kafka-%(release_version)s-src/ -o %(artifacts_dir)s/kafka-%(release_version)s-src.tgz %(rc_tag)s" % params)
cmd("Building artifacts", "gradle", cwd=kafka_dir, env=jdk7_env)
cmd("Building artifacts", "./gradlew clean releaseTarGzAll aggregatedJavadoc", cwd=kafka_dir, env=jdk7_env)
# This should be removed when Java7 is dropped (cf. KAFKA-4421)
cmd("Building artifacts for Scala 2.12", "./gradlew releaseTarGz -PscalaVersion=2.12", cwd=kafka_dir, env=jdk8_env)
cmd("Copying artifacts", "cp %s/core/build/distributions/* %s" % (kafka_dir, artifacts_dir), shell=True)
cmd("Copying artifacts", "cp -R %s/build/docs/javadoc %s" % (kafka_dir, artifacts_dir))
for filename in os.listdir(artifacts_dir):
full_path = os.path.join(artifacts_dir, filename)
if not os.path.isfile(full_path):
continue
# Commands in explicit list due to key_name possibly containing spaces
cmd("Signing " + full_path, ["gpg", "--batch", "--passphrase-fd", "0", "-u", key_name, "--armor", "--output", full_path + ".asc", "--detach-sig", full_path], stdin=gpg_passphrase)
cmd("Verifying " + full_path, ["gpg", "--verify", full_path + ".asc", full_path])
# Note that for verification, we need to make sure only the filename is used with --print-md because the command line
# argument for the file is included in the output and verification uses a simple diff that will break if an absolute path
# is used.
dir, fname = os.path.split(full_path)
cmd("Generating MD5 for " + full_path, "gpg --print-md md5 %s > %s.md5" % (fname, fname), shell=True, cwd=dir)
cmd("Generating SHA1 for " + full_path, "gpg --print-md sha1 %s > %s.sha1" % (fname, fname), shell=True, cwd=dir)
cmd("Generating SHA512 for " + full_path, "gpg --print-md sha512 %s > %s.sha512" % (fname, fname), shell=True, cwd=dir)
cmd("Listing artifacts to be uploaded:", "ls -R %s" % artifacts_dir)
if not user_ok("Going to upload the artifacts in %s, listed above, to your Apache home directory. Ok (y/n)?): " % artifacts_dir):
fail("Quitting")
sftp_mkdir("public_html")
kafka_output_dir = "kafka-" + rc_tag
sftp_mkdir(os.path.join("public_html", kafka_output_dir))
public_release_dir = os.path.join("public_html", kafka_output_dir)
# The sftp -r option doesn't seem to work as would be expected, at least with the version shipping on OS X. To work around this we process all the files and directories manually...
sftp_cmds = ""
for root, dirs, files in os.walk(artifacts_dir):
assert root.startswith(artifacts_dir)
for dir in dirs:
sftp_mkdir(os.path.join("public_html", kafka_output_dir, root[len(artifacts_dir)+1:], dir))
for file in files:
local_path = os.path.join(root, file)
remote_path = os.path.join("public_html", kafka_output_dir, root[len(artifacts_dir)+1:], file)
sftp_cmds = """
put %s %s
""" % (local_path, remote_path)
cmd("Uploading artifacts in %s to your Apache home directory" % root, "sftp -b - %[email protected]" % apache_id, stdin=sftp_cmds)
with open(os.path.expanduser("~/.gradle/gradle.properties")) as f:
contents = f.read()
if not user_ok("Going to build and upload mvn artifacts based on these settings:\n" + contents + '\nOK (y/n)?: '):
fail("Retry again later")
cmd("Building and uploading archives", "./gradlew uploadArchivesAll", cwd=kafka_dir, env=jdk7_env)
cmd("Building and uploading archives", "./gradlew uploadCoreArchives_2_12 -PscalaVersion=2.12", cwd=kafka_dir, env=jdk8_env)
cmd("Building and uploading archives", "mvn deploy -Pgpg-signing", cwd=streams_quickstart_dir, env=jdk7_env)
release_notification_props = { 'release_version': release_version,
'rc': rc,
'rc_tag': rc_tag,
'rc_githash': rc_githash,
'dev_branch': dev_branch,
'docs_version': docs_version,
'apache_id': apache_id,
}
# TODO: Many of these suggested validation steps could be automated and would help pre-validate a lot of the stuff voters test
print("""
*******************************************************************************************************************************************************
Ok. We've built and staged everything for the %(rc_tag)s.
Now you should sanity check it before proceeding. All subsequent steps start making RC data public.
Some suggested steps:
* Grab the source archive and make sure it compiles: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz
* Grab one of the binary distros and run the quickstarts against them: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka_2.11-%(release_version)s.tgz
* Extract and verify one of the site docs jars: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka_2.11-%(release_version)s-site-docs.tgz
* Build a sample against jars in the staging repo: (TODO: Can we get a temporary URL before "closing" the staged artifacts?)
* Validate GPG signatures on at least one file:
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.asc &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.md5 &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.sha1 &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.sha512 &&
gpg --verify kafka-%(release_version)s-src.tgz.asc kafka-%(release_version)s-src.tgz &&
gpg --print-md md5 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.md5 &&
gpg --print-md sha1 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.sha1 &&
gpg --print-md sha512 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.sha512 &&
rm kafka-%(release_version)s-src.tgz* &&
echo "OK" || echo "Failed"
* Validate the javadocs look ok. They are at http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/javadoc/
*******************************************************************************************************************************************************
""" % release_notification_props)
if not user_ok("Have you sufficiently verified the release artifacts (y/n)?: "):
fail("Ok, giving up")
print("Next, we need to get the Maven artifacts we published into the staging repository.")
# TODO: Can we get this closed via a REST API since we already need to collect credentials for this repo?
print("Go to https://repository.apache.org/#stagingRepositories and hit 'Close' for the new repository that was created by uploading artifacts.")
if not user_ok("Have you successfully deployed the artifacts (y/n)?: "):
fail("Ok, giving up")
if not user_ok("Ok to push RC tag %s (y/n)?: " % rc_tag):
fail("Ok, giving up")
cmd("Pushing RC tag", "git push %s %s" % (PUSH_REMOTE_NAME, rc_tag))
# Move back to starting branch and clean out the temporary release branch (e.g. 0.10.2.0) we used to generate everything
cmd("Resetting repository working state", "git reset --hard HEAD && git checkout %s" % starting_branch, shell=True)
cmd("Deleting git branches %s" % release_version, "git branch -D %s" % release_version, shell=True)
email_contents = """
To: [email protected], [email protected], [email protected]
Subject: [VOTE] %(release_version)s RC%(rc)s
Hello Kafka users, developers and client-developers,
This is the first candidate for release of Apache Kafka %(release_version)s.
<DESCRIPTION OF MAJOR CHANGES, INCLUDE INDICATION OF MAJOR/MINOR RELEASE>
Release notes for the %(release_version)s release:
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/RELEASE_NOTES.html
*** Please download, test and vote by <VOTING DEADLINE, e.g. Monday, March 28, 9am PT>
Kafka's KEYS file containing PGP keys we use to sign the release:
http://kafka.apache.org/KEYS
* Release artifacts to be voted upon (source and binary):
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/
* Maven artifacts to be voted upon:
https://repository.apache.org/content/groups/staging/
* Javadoc:
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/javadoc/
* Tag to be voted upon (off %(dev_branch)s branch) is the %(release_version)s tag:
https://github.com/apache/kafka/releases/tag/%(rc_tag)s
* Documentation:
http://kafka.apache.org/%(docs_version)s/documentation.html
* Protocol:
http://kafka.apache.org/%(docs_version)s/protocol.html
* Successful Jenkins builds for the %(dev_branch)s branch:
Unit/integration tests: https://builds.apache.org/job/kafka-%(dev_branch)s-jdk7/<BUILD NUMBER>/
System tests: https://jenkins.confluent.io/job/system-test-kafka/job/%(dev_branch)s/<BUILD_NUMBER>/
/**************************************
Thanks,
<YOU>
""" % release_notification_props
print()
print()
print("*****************************************************************")
print()
print(email_contents)
print()
print("*****************************************************************")
print()
print("All artifacts should now be fully staged. Use the above template to send the announcement for the RC to the mailing list.")
print("IMPORTANT: Note that there are still some substitutions that need to be made in the template:")
print(" - Describe major changes in this release")
print(" - Deadline for voting, which should be at least 3 days after you send out the email")
print(" - Jenkins build numbers for successful unit & system test builds")
print(" - Fill in your name in the signature")
print(" - Finally, validate all the links before shipping!")
print("Note that all substitutions are annotated with <> around them.")
| 48.515564 | 275 | 0.685447 | [
"Apache-2.0"
] | SharaWeil/kafka-0.11.0 | release.py | 24,937 | Python |
from masonite.foundation import response_handler
from masonite.storage import StorageCapsule
from masonite.auth import Sign
from masonite.environment import LoadEnvironment
from masonite.utils.structures import load
from masonite.utils.location import base_path
from masonite.middleware import (
SessionMiddleware,
EncryptCookies,
LoadUserMiddleware,
MaintenanceModeMiddleware,
)
from masonite.routes import Route
from masonite.configuration.Configuration import Configuration
from masonite.configuration import config
from config.filesystem import STATICFILES
from app.middlewares.VerifyCsrfToken import VerifyCsrfToken
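# HTTP kernel for the Masonite application: wires up configuration, middleware,
# routes, database, templates and storage bindings when the app registers.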
class Kernel:
http_middleware = [MaintenanceModeMiddleware, EncryptCookies]
route_middleware = {
"web": [SessionMiddleware, LoadUserMiddleware, VerifyCsrfToken],
}
def __init__(self, app):
self.application = app
def register(self):
# Register routes
self.load_environment()
self.register_configurations()
self.register_middleware()
self.register_routes()
self.register_database()
self.register_templates()
self.register_storage()
def load_environment(self):
LoadEnvironment()
def register_configurations(self):
# load configuration
self.application.bind("config.location", "config")
configuration = Configuration(self.application)
configuration.load()
self.application.bind("config", configuration)
key = config("application.key")
self.application.bind("key", key)
self.application.bind("sign", Sign(key))
# set locations
self.application.bind("resources.location", "resources/")
self.application.bind("controllers.location", "app/controllers")
self.application.bind("jobs.location", "app/jobs")
self.application.bind("providers.location", "app/providers")
self.application.bind("mailables.location", "app/mailables")
self.application.bind("listeners.location", "app/listeners")
self.application.bind("validation.location", "app/validation")
self.application.bind("notifications.location", "app/notifications")
self.application.bind("events.location", "app/events")
self.application.bind("tasks.location", "app/tasks")
self.application.bind("models.location", "app/models")
self.application.bind("observers.location", "app/models/observers")
self.application.bind("policies.location", "app/policies")
self.application.bind("commands.location", "app/commands")
self.application.bind("middlewares.location", "app/middlewares")
self.application.bind("server.runner", "masonite.commands.ServeCommand.main")
def register_middleware(self):
self.application.make("middleware").add(self.route_middleware).add(self.http_middleware)
def register_routes(self):
Route.set_controller_locations(self.application.make("controllers.location"))
self.application.bind("routes.location", "routes/web")
self.application.make("router").add(
Route.group(
load(self.application.make("routes.location"), "ROUTES"), middleware=["web"]
)
)
def register_database(self):
from masoniteorm.query import QueryBuilder
self.application.bind(
"builder",
QueryBuilder(connection_details=config("database.databases")),
)
self.application.bind("migrations.location", "databases/migrations")
self.application.bind("seeds.location", "databases/seeds")
self.application.bind("resolver", config("database.db"))
def register_templates(self):
self.application.bind("views.location", "templates/")
def register_storage(self):
storage = StorageCapsule()
storage.add_storage_assets(STATICFILES)
self.application.bind("storage_capsule", storage)
self.application.set_response_handler(response_handler)
self.application.use_storage_path(base_path("storage"))
| 37.990741 | 96 | 0.700219 | [
"MIT"
] | MasoniteFramework/masonite-packages | Kernel.py | 4,103 | Python |
import html
import os
import pathlib
import shutil
import sqlite3
import sys
from collections import OrderedDict
from scripts.html_parts import *
from scripts.ilapfuncs import logfunc
from scripts.version_info import aleapp_version, aleapp_contributors
def get_icon_name(category, artifact):
''' Returns the icon name from the feathericons collection. To add an icon type for
an artifact, select one of the types from ones listed @ feathericons.com
If no icon is available, the alert triangle is returned as default icon.
'''
category = category.upper()
artifact = artifact.upper()
icon = 'alert-triangle' # default (if not defined!)
## Please keep list below SORTED by category
if category.find('ACCOUNT') >= 0:
if artifact.find('AUTH') >= 0:
icon = 'key'
else:
icon = 'user'
elif category == 'ADDRESS BOOK':
icon = 'book-open'
elif category == 'ALARMS':
icon = 'clock'
elif category == 'AIRTAGS':
icon = 'map-pin'
elif category == 'APPLE PODCASTS':
icon = 'play-circle'
elif category == 'APPLE WALLET':
if artifact == 'TRANSACTIONS':
icon = 'dollar-sign'
if artifact == 'CARDS':
icon = 'credit-card'
if artifact == 'PASSES':
icon = 'send'
elif category == 'APP CONDUIT':
icon = 'activity'
elif category == 'APP PERMISSIONS':
icon = 'key'
elif category == 'CARPLAY':
icon = 'package'
elif category == 'CASH APP':
icon = 'credit-card'
elif category == 'APP UPDATES':
icon = 'codepen'
elif category == 'APPLICATIONS':
icon = 'grid'
elif category == 'AGGREGATE DICTIONARY':
icon = 'book'
elif category == 'BLUETOOTH':
icon = 'bluetooth'
elif category == 'CALENDAR':
icon = 'calendar'
elif category == 'CALL HISTORY':
icon = 'phone-call'
elif category == 'CELLULAR WIRELESS':
icon = 'bar-chart'
elif category == 'CLOUDKIT':
if artifact == 'PARTICIPANTS':
icon = 'user'
elif artifact == 'NOTE SHARING':
icon = 'share-2'
elif category == 'CONNECTED TO':
icon = 'zap'
elif category == 'COREDUET':
if artifact == 'AIRPLANE MODE':
icon = 'pause'
if artifact == 'LOCK STATE':
icon = 'lock'
if artifact == 'PLUGGED IN':
icon = 'battery-charging'
elif category == 'DATA USAGE':
icon = 'wifi'
elif category == 'DEVICE INFO':
if artifact == 'BUILD INFO':
icon = 'terminal'
elif artifact == 'IOS SYSTEM VERSION':
icon = 'git-commit'
elif artifact == 'PARTNER SETTINGS':
icon = 'settings'
elif artifact.find('SETTINGS_SECURE_') >= 0:
icon = 'settings'
else:
icon = 'info'
elif category == 'DHCP':
icon = 'settings'
elif category == 'DISCORD':
if artifact == 'DISCORD MESSAGES':
icon = 'message-square'
if artifact == 'DISCORD ACCOUNT':
icon = 'user'
if artifact == 'DISCORD MANIFEST':
icon = 'file-text'
elif category == 'FACEBOOK MESSENGER':
icon = 'facebook'
elif category == 'FILES APP':
icon = 'file-text'
elif category == 'GEOLOCATION':
if artifact == 'APPLICATIONS':
icon = 'grid'
elif artifact == 'MAP TILE CACHE':
icon = 'map'
elif artifact == 'PD PLACE CACHE':
icon = 'map-pin'
elif category == 'GOOGLE DUO':
if artifact == 'GOOGLE DUO - CALL HISTORY':
icon = 'phone-call'
if artifact == 'GOOGLE DUO - CONTACTS':
icon = 'user'
if artifact == 'GOOGLE DUO - CLIPS':
icon = 'video'
elif category == 'HEALTH DATA':
icon = 'heart'
elif category == 'ICLOUD QUICK LOOK':
icon = 'file'
elif category == 'ICLOUD RETURNS':
icon = 'cloud'
elif category == 'ICLOUD SHARED ALBUMS':
icon = 'cloud'
elif category == 'IMO HD CHAT':
if artifact == 'IMO HD CHAT - MESSAGES':
icon = 'message-circle'
if artifact == 'IMO HD CHAT - CONTACTS':
icon = 'user'
elif category == 'INSTAGRAM':
if artifact == 'INSTAGRAM THREADS':
icon = 'message-square'
if artifact == 'INSTAGRAM THREADS CALLS':
icon = 'phone'
elif category == 'INSTALLED APPS':
icon = 'package'
elif category == 'INTERACTIONC':
if artifact == 'CONTACTS':
icon = 'user'
elif artifact == 'ATTACHMENTS':
icon = 'paperclip'
elif category == 'IOS BUILD':
icon = 'git-commit'
elif category == 'IOS MAIL':
icon = 'mail'
elif category == 'IOS SCREENS':
icon = 'maximize'
elif category == 'KEYBOARD':
if artifact == 'KEYBOARD DYNAMIC LEXICON':
icon = 'type'
elif artifact == 'KEYBOARD APPLICATION USAGE':
icon = 'type'
elif category == 'KIK':
if artifact == 'KIK MESSAGES':
icon = 'message-square'
if artifact == 'KIK USERS':
icon = 'user'
if artifact == 'KIK MEDIA METADATA':
icon = 'file-plus'
if artifact == 'KIK PENDING UPLOADS':
icon = 'upload'
elif category == 'KNOWLEDGEC':
if artifact == 'KNOWLEDGEC DEVICE LOCKED':
icon = 'lock'
elif artifact == 'KNOWLEDGEC PLUGGED IN':
icon = 'battery-charging'
elif artifact == 'KNOWLEDGEC BATTERY LEVEL':
icon = 'battery'
else:
icon = 'activity'
elif category == 'LOCATIONS':
if artifact == 'APPLE MAPS SEARCH HISTORY':
icon = 'search'
else:
icon = 'map-pin'
elif category == 'LOCATION SERVICES CONFIGURATIONS':
icon = 'settings'
elif category == 'MEDIA LIBRARY':
icon = 'play-circle'
elif category == 'MEDIA METADATA':
icon = 'file-plus'
elif category == 'MEDICAL ID':
icon = 'thermometer'
elif category == 'MICROSOFT TEAMS - LOGS':
if artifact == 'TEAMS LOCATIONS':
icon = 'map-pin'
if artifact == 'TEAMS MOTION':
icon = 'move'
if artifact == 'TEAMS STATE CHANGE':
icon = 'truck'
if artifact == 'TEAMS POWER LOG':
icon = 'battery-charging'
if artifact == 'TEAMS TIMEZONE':
icon = 'clock'
elif category == 'MICROSOFT TEAMS':
if artifact == 'TEAMS MESSAGES':
icon = 'message-square'
if artifact == 'TEAMS CONTACT':
icon = 'users'
if artifact == 'TEAMS USER':
icon = 'user'
if artifact == 'TEAMS CALL LOGS':
icon = 'phone'
if artifact == 'TEAMS SHARED LOCATIONS':
icon = 'map-pin'
elif category == 'MOBILE ACTIVATION LOGS':
icon = 'clipboard'
elif category == 'MOBILE BACKUP':
icon = 'save'
elif category == 'MOBILE CONTAINER MANAGER':
icon = 'save'
elif category == 'MOBILE INSTALLATION LOGS':
icon = 'clipboard'
elif category == 'MOBILE SOFTWARE UPDATE':
icon = 'refresh-cw'
elif category == 'NOTES':
icon = 'file-text'
elif category == 'NOTIFICATIONS':
icon = 'bell'
elif category == 'PHOTOS':
icon = 'image'
elif category == 'POWERLOG':
icon = 'power'
elif category == 'POWERLOG BACKUPS':
icon = 'power'
elif category == 'PROTON MAIL':
icon = 'mail'
elif category == 'RECENT ACTIVITY':
icon = 'activity'
elif category == 'REMINDERS':
icon = 'list'
elif category == 'ROUTINED':
icon = 'map'
elif category == 'SAFARI BROWSER':
icon = 'compass'
elif category == 'SCREENTIME':
icon = 'monitor'
elif category == 'SCRIPT LOGS':
icon = 'archive'
elif category == 'SLACK':
if artifact == 'SLACK MESSAGES':
icon = 'message-square'
if artifact == 'SLACK USER DATA':
icon = 'user'
if artifact == 'SLACK ATTACHMENTS':
icon = 'paperclip'
if artifact == 'SLACK WORKSPACE DATA':
icon = 'slack'
if artifact == 'SLACK TEAM DATA':
icon = 'slack'
if artifact == 'SLACK CHANNEL DATA':
icon = 'slack'
elif category == 'SMS & IMESSAGE':
icon = 'message-square'
elif category == 'SQLITE JOURNALING':
icon = 'book-open'
elif category == 'TEXT INPUT MESSAGES':
icon = 'message-square'
elif category == 'TIKTOK':
if artifact == 'TIKTOK MESSAGES':
icon = 'message-square'
if artifact == 'TIKTOK CONTACTS':
icon = 'user'
elif category == 'USER DICTIONARY':
icon = 'book'
elif category == 'VENMO':
icon = 'dollar-sign'
elif category == 'VIBER':
if artifact == 'VIBER - SETTINGS':
icon = 'settings'
if artifact == 'VIBER - CONTACTS':
icon = 'users'
if artifact == 'VIBER - CHATS':
icon = 'message-square'
if artifact == 'VIBER - CALL REMNANTS':
icon = 'phone-call'
elif category == 'VOICE-RECORDINGS':
icon = 'mic'
elif category == 'VOICE-TRIGGERS':
icon = 'mic'
elif category == 'WHATSAPP':
if artifact == 'WHATSAPP - MESSAGES':
icon = 'message-square'
if artifact == 'WHATSAPP - CONTACTS':
icon = 'users'
elif category == 'WIFI CONNECTIONS':
icon = 'wifi'
elif category == 'WIFI KNOWN NETWORKS':
icon = 'wifi'
return icon
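# Assembles the final HTML report: converts each parser's .temphtml page into a full
# page with the shared sidebar, writes index.html and copies the static _elements assets.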
def generate_report(reportfolderbase, time_in_secs, time_HMS, extraction_type, image_input_path):
control = None
side_heading = \
"""<h6 class="sidebar-heading justify-content-between align-items-center px-3 mt-4 mb-1 text-muted">
{0}
</h6>
"""
list_item = \
"""
<li class="nav-item">
<a class="nav-link {0}" href="{1}">
<span data-feather="{2}"></span> {3}
</a>
</li>
"""
# Populate the sidebar dynamic data (depends on data/files generated by parsers)
# Start with the 'saved reports' (home) page link and then append elements
nav_list_data = side_heading.format('Saved Reports') + list_item.format('', 'index.html', 'home', 'Report Home')
# Get all files
side_list = OrderedDict() # { Category1 : [path1, path2, ..], Cat2:[..] } Dictionary containing paths as values, key=category
for root, dirs, files in sorted(os.walk(reportfolderbase)):
for file in files:
if file.endswith(".temphtml"):
fullpath = (os.path.join(root, file))
head, tail = os.path.split(fullpath)
p = pathlib.Path(fullpath)
SectionHeader = (p.parts[-2])
if SectionHeader == '_elements':
pass
else:
if control == SectionHeader:
side_list[SectionHeader].append(fullpath)
icon = get_icon_name(SectionHeader, tail.replace(".temphtml", ""))
nav_list_data += list_item.format('', tail.replace(".temphtml", ".html"), icon,
tail.replace(".temphtml", ""))
else:
control = SectionHeader
side_list[SectionHeader] = []
side_list[SectionHeader].append(fullpath)
nav_list_data += side_heading.format(SectionHeader)
icon = get_icon_name(SectionHeader, tail.replace(".temphtml", ""))
nav_list_data += list_item.format('', tail.replace(".temphtml", ".html"), icon,
tail.replace(".temphtml", ""))
# Now that we have all the file paths, start writing the files
for category, path_list in side_list.items():
for path in path_list:
old_filename = os.path.basename(path)
filename = old_filename.replace(".temphtml", ".html")
# search for it in nav_list_data, then mark that one as 'active' tab
active_nav_list_data = mark_item_active(nav_list_data, filename) + nav_bar_script
artifact_data = get_file_content(path)
# Now write out entire html page for artifact
f = open(os.path.join(reportfolderbase, filename), 'w', encoding='utf8')
artifact_data = insert_sidebar_code(artifact_data, active_nav_list_data, path)
f.write(artifact_data)
f.close()
# Now delete .temphtml
os.remove(path)
# If dir is empty, delete it
try:
os.rmdir(os.path.dirname(path))
except OSError:
pass # Perhaps it was not empty!
# Create index.html's page content
create_index_html(reportfolderbase, time_in_secs, time_HMS, extraction_type, image_input_path, nav_list_data)
elements_folder = os.path.join(reportfolderbase, '_elements')
os.mkdir(elements_folder)
__location__ = os.path.dirname(os.path.abspath(__file__))
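    # copy_function for shutil.copytree below: copies file contents only (no permission
    # bits) and skips destinations that already exist as directories.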
def copy_no_perm(src, dst, *, follow_symlinks=True):
if not os.path.isdir(dst):
shutil.copyfile(src, dst)
return dst
try:
shutil.copyfile(os.path.join(__location__, "logo.jpg"), os.path.join(elements_folder, "logo.jpg"))
shutil.copyfile(os.path.join(__location__, "dashboard.css"), os.path.join(elements_folder, "dashboard.css"))
shutil.copyfile(os.path.join(__location__, "feather.min.js"), os.path.join(elements_folder, "feather.min.js"))
shutil.copyfile(os.path.join(__location__, "dark-mode.css"), os.path.join(elements_folder, "dark-mode.css"))
shutil.copyfile(os.path.join(__location__, "dark-mode-switch.js"),
os.path.join(elements_folder, "dark-mode-switch.js"))
shutil.copyfile(os.path.join(__location__, "chats.css"), os.path.join(elements_folder, "chats.css"))
shutil.copytree(os.path.join(__location__, "MDB-Free_4.13.0"), os.path.join(elements_folder, 'MDB-Free_4.13.0'),
copy_function=copy_no_perm)
except shutil.Error:
print("shutil reported an error. Maybe due to recursive directory copying.")
if os.path.exists(os.path.join(elements_folder, 'MDB-Free_4.13.0')):
print("_elements folder seems fine. Probably nothing to worry about")
def get_file_content(path):
f = open(path, 'r', encoding='utf8')
data = f.read()
f.close()
return data
def create_index_html(reportfolderbase, time_in_secs, time_HMS, extraction_type, image_input_path, nav_list_data):
'''Write out the index.html page to the report folder'''
content = '<br />'
content += """
<div class="card bg-white" style="padding: 20px;">
<h2 class="card-title">Case Information</h2>
""" # CARD start
case_list = [['Extraction location', image_input_path],
['Extraction type', extraction_type],
['Report directory', reportfolderbase],
['Processing time', f'{time_HMS} (Total {time_in_secs} seconds)']]
tab1_content = generate_key_val_table_without_headings('', case_list) + \
""" <p class="note note-primary mb-4">
All dates and times are in UTC unless noted otherwise!
</p>
"""
# Get script run log (this will be tab2)
devinfo_files_path = os.path.join(reportfolderbase, 'Script Logs', 'DeviceInfo.html')
tab2_content = get_file_content(devinfo_files_path)
# Get script run log (this will be tab3)
script_log_path = os.path.join(reportfolderbase, 'Script Logs', 'Screen Output.html')
tab3_content = get_file_content(script_log_path)
# Get processed files list (this will be tab3)
processed_files_path = os.path.join(reportfolderbase, 'Script Logs', 'ProcessedFilesLog.html')
tab4_content = get_file_content(processed_files_path)
content += tabs_code.format(tab1_content, tab2_content, tab3_content, tab4_content)
content += '</div>' # CARD end
authors_data = generate_authors_table_code(aleapp_contributors)
credits_code = credits_block.format(authors_data)
# WRITE INDEX.HTML LAST
filename = 'index.html'
page_title = 'iLEAPP Report'
body_heading = 'iOS Logs Events And Protobuf Parser'
body_description = 'iLEAPP is an open source project that aims to parse every known iOS artifact for the purpose of forensic analysis.'
active_nav_list_data = mark_item_active(nav_list_data, filename) + nav_bar_script
f = open(os.path.join(reportfolderbase, filename), 'w', encoding='utf8')
f.write(page_header.format(page_title))
f.write(body_start.format(f"iLEAPP {aleapp_version}"))
f.write(body_sidebar_setup + active_nav_list_data + body_sidebar_trailer)
f.write(body_main_header + body_main_data_title.format(body_heading, body_description))
f.write(content)
f.write(thank_you_note)
f.write(credits_code)
f.write(body_main_trailer + body_end + nav_bar_script_footer + page_footer)
f.close()
def generate_authors_table_code(aleapp_contributors):
authors_data = ''
for author_name, blog, tweet_handle, git in aleapp_contributors:
author_data = ''
if blog:
author_data += f'<a href="{blog}" target="_blank">{blog_icon}</a> \n'
else:
author_data += f'{blank_icon} \n'
if tweet_handle:
author_data += f'<a href="https://twitter.com/{tweet_handle}" target="_blank">{twitter_icon}</a> \n'
else:
author_data += f'{blank_icon} \n'
if git:
author_data += f'<a href="{git}" target="_blank">{github_icon}</a>\n'
else:
author_data += f'{blank_icon}'
authors_data += individual_contributor.format(author_name, author_data)
return authors_data
def generate_key_val_table_without_headings(title, data_list, html_escape=True, width="70%"):
'''Returns the html code for a key-value table (2 cols) without col names'''
code = ''
if title:
code += f'<h2>{title}</h2>'
table_header_code = \
"""
<div class="table-responsive">
<table class="table table-bordered table-hover table-sm" width={}>
<tbody>
"""
table_footer_code = \
"""
</tbody>
</table>
</div>
"""
code += table_header_code.format(width)
# Add the rows
if html_escape:
for row in data_list:
code += '<tr>' + ''.join(('<td>{}</td>'.format(html.escape(str(x))) for x in row)) + '</tr>'
else:
for row in data_list:
code += '<tr>' + ''.join(('<td>{}</td>'.format(str(x)) for x in row)) + '</tr>'
# Add footer
code += table_footer_code
return code
def insert_sidebar_code(data, sidebar_code, filename):
pos = data.find(body_sidebar_dynamic_data_placeholder)
if pos < 0:
logfunc(f'Error, could not find {body_sidebar_dynamic_data_placeholder} in file {filename}')
return data
else:
ret = data[0: pos] + sidebar_code + data[pos + len(body_sidebar_dynamic_data_placeholder):]
return ret
def mark_item_active(data, itemname):
'''Finds itemname in data, then marks that node as active. Return value is changed data'''
pos = data.find(f'" href="{itemname}"')
if pos < 0:
logfunc(f'Error, could not find {itemname} in {data}')
return data
else:
ret = data[0: pos] + " active" + data[pos:]
return ret
| 37.477528 | 139 | 0.578574 | [
"MIT"
] | theAtropos4n6/iLEAPP | scripts/report.py | 20,013 | Python |
import unittest
from realm.cli.application import Application
from realm.cli.commands.install import InstallCommand
from realm.cli.commands.ls import LsCommand
from realm.cli.commands.task import TaskCommand
from realm.entities import Config, RealmContext
from realm.utils.child_process import ChildProcess
from tests.common import get_tests_root_dir, captured_output
REPO_DIR = get_tests_root_dir().joinpath('scenarios/multiple_packages_with_tasks')
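# Scenario fixture expected to contain one discoverable project ('pkg') that exposes a Poe 'test' task.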
class TestCommands(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
# Create config once
cls.cfg = Config.from_file(realm_json_file=str(REPO_DIR.joinpath('realm.json')))
def setUp(self) -> None:
# Create context every test
self.ctx = RealmContext(config=self.cfg,
projects=Application.get_projects(self.cfg))
def test_scan(self):
found = len(self.ctx.projects)
self.assertEqual(found, 1)
def test_ls(self):
cmd = LsCommand(self.ctx)
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, '[email protected]')
def test_task_install(self):
install_cmd = InstallCommand(self.ctx)
task_cmd = TaskCommand(self.ctx, task_name='test')
self.assertEqual(len(task_cmd.ctx.projects), 1)
with captured_output(stderr=False) as (out, _):
install_cmd.run()
task_cmd.run()
output = out.getvalue()
self.assertIn('Installing the current project: pkg', output)
self.assertIn('Poe => python -m unittest discover -s tests -v -p "test_*.py"', output)
def test_git_diff(self):
cmd = LsCommand(self.ctx, since='.')
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, '')
def test_git_diff_with_change(self):
pkg_proj = [p for p in self.ctx.projects if p.name == 'pkg'][0]
try:
with pkg_proj.source_dir.joinpath('pyproject.toml').open('a') as f:
print('', file=f)
cmd = LsCommand(self.ctx, since='.')
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, '[email protected]')
finally:
ChildProcess.run(f'git checkout {pkg_proj.source_dir}')
def test_scope_filter(self):
cmd = LsCommand(self.ctx, scope=['p*'])
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, '[email protected]')
def test_ignore_filter(self):
cmd = LsCommand(self.ctx, ignore=['p*'])
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, '')
def test_match_filter(self):
cmd = LsCommand(self.ctx, match=['labels.type=package'])
with captured_output() as (out, _):
cmd.run()
output = out.getvalue().strip()
self.assertEqual(output, '[email protected]')
| 33.5 | 94 | 0.617021 | [
"MIT"
] | orlevii/realm | tests/test_multiple_packages_with_tasks.py | 3,149 | Python |
#! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from sys import version_info
install_requires = []
if version_info[:2] <= (2, 5):
install_requires.append('simplejson >= 2.0.9')
setup(
name = 'avro',
version = '1.7.6',
packages = ['avro',],
package_dir = {'avro': 'src/avro'},
scripts = ["./scripts/avro"],
# Project uses simplejson, so ensure that it gets installed or upgraded
# on the target machine
install_requires = install_requires,
# metadata for upload to PyPI
author = 'Apache Avro',
author_email = '[email protected]',
description = 'Avro is a serialization and RPC framework.',
license = 'Apache License 2.0',
keywords = 'avro serialization rpc',
url = 'http://hadoop.apache.org/avro',
extras_require = {
'snappy': ['python-snappy'],
},
)
| 33.08 | 74 | 0.724305 | [
"Apache-2.0"
] | Albertsss/hue | desktop/core/ext-py/avro-1.7.6/setup.py | 1,654 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .core import * | 16.75 | 23 | 0.597015 | [
"MIT"
] | IlyaDanilenko/kiberdrom_core | kiberdrom_core/controller/__init__.py | 67 | Python |
from django import template
from django.utils.safestring import mark_safe
import markdown
from markdownx.utils import markdownify
from markdownx.settings import (
MARKDOWNX_MARKDOWN_EXTENSIONS,
MARKDOWNX_MARKDOWN_EXTENSION_CONFIGS
)
from markdown.extensions import Extension
register = template.Library()
@register.filter
def markdown_to_html(text):
"""マークダウンをhtmlに変換する。"""
return mark_safe(markdownify(text))
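# Markdown extension that disables raw-HTML handling, so any HTML in the source text
# is escaped in the output instead of being rendered.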
class EscapeHtml(Extension):
def extendMarkdown(self, md):
md.preprocessors.deregister('html_block')
md.inlinePatterns.deregister('html')
@register.filter
def markdown_to_html_with_escape(text):
"""マークダウンをhtmlに変換する。
生のHTMLやCSS、JavaScript等のコードをエスケープした上で、マークダウンをHTMLに変換します。
公開しているコメント欄等には、こちらを使ってください。
"""
extensions = MARKDOWNX_MARKDOWN_EXTENSIONS + [EscapeHtml()]
html = markdown.markdown(
text, extensions=extensions,
extension_configs=MARKDOWNX_MARKDOWN_EXTENSION_CONFIGS
)
return mark_safe(html)
| 25.15 | 63 | 0.762425 | [
"MIT"
] | whitecat-22/blog_site | blog/templatetags/markdown_html.py | 1,180 | Python |
import numpy as np
from bayes_implicit_solvent.continuous_parameter_experiments.elemental_types_mh import log_prior, mols, ll, data_path, \
smiles
smiles_list = smiles
from bayes_implicit_solvent.typers import RADIUS_UNIT
from bayes_implicit_solvent.freesolv import smiles_list
from bayes_implicit_solvent.typers import AtomSpecificationProposal
np.random.seed(0)
from bayes_implicit_solvent.gb_models.obc2_parameters import mbondi_model
initial_tree = mbondi_model
initial_tree.remove_node('[#14]') # otherwise everything is -inf, because this type will be empty
initial_tree.proposal_sigmas['radius'] = 1e-2 * RADIUS_UNIT
initial_tree.proposal_sigmas['scale_factor'] = 1e-2
# add one more parameter per element appearing in FreeSolv but not specified in obc2 parameter set to initial tree
for i in [17, 35, 53]:
smirks = '[#{}]'.format(i)
initial_tree.add_child(smirks, '*')
initial_tree.un_delete_able_types.add(smirks)
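# SMIRKS atom decorators the cross-model proposal may append: connectivity (X1-X4),
# aromatic/aliphatic (a/A) and formal charge (-1 to +2).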
specifiers = ['X1', 'X2', 'X3', 'X4', 'a', 'A', '-1', '+0', '+1', '+2']
atom_specification_proposal = AtomSpecificationProposal(atomic_specifiers=specifiers)
smirks_elaboration_proposal = atom_specification_proposal
print('initial tree:')
print(initial_tree)
n_configuration_samples = 25
import os
name = 'tree_rjmc_n_config={}_{}_ll'.format(n_configuration_samples, ll)
smiles_subset_fname = os.path.join(data_path,
'smiles_subset_{}.txt'.format(name))
with open(smiles_subset_fname, 'w') as f:
f.writelines(['{}\n'.format(s) for s in smiles_list])
from bayes_implicit_solvent.prior_checking import check_no_empty_types
error_y_trees = []
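# Log posterior of a typing tree: a prior that rejects trees with empty types, a prior
# over the continuous radii/scale parameters, and the per-molecule log-likelihood summed over mols.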
def log_prob(tree):
log_prior_value = check_no_empty_types(tree)
theta = np.hstack([tree.get_radii(), tree.get_scale_factors()])
log_prior_value += log_prior(theta)
if log_prior_value > -np.inf:
try:
# TODO: Parallelize. Note that multiprocessing.Pool won't work here because it doesn't play nice with SwigPy objects
# TODO: update to allow scale factors to be variable also
log_likelihood_value = 0
for mol in mols:
radii = tree.assign_radii(mol.mol) / RADIUS_UNIT
scale_factors = tree.assign_scale_factors(mol.mol)
log_likelihood_value += mol.log_prob(radii, scale_factors)
except:
global error_y_trees
error_y_trees.append(tree)
print('Warning! Encountered un-anticipated exception!')
return - np.inf
return log_prior_value + log_likelihood_value
else:
return log_prior_value
from bayes_implicit_solvent.samplers import tree_rjmc
from pickle import dump
n_iterations = 10000
result = tree_rjmc(initial_tree, log_prob, smirks_elaboration_proposal, n_iterations=n_iterations,
fraction_cross_model_proposals=0.1)
with open('elaborate_tree_rjmc2_run_n_compounds={}_n_iter={}_gaussian_ll.pkl'.format(len(mols), n_iterations),
'wb') as f:
dump(result, f)
with open('error_y_trees.pkl', 'wb') as f:
dump(error_y_trees, f)
| 34.764045 | 128 | 0.723659 | [
"MIT"
] | openforcefield/bayes-implicit-solvent | bayes_implicit_solvent/rjmc_experiments/tree_rjmc2.py | 3,094 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from ray.rllib.agents import Agent, with_common_config
from ray.rllib.agents.ppo.ppo_policy_graph import PPOPolicyGraph
from ray.rllib.optimizers import SyncSamplesOptimizer, LocalMultiGPUOptimizer
from ray.rllib.utils.annotations import override
logger = logging.getLogger(__name__)
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# If true, use the Generalized Advantage Estimator (GAE)
# with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
"use_gae": True,
# GAE(lambda) parameter
"lambda": 1.0,
# Initial coefficient for KL divergence
"kl_coeff": 0.2,
# Size of batches collected from each worker
"sample_batch_size": 200,
# Number of timesteps collected for each SGD round
"train_batch_size": 4000,
# Total SGD batch size across all devices for SGD
"sgd_minibatch_size": 128,
# Number of SGD iterations in each outer loop
"num_sgd_iter": 30,
# Stepsize of SGD
"lr": 5e-5,
# Learning rate schedule
"lr_schedule": None,
# Share layers for value function
"vf_share_layers": False,
# Coefficient of the value function loss
"vf_loss_coeff": 1.0,
# Coefficient of the entropy regularizer
"entropy_coeff": 0.0,
# PPO clip parameter
"clip_param": 0.3,
# Clip param for the value function. Note that this is sensitive to the
# scale of the rewards. If your expected V is large, increase this.
"vf_clip_param": 10.0,
# If specified, clip the global norm of gradients by this amount
"grad_clip": None,
# Target value for KL divergence
"kl_target": 0.01,
# Whether to rollout "complete_episodes" or "truncate_episodes"
"batch_mode": "truncate_episodes",
# Which observation filter to apply to the observation
"observation_filter": "NoFilter",
# Uses the sync samples optimizer instead of the multi-gpu one. This does
# not support minibatches.
"simple_optimizer": False,
# (Deprecated) Use the sampling behavior as of 0.6, which launches extra
# sampling tasks for performance but can waste a large portion of samples.
"straggler_mitigation": False,
})
# __sphinx_doc_end__
# yapf: enable
class PPOAgent(Agent):
"""Multi-GPU optimized implementation of PPO in TensorFlow."""
_agent_name = "PPO"
_default_config = DEFAULT_CONFIG
_policy_graph = PPOPolicyGraph
@override(Agent)
def _init(self):
self._validate_config()
self.local_evaluator = self.make_local_evaluator(
self.env_creator, self._policy_graph)
self.remote_evaluators = self.make_remote_evaluators(
self.env_creator, self._policy_graph, self.config["num_workers"])
if self.config["simple_optimizer"]:
self.optimizer = SyncSamplesOptimizer(
self.local_evaluator, self.remote_evaluators, {
"num_sgd_iter": self.config["num_sgd_iter"],
"train_batch_size": self.config["train_batch_size"],
})
else:
self.optimizer = LocalMultiGPUOptimizer(
self.local_evaluator, self.remote_evaluators, {
"sgd_batch_size": self.config["sgd_minibatch_size"],
"num_sgd_iter": self.config["num_sgd_iter"],
"num_gpus": self.config["num_gpus"],
"sample_batch_size": self.config["sample_batch_size"],
"num_envs_per_worker": self.config["num_envs_per_worker"],
"train_batch_size": self.config["train_batch_size"],
"standardize_fields": ["advantages"],
"straggler_mitigation": (
self.config["straggler_mitigation"]),
})
@override(Agent)
def _train(self):
if "observation_filter" not in self.raw_user_config:
# TODO(ekl) remove this message after a few releases
logger.info(
"Important! Since 0.7.0, observation normalization is no "
"longer enabled by default. To enable running-mean "
"normalization, set 'observation_filter': 'MeanStdFilter'. "
"You can ignore this message if your environment doesn't "
"require observation normalization.")
prev_steps = self.optimizer.num_steps_sampled
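        # Run one optimization round, then adapt the KL penalty coefficient of each
        # trainable policy from the KL divergence measured during that round.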
fetches = self.optimizer.step()
if "kl" in fetches:
# single-agent
self.local_evaluator.for_policy(
lambda pi: pi.update_kl(fetches["kl"]))
else:
def update(pi, pi_id):
if pi_id in fetches:
pi.update_kl(fetches[pi_id]["kl"])
else:
logger.debug(
"No data for {}, not updating kl".format(pi_id))
# multi-agent
self.local_evaluator.foreach_trainable_policy(update)
res = self.optimizer.collect_metrics(
self.config["collect_metrics_timeout"])
res.update(
timesteps_this_iter=self.optimizer.num_steps_sampled - prev_steps,
info=dict(fetches, **res.get("info", {})))
# Warn about bad clipping configs
if self.config["vf_clip_param"] <= 0:
rew_scale = float("inf")
elif res["policy_reward_mean"]:
rew_scale = 0 # punt on handling multiagent case
else:
rew_scale = round(
abs(res["episode_reward_mean"]) / self.config["vf_clip_param"],
0)
if rew_scale > 100:
logger.warning(
"The magnitude of your environment rewards are more than "
"{}x the scale of `vf_clip_param`. ".format(rew_scale) +
"This means that it will take more than "
"{} iterations for your value ".format(rew_scale) +
"function to converge. If this is not intended, consider "
"increasing `vf_clip_param`.")
return res
def _validate_config(self):
if self.config["sgd_minibatch_size"] > self.config["train_batch_size"]:
raise ValueError(
"Minibatch size {} must be <= train batch size {}.".format(
self.config["sgd_minibatch_size"],
self.config["train_batch_size"]))
if (self.config["batch_mode"] == "truncate_episodes"
and not self.config["use_gae"]):
raise ValueError(
"Episode truncation is not supported without a value "
"function. Consider setting batch_mode=complete_episodes.")
if (self.config["multiagent"]["policy_graphs"]
and not self.config["simple_optimizer"]):
logger.info(
"In multi-agent mode, policies will be optimized sequentially "
"by the multi-GPU optimizer. Consider setting "
"simple_optimizer=True if this doesn't work for you.")
if not self.config["vf_share_layers"]:
logger.warning(
"FYI: By default, the value function will not share layers "
"with the policy model ('vf_share_layers': False).")
| 42.410405 | 79 | 0.622189 | [
"Apache-2.0"
] | FieldMrFive/ray | python/ray/rllib/agents/ppo/ppo.py | 7,337 | Python |
import re
from itertools import chain
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref
from django.db.models.query_utils import QueryWrapper, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError
from django.utils.six.moves import zip
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# The select, klass_info, and annotations are needed by QuerySet.iterator()
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
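        # Used to strip the trailing ASC/DESC (and anything after it) when comparing
        # ORDER BY expressions for duplicates and when building extra select columns.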
self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)')
self.subquery = False
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.tables):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Returns a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
if expr.contains_aggregate:
continue
# We can skip References to select clause, as all expressions in
# the select clause are already part of the group by.
if is_ref:
continue
expressions.extend(expr.get_source_expressions())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
if (sql, tuple(params)) not in seen:
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# The logic here is: if the main model's primary key is in the
# query, then set new_expressions to that field. If that happens,
# then also add having expressions to group by.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (getattr(expr, 'target', None) == self.query.model._meta.pk and
getattr(expr, 'alias', None) == self.query.tables[0]):
pk = expr
break
if pk:
# MySQLism: Columns in HAVING clause must be added to the GROUP BY.
expressions = [pk] + [expr for expr in expressions if expr in having]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
pks = {expr for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key}
aliases = {expr.alias for expr in pks}
expressions = [
expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
]
return expressions
def get_select(self):
"""
Returns three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- Which model to instantiate
- Which columns for that model are present in the query (by
position of the select clause).
- related_klass_infos: [f, klass_info] to descent into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
select_list = []
for c in self.get_default_columns():
select_list.append(select_idx)
select.append((c, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
# self.query.select is a special case. These columns never go to
# any model.
for col in self.query.select:
select.append((col, None))
select_idx += 1
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
ret.append((col, self.compile(col, select_format=True), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Returns a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by or self.query.get_meta().ordering or [])
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for pos, field in enumerate(ordering):
if hasattr(field, 'resolve_expression'):
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = True if order == 'DESC' else False
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT clause
order_by.append((
OrderBy(self.query.annotations[col], descending=descending),
False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query._extra or col not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
resolved = expr.resolve_expression(
self.query, allow_joins=True, reuse=None)
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql).group(1)
if (without_ordering, tuple(params)) in seen:
continue
seen.add((without_ordering, tuple(params)))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
select_sql = [t[1] for t in select]
if self.query.distinct and not self.query.distinct_fields:
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node, select_format=False):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format and not self.subquery:
return node.output_field.select_format(self, sql, params)
return sql, params
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.subquery = subquery
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
having, h_params = self.compile(self.having) if self.having is not None else ("", [])
params = []
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result.append(', '.join(out_cols))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented.")
if not order_by:
order_by = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
# If we've been asked for a NOWAIT query but the backend does
# not support it, raise a DatabaseError otherwise we could get
# an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None and not self.query.distinct_fields:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
nested_sql = obj.get_compiler(connection=self.connection).as_sql(subquery=True)
if nested_sql == ('', ()):
raise EmptyResultSet
return nested_sql
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Returns a list of column expressions, one for each concrete field of
        the model that should be selected (deferred fields and fields already
        selected via a parent model are skipped).
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(name)
else:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
        descending = order == 'DESC'
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.is_relation and path and opts.ordering and name != field.attname:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_order_by and get_distinct.
Note that get_ordering and get_distinct must produce same target
columns on same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
        Returns a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list of any extra parameters
        that need to be included. Subclasses can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices)
related_klass_infos = []
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or (cur_depth == 1 and f.name in requested):
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.remote_field.model,
'field': f,
'reverse': False,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
_, _, _, joins, _ = self.query.setup_joins([related_field_name], opts, root_alias)
alias = joins[-1]
from_parent = issubclass(model, opts.model)
klass_info = {
'model': model,
'field': f,
'reverse': True,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested.keys()).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
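        """
        Map each output column index to the backend- and field-level
        converters that should be applied to values read for that expression.
        """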
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, row, converters):
row = list(row)
for pos, (convs, expression) in converters.items():
value = row[pos]
for converter in convs:
value = converter(value, expression, self.connection, self.query.context)
row[pos] = value
return tuple(row)
def results_iter(self, results=None):
"""
Returns an iterator over the results from executing this query.
"""
converters = None
if results is None:
results = self.execute_sql(MULTI)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
for rows in results:
for row in rows:
if converters:
row = self.apply_converters(row, converters)
yield row
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
if not result_type:
result_type = NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
cursor.close()
raise
if result_type == CURSOR:
# Caller didn't specify a result_type, so just give them back the
# cursor to process (and close).
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count
)
if not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
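        """
        Return SQL usable as a subquery condition: "alias.col IN (...)" when a
        single column is given, otherwise an EXISTS clause with an equality
        condition added for each selected column.
        """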
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Checks for raw values,
expressions and fields with get_placeholder() defined in that order.
When field is None, the value is considered raw and is used as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, 'as_sql'):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = '%s', [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
'can only be used to update, not to insert.' % (value, field)
)
if value.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values,
generate placeholder SQL and parameters for each field and value, and
return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
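        # Illustrative example (hypothetical values): for two objects and two
        # plain fields whose placeholders are both '%s', this returns roughly
        #   placeholder_rows = (('%s', '%s'), ('%s', '%s'))
        #   param_rows = [['a', 1], ['b', 2]]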
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
value_rows = [
[self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (not self.return_id and self.connection.features.has_bulk_insert)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = param_rows[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len([t for t in self.query.tables if self.query.alias_refcount[t] > 0]) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.clone(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super(SQLUpdateCompiler, self).pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
# Empty SQL for the inner query is a marker that the inner query
# isn't going to produce any results. This can happen when doing
# LIMIT 0 queries (generated by qs[:0]) for example.
if not self.query.subquery:
raise EmptyResultSet
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation, select_format=True)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count):
"""
Yields blocks of rows from a cursor and ensures the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[0:col_count] for r in rows]
finally:
cursor.close()
| 43.823339 | 106 | 0.585414 | [
"BSD-3-Clause"
] | hottwaj/django | django/db/models/sql/compiler.py | 54,078 | Python |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import imread
# Prepare the data
x = np.arange(0, 6, 0.1)  # generate values from 0 to 6 in steps of 0.1
y1 = np.sin(x)
y2 = np.cos(x)
# Draw the graphs
plt.plot(x, y1, label='sin')
plt.plot(x, y2, linestyle='--', label='cos')  # draw the cos curve with a dashed line
plt.xlabel('x')  # x-axis label
plt.ylabel('y')  # y-axis label
plt.title('sin & cos')  # title
plt.legend()
# Display the image
img = imread('background.jpg')
plt.imshow(img)
plt.show() | 20.904762 | 64 | 0.646925 | [
"MIT"
] | Tim232/Python-Things | Books/DeepLearningfromScratch/P01_HelloPython/numpy_pyplot.py | 527 | Python |
import torch
import torch.nn.functional as F
import torch_glow
from collections import namedtuple
from tests.utils import jitVsGlow
# Basic test of the PyTorch conv2d Node on Glow.
def test_conv2d_basic():
def conv2d_basic(inputs, filters):
conv = F.conv2d(inputs, filters, padding=1)
return F.relu(conv)
inputs = torch.randn(1, 4, 5, 5)
filters = torch.randn(8, 4, 3, 3)
jitVsGlow(conv2d_basic, inputs, filters)
# Test of the PyTorch conv2d Node with a provided bias tensor.
def test_conv2d_with_bias():
def conv2d_with_bias(inputs, filters, bias):
conv = F.conv2d(inputs, filters, bias)
return F.relu(conv)
inputs = torch.randn(1, 4, 5, 5)
filters = torch.randn(8, 4, 3, 3)
bias = torch.randn(8)
jitVsGlow(conv2d_with_bias, inputs, filters, bias)
# Test of the PyTorch conv2d Node sweeping through various parameters of the
# Node to test that they work correctly.
def test_conv2d_param_sweep():
hwOpts = [3, 4]
padOpts = [0, 1]
groupsOpts = [1, 2]
dilationOpts = [1, 2]
strideOpts = [1, 2]
Setting = namedtuple('Setting', ['h', 'w', 'p', 'g', 'd', 's',])
settings = [Setting(h=h, w=w, p=p, g=g, d=d, s=s) for h in hwOpts for w in hwOpts for p in padOpts for g in groupsOpts for d in dilationOpts for s in strideOpts]
for setting in settings:
def conv2d_param_sweep(inputs, filters):
conv = F.conv2d(inputs, filters, padding=setting.p, groups=setting.g)
return F.relu(conv)
inputs = torch.randn(2, 4, setting.h, setting.w)
        filters = torch.randn(8, 4 // setting.g, 3, 3)
jitVsGlow(conv2d_param_sweep, inputs, filters)
| 29.909091 | 163 | 0.677204 | [
"Apache-2.0"
] | a1f/glow | torch_glow/tests/nodes/conv2d_test.py | 1,645 | Python |
import argparse
import sys
import pathlib
import random
from unittest import mock
import pytest
from _repobee import plugin
import repobee_plug as plug
from repobee_feedback import feedback
ASSIGNMENT_NAMES = ("task-1", "task-2")
STUDENT_TEAMS = tuple(
[
plug.StudentTeam(members=members)
for members in (["slarse"], ["glassey"], ["grundb", "glennol"])
]
)
STUDENT_TEAM_NAMES = tuple(map(str, STUDENT_TEAMS))
PASS_ISSUE = plug.Issue(title="Pass", body="Well done!\nAbsolutely flawless!")
KOMP_ISSUE = plug.Issue(
title="Komplettering", body="Not perfect, you need to fix this."
)
FAIL_ISSUE = plug.Issue(
title="Fail", body="Unfortunately, there are severe errors."
)
ISSUES = (PASS_ISSUE, KOMP_ISSUE, FAIL_ISSUE)
random.seed(512)
def _write_issue(issue: plug.Issue, path: pathlib.Path):
text = "{}\n{}".format(issue.title, issue.body)
path.write_text(text, encoding=sys.getdefaultencoding())
def _write_multi_issues_file(repos_and_issues, path):
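    """Write the given (repo_name, issue) pairs to a single multi-issues file,
    where each section starts with a '#ISSUE#{repo_name}#{title}' header line
    followed by the issue body.
    """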
with open(str(path), mode="w", encoding=sys.getdefaultencoding()) as file:
cur = 0
for repo_name, issue in repos_and_issues:
if cur:
file.write("\n")
file.write("#ISSUE#{}#{}\n".format(repo_name, issue.title))
file.write(issue.body)
cur += 1
def test_register():
"""Just test that there is no crash"""
plugin.register_plugins([feedback])
@pytest.fixture
def parsed_args_issues_dir(tmp_path):
return argparse.Namespace(
students=list(STUDENT_TEAMS),
assignments=list(ASSIGNMENT_NAMES),
batch_mode=True,
issues_dir=str(tmp_path),
multi_issues_file=None,
truncation_length=50,
allow_missing=False,
)
@pytest.fixture
def parsed_args_multi_issues_file(with_multi_issues_file):
issues_file, _ = with_multi_issues_file
return argparse.Namespace(
students=list(STUDENT_TEAMS),
assignments=list(ASSIGNMENT_NAMES),
batch_mode=True,
issues_dir=None,
multi_issues_file=str(issues_file),
truncation_length=50,
allow_missing=False,
)
@pytest.fixture
def api_mock():
return mock.MagicMock(spec=plug.PlatformAPI)
@pytest.fixture
def with_issues(tmp_path):
"""Create issue files in a temporary directory and return a list of (team,
issue) tuples.
"""
repo_names = plug.generate_repo_names(STUDENT_TEAM_NAMES, ASSIGNMENT_NAMES)
existing_issues = []
for repo_name in repo_names:
issue_file = tmp_path / "{}.md".format(repo_name)
issue = random.choice(ISSUES)
_write_issue(issue, issue_file)
existing_issues.append((repo_name, issue))
return existing_issues
@pytest.fixture
def with_multi_issues_file(tmp_path):
"""Create the multi issues file."""
repo_names = plug.generate_repo_names(STUDENT_TEAM_NAMES, ASSIGNMENT_NAMES)
repos_and_issues = [
(repo_name, random.choice(ISSUES)) for repo_name in repo_names
]
issues_file = tmp_path / "issues.md"
_write_multi_issues_file(repos_and_issues, issues_file)
return issues_file, repos_and_issues
class TestCallback:
"""Tests for the primary callback."""
def test_opens_issues_from_issues_dir(
self, with_issues, parsed_args_issues_dir, api_mock
):
"""Test that the callback calls the API.open_issue for the expected
repos and issues, when the issues all exist and are well formed.
"""
expected_calls = [
mock.call(issue.title, issue.body, mock.ANY)
for repo_name, issue in with_issues
]
feedback.callback(args=parsed_args_issues_dir, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls, any_order=True)
def test_aborts_if_issue_is_missing(
self, with_issues, parsed_args_issues_dir, api_mock, tmp_path
):
"""Test that the callback exits with a plug.PlugError if any of the
expected issues is not found.
"""
repo_without_issue = plug.generate_repo_name(
STUDENT_TEAM_NAMES[-1], ASSIGNMENT_NAMES[0]
)
missing_file = tmp_path / "{}.md".format(repo_without_issue)
missing_file.unlink()
with pytest.raises(plug.PlugError) as exc_info:
feedback.callback(args=parsed_args_issues_dir, api=api_mock)
assert repo_without_issue in str(exc_info.value)
assert not api_mock.create_issue.called
def test_ignores_missing_issue_if_allow_missing(
self, with_issues, parsed_args_issues_dir, api_mock, tmp_path
):
"""Test that missing issues are ignored if --allow-mising is set."""
repo_without_issue = plug.generate_repo_name(
STUDENT_TEAM_NAMES[-1], ASSIGNMENT_NAMES[0]
)
(tmp_path / "{}.md".format(repo_without_issue)).unlink()
expected_calls = [
mock.call(issue.title, issue.body, mock.ANY)
for repo_name, issue in with_issues
if repo_name != repo_without_issue
]
args_dict = vars(parsed_args_issues_dir)
args_dict["allow_missing"] = True
args = argparse.Namespace(**args_dict)
feedback.callback(args=args, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls, any_order=True)
def test_opens_nothing_if_open_prompt_returns_false(
self, with_issues, parsed_args_issues_dir, api_mock
):
"""Test that the callback does not attempt to open any issues if the
'may I open' prompt returns false.
"""
args_dict = vars(parsed_args_issues_dir)
args_dict["batch_mode"] = False
parsed_args_interactive = argparse.Namespace(**args_dict)
with mock.patch("builtins.input", return_value="n", autospec=True):
feedback.callback(args=parsed_args_interactive, api=api_mock)
assert not api_mock.create_issue.called
def test_opens_issues_from_multi_issues_file(
self, with_multi_issues_file, api_mock, parsed_args_multi_issues_file
):
"""Test that the callback opens issues correctly when they are all
contained in a multi issues file.
"""
issues_file, repos_and_issues = with_multi_issues_file
expected_calls = [
mock.call(issue.title, issue.body, mock.ANY)
for repo_name, issue in repos_and_issues
]
feedback.callback(args=parsed_args_multi_issues_file, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls)
def test_skips_unexpected_issues_in_multi_issues_file(
self, with_multi_issues_file, parsed_args_multi_issues_file, api_mock
):
"""Test that an exception is raised if one or more issues are found
relating to student repos that ar not in prod(assignments, students).
"""
student_teams = parsed_args_multi_issues_file.students
args_dict = vars(parsed_args_multi_issues_file)
args_dict["students"] = student_teams[:-1]
args = argparse.Namespace(**args_dict)
unexpected_repos = plug.generate_repo_names(
student_teams[-1:], ASSIGNMENT_NAMES
)
_, repos_and_issues = with_multi_issues_file
expected_calls = [
mock.call(issue.title, issue.body, mock.ANY)
for repo_name, issue in repos_and_issues
if repo_name not in unexpected_repos
]
feedback.callback(args=args, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls, any_order=True)
| 33.548673 | 79 | 0.684648 | [
"MIT"
] | slarse/repobee-feedback | tests/test_feedback.py | 7,582 | Python |
import tkinter
key = ""
def key_down(e):
global key
key = e.keysym
def key_up(e):
global key
key = ""
mx, my = (1, 1)
def main_proc():
global mx, my
if key == "Up" and maze[my-1][mx] == 0:
my -= 1
if key == "Down" and maze[my+1][mx] == 0:
my += 1
if key == "Left" and maze[my][mx-1] == 0:
mx -= 1
if key == "Right" and maze[my][mx+1] == 0:
mx += 1
canvas.coords("MYCHR", mx*80 + 40, my*80 +40)
root.after(300, main_proc)
root = tkinter.Tk()
root.title("Moving through the maze")
root.bind("<KeyPress>", key_down)
root.bind("<KeyRelease>", key_up)
canvas = tkinter.Canvas(width=800, height=560, bg='white')
canvas.pack()
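# Maze layout: 1 = wall (drawn as a sky-blue square), 0 = floor the character can move onto.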
maze = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1, 0, 0, 1],
[1, 0, 1, 1, 0, 0, 1, 0, 0, 1],
[1, 0, 0, 1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 1, 1, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
for y in range(7):
for x in range(10):
if maze[y][x] == 1:
canvas.create_rectangle(x * 80, y * 80, x * 80 + 79, y * 80 + 79, fill='skyblue', width=0)
img = tkinter.PhotoImage(file='image/mimi_s.png')
canvas.create_image(mx * 80 + 40, my * 80 + 40, image=img, tag='MYCHR')
main_proc()
root.mainloop()
| 22.839286 | 102 | 0.503518 | [
"MIT"
] | rrbb014/rrbb-playground | python/game/gui/maze.py | 1,293 | Python |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A recursive minimal eigen optimizer in Qiskit's optimization module."""
from copy import deepcopy
from enum import Enum
from typing import Optional, Union, List, Tuple, Dict
import logging
import numpy as np
from qiskit.aqua.algorithms import NumPyMinimumEigensolver
from qiskit.aqua.utils.validation import validate_min
from .optimization_algorithm import OptimizationAlgorithm, OptimizationResult
from .minimum_eigen_optimizer import MinimumEigenOptimizer, MinimumEigenOptimizationResult
from ..converters.quadratic_program_to_qubo import QuadraticProgramToQubo
from ..exceptions import QiskitOptimizationError
from ..problems import Variable
from ..problems.quadratic_program import QuadraticProgram
logger = logging.getLogger(__name__)
class IntermediateResult(Enum):
"""
Defines whether the intermediate results of
:class:`~qiskit.optimization.algorithms.RecursiveMinimumEigenOptimizer`
at each iteration should be stored and returned to the end user.
"""
NO_ITERATIONS = 0
"""No intermediate results are stored."""
LAST_ITERATION = 1
"""Only results from the last iteration are stored."""
ALL_ITERATIONS = 2
"""All intermediate results are stored."""
class RecursiveMinimumEigenOptimizationResult(OptimizationResult):
"""Recursive Eigen Optimizer Result."""
def __init__(self, x: Union[List[float], np.ndarray], fval: float,
variables: List[Variable],
replacements: Dict[str, Tuple[str, int]],
history: Tuple[List[MinimumEigenOptimizationResult], OptimizationResult]) -> None:
"""
Constructs an instance of the result class.
Args:
x: the optimal value found in the optimization.
fval: the optimal function value.
variables: the list of variables of the optimization problem.
replacements: a dictionary of substituted variables. Key is a variable being
substituted, value is a tuple of substituting variable and a weight, either 1 or -1.
history: a tuple containing intermediate results. The first element is a list of
:class:`~qiskit.optimization.algorithms.MinimumEigenOptimizerResult` obtained by
invoking :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively,
the second element is an instance of
:class:`~qiskit.optimization.algorithm.OptimizationResult` obtained at the last step
via `min_num_vars_optimizer`.
"""
super().__init__(x, fval, variables, None)
self._replacements = replacements
self._history = history
@property
def replacements(self) -> Dict[str, Tuple[str, int]]:
"""
Returns a dictionary of substituted variables. Key is a variable being substituted, value
is a tuple of substituting variable and a weight, either 1 or -1."""
return self._replacements
@property
def history(self) -> Tuple[List[MinimumEigenOptimizationResult], OptimizationResult]:
"""
Returns intermediate results. The first element is a list of
:class:`~qiskit.optimization.algorithms.MinimumEigenOptimizerResult` obtained by invoking
:class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively, the second
element is an instance of :class:`~qiskit.optimization.algorithm.OptimizationResult`
obtained at the last step via `min_num_vars_optimizer`.
"""
return self._history
class RecursiveMinimumEigenOptimizer(OptimizationAlgorithm):
"""A meta-algorithm that applies a recursive optimization.
The recursive minimum eigen optimizer applies a recursive optimization on top of
:class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer`.
The algorithm is introduced in [1].
Examples:
Outline of how to use this class:
.. code-block::
from qiskit.aqua.algorithms import QAOA
from qiskit.optimization.problems import QuadraticProgram
from qiskit.optimization.algorithms import RecursiveMinimumEigenOptimizer
problem = QuadraticProgram()
# specify problem here
# specify minimum eigen solver to be used, e.g., QAOA
qaoa = QAOA(...)
optimizer = RecursiveMinimumEigenOptimizer(qaoa)
result = optimizer.solve(problem)
References:
[1]: Bravyi et al. (2019), Obstacles to State Preparation and Variational Optimization
from Symmetry Protection. http://arxiv.org/abs/1910.08980.
"""
def __init__(self, min_eigen_optimizer: MinimumEigenOptimizer, min_num_vars: int = 1,
min_num_vars_optimizer: Optional[OptimizationAlgorithm] = None,
penalty: Optional[float] = None,
history: Optional[IntermediateResult] = IntermediateResult.LAST_ITERATION) -> None:
""" Initializes the recursive minimum eigen optimizer.
This initializer takes a ``MinimumEigenOptimizer``, the parameters to specify until when to
to apply the iterative scheme, and the optimizer to be applied once the threshold number of
variables is reached.
Args:
min_eigen_optimizer: The eigen optimizer to use in every iteration.
min_num_vars: The minimum number of variables to apply the recursive scheme. If this
threshold is reached, the min_num_vars_optimizer is used.
min_num_vars_optimizer: This optimizer is used after the recursive scheme for the
problem with the remaining variables.
penalty: The factor that is used to scale the penalty terms corresponding to linear
equality constraints.
history: Whether the intermediate results are stored.
Default value is :py:obj:`~IntermediateResult.LAST_ITERATION`.
Raises:
QiskitOptimizationError: In case of invalid parameters (num_min_vars < 1).
"""
validate_min('min_num_vars', min_num_vars, 1)
self._min_eigen_optimizer = min_eigen_optimizer
self._min_num_vars = min_num_vars
if min_num_vars_optimizer:
self._min_num_vars_optimizer = min_num_vars_optimizer
else:
self._min_num_vars_optimizer = MinimumEigenOptimizer(NumPyMinimumEigensolver())
self._penalty = penalty
self._history = history
self._qubo_converter = QuadraticProgramToQubo()
def get_compatibility_msg(self, problem: QuadraticProgram) -> str:
"""Checks whether a given problem can be solved with this optimizer.
Checks whether the given problem is compatible, i.e., whether the problem can be converted
to a QUBO, and otherwise, returns a message explaining the incompatibility.
Args:
problem: The optimization problem to check compatibility.
Returns:
A message describing the incompatibility.
"""
return QuadraticProgramToQubo.get_compatibility_msg(problem)
def solve(self, problem: QuadraticProgram) -> OptimizationResult:
"""Tries to solve the given problem using the recursive optimizer.
Runs the optimizer to try to solve the optimization problem.
Args:
problem: The problem to be solved.
Returns:
The result of the optimizer applied to the problem.
Raises:
QiskitOptimizationError: Incompatible problem.
QiskitOptimizationError: Infeasible due to variable substitution
"""
self._verify_compatibility(problem)
# convert problem to QUBO, this implicitly checks if the problem is compatible
problem_ = self._qubo_converter.convert(problem)
problem_ref = deepcopy(problem_)
# run recursive optimization until the resulting problem is small enough
replacements = {} # type: Dict[str, Tuple[str, int]]
min_eigen_results = [] # type: List[MinimumEigenOptimizationResult]
while problem_.get_num_vars() > self._min_num_vars:
# solve current problem with optimizer
res = self._min_eigen_optimizer.solve(problem_) # type: MinimumEigenOptimizationResult
if self._history == IntermediateResult.ALL_ITERATIONS:
min_eigen_results.append(res)
# analyze results to get strongest correlation
correlations = res.get_correlations()
i, j = self._find_strongest_correlation(correlations)
x_i = problem_.variables[i].name
x_j = problem_.variables[j].name
if correlations[i, j] > 0:
# set x_i = x_j
problem_ = problem_.substitute_variables(variables={i: (j, 1)})
if problem_.status == QuadraticProgram.Status.INFEASIBLE:
raise QiskitOptimizationError('Infeasible due to variable substitution')
replacements[x_i] = (x_j, 1)
else:
# set x_i = 1 - x_j, this is done in two steps:
# 1. set x_i = 1 + x_i
# 2. set x_i = -x_j
# 1a. get additional offset
constant = problem_.objective.constant
constant += problem_.objective.linear[i]
constant += problem_.objective.quadratic[i, i]
problem_.objective.constant = constant
# 1b. get additional linear part
for k in range(problem_.get_num_vars()):
coeff = problem_.objective.linear[k]
if k == i:
coeff += 2*problem_.objective.quadratic[i, k]
else:
coeff += problem_.objective.quadratic[i, k]
# set new coefficient if not too small
if np.abs(coeff) > 1e-10:
problem_.objective.linear[k] = coeff
else:
problem_.objective.linear[k] = 0
# 2. replace x_i by -x_j
problem_ = problem_.substitute_variables(variables={i: (j, -1)})
if problem_.status == QuadraticProgram.Status.INFEASIBLE:
raise QiskitOptimizationError('Infeasible due to variable substitution')
replacements[x_i] = (x_j, -1)
# solve remaining problem
result = self._min_num_vars_optimizer.solve(problem_)
# unroll replacements
var_values = {}
for i, x in enumerate(problem_.variables):
var_values[x.name] = result.x[i]
def find_value(x, replacements, var_values):
if x in var_values:
# if value for variable is known, return it
return var_values[x]
elif x in replacements:
# get replacement for variable
(y, sgn) = replacements[x]
# find details for replacing variable
value = find_value(y, replacements, var_values)
# construct, set, and return new value
var_values[x] = value if sgn == 1 else 1 - value
return var_values[x]
else:
raise QiskitOptimizationError('Invalid values!')
# loop over all variables to set their values
for x_i in problem_ref.variables:
if x_i.name not in var_values:
find_value(x_i.name, replacements, var_values)
# build history before any translations are applied
# min_eigen_results is an empty list if history is set to NO or LAST.
history = (min_eigen_results,
None if self._history == IntermediateResult.NO_ITERATIONS else result)
# construct result
x_v = [var_values[x_aux.name] for x_aux in problem_ref.variables]
fval = result.fval
result = OptimizationResult(x=x_v, fval=fval, variables=problem_ref.variables)
result = self._qubo_converter.interpret(result)
return RecursiveMinimumEigenOptimizationResult(x=result.x, fval=result.fval,
variables=result.variables,
replacements=replacements,
history=history)
def _find_strongest_correlation(self, correlations):
# get absolute values and set diagonal to -1 to make sure maximum is always on off-diagonal
abs_correlations = np.abs(correlations)
for i in range(len(correlations)):
abs_correlations[i, i] = -1
# get index of maximum (by construction on off-diagonal)
m_max = np.argmax(abs_correlations.flatten())
# translate back to indices
i = int(m_max // len(correlations))
j = int(m_max - i*len(correlations))
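        # Example with hypothetical numbers: for a 3x3 correlation matrix whose
        # strongest off-diagonal entry sits at row 0, column 2, the flattened
        # argmax is 2, giving i = 2 // 3 = 0 and j = 2 - 0*3 = 2.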
return (i, j)
| 43.584416 | 100 | 0.65003 | [
"Apache-2.0"
] | Cristian-Malinescu/qiskit-aqua | qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py | 13,424 | Python |
import mechanize
from DictUtils import listToDict
from Scraper import Scraper
import pprint
prettyPrinter = pprint.PrettyPrinter(indent=4, width=50)
class HeadlessScraper():
def __init__(self, username):
self.browser = mechanize.Browser()
self.browser.set_handle_robots(False)
self.baseUrl = "https://www.instagram.com"
self.username = username
self.userLink = Scraper.getUserLink(username)
def scrapeUser(self, userLink=None):
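        # Open the profile page, follow every post link ('/p/...') and collect
        # links to the other users appearing on those posts, keyed by their
        # profile URL.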
if userLink is None:
userLink = self.userLink
response = self.browser.open(userLink)
text = response.read()
allUserLinks = {}
for link in self.browser.links(url_regex='/p/'):
self.browser.follow_link(link)
userLinks = [link for link in self.browser.links()
if HeadlessScraper.isUserLink(link) and
self.isNotCurrentUserLink(link, userLink)
]
userLinksDict = listToDict(lambda userLink: Scraper.getUserLink(userLink.text), userLinks)
allUserLinks.update(userLinksDict)
return allUserLinks
def isNotCurrentUserLink(self, link, userLink):
return link.url.strip() not in userLink.strip()
@classmethod
def extractAdressAndTitle(cls, link):
address = link.url
titles = [(key, value) for key, value in link.attrs if key.lower() == 'title']
if len(titles) == 0:
return None, None
_, title = titles.pop()
return address, title
@classmethod
def isUserLink(cls, link):
address, title = cls.extractAdressAndTitle(link)
return address is not None and\
title is not None and\
Scraper.isUserLink(address, title)
prettyPrinter.pprint(HeadlessScraper('pipapo').scrapeUser())
| 30.95 | 102 | 0.62951 | [
"MIT"
] | ransomwarezz/instagram-scraper | core/HeadlessScraper.py | 1,857 | Python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Base class for a stock configuration.
@author: rajsaswa
"""
class StockConfig:
def __init__(self):
pass
def get_stock_url(self):
pass
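# A concrete configuration is expected to subclass StockConfig and return the
# product page to poll. Hypothetical sketch (class name and URL are placeholders):
#
#     class ExampleStockConfig(StockConfig):
#         def get_stock_url(self):
#             return "https://example.com/product/123"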
| 14.666667 | 37 | 0.590909 | [
"MIT"
] | saswatraj/stock_notifier | stock_notifier/stock_config/stock_config.py | 220 | Python |
import warnings
import pandas as pd
from ..config import Config
from ..backend import Backend
from ..backend import PYOORB
from ..backend import FINDORB
from ..backend import MJOLNIR
__all__ = [
"generateEphemeris"
]
def generateEphemeris(
orbits,
observers,
backend="MJOLNIR",
backend_kwargs={},
test_orbit=None,
threads=Config.NUM_THREADS,
chunk_size=1
):
"""
Generate ephemeris for the orbits and the given observatories.
Parameters
----------
orbits : `~numpy.ndarray` (N, 6)
        Orbits for which to generate ephemeris. If backend is 'MJOLNIR', then these orbits must be expressed
        as heliocentric ecliptic cartesian elements. If backend is 'PYOORB' orbits may be
expressed in keplerian, cometary or cartesian elements.
observers : dict
A dictionary with observatory codes as keys and observation_times (`~astropy.time.core.Time`) as values.
Or a data frame with observatory codes, observation times (in UTC), and the observer's heliocentric ecliptic state.
The expected data frame columns are obs_x, obs_y, obs_y and optionally the velocity columns obs_vx, obs_vy, obs_vz.
If no velocities are not correctly given, then sky-plane velocities will all be zero.
(See: `~thor.observatories.getObserverState`)
    backend : {'MJOLNIR', 'PYOORB', 'FINDORB'}, optional
Which backend to use.
backend_kwargs : dict, optional
Settings and additional parameters to pass to selected
backend.
Returns
-------
ephemeris : `~pandas.DataFrame` (N x M, 21) or (N x M, 18)
A DataFrame containing the generated ephemeris.
"""
if backend == "MJOLNIR":
backend = MJOLNIR(**backend_kwargs)
elif backend == "PYOORB":
backend = PYOORB(**backend_kwargs)
elif backend == "FINDORB":
backend = FINDORB(**backend_kwargs)
elif isinstance(backend, Backend):
backend = backend
if len(backend_kwargs) > 0:
warnings.warn("backend_kwargs will be ignored since a instantiated backend class has been given.")
else:
err = (
"backend should be one of 'MJOLNIR', 'PYOORB', 'FINDORB' or an instantiated Backend class"
)
raise ValueError(err)
ephemeris = backend.generateEphemeris(
orbits,
observers,
test_orbit=test_orbit,
threads=threads,
chunk_size=chunk_size
)
ephemeris.sort_values(
by=["orbit_id", "observatory_code", "mjd_utc"],
inplace=True
)
ephemeris.reset_index(
inplace=True,
drop=True
)
return ephemeris
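# Minimal usage sketch. The observatory code, epoch and `orbits` object below
# are placeholders rather than values defined in this module:
#
#     from astropy.time import Time
#     observers = {"500": Time([59000.0], format="mjd", scale="utc")}
#     eph = generateEphemeris(orbits, observers, backend="PYOORB")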
| 31.103448 | 123 | 0.645233 | [
"BSD-3-Clause"
] | B612-Asteroid-Institute/thor | thor/orbits/ephemeris.py | 2,706 | Python |
import pytest
from tickit.devices.eiger.eiger_status import EigerStatus
# # # # # EigerStatus Tests # # # # #
@pytest.fixture
def eiger_status() -> EigerStatus:
return EigerStatus()
def test_eiger_status_constructor():
EigerStatus()
def test_eiger_status_getitem(eiger_status):
assert 24.5 == eiger_status["th0_temp"]
| 17.789474 | 57 | 0.730769 | [
"Apache-2.0"
] | dls-controls/tickit | tests/devices/eiger/test_eiger_status.py | 338 | Python |
from DockerBuildSystem import DockerComposeTools, YamlTools, TerminalTools
from SwarmManagement import SwarmTools
from DockerBuildManagement import BuildTools
import sys
import os
BUILD_KEY = 'build'
SAVE_IMAGES_KEY = 'saveImages'
def GetInfoMsg():
infoMsg = "Build selections is configured by adding a 'build' property to the .yaml file.\r\n"
infoMsg += "The 'build' property is a dictionary of build selections.\r\n"
infoMsg += "Add '-build' to the arguments to build all selections in sequence, \r\n"
infoMsg += "or add specific selection names to build those only.\r\n"
infoMsg += "Example: 'dbm -build myBuildSelection'.\r\n"
return infoMsg
def GetBuildSelections(arguments):
yamlData = SwarmTools.LoadYamlDataFromFiles(
arguments, BuildTools.DEFAULT_BUILD_MANAGEMENT_YAML_FILES)
buildProperty = YamlTools.GetProperties(BUILD_KEY, yamlData)
if BuildTools.SELECTIONS_KEY in buildProperty:
return buildProperty[BuildTools.SELECTIONS_KEY]
return {}
def BuildSelections(selectionsToBuild, buildSelections):
if len(selectionsToBuild) == 0:
for buildSelection in buildSelections:
BuildSelection(buildSelections[buildSelection], buildSelection)
else:
for selectionToBuild in selectionsToBuild:
if selectionToBuild in buildSelections:
BuildSelection(buildSelections[selectionToBuild], selectionToBuild)
def BuildSelection(buildSelection, selectionToBuild):
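    # Build one selection: optionally switch directory, apply the selection's
    # environment variables and terminal commands, merge the listed compose
    # files, run docker-compose build, then apply extra tags, save images and
    # write a compose file with digests when those options are configured.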
cwd = BuildTools.TryChangeToDirectoryAndGetCwd(buildSelection)
oldEnvironmentVariable = BuildTools.AddEnvironmentVariablesFromSelection(buildSelection)
BuildTools.HandleTerminalCommandsSelection(buildSelection)
TerminalTools.LoadDefaultEnvironmentVariablesFile()
if BuildTools.FILES_KEY in buildSelection:
composeFiles = buildSelection[BuildTools.FILES_KEY]
buildComposeFile = BuildTools.GetAvailableComposeFilename('build', selectionToBuild)
DockerComposeTools.MergeComposeFiles(composeFiles, buildComposeFile)
try:
DockerComposeTools.DockerComposeBuild([buildComposeFile])
except:
BuildTools.RemoveComposeFileIfNotPreserved(buildComposeFile, buildSelection)
raise
if BuildTools.ADDITIONAL_TAG_KEY in buildSelection:
DockerComposeTools.TagImages(buildComposeFile, buildSelection[BuildTools.ADDITIONAL_TAG_KEY])
if BuildTools.ADDITIONAL_TAGS_KEY in buildSelection:
for tag in buildSelection[BuildTools.ADDITIONAL_TAGS_KEY]:
DockerComposeTools.TagImages(buildComposeFile, tag)
if SAVE_IMAGES_KEY in buildSelection:
outputFolder = buildSelection[SAVE_IMAGES_KEY]
DockerComposeTools.SaveImages(buildComposeFile, outputFolder)
if BuildTools.COMPOSE_FILE_WITH_DIGESTS_KEY in buildSelection:
composeFileWithDigests = buildSelection[BuildTools.COMPOSE_FILE_WITH_DIGESTS_KEY]
BuildTools.GenerateComposeFileWithDigests(composeFiles, composeFileWithDigests)
BuildTools.RemoveComposeFileIfNotPreserved(buildComposeFile, buildSelection)
BuildTools.RemoveEnvironmentVariables(oldEnvironmentVariable)
os.chdir(cwd)
def HandleBuildSelections(arguments):
if len(arguments) == 0:
return
if not('-build' in arguments):
return
if '-help' in arguments:
print(GetInfoMsg())
return
selectionsToBuild = SwarmTools.GetArgumentValues(arguments, '-build')
selectionsToBuild += SwarmTools.GetArgumentValues(arguments, '-b')
buildSelections = GetBuildSelections(arguments)
BuildSelections(selectionsToBuild, buildSelections)
if __name__ == "__main__":
arguments = sys.argv[1:]
HandleBuildSelections(arguments)
| 39.768421 | 105 | 0.754367 | [
"MIT"
] | DIPSAS/DockerBuildManagement | DockerBuildManagement/BuildSelections.py | 3,778 | Python |
from typing import Dict, List, Any
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
class S3BucketObjectLock(BaseResourceCheck):
def __init__(self) -> None:
name = "Ensure that S3 bucket has lock configuration enabled by default"
id = "CKV_AWS_143"
supported_resources = ["aws_s3_bucket"]
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
lock_conf = conf.get("object_lock_configuration")
if lock_conf and lock_conf[0]:
lock_enabled = lock_conf[0].get("object_lock_enabled")
if lock_enabled in ["Enabled", ["Enabled"]]:
return CheckResult.PASSED
return CheckResult.FAILED
return CheckResult.UNKNOWN
def get_evaluated_keys(self) -> List[str]:
return ["object_lock_configuration/[0]/object_lock_enabled"]
check = S3BucketObjectLock()
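# Minimal usage sketch (hypothetical parsed resource config shaped like the
# keys this check reads; not an official example from the checkov docs):
#
#   passing_conf = {"object_lock_configuration": [{"object_lock_enabled": ["Enabled"]}]}
#   check.scan_resource_conf(passing_conf)  # -> CheckResult.PASSED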
| 38.533333 | 106 | 0.712803 | [
"Apache-2.0"
] | 0xflotus/checkov | checkov/terraform/checks/resource/aws/S3BucketObjectLock.py | 1,156 | Python |
#! -*- coding:utf-8 -*-
# Evaluation script
# Dataset: IFLYTEK long-text classification (https://github.com/CLUEbenchmark/CLUE)
import json
import numpy as np
from bert4keras.backend import keras, set_gelu
from bert4keras.tokenizers import Tokenizer
from bert4keras.models import build_transformer_model
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding, DataGenerator
from bert4keras.snippets import open
from keras.layers import *
set_gelu('tanh')  # switch to the tanh version of gelu
num_classes = 119
maxlen = 128
batch_size = 32
# RoBERTa small
config_path = '/root/kg/bert/chinese_roberta_L-6_H-384_A-12/bert_config.json'
checkpoint_path = '/root/kg/bert/chinese_roberta_L-6_H-384_A-12/bert_model.ckpt'
dict_path = '/root/kg/bert/chinese_roberta_L-6_H-384_A-12/vocab.txt'
model_type = 'bert'
"""
# albert small
config_path = '/root/kg/bert/albert_small_zh_google/albert_config.json'
checkpoint_path = '/root/kg/bert/albert_small_zh_google/albert_model.ckpt'
dict_path = '/root/kg/bert/albert_small_zh_google/vocab.txt'
model_type = 'albert'
# RoBERTa tiny
config_path = '/root/kg/bert/chinese_roberta_L-4_H-312_A-12/bert_config.json'
checkpoint_path = '/root/kg/bert/chinese_roberta_L-4_H-312_A-12/bert_model.ckpt'
dict_path = '/root/kg/bert/chinese_roberta_L-4_H-312_A-12/vocab.txt'
model_type = 'bert'
# albert tiny
config_path = '/root/kg/bert/albert_tiny_zh_google/albert_config.json'
checkpoint_path = '/root/kg/bert/albert_tiny_zh_google/albert_model.ckpt'
dict_path = '/root/kg/bert/albert_tiny_zh_google/vocab.txt'
model_type = 'albert'
"""
def load_data(filename):
D = []
with open(filename) as f:
for i, l in enumerate(f):
l = json.loads(l)
text, label = l['sentence'], l['label']
D.append((text, int(label)))
return D
# Load the datasets
train_data = load_data('/root/CLUE-master/baselines/CLUEdataset/iflytek/train.json')
valid_data = load_data('/root/CLUE-master/baselines/CLUEdataset/iflytek/dev.json')
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
class data_generator(DataGenerator):
"""数据生成器
"""
def __iter__(self, random=False):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for is_end, (text, label) in self.sample(random):
token_ids, segment_ids = tokenizer.encode(text, max_length=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
if len(batch_token_ids) == self.batch_size or is_end:
batch_token_ids = sequence_padding(batch_token_ids)
batch_segment_ids = sequence_padding(batch_segment_ids)
batch_labels = sequence_padding(batch_labels)
yield [batch_token_ids, batch_segment_ids], batch_labels
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
# Load the pretrained model
bert = build_transformer_model(
config_path=config_path,
checkpoint_path=checkpoint_path,
model=model_type,
return_keras_model=False,
)
output = Lambda(lambda x: x[:, 0])(bert.model.output)
output = Dense(units=num_classes,
activation='softmax',
kernel_initializer=bert.initializer)(output)
model = keras.models.Model(bert.model.input, output)
model.summary()
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=Adam(5e-5),
metrics=['accuracy'],
)
# Convert the datasets
train_generator = data_generator(train_data, batch_size)
valid_generator = data_generator(valid_data, batch_size)
def evaluate(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
y_true = y_true[:, 0]
total += len(y_true)
right += (y_true == y_pred).sum()
return right / total
class Evaluator(keras.callbacks.Callback):
def __init__(self):
self.best_val_acc = 0.
def on_epoch_end(self, epoch, logs=None):
val_acc = evaluate(valid_generator)
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
model.save_weights('best_model.weights')
print(u'val_acc: %.5f, best_val_acc: %.5f\n' % (val_acc, self.best_val_acc))
evaluator = Evaluator()
model.fit_generator(train_generator.forfit(),
steps_per_epoch=len(train_generator),
epochs=50,
callbacks=[evaluator])
| 31.719424 | 84 | 0.701066 | [
"Apache-2.0"
] | CurisZhou/pretrained-models | examples/task_iflytek.py | 4,497 | Python |
# Athena Health Preliminary Test - II
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'moves' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY arr as parameter.
#
def moves(arr):
    # Count the even numbers (e), then count how many even numbers sit outside
    # the first e positions; each of those needs one swap with an odd number
    # from the front block, so that count is the minimum number of moves to
    # place all even numbers before the odd ones.
    ee = e = 0
    for i in range(0, len(arr)):
        if arr[i] % 2 == 0:
            e += 1
    for i in range(0, len(arr)):
        if arr[i] % 2 == 0:
            if i >= e:
                ee += 1
    return ee
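# Worked example (illustrative): moves([13, 10, 21, 20]) -> 1, since only the
# even value 20 sits outside the first two positions.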
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr_count = int(input().strip())
arr = []
for _ in range(arr_count):
arr_item = int(input().strip())
arr.append(arr_item)
result = moves(arr)
fptr.write(str(result) + '\n')
fptr.close()
| 17.651163 | 54 | 0.598155 | [
"MIT"
] | allenalvin333/Hackerrank_Interview | 0_Companies/ATHENA2.py | 759 | Python |
from tests.test_utils.test_logger import TestLogger
from tests.test_utils.test_models import TestClassicalModel
from tests.test_utils.test_utils import TestUtils
| 40.5 | 59 | 0.888889 | [
"MIT"
] | Darshan-ko/genrl | tests/test_utils/__init__.py | 162 | Python |
import os
import requests
from os.path import join, isfile
from nerblackbox.modules.datasets.formatter.base_formatter import BaseFormatter
class CoNLL2003Formatter(BaseFormatter):
def __init__(self):
ner_dataset = "conll2003"
ner_tag_list = ["PER", "ORG", "LOC", "MISC"]
super().__init__(ner_dataset, ner_tag_list)
####################################################################################################################
# ABSTRACT BASE METHODS
####################################################################################################################
def get_data(self, verbose: bool):
"""
I: get data
-----------
:param verbose: [bool]
:return: -
"""
url_base = "https://raw.githubusercontent.com/patverga/torch-ner-nlp-from-scratch/master/data/conll2003/"
targets = ["eng.train", "eng.testa", "eng.testb"]
for target in targets:
target_file = join(self.dataset_path, target)
# fetch tgz from url
if isfile(target_file):
if verbose:
print(f".. file at {target_file} already exists")
else:
url = url_base + target
myfile = requests.get(url, allow_redirects=True)
open(target_file, "wb").write(myfile.content)
if verbose:
print(f".. file fetched from {url} and saved at {target_file}")
def create_ner_tag_mapping(self):
"""
II: customize ner_training tag mapping if wanted
-------------------------------------
:return: ner_tag_mapping: [dict] w/ keys = tags in original data, values = tags in formatted data
"""
return dict()
def format_data(self):
"""
III: format data
----------------
:return: -
"""
for phase in ["train", "val", "test"]:
rows = self._read_original_file(phase)
self._write_formatted_csv(phase, rows)
def resplit_data(self, val_fraction: float):
"""
IV: resplit data
----------------
:param val_fraction: [float]
:return: -
"""
# train -> train
df_train = self._read_formatted_csvs(["train"])
self._write_final_csv("train", df_train)
# val -> val
df_val = self._read_formatted_csvs(["val"])
self._write_final_csv("val", df_val)
# test -> test
df_test = self._read_formatted_csvs(["test"])
self._write_final_csv("test", df_test)
####################################################################################################################
# HELPER: READ ORIGINAL
####################################################################################################################
def _read_original_file(self, phase):
"""
III: format data
---------------------------------------------
:param phase: [str] 'train' or 'test'
:return: _rows: [list] of [list] of [str], e.g. [[], ['Inger', 'PER'], ['säger', '0'], ..]
"""
file_name = {
"train": "eng.train",
"val": "eng.testa",
"test": "eng.testb",
}
file_path_original = join(self.dataset_path, file_name[phase])
_rows = list()
if os.path.isfile(file_path_original):
with open(file_path_original) as f:
for i, row in enumerate(f.readlines()):
_rows.append(row.strip().split())
print(f"\n> read {file_path_original}")
_rows = [
[row[0], row[-1]] if (len(row) == 4 and row[0] != "-DOCSTART-") else list()
for row in _rows
]
return _rows
| 35.472222 | 120 | 0.456017 | [
"Apache-2.0"
] | af-ai-center/nerblackbox | nerblackbox/modules/datasets/formatter/conll2003_formatter.py | 3,832 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.package import *
class Visit(CMakePackage):
"""VisIt is an Open Source, interactive, scalable, visualization,
animation and analysis tool. See comments in VisIt's package.py
for tips about building VisIt with spack. Building VisIt with
Spack is still experimental and many standard features are likely
disabled
LINUX-------------------------------------------------------------------
spack install visit ^python+shared ^[email protected] ^[email protected]
LINUX-W/O-OPENGL--------------------------------------------------------
spack install visit ^python+shared ^[email protected] ^[email protected] \\
^mesa+opengl
MACOS-------------------------------------------------------------------
spack install visit ^python+shared ^[email protected] ^[email protected] \\
^qt~framework
"""
############################
# Suggestions for building:
############################
# cyrush note:
#
    # Out of the box, VisIt's python 2 requirement will cause
    # spack spec constraint errors due to Qt + Mesa build
    # dependencies.
#
# You can avoid this using:
#
# linux:
# spack install visit ^python+shared ^[email protected] ^[email protected]
#
# linux w/o opengl: (add mesa as opengl if system lacks system opengl )
#
# spack install visit ^python+shared ^[email protected] ^[email protected] \
# ^mesa+opengl
#
# macOS:
# spack install visit ^python+shared ^[email protected] ^[email protected] \
# ^qt~framework
#
# Rpath issues undermine qwt (not qt) when a build as a framework
# VisIt's osxfixup resolves this for us in other cases,
# but we can't use osxfixup with spack b/c it will undermine other libs.
#
# Even with these changes, VisIt's Python CLI does not work on macOS,
# there is a linking issue related to OpenSSL.
# (dyld: Symbol not found: _GENERAL_NAME_free - which comes from OpenSSL)
#
############################
homepage = "https://wci.llnl.gov/simulation/computer-codes/visit/"
git = "https://github.com/visit-dav/visit.git"
url = "https://github.com/visit-dav/visit/releases/download/v3.2.1/visit3.2.1.tar.gz"
tags = ['radiuss']
maintainers = ['cyrush']
extendable = True
executables = ['^visit$']
version('develop', branch='develop')
version('3.2.2', sha256='d19ac24c622a3bc0a71bc9cd6e5c9860e43f39e3279672129278b6ebce8d0ead')
version('3.2.1', sha256='779d59564c63f31fcbfeff24b14ddd6ac941b3bb7d671d31765a770d193f02e8')
version('3.1.1', sha256='0b60ac52fd00aff3cf212a310e36e32e13ae3ca0ddd1ea3f54f75e4d9b6c6cf0')
version('3.0.1', sha256='a506d4d83b8973829e68787d8d721199523ce7ec73e7594e93333c214c2c12bd')
root_cmakelists_dir = 'src'
generator = "Ninja"
variant('gui', default=True, description='Enable VisIt\'s GUI')
variant('osmesa', default=False, description='Use OSMesa for off-screen CPU rendering')
variant('adios2', default=True, description='Enable ADIOS2 file format')
variant('hdf5', default=True, description='Enable HDF5 file format')
variant('silo', default=True, description='Enable Silo file format')
variant('python', default=True, description='Enable Python support')
variant('mpi', default=True, description='Enable parallel engine')
patch('spack-changes-3.1.patch', when="@3.1.0:,develop")
patch('spack-changes-3.0.1.patch', when="@3.0.1")
patch('nonframework-qwt.patch', when='^qt~framework platform=darwin')
patch('parallel-hdf5.patch', when='+hdf5+mpi')
# Exactly one of 'gui' or 'osmesa' has to be enabled
conflicts('+gui', when='+osmesa')
#############################################
# Full List of dependencies from build_visit
#############################################
# cyrush note:
# I added these here to give folks details
# to help eventually build up to full
# support for visit
#############################################
# =====================================
# core:
# =====================================
# cmake (build)
# vtk
# qt
# qwt
# python
# mpi
#
# =====================================
# rendering (optional):
# =====================================
# icet
# vtk-m
# vtk-h
# llvm
# mesagl
# osmesa
# tbb
# embree
# ispc
# ospray
#
# =====================================
# python modules:
# =====================================
# numpy
# pillow
# mpi4py
# seedme
# sphinx (build, docs)
# sphinx rtd theme (build, docs)
# pyqt (visit support deprecated)
# pyside (note: we want pyside 2)
#
# =====================================
# testing related:
# =====================================
# p7zip (build, test)
#
# =====================================
# io libs:
# =====================================
# adios
# adios2
# advio
# boost
# boxlib
# cfitsio
# cgns
# conduit
# damaris
# fastbit
# fastquery
# gdal
# h5part
# hdf4
# hdf5
# mdsplus
# mfem
# mili
# moab
# mxml
# nektarpp
# netcdf
# openexr
# pidx
# silo
# stripack
# szip
# tbb
# uintah
# xdmf
# xercesc
# xsd
# zlib
#
# =====================================
depends_on('[email protected]:', type='build')
depends_on('ninja', type='build')
depends_on('mpi', when='+mpi')
# VTK flavors
depends_on('[email protected]:8 +opengl2')
depends_on('vtk +osmesa', when='+osmesa')
depends_on('vtk +qt', when='+gui')
depends_on('vtk +python', when='+python')
depends_on('vtk +mpi', when='+mpi')
depends_on('vtk ~mpi', when='~mpi')
# Necessary VTK patches
depends_on('vtk', patches=[patch('vtk_compiler_visibility.patch')])
depends_on('vtk', patches=[patch('vtk_rendering_opengl2_x11.patch')],
when='~osmesa platform=linux')
depends_on('vtk', patches=[patch('vtk_wrapping_python_x11.patch')],
when='+python')
depends_on('glu', when='~osmesa')
depends_on('mesa-glu+osmesa', when='+osmesa')
# VisIt doesn't work with later versions of qt.
depends_on('qt+gui+opengl@5:5.14', when='+gui')
depends_on('qwt', when='+gui')
# [email protected] doesn't work with VisIt.
depends_on('[email protected]:3.7', when='+python')
extends('python', when='+python')
# VisIt uses the hdf5 1.8 api
# set the API version later on down in setup_build_environment
depends_on('[email protected]:', when='+hdf5')
depends_on('hdf5+mpi', when='+hdf5+mpi')
depends_on('hdf5~mpi', when='+hdf5~mpi')
# VisIt uses Silo's 'ghost zone' data structures, which are only available
# in v4.10+ releases: https://wci.llnl.gov/simulation/computer-codes/silo/releases/release-notes-4.10
depends_on('[email protected]: +shared', when='+silo')
depends_on('silo+hdf5', when='+silo+hdf5')
depends_on('silo~hdf5', when='+silo~hdf5')
depends_on('silo+mpi', when='+silo+mpi')
depends_on('silo~mpi', when='+silo~mpi')
depends_on('[email protected]:', when='+adios2')
depends_on('adios2+hdf5', when='+adios2+hdf5')
depends_on('adios2~hdf5', when='+adios2~hdf5')
depends_on('adios2+mpi', when='+adios2+mpi')
depends_on('adios2~mpi', when='+adios2~mpi')
depends_on('adios2+python', when='+adios2+python')
depends_on('adios2~python', when='+adios2~python')
depends_on('zlib')
@when('@3:,develop')
def patch(self):
# Some of VTK's targets don't create explicit libraries, so there is no
# 'vtktiff'. Instead, replace with the library variable defined from
# VTK's module flies (e.g. lib/cmake/vtk-8.1/Modules/vtktiff.cmake)
for filename in find('src', 'CMakeLists.txt'):
filter_file(r'\bvtk(tiff|jpeg|png)', r'${vtk\1_LIBRARIES}',
filename)
def flag_handler(self, name, flags):
if name in ('cflags', 'cxxflags'):
# NOTE: This is necessary in order to allow VisIt to compile a couple
# of lines of code with 'const char*' to/from 'char*' conversions.
if '@3:%gcc' in self.spec:
flags.append('-fpermissive')
# VisIt still uses the hdf5 1.8 api
if '+hdf5' in self.spec and '@1.10:' in self.spec['hdf5']:
flags.append('-DH5_USE_18_API')
return (flags, None, None)
def cmake_args(self):
spec = self.spec
args = [
self.define('CMAKE_POSITION_INDEPENDENT_CODE', True),
self.define('VTK_MAJOR_VERSION', spec['vtk'].version[0]),
self.define('VTK_MINOR_VERSION', spec['vtk'].version[1]),
self.define('VISIT_VTK_DIR', spec['vtk'].prefix),
self.define('VISIT_ZLIB_DIR', spec['zlib'].prefix),
self.define('VISIT_USE_GLEW', False),
self.define('VISIT_CONFIG_SITE', 'NONE'),
self.define('VISIT_INSTALL_THIRD_PARTY', True),
]
if '@3.1: platform=darwin' in spec:
args.append(self.define('FIXUP_OSX', False))
if '+python' in spec:
args.extend([
self.define('VISIT_PYTHON_FILTERS', True),
self.define('VISIT_PYTHON_SCRIPTING', True),
self.define('PYTHON_DIR', spec['python'].home),
])
else:
args.extend([
self.define('VISIT_PYTHON_FILTERS', False),
self.define('VISIT_PYTHON_SCRIPTING', False),
])
if '+gui' in spec:
qt_bin = spec['qt'].prefix.bin
qmake_exe = os.path.join(qt_bin, 'qmake')
args.extend([
self.define('VISIT_SERVER_COMPONENTS_ONLY', False),
self.define('VISIT_ENGINE_ONLY', False),
self.define('VISIT_LOC_QMAKE_EXE', qmake_exe),
self.define('VISIT_QT_DIR', spec['qt'].prefix),
self.define('VISIT_QWT_DIR', spec['qwt'].prefix),
])
else:
args.extend([
self.define('VISIT_SERVER_COMPONENTS_ONLY', True),
self.define('VISIT_ENGINE_ONLY', True),
])
# No idea why this is actually needed
if '^mesa' in spec:
args.append(self.define('VISIT_MESAGL_DIR', spec['mesa'].prefix))
if '+llvm' in spec['mesa']:
args.append(self.define('VISIT_LLVM_DIR', spec['libllvm'].prefix))
if '+hdf5' in spec:
args.append(self.define('VISIT_HDF5_DIR', spec['hdf5'].prefix))
if '+mpi' in spec and '+mpi' in spec['hdf5']:
args.append(self.define('VISIT_HDF5_MPI_DIR', spec['hdf5'].prefix))
if '+silo' in spec:
args.append(self.define('VISIT_SILO_DIR', spec['silo'].prefix))
if '+mpi' in spec:
args.extend([
self.define('VISIT_PARALLEL', True),
self.define('VISIT_MPI_COMPILER', spec['mpi'].mpicxx),
])
else:
args.append(self.define('VISIT_PARALLEL', False))
return args
# https://spack.readthedocs.io/en/latest/packaging_guide.html?highlight=executables#making-a-package-discoverable-with-spack-external-find
# Here we are only able to determine the latest version
# despite VisIt may have multiple versions
@classmethod
def determine_version(cls, exe):
output = Executable(exe)('-version', output=str, error=str)
match = re.search(r'\s*(\d[\d\.]+)\.', output)
return match.group(1) if match else None
| 35.494083 | 142 | 0.562474 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | jmellorcrummey/spack | var/spack/repos/builtin/packages/visit/package.py | 11,997 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('data', '0002_auto_20141114_1935'),
]
operations = [
migrations.RemoveField(
model_name='registryentry',
name='fixity_algorithm',
),
migrations.RemoveField(
model_name='registryentry',
name='fixity_value',
),
migrations.RemoveField(
model_name='registryentry',
name='last_fixity_date',
),
migrations.RemoveField(
model_name='registryentry',
name='published',
),
]
| 23.064516 | 44 | 0.569231 | [
"Apache-2.0"
] | APTrust/EarthDiver | dpnode/dpn/data/migrations/0003_auto_20141117_2011.py | 715 | Python |
import math
import numpy as np
from enum import IntEnum
class Mode(IntEnum):
CUSTOM = 0
EQUAL = 1
GAUSS = 2
GAUSS_SYM = 3
PYRAMID = 4
PYRAMID_SYM = 5
SIVEROO_1 = 6
SIVEROO_2 = 7
# This function will return a list of n values mapped linearly, like below:
# [0,1,2,...,n-1] -> [a,...,b]
def scaleRange(n: int, a: int, b: int):
return [(x*(b-a)/(n-1))+a for x in range(0,n)]
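# Illustrative example of the mapping above:
#   scaleRange(5, 0, 1) -> [0.0, 0.25, 0.5, 0.75, 1.0]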
def equal(n: int):
return [1/n]*n
def gauss(n: int):
r = range(n,0,-1)
val = [math.exp(-(2.0*x/n)**2) for x in r]
val = val/np.sum(val)
return val
def gauss_sym(n: int):
n = n/2
r = range(int(n),-math.ceil(n),-1)
val = ([math.exp(-(2.0*x/(n))**2) for x in r])
val = val/np.sum(val)
return val
def pyramid(n: int):
r = range(1,n+1)
val = [x/n for x in r]
val = val/np.sum(val)
return val
def pyramid_sym(n: int):
r = range(0,n)
val = [(n/2)-abs(x-(n-1)/2) for x in r]
val = val/np.sum(val)
return val
def siveroo1(n: int):
r = scaleRange(n,-3,0.1)
val = [math.floor(3*math.exp(-(x/1.9)**2))/3+0.1 for x in r]
val = val/np.sum(val)
return val
# this function will stretch the given array (w) to a specific length (n)
# and then normalize the result so its sum is equal to 1
# example : n = 10, w = [1,2]
# result  : val = [1,1,1,1,1,2,2,2,2,2] (before normalization)
def stretch(n: int, w: list):
r = scaleRange(n,0,len(w)-0.1)
val = []
idx = [math.floor(x) for x in r]
for x in range(0,n):
index = int(idx[x])
val.append(w[index])
val = val/np.sum(val)
return val
def null(n: int):
return [0]*n
def get_weight(mode: Mode, count: int):
if count == 1:
return [1.0]
else:
return {
Mode.EQUAL : equal(count),
Mode.GAUSS : gauss(count),
Mode.GAUSS_SYM : gauss_sym(count),
Mode.PYRAMID : pyramid(count),
Mode.PYRAMID_SYM : pyramid_sym(count),
Mode.SIVEROO_1 : siveroo1(count),
Mode.SIVEROO_2 : stretch(count,[1,3,3,2,2])
        }.get(mode, [1, 0])  # fallback to [1, 0] if the mode is not recognized
def modeName(mode: Mode):
return {
Mode.EQUAL : "[1] Equal",
Mode.GAUSS : "[2] Gaussian Asymmetric",
Mode.GAUSS_SYM : "[3] Gaussian Symmetric",
Mode.PYRAMID : "[4] Pyramid Asymmetric",
Mode.PYRAMID_SYM : "[5] Pyramid Symmetric",
Mode.SIVEROO_1 : "[6] Siveroo's Preset I",
Mode.SIVEROO_2 : "[7] Siveroo's Preset II"
}[mode]
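# Minimal usage sketch (illustrative, not part of the original module): print
# each preset's name and its normalized weights for blending 5 frames.
if __name__ == "__main__":
    for m in (Mode.EQUAL, Mode.GAUSS, Mode.GAUSS_SYM, Mode.PYRAMID,
              Mode.PYRAMID_SYM, Mode.SIVEROO_1, Mode.SIVEROO_2):
        print(modeName(m), get_weight(m, 5))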
| 27.968421 | 96 | 0.515995 | [
"MIT"
] | FireRedz/HFR-Resampler | weights.py | 2,657 | Python |
#!/usr/bin/env python
from setuptools import setup
setup(
name='mobib',
version='0.1',
description='Retrieve remaining number of trips from your MOBIB Basic',
author='Bruno Parmentier',
author_email='[email protected]',
url='https://github.com/bparmentier/mobib-reader/',
py_modules=['mobib'],
entry_points={
'console_scripts': ['mobib = mobib:main']
},
install_requires=['pyscard']
)
| 24.444444 | 75 | 0.665909 | [
"MIT"
] | bparmentier/mobib-basic-reader | setup.py | 440 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class CreateRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'CreateRule')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Select(self):
return self.get_query_params().get('Select')
def set_Select(self,Select):
self.add_query_param('Select',Select)
def get_RuleDesc(self):
return self.get_query_params().get('RuleDesc')
def set_RuleDesc(self,RuleDesc):
self.add_query_param('RuleDesc',RuleDesc)
def get_ShortTopic(self):
return self.get_query_params().get('ShortTopic')
def set_ShortTopic(self,ShortTopic):
self.add_query_param('ShortTopic',ShortTopic)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_DataType(self):
return self.get_query_params().get('DataType')
def set_DataType(self,DataType):
self.add_query_param('DataType',DataType)
def get_IotInstanceId(self):
return self.get_query_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_query_param('IotInstanceId',IotInstanceId)
def get_Where(self):
return self.get_query_params().get('Where')
def set_Where(self,Where):
self.add_query_param('Where',Where)
def get_TopicType(self):
return self.get_query_params().get('TopicType')
def set_TopicType(self,TopicType):
self.add_query_param('TopicType',TopicType)
def get_ProductKey(self):
return self.get_query_params().get('ProductKey')
def set_ProductKey(self,ProductKey):
self.add_query_param('ProductKey',ProductKey)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_Topic(self):
return self.get_query_params().get('Topic')
def set_Topic(self,Topic):
self.add_query_param('Topic',Topic) | 31.040816 | 74 | 0.753123 | [
"Apache-2.0"
] | ankitdobhal/aliyun-openapi-python-sdk | aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/CreateRuleRequest.py | 3,042 | Python |
import tweepy
from time import sleep
from datetime import datetime
from keys import *
from tqdm import tqdm
def play(test=True, i_pages=3, i_hashtag=20, like_pages=False, like_hashtag=False):
    while True:
        try:
            econotwbot(test, i_pages, i_hashtag, like_pages, like_hashtag)
        except Exception as e:
            print(e)
            sleep(60*30)
class econotwbot:
def __init__(self, test=True, i_pages=3, i_hashtag=20, like_pages=False, like_hashtag=False):
self.test = test
self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
self.auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(self.auth)
self._file_following = 'to_follow.txt'
self.i_pages = i_pages
self.i_hashtag = i_hashtag
self.like_pages = like_pages
self.like_hashtag = like_hashtag
self.pbar = 0
self.iteractions = self.i_pages*len(self.following_pages())+self.i_hashtag
if self.test==True:
            self.hello_world()
self.retweepy()
while self.test==False:
print()
print("Starting!",datetime.now())
print()
with tqdm(total=self.iteractions) as self.pbar:
self.retweepy_following()
tqdm.write("Just give me 5 more minutes to sleep please!")
sleep(5*60)
self.retweepy()
print()
print("Iteraction done!",datetime.now())
sleep(30*60)
def following_pages(self):
with open(self._file_following, 'r') as f:
return f.read().splitlines()
def retweepy(self):
tqdm.write('Delivering tweets with #econotw OR #EconTwitter')
dici={'q':'#econotw OR #EconTwitter'}
args={'method':self.api.search,
'dici':dici,
'like':self.like_hashtag,
'i':self.i_hashtag}
self.like_and_rt(**args)
def retweepy_following(self):
tqdm.write('Delivering interesting tweets')
for page in self.following_pages():
dici={'screen_name':page}
args={'method':self.api.user_timeline,
'dici':dici,
'like':self.like_pages,
'i':self.i_pages}
self.like_and_rt(**args)
def like_and_rt(self,method,dici,like,i):
count=0
for tweet in tweepy.Cursor(method=method,**dici).items(i):
self.pbar.update(1)
count+=1
try:
if like==True:
self.api.create_favorite(tweet.id)
sleep(1)
tweet.retweet()
string= 'Retweeted: '+ str(tweet.id) +' @'+tweet.user.screen_name
tqdm.write(string)
sleep(10)
# Print retweet errors
except tweepy.TweepError as error:
if (eval(error.reason)[0]['code'] != 139) and (eval(error.reason)[0]['code'] != 327):
tqdm.write('\nError. '+str(tweet.id)+' Retweet not successful. Reason: ')
tqdm.write(str(error.reason) +' '+ str(datetime.now()))
self.pbar.update(i-count)
except StopIteration:
break
def hello_world(self):
self.api.update_status("""Hello World! #econotw""")
if __name__ == "__main__":
play(test=False) | 34.038462 | 101 | 0.54096 | [
"MIT"
] | ppnasser/econotw | econotw.py | 3,540 | Python |
#!/usr/bin/env python3
import uuid
import random
import datetime
from faker import Factory
fake = Factory.create()
num_people = 1000
last_jump_start = datetime.datetime(2008, 9, 1)
last_jump_end = datetime.datetime(2016, 8, 1)
print('COPY members (uuid, name, email, phone_number, last_jump, created_at, updated_at) FROM stdin;')
for i in range(0, num_people):
member_uuid = str(uuid.uuid4())
name = fake.name()
email = fake.email()
phone_number = '+447' + str(random.randrange(100000000, 999999999, 1))
last_jump = fake.date_time_between_dates(datetime_start = last_jump_start, datetime_end = last_jump_end).strftime('%Y-%m-%d')
created_at = fake.date_time_between_dates(datetime_start = last_jump_start, datetime_end = last_jump_end)
updated_at = fake.date_time_between_dates(datetime_start = created_at, datetime_end = last_jump_end).strftime('%Y-%m-%d %H:%M:%S')
print("%s\t%s\t%s\t%s\t%s\t%s\t%s" % (member_uuid, name, email, phone_number, last_jump, created_at.strftime('%Y-%m-%d %H:%M:%S'), updated_at))
print('\\.')
| 36.137931 | 145 | 0.733779 | [
"MIT"
] | colorshifter/lsd-members | test-data/members.py | 1,048 | Python |
import os
from selfdrive.manager.process import PythonProcess, NativeProcess, DaemonProcess
from selfdrive.hardware import EON, TICI, PC
from common.op_params import opParams
WEBCAM = os.getenv("USE_WEBCAM") is not None
procs = [
DaemonProcess("manage_athenad", "selfdrive.athena.manage_athenad", "AthenadPid"),
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
NativeProcess("camerad", "selfdrive/camerad", ["./camerad"], unkillable=True, driverview=True),
NativeProcess("clocksd", "selfdrive/clocksd", ["./clocksd"]),
NativeProcess("dmonitoringmodeld", "selfdrive/modeld", ["./dmonitoringmodeld"], enabled=(not PC or WEBCAM), driverview=True),
NativeProcess("logcatd", "selfdrive/logcatd", ["./logcatd"]),
NativeProcess("loggerd", "selfdrive/loggerd", ["./loggerd"]),
NativeProcess("modeld", "selfdrive/modeld", ["./modeld"]),
NativeProcess("proclogd", "selfdrive/proclogd", ["./proclogd"]),
NativeProcess("sensord", "selfdrive/sensord", ["./sensord"], enabled=not PC, persistent=EON, sigkill=EON),
NativeProcess("ubloxd", "selfdrive/locationd", ["./ubloxd"], enabled=(not PC or WEBCAM)),
NativeProcess("ui", "selfdrive/ui", ["./ui"], persistent=True, watchdog_max_dt=(5 if TICI else None)),
NativeProcess("soundd", "selfdrive/ui", ["./soundd"]),
NativeProcess("locationd", "selfdrive/locationd", ["./locationd"]),
NativeProcess("boardd", "selfdrive/boardd", ["./boardd"], enabled=False),
PythonProcess("calibrationd", "selfdrive.locationd.calibrationd"),
PythonProcess("controlsd", "selfdrive.controls.controlsd"),
PythonProcess("deleter", "selfdrive.loggerd.deleter", persistent=True),
PythonProcess("dmonitoringd", "selfdrive.monitoring.dmonitoringd", enabled=(not PC or WEBCAM), driverview=True),
PythonProcess("logmessaged", "selfdrive.logmessaged", persistent=True),
PythonProcess("pandad", "selfdrive.pandad", persistent=True),
PythonProcess("paramsd", "selfdrive.locationd.paramsd"),
PythonProcess("plannerd", "selfdrive.controls.plannerd"),
PythonProcess("radard", "selfdrive.controls.radard"),
PythonProcess("rtshield", "selfdrive.rtshield", enabled=EON),
PythonProcess("thermald", "selfdrive.thermald.thermald", persistent=True),
PythonProcess("timezoned", "selfdrive.timezoned", enabled=TICI, persistent=True),
PythonProcess("tombstoned", "selfdrive.tombstoned", enabled=not PC, persistent=True),
PythonProcess("uploader", "selfdrive.loggerd.uploader", persistent=True),
]
if opParams().get('update_behavior').lower().strip() != 'off' and not os.path.exists('/data/no_ota_updates'):
procs.append(PythonProcess("updated", "selfdrive.updated", enabled=not PC, persistent=True))
managed_processes = {p.name: p for p in procs}
| 60.866667 | 127 | 0.741512 | [
"MIT"
] | SebastienLubrano/openpilot | selfdrive/manager/process_config.py | 2,739 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =======================================
# File Name: test_ADMM.py
# Purpose : test ADMM solver for primal
# problem and dual problem
# =======================================
from utils import get_params
from ADMM_primal import ADMM_primal
from ADMM_dual import ADMM_dual
import numpy as np
import argparse
import time
import sys
"""Parser
"""
parser = argparse.ArgumentParser()
parser.add_argument('--n', type=int, default=64)
parser.add_argument('--dataset', type=str, choices=['random', 'caffarelli', 'ellipse', 'DOTmark'], default='random')
parser.add_argument('--imageclass', type=str, default='WhiteNoise')
parser.add_argument('--method', type=str, choices=['primal', 'dual'], default='primal')
parser.add_argument('--iters', type=int, default=10000)
parser.add_argument('--alpha', type=float, default=1.618)
parser.add_argument('--rho', type=float, default=1024)
args = parser.parse_args()
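# Example invocation (illustrative; values are drawn from the choices declared
# above):
#   python test_ADMM.py --n 64 --dataset random --method primal --iters 10000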
def main():
"""Main routine
"""
print("\nTesting ADMM")
print("====================")
print("m = n : ", args.n)
print("dataset: ", args.dataset)
if args.dataset == 'DOTmark':
print("class : ", args.imageclass)
print("method : ", args.method)
print("====================")
mu, nu, c = get_params(args.n, args.dataset, args.imageclass)
start = time.time()
if args.method == 'primal':
ADMM_primal(mu, nu, c, args.iters, args.rho, args.alpha)
elif args.method == 'dual':
ADMM_dual(mu, nu, c, args.iters, args.rho, args.alpha)
t = time.time() - start
print('time = %.5e' % t)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print (" Ctrl+C pressed...")
sys.exit(1) | 29.915254 | 116 | 0.5983 | [
"MIT"
] | CrazyIvanPro/Optimal_Transport | test_ADMM.py | 1,765 | Python |
# -*- coding: utf-8 -*-
"""
solutions_by_text.sbt_token_generator
~~~~~~~~~~~~
This module handles security token generation.
"""
# @Author: sijanonly
# @Date: 2018-03-19 10:57:26
# @Last Modified by: sijanonly
# @Last Modified time: 2018-03-19 14:51:07
import json
from urllib import parse
import requests
from .handle_exceptions import CustomException
_base_url = 'https://{}.solutionsbytext.com/SBT.App.SetUp/RSServices/'
def create_security_token(api_key, stage):
"""
Generates a security token for SBT API access.
Args:
api_key (string): API_KEY value provided by solutionsbytext
stage (string): STAGE values (test or ui)
Returns:
string: SecurityToken returns by LoginAPIService
Raises:
        CustomException: Raised when security token generation fails.
"""
url = ''.join(
[
_base_url.format(stage),
'LoginAPIService.svc/AuthenticateAPIKey?',
parse.urlencode({'APIKey': api_key})
]
)
response_data = json.loads(requests.get(url).text)
if response_data['AuthenticateAPIKeyResult'].get('ErrorCode') == 1402:
raise CustomException(
'Error in generating security key.')
if response_data['AuthenticateAPIKeyResult'].get('ErrorCode') == 1401:
raise CustomException(
            'SecurityToken generation failed.')
return response_data['AuthenticateAPIKeyResult'].get('SecurityToken')
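# Minimal usage sketch (illustrative; the API key is a placeholder and 'test'
# is one of the stage values mentioned in the docstring):
#
#   token = create_security_token(api_key="YOUR_API_KEY", stage="test")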
| 25.103448 | 74 | 0.666896 | [
"MIT"
] | sijanonly/sbt-python-client | solutions_by_text/sbt_token_generator.py | 1,456 | Python |
v = int(input())
n = [v * x * 2 for x in range(1, 11)]
for i in range(len(n)):
print('N[%d] = %d' % (i, n[i]))
| 18.857143 | 38 | 0.5 | [
"MIT"
] | pedrodanieljardim/Desafios-URI-feitos-em-java | Beginner/1173.py | 132 | Python |
from curielogserver import app, get_default_dbconfig
import os
import time
import psycopg2.pool
from psycopg2 import OperationalError
retries = 10
while retries > 0:
retries -= 1
try:
app.config['postgreSQL_pool'] = psycopg2.pool.ThreadedConnectionPool(1, 20, get_default_dbconfig())
break
except OperationalError:
if retries == 0:
raise
time.sleep(1)
| 24.058824 | 107 | 0.691932 | [
"Apache-2.0"
] | bochuxt/curiefense | curiefense/curielogserver/app/main.py | 409 | Python |
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.urls import path
from . import views
urlpatterns = [
path("", views.DebugViewSet.as_view({"get": "list"}), name="debug.list_debug"),
path("<str:id>/", views.DebugViewSet.as_view({"get": "retrieve"}), name="debug.detail"),
]
| 50.631579 | 115 | 0.754678 | [
"MIT"
] | Canway-shiisa/bk-iam-saas | saas/backend/debug/urls.py | 978 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Youngseokcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already conflicted or abandoned.
"""
from test_framework.test_framework import YoungseokcoinTestFramework
from test_framework.util import *
class AbandonConflictTest(YoungseokcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
def run_test(self):
self.nodes[1].generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
# Disconnect nodes so node0's transactions don't get into node1's mempool
disconnect_nodes(self.nodes[0], 1)
# Identify the 10ysc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
inputs =[]
# spend 10ysc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998ysc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
# Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
# But if its received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
# Send child tx again so its unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 YSC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 YSC output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
| 48.628931 | 137 | 0.662442 | [
"MIT"
] | youngseoka/youngseokcoin | test/functional/abandonconflict.py | 7,732 | Python |
from django.shortcuts import render
def landing_page(request):
return render(request,'landing_page.html',)
| 23.2 | 48 | 0.758621 | [
"MIT"
] | yosif88/SoftUni | python-web/petstagram/petstagram/common/views.py | 116 | Python |
#You can either add the python package path.
#sys.path.append(r'/mnt/e/GitHub_Design/Metalprot')
from metalprot.search import search_selfcenter
from metalprot.basic import filter
import pickle
import time
import prody as pr
'''
python /mnt/e/GitHub_Design/Metalprot/scrips/search_selfcenter/run_selfcenter_search.py
'''
start_time = time.time()
query_dir = '/mnt/e/DesignData/ligands/ZN_rcsb_datesplit/20211013/20211013_selfcenter/pickle_noCYS/'
with open(query_dir + 'all_metal_vdm.pkl', 'rb') as f:
query_all_metal = pickle.load(f)
with open(query_dir + 'AAMetalPhiPsi.pkl', 'rb') as f:
all_querys = pickle.load(f)
with open(query_dir + 'cluster_centroid_dict.pkl', 'rb') as f:
cluster_centroid_dict = pickle.load(f)
print(len(all_querys))
### run Search_struct
workdir = '/mnt/e/DesignData/ligands/LigandBB/MID1sc10/'
outdir = workdir + 'output_selfcenter/'
target_path = workdir + '5od1_zn.pdb'
win_filter = [35, 61, 65]
# workdir = '/mnt/e/DesignData/ligands/LigandBB/6dwv/'
# outdir = workdir + 'output_selfcenter/'
# target_path = workdir + '6dwv_core.pdb'
# win_filter = []
# workdir = '/mnt/e/DesignData/ligands/LigandBB/8adh/'
# outdir = workdir + 'output_selfcenter/'
# target_path = workdir + '1989_8adh_ZN_1.pdb'
# win_filter = []
# workdir = '/mnt/e/DesignData/ligands/LigandBB/3f7u_lig/'
# outdir = workdir + 'output_selfcenter/'
# target_path = workdir + '3f7u1aa.pdb'
# win_filter = [94, 96, 119]
# workdir = '/mnt/e/DesignData/ligands/LigandBB/2afw_lig/'
# outdir = workdir + 'output_selfcenter/'
# target_path = workdir + '2afw_aa.pdb'
# win_filter = [159, 202, 330]
# workdir = '/mnt/e/DesignData/ligands/LigandBB/huong/'
# outdir = workdir + 'output_selfcenter/'
# target_path = workdir + 'aQ4x_aa.pdb'
# win_filter = ['I-3', 'I-6', 'I-10', 'I-13', 'I-17', 'I-20',
# 'J-3', 'J-6', 'J-7', 'J-10', 'J-13', 'J-14', 'J-17', 'J-20', 'J-21',
# 'K-6', 'K-10', 'K-13', 'K-17', 'K-20',
# 'L-3', 'L-6', 'L-7', 'L-10', 'L-13', 'L-14', 'L-17', 'L-20', 'L-21', 'L-24',
# 'M-3', 'M-6', 'M-10', 'M-13', 'M-17', 'M-20',
# 'N-3', 'N-6', 'N-7', 'N-10', 'N-13', 'N-14', 'N-17', 'N-20', 'N-21'
# ]
geometry_path = None
#geometry_path = workdir + 'tetrahydral_geo.pdb'
metal_metal_dist = 0.3
num_contact_vdms = [3]
allowed_aa_combinations = [['H', 'H', 'H']]
allowed_aa_combinations = []
_filter = filter.Search_filter(filter_abple = False, filter_phipsi = True, max_phipsi_val = 25,
filter_vdm_score = False, min_vdm_score = 0, filter_vdm_count = False, min_vdm_clu_num = 20,
after_search_filter_geometry = True, filter_based_geometry_structure = False, angle_tol = 15, aa_aa_tol = 0.3, aa_metal_tol = 0.2,
pair_angle_range = [85, 130], pair_aa_aa_dist_range = [2.8, 4], pair_metal_aa_dist_range = None,
after_search_filter_qt_clash = True, vdm_vdm_clash_dist = 2.7, vdm_bb_clash_dist = 2.2,
after_search_open_site_clash = True, open_site_dist = 3.0,
write_filtered_result = False, selfcenter_filter_member_phipsi=True)
ss = search_selfcenter.Search_selfcenter(target_path, outdir, all_querys, cluster_centroid_dict, query_all_metal,
num_contact_vdms, metal_metal_dist, win_filter, validateOriginStruct = True, search_filter= _filter, geometry_path = None,
density_radius = 0.6, allowed_aa_combinations = allowed_aa_combinations, output_wincomb_overlap=True)
#ss.run_selfcenter_search()
search_selfcenter.run_search_selfcenter(ss)
end_time = time.time()
print(end_time - start_time, "seconds") | 30.555556 | 134 | 0.687552 | [
"MIT"
] | lonelu/Metalprot | scrips/search_selfcenter/run_selfcenter_search.py | 3,575 | Python |
# coding=utf-8
import logging
import pytest
from rasa_nlu.evaluate import (
is_token_within_entity, do_entities_overlap,
merge_labels, remove_duckling_entities,
remove_empty_intent_examples, get_entity_extractors,
get_duckling_dimensions, known_duckling_dimensions,
find_component, remove_duckling_extractors, drop_intents_below_freq,
run_cv_evaluation, substitute_labels, IntentEvaluationResult,
evaluate_intents, evaluate_entities)
from rasa_nlu.evaluate import does_token_cross_borders
from rasa_nlu.evaluate import align_entity_predictions
from rasa_nlu.evaluate import determine_intersection
from rasa_nlu.evaluate import determine_token_labels
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.tokenizers import Token
from rasa_nlu import utils
import json
import os
from rasa_nlu import training_data, config
from tests import utilities
logging.basicConfig(level="DEBUG")
@pytest.fixture(scope="session")
def duckling_interpreter(component_builder, tmpdir_factory):
conf = RasaNLUModelConfig({"pipeline": [{"name": "ner_duckling_http"}]})
return utilities.interpreter_for(
component_builder,
data="./data/examples/rasa/demo-rasa.json",
path=tmpdir_factory.mktemp("projects").strpath,
config=conf)
# Chinese Example
# "对面食过敏" -> To be allergic to wheat-based food
CH_wrong_segmentation = [Token("对面", 0),
Token("食", 2),
Token("过敏", 3)] # opposite, food, allergy
CH_correct_segmentation = [Token("对", 0),
Token("面食", 1),
Token("过敏", 3)] # towards, wheat-based food, allergy
CH_wrong_entity = {
"start": 0,
"end": 2,
"value": "对面",
"entity": "direction"
}
CH_correct_entity = {
"start": 1,
"end": 3,
"value": "面食",
"entity": "food_type"
}
# EN example
# "Hey Robot, I would like to eat pizza near Alexanderplatz tonight"
EN_indices = [0, 4, 9, 11, 13, 19, 24, 27, 31, 37, 42, 57]
EN_tokens = ["Hey", "Robot", ",", "I", "would", "like", "to", "eat", "pizza",
"near", "Alexanderplatz", "tonight"]
EN_tokens = [Token(t, i) for t, i in zip(EN_tokens, EN_indices)]
EN_targets = [
{
"start": 31,
"end": 36,
"value": "pizza",
"entity": "food"
},
{
"start": 37,
"end": 56,
"value": "near Alexanderplatz",
"entity": "location"
},
{
"start": 57,
"end": 64,
"value": "tonight",
"entity": "datetime"
}
]
EN_predicted = [
{
"start": 4,
"end": 9,
"value": "Robot",
"entity": "person",
"extractor": "A"
},
{
"start": 31,
"end": 36,
"value": "pizza",
"entity": "food",
"extractor": "A"
},
{
"start": 42,
"end": 56,
"value": "Alexanderplatz",
"entity": "location",
"extractor": "A"
},
{
"start": 42,
"end": 64,
"value": "Alexanderplatz tonight",
"entity": "movie",
"extractor": "B"
}
]
def test_token_entity_intersection():
# included
intsec = determine_intersection(CH_correct_segmentation[1],
CH_correct_entity)
assert intsec == len(CH_correct_segmentation[1].text)
# completely outside
intsec = determine_intersection(CH_correct_segmentation[2],
CH_correct_entity)
assert intsec == 0
# border crossing
intsec = determine_intersection(CH_correct_segmentation[1],
CH_wrong_entity)
assert intsec == 1
def test_token_entity_boundaries():
# smaller and included
assert is_token_within_entity(CH_wrong_segmentation[1],
CH_correct_entity)
assert not does_token_cross_borders(CH_wrong_segmentation[1],
CH_correct_entity)
# exact match
assert is_token_within_entity(CH_correct_segmentation[1],
CH_correct_entity)
assert not does_token_cross_borders(CH_correct_segmentation[1],
CH_correct_entity)
# completely outside
assert not is_token_within_entity(CH_correct_segmentation[0],
CH_correct_entity)
assert not does_token_cross_borders(CH_correct_segmentation[0],
CH_correct_entity)
# border crossing
assert not is_token_within_entity(CH_wrong_segmentation[0],
CH_correct_entity)
assert does_token_cross_borders(CH_wrong_segmentation[0], CH_correct_entity)
def test_entity_overlap():
assert do_entities_overlap([CH_correct_entity, CH_wrong_entity])
assert not do_entities_overlap(EN_targets)
def test_determine_token_labels_throws_error():
with pytest.raises(ValueError):
determine_token_labels(CH_correct_segmentation,
[CH_correct_entity,
CH_wrong_entity], ["ner_crf"])
def test_determine_token_labels_no_extractors():
determine_token_labels(CH_correct_segmentation[0],
[CH_correct_entity, CH_wrong_entity], None)
def test_determine_token_labels_with_extractors():
determine_token_labels(CH_correct_segmentation[0],
[CH_correct_entity, CH_wrong_entity], ["A", "B"])
def test_label_merging():
aligned_predictions = [
{"target_labels": ["O", "O"], "extractor_labels":
{"A": ["O", "O"]}},
{"target_labels": ["LOC", "O", "O"], "extractor_labels":
{"A": ["O", "O", "O"]}}
]
assert all(merge_labels(aligned_predictions) ==
["O", "O", "LOC", "O", "O"])
assert all(merge_labels(aligned_predictions, "A") ==
["O", "O", "O", "O", "O"])
def test_duckling_patching():
entities = [[
{
"start": 37,
"end": 56,
"value": "near Alexanderplatz",
"entity": "location",
"extractor": "ner_crf"
},
{
"start": 57,
"end": 64,
"value": "tonight",
"entity": "Time",
"extractor": "ner_duckling_http"
}
]]
patched = [[
{
"start": 37,
"end": 56,
"value": "near Alexanderplatz",
"entity": "location",
"extractor": "ner_crf"
}
]]
assert remove_duckling_entities(entities) == patched
def test_drop_intents_below_freq():
td = training_data.load_data('data/examples/rasa/demo-rasa.json')
clean_td = drop_intents_below_freq(td, 0)
assert clean_td.intents == {'affirm', 'goodbye', 'greet',
'restaurant_search'}
clean_td = drop_intents_below_freq(td, 10)
assert clean_td.intents == {'affirm', 'restaurant_search'}
def test_run_cv_evaluation():
td = training_data.load_data('data/examples/rasa/demo-rasa.json')
nlu_config = config.load("sample_configs/config_spacy.yml")
n_folds = 2
results, entity_results = run_cv_evaluation(td, n_folds, nlu_config)
assert len(results.train["Accuracy"]) == n_folds
assert len(results.train["Precision"]) == n_folds
assert len(results.train["F1-score"]) == n_folds
assert len(results.test["Accuracy"]) == n_folds
assert len(results.test["Precision"]) == n_folds
assert len(results.test["F1-score"]) == n_folds
assert len(entity_results.train['ner_crf']["Accuracy"]) == n_folds
assert len(entity_results.train['ner_crf']["Precision"]) == n_folds
assert len(entity_results.train['ner_crf']["F1-score"]) == n_folds
assert len(entity_results.test['ner_crf']["Accuracy"]) == n_folds
assert len(entity_results.test['ner_crf']["Precision"]) == n_folds
assert len(entity_results.test['ner_crf']["F1-score"]) == n_folds
def test_intent_evaluation_report(tmpdir_factory):
path = tmpdir_factory.mktemp("evaluation").strpath
report_folder = os.path.join(path, "reports")
report_filename = os.path.join(report_folder, "intent_report.json")
utils.create_dir(report_folder)
intent_results = [
IntentEvaluationResult("", "restaurant_search",
"I am hungry", 0.12345),
IntentEvaluationResult("greet", "greet",
"hello", 0.98765)]
result = evaluate_intents(intent_results,
report_folder,
successes_filename=None,
errors_filename=None,
confmat_filename=None,
intent_hist_filename=None)
report = json.loads(utils.read_file(report_filename))
greet_results = {"precision": 1.0,
"recall": 1.0,
"f1-score": 1.0,
"support": 1}
prediction = {'text': 'hello',
'intent': 'greet',
'predicted': 'greet',
'confidence': 0.98765}
assert len(report.keys()) == 4
assert report["greet"] == greet_results
assert result["predictions"][0] == prediction
def test_entity_evaluation_report(tmpdir_factory):
path = tmpdir_factory.mktemp("evaluation").strpath
report_folder = os.path.join(path, "reports")
mock_extractors = ["A", "B"]
report_filename_a = os.path.join(report_folder, "A_report.json")
report_filename_b = os.path.join(report_folder, "B_report.json")
utils.create_dir(report_folder)
result = evaluate_entities([EN_targets],
[EN_predicted],
[EN_tokens],
mock_extractors,
report_folder)
report_a = json.loads(utils.read_file(report_filename_a))
report_b = json.loads(utils.read_file(report_filename_b))
assert len(report_a) == 8
assert report_a["datetime"]["support"] == 1.0
assert report_b["macro avg"]["recall"] == 0.2
assert result["A"]["accuracy"] == 0.75
def test_empty_intent_removal():
intent_results = [
IntentEvaluationResult("", "restaurant_search",
"I am hungry", 0.12345),
IntentEvaluationResult("greet", "greet",
"hello", 0.98765)
]
intent_results = remove_empty_intent_examples(intent_results)
assert len(intent_results) == 1
assert intent_results[0].target == "greet"
assert intent_results[0].prediction == "greet"
assert intent_results[0].confidence == 0.98765
assert intent_results[0].message == "hello"
def test_evaluate_entities_cv_empty_tokens():
mock_extractors = ["A", "B"]
result = align_entity_predictions(EN_targets, EN_predicted,
[], mock_extractors)
assert result == {
"target_labels": [],
"extractor_labels": {
"A": [],
"B": []
}
}, "Wrong entity prediction alignment"
def test_evaluate_entities_cv():
mock_extractors = ["A", "B"]
result = align_entity_predictions(EN_targets, EN_predicted,
EN_tokens, mock_extractors)
assert result == {
"target_labels": ["O", "O", "O", "O", "O", "O", "O", "O", "food",
"location", "location", "datetime"],
"extractor_labels": {
"A": ["O", "person", "O", "O", "O", "O", "O", "O", "food",
"O", "location", "O"],
"B": ["O", "O", "O", "O", "O", "O", "O", "O", "O", "O",
"movie", "movie"]
}
}, "Wrong entity prediction alignment"
def test_get_entity_extractors(duckling_interpreter):
assert get_entity_extractors(duckling_interpreter) == {"ner_duckling_http"}
def test_get_duckling_dimensions(duckling_interpreter):
dims = get_duckling_dimensions(duckling_interpreter, "ner_duckling_http")
assert set(dims) == known_duckling_dimensions
def test_find_component(duckling_interpreter):
name = find_component(duckling_interpreter, "ner_duckling_http").name
assert name == "ner_duckling_http"
def test_remove_duckling_extractors(duckling_interpreter):
target = set([])
patched = remove_duckling_extractors({"ner_duckling_http"})
assert patched == target
def test_label_replacement():
original_labels = ["O", "location"]
target_labels = ["no_entity", "location"]
assert substitute_labels(original_labels, "O", "no_entity") == target_labels
| 31.982368 | 80 | 0.595259 | [
"Apache-2.0"
] | careless25/rasa_nlu | tests/base/test_evaluation.py | 12,735 | Python |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import sys
import unittest
import petstore_api
try:
from petstore_api.model import child_all_of
except ImportError:
child_all_of = sys.modules[
'petstore_api.model.child_all_of']
try:
from petstore_api.model import parent
except ImportError:
parent = sys.modules[
'petstore_api.model.parent']
from petstore_api.model.child import Child
class TestChild(unittest.TestCase):
"""Child unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testChild(self):
"""Test Child
        This will fail because additional_properties_type is None in ChildAllOf; it must be
        defined as any type to allow the property radio_waves, which is not defined in
        ChildAllOf but in Grandparent.
"""
# make an instance of Child, a composed schema model
radio_waves = True
tele_vision = True
inter_net = True
with self.assertRaises(petstore_api.exceptions.ApiValueError):
child = Child(
radio_waves=radio_waves,
tele_vision=tele_vision,
inter_net=inter_net
)
if __name__ == '__main__':
unittest.main()
| 27.155172 | 174 | 0.674921 | [
"Apache-2.0"
] | 0x0c/openapi-generator | samples/client/petstore/python_disallowAdditionalPropertiesIfNotPresent/test/test_child.py | 1,575 | Python |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, Optional
import torch
from monai.utils import exact_version, optional_import
Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events")
Checkpoint, _ = optional_import("ignite.handlers", "0.3.0", exact_version, "Checkpoint")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine")
class CheckpointLoader:
"""
CheckpointLoader acts as an Ignite handler to load checkpoint data from file.
It can load variables for network, optimizer, lr_scheduler, etc.
If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead
as PyTorch recommended and then use this loader to load the model.
Args:
load_path: the file path of checkpoint, it should be a PyTorch `pth` file.
load_dict: target objects that load checkpoint to. examples::
{'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
map_location: when loading the module for distributed training/evaluation,
need to provide an appropriate map_location argument to prevent a process
            from stepping into others’ devices. If map_location is missing, torch.load will
first load the module to CPU and then copy each parameter to where it was
saved, which would result in all processes on the same machine using the
same set of devices.
"""
def __init__(
self, load_path: str, load_dict: Dict, name: Optional[str] = None, map_location: Optional[Dict] = None,
) -> None:
assert load_path is not None, "must provide clear path to load checkpoint."
self.load_path = load_path
assert load_dict is not None and len(load_dict) > 0, "must provide target objects to load."
self.logger = logging.getLogger(name)
for k, v in load_dict.items():
if hasattr(v, "module"):
load_dict[k] = v.module
self.load_dict = load_dict
self._name = name
self.map_location = map_location
def attach(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if self._name is None:
self.logger = engine.logger
engine.add_event_handler(Events.STARTED, self)
def __call__(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
checkpoint = torch.load(self.load_path, map_location=self.map_location)
if len(self.load_dict) == 1:
key = list(self.load_dict.keys())[0]
if not (key in checkpoint):
checkpoint = {key: checkpoint}
Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)
self.logger.info(f"Restored all variables from {self.load_path}")
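# A minimal, hypothetical usage sketch (not part of MONAI; `net`, `optimizer` and
# `trainer` stand in for objects created by the surrounding training script):
#
#     loader = CheckpointLoader(
#         load_path="./runs/checkpoint.pth",
#         load_dict={"network": net, "optimizer": optimizer},
#     )
#     loader.attach(trainer)  # restores the objects when the engine fires STARTED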
| 42.72093 | 111 | 0.679096 | [
"Apache-2.0"
] | BRAINSia/MONAI | monai/handlers/checkpoint_loader.py | 3,676 | Python |
#pip install flask-bcrypt
from flask_bcrypt import Bcrypt
# Create the Hasher
bcrypt = Bcrypt()
hashed_pass = bcrypt.generate_password_hash('somethingSuperSecret')
print(hashed_pass)
wrong_check = bcrypt.check_password_hash(hashed_pass, 'wrongpass')
print(wrong_check)
right_check = bcrypt.check_password_hash(hashed_pass, 'somethingSuperSecret')
print(right_check)
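# A minimal sketch of a typical login check built on the helpers above.
# `stored_hash` stands in for a value persisted in a user database (an assumption
# for illustration); flask_bcrypt embeds the salt inside the generated hash.
stored_hash = bcrypt.generate_password_hash('somethingSuperSecret')
def verify_login(candidate_password):
    # check_password_hash re-hashes the candidate with the salt from stored_hash
    return bcrypt.check_password_hash(stored_hash, candidate_password)
print(verify_login('wrongpass'))               # False
print(verify_login('somethingSuperSecret'))    # True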
| 24.666667 | 77 | 0.827027 | [
"Apache-2.0"
] | saidulislam/flask-bootcamp-1 | 07-User-Authentication/00-Password-Hashing/Using-Bcrypt.py | 370 | Python |
_k = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2]
_h = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19]
def rotr(x, y):
    # 32-bit right rotation of x by y bits
return ((x >> y) | (x << (32 - y))) & 0xFFFFFFFF
def sha256(b4x16):
    # Processes one 512-bit block (sixteen 32-bit words) and folds it into the
    # running hash state _h, so it must be called once per block, in message order.
w = [0] * 64
print(b4x16)
for i in range(16):
w[i] = b4x16[i]
    # Message schedule: expand the 16 input words to 64 words.
    for i in range(16, 64):
wi_15 = w[i - 15]
s0 = rotr(wi_15, 7) ^ rotr(wi_15, 18) ^ (wi_15 >> 3)
wi_2 = w[i - 2]
s1 = rotr(wi_2, 17) ^ rotr(wi_2, 19) ^ (wi_2 >> 10)
wi_16 = w[i - 16]
wi_7 = w[i - 7]
w[i] = (wi_16 + s0 + wi_7 + s1) & 0xFFFFFFFF
    # Compression: 64 rounds over the working variables a..h, added back into _h below.
    a, b, c, d, e, f, g, h = _h
for i in range(64):
s0 = rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22)
maj = (a & b) ^ (a & c) ^ (b & c)
t2 = s0 + maj
s1 = rotr(e, 6) ^ rotr(e, 11) ^ rotr(e, 25)
ch = (e & f) ^ ((~e) & g)
t1 = h + s1 + ch + _k[i] + w[i]
h = g
g = f
f = e
e = (d + t1) & 0xFFFFFFFF
d = c
c = b
b = a
a = (t1 + t2) & 0xFFFFFFFF
_lst = [a, b, c, d, e, f, g, h]
for i in range(8):
_h[i] = (_h[i] + _lst[i]) & 0xFFFFFFFF
for i in _h:
print('{:08x}'.format(i))
print("===========")
return _h
lst = [0x61616161] * 16
sha256(lst)
lst = [0] * 16
lst[0] = 0x80000000
lst[15] = 0x00000200
rv = sha256(lst)
for i in rv:
print('R {:08x}'.format(i))
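# Sanity check (a sketch assuming the standard library is available): the two calls
# above fed the data block and the padding block for the 64-byte message b'a' * 64,
# so the final state should match hashlib's digest of that message.
import hashlib
computed = ''.join('{:08x}'.format(i) for i in rv)
expected = hashlib.sha256(b'a' * 64).hexdigest()
print(computed == expected)  # expected to print True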
| 28.974359 | 60 | 0.553097 | [
"MIT"
] | ryos36/polyphony-tutorial | sha2/py_sha256.py | 2,260 | Python |
#!/usr/bin/env python3
# ===============================================================================
# NAME: XmlSerializeParser.py
#
# DESCRIPTION: This class parses the XML serializable types files.
#
# USAGE:
#
# AUTHOR: reder
# EMAIL: [email protected]
# DATE CREATED : June 4, 2013
#
# Copyright 2013, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
#
# Python standard modules
#
import hashlib
import logging
import os
import sys
from lxml import etree
from fprime_ac.utils import ConfigManager
from fprime_ac.utils.buildroot import (
BuildRootCollisionException,
BuildRootMissingException,
locate_build_root,
)
from fprime_ac.utils.exceptions import FprimeXmlException
#
# Python extension modules and custom interfaces
#
#
# Universal globals used within module go here.
# (DO NOT USE MANY!)
#
# Global logger init. below.
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
format_dictionary = {
"U8": "%u",
"I8": "%d",
"U16": "%u",
"I16": "%d",
"U32": "%u",
"I32": "%d",
"U64": "%lu",
"I64": "%ld",
"F32": "%g",
"F64": "%g",
"bool": "%s",
"string": "%s",
"ENUM": "%d",
}
#
class XmlSerializeParser:
"""
An XML parser class that uses lxml.etree to consume an XML
serializable type documents. The class is instanced with
an XML file name.
"""
def __init__(self, xml_file=None):
"""
Given a well formed XML file (xml_file), read it and turn it into
a big string.
"""
self.__root = None
self.__name = ""
self.__namespace = None
# List of C++ include files for serializable *.hpp file
self.__include_header_files = []
# List of XML serializable description dependencies
self.__includes = []
# List of XML enum type files
self.__include_enum_files = []
# List of XML array type files
self.__include_array_files = []
# Comment block of text for serializable
self.__comment = ""
# List of (name, type, comment) tuples
self.__members = []
# Type ID for serialized type
self.__type_id = None
#
        if not os.path.isfile(xml_file):
stri = "ERROR: Could not find specified XML file %s." % xml_file
raise OSError(stri)
fd = open(xml_file)
xml_file = os.path.basename(xml_file)
# xml_file = os.path.basename(xml_file)
self.__xml_filename = xml_file
self.__config = ConfigManager.ConfigManager.getInstance()
#
xml_parser = etree.XMLParser(remove_comments=True)
element_tree = etree.parse(fd, parser=xml_parser)
# Validate new imports using their root tag as a key to find what schema to use
rng_file = self.__config.get(
"schema", element_tree.getroot().tag.lower()
).lstrip("/")
try:
rng_file = locate_build_root(rng_file)
except (BuildRootMissingException, BuildRootCollisionException) as bre:
stri = "ERROR: Could not find specified RNG file {}. {}".format(
rng_file,
str(bre),
)
raise OSError(stri)
file_handler = open(rng_file)
relax_parsed = etree.parse(file_handler)
file_handler.close()
relax_compiled = etree.RelaxNG(relax_parsed)
# 2/3 conversion
if not relax_compiled.validate(element_tree):
msg = "XML file {} is not valid according to schema {}.".format(
xml_file, rng_file
)
raise FprimeXmlException(msg)
serializable = element_tree.getroot()
if serializable.tag != "serializable":
PRINT.info("%s is not a serializable definition file" % xml_file)
sys.exit(-1)
print("Parsing Serializable %s" % serializable.attrib["name"])
self.__name = serializable.attrib["name"]
if "namespace" in serializable.attrib:
self.__namespace = serializable.attrib["namespace"]
else:
self.__namespace = None
if "typeid" in serializable.attrib:
self.__type_id = serializable.attrib["typeid"]
else:
self.__type_id = None
for serializable_tag in serializable:
if serializable_tag.tag == "comment":
self.__comment = serializable_tag.text.strip()
elif serializable_tag.tag == "include_header":
self.__include_header_files.append(serializable_tag.text)
elif serializable_tag.tag == "import_serializable_type":
self.__includes.append(serializable_tag.text)
elif serializable_tag.tag == "import_enum_type":
self.__include_enum_files.append(serializable_tag.text)
elif serializable_tag.tag == "import_array_type":
self.__include_array_files.append(serializable_tag.text)
elif serializable_tag.tag == "members":
for member in serializable_tag:
if member.tag != "member":
PRINT.info(
"%s: Invalid tag %s in serializable member definition"
% (xml_file, member.tag)
)
sys.exit(-1)
n = member.attrib["name"]
t = member.attrib["type"]
if "size" in list(member.attrib.keys()):
if t == "ENUM":
PRINT.info(
"%s: Member %s: arrays of enums not supported yet!"
% (xml_file, n)
)
sys.exit(-1)
s = member.attrib["size"]
if not s.isdigit():
PRINT.info(
"{}: Member {}: size must be a number".format(
xml_file, n
)
)
sys.exit(-1)
else:
s = None
if "format" in list(member.attrib.keys()):
f = member.attrib["format"]
else:
if t in list(format_dictionary.keys()):
f = format_dictionary[t]
else: # Must be included type, which will use toString method
f = "%s"
if t == "string":
if s is None:
PRINT.info(
"%s: member %s string must specify size tag"
% (xml_file, member.tag)
)
sys.exit(-1)
if "comment" in list(member.attrib.keys()):
c = member.attrib["comment"]
else:
c = None
for member_tag in member:
if member_tag.tag == "enum" and t == "ENUM":
en = member_tag.attrib["name"]
enum_members = []
for mem in member_tag:
mn = mem.attrib["name"]
if "value" in list(mem.attrib.keys()):
v = mem.attrib["value"]
else:
v = None
if "comment" in list(mem.attrib.keys()):
mc = mem.attrib["comment"].strip()
else:
mc = None
enum_members.append((mn, v, mc))
t = ((t, en), enum_members)
else:
PRINT.info(
"%s: Invalid member tag %s in serializable member %s"
% (xml_file, member_tag.tag, n)
)
sys.exit(-1)
self.__members.append((n, t, s, f, c))
#
# Generate a type id here using SHA256 algorithm and XML stringified file.
#
if not "typeid" in serializable.attrib:
s = etree.tostring(element_tree.getroot())
h = hashlib.sha256(s)
n = h.hexdigest()
self.__type_id = "0x" + n.upper()[-8:]
def get_typeid(self):
"""
Return a generated type ID from contents of XML file.
"""
return self.__type_id
def get_xml_filename(self):
"""
Return the original XML filename parsed.
"""
return self.__xml_filename
def get_name(self):
return self.__name
def get_namespace(self):
return self.__namespace
def get_include_header_files(self):
"""
Return a list of all imported Port type XML files.
"""
return self.__include_header_files
def get_includes(self):
"""
Returns a list of all imported XML serializable files.
"""
return self.__includes
def get_include_enums(self):
"""
Returns a list of all imported XML enum files.
"""
return self.__include_enum_files
def get_include_arrays(self):
"""
Returns a list of all imported XML array files.
"""
return self.__include_array_files
def get_comment(self):
"""
Return text block string of comment for serializable class.
"""
return self.__comment
def get_members(self):
"""
Returns a list of member (name, type, optional size, optional format, optional comment) needed.
"""
return self.__members
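# A minimal, hypothetical usage sketch (the XML path below is an assumption, not a
# file shipped with the project):
#
#     parser = XmlSerializeParser("MySerializableAi.xml")
#     for (name, type_, size, fmt, comment) in parser.get_members():
#         print(name, type_, fmt)
#     print(parser.get_typeid())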
| 34.324324 | 103 | 0.500197 | [
"Apache-2.0"
] | 1Blackdiamondsc/fprime | Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py | 10,160 | Python |
from .core import Pool, CatboostError, get_catboost_bin_module, ARRAY_TYPES
from collections import defaultdict
import numpy as np
_catboost = get_catboost_bin_module()
_eval_metric_util = _catboost._eval_metric_util
_get_roc_curve = _catboost._get_roc_curve
_select_threshold = _catboost._select_threshold
def create_cd(
label=None,
cat_features=None,
weight=None,
baseline=None,
doc_id=None,
group_id=None,
subgroup_id=None,
timestamp=None,
auxiliary_columns=None,
feature_names=None,
output_path='train.cd'
):
_from_param_to_cd = {
'label': 'Label',
'weight': 'Weight',
'baseline': 'Baseline',
'doc_id': 'DocId',
'group_id': 'GroupId',
'subgroup_id': 'SubgroupId',
'timestamp': 'Timestamp'
}
_column_description = defaultdict(lambda: ['Num', ''])
for key, value in locals().copy().items():
if not (key.startswith('_') or value is None):
if key in ('cat_features', 'auxiliary_columns'):
if isinstance(value, int):
value = [value]
for index in value:
if not isinstance(index, int):
raise CatboostError('Unsupported index type. Expected int, got {}'.format(type(index)))
if index in _column_description:
raise CatboostError('The index {} occurs more than once'.format(index))
_column_description[index] = ['Categ', ''] if key == 'cat_features' else ['Auxiliary', '']
elif key not in ('feature_names', 'output_path'):
if not isinstance(value, int):
raise CatboostError('Unsupported index type. Expected int, got {}'.format(type(value)))
if value in _column_description:
raise CatboostError('The index {} occurs more than once'.format(value))
_column_description[value] = [_from_param_to_cd[key], '']
if feature_names is not None:
for feature_index, name in feature_names.items():
real_feature_index = feature_index
for column_index, (title, _) in sorted(_column_description.items()):
if column_index > real_feature_index:
break
if title not in ('Num', 'Categ'):
real_feature_index += 1
_column_description[real_feature_index][1] = name
with open(output_path, 'w') as f:
for index, (title, name) in sorted(_column_description.items()):
f.write('{}\t{}\t{}\n'.format(index, title, name))
def eval_metric(label, approx, metric, weight=None, group_id=None, thread_count=-1):
"""
Evaluate metrics with raw approxes and labels.
Parameters
----------
label : list or numpy.arrays or pandas.DataFrame or pandas.Series
Object labels.
approx : list or numpy.arrays or pandas.DataFrame or pandas.Series
Object approxes.
    metric : list of strings
List of eval metrics.
weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)
Object weights.
group_id : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)
Object group ids.
thread_count : int, optional (default=-1)
Number of threads to work with.
If -1, then the number of threads is set to the number of cores.
Returns
-------
metric results : list with metric values.
"""
if len(approx) == 0:
approx = [[]]
if not isinstance(approx[0], ARRAY_TYPES):
approx = [approx]
return _eval_metric_util(label, approx, metric, weight, group_id, thread_count)
def get_gpu_device_count():
return get_catboost_bin_module()._get_gpu_device_count()
def reset_trace_backend(filename):
get_catboost_bin_module()._reset_trace_backend(filename)
def get_roc_curve(model, data, thread_count=-1):
"""
Build points of ROC curve.
Parameters
----------
model : catboost.CatBoost
The trained model.
data : catboost.Pool or list of catboost.Pool
A set of samples to build ROC curve with.
thread_count : int (default=-1)
Number of threads to work with.
If -1, then the number of threads is set to the number of cores.
Returns
-------
curve points : tuple of three arrays (fpr, tpr, thresholds)
"""
if type(data) == Pool:
data = [data]
if not isinstance(data, list):
raise CatboostError('data must be a catboost.Pool or list of pools.')
for pool in data:
if not isinstance(pool, Pool):
raise CatboostError('one of data pools is not catboost.Pool')
return _get_roc_curve(model._object, data, thread_count)
def get_fpr_curve(model=None, data=None, curve=None, thread_count=-1):
"""
Build points of FPR curve.
Parameters
----------
model : catboost.CatBoost
The trained model.
data : catboost.Pool or list of catboost.Pool
A set of samples to build ROC curve with.
curve : tuple of three arrays (fpr, tpr, thresholds)
ROC curve points in format of get_roc_curve returned value.
If set, data parameter must not be set.
thread_count : int (default=-1)
Number of threads to work with.
If -1, then the number of threads is set to the number of cores.
Returns
-------
curve points : tuple of two arrays (thresholds, fpr)
"""
if curve is not None:
if data is not None:
raise CatboostError('Only one of the parameters data and curve should be set.')
if not (isinstance(curve, list) or isinstance(curve, tuple)) or len(curve) != 3:
raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')
fpr, thresholds = curve[0][:], curve[2][:]
else:
if model is None or data is None:
raise CatboostError('model and data parameters should be set when curve parameter is None.')
fpr, _, thresholds = get_roc_curve(model, data, thread_count)
return thresholds, fpr
def get_fnr_curve(model=None, data=None, curve=None, thread_count=-1):
"""
Build points of FNR curve.
Parameters
----------
model : catboost.CatBoost
The trained model.
data : catboost.Pool or list of catboost.Pool
A set of samples to build ROC curve with.
curve : tuple of three arrays (fpr, tpr, thresholds)
ROC curve points in format of get_roc_curve returned value.
If set, data parameter must not be set.
thread_count : int (default=-1)
Number of threads to work with.
If -1, then the number of threads is set to the number of cores.
Returns
-------
curve points : tuple of two arrays (thresholds, fnr)
"""
if curve is not None:
if data is not None:
raise CatboostError('Only one of the parameters data and curve should be set.')
if not (isinstance(curve, list) or isinstance(curve, tuple)) or len(curve) != 3:
raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')
tpr, thresholds = curve[1], curve[2][:]
else:
if model is None or data is None:
raise CatboostError('model and data parameters should be set when curve parameter is None.')
_, tpr, thresholds = get_roc_curve(model, data, thread_count)
fnr = np.array([1 - x for x in tpr])
return thresholds, fnr
def select_threshold(model=None, data=None, curve=None, FPR=None, FNR=None, thread_count=-1):
"""
Selects a threshold for prediction.
Parameters
----------
model : catboost.CatBoost
The trained model.
data : catboost.Pool or list of catboost.Pool
Set of samples to build ROC curve with.
If set, curve parameter must not be set.
curve : tuple of three arrays (fpr, tpr, thresholds)
ROC curve points in format of get_roc_curve returned value.
If set, data parameter must not be set.
FPR : desired false-positive rate
FNR : desired false-negative rate (only one of FPR and FNR should be chosen)
thread_count : int (default=-1)
Number of threads to work with.
If -1, then the number of threads is set to the number of cores.
Returns
-------
threshold : double
"""
if data is not None:
if curve is not None:
raise CatboostError('Only one of the parameters data and curve should be set.')
if model is None:
raise CatboostError('model and data parameters should be set when curve parameter is None.')
if type(data) == Pool:
data = [data]
if not isinstance(data, list):
raise CatboostError('data must be a catboost.Pool or list of pools.')
for pool in data:
if not isinstance(pool, Pool):
raise CatboostError('one of data pools is not catboost.Pool')
elif curve is not None:
if not (isinstance(curve, list) or isinstance(curve, tuple)) or len(curve) != 3:
raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')
else:
raise CatboostError('One of the parameters data and curve should be set.')
return _select_threshold(model._object, data, curve, FPR, FNR, thread_count)
| 35.939163 | 111 | 0.632988 | [
"Apache-2.0"
] | infected-mushroom/catboost | catboost/python-package/catboost/utils.py | 9,452 | Python |
"""Top-level package for Tinned Python."""
__author__ = """Tom Finill"""
__email__ = '[email protected]'
__version__ = '0.1.0'
| 21.666667 | 42 | 0.684615 | [
"MIT"
] | tmfnll/tinned_python | tinned_python/__init__.py | 130 | Python |
"""
Example code that implements a simple Neural Net predictor
for z_mode, and Gaussian centered at z_mode with base_width
read in from file and pdf width set to base_width*(1+zmode).
"""
import numpy as np
# from numpy import inf
import sklearn.neural_network as sknn
from sklearn.preprocessing import StandardScaler
from scipy.stats import norm
from rail.estimation.estimator import Estimator as BaseEstimation
def make_color_data(data_dict):
"""
make a dataset consisting of the i-band mag and the five colors
Returns:
--------
input_data: (nd-array)
array of imag and 5 colors
"""
input_data = data_dict['mag_i_lsst']
bands = ['u', 'g', 'r', 'i', 'z', 'y']
# make colors and append to input data
for i in range(5):
# replace the non-detect 99s with 28.0 just arbitrarily for now
band1 = data_dict[f'mag_{bands[i]}_lsst']
# band1err = data_dict[f'mag_err_{bands[i]}_lsst']
band2 = data_dict[f'mag_{bands[i+1]}_lsst']
# band2err = data_dict[f'mag_err_{bands[i+1]}_lsst']
# for j,xx in enumerate(band1):
# if np.isclose(xx,99.,atol=.01):
# band1[j] = band1err[j]
# band1err[j] = 1.0
# for j,xx in enumerate(band2):
# if np.isclose(xx,99.,atol=0.01):
# band2[j] = band2err[j]
# band2err[j] = 1.0
input_data = np.vstack((input_data, band1-band2))
return input_data.T
def regularize_data(data):
scaler = StandardScaler()
scaler.fit(data)
regularized_data = scaler.transform(data)
return regularized_data
class simpleNN(BaseEstimation):
"""
Subclass to implement a simple point estimate Neural Net photoz
rather than actually predict PDF, for now just predict point zb
and then put an error of width*(1+zb). We'll do a "real" NN
photo-z later.
"""
def __init__(self, base_config, config_dict):
"""
Parameters:
-----------
run_dict: dict
dictionary of all variables read in from the run_params
values in the yaml file
"""
super().__init__(base_config=base_config, config_dict=config_dict)
inputs = self.config_dict['run_params']
self.width = inputs['width']
self.zmin = inputs['zmin']
self.zmax = inputs['zmax']
self.nzbins = inputs['nzbins']
np.random.seed(71)
def inform(self):
"""
train the NN model
"""
speczs = self.training_data['redshift']
print("stacking some data...")
color_data = make_color_data(self.training_data)
input_data = regularize_data(color_data)
simplenn = sknn.MLPRegressor(hidden_layer_sizes=(12, 12),
activation='tanh', solver='lbfgs')
simplenn.fit(input_data, speczs)
self.model = simplenn
def estimate(self, test_data):
color_data = make_color_data(test_data)
input_data = regularize_data(color_data)
zmode = np.round(self.model.predict(input_data), 3)
pdfs = []
widths = self.width * (1.0+zmode)
self.zgrid = np.linspace(self.zmin, self.zmax, self.nzbins)
for i, zb in enumerate(zmode):
pdfs.append(norm.pdf(self.zgrid, zb, widths[i]))
pz_dict = {'zmode': zmode, 'pz_pdf': pdfs}
return pz_dict
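# A minimal, hypothetical sketch of the intended call sequence (the run_params
# values are illustrative assumptions; training and test dictionaries are supplied
# by the surrounding RAIL pipeline):
#
#     run_params = {"width": 0.05, "zmin": 0.0, "zmax": 3.0, "nzbins": 301}
#     pz = simpleNN(base_config, {"run_params": run_params})
#     pz.inform()                       # fit the MLP on colors vs. spectroscopic z
#     results = pz.estimate(test_data)  # {'zmode': ..., 'pz_pdf': ...}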
| 33.94 | 74 | 0.61756 | [
"MIT"
] | pwhatfield/RAIL | rail/estimation/algos/sklearn_nn.py | 3,394 | Python |
import os
PROJECT_PATH = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
SOURCE_DIR_PATH = os.path.join(PROJECT_PATH, 'doc', 'source', 'markdown_files')
| 41.25 | 79 | 0.769697 | [
"MIT"
] | williamfzc/md2doc | md2doc/utils/static.py | 165 | Python |
# problem 36
# Project Euler
__author__ = 'Libao Jin'
__date__ = 'July 17, 2015'
def rotateDigits(number):
string = str(number)
rotatedString = string[::-1]
rotatedNumber = int(rotatedString)
return rotatedNumber
def isSymmetrical(number):
rotatedNumber = rotateDigits(number)
if rotatedNumber == number:
return True
else:
return False
def toBinary(number):
string = bin(number)[2:]
bNumber = int(string)
return bNumber
def isPalindromic(number):
if isSymmetrical(number):
binaryNumber = toBinary(number)
if isSymmetrical(binaryNumber):
return True
return False
def doubleBasePalindromes(UPPER_BOUND):
number = 1
DBP = []
while number < UPPER_BOUND:
if isPalindromic(number):
DBP.append(number)
number += 1
pureDBP = DBP.copy()
sumDBP = sum(DBP)
for i,e in enumerate(DBP):
DBP[i] = (e, toBinary(e))
return (sumDBP, pureDBP, DBP)
def solution():
UPPER_BOUND = 1000000
DBP_Info = doubleBasePalindromes(UPPER_BOUND)
print(DBP_Info)
solution()
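# Quick check with the example from the problem statement: 585 is palindromic in
# base 10 and its binary form 1001001001 is also palindromic.
assert isPalindromic(585)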
| 19.057692 | 46 | 0.726539 | [
"MIT"
] | imthomasking/MATLAB-files | Python/Project.Euler/Answers.Python/36.py | 991 | Python |
"""
Django settings for testapp project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SITE_PATH = os.path.abspath(os.path.dirname(__file__))
MAP_WIDGETS_PATH = os.path.normpath(os.path.join(SITE_PATH, '..', '..', '..'))
if MAP_WIDGETS_PATH not in sys.path:
sys.path.insert(0, MAP_WIDGETS_PATH)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o6b2c!r921-+^h7jlm&4x#sn53qwfif+@8(!4b*csitx+69b=5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'mapwidgets',
'widgets'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'testapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'testapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'mapwidget_db',
'USER': 'mapwidgetdbu',
'PASSWORD': 'mapwidgetdbu',
'HOST': 'postgres',
'PORT': '5432',
}
}
if 'TRAVIS' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'travisci',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'assets/')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/uploads/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
SITE_DOMAIN = 'django'
TESTING = sys.argv[1:2] == ['test']
GOOGLE_MAP_API_KEY = os.environ.get('GOOGLE_MAP_API_KEY')
try:
from tests.testapp.testapp.settings_local import *
except:
pass
| 25.375758 | 91 | 0.666826 | [
"MIT"
] | felocru/django-map-widgets | tests/testapp/testapp/settings.py | 4,187 | Python |
##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import random
import unittest
import imath
import IECore
import IECoreScene
class MeshAlgoWindingTest( unittest.TestCase ) :
def makeSingleTriangleMesh( self ):
verticesPerFace = IECore.IntVectorData( [ 3 ] )
vertexIds = IECore.IntVectorData( [ 0, 1, 2 ] )
p = IECore.V3fVectorData( [ imath.V3f( 0, 0, 0 ), imath.V3f( 1, 0, 0 ), imath.V3f( 0, 1, 0 ) ] )
uv = IECore.V2fVectorData( [ imath.V2f( 0, 0 ), imath.V2f( 1, 0 ), imath.V2f( 0, 1 ) ] )
mesh = IECoreScene.MeshPrimitive( verticesPerFace, vertexIds, "linear", p )
mesh["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, uv )
mesh["foo"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.V2fVectorData( [ imath.V2f( 0, 0 ), imath.V2f( 0, 1 ), imath.V2f( 1, 0 ) ] ) )
prefData = IECore.V3fVectorData( [ imath.V3f( 0, 0, 0 ), imath.V3f( 0, -1, 0 ), imath.V3f( 1, 0, 0 ) ] )
mesh["Pref"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, prefData )
return mesh
def testSingleTriangle( self ) :
mesh = self.makeSingleTriangleMesh()
mesh.blindData()["test"] = IECore.IntData( 10 )
meshReversed = mesh.copy()
IECoreScene.MeshAlgo.reverseWinding( meshReversed )
# Meshes should be identical
self.assertEqual( meshReversed.interpolation, mesh.interpolation )
for interpolation in IECoreScene.PrimitiveVariable.Interpolation.values.values() :
self.assertEqual( meshReversed.variableSize( interpolation ), mesh.variableSize( interpolation ) )
self.assertEqual( mesh.keys(), meshReversed.keys() )
self.assertEqual( mesh["P"], meshReversed["P"] )
self.assertEqual( mesh["Pref"], meshReversed["Pref"] )
self.assertEqual( mesh.blindData(), meshReversed.blindData() )
# Except for vertex ids, and facevarying data
self.assertEqual( list( meshReversed.vertexIds ), list( reversed( mesh.vertexIds ) ) )
self.assertEqual( list( meshReversed["uv"].data ), list( reversed( mesh["uv"].data ) ) )
def testPlane( self ) :
mesh = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ), imath.V2i( 10 ) )
IECoreScene.TriangulateOp()( input = mesh, copyInput = False )
meshReversed = mesh.copy()
IECoreScene.MeshAlgo.reverseWinding( meshReversed )
evaluator = IECoreScene.MeshPrimitiveEvaluator( mesh )
evaluatorReversed = IECoreScene.MeshPrimitiveEvaluator( meshReversed )
result = evaluator.createResult()
resultReversed = evaluatorReversed.createResult()
for i in range( 0, 1000 ) :
p = imath.V3f( random.uniform( -1.0, 1.0 ), random.uniform( -1.0, 1.0 ), 0 )
evaluator.closestPoint( p, result )
evaluatorReversed.closestPoint( p, resultReversed )
self.assertEqual( resultReversed.normal(), -result.normal() )
reversedUV = resultReversed.vec2PrimVar( meshReversed["uv"] )
uv = result.vec2PrimVar( mesh["uv"] )
self.assertAlmostEqual( reversedUV[0], uv[0], delta = 0.0001 )
self.assertAlmostEqual( reversedUV[1], uv[1], delta = 0.0001 )
def testRoundTrip( self ) :
mesh = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ), imath.V2i( 10 ) )
meshReversed = mesh.copy()
IECoreScene.MeshAlgo.reverseWinding( meshReversed )
meshReversedAgain = meshReversed.copy()
IECoreScene.MeshAlgo.reverseWinding( meshReversedAgain )
self.assertEqual( mesh, meshReversedAgain )
def testUVIndices( self ) :
verticesPerFace = IECore.IntVectorData( [ 3 ] )
vertexIds = IECore.IntVectorData( [ 0, 1, 2 ] )
p = IECore.V3fVectorData( [ imath.V3f( 0, 0, 0 ), imath.V3f( 1, 0, 0 ), imath.V3f( 0, 1, 0 ) ] )
uv = IECore.V2fVectorData( [ imath.V2f( 0, 0 ), imath.V2f( 1, 0 ), imath.V2f( 0, 1 ) ] )
uvIndices = IECore.IntVectorData( [ 0, 1, 2 ] )
mesh = IECoreScene.MeshPrimitive( verticesPerFace, vertexIds, "linear", p )
mesh["uv"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, uv, uvIndices )
meshReversed = mesh.copy()
IECoreScene.MeshAlgo.reverseWinding( meshReversed )
# Meshes should be identical
self.assertEqual( meshReversed.interpolation, mesh.interpolation )
for interpolation in IECoreScene.PrimitiveVariable.Interpolation.values.values() :
self.assertEqual( meshReversed.variableSize( interpolation ), mesh.variableSize( interpolation ) )
self.assertEqual( mesh.keys(), meshReversed.keys() )
self.assertEqual( mesh["P"], meshReversed["P"] )
# UV indices should change, but UV data doesn't need to
self.assertEqual( meshReversed["uv"].data, mesh["uv"].data )
self.assertEqual( list( meshReversed["uv"].indices ), list( reversed( mesh["uv"].indices ) ) )
if __name__ == "__main__":
unittest.main()
| 43.738255 | 189 | 0.709069 | [
"BSD-3-Clause"
] | carlosg-ie/cortex | test/IECoreScene/MeshAlgoWindingTest.py | 6,517 | Python |
from sqlalchemy.sql import func as fn
from sqlalchemy import sql
from ..translate import (
SqlTranslator,
extend_base,
sql_scalar,
sql_agg,
win_agg,
win_cumul,
annotate
)
#from .postgresql import PostgresqlColumn, PostgresqlColumnAgg
from .base import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
# Data ----
class SnowflakeColumn(SqlColumn): pass
class SnowflakeColumnAgg(SqlColumnAgg, SnowflakeColumn): pass
# Translations ================================================================
@_dt.sql_func_last_day_in_period.register
def sql_func_last_day_in_period(codata: SnowflakeColumn, col, period):
return _dt.date_trunc(codata, col, period) \
+ sql.text("interval '1 %s'" % period) \
- sql.text("interval '1 day'")
# Scalar ----
extend_base(
SnowflakeColumn,
__floordiv__ = lambda _, x, y: fn.floor(x / y),
__rfloordiv__ = lambda _, x, y: fn.floor(y / x),
# connector has a bug with %
# see: https://github.com/snowflakedb/snowflake-sqlalchemy/issues/246
__mod__ = lambda _, x, y: fn.mod(x, y),
__rmod__ = lambda _, x, y: fn.mod(y, x),
mod = lambda _, x, y: fn.mod(x,y),
rmod = lambda _, x, y: fn.mod(y,x),
# TODO: str.contains
)
# Window ----
extend_base(
SnowflakeColumn,
all = win_agg("booland_agg"),
any = win_agg("boolor_agg"),
count = win_agg("count"),
cumsum = annotate(win_cumul("sum"), result_type="variable"),
# note that the number of decimal places Snowflake returns, and whether
# the result is numeric depends on the input. mark as variable, so tests
# do not check dtype
# see https://community.snowflake.com/s/question/0D50Z000079hpxvSAA/numeric-calculations-truncated-to-3-decimal-places
mean = annotate(win_agg("avg"), result_type="variable"),
std = win_agg("stddev_samp"),
sum = annotate(win_agg("sum"), result_type="variable"),
var = win_agg("var_samp"),
# str.contains
# dt methods are more like base
)
# Agg ----
extend_base(
SnowflakeColumnAgg,
all = sql_agg("booland_agg"),
any = sql_agg("boolor_agg"),
count = sql_agg("count"),
std = sql_agg("stddev_samp"),
var = sql_agg("var_samp"),
)
translator = SqlTranslator.from_mappings(
SnowflakeColumn, SnowflakeColumnAgg
)
| 27.034884 | 122 | 0.653333 | [
"MIT"
] | Techzune/siuba | siuba/sql/dialects/snowflake.py | 2,325 | Python |
from __future__ import unicode_literals
from __future__ import absolute_import
if False:
from typing import Type
import six
from smoke_tests.tools.compat import Path
from smoke_tests.tools.package.base_builder import AgentImageBuilder
AMAZONLINUX = "amazonlinux"
UBUNTU = "ubuntu"
ALL_DISTRIBUTION_NAMES = [AMAZONLINUX, UBUNTU]
def get_agent_distribution_builder(distribution, python_version):
# type: (six.text_type, six.text_type) -> Type[AgentImageBuilder]
"""
Find agent distribution docker image for smoke testing.
:param distribution: distribution name on which agent package should be installed.
Possible values are in the 'ALL_DISTRIBUTION_NAMES' constant.
:param python_version: Version of the python interpreter in the distribution.
"""
distribution = distribution.lower()
dockerfiles_directory_path = Path(__file__).parent / "distribution_dockerfiles"
fpm_builder_dockerfile_path = dockerfiles_directory_path / "Dockerfile.fpm_package_builder"
fpm_package_builder_dockerfile_content = fpm_builder_dockerfile_path.read_text()
if distribution == AMAZONLINUX:
class AmazonLinuxSmokeImageBuilder(AgentImageBuilder):
PYTHON_VERSION = python_version
COPY_AGENT_SOURCE = True
IMAGE_TAG = "scalyr_agent_smoke_{0}_{1}".format(
distribution, python_version
)
@classmethod
def get_dockerfile_content(cls): # type: () -> six.text_type
dockerfile_path = dockerfiles_directory_path / "Dockerfile.amazonlinux"
dockerfile_content = dockerfile_path.read_text()
return dockerfile_content.format(
fpm_package_builder_dockerfile=fpm_package_builder_dockerfile_content,
python_version=cls.PYTHON_VERSION,
)
return AmazonLinuxSmokeImageBuilder
elif distribution == UBUNTU:
class _UbuntuSmokeImageBuilder(AgentImageBuilder):
PYTHON_VERSION = python_version
COPY_AGENT_SOURCE = True
IMAGE_TAG = "scalyr_agent_smoke_{0}_{1}".format(
distribution, python_version
)
@classmethod
def get_dockerfile_content(cls): # type: () -> six.text_type
dockerfile_path = dockerfiles_directory_path / "Dockerfile.ubuntu"
dockerfile_content = dockerfile_path.read_text()
return dockerfile_content.format(
fpm_package_builder_dockerfile=fpm_package_builder_dockerfile_content,
python_package_name="python"
if cls.PYTHON_VERSION == "python2"
else cls.PYTHON_VERSION,
python_version=cls.PYTHON_VERSION,
)
return _UbuntuSmokeImageBuilder
else:
        raise IOError("Cannot find distribution: {0}".format(distribution))
| 35.566265 | 95 | 0.679878 | [
"Apache-2.0"
] | zak905/scalyr-agent-2 | smoke_tests/tools/package/__init__.py | 2,952 | Python |
import csv
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent))
from skimage.color import rgb2gray
import numpy as np
from tqdm import tqdm
from tadataka import VisualOdometry, CameraParameters
from tadataka.rigid import exp_se3, log_se3
from tadataka.projection import warp
from tadataka.mapping import MapBuilder
from tadataka.quaternion import quaternion_to_rotation
from tadataka.datasets.tum_rgbd import TUMDataset, PoseSequence
from visualization.plot import plot
# dataset format is explained at
# https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats#
# intrinsic_camera_calibration_of_the_kinect
dataset_root = Path("datasets", "rgbd_dataset_freiburg1_desk")
# dataset_root = Path("datasets", "rgbd_dataset_freiburg2_pioneer_360")
# dataset_root = Path("datasets", "rgbd_dataset_freiburg3_structure_texture_near")
def error(image_true, image_pred, mask):
return np.power(image_true[mask]-image_pred[mask], 2).mean()
def visualize_error_function(camera_parameters, I0, D0, I1, xi_pred):
def generate_error_curve(i, start, stop, n):
xi = np.copy(xi_pred)
vs = xi[i] + np.linspace(start, stop, n)
errors = []
for v in vs:
xi[i] = v
DG = exp_se3(xi)
estimated, mask = warp(camera_parameters, I1, D0, DG)
errors.append(error(I0, estimated, mask))
errors = np.array(errors)
return vs, errors
from matplotlib import pyplot as plt
fig = plt.figure()
for xi_index, ax_index in enumerate([1, 3, 5, 2, 4, 6]):
ax = fig.add_subplot(3, 2, ax_index)
vs, errors = generate_error_curve(xi_index,
start=-0.10, stop=0.10, n=101)
ax.set_title("Axis {}".format(xi_index+1))
ax.axvline(vs[np.argmin(errors)], label="ground truth")
ax.axvline(xi_pred[xi_index], color="red", label="prediction")
ax.legend()
ax.plot(vs, errors)
plt.show()
def main():
np.set_printoptions(suppress=True, precision=8, linewidth=1e8)
camera_parameters = CameraParameters(
focal_length=[525.0, 525.0],
offset=[319.5, 239.5]
)
dataset = TUMDataset(dataset_root)
G = np.eye(4)
frame0 = dataset.load_color(0)
sequence_pred = PoseSequence()
sequence_pred.add(frame0.timestamp_depth, G)
for i in tqdm(range(1, dataset.size)):
frame1 = dataset.load_color(i)
# TODO not necessary to convert the color of the same image twice
# we need to create a better interface
vo = VisualOdometry(camera_parameters,
rgb2gray(frame0.image), frame0.depth_map,
rgb2gray(frame1.image))
DG = vo.estimate_motion(n_coarse_to_fine=6)
G = np.dot(G, np.linalg.inv(DG))
sequence_pred.add(frame1.timestamp_depth, G)
frame0 = frame1
sequence_pred.save("poses.txt")
# TODO implement the following
# pointcloud = map_builder.export()
# export_pointcloud(pointcloud)
if __name__ == "__main__":
    main()
| 29.47619 | 82 | 0.668498 | [
"Apache-2.0"
] | IshitaTakeshi/DVO | examples/rgbd_desk.py | 3,095 | Python |
from datetime import date
ano_nascimento = int(input('Enter your year of birth: '))
ano_atual = date.today().year
idade = abs(ano_nascimento - ano_atual)
falta1 = (18 - idade)
falta2 = (idade - 18)
if idade < 18:
    print('You still have to enlist; {} years to go'.format(abs(falta1)))
    ano = falta1 + ano_atual
    print('Your enlistment will be in {}'.format(ano))
elif idade == 18:
    print('It is time for you to enlist')
elif idade > 18:
    print('You are {} years past enlistment time'.format(abs(falta2)))
    ano = ano_atual - falta2
    print('Your enlistment was in {}'.format(ano))
| 31.15 | 75 | 0.654896 | [
"Apache-2.0"
] | thaisouza30/Exercicios-Python3-Curso-em-Video | ex039.py | 626 | Python |
DEPS = [
'archive',
'chromium',
'chromium_android',
'depot_tools/bot_update',
'depot_tools/gclient',
'depot_tools/tryserver',
'file',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
'trigger',
]
# TODO(phajdan.jr): provide coverage (http://crbug.com/693058).
DISABLE_STRICT_COVERAGE = True
| 19.6 | 63 | 0.701531 | [
"BSD-3-Clause"
] | bopopescu/chromium-build | scripts/slave/recipe_modules/libyuv/__init__.py | 392 | Python |
from PIL import Image
import numpy
import cv2
from random import randint
"""
A plugin used to create a set of variable specifications for permutation groups.
"""
def transform(img,source,target,**kwargs):
cv_image = numpy.array(img)
shape = cv_image.shape
snapto8 = 'eightbit_boundary' in kwargs and kwargs['eightbit_boundary'] == 'yes'
percentageWidth = float(kwargs['percentage_width'])
percentageHeight = float(kwargs['percentage_height'])
divisionsWidth = float(kwargs['divisions_width'] if 'divisions_width' in kwargs else shape[1])
divisionsHeight = float(kwargs['divisions_height'] if 'divisions_height' in kwargs else shape[0])
pixelWidth = int(shape[1] * percentageWidth)
pixelHeight = int(shape[0] * percentageHeight)
if snapto8:
pixelWidth = pixelWidth - pixelWidth % 8
pixelHeight = pixelHeight - pixelHeight % 8
incrementsWidth = max(8,int(pixelWidth/divisionsWidth))
incrementsHeight = max(8,int(pixelHeight/divisionsHeight))
crop_x = { "type": "list", "values" : [i for i in xrange(incrementsWidth,pixelWidth,incrementsWidth)]}
crop_y = { "type": "list", "values" : [i for i in xrange(incrementsHeight, pixelHeight, incrementsHeight)]}
return {'crop_x':crop_x,'crop_y':crop_y, 'crop_width':pixelWidth,'crop_height':pixelHeight},None
def operation():
return {
'category': 'Select',
'name': 'SelectRegion',
'description':'Select a region to crop',
'software':'OpenCV',
'version':cv2.__version__,
'arguments':{'percentage_width':
{'type': "float[0:0.5]", 'description':'the percentage of pixels to remove horizontal'},
'percentage_height':
{'type': "float[0:0.5]", 'description':'the percentage of pixels to remove vertically'},
'divisions_width':
{'type': "int[0:100000]", 'description': 'the number samples in the x direction'},
'divisions_height':
{'type': "int[0:100000]", 'description': 'the number of samples in the y direction'},
'eightbit_boundary':
{'type': "yesno", 'defaultvalue':'no', 'description':'Snap to 8 bit boundary'}
},
'transitions': [
'image.image'
]
}
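# A minimal, hypothetical invocation sketch (the image path and parameter values are
# assumptions; the maskgen framework normally supplies them):
#
#     img = Image.open('sample.png')
#     spec, _ = transform(img, 'sample.png', 'sample_out.png',
#                         percentage_width=0.2, percentage_height=0.2,
#                         divisions_width=4, divisions_height=4,
#                         eightbit_boundary='yes')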
| 48.22 | 115 | 0.608461 | [
"BSD-3-Clause"
] | spongezhang/maskgen | plugins/CropPermutations/__init__.py | 2,411 | Python |
# -*- coding: utf-8 -*-
# Copyright 2017-2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from tbears.block_manager.block_manager import PRepManager
from tbears.config.tbears_config import keystore_test1
PREP_LIST = [
{
"id": "hx86aba2210918a9b116973f3c4b27c41a54d5dafe",
"publicKey": "04a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26",
"p2pEndPoint": "target://123.45.67.89:7100"
},
{
"id": "hx13aca3210918a9b116973f3c4b27c41a54d5dad1",
"publicKey": "0483ae642ca89c9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281e3a27",
"p2pEndPoint": "target://210.34.56.17:7100"
}
]
class TestTBearsPRepManager(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_prev_block_contributors_info(self):
# There is no P-Reps
manager = PRepManager(is_generator_rotation=True, gen_count_per_leader=1)
info = manager.get_prev_block_contributors_info()
self.assertEqual(keystore_test1.get('address'), info.get('prevBlockGenerator'))
self.assertEqual(0, len(info.get('prevBlockValidators')))
# There is 2 P-Reps
manager = PRepManager(is_generator_rotation=True, gen_count_per_leader=1, prep_list=PREP_LIST)
info = manager.get_prev_block_contributors_info()
self.assertEqual(PREP_LIST[0].get('id'), info.get('prevBlockGenerator'))
self.assertEqual(len(PREP_LIST) - 1, len(info.get('prevBlockValidators')))
self.assertEqual(PREP_LIST[1].get('id'), info.get('prevBlockValidators')[0])
# after rotate
info = manager.get_prev_block_contributors_info()
self.assertEqual(PREP_LIST[1].get('id'), info.get('prevBlockGenerator'))
self.assertEqual(len(PREP_LIST) - 1, len(info.get('prevBlockValidators')))
self.assertEqual(PREP_LIST[0].get('id'), info.get('prevBlockValidators')[0])
| 41.645161 | 154 | 0.731603 | [
"Apache-2.0"
] | Transcranial-Solutions/t-bears | tests/test_prep_manager.py | 2,582 | Python |
# Python function to manipulate OpenFOAM files
# Developer: Jian-Xun Wang ([email protected])
###############################################################################
# system import
import numpy as np
import numpy.matlib
import sys # Add extra path/directory
import os
import os.path as ospt
import shutil
import subprocess # Call the command line
from subprocess import call
import matplotlib.pyplot as plt # For plotting
import re
import tempfile
import pdb
from matplotlib import pyplot as plt
# local import
from PIL import Image
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neural_network import MLPRegressor
import multiprocessing
from functools import partial
import time
import scipy.sparse as sp
global unitTest
unitTest = False;
def readVectorFromFile(UFile):
"""
Arg:
    UFile: The directory path of an OpenFOAM vector file (e.g., velocity)
    Return:
    vector: Matrix of vector values
"""
resMid = extractVector(UFile)
fout = open('Utemp', 'w');
glob_pattern = resMid.group()
glob_pattern = re.sub(r'\(', '', glob_pattern)
glob_pattern = re.sub(r'\)', '', glob_pattern)
fout.write(glob_pattern)
fout.close();
vector = np.loadtxt('Utemp')
return vector
def readScalarFromFile(fileName):
"""
Arg:
fileName: The file name of OpenFOAM scalar field
    Return:
    a vector of the scalar field
"""
resMid = extractScalar(fileName)
# write it in Tautemp
fout = open('temp.txt', 'w')
glob_patternx = resMid.group()
glob_patternx = re.sub(r'\(', '', glob_patternx)
glob_patternx = re.sub(r'\)', '', glob_patternx)
fout.write(glob_patternx)
fout.close();
scalarVec = np.loadtxt('temp.txt')
return scalarVec
################################################ Regular Expression #####################################################
def extractVector(vectorFile):
    """ Uses a regular expression to select the vector values
    Args:
    vectorFile: The directory path of the vector file (e.g., U)
Returns:
resMid: the U as (Ux1,Uy1,Uz1);(Ux2,Uy2,Uz2);........
"""
fin = open(vectorFile, 'r') # need consider directory
line = fin.read() # line is U file to read
fin.close()
### select U as (X X X)pattern (Using regular expression)
patternMid = re.compile(r"""
(
\( # match(
[\+\-]?[\d]+([\.][\d]*)?([Ee][+-]?[\d]+)? # match figures
(\ ) # match space
[\+\-]?[\d]+([\.][\d]*)?([Ee][+-]?[\d]+)? # match figures
(\ ) # match space
[\+\-]?[\d]+([\.][\d]*)?([Ee][+-]?[\d]+)? # match figures
\) # match )
\n # match next line
)+ # search greedly
""",re.DOTALL | re.VERBOSE)
resMid = patternMid.search(line)
return resMid
def extractScalar(scalarFile):
    """ Helper for readScalarFromFile
    Uses a regular expression to select the scalar values
Args:
scalarFile: The directory path of file of scalar
Returns:
resMid: scalar selected;
    you need to use resMid.group() to see the content.
"""
fin = open(scalarFile, 'r') # need consider directory
line = fin.read() # line is k file to read
fin.close()
### select k as ()pattern (Using regular expression)
patternMid = re.compile(r"""
\( # match"("
\n # match next line
(
[\+\-]?[\d]+([\.][\d]*)?([Ee][+-]?[\d]+)? # match figures
\n # match next line
)+ # search greedly
\) # match")"
""",re.DOTALL | re.VERBOSE)
resMid = patternMid.search(line)
return resMid
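# A minimal, hypothetical usage sketch (the OpenFOAM case paths are assumptions):
#
#     U = readVectorFromFile('caseDir/0/U')   # N x 3 array of velocity vectors
#     p = readScalarFromFile('caseDir/0/p')   # 1-D array of scalar values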
| 27.205479 | 122 | 0.542296 | [
"MIT"
] | Jianxun-Wang/PICNNSR | demo0/foamFileOperation.py | 3,972 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from test_framework.test_framework import SchleemsTestFramework
from test_framework.util import *
class MempoolLimitTest(SchleemsTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]
def run_test(self):
txouts = gen_return_txouts()
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
txids = []
utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
#create a mempool tx that will be evicted
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0) # return to automatic fee selection
txFS = self.nodes[0].signrawtransaction(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
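        # Flood the mempool with three batches of 30 large transactions at
        # increasing fee rates; once the 5 MB -maxmempool limit is exceeded,
        # the low-fee transaction created above should be evicted first.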
        for i in range(3):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
# by now, the tx should be evicted, check confirmation state
assert(txid not in self.nodes[0].getrawmempool())
txdata = self.nodes[0].gettransaction(txid)
assert(txdata['confirmations'] == 0) #confirmation should still be 0
if __name__ == '__main__':
MempoolLimitTest().main()
| 42.148936 | 118 | 0.664816 | [
"MIT"
] | BlenderSleuth/schleems | test/functional/mempool_limit.py | 1,981 | Python |
import numpy as np
def evaluate(X: np.ndarray, A: float = 7.0, B: float = 0.1) -> np.ndarray:
"""Non-monotonic Ishigami-Homma three parameter test function:
`f(x) = \sin(x_{1}) + A \sin(x_{2})^2 + Bx^{4}_{3}\sin(x_{1})`
This test function is commonly used to benchmark global sensitivity
methods as variance-based sensitivities of this function can be
analytically determined.
See listed references below.
In [2], the expected first-order indices are:
x1: 0.3139
x2: 0.4424
x3: 0.0
when A = 7, B = 0.1 when conducting Sobol' analysis with the
Saltelli sampling method with a sample size of 1000.
Parameters
----------
X : np.ndarray
An `N*D` array holding values for each parameter, where `N` is the
number of samples and `D` is the number of parameters
(in this case, three).
A : float
Constant `A` parameter
B : float
Constant `B` parameter
Returns
-------
Y : np.ndarray
References
----------
.. [1] Ishigami, T., Homma, T., 1990.
An importance quantification technique in uncertainty analysis for
computer models.
Proceedings. First International Symposium on Uncertainty Modeling
and Analysis.
https://doi.org/10.1109/ISUMA.1990.151285
.. [2] Saltelli, A., Ratto, M., Andres, T., Campolongo, F., Cariboni, J.,
Gatelli, D., Saisana, M., Tarantola, S., 2008.
Global Sensitivity Analysis: The Primer. Wiley, West Sussex, U.K.
https://dx.doi.org/10.1002/9780470725184
"""
    Y = np.sin(X[:, 0]) + A * np.power(np.sin(X[:, 1]), 2) + \
        B * np.power(X[:, 2], 4) * np.sin(X[:, 0])
return Y
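# A minimal usage sketch; uniform sampling on [-pi, pi]^3 is the conventional
# input domain for the Ishigami function, and the sample size and seed below
# are arbitrary illustrative choices, not part of the SALib API.
if __name__ == "__main__":
    rng = np.random.default_rng(seed=0)
    X = rng.uniform(-np.pi, np.pi, size=(1000, 3))  # 1000 samples of the 3 parameters
    Y = evaluate(X)                                 # uses the defaults A=7.0, B=0.1
    print(Y.shape, float(Y.mean()), float(Y.var()))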
| 31.457627 | 79 | 0.566272 | [
"MIT"
] | QianWanghhu/SALib | src/SALib/test_functions/Ishigami.py | 1,856 | Python |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
class ResizeNearestNeighborAlignCornerT(nn.Cell):
def __init__(self, size):
super(ResizeNearestNeighborAlignCornerT, self).__init__()
self.ResizeNearestNeighborAlignCornerT = P.ResizeNearestNeighbor(size, align_corners=True)
def construct(self, x):
return self.ResizeNearestNeighborAlignCornerT(x)
class ResizeNearestNeighborAlignCornerF(nn.Cell):
def __init__(self, size):
super(ResizeNearestNeighborAlignCornerF, self).__init__()
self.ResizeNearestNeighborAlignCornerF = P.ResizeNearestNeighbor(size, align_corners=False)
def construct(self, x):
return self.ResizeNearestNeighborAlignCornerF(x)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ResizeNearestNeighborAlignCornerT():
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float32))
expect = np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float32)
rnn = ResizeNearestNeighborAlignCornerT((4, 4))
output = rnn(input_tensor)
assert np.all(output.asnumpy() == expect)
input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float16))
expect = np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float16)
rnn = ResizeNearestNeighborAlignCornerT((4, 4))
output = rnn(input_tensor)
assert np.all(output.asnumpy() == expect)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ResizeNearestNeighborAlignCornerF():
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float32))
expect = np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float32)
rnn = ResizeNearestNeighborAlignCornerF((4, 4))
output = rnn(input_tensor)
assert np.all(output.asnumpy() == expect)
input_tensor = Tensor(np.array([[[[1, 0], [0, 1]]]]).astype(np.float16))
expect = np.array([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]).astype(np.float16)
rnn = ResizeNearestNeighborAlignCornerF((4, 4))
output = rnn(input_tensor)
assert np.all(output.asnumpy() == expect)
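# A minimal NumPy sketch of one common nearest-neighbour index-mapping
# convention that reproduces the 4x4 outputs asserted above; it is only an
# illustration and not necessarily the exact mapping used by MindSpore's kernel.
def nn_resize_2d(img, out_size, align_corners):
    in_size = img.shape[0]
    dst = np.arange(out_size)
    if align_corners:
        src = np.round(dst * (in_size - 1) / (out_size - 1)).astype(int)
    else:
        src = np.floor(dst * in_size / out_size).astype(int)
    return img[np.ix_(src, src)]
# e.g. nn_resize_2d(np.array([[1, 0], [0, 1]]), 4, align_corners=False) gives
# [[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]].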
| 43.680556 | 102 | 0.683625 | [
"Apache-2.0"
] | Gavin-Hoang/mindspore | tests/st/ops/gpu/test_resize_nearest_neighbor_op.py | 3,145 | Python |