hexsha (stringlengths 40–40) | size (int64 4–1.02M) | ext (stringclasses 8 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4–209) | max_stars_repo_name (stringlengths 5–121) | max_stars_repo_head_hexsha (stringlengths 40–40) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24, nullable) | max_issues_repo_path (stringlengths 4–209) | max_issues_repo_name (stringlengths 5–121) | max_issues_repo_head_hexsha (stringlengths 40–40) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–67k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24, nullable) | max_forks_repo_path (stringlengths 4–209) | max_forks_repo_name (stringlengths 5–121) | max_forks_repo_head_hexsha (stringlengths 40–40) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24, nullable) | content (stringlengths 4–1.02M) | avg_line_length (float64 1.07–66.1k) | max_line_length (int64 4–266k) | alphanum_fraction (float64 0.01–1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
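Each row below describes one source file using the columns above: repository metadata, license, star/issue/fork activity, the file's `content`, and simple length statistics. As an illustration only, assuming the rows are exported to a local Parquet file named `rows.parquet` (a hypothetical name, not part of this dump), they could be loaded and filtered like this:

```python
# Sketch only: load a hypothetical local export of these rows and filter it.
# Column names follow the schema above; the file name is an assumption.
import pandas as pd

df = pd.read_parquet("rows.parquet")          # hypothetical export of the rows below
py_files = df[df["ext"] == "py"]              # 'ext' column from the schema
small = py_files[py_files["size"] < 2_000]    # 'size' is the length of 'content'
print(small[["max_stars_repo_name", "max_stars_repo_path", "max_stars_count"]].head())
```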
18e8503b78654356f0c5aa6679c6af2a2edc0171 | 623 | py | Python | zom/manage.py | Neha-Prabhu/Zomato-clone | 4d57f784c6de91780a171fe4e2853ccb20c8e8ca | ["MIT"] | null | null | null | zom/manage.py | Neha-Prabhu/Zomato-clone | 4d57f784c6de91780a171fe4e2853ccb20c8e8ca | ["MIT"] | null | null | null | zom/manage.py | Neha-Prabhu/Zomato-clone | 4d57f784c6de91780a171fe4e2853ccb20c8e8ca | ["MIT"] | 1 | 2020-08-09T11:54:26.000Z | 2020-08-09T11:54:26.000Z |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zom.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 28.318182 | 73 | 0.680578 |
294397673ffc2d8e60a9a7d65c476eb07bff12cc | 255 | py | Python | computer_vision/object_measurement/capture_image.py | Gabriellgpc/my_personal_projects | 0d23d31dbb61538bac1ff8bf75d5c3cd5701f833 | ["MIT"] | null | null | null | computer_vision/object_measurement/capture_image.py | Gabriellgpc/my_personal_projects | 0d23d31dbb61538bac1ff8bf75d5c3cd5701f833 | ["MIT"] | null | null | null | computer_vision/object_measurement/capture_image.py | Gabriellgpc/my_personal_projects | 0d23d31dbb61538bac1ff8bf75d5c3cd5701f833 | ["MIT"] | 1 | 2021-04-08T14:44:53.000Z | 2021-04-08T14:44:53.000Z |
import cv2

video = cv2.VideoCapture('/dev/video0')

while True:
    ok, img = video.read()
    cv2.imshow('Camera', img)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
    if key == ord('s'):
        cv2.imwrite('imagem.png', img)
| 19.615385 | 39 | 0.556863 |
87849717de45da49562e5a74545d3d7563e8948d | 34,997 | py | Python | src/m1.py | jenhong20/CircleChanger | 3560f21966982a9def353acd2185c4c7a358cd8e | ["MIT"] | null | null | null | src/m1.py | jenhong20/CircleChanger | 3560f21966982a9def353acd2185c4c7a358cd8e | ["MIT"] | null | null | null | src/m1.py | jenhong20/CircleChanger | 3560f21966982a9def353acd2185c4c7a358cd8e | ["MIT"] | null | null | null |
"""
A problem in which to practice:
-- IMPLEMENTING a CLASS
-- using SEQUENCES
Authors: Valerie Galluzzi, David Mutchler, Dave Fisher, Amanda Stouder,
their colleagues and YOUR_NAME_HERE.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
import random
import sys
import time
import m1t_test_CircleChanger as m1_tests
########################################################################
# IMPORTANT:
# Your instructor will help you get started on this exercise.
########################################################################
def main():
"""
Calls the TEST functions in this module, but ONLY if
the method to be tested has at least a partial implementation.
That is, a TEST function will not be called
until you begin work on the code that it is testing.
"""
if m1_tests.is_implemented('__init__', 20):
run_test_init()
if m1_tests.is_implemented('get_distance_from'):
run_test_get_distance_from()
if m1_tests.is_implemented('swell_or_shrink_once'):
run_test_swell_or_shrink_once()
if m1_tests.is_implemented('swell_or_shrink_repeatedly', 4):
run_test_swell_or_shrink_repeatedly()
if m1_tests.is_implemented('swallow'):
run_test_swallow()
if m1_tests.is_implemented('change_color'):
run_test_change_color()
if m1_tests.is_implemented('change_to_original_color'):
run_test_change_to_original_color()
if m1_tests.is_implemented('change_to_next_color_in_tuple'):
run_test_change_to_next_color_in_tuple()
########################################################################
# The CircleChanger class (and its methods) begins here.
########################################################################
class CircleChanger(object):
"""
A CircleChanger has an rg.Circle and a list of colors.
Methods can draw the circle and change its characteristics,
"""
def __init__(self, x, y, radius, fill_color, colors):
"""
What comes in:
-- self
-- Three integers: x, y and radius
-- A string (or other value) that represents a color
in RoseGraphics
-- A non-empty tuple of strings that represent colors
in RoseGraphics, e.g.
('blue', 'gray', 'light green', 'yellow', 'black')
What goes out: Nothing (i.e., None).
Side effects:
-- Constructs an rg.Circle whose:
-- center is an rg.Point at the given x and y
-- radius is the given radius
-- fill_color is the given fill color
-- Stores that rg.Circle in the instance variable:
circle
-- Stores the given tuple of colors in the instance variable:
colors
-- [Eventually] Sets additional instance variables
as needed for other methods.
Example: See run_test_init below for an example.
Type hints:
:type x: int
:type y: int
:type radius: int
:type fill_color: str
:type colors: sequence of str
"""
self.animation_factor = 1 # Smaller => faster animations
self.seconds_to_sleep = 0.5 # Default for each call to draw
# --------------------------------------------------------------
# Change the above "animation_factor" if the animations
# go too fast or too slow for your tastes. Setting it to N
# makes the animations go N times SLOWER.
# --------------------------------------------------------------
################################################################
# TODO: 2.
# First, READ the doc-string (specification) above.
# Second, READ the run_test_init function (below).
# Third, implement and test this method.
#
# Each TEST function gives an EXAMPLE that helps you understand
# the SPECIFICATION of the method. That is why you read the
# TEST function before implementing the method that it tests.
################################################################
def __repr__(self):
"""
What comes in:
-- self
What comes out:
Returns a string that represents this CircleChanger.
The string has the form of the following example:
ANIMATED_CIRCLE:
Circle: center=(10, 23), radius=50, fill_color=None,
outline_color=black, outline_thickness=1.
colors: ('blue', 'gray', 'light green', 'black')
Side effects: None.
"""
# --------------------------------------------------------------
# We have already implemented this __repr__ function for you.
# Do NOT modify it.
# --------------------------------------------------------------
# Break the string for the circle into two lines:
circle_string = repr(self.circle)
circle_string = circle_string.replace(' outline_color',
('\n' +
' ' +
'outline_color'))
# Use lower-case C for 'circle' to match instance-variable name:
circle_string = circle_string.replace('Circle', 'circle')
s = 'CircleChanger:\n'
s = s + ' ' + circle_string + '\n'
s = s + ' ' + 'colors: ' + repr(self.colors)
return s
def draw(self, message=None):
# ###### DO NOT MODIFY THIS METHOD #######
"""
What comes in:
-- self
-- an optional message
What comes out: Nothing (i.e., None)
Side effects:
This method draws and renders this CircleChanger
on an rg.RoseWindow.
Then, if message is:
None (the default):
This method pauses for the default number of seconds
(multiplied by self.animation_factor).
Any number:
This method pauses for the given number of seconds
(multiplied by self.animation_factor).
There is no pause if the number is 0.
Anything else:
This method prints the message on the rg.RoseWindow
and waits for the user to click the mouse.
"""
# --------------------------------------------------------------
# We have already implemented this draw function for you.
# Do NOT modify it.
#
# You can do this entire exercise knowing only that draw
# "Draws this CircleChanger on a window,
# pausing briefly after doing so."
#
# per the doc_string (specification) above.
# You do NOT need to know HOW draw does its work.
#
# But feel free to ask your instructor about it if you
# are curious as to how draw works.
# --------------------------------------------------------------
# ###### DO NOT MODIFY THIS METHOD #######
m1_tests.draw(self, message)
def get_distance_from(self, point):
"""
What comes in:
-- self
-- An rg.Point
What goes out: Returns the distance between the center of
this CircleChanger's circle and the given rg.Point
Side effects: None.
Example: See run_test_get_distance_from below for examples.
Type hints:
:type point: rg.Point
"""
################################################################
# TODO: 3.
# First, READ the doc-string (specification) above.
# Second, READ the run_test_get_distance_from function
# (below). Third, implement and test this method.
#
# *** YOU ** MUST ** USE the relevant method
# of the rg.Point class to compute this distance.
# NO CREDIT if you use the distance formula here.
################################################################
def swell_or_shrink_once(self, amount_to_swell_or_shrink):
"""
What comes in:
-- self
-- An integer that indicates how much this CircleChanger's
circle is to swell or shrink by, that is, how much
this CircleChanger's circle's radius is to change by.
What goes out: Nothing (i.e., None).
Side effects:
The following happens IN THE ORDER LISTED.
1. The radius of this CircleChanger's circle changes
by adding the given amount_to_swell_or_shrink to it.
So the circle:
-- swells if amount_to_swell_or_shrink is positive
-- shrinks if amount_to_swell_or_shrink is negative.
Exception: If the change would make the radius
LESS THAN 1, the radius should be set to 1.
2. The outline_thickness of this CircleChanger's circle
is set to a random number chosen from [3, 15]
(that is, between 3 and 15, inclusive).
3. The fill color of this CircleChanger's circle
changes to a color chosen at random from this
CircleChanger's list of colors.
Example: See run_test_swell_or_shrink_once below for examples.
Type hints:
:type amount_to_swell_or_shrink: int
"""
################################################################
# TODO: 4.
# First, READ the doc-string (specification) above.
# Second, READ the run_test_swell_or_shrink_once function
# (below). Third, implement and test this method.
################################################################
#
# IMPORTANT ** HINT ** Regarding randomness:
#
# The following statement chooses a random number between
# 3 and 15, inclusive:
# r = random.randrange(3, 16)
# Note the 16 -- randrange is like range in that
# it does NOT include the ending number.
# Use the above to help set the outline_thickness.
#
# To get a random COLOR, you need to get a random INDEX
# into the self.colors tuple. So something like:
# r_index = random.randrange(0, ??)
#
# where you figure out what ?? must be, and then
#
# r_color = self.colors[r_index]
#
# will give a randomly chosen color from self.colors.
#
# Simply ** ASK FOR HELP **
# if this does not make sense to you.
################################################################
def swell_or_shrink_repeatedly(self,
amount_to_swell_or_shrink,
times_to_swell_or_shrink):
"""
What comes in:
-- self
-- Two integers (see Side effects below for what they do)
What goes out: Nothing (i.e., None).
Side effects:
Does the following 4 steps repeatedly:
1. Calls its
swell_or_shrink_once
method with the given amount_to_swell_or_shrink.
2. Draws this CircleChanger
(by using its draw method with no arguments).
3. Calls its
swell_or_shrink_once
method with an argument that will UNDO the amount
of swell/shrink it just did.
4. Draws this CircleChanger
(by using its draw method with no arguments).
The argument times_to_swell_or_shrink says how many times
to do the above 4 steps.
Example:
Suppose that the radius of this CircleChanger's circle
is currently 50. Suppose further that:
-- amount_to_swell_or_shrink is 10
-- times_to_swell_or_shrink is 3.
Then this method would:
-- Increase the circle's radius to 60.
Draw the CircleChanger
(by using its draw method with no arguments).
-- Decrease the circle's radius back to 50.
Draw the CircleChanger again.
-- Increase the circle's radius to 60.
Draw the CircleChanger again.
-- Decrease the circle's radius back to 50.
Draw the CircleChanger again.
-- Increase the circle's radius to 60.
Draw the CircleChanger again.
-- Decrease the circle's radius back to 50.
Draw the CircleChanger again.
So, 3 times it did an increase by 10 followed by a decrease
by 10. Since the draw method pauses briefly each time
it is called, this creates an animation.
Type hints:
:type amount_to_swell_or_shrink: int
:type times_to_swell_or_shrink: int
"""
################################################################
# TODO: 5.
# First, READ the doc-string (specification) above.
# Second, READ the run_test_swell_or_shrink_repeatedly function
# (below). Third, implement and test this method.
################################################################
def swallow(self, other_circle_changer):
"""
What comes in:
-- self
-- Another CircleChanger
What goes out:
-- Returns a new CircleChanger:
-- whose circle is a new rg.Circle that:
-- is centered at the point that is HALFWAY between
the center of this CircleChanger's circle and
the center of the other CircleChanger's circle.
-- has radius that is HALF of the DISTANCE from:
-- the center of this CircleChanger's circle to
-- the center of the other CircleChanger's circle.
-- has 'red' as its fill color
-- whose tuple of colors is a new tuple
that is this CircleChanger's tuple of colors
plus (that is, concatenated with)
the other CircleChanger's tuple of colors.
Side effects: None.
Example: See run_test_swallow below for examples.
Type hints:
:type other_circle_changer: CircleChanger
:rtype CircleChanger
"""
################################################################
# TODO: 6.
# First, READ the doc-string (specification) above.
# Second, READ the run_test_swallow function (below).
# Third, implement and test this method.
#
# *** YOU ** MUST ** USE the relevant method(s)
# of the rg.Point class AND this class to compute
# the center and radius of the new CircleChanger.
# NO CREDIT if you use the distance formula here.
################################################################
def change_color(self, index_of_color):
"""
What comes in:
-- self
-- A nonnegative integer that is less than the length
of this CircleChanger's tuple of colors.
What goes out: Nothing (i.e., None).
Side effects:
-- The fill_color of this CircleChanger's circle becomes
the color in this CircleChanger's tuple of colors
whose index is the given index_of_color.
Example:
If this CircleChanger's tuple of colors is:
('blue', 'gray', 'green', 'black')
and if index_of_color is 2,
then the fill_color of this CircleChanger becomes 'green'.
Type hints:
:type index_of_color: int
"""
################################################################
# TODO: 7.
# First, READ the doc-string (specification) above.
# Second, READ the run_test_change_color function (below).
# Third, implement and test this method.
################################################################
def change_to_original_color(self):
"""
What comes in:
-- self
What goes out: Nothing (i.e., None).
Side effects:
-- The fill_color of this CircleChanger's circle becomes
the same color that it was when this CircleChanger
was constructed.
"""
################################################################
# TODO: 8.
# First, READ the doc-string (specification) above.
# Second, READ the run_test_change_to_original_color function
# (below). Third, implement and test this method.
################################################################
def change_to_next_color_in_tuple(self):
"""
What comes in:
-- self
What goes out: Nothing (i.e., None).
Side effects:
-- The first time that this method is called, this
CircleChanger's circle's fill color changes to the first
(beginning) color in this CircleChanger's tuple of colors.
-- Each time thereafter that this method is called, this
CircleChanger's circle's fill color changes to the NEXT
color in this CircleChanger's tuple of colors.
-- It "wraps" when it reaches the end of the list.
Example:
If this CircleChanger's tuple of colors is:
('blue', 'gray', 'red')
then:
--- The first time this method is called,
it sets this CircleChanger's circle's fill color
to 'blue'.
--- The second time this method is called,
it sets this CircleChanger's circle's fill color
to 'gray'.
--- The third time ... to 'red'.
--- The fourth time ... to 'blue'.
--- The fifth time ... to 'gray'.
--- The sixth time ... to 'red'.
--- The seventh time ... to 'blue'.
--- and so forth.
Note: Other methods that affect this CircleChanger's circle's
fill color have no effect on or interaction with this method.
"""
################################################################
# TODO: 9.
# First, READ the doc-string (specification) above.
# Second, READ the run_test_change_to_next_color_in_tuple
# function (below). Third, implement and test this method.
################################################################
########################################################################
# The TEST functions for the CircleChanger class begin here.
########################################################################
def run_test_init():
""" Tests the __init__ method of the CircleChanger class. """
m1_tests.run_test_init() # This runs OUR tests.
# This is a VISUAL test.
m1_tests.start_drawing('Testing: the __init__ method')
# Construct two CircleChanger objects:
circle_changer1 = CircleChanger(100, 150, 100, 'blue', ('red', 'blue', 'green'))
circle_changer2 = CircleChanger(300, 50, 30, 'yellow', ('green', 'gold'))
# Print and draw them:
print('After construction:')
print(circle_changer1, '\n', circle_changer2, '\n', sep='')
circle_changer1.draw(0.5)
circle_changer2.draw("""
A BLUE circle at (100, 150) with radius 100 and thickness 1,
and a YELLOW circle at (250, 50) with radius 30 and thickness 1.""")
# Change some of their characteristics, then print and redraw them:
circle_changer1.circle.fill_color = circle_changer1.colors[2]
circle_changer2.circle.outline_thickness = 10
print('After changing characteristics:')
print(circle_changer1, '\n', circle_changer2, sep='')
circle_changer1.draw()
circle_changer2.draw("""
Now the leftmost (formerly BLUE) circle is GREEN
and the YELLOW circle has a thicker outline.""")
def run_test_get_distance_from():
m1_tests.run_test_get_distance_from() # This runs OUR tests
print()
print('The following are tests from within m1 itself.')
circle_changer1 = CircleChanger(100, 50, 30, 'blue',
('red', 'blue', 'green'))
circle_changer2 = CircleChanger(160, 50, 10, 'red',
('red',))
circle_changer3 = CircleChanger(163, 46, 40, 'blue',
('red', 'green'))
print()
print('Expected:', 60.0)
print(' Actual:',
circle_changer1.
get_distance_from(circle_changer2.circle.center))
print()
print('Expected:', 5.0)
print(' Actual:',
circle_changer2.
get_distance_from(circle_changer3.circle.center))
print()
print('Expected: about', 63.126856)
print(' Actual: ',
circle_changer1.
get_distance_from(circle_changer3.circle.center))
def run_test_swell_or_shrink_once():
""" Tests the swell_or_shrink_once method. """
m1_tests.run_test_swell_or_shrink_once() # This runs OUR tests
random.seed(42) # Lets us determine the results of the randomness
# This is a VISUAL test.
# Construct 3 CircleChanger objects, printing and drawing them.
m1_tests.start_drawing('Testing: the swell_or_shrink_once method')
print('After construction:')
circle_changer1 = CircleChanger(200, 150, 30, 'blue',
('blue', 'yellow', 'green',
'aquamarine', 'brown'))
print(circle_changer1)
circle_changer1.draw()
circle_changer2 = CircleChanger(400, 100, 50, 'red', ('green',))
print(circle_changer2)
circle_changer2.draw()
circle_changer3 = CircleChanger(300, 200, 10, 'green',
('yellow', 'blue'))
print(circle_changer3)
circle_changer3.draw("""
A BLUE circle at (200, 150) with radius 30 and thickness 1,
and a RED circle at (400, 100) with radius 50 and thickness 1,
and a GREEN circle at (300, 200) with radius 10 and thickness 1.""")
# For each of the three CircleChanger objects,
# apply the swell_or_shrink_once method, then redraw/reprint.
print('\nAfter the first set of swell_or_shrink_once calls:')
circle_changer1.swell_or_shrink_once(100)
print(circle_changer1)
circle_changer1.draw()
circle_changer2.swell_or_shrink_once(-30)
print(circle_changer2)
circle_changer1.draw()
circle_changer3.swell_or_shrink_once(40)
print(circle_changer3)
circle_changer3.draw("""
After the first swell_or_shrink, now:
Left circle is bigger (radius 130), still BLUE but thickness 13,
Right circle is smaller (radius 20), GREEN with thickness 3,
Middle circle is bigger (radius 50), YELLOW with thickness 6.""")
# # Apply the swell_or_shrink_once method to each a second time:
# circle_changer1.swell_or_shrink_once(-80)
# circle_changer2.swell_or_shrink_once(30)
# circle_changer3.swell_or_shrink_once(50)
#
# print('After the second swell_or_shrink_once:')
# print(circle_changer1, '\n',
# circle_changer2, '\n',
# circle_changer3, '\n', sep='')
# m1_tests.start_drawing('After the second swell_or_shrink_once')
# circle_changer1.draw()
# circle_changer2.draw()
# circle_changer3.draw("""
# After the second swell_or_shrink:
# The leftmost circle swelled to radius 130 and thickness 4,
# and a RED circle at (400, 100) with radius 50 and thickness 1,
# and a GREEN circle at (300, 200) with radius 10 and thickness 1.""")
#
# circle_changer1.swell_or_shrink_once(-50)
# circle_changer2.swell_or_shrink_once(100)
# circle_changer3.swell_or_shrink_once(70)
# print(circle_changer1)
# circle_changer1.draw('Now GREEN, radius 80, thickness 10')
def run_test_swell_or_shrink_repeatedly():
""" Tests the swell_or_shrink_repeatedly method. """
m1_tests.run_test_swell_or_shrink_repeatedly() # This runs OUR tests
random.seed(999) # Lets us determine the results of the randomness
# This is a VISUAL test.
# Construct 1 CircleChanger object, printing and drawing it.
title = 'Testing: the swell_or_shrink_repeatedly method'
m1_tests.start_drawing(title)
print('After construction:')
circle_changer1 = CircleChanger(200, 150, 30, 'blue',
('blue', 'yellow', 'green',
'aquamarine', 'brown'))
print(circle_changer1)
circle_changer1.draw("""
A BLUE circle at (200, 150) with radius 30 and thickness 1.""")
# Apply the swell_or_shrink_repeatedly method.
print('\nAfter the first swell_or_shrink_repeatedly call:')
circle_changer1.animation_factor = 0.25 # faster animation
circle_changer1.swell_or_shrink_repeatedly(50, 10)
print(circle_changer1)
circle_changer1.draw("""
The circle should have swelled/shrunk by 50 10 times,
changing color and thickness each time.
It should end with the radius that it began (30),
GREEN with thickness 10.""")
def run_test_swallow():
""" Tests the swallow method. """
m1_tests.run_test_swallow() # This runs OUR tests.
# This is a VISUAL test.
# Construct 2 CircleChanger objects, printing and drawing them.
title = 'Testing: the swallow method'
m1_tests.start_drawing(title)
print('After construction:')
circle_changer1 = CircleChanger(200, 150, 50, 'blue',
('blue', 'yellow', 'green'))
circle_changer1.draw()
print(circle_changer1)
circle_changer2 = CircleChanger(450, 180, 30, 'green',
('yellow', 'magenta'))
print(circle_changer2)
circle_changer2.draw("""
A BLUE circle at (200, 150) with radius 50,
and a GREEN circle at (450, 180) with radius 30.""")
# Apply the swallow method. Then print/draw the resulting
# circle, drawing the original circles on top of it.
print('\nAfter the first swallow call:')
circle_changer3 = circle_changer1.swallow(circle_changer2)
print(circle_changer3)
circle_changer3.draw("""
The RED circle should be centered at (325, 165) with radius about 126.
It should cover approximately HALF of the BLUE and GREEN circles.""")
circle_changer4 = CircleChanger(200, 150, 50, 'blue',
('blue', 'yellow', 'green'))
circle_changer4.draw()
circle_changer5 = CircleChanger(450, 180, 30, 'green',
('yellow', 'magenta'))
circle_changer5.draw("""
Here are the BLUE and GREEN circles again, on TOP of the RED circle.
The RED should appear to be underneath approximately HALF
of each of the BLUE and GREEN circles.""")
# Test that the swallowing (red) CircleChanger
# has a colors attribute that is the CONCATENATION
# of the colors attributes of the swallowed CircleChangers.
if circle_changer3.colors != (circle_changer4.colors +
circle_changer5.colors):
message = """The colors instance variable
of the swallowing CircleChanger (the one RETURNED by
the swallow method) is wrong.
It should be the CONCATENATION of the colors instance
variables of the two SWALLOWED Animated Circle objects.
For example, if circle_changer1.colors is:
('blue', 'yellow', 'green')
and if circle_changer2.colors is:
('yellow', 'magenta')
then circle_changer1.swallow(circle_changer2) should be:
('blue', 'yellow', 'green', 'yellow', 'magenta')
and circle_changer2.swallow(circle_changer1) should be:
('yellow', 'magenta', 'blue', 'yellow', 'green')
"""
time.sleep(0.5)
print(message, file=sys.stderr)
print('The colors instance of circle_changer1 is:',
'\n ', circle_changer1.colors, file=sys.stderr)
print('The colors instance of circle_changer2 is:',
'\n ', circle_changer2.colors, file=sys.stderr)
print('The colors instance of the swallowing',
file=sys.stderr)
print('CircleChanger (circle_changer3) is:',
'\n ', circle_changer3.colors, file=sys.stderr)
print('but should be:', file=sys.stderr)
print(" ('blue', 'yellow', 'green', 'yellow', 'magenta')",
file=sys.stderr)
time.sleep(1)
def run_test_change_color():
""" Tests the change_color method. """
m1_tests.run_test_change_color() # This runs OUR tests.
random.seed(77) # Lets us determine the results of the randomness
# This is a VISUAL test.
# Construct 2 CircleChanger objects, printing and drawing them.
title = 'Testing: the change_color method'
m1_tests.start_drawing(title)
print('After construction:')
circle_changer1 = CircleChanger(100, 100, 70, 'black',
('blue', 'yellow'))
circle_changer1.draw()
print(circle_changer1)
circle_changer2 = CircleChanger(350, 130, 50, 'purple',
('yellow', 'magenta', 'blue',
'green', 'yellow', 'aquamarine'))
print(circle_changer2)
circle_changer2.draw("""
A BLACK circle at (100, 100) with radius 70,
and a PURPLE circle at (350, 130) with radius 50.""")
# Apply the change_color method. Then print/draw the results.
print('\nAfter the first set of change_color calls:')
circle_changer1.change_color(1)
print(circle_changer1)
circle_changer1.draw()
circle_changer2.change_color(5)
print(circle_changer2)
circle_changer2.draw("""
Same circles, but now YELLOW and AQUAMARINE.
The next test will cycle the LEFT circle through:
blue and yellow (repeating as needed)
and the RIGHT circle through:
yellow, magenta, blue, green, yellow, and aquamarine.""")
# Another test:
for k in range(6):
circle_changer1.change_color(k % 2)
circle_changer1.draw()
circle_changer2.change_color(k)
circle_changer2.draw()
circle_changer1.draw("""
Should have finished with YELLOW and AQUAMARINE.""")
# This tests change_color and swell_and_shrink_once
# repeatedly.
for k in range(20, 0, -1):
circle_changer1.change_color(k % 2)
circle_changer1.draw(0.05)
circle_changer1.swell_or_shrink_once((-40 // k) * (k ** -1))
circle_changer1.draw(0.05)
circle_changer2.change_color(k % 6)
circle_changer2.draw(0.05)
circle_changer2.swell_or_shrink_once(50 // k)
circle_changer1.draw(0.05)
circle_changer1.draw("""
Should have ended with two YELLOW circles:
a TINY one on the LEFT and a HUGE one on the RIGHT.""")
def run_test_change_to_original_color():
""" Tests the change_to_original_color method. """
m1_tests.run_test_change_to_original_color() # This runs OUR tests.
random.seed(123) # Lets us determine the results of the randomness
# This is a VISUAL test.
# Construct 2 CircleChanger objects, printing and drawing them.
title = 'Testing: the change_to_original_color method'
m1_tests.start_drawing(title)
print('After construction:')
circle_changer1 = CircleChanger(100, 100, 100, 'black',
('blue', 'green'))
circle_changer1.draw()
print(circle_changer1)
circle_changer2 = CircleChanger(280, 100, 100, 'purple',
('yellow', 'magenta', 'blue',
'green', 'yellow', 'aquamarine'))
print(circle_changer2)
circle_changer2.draw("""
A BLACK circle at (100, 100) with radius 100,
and a PURPLE circle at (280, 100) with radius 100.
You will next see a bunch of colors,
ending with BLACK and PURPLE again.""")
# Flash through many color changes. Then apply the
# change_to_original_color method and print/draw the results.
print('\nAfter the first set of change_to_original_color calls:')
for k in range(30):
circle_changer1.change_color(k % 2)
circle_changer1.draw(0.05)
circle_changer2.change_color(k % 6)
circle_changer2.draw(0.05)
circle_changer1.change_to_original_color()
print(circle_changer1)
circle_changer1.draw()
circle_changer2.change_to_original_color()
print(circle_changer2)
circle_changer2.draw("""
Should end as it started: BLACK and PURPLE.""")
def run_test_change_to_next_color_in_tuple():
""" Tests the change_to_next_color_in_tuple method. """
# m1_tests.change_to_next_color() # This runs OUR tests.
# This is a VISUAL test.
# Construct 2 CircleChanger objects, printing and drawing them.
title = 'Testing: the change_to_next_color method'
m1_tests.start_drawing(title)
print('After construction:')
circle_changer1 = CircleChanger(100, 100, 100, 'black',
('blue', 'green', 'red'))
circle_changer1.draw()
print(circle_changer1)
circle_changer2 = CircleChanger(280, 100, 40, 'purple',
('yellow', 'magenta', 'blue',
'green', 'yellow', 'aquamarine'))
print(circle_changer2)
circle_changer2.draw("""
A BLACK circle at (100, 100) with radius 100,
and a PURPLE circle at (280, 100) with radius 40.
You will next see a bunch of colors,
cycling through BLUE, GREEN, RED (for the left, larger circle)
and YELLOW, MAGENTA, BLUE, GREEN, YELLOW (again) and AQUAMARINE
(for the right, smaller circle).""")
# Cycle through the CircleChanger's tuples of colors:
print('\nAfter the first set of change_to_next_color calls:')
for _ in range(16):
circle_changer1.change_to_next_color_in_tuple()
circle_changer1.draw(0.25)
circle_changer2.change_to_next_color_in_tuple()
circle_changer2.draw(0.25)
circle_changer2.draw("""
Should end with circles: BLUE and GREEN.""")
# ----------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
| 41.124559 | 84 | 0.568706 |
53a0eb4d82fe20e306e8e61ed3138d0c69125471 | 495 | py | Python | gcloud/fixtures/pubsub.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | ["MIT"] | 2 | 2017-03-01T20:09:06.000Z | 2019-02-08T17:10:16.000Z | gcloud/fixtures/pubsub.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | ["MIT"] | 40 | 2015-10-10T15:02:21.000Z | 2020-03-17T22:32:04.000Z | gcloud/fixtures/pubsub.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | ["MIT"] | 2 | 2018-11-14T21:50:58.000Z | 2022-03-07T20:59:27.000Z |
from etl_framework.testing.fixtures import FixtureInterface
from gcloud.datastores.utils.pubsub_messages import PublisherMessage
from gcloud.datastores.pubsub import PubsubPublisher


class PubsubFixture(FixtureInterface):

    def load(self):
        messages = [PublisherMessage(**row) for row in self.data]
        publisher = PubsubPublisher(
            project_name=self.schema.config.project,
            topic_name=self.schema.config.topic
        )
        publisher.publish(messages)
| 30.9375 | 68 | 0.737374 |
1c5be247486d51c8a81e7542cb2d582d68fb70e8 | 759 | py | Python | build_interactive_webapps_python_scripts_using_streamlit/02_interactive_webapps_streamlit_pandas/003b_interactive_webapps_streamlit_pandas.py | bflaven/BlogArticlesExamples | 5df2dfc26170ffbbade78ba136bf3172391e3b2a | ["MIT"] | 5 | 2018-05-03T08:16:02.000Z | 2021-09-04T03:44:24.000Z | build_interactive_webapps_python_scripts_using_streamlit/02_interactive_webapps_streamlit_pandas/003b_interactive_webapps_streamlit_pandas.py | bflaven/BlogArticlesExamples | 5df2dfc26170ffbbade78ba136bf3172391e3b2a | ["MIT"] | 1 | 2022-01-28T19:27:19.000Z | 2022-01-28T19:27:19.000Z | build_interactive_webapps_python_scripts_using_streamlit/02_interactive_webapps_streamlit_pandas/003b_interactive_webapps_streamlit_pandas.py | bflaven/BlogArticlesExamples | 5df2dfc26170ffbbade78ba136bf3172391e3b2a | ["MIT"] | 2 | 2020-09-10T13:33:27.000Z | 2022-02-09T11:07:38.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
[path]
cd /Users/brunoflaven/Documents/01_work/blog_articles/build_interactive_webapps_python_scripts_using_streamlit/02_interactive_webapps_streamlit/
[file]
streamlit run 003b_interactive_webapps_streamlit_pandas.py
# more on infos and apps on
https://streamlit.io/
https://streamlit.io/gallery
https://docs.streamlit.io/en/stable/
"""
# Source :: ! ARTICLE_1 How to write Web apps using simple Python for Data Scientists? Check https: // mlwhiz.com/blog/2019/12/07/streamlit/
import streamlit as st
import pandas as pd
import numpy as np
df = pd.read_csv("data/world_es.csv")
option = st.selectbox(
    '¿Qué país te gusta más?',
    df['name'].unique())
st.write('Seleccionaste:', option)
| 18.071429 | 144 | 0.740448 |
cb6b828cd2c9c508ce9e6eed647357b0696721e4 | 959 | py | Python | simulator/ui/inside.py | ondiiik/meteoink | 9bc7af929de12ed5eb2fafd64fcfe447f07b6eeb | ["MIT"] | 2 | 2021-05-27T13:32:16.000Z | 2022-03-30T01:23:34.000Z | simulator/ui/inside.py | ondiiik/meteoink | 9bc7af929de12ed5eb2fafd64fcfe447f07b6eeb | ["MIT"] | null | null | null | simulator/ui/inside.py | ondiiik/meteoink | 9bc7af929de12ed5eb2fafd64fcfe447f07b6eeb | ["MIT"] | null | null | null |
from ui import UiFrame, Vect
from config import location


class UiInside(UiFrame):
    def __init__(self, ofs, dim):
        super().__init__(ofs, dim)

    def draw(self, ui, args):
        tab, connection = args

        # Type celsius symbol
        ui.text(50, '°C', Vect(111, -5))

        # Type humidity
        if None == ui.forecast.home.rh:
            t = '--'
        else:
            t = '{:.0f}'.format(ui.forecast.home.rh)
        ui.text(25, t, Vect(175, 0))
        ui.text(10, '%', Vect(175 + tab, 11))

        # Type weather details
        ui.text_right(10, ui.forecast.descr, Vect(self.dim.x, 15))
        ui.text_right(10, location[connection.config.location].name, Vect(self.dim.x, 35))
        dt = ui.forecast.time.get_date_time(ui.forecast.weather.dt)
        ui.text_right(10, '{:d}.{:d}.{:d} {:d}:{:02d}'.format(dt[2], dt[1], dt[0], dt[3], dt[4]), Vect(self.dim.x, 25))
| 33.068966 | 119 | 0.52659 |
9604a44809fba891b5adec1176811019270e32ed | 2,029 | py | Python | tests/python/pants_test/engine/test_engine.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | ["Apache-2.0"] | null | null | null | tests/python/pants_test/engine/test_engine.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | ["Apache-2.0"] | null | null | null | tests/python/pants_test/engine/test_engine.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

from pants.base.exceptions import TaskError
from pants.engine.engine import Engine
from pants_test.base.context_utils import create_context
from pants_test.engine.base_engine_test import EngineTestBase


class EngineTest(EngineTestBase):

  class RecordingEngine(Engine):
    def __init__(self, action=None):
      super(EngineTest.RecordingEngine, self).__init__()
      self._action = action
      self._attempts = []

    @property
    def attempts(self):
      return self._attempts

    def attempt(self, context, goals):
      self._attempts.append((context, goals))
      if self._action:
        self._action()

  def setUp(self):
    self.context = create_context()

  def assert_attempt(self, engine, *goal_names):
    self.assertEqual(1, len(engine.attempts))
    context, goals = engine.attempts[0]
    self.assertEqual(self.context, context)
    self.assertEqual(self.as_goals(*goal_names), goals)

  def test_execute_success(self):
    engine = self.RecordingEngine()
    result = engine.execute(self.context, self.as_goals('one', 'two'))
    self.assertEqual(0, result)
    self.assert_attempt(engine, 'one', 'two')

  def _throw(self, error):
    def throw():
      raise error
    return throw

  def test_execute_raise(self):
    engine = self.RecordingEngine(action=self._throw(TaskError()))
    result = engine.execute(self.context, self.as_goals('three'))
    self.assertEqual(1, result)
    self.assert_attempt(engine, 'three')

  def test_execute_code(self):
    engine = self.RecordingEngine(action=self._throw(TaskError(exit_code=42)))
    result = engine.execute(self.context, self.as_goals('four', 'five', 'six'))
    self.assertEqual(42, result)
    self.assert_attempt(engine, 'four', 'five', 'six')
| 32.725806 | 93 | 0.715623 |
ea737a9133bcfd7a90118a3ca2bd09c8d9a9420e | 1,136 | py | Python | data/p4VQE/R1/benchmark/startPyquil63.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | data/p4VQE/R1/benchmark/startPyquil63.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | data/p4VQE/R1/benchmark/startPyquil63.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null |
# qubit number=4
# total number=9
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np

conn = QVMConnection()


def make_circuit() -> Program:
    prog = Program()  # circuit begin
    prog += H(0)  # number=1
    prog += H(1)  # number=2
    prog += H(2)  # number=3
    prog += H(3)  # number=4
    prog += CNOT(2,0)  # number=5
    prog += CNOT(2,0)  # number=6
    prog += CNOT(3,0)  # number=7
    prog += CNOT(3,0)  # number=8
    # circuit end
    return prog


def summrise_results(bitstrings) -> dict:
    d = {}
    for l in bitstrings:
        if d.get(l) is None:
            d[l] = 1
        else:
            d[l] = d[l] + 1
    return d


if __name__ == '__main__':
    prog = make_circuit()
    qvm = get_qc('4q-qvm')

    results = qvm.run_and_measure(prog, 1024)
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil63.csv", "w")
    print(summrise_results(bitstrings), file=writefile)
    writefile.close()
| 22.72 | 64 | 0.606514 |
8da2e4bf7a93b58899243b47b133e2c4018c5a05 | 1,440 | py | Python | app/user/serializers.py | Aries922/recipe-api | 332f7f4407d358c76ba55b80b0ed56003b571827 | ["MIT"] | null | null | null | app/user/serializers.py | Aries922/recipe-api | 332f7f4407d358c76ba55b80b0ed56003b571827 | ["MIT"] | null | null | null | app/user/serializers.py | Aries922/recipe-api | 332f7f4407d358c76ba55b80b0ed56003b571827 | ["MIT"] | null | null | null |
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _

from rest_framework import serializers


class UserSerializer(serializers.ModelSerializer):

    class Meta:
        model = get_user_model()
        fields = ('email', 'password', 'name')
        extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}

    def create(self, validated_data):
        return get_user_model().objects.create_user(**validated_data)

    def update(self, instance, validated_data):
        password = validated_data.pop('password', None)
        user = super().update(instance, validated_data)

        if password:
            user.set_password(password)
            user.save()

        return user


class AuthTokenSerializer(serializers.Serializer):
    email = serializers.CharField()
    password = serializers.CharField(
        style={'input_type': 'password'},
        trim_whitespace=False
    )

    def validate(self, attrs):
        email = attrs.get('email')
        password = attrs.get('password')

        user = authenticate(
            request=self.context.get('request'),
            username=email,
            password=password
        )
        if not user:
            msg = _('Unable to authenticate with provided credentials')
            raise serializers.ValidationError(msg, code='authorization')

        attrs['user'] = user
        return attrs
| 28.8 | 74 | 0.641667 |
78b63af5e8e8f3101a50dc6f782c6abdb2cfba0e | 1,769 | py | Python | tensorflow/python/ops/control_flow_v2_toggles_test.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | ["Apache-2.0"] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/python/ops/control_flow_v2_toggles_test.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | ["Apache-2.0"] | 1,056 | 2019-12-15T01:20:31.000Z | 2022-02-10T02:06:28.000Z | tensorflow/python/ops/control_flow_v2_toggles_test.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | ["Apache-2.0"] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for control_flow_v2_toggles.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import control_flow_v2_toggles
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class ControlFlowV2TogglesTest(test.TestCase):

  def testOutputAllIntermediates(self):
    self.assertIsNone(
        control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE)
    control_flow_v2_toggles.output_all_intermediates(True)
    self.assertTrue(
        control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE)
    control_flow_v2_toggles.output_all_intermediates(False)
    self.assertFalse(
        control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE)
    control_flow_v2_toggles.output_all_intermediates(None)
    self.assertIsNone(
        control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE)


if __name__ == '__main__':
  googletest.main()
| 39.311111 | 80 | 0.773318 |
955090bb5e79cecd3eff01cd518f92bc8cba5692 | 2,755 | py | Python | code/stack_functions.py | 6one2/Stack_PublicSurvey | 3d9e659b3bc8b0b491931086eb9392bcfcc8f9c1 | ["FTL"] | null | null | null | code/stack_functions.py | 6one2/Stack_PublicSurvey | 3d9e659b3bc8b0b491931086eb9392bcfcc8f9c1 | ["FTL"] | null | null | null | code/stack_functions.py | 6one2/Stack_PublicSurvey | 3d9e659b3bc8b0b491931086eb9392bcfcc8f9c1 | ["FTL"] | null | null | null |
import pandas as pd
import numpy as np
from collections import defaultdict
from forex_python.converter import CurrencyRates, CurrencyCodes
from forex_python.bitcoin import BtcConverter


def convert2USD_today(cur_name_from: str, amount: float):
    '''
    Use forex_python to convert the currencies found in the dataset into US dollars.
    cur_name_from - currency to convert into USD
    amount - float representing the amount to convert
    '''
    if cur_name_from == 'U.S. dollars ($)':
        return amount
    if cur_name_from == 'Bitcoin (btc)':
        c = BtcConverter()
        btc_rate = c.get_latest_price('USD')
        return amount*btc_rate

    cur_dict = dict(
        AUD = 'Australian dollars (A$)',
        btc = 'Bitcoin (btc)',
        BRL = 'Brazilian reais (R$)',
        GBP = 'British pounds sterling (£)',
        CAD = 'Canadian dollars (C$)',
        CNY = 'Chinese yuan renminbi (¥)',
        EUR = 'Euros (€)',
        INR = 'Indian rupees (?)',
        JPY = 'Japanese yen (¥)',
        MXN = 'Mexican pesos (MXN$)',
        PLN = 'Polish zloty (zl)',
        RUB = 'Russian rubles (?)',
        SGD = 'Singapore dollars (S$)',
        ZAR = 'South African rands (R)',
        SEK = 'Swedish kroner (SEK)',
        CHF = 'Swiss francs',
        USD = 'U.S. dollars ($)'
    )
    cur_code_from = [code for code, name in cur_dict.items() if name == cur_name_from][0]
    c = CurrencyRates()
    return c.convert(cur_code_from, 'USD', amount)


def convert2USD(amount, cur, df_rate):
    rate = df_rate.query(f'currency_name == "{cur}"')['rate']
    return float(rate*amount)


def getChoices(quest_series):
    '''
    Get all unique responses to a categorical question (series example df['DevType']).
    dictIdx: lists the observation indices in which each choice is found
    '''
    dictIdx = defaultdict(list)
    for idx, choice in quest_series.items():
        if isinstance(choice, str):
            list_choice = [x.lstrip() for x in choice.split(';')]
            for x in list_choice:
                dictIdx[x].append(idx)
    return dictIdx


def createDummyVar(quest_series):
    '''
    Create dummy variables for a multiple-choice categorical variable: split the category
    into one column per unique choice and fill with 1 where the respondent selected it.
    quest_series - the categorical column (Series) to be split and filled with 1
    '''
    dictChoices = getChoices(quest_series)
    cat = quest_series.name
    col = [cat+'.'+str(k) for k in dictChoices.keys()]
    n_df = pd.DataFrame(columns=col, index=quest_series.index).fillna(value=0)
    for k, v in dictChoices.items():
        col = cat+'.'+k
        n_df.loc[v, col] = 1
    return n_df
| 32.797619 | 106 | 0.618512 |
f3db36794ec3a7ebbbc9ec3b3d2d8ecf50977401 | 9,854 | py | Python | script.module.uncoded/lib/resources/lib/sources/en/xmovies.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | ["Apache-2.0"] | 1 | 2019-03-05T09:38:10.000Z | 2019-03-05T09:38:10.000Z | script.module.uncoded/lib/resources/lib/sources/en/xmovies.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | ["Apache-2.0"] | null | null | null | script.module.uncoded/lib/resources/lib/sources/en/xmovies.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | ["Apache-2.0"] | 1 | 2021-11-05T20:48:09.000Z | 2021-11-05T20:48:09.000Z |
# NEEDS FIXING
# -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import cfscrape
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['xmovies8.tv', 'xmovies8.ru']
self.base_link = 'https://xmovies8.es'
self.search_base = 'https://search.xmovies8.es'
self.search_link = '/?q=%s'
self.scraper = cfscrape.create_scraper()
def matchAlias(self, title, aliases):
try:
for alias in aliases:
if cleantitle.get(title) == cleantitle.get(alias['title']):
return True
except:
return False
def movie(self, imdb, title, localtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': title})
url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': tvshowtitle})
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def searchShow(self, title, season, year, aliases, headers):
try:
title = cleantitle.normalize(title)
t = cleantitle.get(title)
url = urlparse.urljoin(self.search_base, self.search_link % urllib.quote_plus(cleantitle.query('%s S%02d' % (title.replace('\'', '-'), int(season)))))
#sr = client.request(url, headers=headers, timeout='10')
sr = self.scraper.get(url).content
if sr:
r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?)\s+-\s+S(\d+)', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
else:
url = urlparse.urljoin(self.search_base, self.search_link % urllib.quote_plus(cleantitle.query('%s Season %01d' % (title.replace('\'', '-'), int(season)))))
#sr = client.request(url, headers=headers, timeout='10')
sr = self.scraper.get(url).content
if sr:
r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
else:
url = urlparse.urljoin(self.search_base, self.search_link % urllib.quote_plus(cleantitle.query('%s %01d' % (title.replace('\'', '-'), int(year)))))
#sr = client.request(url, headers=headers, timeout='10')
sr = self.scraper.get(url).content
if sr:
r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
return url.encode('utf-8')
except:
return
def searchMovie(self, title, year, aliases, headers):
try:
title = cleantitle.normalize(title)
url = urlparse.urljoin(self.search_base, self.search_link % (cleantitle.geturl(title.replace('\'', '-'))))
#r = client.request(url, timeout='10', headers=headers)
r = self.scraper.get(url).content
r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
try:
match = [i[0] for i in r if self.matchAlias(i[1], aliases) and year == i[2]][0]
except:
match = [i[0] for i in r if self.matchAlias(i[1], aliases)][0]
url = re.findall('(?://.+?|)(/.+)', match)[0]
url = client.replaceHTMLCodes(url)
return url.encode('utf-8')
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
aliases = eval(data['aliases'])
headers = {}
if 'tvshowtitle' in data:
episode = int(data['episode'])
url = self.searchShow(data['tvshowtitle'], data['season'], data['year'], aliases, headers)
else:
episode = 0
url = self.searchMovie(data['title'], data['year'], aliases, headers)
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
url = re.sub('/watching.html$', '', url.strip('/'))
url = url + '/watching.html'
#p = client.request(url, headers=headers, timeout='10')
p = self.scraper.get(url).content
if episode > 0:
r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
r = [(i[0], i[1][0]) for i in r]
r = [i[0] for i in r if int(i[1]) == episode][0]
#p = client.request(r, headers=headers, timeout='10')
p = self.scraper.get(url).content
referer = url
id = re.findall('load_player\(.+?(\d+)', p)[0]
r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3?id=%s' % id)
#r = client.request(r, headers=headers, referer=referer, XHR=True, timeout='10')
r = self.scraper.get(r).content
url = json.loads(r)['value']
if (url.startswith('//')):
url = 'https:' + url
url = client.request(url, headers=headers, XHR=True, output='geturl', timeout='10')
if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
sources.append({'source': 'openload.co', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False,'debridonly': False})
raise Exception()
r = client.request(url, headers=headers, timeout='10')
try:
src = json.loads(r)['playlist'][0]
links = re.findall('''file['"]:\s*u['"]([^'"]+)''', str(src))
for i in links:
try:
sources.append(
{'source': 'gvideo', 'quality': 'SD', 'language': 'en',
'url': i, 'direct': True, 'debridonly': False})
except:
pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
for i in range(3):
u = directstream.googlepass(url)
if not u == None: break
return u
except:
return
| 45.410138 | 172 | 0.502233 |
3a038b32fb5a9dd4198b72b0ac349cbc7c0942dd | 4,226 | py | Python | NNTreeJussi_Test.py | timohart7/tau-vehicle-37 | 7b849b7d019b70ca2afef87cbbcbd596dccbba77 | ["MIT"] | null | null | null | NNTreeJussi_Test.py | timohart7/tau-vehicle-37 | 7b849b7d019b70ca2afef87cbbcbd596dccbba77 | ["MIT"] | null | null | null | NNTreeJussi_Test.py | timohart7/tau-vehicle-37 | 7b849b7d019b70ca2afef87cbbcbd596dccbba77 | ["MIT"] | null | null | null |
import os
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn import model_selection
import cv2
import tensorflow as tf
import tensorflow.keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.models import Model
from keras.applications.mobilenet_v2 import MobileNetV2
import h5py
'''
directory = "C:/Users/juspe/Documents/Koodailua/tau-vehicle-37/train/train"
class_names = sorted(os.listdir(directory))
test_files = 'C:/Users/juspe/Documents/Koodailua/tau-vehicle-37/test/testset'
Test = []
for file in os.listdir(test_files):
if file.endswith('.jpg'):
# Load the image:
img = plt.imread(test_files + os.sep + file)
# Resize it to the net input size:
img = cv2.resize(img, (224,224))
img = img.astype(np.float32)
img -= 128
Test.append(img)
print(file)
Test = np.array(Test)
'''
def JussiTree(Test):
Pred = np.zeros((Test.shape[0]),dtype=int)
Layer1 = tf.keras.models.load_model('Layer1_Jussi_MobnetV2.h5')
L1Pred = np.argmax(Layer1.predict(Test),1)
del Layer1
Aquatic = Test[np.argwhere(L1Pred==0).ravel()]
Automobile = Test[np.argwhere(L1Pred==1).ravel()]
Bike = Test[np.argwhere(L1Pred==2).ravel()]
Pred[np.argwhere(L1Pred==3)] = 7
Pred[np.argwhere(L1Pred==4)] = 8
Pred[np.argwhere(L1Pred==5)] = 11
Pred[np.argwhere(L1Pred==6)] = 12
Pred[np.argwhere(L1Pred==7)] = 13
del Test
tf.keras.backend.clear_session()
LayerAq = tf.keras.models.load_model('LayerAq_Jussi_MobnetV2.h5')
AqPred = np.argmax(LayerAq.predict(Aquatic),1)
AqPred = np.where(AqPred==0,1,3)
Pred[np.argwhere(L1Pred==0).ravel()] = AqPred
del LayerAq
del Aquatic
tf.keras.backend.clear_session()
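# Bike branch: three-way sub-classifier remapped to final classes 2, 6 and 10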
LayerBike = tf.keras.models.load_model('LayerBike_Jussi_MobnetV2.h5')
BikePred = LayerBike.predict(Bike)
BikePred = np.argmax(BikePred,1)
BikePred = np.where(BikePred==2,10,BikePred)
BikePred = np.where(BikePred==0,2,BikePred)
BikePred = np.where(BikePred==1,6,BikePred)
Pred[np.argwhere(L1Pred==2).ravel()] = BikePred
del LayerBike
del Bike
tf.keras.backend.clear_session()
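# Automobile branch: first split into large (0) and small (1) vehicles, each refined by its own sub-classifier below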
LayerAuto = tf.keras.models.load_model('LayerAuto_Jussi_MobnetV2.h5')
AutoPred = np.argmax(LayerAuto.predict(Automobile),1)
Large = Automobile[np.argwhere(AutoPred==0).ravel()]
Small = Automobile[np.argwhere(AutoPred==1).ravel()]
del LayerAuto
del Automobile
tf.keras.backend.clear_session()
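# Large-vehicle branch: final classes 4 and 15, plus a van group that gets a further sub-classifier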
LayerLarge = tf.keras.models.load_model('LayerLarge_Jussi_MobnetV2.h5')
LargePred = np.argmax(LayerLarge.predict(Large),1)
LargePred = np.where(LargePred==0,4,LargePred)
LargePred = np.where(LargePred==1,15,LargePred)
Van = Large[np.argwhere(LargePred==2).ravel()]
del LayerLarge
del Large
tf.keras.backend.clear_session()
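# Van group: binary sub-classifier whose outputs map to final classes 0 and 16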
LayerVan = tf.keras.models.load_model('LayerVan_Jussi_MobnetV2.h5')
VanPred = np.argmax(LayerVan.predict(Van),1)
VanPred = np.where(VanPred==1,16,VanPred)
LargePred[np.argwhere(LargePred==2).ravel()] = VanPred
AutoPred[np.argwhere(AutoPred==0).ravel()] = LargePred
del LayerVan
del Van
del LargePred
del VanPred
tf.keras.backend.clear_session()
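# Small-vehicle branch: four-way sub-classifier mapped to final classes 5, 6, 9 and 14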
LayerSmall = tf.keras.models.load_model('LayerSmall_Jussi_MobnetV2.h5')
SmallPred = np.argmax(LayerSmall.predict(Small),1)
SmallPred = np.where(SmallPred==0,5,SmallPred)
SmallPred = np.where(SmallPred==1,6,SmallPred)
SmallPred = np.where(SmallPred==2,9,SmallPred)
SmallPred = np.where(SmallPred==3,14,SmallPred)
AutoPred[np.argwhere(AutoPred==1).ravel()] = SmallPred
Pred[np.argwhere(L1Pred==1).ravel()]= AutoPred
del LayerSmall
del Small
del AutoPred
del SmallPred
tf.keras.backend.clear_session()
return(Pred)
'''
with open("C:/Users/juspe/Documents/Koodailua/tau-vehicle-37/submissionTree1.csv", "w") as fp:
fp.write("Id,Category\n")
i = 0
for i in range(Pred.shape[0]):
label = class_names[Pred[i]]
fp.write("%d,%s\n" % (i, label))
i +=1
'''
| 30.185714 | 94 | 0.683152 |
c9f7d72ad0ae91c18a6eeec1ce7eb4e52dfbcf3d
| 324 |
py
|
Python
|
biobrary/amino_acids_mw.py
|
benjaminfang/fbio
|
cb033df257682f41919a99202340d846a2ee9f5d
|
[
"MIT"
] | 4 |
2019-01-31T07:41:41.000Z
|
2019-07-03T01:09:04.000Z
|
biobrary/amino_acids_mw.py
|
benjaminfang/biolib
|
660ee48205509522e0fbbde287a4456bcd94e448
|
[
"MIT"
] | null | null | null |
biobrary/amino_acids_mw.py
|
benjaminfang/biolib
|
660ee48205509522e0fbbde287a4456bcd94e448
|
[
"MIT"
] | null | null | null |
amino_acids_mw = {'F':165.19, 'L':131.17, 'I':131.175, 'M':149.21, 'V':117.15,
'S':105.093, 'P':115.13, 'T':119.12, 'A':89.094, 'Y':181.19,
'H':155.15, 'Q':146.14, 'N':132.12, 'K':146.19, 'D':133.11,
'E':147.13, 'C':121.16, 'W':204.23, 'R':174.20, 'G':75.07, '*':0}
| 64.8 | 84 | 0.419753 |
3e575d217ca966ae596dcccac09c039850fd6979
| 6,696 |
py
|
Python
|
src/ggrc_workflows/models/cycle.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc_workflows/models/cycle.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc_workflows/models/cycle.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Module contains a workflow Cycle model
"""
import itertools
from sqlalchemy import orm, inspect
from ggrc import db
from ggrc.models import mixins
from ggrc.models import reflection
from ggrc.fulltext import attributes as ft_attributes
from ggrc.fulltext import mixin as ft_mixin
from urlparse import urljoin
from ggrc.utils import get_url_root
from ggrc_workflows.models import mixins as wf_mixins
def _query_filtered_by_contact(person):
"""Returns cycle required to reindex for sent persons."""
attrs = inspect(person).attrs
if any([attrs["email"].history.has_changes(),
attrs["name"].history.has_changes()]):
return Cycle.query.filter(Cycle.contact_id == person.id)
else:
return []
class Cycle(mixins.WithContact,
wf_mixins.CycleStatusValidatedMixin,
mixins.Timeboxed,
mixins.Described,
mixins.Titled,
mixins.Slugged,
mixins.Notifiable,
ft_mixin.Indexed,
db.Model):
"""Workflow Cycle model
"""
__tablename__ = 'cycles'
_title_uniqueness = False
workflow_id = db.Column(
db.Integer,
db.ForeignKey('workflows.id', ondelete="CASCADE"),
nullable=False,
)
cycle_task_groups = db.relationship(
'CycleTaskGroup', backref='cycle', cascade='all, delete-orphan')
cycle_task_group_object_tasks = db.relationship(
'CycleTaskGroupObjectTask', backref='cycle',
cascade='all, delete-orphan')
cycle_task_entries = db.relationship(
'CycleTaskEntry', backref='cycle', cascade='all, delete-orphan')
is_current = db.Column(db.Boolean,
default=True,
nullable=False)
next_due_date = db.Column(db.Date)
@property
def is_done(self):
"""Check if cycle's done
Overrides StatusValidatedMixin method because cycle's is_done state
depends on is_verification_needed flag
"""
if super(Cycle, self).is_done:
return True
if self.cycle_task_group_object_tasks:
return False
return True
_api_attrs = reflection.ApiAttributes(
'workflow',
'cycle_task_groups',
'is_current',
'next_due_date',
)
_aliases = {
"cycle_workflow": {
"display_name": "Workflow",
"filter_by": "_filter_by_cycle_workflow",
},
"contact": "Assignee",
"secondary_contact": None,
}
PROPERTY_TEMPLATE = u"cycle {}"
_fulltext_attrs = [
ft_attributes.MultipleSubpropertyFullTextAttr(
"group title", "cycle_task_groups", ["title"], False,
),
ft_attributes.MultipleSubpropertyFullTextAttr(
"group assignee",
lambda instance: [g.contact for g in instance.cycle_task_groups],
["name", "email"],
False,
),
ft_attributes.DateMultipleSubpropertyFullTextAttr(
"group due date",
'cycle_task_groups',
["next_due_date"],
False,
),
ft_attributes.MultipleSubpropertyFullTextAttr(
"task title",
'cycle_task_group_object_tasks',
["title"],
False,
),
ft_attributes.MultipleSubpropertyFullTextAttr(
"task assignee",
lambda instance: [t.contact for t in
instance.cycle_task_group_object_tasks],
["name", "email"],
False
),
ft_attributes.DateMultipleSubpropertyFullTextAttr(
"task due date",
"cycle_task_group_object_tasks",
["end_date"],
False
),
ft_attributes.DateFullTextAttr("due date", "next_due_date"),
ft_attributes.MultipleSubpropertyFullTextAttr(
"task comments",
lambda instance: list(itertools.chain(*[
t.cycle_task_entries
for t in instance.cycle_task_group_object_tasks
])),
["description"],
False
),
]
AUTO_REINDEX_RULES = [
ft_mixin.ReindexRule("CycleTaskGroup", lambda x: x.cycle),
ft_mixin.ReindexRule("CycleTaskGroupObjectTask",
lambda x: x.cycle_task_group.cycle),
ft_mixin.ReindexRule("Person", _query_filtered_by_contact)
]
@classmethod
def _filter_by_cycle_workflow(cls, predicate):
from ggrc_workflows.models.workflow import Workflow
return Workflow.query.filter(
(Workflow.id == cls.workflow_id) &
(predicate(Workflow.slug) | predicate(Workflow.title))
).exists()
@classmethod
def eager_query(cls):
"""Add cycle task groups to cycle eager query
This function adds cycle_task_groups as a join option when fetching cycles,
and makes sure we fetch all cycle related data needed for generating cycle
json, in one query.
Returns:
a query object with cycle_task_groups added to joined load options.
"""
query = super(Cycle, cls).eager_query()
return query.options(
orm.joinedload('cycle_task_groups'),
)
@classmethod
def indexed_query(cls):
return super(Cycle, cls).indexed_query().options(
orm.Load(cls).load_only("next_due_date"),
orm.Load(cls).subqueryload("cycle_task_group_object_tasks").load_only(
"id",
"title",
"end_date"
),
orm.Load(cls).subqueryload("cycle_task_groups").load_only(
"id",
"title",
"end_date",
"next_due_date",
),
orm.Load(cls).subqueryload("cycle_task_group_object_tasks").joinedload(
"contact"
).load_only(
"email",
"name",
"id"
),
orm.Load(cls).subqueryload("cycle_task_group_object_tasks").joinedload(
"cycle_task_entries"
).load_only(
"description",
"id"
),
orm.Load(cls).subqueryload("cycle_task_groups").joinedload(
"contact"
).load_only(
"email",
"name",
"id"
),
orm.Load(cls).joinedload("contact").load_only(
"email",
"name",
"id"
),
)
def _get_cycle_url(self, widget_name):
return urljoin(
get_url_root(),
"workflows/{workflow_id}#{widget_name}/cycle/{cycle_id}".format(
workflow_id=self.workflow.id,
cycle_id=self.id,
widget_name=widget_name
)
)
@property
def cycle_url(self):
return self._get_cycle_url("current_widget")
@property
def cycle_inactive_url(self):
return self._get_cycle_url("history_widget")
| 28.987013 | 79 | 0.62037 |
e80d2d337db1acf71861ab0ce6d6de98686a4c70
| 4,907 |
py
|
Python
|
raksha/openstack/common/scheduler/filters/json_filter.py
|
DPaaS-Raksha/raksha
|
e4e482865d2860473bc0a80e10d76bb127e9f6c5
|
[
"Apache-2.0"
] | 8 |
2015-03-19T20:22:44.000Z
|
2021-04-11T06:00:52.000Z
|
raksha/openstack/common/scheduler/filters/json_filter.py
|
DPaaS-Raksha/raksha
|
e4e482865d2860473bc0a80e10d76bb127e9f6c5
|
[
"Apache-2.0"
] | 1 |
2015-07-21T23:05:23.000Z
|
2016-03-16T08:11:54.000Z
|
raksha/openstack/common/scheduler/filters/json_filter.py
|
DPaaS-Raksha/raksha
|
e4e482865d2860473bc0a80e10d76bb127e9f6c5
|
[
"Apache-2.0"
] | 5 |
2015-10-09T17:42:24.000Z
|
2021-03-11T18:33:00.000Z
|
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from raksha.openstack.common import jsonutils
from raksha.openstack.common.scheduler import filters
class JsonFilter(filters.BaseHostFilter):
"""Host Filter to allow simple JSON-based grammar for
selecting hosts.
"""
def _op_compare(self, args, op):
"""Returns True if the specified operator can successfully
compare the first item in the args with all the rest. Will
return False if only one item is in the list.
"""
if len(args) < 2:
return False
if op is operator.contains:
bad = args[0] not in args[1:]
else:
bad = [arg for arg in args[1:]
if not op(args[0], arg)]
return not bool(bad)
def _equals(self, args):
"""First term is == all the other terms."""
return self._op_compare(args, operator.eq)
def _less_than(self, args):
"""First term is < all the other terms."""
return self._op_compare(args, operator.lt)
def _greater_than(self, args):
"""First term is > all the other terms."""
return self._op_compare(args, operator.gt)
def _in(self, args):
"""First term is in set of remaining terms"""
return self._op_compare(args, operator.contains)
def _less_than_equal(self, args):
"""First term is <= all the other terms."""
return self._op_compare(args, operator.le)
def _greater_than_equal(self, args):
"""First term is >= all the other terms."""
return self._op_compare(args, operator.ge)
def _not(self, args):
"""Flip each of the arguments."""
return [not arg for arg in args]
def _or(self, args):
"""True if any arg is True."""
return any(args)
def _and(self, args):
"""True if all args are True."""
return all(args)
commands = {
'=': _equals,
'<': _less_than,
'>': _greater_than,
'in': _in,
'<=': _less_than_equal,
'>=': _greater_than_equal,
'not': _not,
'or': _or,
'and': _and,
}
def _parse_string(self, string, host_state):
"""Strings prefixed with $ are capability lookups in the
form '$variable' where 'variable' is an attribute in the
HostState class. If $variable is a dictionary, you may
use: $variable.dictkey
"""
if not string:
return None
if not string.startswith("$"):
return string
path = string[1:].split(".")
obj = getattr(host_state, path[0], None)
if obj is None:
return None
for item in path[1:]:
obj = obj.get(item, None)
if obj is None:
return None
return obj
def _process_filter(self, query, host_state):
"""Recursively parse the query structure."""
if not query:
return True
cmd = query[0]
method = self.commands[cmd]
cooked_args = []
for arg in query[1:]:
if isinstance(arg, list):
arg = self._process_filter(arg, host_state)
elif isinstance(arg, basestring):
arg = self._parse_string(arg, host_state)
if arg is not None:
cooked_args.append(arg)
result = method(self, cooked_args)
return result
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can fulfill the requirements
specified in the query.
"""
# TODO(zhiteng) Add description for filter_properties structure
# and scheduler_hints.
try:
query = filter_properties['scheduler_hints']['query']
except KeyError:
query = None
if not query:
return True
# NOTE(comstud): Not checking capabilities or service for
# enabled/disabled so that a provided json filter can decide
result = self._process_filter(jsonutils.loads(query), host_state)
if isinstance(result, list):
# If any succeeded, include the host
result = any(result)
if result:
# Filter it out.
return True
return False
| 32.496689 | 78 | 0.594457 |
cae1c60718f4ecb1eace53bc91c999b60ab46121
| 1,195 |
py
|
Python
|
tests/test_asce/test_rlre.py
|
Layty/dlms-cosem
|
95b67054a1dfb928e960547b0246b7b6794f0594
|
[
"MIT"
] | 1 |
2021-08-20T09:19:07.000Z
|
2021-08-20T09:19:07.000Z
|
tests/test_asce/test_rlre.py
|
Layty/dlms-cosem
|
95b67054a1dfb928e960547b0246b7b6794f0594
|
[
"MIT"
] | null | null | null |
tests/test_asce/test_rlre.py
|
Layty/dlms-cosem
|
95b67054a1dfb928e960547b0246b7b6794f0594
|
[
"MIT"
] | null | null | null |
import pytest
from dlms_cosem import enumerations
from dlms_cosem.protocol import xdlms
from dlms_cosem.protocol.acse import ReleaseResponseApdu
class TestDecodeRLRE:
def test_simple(self):
data = b"c\x03\x80\x01\x00"
rlre = ReleaseResponseApdu.from_bytes(data)
assert rlre.reason == enumerations.ReleaseResponseReason.NORMAL
assert rlre.user_information is None
def test_with_initiate_response(self):
data = b"c\x16\x80\x01\x00\xbe\x11\x04\x0f\x08\x01\x00\x06_\x1f\x04\x00\x00\x1e\x1d\x04\xc8\x00\x07"
rlre = ReleaseResponseApdu.from_bytes(data)
assert rlre.reason == enumerations.ReleaseResponseReason.NORMAL
assert isinstance(rlre.user_information.content, xdlms.InitiateResponseApdu)
def test_with_ciphered_initiate_response(self):
data = bytes.fromhex(
"6328800100BE230421281F3001234567891214A0845E475714383F65BC19745CA235906525E4F3E1C893"
)
rlre = ReleaseResponseApdu.from_bytes(data)
assert rlre.reason == enumerations.ReleaseResponseReason.NORMAL
assert isinstance(
rlre.user_information.content, xdlms.GlobalCipherInitiateResponse
)
| 39.833333 | 108 | 0.740586 |
1aa336f6d66cb5c03f986e551dd4db9a7fc9e1c2
| 1,599 |
py
|
Python
|
lego/apps/meetings/tests/test_notifications.py
|
mathiazom/lego
|
4c6c80fbe023b67bf68548ad806af4ff944da92c
|
[
"MIT"
] | null | null | null |
lego/apps/meetings/tests/test_notifications.py
|
mathiazom/lego
|
4c6c80fbe023b67bf68548ad806af4ff944da92c
|
[
"MIT"
] | 71 |
2021-11-01T04:47:36.000Z
|
2022-03-31T04:25:04.000Z
|
lego/apps/meetings/tests/test_notifications.py
|
wahello/lego
|
a0b02f3abc997fe96326e9c9c05b49847170041b
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
from lego.apps.meetings.models import Meeting
from lego.apps.meetings.notifications import MeetingInvitationNotification
from lego.apps.users.models import User
from lego.utils.test_utils import BaseTestCase
@patch("lego.utils.email.django_send_mail")
class MeetingInvitationNotificationTestCase(BaseTestCase):
fixtures = [
"test_abakus_groups.yaml",
"test_meetings.yaml",
"test_users.yaml",
"initial_files.yaml",
]
def setUp(self):
user = User.objects.all().first()
meeting = Meeting.objects.all().first()
meeting.created_by = user
meeting.save()
invitation, _created = meeting.invite_user(user)
self.notifier = MeetingInvitationNotification(
user, meeting=meeting, meeting_invitation=invitation
)
def assertEmailContains(self, send_mail_mock, content):
self.notifier.generate_mail()
email_args = send_mail_mock.call_args[1]
self.assertIn(content, email_args["message"])
self.assertIn(content, email_args["html_message"])
def test_generate_email_time(self, send_mail_mock):
time = "01.10.16, kl. 19:15"
self.assertEmailContains(send_mail_mock, time)
def test_generate_email_content(self, send_mail_mock):
content = "test user1 inviterte deg til et møte med tittel Bra møte."
self.assertEmailContains(send_mail_mock, content)
def test_generate_email_name(self, send_mail_mock):
opening = "Hei, test!"
self.assertEmailContains(send_mail_mock, opening)
| 35.533333 | 77 | 0.707317 |
f0aac3b0beaa5e4c7ffcac2807094c227b275801
| 637 |
py
|
Python
|
pesquisa_e_ordenacao/quick_sort.py
|
Rudigus/besteirinhas-python
|
70f93dd522770a46966656980cd9f0d559aa8b0f
|
[
"MIT"
] | null | null | null |
pesquisa_e_ordenacao/quick_sort.py
|
Rudigus/besteirinhas-python
|
70f93dd522770a46966656980cd9f0d559aa8b0f
|
[
"MIT"
] | null | null | null |
pesquisa_e_ordenacao/quick_sort.py
|
Rudigus/besteirinhas-python
|
70f93dd522770a46966656980cd9f0d559aa8b0f
|
[
"MIT"
] | null | null | null |
from utils.plotter import plot
from utils.generator import getRandomList
from timeit import timeit
from utils.line import Line
from utils.sorter import quickSort
# Change this value to switch from and to test mode
testMode = False
testDivisor = 100
counts = [100000, 200000, 400000, 700000, 1000000, 5000000]
if testMode:
counts = [int(n / testDivisor) for n in counts]
randomLists = [getRandomList(count) for count in counts]
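# time a single quickSort call (number=1) for each input size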
elapsedTimes = [timeit(lambda: quickSort(list), number = 1) for list in randomLists]
lines = [Line((counts, elapsedTimes), "Caso aleatório", 'b')]
plot(lines, figname = "products/quick_sort_graph.png")
| 28.954545 | 84 | 0.761381 |
78853fe4051db93ba19c4c684fbc8c8e6a97690c
| 13,087 |
py
|
Python
|
codes/nlper/utils/fn.py
|
jimme0421/NLPer-Arsenal
|
4d1b01556ec8ff5b4a92752de91fbfd27e9ebdee
|
[
"MIT"
] | 2 |
2022-02-17T03:15:00.000Z
|
2022-03-14T12:52:11.000Z
|
codes/nlper/utils/fn.py
|
yanqiangmiffy/NLPer-Arsenal
|
3bdef107c7499731535a24e4b9cff46fa543775f
|
[
"MIT"
] | null | null | null |
codes/nlper/utils/fn.py
|
yanqiangmiffy/NLPer-Arsenal
|
3bdef107c7499731535a24e4b9cff46fa543775f
|
[
"MIT"
] | null | null | null |
import warnings
from typing import List
import os
import re
import random
import time
import multiprocessing as mp
import psutil
import pynvml
import numpy as np
import torch
from prettytable import PrettyTable
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
def seed_everything(seed=1000):
"""seed everything to reproduce your experiments
:param int seed: default 1000
:return: None
"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def set_devices(device_ids: List[int]):
"""setting the global environment of CUDA
:param device_ids: list of device id, [-1] is cpu
:return: torch.device
"""
if type(device_ids) != list:
raise TypeError(f'the gpus type should be List[int], not {type(device_ids)}')
if len(device_ids) > 1:
warnings.warn(f'we only support cpu or single gpu now, '
f'but you passed {len(device_ids)} device ids; only the first one will be used')
os.environ['CUDA_VISIBLE_DEVICES'] = str(device_ids[0])
if device_ids[0] != -1:
print(f'Training on GPU {device_ids}')
return torch.device('cuda')
else:
print('Training on CPU')
return torch.device('cpu')
def count_params(model, show=False):
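"""Count the total number of parameters in a model, optionally printing each parameter's name and shape."""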
num_params = 0
if show:
for name, p in model.named_parameters():
print(f'{name}: {str(p.size())}')
num_params += p.numel()
else:
for name, p in model.named_parameters():
num_params += p.numel()
return num_params
def format_runTime(seconds:float):
"""format running time to `day hours:minutes:seconds`
:param seconds: typically the difference between two time.time() calls
:return: format string
"""
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
h = '0' + str(int(h)) if h < 10 else str(int(h))
m = '0' + str(int(m)) if m < 10 else str(int(m))
s = '0' + str(int(s)) if s < 10 else str(int(s))
if d == 0:
return f'{h}:{m}:{s}'
else:
return f'{d}d {h}:{m}:{s}'
class ProcessStatus():
"""记录程序运行过程中GPU/CPU/内存的全局使用情况(不一定是主进程的实际使用情况,暂未实现进程跟踪功能)
>>> gpu = 0 # 指定0号GPU,或者为None,不指定GPU
>>> processStatus = ProcessStatus(gpu)
>>> p = mp.Process(target=processStatus.record_running_status, args=(0.5,))
>>> p.start() # 开始执行监控进程
>>> # 执行主进程,例如运行程序
>>> p.terminate() # 终结监控进程
>>> processStatus.print_statisticAnalysis() # 打印表信息
>>> processStatus.plot_running_info() # 打印图信息
"""
def __init__(self, gpu:int=None):
self.start = time.time()
self.running_info = mp.Manager().list()
self.gpu = gpu
if gpu:
pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(gpu)
gpu_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
self.device_total_memory = round(gpu_info.total/1024**2) # MiB
self.driver_version = pynvml.nvmlSystemGetDriverVersion().decode('utf-8')
self.device_name = pynvml.nvmlDeviceGetName(handle).decode('utf-8')
pynvml.nvmlShutdown()
def record_running_status(self, interval=0.5):
"""供多进程调用,监控程序运行过程中的GPU、CPU、内存变化
:param interval: 记录间隔,默认 0.5s 记录一次
:return: 不间断运行,直至主进程内结束该子进程
"""
pynvml.nvmlInit()
start = self.start
if self.gpu != None: # a GPU was specified
while True:
cur_time = time.time()
if cur_time - start >= interval:
start = cur_time
handle = pynvml.nvmlDeviceGetHandleByIndex(self.gpu)
gpu_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
mem = psutil.virtual_memory()
self.running_info.append({
'cur_time': cur_time,
'gpu_used': round(gpu_info.used / 1024 ** 2, 2), # GPU memory used (MiB)
'gpu_util': pynvml.nvmlDeviceGetUtilizationRates(handle).gpu, # GPU utilization (0~100)
'cpu_util': psutil.cpu_percent(), # CPU utilization (0.0~100.0)
'mem_util': mem.percent, # memory utilization (0.0~100.0)
'mem_used': round(mem.used / 1024 ** 2) # memory used (MiB)
})
else: # no GPU specified
while True:
cur_time = time.time()
if cur_time - start >= interval:
start = cur_time
mem = psutil.virtual_memory()
self.running_info.append({
'cur_time': cur_time,
'cpu_util': psutil.cpu_percent(), # CPU utilization (0.0~100.0)
'mem_util': mem.percent, # memory utilization (0.0~100.0)
'mem_used': round(mem.used / 1024 ** 2) # memory used (MiB)
})
def print_statisticAnalysis(self):
"""统计分析程序运行时间以及GPU/CPU/内存使用情况,以表格形式呈现
"""
start = self.start
table = PrettyTable(['Param', 'Value'])
if self.gpu != None: # a GPU was specified
table.add_row(['cuda version', torch.version.cuda])
table.add_row(['driver version', self.driver_version])
table.add_row(['device', self.device_name])
table.add_row(['device id', self.gpu])
table.add_row(['start time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))])
table.add_row(['end time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())])
table.add_row(['running time', format_runTime(time.time() - start)])
table.add_row(['device total memory', f'{self.device_total_memory} MiB'])
table.add_row(['device max used memory', f"{round(np.max([t['gpu_used'] for t in self.running_info]), 2)} MiB"])
table.add_row(['device avg util ratio', f"{round(np.mean([t['gpu_util'] for t in self.running_info]), 2)}%"])
else: # no GPU specified
table.add_row(['start time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))])
table.add_row(['end time', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())])
table.add_row(['running time', format_runTime(time.time() - start)])
table.add_row(['cpu avg util ratio', f"{round(np.mean([t['cpu_util'] for t in self.running_info]), 2)}%"])
table.add_row(['memory max used', f"{round(np.max([t['mem_used'] for t in self.running_info]), 2)} MiB"])
table.add_row(['memory avg util ratio', f"{round(np.mean([t['mem_util'] for t in self.running_info]), 2)}%"])
table.align['Param'] = 'l'
table.align['Value'] = 'l'
print(table)
def plot_running_info(self, show=False, saved_path='./status.png'):
"""以图表形式展现程序运行过程中的GPU/CPU/内存使用情况,默认不显示,只保存在'./status.png'
:param show: 是否调用plt.show()画出该图
:param saved_path: 将图保存在指定位置
"""
font = FontProperties()
font.set_family('serif')
font.set_name('Times New Roman')
font.set_style('normal')
font.set_size(12)
plt.style.use(['science', 'no-latex'])
plt.figure(figsize=(12, 12), dpi=300)
cur_time = [item['cur_time']-self.start for item in self.running_info]
cpu_util = [item['cpu_util'] for item in self.running_info]
mem_util = [item['mem_util'] for item in self.running_info]
mem_used = [item['mem_used'] for item in self.running_info]
if self.gpu != None:
gpu_used = [item['gpu_used'] for item in self.running_info]
gpu_util = [item['gpu_util'] for item in self.running_info]
ax = plt.subplot(2, 1, 1)
ax.plot(cur_time, gpu_util, label='gpu_util')
ax.plot(cur_time, cpu_util, label='cpu_util')
ax.plot(cur_time, mem_util, label='mem_util')
plt.xticks(font_properties=font)
plt.yticks(font_properties=font)
plt.gca().set_ylabel('percentage', font_properties=font, fontsize=16)
plt.legend()
ax = plt.subplot(2, 1, 2)
ax.plot(cur_time, gpu_used, label='gpu_used')
ax.plot(cur_time, mem_used, label='mem_used')
plt.xticks(font_properties=font)
plt.yticks(font_properties=font)
plt.gca().set_xlabel('time', font_properties=font, fontsize=16)
plt.gca().set_ylabel('capacity', font_properties=font, fontsize=16)
plt.legend()
plt.title("status", font_properties=font, fontsize=20)
else:
ax = plt.subplot(2, 1, 1)
ax.plot(cur_time, cpu_util, label='cpu_util')
ax.plot(cur_time, mem_util, label='mem_util')
plt.xticks(font_properties=font)
plt.yticks(font_properties=font)
plt.gca().set_ylabel('percentage', font_properties=font, fontsize=16)
plt.legend()
ax = plt.subplot(2, 1, 2)
ax.plot(cur_time, mem_used, label='mem_used')
plt.xticks(font_properties=font)
plt.yticks(font_properties=font)
plt.gca().set_xlabel('time', font_properties=font, fontsize=16)
plt.gca().set_ylabel('capacity', font_properties=font, fontsize=16)
plt.legend()
plt.title("status", font_properties=font, fontsize=20)
if show:
plt.show()
if saved_path:
plt.savefig(saved_path)
class Timer(object):
"""Computes elapsed time."""
def __init__(self, name):
self.name = name
self.running = True
self.total = 0
self.start = round(time.time(), 2)
self.intervalTime = round(time.time(), 2)
print("<> <> <> Starting Timer [{}] <> <> <>".format(self.name))
def reset(self):
self.running = True
self.total = 0
self.start = round(time.time(), 2)
return self
def interval(self, intervalName=''):
intervalTime = self._to_hms(round(time.time() - self.intervalTime, 2))
print("<> <> Timer [{}] <> <> Interval [{}]: {} <> <>".format(
self.name, intervalName, intervalTime))
self.intervalTime = round(time.time(), 2)
return intervalTime
def stop(self):
if self.running:
self.running = False
self.total += round(time.time() - self.start, 2)
return self
def resume(self):
if not self.running:
self.running = True
self.start = round(time.time(), 2)
return self
def time(self):
if self.running:
return round(self.total + time.time() - self.start, 2)
return self.total
def finish(self):
if self.running:
self.running = False
self.total += round(time.time() - self.start, 2)
elapsed = self._to_hms(self.total)
print("<> <> <> Finished Timer [{}] <> <> <> Total time elapsed: {} <> <> <>".format(self.name, elapsed))
return elapsed
def _to_hms(self, seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%dh %02dm %02ds" % (h, m, s)
class Dict2Obj():
"""
Convert a nested dict into an object, replacing key access with attribute access
>>> t = Dict2Obj()
>>> t.x1 = 3e-5
>>> t.x2.x21 = [8]
>>> t.x2.x22 = 16
>>> t.update({
>>> 'x3': 0.1,
>>> 'x2': {'x22': 32, 'x23': 64},
>>> 'x4': {'x41':'yyy'}
>>> })
>>> t.toDict() # {'x1': 3e-05, 'x2': {'x21': [8], 'x22': 32, 'x23': 64},
>>> # 'x3': 0.1, 'x4': {'x41': 'yyy'}}
>>> print(t) # str of t.toDict()
"""
def __init__(self, init_dict=None):
if init_dict:
for key, value in init_dict.items():
if self._is_valid(key):
if type(value) is dict:
self.__setattr__(key, Dict2Obj(value))
else:
self.__setattr__(key, value)
def __getattr__(self, key):
if self._is_valid(key):
self.__setattr__(key, Dict2Obj({}))
return self.__getattribute__(key)
def __repr__(self):
return str(self.toDict())
def update(self, aux_dict):
for key, value in aux_dict.items():
if self._is_valid(key):
if type(value) is dict:
if hasattr(self, key):
self.__getattribute__(key).update(value)
else:
self.__getattr__(key).update(value)
else:
self.__setattr__(key, value)
def _is_valid(self, key):
if type(key) is str and re.match(r'[a-zA-Z_][0-9a-zA-Z_]*', key):
return True
raise ValueError(f'{key} is not a valid variable, please check manually')
def toDict(self):
target = {}
for key, value in self.__dict__.items():
if type(value) is not Dict2Obj:
target[key] = value
else:
target[key] = value.toDict()
return target
| 37.823699 | 124 | 0.557729 |
3f79199a33594e121ee05d29efc7a570ed3e1d59
| 1,482 |
py
|
Python
|
tests/integration/goldens/asset/samples/generated_samples/cloudasset_generated_asset_v1_asset_service_list_assets_async.py
|
major/gapic-generator-python
|
68515c4c1444875f151a971b595e9dc837ddf47c
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/goldens/asset/samples/generated_samples/cloudasset_generated_asset_v1_asset_service_list_assets_async.py
|
major/gapic-generator-python
|
68515c4c1444875f151a971b595e9dc837ddf47c
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/goldens/asset/samples/generated_samples/cloudasset_generated_asset_v1_asset_service_list_assets_async.py
|
major/gapic-generator-python
|
68515c4c1444875f151a971b595e9dc837ddf47c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListAssets
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-asset
# [START cloudasset_generated_asset_v1_AssetService_ListAssets_async]
from google.cloud import asset_v1
async def sample_list_assets():
"""Snippet for list_assets"""
# Create a client
client = asset_v1.AssetServiceAsyncClient()
# Initialize request argument(s)
request = asset_v1.ListAssetsRequest(
)
# Make the request
page_result = client.list_assets(request=request)
async for response in page_result:
print("{}".format(response))
# [END cloudasset_generated_asset_v1_AssetService_ListAssets_async]
| 32.217391 | 85 | 0.755735 |
14cc65e066578a0c51ba70fcca698f8d9e9a5005
| 126 |
py
|
Python
|
docs/xref_map.py
|
jabooth/menpodetect
|
fd3c52a87a2327a2f0ad17fdd1fd1861bc9f4330
|
[
"BSD-3-Clause"
] | 27 |
2015-03-14T22:54:13.000Z
|
2021-03-14T04:37:12.000Z
|
docs/xref_map.py
|
jabooth/menpodetect
|
fd3c52a87a2327a2f0ad17fdd1fd1861bc9f4330
|
[
"BSD-3-Clause"
] | 11 |
2015-04-13T08:31:36.000Z
|
2021-03-09T06:14:45.000Z
|
docs/xref_map.py
|
jabooth/menpodetect
|
fd3c52a87a2327a2f0ad17fdd1fd1861bc9f4330
|
[
"BSD-3-Clause"
] | 28 |
2015-02-02T16:50:03.000Z
|
2020-05-14T06:51:17.000Z
|
xref_map = {
'as_vector': ('function', 'menpo.base.Vectorizable.as_vector'),
'Affine': ('class', 'menpo.transform.Affine'),
}
| 25.2 | 63 | 0.68254 |
4a058d51d256f1621e5245dd9fedb9b54ad36fab
| 5,320 |
py
|
Python
|
pullbug/github_bug.py
|
ncr4/pullbug
|
393722558b2e997051fe1c86ea04951f635ef5e7
|
[
"MIT"
] | null | null | null |
pullbug/github_bug.py
|
ncr4/pullbug
|
393722558b2e997051fe1c86ea04951f635ef5e7
|
[
"MIT"
] | null | null | null |
pullbug/github_bug.py
|
ncr4/pullbug
|
393722558b2e997051fe1c86ea04951f635ef5e7
|
[
"MIT"
] | null | null | null |
import os
import requests
import logging
from pullbug.logger import PullBugLogger
from pullbug.messages import Messages
GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
GITHUB_OWNER = os.getenv('GITHUB_OWNER')
GITHUB_HEADERS = {
'Authorization': f'token {GITHUB_TOKEN}',
'Content-Type': 'application/json; charset=utf-8'
}
LOGGER = logging.getLogger(__name__)
class GithubBug():
@classmethod
def run(cls, github_owner, github_state, github_context, wip, slack, rocketchat):
"""Run the logic to get PR's from GitHub and
send that data via message.
"""
PullBugLogger._setup_logging(LOGGER)
repos = cls.get_repos(github_owner, github_context)
pull_requests = cls.get_pull_requests(repos, github_owner, github_state)
message_preamble = ''
if pull_requests == []:
message = 'No pull requests are available from GitHub.'
LOGGER.info(message)
return message
message_preamble = '\n:bug: *The following pull requests on GitHub are still open and need your help!*\n'
pull_request_messages = cls.iterate_pull_requests(pull_requests, wip)
final_message = message_preamble + pull_request_messages
if slack:
Messages.slack(final_message)
if rocketchat:
Messages.rocketchat(final_message)
LOGGER.info(final_message)
@classmethod
def get_repos(cls, github_owner, github_context=''):
"""Get all repos of the GITHUB_OWNER.
"""
LOGGER.info('Bugging GitHub for repos...')
try:
repos_response = requests.get(
f'https://api.github.com/{github_context}/{github_owner}/repos?per_page=100',
headers=GITHUB_HEADERS
)
LOGGER.debug(repos_response.text)
if 'Not Found' in repos_response.text:
error = f'Could not retrieve GitHub repos due to bad parameter: {github_owner} | {github_context}.'
LOGGER.error(error)
raise ValueError(error)
LOGGER.info('GitHub repos retrieved!')
except requests.exceptions.RequestException as response_error:
LOGGER.error(
f'Could not retrieve GitHub repos: {response_error}'
)
raise requests.exceptions.RequestException(response_error)
return repos_response.json()
@classmethod
def get_pull_requests(cls, repos, github_owner, github_state):
"""Grab all pull requests from each repo.
"""
LOGGER.info('Bugging GitHub for pull requests...')
pull_requests = []
for repo in repos:
try:
pull_response = requests.get(
f'https://api.github.com/repos/{github_owner}/{repo["name"]}/pulls?state={github_state}&per_page=100', # noqa
headers=GITHUB_HEADERS
)
LOGGER.debug(pull_response.text)
if pull_response.json():
for single_pull_request in pull_response.json():
pull_requests.append(single_pull_request)
else:
continue
except requests.exceptions.RequestException as response_error:
LOGGER.error(
f'Could not retrieve GitHub pull requests for {repo["name"]}: {response_error}'
)
raise requests.exceptions.RequestException(response_error)
except TypeError:
error = f'Could not retrieve GitHub pull requests due to bad parameter: {github_owner} | {github_state}.' # noqa
LOGGER.error(error)
raise TypeError(error)
LOGGER.info('Pull requests retrieved!')
return pull_requests
@classmethod
def iterate_pull_requests(cls, pull_requests, wip):
"""Iterate through each pull request of a repo
and send a message to Slack if a PR exists.
"""
final_message = ''
for pull_request in pull_requests:
if not wip and 'WIP' in pull_request['title'].upper():
continue
else:
message = cls.prepare_message(pull_request)
final_message += message
return final_message
@classmethod
def prepare_message(cls, pull_request):
"""Prepare the message with pull request data.
"""
# TODO: Check requested_reviewers array also
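# build a Slack-style '<url|login>' link for every assignee; fall back to 'No assignee' when the list is empty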
try:
if pull_request['assignees'][0]['login']:
users = ''
for assignee in pull_request['assignees']:
user = f"<{assignee['html_url']}|{assignee['login']}>"
users += user + ' '
else:
users = 'No assignee'
except IndexError:
users = 'No assignee'
# Truncate description after 120 characters
description = (pull_request['body'][:120] + '...') if len(pull_request['body']) > 120 else pull_request['body']
message = f"\n:arrow_heading_up: *Pull Request:* <{pull_request['html_url']}|" + \
f"{pull_request['title']}>\n*Description:* {description}\n*Waiting on:* {users}\n"
return message
| 41.24031 | 130 | 0.594549 |
507985eca12500a1cd436b5e7cd8591cd816abd7
| 2,907 |
py
|
Python
|
pyrevolve/spec/msgs/robot_pb2.py
|
MRebolle/Battery-Robot
|
1b97e8c77cf7eff7d5cc7e417b4e5ec97e4011e7
|
[
"Apache-1.1"
] | null | null | null |
pyrevolve/spec/msgs/robot_pb2.py
|
MRebolle/Battery-Robot
|
1b97e8c77cf7eff7d5cc7e417b4e5ec97e4011e7
|
[
"Apache-1.1"
] | null | null | null |
pyrevolve/spec/msgs/robot_pb2.py
|
MRebolle/Battery-Robot
|
1b97e8c77cf7eff7d5cc7e417b4e5ec97e4011e7
|
[
"Apache-1.1"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: robot.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import body_pb2 as body__pb2
from . import neural_net_pb2 as neural__net__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='robot.proto',
package='revolve.msgs',
syntax='proto2',
serialized_pb=_b('\n\x0brobot.proto\x12\x0crevolve.msgs\x1a\nbody.proto\x1a\x10neural_net.proto\"a\n\x05Robot\x12\n\n\x02id\x18\x01 \x02(\x05\x12 \n\x04\x62ody\x18\x02 \x02(\x0b\x32\x12.revolve.msgs.Body\x12*\n\x05\x62rain\x18\x03 \x02(\x0b\x32\x1b.revolve.msgs.NeuralNetwork')
,
dependencies=[body__pb2.DESCRIPTOR,neural__net__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ROBOT = _descriptor.Descriptor(
name='Robot',
full_name='revolve.msgs.Robot',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='revolve.msgs.Robot.id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='body', full_name='revolve.msgs.Robot.body', index=1,
number=2, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='brain', full_name='revolve.msgs.Robot.brain', index=2,
number=3, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=156,
)
_ROBOT.fields_by_name['body'].message_type = body__pb2._BODY
_ROBOT.fields_by_name['brain'].message_type = neural__net__pb2._NEURALNETWORK
DESCRIPTOR.message_types_by_name['Robot'] = _ROBOT
Robot = _reflection.GeneratedProtocolMessageType('Robot', (_message.Message,), dict(
DESCRIPTOR = _ROBOT,
__module__ = 'robot_pb2'
# @@protoc_insertion_point(class_scope:revolve.msgs.Robot)
))
_sym_db.RegisterMessage(Robot)
# @@protoc_insertion_point(module_scope)
| 32.662921 | 279 | 0.74785 |
2be4eab93ba28742b8192a138250b621964e6c61
| 409 |
py
|
Python
|
drf_vue_template/wsgi.py
|
soltanoff/drf_vue_template
|
2269f045fc5557bbca168a806d7ca37a7298837a
|
[
"MIT"
] | null | null | null |
drf_vue_template/wsgi.py
|
soltanoff/drf_vue_template
|
2269f045fc5557bbca168a806d7ca37a7298837a
|
[
"MIT"
] | 1 |
2021-10-04T05:38:08.000Z
|
2021-10-05T07:20:59.000Z
|
drf_vue_template/wsgi.py
|
soltanoff/drf_vue_template
|
2269f045fc5557bbca168a806d7ca37a7298837a
|
[
"MIT"
] | 2 |
2019-09-25T10:22:26.000Z
|
2020-07-29T16:34:20.000Z
|
"""
WSGI config for drf_vue_template project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drf_vue_template.settings')
application = get_wsgi_application()
| 24.058824 | 78 | 0.794621 |
dc9911850529e6fd28a0989d47a2105b82be08e1
| 264 |
py
|
Python
|
PhysicsTools/HepMCCandAlgos/python/allMuonsGenParticlesMatch_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852 |
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
PhysicsTools/HepMCCandAlgos/python/allMuonsGenParticlesMatch_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371 |
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
PhysicsTools/HepMCCandAlgos/python/allMuonsGenParticlesMatch_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240 |
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
allMuonsGenParticlesMatch = cms.EDFilter("MCTruthDeltaRMatcher",
src = cms.InputTag("allMuons"),
distMin = cms.double(0.15),
matchPDGId = cms.vint32(13),
matched = cms.InputTag("genParticleCandidates")
)
| 24 | 64 | 0.727273 |
ef4d5f6322b7ae79b051795b5af7e6f7f1e55550
| 2,992 |
py
|
Python
|
tensorflow/compiler/tests/bucketize_op_test.py
|
elielhojman/tensorflow
|
163aae337c875efce2518c3cd0fecb61968fe408
|
[
"Apache-2.0"
] | 8 |
2017-03-20T12:04:21.000Z
|
2021-06-24T20:34:30.000Z
|
tensorflow/compiler/tests/bucketize_op_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 4 |
2019-08-14T22:32:51.000Z
|
2020-03-09T14:59:18.000Z
|
tensorflow/compiler/tests/bucketize_op_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 4 |
2019-11-11T13:46:27.000Z
|
2020-03-14T05:36:53.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bucketize_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BucketizationOpTest(xla_test.XLATestCase):
def testInt(self):
with self.test_session() as sess:
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
self.assertAllEqual(expected_out,
sess.run(op, {p: [-5, 0, 2, 3, 5, 8, 10, 11, 12]}))
def testFloat(self):
with self.test_session() as sess:
p = array_ops.placeholder(dtypes.float32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
self.assertAllEqual(
expected_out,
sess.run(op, {p: [-5., 0., 2., 3., 5., 8., 10., 11., 12.]}))
def test2DInput(self):
with self.test_session() as sess:
p = array_ops.placeholder(dtypes.float32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
self.assertAllEqual(
expected_out, sess.run(op,
{p: [[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]}))
def testInvalidBoundariesOrder(self):
with self.test_session() as sess:
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 8, 3, 11])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Expected sorted boundaries"):
sess.run(op, {p: [-5, 0]})
def testBoundariesNotList(self):
with self.test_session():
with self.assertRaisesRegexp(TypeError, "Expected list.*"):
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
math_ops._bucketize(p, boundaries=0)
if __name__ == "__main__":
test.main()
| 37.873418 | 80 | 0.64004 |
721bd1bc8f48bc8577268df664009d754a151acb
| 80 |
py
|
Python
|
Python/car.py
|
andresrueda90/POO-Curso-Platiz
|
aa2d5fd408a998f70b0bc9d8b3d55015d9a5edf9
|
[
"Apache-2.0"
] | null | null | null |
Python/car.py
|
andresrueda90/POO-Curso-Platiz
|
aa2d5fd408a998f70b0bc9d8b3d55015d9a5edf9
|
[
"Apache-2.0"
] | null | null | null |
Python/car.py
|
andresrueda90/POO-Curso-Platiz
|
aa2d5fd408a998f70b0bc9d8b3d55015d9a5edf9
|
[
"Apache-2.0"
] | null | null | null |
class Car:
id: int
license: str
driver: str
passenger: int
| 16 | 21 | 0.575 |
cbc91cde3d68ce38a6389f1b1437e7063a1c86fc
| 1,038 |
py
|
Python
|
var/spack/repos/builtin/packages/py-azure-keyvault-secrets/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9 |
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/py-azure-keyvault-secrets/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 907 |
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/py-azure-keyvault-secrets/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 29 |
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAzureKeyvaultSecrets(PythonPackage):
"""Microsoft Azure Key Vault Secrets Client Library for Python."""
homepage = "https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/keyvault/azure-keyvault-secrets"
url = "https://pypi.io/packages/source/a/azure-keyvault-secrets/azure-keyvault-secrets-4.1.0.zip"
version('4.1.0', sha256='4f3bfac60e025e01dd1c1998b73649d45d706975356c0cf147174cf5a6ddf8be')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:1.999', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-azure-keyvault-nspkg', when='^python@:2', type=('build', 'run'))
depends_on('[email protected]:', when='^python@:3.3', type=('build', 'run'))
depends_on('py-typing', when='^python@:3.4', type=('build', 'run'))
| 49.428571 | 110 | 0.702312 |
02114772f45ee06185341627dc2a244badc0466d
| 1,354 |
py
|
Python
|
codiga/common.py
|
codiga/clitool
|
bc138668d603f5fd1f6caaf89f683974a0f8e2b7
|
[
"BSD-2-Clause"
] | 2 |
2021-04-28T18:56:05.000Z
|
2021-07-25T12:10:46.000Z
|
codiga/common.py
|
codiga/clitool
|
bc138668d603f5fd1f6caaf89f683974a0f8e2b7
|
[
"BSD-2-Clause"
] | 2 |
2019-04-25T02:32:34.000Z
|
2019-07-13T20:49:51.000Z
|
codiga/common.py
|
codeinspectorio/citool
|
61fd033015080e4d2b100f120f4bf137bf85b082
|
[
"BSD-2-Clause"
] | 2 |
2021-05-25T16:36:13.000Z
|
2021-10-04T23:03:20.000Z
|
import logging
log = logging.getLogger('codiga')
GRADE_EXCELLENT = "EXCELLENT"
GRADE_GOOD = "GOOD"
GRADE_NEUTRAL = "NEUTRAL"
GRADE_WARNING = "WARNING"
GRADE_CRITICAL = "CRITICAL"
GRADE_UNKNOWN = "UNKNOWN"
GRADE_UNAVAILABLE = "UNAVAILABLE"
def is_grade_lower(grade, minimum_grade):
"""
Return whether the given grade is lower than the given minimum grade.
:param grade: the current grade
:param minimum_grade: minimum grade to expect.
:return:
"""
grade = grade.upper()
minimum_grade = minimum_grade.upper()
if grade == GRADE_EXCELLENT:
return False
if grade == GRADE_GOOD:
if minimum_grade in [GRADE_EXCELLENT]:
return True
return False
if grade == GRADE_NEUTRAL:
if minimum_grade in [GRADE_EXCELLENT, GRADE_GOOD]:
return True
return False
if grade == GRADE_WARNING:
if minimum_grade in [GRADE_EXCELLENT, GRADE_GOOD, GRADE_NEUTRAL]:
return True
return False
if grade == GRADE_CRITICAL:
if minimum_grade in [GRADE_EXCELLENT, GRADE_GOOD, GRADE_NEUTRAL, GRADE_WARNING]:
return True
return False
if grade == GRADE_UNKNOWN:
if minimum_grade in [GRADE_UNKNOWN]:
return False
return True
if grade == GRADE_UNAVAILABLE:
return False
return False
| 23.754386 | 88 | 0.654357 |
1746c2dcab28cfdedbf3b99376eae1e2fd341295
| 648 |
py
|
Python
|
dashboard/dashboard/oauth2_decorator.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 1 |
2019-11-01T23:31:22.000Z
|
2019-11-01T23:31:22.000Z
|
dashboard/dashboard/oauth2_decorator.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 6 |
2020-07-19T21:51:44.000Z
|
2022-02-13T08:22:58.000Z
|
dashboard/dashboard/oauth2_decorator.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 1 |
2020-07-24T18:22:03.000Z
|
2020-07-24T18:22:03.000Z
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides oauth2 decorators in a mockable way."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from oauth2client.appengine import OAuth2Decorator
from dashboard.common import utils
DECORATOR = OAuth2Decorator(
client_id='425761728072.apps.googleusercontent.com',
client_secret='9g-XlmEFW8ROI01YY6nrQVKq',
scope=utils.EMAIL_SCOPE,
message='Oauth error occurred!',
callback_path='/oauth2callback')
| 32.4 | 72 | 0.79321 |
a9ea9b2a04859c44152ab27bc664721ebc0fa98f
| 2,924 |
py
|
Python
|
tests/compilation/request/test_request_body_compiled_urlencoded.py
|
lasta/preacher
|
5e50f8eb930fac72a788e7614eb5a85903f7bde6
|
[
"MIT"
] | null | null | null |
tests/compilation/request/test_request_body_compiled_urlencoded.py
|
lasta/preacher
|
5e50f8eb930fac72a788e7614eb5a85903f7bde6
|
[
"MIT"
] | null | null | null |
tests/compilation/request/test_request_body_compiled_urlencoded.py
|
lasta/preacher
|
5e50f8eb930fac72a788e7614eb5a85903f7bde6
|
[
"MIT"
] | null | null | null |
from unittest.mock import NonCallableMock, sentinel
from pytest import raises
from preacher.compilation.error import CompilationError, NamedNode
from preacher.compilation.request.request_body import RequestBodyCompiled
from preacher.compilation.request.request_body import UrlencodedRequestBodyCompiled
PKG = 'preacher.compilation.request.request_body'
def test_replace_given_another_type():
original = UrlencodedRequestBodyCompiled()
other = NonCallableMock(RequestBodyCompiled)
replaced = original.replace(other)
assert replaced is other
def test_replace_given_the_same_type():
original = UrlencodedRequestBodyCompiled(data=sentinel.original_data)
other = UrlencodedRequestBodyCompiled()
replaced = original.replace(other)
assert isinstance(replaced, UrlencodedRequestBodyCompiled)
assert replaced.data is sentinel.original_data
other = UrlencodedRequestBodyCompiled(data=sentinel.new_data)
replaced = original.replace(other)
assert isinstance(replaced, UrlencodedRequestBodyCompiled)
assert replaced.data is sentinel.new_data
def test_compile_and_replace_empty():
default = UrlencodedRequestBodyCompiled(data=sentinel.original_data)
compiled = default.compile_and_replace({})
assert isinstance(compiled, UrlencodedRequestBodyCompiled)
assert compiled.data is sentinel.original_data
def test_compile_and_replace_given_invalid_data(mocker):
compile_params = mocker.patch(f'{PKG}.compile_url_params')
compile_params.side_effect = CompilationError('m', node=NamedNode('x'))
default = UrlencodedRequestBodyCompiled(data=sentinel.original_data)
with raises(CompilationError) as error_info:
default.compile_and_replace({'data': sentinel.data})
assert error_info.value.path == [NamedNode('data'), NamedNode('x')]
compile_params.assert_called_once_with(sentinel.data)
def test_compile_and_replace_given_valid_data(mocker):
compile_params = mocker.patch(f'{PKG}.compile_url_params')
compile_params.return_value = sentinel.params
default = UrlencodedRequestBodyCompiled(data=sentinel.original_data)
compiled = default.compile_and_replace({'data': sentinel.data})
assert isinstance(compiled, UrlencodedRequestBodyCompiled)
assert compiled.data is sentinel.params
compile_params.assert_called_once_with(sentinel.data)
def test_fix_empty(mocker):
ctor = mocker.patch(f'{PKG}.UrlencodedRequestBody', return_value=sentinel.fixed)
compiled = UrlencodedRequestBodyCompiled()
fixed = compiled.fix()
assert fixed is sentinel.fixed
ctor.assert_called_once_with(params={})
def test_fix_filled(mocker):
ctor = mocker.patch(f'{PKG}.UrlencodedRequestBody', return_value=sentinel.fixed)
compiled = UrlencodedRequestBodyCompiled(data=sentinel.data)
fixed = compiled.fix()
assert fixed is sentinel.fixed
ctor.assert_called_once_with(params=sentinel.data)
| 36.098765 | 84 | 0.794118 |
c094913d513179f9f8714e66fa7bf720f180bbc1
| 47,791 |
py
|
Python
|
nipy/labs/group/spatial_relaxation_onesample.py
|
neurospin/nipy
|
cc54600a0dca1e003ad393bc05c46f91eef30a68
|
[
"BSD-3-Clause"
] | 1 |
2016-03-08T15:01:06.000Z
|
2016-03-08T15:01:06.000Z
|
nipy/labs/group/spatial_relaxation_onesample.py
|
fabianp/nipy
|
40e89f3ca7f34df05631623807993026134e6de3
|
[
"BSD-3-Clause"
] | null | null | null |
nipy/labs/group/spatial_relaxation_onesample.py
|
fabianp/nipy
|
40e89f3ca7f34df05631623807993026134e6de3
|
[
"BSD-3-Clause"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#####################################################################################
# BAYESIAN MODEL SELECTION FOR ACTIVATION DETECTION ON FMRI GROUP DATA
# Merlin Keller, 2009
import numpy as np
import scipy.special as sp
from routines import add_lines
from displacement_field import displacement_field
#####################################################################################
# some useful functions
def log_gammainv_pdf(x, a, b):
"""
log density of the inverse gamma distribution with shape a and scale b,
at point x, using Stirling's approximation for a > 100
"""
return a * np.log(b) - sp.gammaln(a) - (a + 1) * np.log(x) - b / x
def log_gaussian_pdf(x, m, v):
"""
log density of the gaussian distribution with mean m and variance v at point x
"""
return -0.5 * (np.log(2 * np.pi * v) + (x - m)**2 / v)
#####################################################################################
# spatial relaxation multivariate statistic class
class multivariate_stat(object):
def __init__(self, data, vardata=None, XYZ=None, std=None, sigma=None,
labels=None, network=None, v_shape=3, v_scale=20,
std_shape=3, std_scale=20, m_mean_rate=1e-3,
m_var_shape=3, m_var_scale=20, disp_mask=None,
labels_prior=None, label_values=None, labels_prior_mask=None):
"""
Multivariate modeling of fMRI group data accounting for spatial uncertainty
In: data (n,p) estimated effects
vardata (n,p) variances of estimated effects
XYZ (3,p) voxel coordinates
std <float> Initial guess for the standard deviation of spatial displacements
sigma <float> regularity of displacement field
labels (p,) labels defining regions of interest
network (N,) binary region labels (1 for active, 0 for inactive)
v_shape <float> intensity variance prior shape
v_scale <float> intensity variance prior scale
std_shape <float> spatial standard error prior shape
std_scale <float> spatial standard error prior scale
m_mean_rate <float> mean effect prior rate
m_var_shape <float> effect variance prior shape
m_var_scale <float> effect variance prior scale
disp_mask (q,) mask of the brain, to limit displacements
labels_prior (M,r) prior on voxelwise region membership
labels_prior_values (M,r) voxelwise label values where prior is defined
labels_prior_mask (r,) Mask of voxels where a label prior is defined
"""
self.data = data
if vardata != None and vardata.max() == 0:
self.vardata = None
else:
self.vardata = vardata
self.std = std
self.sigma = sigma
self.labels = labels
self.network = network
self.v_shape = v_shape
self.v_scale = v_scale
self.std_shape = std_shape
self.std_scale = std_scale
n, p = data.shape
if labels == None:
self.labels = np.zeros(p, int)
M = self.labels.max() + 1
if network == None:
self.network = np.ones(M, int)
if np.isscalar(m_mean_rate):
self.m_mean_rate = np.zeros(M, float) + m_mean_rate
else:
self.m_mean_rate = m_mean_rate
if np.isscalar(m_var_shape):
self.m_var_shape = np.zeros(M, float) + m_var_shape
else:
self.m_var_shape = m_var_shape
if np.isscalar(m_var_scale):
self.m_var_scale = np.zeros(M, float) + m_var_scale
else:
self.m_var_scale = m_var_scale
if std != None:
self.D = displacement_field(XYZ, sigma, data.shape[0], disp_mask)
self.labels_prior = labels_prior
self.label_values = label_values
self.labels_prior_mask = labels_prior_mask
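# Illustrative usage sketch (editorial, with hypothetical array shapes):
# stat = multivariate_stat(data, vardata, XYZ, std=1.0, sigma=5.0)
# stat.init_hidden_variables(mode='saem')
# stat.evaluate(nsimu=100, burnin=100, verbose=True, mode='saem')
# fits the model by SAEM; passing mode='mcmc' samples the posterior instead.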
def init_hidden_variables(self, mode='saem', init_spatial=True):
n, p = self.data.shape
self.X = self.data.copy()
self.m = self.X.mean(axis=0)
#self.v = np.square(self.X - self.m).mean()
N = len(self.network)
self.m_mean = np.zeros(N, float)
self.m_var = np.zeros(N, float)
self.v = np.zeros(N, float)
#self.s0 = np.zeros(N, float)
#self.S0 = np.zeros(N, float)
self.s1 = np.zeros(N, float)
self.S1 = np.zeros(N, float)
self.s2 = np.zeros(N, float)
self.S2 = np.zeros(N, float)
self.s3 = np.zeros(N, float)
self.S3 = np.zeros(N, float)
self.s6 = np.zeros(N, float)
for j in xrange(N):
self.s6[j] = (self.labels == j).sum()
self.S6 = self.s6.copy()
self.m_var_post_scale = np.zeros(N, float)
if init_spatial and self.std != None:
B = len(self.D.block)
if B == 0:
self.std = None
else:
self.R = np.zeros((n, B), int)
self.N = np.ones(p, float) * n
self.s4 = 0.0
self.S4 = 0.0
self.s5 = np.zeros(N, float)
self.S5 = np.zeros(N, float)
std = self.std
self.update_summary_statistics(init_spatial)
if mode == 'saem':
self.update_parameters_saem(init_spatial)
else:
self.update_parameters_mcmc(init_spatial)
self.std = std
def update_summary_statistics(self, w=1.0, update_spatial=True, mode='saem'):
n, p = self.data.shape
if self.std == None:
m = self.m
else:
m = self.m[self.D.I]
if update_spatial:
self.s4 = np.square(self.D.U).sum()
if mode == 'saem':
self.S4 += w * (self.s4 - self.S4)
if self.vardata == None:
SS = np.square(self.data - m) #/ self.v + np.log(2 * np.pi * self.v)
else:
SS = np.square(self.X - m) #/ self.vardata + np.log(2 * np.pi * self.vardata)
if self.std == None:
SS_sum = SS.sum(axis=0)
else:
SS_sum = np.zeros(p, float)
for i in xrange(n):
Ii = self.D.I[i]
SSi = SS[i].reshape(p, 1)
add_lines(SSi, SS_sum.reshape(p, 1), Ii)
for j in xrange(len(self.network)):
L = np.where(self.labels == j)[0]
self.s1[j] = SS_sum[L].sum()
if self.labels_prior != None:
self.s6[j] = len(L)
self.s2[j] = np.square(self.m[L]).sum()
if self.network[j] == 1:
self.s3[j] = self.m[L].sum()
if update_spatial and self.std != None:
self.s5[j] = self.N[L].sum()
if mode == 'saem':
self.S5 += w * (self.s5 - self.S5)
if mode == 'saem':
self.S1 += w * (self.s1 - self.S1)
self.S2 += w * (self.s2 - self.S2)
self.S3 += w * (self.s3 - self.S3)
if self.labels_prior != None:
self.S6 += w * (self.s6 - self.S6)
size = self.S6
sum_sq = self.S2
sum = self.S3
else:
size = self.S6
sum_sq = self.s2
sum = self.s3
# Update m_var post scale
# used to update parameters,
# and compute conditional posterior
rate = self.m_mean_rate
shape = self.m_var_shape
scale = self.m_var_scale
J = self.network == 1
N1 = J.sum()
if N1 > 0:
post_rate = rate[J] + size[J]
self.m_var_post_scale[J] = scale[J] + 0.5 * (sum_sq[J] - np.square(sum[J]) / post_rate)
if N1 < len(self.network):
self.m_var_post_scale[J==0] = scale[J==0] + 0.5 * sum_sq[J==0]
def update_parameters_saem(self, update_spatial=True):
n, p = self.data.shape
#self.v = (self.S1 + 2 * self.v_scale) / (n * p + 2 * (1 + self.v_shape))
size = self.S6
rate = self.m_mean_rate
shape = self.m_var_shape
scale = self.m_var_scale
if self.std == None:
N = n * size
else:
N = self.S5
if update_spatial:
#B = len(self.D.block)
self.std = np.sqrt(
(self.S4 + 2 * self.std_scale) / (self.D.U.size + 2 * self.std_shape + 2))
self.v = (self.S1 + 2 * self.v_scale) / (N + 2 * self.v_shape + 2)
J = self.network == 1
N1 = J.sum()
if N1 > 0:
self.m_mean[J] = self.S3[J] / (rate[J] + size[J])
self.m_var[J] = 2 * self.m_var_post_scale[J] / (size[J] + 2 * shape[J] + 3)
if N1 < len(self.network):
self.m_var[J==0] = 2 * self.m_var_post_scale[J==0] / (size[J==0] + 2 * shape[J==0] + 2)
def update_parameters_mcmc(self, update_spatial=True):
n, p = self.data.shape
#self.v = (self.s1 + 2 * self.v_scale) / np.random.chisquare(df = n * p + 2 * self.v_shape)
size = self.s6
rate = self.m_mean_rate
shape = self.m_var_shape
scale = self.m_var_scale
if self.std == None:
N = n * size
else:
N = self.s5
if update_spatial:
#B = len(self.D.block)
self.std = np.sqrt(
(self.s4 + 2*self.std_scale) / np.random.chisquare(df=self.D.U.size + 2*self.std_shape))
J = self.network == 1
if J.sum() > 0:
post_rate = rate[J] + size[J]
self.m_mean[J] = self.s3[J] / post_rate \
+ np.random.randn(J.sum()) * np.sqrt(self.m_var[J] / post_rate)
for j in xrange(len(self.network)):
self.v[j] = (self.s1[j] + 2 * self.v_scale) / np.random.chisquare(df = N[j] + 2 * self.v_shape)
self.m_var[j] = 2 * self.m_var_post_scale[j] / np.random.chisquare(df = size[j] + 2 * shape[j])
def update_displacements(self):
n, p = self.data.shape
B = len(self.D.block)
if self.proposal == 'prior':
for i in xrange(n):
for b in np.random.permutation(range(B)):
block = self.D.block[b]
A = self.update_block(i, b, 'prior', self.std)
elif self.proposal == 'rand_walk':
if np.isscalar(self.proposal_std):
for i in xrange(n):
for b in np.random.permutation(range(B)):
block = self.D.block[b]
A = self.update_block(i, b, 'rand_walk', self.proposal_std)
else:
for i in xrange(n):
for b in np.random.permutation(range(B)):
block = self.D.block[b]
A = self.update_block(i, b, 'rand_walk', self.proposal_std[:, i, b])
else:
for i in xrange(n):
for b in np.random.permutation(range(B)):
block = self.D.block[b]
A = self.update_block(i, b, 'fixed', self.proposal_std[:, i, b], self.proposal_mean[:, i, b])
self.N *= 0
ones = np.ones((p, 1), float)
for i in xrange(n):
Ii = self.D.I[i]
add_lines(ones, self.N.reshape(p, 1), Ii)
if self.verbose:
print "mean rejected displacements :", self.R.mean(axis=0)
def update_block(self, i, b, proposal='prior', proposal_std=None,
proposal_mean=None, verbose=False, reject_override=False):
block = self.D.block[b]
if verbose:
print 'sampling field', i, 'block', b
# Propose new displacement
U, V, L, W, I = self.D.sample(i, b, proposal, proposal_std,
proposal_mean)
Uc = self.D.U[:, i, b]
Ic = self.D.I[i, L]
# log acceptance rate
mc = self.m[Ic]
m = self.m[I]
vc = self.v[self.labels[Ic]]
v = self.v[self.labels[I]]
#A = ((mc - m) * (mc + m - 2 * self.X[i, L])).sum() / self.v
A = (np.log(v) - np.log(vc)
+ (self.X[i, L] - mc)**2 / vc
- (self.X[i, L] - m)**2 / v).sum()
if not proposal == 'prior':
A += (Uc**2 - U**2).sum() / self.std**2
if proposal == 'fixed':
if proposal_std.max() == 0:
A = np.inf
else:
A += ((U - Uc) * (U + Uc - 2 * proposal_mean) / proposal_std**2).sum()
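# Metropolis-Hastings step: the proposed displacement is accepted with probability
# min(1, exp(A / 2)); R records a rejection, in which case the current value is kept.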
self.R[i, b] = np.random.uniform() > np.exp(0.5 * A)
if self.R[i, b] == 0 and not reject_override:
self.D.U[:, i, b] = U
self.D.V[:, i, block] = V
if len(L)> 0:
self.D.W[:, i, L] = W
self.D.I[i, L] = I
return A
def update_effects(self, T=1.0):
"""
T is a temperature used to compute log posterior density
by simulated annealing
"""
n, p = self.data.shape
if self.std == None:
m = self.m
v = self.v[self.labels]
else:
m = self.m[self.D.I]
v = self.v[self.labels[self.D.I]]
#tot_var = self.v + self.vardata
#cond_mean = (self.v * self.data + self.vardata * m) / tot_var
#cond_var = self.v * self.vardata / tot_var
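# Conjugate Gaussian update: the latent effects X are drawn from their full
# conditional, whose mean precision-weights the observed data against the (possibly
# warped) population mean m; the temperature T rescales the conditional variance.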
tot_var = v + self.vardata
cond_mean = (v * self.data + self.vardata * m) / tot_var
cond_var = T * v * self.vardata / tot_var
self.X = cond_mean + np.random.randn(n, p) * np.sqrt(cond_var)
def update_mean_effect(self, T=1.0):
"""
T is a temperature used to compute log posterior density
by simulated annealing
"""
n, p = self.data.shape
X_sum = np.zeros(p, float)
if self.std == None:
X_sum = self.X.sum(axis=0)
else:
#self.N *= 0
#ones = np.ones((p, 1), float)
for i in xrange(n):
Ii = self.D.I[i]
XI = self.X[i].reshape(p, 1)
add_lines(XI, X_sum.reshape(p, 1), Ii)
#add_lines(ones, self.N.reshape(p, 1), Ii)
for j in xrange(len(self.network)):
L = np.where(self.labels == j)[0]
m_var = self.m_var[j] * T
v = self.v[j] * T
if self.std == None:
#tot_var = self.v + m_var * n
tot_var = v + m_var * n
else:
#tot_var = self.v + m_var * self.N[L]
tot_var = v + m_var * self.N[L]
#cond_mean = (X_sum[L] * m_var + self.v * self.m_mean[j]) / tot_var
#cond_std = np.sqrt(self.v * m_var / tot_var)
cond_mean = (X_sum[L] * m_var + v * self.m_mean[j]) / tot_var
cond_std = np.sqrt(v * m_var / tot_var)
self.m[L] = cond_mean + np.random.randn(len(L)) * cond_std
def update_labels(self):
N, r = self.labels_prior.shape
I = self.labels_prior_mask
m_mean = self.m_mean[self.label_values]
m_var = self.m_var[self.label_values]
L = (self.m[I].reshape(1, r) - m_mean)**2 / m_var
P = self.labels_prior * np.exp(-0.5 * L) / np.sqrt(m_var)
P_cumsum = P.cumsum(axis=0)
X = np.random.rand(r) * P_cumsum[-1]
labels = (X > P_cumsum).sum(axis=0)
self.labels[I] = self.label_values[labels, xrange(r)]
def evaluate(self, nsimu=1e3, burnin=100, J=None, verbose=False,
proposal='prior', proposal_std=None, proposal_mean=None,
compute_post_mean=False, mode='saem', update_spatial=True):
"""
Sample posterior distribution of model parameters, or compute their MAP estimator
In: nsimu <int> Number of samples drawn from posterior mean distribution
burnin <int> Number of discarded burn-in samples
J (N,) voxel indices where successive mean values are stored
verbose <bool> Print some information during the sampling process
proposal <str> 'prior', 'rand_walk' or 'fixed'
proposal_mean <float> Used for fixed proposal only
proposal_std <float> Used for random walk or fixed proposal
mode <str> if mode='saem', compute MAP estimates of model parameters.
if mode='mcmc', sample their posterior distribution
update_spatial <bool> when False, enables sampling conditional on spatial parameters
Out: self.m_values (N, nsimu+burnin) successive mean values (if J is not empty)
if self.labels_prior is not empty:
self.labels_post (M,r) posterior distribution of region labels
if self.std is not empty:
self.std_values (nsimu+burnin,) successive spatial standard deviation values
if compute_post_mean is True:
self.mean_m (p,) posterior average of mean effect
self.var_m (p,) posterior variance of mean effect
if self.std is not empty and compute_post_mean is True:
self.r (n, nblocks) mean rejection rate for each displacement field
self.mean_U (3, n, nblocks) posterior average of displacement weights
self.var_U (3, n, nblocks) posterior marginal variances of displacement weights
"""
#self.init_hidden_variables()
n, p = self.data.shape
self.nsimu = nsimu
self.burnin = burnin
self.J = J
self.verbose = verbose
self.proposal = proposal
self.proposal_mean = proposal_mean
self.proposal_std = proposal_std
self.compute_post_mean = compute_post_mean
#self.v_values = np.zeros(nsimu + burnin, float)
if J != None:
self.m_values = np.zeros((len(J), nsimu + burnin), float)
if self.std != None:
B = len(self.D.block)
if update_spatial:
self.std_values = np.zeros(nsimu + burnin, float)
if proposal == 'rand_walk':
self.proposal_std_values = np.zeros(nsimu + burnin, float)
if self.labels_prior != None:
self.labels_post = np.zeros(self.labels_prior.shape, float)
#Il = np.array(np.where(self.labels_prior > 0))
#r = len(self.labels_prior_mask)
if compute_post_mean:
sum_m = np.zeros(p, float)
sum_m_sq = np.zeros(p, float)
if mode == 'mcmc':
N = len(self.network)
self.P = np.zeros(N, float)
self.mean_m_mean = np.zeros(N, float)
self.mean_m_var = np.zeros(N, float)
self.mean_v = np.zeros(N, float)
if update_spatial and self.std != None:
self.r = np.zeros((n, B), float)
sum_U = np.zeros((3, n, B), float)
sum_U_sq = np.zeros((3, n, B), float)
niter = np.array([int(burnin), int(nsimu)])
for j in np.arange(2)[niter>0]:
if j == 0:
w = 1
if self.verbose:
print "Burn-in"
else:
if mode == 'saem':
if self.verbose:
print "Maximizing likelihood"
else:
if self.verbose:
print "Sampling posterior distribution"
for i in xrange(niter[j]):
if self.verbose:
if mode == 'saem':
print "SAEM",
else:
print "Gibbs",
print "iteration", i+1, "out of", niter[j]
# Gibbs iteration
#i += 1
if update_spatial and self.std != None:
self.update_displacements()
if j == 0 and self.proposal == 'rand_walk':
self.proposal_std = np.clip(self.proposal_std * (1 + 0.9) / (1 + self.R.mean()), 0.01, 10.0)
if self.vardata != None:
self.update_effects()
self.update_mean_effect()
if self.labels_prior != None:
self.update_labels()
if j == 1:
w = 1.0 / (i + 1)
self.update_summary_statistics(w, update_spatial, mode)
if mode == 'saem':
self.update_parameters_saem(update_spatial)
else:
self.update_parameters_mcmc(update_spatial)
if self.verbose:
print "population effect min variance value :", self.m_var.min()
# Update results
#self.v_values[i + self.burnin * j] = self.v
if update_spatial and self.std != None:
self.std_values[i + self.burnin * j] = self.std
if proposal == 'rand_walk':
self.proposal_std_values[i + self.burnin * j] = self.proposal_std
if self.J != None:
self.m_values[:, i + self.burnin * j] = self.m[self.J]
if j == 1 and self.labels_prior != None:
self.labels_post += \
self.label_values == self.labels[self.labels_prior_mask]
#self.labels_post[Il[0], Il[1]] += \
#self.label_values[Il[0], Il[1]] == self.labels[Il[0]]
if j == 1 and compute_post_mean:
sum_m += self.m
sum_m_sq += self.m**2
if mode == 'mcmc':
self.P += (self.m_mean > 0)
self.mean_m_mean += self.m_mean
self.mean_m_var += self.m_var
self.mean_v += self.v
if update_spatial and self.std != None:
self.r += self.R
sum_U += self.D.U
sum_U_sq += self.D.U**2
if j== 1 and self.labels_prior != None:
self.labels_post /= nsimu
if j == 1 and compute_post_mean:
self.mean_m = sum_m / float(self.nsimu)
self.var_m = sum_m_sq / float(self.nsimu) - self.mean_m**2
if mode == 'mcmc':
self.P /= float(self.nsimu)
self.mean_m_mean /= float(self.nsimu)
self.mean_m_var /= float(self.nsimu)
self.mean_v /= float(self.nsimu)
if update_spatial and self.std != None:
self.r /= float(self.nsimu)
self.mean_U = sum_U / float(self.nsimu)
self.var_U = sum_U_sq / float(self.nsimu) - self.mean_U**2
#####################################################################################
# MAP estimation of displacement fields
def estimate_displacements_SA(self, nsimu=100, c=0.99, proposal_std=None, verbose=False):
"""
MAP estimate of elementary displacements conditional on model parameters
"""
if proposal_std==None:
proposal_std = self.proposal_std
LL, self.Z, self.tot_var, self.SS1, self.SS2, self.SS3, self.SS4 =\
self.compute_log_voxel_likelihood(return_SS=True)
self.log_voxel_likelihood = LL
for i in xrange(nsimu):
if verbose:
print "SA iteration", i+1, "out of", nsimu
self.update_displacements_SA(c**i, proposal_std, verbose)
self.update_summary_statistics(w=1.0, update_spatial=True)
def update_displacements_SA(self, T=1.0, proposal_std=None, verbose=False):
n = self.data.shape[0]
B = len(self.D.block)
for i in xrange(n):
for b in np.random.permutation(range(B)):
#block = self.D.block[b]
A = self.update_block_SA(i, b, T, proposal_std, verbose)
if self.verbose:
print "mean rejected displacements :", self.R.mean(axis=0)
def compute_log_conditional_displacements_posterior(self, U=None, nsimu=100, burnin=100, proposal_std=None, verbose=False, change_U=False):
"""
Compute posterior log density of elementary displacements at point U, conditional on model parameters
"""
n = self.data.shape[0]
B = len(self.D.block)
if U == None:
U = self.D.U.copy()
if proposal_std == None:
proposal_std = self.proposal_std
LL, self.Z, self.tot_var, self.SS1, self.SS2, self.SS3, self.SS4 =\
self.compute_log_voxel_likelihood(return_SS=True)
self.log_voxel_likelihood = LL
if not change_U:
Uc = self.D.U.copy()
proposal_c = self.proposal
proposal_mean_c = self.proposal_mean
proposal_std_c = self.proposal_std.copy()
self.proposal = 'fixed'
self.proposal_mean = U
self.proposal_std = U * 0
self.update_displacements()
#Restore displacement parameters
self.proposal = proposal_c
self.proposal_mean = proposal_mean_c
self.proposal_std = proposal_std_c
self.update_summary_statistics(update_spatial=True, mode='mcmc')
L = 0.0
i,b = n-1, B-1
n_ib = n * B - i * B - b
nsimu_ib = nsimu / n_ib
burnin_ib = burnin / n_ib
A_values = np.zeros(nsimu_ib, float)
A2_values = np.zeros(nsimu_ib, float)
SS_values = np.zeros(nsimu_ib, float)
if verbose:
print 'Compute mean acceptance rate for block', i, b
print 'Burn-in'
if verbose:
print 'Sample acceptance rate values'
for s in xrange(nsimu / (n * B - i * B - b)):
if verbose:
print "SA iteration", s, "out of", nsimu / (n * B - i * B - b)
A_values[s] = self.update_block_SA(\
i, b, 1.0, proposal_std,
verbose=False, reject_override=True)
mean_acceptance = np.exp(A_values).clip(0,1).mean()
L -= np.log(mean_acceptance)
for i in range(n)[::-1]:
for b in range(B)[::-1]:
n_ib = n * B - i * B - b
nsimu_ib = nsimu / n_ib
burnin_ib = burnin / n_ib
A_values = np.zeros(nsimu_ib, float)
A2_values = np.zeros(nsimu_ib, float)
SS_values = np.zeros(nsimu_ib, float)
if verbose:
print 'Compute log conditional posterior for block', i, b
print 'Burn-in'
for s in xrange(burnin / n_ib):
if verbose:
print "SA iteration", s, "out of", burnin_ib
for bb in xrange(b, B):
A = self.update_block_SA(\
i, bb, 1.0, proposal_std, verbose=False)
for ii in xrange(i+1, n):
for bb in xrange(B):
A = self.update_block_SA(\
ii, bb, 1.0, proposal_std, verbose=False)
if verbose:
print 'Sample kernel and acceptance rate values'
for s in xrange(nsimu_ib):
if verbose:
print "SA iteration", s, "out of", nsimu_ib
for bb in xrange(b, B):
A = self.update_block_SA(\
i, bb, 1.0, proposal_std, verbose=False)
for ii in xrange(i+1, n):
for bb in xrange(B):
A = self.update_block_SA(\
ii, bb, 1.0, proposal_std, verbose=False)
A_values[s] = self.update_block_SA(\
i, b, 1.0, proposal_std*0, verbose=False, reject_override=True,
proposal='fixed', proposal_mean=U[:, i, b])
SS_values[s] = np.square(U[:, i, b] - self.D.U[:, i, b]).sum()
if b > 0:
A2_values[s] = self.update_block_SA(\
i, b-1, 1.0, proposal_std, verbose=False,
reject_override=True)
elif i > 0:
A2_values[s] = self.update_block_SA(\
i-1, B-1, 1.0, proposal_std, verbose=False,
reject_override=True)
mean_acceptance = np.exp(A2_values).clip(0,1).mean()
mean_kernel = \
(np.exp(A_values).clip(0,1) * \
np.exp( -0.5 * SS_values / proposal_std**2) \
/ (np.sqrt(2 * np.pi) * proposal_std)**3).mean()
L += np.log(mean_kernel) - np.log(mean_acceptance)*(i>0 or b>0)
if not change_U:
# Restore initial displacement value
self.proposal = 'fixed'
self.proposal_mean = Uc
self.proposal_std = Uc * 0
self.update_displacements()
self.proposal = proposal_c
self.proposal_mean = proposal_mean_c
self.proposal_std = proposal_std_c
self.update_summary_statistics(update_spatial=True, mode='mcmc')
return L
def update_block_SA(self, i, b, T=1.0, proposal_std=None, verbose=False, reject_override=False, proposal='rand_walk', proposal_mean=None):
"""
Update displacement block using simulated annealing scheme
with random-walk kernel
"""
if proposal_std==None:
proposal_std=self.std
block = self.D.block[b]
if verbose:
print 'sampling field', i, 'block', b
# Propose new displacement
U, V, L, W, I = self.D.sample(i, b, proposal, proposal_std * T, proposal_mean=proposal_mean)
Uc = self.D.U[:, i, b].copy()
#Vc = self.D.V[:, i, block].copy()
p = self.data.shape[1]
pL = len(L)
if pL > 0:
#Wc = self.D.W[:, i, L].copy()
Ic = self.D.I[i, L].copy()
J = np.unique(np.concatenate((I, Ic)))
q = len(J)
IJ = np.searchsorted(J, I)
IJc = np.searchsorted(J, Ic)
N = self.N[J].copy()
Zc = self.Z[i,L].copy()
tot_varc = self.tot_var[i,L].copy()
SS1 = self.SS1[J].copy()
SS2 = self.SS2[J].copy()
SS3 = self.SS3[J].copy()
SS4 = self.SS4[J].copy()
# log acceptance rate
#self.D.U[:, i, b] = U
#self.D.V[:, i, block] = V
#if pL > 0:
#self.D.W[:, i, L] = W
#self.D.I[i, L] = I
ones = np.ones((len(L), 1), float)
add_lines(-ones, N.reshape(q, 1), IJc)
add_lines(ones, N.reshape(q, 1), IJ)
Z = self.data[i,L] - self.m_mean[self.labels[I]]
if self.vardata == None:
tot_var = self.v[self.labels[I]] + np.zeros(len(L), float)
else:
tot_var = self.v[self.labels[I]] + self.vardata[i,L]
add_lines(\
-(1.0 / tot_varc).reshape(pL, 1),
SS1.reshape(q, 1),
IJc)
add_lines(\
(1.0 / tot_var).reshape(pL, 1),
SS1.reshape(q, 1),
IJ)
add_lines(\
-np.log(tot_varc).reshape(pL, 1),
SS2.reshape(q, 1),
IJc)
add_lines(\
np.log(tot_var).reshape(pL, 1),
SS2.reshape(q, 1),
IJ)
add_lines(\
-(Zc**2 / tot_varc).reshape(pL, 1),
SS3.reshape(q, 1),
IJc)
add_lines(\
(Z**2 / tot_var).reshape(pL, 1),
SS3.reshape(q, 1),
IJ)
add_lines(\
-(Zc / tot_varc).reshape(pL, 1),
SS4.reshape(q, 1),
IJc)
add_lines(\
(Z / tot_var).reshape(pL, 1),
SS4.reshape(q, 1),
IJ)
fc = self.log_voxel_likelihood[J]
f = - 0.5 * (\
N * np.log(2 * np.pi) + \
np.log(1 + self.m_var[self.labels[J]] * SS1) \
+ SS2 + SS3 - SS4**2 / \
(1 / self.m_var[self.labels[J]] + SS1))
else:
f = np.zeros(1)
fc = np.zeros(1)
A = (f - fc).sum() + 0.5 * (Uc**2 - U**2).sum() / self.std**2
self.R[i, b] = np.random.uniform() > np.exp(A / T)
if self.R[i, b] == 0 and not reject_override:
self.D.U[:, i, b] = U
self.D.V[:, i, block] = V
if len(L) > 0:
self.D.W[:, i, L] = W
self.D.I[i, L] = I
self.N[J] = N
self.Z[i,L] = Z
self.tot_var[i,L] = tot_var
self.SS1[J] = SS1
self.SS2[J] = SS2
self.SS3[J] = SS3
self.SS4[J] = SS4
self.log_voxel_likelihood[J] = f
return A
#####################################################################################
# Marginal likelihood computation for model selection
def compute_log_region_likelihood_slow(self, v=None, m_mean=None, m_var=None, verbose=False, J=None):
"""
Essentially maintained for debugging purposes
"""
if v == None:
v = self.v
if m_mean == None:
m_mean = self.m_mean
if m_var == None:
m_var = self.m_var
n, p = self.data.shape
nregions = len(self.network)
log_region_likelihood = np.zeros(nregions, float)
if J == None:
J = xrange(nregions)
if self.std == None:
nk = n
else:
I = self.D.I
argsort_I = np.argsort(I.ravel())
data_I = self.data.ravel()[argsort_I]
if self.vardata != None:
var_I = (self.vardata + v[self.labels[I]]).ravel()[argsort_I]
cumsum = np.zeros(p + 1, int)
cumsum[1:] = self.N.cumsum().astype(int)
for i in xrange(len(J)):
j = J[i]
if verbose:
print "computing log likelihood for region", i + 1, "out of", len(J)
m_var_j = self.m_var[j]
m_mean_j = self.m_mean[j]
v_j = self.v[j]
L = np.where(self.labels == j)[0]
for k in L:
if self.std == None:
datak = np.matrix(self.data[:, k].reshape(n, 1) - m_mean_j)
if self.vardata != None:
vark = self.vardata[:, k] + v_j
else:
nk = int(self.N[k])
datak = np.matrix(data_I[cumsum[k] : cumsum[k + 1]].reshape(nk, 1) - m_mean_j)
if self.vardata != None:
vark = var_I[cumsum[k] : cumsum[k + 1]]
Vk = np.matrix(np.zeros((nk, nk), float) + m_var_j)
if self.vardata == None:
Vk[xrange(nk), xrange(nk)] = v_j + m_var_j
else:
Vk[xrange(nk), xrange(nk)] = vark + m_var_j
log_region_likelihood[j] += np.log(np.linalg.det(Vk)) + datak.transpose() * np.linalg.inv(Vk) * datak
if self.std == None:
nj = n * len(L)
else:
nj = self.N[L].sum()
log_region_likelihood[j] += nj * np.log(2 * np.pi)
return log_region_likelihood
def compute_log_region_likelihood(self, v=None, m_mean=None, m_var=None):
log_voxel_likelihood = self.compute_log_voxel_likelihood(v, m_mean, m_var)
N = len(self.network)
log_region_likelihood = np.zeros(N, float)
for j in xrange(N):
log_region_likelihood[j] = log_voxel_likelihood[self.labels==j].sum()
return log_region_likelihood
def compute_log_voxel_likelihood(self, v=None, m_mean=None, m_var=None, return_SS=False):
if v == None:
v = self.v
if m_mean == None:
m_mean = self.m_mean
if m_var == None:
m_var = self.m_var
n, p = self.data.shape
if self.std == None:
N = n
v_labels = v[self.labels]
Z = self.data - m_mean[self.labels]
else:
N = self.N
I = self.D.I
v_labels = v[self.labels[I]]
Z = self.data - m_mean[self.labels[I]]
if self.vardata == None:
tot_var = v_labels + np.zeros(self.data.shape, float)
else:
tot_var = v_labels + self.vardata
if self.std == None:
SS1 = (1 / tot_var).sum(axis=0)
SS2 = np.log(tot_var).sum(axis=0)
SS3 = (Z**2 / tot_var).sum(axis=0)
SS4 = (Z / tot_var).sum(axis=0)
else:
SS1 = np.zeros(p, float)
SS2 = np.zeros(p, float)
SS3 = np.zeros(p, float)
SS4 = np.zeros(p, float)
for i in xrange(n):
Ii = self.D.I[i]
add_lines((1 / tot_var[i]).reshape(p, 1), SS1.reshape(p, 1), Ii)
add_lines(np.log(tot_var[i]).reshape(p, 1), SS2.reshape(p, 1), Ii)
add_lines((Z[i]**2 / tot_var[i]).reshape(p, 1), SS3.reshape(p, 1), Ii)
add_lines((Z[i] / tot_var[i]).reshape(p, 1), SS4.reshape(p, 1), Ii)
LL = - 0.5 * (N * np.log(2 * np.pi) + np.log(1 + m_var[self.labels] * SS1) \
+ SS2 + SS3 - SS4**2 / (1 / m_var[self.labels] + SS1))
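# Editorial note: this closed form is what one obtains by integrating the voxel-level
# mean effect out of the Gaussian likelihood under its N(m_mean, m_var) prior
# (via the Sherman-Morrison identity applied to the rank-one covariance term).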
if return_SS:
return LL, Z, tot_var, SS1, SS2, SS3, SS4
else:
return LL
def compute_log_prior(self, v=None, m_mean=None, m_var=None, std=None):
"""
compute log prior density of model parameters, spatial uncertainty excepted,
assuming hidden variables have been initialized
"""
if v == None:
v = self.v
if m_mean == None:
m_mean = self.m_mean
if m_var == None:
m_var = self.m_var
if std == None:
std = self.std
N = len(self.network)
log_prior_values = np.zeros(N + 1, float)
log_prior_values[:-1] = log_gammainv_pdf(v, self.v_shape, self.v_scale)
log_prior_values[:-1] += log_gammainv_pdf(m_var, self.m_var_shape, self.m_var_scale)
J = self.network == 1
if J.sum() > 0:
log_prior_values[J] += log_gaussian_pdf(m_mean[J], 0, m_var[J] / self.m_mean_rate[J])
if self.std != None:
log_prior_values[-1] = log_gammainv_pdf(std**2, self.std_shape, self.std_scale)
return log_prior_values
def compute_log_conditional_posterior(self, v=None, m_mean=None, m_var=None, std=None):
"""
compute log posterior density of model parameters, conditional on hidden parameters.
This function is used in compute_log_region_posterior. It should only be used within
the Gibbs sampler, and not the SAEM algorithm.
"""
n,p = self.data.shape
if v == None:
v = self.v
if m_mean == None:
m_mean = self.m_mean
if m_var == None:
m_var = self.m_var
if std == None:
std = self.std
log_conditional_posterior = np.zeros(len(self.network) + 1, float)
size = self.s6
if self.std == None:
N = n * size
else:
N = self.s5
log_conditional_posterior[:-1] = log_gammainv_pdf(v, self.v_shape + 0.5 * N, self.v_scale + 0.5 * self.s1)
log_conditional_posterior[:-1] += log_gammainv_pdf(m_var, self.m_var_shape + 0.5 * size, self.m_var_post_scale)
J = self.network == 1
if J.sum() > 0:
post_rate = self.m_mean_rate[J] + size[J]
log_conditional_posterior[J] += log_gaussian_pdf(m_mean[J], self.s3[J] / post_rate, m_var[J] / post_rate)
if std != None:
#B = len(self.D.block)
log_conditional_posterior[-1] = \
log_gammainv_pdf(std**2, self.std_shape + 0.5 * self.D.U.size, self.std_scale + 0.5 * self.s4)
return log_conditional_posterior
def sample_log_conditional_posterior(self, v=None, m_mean=None, m_var=None, std=None, nsimu=100, burnin=100, stabilize=False, verbose=False, update_spatial=False):
"""
sample log conditional posterior density of region parameters
using a Gibbs sampler (assuming all hidden variables have been initialized).
Computes posterior mean.
if stabilize is True, sampling is conditioned on the parameters, reducing
the variance of the estimate, but introducing a positive bias.
"""
if v == None:
v = self.v.copy()
if m_mean == None:
m_mean = self.m_mean.copy()
if m_var == None:
m_var = self.m_var.copy()
if std == None and self.std != None:
if np.isscalar(self.std):
std = self.std
else:
std = self.std.copy()
if update_spatial:
U = self.D.U.copy()
proposal = self.proposal
proposal_mean = self.proposal_mean
proposal_std = self.proposal_std
N = len(self.network)
log_conditional_posterior_values = np.zeros((nsimu, N+1), float)
#self.init_hidden_variables()
n, p = self.data.shape
posterior_mean = np.zeros(p, float)
self.nsimu = nsimu
self.burnin = burnin
#self.J = J
self.verbose = verbose
niter = np.array([int(burnin), int(nsimu)])
for k in np.arange(2)[niter>0]:
if self.verbose:
if k == 0:
print "Burn-in"
else:
print "Sampling posterior distribution"
for i in xrange(niter[k]):
if self.verbose:
print "Iteration", i+1, "out of", niter[k]
# Gibbs iteration
#i += 1
if update_spatial and self.std != None:
self.update_displacements()
if self.vardata != None:
self.update_effects()
self.update_mean_effect()
posterior_mean += self.m
if not stabilize:
self.update_summary_statistics(update_spatial, mode='mcmc')
self.update_parameters_mcmc(update_spatial)
if self.verbose:
print "population effect min variance value :", self.m_var.min()
if k == 1:
if stabilize:
self.update_summary_statistics(update_spatial, mode='mcmc')
log_conditional_posterior_values[i] = \
self.compute_log_conditional_posterior(v, m_mean, m_var, std)#[:-1]
posterior_mean /= nsimu
if not stabilize:
# Restore initial parameter values
self.v[:], self.m_mean[:], self.m_var[:], self.std = v, m_mean, m_var, std
if update_spatial:
# Restore initial displacement values
self.proposal = 'fixed'
self.proposal_mean = U
self.proposal_std = U * 0
self.update_displacements()
self.proposal = proposal
self.proposal_mean = proposal_mean
self.proposal_std = proposal_std
self.update_summary_statistics(update_spatial, mode='mcmc')
return log_conditional_posterior_values, posterior_mean
def compute_log_posterior(self, v=None, m_mean=None, m_var=None, std=None, nsimu=100, burnin=100, stabilize=False, verbose=False, update_spatial=False):
"""
compute log posterior density of region parameters by Rao-Blackwell method,
or a stabilized upper bound if stabilize is True.
"""
log_conditional_posterior_values \
= self.sample_log_conditional_posterior(v, m_mean, m_var, std, nsimu, burnin, stabilize, verbose, update_spatial)[0]
max_log_conditional = log_conditional_posterior_values.max(axis=0)
ll_ratio = log_conditional_posterior_values - max_log_conditional
if stabilize:
return max_log_conditional + ll_ratio.mean(axis=0)
elif not update_spatial:
return max_log_conditional \
+ np.log(np.exp(ll_ratio).sum(axis=0)) \
- np.log(nsimu)
else:
return max_log_conditional.sum() \
+ np.log(np.exp(ll_ratio.sum(axis=1)).sum()) \
- np.log(nsimu)
def compute_marginal_likelihood(self, v=None, m_mean=None, m_var=None, std=None, nsimu=100, burnin=100, stabilize=False, verbose=False, update_spatial=False, U=None, proposal_std=None):
log_likelihood = self.compute_log_region_likelihood(v, m_mean, m_var)
log_prior = self.compute_log_prior(v, m_mean, m_var, std)
log_posterior = self.compute_log_posterior(v, m_mean, m_var, std, nsimu, burnin, stabilize, verbose, update_spatial)
if update_spatial and self.std != None:
n, B = self.data.shape[0], len(self.D.block)
if std == None:
std = self.std
if U == None:
U = self.D.U
log_displacements_prior = \
- 0.5 * np.square(U).sum() / std**2 \
- self.D.U.size * np.log(std)
log_displacements_posterior = \
self.compute_log_conditional_displacements_posterior(\
U,
nsimu*n*B,
burnin*n*B,
proposal_std,
verbose)
return log_likelihood.sum() + \
log_prior.sum() + \
log_displacements_prior - \
log_posterior - \
log_displacements_posterior
else:
return log_likelihood + log_prior[:-1] - log_posterior[:-1]
def compute_conditional_posterior_mean(self, v=None, m_mean=None, m_var=None):
"""
Compute posterior mean of mean effect map,
conditional on parameters and displacements
"""
if v == None:
v = self.v.copy()
if m_mean == None:
m_mean = self.m_mean.copy()
if m_var == None:
m_var = self.m_var.copy()
LL, Z, tot_var, SS1, SS2, SS3, SS4 = \
self.compute_log_voxel_likelihood(v, m_mean, m_var, return_SS=True)
#if self.std == None:
#I = range(self.m.size)*np.ones(self.data.shape,int)
#else:
#I = self.D.I
m_labels = m_mean[self.labels]
v_labels = m_var[self.labels]
return (SS4 + m_labels * SS1 + m_labels / v_labels)\
/ (SS1 + 1.0 / v_labels)
| 44.291937 | 189 | 0.504593 |
4870c0ad7fa8099cb1d7f6efc8c755fef7cdc5fb
| 3,230 |
py
|
Python
|
examples/blowout_wake/analysis.py
|
ax3l/hipace
|
ff25dda59fe3de2ff70c49f6b53e52aead04ef60
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
examples/blowout_wake/analysis.py
|
ax3l/hipace
|
ff25dda59fe3de2ff70c49f6b53e52aead04ef60
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
examples/blowout_wake/analysis.py
|
ax3l/hipace
|
ff25dda59fe3de2ff70c49f6b53e52aead04ef60
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
#! /usr/bin/env python3
# This Python analysis script is part of the code Hipace++
#
# It compares the transverse field By with the theoretical value, plots both
# the simulation result and the theory on the same plot, and asserts that the
# difference is small.
#
# To use it, run the simulation and execute this script with
# > ../../build/bin/hipace inputs_SI
# > python analysis.py
# Note: the simulation may take some time, as the box size must be large to obtain
# decent agreement
import matplotlib.pyplot as plt
import scipy.constants as scc
import matplotlib
import sys
import numpy as np
import math
import argparse
from openpmd_viewer import OpenPMDTimeSeries
parser = argparse.ArgumentParser(description='Script to analyze the correctness of the beam in vacuum')
parser.add_argument('--normalized-data',
dest='norm_data',
required=True,
help='Path to the data of the normalized units run')
parser.add_argument('--si-data',
dest='si_data',
required=True,
help='Path to the data of the SI units run')
parser.add_argument('--si-fixed-weight-data',
dest='si_fixed_weight_data',
required=True,
help='Path to the data of the SI units run with a fixed weight beam')
parser.add_argument('--do-plot',
dest='do_plot',
action='store_true',
default=False,
help='Plot figures and save them to file')
args = parser.parse_args()
ts_norm = OpenPMDTimeSeries(args.norm_data)
ts_si = OpenPMDTimeSeries(args.si_data)
ts_si_fixed_weight = OpenPMDTimeSeries(args.si_fixed_weight_data)
elec_density = 2.8239587008591567e23 # [1/m^3]
# calculation of the plasma frequency
omega_p = np.sqrt(elec_density * scc.e**2 / (scc.epsilon_0 * scc.m_e))
E_0 = omega_p * scc.m_e * scc.c / scc.e
kp = omega_p / scc.c # 1./10.e-6
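# Editorial note: for the density above this gives kp of about 1e5 1/m, i.e. a plasma
# skin depth of roughly 10 microns, consistent with the 1./10.e-6 comment.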
# Load Hipace++ data for Ez in both normalized and SI units
Ez_along_z_norm, meta_norm = ts_norm.get_field(
field='Ez', iteration=1, slice_across=['x','y'], slice_relative_position=[0,0])
Ez_along_z_si, meta_si = ts_si.get_field(
field='Ez', iteration=1, slice_across=['x','y'], slice_relative_position=[0,0])
Ez_along_z_si_fixed_w, meta = ts_si_fixed_weight.get_field(
field='Ez', iteration=1, slice_across=['x','y'], slice_relative_position=[0,0])
zeta_norm = meta_norm.z
zeta_si = meta_si.z
if args.do_plot:
fig, ax = plt.subplots()
ax.plot(zeta_norm, Ez_along_z_norm)
ax.plot(zeta_si*kp, Ez_along_z_si/E_0, linestyle='--')
ax.set_xlabel('z')
ax.set_ylabel('Ez/E0')
plt.savefig('Ez_z.png')
# Assert that the simulation result is close enough to theory
error_Ez = np.sum((Ez_along_z_si/E_0-Ez_along_z_norm)**2) / np.sum((Ez_along_z_norm)**2)
print("total relative error Ez: " + str(error_Ez) + " (tolerance = 1e-10)")
error_Ez_fixed_weight = np.sum((Ez_along_z_si_fixed_w-Ez_along_z_si)**2) / np.sum((Ez_along_z_si)**2)
print("total relative error Ez for a fixed weight beam to the fixed ppc beam: " + str(error_Ez_fixed_weight) + " (tolerance = 1e-2)")
assert(error_Ez < 1e-10)
assert(error_Ez_fixed_weight < 1e-2)
| 40.375 | 133 | 0.68452 |
5ab0249d0d751559412e02669927a1b4d81f2d33
| 1,666 |
py
|
Python
|
examples/plot_map.py
|
Huite/contextily
|
14d4fb5f18eeb5ef6266885c9b1b5c5513f63021
|
[
"BSD-3-Clause"
] | 182 |
2020-04-08T15:56:50.000Z
|
2022-03-24T15:02:19.000Z
|
examples/plot_map.py
|
Huite/contextily
|
14d4fb5f18eeb5ef6266885c9b1b5c5513f63021
|
[
"BSD-3-Clause"
] | 126 |
2016-09-28T22:18:00.000Z
|
2020-04-08T12:45:36.000Z
|
examples/plot_map.py
|
Huite/contextily
|
14d4fb5f18eeb5ef6266885c9b1b5c5513f63021
|
[
"BSD-3-Clause"
] | 43 |
2016-09-28T19:59:36.000Z
|
2022-01-20T17:03:53.000Z
|
"""
Downloading and Plotting Maps
-----------------------------
Plotting maps with Contextily.
Contextily is designed to pull map tile information from the web. In many
cases we want to go from a location to a map of that location as quickly
as possible. There are two main ways to do this with Contextily.
Searching for places with text
==============================
The simplest approach is to search for a location with text. You can do
this with the ``Place`` class. This will return an object that contains
metadata about the place, such as its bounding box. It will also contain an
image of the place.
"""
import numpy as np
import matplotlib.pyplot as plt
import contextily as ctx
loc = ctx.Place("boulder", zoom_adjust=0) # zoom_adjust modifies the auto-zoom
# Print some metadata
for attr in ["w", "s", "e", "n", "place", "zoom", "n_tiles"]:
print("{}: {}".format(attr, getattr(loc, attr)))
# Show the map
im1 = loc.im
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
ctx.plot_map(loc, ax=axs[0])
###############################################################################
# The zoom level will be chosen for you by default, though you can specify
# this manually as well:
loc2 = ctx.Place("boulder", zoom=11)
ctx.plot_map(loc2, ax=axs[1])
###############################################################################
# Downloading tiles from bounds
# =============================
#
# You can also grab tile information directly from a bounding box + zoom level.
# This is demoed below:
im2, bbox = ctx.bounds2img(loc.w, loc.s, loc.e, loc.n, zoom=loc.zoom, ll=True)
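# bbox is the extent of the downloaded tile mosaic (in the tiles' Web Mercator
# coordinates), which plot_map uses below to place the image.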
ctx.plot_map(im2, bbox, ax=axs[2], title="Boulder, CO")
plt.show()
| 31.433962 | 79 | 0.614046 |
4ed7dc19a79afe73f6614a8f8057fcda8c14d8bd
| 215 |
py
|
Python
|
trebelge/trebelge/doctype/ubl_tr_despatchline_summary/ubl_tr_despatchline_summary.py
|
Framras/trebelge
|
362179925dc688ad8ea008f532de72e67e49941b
|
[
"MIT"
] | 6 |
2019-12-21T21:15:50.000Z
|
2021-12-30T21:59:53.000Z
|
trebelge/trebelge/doctype/ubl_tr_despatchline_summary/ubl_tr_despatchline_summary.py
|
Framras/trebelge
|
362179925dc688ad8ea008f532de72e67e49941b
|
[
"MIT"
] | null | null | null |
trebelge/trebelge/doctype/ubl_tr_despatchline_summary/ubl_tr_despatchline_summary.py
|
Framras/trebelge
|
362179925dc688ad8ea008f532de72e67e49941b
|
[
"MIT"
] | 3 |
2020-01-05T19:32:40.000Z
|
2021-11-03T14:11:21.000Z
|
# Copyright (c) 2022, Framras AS-Izmir and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class UBLTRDespatchLineSummary(Document):
pass
| 23.888889 | 55 | 0.804651 |
52a9b7691220caab9a1c8eca5c6ca93d1888b2a5
| 4,279 |
py
|
Python
|
examples/cifar_generator_cnn.py
|
sohan/hyperas
|
1d62178e6edaed0899fe61ab3bf0115164424c82
|
[
"MIT"
] | 1 |
2017-02-28T03:08:45.000Z
|
2017-02-28T03:08:45.000Z
|
examples/cifar_generator_cnn.py
|
soheilb/hyperas
|
1d62178e6edaed0899fe61ab3bf0115164424c82
|
[
"MIT"
] | null | null | null |
examples/cifar_generator_cnn.py
|
soheilb/hyperas
|
1d62178e6edaed0899fe61ab3bf0115164424c82
|
[
"MIT"
] | 1 |
2020-02-28T21:03:42.000Z
|
2020-02-28T21:03:42.000Z
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import cifar10
from keras.utils import np_utils
def data():
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# this will do preprocessing and realtime data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(X_train)
return datagen, X_train, Y_train, X_test, Y_test
def model(datagen, X_train, Y_train, X_test, Y_test):
batch_size = 32
nb_epoch = 200
nb_classes = 10  # defined here as well: model() does not see names defined in data()
# input image dimensions
img_rows, img_cols = 32, 32
# the CIFAR10 images are RGB
img_channels = 3
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
input_shape=(img_channels, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
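# The double-braced 'uniform(0, 1)' expressions below are hyperas template
# placeholders; at each trial hyperas replaces them with values drawn from the
# corresponding hyperopt search space.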
model.add(Dropout({{uniform(0, 1)}}))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
# fit the model on the batches generated by datagen.flow()
model.fit_generator(datagen.flow(X_train, Y_train,
batch_size=batch_size),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
datagen, X_train, Y_train, X_test, Y_test = data()
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=5,
trials=Trials())
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
| 37.535088 | 94 | 0.6595 |
a8509561fe0c699ce065673ed8f046f82620c302
| 51,224 |
py
|
Python
|
src/transformers/models/layoutlm/modeling_layoutlm.py
|
HimashiRathnayake/adapter-transformers
|
d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4
|
[
"Apache-2.0"
] | 50,404 |
2019-09-26T09:55:55.000Z
|
2022-03-31T23:07:49.000Z
|
src/transformers/models/layoutlm/modeling_layoutlm.py
|
HimashiRathnayake/adapter-transformers
|
d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4
|
[
"Apache-2.0"
] | 13,179 |
2019-09-26T10:10:57.000Z
|
2022-03-31T23:17:08.000Z
|
src/transformers/models/layoutlm/modeling_layoutlm.py
|
HimashiRathnayake/adapter-transformers
|
d9c06ecbf4aaa33756e848b8fc5b3ec65f5ff4f4
|
[
"Apache-2.0"
] | 13,337 |
2019-09-26T10:49:38.000Z
|
2022-03-31T23:06:17.000Z
|
# coding=utf-8
# Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch LayoutLM model. """
import math
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
MaskedLMOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_layoutlm import LayoutLMConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LayoutLMConfig"
_TOKENIZER_FOR_DOC = "LayoutLMTokenizer"
_CHECKPOINT_FOR_DOC = "microsoft/layoutlm-base-uncased"
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = [
"layoutlm-base-uncased",
"layoutlm-large-uncased",
]
LayoutLMLayerNorm = nn.LayerNorm
class LayoutLMEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super(LayoutLMEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = LayoutLMLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(
self,
input_ids=None,
bbox=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
words_embeddings = inputs_embeds
position_embeddings = self.position_embeddings(position_ids)
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError("The :obj:`bbox` coordinate values should be within the 0-1000 range.") from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = (
words_embeddings
+ position_embeddings
+ left_position_embeddings
+ upper_position_embeddings
+ right_position_embeddings
+ lower_position_embeddings
+ h_position_embeddings
+ w_position_embeddings
+ token_type_embeddings
)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
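# Editorial note: bbox is expected to be a (batch_size, seq_len, 4) tensor of
# [x0, y0, x1, y1] box coordinates normalized to the 0-1000 range, as the embedding
# lookups and the IndexError above imply.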
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->LayoutLM
class LayoutLMSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
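# Reshapes (batch, seq_len, all_head_size) into (batch, num_heads, seq_len, head_size)
# so attention scores can be computed independently for each head.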
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in LayoutLMModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->LayoutLM
class LayoutLMSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->LayoutLM
class LayoutLMAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LayoutLMSelfAttention(config)
self.output = LayoutLMSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LayoutLMIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->LayoutLM
class LayoutLMOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->LayoutLM
class LayoutLMLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LayoutLMAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = LayoutLMAttention(config)
self.intermediate = LayoutLMIntermediate(config)
self.output = LayoutLMOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->LayoutLM
class LayoutLMEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LayoutLMLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
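                # Wrap the layer call in a closure so torch.utils.checkpoint can re-run it during
                # the backward pass with past_key_value and output_attentions captured from this scope.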
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class LayoutLMPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->LayoutLM
class LayoutLMPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->LayoutLM
class LayoutLMLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = LayoutLMPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->LayoutLM
class LayoutLMOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = LayoutLMLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class LayoutLMPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LayoutLMConfig
pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST
base_model_prefix = "layoutlm"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, LayoutLMLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, LayoutLMEncoder):
module.gradient_checkpointing = value
LAYOUTLM_START_DOCSTRING = r"""
The LayoutLM model was proposed in `LayoutLM: Pre-training of Text and Layout for Document Image Understanding
<https://arxiv.org/abs/1912.13318>`__ by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei and Ming Zhou.
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config (:class:`~transformers.LayoutLMConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
LAYOUTLM_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.LayoutLMTokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
bbox (:obj:`torch.LongTensor` of shape :obj:`({0}, 4)`, `optional`):
Bounding boxes of each input sequence tokens. Selected in the range ``[0,
config.max_2d_position_embeddings-1]``. Each bounding box should be a normalized version in (x0, y0, x1,
y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and
(x1, y1) represents the position of the lower right corner. See :ref:`Overview` for normalization.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for
tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1`
indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned
tensors for more detail.
return_dict (:obj:`bool`, `optional`):
If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a
plain tuple.
"""
@add_start_docstrings(
"The bare LayoutLM Model transformer outputting raw hidden-states without any specific head on top.",
LAYOUTLM_START_DOCSTRING,
)
class LayoutLMModel(LayoutLMPreTrainedModel):
def __init__(self, config):
super(LayoutLMModel, self).__init__(config)
self.config = config
self.embeddings = LayoutLMEmbeddings(config)
self.encoder = LayoutLMEncoder(config)
self.pooler = LayoutLMPooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Examples::
>>> from transformers import LayoutLMTokenizer, LayoutLMModel
>>> import torch
>>> tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased')
>>> model = LayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased')
>>> words = ["Hello", "world"]
>>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
>>> token_boxes = []
>>> for word, box in zip(words, normalized_word_boxes):
... word_tokens = tokenizer.tokenize(word)
... token_boxes.extend([box] * len(word_tokens))
>>> # add bounding boxes of cls + sep tokens
>>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
>>> encoding = tokenizer(' '.join(words), return_tensors="pt")
>>> input_ids = encoding["input_ids"]
>>> attention_mask = encoding["attention_mask"]
>>> token_type_ids = encoding["token_type_ids"]
>>> bbox = torch.tensor([token_boxes])
>>> outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
>>> last_hidden_states = outputs.last_hidden_state
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
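        # Broadcast the 2D attention mask to (batch_size, 1, 1, seq_length) and convert it into an
        # additive bias: 0.0 for positions to attend to, -10000.0 for padding positions.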
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
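        # Expand head_mask so it broadcasts over (num_hidden_layers, batch, num_heads, seq_len, seq_len);
        # when no mask is given, a list of None keeps every head in every layer.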
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids=input_ids,
bbox=bbox,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings("""LayoutLM Model with a `language modeling` head on top. """, LAYOUTLM_START_DOCSTRING)
class LayoutLMForMaskedLM(LayoutLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.layoutlm = LayoutLMModel(config)
self.cls = LayoutLMOnlyMLMHead(config)
self.init_weights()
def get_input_embeddings(self):
return self.layoutlm.embeddings.word_embeddings
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
Returns:
Examples::
>>> from transformers import LayoutLMTokenizer, LayoutLMForMaskedLM
>>> import torch
>>> tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased')
>>> model = LayoutLMForMaskedLM.from_pretrained('microsoft/layoutlm-base-uncased')
>>> words = ["Hello", "[MASK]"]
>>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
>>> token_boxes = []
>>> for word, box in zip(words, normalized_word_boxes):
... word_tokens = tokenizer.tokenize(word)
... token_boxes.extend([box] * len(word_tokens))
>>> # add bounding boxes of cls + sep tokens
>>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
>>> encoding = tokenizer(' '.join(words), return_tensors="pt")
>>> input_ids = encoding["input_ids"]
>>> attention_mask = encoding["attention_mask"]
>>> token_type_ids = encoding["token_type_ids"]
>>> bbox = torch.tensor([token_boxes])
>>> labels = tokenizer("Hello world", return_tensors="pt")["input_ids"]
>>> outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids,
... labels=labels)
>>> loss = outputs.loss
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlm(
input_ids,
bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size),
labels.view(-1),
)
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
LayoutLM Model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for
document image classification tasks such as the `RVL-CDIP <https://www.cs.cmu.edu/~aharley/rvl-cdip/>`__ dataset.
""",
LAYOUTLM_START_DOCSTRING,
)
class LayoutLMForSequenceClassification(LayoutLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlm = LayoutLMModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def get_input_embeddings(self):
return self.layoutlm.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
Examples::
>>> from transformers import LayoutLMTokenizer, LayoutLMForSequenceClassification
>>> import torch
>>> tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased')
>>> model = LayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased')
>>> words = ["Hello", "world"]
>>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
>>> token_boxes = []
>>> for word, box in zip(words, normalized_word_boxes):
... word_tokens = tokenizer.tokenize(word)
... token_boxes.extend([box] * len(word_tokens))
>>> # add bounding boxes of cls + sep tokens
>>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
>>> encoding = tokenizer(' '.join(words), return_tensors="pt")
>>> input_ids = encoding["input_ids"]
>>> attention_mask = encoding["attention_mask"]
>>> token_type_ids = encoding["token_type_ids"]
>>> bbox = torch.tensor([token_boxes])
>>> sequence_label = torch.tensor([1])
>>> outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids,
... labels=sequence_label)
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlm(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
LayoutLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
sequence labeling (information extraction) tasks such as the `FUNSD <https://guillaumejaume.github.io/FUNSD/>`__
dataset and the `SROIE <https://rrc.cvc.uab.es/?ch=13>`__ dataset.
""",
LAYOUTLM_START_DOCSTRING,
)
class LayoutLMForTokenClassification(LayoutLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlm = LayoutLMModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def get_input_embeddings(self):
return self.layoutlm.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
Returns:
Examples::
>>> from transformers import LayoutLMTokenizer, LayoutLMForTokenClassification
>>> import torch
>>> tokenizer = LayoutLMTokenizer.from_pretrained('microsoft/layoutlm-base-uncased')
>>> model = LayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased')
>>> words = ["Hello", "world"]
>>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
>>> token_boxes = []
>>> for word, box in zip(words, normalized_word_boxes):
... word_tokens = tokenizer.tokenize(word)
... token_boxes.extend([box] * len(word_tokens))
>>> # add bounding boxes of cls + sep tokens
>>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
>>> encoding = tokenizer(' '.join(words), return_tensors="pt")
>>> input_ids = encoding["input_ids"]
>>> attention_mask = encoding["attention_mask"]
>>> token_type_ids = encoding["token_type_ids"]
>>> bbox = torch.tensor([token_boxes])
>>> token_labels = torch.tensor([1,1,0,0]).unsqueeze(0) # batch size of 1
>>> outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids,
... labels=token_labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlm(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| 42.651124 | 164 | 0.658031 |
a6526d41acb8c5a8ee40ed159f738863d09c292c
| 327 |
py
|
Python
|
packages/pyright-internal/src/tests/samples/genericTypes68.py
|
martindemello/pyright
|
4fe3f1f7c08f139715701fdf746183062b5165ff
|
[
"MIT"
] | 4,391 |
2019-05-07T01:18:57.000Z
|
2022-03-31T20:45:44.000Z
|
packages/pyright-internal/src/tests/samples/genericTypes68.py
|
martindemello/pyright
|
4fe3f1f7c08f139715701fdf746183062b5165ff
|
[
"MIT"
] | 2,740 |
2019-05-07T03:29:30.000Z
|
2022-03-31T12:57:46.000Z
|
packages/pyright-internal/src/tests/samples/genericTypes68.py
|
martindemello/pyright
|
4fe3f1f7c08f139715701fdf746183062b5165ff
|
[
"MIT"
] | 455 |
2019-05-07T12:55:14.000Z
|
2022-03-31T17:09:15.000Z
|
# This sample tests the case where a constrained TypeVar is assigned
# to another constrained TypeVar or a union that contains a constrained
# TypeVar.
from os import PathLike
from typing import AnyStr
def func(path: AnyStr | PathLike[AnyStr]) -> AnyStr:
...
def thing(value: AnyStr) -> AnyStr:
return func(value)
| 21.8 | 71 | 0.737003 |
482f02531474020a976e7e1b7d89f0fc68271596
| 2,444 |
py
|
Python
|
app/forms/page.py
|
tch1bo/viaduct
|
bfd37b0a8408b2dd66fb01138163b80ce97699ff
|
[
"MIT"
] | 11 |
2015-04-23T21:57:56.000Z
|
2019-04-28T12:48:58.000Z
|
app/forms/page.py
|
tch1bo/viaduct
|
bfd37b0a8408b2dd66fb01138163b80ce97699ff
|
[
"MIT"
] | 1 |
2016-10-05T14:10:58.000Z
|
2016-10-05T14:12:23.000Z
|
app/forms/page.py
|
tch1bo/viaduct
|
bfd37b0a8408b2dd66fb01138163b80ce97699ff
|
[
"MIT"
] | 3 |
2016-10-05T14:00:42.000Z
|
2019-01-16T14:33:43.000Z
|
from flask_babel import lazy_gettext as _
from flask_wtf import FlaskForm
from wtforms import BooleanField, StringField, TextAreaField, SubmitField, \
RadioField
from wtforms.ext.sqlalchemy.fields import QuerySelectMultipleField
from wtforms.validators import Optional
from app.forms.fields import CustomFormSelectField
from app.service import group_service
class SuperPageForm(FlaskForm):
"""TODO."""
nl_title = StringField(_('Dutch title'))
en_title = StringField(_('English title'))
comment = StringField(_('Comment for change'), [Optional()])
class PageForm(SuperPageForm):
nl_content = TextAreaField(_('Dutch content'))
en_content = TextAreaField(_('English content'))
filter_html = BooleanField(_('Do not filter HTML tags'))
custom_read_permission = BooleanField(_('Limit read permission to groups'))
read_groups = QuerySelectMultipleField(
_("Groups with read permission"),
query_factory=lambda: group_service.find_groups(),
get_label='name')
needs_paid = BooleanField(_('Require membership to view'))
custom_form_id = CustomFormSelectField(_('Form'))
def validate(self):
# Validate all other fields with default validators
if not SuperPageForm.validate(self):
return False
# Test if either english or dutch is entered
result = True
if not (self.nl_title.data or self.en_title.data):
self.nl_title.errors.append(
_('Either Dutch or English title required'))
result = False
if not (self.nl_content.data or self.en_content.data):
self.nl_content.errors.append(
_('Either Dutch or English content required'))
result = False
        # XOR the results to test that both title and content were given for each language
if bool(self.nl_title.data) != bool(self.nl_content.data):
self.nl_title.errors.append(
_('Dutch title requires Dutch content and vice versa'))
result = False
if bool(self.en_title.data) != bool(self.en_content.data):
self.en_title.errors.append(
_('English title requires English content and vice versa'))
result = False
return result
class HistoryPageForm(FlaskForm):
previous = RadioField(_('Previous'), coerce=int)
current = RadioField(_('Current'), coerce=int)
compare = SubmitField(_('Compare'))
| 35.42029 | 79 | 0.675532 |
19ebdb1eb8d9318c7dc8457a7c170e14999e9575
| 2,365 |
py
|
Python
|
AutotestWebD/apps/interface/services/HTTP_interface_debugService.py
|
yangjourney/sosotest
|
2e88099a829749910ca325253c9b1a2e368d21a0
|
[
"MIT"
] | 422 |
2019-08-18T05:04:20.000Z
|
2022-03-31T06:49:19.000Z
|
AutotestWebD/apps/interface/services/HTTP_interface_debugService.py
|
LinSongJian1985/sosotest
|
091863dee531b5726650bb63efd6f169267cbeb4
|
[
"MIT"
] | 10 |
2019-10-24T09:55:38.000Z
|
2021-09-29T17:28:43.000Z
|
AutotestWebD/apps/interface/services/HTTP_interface_debugService.py
|
LinSongJian1985/sosotest
|
091863dee531b5726650bb63efd6f169267cbeb4
|
[
"MIT"
] | 202 |
2019-08-18T05:04:27.000Z
|
2022-03-30T05:57:18.000Z
|
import apps.common.func.InitDjango
from all_models.models import TbHttpInterface,TbHttpInterfaceDebug
from django.db import connection
from django.forms.models import model_to_dict
from apps.common.func.CommonFunc import *
from apps.common.config import commonWebConfig
class HTTP_interfaceDebugService(object):
@staticmethod
def interfaceDebugAdd(data,addBy):
newDataDict = {}
for k, v in data.items():
newDataDict[k] = data[k]
newDataDict["addBy_id"] = addBy
if (len(TbHttpInterfaceDebug.objects.filter(addBy_id=addBy)) == 0):
print(newDataDict)
return TbHttpInterfaceDebug.objects.create(**newDataDict)
else:
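            # A debug record already exists for this user: clear the previous results and timings,
            # then update the existing row instead of creating a new one.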
newDataDict['actualResult'] = ''
newDataDict['assertResult'] = ''
newDataDict['testResult'] = "NOTRUN"
newDataDict['execStatus'] = 1
newDataDict['beforeExecuteTakeTime'] = 0
newDataDict['executeTakeTime'] = 0
newDataDict['afterExecuteTakeTime'] = 0
newDataDict['totalTakeTime'] = 0
newDataDict["modTime"] = datetime.datetime.now()
try:
TbHttpInterfaceDebug.objects.filter(addBy_id=addBy).update(**newDataDict)
except Exception as e:
import traceback
print(traceback.format_exc())
return False
return TbHttpInterfaceDebug.objects.filter(addBy_id=addBy)[0]
@staticmethod
def getDebugResult(addBy):
debugResult = TbHttpInterfaceDebug.objects.filter(addBy_id=addBy)[0]
if debugResult.execStatus == 3 or debugResult.execStatus == 4:
debugResultDict = dbModelToDict(debugResult)
businessLineDict = dbModelToDict(debugResult.businessLineId)
moduleDict = dbModelToDict(debugResult.moduleId)
httpConfKey = dbModelToDict(debugResult.httpConfKey)
debugResultDict.update(httpConfKey)
debugResultDict.update(businessLineDict)
debugResultDict.update(moduleDict)
return debugResultDict
else:
return 0
@staticmethod
def setDebugFail(addBy,dataDict):
debugFail = TbHttpInterfaceDebug.objects.filter(addBy_id=addBy).update(**dataDict)
return debugFail
if __name__ == "__main__":
print(int(67/1000*100))
| 38.145161 | 90 | 0.655391 |
8b968ffed313f67876bebdb3463ef51939213611
| 2,884 |
py
|
Python
|
Natural Language Processsing/NLP Course in Python/Topic 12 - Sentiment Analysis/sentiment_mod.py
|
bilwa496/NLP-Basics-Intermediate
|
84e37827b2512345c1227be772e2d69034524ca0
|
[
"MIT"
] | null | null | null |
Natural Language Processsing/NLP Course in Python/Topic 12 - Sentiment Analysis/sentiment_mod.py
|
bilwa496/NLP-Basics-Intermediate
|
84e37827b2512345c1227be772e2d69034524ca0
|
[
"MIT"
] | null | null | null |
Natural Language Processsing/NLP Course in Python/Topic 12 - Sentiment Analysis/sentiment_mod.py
|
bilwa496/NLP-Basics-Intermediate
|
84e37827b2512345c1227be772e2d69034524ca0
|
[
"MIT"
] | null | null | null |
import random
import pickle
import nltk
from nltk.classify.scikitlearn import SklearnClassifier
from nltk.classify import ClassifierI
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import LinearSVC, NuSVC
from statistics import mode
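# Majority-vote ensemble over the classifiers passed to the constructor: classify() returns the
# most common label, confidence() the fraction of classifiers that agree with that label.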
class VoteClassifier(ClassifierI):
def __init__(self, *classifiers):
self._classifiers = classifiers
def classify(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
return mode(votes)
def confidence(self, features):
votes = []
for c in self._classifiers:
v = c.classify(features)
votes.append(v)
choice_votes = votes.count(mode(votes))
conf = choice_votes / len(votes)
return conf
documents_f = open("documents.pickle", "rb")
documents = pickle.load(documents_f)
documents_f.close()
word_features_f = open("word_features.pickle", "rb")
word_features = pickle.load(word_features_f)
word_features_f.close()
featuresets_f = open("featuresets.pickle", "rb")
featuresets = pickle.load(featuresets_f)
featuresets_f.close()
# Original Naive Bayes
open_file = open("pickled_algorithms/naivebayes.pickle", "rb")
classifier = pickle.load(open_file)
open_file.close()
# MNB
open_file = open("pickled_algorithms/MNB_classifier.pickle", "rb")
MNB_classifier = pickle.load(open_file)
open_file.close()
# BernoulliNB
open_file = open("pickled_algorithms/BernoulliNB_classifier.pickle", "rb")
BernoulliNB_classifier = pickle.load(open_file)
open_file.close()
# Logistic Regression
open_file = open(
"pickled_algorithms/LogisticRegression_classifier.pickle", "rb")
LogisticRegression_classifier = pickle.load(open_file)
open_file.close()
# SGDClassifier
open_file = open("pickled_algorithms/SGDClassifier_classifier.pickle", "rb")
SGDClassifier_classifier = pickle.load(open_file)
open_file.close()
# LinearSVC
open_file = open("pickled_algorithms/LinearSVC_classifier.pickle", "rb")
LinearSVC_classifier = pickle.load(open_file)
open_file.close()
# NuSVC
open_file = open("pickled_algorithms/NuSVC_classifier.pickle", "rb")
NuSVC_classifier = pickle.load(open_file)
open_file.close()
voted_classifier = VoteClassifier(
classifier,
MNB_classifier,
BernoulliNB_classifier,
LogisticRegression_classifier,
SGDClassifier_classifier,
LinearSVC_classifier,
NuSVC_classifier)
def find_features(document):
words = word_tokenize(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
def sentiment(text):
feats = find_features(text)
return voted_classifier.classify(feats), voted_classifier.confidence(feats)
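# Example usage (a sketch; assumes the pickled feature and classifier files loaded above exist on disk):
# label, conf = sentiment("This movie was awesome! The acting was great, plot was wonderful")
# print(label, conf)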
| 26.458716 | 79 | 0.75 |
6c9abebaf6be06c27af946066c187f79c02b0bd2
| 1,706 |
py
|
Python
|
profiles_api/migrations/0001_initial.py
|
Pathomhpong/profiles-rest-api
|
697bc0dd7397c498abb09dd7a8c8f75c172a3088
|
[
"MIT"
] | null | null | null |
profiles_api/migrations/0001_initial.py
|
Pathomhpong/profiles-rest-api
|
697bc0dd7397c498abb09dd7a8c8f75c172a3088
|
[
"MIT"
] | null | null | null |
profiles_api/migrations/0001_initial.py
|
Pathomhpong/profiles-rest-api
|
697bc0dd7397c498abb09dd7a8c8f75c172a3088
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2020-07-14 10:08
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.176471 | 266 | 0.638921 |
4fc420ca3bdf5e01b2801b6d5d1fc789d43d4c07
| 4,311 |
py
|
Python
|
azure-mgmt-trafficmanager/azure/mgmt/trafficmanager/traffic_manager_management_client.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4 |
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure/mgmt/trafficmanager/traffic_manager_management_client.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 54 |
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure/mgmt/trafficmanager/traffic_manager_management_client.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3 |
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.endpoints_operations import EndpointsOperations
from .operations.profiles_operations import ProfilesOperations
from .operations.geographic_hierarchies_operations import GeographicHierarchiesOperations
from . import models
class TrafficManagerManagementClientConfiguration(AzureConfiguration):
"""Configuration for TrafficManagerManagementClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Gets subscription credentials which uniquely
identify Microsoft Azure subscription. The subscription ID forms part of
the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not isinstance(subscription_id, str):
raise TypeError("Parameter 'subscription_id' must be str.")
if not base_url:
base_url = 'https://management.azure.com'
super(TrafficManagerManagementClientConfiguration, self).__init__(base_url)
self.add_user_agent('trafficmanagermanagementclient/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
class TrafficManagerManagementClient(object):
"""TrafficManagerManagementClient
:ivar config: Configuration for client.
:vartype config: TrafficManagerManagementClientConfiguration
:ivar endpoints: Endpoints operations
:vartype endpoints: azure.mgmt.trafficmanager.operations.EndpointsOperations
:ivar profiles: Profiles operations
:vartype profiles: azure.mgmt.trafficmanager.operations.ProfilesOperations
:ivar geographic_hierarchies: GeographicHierarchies operations
:vartype geographic_hierarchies: azure.mgmt.trafficmanager.operations.GeographicHierarchiesOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Gets subscription credentials which uniquely
identify Microsoft Azure subscription. The subscription ID forms part of
the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = TrafficManagerManagementClientConfiguration(credentials, subscription_id, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2017-05-01'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.endpoints = EndpointsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.profiles = ProfilesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.geographic_hierarchies = GeographicHierarchiesOperations(
self._client, self.config, self._serialize, self._deserialize)
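# Example usage (a sketch; `credentials` is a hypothetical msrestazure credentials object):
# client = TrafficManagerManagementClient(credentials, '00000000-0000-0000-0000-000000000000')
# client.profiles, client.endpoints and client.geographic_hierarchies then expose the operation groups.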
| 43.989796 | 105 | 0.727673 |
031a14eb4733c18fc97b38c9471ab6dc9aec2706
| 1,350 |
py
|
Python
|
dqn_zoo/rainbow_flare/run_atari_test.py
|
WendyShang/dqn_zoo
|
465aedaee48a6e13cb141808abf23876a1b21e4e
|
[
"Apache-2.0"
] | 3 |
2021-02-04T23:13:51.000Z
|
2021-11-06T10:21:50.000Z
|
dqn_zoo/rainbow_flare/run_atari_test.py
|
WendyShang/dqn_zoo
|
465aedaee48a6e13cb141808abf23876a1b21e4e
|
[
"Apache-2.0"
] | null | null | null |
dqn_zoo/rainbow_flare/run_atari_test.py
|
WendyShang/dqn_zoo
|
465aedaee48a6e13cb141808abf23876a1b21e4e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Rainbow."""
# pylint: disable=g-bad-import-order
from absl import flags
from absl.testing import flagsaver
from dqn_zoo.rainbow_flare import run_atari
from absl.testing import absltest
FLAGS = flags.FLAGS
class RunAtariFlareTest(absltest.TestCase):
@flagsaver.flagsaver
def test_can_run_agent(self):
FLAGS.environment_name = 'pong'
FLAGS.replay_capacity = 1000
FLAGS.target_network_update_period = 3
FLAGS.num_train_frames = 100
FLAGS.num_eval_frames = 50
FLAGS.num_iterations = 3
FLAGS.batch_size = 10
FLAGS.learn_period = 2
run_atari.main(None)
if __name__ == '__main__':
absltest.main()
| 30 | 80 | 0.711111 |
279842e17481bd2416e89b34001cba7883a4b2a5
| 3,610 |
py
|
Python
|
lliregistration_back/foundation/migrations/0004_auto_20191203_1519.py
|
ydang5/final-project-back
|
ae8b0ff2b340b521b70e3b0c25ab8cb4b64ac453
|
[
"BSD-3-Clause"
] | null | null | null |
lliregistration_back/foundation/migrations/0004_auto_20191203_1519.py
|
ydang5/final-project-back
|
ae8b0ff2b340b521b70e3b0c25ab8cb4b64ac453
|
[
"BSD-3-Clause"
] | null | null | null |
lliregistration_back/foundation/migrations/0004_auto_20191203_1519.py
|
ydang5/final-project-back
|
ae8b0ff2b340b521b70e3b0c25ab8cb4b64ac453
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-12-03 15:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foundation', '0003_auto_20191203_1506'),
]
operations = [
migrations.AlterField(
model_name='llistudentdata',
name='contract_end_date',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='date_of_birth',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='email_address',
field=models.EmailField(blank=True, max_length=254, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='first_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='if_have_student_file',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='immigration_status',
field=models.CharField(blank=True, help_text='Study permit, work permit, visitor, PR, or Canadian citizen', max_length=50, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='immigration_status_valid_date',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='insurance_valid_date',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='last_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='level',
field=models.CharField(blank=True, help_text='student current level.', max_length=50, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='nationality',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='pathway',
field=models.CharField(blank=True, help_text='Is this a pathway student, please answer yes or no.', max_length=50, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='payment_through',
field=models.CharField(blank=True, help_text='Self or agent', max_length=255, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='payment_valid_date',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='photo',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='start_date',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='llistudentdata',
name='student_id',
field=models.PositiveSmallIntegerField(blank=True, null=True),
),
]
| 36.464646 | 146 | 0.58892 |
05a1a37a0312a72550e7acc3ade270a867fe27d7
| 1,549 |
py
|
Python
|
packages/python/plotly/plotly/validators/bar/marker/colorbar/_tickfont.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/bar/marker/colorbar/_tickfont.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/bar/marker/colorbar/_tickfont.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="tickfont", parent_name="bar.marker.colorbar", **kwargs
):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs,
)
| 38.725 | 81 | 0.556488 |
12564cb2146557d4de461fa5ac80c69753492884
| 3,712 |
py
|
Python
|
src/user_info.py
|
nothinghope3/instabot.py
|
a4f5e52d13bad2c45a24fb8e5049ba79e8ad2eb6
|
[
"MIT"
] | 3 |
2021-01-09T04:09:44.000Z
|
2021-08-16T17:00:44.000Z
|
src/user_info.py
|
nothinghope3/instabot.py
|
a4f5e52d13bad2c45a24fb8e5049ba79e8ad2eb6
|
[
"MIT"
] | null | null | null |
src/user_info.py
|
nothinghope3/instabot.py
|
a4f5e52d13bad2c45a24fb8e5049ba79e8ad2eb6
|
[
"MIT"
] | 2 |
2022-01-02T11:36:09.000Z
|
2022-03-09T09:54:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import logging
import random
import time
import re
def get_user_info(self, username):
current_user = username
log_string = "Getting user info : %s" % current_user
self.write_log(log_string)
if self.login_status == 1:
url_tag = self.url_user_detail % (current_user)
if self.login_status == 1:
r = self.s.get(url_tag)
if (
r.text.find(
"The link you followed may be broken, or the page may have been removed."
)
!= -1
):
log_string = (
"Looks like account was deleted, skipping : %s" % current_user
)
self.write_log(log_string)
                # insert_unfollow_count(self, user_id=current_id)  # disabled: not defined in this module
time.sleep(3)
return False
all_data = json.loads(
re.search(
"window._sharedData = (.*?);</script>", r.text, re.DOTALL
).group(1)
)["entry_data"]["ProfilePage"][0]
user_info = all_data["graphql"]["user"]
self.current_user_info = user_info
i = 0
log_string = "Checking user info.."
self.write_log(log_string)
follows = user_info["edge_follow"]["count"]
follower = user_info["edge_followed_by"]["count"]
media = user_info["edge_owner_to_timeline_media"]["count"]
follow_viewer = user_info["follows_viewer"]
followed_by_viewer = user_info["followed_by_viewer"]
requested_by_viewer = user_info["requested_by_viewer"]
has_requested_viewer = user_info["has_requested_viewer"]
log_string = "Follower : %i" % (follower)
self.write_log(log_string)
log_string = "Following : %s" % (follows)
self.write_log(log_string)
log_string = "Media : %i" % (media)
self.write_log(log_string)
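            # Heuristic account classification from the follower/following/media ratios:
            # far more followers than followings -> likely celebrity ("Selebgram") account,
            # far more followings than followers -> likely fake account.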
if follows == 0 or follower / follows > 2:
self.is_selebgram = True
self.is_fake_account = False
self.write_log(" >>>This is probably Selebgram account")
elif follower == 0 or follows / follower > 2:
self.is_fake_account = True
self.is_selebgram = False
self.write_log(" >>>This is probably Fake account")
else:
self.is_selebgram = False
self.is_fake_account = False
self.write_log(" >>>This is a normal account")
if media > 0 and follows / media < 25 and follower / media < 25:
self.is_active_user = True
self.write_log(" >>>This user is active")
else:
self.is_active_user = False
self.write_log(" >>>This user is passive")
if follow_viewer or has_requested_viewer:
self.is_follower = True
self.write_log(" >>>This account is following you")
else:
self.is_follower = False
self.write_log(" >>>This account is NOT following you")
if followed_by_viewer or requested_by_viewer:
self.is_following = True
self.write_log(" >>>You are following this account")
else:
self.is_following = False
self.write_log(" >>>You are NOT following this account")
else:
logging.exception("Except on auto_unfollow!")
time.sleep(3)
return False
else:
return 0
| 38.268041 | 93 | 0.535291 |
0b2970d84ad4532a57662fbb006e47357f080511
| 511 |
py
|
Python
|
whats_fresh/urls.py
|
osu-cass/whats-fresh-api
|
0ace76c3d7d423e95d5e3b3c7cd0f74abcf975bd
|
[
"Apache-2.0"
] | 4 |
2015-08-20T19:38:03.000Z
|
2016-01-20T18:52:24.000Z
|
whats_fresh/urls.py
|
osu-cass/whats-fresh-api
|
0ace76c3d7d423e95d5e3b3c7cd0f74abcf975bd
|
[
"Apache-2.0"
] | 39 |
2015-01-08T23:50:47.000Z
|
2021-01-05T20:19:15.000Z
|
whats_fresh/urls.py
|
osu-cass/whats-fresh-api
|
0ace76c3d7d423e95d5e3b3c7cd0f74abcf975bd
|
[
"Apache-2.0"
] | 8 |
2015-03-07T23:52:30.000Z
|
2015-12-25T04:25:23.000Z
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
(r'^', include('whats_fresh.whats_fresh_api.urls')),
)
if settings.DEBUG:
# Allow statics to be served if in debug mode
urlpatterns += patterns(
'',
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT}))
| 26.894737 | 64 | 0.65362 |
8ee77f8af7f4f737fcd8d5dd5c2a5abc41f6105e
| 1,161 |
py
|
Python
|
final_project/machinetranslation/translator.py
|
Giulio987/xzceb-flask_eng_fr
|
704386931e032c6c149eaed3b39b874395d25618
|
[
"Apache-2.0"
] | null | null | null |
final_project/machinetranslation/translator.py
|
Giulio987/xzceb-flask_eng_fr
|
704386931e032c6c149eaed3b39b874395d25618
|
[
"Apache-2.0"
] | null | null | null |
final_project/machinetranslation/translator.py
|
Giulio987/xzceb-flask_eng_fr
|
704386931e032c6c149eaed3b39b874395d25618
|
[
"Apache-2.0"
] | null | null | null |
#import json
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
language_translator.set_service_url(url)
def english_to_french(english_text):
""" Function to convert a string from english to french"""
french_text = ''
if len(english_text) > 0:
translation = language_translator.translate(
text=english_text,
model_id='en-fr').get_result()
french_text = translation['translations'][0]['translation']
return french_text
def french_to_english(french_text):
""" Function to convert a string from french to english"""
english_text = ''
if len(french_text) > 0:
translation = language_translator.translate(text=french_text,model_id="fr-en").get_result()
print(translation)
english_text = translation['translations'][0]['translation']
return english_text
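# A minimal usage sketch (illustrative only): the sample strings below are
# assumptions, and running this block requires valid IBM Cloud credentials
# for `apikey` and `url` in the .env file loaded above.
if __name__ == "__main__":
    print(english_to_french("Hello, how are you today?"))  # expect a French rendering
    print(french_to_english("Bonjour"))                    # expect something like "Hello"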
| 29.769231 | 99 | 0.728682 |
4297231d77d1dd0fc298fe508723210a3c32aa7b
| 4,734 |
py
|
Python
|
CSScoring.py
|
E10-10/Product-Clustering
|
39dd49c3dbc25d5ae4a8c7ba523a603ad7a7274d
|
[
"MIT"
] | null | null | null |
CSScoring.py
|
E10-10/Product-Clustering
|
39dd49c3dbc25d5ae4a8c7ba523a603ad7a7274d
|
[
"MIT"
] | null | null | null |
CSScoring.py
|
E10-10/Product-Clustering
|
39dd49c3dbc25d5ae4a8c7ba523a603ad7a7274d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CSScoring.py: Collection of tools for cgn-data-21-1
Capstone Project: Product Clustering
Functions:
f_score_i(cl_real_i, cl_pred_i):
return: 2*len(s_intsec) / (len(s_pred)+len(s_real))
recall_i(cl_real_i, cl_pred_i):
return: (len(s_real) - len(s_diff_r_p)) / len(s_real)
precision_i(cl_real_i, cl_pred_i)
return: (len(s_pred) - len(s_diff_p_r)) / len(s_pred)
# Define a function that returns all images which actually belong to the cluster of a certain image i
# The result is returned as a list containing strings
get_sim_all_pi(i_vec_1,i_vec_all):
return: i_vec_all.dot(i_vec_1)
get_sim_two_pi(i_vec_1,i_vec_2):
return: df_red_list
pred_cluster_of_i_w2v(i,threshold,df,labels,posting_id)
return: ls
"""
__author__ = "Elias Büchner / Niels-Christian Leight"
__license__ = "GPL"
__version__ = "0.1"
__status__ = "Development"
# import modules
import os
import pickle
import numpy as np
import pandas as pd
def f_score_i(cl_real_i, cl_pred_i):
'''
Description:
Calculate f-score for a single posting_id
f1-score is the mean of all f-scores
Parameters:
argument1 (list): list of posting_id's belonging to the real cluster
argument2 (list): list of posting_id's belonging to the predicted cluster
Returns:
float value of f-score
'''
s_pred = set(cl_pred_i)
s_real = set(cl_real_i)
s_intsec = s_pred.intersection(s_real)
return 2*len(s_intsec) / (len(s_pred)+len(s_real))
def recall_i(cl_real_i, cl_pred_i):
'''
Description:
Calculate recall for a single posting_id
Parameters:
argument1 (list): list of posting_id's belonging to the real cluster
argument2 (list): list of posting_id's belonging to the predicted cluster
Returns:
float value of recall
'''
s_pred = set(cl_pred_i)
s_real = set(cl_real_i)
s_diff_r_p = s_real.difference(s_pred)
return (len(s_real) - len(s_diff_r_p)) / len(s_real)
def precision_i(cl_real_i, cl_pred_i):
'''
Description:
Calculate precision for a single posting_id
Parameters:
argument1 (list): list of posting_id's belonging to the real cluster
argument2 (list): list of posting_id's belonging to the predicted cluster
Returns:
float value of precision
'''
s_pred = set(cl_pred_i)
s_real = set(cl_real_i)
s_diff_p_r = s_pred.difference(s_real)
return (len(s_pred) - len(s_diff_p_r)) / len(s_pred)
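# A minimal worked example of the three metrics above (posting_id values are
# illustrative only):
# real = ['p1', 'p2', 'p3', 'p4']             # posting_ids in the real cluster
# pred = ['p1', 'p2', 'p5']                   # posting_ids in the predicted cluster
# f_score_i(real, pred)   -> 2*2 / (3 + 4)  = 0.571...
# recall_i(real, pred)    -> (4 - 2) / 4    = 0.5
# precision_i(real, pred) -> (3 - 1) / 3    = 0.666...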
# Define a function that returns all images which actually belong to the cluster of a certain image i
# The result is returned as a list containing strings
def get_sim_all_pi(i_vec_1,i_vec_all):
return i_vec_all.dot(i_vec_1)
def get_sim_two_pi(i_vec_1,i_vec_2):
sim = np.dot(i_vec_1,i_vec_2)/(np.linalg.norm(i_vec_1)*np.linalg.norm(i_vec_2))
return sim
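# Illustrative sanity checks for the cosine similarity above (values assumed):
# get_sim_two_pi(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  -> 0.0 (orthogonal)
# get_sim_two_pi(np.array([1.0, 2.0]), np.array([2.0, 4.0]))  -> 1.0 (parallel)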
# x,y indicate the position of the two images in our DataFrame
def dist2(x,y,label_vec):
a = label_vec[x]
b = label_vec[y]
# (Euclidean Metric)
dist = np.sqrt(sum([(a[i] - b[i])**2 for i in range(label_vec.shape[1])]))
# (Manhattan-Metric)
#dist = sum([abs((a[i] - b[i])) for i in range(label_vec.shape[1])])
return dist
def real_cluster_of_i_w2v(i,df):
'''
Description:
Find real cluster for a single posting_id
Use this function when working with Word2Vec
Parameters:
argument1 (int): position of posting_id in DataFrame
Returns:
list of all posting_id's
'''
l_g = (df.iloc[i].at['label_group'])
df_red = df[df['label_group'] == l_g]
df_red_list = df_red['posting_id'].tolist()
return df_red_list
def pred_cluster_of_i_w2v(i,threshold,df,labels,posting_id):
'''
Description:
Find predicted cluster for a single posting_id
Use this function when working with Word2Vec
Parameters:
argument1 (int): position of posting_id in DataFrame
Returns:
list of all posting_id's
'''
list1 = []
list2 = []
list3 = []
for j in range(34250):
i_vec_1 = df['word_vec'][j]
i_vec_2 = df['word_vec'][i]
list1.append(round(get_sim_two_pi(i_vec_1, i_vec_2),4))
list2.append(labels[j])
list3.append(posting_id[j])
df_nlp = pd.DataFrame(data = [list1,list2,list3]).transpose()
df_nlp = df_nlp.sort_values(by = 0)
df_nlp = df_nlp[df_nlp[0] >= threshold]
ls = df_nlp[2].tolist()
return ls
# EOF
| 30.346154 | 105 | 0.644487 |
633b8aeaafc9bbbe503811ad6a5059abbadeebd8
| 82 |
py
|
Python
|
Simulation/GUI/src/gui/program /program_frame.py
|
fontysrobotics/ARMinor-2020-Maniputaltor-with-FOC-and-FPGA
|
7a130fc62d0d5b22b585f2a7481636ae79e36b6d
|
[
"BSD-3-Clause"
] | 5 |
2020-08-30T14:20:54.000Z
|
2022-03-25T07:29:59.000Z
|
Simulation/GUI/src/gui/program /program_frame.py
|
fontysrobotics/ARMinor-2020-Maniputaltor-with-FOC-and-FPGA
|
7a130fc62d0d5b22b585f2a7481636ae79e36b6d
|
[
"BSD-3-Clause"
] | 3 |
2021-06-08T21:59:37.000Z
|
2022-01-13T02:59:57.000Z
|
Simulation/GUI/src/gui/program /program_frame.py
|
fontysrobotics/ARMinor-2020-Maniputaltor-with-FOC-and-FPGA
|
7a130fc62d0d5b22b585f2a7481636ae79e36b6d
|
[
"BSD-3-Clause"
] | 3 |
2020-07-03T10:23:40.000Z
|
2020-11-11T03:31:07.000Z
|
# frame for the GUI related to programming the robot arm
# TODO: programming frame
| 27.333333 | 56 | 0.792683 |
eb3953658284222e3b47a25083479627c5fe3102
| 1,021 |
py
|
Python
|
tensorflow/python/compiler/xla/__init__.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 848 |
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tensorflow/python/compiler/xla/__init__.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 1,056 |
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/compiler/xla/__init__.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 506 |
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for controlling the Tensorflow/XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.compiler.xla import jit
from tensorflow.python.compiler.xla import xla
# pylint: enable=unused-import
| 40.84 | 80 | 0.72478 |
3bff6b32e6075c00c1ddc81d07120e02fc09fb8a
| 29,476 |
py
|
Python
|
python/dgl/frame.py
|
shouhengtuo/dgl
|
43c0a5471ed26887f265c9308d37e32b29857e27
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/frame.py
|
shouhengtuo/dgl
|
43c0a5471ed26887f265c9308d37e32b29857e27
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/frame.py
|
shouhengtuo/dgl
|
43c0a5471ed26887f265c9308d37e32b29857e27
|
[
"Apache-2.0"
] | 1 |
2020-09-07T02:11:42.000Z
|
2020-09-07T02:11:42.000Z
|
"""Columnar storage for DGLGraph."""
from __future__ import absolute_import
from collections import namedtuple
from collections.abc import MutableMapping
import numpy as np
from . import backend as F
from .base import DGLError, dgl_warning
from .init import zero_initializer
from . import utils
class Scheme(namedtuple('Scheme', ['shape', 'dtype'])):
"""The column scheme.
Parameters
----------
shape : tuple of int
The feature shape.
dtype : backend-specific type object
The feature data type.
"""
# Pickling torch dtypes could be problematic; this is a workaround.
# I also have to create data_type_dict and reverse_data_type_dict
# attributes just for this bug.
# I raised an issue in PyTorch bug tracker:
# https://github.com/pytorch/pytorch/issues/14057
def __reduce__(self):
state = (self.shape, F.reverse_data_type_dict[self.dtype])
return self._reconstruct_scheme, state
@classmethod
def _reconstruct_scheme(cls, shape, dtype_str):
dtype = F.data_type_dict[dtype_str]
return cls(shape, dtype)
def infer_scheme(tensor):
"""Infer column scheme from the given tensor data.
Parameters
----------
tensor : Tensor
The tensor data.
Returns
-------
Scheme
The column scheme.
"""
return Scheme(tuple(F.shape(tensor)[1:]), F.dtype(tensor))
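# A minimal sketch of scheme inference, assuming the PyTorch backend is active:
# >>> import torch
# >>> infer_scheme(torch.zeros((5, 3)))   # 5 rows of 3-dimensional features
# Scheme(shape=(3,), dtype=torch.float32)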
class Column(object):
"""A column is a compact store of features of multiple nodes/edges.
Currently, we use one dense tensor to batch all the feature tensors
together (along the first dimension).
Parameters
----------
data : Tensor
The initial data of the column.
scheme : Scheme, optional
The scheme of the column. Will be inferred if not provided.
"""
def __init__(self, data, scheme=None):
self.data = data
self.scheme = scheme if scheme else infer_scheme(data)
def __len__(self):
"""The column length."""
return F.shape(self.data)[0]
@property
def shape(self):
"""Return the scheme shape (feature shape) of this column."""
return self.scheme.shape
def __getitem__(self, idx):
"""Return the feature data given the index.
Parameters
----------
idx : utils.Index
The index.
Returns
-------
Tensor
The feature data
"""
if idx.slice_data() is not None:
slc = idx.slice_data()
return F.narrow_row(self.data, slc.start, slc.stop)
else:
user_idx = idx.tousertensor(F.context(self.data))
return F.gather_row(self.data, user_idx)
def __setitem__(self, idx, feats):
"""Update the feature data given the index.
The update is performed out-of-place so it can be used in autograd mode.
For inplace write, please use ``update``.
Parameters
----------
idx : utils.Index or slice
The index.
feats : Tensor
The new features.
"""
self.update(idx, feats, inplace=False)
def update(self, idx, feats, inplace):
"""Update the feature data given the index.
Parameters
----------
idx : utils.Index
The index.
feats : Tensor
The new features.
inplace : bool
If true, use inplace write.
"""
feat_scheme = infer_scheme(feats)
if feat_scheme != self.scheme:
raise DGLError("Cannot update column of scheme %s using feature of scheme %s."
% (feat_scheme, self.scheme))
if inplace:
idx = idx.tousertensor(F.context(self.data))
F.scatter_row_inplace(self.data, idx, feats)
elif idx.slice_data() is not None:
# for contiguous indices narrow+concat is usually faster than scatter row
slc = idx.slice_data()
parts = [feats]
if slc.start > 0:
parts.insert(0, F.narrow_row(self.data, 0, slc.start))
if slc.stop < len(self):
parts.append(F.narrow_row(self.data, slc.stop, len(self)))
self.data = F.cat(parts, dim=0)
else:
idx = idx.tousertensor(F.context(self.data))
self.data = F.scatter_row(self.data, idx, feats)
def extend(self, feats, feat_scheme=None):
"""Extend the feature data.
Parameters
----------
feats : Tensor
The new features.
feat_scheme : Scheme, optional
The scheme
"""
if feat_scheme is None:
feat_scheme = infer_scheme(feats)
if feat_scheme != self.scheme:
raise DGLError("Cannot update column of scheme %s using feature of scheme %s."
% (feat_scheme, self.scheme))
feats = F.copy_to(feats, F.context(self.data))
self.data = F.cat([self.data, feats], dim=0)
@staticmethod
def create(data):
"""Create a new column using the given data."""
if isinstance(data, Column):
return Column(data.data, data.scheme)
else:
return Column(data)
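# A minimal sketch of Column behaviour, assuming the PyTorch backend is active:
# >>> import torch
# >>> col = Column.create(torch.zeros((3, 2)))   # 3 rows of 2-dimensional features
# >>> len(col), col.shape
# (3, (2,))
# >>> col.extend(torch.ones((1, 2)))             # append one more row
# >>> len(col)
# 4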
class Frame(MutableMapping):
"""The columnar storage for node/edge features.
The frame is a dictionary from feature fields to feature columns.
All columns should have the same number of rows (i.e. the same first dimension).
Parameters
----------
data : dict-like, optional
The frame data in dictionary. If the provided data is another frame,
this frame will NOT share columns with the given frame. So any out-place
update on one will not reflect to the other. The inplace update will
be seen by both. This follows the semantic of python's container.
num_rows : int, optional [default=0]
The number of rows in this frame. If ``data`` is provided, ``num_rows``
will be ignored and inferred from the given data.
"""
def __init__(self, data=None, num_rows=0):
if data is None:
self._columns = dict()
self._num_rows = num_rows
else:
# Note that we always create a new column for the given data.
# This avoids two frames accidentally sharing the same column.
self._columns = {k : Column.create(v) for k, v in data.items()}
if len(self._columns) != 0:
self._num_rows = len(next(iter(self._columns.values())))
else:
self._num_rows = 0
# sanity check
for name, col in self._columns.items():
if len(col) != self._num_rows:
raise DGLError('Expected all columns to have same # rows (%d), '
'got %d on %r.' % (self._num_rows, len(col), name))
# Initializer for empty values. Initializer is a callable.
# If it is None, a warning will be raised
# in the first call and zero initializer will be used later.
self._initializers = {} # per-column initializers
self._remote_initializer = None
self._default_initializer = None
def _warn_and_set_initializer(self):
dgl_warning('Initializer is not set. Use zero initializer instead.'
' To suppress this warning, use `set_initializer` to'
' explicitly specify which initializer to use.')
self._default_initializer = zero_initializer
def get_initializer(self, column=None):
"""Get the initializer for empty values for the given column.
Parameters
----------
column : str
The column
Returns
-------
callable
The initializer
"""
return self._initializers.get(column, self._default_initializer)
def set_initializer(self, initializer, column=None):
"""Set the initializer for empty values, for a given column or all future
columns.
Initializer is a callable that returns a tensor given the shape and data type.
Parameters
----------
initializer : callable
The initializer.
column : str, optional
The column name
"""
if column is None:
self._default_initializer = initializer
else:
self._initializers[column] = initializer
def set_remote_initializer(self, initializer):
"""Set the remote initializer when a column is added to the frame.
Initializer is a callable that returns a tensor given a local tensor and tensor name.
Parameters
----------
initializer : callable
The initializer.
"""
self._remote_initializer = initializer
@property
def schemes(self):
"""Return a dictionary of column name to column schemes."""
return {k : col.scheme for k, col in self._columns.items()}
@property
def num_columns(self):
"""Return the number of columns in this frame."""
return len(self._columns)
@property
def num_rows(self):
"""Return the number of rows in this frame."""
return self._num_rows
def __contains__(self, name):
"""Return true if the given column name exists."""
return name in self._columns
def __getitem__(self, name):
"""Return the column of the given name.
Parameters
----------
name : str
The column name.
Returns
-------
Column
The column.
"""
return self._columns[name]
def __setitem__(self, name, data):
"""Update the whole column.
Parameters
----------
name : str
The column name.
col : Column or data convertible to Column
The column data.
"""
self.update_column(name, data)
def __delitem__(self, name):
"""Delete the whole column.
Parameters
----------
name : str
The column name.
"""
del self._columns[name]
def add_column(self, name, scheme, ctx):
"""Add a new column to the frame.
The frame will be initialized by the initializer.
Parameters
----------
name : str
The column name.
scheme : Scheme
The column scheme.
ctx : DGLContext
The column context.
"""
if name in self:
dgl_warning('Column "%s" already exists. Ignore adding this column again.' % name)
return
if self.get_initializer(name) is None:
self._warn_and_set_initializer()
initializer = self.get_initializer(name)
init_data = initializer((self.num_rows,) + scheme.shape, scheme.dtype,
ctx, slice(0, self.num_rows))
# If the data is backed by a remote server, we need to move data
# to the remote server.
if self._remote_initializer is not None:
init_data = self._remote_initializer(name, init_data)
self._columns[name] = Column(init_data, scheme)
def add_rows(self, num_rows):
"""Add blank rows to this frame.
For existing fields, the rows will be extended according to their
initializers.
Parameters
----------
num_rows : int
The number of new rows
"""
feat_placeholders = {}
for key, col in self._columns.items():
scheme = col.scheme
ctx = F.context(col.data)
if self.get_initializer(key) is None:
self._warn_and_set_initializer()
initializer = self.get_initializer(key)
new_data = initializer((num_rows,) + scheme.shape, scheme.dtype,
ctx, slice(self._num_rows, self._num_rows + num_rows))
feat_placeholders[key] = new_data
self._append(Frame(feat_placeholders))
self._num_rows += num_rows
def update_column(self, name, data):
"""Add or replace the column with the given name and data.
Parameters
----------
name : str
The column name.
data : Column or data convertible to Column
The column data.
"""
# If the data is backed by a remote server, we need to move data
# to the remote server.
if self._remote_initializer is not None:
data = self._remote_initializer(name, data)
col = Column.create(data)
if len(col) != self.num_rows:
raise DGLError('Expected data to have %d rows, got %d.' %
(self.num_rows, len(col)))
self._columns[name] = col
def _append(self, other):
assert self._remote_initializer is None, \
"We don't support append if data in the frame is mapped from a remote server."
# NOTE: `other` can be empty.
if self.num_rows == 0:
# if no rows in current frame; append is equivalent to
# directly updating columns.
self._columns = {key: Column.create(data) for key, data in other.items()}
else:
# pad columns that are not provided in the other frame with initial values
for key, col in self.items():
if key in other:
continue
scheme = col.scheme
ctx = F.context(col.data)
if self.get_initializer(key) is None:
self._warn_and_set_initializer()
initializer = self.get_initializer(key)
new_data = initializer((other.num_rows,) + scheme.shape,
scheme.dtype, ctx,
slice(self._num_rows, self._num_rows + other.num_rows))
other[key] = new_data
# append other to self
for key, col in other.items():
if key not in self._columns:
# the column does not exist; init a new column
self.add_column(key, col.scheme, F.context(col.data))
self._columns[key].extend(col.data, col.scheme)
def append(self, other):
"""Append another frame's data into this frame.
If the current frame is empty, it will just use the columns of the
given frame. Otherwise, the given data should contain all the
column keys of this frame.
Parameters
----------
other : Frame or dict-like
The frame data to be appended.
"""
if not isinstance(other, Frame):
other = Frame(other)
self._append(other)
self._num_rows += other.num_rows
def clear(self):
"""Clear this frame. Remove all the columns."""
self._columns = {}
self._num_rows = 0
def __iter__(self):
"""Return an iterator of columns."""
return iter(self._columns)
def __len__(self):
"""Return the number of columns."""
return self.num_columns
def keys(self):
"""Return the keys."""
return self._columns.keys()
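# A minimal sketch of Frame usage, assuming the PyTorch backend is active:
# >>> import torch
# >>> f = Frame({'feat': torch.zeros((4, 2))})   # one column, four rows
# >>> f.num_rows, f.num_columns
# (4, 1)
# >>> f.set_initializer(zero_initializer)
# >>> f.add_rows(2)                              # new rows filled by the initializer
# >>> f.num_rows
# 6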
class FrameRef(MutableMapping):
"""Reference object to a frame on a subset of rows.
Parameters
----------
frame : Frame, optional
The underlying frame. If not given, the reference will point to a
new empty frame.
index : utils.Index, optional
The rows that are referenced in the underlying frame. If not given,
the whole frame is referenced. The index should be distinct (no
duplication is allowed).
"""
def __init__(self, frame=None, index=None):
self._frame = frame if frame is not None else Frame()
# TODO(minjie): check no duplication
assert index is None or isinstance(index, utils.Index)
if index is None:
self._index = utils.toindex(slice(0, self._frame.num_rows))
else:
self._index = index
@property
def schemes(self):
"""Return the frame schemes.
Returns
-------
dict of str to Scheme
The frame schemes.
"""
return self._frame.schemes
@property
def num_columns(self):
"""Return the number of columns in the referred frame."""
return self._frame.num_columns
@property
def num_rows(self):
"""Return the number of rows referred."""
return len(self._index)
def set_initializer(self, initializer, column=None):
"""Set the initializer for empty values.
Initializer is a callable that returns a tensor given the shape and data type.
Parameters
----------
initializer : callable
The initializer.
column : str, optional
The column name
"""
self._frame.set_initializer(initializer, column=column)
def set_remote_initializer(self, initializer):
"""Set the remote initializer when a column is added to the frame.
Initializer is a callable that returns a tensor given a local tensor and tensor name.
Parameters
----------
initializer : callable
The initializer.
"""
self._frame.set_remote_initializer(initializer)
def get_initializer(self, column=None):
"""Get the initializer for empty values for the given column.
Parameters
----------
column : str
The column
Returns
-------
callable
The initializer
"""
return self._frame.get_initializer(column)
def __contains__(self, name):
"""Return whether the column name exists."""
return name in self._frame
def __iter__(self):
"""Return the iterator of the columns."""
return iter(self._frame)
def __len__(self):
"""Return the number of columns."""
return self.num_columns
def keys(self):
"""Return the keys."""
return self._frame.keys()
def __getitem__(self, key):
"""Get data from the frame.
If the provided key is a string, the corresponding column data will be returned.
If the provided key is an index or a slice, the corresponding rows will be selected.
The returned rows are saved in a lazy dictionary so only the real selection happens
when the explicit column name is provided.
Examples (using pytorch)
------------------------
>>> # create a frame of two columns and five rows
>>> f = Frame({'c1' : torch.zeros([5, 2]), 'c2' : torch.ones([5, 2])})
>>> fr = FrameRef(f)
>>> # select the row 1 and 2, the returned `rows` is a lazy dictionary.
>>> rows = fr[Index([1, 2])]
>>> rows['c1'] # only select rows for 'c1' column; 'c2' column is not sliced.
Parameters
----------
key : str or utils.Index
The key.
Returns
-------
Tensor or lazy dict of tensors
Depends on whether it is a column selection or row selection.
"""
if not isinstance(key, (str, utils.Index)):
raise DGLError('Argument "key" must be either str or utils.Index type.')
if isinstance(key, str):
return self.select_column(key)
elif key.is_slice(0, self.num_rows):
# shortcut for selecting all the rows
return self
else:
return self.select_rows(key)
def select_column(self, name):
"""Return the column of the given name.
If only part of the rows are referenced, fetching the whole column will
also slice out the referenced rows.
Parameters
----------
name : str
The column name.
Returns
-------
Tensor
The column data.
"""
col = self._frame[name]
if self.is_span_whole_column():
return col.data
else:
return col[self._index]
def select_rows(self, query):
"""Return the rows given the query.
Parameters
----------
query : utils.Index or slice
The rows to be selected.
Returns
-------
utils.LazyDict
The lazy dictionary from str to the selected data.
"""
rows = self._getrows(query)
return utils.LazyDict(lambda key: self._frame[key][rows], keys=self.keys())
def __setitem__(self, key, val):
"""Update the data in the frame. The update is done out-of-place.
Parameters
----------
key : str or utils.Index
The key.
val : Tensor or dict of tensors
The value.
See Also
--------
update
"""
self.update_data(key, val, inplace=False)
def update_data(self, key, val, inplace):
"""Update the data in the frame.
If the provided key is a string, the corresponding column data will be updated.
The provided value should be one tensor that have the same scheme and length
as the column.
If the provided key is an index, the corresponding rows will be updated. The
value provided should be a dictionary of string to the data of each column.
All updates are performed out-of-place to work with autograd. For inplace
update, use ``update_column`` or ``update_rows``.
Parameters
----------
key : str or utils.Index
The key.
val : Tensor or dict of tensors
The value.
inplace: bool
If True, update will be done in place
"""
if not isinstance(key, (str, utils.Index)):
raise DGLError('Argument "key" must be either str or utils.Index type.')
if isinstance(key, str):
self.update_column(key, val, inplace=inplace)
elif key.is_slice(0, self.num_rows):
# shortcut for updating all the rows
for colname, col in val.items():
self.update_column(colname, col, inplace=inplace)
else:
self.update_rows(key, val, inplace=inplace)
def update_column(self, name, data, inplace):
"""Update the column.
If this frameref spans the whole column of the underlying frame, this is
equivalent to updating the column of the frame.
If this frameref only points to part of the rows, then updating the column
here corresponds to updating part of the column in the frame. An error is raised
if the given column name does not exist.
Parameters
----------
name : str
The column name.
data : Tensor
The update data.
inplace : bool
True if the update is performed inplacely.
"""
if self.is_span_whole_column():
if self.num_columns == 0:
# the frame is empty
self._index = utils.toindex(slice(0, len(data)))
self._frame[name] = data
else:
if name not in self._frame:
ctx = F.context(data)
self._frame.add_column(name, infer_scheme(data), ctx)
fcol = self._frame[name]
fcol.update(self._index, data, inplace)
def add_rows(self, num_rows):
"""Add blank rows to the underlying frame.
For existing fields, the rows will be extended according to their
initializers.
Note: only available for FrameRef that spans the whole column. The row
span will extend to new rows. Other FrameRefs referencing the same
frame will not be affected.
Parameters
----------
num_rows : int
Number of rows to add
"""
if not self.is_span_whole_column():
raise RuntimeError('FrameRef not spanning whole column.')
self._frame.add_rows(num_rows)
if self._index.slice_data() is not None:
# the index is a slice
slc = self._index.slice_data()
self._index = utils.toindex(slice(slc.start, slc.stop + num_rows))
else:
selfidxdata = self._index.tousertensor()
newdata = F.arange(self.num_rows, self.num_rows + num_rows)
self._index = utils.toindex(F.cat([selfidxdata, newdata], dim=0))
def update_rows(self, query, data, inplace):
"""Update the rows.
If the provided data has a new column, it will be added to the frame.
See Also
--------
``update_column``
Parameters
----------
query : utils.Index or slice
The rows to be updated.
data : dict-like
The row data.
inplace : bool
True if the update is performed inplace.
"""
rows = self._getrows(query)
for key, col in data.items():
if key not in self:
# add new column
tmpref = FrameRef(self._frame, rows)
tmpref.update_column(key, col, inplace)
else:
self._frame[key].update(rows, col, inplace)
def __delitem__(self, key):
"""Delete data in the frame.
If the provided key is a string, the corresponding column will be deleted.
If the provided key is an index object or a slice, the corresponding rows will
be deleted.
Please note that "deleted" rows are not really deleted, but simply removed
in the reference. As a result, if two FrameRefs point to the same Frame, deleting
from one ref will not reflect on the other. However, deleting columns is real.
Parameters
----------
key : str or utils.Index
The key.
"""
if not isinstance(key, (str, utils.Index)):
raise DGLError('Argument "key" must be either str or utils.Index type.')
if isinstance(key, str):
del self._frame[key]
else:
self.delete_rows(key)
def delete_rows(self, query):
"""Delete rows.
Please note that "deleted" rows are not really deleted, but simply removed
in the reference. As a result, if two FrameRefs point to the same Frame, deleting
from one ref will not reflect on the other. By contrast, deleting columns is real.
Parameters
----------
query : utils.Index
The rows to be deleted.
"""
query = query.tonumpy()
index = self._index.tonumpy()
self._index = utils.toindex(np.delete(index, query))
def append(self, other):
"""Append another frame into this one.
Parameters
----------
other : dict of str to tensor
The data to be appended.
"""
old_nrows = self._frame.num_rows
self._frame.append(other)
new_nrows = self._frame.num_rows
# update index
if (self._index.slice_data() is not None
and self._index.slice_data().stop == old_nrows):
# Self index is a slice and index.stop is equal to the size of the
# underlying frame. Can still use a slice for the new index.
oldstart = self._index.slice_data().start
self._index = utils.toindex(slice(oldstart, new_nrows))
else:
# convert it to user tensor and concat
selfidxdata = self._index.tousertensor()
newdata = F.arange(old_nrows, new_nrows)
self._index = utils.toindex(F.cat([selfidxdata, newdata], dim=0))
def clear(self):
"""Clear the frame."""
self._frame.clear()
self._index = utils.toindex(slice(0, 0))
def is_contiguous(self):
"""Return whether this refers to a contiguous range of rows."""
# NOTE: this check could have false negatives
return self._index.slice_data() is not None
def is_span_whole_column(self):
"""Return whether this refers to all the rows."""
return self.is_contiguous() and self.num_rows == self._frame.num_rows
def _getrows(self, query):
"""Internal function to convert from the local row ids to the row ids of the frame.
Parameters
----------
query : utils.Index
The query index.
Returns
-------
utils.Index
The actual index to the underlying frame.
"""
return self._index.get_items(query)
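# A minimal sketch of FrameRef row views, assuming the PyTorch backend is active
# (this mirrors the example in FrameRef.__getitem__):
# >>> import torch
# >>> f = Frame({'c1': torch.zeros([5, 2]), 'c2': torch.ones([5, 2])})
# >>> fr = FrameRef(f)                      # spans the whole underlying frame
# >>> rows = fr[utils.toindex([1, 2])]      # lazy dict; no slicing happens yet
# >>> rows['c1'].shape                      # the column is sliced on access
# torch.Size([2, 2])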
def frame_like(other, num_rows):
"""Create a new frame that has the same scheme as the given one.
Parameters
----------
other : Frame
The given frame.
num_rows : int
The number of rows of the new one.
Returns
-------
Frame
The new frame.
"""
# TODO(minjie): scheme is not inherited at the moment. Fix this
# when moving per-col initializer to column scheme.
newf = Frame(num_rows=num_rows)
# set the global initializer
if other.get_initializer() is None:
other._warn_and_set_initializer()
newf._default_initializer = other._default_initializer
# set per-col initializer
# TODO(minjie): hack; cannot rely on keys as the _initializers
# now supports non-existent columns.
newf._initializers = other._initializers
return newf
| 33.268623 | 94 | 0.57976 |
a301cdf056c2d2ec71e99d860985b637d5fca24c
| 1,086 |
py
|
Python
|
ctypes_generation/definitions/ntstatus_template.py
|
IMULMUL/PythonForWindows
|
61e027a678d5b87aa64fcf8a37a6661a86236589
|
[
"BSD-3-Clause"
] | 479 |
2016-01-08T00:53:34.000Z
|
2022-03-22T10:28:19.000Z
|
ctypes_generation/definitions/ntstatus_template.py
|
IMULMUL/PythonForWindows
|
61e027a678d5b87aa64fcf8a37a6661a86236589
|
[
"BSD-3-Clause"
] | 38 |
2017-12-29T17:09:04.000Z
|
2022-01-31T08:27:47.000Z
|
ctypes_generation/definitions/ntstatus_template.py
|
IMULMUL/PythonForWindows
|
61e027a678d5b87aa64fcf8a37a6661a86236589
|
[
"BSD-3-Clause"
] | 103 |
2016-01-10T01:32:17.000Z
|
2021-12-24T17:21:06.000Z
|
import sys
import ctypes
from .flag import Flag
is_py3 = (sys.version_info.major >= 3)
class NtStatusException(WindowsError):
ALL_STATUS = {}
def __init__(self, code):
try:
x = self.ALL_STATUS[code]
except KeyError:
x = (code, 'UNKNOW_ERROR', 'Error non documented in ntstatus.py')
self.code = x[0]
self.name = x[1]
self.descr = x[2]
code_as_long = ctypes.c_long(x[0]).value
if is_py3:
vals = code_as_long, x[1], x[2], code_as_long
else:
vals = code_as_long, x[1], x[2]
return super(NtStatusException, self).__init__(*vals)
def __str__(self):
return "{e.name}(0x{e.code:x}): {e.descr}".format(e=self)
def __repr__(self):
return "{0}(0x{1:08x}, {2})".format(type(self).__name__, self.code, self.name)
@classmethod
def register_ntstatus(cls, code, name, descr):
if code in cls.ALL_STATUS:
return # Use the first def
cls.ALL_STATUS[code] = (code, name, descr)
return Flag(name, code)
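# A minimal sketch of how the registry is used. 0xC0000022 is the standard
# NTSTATUS value for STATUS_ACCESS_DENIED; the description text is illustrative.
# >>> STATUS_ACCESS_DENIED = NtStatusException.register_ntstatus(
# ...     0xC0000022, "STATUS_ACCESS_DENIED",
# ...     "A process has requested access to an object but has not been granted those rights.")
# >>> raise NtStatusException(0xC0000022)
# NtStatusException: STATUS_ACCESS_DENIED(0xc0000022): A process has requested access ...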
| 29.351351 | 86 | 0.584715 |
9dae7f2a7ecc825f61acbbcc35f63a18dc8f09fc
| 54,388 |
py
|
Python
|
rapid7vmconsole/models/policy_override_submitter.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
rapid7vmconsole/models/policy_override_submitter.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
rapid7vmconsole/models/policy_override_submitter.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` | `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | 
`vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` ` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from rapid7vmconsole.models.link import Link # noqa: F401,E501
class PolicyOverrideSubmitter(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'comment': 'str',
'date': 'str',
'links': 'list[Link]',
'name': 'str',
'user': 'int'
}
attribute_map = {
'comment': 'comment',
'date': 'date',
'links': 'links',
'name': 'name',
'user': 'user'
}
def __init__(self, comment=None, date=None, links=None, name=None, user=None): # noqa: E501
"""PolicyOverrideSubmitter - a model defined in Swagger""" # noqa: E501
self._comment = None
self._date = None
self._links = None
self._name = None
self._user = None
self.discriminator = None
self.comment = comment
if date is not None:
self.date = date
if links is not None:
self.links = links
if name is not None:
self.name = name
if user is not None:
self.user = user
@property
def comment(self):
"""Gets the comment of this PolicyOverrideSubmitter. # noqa: E501
A comment from the submitter as to why the policy override was submitted. Cannot exceed 1024 characters. # noqa: E501
:return: The comment of this PolicyOverrideSubmitter. # noqa: E501
:rtype: str
"""
return self._comment
@comment.setter
def comment(self, comment):
"""Sets the comment of this PolicyOverrideSubmitter.
A comment from the submitter as to why the policy override was submitted. Cannot exceed 1024 characters. # noqa: E501
:param comment: The comment of this PolicyOverrideSubmitter. # noqa: E501
:type: str
"""
if comment is None:
raise ValueError("Invalid value for `comment`, must not be `None`") # noqa: E501
self._comment = comment
@property
def date(self):
"""Gets the date of this PolicyOverrideSubmitter. # noqa: E501
The date the policy override was submitted. # noqa: E501
:return: The date of this PolicyOverrideSubmitter. # noqa: E501
:rtype: str
"""
return self._date
@date.setter
def date(self, date):
"""Sets the date of this PolicyOverrideSubmitter.
The date the policy override was submitted. # noqa: E501
:param date: The date of this PolicyOverrideSubmitter. # noqa: E501
:type: str
"""
self._date = date
@property
def links(self):
"""Gets the links of this PolicyOverrideSubmitter. # noqa: E501
:return: The links of this PolicyOverrideSubmitter. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this PolicyOverrideSubmitter.
:param links: The links of this PolicyOverrideSubmitter. # noqa: E501
:type: list[Link]
"""
self._links = links
@property
def name(self):
"""Gets the name of this PolicyOverrideSubmitter. # noqa: E501
The login name of the user that submitted the policy override. # noqa: E501
:return: The name of this PolicyOverrideSubmitter. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this PolicyOverrideSubmitter.
The login name of the user that submitted the policy override. # noqa: E501
:param name: The name of this PolicyOverrideSubmitter. # noqa: E501
:type: str
"""
self._name = name
@property
def user(self):
"""Gets the user of this PolicyOverrideSubmitter. # noqa: E501
The identifier of the user that submitted the policy override. # noqa: E501
:return: The user of this PolicyOverrideSubmitter. # noqa: E501
:rtype: int
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this PolicyOverrideSubmitter.
The identifier of the user that submitted the policy override. # noqa: E501
:param user: The user of this PolicyOverrideSubmitter. # noqa: E501
:type: int
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PolicyOverrideSubmitter):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
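# A minimal usage sketch (argument values are arbitrary examples; `comment` is the only
# required attribute, the others may be omitted):
#
#     submitter = PolicyOverrideSubmitter(comment='Compensating control in place',
#                                         name='jdoe', user=42)
#     print(submitter.to_dict())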
| 238.54386 | 48,045 | 0.49726 |
7fb15a6d4da97b9b8236b688ebeb958245985b2d
| 6,817 |
py
|
Python
|
salt/utils/openstack/swift.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | 2 |
2017-09-17T21:10:35.000Z
|
2019-08-26T03:00:12.000Z
|
salt/utils/openstack/swift.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
salt/utils/openstack/swift.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | 3 |
2021-02-23T08:12:48.000Z
|
2021-02-23T08:13:13.000Z
|
# -*- coding: utf-8 -*-
'''
Swift utility class
===================
Author: Anthony Stanton <[email protected]>
'''
# Import python libs
import logging
from sys import stdout
from os import makedirs
from os.path import dirname, isdir
from errno import EEXIST
# Get logging started
log = logging.getLogger(__name__)
# Import Swift client libs
HAS_SWIFT = False
try:
from swiftclient import client
HAS_SWIFT = True
except ImportError:
pass
def check_swift():
return HAS_SWIFT
def mkdirs(path):
try:
makedirs(path)
except OSError as err:
if err.errno != EEXIST:
raise
# we've been playing fast and loose with kwargs, but the swiftclient isn't
# going to accept any old thing
def _sanitize(kwargs):
variables = (
'user', 'key', 'authurl',
'retries', 'preauthurl', 'preauthtoken', 'snet',
'starting_backoff', 'max_backoff', 'tenant_name',
'os_options', 'auth_version', 'cacert',
'insecure', 'ssl_compression'
)
ret = {}
for var in kwargs.keys():
if var in variables:
ret[var] = kwargs[var]
return ret
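# Illustration of the whitelist above (values are made up): any keyword argument that
# swiftclient.client.Connection would not accept is silently dropped.
#
#     _sanitize({'user': 'demo', 'key': 's3cr3t', 'profile': 'x'})
#     # -> {'user': 'demo', 'key': 's3cr3t'}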
class SaltSwift(object):
'''
Class for all swiftclient functions
'''
def __init__(
self,
user,
tenant_name,
auth_url,
password=None,
auth_version=2,
**kwargs
):
'''
Set up openstack credentials
'''
if not HAS_SWIFT:
log.error('Error:: unable to find swiftclient. Try installing it from the appropriate repository.')
return None
self.kwargs = kwargs.copy()
self.kwargs['user'] = user
self.kwargs['password'] = password
self.kwargs['tenant_name'] = tenant_name
self.kwargs['authurl'] = auth_url
self.kwargs['auth_version'] = auth_version
        if 'key' not in self.kwargs:
self.kwargs['key'] = password
self.kwargs = _sanitize(self.kwargs)
self.conn = client.Connection(**self.kwargs)
def get_account(self):
'''
List Swift containers
'''
try:
listing = self.conn.get_account()
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def get_container(self, cont):
'''
List files in a Swift container
'''
try:
listing = self.conn.get_container(cont)
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def put_container(self, cont):
'''
Create a new Swift container
'''
try:
self.conn.put_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def delete_container(self, cont):
'''
Delete a Swift container
'''
try:
self.conn.delete_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def post_container(self, cont, metadata=None):
'''
Update container metadata
'''
pass
def head_container(self, cont):
'''
Get container metadata
'''
pass
def get_object(self, cont, obj, local_file=None, return_bin=False):
'''
Retrieve a file from Swift
'''
try:
if local_file is None and return_bin is False:
return False
headers, body = self.conn.get_object(cont, obj, resp_chunk_size=65536)
if return_bin is True:
fp = stdout
else:
dirpath = dirname(local_file)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
fp = open(local_file, 'wb')
read_length = 0
for chunk in body:
read_length += len(chunk)
fp.write(chunk)
fp.close()
return True
# ClientException
# file/dir exceptions
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def put_object(self, cont, obj, local_file):
'''
Upload a file to Swift
'''
try:
fp = open(local_file, 'rb')
self.conn.put_object(cont, obj, fp)
fp.close()
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def delete_object(self, cont, obj):
'''
Delete a file from Swift
'''
try:
self.conn.delete_object(cont, obj)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def head_object(self, cont, obj):
'''
Get object metadata
'''
pass
def post_object(self, cont, obj, metadata):
'''
Update object metadata
'''
pass
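# A minimal usage sketch (the credential and container values are placeholders):
#
#     swift = SaltSwift(user='demo', tenant_name='demo-tenant',
#                       auth_url='http://keystone.example.com:5000/v2.0',
#                       password='secret')
#     swift.put_container('backups')
#     swift.put_object('backups', 'etc/hosts', '/etc/hosts')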
| 29.008511 | 111 | 0.526918 |
c6465f75c629db4b76d78d7095b3f33c40283f7b
| 185 |
py
|
Python
|
run.py
|
galactaknife/webchat
|
eff610365128989ff2d58a6d75a51d735b16dd98
|
[
"Apache-2.0"
] | 2 |
2021-03-13T15:27:19.000Z
|
2021-03-13T15:27:21.000Z
|
run.py
|
galactaknife/webchat
|
eff610365128989ff2d58a6d75a51d735b16dd98
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
galactaknife/webchat
|
eff610365128989ff2d58a6d75a51d735b16dd98
|
[
"Apache-2.0"
] | null | null | null |
from __init__ import create_app, socketio
# Get app instance
app = create_app()
# Run app through socketio
if __name__ == "__main__":
socketio.run(app, host="0.0.0.0", port=5000)
| 20.555556 | 48 | 0.713514 |
227cd6fd3f5cfff5b9275faaa6f8257c7f0a19e6
| 400 |
py
|
Python
|
main.py
|
chucoding/Notion2Github
|
820aace4e6f52a42adf2587f5c77ef768c4e1586
|
[
"MIT"
] | null | null | null |
main.py
|
chucoding/Notion2Github
|
820aace4e6f52a42adf2587f5c77ef768c4e1586
|
[
"MIT"
] | null | null | null |
main.py
|
chucoding/Notion2Github
|
820aace4e6f52a42adf2587f5c77ef768c4e1586
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI
from fastapi.logger import logger
from api import export_svg
app = FastAPI()
@app.get('/')
def hello_world():
logger.debug('hello_world')
return 'Hello World!'
@app.get('/calendar')
def show_calendar():
return export_svg.write()
if __name__ == '__main__':
import uvicorn
uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True, debug=True)
| 21.052632 | 79 | 0.695 |
98074a197618f76796753cf2d19fceba91c9c32c
| 64,669 |
py
|
Python
|
GenEO/precond.py
|
gouarin/DDM_elasticity
|
d72b284fcc9d98b437d580f1b2a94b99250fa6e5
|
[
"BSD-3-Clause"
] | 9 |
2018-05-29T10:53:43.000Z
|
2021-10-11T02:30:59.000Z
|
GenEO/precond.py
|
gouarin/DDM_elasticity
|
d72b284fcc9d98b437d580f1b2a94b99250fa6e5
|
[
"BSD-3-Clause"
] | null | null | null |
GenEO/precond.py
|
gouarin/DDM_elasticity
|
d72b284fcc9d98b437d580f1b2a94b99250fa6e5
|
[
"BSD-3-Clause"
] | 1 |
2019-12-31T16:14:48.000Z
|
2019-12-31T16:14:48.000Z
|
# Authors:
# Loic Gouarin <[email protected]>
# Nicole Spillane <[email protected]>
#
# License: BSD 3 clause
from .assembling import buildElasticityMatrix
from .bc import bcApplyWestMat, bcApplyWest_vec
from .cg import cg
from .projection import projection, GenEO_V0, minimal_V0, coarse_operators
from petsc4py import PETSc
from slepc4py import SLEPc
import mpi4py.MPI as mpi
import numpy as np
import scipy as sp
import os
class PCBNN(object): #Neumann-Neumann and Additive Schwarz with no overlap
def __init__(self, A_IS):
"""
Initialize the domain decomposition preconditioner, multipreconditioner and coarse space with its operators
Parameters
==========
A_IS : petsc.Mat
The matrix of the problem in IS format. A must be a symmetric positive definite matrix
with symmetric positive semi-definite submatrices
PETSc.Options
=============
PCBNN_switchtoASM :Bool
Default is False
            If True then the domain decomposition preconditioner is the Additive Schwarz preconditioner with minimal
            overlap. If False (default) then the domain decomposition preconditioner is the BNN preconditioner.
PCBNN_kscaling : Bool
Default is True.
If true then kscaling (partition of unity that is proportional to the diagonal of the submatrices of A)
is used when a partition of unity is required. Otherwise multiplicity scaling is used when a partition
of unity is required. This may occur in two occasions:
- to scale the local BNN matrices if PCBNN_switchtoASM=True,
- in the GenEO eigenvalue problem for eigmin if PCBNN_switchtoASM=False and PCBNN_GenEO=True with
PCBNN_GenEO_eigmin > 0 (see projection.__init__ for the meaning of these options).
PCBNN_verbose : Bool
If True, some information about the preconditioners is printed when the code is executed.
PCBNN_GenEO : Bool
            Default is True.
If True then the coarse space is enriched by solving local generalized eigenvalue problems.
PCBNN_CoarseProjection : Bool
Default is True.
If False then there is no coarse projection: Two level Additive Schwarz or One-level preconditioner depending on PCBNN_addCoarseSolve.
If True, the coarse projection is applied: Projected preconditioner of hybrid preconditioner depending on PCBNN_addCoarseSolve.
PCBNN_addCoarseSolve : Bool
Default is True.
If True then (R0t A0\R0 r) is added to the preconditioned residual.
False corresponds to the projected preconditioner (need to choose initial guess accordingly) (or the one level preconditioner if PCBNN_CoarseProjection = False).
True corresponds to the hybrid preconditioner (or the fully additive preconditioner if PCBNN_CoarseProjection = False).
"""
OptDB = PETSc.Options()
self.switchtoASM = OptDB.getBool('PCBNN_switchtoASM', False) #use Additive Schwarz as a preconditioner instead of BNN
self.kscaling = OptDB.getBool('PCBNN_kscaling', True) #kscaling if true, multiplicity scaling if false
self.verbose = OptDB.getBool('PCBNN_verbose', False)
self.GenEO = OptDB.getBool('PCBNN_GenEO', True)
self.addCS = OptDB.getBool('PCBNN_addCoarseSolve', True)
self.projCS = OptDB.getBool('PCBNN_CoarseProjection', True)
self.viewPC = OptDB.getBool('PCBNN_view', True)
self.viewV0 = OptDB.getBool('PCBNN_viewV0', False)
self.viewGenEOV0 = OptDB.getBool('PCBNN_viewGenEO', False)
self.viewminV0 = OptDB.getBool('PCBNN_viewminV0', False)
self.test_case = OptDB.getString('test_case', 'default')
#extract Neumann matrix from A in IS format
Ms = A_IS.copy().getISLocalMat()
# convert A_IS from matis to mpiaij
A_mpiaij = A_IS.convert('mpiaij')
r, _ = A_mpiaij.getLGMap() #r, _ = A_IS.getLGMap()
is_A = PETSc.IS().createGeneral(r.indices)
# extract exact local solver
As = A_mpiaij.createSubMatrices(is_A)[0]
vglobal, _ = A_mpiaij.getVecs()
vlocal, _ = Ms.getVecs()
scatter_l2g = PETSc.Scatter().create(vlocal, None, vglobal, is_A)
        #compute the multiplicity of each degree of freedom
vlocal.set(1.)
vglobal.set(0.)
scatter_l2g(vlocal, vglobal, PETSc.InsertMode.ADD_VALUES)
scatter_l2g(vglobal, vlocal, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
NULL,mult_max = vglobal.max()
if self.viewPC:
_, self.ns = vlocal.getSizes()
_, self.nglob = vglobal.getSizes()
tempglobal = vglobal.getArray(readonly=True)
templocal = vlocal.getArray(readonly=True)
            self.nints = np.count_nonzero(tempglobal == 1) #interior dofs in this subdomain
            self.nGammas = np.count_nonzero(templocal -1) #interface (Gamma) dofs in this subdomain
# k-scaling or multiplicity scaling of the local (non-assembled) matrix
if self.kscaling == False:
Ms.diagonalScale(vlocal,vlocal)
else:
v1 = As.getDiagonal()
v2 = Ms.getDiagonal()
Ms.diagonalScale(v1/v2, v1/v2)
# the default local solver is the scaled non assembled local matrix (as in BNN)
if self.switchtoASM:
Atildes = As
if mpi.COMM_WORLD.rank == 0:
print('The user has chosen to switch to Additive Schwarz instead of BNN.')
else: #(default)
Atildes = Ms
ksp_Atildes = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes.setOperators(Atildes)
ksp_Atildes.setType('preonly')
pc_Atildes = ksp_Atildes.getPC()
pc_Atildes.setType('cholesky')
pc_Atildes.setFactorSolverType('mumps')
ksp_Atildes.setFromOptions()
ksp_Atildes_forSLEPc = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes_forSLEPc.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes_forSLEPc.setOperators(Atildes)
ksp_Atildes_forSLEPc.setType('preonly')
pc_Atildes_forSLEPc = ksp_Atildes_forSLEPc.getPC()
pc_Atildes_forSLEPc.setType('cholesky')
pc_Atildes_forSLEPc.setFactorSolverType('mumps')
ksp_Atildes_forSLEPc.setFromOptions()
self.A = A_mpiaij
self.Ms = Ms
self.As = As
self.ksp_Atildes = ksp_Atildes
self.ksp_Atildes_forSLEPc = ksp_Atildes_forSLEPc
self.work = vglobal.copy()
self.works_1 = vlocal.copy()
self.works_2 = self.works_1.copy()
self.scatter_l2g = scatter_l2g
self.mult_max = mult_max
self.minV0 = minimal_V0(self.ksp_Atildes)
if self.viewminV0 == True:
self.minV0.view()
if self.GenEO == True:
self.GenEOV0 = GenEO_V0(self.ksp_Atildes_forSLEPc,self.Ms,self.As,self.mult_max,self.minV0.V0s, self.minV0.labs)
self.V0s = self.GenEOV0.V0s
self.labs = self.GenEOV0.labs
if self.viewGenEOV0 == True:
self.GenEOV0.view()
else:
self.V0s = self.minV0.V0s
self.labs = self.minV0.labs
self.proj = coarse_operators(self.V0s,self.A,self.scatter_l2g,vlocal,self.work)
        #TODO: implement the case where no coarse projection is performed more cleanly, by not computing V0 at all
if self.addCS == False and self.projCS == False: #no coarse operation so set the size of V0 to zero
self.GenEO = False
self.minV0.nrb = 0
self.minV0.labs = []
self.minV0.mumpsCntl3= []
self.proj.gathered_dimV0s= mpi.COMM_WORLD.gather(self.minV0.nrb, root=0)
if self.viewV0 == True:
self.proj.view()
if self.viewPC == True:
self.view()
def mult(self, x, y):
"""
Applies the domain decomposition preconditioner followed by the projection preconditioner to a vector.
Parameters
==========
x : petsc.Vec
The vector to which the preconditioner is to be applied.
y : petsc.Vec
The vector that stores the result of the preconditioning operation.
"""
########################
########################
xd = x.copy()
if self.projCS == True:
self.proj.project_transpose(xd)
self.scatter_l2g(xd, self.works_1, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
self.ksp_Atildes.solve(self.works_1, self.works_2)
y.set(0.)
self.scatter_l2g(self.works_2, y, PETSc.InsertMode.ADD_VALUES)
if self.projCS == True:
self.proj.project(y)
if self.addCS == True:
xd = x.copy()
ytild = self.proj.coarse_init(xd) # I could save a coarse solve by combining this line with project_transpose
y += ytild
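        # With the default options (projCS and addCS both True) this is the hybrid form of
        # the preconditioner described in the class docstring; projCS=False with addCS=True
        # gives the fully additive variant, and addCS=False with projCS=True the projected
        # preconditioner (which requires a compatible initial guess).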
def MP_mult(self, x, y):
"""
Applies the domain decomposition multipreconditioner followed by the projection preconditioner to a vector.
Parameters
==========
x : petsc.Vec
The vector to which the preconditioner is to be applied.
y : FIX
The list of ndom vectors that stores the result of the multipreconditioning operation (one vector per subdomain).
"""
self.scatter_l2g(x, self.works_1, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
self.ksp_Atildes.solve(self.works_1, self.works_2)
for i in range(mpi.COMM_WORLD.size):
self.works_1.set(0)
if mpi.COMM_WORLD.rank == i:
self.works_1 = self.works_2.copy()
y[i].set(0.)
self.scatter_l2g(self.works_1, y[i], PETSc.InsertMode.ADD_VALUES)
self.proj.project(y[i])
def apply(self,pc, x, y):
"""
Applies the domain decomposition preconditioner followed by the projection preconditioner to a vector.
This is just a call to PCBNN.mult with the function name and arguments that allow PCBNN to be passed
as a preconditioner to PETSc.ksp.
Parameters
==========
pc: This argument is not called within the function but it belongs to the standard way of calling a preconditioner.
x : petsc.Vec
The vector to which the preconditioner is to be applied.
y : petsc.Vec
The vector that stores the result of the preconditioning operation.
"""
self.mult(x,y)
def view(self):
self.gathered_ns = mpi.COMM_WORLD.gather(self.ns, root=0)
self.gathered_nints = mpi.COMM_WORLD.gather(self.nints, root=0)
self.gathered_Gammas = mpi.COMM_WORLD.gather(self.nGammas, root=0)
self.minV0.gathered_dim = mpi.COMM_WORLD.gather(self.minV0.nrb, root=0)
self.gathered_labs = mpi.COMM_WORLD.gather(self.labs, root=0)
if self.GenEO == True:
self.GenEOV0.gathered_nsharp = mpi.COMM_WORLD.gather(self.GenEOV0.n_GenEO_eigmax, root=0)
self.GenEOV0.gathered_nflat = mpi.COMM_WORLD.gather(self.GenEOV0.n_GenEO_eigmin, root=0)
self.GenEOV0.gathered_dimKerMs = mpi.COMM_WORLD.gather(self.GenEOV0.dimKerMs, root=0)
self.GenEOV0.gathered_Lambdasharp = mpi.COMM_WORLD.gather(self.GenEOV0.Lambda_GenEO_eigmax, root=0)
self.GenEOV0.gathered_Lambdaflat = mpi.COMM_WORLD.gather(self.GenEOV0.Lambda_GenEO_eigmin, root=0)
if mpi.COMM_WORLD.rank == 0:
print('#############################')
print(f'view of PCBNN')
print(f'{self.switchtoASM=}')
print(f'{self.kscaling= }')
print(f'{self.verbose= }')
print(f'{self.GenEO= }')
print(f'{self.addCS= }')
print(f'{self.projCS= }')
print(f'{self.viewPC= }')
print(f'{self.viewV0= }')
print(f'{self.viewGenEOV0= }')
print(f'{self.viewminV0= }')
print(f'{self.mult_max=}')
print(f'### info about the subdomains ###')
self.nint = np.sum(self.gathered_nints)
self.nGamma = self.nglob - self.nint
print(f'{self.gathered_ns =}')
print(f'{self.gathered_nints =}')
print(f'{self.gathered_Gammas=}')
print(f'{self.nGamma=}')
print(f'{self.nint=}')
print(f'{self.nglob=}')
print(f'{self.gathered_labs=}')
print(f'### info about minV0.V0s = (Ker(Atildes)) ###')
print(f'{self.minV0.mumpsCntl3=}')
if (self.ksp_Atildes.pc.getFactorSolverType() == 'mumps'):
print(f'dim(Ker(Atildes)) = {self.minV0.gathered_dim}')
else:
print(f'Ker(Atildes) not computed because pc is not mumps')
if self.GenEO == True:
print(f'### info about GenEOV0.V0s ###')
print(f'{self.GenEOV0.tau_eigmax=}')
print(f'{self.GenEOV0.tau_eigmin=}')
print(f'{self.GenEOV0.eigmax=}')
print(f'{self.GenEOV0.eigmin=}')
print(f'{self.GenEOV0.nev=}')
print(f'{self.GenEOV0.maxev=}')
print(f'{self.GenEOV0.mumpsCntl3=}')
print(f'{self.GenEOV0.verbose=}')
print(f'{self.GenEOV0.gathered_nsharp=}')
print(f'{self.GenEOV0.gathered_nflat=}')
print(f'{self.GenEOV0.gathered_dimKerMs=}')
#print(f'{np.array(self.GenEOV0.gathered_Lambdasharp)=}')
#print(f'{np.array(self.GenEOV0.gathered_Lambdaflat)=}')
print(f'### info about the coarse space ###')
print(f'{self.proj.V0_is_global=}')
print(f'{self.proj.gathered_dimV0s=}')
if self.GenEO == True:
print(f'global dim V0 = {np.sum(self.proj.gathered_dimV0s)} = ({np.sum(self.minV0.gathered_dim)} from Ker(Atildes)) + ({np.sum(self.GenEOV0.gathered_nsharp)} from GenEO_eigmax) + ({np.sum(self.GenEOV0.gathered_nflat)+np.sum(self.GenEOV0.gathered_dimKerMs)} from GenEO_eigmin)')
else:
print(f'global dim V0 = {np.sum(self.proj.gathered_dimV0s)} = ({np.sum(self.minV0.gathered_dim)} from Ker(Atildes))')
print('#############################')
self.savetofile()
def savetofile(self):
if mpi.COMM_WORLD.rank == 0:
if not os.path.exists(self.test_case):
os.mkdir(self.test_case)
np.savez(f'{self.test_case}/init',
switchtoASM= self.switchtoASM,
kscaling = self.kscaling,
verbose = self.verbose,
GenEO = self.GenEO,
addCS = self.addCS,
projCS = self.projCS,
viewPC = self.viewPC,
viewV0 = self.viewV0,
viewGenEOV0= self.viewGenEOV0,
viewminV0 = self.viewminV0,
mult_max = self.mult_max ,
gathered_ns = np.asarray(self.gathered_ns),
gathered_nints = np.asarray(self.gathered_nints),
gathered_Gammas = np.asarray(self.gathered_Gammas),
nGamma = self.nGamma,
nint = self.nint,
nglob = self.nglob,
minV0_mumpsCntl3 = self.minV0.mumpsCntl3,
V0_is_global= self.proj.V0_is_global,
gathered_dimV0s= np.asarray(self.proj.gathered_dimV0s),
minV0_gathered_dim = np.asarray(self.minV0.gathered_dim),
V0dim = np.sum(self.proj.gathered_dimV0s),
minV0dim = np.sum(self.minV0.gathered_dim),
gathered_labs= np.asarray(self.gathered_labs),
)
if self.GenEO == True:
np.savez(f'{self.test_case}/GenEO',
tau_eigmax = self.GenEOV0.tau_eigmax,
tau_eigmin = self.GenEOV0.tau_eigmin,
eigmax = self.GenEOV0.eigmax,
eigmin = self.GenEOV0.eigmin,
nev = self.GenEOV0.nev,
maxev = self.GenEOV0.maxev,
mumpsCntl3 = self.GenEOV0.mumpsCntl3,
verbose = self.GenEOV0.verbose,
gathered_nsharp = self.GenEOV0.gathered_nsharp,
gathered_nflat = self.GenEOV0.gathered_nflat,
gathered_dimKerMs = self.GenEOV0.gathered_dimKerMs,
gathered_Lambdasharp = np.asarray(self.GenEOV0.gathered_Lambdasharp,dtype='object'),
gathered_Lambdaflat = np.asarray(self.GenEOV0.gathered_Lambdaflat,dtype='object'),
sum_nsharp = np.sum(self.GenEOV0.gathered_nsharp),
sum_nflat = np.sum(self.GenEOV0.gathered_nflat),
sum_dimKerMs = np.sum(self.GenEOV0.gathered_dimKerMs)
)
class PCNew:
def __init__(self, A_IS):
OptDB = PETSc.Options()
self.switchtoASM = OptDB.getBool('PCNew_switchtoASM', False) #use Additive Schwarz as a preconditioner instead of BNN
self.switchtoASMpos = OptDB.getBool('PCNew_switchtoASMpos', False) #use Additive Schwarz as a preconditioner instead of BNN
self.verbose = OptDB.getBool('PCNew_verbose', False)
self.GenEO = OptDB.getBool('PCNew_GenEO', True)
#self.H2addCS = OptDB.getBool('PCNew_H2addCoarseSolve', True)
self.H2projCS = OptDB.getBool('PCNew_H2CoarseProjection', True)
self.H3addCS = OptDB.getBool('PCNew_H3addCoarseSolve', True)
self.H3projCS = OptDB.getBool('PCNew_H3CoarseProjection', True)
self.compute_ritz_apos = OptDB.getBool('PCNew_ComputeRitzApos', False)
        self.nev = OptDB.getInt('PCNew_Bs_nev', 20) #number of vectors requested from SLEPc for computing the negative part of Bs
self.viewPC = OptDB.getBool('PCNew_view', True)
self.viewV0 = OptDB.getBool('PCNew_viewV0', False)
self.viewGenEOV0 = OptDB.getBool('PCNew_viewGenEO', False)
self.viewminV0 = OptDB.getBool('PCNew_viewminV0', False)
self.viewnegV0 = OptDB.getBool('PCNew_viewnegV0', False)
self.test_case = OptDB.getString('test_case', 'default')
self.H2addCS = True #OptDB.getBool('PCNew_H2addCoarseSolve', True) (it is currently not an option to use a projected preconditioner for H2)
# Compute Bs (the symmetric matrix in the algebraic splitting of A)
# TODO: implement without A in IS format
ANeus = A_IS.getISLocalMat() #only the IS is used for the algorithm,
Mu = A_IS.copy()
Mus = Mu.getISLocalMat() #the IS format is used to compute Mu (multiplicity of each pair of dofs)
for i in range(ANeus.getSize()[0]):
col, _ = ANeus.getRow(i)
Mus.setValues([i], col, np.ones_like(col))
Mu.restoreISLocalMat(Mus)
Mu.assemble()
Mu = Mu.convert('mpiaij')
A_mpiaij = A_IS.convert('mpiaij')
B = A_mpiaij.duplicate()
for i in range(*A_mpiaij.getOwnershipRange()):
a_cols, a_values = A_mpiaij.getRow(i)
_, b_values = Mu.getRow(i)
B.setValues([i], a_cols, a_values/b_values, PETSc.InsertMode.INSERT_VALUES)
B.assemble()
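        # Each entry of A is divided by its multiplicity Mu_ij (the number of local Neumann
        # matrices that carry that entry); the intent is that the subdomain blocks Bs of B
        # then sum back to A, which is the algebraic splitting referred to above.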
# B.view()
# A_mpiaij.view()
# (A_mpiaij - B).view()
# data = ANeus.getArray()
# if mpi.COMM_WORLD.rank == 0:
# print(dir(ANeus))
# print(type(ANeus), ANeus.getType())
###################@
# convert A_IS from matis to mpiaij
#A_mpiaij = A_IS.convertISToAIJ()
r, _ = A_mpiaij.getLGMap() #r, _ = A_IS.getLGMap()
is_A = PETSc.IS().createGeneral(r.indices)
# extract exact local solver
As = A_mpiaij.createSubMatrices(is_A)[0]
Bs = B.createSubMatrices(is_A)[0]
#mumps solver for Bs
Bs_ksp = PETSc.KSP().create(comm=PETSc.COMM_SELF)
Bs_ksp.setOptionsPrefix("Bs_ksp_")
Bs_ksp.setOperators(Bs)
Bs_ksp.setType('preonly')
Bs_pc = Bs_ksp.getPC()
Bs_pc.setType('cholesky')
Bs_pc.setFactorSolverType('mumps')
Bs_pc.setFactorSetUpSolverType()
Bs_pc.setUp()
Bs_ksp.setFromOptions()
#temp = Bs.getValuesCSR()
work, _ = A_mpiaij.getVecs()
work_2 = work.duplicate()
works, _ = As.getVecs()
works_2 = works.duplicate()
mus = works.duplicate()
scatter_l2g = PETSc.Scatter().create(works, None, work, is_A)
#compute the multiplicity of each dof
work = Mu.getDiagonal()
NULL,mult_max = work.max()
scatter_l2g(work, mus, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
if self.viewPC:
_, self.ns = mus.getSizes()
_, self.nglob = work.getSizes()
tempglobal = work.getArray(readonly=True)
templocal = mus.getArray(readonly=True)
            self.nints = np.count_nonzero(tempglobal == 1) #interior dofs in this subdomain
            self.nGammas = np.count_nonzero(templocal -1) #interface (Gamma) dofs in this subdomain
invmus = mus.duplicate()
invmus = 1/mus
if mpi.COMM_WORLD.rank == 0:
print(f'multmax: {mult_max}')
DVnegs = []
Vnegs = []
invmusVnegs = []
#BEGIN diagonalize Bs
#Eigenvalue Problem for smallest eigenvalues
eps = SLEPc.EPS().create(comm=PETSc.COMM_SELF)
eps.setDimensions(nev=self.nev)
eps.setProblemType(SLEPc.EPS.ProblemType.HEP)
eps.setOperators(Bs)
#print(f'dimension of Bs : {Bs.getSize()}')
#OPTION 1: works but dense algebra
eps.setType(SLEPc.EPS.Type.LAPACK)
eps.setWhichEigenpairs(SLEPc.EPS.Which.SMALLEST_REAL) #with lapack this just tells slepc how to order the eigenpairs
##END OPTION 1
##OPTION 2: default solver (Krylov Schur) but error with getInertia - is there a MUMPS mattype - Need to use MatCholeskyFactor
#if Which eigenpairs is set to SMALLEST_REAL, some are computed but not all
##Bs.setOption(PETSc.Mat.Option.SYMMETRIC, True)
##Bs.convert('sbaij')
##IScholBs = is_A.duplicate()
##Bs.factorCholesky(IScholBs) #not implemented
#tempksp = PETSc.KSP().create(comm=PETSc.COMM_SELF)
#tempksp.setOperators(Bs)
#tempksp.setType('preonly')
#temppc = tempksp.getPC()
#temppc.setType('cholesky')
#temppc.setFactorSolverType('mumps')
#temppc.setFactorSetUpSolverType()
#tempF = temppc.getFactorMatrix()
        #tempF.setMumpsIcntl(13, 1) #needed to compute inertia according to the SLEPc docs, inertia computation still doesn't work though
#temppc.setUp()
##eps.setOperators(tempF)
#eps.setWhichEigenpairs(SLEPc.EPS.Which.ALL)
#eps.setInterval(PETSc.NINFINITY,0.0)
#eps.setUp()
##eps.setWhichEigenpairs(SLEPc.EPS.Which.TARGET_REAL)
##eps.setTarget(0.)
##if len(Vnegs) > 0 :
## eps.setDeflationSpace(Vnegs)
##if mpi.COMM_WORLD.rank == 0:
## eps.view()
##END OPTION 2
eps.solve()
if eps.getConverged() < self.nev:
            PETSc.Sys.Print('for Bs in subdomain {}: {} eigenvalues converged (less than the {} requested)'.format(mpi.COMM_WORLD.rank, eps.getConverged(), self.nev), comm=PETSc.COMM_SELF)
Dnegs = []
Dposs = []
for i in range(eps.getConverged()):
tempscalar = np.real(eps.getEigenvalue(i))
if tempscalar < 0. :
Dnegs.append(-1.*tempscalar)
Vnegs.append(works.duplicate())
eps.getEigenvector(i,Vnegs[-1])
DVnegs.append(Dnegs[-1] * Vnegs[-1])
invmusVnegs.append(invmus * Vnegs[-1])
else :
Dposs.append(tempscalar)
if self.verbose:
            PETSc.Sys.Print('for Bs in subdomain {}: ncv = {} with {} negative eigs ({} requested)'.format(mpi.COMM_WORLD.rank, eps.getConverged(), len(Vnegs), self.nev), comm=PETSc.COMM_SELF)
print(f'values of Dnegs {np.array(Dnegs)}')
nnegs = len(Dnegs)
#print(f'length of Dnegs {nnegs}')
#END diagonalize Bs
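        # The negative eigenpairs (Dnegs, Vnegs) gathered above define the local correction
        # Anegs = sum_k |lambda_k| v_k v_k^T; its assembled counterpart Aneg is added to A
        # further down to form Apos = A + Aneg, the (intended) symmetric positive definite
        # operator on which the inner CG solver ksp_Apos operates.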
if self.viewnegV0:
print('###')
print(f'view of Vneg in Subdomain {mpi.COMM_WORLD.rank}')
print(f'ncv = {eps.getConverged()} eigenvalues converged')
print(f'{nnegs=}')
print(f'values of Dnegs: {np.array(Dnegs)}')
works.set(0.)
RsVnegs = []
Vneg = []
Dneg = []
RsDVnegs = []
RsDnegs = []
for i in range(mpi.COMM_WORLD.size):
nnegi = len(Vnegs) if i == mpi.COMM_WORLD.rank else None
nnegi = mpi.COMM_WORLD.bcast(nnegi, root=i)
for j in range(nnegi):
Vneg.append(Vnegs[j].copy() if i == mpi.COMM_WORLD.rank else works.copy())
dnegi = Dnegs[j] if i == mpi.COMM_WORLD.rank else None
dnegi = mpi.COMM_WORLD.bcast(dnegi, root=i)
Dneg.append(dnegi)
#print(f'i Dneg[i] = {i} {Dneg[i]}')
for i, vec in enumerate(Vneg):
work.set(0)
scatter_l2g(vec, work, PETSc.InsertMode.ADD_VALUES)
scatter_l2g(work, works, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
if works.norm() != 0:
RsVnegs.append(works.copy())
RsDVnegs.append(Dneg[i]*works.copy())
RsDnegs.append(Dneg[i])
#TO DO: here implement RsVnegs and RsDVnegs
#self.Vneg = Vneg
# self.Vnegs = Vnegs
# self.DVnegs = DVnegs
# self.scatterl
#Local Apos and Aneg
Aneg = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
Aneg.setPythonContext(Aneg_ctx(Vnegs, DVnegs, scatter_l2g, works, works_2))
Aneg.setUp()
Apos = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
Apos.setPythonContext(Apos_ctx(A_mpiaij, Aneg ))
Apos.setUp()
#A pos = A_mpiaij + Aneg so it could be a composite matrix rather than Python type
Anegs = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
Anegs.setPythonContext(Anegs_ctx(Vnegs, DVnegs))
Anegs.setUp()
Aposs = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
Aposs.setPythonContext(Aposs_ctx(Bs, Anegs ))
Aposs.setUp()
projVnegs = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
projVnegs.setPythonContext(projVnegs_ctx(Vnegs))
projVnegs.setUp()
projVposs = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
projVposs.setPythonContext(projVposs_ctx(projVnegs))
projVposs.setUp()
#TODO Implement RsAposRsts, this is the restriction of Apos to the dofs in this subdomain. So it applies to local vectors but has non local operations
RsAposRsts = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF) #or COMM_WORLD ?
RsAposRsts.setPythonContext(RsAposRsts_ctx(As,RsVnegs,RsDVnegs))
RsAposRsts.setUp()
invAposs = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
invAposs.setPythonContext(invAposs_ctx(Bs_ksp, projVposs ))
invAposs.setUp()
ksp_Aposs = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Aposs.setOperators(Aposs)
ksp_Aposs.setType('preonly')
pc_Aposs = ksp_Aposs.getPC()
pc_Aposs.setType('python')
pc_Aposs.setPythonContext(invAposs_ctx(Bs_ksp,projVposs))
ksp_Aposs.setUp()
work.set(1.)
Ms = PETSc.Mat().createPython([works.getSizes(), works.getSizes()], comm=PETSc.COMM_SELF)
Ms.setPythonContext(scaledmats_ctx(Aposs, mus, mus))
Ms.setUp()
ksp_Ms = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Ms.setOptionsPrefix("ksp_Ms_")
ksp_Ms.setOperators(Ms)
ksp_Ms.setType('preonly')
pc_Ms = ksp_Ms.getPC()
pc_Ms.setType('python')
pc_Ms.setPythonContext(scaledmats_ctx(invAposs,invmus,invmus) )
ksp_Ms.setFromOptions()
        #once a ksp has been passed to SLEPc it cannot be used again so we use a second, identical, ksp for SLEPc as a temporary fix
ksp_Ms_forSLEPc = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Ms_forSLEPc.setOptionsPrefix("ksp_Ms_")
ksp_Ms_forSLEPc.setOperators(Ms)
ksp_Ms_forSLEPc.setType('preonly')
pc_Ms_forSLEPc = ksp_Ms_forSLEPc.getPC()
pc_Ms_forSLEPc.setType('python')
pc_Ms_forSLEPc.setPythonContext(scaledmats_ctx(invAposs,invmus,invmus) )
ksp_Ms_forSLEPc.setFromOptions()
# the default local solver is the scaled non assembled local matrix (as in BNN)
if self.switchtoASM:
Atildes = As
if mpi.COMM_WORLD.rank == 0:
print('Switch to Additive Schwarz instead of BNN.')
ksp_Atildes = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes.setOperators(Atildes)
ksp_Atildes.setType('preonly')
pc_Atildes = ksp_Atildes.getPC()
pc_Atildes.setType('cholesky')
pc_Atildes.setFactorSolverType('mumps')
ksp_Atildes.setFromOptions()
            #once a ksp has been passed to SLEPc it cannot be used again so we use a second, identical, ksp for SLEPc as a temporary fix
ksp_Atildes_forSLEPc = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes_forSLEPc.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes_forSLEPc.setOperators(Atildes)
ksp_Atildes_forSLEPc.setType('preonly')
pc_Atildes_forSLEPc = ksp_Atildes_forSLEPc.getPC()
pc_Atildes_forSLEPc.setType('cholesky')
pc_Atildes_forSLEPc.setFactorSolverType('mumps')
ksp_Atildes_forSLEPc.setFromOptions()
if self.switchtoASMpos:
if mpi.COMM_WORLD.rank == 0:
print('switchtoASMpos has been ignored in favour of switchtoASM.')
elif self.switchtoASMpos:
Atildes = RsAposRsts
if mpi.COMM_WORLD.rank == 0:
print('Switch to Apos Additive Schwarz instead of BNN.')
ksp_Atildes = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes.setOperators(Atildes)
ksp_Atildes.setType('preonly')
pc_Atildes = ksp_Atildes.getPC()
pc_Atildes.setType('python')
pc_Atildes.setPythonContext(invRsAposRsts_ctx(As,RsVnegs,RsDnegs,works))
ksp_Atildes.setFromOptions()
            #once a ksp has been passed to SLEPc it cannot be used again so we use a second, identical, ksp for SLEPc as a temporary fix
ksp_Atildes_forSLEPc = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes_forSLEPc.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes_forSLEPc.setOperators(Atildes)
ksp_Atildes_forSLEPc.setType('preonly')
pc_Atildes_forSLEPc = ksp_Atildes_forSLEPc.getPC()
pc_Atildes_forSLEPc.setType('python')
pc_Atildes_forSLEPc.setPythonContext(invRsAposRsts_ctx(As,RsVnegs,RsDnegs,works))
ksp_Atildes_forSLEPc.setFromOptions()
else: #(default)
Atildes = Ms
ksp_Atildes = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes.setOperators(Atildes)
ksp_Atildes.setType('preonly')
pc_Atildes = ksp_Atildes.getPC()
pc_Atildes.setType('python')
pc_Atildes.setPythonContext(scaledmats_ctx(invAposs,invmus,invmus) )
ksp_Atildes.setFromOptions()
            #once a ksp has been passed to SLEPc it cannot be used again so we use a second, identical, ksp for SLEPc as a temporary fix
ksp_Atildes_forSLEPc = PETSc.KSP().create(comm=PETSc.COMM_SELF)
ksp_Atildes_forSLEPc.setOptionsPrefix("ksp_Atildes_")
ksp_Atildes_forSLEPc.setOperators(Atildes)
ksp_Atildes_forSLEPc.setType('preonly')
pc_Atildes_forSLEPc = ksp_Atildes_forSLEPc.getPC()
pc_Atildes_forSLEPc.setType('python')
pc_Atildes_forSLEPc.setPythonContext(scaledmats_ctx(invAposs,invmus,invmus) )
ksp_Atildes_forSLEPc.setFromOptions()
labs=[]
for i, tmp in enumerate(Dnegs):
labs.append(f'(\Lambda_-^s)_{i} = {-1.*tmp}')
        minV0 = minimal_V0(ksp_Atildes,invmusVnegs,labs) #won't compute anything more because the solver for Atildes is not mumps
minV0s = minV0.V0s
labs = minV0.labs
if self.viewminV0 == True:
minV0.view()
self.A = A_mpiaij
self.Apos = Apos
self.Aneg = Aneg
self.Ms = Ms
self.As = As
self.RsAposRsts = RsAposRsts
self.ksp_Atildes = ksp_Atildes
self.ksp_Ms = ksp_Ms
self.ksp_Atildes_forSLEPc = ksp_Atildes_forSLEPc
self.ksp_Ms_forSLEPc = ksp_Ms_forSLEPc
self.work = work
self.work_2 = work_2
self.works_1 = works
self.works_2 = works_2
self.scatter_l2g = scatter_l2g
self.mult_max = mult_max
self.ksp_Atildes = ksp_Atildes
self.minV0 = minV0
self.labs = labs
self.Dnegs = Dnegs
self.nnegs = nnegs
self.works_1.set(1.)
self.RsAposRsts.mult(self.works_1,self.works_2)
if self.GenEO == True:
print(f'{labs=}')
self.GenEOV0 = GenEO_V0(self.ksp_Atildes_forSLEPc,self.Ms,self.RsAposRsts,self.mult_max,minV0s,labs,self.ksp_Ms_forSLEPc)
self.V0s = self.GenEOV0.V0s
if self.viewGenEOV0 == True:
self.GenEOV0.view()
print(f'{self.GenEOV0.labs=}')
else:
self.V0s = minV0s
self.proj2 = coarse_operators(self.V0s,self.Apos,self.scatter_l2g,self.works_1,self.work)
if self.viewV0 == True:
self.proj2.view()
# work.set(1.)
# test = work.copy()
# test = self.proj2.coarse_init(work)
# testb = work.copy()
# self.proj2.project(testb)
# testc = work.copy()
# self.proj2.project_transpose(testc)
# testd = work.copy()
# self.apply([], work,testd)
self.H2 = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
self.H2.setPythonContext(H2_ctx(self.H2projCS, self.H2addCS, self.proj2, self.scatter_l2g, self.ksp_Atildes, self.works_1, self.works_2 ))
self.H2.setUp()
self.ksp_Apos = PETSc.KSP().create(comm=PETSc.COMM_WORLD)
self.ksp_Apos.setOptionsPrefix("ksp_Apos_")
self.ksp_Apos.setOperators(Apos)
self.ksp_Apos.setType("cg")
if self.compute_ritz_apos:
self.ksp_Apos.setComputeEigenvalues(True)
self.pc_Apos = self.ksp_Apos.getPC()
self.pc_Apos.setType('python')
self.pc_Apos.setPythonContext(H2_ctx(self.H2projCS, self.H2addCS, self.proj2, self.scatter_l2g, self.ksp_Atildes, self.works_1, self.works_2 ))
self.ksp_Apos.setFromOptions()
self.pc_Apos.setFromOptions()
#At this point the preconditioner for Apos is ready
if self.verbose:
if mpi.COMM_WORLD.rank == 0:
print(f'#V0(H2) = rank(Ker(Pi2)) = {len(self.proj2.V0)}')
Vneg = []
for i in range(mpi.COMM_WORLD.size):
nnegi = len(Vnegs) if i == mpi.COMM_WORLD.rank else None
nnegi = mpi.COMM_WORLD.bcast(nnegi, root=i)
for j in range(nnegi):
if i == mpi.COMM_WORLD.rank:
works = Vnegs[j].copy()
else:
works.set(0.)
self.work.set(0)
self.scatter_l2g(works, self.work, PETSc.InsertMode.ADD_VALUES)
Vneg.append(self.work.copy())
AposinvV0 = []
self.ritz_eigs_apos = None
for vec in Vneg:
self.ksp_Apos.solve(vec,self.work_2)
if self.compute_ritz_apos and self.ritz_eigs_apos is None:
self.ritz_eigs_apos = self.ksp_Apos.computeEigenvalues()
self.ksp_Apos.setComputeEigenvalues(False)
AposinvV0.append(self.work_2.copy())
self.AposinvV0 = AposinvV0
self.proj3 = coarse_operators(self.AposinvV0,self.A,self.scatter_l2g,self.works_1,self.work,V0_is_global=True)
self.proj = self.proj3 #this name is consistent with the proj in PCBNN
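        # proj3 couples the original operator A with a coarse space spanned by Apos^{-1} v
        # for each gathered negative eigenvector v (one ksp_Apos solve per vector, done just
        # above); it is exposed as self.proj, matching the attribute name used by PCBNN.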
###############################
# ###Alternative to assembling the second coarse operators
#
# ###
# self.Id = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
# self.Id.setPythonContext(Id_ctx())
# self.Id.setUp()
#
# #self.Id = PETSc.Mat().create(comm=PETSc.COMM_SELF)
# #self.Id.setType("constantdiagonal") #I don't know how to set the value to 1
#
# #self.N = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
# #self.N.setPythonContext(N_ctx(self.Aneg,self.A,self.ksp_Apos,self.work,self.work_2))
# #self.N.setUp()
#
# #self.ksp_N = PETSc.KSP().create(comm=PETSc.COMM_WORLD)
# #self.ksp_N.setOptionsPrefix("ksp_N_")
# #self.ksp_N.setOperators(self.N)
# #self.ksp_N.setType("gmres")
# #self.ksp_N.setGMRESRestart(151)
## # if self.compute_ritz_N:
# #self.ksp_N.setComputeEigenvalues(True)
# ##self.pc_N = self.ksp_N.getPC()
# ##self.pc_N.setType('python')
# ##self.pc_N.setPythonContext(
# #self.ksp_N.setFromOptions()
# self.proj4 = coarse_operators(Vneg,self.Id,self.scatter_l2g,self.works_1,self.work,V0_is_global=True)
#
# self.ProjA = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
# self.ProjA.setPythonContext(ProjA_ctx(self.proj4,self.A))
# self.ProjA.setUp()
# self.work.set(1.)
# #test = self.work.duplicate()
# #self.ProjA.mult(self.work,test)
# #print('self.ProjA works ok')
#
# self.ksp_ProjA = PETSc.KSP().create(comm=PETSc.COMM_WORLD)
# self.ksp_ProjA.setOptionsPrefix("ksp_ProjA_")
# self.ksp_ProjA.setOperators(self.ProjA)
# self.ksp_ProjA.setType("gmres")
# self.ksp_ProjA.setGMRESRestart(151)
# self.ksp_ProjA.setComputeEigenvalues(True)
# #self.pc_ProjA = self.ksp_N.getPC()
# #self.pc_ProjA.setType('python')
# #self.pc_ProjA.setPythonContext(
# self.ksp_ProjA.setFromOptions()
###############################
##
if self.viewV0 == True:
self.proj.view()
if self.viewPC == True:
self.view()
##Debug DEBUG
# works_3 = works.copy()
##projVnegs is a projection
# #works.setRandom()
# works.set(1.)
# projVnegs.mult(works,works_2)
# projVnegs.mult(works_2,works_3)
# print(f'check that projVnegs is a projection {works_2.norm()} = {works_3.norm()} < {works.norm()}')
##projVposs is a projection
##Pythagoras ok
# works.setRandom()
# #works.set(1.)
# projVnegs.mult(works,works_2)
# projVposs.mult(works,works_3)
# print(f'{works_2.norm()**2} + {works_3.norm()**2}= {works_2.norm()**2 + works_3.norm()**2} = {(works.norm())**2}')
# print(f'0 = {(works - works_2 - works_3).norm()} if the two projections sum to identity')
##Aposs = projVposs Bs projVposs = Bs projVposs (it is implemented as Bs + Anegs)
# works_4 = works.copy()
# works.setRandom()
# #works.set(1.)
# projVposs.mult(works,works_2)
# Bs.mult(works_2,works_3)
# projVposs.mult(works_3,works_2)
# Aposs.mult(works,works_4)
# print(f'check Aposs = projVposs Bs projVposs = Bs projVposs: {works_2.norm()} = {works_3.norm()} = {works_4.norm()}')
# print(f'norms of diffs (should be zero): {(works_2 - works_3).norm()}, {(works_2 - works_4).norm()}, {(works_3 - works_4).norm()}')
###check that Aposs > 0 and Anegs >0 but Bs is indefinite + "Pythagoras"
# works_4 = works.copy()
# works.set(1.) #(with vector full of ones I get a negative Bs semi-norm)
# Bs.mult(works,works_4)
# Aposs.mult(works,works_2)
# Anegs.mult(works,works_3)
# print(f'|.|_Bs {works_4.dot(works)} (can be neg or pos); |.|_Aposs {works_2.dot(works)} > 0; |.|_Anegs {works_3.dot(works)} >0')
# print(f' |.|_Bs^2 = |.|_Aposs^2 - |.|_Anegs ^2 = {works_2.dot(works)} - {works_3.dot(works)} = {works_2.dot(works) - works_3.dot(works)} = {works_4.dot(works)} ')##
###check that ksp_Aposs.solve(Aposs * x) = projVposs x
# works_4 = works.copy()
# works.setRandom()
# #works.set(1.)
# projVposs.mult(works,works_2)
# Aposs(works,works_3)
# ksp_Aposs.solve(works_3,works_4)
# works_5 = works_2 - works_4
# print(f'norm x = {works.norm()}; norm projVposs x = {works_2.norm()} = norm Aposs\Aposs*x = {works_4.norm()}; normdiff = {works_5.norm()}')
####check that mus*invmus = vec of ones
# works.set(1.0)
# works_2 = invmus*mus
# works_3 = works - works_2
# print(f'0 = norm(vec of ones - mus*invmus) = {works_3.norm()}, mus in [{mus.min()}, {mus.max()}], invmus in [{invmus.min()}, {invmus.max()}]')
###check that Ms*ksp_Ms.solve(Ms*x) = Ms*x
# works_4 = works.copy()
# works.setRandom()
# Atildes.mult(works,works_3)
# self.ksp_Atildes.solve(works_3,works_4)
# Atildes.mult(works_4,works_2)
# works_5 = works_2 - works_3
# print(f'norm x = {works.norm()}; Atilde*x = {works_3.norm()} = norm Atilde*(Atildes\Atildes)*x = {works_2.norm()}; normdiff = {works_5.norm()}')
###check Apos by implementing it a different way in Apos_debug
# Apos_debug = PETSc.Mat().createPython([work.getSizes(), work.getSizes()], comm=PETSc.COMM_WORLD)
# Apos_debug.setPythonContext(Apos_debug_ctx(projVposs, Aposs, scatter_l2g, works, work))
# Apos_debug.setUp()
# work.setRandom()
# test = work.duplicate()
# test2 = work.duplicate()
# Apos.mult(work,test)
# Apos_debug.mult(work,test2)
# testdiff = test-test2
# print(f'norm of |.|_Apos = {np.sqrt(test.dot(work))} = |.|_Apos_debug = {np.sqrt(test2.dot(work))} ; norm of diff = {testdiff.norm()}')
###
###check that the projection in proj2 is a self.proj2.A orth projection
#work.setRandom()
# work.set(1.)
# test = work.copy()
# self.proj2.project(test)
# test2 = test.copy()
# self.proj2.project(test2)
# testdiff = test-test2
# print(f'norm(Pi x - Pi Pix) = {testdiff.norm()} = 0')
# self.proj2.A.mult(test,test2)
# test3 = work.duplicate()
# self.proj2.A.mult(work,test3)
# print(f'|Pi x|_A^2 - |x|_A^2 = {test.dot(test2)} - {work.dot(test3)} = {test.dot(test2) - work.dot(test3)} < 0 ')
# #test2 = A Pi x ( = Pit A Pi x)
# test3 = test2.copy()
# self.proj2.project_transpose(test3)
# test = test3.copy()
# self.proj2.project_transpose(test)
# testdiff = test3 - test2
# print(f'norm(A Pi x - Pit A Pix) = {testdiff.norm()} = 0 = {(test - test3).norm()} = norm(Pit Pit A Pi x - Pit A Pix); compare to norm(A Pi x) = {test2.norm()} ')
# #work.setRandom()
# work.set(1.)
# test2 = work.copy()
# self.proj2.project_transpose(test2)
# test2 = -1*test2
# test2 += work
#
# test = work.copy()
# test = self.proj2.coarse_init(work)
# test3 = work.duplicate()
# self.proj2.A.mult(test,test3)
###check that the projection in proj3 is a self.proj3.A orth projection whose image includes Ker(Aneg)
# #work.setRandom()
# work.set(1.)
# test = work.copy()
# self.proj3.project(test)
# test2 = test.copy()
# self.proj3.project(test2)
# testdiff = test-test2
# print(f'norm(Pi x - Pi Pix) = {testdiff.norm()} = 0')
# self.proj3.A.mult(test,test2)
# test3 = work.duplicate()
# self.proj3.A.mult(work,test3)
# print(f'|Pi x|_A^2 - |x|_A^2 = {test.dot(test2)} - {work.dot(test3)} = {test.dot(test2) - work.dot(test3)} < 0 ')
# #test2 = A Pi x ( = Pit A Pi x)
# test3 = test2.copy()
# self.proj3.project_transpose(test3)
# test = test3.copy()
# self.proj3.project_transpose(test)
# testdiff = test3 - test2
# print(f'norm(A Pi x - Pit A Pix) = {testdiff.norm()} = 0 = {(test - test3).norm()} = norm(Pit Pit A Pi x - Pit A Pix); compare to norm(A Pi x) = {test2.norm()} ')
# #work.setRandom()
# work.set(1.)
# test2 = work.copy()
# self.proj3.project_transpose(test2)
# test2 = -1*test2
# test2 += work
#
# test = work.copy()
# test = self.proj3.coarse_init(work)
# test3 = work.duplicate()
# self.proj3.A.mult(test,test3)
#
# print(f'norm(A coarse_init(b)) = {test3.norm()} = {test2.norm()} = norm((I-Pit b)); norm diff = {(test2 - test3).norm()}')
#
# work.set(1.)
# test = work.copy()
# test2 = work.copy()
# self.proj3.project(test2)
# test3 = work.copy()
# self.proj3.project(test3)
# test = work.copy()
# self.Apos.mult(test2,test)
# test2 = work.copy()
# self.A.mult(test3,test2)
# print(f'norm(Apos Pi3 x) = {test.norm()} = {test2.norm()} = norm(A Pi3 x); norm diff = {(test - test2).norm()}')
# for vec in self.AposinvV0:
# test = vec.copy()
# self.proj3.project(test)
# print(f'norm(Pi3 AposinvV0[i]) = {test.norm()} compare to norm of the non projected vector norm ={(vec).norm()}')
#
### END Debug DEBUG
def mult(self, x, y):
"""
Applies the domain decomposition preconditioner followed by the projection preconditioner to a vector.
Parameters
==========
x : petsc.Vec
The vector to which the preconditioner is to be applied.
y : petsc.Vec
The vector that stores the result of the preconditioning operation.
"""
########################
########################
xd = x.copy()
if self.H3projCS == True:
self.proj3.project_transpose(xd)
self.H2.mult(xd,y)
if self.H3projCS == True:
self.proj3.project(y)
if self.H3addCS == True:
xd = x.copy()
ytild = self.proj3.coarse_init(xd) # I could save a coarse solve by combining this line with project_transpose
if ytild.dot(xd) < 0:
print(f'x.dot(coarse_init(x)) = {ytild.dot(xd)} < 0 ')
y += ytild
def MP_mult(self, x, y):
"""
Applies the domain decomposition multipreconditioner followed by the projection preconditioner to a vector.
Parameters
==========
x : petsc.Vec
The vector to which the preconditioner is to be applied.
        y : list of petsc.Vec
The list of ndom vectors that stores the result of the multipreconditioning operation (one vector per subdomain).
"""
print('not implemented')
def apply(self, pc, x, y):
"""
Applies the domain decomposition preconditioner followed by the projection preconditioner to a vector.
This is just a call to PCNew.mult with the function name and arguments that allow PCNew to be passed
as a preconditioner to PETSc.ksp.
Parameters
==========
        pc: not used within the function; it is part of the standard PETSc preconditioner calling interface.
x : petsc.Vec
The vector to which the preconditioner is to be applied.
y : petsc.Vec
The vector that stores the result of the preconditioning operation.
"""
self.mult(x,y)
def view(self):
self.gathered_ns = mpi.COMM_WORLD.gather(self.ns, root=0)
self.gathered_nints = mpi.COMM_WORLD.gather(self.nints, root=0)
self.gathered_Gammas = mpi.COMM_WORLD.gather(self.nGammas, root=0)
self.minV0.gathered_dim = mpi.COMM_WORLD.gather(self.minV0.nrb, root=0)
self.gathered_labs = mpi.COMM_WORLD.gather(self.labs, root=0)
self.gathered_nneg = mpi.COMM_WORLD.gather(self.nnegs, root=0)
self.gathered_Dneg = mpi.COMM_WORLD.gather(self.Dnegs, root=0)
if self.GenEO == True:
self.GenEOV0.gathered_nsharp = mpi.COMM_WORLD.gather(self.GenEOV0.n_GenEO_eigmax, root=0)
self.GenEOV0.gathered_nflat = mpi.COMM_WORLD.gather(self.GenEOV0.n_GenEO_eigmin, root=0)
self.GenEOV0.gathered_dimKerMs = mpi.COMM_WORLD.gather(self.GenEOV0.dimKerMs, root=0)
self.GenEOV0.gathered_Lambdasharp = mpi.COMM_WORLD.gather(self.GenEOV0.Lambda_GenEO_eigmax, root=0)
self.GenEOV0.gathered_Lambdaflat = mpi.COMM_WORLD.gather(self.GenEOV0.Lambda_GenEO_eigmin, root=0)
if mpi.COMM_WORLD.rank == 0:
print('#############################')
print(f'view of PCNew')
print(f'{self.switchtoASM=}')
print(f'{self.verbose= }')
print(f'{self.GenEO= }')
print(f'{self.H3addCS= }')
print(f'{self.H3projCS= }')
print(f'{self.H2projCS= }')
print(f'{self.viewPC= }')
print(f'{self.viewV0= }')
print(f'{self.viewGenEOV0= }')
print(f'{self.viewnegV0= }')
print(f'{self.viewminV0= }')
print(f'{self.compute_ritz_apos=}')
print(f'{self.mult_max=}')
print(f'### info about the subdomains ###')
self.nint = np.sum(self.gathered_nints)
self.nGamma = self.nglob - self.nint
print(f'{self.gathered_ns =}')
print(f'{self.gathered_nints =}')
print(f'{self.gathered_Gammas=}')
print(f'{self.nGamma=}')
print(f'{self.nint=}')
print(f'{self.nglob=}')
print(f'{self.gathered_labs=}')
print(f'### info about minV0.V0s = (Ker(Atildes)) ###')
print(f'{self.minV0.mumpsCntl3=}')
print(f'###info about Vnegs = rank(Anegs) = coarse components for proj3')
print(f'{self.gathered_nneg=}')
print(f'{np.sum(self.gathered_nneg)=}')
if (self.ksp_Atildes.pc.getFactorSolverType() == 'mumps'):
print(f'dim(Ker(Atildes)) = {self.minV0.gathered_dim}')
else:
print(f'Ker(Atildes) not computed because pc is not mumps')
if self.GenEO == True:
print(f'### info about GenEOV0.V0s ###')
print(f'{self.GenEOV0.tau_eigmax=}')
print(f'{self.GenEOV0.tau_eigmin=}')
print(f'{self.GenEOV0.eigmax=}')
print(f'{self.GenEOV0.eigmin=}')
print(f'{self.GenEOV0.nev=}')
print(f'{self.GenEOV0.maxev=}')
print(f'{self.GenEOV0.mumpsCntl3=}')
print(f'{self.GenEOV0.verbose=}')
print(f'{self.GenEOV0.gathered_nsharp=}')
print(f'{self.GenEOV0.gathered_nflat=}')
#print(f'{self.GenEOV0.gathered_dimKerMs=}')
#print(f'{np.array(self.GenEOV0.gathered_Lambdasharp)=}')
#print(f'{np.array(self.GenEOV0.gathered_Lambdaflat)=}')
print(f'### info about the preconditioner for Apos ###')
print(f'{self.proj2.V0_is_global=}')
if(self.proj2.V0_is_global == False):
print(f'{self.proj2.gathered_dimV0s=}')
if self.GenEO == True:
print(f'global dim V0 for Apos = {self.proj2.dim} = ({np.sum(self.gathered_nneg)} from Vneg ) + ({np.sum(self.minV0.gathered_dim)} from Ker(Atildes)) + ({np.sum(self.GenEOV0.gathered_nsharp)} from GenEO_eigmax) + ({np.sum(self.GenEOV0.gathered_nflat) } from GenEO_eigmin)')
else:
print(f'global dim V0 for Apos = {np.sum(self.proj2.gathered_dimV0s)} = ({np.sum(self.minV0.gathered_dim)} from Ker(Atildes))')
if self.compute_ritz_apos and self.ritz_eigs_apos is not None:
print(f'Estimated kappa(H2 Apos) = {self.ritz_eigs_apos.max()/self.ritz_eigs_apos.min() }; with lambdamin = {self.ritz_eigs_apos.min()} and lambdamax = {self.ritz_eigs_apos.max()}')
print('#############################')
self.savetofile()
def savetofile(self):
if mpi.COMM_WORLD.rank == 0:
if not os.path.exists(self.test_case):
os.mkdir(self.test_case)
np.savez(f'{self.test_case}/init',
switchtoASM = self.switchtoASM,
verbose = self.verbose,
GenEO = self.GenEO,
H3addCS = self.H3addCS,
H3projCS = self.H3projCS,
H2projCS = self.H2projCS,
viewPC = self.viewPC,
viewV0 = self.viewV0,
viewGenEOV0 = self.viewGenEOV0,
viewnegV0 = self.viewnegV0,
viewminV0 = self.viewminV0,
compute_ritz_apos = self.compute_ritz_apos,
mult_max = self.mult_max,
gathered_ns = self.gathered_ns,
gathered_nints = self.gathered_nints,
gathered_Gammas = self.gathered_Gammas,
nGamma = self.nGamma,
nint = self.nint,
nglob = self.nglob,
minV0_mumpsCntl3 = self.minV0.mumpsCntl3,
gathered_labs= np.asarray(self.gathered_labs,dtype='object'),
gathered_nneg = self.gathered_nneg,
minV0_gathered_dim = self.minV0.gathered_dim,
ritz_eigs_Apos = self.ritz_eigs_apos ,
sum_nneg = np.sum(self.gathered_nneg),
proj2_V0_is_global = self.proj2.V0_is_global,
proj2_gathered_dimV0s = np.asarray(self.proj2.gathered_dimV0s),
proj2_dimV0 = np.sum(self.proj2.gathered_dimV0s),
proj2_sum_dimminV0 = np.sum(self.minV0.gathered_dim) ,
)
if self.GenEO == True:
np.savez(f'{self.test_case}/GenEO',
GenEOV0_tau_eigmax = self.GenEOV0.tau_eigmax,
GenEOV0_tau_eigmin = self.GenEOV0.tau_eigmin,
GenEOV0_eigmax = self.GenEOV0.eigmax,
GenEOV0_eigmin = self.GenEOV0.eigmin,
GenEOV0_nev = self.GenEOV0.nev,
GenEOV0_maxev = self.GenEOV0.maxev,
GenEOV0_mumpsCntl3 = self.GenEOV0.mumpsCntl3,
GenEOV0_verbose = self.GenEOV0.verbose,
GenEOV0_gathered_nsharp = np.asarray(self.GenEOV0.gathered_nsharp),
GenEOV0_gathered_nflat = np.asarray(self.GenEOV0.gathered_nflat),
GenEOV0_sum_nsharp = np.sum(self.GenEOV0.gathered_nsharp),
GenEOV0_sum_nflat = np.sum(self.GenEOV0.gathered_nflat),
)
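# A minimal usage sketch (not part of the original source): attaching an object that
# implements apply(pc, x, y), such as PCNew, to a PETSc KSP as a shell ("python")
# preconditioner. The names `ksp` and `pcnew` are assumptions; constructing PCNew
# itself happens elsewhere.
def _example_attach_pcnew(ksp, pcnew):
    pc = ksp.getPC()
    pc.setType('python')        # shell preconditioner implemented in Python
    pc.setPythonContext(pcnew)  # PETSc will call pcnew.apply(pc, x, y) at each application
    ksp.setFromOptions()
    return ksp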
class Aneg_ctx(object):
def __init__(self, Vnegs, DVnegs, scatter_l2g, works, works_2):
self.scatter_l2g = scatter_l2g
self.works = works
self.works_2 = works_2
self.Vnegs = Vnegs
self.DVnegs = DVnegs
self.gamma = PETSc.Vec().create(comm=PETSc.COMM_SELF)
self.gamma.setType(PETSc.Vec.Type.SEQ)
self.gamma.setSizes(len(self.Vnegs))
def mult(self, mat, x, y):
y.set(0)
self.scatter_l2g(x, self.works, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
self.works_2.set(0)
for i,vec in enumerate(self.Vnegs):
self.works_2.axpy(self.works.dot(self.DVnegs[i]) , vec)
self.scatter_l2g(self.works_2, y, PETSc.InsertMode.ADD_VALUES)
class Apos_debug_ctx(object):
def __init__(self, projVposs, Aposs, scatter_l2g, works, work):
self.scatter_l2g = scatter_l2g
        self.work = work
self.works = works
self.projVposs = projVposs
self.Aposs = Aposs
def mult(self, mat, x, y):
y.set(0)
works_2 = self.works.duplicate()
self.scatter_l2g(x, self.works, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
self.Aposs.mult(self.works,works_2)
self.scatter_l2g(works_2, y, PETSc.InsertMode.ADD_VALUES)
class Apos_ctx(object):
def __init__(self,A_mpiaij, Aneg):
self.A_mpiaij = A_mpiaij
self.Aneg = Aneg
def mult(self, mat, x, y):
xtemp = x.duplicate()
self.Aneg.mult(x,xtemp)
self.A_mpiaij.mult(x,y)
y += xtemp
class Anegs_ctx(object):
def __init__(self, Vnegs, DVnegs):
self.Vnegs = Vnegs
self.DVnegs = DVnegs
def mult(self, mat, x, y):
y.set(0)
for i,vec in enumerate(self.Vnegs):
y.axpy(x.dot(self.DVnegs[i]), vec)
class RsAposRsts_ctx(object):
def __init__(self,As,RsVnegs,RsDVnegs):
self.As = As
self.RsVnegs = RsVnegs
self.RsDVnegs = RsDVnegs
def mult(self, mat, x, y):
self.As.mult(x,y)
for i,vec in enumerate(self.RsVnegs):
y.axpy(x.dot(self.RsDVnegs[i]), vec)
class invRsAposRsts_ctx(object):
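    # Applies the inverse of (As + sum_i d_i v_i v_i^T), with v_i = RsVnegs[i] and
    # d_i = RsDnegs[i], through the Woodbury identity
    #   (A + V D V^T)^{-1} = A^{-1} - A^{-1} V (D^{-1} + V^T A^{-1} V)^{-1} V^T A^{-1};
    # the small dense matrix `Matwood` assembled below is D^{-1} + V^T A^{-1} V
    # (this summary is inferred from the code, not from the original comments).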
def __init__(self,As,RsVnegs,RsDnegs,works):
self.As = As
self.works = works
self.RsVnegs = RsVnegs
self.RsDnegs = RsDnegs
self.ksp_As = PETSc.KSP().create(comm=PETSc.COMM_SELF)
self.ksp_As.setOptionsPrefix("ksp_As_")
self.ksp_As.setOperators(self.As)
self.ksp_As.setType('preonly')
self.pc_As = self.ksp_As.getPC()
self.pc_As.setType('cholesky')
self.pc_As.setFactorSolverType('mumps')
self.ksp_As.setFromOptions()
self.AsinvRsVnegs = []
for i,vec in enumerate(self.RsVnegs):
self.ksp_As.solve(vec,self.works)
self.AsinvRsVnegs.append(self.works.copy())
self.Matwood = PETSc.Mat().create(comm=PETSc.COMM_SELF)
self.Matwood.setType(PETSc.Mat.Type.SEQDENSE)
self.Matwood.setSizes([len(self.AsinvRsVnegs),len(self.AsinvRsVnegs)])
self.Matwood.setOption(PETSc.Mat.Option.SYMMETRIC, True)
self.Matwood.setPreallocationDense(None)
for i, vec in enumerate(self.AsinvRsVnegs):
for j in range(i):
tmp = self.RsVnegs[j].dot(vec)
self.Matwood[i, j] = tmp
self.Matwood[j, i] = tmp
tmp = self.RsVnegs[i].dot(vec)
self.Matwood[i, i] = tmp + 1/self.RsDnegs[i]
self.Matwood.assemble()
self.ksp_Matwood = PETSc.KSP().create(comm=PETSc.COMM_SELF)
self.ksp_Matwood.setOperators(self.Matwood)
self.ksp_Matwood.setType('preonly')
self.pc = self.ksp_Matwood.getPC()
self.pc.setType('cholesky')
self.gamma, _ = self.Matwood.getVecs()
self.alpha = self.gamma.duplicate()
def mult(self, mat, x, y):
self.ksp_As.solve(x,y)
for i, vec in enumerate(self.AsinvRsVnegs):
self.gamma[i] = vec.dot(x)
self.ksp_Matwood.solve(self.gamma, self.alpha)
for i, vec in enumerate(self.AsinvRsVnegs):
y.axpy(-self.alpha[i], vec)
def apply(self,pc, x, y):
self.mult(pc,x,y)
class Aposs_ctx(object):
def __init__(self,Bs, Anegs):
self.Bs = Bs
self.Anegs = Anegs
def mult(self, mat, x, y):
xtemp = x.duplicate()
self.Anegs.mult(x,xtemp)
self.Bs.mult(x,y)
y += xtemp
class scaledmats_ctx(object):
def __init__(self, mats, musl, musr):
self.mats = mats
self.musl = musl
self.musr = musr
def mult(self, mat, x, y):
xtemp = x.copy()*self.musr
self.mats.mult(xtemp,y)
y *= self.musl
def apply(self, mat, x, y):
self.mult(mat, x, y)
class invAposs_ctx(object):
def __init__(self,Bs_ksp,projVposs):
self.Bs_ksp = Bs_ksp
self.projVposs = projVposs
def apply(self, mat, x, y):
xtemp1 = y.duplicate()
xtemp2 = y.duplicate()
self.projVposs.mult(x,xtemp1)
self.Bs_ksp.solve(xtemp1,xtemp2)
self.projVposs.mult(xtemp2,y)
def mult(self, mat, x, y):
#xtemp1 = y.duplicate()
#xtemp2 = y.duplicate()
#self.projVnegs.mult(x,xtemp1)
#self.Bs_ksp.solve(xtemp1,xtemp2)
#self.projVnegs.mult(xtemp2,y)
self.apply(mat, x, y)
class projVnegs_ctx(object):
def __init__(self, Vnegs):
self.Vnegs = Vnegs
def mult(self, mat, x, y):
y.set(0)
for i,vec in enumerate(self.Vnegs):
y.axpy(x.dot(vec) , vec)
class projVposs_ctx(object):
def __init__(self, projVnegs):
self.projVnegs = projVnegs
def mult(self, mat, x, y):
self.projVnegs(-x,y)
y.axpy(1.,x)
class H2_ctx(object):
def __init__(self, projCS, addCS, proj2, scatter_l2g, ksp_Atildes, works_1, works_2 ):
self.projCS = projCS
self.addCS = addCS
self.proj2 = proj2
self.scatter_l2g = scatter_l2g
self.ksp_Atildes = ksp_Atildes
self.works_1 = works_1
self.works_2 = works_2
def mult(self,mat,x,y):
self.apply([],x,y)
def apply(self,pc, x, y):
xd = x.copy()
if self.projCS == True:
self.proj2.project_transpose(xd)
self.scatter_l2g(xd, self.works_1, PETSc.InsertMode.INSERT_VALUES, PETSc.ScatterMode.SCATTER_REVERSE)
self.ksp_Atildes.solve(self.works_1, self.works_2)
y.set(0.)
self.scatter_l2g(self.works_2, y, PETSc.InsertMode.ADD_VALUES)
if self.projCS == True:
self.proj2.project(y)
if self.addCS == True:
xd = x.copy()
ytild = self.proj2.coarse_init(xd) # I could save a coarse solve by combining this line with project_transpose
#print(f'in H2 x.dot(coarse_init(x)) = {ytild.dot(xd)} > 0 ')
if ytild.dot(xd) < 0:
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
y += ytild
class N_ctx(object): #(I - Aneg *invApos)
def __init__(self,Aneg,A,ksp_Apos,work,work_2):
self.Aneg = Aneg
self.A = A
self.ksp_Apos = ksp_Apos
self.work = work
self.work_2 = work_2
def mult(self, mat, x, y):
if mpi.COMM_WORLD.rank == 0:
print('in N_ctx')
self.ksp_Apos.solve(x,self.work)
#self.A.mult(self.work,y)
self.Aneg.mult(self.work,self.work_2)
#self.Aneg.mult(self.work,y)
y.set(0.)
y.axpy(1.,x)
y.axpy(-1.,self.work_2)
class Id_ctx(object): # I
# def __init__(self):
def mult(self, mat, x, y):
y.axpy(1.,x)
class ProjA_ctx(object): #(Vneg (Vnegt *Vneg)\Vnegt *A )
def __init__(self, proj4, A):
self.proj4 = proj4
self.A = A
def mult(self, mat, x, y):
xd = x.copy()
self.proj4.project(xd)
self.A.mult(xd,y)
###UNFINISHED
#class Psi_ctx(object) #Dneg^{-1} - Vnegt Aneg^{-1} Vneg
# def __init__(self,Vneg,ksp_Apos,work,work_2):
# self.Vneg = Vneg
# self.work = work
# self.work = work_2
# #self.gamma = PETSc.Vec().create(comm=PETSc.COMM_SELF)
# #self.gamma.setType(PETSc.Vec.Type.SEQ)
# #self.gamma.setSizes(len(self.Vneg))
# self.ksp_Apos = ksp_Apos
# def mult(self, mat, x, y):
# part with Dneg inv is not at all implemented yet
# self.work.set(0.)
# for i, vec in enumerate(self.Vneg):
# self.work.axpy(x[i], vec)
#
# self.ksp_Apos.solve(self.work,self.work_2)
#
# #y = self.gamma.duplicate()
# for i, vec in enumerate(self.V0):
# y[i] = vec.dot(x)
#
| 43.69527 | 293 | 0.600597 |
401c4d078a76dac21366d7cb1fb9c12daafe629a
| 559 |
py
|
Python
|
Maths_And_Stats/Algebra/LCM/LCM.py
|
arslantalib3/algo_ds_101
|
a1293f407e00b8346f93e8770727f769e7add00e
|
[
"MIT"
] | 182 |
2020-10-01T17:16:42.000Z
|
2021-10-04T17:52:49.000Z
|
Maths_And_Stats/Algebra/LCM/LCM.py
|
arslantalib3/algo_ds_101
|
a1293f407e00b8346f93e8770727f769e7add00e
|
[
"MIT"
] | 759 |
2020-10-01T00:12:21.000Z
|
2021-10-04T19:35:11.000Z
|
Maths_And_Stats/Algebra/LCM/LCM.py
|
arslantalib3/algo_ds_101
|
a1293f407e00b8346f93e8770727f769e7add00e
|
[
"MIT"
] | 1,176 |
2020-10-01T16:02:13.000Z
|
2021-10-04T19:20:19.000Z
|
def lcm(x:int, y:int) -> int:
'''
Input: 2 integers x and y
Output: The smallest integer which is divisible by both x and y
'''
greater = max(x,y)
lesser = min(x,y)
multiple = 1
while ((greater * multiple) % lesser != 0):
        # Step through multiples of the larger number: it reaches the LCM in fewer iterations than stepping through multiples of the smaller one.
multiple += 1
return greater * multiple
def main():
num1 = int(input())
num2 = int(input())
print(f"LCM({num1}, {num2}) = {lcm(num1,num2)}")
if __name__ == "__main__":
main()
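# A minimal alternative sketch (not part of the original file): for positive integers the
# same result follows from the identity lcm(x, y) = x * y // gcd(x, y).
def lcm_via_gcd(x: int, y: int) -> int:
    from math import gcd
    return x * y // gcd(x, y)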
| 23.291667 | 116 | 0.615385 |
de1da973a80509f566975799e63c9aedec30eb3a
| 436 |
py
|
Python
|
A3 - RL/rl_assignments/rl4_hyperparameters_and_environment/student_4_2_1.py
|
NickSmyr/ai-player-agents
|
f8972d02c53a2ba566b541b1270a0637e3d3e5c7
|
[
"MIT"
] | null | null | null |
A3 - RL/rl_assignments/rl4_hyperparameters_and_environment/student_4_2_1.py
|
NickSmyr/ai-player-agents
|
f8972d02c53a2ba566b541b1270a0637e3d3e5c7
|
[
"MIT"
] | null | null | null |
A3 - RL/rl_assignments/rl4_hyperparameters_and_environment/student_4_2_1.py
|
NickSmyr/ai-player-agents
|
f8972d02c53a2ba566b541b1270a0637e3d3e5c7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# rewards: [golden_fish, jellyfish_1, jellyfish_2, ... , step]
rewards = [10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, 1000]
# Q learning learning rate
alpha = 0.1
# Q learning discount rate
gamma = 0.1
# Epsilon initial
epsilon_initial = 1.0
# Epsilon final
epsilon_final = 0.0
# Annealing timesteps
# 10 episodes * 100 steps / episode
annealing_timesteps = 1000
# threshold
threshold = 1e-1
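# A minimal sketch (not part of the original file) of how epsilon_initial, epsilon_final and
# annealing_timesteps are typically combined: linear annealing of the exploration rate over
# the first annealing_timesteps steps. The function name is an assumption.
def epsilon_at(t: int) -> float:
    frac = min(t, annealing_timesteps) / annealing_timesteps
    return epsilon_initial + (epsilon_final - epsilon_initial) * frac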
| 18.956522 | 75 | 0.678899 |
a50b82a1479a923e9778c17eaebc8937201aa750
| 62,396 |
py
|
Python
|
pytorch3d/renderer/cameras.py
|
tobyclh/pytorch3d
|
cac6cb1b7813a4f09a05e0ade43c63292bb08b79
|
[
"BSD-3-Clause"
] | 1 |
2021-11-17T01:46:24.000Z
|
2021-11-17T01:46:24.000Z
|
pytorch3d/renderer/cameras.py
|
tobyclh/pytorch3d
|
cac6cb1b7813a4f09a05e0ade43c63292bb08b79
|
[
"BSD-3-Clause"
] | null | null | null |
pytorch3d/renderer/cameras.py
|
tobyclh/pytorch3d
|
cac6cb1b7813a4f09a05e0ade43c63292bb08b79
|
[
"BSD-3-Clause"
] | 1 |
2022-01-05T15:03:24.000Z
|
2022-01-05T15:03:24.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import warnings
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from pytorch3d.common.types import Device
from pytorch3d.transforms import Rotate, Transform3d, Translate
from .utils import TensorProperties, convert_to_tensors_and_broadcast
# Default values for rotation and translation matrices.
_R = torch.eye(3)[None] # (1, 3, 3)
_T = torch.zeros(1, 3) # (1, 3)
class CamerasBase(TensorProperties):
"""
`CamerasBase` implements a base class for all cameras.
For cameras, there are four different coordinate systems (or spaces)
- World coordinate system: This is the system the object lives - the world.
- Camera view coordinate system: This is the system that has its origin on the camera
        and the Z-axis perpendicular to the image plane.
        In PyTorch3D, we assume that +X points left, +Y points up and
+Z points out from the image plane.
The transformation from world --> view happens after applying a rotation (R)
and translation (T)
- NDC coordinate system: This is the normalized coordinate system that confines
in a volume the rendered part of the object or scene. Also known as view volume.
For square images, given the PyTorch3D convention, (+1, +1, znear) is the top left near corner,
and (-1, -1, zfar) is the bottom right far corner of the volume.
The transformation from view --> NDC happens after applying the camera
projection matrix (P) if defined in NDC space.
For non square images, we scale the points such that smallest side
has range [-1, 1] and the largest side has range [-u, u], with u > 1.
- Screen coordinate system: This is another representation of the view volume with
the XY coordinates defined in image space instead of a normalized space.
A better illustration of the coordinate systems can be found in
pytorch3d/docs/notes/cameras.md.
It defines methods that are common to all camera models:
- `get_camera_center` that returns the optical center of the camera in
world coordinates
- `get_world_to_view_transform` which returns a 3D transform from
world coordinates to the camera view coordinates (R, T)
- `get_full_projection_transform` which composes the projection
transform (P) with the world-to-view transform (R, T)
- `transform_points` which takes a set of input points in world coordinates and
projects to the space the camera is defined in (NDC or screen)
- `get_ndc_camera_transform` which defines the transform from screen/NDC to
PyTorch3D's NDC space
- `transform_points_ndc` which takes a set of points in world coordinates and
projects them to PyTorch3D's NDC space
- `transform_points_screen` which takes a set of points in world coordinates and
projects them to screen space
For each new camera, one should implement the `get_projection_transform`
routine that returns the mapping from camera view coordinates to camera
coordinates (NDC or screen).
Another useful function that is specific to each camera model is
`unproject_points` which sends points from camera coordinates (NDC or screen)
back to camera view or world coordinates depending on the `world_coordinates`
boolean argument of the function.
"""
def get_projection_transform(self):
"""
Calculate the projective transformation matrix.
Args:
**kwargs: parameters for the projection can be passed in as keyword
arguments to override the default values set in `__init__`.
Return:
a `Transform3d` object which represents a batch of projection
matrices of shape (N, 3, 3)
"""
raise NotImplementedError()
def unproject_points(self):
"""
        Transform input points from camera coordinates (NDC or screen)
to the world / camera coordinates.
Each of the input points `xy_depth` of shape (..., 3) is
a concatenation of the x, y location and its depth.
For instance, for an input 2D tensor of shape `(num_points, 3)`
`xy_depth` takes the following form:
`xy_depth[i] = [x[i], y[i], depth[i]]`,
        for each point at index `i`.
The following example demonstrates the relationship between
`transform_points` and `unproject_points`:
.. code-block:: python
cameras = # camera object derived from CamerasBase
xyz = # 3D points of shape (batch_size, num_points, 3)
# transform xyz to the camera view coordinates
xyz_cam = cameras.get_world_to_view_transform().transform_points(xyz)
# extract the depth of each point as the 3rd coord of xyz_cam
depth = xyz_cam[:, :, 2:]
# project the points xyz to the camera
xy = cameras.transform_points(xyz)[:, :, :2]
# append depth to xy
xy_depth = torch.cat((xy, depth), dim=2)
# unproject to the world coordinates
xyz_unproj_world = cameras.unproject_points(xy_depth, world_coordinates=True)
print(torch.allclose(xyz, xyz_unproj_world)) # True
# unproject to the camera coordinates
xyz_unproj = cameras.unproject_points(xy_depth, world_coordinates=False)
print(torch.allclose(xyz_cam, xyz_unproj)) # True
Args:
xy_depth: torch tensor of shape (..., 3).
world_coordinates: If `True`, unprojects the points back to world
coordinates using the camera extrinsics `R` and `T`.
`False` ignores `R` and `T` and unprojects to
the camera view coordinates.
Returns
new_points: unprojected points with the same shape as `xy_depth`.
"""
raise NotImplementedError()
def get_camera_center(self, **kwargs) -> torch.Tensor:
"""
Return the 3D location of the camera optical center
in the world coordinates.
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
Setting T here will update the values set in init as this
value may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
C: a batch of 3D locations of shape (N, 3) denoting
the locations of the center of each camera in the batch.
"""
w2v_trans = self.get_world_to_view_transform(**kwargs)
P = w2v_trans.inverse().get_matrix()
# the camera center is the translation component (the first 3 elements
# of the last row) of the inverted world-to-view
# transform (4x4 RT matrix)
C = P[:, 3, :3]
return C
def get_world_to_view_transform(self, **kwargs) -> Transform3d:
"""
Return the world-to-view transform.
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
Setting R and T here will update the values set in init as these
values may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
A Transform3d object which represents a batch of transforms
of shape (N, 3, 3)
"""
R: torch.Tensor = kwargs.get("R", self.R)
T: torch.Tensor = kwargs.get("T", self.T)
self.R = R # pyre-ignore[16]
self.T = T # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(R=R, T=T)
return world_to_view_transform
def get_full_projection_transform(self, **kwargs) -> Transform3d:
"""
Return the full world-to-camera transform composing the
world-to-view and view-to-camera transforms.
If camera is defined in NDC space, the projected points are in NDC space.
If camera is defined in screen space, the projected points are in screen space.
Args:
**kwargs: parameters for the projection transforms can be passed in
as keyword arguments to override the default values
set in __init__.
Setting R and T here will update the values set in init as these
values may be needed later on in the rendering pipeline e.g. for
lighting calculations.
Returns:
a Transform3d object which represents a batch of transforms
of shape (N, 3, 3)
"""
self.R: torch.Tensor = kwargs.get("R", self.R) # pyre-ignore[16]
self.T: torch.Tensor = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T)
view_to_proj_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_proj_transform)
def transform_points(
self, points, eps: Optional[float] = None, **kwargs
) -> torch.Tensor:
"""
Transform input points from world to camera space with the
projection matrix defined by the camera.
For `CamerasBase.transform_points`, setting `eps > 0`
stabilizes gradients since it leads to avoiding division
by excessively low numbers for points close to the camera plane.
Args:
points: torch tensor of shape (..., 3).
eps: If eps!=None, the argument is used to clamp the
divisor in the homogeneous normalization of the points
transformed to the ndc space. Please see
`transforms.Transform3D.transform_points` for details.
For `CamerasBase.transform_points`, setting `eps > 0`
stabilizes gradients since it leads to avoiding division
by excessively low numbers for points close to the
camera plane.
Returns
new_points: transformed points with the same shape as the input.
"""
world_to_proj_transform = self.get_full_projection_transform(**kwargs)
return world_to_proj_transform.transform_points(points, eps=eps)
def get_ndc_camera_transform(self, **kwargs) -> Transform3d:
"""
Returns the transform from camera projection space (screen or NDC) to NDC space.
For cameras that can be specified in screen space, this transform
allows points to be converted from screen to NDC space.
The default transform scales the points from [0, W-1]x[0, H-1]
to [-1, 1]x[-u, u] or [-u, u]x[-1, 1] where u > 1 is the aspect ratio of the image.
This function should be modified per camera definitions if need be,
e.g. for Perspective/Orthographic cameras we provide a custom implementation.
This transform assumes PyTorch3D coordinate system conventions for
both the NDC space and the input points.
This transform interfaces with the PyTorch3D renderer which assumes
input points to the renderer to be in NDC space.
"""
if self.in_ndc():
return Transform3d(device=self.device, dtype=torch.float32)
else:
# For custom cameras which can be defined in screen space,
            # users might have to implement the screen to NDC transform based
# on the definition of the camera parameters.
# See PerspectiveCameras/OrthographicCameras for an example.
# We don't flip xy because we assume that world points are in
# PyTorch3D coordinates, and thus conversion from screen to ndc
# is a mere scaling from image to [-1, 1] scale.
image_size = kwargs.get("image_size", self.get_image_size())
return get_screen_to_ndc_transform(
self, with_xyflip=False, image_size=image_size
)
def transform_points_ndc(
self, points, eps: Optional[float] = None, **kwargs
) -> torch.Tensor:
"""
Transforms points from PyTorch3D world/camera space to NDC space.
Input points follow the PyTorch3D coordinate system conventions: +X left, +Y up.
Output points are in NDC space: +X left, +Y up, origin at image center.
Args:
points: torch tensor of shape (..., 3).
eps: If eps!=None, the argument is used to clamp the
divisor in the homogeneous normalization of the points
transformed to the ndc space. Please see
`transforms.Transform3D.transform_points` for details.
For `CamerasBase.transform_points`, setting `eps > 0`
stabilizes gradients since it leads to avoiding division
by excessively low numbers for points close to the
camera plane.
Returns
new_points: transformed points with the same shape as the input.
"""
world_to_ndc_transform = self.get_full_projection_transform(**kwargs)
if not self.in_ndc():
to_ndc_transform = self.get_ndc_camera_transform(**kwargs)
world_to_ndc_transform = world_to_ndc_transform.compose(to_ndc_transform)
return world_to_ndc_transform.transform_points(points, eps=eps)
def transform_points_screen(
self, points, eps: Optional[float] = None, **kwargs
) -> torch.Tensor:
"""
Transforms points from PyTorch3D world/camera space to screen space.
Input points follow the PyTorch3D coordinate system conventions: +X left, +Y up.
Output points are in screen space: +X right, +Y down, origin at top left corner.
Args:
points: torch tensor of shape (..., 3).
eps: If eps!=None, the argument is used to clamp the
divisor in the homogeneous normalization of the points
transformed to the ndc space. Please see
`transforms.Transform3D.transform_points` for details.
For `CamerasBase.transform_points`, setting `eps > 0`
stabilizes gradients since it leads to avoiding division
by excessively low numbers for points close to the
camera plane.
Returns
new_points: transformed points with the same shape as the input.
"""
points_ndc = self.transform_points_ndc(points, eps=eps, **kwargs)
image_size = kwargs.get("image_size", self.get_image_size())
return get_ndc_to_screen_transform(
self, with_xyflip=True, image_size=image_size
).transform_points(points_ndc, eps=eps)
def clone(self):
"""
Returns a copy of `self`.
"""
cam_type = type(self)
other = cam_type(device=self.device)
return super().clone(other)
def is_perspective(self):
raise NotImplementedError()
def in_ndc(self):
"""
Specifies whether the camera is defined in NDC space
or in screen (image) space
"""
raise NotImplementedError()
def get_znear(self):
return self.znear if hasattr(self, "znear") else None
def get_image_size(self):
"""
Returns the image size, if provided, expected in the form of (height, width)
The image size is used for conversion of projected points to screen coordinates.
"""
return self.image_size if hasattr(self, "image_size") else None
############################################################
# Field of View Camera Classes #
############################################################
def OpenGLPerspectiveCameras(
znear=1.0,
zfar=100.0,
aspect_ratio=1.0,
fov=60.0,
degrees: bool = True,
R: torch.Tensor = _R,
T: torch.Tensor = _T,
device: Device = "cpu",
) -> "FoVPerspectiveCameras":
"""
OpenGLPerspectiveCameras has been DEPRECATED. Use FoVPerspectiveCameras instead.
Preserving OpenGLPerspectiveCameras for backward compatibility.
"""
warnings.warn(
"""OpenGLPerspectiveCameras is deprecated,
Use FoVPerspectiveCameras instead.
OpenGLPerspectiveCameras will be removed in future releases.""",
PendingDeprecationWarning,
)
return FoVPerspectiveCameras(
znear=znear,
zfar=zfar,
aspect_ratio=aspect_ratio,
fov=fov,
degrees=degrees,
R=R,
T=T,
device=device,
)
class FoVPerspectiveCameras(CamerasBase):
"""
A class which stores a batch of parameters to generate a batch of
projection matrices by specifying the field of view.
The definition of the parameters follow the OpenGL perspective camera.
The extrinsics of the camera (R and T matrices) can also be set in the
initializer or passed in to `get_full_projection_transform` to get
the full transformation from world -> ndc.
The `transform_points` method calculates the full world -> ndc transform
and then applies it to the input points.
The transforms can also be returned separately as Transform3d objects.
* Setting the Aspect Ratio for Non Square Images *
If the desired output image size is non square (i.e. a tuple of (H, W) where H != W)
the aspect ratio needs special consideration: There are two aspect ratios
to be aware of:
- the aspect ratio of each pixel
- the aspect ratio of the output image
The `aspect_ratio` setting in the FoVPerspectiveCameras sets the
pixel aspect ratio. When using this camera with the differentiable rasterizer
be aware that in the rasterizer we assume square pixels, but allow
    variable image aspect ratio (i.e. rectangular images).
In most cases you will want to set the camera `aspect_ratio=1.0`
(i.e. square pixels) and only vary the output image dimensions in pixels
for rasterization.
"""
def __init__(
self,
znear=1.0,
zfar=100.0,
aspect_ratio=1.0,
fov=60.0,
degrees: bool = True,
R: torch.Tensor = _R,
T: torch.Tensor = _T,
K: Optional[torch.Tensor] = None,
device: Device = "cpu",
) -> None:
"""
Args:
znear: near clipping plane of the view frustrum.
zfar: far clipping plane of the view frustrum.
aspect_ratio: aspect ratio of the image pixels.
1.0 indicates square pixels.
fov: field of view angle of the camera.
degrees: bool, set to True if fov is specified in degrees.
R: Rotation matrix of shape (N, 3, 3)
T: Translation matrix of shape (N, 3)
K: (optional) A calibration matrix of shape (N, 4, 4)
If provided, don't need znear, zfar, fov, aspect_ratio, degrees
device: Device (as str or torch.device)
"""
# The initializer formats all inputs to torch tensors and broadcasts
# all the inputs to have the same batch dimension where necessary.
super().__init__(
device=device,
znear=znear,
zfar=zfar,
aspect_ratio=aspect_ratio,
fov=fov,
R=R,
T=T,
K=K,
)
# No need to convert to tensor or broadcast.
self.degrees = degrees
def compute_projection_matrix(
self, znear, zfar, fov, aspect_ratio, degrees: bool
) -> torch.Tensor:
"""
Compute the calibration matrix K of shape (N, 4, 4)
Args:
znear: near clipping plane of the view frustrum.
zfar: far clipping plane of the view frustrum.
fov: field of view angle of the camera.
aspect_ratio: aspect ratio of the image pixels.
1.0 indicates square pixels.
degrees: bool, set to True if fov is specified in degrees.
Returns:
torch.FloatTensor of the calibration matrix with shape (N, 4, 4)
"""
K = torch.zeros((self._N, 4, 4), device=self.device, dtype=torch.float32)
ones = torch.ones((self._N), dtype=torch.float32, device=self.device)
if degrees:
fov = (np.pi / 180) * fov
if not torch.is_tensor(fov):
fov = torch.tensor(fov, device=self.device)
tanHalfFov = torch.tan((fov / 2))
max_y = tanHalfFov * znear
min_y = -max_y
max_x = max_y * aspect_ratio
min_x = -max_x
# NOTE: In OpenGL the projection matrix changes the handedness of the
# coordinate frame. i.e the NDC space positive z direction is the
# camera space negative z direction. This is because the sign of the z
# in the projection matrix is set to -1.0.
# In pytorch3d we maintain a right handed coordinate system throughout
        # so the z sign is 1.0.
z_sign = 1.0
K[:, 0, 0] = 2.0 * znear / (max_x - min_x)
K[:, 1, 1] = 2.0 * znear / (max_y - min_y)
K[:, 0, 2] = (max_x + min_x) / (max_x - min_x)
K[:, 1, 2] = (max_y + min_y) / (max_y - min_y)
K[:, 3, 2] = z_sign * ones
# NOTE: This maps the z coordinate from [0, 1] where z = 0 if the point
# is at the near clipping plane and z = 1 when the point is at the far
# clipping plane.
K[:, 2, 2] = z_sign * zfar / (zfar - znear)
K[:, 2, 3] = -(zfar * znear) / (zfar - znear)
return K
def get_projection_transform(self, **kwargs) -> Transform3d:
"""
Calculate the perspective projection matrix with a symmetric
viewing frustrum. Use column major order.
The viewing frustrum will be projected into ndc, s.t.
(max_x, max_y) -> (+1, +1)
(min_x, min_y) -> (-1, -1)
Args:
**kwargs: parameters for the projection can be passed in as keyword
arguments to override the default values set in `__init__`.
Return:
a Transform3d object which represents a batch of projection
matrices of shape (N, 4, 4)
.. code-block:: python
h1 = (max_y + min_y)/(max_y - min_y)
w1 = (max_x + min_x)/(max_x - min_x)
tanhalffov = tan((fov/2))
s1 = 1/tanhalffov
s2 = 1/(tanhalffov * (aspect_ratio))
# To map z to the range [0, 1] use:
f1 = far / (far - near)
f2 = -(far * near) / (far - near)
# Projection matrix
K = [
[s1, 0, w1, 0],
[0, s2, h1, 0],
[0, 0, f1, f2],
[0, 0, 1, 0],
]
"""
K = kwargs.get("K", self.K)
if K is not None:
if K.shape != (self._N, 4, 4):
msg = "Expected K to have shape of (%r, 4, 4)"
raise ValueError(msg % (self._N))
else:
K = self.compute_projection_matrix(
kwargs.get("znear", self.znear),
kwargs.get("zfar", self.zfar),
kwargs.get("fov", self.fov),
kwargs.get("aspect_ratio", self.aspect_ratio),
kwargs.get("degrees", self.degrees),
)
# Transpose the projection matrix as PyTorch3D transforms use row vectors.
transform = Transform3d(
matrix=K.transpose(1, 2).contiguous(), device=self.device
)
return transform
def unproject_points(
self,
xy_depth: torch.Tensor,
world_coordinates: bool = True,
scaled_depth_input: bool = False,
**kwargs
) -> torch.Tensor:
""">!
FoV cameras further allow for passing depth in world units
(`scaled_depth_input=False`) or in the [0, 1]-normalized units
(`scaled_depth_input=True`)
Args:
scaled_depth_input: If `True`, assumes the input depth is in
the [0, 1]-normalized units. If `False` the input depth is in
the world units.
"""
# obtain the relevant transformation to ndc
if world_coordinates:
to_ndc_transform = self.get_full_projection_transform()
else:
to_ndc_transform = self.get_projection_transform()
if scaled_depth_input:
# the input is scaled depth, so we don't have to do anything
xy_sdepth = xy_depth
else:
# parse out important values from the projection matrix
K_matrix = self.get_projection_transform(**kwargs.copy()).get_matrix()
# parse out f1, f2 from K_matrix
unsqueeze_shape = [1] * xy_depth.dim()
unsqueeze_shape[0] = K_matrix.shape[0]
f1 = K_matrix[:, 2, 2].reshape(unsqueeze_shape)
f2 = K_matrix[:, 3, 2].reshape(unsqueeze_shape)
# get the scaled depth
sdepth = (f1 * xy_depth[..., 2:3] + f2) / xy_depth[..., 2:3]
# concatenate xy + scaled depth
xy_sdepth = torch.cat((xy_depth[..., 0:2], sdepth), dim=-1)
# unproject with inverse of the projection
unprojection_transform = to_ndc_transform.inverse()
return unprojection_transform.transform_points(xy_sdepth)
def is_perspective(self):
return True
def in_ndc(self):
return True
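# A minimal usage sketch (not part of the original pytorch3d source): project a small batch
# of world-space points to NDC with a FoVPerspectiveCameras instance; values are illustrative.
def _example_fov_projection():
    cameras = FoVPerspectiveCameras(znear=0.1, zfar=10.0, fov=60.0)
    # points of shape (N, P, 3), placed in front of the camera (positive z)
    points = torch.rand(1, 5, 3) + torch.tensor([0.0, 0.0, 2.0])
    return cameras.transform_points(points)  # world -> view -> NDC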
def OpenGLOrthographicCameras(
znear=1.0,
zfar=100.0,
top=1.0,
bottom=-1.0,
left=-1.0,
right=1.0,
scale_xyz=((1.0, 1.0, 1.0),), # (1, 3)
R: torch.Tensor = _R,
T: torch.Tensor = _T,
device: Device = "cpu",
) -> "FoVOrthographicCameras":
"""
OpenGLOrthographicCameras has been DEPRECATED. Use FoVOrthographicCameras instead.
Preserving OpenGLOrthographicCameras for backward compatibility.
"""
warnings.warn(
"""OpenGLOrthographicCameras is deprecated,
Use FoVOrthographicCameras instead.
OpenGLOrthographicCameras will be removed in future releases.""",
PendingDeprecationWarning,
)
return FoVOrthographicCameras(
znear=znear,
zfar=zfar,
max_y=top,
min_y=bottom,
max_x=right,
min_x=left,
scale_xyz=scale_xyz,
R=R,
T=T,
device=device,
)
class FoVOrthographicCameras(CamerasBase):
"""
A class which stores a batch of parameters to generate a batch of
projection matrices by specifying the field of view.
The definition of the parameters follow the OpenGL orthographic camera.
"""
def __init__(
self,
znear=1.0,
zfar=100.0,
max_y=1.0,
min_y=-1.0,
max_x=1.0,
min_x=-1.0,
scale_xyz=((1.0, 1.0, 1.0),), # (1, 3)
R: torch.Tensor = _R,
T: torch.Tensor = _T,
K: Optional[torch.Tensor] = None,
device: Device = "cpu",
):
"""
Args:
znear: near clipping plane of the view frustrum.
zfar: far clipping plane of the view frustrum.
max_y: maximum y coordinate of the frustrum.
min_y: minimum y coordinate of the frustrum.
max_x: maximum x coordinate of the frustrum.
min_x: minimum x coordinate of the frustrum
scale_xyz: scale factors for each axis of shape (N, 3).
R: Rotation matrix of shape (N, 3, 3).
T: Translation of shape (N, 3).
K: (optional) A calibration matrix of shape (N, 4, 4)
If provided, don't need znear, zfar, max_y, min_y, max_x, min_x, scale_xyz
device: torch.device or string.
Only need to set min_x, max_x, min_y, max_y for viewing frustrums
which are non symmetric about the origin.
"""
# The initializer formats all inputs to torch tensors and broadcasts
# all the inputs to have the same batch dimension where necessary.
super().__init__(
device=device,
znear=znear,
zfar=zfar,
max_y=max_y,
min_y=min_y,
max_x=max_x,
min_x=min_x,
scale_xyz=scale_xyz,
R=R,
T=T,
K=K,
)
def compute_projection_matrix(
self, znear, zfar, max_x, min_x, max_y, min_y, scale_xyz
) -> torch.Tensor:
"""
Compute the calibration matrix K of shape (N, 4, 4)
Args:
znear: near clipping plane of the view frustrum.
zfar: far clipping plane of the view frustrum.
max_x: maximum x coordinate of the frustrum.
min_x: minimum x coordinate of the frustrum
max_y: maximum y coordinate of the frustrum.
min_y: minimum y coordinate of the frustrum.
scale_xyz: scale factors for each axis of shape (N, 3).
"""
K = torch.zeros((self._N, 4, 4), dtype=torch.float32, device=self.device)
ones = torch.ones((self._N), dtype=torch.float32, device=self.device)
# NOTE: OpenGL flips handedness of coordinate system between camera
# space and NDC space so z sign is -ve. In PyTorch3D we maintain a
# right handed coordinate system throughout.
z_sign = +1.0
K[:, 0, 0] = (2.0 / (max_x - min_x)) * scale_xyz[:, 0]
K[:, 1, 1] = (2.0 / (max_y - min_y)) * scale_xyz[:, 1]
K[:, 0, 3] = -(max_x + min_x) / (max_x - min_x)
K[:, 1, 3] = -(max_y + min_y) / (max_y - min_y)
K[:, 3, 3] = ones
# NOTE: This maps the z coordinate to the range [0, 1] and replaces the
        # OpenGL z normalization to [-1, 1]
K[:, 2, 2] = z_sign * (1.0 / (zfar - znear)) * scale_xyz[:, 2]
K[:, 2, 3] = -znear / (zfar - znear)
return K
def get_projection_transform(self, **kwargs) -> Transform3d:
"""
Calculate the orthographic projection matrix.
Use column major order.
Args:
**kwargs: parameters for the projection can be passed in to
override the default values set in __init__.
Return:
a Transform3d object which represents a batch of projection
matrices of shape (N, 4, 4)
.. code-block:: python
scale_x = 2 / (max_x - min_x)
scale_y = 2 / (max_y - min_y)
scale_z = 2 / (far-near)
mid_x = (max_x + min_x) / (max_x - min_x)
            mid_y = (max_y + min_y) / (max_y - min_y)
mid_z = (far + near) / (far - near)
K = [
[scale_x, 0, 0, -mid_x],
                [0, scale_y, 0, -mid_y],
[0, 0, -scale_z, -mid_z],
[0, 0, 0, 1],
]
"""
K = kwargs.get("K", self.K)
if K is not None:
if K.shape != (self._N, 4, 4):
msg = "Expected K to have shape of (%r, 4, 4)"
raise ValueError(msg % (self._N))
else:
K = self.compute_projection_matrix(
kwargs.get("znear", self.znear),
kwargs.get("zfar", self.zfar),
kwargs.get("max_x", self.max_x),
kwargs.get("min_x", self.min_x),
kwargs.get("max_y", self.max_y),
kwargs.get("min_y", self.min_y),
kwargs.get("scale_xyz", self.scale_xyz),
)
transform = Transform3d(
matrix=K.transpose(1, 2).contiguous(), device=self.device
)
return transform
def unproject_points(
self,
xy_depth: torch.Tensor,
world_coordinates: bool = True,
scaled_depth_input: bool = False,
**kwargs
) -> torch.Tensor:
""">!
FoV cameras further allow for passing depth in world units
(`scaled_depth_input=False`) or in the [0, 1]-normalized units
(`scaled_depth_input=True`)
Args:
scaled_depth_input: If `True`, assumes the input depth is in
the [0, 1]-normalized units. If `False` the input depth is in
the world units.
"""
if world_coordinates:
to_ndc_transform = self.get_full_projection_transform(**kwargs.copy())
else:
to_ndc_transform = self.get_projection_transform(**kwargs.copy())
if scaled_depth_input:
# the input depth is already scaled
xy_sdepth = xy_depth
else:
# we have to obtain the scaled depth first
K = self.get_projection_transform(**kwargs).get_matrix()
unsqueeze_shape = [1] * K.dim()
unsqueeze_shape[0] = K.shape[0]
mid_z = K[:, 3, 2].reshape(unsqueeze_shape)
scale_z = K[:, 2, 2].reshape(unsqueeze_shape)
scaled_depth = scale_z * xy_depth[..., 2:3] + mid_z
# cat xy and scaled depth
xy_sdepth = torch.cat((xy_depth[..., :2], scaled_depth), dim=-1)
# finally invert the transform
unprojection_transform = to_ndc_transform.inverse()
return unprojection_transform.transform_points(xy_sdepth)
def is_perspective(self):
return False
def in_ndc(self):
return True
############################################################
# MultiView Camera Classes #
############################################################
"""
Note that the MultiView Cameras accept parameters in NDC space.
"""
def SfMPerspectiveCameras(
focal_length=1.0,
principal_point=((0.0, 0.0),),
R: torch.Tensor = _R,
T: torch.Tensor = _T,
device: Device = "cpu",
) -> "PerspectiveCameras":
"""
SfMPerspectiveCameras has been DEPRECATED. Use PerspectiveCameras instead.
Preserving SfMPerspectiveCameras for backward compatibility.
"""
warnings.warn(
"""SfMPerspectiveCameras is deprecated,
Use PerspectiveCameras instead.
SfMPerspectiveCameras will be removed in future releases.""",
PendingDeprecationWarning,
)
return PerspectiveCameras(
focal_length=focal_length,
principal_point=principal_point,
R=R,
T=T,
device=device,
)
class PerspectiveCameras(CamerasBase):
"""
A class which stores a batch of parameters to generate a batch of
transformation matrices using the multi-view geometry convention for
perspective camera.
Parameters for this camera are specified in NDC if `in_ndc` is set to True.
If parameters are specified in screen space, `in_ndc` must be set to False.
"""
def __init__(
self,
focal_length=1.0,
principal_point=((0.0, 0.0),),
R: torch.Tensor = _R,
T: torch.Tensor = _T,
K: Optional[torch.Tensor] = None,
device: Device = "cpu",
in_ndc: bool = True,
image_size: Optional[Union[List, Tuple, torch.Tensor]] = None,
) -> None:
"""
Args:
focal_length: Focal length of the camera in world units.
A tensor of shape (N, 1) or (N, 2) for
square and non-square pixels respectively.
principal_point: xy coordinates of the center of
the principal point of the camera in pixels.
A tensor of shape (N, 2).
in_ndc: True if camera parameters are specified in NDC.
If camera parameters are in screen space, it must
be set to False.
R: Rotation matrix of shape (N, 3, 3)
T: Translation matrix of shape (N, 3)
K: (optional) A calibration matrix of shape (N, 4, 4)
If provided, don't need focal_length, principal_point
image_size: (height, width) of image size.
A tensor of shape (N, 2) or a list/tuple. Required for screen cameras.
device: torch.device or string
"""
# The initializer formats all inputs to torch tensors and broadcasts
# all the inputs to have the same batch dimension where necessary.
kwargs = {"image_size": image_size} if image_size is not None else {}
super().__init__(
device=device,
focal_length=focal_length,
principal_point=principal_point,
R=R,
T=T,
K=K,
_in_ndc=in_ndc,
**kwargs, # pyre-ignore
)
if image_size is not None:
if (self.image_size < 1).any(): # pyre-ignore
raise ValueError("Image_size provided has invalid values")
else:
self.image_size = None
def get_projection_transform(self, **kwargs) -> Transform3d:
"""
Calculate the projection matrix using the
multi-view geometry convention.
Args:
**kwargs: parameters for the projection can be passed in as keyword
arguments to override the default values set in __init__.
Returns:
A `Transform3d` object with a batch of `N` projection transforms.
.. code-block:: python
fx = focal_length[:, 0]
fy = focal_length[:, 1]
px = principal_point[:, 0]
py = principal_point[:, 1]
K = [
[fx, 0, px, 0],
[0, fy, py, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
]
"""
K = kwargs.get("K", self.K)
if K is not None:
if K.shape != (self._N, 4, 4):
msg = "Expected K to have shape of (%r, 4, 4)"
raise ValueError(msg % (self._N))
else:
K = _get_sfm_calibration_matrix(
self._N,
self.device,
kwargs.get("focal_length", self.focal_length),
kwargs.get("principal_point", self.principal_point),
orthographic=False,
)
transform = Transform3d(
matrix=K.transpose(1, 2).contiguous(), device=self.device
)
return transform
def unproject_points(
self, xy_depth: torch.Tensor, world_coordinates: bool = True, **kwargs
) -> torch.Tensor:
if world_coordinates:
to_camera_transform = self.get_full_projection_transform(**kwargs)
else:
to_camera_transform = self.get_projection_transform(**kwargs)
unprojection_transform = to_camera_transform.inverse()
xy_inv_depth = torch.cat(
(xy_depth[..., :2], 1.0 / xy_depth[..., 2:3]), dim=-1 # type: ignore
)
return unprojection_transform.transform_points(xy_inv_depth)
def get_principal_point(self, **kwargs) -> torch.Tensor:
"""
Return the camera's principal point
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
"""
proj_mat = self.get_projection_transform(**kwargs).get_matrix()
return proj_mat[:, 2, :2]
def get_ndc_camera_transform(self, **kwargs) -> Transform3d:
"""
Returns the transform from camera projection space (screen or NDC) to NDC space.
If the camera is defined already in NDC space, the transform is identity.
For cameras defined in screen space, we adjust the principal point computation
which is defined in the image space (commonly) and scale the points to NDC space.
Important: This transforms assumes PyTorch3D conventions for the input points,
i.e. +X left, +Y up.
"""
if self.in_ndc():
ndc_transform = Transform3d(device=self.device, dtype=torch.float32)
else:
# when cameras are defined in screen/image space, the principal point is
# provided in the (+X right, +Y down), aka image, coordinate system.
# Since input points are defined in the PyTorch3D system (+X left, +Y up),
# we need to adjust for the principal point transform.
pr_point_fix = torch.zeros(
(self._N, 4, 4), device=self.device, dtype=torch.float32
)
pr_point_fix[:, 0, 0] = 1.0
pr_point_fix[:, 1, 1] = 1.0
pr_point_fix[:, 2, 2] = 1.0
pr_point_fix[:, 3, 3] = 1.0
pr_point_fix[:, :2, 3] = -2.0 * self.get_principal_point(**kwargs)
pr_point_fix_transform = Transform3d(
matrix=pr_point_fix.transpose(1, 2).contiguous(), device=self.device
)
image_size = kwargs.get("image_size", self.get_image_size())
screen_to_ndc_transform = get_screen_to_ndc_transform(
self, with_xyflip=False, image_size=image_size
)
ndc_transform = pr_point_fix_transform.compose(screen_to_ndc_transform)
return ndc_transform
def is_perspective(self):
return True
def in_ndc(self):
return self._in_ndc
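# A minimal sketch (not part of the original pytorch3d source): a PerspectiveCameras instance
# defined in screen (pixel) space, with focal length and principal point given in pixels, used
# to project world points directly to screen coordinates; values are illustrative.
def _example_screen_space_projection():
    cameras = PerspectiveCameras(
        focal_length=((100.0, 100.0),),
        principal_point=((128.0, 64.0),),
        in_ndc=False,
        image_size=((128, 256),),  # (height, width)
    )
    points = torch.tensor([[[0.1, 0.2, 1.5]]])  # (N=1, P=1, 3) world-space points
    return cameras.transform_points_screen(points)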
def SfMOrthographicCameras(
focal_length=1.0,
principal_point=((0.0, 0.0),),
R: torch.Tensor = _R,
T: torch.Tensor = _T,
device: Device = "cpu",
) -> "OrthographicCameras":
"""
SfMOrthographicCameras has been DEPRECATED. Use OrthographicCameras instead.
Preserving SfMOrthographicCameras for backward compatibility.
"""
warnings.warn(
"""SfMOrthographicCameras is deprecated,
Use OrthographicCameras instead.
SfMOrthographicCameras will be removed in future releases.""",
PendingDeprecationWarning,
)
return OrthographicCameras(
focal_length=focal_length,
principal_point=principal_point,
R=R,
T=T,
device=device,
)
class OrthographicCameras(CamerasBase):
"""
A class which stores a batch of parameters to generate a batch of
transformation matrices using the multi-view geometry convention for
orthographic camera.
Parameters for this camera are specified in NDC if `in_ndc` is set to True.
If parameters are specified in screen space, `in_ndc` must be set to False.
"""
def __init__(
self,
focal_length=1.0,
principal_point=((0.0, 0.0),),
R: torch.Tensor = _R,
T: torch.Tensor = _T,
K: Optional[torch.Tensor] = None,
device: Device = "cpu",
in_ndc: bool = True,
image_size: Optional[Union[List, Tuple, torch.Tensor]] = None,
) -> None:
"""
Args:
focal_length: Focal length of the camera in world units.
A tensor of shape (N, 1) or (N, 2) for
square and non-square pixels respectively.
principal_point: xy coordinates of the center of
the principal point of the camera in pixels.
A tensor of shape (N, 2).
in_ndc: True if camera parameters are specified in NDC.
If False, then camera parameters are in screen space.
R: Rotation matrix of shape (N, 3, 3)
T: Translation matrix of shape (N, 3)
K: (optional) A calibration matrix of shape (N, 4, 4)
If provided, don't need focal_length, principal_point, image_size
image_size: (height, width) of image size.
A tensor of shape (N, 2) or list/tuple. Required for screen cameras.
device: torch.device or string
"""
# The initializer formats all inputs to torch tensors and broadcasts
# all the inputs to have the same batch dimension where necessary.
kwargs = {"image_size": image_size} if image_size is not None else {}
super().__init__(
device=device,
focal_length=focal_length,
principal_point=principal_point,
R=R,
T=T,
K=K,
_in_ndc=in_ndc,
**kwargs, # pyre-ignore
)
if image_size is not None:
if (self.image_size < 1).any(): # pyre-ignore
raise ValueError("Image_size provided has invalid values")
else:
self.image_size = None
def get_projection_transform(self, **kwargs) -> Transform3d:
"""
Calculate the projection matrix using
the multi-view geometry convention.
Args:
**kwargs: parameters for the projection can be passed in as keyword
arguments to override the default values set in __init__.
Returns:
A `Transform3d` object with a batch of `N` projection transforms.
.. code-block:: python
fx = focal_length[:,0]
fy = focal_length[:,1]
px = principal_point[:,0]
py = principal_point[:,1]
K = [
[fx, 0, 0, px],
[0, fy, 0, py],
[0, 0, 1, 0],
[0, 0, 0, 1],
]
"""
K = kwargs.get("K", self.K)
if K is not None:
if K.shape != (self._N, 4, 4):
msg = "Expected K to have shape of (%r, 4, 4)"
raise ValueError(msg % (self._N))
else:
K = _get_sfm_calibration_matrix(
self._N,
self.device,
kwargs.get("focal_length", self.focal_length),
kwargs.get("principal_point", self.principal_point),
orthographic=True,
)
transform = Transform3d(
matrix=K.transpose(1, 2).contiguous(), device=self.device
)
return transform
def unproject_points(
self, xy_depth: torch.Tensor, world_coordinates: bool = True, **kwargs
) -> torch.Tensor:
if world_coordinates:
to_camera_transform = self.get_full_projection_transform(**kwargs)
else:
to_camera_transform = self.get_projection_transform(**kwargs)
unprojection_transform = to_camera_transform.inverse()
return unprojection_transform.transform_points(xy_depth)
def get_principal_point(self, **kwargs) -> torch.Tensor:
"""
Return the camera's principal point
Args:
**kwargs: parameters for the camera extrinsics can be passed in
as keyword arguments to override the default values
set in __init__.
"""
proj_mat = self.get_projection_transform(**kwargs).get_matrix()
return proj_mat[:, 3, :2]
def get_ndc_camera_transform(self, **kwargs) -> Transform3d:
"""
Returns the transform from camera projection space (screen or NDC) to NDC space.
If the camera is defined already in NDC space, the transform is identity.
For cameras defined in screen space, we adjust the principal point computation
which is defined in the image space (commonly) and scale the points to NDC space.
        Important: This transform assumes PyTorch3D conventions for the input points,
i.e. +X left, +Y up.
"""
if self.in_ndc():
ndc_transform = Transform3d(device=self.device, dtype=torch.float32)
else:
# when cameras are defined in screen/image space, the principal point is
# provided in the (+X right, +Y down), aka image, coordinate system.
# Since input points are defined in the PyTorch3D system (+X left, +Y up),
# we need to adjust for the principal point transform.
pr_point_fix = torch.zeros(
(self._N, 4, 4), device=self.device, dtype=torch.float32
)
pr_point_fix[:, 0, 0] = 1.0
pr_point_fix[:, 1, 1] = 1.0
pr_point_fix[:, 2, 2] = 1.0
pr_point_fix[:, 3, 3] = 1.0
pr_point_fix[:, :2, 3] = -2.0 * self.get_principal_point(**kwargs)
pr_point_fix_transform = Transform3d(
matrix=pr_point_fix.transpose(1, 2).contiguous(), device=self.device
)
image_size = kwargs.get("image_size", self.get_image_size())
screen_to_ndc_transform = get_screen_to_ndc_transform(
self, with_xyflip=False, image_size=image_size
)
ndc_transform = pr_point_fix_transform.compose(screen_to_ndc_transform)
return ndc_transform
def is_perspective(self):
return False
def in_ndc(self):
return self._in_ndc
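# Illustrative usage sketch (added for exposition; not part of the original
# module): a single default OrthographicCameras instance, whose projection
# matrix reduces to the identity because focal_length=1 and the principal
# point sits at the origin.
def _example_orthographic_camera():
    cameras = OrthographicCameras()
    return cameras.get_projection_transform().get_matrix()  # (1, 4, 4), identity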
################################################
# Helper functions for cameras #
################################################
def _get_sfm_calibration_matrix(
N: int,
device: Device,
focal_length,
principal_point,
orthographic: bool = False,
) -> torch.Tensor:
"""
Returns a calibration matrix of a perspective/orthographic camera.
Args:
N: Number of cameras.
focal_length: Focal length of the camera.
principal_point: xy coordinates of the center of
the principal point of the camera in pixels.
orthographic: Boolean specifying if the camera is orthographic or not
The calibration matrix `K` is set up as follows:
.. code-block:: python
fx = focal_length[:,0]
fy = focal_length[:,1]
px = principal_point[:,0]
py = principal_point[:,1]
for orthographic==True:
K = [
[fx, 0, 0, px],
[0, fy, 0, py],
[0, 0, 1, 0],
[0, 0, 0, 1],
]
else:
K = [
[fx, 0, px, 0],
[0, fy, py, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
]
Returns:
A calibration matrix `K` of the SfM-conventioned camera
of shape (N, 4, 4).
"""
if not torch.is_tensor(focal_length):
focal_length = torch.tensor(focal_length, device=device)
if focal_length.ndim in (0, 1) or focal_length.shape[1] == 1:
fx = fy = focal_length
else:
fx, fy = focal_length.unbind(1)
if not torch.is_tensor(principal_point):
principal_point = torch.tensor(principal_point, device=device)
px, py = principal_point.unbind(1)
K = fx.new_zeros(N, 4, 4)
K[:, 0, 0] = fx
K[:, 1, 1] = fy
if orthographic:
K[:, 0, 3] = px
K[:, 1, 3] = py
K[:, 2, 2] = 1.0
K[:, 3, 3] = 1.0
else:
K[:, 0, 2] = px
K[:, 1, 2] = py
K[:, 3, 2] = 1.0
K[:, 2, 3] = 1.0
return K
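# Illustrative sketch (added for exposition; not part of the original module):
# the perspective-style calibration matrix documented above for one camera
# with fx = fy = 2.0 and the principal point at the origin.
def _example_sfm_calibration_matrix():
    return _get_sfm_calibration_matrix(
        N=1,
        device="cpu",
        focal_length=((2.0, 2.0),),
        principal_point=((0.0, 0.0),),
        orthographic=False,
    )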
################################################
# Helper functions for world to view transforms
################################################
def get_world_to_view_transform(
R: torch.Tensor = _R, T: torch.Tensor = _T
) -> Transform3d:
"""
This function returns a Transform3d representing the transformation
matrix to go from world space to view space by applying a rotation and
a translation.
PyTorch3D uses the same convention as Hartley & Zisserman.
I.e., for camera extrinsic parameters R (rotation) and T (translation),
we map a 3D point `X_world` in world coordinates to
a point `X_cam` in camera coordinates with:
`X_cam = X_world R + T`
Args:
R: (N, 3, 3) matrix representing the rotation.
T: (N, 3) matrix representing the translation.
Returns:
a Transform3d object which represents the composed RT transformation.
"""
# TODO: also support the case where RT is specified as one matrix
# of shape (N, 4, 4).
if T.shape[0] != R.shape[0]:
msg = "Expected R, T to have the same batch dimension; got %r, %r"
raise ValueError(msg % (R.shape[0], T.shape[0]))
if T.dim() != 2 or T.shape[1:] != (3,):
msg = "Expected T to have shape (N, 3); got %r"
raise ValueError(msg % repr(T.shape))
if R.dim() != 3 or R.shape[1:] != (3, 3):
msg = "Expected R to have shape (N, 3, 3); got %r"
raise ValueError(msg % repr(R.shape))
# Create a Transform3d object
T_ = Translate(T, device=T.device)
R_ = Rotate(R, device=R.device)
return R_.compose(T_)
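# Illustrative sketch (added for exposition; not part of the original module):
# an identity rotation with a unit translation along +Z maps the world origin
# to (0, 0, 1) in view coordinates, matching X_cam = X_world R + T above.
def _example_world_to_view():
    R = torch.eye(3)[None]                  # (1, 3, 3)
    T = torch.tensor([[0.0, 0.0, 1.0]])     # (1, 3)
    world_to_view = get_world_to_view_transform(R=R, T=T)
    return world_to_view.transform_points(torch.zeros(1, 1, 3))  # [[[0., 0., 1.]]]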
def camera_position_from_spherical_angles(
distance: float,
elevation: float,
azimuth: float,
degrees: bool = True,
device: Device = "cpu",
) -> torch.Tensor:
"""
Calculate the location of the camera based on the distance away from
the target point, the elevation and azimuth angles.
Args:
distance: distance of the camera from the object.
elevation, azimuth: angles.
The inputs distance, elevation and azimuth can be one of the following
- Python scalar
- Torch scalar
- Torch tensor of shape (N) or (1)
degrees: bool, whether the angles are specified in degrees or radians.
device: str or torch.device, device for new tensors to be placed on.
The vectors are broadcast against each other so they all have shape (N, 1).
Returns:
camera_position: (N, 3) xyz location of the camera.
"""
broadcasted_args = convert_to_tensors_and_broadcast(
distance, elevation, azimuth, device=device
)
dist, elev, azim = broadcasted_args
if degrees:
elev = math.pi / 180.0 * elev
azim = math.pi / 180.0 * azim
x = dist * torch.cos(elev) * torch.sin(azim)
y = dist * torch.sin(elev)
z = dist * torch.cos(elev) * torch.cos(azim)
camera_position = torch.stack([x, y, z], dim=1)
if camera_position.dim() == 0:
camera_position = camera_position.view(1, -1) # add batch dim.
return camera_position.view(-1, 3)
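# Illustrative sketch (added for exposition; not part of the original module):
# with elev = azim = 0 the formulas above reduce to (x, y, z) = (0, 0, dist),
# i.e. the camera sits on the +Z axis.
def _example_camera_position():
    return camera_position_from_spherical_angles(2.0, 0.0, 0.0)  # [[0., 0., 2.]]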
def look_at_rotation(
camera_position, at=((0, 0, 0),), up=((0, 1, 0),), device: Device = "cpu"
) -> torch.Tensor:
"""
This function takes a vector 'camera_position' which specifies the location
of the camera in world coordinates and two vectors `at` and `up` which
indicate the position of the object and the up directions of the world
coordinate system respectively. The object is assumed to be centered at
the origin.
The output is a rotation matrix representing the transformation
from world coordinates -> view coordinates.
Args:
camera_position: position of the camera in world coordinates
at: position of the object in world coordinates
up: vector specifying the up direction in the world coordinate frame.
The inputs camera_position, at and up can each be a
- 3 element tuple/list
- torch tensor of shape (1, 3)
- torch tensor of shape (N, 3)
The vectors are broadcast against each other so they all have shape (N, 3).
Returns:
R: (N, 3, 3) batched rotation matrices
"""
# Format input and broadcast
broadcasted_args = convert_to_tensors_and_broadcast(
camera_position, at, up, device=device
)
camera_position, at, up = broadcasted_args
for t, n in zip([camera_position, at, up], ["camera_position", "at", "up"]):
if t.shape[-1] != 3:
msg = "Expected arg %s to have shape (N, 3); got %r"
raise ValueError(msg % (n, t.shape))
z_axis = F.normalize(at - camera_position, eps=1e-5)
x_axis = F.normalize(torch.cross(up, z_axis, dim=1), eps=1e-5)
y_axis = F.normalize(torch.cross(z_axis, x_axis, dim=1), eps=1e-5)
is_close = torch.isclose(x_axis, torch.tensor(0.0), atol=5e-3).all(
dim=1, keepdim=True
)
if is_close.any():
replacement = F.normalize(torch.cross(y_axis, z_axis, dim=1), eps=1e-5)
x_axis = torch.where(is_close, replacement, x_axis)
R = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1)
return R.transpose(1, 2)
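# Illustrative sketch (added for exposition; not part of the original module):
# a camera at (0, 0, -2) looking at the origin with the default +Y up vector
# yields a (1, 3, 3) world-to-view rotation.
def _example_look_at_rotation():
    return look_at_rotation(camera_position=((0.0, 0.0, -2.0),))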
def look_at_view_transform(
dist=1.0,
elev=0.0,
azim=0.0,
degrees: bool = True,
eye: Optional[Sequence] = None,
at=((0, 0, 0),), # (1, 3)
up=((0, 1, 0),), # (1, 3)
device: Device = "cpu",
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
This function returns a rotation and translation matrix
to apply the 'Look At' transformation from world -> view coordinates [0].
Args:
dist: distance of the camera from the object
elev: angle in degrees or radians. This is the angle between the
vector from the object to the camera, and the horizontal plane y = 0 (xz-plane).
azim: angle in degrees or radians. The vector from the object to
the camera is projected onto a horizontal plane y = 0.
azim is the angle between the projected vector and a
reference vector at (0, 0, 1) on the reference plane (the horizontal plane).
dist, elev and azim can be of shape (1), (N).
degrees: boolean flag to indicate if the elevation and azimuth
angles are specified in degrees or radians.
eye: the position of the camera(s) in world coordinates. If eye is not
None, it will override the camera position derived from dist, elev, azim.
        up: the up direction of the world coordinate system.
at: the position of the object(s) in world coordinates.
eye, up and at can be of shape (1, 3) or (N, 3).
Returns:
2-element tuple containing
- **R**: the rotation to apply to the points to align with the camera.
- **T**: the translation to apply to the points to align with the camera.
References:
[0] https://www.scratchapixel.com
"""
if eye is not None:
broadcasted_args = convert_to_tensors_and_broadcast(eye, at, up, device=device)
eye, at, up = broadcasted_args
C = eye
else:
broadcasted_args = convert_to_tensors_and_broadcast(
dist, elev, azim, at, up, device=device
)
dist, elev, azim, at, up = broadcasted_args
C = (
camera_position_from_spherical_angles(
dist, elev, azim, degrees=degrees, device=device
)
+ at
)
R = look_at_rotation(C, at, up, device=device)
T = -torch.bmm(R.transpose(1, 2), C[:, :, None])[:, :, 0]
return R, T
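# Illustrative sketch (added for exposition; not part of the original module):
# the usual "orbit" placement, 2.7 units from the origin at 30 degrees
# elevation and 45 degrees azimuth; R is (1, 3, 3), T is (1, 3).
def _example_look_at_view_transform():
    R, T = look_at_view_transform(dist=2.7, elev=30.0, azim=45.0)
    return R, T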
def get_ndc_to_screen_transform(
cameras,
with_xyflip: bool = False,
image_size: Optional[Union[List, Tuple, torch.Tensor]] = None,
) -> Transform3d:
"""
PyTorch3D NDC to screen conversion.
Conversion from PyTorch3D's NDC space (+X left, +Y up) to screen/image space
(+X right, +Y down, origin top left).
Args:
cameras
with_xyflip: flips x- and y-axis if set to True.
Optional kwargs:
image_size: ((height, width),) specifying the height, width
            of the image. Required for this conversion; a ValueError is raised
            if it is missing.
We represent the NDC to screen conversion as a Transform3d
with projection matrix
K = [
[s, 0, 0, cx],
[0, s, 0, cy],
[0, 0, 1, 0],
[0, 0, 0, 1],
]
"""
# We require the image size, which is necessary for the transform
if image_size is None:
msg = "For NDC to screen conversion, image_size=(height, width) needs to be specified."
raise ValueError(msg)
K = torch.zeros((cameras._N, 4, 4), device=cameras.device, dtype=torch.float32)
if not torch.is_tensor(image_size):
image_size = torch.tensor(image_size, device=cameras.device)
image_size = image_size.view(-1, 2) # of shape (1 or B)x2
height, width = image_size.unbind(1)
# For non square images, we scale the points such that smallest side
# has range [-1, 1] and the largest side has range [-u, u], with u > 1.
# This convention is consistent with the PyTorch3D renderer
scale = (image_size.min(dim=1).values - 1.0) / 2.0
K[:, 0, 0] = scale
K[:, 1, 1] = scale
K[:, 0, 3] = -1.0 * (width - 1.0) / 2.0
K[:, 1, 3] = -1.0 * (height - 1.0) / 2.0
K[:, 2, 2] = 1.0
K[:, 3, 3] = 1.0
# Transpose the projection matrix as PyTorch3D transforms use row vectors.
transform = Transform3d(
matrix=K.transpose(1, 2).contiguous(), device=cameras.device
)
if with_xyflip:
# flip x, y axis
xyflip = torch.eye(4, device=cameras.device, dtype=torch.float32)
xyflip[0, 0] = -1.0
xyflip[1, 1] = -1.0
xyflip = xyflip.view(1, 4, 4).expand(cameras._N, -1, -1)
xyflip_transform = Transform3d(
matrix=xyflip.transpose(1, 2).contiguous(), device=cameras.device
)
transform = transform.compose(xyflip_transform)
return transform
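# Illustrative sketch (added for exposition; not part of the original module):
# NDC points of a default camera mapped to pixel coordinates of a made-up
# 128 x 256 (height, width) image, with the x/y flip applied.
def _example_ndc_to_screen():
    cameras = OrthographicCameras()
    return get_ndc_to_screen_transform(
        cameras, with_xyflip=True, image_size=((128, 256),)
    )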
def get_screen_to_ndc_transform(
cameras,
with_xyflip: bool = False,
image_size: Optional[Union[List, Tuple, torch.Tensor]] = None,
) -> Transform3d:
"""
Screen to PyTorch3D NDC conversion.
Conversion from screen/image space (+X right, +Y down, origin top left)
to PyTorch3D's NDC space (+X left, +Y up).
Args:
cameras
with_xyflip: flips x- and y-axis if set to True.
Optional kwargs:
image_size: ((height, width),) specifying the height, width
            of the image. Required; it is passed through to
            get_ndc_to_screen_transform, which raises a ValueError without it.
We represent the screen to NDC conversion as a Transform3d
with projection matrix
K = [
[1/s, 0, 0, cx/s],
[ 0, 1/s, 0, cy/s],
[ 0, 0, 1, 0],
[ 0, 0, 0, 1],
]
"""
transform = get_ndc_to_screen_transform(
cameras,
with_xyflip=with_xyflip,
image_size=image_size,
).inverse()
return transform
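# Illustrative sketch (added for exposition; not part of the original module):
# because screen-to-NDC is defined as the inverse of NDC-to-screen, composing
# the two transforms gives the identity up to floating point error.
def _example_screen_ndc_round_trip():
    cameras = OrthographicCameras()
    image_size = ((128, 256),)  # made-up (height, width) for demonstration
    forward = get_ndc_to_screen_transform(cameras, image_size=image_size)
    backward = get_screen_to_ndc_transform(cameras, image_size=image_size)
    return forward.compose(backward).get_matrix()  # ~ identity, (1, 4, 4)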
| 37.792853 | 103 | 0.592859 |
cd7a545035a8a2632537032d9128821167d55f23 | 665 | py | Python
internos/activityinfo/migrations/0106_auto_20200206_1444.py | UNICEFLebanonInnovation/Staging-Neuro | aac1e4f335ff4ec32041f989a9c22f8581a4961a | ["MIT"] | null | null | null
internos/activityinfo/migrations/0106_auto_20200206_1444.py | UNICEFLebanonInnovation/Staging-Neuro | aac1e4f335ff4ec32041f989a9c22f8581a4961a | ["MIT"] | null | null | null
internos/activityinfo/migrations/0106_auto_20200206_1444.py | UNICEFLebanonInnovation/Staging-Neuro | aac1e4f335ff4ec32041f989a9c22f8581a4961a | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2020-02-06 12:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('activityinfo', '0105_auto_20200206_1435'),
]
operations = [
migrations.AlterField(
model_name='activity',
name='ai_category_id',
field=models.CharField(blank=True, max_length=254, null=True),
),
migrations.AlterField(
model_name='activity',
name='location_type',
field=models.CharField(max_length=254, null=True),
),
]
| 25.576923 | 74 | 0.613534 |
492aff2a89642db7db9e303a7ae1268e65bc30a5 | 4913 | py | Python
year2021/python/day4.py | Infekma/Infekma-Advent-Of-Code | 4078d076f250b22c1dc3271a5fd74d31b6fb8b77 | ["MIT"] | null | null | null
year2021/python/day4.py | Infekma/Infekma-Advent-Of-Code | 4078d076f250b22c1dc3271a5fd74d31b6fb8b77 | ["MIT"] | null | null | null
year2021/python/day4.py | Infekma/Infekma-Advent-Of-Code | 4078d076f250b22c1dc3271a5fd74d31b6fb8b77 | ["MIT"] | null | null | null
import aocd
import csv
from numpy import matrixlib
YEAR = 2021
DAY = 4
# simple class representation for single value on the bingo board
class BingoEntry:
def __init__(self, value):
self.value = int(value)
self.isMarked = False
# checks if this entry matches the provided value
# if the entry matches the value it will be marked
def check(self, value):
if self.isMarked:
return
self.isMarked = True if self.value == value else False
return self.isMarked
class BingoBoard:
def __init__(self, rows):
self.board = []
# populate board with rows of bingo entry
for row in rows:
self.board.append([BingoEntry(val) for val in row.split()])
def check(self, value):
for row in self.board:
for entry in row:
# the bingo board assumes that there are no duplicates
# return early if the value was found on the board
                if entry.check(value):
                    return
def bingo(self):
return self.checkForBingoInRows() or self.checkForBingoInColumns()
def checkForBingoInRows(self):
# iterate through each row and check for bingo
for row in self.board:
if all([val.isMarked for val in row]):
return True
return False
def checkForBingoInColumns(self):
numOfColumns = len(self.board[0])
numOfRows = len(self.board)
# iterate through each column and check for bingo
for i in range(0, numOfColumns):
if all([self.board[y][i].isMarked for y in range(0, numOfRows)]):
return True
return False
def getAllUnMarkedEntries(self):
listOfUnMarkedEntries = []
for row in self.board:
for entry in row:
if not entry.isMarked:
listOfUnMarkedEntries.append(entry.value)
return listOfUnMarkedEntries
# visualises the bingo board by writing row by row
# if the board has bingo it'll stand out from non-bingo boards
# entries that are marked are also indicated
def visualise(self):
for row in self.board:
for i in range(0, len(row)):
printVal = f" {row[i].value} " if not row[i].isMarked else f"|{row[i].value}| "
print(printVal, end='')
print()
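# Illustrative sketch (added for exposition; not part of the original solution):
# a made-up 5x5 board whose first row is marked in full, confirming bingo()
# and the unmarked sum behave as described above.
def _example_single_board():
    rows = [
        " 1  2  3  4  5",
        " 6  7  8  9 10",
        "11 12 13 14 15",
        "16 17 18 19 20",
        "21 22 23 24 25",
    ]
    board = BingoBoard(rows)
    for value in (1, 2, 3, 4, 5):  # complete the first row
        board.check(value)
    return board.bingo(), sum(board.getAllUnMarkedEntries())  # (True, 310)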
def getSequence(data):
sequence = data[0].split(',')
return [int(val) for val in sequence]
def getBingoBoards(data):
numOfBoardRows = 5 # hard coded
boardList = []
# process each board 1 at a time by processing the numOfBoardRows
# the data also has a newline between each board that needs to be skipped
# hence the additional 1
for x in range(2, len(data)-numOfBoardRows, numOfBoardRows+1):
boardRows = [data[row] for row in range(x, x+numOfBoardRows)]
boardList.append(BingoBoard(boardRows))
return boardList
def checkBingoBoardsAgainstSequence(sequence, boards):
winningBoards = []
for value in sequence:
for board in boards:
if board.bingo(): # early continue if board is already bingo
continue
# check if the current value in the sequence is in the board
board.check(value)
if board.bingo(): # is the board a bingo due to the value that was just checked?
winningBoards.append([board, value])
return winningBoards
def visualiseBingoBoards(boards):
for board in boards:
if board.bingo():
print("========= BINGO ============")
board.visualise()
print("========= BINGO ============")
print()
else:
board.visualise()
print()
def solveAnswer(a, b):
return a * b
def part_a(data) -> int:
boards = getBingoBoards(data)
results = checkBingoBoardsAgainstSequence(getSequence(data), boards)
firstResults = results[0]
unmarkedEntries = firstResults[0].getAllUnMarkedEntries()
return solveAnswer(sum(unmarkedEntries), firstResults[1])
def part_b(data) -> int:
boards = getBingoBoards(data)
results = checkBingoBoardsAgainstSequence(getSequence(data), boards)
lastResults = results[-1]
unmarkedEntries = lastResults[0].getAllUnMarkedEntries()
return solveAnswer(sum(unmarkedEntries), lastResults[1])
if __name__ == "__main__":
# session token is derived from AOC_SESSION environmental variable
dayPuzzle = aocd.models.Puzzle(year=YEAR, day=DAY)
dayData = aocd.transforms.lines(dayPuzzle.input_data)
partA = part_a(dayData)
partB = part_b(dayData)
print(f"part a: {partA}")
print(f"part b: {partB}")
submitResults = True
if submitResults:
aocd.submit(partA, part="a", day=DAY, year=YEAR)
aocd.submit(partB, part="b", day=DAY, year=YEAR)
| 32.322368 | 95 | 0.621209 |
ffaaade5da2a5be04529af54bacac0ef586742f1 | 74901 | py | Python
airflow/www/views.py | rahul342/airflow | 2107dc97ca0b17131ad5cbda6c91301acf5a6079 | ["Apache-2.0"] | null | null | null
airflow/www/views.py | rahul342/airflow | 2107dc97ca0b17131ad5cbda6c91301acf5a6079 | ["Apache-2.0"] | null | null | null
airflow/www/views.py | rahul342/airflow | 2107dc97ca0b17131ad5cbda6c91301acf5a6079 | ["Apache-2.0"] | null | null | null
import sys
import os
import socket
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
from itertools import chain, product
from past.utils import old_div
from past.builtins import basestring
import inspect
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_
from flask import redirect, url_for, request, Markup, Response, current_app, render_template
from flask.ext.admin import BaseView, expose, AdminIndexView
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.admin.actions import action
from flask.ext.login import flash
from flask._compat import PY2
import jinja2
import markdown
import json
from wtforms import (
Form, SelectField, TextAreaField, PasswordField, StringField)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import models
from airflow.settings import Session
from airflow import configuration
from airflow import utils
from airflow.utils import AirflowException
from airflow.www import utils as wwwutils
from airflow import settings
from airflow.models import State
from airflow.www.forms import DateTimeForm, TreeForm
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
dagbag = models.DagBag(os.path.expanduser(configuration.get('core', 'DAGS_FOLDER')))
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
if configuration.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
url = url_for(
'airflow.graph',
dag_id=m.dag_id)
return Markup(
'<a href="{url}">{m.dag_id}</a>'.format(**locals()))
def log_link(v, c, m, p):
url = url_for(
'airflow.log',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
'<a href="{url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
url = url_for(
'airflow.task',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=m.dag_id,
root=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{m.task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
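# Illustrative sketch (added for exposition; not part of upstream Airflow):
# the renderer table above is keyed by attribute name, so rendering a templated
# field is a dict lookup plus a call; the command string here is made up.
def _example_render_attr():
    return attr_renderer['bash_command']('echo "{{ ds }}"')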
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
from airflow import macros
args.update(request_dict)
args['macros'] = macros
sql = jinja2.Template(chart.sql).render(**args)
label = jinja2.Template(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
import pandas as pd
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
                chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart_type == "datatable":
chart.show_datatable = True
if chart.show_datatable:
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
except Exception as e:
raise AirflowException(str(e))
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
series = []
colorAxis = None
if chart_type == 'datatable':
payload['data'] = data
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
elif chart_type == 'para':
df.rename(columns={
df.columns[0]: 'name',
df.columns[1]: 'group',
}, inplace=True)
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
elif chart_type == 'heatmap':
color_perc_lbound = float(
request.args.get('color_perc_lbound', 0))
color_perc_rbound = float(
request.args.get('color_perc_rbound', 1))
color_scheme = request.args.get('color_scheme', 'blue_red')
if color_scheme == 'blue_red':
stops = [
[color_perc_lbound, '#00D1C1'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#FFFFCC'
],
[color_perc_rbound, '#FF5A5F']
]
elif color_scheme == 'blue_scale':
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_rbound, '#2222FF']
]
elif color_scheme == 'fire':
diff = float(color_perc_rbound - color_perc_lbound)
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_lbound + 0.33*diff, '#FFFF00'],
[color_perc_lbound + 0.66*diff, '#FF0000'],
[color_perc_rbound, '#000000']
]
else:
stops = [
[color_perc_lbound, '#FFFFFF'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#888888'
],
[color_perc_rbound, '#000000'],
]
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
data = []
for row in df.itertuples():
data.append({
'x': row[2],
'y': row[3],
'value': row[4],
})
x_format = '{point.x:%Y-%m-%d}' \
if chart.x_is_date else '{point.x}'
series.append({
'data': data,
'borderWidth': 0,
'colsize': 24 * 36e5,
'turboThreshold': sys.float_info.max,
'tooltip': {
'headerFormat': '',
'pointFormat': (
df.columns[1] + ': ' + x_format + '<br/>' +
df.columns[2] + ': {point.y}<br/>' +
df.columns[3] + ': <b>{point.value}</b>'
),
},
})
colorAxis = {
'stops': stops,
'minColor': '#FFFFFF',
'maxColor': '#000000',
'min': 50,
'max': 2200,
}
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
for col in df.columns:
series.append({
'name': col,
'data': [
(k, df[col][k])
for k in df[col].keys()
if not np.isnan(df[col][k])]
})
series = [serie for serie in sorted(
series, key=lambda s: s['data'][0][1], reverse=True)]
if chart_type == "stacked_area":
stacking = "normal"
chart_type = 'area'
elif chart_type == "percent_area":
stacking = "percent"
chart_type = 'area'
else:
stacking = None
hc = {
'chart': {
'type': chart_type
},
'plotOptions': {
'series': {
'marker': {
'enabled': False
}
},
'area': {'stacking': stacking},
},
'title': {'text': ''},
'xAxis': {
'title': {'text': xaxis_label},
'type': 'datetime' if chart.x_is_date else None,
},
'yAxis': {
'title': {'text': yaxis_label},
},
'colorAxis': colorAxis,
'tooltip': {
'useHTML': True,
'backgroundColor': None,
'borderWidth': 0,
},
'series': series,
}
if chart.y_log_scale:
hc['yAxis']['type'] = 'logarithmic'
hc['yAxis']['minorTickInterval'] = 0.1
if 'min' in hc['yAxis']:
del hc['yAxis']['min']
payload['state'] = 'SUCCESS'
payload['hc'] = hc
payload['data'] = data
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
if chart.chart_type == 'para':
return self.render('airflow/para/para.html', chart=chart)
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/highchart.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
#@login_required
def dag_stats(self):
states = [
State.SUCCESS,
State.RUNNING,
State.FAILED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
State.QUEUED,
]
task_ids = []
dag_ids = []
for dag in dagbag.dags.values():
task_ids += dag.task_ids
if not dag.is_subdag:
dag_ids.append(dag.dag_id)
TI = models.TaskInstance
session = Session()
qry = (
session.query(TI.dag_id, TI.state, sqla.func.count(TI.task_id))
.filter(TI.task_id.in_(task_ids))
.filter(TI.dag_id.in_(dag_ids))
.group_by(TI.dag_id, TI.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
code = "".join(open(dag.full_filepath, 'r').readlines())
title = dag.filepath
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=configuration.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
def dag_details(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
session = settings.Session()
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=utils.State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.gethostname()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow import ascii as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.gethostname(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/sandbox')
@login_required
def sandbox(self):
from airflow import configuration
title = "Sandbox Suggested Configuration"
cfg_loc = configuration.AIRFLOW_CONFIG + '.sandbox'
f = open(cfg_loc, 'r')
config = f.read()
f.close()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {
'headers': {k: v for k, v in request.headers},
}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
if hasattr(current_user, 'username'):
d['username'] = current_user.username
return wwwutils.json_response(d)
@expose('/pickle_info')
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
@wwwutils.action_logging
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
configuration.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = os.path.join(BASE_LOG_FOLDER, log_relative)
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
log_loaded = False
if socket.gethostname() == host:
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
log_loaded = True
except:
log = "*** Log file isn't where expected.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
configuration.get('celery', 'WORKER_LOG_SERVER_PORT')
url = os.path.join(
"http://{host}:{WORKER_LOG_SERVER_PORT}/log", log_relative
).format(**locals())
log += "*** Log file isn't local.\n"
log += "*** Fetching here: {url}\n".format(**locals())
try:
import requests
log += '\n' + requests.get(url).text
log_loaded = True
except:
log += "*** Failed to fetch log file from worker.\n".format(
**locals())
# try to load log backup from S3
s3_log_folder = configuration.get('core', 'S3_LOG_FOLDER')
if not log_loaded and s3_log_folder.startswith('s3:'):
import boto
s3 = boto.connect_s3()
s3_log_loc = os.path.join(
configuration.get('core', 'S3_LOG_FOLDER'), log_relative)
log += '*** Fetching log from S3: {}\n'.format(s3_log_loc)
log += ('*** Note: S3 logs are only available once '
'tasks have completed.\n')
bucket, key = s3_log_loc.lstrip('s3:/').split('/', 1)
s3_key = boto.s3.key.Key(s3.get_bucket(bucket), key)
if s3_key.exists():
log += '\n' + s3_key.get_contents_as_string().decode()
else:
log += '*** No log found on S3.\n'
session.commit()
session.close()
log = log.decode('utf-8') if PY2 else log
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = dag.get_task(task_id)
task = copy.copy(task)
task.resolve_template_files()
attributes = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
attributes.append((attr_name, str(attr)))
title = "Task Details"
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
return self.render(
'airflow/task.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
force = request.args.get('force') == "true"
deps = request.args.get('deps') == "true"
try:
from airflow.executors import DEFAULT_EXECUTOR as executor
from airflow.executors import CeleryExecutor
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
executor.start()
executor.queue_task_instance(
ti, force=force, ignore_dependencies=deps)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
MAX_PERIODS = 1000
# Flagging tasks as successful
session = settings.Session()
task_ids = [task_id]
end_date = ((dag.latest_execution_date or datetime.now())
if future else execution_date)
if 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
elif dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
start_date = execution_date if not past else start_date
if downstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=False)]
if upstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=True)]
TI = models.TaskInstance
if dag.schedule_interval == '@once':
dates = [start_date]
else:
dates = dag.date_range(start_date, end_date=end_date)
tis = session.query(TI).filter(
TI.dag_id == dag_id,
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids)).all()
tis_to_change = session.query(TI).filter(
TI.dag_id == dag_id,
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids),
TI.state != State.SUCCESS).all()
tasks = list(product(task_ids, dates))
tis_to_create = list(
set(tasks) -
set([(ti.task_id, ti.execution_date) for ti in tis]))
tis_all_altered = list(chain(
[(ti.task_id, ti.execution_date) for ti in tis_to_change],
tis_to_create))
if len(tis_all_altered) > MAX_PERIODS:
flash("Too many tasks at once (>{0})".format(
MAX_PERIODS), 'error')
return redirect(origin)
if confirmed:
for ti in tis_to_change:
ti.state = State.SUCCESS
session.commit()
for task_id, task_execution_date in tis_to_create:
ti = TI(
task=dag.get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS)
session.add(ti)
session.commit()
session.commit()
session.close()
flash("Marked success on {} task instances".format(
len(tis_all_altered)))
return redirect(origin)
else:
if not tis_all_altered:
flash("No task instances to mark as successful", 'error')
response = redirect(origin)
else:
tis = []
for task_id, task_execution_date in tis_all_altered:
tis.append(TI(
task=dag.get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS))
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to mark as successful:"),
details=details,)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = configuration.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id==dag.dag_id,
DR.execution_date<=base_date,
DR.execution_date>=min_date)
.all()
)
dag_runs = {
dr.execution_date: utils.alchemy_to_dict(dr) for dr in dag_runs}
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
task_instances = {}
for ti in tis:
tid = utils.alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
return {
'name': task.task_id,
'instances': [
task_instances.get((task.task_id, d)) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=utils.json_ser)
session.commit()
session.close()
form = TreeForm(data={'base_date': max_date, 'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = configuration.getboolean('webserver', 'demo_mode')
arrange = request.args.get('arrange', "LR")
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
DR = models.DagRun
drs = session.query(DR).filter_by(dag_id=dag_id).order_by('execution_date desc').all()
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
class GraphForm(Form):
execution_date = SelectField("DAG run", choices=dr_choices)
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(
data={'execution_date': dttm.isoformat(), 'arrange': arrange})
task_instances = {
ti.task_id: utils.alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dr_state),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
@wwwutils.action_logging
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.duration:
data.append([
ti.execution_date.isoformat(),
float(ti.duration) / (60*60)
])
if data:
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=json.dumps(all_data),
chart_options={'yAxis': {'title': {'text': 'hours'}}},
height="700px",
demo_mode=configuration.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.end_date:
ts = ti.execution_date
if dag.schedule_interval:
ts = dag.following_schedule(ts)
secs = old_div((ti.end_date - ts).total_seconds(), 60*60)
data.append([ti.execution_date.isoformat(), secs])
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=json.dumps(all_data),
height="700px",
chart_options={'yAxis': {'title': {'text': 'hours after 00:00'}}},
demo_mode=configuration.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/paused')
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect('/')
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = configuration.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti
for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
data = []
for i, ti in enumerate(tis):
end_date = ti.end_date or datetime.now()
tasks += [ti.task_id]
color = State.color(ti.state)
data.append({
'x': i,
'low': int(ti.start_date.strftime('%s')) * 1000,
'high': int(end_date.strftime('%s')) * 1000,
'color': color,
})
height = (len(tis) * 25) + 50
session.commit()
session.close()
hc = {
'chart': {
'type': 'columnrange',
'inverted': True,
'height': height,
},
'xAxis': {'categories': tasks, 'alternateGridColor': '#FAFAFA'},
'yAxis': {'type': 'datetime'},
'title': {
'text': None
},
'plotOptions': {
'series': {
'cursor': 'pointer',
'minPointLength': 4,
},
},
'legend': {
'enabled': False
},
'series': [{
'data': data
}]
}
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
hc=json.dumps(hc, indent=4),
height=height,
demo_mode=demo_mode,
root=root,
)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except Exception:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = None
# filter the dags if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
if do_filter:
qry = (
session.query(DM)
.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.username)
.all()
)
else:
qry = session.query(DM).filter(~DM.is_subdag, DM.is_active).all()
orm_dags = {dag.dag_id: dag for dag in qry}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
dags = dagbag.dags.values()
if do_filter:
dags = {
dag.dag_id: dag
for dag in dags
if (
dag.owner == current_user.username and (not dag.parent_dag)
)
}
else:
dags = {dag.dag_id: dag for dag in dags if not dag.parent_dag}
all_dag_ids = sorted(set(orm_dags.keys()) | set(dags.keys()))
return self.render(
'airflow/dags.html',
dags=dags,
orm_dags=orm_dags,
all_dag_ids=all_dag_ids)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/')
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.args.get('conn_id')
csv = request.args.get('csv') == "true"
sql = request.args.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
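# When csv=true, return the raw result set as plain CSV text instead of rendering the HTML table.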
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
Modifying the base ModelView class for non edit, browse only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('para', 'Parallel Coordinates'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('heatmap', 'Heatmap'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = datetime.now()
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
'''
# For debugging / troubleshooting
mv = KnowEventTypeView(
models.KnownEventType,
Session, name="Known Event Types", category="Manage")
admin.add_view(mv)
class DagPickleView(SuperUserMixin, ModelView):
pass
mv = DagPickleView(
models.DagPickle,
Session, name="Pickles", category="Manage")
admin.add_view(mv)
'''
class VariableView(wwwutils.LoginMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
column_list = ('key',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'val': {
'rows': 20,
}
}
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_delete = True
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link)
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_dagrun_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_dagrun_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_dagrun_state(ids, State.SUCCESS)
@utils.provide_session
def set_dagrun_state(self, ids, target_state, session=None):
try:
DR = models.DagRun
count = 0
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
count += 1
dr.state = target_state
if target_state == State.RUNNING:
dr.start_date = datetime.now()
else:
dr.end_date = datetime.now()
session.commit()
flash(
"{count} dag runs were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
named_filter_urls = True
column_formatters = dict(
log=log_link, task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('start_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'pool', 'log')
can_delete = True
page_size = 500
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@utils.provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
for count, id in enumerate(ids):
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
count += 1
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted',)
form_overrides = dict(_password=PasswordField)
form_widget_args = {
'is_encrypted': {'disabled': True},
}
# Used to customize the form; the form's elements get rendered
# and results are stored in the extra field as json. All of these
# need to be prefixed with extra__ and then the conn_type ___ as in
# extra__{conn_type}__name. You can also hide form elements and rename
# others from the connection_form.js file
form_extra_fields = {
'extra__jdbc__drv_path' : StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
}
form_choices = {
'conn_type': [
('bigquery', 'BigQuery',),
('ftp', 'FTP',),
('hdfs', 'HDFS',),
('http', 'HTTP',),
('hive_cli', 'Hive Client Wrapper',),
('hive_metastore', 'Hive Metastore Thrift',),
('hiveserver2', 'Hive Server 2 Thrift',),
('jdbc', 'Jdbc Connection',),
('mysql', 'MySQL',),
('postgres', 'Postgres',),
('oracle', 'Oracle',),
('vertica', 'Vertica',),
('presto', 'Presto',),
('s3', 'S3',),
('samba', 'Samba',),
('sqlite', 'Sqlite',),
('mssql', 'Microsoft SQL Server'),
('mesos_framework-id', 'Mesos Framework ID'),
]
}
def on_model_change(self, form, model, is_created):
formdata = form.data
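# For JDBC connections, collect the extra__ form fields and store them as JSON in the 'extra' column.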
if formdata['conn_type'] in ['jdbc']:
extra = {
key:formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
return not configuration.has_option('core', 'fernet_key')
@classmethod
def is_secure(cls):
"""
Used to display a message in the Connection list view making it clear
that the passwords can't be encrypted.
"""
is_secure = False
try:
import cryptography
configuration.get('core', 'fernet_key')
is_secure = True
except Exception:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception as e:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
from airflow import configuration
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = configuration.AIRFLOW_CONFIG
if configuration.getboolean("webserver", "expose_config"):
with open(configuration.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
| 34.468937 | 101 | 0.536348 |
baaf58bddd668d5332b78340221fc5ae8415aada
| 984 |
py
|
Python
|
examples/classes/toys_with_info_hiding.py
|
bjucps/cps110
|
da67f832aeded810fad011a6675d049a2201b2b3
|
[
"MIT"
] | null | null | null |
examples/classes/toys_with_info_hiding.py
|
bjucps/cps110
|
da67f832aeded810fad011a6675d049a2201b2b3
|
[
"MIT"
] | null | null | null |
examples/classes/toys_with_info_hiding.py
|
bjucps/cps110
|
da67f832aeded810fad011a6675d049a2201b2b3
|
[
"MIT"
] | 5 |
2020-02-06T20:35:55.000Z
|
2022-02-10T23:01:20.000Z
|
class Toy:
def __init__(self, newdescr: str, newlovable: float):
self.__description = newdescr
self.__lovable = newlovable
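# Double-underscore attributes are name-mangled by Python, which hides them from code outside the class.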
def getDescription(self) -> str:
return self.__description
def getLovable(self) -> float:
return self.__lovable
def setDescription(self, newdescr: str):
if newdescr is None or newdescr == '':
raise ValueError
self.__description = newdescr
def setLovable(self, newlovable: float):
self.__lovable = newlovable
def __repr__(self) -> str:
return '{0} (love factor: {1})'.format(self.__description, self.__lovable)
class ToyChest:
def __init__(self):
self.items = []
def addToy(self, toy: Toy):
self.items.append(toy)
def __repr__(self):
return str(self.items)
chest = ToyChest()
mytoy = Toy('G.I. Joe', 1)
chest.addToy(mytoy)
print(str(chest))
descr = mytoy.getDescription()
mytoy.setDescription('')
nothing = None
| 21.866667 | 82 | 0.635163 |
1aa74ccea974596bc4368ab55748a2644d5bd00e
| 582 |
py
|
Python
|
app.py
|
Mycheny/simbert
|
833398f95cff23be5e303e561a0ea39a5b73c4c7
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
Mycheny/simbert
|
833398f95cff23be5e303e561a0ea39a5b73c4c7
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
Mycheny/simbert
|
833398f95cff23be5e303e561a0ea39a5b73c4c7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @File app.py
# @Time 2020/12/7 17:24
# @Author wcy
# @Software: PyCharm
# @Site
import json
import os
from flask import Blueprint, jsonify, request, Flask
from simbert import gen_synonyms
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
@app.route("/gen", methods=["POST"])
def gen():
args = json.loads(request.data)
text = args.get("text", "")
n = int(args.get("n", 20))
k = int(args.get("k", 10))
return jsonify(gen_synonyms(text, n, k))  # wrap in jsonify so Flask can return the result as JSON
if __name__ == '__main__':
app.run(port=5000)
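# Example request (assuming the server runs locally on port 5000):
#   POST http://127.0.0.1:5000/gen  with JSON body {"text": "...", "n": 20, "k": 10}
# The response is the JSON-serialized output of gen_synonyms (a list of candidate
# sentences, assuming the upstream simbert example's return type).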
| 21.555556 | 52 | 0.646048 |
5e7a184fbab8de60b20867a4e647091083a302ab
| 20,525 |
py
|
Python
|
salt/modules/pillar.py
|
amaclean199/salt
|
8aaac011b4616e3c9e74a1daafb4a2146a5a430f
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/pillar.py
|
amaclean199/salt
|
8aaac011b4616e3c9e74a1daafb4a2146a5a430f
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/pillar.py
|
amaclean199/salt
|
8aaac011b4616e3c9e74a1daafb4a2146a5a430f
|
[
"Apache-2.0"
] | 1 |
2019-06-10T17:42:31.000Z
|
2019-06-10T17:42:31.000Z
|
# -*- coding: utf-8 -*-
'''
Extract the pillar data for this minion
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import collections
# Import third party libs
import copy
import os
import logging
from salt.ext import six
# Import salt libs
import salt.pillar
import salt.utils.crypt
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.functools
import salt.utils.odict
import salt.utils.yaml
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import CommandExecutionError
__proxyenabled__ = ['*']
log = logging.getLogger(__name__)
def get(key,
default=KeyError,
merge=False,
merge_nested_lists=None,
delimiter=DEFAULT_TARGET_DELIM,
pillarenv=None,
saltenv=None):
'''
.. versionadded:: 0.14
Attempt to retrieve the named value from pillar; if the named value is not
available, return the passed default. The default return is an empty string
unless __opts__['pillar_raise_on_missing'] is set to True, in which case a
KeyError will be raised.
If the merge parameter is set to ``True``, the default will be recursively
merged into the returned pillar data.
The value can also represent a value in a nested dict using a ":" delimiter
for the dict. This means that if a dict in pillar looks like this::
{'pkg': {'apache': 'httpd'}}
To retrieve the value associated with the apache key in the pkg dict this
key can be passed::
pkg:apache
merge : ``False``
If ``True``, the retrieved values will be merged into the passed
default. When the default and the retrieved value are both
dictionaries, the dictionaries will be recursively merged.
.. versionadded:: 2014.7.0
.. versionchanged:: 2016.3.7,2016.11.4,2017.7.0
If the default and the retrieved value are not of the same type,
then merging will be skipped and the retrieved value will be
returned. Earlier releases raised an error in these cases.
merge_nested_lists
If set to ``False``, lists nested within the retrieved pillar
dictionary will *overwrite* lists in ``default``. If set to ``True``,
nested lists will be *merged* into lists in ``default``. If unspecified
(the default), this option is inherited from the
:conf_minion:`pillar_merge_lists` minion config option.
.. note::
This option is ignored when ``merge`` is set to ``False``.
.. versionadded:: 2016.11.6
delimiter
Specify an alternate delimiter to use when traversing a nested dict.
This is useful for when the desired key contains a colon. See CLI
example below for usage.
.. versionadded:: 2014.7.0
pillarenv
If specified, this function will query the master to generate fresh
pillar data on the fly, specifically from the requested pillar
environment. Note that this can produce different pillar data than
executing this function without an environment, as its normal behavior
is just to return a value from minion's pillar data in memory (which
can be sourced from more than one pillar environment).
Using this argument will not affect the pillar data in memory. It will
however be slightly slower and use more resources on the master due to
the need for the master to generate and send the minion fresh pillar
data. This tradeoff in performance however allows for the use case
where pillar data is desired only from a single environment.
.. versionadded:: 2017.7.0
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' pillar.get pkg:apache
salt '*' pillar.get abc::def|ghi delimiter='|'
'''
if not __opts__.get('pillar_raise_on_missing'):
if default is KeyError:
default = ''
opt_merge_lists = __opts__.get('pillar_merge_lists', False) if \
merge_nested_lists is None else merge_nested_lists
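# Use the in-memory pillar unless a saltenv/pillarenv was passed, in which case compile a fresh one via items().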
pillar_dict = __pillar__ \
if all(x is None for x in (saltenv, pillarenv)) \
else items(saltenv=saltenv, pillarenv=pillarenv)
if merge:
if isinstance(default, dict):
ret = salt.utils.data.traverse_dict_and_list(
pillar_dict,
key,
{},
delimiter)
if isinstance(ret, collections.Mapping):
default = copy.deepcopy(default)
return salt.utils.dictupdate.update(
default,
ret,
merge_lists=opt_merge_lists)
else:
log.error(
'pillar.get: Default (%s) is a dict, but the returned '
'pillar value (%s) is of type \'%s\'. Merge will be '
'skipped.', default, ret, type(ret).__name__
)
elif isinstance(default, list):
ret = salt.utils.data.traverse_dict_and_list(
pillar_dict,
key,
[],
delimiter)
if isinstance(ret, list):
default = copy.deepcopy(default)
default.extend([x for x in ret if x not in default])
return default
else:
log.error(
'pillar.get: Default (%s) is a list, but the returned '
'pillar value (%s) is of type \'%s\'. Merge will be '
'skipped.', default, ret, type(ret).__name__
)
else:
log.error(
'pillar.get: Default (%s) is of type \'%s\', must be a dict '
'or list to merge. Merge will be skipped.',
default, type(default).__name__
)
ret = salt.utils.data.traverse_dict_and_list(
pillar_dict,
key,
default,
delimiter)
if ret is KeyError:
raise KeyError('Pillar key not found: {0}'.format(key))
return ret
def items(*args, **kwargs):
'''
Calls the master for a fresh pillar and generates the pillar data on the
fly
Contrast with :py:func:`raw` which returns the pillar data that is
currently loaded into the minion.
pillar
If specified, allows for a dictionary of pillar data to be made
available to pillar and ext_pillar rendering. these pillar variables
will also override any variables of the same name in pillar or
ext_pillar.
.. versionadded:: 2015.5.0
pillar_enc
If specified, the data passed in the ``pillar`` argument will be passed
through this renderer to decrypt it.
.. note::
This will decrypt on the minion side, so the specified renderer
must be set up on the minion for this to work. Alternatively,
pillar data can be decrypted master-side. For more information, see
the :ref:`Pillar Encryption <pillar-encryption>` documentation.
Pillar data that is decrypted master-side is not decrypted until
the end of pillar compilation though, so minion-side decryption
will be necessary if the encrypted pillar data must be made
available in a decrypted state during pillar/ext_pillar rendering.
.. versionadded:: 2017.7.0
pillarenv
Pass a specific pillar environment from which to compile pillar data.
If not specified, then the minion's :conf_minion:`pillarenv` option is
not used, and if that also is not specified then all configured pillar
environments will be merged into a single pillar dictionary and
returned.
.. versionadded:: 2016.11.2
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
CLI Example:
.. code-block:: bash
salt '*' pillar.items
'''
# Preserve backwards compatibility
if args:
return item(*args)
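# Resolve the pillar environment: explicit pillarenv kwarg first, then saltenv when pillarenv_from_saltenv is set, else the minion's configured pillarenv.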
pillarenv = kwargs.get('pillarenv')
if pillarenv is None:
if __opts__.get('pillarenv_from_saltenv', False):
pillarenv = kwargs.get('saltenv') or __opts__['saltenv']
else:
pillarenv = __opts__['pillarenv']
pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_override and pillar_enc:
try:
pillar_override = salt.utils.crypt.decrypt(
pillar_override,
pillar_enc,
translate_newlines=True,
opts=__opts__,
valid_rend=__opts__['decrypt_pillar_renderers'])
except Exception as exc:
raise CommandExecutionError(
'Failed to decrypt pillar override: {0}'.format(exc)
)
pillar = salt.pillar.get_pillar(
__opts__,
__grains__,
__opts__['id'],
pillar_override=pillar_override,
pillarenv=pillarenv)
return pillar.compile_pillar()
# Allow pillar.data to also be used to return pillar data
data = salt.utils.functools.alias_function(items, 'data')
def _obfuscate_inner(var):
'''
Recursive obfuscation of collection types.
Leaf or unknown Python types get replaced by the type name
Known collection types trigger recursion.
In the special case of mapping types, keys are not obfuscated
'''
if isinstance(var, (dict, salt.utils.odict.OrderedDict)):
return var.__class__((key, _obfuscate_inner(val))
for key, val in six.iteritems(var))
elif isinstance(var, (list, set, tuple)):
return type(var)(_obfuscate_inner(v) for v in var)
else:
return '<{0}>'.format(var.__class__.__name__)
def obfuscate(*args):
'''
.. versionadded:: 2015.8.0
Same as :py:func:`items`, but replace pillar values with a simple type indication.
This is useful to avoid displaying sensitive information on console or
flooding the console with long output, such as certificates.
For many debug or control purposes, the stakes lie more in dispatching than in
actual values.
In case the value is itself a collection type, obfuscation occurs within the value.
For mapping types, keys are not obfuscated.
Here are some examples:
* ``'secret password'`` becomes ``'<str>'``
* ``['secret', 1]`` becomes ``['<str>', '<int>']``
* ``{'login': 'somelogin', 'pwd': 'secret'}`` becomes
``{'login': '<str>', 'pwd': '<str>'}``
CLI Examples:
.. code-block:: bash
salt '*' pillar.obfuscate
'''
return _obfuscate_inner(items(*args))
# naming chosen for consistency with grains.ls, although it breaks the short
# identifier rule.
def ls(*args):
'''
.. versionadded:: 2015.8.0
Calls the master for a fresh pillar, generates the pillar data on the
fly (same as :py:func:`items`), but only shows the available main keys.
CLI Examples:
.. code-block:: bash
salt '*' pillar.ls
'''
return list(items(*args).keys())
def item(*args, **kwargs):
'''
.. versionadded:: 0.16.2
Return one or more pillar entries from the :ref:`in-memory pillar data
<pillar-in-memory>`.
delimiter
Delimiter used to traverse nested dictionaries.
.. note::
This is different from :py:func:`pillar.get
<salt.modules.pillar.get>` in that no default value can be
specified. :py:func:`pillar.get <salt.modules.pillar.get>` should
probably still be used in most cases to retrieve nested pillar
values, as it is a bit more flexible. One reason to use this
function instead of :py:func:`pillar.get <salt.modules.pillar.get>`
however is when it is desirable to retrieve the values of more than
one key, since :py:func:`pillar.get <salt.modules.pillar.get>` can
only retrieve one key at a time.
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt '*' pillar.item foo
salt '*' pillar.item foo:bar
salt '*' pillar.item foo bar baz
'''
ret = {}
default = kwargs.get('default', '')
delimiter = kwargs.get('delimiter', DEFAULT_TARGET_DELIM)
try:
for arg in args:
ret[arg] = salt.utils.data.traverse_dict_and_list(
__pillar__,
arg,
default,
delimiter)
except KeyError:
pass
return ret
def raw(key=None):
'''
Return the raw pillar data that is currently loaded into the minion.
Contrast with :py:func:`items` which calls the master to fetch the most
up-to-date Pillar.
CLI Example:
.. code-block:: bash
salt '*' pillar.raw
With the optional key argument, you can select a subtree of the
pillar raw data.::
salt '*' pillar.raw key='roles'
'''
if key:
ret = __pillar__.get(key, {})
else:
ret = __pillar__
return ret
def ext(external, pillar=None):
'''
.. versionchanged:: 2016.3.6,2016.11.3,2017.7.0
The supported ext_pillar types are now tunable using the
:conf_master:`on_demand_ext_pillar` config option. Earlier releases
used a hard-coded default.
Generate the pillar and apply an explicit external pillar
external
A single ext_pillar to add to the ext_pillar configuration. This must
be passed as a single section from the ext_pillar configuration (see
CLI examples below). For more complicated ``ext_pillar``
configurations, it can be helpful to use the Python shell to load YAML
configuration into a dictionary, and figure out the correct value to pass:
.. code-block:: python
>>> import salt.utils.yaml
>>> ext_pillar = salt.utils.yaml.safe_load("""
... ext_pillar:
... - git:
... - issue38440 https://github.com/terminalmage/git_pillar:
... - env: base
... """)
>>> ext_pillar
{'ext_pillar': [{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}]}
>>> ext_pillar['ext_pillar'][0]
{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}
In the above example, the value to pass would be
``{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}``.
Note that this would need to be quoted when passing on the CLI (as in
the CLI examples below).
pillar : None
If specified, allows for a dictionary of pillar data to be made
available to pillar and ext_pillar rendering. These pillar variables
will also override any variables of the same name in pillar or
ext_pillar.
.. versionadded:: 2015.5.0
CLI Examples:
.. code-block:: bash
salt '*' pillar.ext '{libvirt: _}'
salt '*' pillar.ext "{'git': ['master https://github.com/myuser/myrepo']}"
salt '*' pillar.ext "{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}"
'''
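# Accept either an already-parsed structure or a YAML string (as passed from the CLI).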
if isinstance(external, six.string_types):
external = salt.utils.yaml.safe_load(external)
pillar_obj = salt.pillar.get_pillar(
__opts__,
__grains__,
__opts__['id'],
__opts__['saltenv'],
ext=external,
pillar_override=pillar)
ret = pillar_obj.compile_pillar()
return ret
def keys(key, delimiter=DEFAULT_TARGET_DELIM):
'''
.. versionadded:: 2015.8.0
Attempt to retrieve a list of keys from the named value from the pillar.
The value can also represent a value in a nested dict using a ":" delimiter
for the dict, similar to how pillar.get works.
delimiter
Specify an alternate delimiter to use when traversing a nested dict
CLI Example:
.. code-block:: bash
salt '*' pillar.keys web:sites
'''
ret = salt.utils.data.traverse_dict_and_list(
__pillar__, key, KeyError, delimiter)
if ret is KeyError:
raise KeyError("Pillar key not found: {0}".format(key))
if not isinstance(ret, dict):
raise ValueError("Pillar value in key {0} is not a dict".format(key))
return ret.keys()
def file_exists(path, saltenv=None):
'''
.. versionadded:: 2016.3.0
This is a master-only function. Calling from the minion is not supported.
Use the given path and search relative to the pillar environments to see if
a file exists at that path.
If the ``saltenv`` argument is given, restrict search to that environment
only.
Will only work with ``pillar_roots``, not external pillars.
Returns True if the file is found, and False otherwise.
path
The path to the file in question. Will be treated as a relative path
saltenv
Optional argument to restrict the search to a specific saltenv
CLI Example:
.. code-block:: bash
salt '*' pillar.file_exists foo/bar.sls
'''
pillar_roots = __opts__.get('pillar_roots')
if not pillar_roots:
raise CommandExecutionError('No pillar_roots found. Are you running '
'this on the master?')
if saltenv:
if saltenv in pillar_roots:
pillar_roots = {saltenv: pillar_roots[saltenv]}
else:
return False
for env in pillar_roots:
for pillar_dir in pillar_roots[env]:
full_path = os.path.join(pillar_dir, path)
if __salt__['file.file_exists'](full_path):
return True
return False
# Provide a jinja function call compatible get aliased as fetch
fetch = get
def filter_by(lookup_dict,
pillar,
merge=None,
default='default',
base=None):
'''
.. versionadded:: 2017.7.0
Look up the given pillar in a given dictionary and return the result
:param lookup_dict: A dictionary, keyed by a pillar, containing a value or
values relevant to systems matching that pillar. For example, a key
could be a pillar for a role and the value could the name of a package
on that particular OS.
The dictionary key can be a globbing pattern. The function will return
the corresponding ``lookup_dict`` value where the pillar value matches
the pattern. For example:
.. code-block:: bash
# this will render 'got some salt' if ``role`` begins with 'salt'
salt '*' pillar.filter_by '{salt*: got some salt, default: salt is not here}' role
:param pillar: The name of a pillar to match with the system's pillar. For
example, the value of the "role" pillar could be used to pull values
from the ``lookup_dict`` dictionary.
The pillar value can be a list. The function will return the
``lookup_dict`` value for a first found item in the list matching
one of the ``lookup_dict`` keys.
:param merge: A dictionary to merge with the results of the pillar
selection from ``lookup_dict``. This allows another dictionary to
override the values in the ``lookup_dict``.
:param default: default lookup_dict's key used if the pillar does not exist
or if the pillar value has no match on lookup_dict. If unspecified
the value is "default".
:param base: A lookup_dict key to use for a base dictionary. The
pillar-selected ``lookup_dict`` is merged over this and then finally
the ``merge`` dictionary is merged. This allows common values for
each case to be collected in the base and overridden by the pillar
selection dictionary and the merge dictionary. Default is unset.
CLI Example:
.. code-block:: bash
salt '*' pillar.filter_by '{web: Serve it up, db: I query, default: x_x}' role
'''
return salt.utils.data.filter_by(lookup_dict=lookup_dict,
lookup=pillar,
traverse=__pillar__,
merge=merge,
default=default,
base=base)
| 33.104839 | 105 | 0.623484 |
87b2299b4e6a9771fc52ea07cd4193d49ff6d7a6
| 5,846 |
py
|
Python
|
django_extensions/management/commands/drop_test_database.py
|
todaycode/django-ext
|
213abda56cf95a3fe4059de106dd7106935fe72a
|
[
"MIT"
] | null | null | null |
django_extensions/management/commands/drop_test_database.py
|
todaycode/django-ext
|
213abda56cf95a3fe4059de106dd7106935fe72a
|
[
"MIT"
] | null | null | null |
django_extensions/management/commands/drop_test_database.py
|
todaycode/django-ext
|
213abda56cf95a3fe4059de106dd7106935fe72a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import logging
import warnings
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.creation import TEST_DATABASE_PREFIX
from six.moves import input
from django_extensions.settings import SQLITE_ENGINES, POSTGRESQL_ENGINES, MYSQL_ENGINES
from django_extensions.management.mysql import parse_mysql_cnf
from django_extensions.management.utils import signalcommand
from django_extensions.utils.deprecation import RemovedInNextVersionWarning
class Command(BaseCommand):
help = "Drops test database for this project."
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--noinput', action='store_false', dest='interactive',
default=True, help='Tells Django to NOT prompt the user for input of any kind.'
)
parser.add_argument(
'-U', '--user', action='store', dest='user', default=None,
help='Use a different database user than the one defined in settings.py'
)
parser.add_argument(
'-P', '--password', action='store', dest='password', default=None,
help='Use a different database password than the one defined in settings.py'
)
parser.add_argument(
'-D', '--dbname', action='store', dest='dbname', default=None,
help='Use a different database name than the one defined in settings.py'
)
parser.add_argument(
'-R', '--router', action='store', dest='router', default=DEFAULT_DB_ALIAS,
help='Use this router-database instead of the one defined in settings.py'
)
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to run command for. Defaults to the "%s" database.' % DEFAULT_DB_ALIAS,
)
@signalcommand
def handle(self, *args, **options):
"""Drop test database for this project."""
database = options['database']
if options['router'] != DEFAULT_DB_ALIAS:
warnings.warn("--router is deprecated. You should use --database.", RemovedInNextVersionWarning, stacklevel=2)
database = options['router']
dbinfo = settings.DATABASES.get(database)
if dbinfo is None:
raise CommandError("Unknown database %s" % database)
engine = dbinfo.get('ENGINE')
user = password = database_name = database_host = database_port = ''
if engine in MYSQL_ENGINES:
(user, password, database_name, database_host, database_port) = parse_mysql_cnf(dbinfo)
user = options['user'] or dbinfo.get('USER') or user
password = options['password'] or dbinfo.get('PASSWORD') or password
try:
database_name = dbinfo['TEST']['NAME']
except KeyError:
database_name = None
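# Fall back to the conventional 'test_' + NAME database when no TEST NAME is configured.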
if database_name is None:
database_name = TEST_DATABASE_PREFIX + (options['dbname'] or dbinfo.get('NAME'))
if database_name is None or database_name == '':
raise CommandError("You need to specify DATABASE_NAME in your Django settings file.")
database_host = dbinfo.get('HOST') or database_host
database_port = dbinfo.get('PORT') or database_port
verbosity = options["verbosity"]
if options['interactive']:
confirm = input("""
You have requested to drop the test database.
This will IRREVERSIBLY DESTROY
ALL data in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (database_name,))
else:
confirm = 'yes'
if confirm != 'yes':
print("Reset cancelled.")
return
if engine in SQLITE_ENGINES:
try:
logging.info("Unlinking %s database" % engine)
if os.path.isfile(database_name):
os.unlink(database_name)
except OSError:
return
elif engine in MYSQL_ENGINES:
import MySQLdb as Database
kwargs = {
'user': user,
'passwd': password,
}
if database_host.startswith('/'):
kwargs['unix_socket'] = database_host
else:
kwargs['host'] = database_host
if database_port:
kwargs['port'] = int(database_port)
connection = Database.connect(**kwargs)
drop_query = 'DROP DATABASE IF EXISTS `%s`' % database_name
logging.info('Executing: "' + drop_query + '"')
connection.query(drop_query)
elif engine in POSTGRESQL_ENGINES:
import psycopg2 as Database # NOQA
conn_params = {'database': 'template1'}
if user:
conn_params['user'] = user
if password:
conn_params['password'] = password
if database_host:
conn_params['host'] = database_host
if database_port:
conn_params['port'] = database_port
connection = Database.connect(**conn_params)
connection.set_isolation_level(0)  # ISOLATION_LEVEL_AUTOCOMMIT: DROP DATABASE cannot run inside a transaction
cursor = connection.cursor()
drop_query = "DROP DATABASE IF EXISTS \"%s\";" % database_name
logging.info('Executing: "' + drop_query + '"')
try:
cursor.execute(drop_query)
except Database.ProgrammingError as e:
logging.exception("Error: %s" % str(e))
return
else:
raise CommandError("Unknown database engine %s" % engine)
if verbosity >= 2 or options['interactive']:
print("Reset successful.")
| 38.20915 | 122 | 0.605542 |
1a5cd497ec9c3e22326a1639cfed468c652cda25
| 607 |
py
|
Python
|
tests/test_colorchanger.py
|
wcchristian/pi-tv-colorchange
|
8e6d51dd3c9835bf35dd0b8de10c65adf048c782
|
[
"MIT"
] | null | null | null |
tests/test_colorchanger.py
|
wcchristian/pi-tv-colorchange
|
8e6d51dd3c9835bf35dd0b8de10c65adf048c782
|
[
"MIT"
] | 12 |
2019-05-25T02:57:34.000Z
|
2019-06-19T03:04:34.000Z
|
tests/test_colorchanger.py
|
wcchristian/pi-tv-colorchange
|
8e6d51dd3c9835bf35dd0b8de10c65adf048c782
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import MagicMock
from colorchanger import colorchanger
class MyTestCase(unittest.TestCase):
@staticmethod
def test_set_hue_color():
# Given
hue_light_id = 1
rgb_color = (0, 255, 0)
colorchanger.hue_bridge.set_light = MagicMock(return_value=None)
xy = colorchanger.converter.rgb_to_xy(0, 255, 0)
# When
colorchanger.set_hue_color(hue_light_id, rgb_color)
# Then
colorchanger.hue_bridge.set_light.assert_called_with(hue_light_id, 'xy', xy)
if __name__ == '__main__':
unittest.main()
| 24.28 | 84 | 0.686985 |
bb04809805afba0f681673359c6b31ae62b0ee2b
| 5,082 |
py
|
Python
|
hw/ip/otbn/dv/rig/rig/gens/bad_branch.py
|
y-srini/opentitan
|
b46a08d07671c9d6c020e54fb44424f1611c43a0
|
[
"Apache-2.0"
] | null | null | null |
hw/ip/otbn/dv/rig/rig/gens/bad_branch.py
|
y-srini/opentitan
|
b46a08d07671c9d6c020e54fb44424f1611c43a0
|
[
"Apache-2.0"
] | 1 |
2022-02-15T22:20:51.000Z
|
2022-02-15T22:20:51.000Z
|
hw/ip/otbn/dv/rig/rig/gens/bad_branch.py
|
y-srini/opentitan
|
b46a08d07671c9d6c020e54fb44424f1611c43a0
|
[
"Apache-2.0"
] | 1 |
2021-12-04T06:08:11.000Z
|
2021-12-04T06:08:11.000Z
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import random
from typing import Optional
from shared.operand import ImmOperandType, RegOperandType
from shared.insn_yaml import InsnsFile
from ..config import Config
from ..model import Model
from ..program import ProgInsn, Program
from ..snippet import ProgSnippet
from ..snippet_gen import GenCont, GenRet, SnippetGen
class BadBranch(SnippetGen):
'''A snippet generator that generates program ending branch instructions.
This includes out of bounds branches in negative and positive extremes
'''
ends_program = True
def __init__(self, cfg: Config, insns_file: InsnsFile) -> None:
super().__init__()
self.insns = []
self.weights = []
self.beq = self._get_named_insn(insns_file, 'beq')
self.bne = self._get_named_insn(insns_file, 'bne')
# beq and bne expect operands: grs1, grs2, offset
for insn in [self.beq, self.bne]:
if not (len(insn.operands) == 3 and
isinstance(insn.operands[0].op_type, RegOperandType) and
insn.operands[0].op_type.reg_type == 'gpr' and
not insn.operands[0].op_type.is_dest() and
isinstance(insn.operands[1].op_type, RegOperandType) and
insn.operands[1].op_type.reg_type == 'gpr' and
not insn.operands[1].op_type.is_dest() and
isinstance(insn.operands[2].op_type, ImmOperandType) and
insn.operands[2].op_type.signed):
raise RuntimeError('{} instruction from instructions file is not '
'the shape expected by the Branch generator.'
.format(insn.mnemonic))
self.imm_op_type = self.bne.operands[2].op_type
for insn in insns_file.insns:
if insn.mnemonic in ['beq', 'bne']:
weight = cfg.insn_weights.get(insn.mnemonic)
if weight > 0:
self.weights.append(weight)
self.insns.append(insn)
# Check that at least one instruction has a positive weight
assert len(self.insns) == len(self.weights)
if not self.weights:
self.disabled = True
def gen(self,
cont: GenCont,
model: Model,
program: Program) -> Optional[GenRet]:
# Max/Min Offsets for BXX (-2048 * 2, 2047 * 2)
# We give 0 to get_op_val_range method because it tries to calculate
# range with regards to given PC.
imm_rng = self.imm_op_type.get_op_val_range(model.pc)
assert imm_rng is not None
min_offset, max_offset = imm_rng
# Get known registers. We always have x0.
# So it should never fail.
known_regs = model.regs_with_known_vals('gpr')
assert known_regs is not None
# Pick a random register among known registers.
idx, value = random.choice(known_regs)
equals = []
not_equals = []
for reg_idx, reg_val in known_regs:
if reg_val == value:
equals.append(reg_idx)
else:
not_equals.append(reg_idx)
# Get the chosen base register index as grs1 and grs2 operand. This is
# because we want to branch to the faulty addresses with this snippet.
op_val_grs1 = idx
assert op_val_grs1 is not None
# We need to pick an out of bounds offset from all available values (A)
# Best way to solve this is exclude the set of valid choices (B)
# We know tgt_addr can be max : pc + max_offset (A_max)
# We know tgt_addr can be min : pc + min_offset (A_min)
# Aim: (PC + min_offset, PC + max_offset) - (0, imem_size)
# Choose target from (A_min, A_max-B_max)
local_max = max_offset - program.imem_size
tgt_addr = random.randrange(min_offset, local_max, 2)
# If chosen value is in B, push it out by adding B_max
if tgt_addr >= 0:
tgt_addr += program.imem_size
assert tgt_addr < 0 or tgt_addr > program.imem_size
off_enc = self.imm_op_type.op_val_to_enc_val(tgt_addr, model.pc)
assert off_enc is not None
# Pick the instruction from the weighted list
if not_equals:
chosen_insn = random.choices(self.insns, weights=self.weights)[0]
else:
chosen_insn = self.beq
beq_weight = self.weights[0]
if not beq_weight:
return None
grs2_choices = equals if chosen_insn.mnemonic == 'beq' else not_equals
assert grs2_choices
op_val_grs2 = random.choice(grs2_choices)
op_vals = [op_val_grs1, op_val_grs2, off_enc]
prog_insn = ProgInsn(chosen_insn, op_vals, None)
snippet = ProgSnippet(model.pc, [prog_insn])
snippet.insert_into_program(program)
return (snippet, True, model)
| 36.042553 | 82 | 0.617277 |
08c15f906ef35f42451e358f33ceea13926f91fb
| 23,353 |
py
|
Python
|
electrum_plcu/submarine_swaps.py
|
plc-ultima/electrum-plcu
|
149e53db286d831b5aba78983c4f9d4c0ff485a5
|
[
"MIT"
] | null | null | null |
electrum_plcu/submarine_swaps.py
|
plc-ultima/electrum-plcu
|
149e53db286d831b5aba78983c4f9d4c0ff485a5
|
[
"MIT"
] | null | null | null |
electrum_plcu/submarine_swaps.py
|
plc-ultima/electrum-plcu
|
149e53db286d831b5aba78983c4f9d4c0ff485a5
|
[
"MIT"
] | null | null | null |
import asyncio
import json
import os
from typing import TYPE_CHECKING, Optional, Dict, Union
from decimal import Decimal
import math
import attr
from .crypto import sha256, hash_160
from .ecc import ECPrivkey
from .bitcoin import (script_to_p2wsh, opcodes, p2wsh_nested_script, push_script,
is_segwit_address, construct_witness)
from .transaction import PartialTxInput, PartialTxOutput, PartialTransaction
from .transaction import script_GetOp, match_script_against_template, OPPushDataGeneric, OPPushDataPubkey
from .util import log_exceptions
from .lnutil import REDEEM_AFTER_DOUBLE_SPENT_DELAY, ln_dummy_address
from .bitcoin import dust_threshold
from .logging import Logger
from .lnutil import hex_to_bytes
from .json_db import StoredObject
from . import constants
if TYPE_CHECKING:
from .network import Network
from .wallet import Abstract_Wallet
from .lnwatcher import LNWalletWatcher
from .lnworker import LNWallet
API_URL_MAINNET = 'https://swaps.electrum.plcultima.info/api'
API_URL_TESTNET = 'https://swaps.electrum.plcultima.info/testnet'
API_URL_REGTEST = 'https://localhost/api'
WITNESS_TEMPLATE_SWAP = [
opcodes.OP_HASH160,
OPPushDataGeneric(lambda x: x == 20),
opcodes.OP_EQUAL,
opcodes.OP_IF,
OPPushDataPubkey,
opcodes.OP_ELSE,
OPPushDataGeneric(None),
opcodes.OP_CHECKLOCKTIMEVERIFY,
opcodes.OP_DROP,
OPPushDataPubkey,
opcodes.OP_ENDIF,
opcodes.OP_CHECKSIG
]
# The script of the reverse swaps has one extra check in it to verify
# that the length of the preimage is 32. This is required because in
# the reverse swaps the preimage is generated by the user and to
# settle the hold invoice, you need a preimage with 32 bytes . If that
# check wasn't there the user could generate a preimage with a
# different length which would still allow for claiming the onchain
# coins but the invoice couldn't be settled
WITNESS_TEMPLATE_REVERSE_SWAP = [
opcodes.OP_SIZE,
OPPushDataGeneric(None),
opcodes.OP_EQUAL,
opcodes.OP_IF,
opcodes.OP_HASH160,
OPPushDataGeneric(lambda x: x == 20),
opcodes.OP_EQUALVERIFY,
OPPushDataPubkey,
opcodes.OP_ELSE,
opcodes.OP_DROP,
OPPushDataGeneric(None),
opcodes.OP_CHECKLOCKTIMEVERIFY,
opcodes.OP_DROP,
OPPushDataPubkey,
opcodes.OP_ENDIF,
opcodes.OP_CHECKSIG
]
@attr.s
class SwapData(StoredObject):
is_reverse = attr.ib(type=bool)
locktime = attr.ib(type=int)
onchain_amount = attr.ib(type=int) # in sats
lightning_amount = attr.ib(type=int) # in sats
redeem_script = attr.ib(type=bytes, converter=hex_to_bytes)
preimage = attr.ib(type=bytes, converter=hex_to_bytes)
prepay_hash = attr.ib(type=Optional[bytes], converter=hex_to_bytes)
privkey = attr.ib(type=bytes, converter=hex_to_bytes)
lockup_address = attr.ib(type=str)
funding_txid = attr.ib(type=Optional[str])
spending_txid = attr.ib(type=Optional[str])
is_redeemed = attr.ib(type=bool)
def create_claim_tx(
*,
txin: PartialTxInput,
witness_script: bytes,
preimage: Union[bytes, int], # 0 if timing out forward-swap
privkey: bytes,
address: str,
amount_sat: int,
locktime: int,
) -> PartialTransaction:
"""Create tx to either claim successful reverse-swap,
or to get refunded for timed-out forward-swap.
"""
if is_segwit_address(txin.address):
txin.script_type = 'p2wsh'
txin.script_sig = b''
else:
txin.script_type = 'p2wsh-p2sh'
txin.redeem_script = bytes.fromhex(p2wsh_nested_script(witness_script.hex()))
txin.script_sig = bytes.fromhex(push_script(txin.redeem_script.hex()))
txin.witness_script = witness_script
txout = PartialTxOutput.from_address_and_value(address, amount_sat)
tx = PartialTransaction.from_io([txin], [txout], version=2, locktime=locktime)
#tx.set_rbf(True)
sig = bytes.fromhex(tx.sign_txin(0, privkey))
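# Witness stack: our signature, then the preimage (or 0 when refunding a timed-out forward swap), then the full witness script.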
witness = [sig, preimage, witness_script]
txin.witness = bytes.fromhex(construct_witness(witness))
return tx
class SwapManager(Logger):
network: Optional['Network'] = None
lnwatcher: Optional['LNWalletWatcher'] = None
def __init__(self, *, wallet: 'Abstract_Wallet', lnworker: 'LNWallet'):
Logger.__init__(self)
self.normal_fee = 0
self.lockup_fee = 0
self.percentage = 0
self.min_amount = 0
self._max_amount = 0
self.wallet = wallet
self.lnworker = lnworker
self.swaps = self.wallet.db.get_dict('submarine_swaps') # type: Dict[str, SwapData]
self.prepayments = {} # type: Dict[bytes, bytes] # fee_preimage -> preimage
for k, swap in self.swaps.items():
if swap.is_reverse and swap.prepay_hash is not None:
self.prepayments[swap.prepay_hash] = bytes.fromhex(k)
# api url
if constants.net == constants.BitcoinMainnet:
self.api_url = API_URL_MAINNET
elif constants.net == constants.BitcoinTestnet:
self.api_url = API_URL_TESTNET
else:
self.api_url = API_URL_REGTEST
def start_network(self, *, network: 'Network', lnwatcher: 'LNWalletWatcher'):
assert network
assert lnwatcher
self.network = network
self.lnwatcher = lnwatcher
for k, swap in self.swaps.items():
if swap.is_redeemed:
continue
self.add_lnwatcher_callback(swap)
@log_exceptions
async def _claim_swap(self, swap: SwapData) -> None:
assert self.network
assert self.lnwatcher
if not self.lnwatcher.is_up_to_date():
return
current_height = self.network.get_local_height()
delta = current_height - swap.locktime
if not swap.is_reverse and delta < 0:
# too early for refund
return
txos = self.lnwatcher.get_addr_outputs(swap.lockup_address)
for txin in txos.values():
if swap.is_reverse and txin.value_sats() < swap.onchain_amount:
self.logger.info('amount too low, we should not reveal the preimage')
continue
spent_height = txin.spent_height
if spent_height is not None:
if spent_height > 0 and current_height - spent_height > REDEEM_AFTER_DOUBLE_SPENT_DELAY:
self.logger.info(f'stop watching swap {swap.lockup_address}')
self.lnwatcher.remove_callback(swap.lockup_address)
swap.is_redeemed = True
continue
# FIXME the mining fee should depend on swap.is_reverse.
# the txs are not the same size...
amount_sat = txin.value_sats() - self.get_claim_fee()
if amount_sat < dust_threshold():
self.logger.info('utxo value below dust threshold')
continue
address = self.wallet.get_receiving_address()
if swap.is_reverse: # successful reverse swap
preimage = swap.preimage
locktime = 0
else: # timing out forward swap
preimage = 0
locktime = swap.locktime
tx = create_claim_tx(
txin=txin,
witness_script=swap.redeem_script,
preimage=preimage,
privkey=swap.privkey,
address=address,
amount_sat=amount_sat,
locktime=locktime,
)
await self.network.broadcast_transaction(tx)
# save txid
if swap.is_reverse:
swap.spending_txid = tx.txid()
else:
self.wallet.set_label(tx.txid(), 'Swap refund')
def get_claim_fee(self):
return self.wallet.config.estimate_fee(136, allow_fallback_to_static_rates=True)
def get_swap(self, payment_hash: bytes) -> Optional[SwapData]:
# for history
swap = self.swaps.get(payment_hash.hex())
if swap:
return swap
payment_hash = self.prepayments.get(payment_hash)
if payment_hash:
return self.swaps.get(payment_hash.hex())
def add_lnwatcher_callback(self, swap: SwapData) -> None:
callback = lambda: self._claim_swap(swap)
self.lnwatcher.add_callback(swap.lockup_address, callback)
def num_sats_can_receive(self):
# finding how to do MPP is too hard for sender,
# might result in our coins being locked
return self.lnworker.num_sats_can_receive_no_mpp()
async def normal_swap(
self,
*,
lightning_amount_sat: int,
expected_onchain_amount_sat: int,
password,
tx: PartialTransaction = None,
) -> str:
"""send on-chain PLCU, receive on Lightning
- User generates an LN invoice with RHASH, and knows preimage.
- User creates on-chain output locked to RHASH.
- Server pays LN invoice. User reveals preimage.
- Server spends the on-chain output using preimage.
"""
assert self.network
assert self.lnwatcher
privkey = os.urandom(32)
pubkey = ECPrivkey(privkey).get_public_key_bytes(compressed=True)
lnaddr, invoice = await self.lnworker.create_invoice(
amount_msat=lightning_amount_sat * 1000,
message='swap',
expiry=3600 * 24,
)
payment_hash = lnaddr.paymenthash
preimage = self.lnworker.get_preimage(payment_hash)
request_data = {
"type": "submarine",
"pairId": "PLCU/PLCU",
"orderSide": "sell",
"invoice": invoice,
"refundPublicKey": pubkey.hex()
}
response = await self.network._send_http_on_proxy(
'post',
self.api_url + '/createswap',
json=request_data,
timeout=30)
data = json.loads(response)
response_id = data["id"]
zeroconf = data["acceptZeroConf"]
onchain_amount = data["expectedAmount"]
locktime = data["timeoutBlockHeight"]
lockup_address = data["address"]
redeem_script = data["redeemScript"]
# verify redeem_script is built with our pubkey and preimage
redeem_script = bytes.fromhex(redeem_script)
parsed_script = [x for x in script_GetOp(redeem_script)]
if not match_script_against_template(redeem_script, WITNESS_TEMPLATE_SWAP):
raise Exception("fswap check failed: scriptcode does not match template")
if script_to_p2wsh(redeem_script.hex()) != lockup_address:
raise Exception("fswap check failed: inconsistent scriptcode and address")
if hash_160(preimage) != parsed_script[1][1]:
raise Exception("fswap check failed: our preimage not in script")
if pubkey != parsed_script[9][1]:
raise Exception("fswap check failed: our pubkey not in script")
if locktime != int.from_bytes(parsed_script[6][1], byteorder='little'):
raise Exception("fswap check failed: inconsistent locktime and script")
# check that onchain_amount is not more than what we estimated
if onchain_amount > expected_onchain_amount_sat:
raise Exception(f"fswap check failed: onchain_amount is more than what we estimated: "
f"{onchain_amount} > {expected_onchain_amount_sat}")
# verify that they are not locking up funds for more than a day
if locktime - self.network.get_local_height() >= 576:
raise Exception("fswap check failed: locktime too far in future")
# create funding tx
funding_output = PartialTxOutput.from_address_and_value(lockup_address, onchain_amount)
if tx is None:
tx = self.wallet.create_transaction(outputs=[funding_output], rbf=False, password=password)
else:
dummy_output = PartialTxOutput.from_address_and_value(ln_dummy_address(), expected_onchain_amount_sat)
tx.outputs().remove(dummy_output)
tx.add_outputs([funding_output])
tx.set_rbf(False)
self.wallet.sign_transaction(tx, password)
# save swap data in wallet in case we need a refund
swap = SwapData(
redeem_script = redeem_script,
locktime = locktime,
privkey = privkey,
preimage = preimage,
prepay_hash = None,
lockup_address = lockup_address,
onchain_amount = expected_onchain_amount_sat,
lightning_amount = lightning_amount_sat,
is_reverse = False,
is_redeemed = False,
funding_txid = tx.txid(),
spending_txid = None,
)
self.swaps[payment_hash.hex()] = swap
self.add_lnwatcher_callback(swap)
await self.network.broadcast_transaction(tx)
return tx.txid()
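    # A minimal usage sketch (not part of the original code). Names and amounts
    # are illustrative and assume an initialized SwapManager instance `sm`,
    # called from an async context with a funded, unlocked wallet:
    #
    #   onchain_sat = sm.get_send_amount(100_000, is_reverse=False)
    #   funding_txid = await sm.normal_swap(
    #       lightning_amount_sat=100_000,
    #       expected_onchain_amount_sat=onchain_sat,
    #       password=None,
    #   )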
async def reverse_swap(
self,
*,
lightning_amount_sat: int,
expected_onchain_amount_sat: int,
) -> bool:
"""send on Lightning, receive on-chain
- User generates preimage, RHASH. Sends RHASH to server.
- Server creates an LN invoice for RHASH.
- User pays LN invoice - except server needs to hold the HTLC as preimage is unknown.
- Server creates on-chain output locked to RHASH.
- User spends on-chain output, revealing preimage.
- Server fulfills HTLC using preimage.
Note: expected_onchain_amount_sat is BEFORE deducting the on-chain claim tx fee.
"""
assert self.network
assert self.lnwatcher
privkey = os.urandom(32)
pubkey = ECPrivkey(privkey).get_public_key_bytes(compressed=True)
preimage = os.urandom(32)
preimage_hash = sha256(preimage)
request_data = {
"type": "reversesubmarine",
"pairId": "PLCU/PLCU",
"orderSide": "buy",
"invoiceAmount": lightning_amount_sat,
"preimageHash": preimage_hash.hex(),
"claimPublicKey": pubkey.hex()
}
response = await self.network._send_http_on_proxy(
'post',
self.api_url + '/createswap',
json=request_data,
timeout=30)
data = json.loads(response)
invoice = data['invoice']
fee_invoice = data.get('minerFeeInvoice')
lockup_address = data['lockupAddress']
redeem_script = data['redeemScript']
locktime = data['timeoutBlockHeight']
onchain_amount = data["onchainAmount"]
response_id = data['id']
# verify redeem_script is built with our pubkey and preimage
redeem_script = bytes.fromhex(redeem_script)
parsed_script = [x for x in script_GetOp(redeem_script)]
if not match_script_against_template(redeem_script, WITNESS_TEMPLATE_REVERSE_SWAP):
raise Exception("rswap check failed: scriptcode does not match template")
if script_to_p2wsh(redeem_script.hex()) != lockup_address:
raise Exception("rswap check failed: inconsistent scriptcode and address")
if hash_160(preimage) != parsed_script[5][1]:
raise Exception("rswap check failed: our preimage not in script")
if pubkey != parsed_script[7][1]:
raise Exception("rswap check failed: our pubkey not in script")
if locktime != int.from_bytes(parsed_script[10][1], byteorder='little'):
raise Exception("rswap check failed: inconsistent locktime and script")
# check that the onchain amount is what we expected
if onchain_amount < expected_onchain_amount_sat:
raise Exception(f"rswap check failed: onchain_amount is less than what we expected: "
f"{onchain_amount} < {expected_onchain_amount_sat}")
# verify that we will have enough time to get our tx confirmed
if locktime - self.network.get_local_height() <= 60:
raise Exception("rswap check failed: locktime too close")
# verify invoice preimage_hash
lnaddr = self.lnworker._check_invoice(invoice)
invoice_amount = lnaddr.get_amount_sat()
if lnaddr.paymenthash != preimage_hash:
raise Exception("rswap check failed: inconsistent RHASH and invoice")
# check that the lightning amount is what we requested
if fee_invoice:
fee_lnaddr = self.lnworker._check_invoice(fee_invoice)
invoice_amount += fee_lnaddr.get_amount_sat()
prepay_hash = fee_lnaddr.paymenthash
else:
prepay_hash = None
if int(invoice_amount) != lightning_amount_sat:
raise Exception(f"rswap check failed: invoice_amount ({invoice_amount}) "
f"not what we requested ({lightning_amount_sat})")
# save swap data to wallet file
swap = SwapData(
redeem_script = redeem_script,
locktime = locktime,
privkey = privkey,
preimage = preimage,
prepay_hash = prepay_hash,
lockup_address = lockup_address,
onchain_amount = onchain_amount,
lightning_amount = lightning_amount_sat,
is_reverse = True,
is_redeemed = False,
funding_txid = None,
spending_txid = None,
)
self.swaps[preimage_hash.hex()] = swap
# add callback to lnwatcher
self.add_lnwatcher_callback(swap)
# initiate payment.
if fee_invoice:
self.prepayments[prepay_hash] = preimage_hash
asyncio.ensure_future(self.lnworker.pay_invoice(fee_invoice, attempts=10))
# initiate payment.
success, log = await self.lnworker.pay_invoice(invoice, attempts=10)
return success
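    # A minimal usage sketch (illustrative only, same assumed `sm` as in the
    # normal_swap note above). Per the docstring, expected_onchain_amount_sat
    # is the amount BEFORE the on-chain claim-tx fee is deducted, so the public
    # get_recv_amount() result has the claim fee added back here:
    #
    #   expected_sat = sm.get_recv_amount(100_000, is_reverse=True) + sm.get_claim_fee()
    #   success = await sm.reverse_swap(
    #       lightning_amount_sat=100_000,
    #       expected_onchain_amount_sat=expected_sat,
    #   )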
async def get_pairs(self) -> None:
assert self.network
response = await self.network._send_http_on_proxy(
'get',
self.api_url + '/getpairs',
timeout=30)
pairs = json.loads(response)
fees = pairs['pairs']['PLCU/PLCU']['fees']
self.percentage = fees['percentage']
self.normal_fee = fees['minerFees']['baseAsset']['normal']
self.lockup_fee = fees['minerFees']['baseAsset']['reverse']['lockup']
limits = pairs['pairs']['PLCU/PLCU']['limits']
self.min_amount = limits['minimal']
self._max_amount = limits['maximal']
def get_max_amount(self):
return self._max_amount
def check_invoice_amount(self, x):
return x >= self.min_amount and x <= self._max_amount
def _get_recv_amount(self, send_amount: Optional[int], *, is_reverse: bool) -> Optional[int]:
"""For a given swap direction and amount we send, returns how much we will receive.
Note: in the reverse direction, the mining fee for the on-chain claim tx is NOT accounted for.
In the reverse direction, the result matches what the swap server returns as response["onchainAmount"].
"""
if send_amount is None:
return
x = Decimal(send_amount)
percentage = Decimal(self.percentage)
if is_reverse:
if not self.check_invoice_amount(x):
return
# see/ref:
# https://github.com/BoltzExchange/boltz-backend/blob/e7e2d30f42a5bea3665b164feb85f84c64d86658/lib/service/Service.ts#L948
percentage_fee = math.ceil(percentage * x / 100)
base_fee = self.lockup_fee
x -= percentage_fee + base_fee
x = math.floor(x)
if x < dust_threshold():
return
else:
x -= self.normal_fee
percentage_fee = math.ceil(x * percentage / (100 + percentage))
x -= percentage_fee
if not self.check_invoice_amount(x):
return
x = int(x)
return x
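    # Worked example (all numbers are assumptions, not real server values):
    # with percentage = 0.5, lockup_fee = 500 and send_amount = 100_000 in the
    # reverse direction, percentage_fee = ceil(0.5 * 100_000 / 100) = 500, so
    # the pre-claim-fee on-chain amount is 100_000 - 500 - 500 = 99_000.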
def _get_send_amount(self, recv_amount: Optional[int], *, is_reverse: bool) -> Optional[int]:
"""For a given swap direction and amount we want to receive, returns how much we will need to send.
Note: in the reverse direction, the mining fee for the on-chain claim tx is NOT accounted for.
In the forward direction, the result matches what the swap server returns as response["expectedAmount"].
"""
if not recv_amount:
return
x = Decimal(recv_amount)
percentage = Decimal(self.percentage)
if is_reverse:
# see/ref:
# https://github.com/BoltzExchange/boltz-backend/blob/e7e2d30f42a5bea3665b164feb85f84c64d86658/lib/service/Service.ts#L928
# https://github.com/BoltzExchange/boltz-backend/blob/e7e2d30f42a5bea3665b164feb85f84c64d86658/lib/service/Service.ts#L958
base_fee = self.lockup_fee
x += base_fee
x = math.ceil(x / ((100 - percentage) / 100))
if not self.check_invoice_amount(x):
return
else:
if not self.check_invoice_amount(x):
return
# see/ref:
# https://github.com/BoltzExchange/boltz-backend/blob/e7e2d30f42a5bea3665b164feb85f84c64d86658/lib/service/Service.ts#L708
# https://github.com/BoltzExchange/boltz-backend/blob/e7e2d30f42a5bea3665b164feb85f84c64d86658/lib/rates/FeeProvider.ts#L90
percentage_fee = math.ceil(percentage * x / 100)
x += percentage_fee + self.normal_fee
x = int(x)
return x
def get_recv_amount(self, send_amount: Optional[int], *, is_reverse: bool) -> Optional[int]:
recv_amount = self._get_recv_amount(send_amount, is_reverse=is_reverse)
# sanity check calculation can be inverted
if recv_amount is not None:
inverted_send_amount = self._get_send_amount(recv_amount, is_reverse=is_reverse)
# accept off-by ones as amt_rcv = recv_amt(send_amt(amt_rcv)) only up to +-1
if abs(send_amount - inverted_send_amount) > 1:
raise Exception(f"calc-invert-sanity-check failed. is_reverse={is_reverse}. "
f"send_amount={send_amount} -> recv_amount={recv_amount} -> inverted_send_amount={inverted_send_amount}")
# account for on-chain claim tx fee
if is_reverse and recv_amount is not None:
recv_amount -= self.get_claim_fee()
return recv_amount
def get_send_amount(self, recv_amount: Optional[int], *, is_reverse: bool) -> Optional[int]:
send_amount = self._get_send_amount(recv_amount, is_reverse=is_reverse)
# sanity check calculation can be inverted
if send_amount is not None:
inverted_recv_amount = self._get_recv_amount(send_amount, is_reverse=is_reverse)
if recv_amount != inverted_recv_amount:
raise Exception(f"calc-invert-sanity-check failed. is_reverse={is_reverse}. "
f"recv_amount={recv_amount} -> send_amount={send_amount} -> inverted_recv_amount={inverted_recv_amount}")
# account for on-chain claim tx fee
if is_reverse and send_amount is not None:
send_amount += self.get_claim_fee()
return send_amount
| 42.928309 | 137 | 0.642359 |
7090e1b10146ef4c5d847f5f61e77af035ffb594
| 3,917 |
py
|
Python
|
run_with_submitit.py
|
gstoica27/Swin-Transformer
|
79ef4e0284f95d25a9bb64cfc72d6d683aab0bef
|
[
"MIT"
] | null | null | null |
run_with_submitit.py
|
gstoica27/Swin-Transformer
|
79ef4e0284f95d25a9bb64cfc72d6d683aab0bef
|
[
"MIT"
] | null | null | null |
run_with_submitit.py
|
gstoica27/Swin-Transformer
|
79ef4e0284f95d25a9bb64cfc72d6d683aab0bef
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as swin
import submitit
# Please change path before use - one time set up
LOGS_PATH = "/nethome/bdevnani3/raid/Swin-Transformer/logs"
def parse_args():
parent_parser = swin.parse_option()
parser = argparse.ArgumentParser("Submitit for swin", parents=[parent_parser])
parser.add_argument(
"--ngpus", default=2, type=int, help="Number of gpus to request on each node"
)
parser.add_argument(
"--nodes", default=1, type=int, help="Number of nodes to request"
)
parser.add_argument(
"--cpus_per_task", default=4, type=int, help="Number of nodes to request"
)
parser.add_argument("--timeout", default=60, type=int, help="Duration of the job")
parser.add_argument(
"--job_dir", default="", type=str, help="Job dir. Leave empty for automatic."
)
parser.add_argument(
"-slurm_partition", type=str, default="overcap", help="slurm partition"
)
parser.add_argument("-submitit_run", type=bool, default=True)
args, _ = parser.parse_known_args()
return args
def get_shared_folder() -> Path:
p = Path(LOGS_PATH)
p.mkdir(exist_ok=True)
return p
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
# lazy imports because we have no guarantees on order of imports
import main as swin
self._setup_gpu_args()
swin.run(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
# lazy imports because we have no guarantees on order of imports
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(
str(self.args.output_dir).replace("%j", str(job_env.job_id))
)
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
# folder = Path("logs/")
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
    # basically, any slurm parameters (exclude, cpus_per_task, etc.) can be added here
executor.update_parameters(
gpus_per_node=args.ngpus,
tasks_per_node=args.ngpus, # one task per GPU
nodes=args.nodes,
cpus_per_task=args.cpus_per_task,
timeout_min=args.timeout, # max is 60 * 72
slurm_partition=args.slurm_partition,
)
if args.slurm_partition == "overcap":
executor.update_parameters(slurm_account=args.slurm_partition)
executor.update_parameters(name="SWIN")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
return job
if __name__ == "__main__":
job = main()
# import pdb; pdb.set_trace()
# job._interrupt(timeout=(False,True))
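# Example invocation (a sketch only; the partition name and timeout depend on
# the cluster):
#
#   python run_with_submitit.py --ngpus 2 --nodes 1 --timeout 60 \
#       -slurm_partition overcap
#
# The parser also accepts whatever options main.parse_option() defines, since
# that parser is used as a parent parser in parse_args() above.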
| 29.231343 | 87 | 0.668624 |
c1f9d5463739584cbd046f0a4236e5a84c56b8ff
| 30,177 |
py
|
Python
|
tests/test_jutil.py
|
karpierz/jtypes.javabridge
|
ee519456d4048e55a0696c28bdf3c727667b5b50
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_jutil.py
|
karpierz/jtypes.javabridge
|
ee519456d4048e55a0696c28bdf3c727667b5b50
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_jutil.py
|
karpierz/jtypes.javabridge
|
ee519456d4048e55a0696c28bdf3c727667b5b50
|
[
"BSD-3-Clause"
] | null | null | null |
'''test_jutil.py - test the high-level interface
python-javabridge is licensed under the BSD license. See the
accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2013 Broad Institute
All rights reserved.
'''
from __future__ import absolute_import
import gc
import os
import random # <AK> was: numpy as np
import threading
import unittest
import sys
import javabridge
# Monkey patch some half-correct implementations of methods that only
# appeared in Python 2.7.
if not hasattr(unittest.TestCase, 'assertIn'): # pragma: no cover # <AK> added
unittest.TestCase.assertIn = lambda self, a, b: self.assertTrue(a in b)
if not hasattr(unittest.TestCase, 'assertNotIn'): # pragma: no cover # <AK> added
unittest.TestCase.assertNotIn = lambda self, a, b: self.assertTrue(a not in b)
if not hasattr(unittest.TestCase, 'assertSequenceEqual'): # pragma: no cover # <AK> added
unittest.TestCase.assertSequenceEqual = lambda self, a, b: self.assertTrue([aa == bb for aa, bb in zip(a, b)])
class TestJutil(unittest.TestCase):
def setUp(self):
self.env = javabridge.attach()
def tearDown(self):
javabridge.detach()
def test_01_01_to_string(self):
jstring = self.env.new_string_utf("Hello, world")
self.assertEqual(javabridge.to_string(jstring), "Hello, world")
def test_01_02_make_instance(self):
jobject = javabridge.make_instance("java/lang/Object", "()V")
self.assertTrue(javabridge.to_string(jobject).startswith("java.lang.Object"))
# <AK> added
with self.assertRaisesRegex(javabridge.JavaError,
'Could not find constructor with signature = '
'"\(\)V"'):
jobject = javabridge.make_instance("java/lang/Class", "()V")
# </AK>
def test_01_03_call(self):
jstring = self.env.new_string_utf("Hello, world")
self.assertEqual(javabridge.call(jstring, "charAt", "(I)C", 0), "H")
def test_01_03_01_static_call(self):
result = javabridge.static_call("Ljava/lang/String;", "valueOf",
"(I)Ljava/lang/String;",123)
self.assertEqual(result, "123")
# <AK> added
with self.assertRaisesRegex(javabridge.JavaError,
'Could not find method name = "unknown method" '
'with signature = "\(I\)Ljava/lang/String;"'):
result = javabridge.static_call("Ljava/lang/String;", "unknown method",
"(I)Ljava/lang/String;",123)
# </AK>
def test_01_04_make_method(self):
env = self.env
class String(object):
def __init__(self):
self.o = env.new_string_utf("Hello, world")
charAt = javabridge.make_method("charAt", "(I)C", "My documentation")
s = String()
self.assertEqual(s.charAt.__doc__, "My documentation")
self.assertEqual(s.charAt(0), "H")
def test_01_05_00_get_static_field(self):
klass = self.env.find_class("java/lang/Short")
self.assertEqual(javabridge.get_static_field(klass, "MAX_VALUE", "S"), 2**15 - 1)
def test_01_05_01_no_field_for_get_static_field(self):
def fn():
javabridge.get_static_field(
'java/lang/Object', "NoSuchField", "I")
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_02_no_class_for_get_static_field(self):
def fn():
javabridge.get_static_field(
'no/such/class', "field", "I")
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_03_set_static_field(self):
class_name = "org/cellprofiler/javabridge/test/RealRect"
test_cases = (
("fs_boolean", "Z", True), # <AK> added
("fs_char", "C", "A"),
("fs_byte", "B", 3),
("fs_short", "S", 15),
("fs_int", "I", 392),
("fs_long", "J", -14),
("fs_float", "F", 1.03),
("fs_double", "D", -889.1),
("fs_object", "Ljava/lang/Object;",
javabridge.make_instance("java/lang/Integer", "(I)V", 15)),
("fs_object", "Ljava/lang/Object;", None))
for field_name, signature, value in test_cases:
javabridge.set_static_field(class_name, field_name, signature, value)
v = javabridge.get_static_field(class_name, field_name, signature)
if isinstance(value, float):
self.assertAlmostEqual(v, value)
elif isinstance(value, javabridge.JB_Object):
self.assertTrue(javabridge.call(
value, "equals", "(Ljava/lang/Object;)Z", v))
else:
self.assertEqual(v, value)
def test_01_05_04_no_field_for_set_static_field(self):
def fn():
javabridge.set_static_field(
'java/lang/Object', "NoSuchField", "I", 5)
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_05_no_class_for_set_static_field(self):
def fn():
javabridge.set_static_field(
'no/such/class', "field", "I", 5)
self.assertRaises(javabridge.JavaException, fn)
def test_01_06_get_enumeration_wrapper(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
keys = javabridge.call(properties, "keys", "()Ljava/util/Enumeration;")
enum = javabridge.get_enumeration_wrapper(keys)
has_java_vm_name = False
while(enum.hasMoreElements()):
key = javabridge.to_string(enum.nextElement())
if key == "java.vm.name":
has_java_vm_name = True
self.assertTrue(has_java_vm_name)
def test_01_07_get_dictionary_wrapper(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
self.assertTrue(d.size() > 10)
self.assertFalse(d.isEmpty())
keys = javabridge.get_enumeration_wrapper(d.keys())
values = javabridge.get_enumeration_wrapper(d.elements())
n_elems = d.size()
for i in range(n_elems):
self.assertTrue(keys.hasMoreElements())
key = javabridge.to_string(keys.nextElement())
self.assertTrue(values.hasMoreElements())
value = javabridge.to_string(values.nextElement())
self.assertEqual(javabridge.to_string(d.get(key)), value)
self.assertFalse(keys.hasMoreElements())
self.assertFalse(values.hasMoreElements())
def test_01_08_jenumeration_to_string_list(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
keys = javabridge.jenumeration_to_string_list(d.keys())
enum = javabridge.get_enumeration_wrapper(d.keys())
for i in range(d.size()):
key = javabridge.to_string(enum.nextElement())
self.assertEqual(key, keys[i])
def test_01_09_jdictionary_to_string_dictionary(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
pyd = javabridge.jdictionary_to_string_dictionary(properties)
keys = javabridge.jenumeration_to_string_list(d.keys())
for key in keys:
value = javabridge.to_string(d.get(key))
self.assertEqual(pyd[key], value)
def test_01_10_make_new(self):
env = self.env
class MyClass:
new_fn = javabridge.make_new("java/lang/Object", '()V')
def __init__(self):
self.new_fn()
my_instance = MyClass()
def test_01_11_class_for_name(self):
c = javabridge.class_for_name('java.lang.String')
name = javabridge.call(c, 'getCanonicalName', '()Ljava/lang/String;')
self.assertEqual(name, 'java.lang.String')
def test_02_01_access_object_across_environments(self):
#
# Create an object in one environment, close the environment,
# open a second environment, then use it and delete it.
#
env = self.env
self.assertTrue(isinstance(env,javabridge.JB_Env))
class MyInteger:
new_fn = javabridge.make_new("java/lang/Integer",'(I)V')
def __init__(self, value):
self.new_fn(value)
intValue = javabridge.make_method("intValue", '()I')
my_value = 543
my_integer=MyInteger(my_value)
def run(my_integer = my_integer):
env = javabridge.attach()
self.assertEqual(my_integer.intValue(),my_value)
javabridge.detach()
t = threading.Thread(target = run)
t.start()
t.join()
def test_02_02_delete_in_environment(self):
env = self.env
self.assertTrue(isinstance(env, javabridge.JB_Env))
class MyInteger:
new_fn = javabridge.make_new("java/lang/Integer",'(I)V')
def __init__(self, value):
self.new_fn(value)
intValue = javabridge.make_method("intValue", '()I')
my_value = 543
my_integer=MyInteger(my_value)
def run(my_integer = my_integer):
env = javabridge.attach()
self.assertEqual(my_integer.intValue(),my_value)
del my_integer
javabridge.detach()
t = threading.Thread(target = run)
t.start()
t.join()
def test_02_03_death_and_resurrection(self):
'''Put an object into another in Java, delete it in Python and recover it'''
random.seed(24) # <AK> was: np.random.seed(24)
my_value = random.randrange(0, 1000) # <AK> was: np.random.randint(0, 1000)
jobj = javabridge.make_instance("java/lang/Integer", "(I)V", my_value)
integer_klass = self.env.find_class("java/lang/Integer")
jcontainer = self.env.make_object_array(1, integer_klass)
self.env.set_object_array_element(jcontainer, 0, jobj)
del jobj
gc.collect()
jobjs = self.env.get_object_array_elements(jcontainer)
jobj = jobjs[0]
self.assertEqual(javabridge.call(jobj, "intValue", "()I"), my_value)
def test_02_04_non_java_thread_deletes_it(self):
'''Delete a Java object on a not-Java thread'''
refs = [javabridge.make_instance("java/lang/Integer", "(I)V", 5)]
def run():
del refs[0]
gc.collect()
t = threading.Thread(target = run)
t.start()
t.join()
def test_03_01_cw_from_class(self):
'''Get a class wrapper from a class'''
c = javabridge.get_class_wrapper(javabridge.make_instance('java/lang/Integer', '(I)V',
14))
# <AK> added
self.assertIn("public static int java.lang.Integer.divideUnsigned(int,int)\n", repr(c))
def test_03_02_cw_from_string(self):
'''Get a class wrapper from a string'''
c = javabridge.get_class_wrapper("java.lang.Number")
def test_03_03_cw_get_classes(self):
c = javabridge.get_class_wrapper('java.lang.Number')
classes = c.getClasses()
self.assertEqual(len(javabridge.get_env().get_object_array_elements(classes)), 0)
def test_03_04_cw_get_annotation(self):
c = javabridge.get_class_wrapper('java.security.Identity')
annotation = c.getAnnotation(javabridge.class_for_name('java.lang.Deprecated'))
self.assertTrue(annotation is not None)
def test_03_05_cw_get_annotations(self):
c = javabridge.get_class_wrapper('java.security.Identity')
annotations = c.getAnnotations()
annotations = javabridge.get_env().get_object_array_elements(annotations)
self.assertEqual(len(annotations), 1)
self.assertTrue(javabridge.to_string(annotations[0]).startswith('@java.lang.Deprecated'))
def test_03_06_cw_get_constructors(self):
c = javabridge.get_class_wrapper('java.lang.String')
constructors = c.getConstructors()
constructors = javabridge.get_env().get_object_array_elements(constructors)
self.assertEqual(len(constructors), 15)
def test_03_07_cw_get_fields(self):
c = javabridge.get_class_wrapper('java.lang.String')
fields = c.getFields()
fields = javabridge.get_env().get_object_array_elements(fields)
self.assertEqual(len(fields), 1)
self.assertEqual(javabridge.call(fields[0], 'getName', '()Ljava/lang/String;'),
"CASE_INSENSITIVE_ORDER")
def test_03_08_cw_get_field(self):
c = javabridge.get_class_wrapper('java.lang.String')
field = c.getField('CASE_INSENSITIVE_ORDER')
modifiers = javabridge.call(field, 'getModifiers', '()I')
static = javabridge.get_static_field('java/lang/reflect/Modifier','STATIC','I')
self.assertEqual((modifiers & static), static)
def test_03_09_cw_get_method(self):
sclass = javabridge.class_for_name('java.lang.String')
iclass = javabridge.get_static_field('java/lang/Integer', 'TYPE',
'Ljava/lang/Class;')
c = javabridge.get_class_wrapper('java.lang.String')
m = c.getMethod('charAt', [ iclass ])
self.assertEqual(javabridge.to_string(javabridge.call(m, 'getReturnType', '()Ljava/lang/Class;')), 'char')
m = c.getMethod('concat', [ sclass])
self.assertEqual(javabridge.to_string(javabridge.call(m, 'getReturnType', '()Ljava/lang/Class;')),
'class java.lang.String')
def test_03_10_cw_get_methods(self):
c = javabridge.get_class_wrapper('java.lang.String')
mmm = javabridge.get_env().get_object_array_elements(c.getMethods())
self.assertTrue(any([javabridge.call(m, 'getName', '()Ljava/lang/String;') == 'concat'
for m in mmm]))
def test_03_11_cw_get_constructor(self):
c = javabridge.get_class_wrapper('java.lang.String')
sclass = javabridge.class_for_name('java.lang.String')
constructor = c.getConstructor([sclass])
self.assertEqual(javabridge.call(constructor, 'getName', '()Ljava/lang/String;'),
'java.lang.String')
def test_04_01_field_get(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
v = f.get(None)
self.assertEqual(javabridge.to_string(v), '127')
def test_04_02_field_name(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
self.assertEqual(f.getName(), 'MAX_VALUE')
def test_04_03_field_type(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
t = f.getType()
self.assertEqual(javabridge.to_string(t), 'byte')
def test_04_04_field_modifiers(self):
# <AK> added
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
m = f.getModifiers()
self.assertIsInstance(m, list)
self.assertEqual(set(m), {'PUBLIC', 'STATIC', 'FINAL'})
def test_05_01_run_script(self):
self.assertEqual(javabridge.run_script("2+2"), 4)
def test_05_02_run_script_with_inputs(self):
self.assertEqual(javabridge.run_script("a+b", bindings_in={"a":2, "b":3}), 5)
def test_05_03_run_script_with_outputs(self):
outputs = { "result": None}
javabridge.run_script("var result = 2+2;", bindings_out=outputs)
self.assertEqual(outputs["result"], 4)
def test_06_01_execute_asynch_main(self):
javabridge.execute_runnable_in_main_thread(javabridge.run_script(
"new java.lang.Runnable() { run:function() {}};"))
def test_06_02_execute_synch_main(self):
javabridge.execute_runnable_in_main_thread(javabridge.run_script(
"new java.lang.Runnable() { run:function() {}};"), True)
def test_06_03_future_main(self):
c = javabridge.run_script("""
new java.util.concurrent.Callable() {
call: function() { return 2+2; }};""")
result = javabridge.execute_future_in_main_thread(
javabridge.make_future_task(c, fn_post_process=javabridge.unwrap_javascript))
self.assertEqual(result, 4)
def test_07_01_wrap_future(self):
future = javabridge.run_script("""
new java.util.concurrent.FutureTask(
new java.util.concurrent.Callable() {
call: function() { return 2+2; }});""")
wfuture = javabridge.get_future_wrapper(
future, fn_post_process=javabridge.unwrap_javascript)
self.assertFalse(wfuture.isDone())
self.assertFalse(wfuture.isCancelled())
wfuture.run()
self.assertTrue(wfuture.isDone())
self.assertEqual(wfuture.get(), 4)
def test_07_02_cancel_future(self):
future = javabridge.run_script("""
new java.util.concurrent.FutureTask(
new java.util.concurrent.Callable() {
call: function() { return 2+2; }});""")
wfuture = javabridge.get_future_wrapper(
future, fn_post_process=javabridge.unwrap_javascript)
wfuture.cancel(True)
self.assertTrue(wfuture.isCancelled())
self.assertRaises(javabridge.JavaException, wfuture.get)
def test_07_03_make_future_task_from_runnable(self):
future = javabridge.make_future_task(
javabridge.run_script("new java.lang.Runnable() { run: function() {}};"),
11)
future.run()
self.assertEqual(javabridge.call(future.get(), "intValue", "()I"), 11)
def test_07_04_make_future_task_from_callable(self):
call_able = javabridge.run_script("""
new java.util.concurrent.Callable() {
call: function() { return 2+2; }};""")
future = javabridge.make_future_task(
call_able, fn_post_process=javabridge.unwrap_javascript)
future.run()
self.assertEqual(future.get(), 4)
def test_08_01_wrap_collection(self):
c = javabridge.make_instance("java/util/HashSet", "()V")
w = javabridge.get_collection_wrapper(c)
self.assertFalse(hasattr(w, "addI"))
self.assertEqual(w.size(), 0)
self.assertEqual(len(w), 0)
self.assertTrue(w.isEmpty())
def test_08_02_add(self):
c = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
self.assertTrue(c.add("Foo"))
self.assertEqual(len(c), 1)
self.assertFalse(c.isEmpty())
def test_08_03_contains(self):
c = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c.add("Foo")
self.assertTrue(c.contains("Foo"))
self.assertFalse(c.contains("Bar"))
self.assertIn("Foo", c)
self.assertNotIn("Bar", c)
def test_08_04_addAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c2.addAll(c1.o)
self.assertIn("Foo", c2)
def test_08_05__add__(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c3 = c1 + c2
for k in ("Foo", "Bar", "Baz"):
self.assertIn(k, c3)
c4 = c3 + ["Hello", "World"]
self.assertIn("Hello", c4)
self.assertIn("World", c4)
def test_08_06__iadd__(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c2 += c1
for k in ("Foo", "Bar", "Baz"):
self.assertIn(k, c2)
c2 += ["Hello", "World"]
self.assertIn("Hello", c2)
self.assertIn("World", c2)
def test_08_07_contains_all(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
self.assertFalse(c2.containsAll(c1.o))
c2 += c1
self.assertTrue(c2.containsAll(c1.o))
def test_08_08_remove(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c1.remove("Foo")
self.assertNotIn("Foo", c1)
def test_08_09_removeAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Foo")
c1.removeAll(c2)
self.assertNotIn("Foo", c1)
def test_08_10_retainAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Foo")
c1.retainAll(c2)
self.assertIn("Foo", c1)
self.assertNotIn("Bar", c1)
def test_08_11_toArray(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
result = [javabridge.to_string(x) for x in c1.toArray()]
self.assertIn("Foo", result)
self.assertIn("Bar", result)
def test_08_12_make_list(self):
l = javabridge.make_list(["Foo", "Bar"])
self.assertSequenceEqual(l, ["Foo", "Bar"])
self.assertTrue(hasattr(l, "addI"))
def test_08_13_addI(self):
l = javabridge.make_list(["Foo", "Bar"])
l.addI(1, "Baz")
self.assertSequenceEqual(l, ["Foo", "Baz", "Bar"])
def test_08_14_addAllI(self):
l = javabridge.make_list(["Foo", "Bar"])
l.addAllI(1, javabridge.make_list(["Baz"]))
self.assertSequenceEqual(l, ["Foo", "Baz", "Bar"])
def test_08_15_indexOf(self):
l = javabridge.make_list(["Foo", "Bar"])
self.assertEqual(l.indexOf("Bar"), 1)
self.assertEqual(l.lastIndexOf("Foo"), 0)
def test_08_16_get(self):
l = javabridge.make_list(["Foo", "Bar"])
self.assertEqual(l.get(1), "Bar")
def test_08_17_set(self):
l = javabridge.make_list(["Foo", "Bar"])
l.set(1, "Baz")
self.assertEqual(l.get(1), "Baz")
def test_08_18_subList(self):
l = javabridge.make_list(["Foo", "Bar", "Baz", "Hello", "World"])
self.assertSequenceEqual(l.subList(1, 3), ["Bar", "Baz"])
def test_08_19__getitem__(self):
l = javabridge.make_list(["Foo", "Bar", "Baz", "Hello", "World"])
self.assertEqual(l[1], "Bar")
self.assertEqual(l[-2], "Hello")
self.assertSequenceEqual(l[1:3], ["Bar", "Baz"])
self.assertSequenceEqual(l[::3], ["Foo", "Hello"])
def test_08_20__setitem__(self):
l = javabridge.make_list(["Foo", "Bar"])
l[1] = "Baz"
self.assertEqual(l.get(1), "Baz")
def test_08_21__delitem__(self):
l = javabridge.make_list(["Foo", "Bar", "Baz"])
del l[1]
self.assertSequenceEqual(l, ["Foo", "Baz"])
def test_09_01_00_get_field(self):
o = javabridge.make_instance("org/cellprofiler/javabridge/test/RealRect", "(DDDD)V", 1, 2, 3, 4)
self.assertEqual(javabridge.get_field(o, "x", "D"), 1)
def test_09_02_get_field_no_such_field(self):
def fn():
o = javabridge.make_instance("java/lang/Object", "()V")
javabridge.get_field(o, "NoSuchField", "I")
self.assertRaises(javabridge.JavaException, fn)
def test_09_03_set_field(self):
class_name = "org/cellprofiler/javabridge/test/RealRect"
o = javabridge.make_instance(class_name, "()V")
test_cases = (
("f_boolean", "Z", True), # <AK> added
("f_char", "C", "A"),
("f_byte", "B", 3),
("f_short", "S", 15),
("f_int", "I", 392),
("f_long", "J", -14),
("f_float", "F", 1.03),
("f_double", "D", -889.1),
("f_object", "Ljava/lang/Object;",
javabridge.make_instance("java/lang/Integer", "(I)V", 15)),
("f_object", "Ljava/lang/Object;", None))
for field_name, signature, value in test_cases:
javabridge.set_field(o, field_name, signature, value)
v = javabridge.get_field(o, field_name, signature)
if isinstance(value, float):
self.assertAlmostEqual(v, value)
elif isinstance(value, javabridge.JB_Object):
self.assertTrue(javabridge.call(
value, "equals", "(Ljava/lang/Object;)Z", v))
else:
self.assertEqual(v, value)
def test_09_04_set_field_no_such_field(self):
def fn():
o = javabridge.make_instance("java/lang/Object", "()V")
javabridge.set_field(o, "NoSuchField", "I", 1)
self.assertRaises(javabridge.JavaException, fn)
def test_10_01_iterate_java_on_non_iterator(self):
#
# Regression test of issue #11: the expression below segfaulted
#
def fn():
list(javabridge.iterate_java(javabridge.make_list(range(10)).o))
self.assertRaises(javabridge.JavaError, fn)
def test_10_01_class_path(self):
for arg in ['-cp', '-classpath', '-Djava.class.path=foo']:
self.assertRaises(ValueError, lambda: javabridge.start_vm([arg]))
def test_11_01_make_run_dictionary(self):
from javabridge.jutil import make_run_dictionary
o = javabridge.make_instance("java/util/Hashtable", "()V")
a = javabridge.make_instance("java/util/ArrayList", "()V")
javabridge.call(
o, "put",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
"foo", "bar")
javabridge.call(
o, "put",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
"baz", a)
d = make_run_dictionary(o)
self.assertIn("foo", d)
        self.assertEqual(d["foo"], "bar")
self.assertIn("baz", d)
self.assertTrue(javabridge.call(d["baz"], "equals",
"(Ljava/lang/Object;)Z", a))
def test_12_01_jref(self):
o = dict(foo="bar", baz="2")
ref_id, ref = javabridge.create_jref(o)
alt = javabridge.redeem_jref(ref_id)
o["bar"] = "bunny"
for key in o:
self.assertTrue(key in alt)
self.assertEqual(o[key], alt[key])
def test_12_02_jref_lost(self):
o = dict(foo="bar", baz="2")
ref_id, ref = javabridge.create_jref(o)
del ref
self.assertRaises(KeyError, javabridge.redeem_jref, ref_id)
def test_12_03_jref_create_and_lock(self):
cpython = javabridge.JClassWrapper(
'org.cellprofiler.javabridge.CPython')()
d = javabridge.JClassWrapper('java.util.Hashtable')()
result = javabridge.JClassWrapper('java.util.ArrayList')()
d.put("result", result)
ref_self = javabridge.create_and_lock_jref(self)
d.put("self", ref_self)
cpython.execute(
'import javabridge\n'
'x = { "foo":"bar"}\n'
'ref_id = javabridge.create_and_lock_jref(x)\n'
'javabridge.JWrapper(result).add(ref_id)', d, d)
cpython.execute(
'import javabridge\n'
'ref_id = javabridge.JWrapper(result).get(0)\n'
'self = javabridge.redeem_jref(javabridge.to_string(self))\n'
'self.assertEqual(javabridge.redeem_jref(ref_id)["foo"], "bar")\n'
'javabridge.unlock_jref(ref_id)', d, d)
javabridge.unlock_jref(ref_self)
self.assertRaises(KeyError, javabridge.redeem_jref, ref_self)
def test_13_01_unicode_arg(self):
# On 2.x, check that a unicode argument is properly prepared
s = u"Hola ni\u00F1os"
s1, s2 = s.split(" ")
if sys.version_info.major == 2: s2 = s2.encode("utf-8")
env = javabridge.get_env()
js1 = env.new_string(s1+" ")
result = javabridge.call(
js1, "concat", "(Ljava/lang/String;)Ljava/lang/String;", s2)
self.assertEqual(s, result)
if __name__=="__main__":
unittest.main()
| 42.865057 | 114 | 0.602843 |
c525dd8ae57209a92b408f189d82f91f17b6c90a
| 243 |
py
|
Python
|
setup.py
|
khickey25/medical_image_interpretability_scratch_work
|
d88266a458ebf2812c7cccb1efa115de98e0db52
|
[
"MIT"
] | null | null | null |
setup.py
|
khickey25/medical_image_interpretability_scratch_work
|
d88266a458ebf2812c7cccb1efa115de98e0db52
|
[
"MIT"
] | null | null | null |
setup.py
|
khickey25/medical_image_interpretability_scratch_work
|
d88266a458ebf2812c7cccb1efa115de98e0db52
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
    description='Scratch work for medical image interpretability analysis project',
author='Kevin',
license='MIT',
)
| 22.090909 | 82 | 0.695473 |
1ff05f68c8287dede2e6cba6f861241154151cb1
| 3,813 |
py
|
Python
|
IMPORTANT_INFO.py
|
vasetousa/Python-fundamentals
|
3180c03de28b4f4d36d966221719069a7e18e521
|
[
"MIT"
] | null | null | null |
IMPORTANT_INFO.py
|
vasetousa/Python-fundamentals
|
3180c03de28b4f4d36d966221719069a7e18e521
|
[
"MIT"
] | null | null | null |
IMPORTANT_INFO.py
|
vasetousa/Python-fundamentals
|
3180c03de28b4f4d36d966221719069a7e18e521
|
[
"MIT"
] | null | null | null |
#
#
# s = "Count, the number of spaces"
# print(s.count(' '))
#
#
# word = "Hello My World"
# print(word.count('l')) # count how many times l is in the string
#
#
# print(word.find("H")) # find the word H in the string
#
# print(word.index("World")) # find the letters World in the string
#
#
#
# word = "Hello World"
#
# print(word[0]) #get one char of the word
# print(word[0:1]) #get one char of the word (same as above)
# print(word[0:3]) #get the first three char
# print(word[:3]) #get the first three char
# print(word[-3:]) #get the last three char
# print(word[3:]) #get all but the three first char
# print(word[:-3]) #get all but the three last character
#
# word = "Hello World"
#
# # word[start:end] # items start through end-1
# # word[start:] # items start through the rest of the list
# # word[:end] # items from the beginning through end-1
# # word[:] # a copy of the whole list
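# A runnable recap of the slicing rules above; "Hello World" is the sample
# string already used in these notes, and the expected outputs are shown in
# the trailing comments.
sample_word = "Hello World"
print(sample_word[0:3])   # 'Hel'         -> items 0 through 2
print(sample_word[-3:])   # 'rld'         -> the last three characters
print(sample_word[:-3])   # 'Hello Wo'    -> all but the last three characters
print(sample_word[:])     # 'Hello World' -> a copy of the whole string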
#
# print("." * 10) # print 10 dots
#
# a = word.replace("Hello", "Goodbye") # replace the word "Hello" with the one "Goodbye"
# 'Goodbye World'
#
# print(a)
#
#
#
# some_text = "a b c d"
# list = some_text.split(' ')
# print(some_text); print(list)
#
# back_to_str = ''.join(list)
# print(back_to_str)
#
# nums = [1, 2, 3]
#
# for index in range(0, len(nums)): # index could be any other name
# print(index)
# list_happiness = [int(el) for el in input().split()] # direct list of integers with comprehension
# list_happiness = list(map(lambda el: int(el), input().split())) # direct list of integers with map and lambda
# list_happiness = list(map(int, input().split())) # direct list of integers, passing "int" itself to map
# print(','.join(str(x) for x in list_of_ints))
# ', '.join(map(str, myList))
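# A self-contained version of the conversions above, using a literal string in
# place of input() so it runs anywhere; the numbers are only illustrative.
raw_numbers = "3 7 12"
nums_comprehension = [int(el) for el in raw_numbers.split()]   # [3, 7, 12]
nums_map = list(map(int, raw_numbers.split()))                 # [3, 7, 12]
print(nums_comprehension, nums_map)
print(', '.join(str(x) for x in nums_map))                     # "3, 7, 12"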
# creating the data and saving it in a list
# line = input()
# while line != "Stop":
# sender, receiver, content = line.split()
# email = Email(sender, receiver, content) !!!
# emails.append(email)
# line = input()
# SORTING DICTIONARY some_dict = {"a": 1, "b": 11, "c": 20}
# print(sorted(some_dict.items(), key=lambda kvp: kvp[1], reverse=True))
# print(sorted(some_dict.items(), key=lambda kvp: -kvp[1])) # sorting by value same as above DESCENDING
# (-kvp works when the value is an integer or float)
# print(sorted(some_dict.items(), key=lambda kvp: kvp[1], reverse=True)) # sorting by value
# (when value is not an integer or float) same as above DESCENDING (-kvp)
# when strings only, no Digits in the dictionaries, use:
# a = sorted(some_dict.items(), key=lambda kvp: kvp[1], reverse=True)
# printing result (dictionary)
# for key, values in synonyms.items(): # same as above print, but using ".items()"
# print(f"{key} - {', '.join(values)}")
# A = [[1, 4, 5, 12],
# [-5, 8, 9, 0],
# [-6, 7, 11, 19]]
#
# print("A =", A)
# print("A[1] =", A[1]) # 2nd row
# print("A[1][2] =", A[1][2]) # 3rd element of 2nd row
# print("A[0][-1] =", A[0][-1]) # Last element of 1st Row
#
# column = [] # empty list
# for row in A:
# column.append(row[2])
#
# print("3rd column =", column)
# Regex
# findall() only if there are no groups!
# finditer() returns "iterated object" and we must always use a cycle and
# use .group() <-> which gives us the value in the group
# search() returns the first occurrence
# match() returns None or object.
# object matches or not
# Name groups -> (?P<name>regex) ->>> @(?P<name>[a-zA-Z]+)@(?P<price>\d+\.?\d+)@(?P<quantity>\d+\b)  e.g. @Bread@4.50@4
# https://www.regular-expressions.info/python.html
# Regex for a number between 0-10000 ->> ([0-9][0-9]{0,3}|10000) or ([0-9]{0,4}|10000) example
# when we have repeatable items like "#" or "|", we can use group calling, or "\1, 2, 3" ->>
# (#|\|)[a-zA-Z\s]+\1\d{2}/\d{2}/\d{2}\1 where \1 is calling the group (#|\|) #Bread#19/03/21#4000#|Carrots|06/08/20|500|
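# A minimal runnable demo of finditer() with named groups; the pattern and the
# sample line below follow the shape sketched in the notes and are made up for
# illustration only.
import re

sample_line = "#Bread#19/03/21#4000#|Carrots|06/08/20|500|"
item_pattern = r"(#|\|)(?P<name>[a-zA-Z]+)\1(?P<date>\d{2}/\d{2}/\d{2})\1(?P<quantity>\d+)\1"
for match in re.finditer(item_pattern, sample_line):
    print(match.group("name"), match.group("date"), match.group("quantity"))
# Bread 19/03/21 4000
# Carrots 06/08/20 500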
| 31.775 | 127 | 0.618148 |
2852770c058867d8826914a383790fce5d75f2f3
| 3,336 |
py
|
Python
|
Data Structures and Algorithms/Linked List/01. Singly Linked List.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | 1 |
2021-07-15T18:40:26.000Z
|
2021-07-15T18:40:26.000Z
|
Data Structures and Algorithms/Linked List/01. Singly Linked List.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | null | null | null |
Data Structures and Algorithms/Linked List/01. Singly Linked List.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | null | null | null |
class Node:
def __init__(self, value=None):
self.value = value
self.next = None
class SLinkedList:
def __init__(self):
self.head = None
self.tail = None
def __iter__(self):
node = self.head
while node:
yield node
node = node.next
# Insert in Linked List
def insertSLL(self, value, location):
newNode = Node(value)
if self.head is None:
self.head = newNode
self.tail = newNode
else:
if location == 0:
newNode.next = self.head
self.head = newNode
elif location == -1:
newNode.next = None
self.tail.next = newNode
self.tail = newNode
else:
tempNode = self.head
index = 0
while index < location - 1:
tempNode = tempNode.next
index += 1
nextNode = tempNode.next
tempNode.next = newNode
newNode.next = nextNode
# Traverse Singly Linked List
def traverseList(self):
if self.head is None:
print("The Singly Linked List does not exist")
else:
node = self.head
while node is not None:
print(node.value)
node = node.next
# Search for a node in Singly Linked List
def searchSLL(self, nodeValue):
if self.head is None:
print("The Singly Linked List does not exist")
else:
node = self.head
while node is not None:
if node.value == nodeValue:
return node.value
node = node.next
return "The node does not exist in this SLL"
# Delete a node from Singly Linked List
def deleteNode(self, location):
if self.head is None:
return "The Singly Linked List does not exist"
elif location == 0:
if self.head == self.tail:
self.head = None
self.tail = None
else:
self.head = self.head.next
elif location == -1:
if self.head == self.tail:
self.head = None
self.tail = None
else:
                node = self.head
                while node is not None:
                    if node.next == self.tail:
                        node.next = None
                        self.tail = node
                        break
                    node = node.next
else:
tempNode = self.head
index = 0
while index < location - 1:
tempNode = tempNode.next
index += 1
nextNode = tempNode.next
tempNode.next = nextNode.next
# Delete entire SLL
def deleteEntireSLL(self):
if self.head is None:
print("SLL does not exist")
else:
self.head = None
self.tail = None
singlyLinkedList = SLinkedList()
singlyLinkedList.insertSLL(3, 0)
singlyLinkedList.insertSLL(4, -1)
singlyLinkedList.insertSLL(5, -1)
singlyLinkedList.insertSLL(7, 2)
print([node.value for node in singlyLinkedList])
singlyLinkedList.deleteEntireSLL()
print([node.value for node in singlyLinkedList])
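# A short additional usage sketch (not part of the original demo) exercising
# search and positional delete on a fresh list, since the list above was just
# emptied with deleteEntireSLL().
demoList = SLinkedList()
demoList.insertSLL(1, 0)
demoList.insertSLL(2, -1)
demoList.insertSLL(3, -1)
print(demoList.searchSLL(2))                # 2
print(demoList.searchSLL(9))                # The node does not exist in this SLL
demoList.deleteNode(1)                      # remove the middle node
print([node.value for node in demoList])    # [1, 3]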
| 29.263158 | 58 | 0.506595 |
0a7be655332cca09ba8fb4f0516f2b57bc6d6e3e
| 16,802 |
py
|
Python
|
conans/test/integration/profile_test.py
|
datalogics-kam/conan
|
7bf230cd5f8ef68eb804908777ebaad75e951b16
|
[
"MIT"
] | null | null | null |
conans/test/integration/profile_test.py
|
datalogics-kam/conan
|
7bf230cd5f8ef68eb804908777ebaad75e951b16
|
[
"MIT"
] | null | null | null |
conans/test/integration/profile_test.py
|
datalogics-kam/conan
|
7bf230cd5f8ef68eb804908777ebaad75e951b16
|
[
"MIT"
] | null | null | null |
import unittest
from conans.client import tools
from conans.test.utils.tools import TestClient
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.util.files import save, load
import os
from conans.paths import CONANFILE
from collections import OrderedDict
from conans.test.utils.test_files import temp_folder
from conans.test.utils.profiles import create_profile as _create_profile
from parameterized import parameterized
conanfile_scope_env = """
from conans import ConanFile
class AConan(ConanFile):
name = "Hello0"
version = "0.1"
settings = "os", "compiler", "arch"
def build(self):
# Print environment vars
if self.settings.os == "Windows":
self.run("SET")
else:
self.run("env")
"""
def create_profile(folder, name, settings=None, package_settings=None, env=None,
package_env=None, options=None):
_create_profile(folder, name, settings, package_settings, env, package_env, options)
content = load(os.path.join(folder, name))
content = "include(default)\n \n" + content
save(os.path.join(folder, name), content)
class ProfileTest(unittest.TestCase):
def setUp(self):
self.client = TestClient()
def base_profile_generated_test(self):
"""we are testing that the default profile is created (when not existing, fresh install)
even when you run a create with a profile"""
client = TestClient()
client.save({CONANFILE: conanfile_scope_env,
"myprofile": "include(default)\n[settings]\nbuild_type=Debug"})
client.run("create . conan/testing --profile myprofile")
def bad_syntax_test(self):
self.client.save({CONANFILE: conanfile_scope_env})
self.client.run("export . lasote/stable")
profile = '''
[settings
'''
clang_profile_path = os.path.join(self.client.client_cache.profiles_path, "clang")
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Error reading 'clang' profile", self.client.user_io.out)
self.assertIn("Bad syntax", self.client.user_io.out)
profile = '''
[settings]
[invented]
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Unrecognized field 'invented'", self.client.user_io.out)
self.assertIn("Error reading 'clang' profile", self.client.user_io.out)
profile = '''
[settings]
as
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Error reading 'clang' profile: Invalid setting line 'as'",
self.client.user_io.out)
profile = '''
[env]
as
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Error reading 'clang' profile: Invalid env line 'as'",
self.client.user_io.out)
profile = '''
[settings]
os = a value
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
# stripped "a value"
self.assertIn("'a value' is not a valid 'settings.os'", self.client.user_io.out)
profile = '''
include(default)
[env]
ENV_VAR = a value
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang")
self._assert_env_variable_printed("ENV_VAR", "a value")
profile = '''
include(default)
# Line with comments is not a problem
[env]
# Not even here
ENV_VAR = a value
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build -pr clang")
self._assert_env_variable_printed("ENV_VAR", "a value")
@parameterized.expand([("", ), ("./local_profiles/", ), (temp_folder() + "/", )])
def install_with_missing_profile_test(self, path):
self.client.save({CONANFILE: conanfile_scope_env})
error = self.client.run('install . -pr "%sscopes_env"' % path, ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: Profile not found:", self.client.out)
self.assertIn("scopes_env", self.client.out)
def install_profile_env_test(self):
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
files["conanfile.py"] = conanfile_scope_env
create_profile(self.client.client_cache.profiles_path, "envs", settings={},
env=[("A_VAR", "A_VALUE")], package_env={"Hello0": [("OTHER_VAR", "2")]})
self.client.save(files)
self.client.run("export . lasote/stable")
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr envs")
self._assert_env_variable_printed("A_VAR", "A_VALUE")
self._assert_env_variable_printed("OTHER_VAR", "2")
# Override with package var
self.client.run("install Hello0/0.1@lasote/stable --build "
"-pr envs -e Hello0:A_VAR=OTHER_VALUE")
self._assert_env_variable_printed("A_VAR", "OTHER_VALUE")
self._assert_env_variable_printed("OTHER_VAR", "2")
# Override package var with package var
self.client.run("install Hello0/0.1@lasote/stable --build -pr envs "
"-e Hello0:A_VAR=OTHER_VALUE -e Hello0:OTHER_VAR=3")
self._assert_env_variable_printed("A_VAR", "OTHER_VALUE")
self._assert_env_variable_printed("OTHER_VAR", "3")
# Pass a variable with "=" symbol
self.client.run("install Hello0/0.1@lasote/stable --build -pr envs "
"-e Hello0:A_VAR=Valuewith=equal -e Hello0:OTHER_VAR=3")
self._assert_env_variable_printed("A_VAR", "Valuewith=equal")
self._assert_env_variable_printed("OTHER_VAR", "3")
def install_profile_settings_test(self):
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
# Create a profile and use it
profile_settings = OrderedDict([("compiler", "Visual Studio"),
("compiler.version", "12"),
("compiler.runtime", "MD"),
("arch", "x86")])
create_profile(self.client.client_cache.profiles_path, "vs_12_86",
settings=profile_settings, package_settings={})
self.client.client_cache.default_profile # Creates default
tools.replace_in_file(self.client.client_cache.default_profile_path,
"compiler.libcxx", "#compiler.libcxx", strict=False)
self.client.save(files)
self.client.run("export . lasote/stable")
self.client.run("install . --build missing -pr vs_12_86")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
for setting, value in profile_settings.items():
self.assertIn("%s=%s" % (setting, value), info)
# Try to override some settings in install command
self.client.run("install . --build missing -pr vs_12_86 -s compiler.version=14")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
for setting, value in profile_settings.items():
if setting != "compiler.version":
self.assertIn("%s=%s" % (setting, value), info)
else:
self.assertIn("compiler.version=14", info)
# Use package settings in profile
tmp_settings = OrderedDict()
tmp_settings["compiler"] = "gcc"
tmp_settings["compiler.libcxx"] = "libstdc++11"
tmp_settings["compiler.version"] = "4.8"
package_settings = {"Hello0": tmp_settings}
create_profile(self.client.client_cache.profiles_path,
"vs_12_86_Hello0_gcc", settings=profile_settings,
package_settings=package_settings)
# Try to override some settings in install command
self.client.run("install . --build missing -pr vs_12_86_Hello0_gcc -s compiler.version=14")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("compiler=gcc", info)
self.assertIn("compiler.libcxx=libstdc++11", info)
# If other package is specified compiler is not modified
package_settings = {"NoExistsRecipe": tmp_settings}
create_profile(self.client.client_cache.profiles_path,
"vs_12_86_Hello0_gcc", settings=profile_settings,
package_settings=package_settings)
# Try to override some settings in install command
self.client.run("install . --build missing -pr vs_12_86_Hello0_gcc -s compiler.version=14")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("compiler=Visual Studio", info)
self.assertNotIn("compiler.libcxx", info)
# Mix command line package settings with profile
package_settings = {"Hello0": tmp_settings}
create_profile(self.client.client_cache.profiles_path, "vs_12_86_Hello0_gcc",
settings=profile_settings, package_settings=package_settings)
# Try to override some settings in install command
self.client.run("install . --build missing -pr vs_12_86_Hello0_gcc"
" -s compiler.version=14 -s Hello0:compiler.libcxx=libstdc++")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("compiler=gcc", info)
self.assertNotIn("compiler.libcxx=libstdc++11", info)
self.assertIn("compiler.libcxx=libstdc++", info)
def install_profile_options_test(self):
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
create_profile(self.client.client_cache.profiles_path, "vs_12_86",
options=[("Hello0:language", 1),
("Hello0:static", False)])
self.client.save(files)
self.client.run("install . --build missing -pr vs_12_86")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("language=1", info)
self.assertIn("static=False", info)
def scopes_env_test(self):
# Create a profile and use it
create_profile(self.client.client_cache.profiles_path, "scopes_env", settings={},
env=[("CXX", "/path/tomy/g++"), ("CC", "/path/tomy/gcc")])
self.client.save({CONANFILE: conanfile_scope_env})
self.client.run("export . lasote/stable")
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr scopes_env")
self._assert_env_variable_printed("CC", "/path/tomy/gcc")
self._assert_env_variable_printed("CXX", "/path/tomy/g++")
# The env variable shouldn't persist after install command
self.assertFalse(os.environ.get("CC", None) == "/path/tomy/gcc")
self.assertFalse(os.environ.get("CXX", None) == "/path/tomy/g++")
def default_including_another_profile_test(self):
p1 = "include(p2)\n[env]\nA_VAR=1"
p2 = "include(default)\n[env]\nA_VAR=2"
self.client.client_cache.conan_config # Create the default conf
self.client.client_cache.default_profile # Create default profile
save(os.path.join(self.client.client_cache.profiles_path, "p1"), p1)
save(os.path.join(self.client.client_cache.profiles_path, "p2"), p2)
# Change default profile to p1 => p2 => default
tools.replace_in_file(self.client.client_cache.conan_conf_path,
"default_profile = default",
"default_profile = p1")
self.client.save({CONANFILE: conanfile_scope_env})
self.client.run("create . user/testing")
self._assert_env_variable_printed("A_VAR", "1")
def test_package_test(self):
test_conanfile = '''from conans.model.conan_file import ConanFile
from conans import CMake
import os
class DefaultNameConan(ConanFile):
name = "DefaultName"
version = "0.1"
settings = "os", "compiler", "arch", "build_type"
requires = "Hello0/0.1@lasote/stable"
def build(self):
# Print environment vars
if self.settings.os == "Windows":
self.run('echo "My var is %ONE_VAR%"')
else:
self.run('echo "My var is $ONE_VAR"')
def test(self):
pass
'''
files = {"conanfile.py": conanfile_scope_env,
"test_package/conanfile.py": test_conanfile}
# Create a profile and use it
create_profile(self.client.client_cache.profiles_path, "scopes_env", settings={},
env=[("ONE_VAR", "ONE_VALUE")])
self.client.save(files)
self.client.run("create . lasote/stable --profile scopes_env")
self._assert_env_variable_printed("ONE_VAR", "ONE_VALUE")
self.assertIn("My var is ONE_VALUE", str(self.client.user_io.out))
# Try now with package environment vars
create_profile(self.client.client_cache.profiles_path, "scopes_env2", settings={},
package_env={"DefaultName": [("ONE_VAR", "IN_TEST_PACKAGE")],
"Hello0": [("ONE_VAR", "PACKAGE VALUE")]})
self.client.run("create . lasote/stable --profile scopes_env2")
self._assert_env_variable_printed("ONE_VAR", "PACKAGE VALUE")
self.assertIn("My var is IN_TEST_PACKAGE", str(self.client.user_io.out))
# Try now overriding some variables with command line
self.client.run("create . lasote/stable --profile scopes_env2 "
"-e DefaultName:ONE_VAR=InTestPackageOverride "
"-e Hello0:ONE_VAR=PackageValueOverride ")
self._assert_env_variable_printed("ONE_VAR", "PackageValueOverride")
self.assertIn("My var is InTestPackageOverride", str(self.client.user_io.out))
# A global setting in command line won't override a scoped package variable
self.client.run("create . lasote/stable --profile scopes_env2 -e ONE_VAR=AnotherValue")
self._assert_env_variable_printed("ONE_VAR", "PACKAGE VALUE")
def _assert_env_variable_printed(self, name, value):
self.assertIn("%s=%s" % (name, value), self.client.user_io.out)
def info_with_profiles_test(self):
self.client.run("remove '*' -f")
# Create a simple recipe to require
winreq_conanfile = '''
from conans.model.conan_file import ConanFile
class WinRequireDefaultNameConan(ConanFile):
name = "WinRequire"
version = "0.1"
settings = "os", "compiler", "arch", "build_type"
'''
files = {"conanfile.py": winreq_conanfile}
self.client.save(files)
self.client.run("export . lasote/stable")
# Now require the first recipe depending on OS=windows
conanfile = '''from conans.model.conan_file import ConanFile
import os
class DefaultNameConan(ConanFile):
name = "Hello"
version = "0.1"
settings = "os", "compiler", "arch", "build_type"
def config(self):
if self.settings.os == "Windows":
self.requires.add("WinRequire/0.1@lasote/stable")
'''
files = {"conanfile.py": conanfile}
self.client.save(files)
self.client.run("export . lasote/stable")
# Create a profile that doesn't activate the require
create_profile(self.client.client_cache.profiles_path, "scopes_env",
settings={"os": "Linux"})
# Install with the previous profile
self.client.run("info Hello/0.1@lasote/stable --profile scopes_env")
self.assertNotIn('''Requires:
WinRequire/0.1@lasote/stable''', self.client.user_io.out)
# Create a profile that activate the require
create_profile(self.client.client_cache.profiles_path, "scopes_env",
settings={"os": "Windows"})
# Install with the previous profile
self.client.run("info Hello/0.1@lasote/stable --profile scopes_env")
self.assertIn('''Requires:
WinRequire/0.1@lasote/stable''', self.client.user_io.out)
| 42.753181 | 99 | 0.630461 |
17b8d225b55dd70bbc9ecb294ae455be7b0559d9
| 3,599 |
py
|
Python
|
sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1 |
2021-09-16T02:33:52.000Z
|
2021-09-16T02:33:52.000Z
|
sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2 |
2021-08-24T15:32:30.000Z
|
2021-08-24T23:21:34.000Z
|
sdk/identity/azure-identity/azure/identity/aio/_internal/get_token_mixin.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1 |
2016-04-19T22:15:47.000Z
|
2016-04-19T22:15:47.000Z
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import abc
import logging
import time
from typing import TYPE_CHECKING
from ..._constants import DEFAULT_REFRESH_OFFSET, DEFAULT_TOKEN_REFRESH_RETRY_DELAY
from ..._internal import within_credential_chain
if TYPE_CHECKING:
# pylint:disable=ungrouped-imports,unused-import
from typing import Any, Optional
from azure.core.credentials import AccessToken
_LOGGER = logging.getLogger(__name__)
class GetTokenMixin(abc.ABC):
def __init__(self, *args: "Any", **kwargs: "Any") -> None:
self._last_request_time = 0
# https://github.com/python/mypy/issues/5887
super(GetTokenMixin, self).__init__(*args, **kwargs) # type: ignore
@abc.abstractmethod
async def _acquire_token_silently(self, *scopes: str, **kwargs: "Any") -> "Optional[AccessToken]":
"""Attempt to acquire an access token from a cache or by redeeming a refresh token"""
@abc.abstractmethod
async def _request_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
"""Request an access token from the STS"""
def _should_refresh(self, token: "AccessToken") -> bool:
now = int(time.time())
if token.expires_on - now > DEFAULT_REFRESH_OFFSET:
return False
if now - self._last_request_time < DEFAULT_TOKEN_REFRESH_RETRY_DELAY:
return False
return True
async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
"""Request an access token for `scopes`.
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
:keyword str tenant_id: optional tenant to include in the token request. If **allow_multitenant_authentication**
is False, specifying a tenant with this argument may raise an exception.
:rtype: :class:`azure.core.credentials.AccessToken`
:raises CredentialUnavailableError: the credential is unable to attempt authentication because it lacks
required data, state, or platform support
:raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message``
attribute gives a reason.
"""
if not scopes:
raise ValueError('"get_token" requires at least one scope')
try:
token = await self._acquire_token_silently(*scopes, **kwargs)
if not token:
self._last_request_time = int(time.time())
token = await self._request_token(*scopes, **kwargs)
elif self._should_refresh(token):
try:
self._last_request_time = int(time.time())
token = await self._request_token(*scopes, **kwargs)
except Exception: # pylint:disable=broad-except
pass
_LOGGER.log(
logging.DEBUG if within_credential_chain.get() else logging.INFO,
"%s.get_token succeeded",
self.__class__.__name__,
)
return token
except Exception as ex:
_LOGGER.log(
logging.DEBUG if within_credential_chain.get() else logging.WARNING,
"%s.get_token failed: %s",
self.__class__.__name__,
ex,
exc_info=_LOGGER.isEnabledFor(logging.DEBUG),
)
raise
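# A minimal concrete-subclass sketch (illustrative only; the class and attribute
# names below are assumptions, not part of azure-identity): a credential built on
# this mixin only needs to supply the two abstract methods.
#
#     class StaticTokenCredential(GetTokenMixin):
#         def __init__(self, token):
#             super().__init__()
#             self._token = token
#
#         async def _acquire_token_silently(self, *scopes, **kwargs):
#             return self._token
#
#         async def _request_token(self, *scopes, **kwargs):
#             return self._token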
| 39.988889 | 120 | 0.626841 |
50b9e1b032ee610aebe972988439ee6e9d6d90b3
| 3,980 |
py
|
Python
|
src/data/dataClean_bang.py
|
rockpiyush/Prediction-house-pricing
|
eb8bd5160624bd925389235fee4e33df41eacd19
|
[
"MIT"
] | null | null | null |
src/data/dataClean_bang.py
|
rockpiyush/Prediction-house-pricing
|
eb8bd5160624bd925389235fee4e33df41eacd19
|
[
"MIT"
] | null | null | null |
src/data/dataClean_bang.py
|
rockpiyush/Prediction-house-pricing
|
eb8bd5160624bd925389235fee4e33df41eacd19
|
[
"MIT"
] | 4 |
2018-02-09T15:24:21.000Z
|
2020-04-30T09:01:00.000Z
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from scipy.stats import norm
from scipy import stats
df_test = pd.read_csv("../../test.csv")
df_train = pd.read_csv("../../train.csv")
# Summary statistics of the target variable (output of describe() below):
#count 1460.000000
#mean 180921.195890
#std 79442.502883
#min 34900.000000
#25% 129975.000000
#50% 163000.000000
#75% 214000.000000
#max 755000.000000
df_train['SalePrice'].describe();
# Plot the distribution of the target variable
sns.distplot(df_train['SalePrice'])
#plt.show()
# Correlation coefficients across all features
corrmat = df_train.corr()
f,ax = plt.subplots(figsize=(12,9))
sns.heatmap(corrmat,vmax=1,vmin= -1,square=True)
#plt.show()
# Top 10 features most positively correlated with SalePrice
k = 10
cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index
cm = np.corrcoef(df_train[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
#plt.show()
sns.set()
cols = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'FullBath', 'YearBuilt']
sns.pairplot(df_train[cols], size = 2.5)
#plt.show();
# Handling missing values
# Summarize all missing data in the dataset
total = df_train.isnull().sum().sort_values(ascending=False)
percent = (df_train.isnull().sum()/df_train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data.head(20)
# Missing values need case-by-case treatment: for a strongly predictive feature, a low missing
# rate can be imputed and a high missing rate can be estimated via regression; for a weakly
# predictive feature, a low missing rate can be left alone and a high missing rate justifies
# dropping the column outright.
# Inspection shows the columns with missing values all have low correlation coefficients and
# weak predictive power, so they can simply be dropped.
df_train = df_train.drop((missing_data[missing_data['Total'] > 1]).index,1)
df_train = df_train.drop(df_train.loc[df_train['Electrical'].isnull()].index)
df_train.isnull().sum().max()
# Inspect outliers
saleprice_scaled = StandardScaler().fit_transform(df_train['SalePrice'][:,np.newaxis]);
low_range = saleprice_scaled[saleprice_scaled[:,0].argsort()][:10]
high_range= saleprice_scaled[saleprice_scaled[:,0].argsort()][-10:]
print('outer range (low) of the distribution:')
print(low_range)
print('\nouter range (high) of the distribution:')
print(high_range)
var = 'GrLivArea'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0,800000));
# Drop outlier points to improve accuracy
df_train.sort_values(by = 'GrLivArea', ascending = False)[:2]
df_train = df_train.drop(df_train[df_train['Id'] == 1299].index)
df_train = df_train.drop(df_train[df_train['Id'] == 524].index)
var = 'TotalBsmtSF'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0,800000));
# Check the normality / homoscedasticity assumptions
sns.distplot(df_train['SalePrice'], fit=norm);
fig = plt.figure()
res = stats.probplot(df_train['SalePrice'], plot=plt)
# Skewness is greater than one: the peak leans left with a long right tail, i.e., positive skew
# Apply a log transformation so the data better satisfies the normality assumption
df_train['SalePrice'] = np.log(df_train['SalePrice'])
sns.distplot(df_train['SalePrice'], fit=norm);
fig = plt.figure()
res = stats.probplot(df_train['SalePrice'], plot=plt)
sns.distplot(df_train['GrLivArea'], fit=norm);
fig = plt.figure()
res = stats.probplot(df_train['GrLivArea'], plot=plt)
# Also positively skewed, so apply a log transformation
df_train['GrLivArea'] = np.log(df_train['GrLivArea'])
sns.distplot(df_train['GrLivArea'], fit=norm);
fig = plt.figure()
res = stats.probplot(df_train['GrLivArea'], plot=plt)
# TotalBsmtSF contains zero values, so it cannot be log-transformed directly;
# flag the rows that actually have a basement first (the HasBsmt column is
# required by the transformation below).
df_train['HasBsmt'] = pd.Series(len(df_train['TotalBsmtSF']), index=df_train.index)
df_train['HasBsmt'] = 0
df_train.loc[df_train['TotalBsmtSF'] > 0, 'HasBsmt'] = 1  # HasBsmt = 1 where TotalBsmtSF > 0, else 0
# Log-transform only the flagged rows, i.e., those with TotalBsmtSF > 0
df_train.loc[df_train['HasBsmt'] == 1, 'TotalBsmtSF'] = np.log(df_train['TotalBsmtSF'])
sns.distplot(df_train[df_train['TotalBsmtSF']>0]['TotalBsmtSF'], fit=norm);
fig = plt.figure()
res = stats.probplot(df_train[df_train['TotalBsmtSF']>0]['TotalBsmtSF'], plot=plt)
df_train = pd.get_dummies(df_train)
#print(df_train)
pd.DataFrame(df_train).to_csv('../../CleantrainingDataSet_Bang.csv', index=False)
| 32.892562 | 141 | 0.732161 |
daba1a8cc7d86fdeacce5834b4c188aaa8cc25c8
| 412 |
py
|
Python
|
deleting_face.py
|
olgOk/AngelHack-Hackathon-June2019
|
bd44bb9a27df25bb31a4d70d9c3cbb5530717ec7
|
[
"MIT"
] | null | null | null |
deleting_face.py
|
olgOk/AngelHack-Hackathon-June2019
|
bd44bb9a27df25bb31a4d70d9c3cbb5530717ec7
|
[
"MIT"
] | null | null | null |
deleting_face.py
|
olgOk/AngelHack-Hackathon-June2019
|
bd44bb9a27df25bb31a4d70d9c3cbb5530717ec7
|
[
"MIT"
] | null | null | null |
import boto3
if __name__ == "__main__":
collectionId='Gamers'
faces=["d5e10426-1383-4674-8025-384a6fff661b"]
client=boto3.client('rekognition')
response=client.delete_faces(CollectionId=collectionId,
FaceIds=faces)
print(str(len(response['DeletedFaces'])) + ' faces deleted:')
for faceId in response['DeletedFaces']:
print (faceId)
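    # Note (illustrative, not part of the original script): the hard-coded face ID
    # above would normally come from an earlier index_faces call, or be looked up
    # with client.list_faces(CollectionId=collectionId)["Faces"].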
| 27.466667 | 73 | 0.628641 |
29e1dc095b78a27ca945dda23d00f70a09926053
| 44 |
py
|
Python
|
beat_evaluation_toolbox/__init__.py
|
Fhrozen/Beat-Tracking-Evaluation-Toolbox
|
652521cc8cb82c794f28975731d93e736a40f09c
|
[
"MIT"
] | null | null | null |
beat_evaluation_toolbox/__init__.py
|
Fhrozen/Beat-Tracking-Evaluation-Toolbox
|
652521cc8cb82c794f28975731d93e736a40f09c
|
[
"MIT"
] | null | null | null |
beat_evaluation_toolbox/__init__.py
|
Fhrozen/Beat-Tracking-Evaluation-Toolbox
|
652521cc8cb82c794f28975731d93e736a40f09c
|
[
"MIT"
] | null | null | null |
from beat_evaluation_toolbox.main import *
| 22 | 43 | 0.840909 |
cad39b9d21d487ae3d06dad3eae3e452ba8051e3
| 3,426 |
py
|
Python
|
aoc/year_2021/day_05/solver.py
|
logan-connolly/AoC
|
23f47e72abaf438cc97897616be4d6b057a01bf3
|
[
"MIT"
] | 2 |
2020-12-06T10:59:52.000Z
|
2021-09-29T22:14:03.000Z
|
aoc/year_2021/day_05/solver.py
|
logan-connolly/AoC
|
23f47e72abaf438cc97897616be4d6b057a01bf3
|
[
"MIT"
] | null | null | null |
aoc/year_2021/day_05/solver.py
|
logan-connolly/AoC
|
23f47e72abaf438cc97897616be4d6b057a01bf3
|
[
"MIT"
] | 2 |
2021-09-29T22:14:18.000Z
|
2022-01-18T02:20:26.000Z
|
"""This is the Solution for Year 2021 Day 05"""
import itertools
from collections import Counter
from dataclasses import dataclass
from aoc.abstracts.solver import Answers, StrLines
@dataclass(frozen=True)
class Point:
"""Immutable point that will define x and y on 2D plane"""
x: int
y: int
@dataclass
class LineSegment:
"""Define a line object that takes a start and end point"""
start: Point
end: Point
@property
def slope(self) -> int:
return int((self.start.y - self.end.y) / (self.start.x - self.end.x))
@property
def intercept(self) -> int:
return int(self.start.y - (self.start.x * self.slope))
def is_vertical(self) -> bool:
return self.start.x == self.end.x
def is_horizontal(self) -> bool:
return self.start.y == self.end.y
def y_range(self) -> range:
coords = self.start.y, self.end.y
return range(min(coords), max(coords) + 1)
def x_range(self) -> range:
coords = self.start.x, self.end.x
return range(min(coords), max(coords) + 1)
def calculate_y(self, x: int) -> int:
return int(self.slope * x + self.intercept)
def parse_point(raw_point: str) -> Point:
"""Parse point from raw string"""
x, y = raw_point.split(",")
return Point(x=int(x), y=int(y))
def parse_lines(lines: StrLines) -> list[LineSegment]:
"""Parse raw lines into Lines and Points"""
parsed_lines = []
for raw_line in lines:
raw_start, raw_end = raw_line.split(" -> ")
start_point = parse_point(raw_start)
end_point = parse_point(raw_end)
line = LineSegment(start=start_point, end=end_point)
parsed_lines.append(line)
return parsed_lines
def get_horizontal_vertical_lines(lines: list[LineSegment]) -> list[LineSegment]:
"""Filter for only horizontal or vertical lines"""
return [line for line in lines if line.is_horizontal() or line.is_vertical()]
def get_point_segment(line: LineSegment) -> list[Point]:
"""Get a list of points in a given line"""
if line.is_vertical():
return [Point(x=line.start.x, y=y) for y in line.y_range()]
return [Point(x=x, y=line.calculate_y(x)) for x in line.x_range()]
def get_point_occurences(lines: list[LineSegment]) -> dict[Point, int]:
"""Count up the number of occurences for a given point"""
segment_points = (get_point_segment(line) for line in lines)
return Counter(itertools.chain.from_iterable(segment_points))
class Solver:
def __init__(self, data: str) -> None:
self.data = data
def _preprocess(self) -> StrLines:
return self.data.splitlines()
def _solve_part_one(self, lines: StrLines) -> int:
parsed_lines = parse_lines(lines)
filtered_lines = get_horizontal_vertical_lines(parsed_lines)
point_count = get_point_occurences(filtered_lines)
return sum(1 for n_occurences in point_count.values() if n_occurences >= 2)
def _solve_part_two(self, lines: StrLines) -> int:
parsed_lines = parse_lines(lines)
point_count = get_point_occurences(parsed_lines)
return sum(1 for n_occurences in point_count.values() if n_occurences >= 2)
def solve(self) -> Answers:
lines = self._preprocess()
ans_one = self._solve_part_one(lines)
ans_two = self._solve_part_two(lines)
return Answers(part_one=ans_one, part_two=ans_two)
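# Usage sketch (illustrative; the file name is an assumption, and in the real
# repo the Solver is normally driven by the shared abstract runner):
#
#     data = open("input.txt").read()
#     answers = Solver(data).solve()
#     print(answers.part_one, answers.part_two)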
| 31.145455 | 83 | 0.66725 |
c604690602a1802ebd8d2d45cf47322e205d3b58
| 2,997 |
py
|
Python
|
sprox/metadata.py
|
carl-wallace/sprox
|
69c8639b86318c28bbaad36125232d144d8be380
|
[
"MIT"
] | 3 |
2015-07-03T16:31:22.000Z
|
2018-04-19T04:26:02.000Z
|
sprox/metadata.py
|
carl-wallace/sprox
|
69c8639b86318c28bbaad36125232d144d8be380
|
[
"MIT"
] | 8 |
2015-02-23T23:01:50.000Z
|
2021-07-06T14:10:26.000Z
|
sprox/metadata.py
|
carl-wallace/sprox
|
69c8639b86318c28bbaad36125232d144d8be380
|
[
"MIT"
] | 7 |
2015-06-14T04:07:53.000Z
|
2020-04-28T13:50:50.000Z
|
"""
metadata Module
This contains the class which defines the generic interface for metadata.
Basically, it provides an interface for how data is extracted from the provider
for widget generation.
Copyright (c) 2008 Christopher Perkins
Original Version by Christopher Perkins 2007
Released under MIT license.
"""
from sprox.iprovider import IProvider
class MetadataError(Exception):pass
class NotFoundError(Exception):pass
class Metadata(dict):
"""Base Metadata class
Metadatas are dictionary-like. They map attributes
of the entity they wrap, so that attributes of the entity
can be examined without being explicitly set. Elements
of a metadata can be set if they are not already part of the
wrapped entity. This allows for customization of the
metadata without modification to the wrapped metadata.
"""
def __init__(self, provider, entity=None):
self.provider = provider
self.entity = entity
def __setitem__(self, key, value):
self._do_check_set_item(key, value)
dict.__setitem__(self, key, value)
def _do_get_item(self, item):
raise NotImplementedError
    def _do_keys(self):
raise NotImplementedError
def _do_check_set_item(self, key, value):
raise NotImplementedError
def __getitem__(self, item):
try:
value = self._do_get_item(item)
return value
except NotFoundError:
return dict.__getitem__(self, item)
def keys(self):
r = self._do_keys()
r.extend(dict.keys(self))
return r
class EntitiesMetadata(Metadata):
"""A class to extract entities from a database definition.
"""
def _do_get_item(self, name):
if name in self.provider.get_entities():
return self.provider.get_entity(name)
raise NotFoundError
def _do_keys(self):
entities = sorted(self.provider.get_entities())
return entities
class FieldsMetadata(Metadata):
"""A class to extract fields from an entity.
"""
def __init__(self, provider, entity):
Metadata.__init__(self, provider, entity)
self.provider = provider
self.entity = entity
def _do_check_set_item(self, key, value):
if key in self.provider.get_fields(self.entity):
raise MetadataError('%s is already found in entity: %s'%(key, self.entity))
def _do_get_item(self, item):
try:
return self.provider.get_field(self.entity, item)
except AttributeError:
            #XXX I'm not sure if we should change the type, but we shouldn't swallow with except:
if dict.__contains__(self, item):
return dict.get(self, item)
raise NotFoundError(self.entity,item)
def _do_keys(self):
return self.provider.get_fields(self.entity)
class FieldMetadata(Metadata):
"""In the future, if the Field attributes need to be extracted, this is where it will happen.
"""
pass
| 30.896907 | 97 | 0.67701 |
87d36134a2850ce9a0335e3009b54e86f60c9e49
| 4,539 |
py
|
Python
|
SBW_Classification_PyTorch/extension/normalization/cdWhiteningSigma.py
|
huangleiBuaa/StochasticityBW
|
11db7ed0238f0c7cd5f6e336a087fc1d0427b1e6
|
[
"BSD-2-Clause"
] | 8 |
2020-03-23T15:46:13.000Z
|
2022-03-25T03:11:17.000Z
|
SBW_Classification_PyTorch/extension/normalization/cdWhiteningSigma.py
|
huangleiBuaa/StochasticityBW
|
11db7ed0238f0c7cd5f6e336a087fc1d0427b1e6
|
[
"BSD-2-Clause"
] | null | null | null |
SBW_Classification_PyTorch/extension/normalization/cdWhiteningSigma.py
|
huangleiBuaa/StochasticityBW
|
11db7ed0238f0c7cd5f6e336a087fc1d0427b1e6
|
[
"BSD-2-Clause"
] | 1 |
2022-03-25T03:11:20.000Z
|
2022-03-25T03:11:20.000Z
|
import torch.nn
from torch.nn import Parameter
__all__ = ['cdWhiteningSigma', 'CDWhiteningSigma']
class CDWhiteningSigma_Single(torch.nn.Module):
def __init__(self, num_features, dim=4, eps=1e-3, momentum=0.1, affine=True,
*args, **kwargs):
super(CDWhiteningSigma_Single, self).__init__()
# assert dim == 4, 'CDWhiteningSigma is not support 2D'
self.eps = eps
self.momentum = momentum
self.num_features = num_features
self.affine = affine
self.dim = dim
shape = [1] * dim
shape[1] = self.num_features
self.register_buffer('running_mean', torch.zeros(num_features, 1))
# running whiten matrix
self.register_buffer('running_projection', torch.eye(num_features))
def forward(self, X: torch.Tensor):
x = X.transpose(0, 1).contiguous().view(self.num_features, -1)
d, m = x.size()
mean = x.mean(-1, keepdim=True) if self.training else self.running_mean
xc = x - mean
if self.training:
self.running_mean = (1. - self.momentum) * self.running_mean + self.momentum * mean.data
# calculate covariance matrix
sigma = torch.addmm(self.eps, torch.eye(self.num_features).to(X), 1. / m, xc, xc.transpose(0, 1))
self.running_projection = (1. - self.momentum) * self.running_projection + self.momentum * sigma.data
else:
sigma = self.running_projection
# reciprocal of trace of Sigma: shape [g, 1, 1]
L=torch.potrf(sigma, upper=False)
wm = torch.inverse(L)
xn = wm.mm(xc)
Xn = xn.view(X.size(1), X.size(0), *X.size()[2:]).transpose(0, 1).contiguous()
return Xn
class CDWhiteningSigma(torch.nn.Module):
def __init__(self, num_features, num_channels=16, dim=4, eps=1e-3, momentum=0.1, affine=True,
*args, **kwargs):
super(CDWhiteningSigma, self).__init__()
# assert dim == 4, 'CDWhiteningSigma is not support 2D'
self.eps = eps
self.momentum = momentum
self.num_features = num_features
self.num_channels = num_channels
num_groups = (self.num_features-1) // self.num_channels + 1
self.num_groups = num_groups
self.CDWhiteningSigma_Groups = torch.nn.ModuleList(
[CDWhiteningSigma_Single(num_features = self.num_channels, eps=eps, momentum=momentum) for _ in range(self.num_groups-1)]
)
num_channels_last=self.num_features - self.num_channels * (self.num_groups -1)
self.CDWhiteningSigma_Groups.append(CDWhiteningSigma_Single(num_features = num_channels_last, eps=eps, momentum=momentum))
print('CDWhiteningSigma-------m_perGroup:' + str(self.num_channels) + '---nGroup:' + str(self.num_groups))
self.affine = affine
self.dim = dim
shape = [1] * dim
shape[1] = self.num_features
if self.affine:
self.weight = Parameter(torch.Tensor(*shape))
self.bias = Parameter(torch.Tensor(*shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
# self.reset_running_stats()
if self.affine:
torch.nn.init.ones_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, X: torch.Tensor):
X_splits = torch.split(X, self.num_channels, dim=1)
X_hat_splits = []
for i in range(self.num_groups):
X_hat_tmp = self.CDWhiteningSigma_Groups[i](X_splits[i])
X_hat_splits.append(X_hat_tmp)
X_hat = torch.cat(X_hat_splits, dim=1)
# affine
if self.affine:
return X_hat * self.weight + self.bias
else:
return X_hat
def extra_repr(self):
return '{num_features}, num_channels={num_channels}, eps={eps}, ' \
'momentum={momentum}, affine={affine}'.format(**self.__dict__)
if __name__ == '__main__':
ItN = CDWhiteningSigma(8, num_channels=3, momentum=1, affine=False)
print(ItN)
ItN.train()
x = torch.randn(32, 8, 4, 4)
#x = torch.randn(32, 8)
x.requires_grad_()
y = ItN(x)
z = y.transpose(0, 1).contiguous().view(x.size(1), -1)
print(z.matmul(z.t()) / z.size(1))
y.sum().backward()
print('x grad', x.grad.size())
ItN.eval()
y = ItN(x)
z = y.transpose(0, 1).contiguous().view(x.size(1), -1)
print(z.matmul(z.t()) / z.size(1))
| 38.466102 | 133 | 0.614893 |
c1f69f27d2ac1911207c5c18ec8dfd08a5b6b074
| 670 |
py
|
Python
|
venv/Scripts/django-admin.py
|
LucasHollas/CRUD_Django
|
a0add0fa382f19ed76b1cc7dac654e5f5e8e4a91
|
[
"MIT"
] | null | null | null |
venv/Scripts/django-admin.py
|
LucasHollas/CRUD_Django
|
a0add0fa382f19ed76b1cc7dac654e5f5e8e4a91
|
[
"MIT"
] | null | null | null |
venv/Scripts/django-admin.py
|
LucasHollas/CRUD_Django
|
a0add0fa382f19ed76b1cc7dac654e5f5e8e4a91
|
[
"MIT"
] | null | null | null |
#!d:\Python\venv\Scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| 30.454545 | 80 | 0.722388 |
77901b0343ff7459765ef4b3a5560079fbe4b1c6
| 5,822 |
py
|
Python
|
RealData/Pumbed/main.py
|
Ikerlz/dcd
|
056e5c4060f9d655ce4f6234b86481ae4b3f7106
|
[
"MIT"
] | null | null | null |
RealData/Pumbed/main.py
|
Ikerlz/dcd
|
056e5c4060f9d655ce4f6234b86481ae4b3f7106
|
[
"MIT"
] | null | null | null |
RealData/Pumbed/main.py
|
Ikerlz/dcd
|
056e5c4060f9d655ce4f6234b86481ae4b3f7106
|
[
"MIT"
] | null | null | null |
#!/home/lizhe/anaconda3/envs/pyspark/bin/python
from simulationfunc import *
from scipy.io import loadmat
from math import ceil
from pyspark import SparkContext
from pyspark import SparkConf
import os
import warnings
warnings.filterwarnings('ignore')
os.environ['JAVA_HOME'] = '/usr/lib/jvm/jdk8u252'
# Spark Environment
findspark.init("/usr/local/spark")
conf = SparkConf().\
setMaster("local[*]").\
setAll([('spark.executor.memory', '6g'),
('spark.driver.memory', '10g')])
# spark = SparkContext.getOrCreate(conf)
spark = pyspark.sql.SparkSession.builder.\
config(conf=conf).\
appName("Pubmed").\
getOrCreate()
#################################################
# Pandas options
pd.options.display.max_columns = None
pd.options.display.max_rows = None
np.set_printoptions(threshold=np.inf)
#################################################
# define the algorithm function of spark version
def spark_pilot_spectral_clustering(whole_node_num, node_in_master_num, cluster_num,
worker_num, worker_per_sub_reader, adjacency_matrix, index_to_label_dict):
"""
:param whole_node_num: the number of all nodes
:param node_in_master_num: the number of the nodes in the master
    :param cluster_num: the number of clusters
    :param worker_num: the number of workers
    :param worker_per_sub_reader: the number of worker nodes to read into memory at a time
:param adjacency_matrix: the adjacency matrix
:param index_to_label_dict: the index2label dictionary
:return: mis-clustering rate
"""
# Register a user defined function via the Pandas UDF
beta = StructType([StructField('IndexNum', IntegerType(), True),
StructField('ClusterInfo', IntegerType(), True),
StructField('ClusterExp', IntegerType(), True)])
@pandas_udf(beta, PandasUDFType.GROUPED_MAP)
def clustering_worker_udf(data_frame):
return clustering_worker(worker_pdf=data_frame,
master_pdf=master_pdf,
pseudo_center_dict=master_pseudo_dict,
real_data=True)
master_pdf, worker_pdf = split_master_worker(adjacency_matrix,
index_to_label_dict,
master_num=node_in_master_num,
partition_num=worker_num)
master_pseudo_dict, master_cluster_pdf, master_time = \
spectral_clustering_master(master_pdf, cluster_num, real_data=True)
print(master_time)
worker_size = whole_node_num - node_in_master_num
sub_num = ceil(worker_size / worker_per_sub_reader)
for i in range(sub_num):
        # read worker_per_sub_reader rows per chunk; the last chunk takes whatever remains
        if i != sub_num - 1:
            worker_sdf_isub = spark.createDataFrame(
                worker_pdf[(worker_per_sub_reader * i): (worker_per_sub_reader * (i + 1))])
        else:
            worker_sdf_isub = spark.createDataFrame(worker_pdf[(worker_per_sub_reader * i): worker_size])
if i == 0:
worker_sdf = worker_sdf_isub
else:
worker_sdf = worker_sdf.unionAll(worker_sdf_isub)
worker_sdf = worker_sdf.repartitionByRange("PartitionID")
# start1 = time.time()
worker_cluster_sdf = worker_sdf.groupby("PartitionID").apply(clustering_worker_udf)
# end1 = time.time()
worker_cluster_pdf = worker_cluster_sdf.toPandas() # Spark DataFrame => Pandas DataFrame
cluster_pdf = pd.concat(
[master_cluster_pdf, worker_cluster_pdf]) # merge the clustering result on master and worker
mis_rate = get_accurate(cluster_pdf, cluster_num, error=True)
running_time = round(master_time, 6)
return mis_rate, running_time
if __name__ == '__main__':
node_df = pd.read_csv('node_index_label.csv')
node_relationship = pd.read_csv('node_relationship.csv')
index_list = list(node_df.iloc[:, 2])
label_list = list(node_df.iloc[:, 1])
    label_list = [(x-1) for x in label_list]  # let the label index start from zero
id_list = list(node_df.iloc[:, 0])
index2id_dict = dict(zip(index_list, id_list))
index2label_dict = dict(zip(index_list, label_list))
# adjacency matrix
    print('Building the adjacency matrix')
pumbed_adjacency_matrix = np.zeros((19717, 19717), dtype=int)
for i in range(19717):
pumbed_adjacency_matrix[i, i] = 10
for row in node_relationship.itertuples():
index1 = getattr(row, 'index1')
index2 = getattr(row, 'index2')
if index1 != index2:
pumbed_adjacency_matrix[index1, index2] = 1
pumbed_adjacency_matrix[index2, index1] = 1
# settings
total_size = 19717
# pilot_node_number = 400
pilot_ratio_list = [0.02*x for x in range(13, 14)]
cluster_number = 3
worker_number = 2 # equal to the partition number
worker_per_sub = 4000
repeat_number = 1
    print('Starting clustering')
for pilot_ratio in pilot_ratio_list:
pilot_node_number = math.ceil(pilot_ratio * total_size)
mis_rate_list = []
running_time_list = []
for repeat in range(repeat_number):
mis_rate_i, running_time_i = \
spark_pilot_spectral_clustering(total_size, pilot_node_number,
cluster_number, worker_number,
worker_per_sub, pumbed_adjacency_matrix, index2label_dict)
mis_rate_list.append(mis_rate_i)
running_time_list.append(running_time_i)
            print('Finished one run')
print('r:{},R:{},time:{}'.format(pilot_ratio,
round(sum(mis_rate_list)/len(mis_rate_list), 5),
round(sum(running_time_list)/len(running_time_list), 5)
)
)
| 41.585714 | 110 | 0.63157 |
eea46bf69b00c9d69a175aad7ee44aad5445c0e8
| 6,487 |
py
|
Python
|
code/tcp_server.py
|
Reydan46/Sonoff_Devices_DIY_Tools
|
3ff3a5991cbb01e28559beb296fd4b39ef5d2f7c
|
[
"BSD-3-Clause"
] | null | null | null |
code/tcp_server.py
|
Reydan46/Sonoff_Devices_DIY_Tools
|
3ff3a5991cbb01e28559beb296fd4b39ef5d2f7c
|
[
"BSD-3-Clause"
] | null | null | null |
code/tcp_server.py
|
Reydan46/Sonoff_Devices_DIY_Tools
|
3ff3a5991cbb01e28559beb296fd4b39ef5d2f7c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This module is about server threads
class:
SeverThreadForQT:(Based on QThread)
"""
import json
from socket import *
from PySide2.QtCore import *
class SeverThreadForQT(QThread):
"""
This class handles data transfer using sockets directly
    __init__: It must be initialized with server_ip and server_port
    run: Block waiting for device connection, one device at a time
ota_state_Thread: signal
"""
ota_state_Thread = Signal(str)
def __init__(self, parent=None, **func_task):
super(SeverThreadForQT, self).__init__(parent)
HOST = func_task["server_ip"]
PORT = func_task["server_port"]
ADDR = (HOST, PORT)
# Create socket
self.sockfd = socket()
self.sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.sockfd.bind(ADDR)
self.count_bin_len()
    def __del__(self):
self.wait()
def run(self):
self.sockfd.listen(5)
# The loop waits for the client link
while True:
try:
self.connfd, addr = self.sockfd.accept()
except KeyboardInterrupt:
                self.sockfd.close()
return
except Exception as e:
print(e)
continue
print("Client login:", addr)
while True:
try:
data = self.connfd.recv(1024).decode()
print("len :", str(len(data)))
print("Receive :%s" % data)
if "GET" in data:
self.do_GET(data)
elif "POST" in data:
self.do_POST(data)
return
else:
print("The client sends an error instruction")
except BaseException:
self.ota_state_Thread.emit("ERR\n\n0")
break
def count_bin_len(self):
"""
Calculate the size of the firmware
:return: None
"""
with open("itead.bin", 'rb') as file_obj:
img = file_obj.read()
self.bin_len = len(img)
def do_GET(self, data):
print("Handle GET requests for devices")
# Find the digital segment after "bytes="
all_read = self.get_range_bytes(data)
if all_read == -1:
print("NOT FIND", data)
return
with open("itead.bin", 'rb') as file_obj:
file_obj.seek(all_read[0], 0)
self.img = file_obj.read(all_read[1] - all_read[0] + 1)
# Open the file, read the corresponding data segment sent out
print("HEAD:", len(self.img), "ALL LEN:", str(self.bin_len))
# The header that assembles the HTTP data
send_data = 'HTTP/1.1 206 Partial Content\r\n' + \
'Content-type: application/octet-stream\r\n'
send_Range = "bytes=" + \
str(all_read[0]) + "-" + str(all_read[1]) + "/" + str(self.bin_len)
send_data += 'Content-Length: ' + \
str(len(self.img)) + '\r\n' + 'Content-Range: ' + send_Range + "\r\n\r\n"
        self.check_finish(all_read[1])
self.my_send_head(send_data)
print("send_data", str(send_data))
self.connfd.send(self.img)
print("send_data", str(self.img))
get_new = "get\n\n" + str(self.updata_get_rata(all_read[0]) + 1)
print(get_new)
self.ota_state_Thread.emit(get_new)
    def check_finish(self, end_seek):
if (self.bin_len - 1) == end_seek:
self.send_over_flg = True
else:
self.send_over_flg = False
def my_send_head(self, data):
self.connfd.send(bytes(data, "ASCII"))
def get_range_bytes(self, re_data):
print(re_data, type(re_data), len(re_data))
start_index = re_data.find("bytes") + 6
data_f = re_data[start_index:].splitlines()
start_read, end_read = data_f[0].split("-")
print("开始位置:", start_read, "结束位置:", end_read)
return [int(start_read), int(end_read)]
def do_POST(self, data):
print("post:", data)
json_data = json.loads(self.find_post_json(data))
print("json_data", json_data)
if "error" in json_data:
if json_data["error"] == 0:
if self.send_over_flg:
print("To complete the transfer")
post_new = "post\n\n0"
else:
print("Download failed")
post_new = "post\n\n1"
print(post_new)
self.ota_state_Thread.emit(post_new)
print("To complete the transfer")
elif json_data["error"] == 404:
post_new = "post\n\n404"
print(post_new)
self.ota_state_Thread.emit(post_new)
print("Download failed")
elif json_data["error"] == 406:
post_new = "post\n\n406"
print(post_new)
self.ota_state_Thread.emit(post_new)
print("Error issuing upgrade message")
elif json_data["error"] == 409:
post_new = "post\n\n409"
print(post_new)
self.ota_state_Thread.emit(post_new)
print("Check failure")
elif json_data["error"] == 410:
post_new = "post\n\n410"
print(post_new)
self.ota_state_Thread.emit(post_new)
print("Internal error of equipment")
def find_post_json(self, data):
"""
Find the json data in the data section
:param data:Data to look up
:return:(str) "null" or json"{ key: val }"
"""
if "{" in data:
json_sta_index = data.find("{")
else:
return "null"
if "}" in data:
json_end_index = data.find("}")
else:
return "error data pool"
return data[json_sta_index:json_end_index + 1]
def updata_get_rata(self, new_seek):
"""
Update firmware transfer progress
:param new_seek:
:return: (int) Percentage of current updates
"""
return new_seek / self.bin_len * 100
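# Usage sketch (illustrative; the address values and the slot name are assumptions,
# not part of this module): the GUI constructs the thread with the keyword
# arguments read in __init__, connects the progress signal, and starts it.
#
#     server = SeverThreadForQT(server_ip="192.168.1.100", server_port=8080)
#     server.ota_state_Thread.connect(on_ota_state)
#     server.start()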
| 36.240223 | 95 | 0.517034 |
6324631c4820d1f310805cb2aa6db3e3a6a092a1
| 2,696 |
py
|
Python
|
lib/rucio/db/sqla/migrate_repo/versions/0f1adb7a599a_create_transfer_hops_table.py
|
cfenell/rucio-1
|
bf83b9d52e204f3bc912c42f2e149f18413e59e5
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/db/sqla/migrate_repo/versions/0f1adb7a599a_create_transfer_hops_table.py
|
cfenell/rucio-1
|
bf83b9d52e204f3bc912c42f2e149f18413e59e5
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/db/sqla/migrate_repo/versions/0f1adb7a599a_create_transfer_hops_table.py
|
cfenell/rucio-1
|
bf83b9d52e204f3bc912c42f2e149f18413e59e5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Radu Carpa <[email protected]>, 2022
''' create transfer hops table '''
import datetime
import sqlalchemy as sa
from alembic import context
from alembic.op import (create_table, create_primary_key, create_foreign_key,
create_check_constraint, create_index, drop_table)
from rucio.db.sqla.types import GUID
# Alembic revision identifiers
revision = '0f1adb7a599a'
down_revision = '9a45bc4ea66d'
def upgrade():
'''
Upgrade the database to this revision
'''
if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
create_table('transfer_hops',
sa.Column('request_id', GUID()),
sa.Column('next_hop_request_id', GUID()),
sa.Column('initial_request_id', GUID()),
sa.Column('created_at', sa.DateTime, default=datetime.datetime.utcnow),
sa.Column('updated_at', sa.DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow))
create_primary_key('TRANSFER_HOPS_PK', 'transfer_hops', ['request_id', 'next_hop_request_id', 'initial_request_id'])
create_foreign_key('TRANSFER_HOPS_INIT_REQ_ID_FK', 'transfer_hops', 'requests', ['initial_request_id'], ['id'])
create_foreign_key('TRANSFER_HOPS_REQ_ID_FK', 'transfer_hops', 'requests', ['request_id'], ['id'])
create_foreign_key('TRANSFER_HOPS_NH_REQ_ID_FK', 'transfer_hops', 'requests', ['next_hop_request_id'], ['id'])
create_check_constraint('TRANSFER_HOPS_CREATED_NN', 'transfer_hops', 'created_at is not null')
create_check_constraint('TRANSFER_HOPS_UPDATED_NN', 'transfer_hops', 'updated_at is not null')
create_index('TRANSFER_HOPS_INITIAL_REQ_IDX', 'transfer_hops', ['initial_request_id'])
create_index('TRANSFER_HOPS_NH_REQ_IDX', 'transfer_hops', ['next_hop_request_id'])
def downgrade():
'''
Downgrade the database to the previous revision
'''
if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
drop_table('transfer_hops')
| 40.238806 | 127 | 0.700668 |
a841837c81eecb77a166b30c29f91dd507506b88
| 12,418 |
py
|
Python
|
nova/tests/unit/objects/test_migration.py
|
bopopescu/TestNova
|
fb6a183b54f87cc078dc6de5be89711ec0d9ac26
|
[
"Apache-2.0"
] | 1 |
2018-08-19T02:13:16.000Z
|
2018-08-19T02:13:16.000Z
|
nova/tests/unit/objects/test_migration.py
|
bopopescu/TestNova
|
fb6a183b54f87cc078dc6de5be89711ec0d9ac26
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/objects/test_migration.py
|
bopopescu/TestNova
|
fb6a183b54f87cc078dc6de5be89711ec0d9ac26
|
[
"Apache-2.0"
] | 1 |
2020-07-22T22:13:56.000Z
|
2020-07-22T22:13:56.000Z
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import timeutils
from nova import context
from nova.db import api as db
from nova import exception
from nova import objects
from nova.objects import migration
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
def fake_db_migration(**updates):
db_instance = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'uuid': uuidsentinel.migration,
'source_compute': 'compute-source',
'dest_compute': 'compute-dest',
'source_node': 'node-source',
'dest_node': 'node-dest',
'dest_host': 'host-dest',
'old_instance_type_id': 42,
'new_instance_type_id': 84,
'instance_uuid': 'fake-uuid',
'status': 'migrating',
'migration_type': 'resize',
'hidden': False,
'memory_total': 123456,
'memory_processed': 12345,
'memory_remaining': 111111,
'disk_total': 234567,
'disk_processed': 23456,
'disk_remaining': 211111,
}
if updates:
db_instance.update(updates)
return db_instance
class _TestMigrationObject(object):
@mock.patch.object(db, 'migration_get')
def test_get_by_id(self, mock_get):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
mock_get.return_value = fake_migration
mig = migration.Migration.get_by_id(ctxt, fake_migration['id'])
self.compare_obj(mig, fake_migration)
mock_get.assert_called_once_with(ctxt, fake_migration['id'])
@mock.patch.object(db, 'migration_get_by_instance_and_status')
def test_get_by_instance_and_status(self, mock_get):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
mock_get.return_value = fake_migration
mig = migration.Migration.get_by_instance_and_status(
ctxt, fake_migration['id'], 'migrating')
self.compare_obj(mig, fake_migration)
mock_get.assert_called_once_with(ctxt,
fake_migration['id'],
'migrating')
@mock.patch('nova.db.api.migration_get_in_progress_by_instance')
def test_get_in_progress_by_instance(self, m_get_mig):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
db_migrations = [fake_migration, dict(fake_migration, id=456)]
m_get_mig.return_value = db_migrations
migrations = migration.MigrationList.get_in_progress_by_instance(
ctxt, fake_migration['instance_uuid'])
self.assertEqual(2, len(migrations))
for index, db_migration in enumerate(db_migrations):
self.compare_obj(migrations[index], db_migration)
@mock.patch.object(db, 'migration_create')
def test_create(self, mock_create):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
mock_create.return_value = fake_migration
mig = migration.Migration(context=ctxt)
mig.source_compute = 'foo'
mig.migration_type = 'resize'
mig.uuid = uuidsentinel.migration
mig.create()
self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)
self.assertIn('uuid', mig)
mock_create.assert_called_once_with(ctxt,
{'source_compute': 'foo',
'migration_type': 'resize',
'uuid': uuidsentinel.migration})
@mock.patch.object(db, 'migration_create')
def test_recreate_fails(self, mock_create):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
mock_create.return_value = fake_migration
mig = migration.Migration(context=ctxt)
mig.source_compute = 'foo'
mig.migration_type = 'resize'
mig.uuid = uuidsentinel.migration
mig.create()
self.assertRaises(exception.ObjectActionError, mig.create)
mock_create.assert_called_once_with(ctxt,
{'source_compute': 'foo',
'migration_type': 'resize',
'uuid': uuidsentinel.migration})
def test_create_fails_migration_type(self):
ctxt = context.get_admin_context()
mig = migration.Migration(context=ctxt,
old_instance_type_id=42,
new_instance_type_id=84)
mig.source_compute = 'foo'
self.assertRaises(exception.ObjectActionError, mig.create)
@mock.patch.object(db, 'migration_update')
def test_save(self, mock_update):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
mock_update.return_value = fake_migration
mig = migration.Migration(context=ctxt)
mig.id = 123
mig.source_compute = 'foo'
mig.save()
self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)
mock_update.assert_called_once_with(ctxt, 123,
{'source_compute': 'foo'})
@mock.patch.object(db, 'instance_get_by_uuid')
def test_instance(self, mock_get):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
fake_inst = fake_instance.fake_db_instance()
mock_get.return_value = fake_inst
mig = migration.Migration._from_db_object(ctxt,
migration.Migration(),
fake_migration)
mig._context = ctxt
self.assertEqual(mig.instance.host, fake_inst['host'])
mock_get.assert_called_once_with(ctxt,
fake_migration['instance_uuid'],
columns_to_join=['info_cache',
'security_groups'])
def test_instance_setter(self):
migration = objects.Migration(instance_uuid=uuidsentinel.instance)
inst = objects.Instance(uuid=uuidsentinel.instance)
with mock.patch('nova.objects.Instance.get_by_uuid') as mock_get:
migration.instance = inst
migration.instance
self.assertFalse(mock_get.called)
self.assertEqual(inst, migration._cached_instance)
self.assertEqual(inst, migration.instance)
@mock.patch.object(db, 'migration_get_unconfirmed_by_dest_compute')
def test_get_unconfirmed_by_dest_compute(self, mock_get):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
db_migrations = [fake_migration, dict(fake_migration, id=456)]
mock_get.return_value = db_migrations
migrations = (
migration.MigrationList.get_unconfirmed_by_dest_compute(
ctxt, 'window', 'foo', use_slave=False))
self.assertEqual(2, len(migrations))
for index, db_migration in enumerate(db_migrations):
self.compare_obj(migrations[index], db_migration)
mock_get.assert_called_once_with(ctxt, 'window', 'foo')
@mock.patch.object(db, 'migration_get_in_progress_by_host_and_node')
def test_get_in_progress_by_host_and_node(self, mock_get):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
db_migrations = [fake_migration, dict(fake_migration, id=456)]
mock_get.return_value = db_migrations
migrations = (
migration.MigrationList.get_in_progress_by_host_and_node(
ctxt, 'host', 'node'))
self.assertEqual(2, len(migrations))
for index, db_migration in enumerate(db_migrations):
self.compare_obj(migrations[index], db_migration)
mock_get.assert_called_once_with(ctxt, 'host', 'node')
@mock.patch.object(db, 'migration_get_all_by_filters')
def test_get_by_filters(self, mock_get):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
db_migrations = [fake_migration, dict(fake_migration, id=456)]
filters = {'foo': 'bar'}
mock_get.return_value = db_migrations
migrations = migration.MigrationList.get_by_filters(ctxt, filters)
self.assertEqual(2, len(migrations))
for index, db_migration in enumerate(db_migrations):
self.compare_obj(migrations[index], db_migration)
mock_get.assert_called_once_with(ctxt, filters,
sort_dirs=None, sort_keys=None,
limit=None, marker=None)
def test_migrate_old_resize_record(self):
db_migration = dict(fake_db_migration(), migration_type=None)
with mock.patch('nova.db.api.migration_get') as fake_get:
fake_get.return_value = db_migration
mig = objects.Migration.get_by_id(context.get_admin_context(), 1)
self.assertTrue(mig.obj_attr_is_set('migration_type'))
self.assertEqual('resize', mig.migration_type)
def test_migrate_old_migration_record(self):
db_migration = dict(
fake_db_migration(), migration_type=None,
old_instance_type_id=1, new_instance_type_id=1)
with mock.patch('nova.db.api.migration_get') as fake_get:
fake_get.return_value = db_migration
mig = objects.Migration.get_by_id(context.get_admin_context(), 1)
self.assertTrue(mig.obj_attr_is_set('migration_type'))
self.assertEqual('migration', mig.migration_type)
def test_migrate_unset_type_resize(self):
mig = objects.Migration(old_instance_type_id=1,
new_instance_type_id=2)
self.assertEqual('resize', mig.migration_type)
self.assertTrue(mig.obj_attr_is_set('migration_type'))
def test_migrate_unset_type_migration(self):
mig = objects.Migration(old_instance_type_id=1,
new_instance_type_id=1)
self.assertEqual('migration', mig.migration_type)
self.assertTrue(mig.obj_attr_is_set('migration_type'))
@mock.patch('nova.db.api.migration_get_by_id_and_instance')
def test_get_by_id_and_instance(self, fake_get):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
fake_get.return_value = fake_migration
migration = objects.Migration.get_by_id_and_instance(ctxt, '1', '1')
self.compare_obj(migration, fake_migration)
def test_create_uuid_on_load(self):
values = {'source_compute': 'src',
'dest_compute': 'dst',
'source_node': 'srcnode',
'dest_node': 'dstnode',
'instance_uuid': 'fake',
'status': 'faking',
'migration_type': 'migration',
'created_at': None,
'deleted_at': None,
'updated_at': None}
db_mig = db.migration_create(self.context, values)
mig = objects.Migration.get_by_id(self.context, db_mig.id)
self.assertIn('uuid', mig)
uuid = mig.uuid
# Make sure that it was saved and we get the same one back
mig = objects.Migration.get_by_id(self.context, db_mig.id)
self.assertEqual(uuid, mig.uuid)
class TestMigrationObject(test_objects._LocalTest,
_TestMigrationObject):
pass
class TestRemoteMigrationObject(test_objects._RemoteTest,
_TestMigrationObject):
pass
| 43.118056 | 78 | 0.636334 |
25f67b24dc255d0483dfad2a82ecccbdf60842e0
| 601 |
py
|
Python
|
038.Decorators_with_arguments/decorator_finish.py
|
adrija-bhandari/tutorials.python.Corey-Schafer-Python-Tutorial
|
450d11f81c85793666a166f91892d3fc7de1dcdb
|
[
"MIT"
] | null | null | null |
038.Decorators_with_arguments/decorator_finish.py
|
adrija-bhandari/tutorials.python.Corey-Schafer-Python-Tutorial
|
450d11f81c85793666a166f91892d3fc7de1dcdb
|
[
"MIT"
] | null | null | null |
038.Decorators_with_arguments/decorator_finish.py
|
adrija-bhandari/tutorials.python.Corey-Schafer-Python-Tutorial
|
450d11f81c85793666a166f91892d3fc7de1dcdb
|
[
"MIT"
] | null | null | null |
def prefix_decorator(prefix):
def decorator_function(original_function):
def wrapper_function(*args, **kwargs):
print(prefix, 'Executed Before', original_function.__name__)
result = original_function(*args, **kwargs)
print(prefix, 'Executed After', original_function.__name__, '\n')
return result
return wrapper_function
return decorator_function
@prefix_decorator('LOG:')
def display_info(name, age):
print('display_info ran with arguments ({}, {})'.format(name, age))
display_info('John', 25)
display_info('Travis', 30)
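# Expected console output from the two calls above (the 'LOG:' prefix comes from
# the decorator argument, and the wrapper prints before and after each call):
#
#   LOG: Executed Before display_info
#   display_info ran with arguments (John, 25)
#   LOG: Executed After display_info
#
#   LOG: Executed Before display_info
#   display_info ran with arguments (Travis, 30)
#   LOG: Executed After display_info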
| 33.388889 | 77 | 0.678869 |
add8fba36f8806dcdee27f5d571324ed56384b4c
| 468 |
py
|
Python
|
data/scripts/templates/object/static/structure/general/shared_cave_stalagmite_tato_s01_small.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20 |
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/static/structure/general/shared_cave_stalagmite_tato_s01_small.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/static/structure/general/shared_cave_stalagmite_tato_s01_small.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20 |
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/general/shared_cave_stalagmite_tato_s01_small.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.529412 | 94 | 0.737179 |
56c455d9c2d105ad4f8f51886464a4d29ce9b20d
| 42,307 |
py
|
Python
|
src/v5.1/resources/swagger_client/api/student_section_associations_api.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | 2 |
2021-04-27T17:18:17.000Z
|
2021-04-27T19:14:39.000Z
|
src/v5.1/resources/swagger_client/api/student_section_associations_api.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | null | null | null |
src/v5.1/resources/swagger_client/api/student_section_associations_api.py
|
xmarcosx/edfi-notebook
|
0564ebdf1d0f45a9d25056e7e61369f0a837534d
|
[
"Apache-2.0"
] | 1 |
2022-01-06T09:43:11.000Z
|
2022-01-06T09:43:11.000Z
|
# coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class StudentSectionAssociationsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_student_section_association_by_id(self, id, **kwargs): # noqa: E501
"""Deletes an existing resource using the resource identifier. # noqa: E501
The DELETE operation is used to delete an existing resource by identifier. If the resource doesn't exist, an error will result (the resource will not be found). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_student_section_association_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_match: The ETag header value used to prevent the DELETE from removing a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_student_section_association_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_student_section_association_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_student_section_association_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Deletes an existing resource using the resource identifier. # noqa: E501
The DELETE operation is used to delete an existing resource by identifier. If the resource doesn't exist, an error will result (the resource will not be found). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_student_section_association_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_match: The ETag header value used to prevent the DELETE from removing a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'if_match'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_student_section_association_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `delete_student_section_association_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_match' in params:
header_params['If-Match'] = params['if_match'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/studentSectionAssociations/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def deletes_student_section_associations(self, **kwargs): # noqa: E501
"""Retrieves deleted resources based on change version. # noqa: E501
The DELETES operation is used to retrieve deleted resources. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deletes_student_section_associations(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:return: list[EdFiStudentSectionAssociation]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.deletes_student_section_associations_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.deletes_student_section_associations_with_http_info(**kwargs) # noqa: E501
return data
def deletes_student_section_associations_with_http_info(self, **kwargs): # noqa: E501
"""Retrieves deleted resources based on change version. # noqa: E501
The DELETES operation is used to retrieve deleted resources. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deletes_student_section_associations_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:return: list[EdFiStudentSectionAssociation]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'min_change_version', 'max_change_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method deletes_student_section_associations" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `deletes_student_section_associations`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `deletes_student_section_associations`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'min_change_version' in params:
query_params.append(('minChangeVersion', params['min_change_version'])) # noqa: E501
if 'max_change_version' in params:
query_params.append(('maxChangeVersion', params['max_change_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/studentSectionAssociations/deletes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EdFiStudentSectionAssociation]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_student_section_associations(self, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
        This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if any exist).  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_student_section_associations(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:param bool total_count: Indicates if the total number of items available should be returned in the 'Total-Count' header of the response. If set to false, 'Total-Count' header will not be provided.
:param date begin_date: Month, day, and year of the Student's entry or assignment to the Section.
:param str local_course_code: The local code assigned by the School that identifies the course offering provided for the instruction of students.
:param int school_id: The identifier assigned to a school.
:param int school_year: The identifier for the school year.
:param str section_identifier: The local identifier assigned to a section.
:param str session_name: The identifier for the calendar for the academic session (e.g., 2010/11, 2011 Summer).
:param str student_unique_id: A unique alphanumeric code assigned to a student.
:param str attempt_status_descriptor: An indication of the student's completion status for the section.
:param str repeat_identifier_descriptor: An indication as to whether a student has previously taken a given course. Repeated, counted in grade point average Repeated, not counted in grade point average Not repeated Other.
:param date end_date: Month, day, and year of the withdrawal or exit of the Student from the Section.
        :param bool homeroom_indicator: Indicates the Section is the student's homeroom. Homeroom period may be the convention for taking daily attendance.
:param str id:
:param bool teacher_student_data_link_exclusion: Indicates that the student-section combination is excluded from calculation of value-added or growth attribution calculations used for a particular teacher evaluation.
:return: list[EdFiStudentSectionAssociation]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_student_section_associations_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_student_section_associations_with_http_info(**kwargs) # noqa: E501
return data
def get_student_section_associations_with_http_info(self, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
        This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if any exist).  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_student_section_associations_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:param bool total_count: Indicates if the total number of items available should be returned in the 'Total-Count' header of the response. If set to false, 'Total-Count' header will not be provided.
:param date begin_date: Month, day, and year of the Student's entry or assignment to the Section.
:param str local_course_code: The local code assigned by the School that identifies the course offering provided for the instruction of students.
:param int school_id: The identifier assigned to a school.
:param int school_year: The identifier for the school year.
:param str section_identifier: The local identifier assigned to a section.
:param str session_name: The identifier for the calendar for the academic session (e.g., 2010/11, 2011 Summer).
:param str student_unique_id: A unique alphanumeric code assigned to a student.
:param str attempt_status_descriptor: An indication of the student's completion status for the section.
:param str repeat_identifier_descriptor: An indication as to whether a student has previously taken a given course. Repeated, counted in grade point average Repeated, not counted in grade point average Not repeated Other.
:param date end_date: Month, day, and year of the withdrawal or exit of the Student from the Section.
        :param bool homeroom_indicator: Indicates the Section is the student's homeroom. Homeroom period may be the convention for taking daily attendance.
:param str id:
:param bool teacher_student_data_link_exclusion: Indicates that the student-section combination is excluded from calculation of value-added or growth attribution calculations used for a particular teacher evaluation.
:return: list[EdFiStudentSectionAssociation]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'min_change_version', 'max_change_version', 'total_count', 'begin_date', 'local_course_code', 'school_id', 'school_year', 'section_identifier', 'session_name', 'student_unique_id', 'attempt_status_descriptor', 'repeat_identifier_descriptor', 'end_date', 'homeroom_indicator', 'id', 'teacher_student_data_link_exclusion'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_student_section_associations" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_student_section_associations`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_student_section_associations`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and ('local_course_code' in params and
len(params['local_course_code']) > 60):
raise ValueError("Invalid value for parameter `local_course_code` when calling `get_student_section_associations`, length must be less than or equal to `60`") # noqa: E501
if self.api_client.client_side_validation and ('section_identifier' in params and
len(params['section_identifier']) > 255):
raise ValueError("Invalid value for parameter `section_identifier` when calling `get_student_section_associations`, length must be less than or equal to `255`") # noqa: E501
if self.api_client.client_side_validation and ('session_name' in params and
len(params['session_name']) > 60):
raise ValueError("Invalid value for parameter `session_name` when calling `get_student_section_associations`, length must be less than or equal to `60`") # noqa: E501
if self.api_client.client_side_validation and ('student_unique_id' in params and
len(params['student_unique_id']) > 32):
raise ValueError("Invalid value for parameter `student_unique_id` when calling `get_student_section_associations`, length must be less than or equal to `32`") # noqa: E501
if self.api_client.client_side_validation and ('attempt_status_descriptor' in params and
len(params['attempt_status_descriptor']) > 306):
raise ValueError("Invalid value for parameter `attempt_status_descriptor` when calling `get_student_section_associations`, length must be less than or equal to `306`") # noqa: E501
if self.api_client.client_side_validation and ('repeat_identifier_descriptor' in params and
len(params['repeat_identifier_descriptor']) > 306):
raise ValueError("Invalid value for parameter `repeat_identifier_descriptor` when calling `get_student_section_associations`, length must be less than or equal to `306`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'min_change_version' in params:
query_params.append(('minChangeVersion', params['min_change_version'])) # noqa: E501
if 'max_change_version' in params:
query_params.append(('maxChangeVersion', params['max_change_version'])) # noqa: E501
if 'total_count' in params:
query_params.append(('totalCount', params['total_count'])) # noqa: E501
if 'begin_date' in params:
query_params.append(('beginDate', params['begin_date'])) # noqa: E501
if 'local_course_code' in params:
query_params.append(('localCourseCode', params['local_course_code'])) # noqa: E501
if 'school_id' in params:
query_params.append(('schoolId', params['school_id'])) # noqa: E501
if 'school_year' in params:
query_params.append(('schoolYear', params['school_year'])) # noqa: E501
if 'section_identifier' in params:
query_params.append(('sectionIdentifier', params['section_identifier'])) # noqa: E501
if 'session_name' in params:
query_params.append(('sessionName', params['session_name'])) # noqa: E501
if 'student_unique_id' in params:
query_params.append(('studentUniqueId', params['student_unique_id'])) # noqa: E501
if 'attempt_status_descriptor' in params:
query_params.append(('attemptStatusDescriptor', params['attempt_status_descriptor'])) # noqa: E501
if 'repeat_identifier_descriptor' in params:
query_params.append(('repeatIdentifierDescriptor', params['repeat_identifier_descriptor'])) # noqa: E501
if 'end_date' in params:
query_params.append(('endDate', params['end_date'])) # noqa: E501
if 'homeroom_indicator' in params:
query_params.append(('homeroomIndicator', params['homeroom_indicator'])) # noqa: E501
if 'id' in params:
query_params.append(('id', params['id'])) # noqa: E501
if 'teacher_student_data_link_exclusion' in params:
query_params.append(('teacherStudentDataLinkExclusion', params['teacher_student_data_link_exclusion'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/studentSectionAssociations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EdFiStudentSectionAssociation]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_student_section_associations_by_id(self, id, **kwargs): # noqa: E501
"""Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern). # noqa: E501
This GET operation retrieves a resource by the specified resource identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_student_section_associations_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
:return: EdFiStudentSectionAssociation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_student_section_associations_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_student_section_associations_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_student_section_associations_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern). # noqa: E501
This GET operation retrieves a resource by the specified resource identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_student_section_associations_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
:return: EdFiStudentSectionAssociation
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'if_none_match'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_student_section_associations_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `get_student_section_associations_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_none_match' in params:
header_params['If-None-Match'] = params['if_none_match'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/studentSectionAssociations/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EdFiStudentSectionAssociation', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_student_section_association(self, student_section_association, **kwargs): # noqa: E501
"""Creates or updates resources based on the natural key values of the supplied resource. # noqa: E501
The POST operation can be used to create or update resources. In database terms, this is often referred to as an \"upsert\" operation (insert + update). Clients should NOT include the resource \"id\" in the JSON body because it will result in an error (you must use a PUT operation to update a resource by \"id\"). The web service will identify whether the resource already exists based on the natural key values provided, and update or create the resource appropriately. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_student_section_association(student_section_association, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EdFiStudentSectionAssociation student_section_association: The JSON representation of the \"studentSectionAssociation\" resource to be created or updated. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_student_section_association_with_http_info(student_section_association, **kwargs) # noqa: E501
else:
(data) = self.post_student_section_association_with_http_info(student_section_association, **kwargs) # noqa: E501
return data
def post_student_section_association_with_http_info(self, student_section_association, **kwargs): # noqa: E501
"""Creates or updates resources based on the natural key values of the supplied resource. # noqa: E501
The POST operation can be used to create or update resources. In database terms, this is often referred to as an \"upsert\" operation (insert + update). Clients should NOT include the resource \"id\" in the JSON body because it will result in an error (you must use a PUT operation to update a resource by \"id\"). The web service will identify whether the resource already exists based on the natural key values provided, and update or create the resource appropriately. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_student_section_association_with_http_info(student_section_association, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EdFiStudentSectionAssociation student_section_association: The JSON representation of the \"studentSectionAssociation\" resource to be created or updated. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['student_section_association'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_student_section_association" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'student_section_association' is set
if self.api_client.client_side_validation and ('student_section_association' not in params or
params['student_section_association'] is None): # noqa: E501
raise ValueError("Missing the required parameter `student_section_association` when calling `post_student_section_association`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'student_section_association' in params:
body_params = params['student_section_association']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/studentSectionAssociations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def put_student_section_association(self, id, student_section_association, **kwargs): # noqa: E501
"""Updates or creates a resource based on the resource identifier. # noqa: E501
The PUT operation is used to update or create a resource by identifier. If the resource doesn't exist, the resource will be created using that identifier. Additionally, natural key values cannot be changed using this operation, and will not be modified in the database. If the resource \"id\" is provided in the JSON body, it will be ignored as well. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_student_section_association(id, student_section_association, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param EdFiStudentSectionAssociation student_section_association: The JSON representation of the \"studentSectionAssociation\" resource to be created or updated. (required)
:param str if_match: The ETag header value used to prevent the PUT from updating a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.put_student_section_association_with_http_info(id, student_section_association, **kwargs) # noqa: E501
else:
(data) = self.put_student_section_association_with_http_info(id, student_section_association, **kwargs) # noqa: E501
return data
def put_student_section_association_with_http_info(self, id, student_section_association, **kwargs): # noqa: E501
"""Updates or creates a resource based on the resource identifier. # noqa: E501
The PUT operation is used to update or create a resource by identifier. If the resource doesn't exist, the resource will be created using that identifier. Additionally, natural key values cannot be changed using this operation, and will not be modified in the database. If the resource \"id\" is provided in the JSON body, it will be ignored as well. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_student_section_association_with_http_info(id, student_section_association, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param EdFiStudentSectionAssociation student_section_association: The JSON representation of the \"studentSectionAssociation\" resource to be created or updated. (required)
:param str if_match: The ETag header value used to prevent the PUT from updating a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'student_section_association', 'if_match'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_student_section_association" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `put_student_section_association`") # noqa: E501
# verify the required parameter 'student_section_association' is set
if self.api_client.client_side_validation and ('student_section_association' not in params or
params['student_section_association'] is None): # noqa: E501
raise ValueError("Missing the required parameter `student_section_association` when calling `put_student_section_association`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_match' in params:
header_params['If-Match'] = params['if_match'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'student_section_association' in params:
body_params = params['student_section_association']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/studentSectionAssociations/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
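

# Illustrative usage sketch (not part of the generated client). It assumes a
# reachable Ed-Fi ODS / API host and OAuth2 credentials configured on the
# ApiClient elsewhere; the method names used below are the ones defined above.
if __name__ == '__main__':
    api = StudentSectionAssociationsApi(ApiClient())
    # Synchronous call: fetch up to 25 associations for a given school year.
    associations = api.get_student_section_associations(limit=25, school_year=2024)
    print(len(associations))
    # Asynchronous call: returns a thread whose .get() yields the same data.
    thread = api.get_student_section_associations(limit=25, async_req=True)
    print(len(thread.get()))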
| 56.635877 | 493 | 0.666675 |
672d39424dc1b4840b07509056858df35fc9f4de
| 6,739 |
py
|
Python
|
gwa_maid/routes.py
|
laikaah/gwa-maid-backend
|
aab7c191cac0d98e71b93e9c3507710aef8542b8
|
[
"MIT"
] | null | null | null |
gwa_maid/routes.py
|
laikaah/gwa-maid-backend
|
aab7c191cac0d98e71b93e9c3507710aef8542b8
|
[
"MIT"
] | null | null | null |
gwa_maid/routes.py
|
laikaah/gwa-maid-backend
|
aab7c191cac0d98e71b93e9c3507710aef8542b8
|
[
"MIT"
] | null | null | null |
from flask import jsonify, request
from gwa_maid import app, bcrypt, db
from gwa_maid.helpers import get_user_from_token, tokenize
from gwa_maid.models import Assessment, AssessmentClass, Subject, User
@app.route('/')
def index():
return 'Welcome!'
@app.route('/verify_token', methods=['POST'])
def verify():
print(request.json)
if not request.json:
return jsonify(success=False)
if 'token' not in request.json:
return jsonify(success=False)
token = request.json['token']
user = get_user_from_token(token)
if user is None:
return jsonify(success=False)
return jsonify(success=True)
@app.route('/register', methods=['POST'])
def register():
if not request.json:
return jsonify(success=False)
if 'username' not in request.json or 'password' not in request.json:
return jsonify(success=False)
username = request.json['username']
password = request.json['password']
if not username or not password:
return jsonify(success=False)
existing_user = User.query.filter(User.username == username).first()
if existing_user:
return jsonify(success=False)
user = User(
username=username,
password=bcrypt.generate_password_hash(password).decode('utf-8')
)
db.session.add(user)
db.session.commit()
token = tokenize(user.id, password)
return jsonify(token=token, success=True)
@app.route('/login', methods=['POST'])
def login():
username = request.form.get('username')
password = request.form.get('password')
user = User.query.filter(User.username == username).first()
if not user:
return jsonify(success=False)
if not bcrypt.check_password_hash(user.password, password):
return jsonify(success=False)
token = tokenize(user.id, password)
return jsonify(token=token, success=True)
@app.route('/subjects', methods=['GET'])
def get_subjects():
token = request.args.get('token')
user = get_user_from_token(token)
if user is None:
return jsonify(success=False)
subjects = user.subjects.all()
return jsonify(subjects=subjects, success=True)
@app.route('/subjects/add', methods=['POST'])
def add_subject():
    token = request.form.get('token')
user = get_user_from_token(token)
if user is None:
return jsonify(success=False)
subject_name = request.form.get('subject_name')
subject = Subject(
name=subject_name,
user_id=user.id
)
db.session.add(subject)
db.session.flush()
    # Update the running average of predicted grades across the user's subjects
    user.predicted_grade = (user.predicted_grade * user.subject_count +
                            subject.predicted_grade) / (user.subject_count + 1)
user.subject_count += 1
db.session.commit()
return jsonify(success=True)
@app.route('/subjects/assessment_classes', methods=['GET'])
def get_assessment_classes():
token = request.args.get('token')
subject_name = request.args.get('subject')
user = get_user_from_token(token)
if user is None:
return jsonify(success=False)
subject = Subject.query.\
filter(Subject.name == subject_name).\
        filter(Subject.owner.has(User.id == user.id)).first()
if not subject:
return jsonify(success=False)
assessment_classes = subject.assessment_classes.all()
return jsonify(assessment_classes=assessment_classes, success=True)
@app.route('/subjects/assessment_classes/add', methods=['POST'])
def add_assessment_class():
token = request.form.get('token')
subject_name = request.form.get('subject_name')
assessment_class_name = request.form.get('assessment_class_name')
user = get_user_from_token(token)
if user is None:
return jsonify(success=False)
    subject = Subject.query.filter(Subject.name == subject_name).\
        filter(Subject.owner.has(User.id == user.id)).first()
    if not subject:
return jsonify(success=False)
assessment_class = AssessmentClass(
name=assessment_class_name,
subject_id=subject.id,
)
db.session.add(assessment_class)
db.session.flush()
    # Update the subject's running average with the new class's predicted grade
    assessment_class.subject.predicted_grade = \
        (assessment_class.subject.predicted_grade * assessment_class.subject.assessment_class_count
         + assessment_class.predicted_grade)\
        / (assessment_class.subject.assessment_class_count + 1)
assessment_class.subject.assessment_class_count += 1
db.session.commit()
return jsonify(success=True)
@app.route('/subjects/assessment_classes/assessments', methods=['GET'])
def get_assessments():
token = request.args.get('token')
subject_name = request.args.get('subject')
assessment_class_name = request.args.get('assessment_class')
user = get_user_from_token(token)
if user is None:
return jsonify(success=False)
subject = Subject.query.\
filter(Subject.name == subject_name).\
        filter(Subject.owner.has(User.id == user.id)).first()
if not subject:
return jsonify(success=False)
assessment_class = AssessmentClass.query.\
filter(AssessmentClass.name == assessment_class_name).\
        filter(AssessmentClass.subject.has(Subject.id == subject.id)).first()
if not assessment_class:
return jsonify(success=False)
assessments = assessment_class.assessments.all()
return jsonify(assessments=assessments, success=True)
@app.route('/subjects/assessment_classes/assessments/add', methods=['POST'])
def add_assessment():
token = request.form.get('token')
subject_name = request.form.get('subject_name')
assessment_class_name = request.form.get('assessment_class_name')
assessment_name = request.form.get('assessment_name')
assessment_grade = request.form.get('assessment_grade', type=int)
user = get_user_from_token(token)
if user is None:
return jsonify(success=False)
subject = Subject.query.filter(Subject.name == subject_name).\
filter(Subject.owner.has(User.id == user.id)).first()
if not subject:
return jsonify(success=False)
    assessment_class = AssessmentClass.query.filter(AssessmentClass.name == assessment_class_name).\
        filter(AssessmentClass.subject.has(Subject.id == subject.id)).first()
if not assessment_class:
return jsonify(success=False)
assessment = Assessment(
name=assessment_name,
assessment_class_id=assessment_class.id,
grade=assessment_grade
)
db.session.add(assessment)
db.session.flush()
    # Update the class's running average with the newly recorded grade
    assessment.assessment_class.predicted_grade = \
        (assessment.assessment_class.predicted_grade * assessment.assessment_class.assessment_count
         + assessment.grade)\
        / (assessment.assessment_class.assessment_count + 1)
assessment.assessment_class.assessment_count += 1
db.session.commit()
return jsonify(success=True)
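

# Quick smoke test (illustrative only, not part of the original module). It
# uses Flask's built-in test client and assumes the database configured for
# `app` is reachable. Note that /register expects a JSON body, while /login
# and the other write endpoints read form fields.
if __name__ == '__main__':
    with app.test_client() as client:
        resp = client.post('/register', json={'username': 'demo', 'password': 'secret'})
        print(resp.get_json())  # {'token': '...', 'success': True} on success
        resp = client.post('/login', data={'username': 'demo', 'password': 'secret'})
        print(resp.get_json())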
| 26.956 | 100 | 0.689271 |
f5ccad099a14810cec31dc033bf1ed6cf0ac3a0b
| 1,012 |
py
|
Python
|
rouge_papier_v2/rouge_papier_v2/util.py
|
BambooPalace/text-summarization
|
17ac68598563492b5e8959493b2bf1b137f78a5a
|
[
"MIT"
] | 54 |
2019-09-20T12:31:10.000Z
|
2022-03-19T12:21:32.000Z
|
rouge_papier_v2/rouge_papier_v2/util.py
|
huaweicould-ei/ExtSummLongDoc
|
43da8584a1ec5df6ed31a844285a12b71eb2b4a8
|
[
"MIT"
] | 9 |
2019-11-25T06:17:11.000Z
|
2022-03-23T04:08:53.000Z
|
rouge_papier_v2/rouge_papier_v2/util.py
|
huaweicould-ei/ExtSummLongDoc
|
43da8584a1ec5df6ed31a844285a12b71eb2b4a8
|
[
"MIT"
] | 12 |
2019-12-08T10:06:05.000Z
|
2022-03-06T08:10:53.000Z
|
import os
import tempfile
import shutil
def make_simple_config_text(system_and_summary_paths):
lines = []
for system_path, summary_paths in system_and_summary_paths:
line = "{} {}".format(system_path, " ".join(summary_paths))
lines.append(line)
return "\n".join(lines)
class TempFileManager(object):
def __init__(self):
pass
def create_temp_files(self, texts):
paths = []
for text in texts:
with tempfile.NamedTemporaryFile(
mode="w", delete=False, dir=self.tmpdir) as fp:
fp.write(text)
paths.append(fp.name)
return paths
def create_temp_file(self, text):
with tempfile.NamedTemporaryFile(
mode="w", delete=False, dir=self.tmpdir) as fp:
fp.write(text)
return fp.name
def __enter__(self):
self.tmpdir = tempfile.mkdtemp()
return self
def __exit__(self, *args):
shutil.rmtree(self.tmpdir)
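

# Minimal usage sketch (illustrative, not part of the original module): the
# manager creates a scratch directory on __enter__, returns paths for the
# temporary files it writes, and removes everything again on __exit__.
if __name__ == '__main__':
    with TempFileManager() as manager:
        system_path = manager.create_temp_file("the system summary text")
        summary_paths = manager.create_temp_files(["reference 1", "reference 2"])
        print(make_simple_config_text([(system_path, summary_paths)]))
    # The scratch directory and its files are gone once the with-block exits.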
| 26.631579 | 67 | 0.603755 |
5ec2157e66e8b9bef6d638fb23c142ff8493d6c8
| 16,400 |
py
|
Python
|
sabnzbd/bpsmeter.py
|
jcfp/sabnzbd
|
5b3d932232cd512166b14bca40b20c221ef3be61
|
[
"MIT",
"PSF-2.0",
"0BSD"
] | null | null | null |
sabnzbd/bpsmeter.py
|
jcfp/sabnzbd
|
5b3d932232cd512166b14bca40b20c221ef3be61
|
[
"MIT",
"PSF-2.0",
"0BSD"
] | null | null | null |
sabnzbd/bpsmeter.py
|
jcfp/sabnzbd
|
5b3d932232cd512166b14bca40b20c221ef3be61
|
[
"MIT",
"PSF-2.0",
"0BSD"
] | null | null | null |
#!/usr/bin/python3 -OO
# Copyright 2007-2020 The SABnzbd-Team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.bpsmeter - bpsmeter
"""
import time
import logging
import re
import sabnzbd
from sabnzbd.constants import BYTES_FILE_NAME, KIBI
import sabnzbd.cfg as cfg
DAY = float(24 * 60 * 60)
WEEK = DAY * 7
def tomorrow(t):
""" Return timestamp for tomorrow (midnight) """
now = time.localtime(t)
ntime = (now[0], now[1], now[2], 0, 0, 0, now[6], now[7], now[8])
return time.mktime(ntime) + DAY
def this_week(t):
""" Return timestamp for start of this week (monday) """
while 1:
tm = time.localtime(t)
if tm.tm_wday == 0:
break
t -= DAY
monday = (tm.tm_year, tm.tm_mon, tm.tm_mday, 0, 0, 0, 0, 0, tm.tm_isdst)
return time.mktime(monday)
def next_week(t):
""" Return timestamp for start of next week (monday) """
return this_week(t) + WEEK
def this_month(t):
""" Return timestamp for start of next month """
now = time.localtime(t)
ntime = (now[0], now[1], 1, 0, 0, 0, 0, 0, now[8])
return time.mktime(ntime)
_DAYS = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
def last_month_day(tm):
""" Return last day of this month """
year, month = tm[:2]
day = _DAYS[month]
# This simple formula for leap years is good enough
if day == 28 and (year % 4) == 0:
day = 29
return day
def next_month(t):
""" Return timestamp for start of next month """
now = time.localtime(t)
month = now.tm_mon + 1
year = now.tm_year
if month > 12:
month = 1
year += 1
ntime = (year, month, 1, 0, 0, 0, 0, 0, now[8])
return time.mktime(ntime)
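

# Illustrative example of the helpers above (not part of the original module):
# for a timestamp taken on Wednesday 2020-01-15, tomorrow(t) is midnight on
# 2020-01-16, this_week(t) is Monday 2020-01-13 00:00, next_week(t) is Monday
# 2020-01-20 00:00, and next_month(t) is 2020-02-01 00:00 (all local time).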
class BPSMeter:
do = None
def __init__(self):
t = time.time()
self.start_time = t
self.log_time = t
self.speed_log_time = t
self.last_update = t
self.bps = 0.0
self.bps_list = []
self.bps_list_max = 275
self.day_total = {}
self.week_total = {}
self.month_total = {}
self.grand_total = {}
self.timeline_total = {}
self.day_label = time.strftime("%Y-%m-%d")
self.end_of_day = tomorrow(t) # Time that current day will end
        self.end_of_week = next_week(t)  # Time that current week will end
self.end_of_month = next_month(t) # Time that current month will end
self.q_day = 1 # Day of quota reset
self.q_period = 'm' # Daily/Weekly/Monthly quota = d/w/m
self.quota = self.left = 0.0 # Quota and remaining quota
self.have_quota = False # Flag for quota active
self.q_time = 0 # Next reset time for quota
self.q_hour = 0 # Quota reset hour
self.q_minute = 0 # Quota reset minute
self.quota_enabled = True # Scheduled quota enable/disable
BPSMeter.do = self
def save(self):
""" Save admin to disk """
data = (self.last_update, self.grand_total,
self.day_total, self.week_total, self.month_total,
self.end_of_day, self.end_of_week, self.end_of_month,
self.quota, self.left, self.q_time, self.timeline_total
)
sabnzbd.save_admin(data, BYTES_FILE_NAME)
def defaults(self):
""" Get the latest data from the database and assign to a fake server """
logging.debug('Setting default BPS meter values')
history_db = sabnzbd.database.HistoryDB()
grand, month, week = history_db.get_history_size()
history_db.close()
self.grand_total = {}
self.month_total = {}
self.week_total = {}
self.day_total = {}
if grand:
self.grand_total['x'] = grand
if month:
self.month_total['x'] = month
if week:
self.week_total['x'] = week
self.quota = self.left = cfg.quota_size.get_float()
def read(self):
""" Read admin from disk, return True when pause is needed """
res = False
quota = self.left = cfg.quota_size.get_float() # Quota for this period
self.have_quota = bool(cfg.quota_size())
data = sabnzbd.load_admin(BYTES_FILE_NAME)
try:
self.last_update, self.grand_total, \
self.day_total, self.week_total, self.month_total, \
self.end_of_day, self.end_of_week, self.end_of_month, \
self.quota, self.left, self.q_time, self.timeline_total = data
if abs(quota - self.quota) > 0.5:
self.change_quota()
res = self.reset_quota()
except:
self.defaults()
# Force update of counters and validate data
try:
for server in self.grand_total.keys():
self.update(server)
except TypeError:
self.defaults()
self.update()
return res
def update(self, server=None, amount=0, testtime=None):
""" Update counters for "server" with "amount" bytes """
if testtime:
t = testtime
else:
t = time.time()
if t > self.end_of_day:
# current day passed. get new end of day
self.day_label = time.strftime("%Y-%m-%d")
self.day_total = {}
self.end_of_day = tomorrow(t) - 1.0
if t > self.end_of_week:
self.week_total = {}
self.end_of_week = next_week(t) - 1.0
if t > self.end_of_month:
self.month_total = {}
self.end_of_month = next_month(t) - 1.0
if server:
if server not in self.day_total:
self.day_total[server] = 0
self.day_total[server] += amount
if server not in self.week_total:
self.week_total[server] = 0
self.week_total[server] += amount
if server not in self.month_total:
self.month_total[server] = 0
self.month_total[server] += amount
if server not in self.grand_total:
self.grand_total[server] = 0
self.grand_total[server] += amount
if server not in self.timeline_total:
self.timeline_total[server] = {}
if self.day_label not in self.timeline_total[server]:
                self.timeline_total[server][self.day_label] = 0
self.timeline_total[server][self.day_label] += amount
# Quota check
if self.have_quota and self.quota_enabled:
self.left -= amount
if self.left <= 0.0:
if sabnzbd.downloader.Downloader.do and not sabnzbd.downloader.Downloader.do.paused:
sabnzbd.downloader.Downloader.do.pause()
logging.warning(T('Quota spent, pausing downloading'))
# Speedometer
try:
self.bps = (self.bps * (self.last_update - self.start_time) + amount) / (t - self.start_time)
except:
self.bps = 0.0
self.last_update = t
check_time = t - 5.0
if self.start_time < check_time:
self.start_time = check_time
if self.bps < 0.01:
self.reset()
elif self.log_time < check_time:
logging.debug("bps: %s", self.bps)
self.log_time = t
if self.speed_log_time < (t - 1.0):
self.add_empty_time()
self.bps_list.append(int(self.bps / KIBI))
self.speed_log_time = t
def reset(self):
t = time.time()
self.start_time = t
self.log_time = t
self.last_update = t
self.bps = 0.0
def add_empty_time(self):
# Extra zeros, but never more than the maximum!
nr_diffs = min(int(time.time() - self.speed_log_time), self.bps_list_max)
if nr_diffs > 1:
self.bps_list.extend([0] * nr_diffs)
# Always trim the list to the max-length
if len(self.bps_list) > self.bps_list_max:
self.bps_list = self.bps_list[len(self.bps_list) - self.bps_list_max:]
def get_sums(self):
""" return tuple of grand, month, week, day totals """
return (sum([v for v in self.grand_total.values()]),
sum([v for v in self.month_total.values()]),
sum([v for v in self.week_total.values()]),
sum([v for v in self.day_total.values()]))
def amounts(self, server):
""" Return grand, month, week, day totals for specified server """
return self.grand_total.get(server, 0), \
self.month_total.get(server, 0), \
self.week_total.get(server, 0), \
self.day_total.get(server, 0), \
self.timeline_total.get(server, {})
def clear_server(self, server):
""" Clean counters for specified server """
if server in self.day_total:
del self.day_total[server]
if server in self.week_total:
del self.week_total[server]
if server in self.month_total:
del self.month_total[server]
if server in self.grand_total:
del self.grand_total[server]
if server in self.timeline_total:
del self.timeline_total[server]
self.save()
def get_bps_list(self):
refresh_rate = int(cfg.refresh_rate()) if cfg.refresh_rate() else 1
self.add_empty_time()
# We record every second, but display at the user's refresh-rate
return self.bps_list[::refresh_rate]
def get_stable_speed(self, timespan=10):
""" See if there is a stable speed the last <timespan> seconds
None: indicates it can't determine yet
False: the speed was not stable during <timespan>
"""
if len(self.bps_list) < timespan:
return None
# Calculate the variance in the speed
avg = sum(self.bps_list[-timespan:]) / timespan
vari = 0
for bps in self.bps_list[-timespan:]:
vari += abs(bps - avg)
vari = vari / timespan
try:
# See if the variance is less than 5%
if (vari / (self.bps / KIBI)) < 0.05:
return avg
else:
return False
except:
# Probably one of the values was 0
pass
return None
def reset_quota(self, force=False):
""" Check if it's time to reset the quota, optionally resuming
Return True, when still paused
"""
if force or (self.have_quota and time.time() > (self.q_time - 50)):
self.quota = self.left = cfg.quota_size.get_float()
logging.info('Quota was reset to %s', self.quota)
if cfg.quota_resume():
logging.info('Auto-resume due to quota reset')
if sabnzbd.downloader.Downloader.do:
sabnzbd.downloader.Downloader.do.resume()
self.next_reset()
return False
else:
return True
def next_reset(self, t=None):
""" Determine next reset time """
t = t or time.time()
tm = time.localtime(t)
if self.q_period == 'd':
nx = (tm[0], tm[1], tm[2], self.q_hour, self.q_minute, 0, 0, 0, tm[8])
if (tm.tm_hour * 60 + tm.tm_min) >= (self.q_hour * 60 + self.q_minute):
# If today's moment has passed, it will happen tomorrow
t = time.mktime(nx) + 24 * 3600
tm = time.localtime(t)
elif self.q_period == 'w':
if self.q_day < tm.tm_wday + 1 or (self.q_day == tm.tm_wday + 1 and (tm.tm_hour * 60 + tm.tm_min) >= (self.q_hour * 60 + self.q_minute)):
tm = time.localtime(next_week(t))
dif = abs(self.q_day - tm.tm_wday - 1)
t = time.mktime(tm) + dif * 24 * 3600
tm = time.localtime(t)
elif self.q_period == 'm':
if self.q_day < tm.tm_mday or (self.q_day == tm.tm_mday and (tm.tm_hour * 60 + tm.tm_min) >= (self.q_hour * 60 + self.q_minute)):
tm = time.localtime(next_month(t))
day = min(last_month_day(tm), self.q_day)
tm = (tm[0], tm[1], day, self.q_hour, self.q_minute, 0, 0, 0, tm[8])
else:
return
tm = (tm[0], tm[1], tm[2], self.q_hour, self.q_minute, 0, 0, 0, tm[8])
self.q_time = time.mktime(tm)
logging.debug('Will reset quota at %s', tm)
def change_quota(self, allow_resume=True):
""" Update quota, potentially pausing downloader """
if not self.have_quota and self.quota < 0.5:
# Never set, use last period's size
per = cfg.quota_period()
sums = self.get_sums()
if per == 'd':
self.left = sums[3]
elif per == 'w':
self.left = sums[2]
elif per == 'm':
self.left = sums[1]
self.have_quota = bool(cfg.quota_size())
if self.have_quota:
quota = cfg.quota_size.get_float()
if self.quota:
# Quota change, recalculate amount left
self.left = quota - (self.quota - self.left)
else:
# If previously no quota, self.left holds this period's usage
self.left = quota - self.left
self.quota = quota
else:
self.quota = self.left = 0
self.update(0)
self.next_reset()
if self.left > 0.5 and allow_resume:
self.resume()
# Pattern = <day#> <hh:mm>
# The <day> and <hh:mm> part can both be optional
__re_day = re.compile(r'^\s*(\d+)[^:]*')
__re_hm = re.compile(r'(\d+):(\d+)\s*$')
def get_quota(self):
""" If quota active, return check-function, hour, minute """
if self.have_quota:
self.q_period = cfg.quota_period()[0].lower()
self.q_day = 1
self.q_hour = self.q_minute = 0
txt = cfg.quota_day().lower()
m = self.__re_day.search(txt)
if m:
self.q_day = int(m.group(1))
m = self.__re_hm.search(txt)
if m:
self.q_hour = int(m.group(1))
self.q_minute = int(m.group(2))
if self.q_period == 'w':
self.q_day = max(1, self.q_day)
self.q_day = min(7, self.q_day)
elif self.q_period == 'm':
self.q_day = max(1, self.q_day)
self.q_day = min(31, self.q_day)
else:
self.q_day = 1
self.change_quota(allow_resume=False)
return quota_handler, self.q_hour, self.q_minute
else:
return None, 0, 0
def set_status(self, status, action=True):
""" Disable/enable quota management """
self.quota_enabled = status
if action and not status:
self.resume()
def resume(self):
""" Resume downloading """
if cfg.quota_resume() and sabnzbd.downloader.Downloader.do and sabnzbd.downloader.Downloader.do.paused:
sabnzbd.downloader.Downloader.do.resume()
def midnight(self):
""" Midnight action: dummy update for all servers """
for server in self.day_total.keys():
self.update(server)
def quota_handler():
""" To be called from scheduler """
logging.debug('Checking quota')
BPSMeter.do.reset_quota()
def midnight_action():
if BPSMeter.do:
BPSMeter.do.midnight()
BPSMeter()
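

# Minimal usage sketch (illustrative, not part of the original module). The
# module-level BPSMeter() call above installs the singleton on BPSMeter.do;
# the rest of SABnzbd records downloaded bytes and reads totals through it.
if __name__ == '__main__':
    meter = BPSMeter.do
    meter.update(server='news.example.org', amount=512 * 1024)
    grand, month, week, day = meter.get_sums()
    print(grand, month, week, day, meter.bps)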
| 35.652174 | 149 | 0.559939 |
4107814077d1fabc5034590e495aac321ead9606
| 8,031 |
py
|
Python
|
test/test_storage_mongo.py
|
goodtiding5/flask-track-usage
|
1ace3261c935b7071af7322afbd2150d4d991c8c
|
[
"BSD-3-Clause"
] | 46 |
2015-01-13T10:24:35.000Z
|
2022-02-13T04:58:34.000Z
|
test/test_storage_mongo.py
|
goodtiding5/flask-track-usage
|
1ace3261c935b7071af7322afbd2150d4d991c8c
|
[
"BSD-3-Clause"
] | 42 |
2015-01-08T22:20:52.000Z
|
2022-01-14T08:25:01.000Z
|
test/test_storage_mongo.py
|
goodtiding5/flask-track-usage
|
1ace3261c935b7071af7322afbd2150d4d991c8c
|
[
"BSD-3-Clause"
] | 27 |
2015-03-11T19:56:24.000Z
|
2022-03-24T15:59:51.000Z
|
# Copyright (c) 2013-2018 Steve Milner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) The name of the author may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Tests mongodb based storage.
"""
import datetime
import unittest
COLLECTION = False
HAS_PYMONGO = False
HAS_MONGOENGINE = False
try:
import pymongo
HAS_PYMONGO = True
DB = 'test'
COLL_NAME = 'flask_track_usage'
COLLECTION = getattr(getattr(pymongo.MongoClient(), DB), COLL_NAME)
except ImportError:
HAS_PYMONGO = False
except pymongo.errors.ConnectionFailure:
COLLECTION = False
try:
import mongoengine
HAS_MONGOENGINE = True
try:
mongoengine.connect(db="mongoenginetest")
except:
print('Can not connect to mongoengine database.')
HAS_MONGOENGINE = False
except ImportError:
pass
from flask_track_usage import TrackUsage
from flask_track_usage.storage.mongo import (
MongoPiggybackStorage,
MongoStorage,
MongoEngineStorage
)
from . import FlaskTrackUsageTestCase
@unittest.skipUnless(HAS_PYMONGO, "Requires pymongo")
@unittest.skipUnless(COLLECTION, "Requires a running test MongoDB")
class TestMongoPiggybackStorage(FlaskTrackUsageTestCase):
"""
Tests MongoDB storage while using a piggybacked connection.
"""
def setUp(self):
"""
Set up an app to test with.
"""
FlaskTrackUsageTestCase.setUp(self)
self.storage = MongoPiggybackStorage(collection=COLLECTION)
# Clean out the storage
self.storage.collection.drop()
self.track_usage = TrackUsage(self.app, self.storage)
def test_mongo_piggyback_storage(self):
"""
Test MongoPiggybackStorage stores the data the way we expect.
"""
self.client.get('/')
result = self.storage.collection.find_one()
assert result['blueprint'] is None
assert result['ip_info'] is None
assert result['status'] == 200
self.assertTrue(result['remote_addr']) # Should be set with modern versions of Flask
assert result['speed'].__class__ is float
assert result['view_args'] == {}
assert result['url'] == 'http://localhost/'
assert result['authorization'] is False
assert result['user_agent']['browser'] is None # because of testing
assert result['user_agent']['platform'] is None # because of testing
assert result['user_agent']['language'] is None # because of testing
assert result['user_agent']['version'] is None # because of testing
assert result['path'] == '/'
assert type(result['date']) is datetime.datetime
@unittest.skipUnless(HAS_PYMONGO, "Requires pymongo")
@unittest.skipUnless(COLLECTION, "Requires a running test MongoDB")
class TestMongoStorage(FlaskTrackUsageTestCase):
"""
    Tests MongoDB storage while using its own connection.
"""
def setUp(self):
"""
Set up an app to test with.
"""
FlaskTrackUsageTestCase.setUp(self)
self.storage = MongoStorage(
database=DB,
collection=COLL_NAME
)
# Clean out the storage
self.storage.collection.drop()
self.track_usage = TrackUsage(self.app, self.storage)
def test_mongo_storage_data(self):
"""
Test that data is stored in MongoDB and retrieved correctly.
"""
self.client.get('/')
result = self.storage.collection.find_one()
assert result['blueprint'] is None
assert result['ip_info'] is None
assert result['status'] == 200
self.assertTrue(result['remote_addr']) # Should be set with modern versions of Flask
assert result['speed'].__class__ is float
assert result['view_args'] == {}
assert result['url'] == 'http://localhost/'
assert result['authorization'] is False
assert result['user_agent']['browser'] is None # because of testing
assert result['user_agent']['platform'] is None # because of testing
assert result['user_agent']['language'] is None # because of testing
assert result['user_agent']['version'] is None # because of testing
assert result['path'] == '/'
assert type(result['date']) is datetime.datetime
def test_mongo_storage_get_usage(self):
"""
Verify we can get usage information in expected ways.
"""
# Make 3 requests to make sure we have enough records
self.client.get('/')
self.client.get('/')
self.client.get('/')
# Limit tests
assert len(self.storage.get_usage()) == 3
assert len(self.storage.get_usage(limit=2)) == 2
assert len(self.storage.get_usage(limit=1)) == 1
# Page tests
assert len(self.storage.get_usage(limit=2, page=1)) == 2
assert len(self.storage.get_usage(limit=2, page=2)) == 1
# timing tests
now = datetime.datetime.utcnow()
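        # Every request above was recorded before `now`, so filtering with
        # start_date=now should match nothing while end_date=now matches all three.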
assert len(self.storage.get_usage(start_date=now)) == 0
assert len(self.storage.get_usage(end_date=now)) == 3
assert len(self.storage.get_usage(end_date=now, limit=2)) == 2
@unittest.skipUnless(HAS_MONGOENGINE, "Requires MongoEngine")
@unittest.skipUnless(COLLECTION, "Requires a running test MongoDB")
class TestMongoEngineStorage(FlaskTrackUsageTestCase):
"""
Tests MongoEngine storage.
"""
def setUp(self):
"""
Set up an app to test with.
"""
FlaskTrackUsageTestCase.setUp(self)
self.storage = MongoEngineStorage()
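        # No connection arguments are passed here; the storage relies on the
        # default mongoengine connection registered at import time above.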
# Clean out the storage
self.storage.collection.drop_collection()
self.track_usage = TrackUsage(self.app, self.storage)
def test_mongoengine_storage(self):
"""
        Test MongoEngineStorage stores the data the way we expect.
"""
self.client.get('/')
doc = self.storage.collection.objects.first()
assert doc.blueprint is None
assert doc.ip_info is None
assert doc.status == 200
self.assertTrue(doc.remote_addr) # Should be set with modern versions of Flask
assert doc.speed.__class__ is float
assert doc.view_args == {}
assert doc.url_args == {}
assert doc.url == 'http://localhost/'
assert doc.authorization is False
assert doc.user_agent.browser is None # because of testing
assert doc.user_agent.platform is None # because of testing
assert doc.user_agent.language is None # because of testing
assert doc.user_agent.version is None # because of testing
assert doc.content_length == 6
assert doc.path == '/'
assert type(doc.date) is datetime.datetime
| 36.671233 | 93 | 0.670153 |
6772f47be90751a8ab2cbacfba1c7b99baa2b64a
| 102 |
py
|
Python
|
caiman/models.py
|
Rockstreet/usman_min
|
c15145a444cbc913a1349b69dffc0b8a45e38dbb
|
[
"MIT"
] | null | null | null |
caiman/models.py
|
Rockstreet/usman_min
|
c15145a444cbc913a1349b69dffc0b8a45e38dbb
|
[
"MIT"
] | null | null | null |
caiman/models.py
|
Rockstreet/usman_min
|
c15145a444cbc913a1349b69dffc0b8a45e38dbb
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
| 10.2 | 65 | 0.784314 |
40ff73a50e428a34204355c8944416160e034ae7
| 749 |
py
|
Python
|
alembic/versions/20210317_233807_.py
|
sasano8/magnet-migrade
|
b5669b34a6a3b845df8df96dfedaf967df6b88e2
|
[
"MIT"
] | null | null | null |
alembic/versions/20210317_233807_.py
|
sasano8/magnet-migrade
|
b5669b34a6a3b845df8df96dfedaf967df6b88e2
|
[
"MIT"
] | 4 |
2021-03-24T23:38:22.000Z
|
2021-03-31T07:24:30.000Z
|
alembic/versions/20210317_233807_.py
|
sasano8/magnet-migrade
|
b5669b34a6a3b845df8df96dfedaf967df6b88e2
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 20210317_233807
Revises: 20210317_233703
Create Date: 2021-03-17 23:38:07.486793
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "20210317_233807"
down_revision = "20210317_233703"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("trade_bots", "broker")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"trade_bots",
sa.Column(
"broker", sa.VARCHAR(length=255), autoincrement=False, nullable=False
),
)
# ### end Alembic commands ###
| 22.029412 | 81 | 0.664887 |