Dataset schema (one record per file; ⌀ marks nullable fields):

  blob_id               string, 40 chars
  directory_id          string, 40 chars
  path                  string, 3-616 chars
  content_id            string, 40 chars
  detected_licenses     list, 0-112 items
  license_type          string, 2 classes
  repo_name             string, 5-115 chars
  snapshot_id           string, 40 chars
  revision_id           string, 40 chars
  branch_name           string, 777 classes
  visit_date            timestamp[us], 2015-08-06 10:31:46 - 2023-09-06 10:44:38
  revision_date         timestamp[us], 1970-01-01 02:38:32 - 2037-05-03 13:00:00
  committer_date        timestamp[us], 1970-01-01 02:38:32 - 2023-09-06 01:08:06
  github_id             int64, 4.92k - 681M ⌀
  star_events_count     int64, 0 - 209k
  fork_events_count     int64, 0 - 110k
  gha_license_id        string, 22 classes
  gha_event_created_at  timestamp[us], 2012-06-04 01:52:49 - 2023-09-14 21:59:50 ⌀
  gha_created_at        timestamp[us], 2008-05-22 07:58:19 - 2023-08-21 12:35:19 ⌀
  gha_language          string, 149 classes
  src_encoding          string, 26 classes
  language              string, 1 value
  is_vendor             bool, 2 classes
  is_generated          bool, 2 classes
  length_bytes          int64, 3 - 10.2M
  extension             string, 188 classes
  content               string, 3 - 10.2M chars
  authors               list, 1 item
  author_id             string, 1-132 chars
======================================================================
blob 11660dd3974023d138b1c172b13f5b3f2dee9fef
path: /.history/master_20200129140643.py
repo: StRobertCHSCS/final-project-team (refs/heads/master) | license: none detected (no_license)
stars: 1 | forks: 0 | language: Python | encoding: UTF-8 | length: 14,165 bytes
======================================================================

'''
- make snake longer when eaten
- fix stop watch so it restarts when you restart the level
- FIGURE OUT HOW TO KNOW WHERE TO ADD THE NEXT BLOCK (MOVE LAST LOCATION TO BACK)
  DONEEE
- fix player_location lists, so that the list only has the location of the current snake location, not an infinite list (done)
- fix apple so it disappears when you go over it (done)
- add score (done)
- fix speed so that it resets when you go back to the main page
- add high score page (txt file, saves high scores outside of program)
'''
import arcade
import random
import json
# Starting screen
alive_button = []
start_button_text = ["Noob: 0.5 speed \n (Refresh rate 1/5 seconds)",
"Normal speed: 1 \n (Refresh rate 1/10 seconds)",
"Hard: 1.5 speed \n (Refresh rate 1/15 seconds)",
"Expert: 2.5 speed \n (Refresh rate 1/25 seconds)"]
for i in range (2, 10, 2):
start_options = [i*100, 200, 150, 50, start_button_text[(i // 2) - 1]] # x, y, width, height
alive_button.append(start_options)
show_text = False
# Set how many rows and columns we will have
ROW_COUNT = 29
COLUMN_COUNT = 51
# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 20
HEIGHT = 20
# This sets the margin between each cell
# and on the edges of the screen.
MARGIN = 5
# Do the math to figure out our screen dimensions
SCREEN_WIDTH = (WIDTH + MARGIN) * COLUMN_COUNT + MARGIN
SCREEN_HEIGHT = (HEIGHT + MARGIN) * ROW_COUNT + MARGIN
# Death screen
dead_button = []
death_button_text = ["Retry", "Starting screen","Quit", ""]
text_num = 0
for x in range (1, 5, 2):
for y in range (1, 5, 2):
death_options = [x*(SCREEN_WIDTH//4) - 75, y*(SCREEN_HEIGHT//4) - 75 , 150, 150, death_button_text[text_num]] # x, y, width, height
dead_button.append(death_options)
text_num += 1
# Direction the snake is moving in
up = False
down = False
left = False
right = False
# Use snakes position shown on grid, not the python coordinates
player_x_column = 25
player_y_row = 20
# Length of the snake body
body = 1
# Current snake location
snake_pos = []
# Determine where the starting apple will be drawn in
apple_x = random.randint(0, COLUMN_COUNT - 1)  # randint is inclusive on both ends; valid cells are 0 .. COUNT - 1
apple_y = random.randint(0, ROW_COUNT - 1)
# Boolean to see if apple needs to be moved
apple_display = True
# Background grid
grid_texture = arcade.load_texture("29x51_grid.jpg")
score = 0
# Landing page, game, death screen, or high score
page = 0
SPEED = 1
high_score = 0
time = 0  # update-tick counter that drives the stop watch
second = 0
minute = 0
red = 255
green = 255
blue = 0

def advance_rainbow():
    # Step the shared (red, green, blue) colour one notch around the hue
    # wheel; used by both the stop-watch text and the death-screen buttons.
    global red, green, blue
    if red == 255 and 0 <= green < 255 and blue == 0:
        green += 5
    elif 0 < red <= 255 and green == 255 and blue == 0:
        red -= 5
    elif red == 0 and green == 255 and 0 <= blue < 255:
        blue += 5
    elif red == 0 and 0 < green <= 255 and blue == 255:
        green -= 5
    elif 0 <= red < 255 and green == 0 and blue == 255:
        red += 5
    elif red == 255 and green == 0 and 0 < blue <= 255:
        blue -= 5
def on_update(delta_time):
snake_move()
def on_draw():
global page
arcade.start_render()
if page == 0:
start_screen()
elif page == 1:
main_game()
elif page == 2:
grid_background()
death_screen()
elif page == 3:
high_score_page()
print(time)
def stop_watch():
global time, second, minute, SPEED
global red, green, blue
time += 1
    if time % SPEED == 0:
        second += 1
    if second >= 60:
        # roll seconds over into minutes once a full minute has elapsed
        second = 0
        minute += 1
    advance_rainbow()
arcade.draw_text(f"Time: {minute:02d}:{second:02d}", 75, SCREEN_HEIGHT - 50, (red, green, blue),
25, font_name='calibri', bold = True, anchor_x="center", anchor_y="center")
def high_score_check():
global high_score, score
with open("high_score.json", "r") as high_score_file:
high_score = json.load(high_score_file)
with open("high_score.json", "w") as high_score_file:
if score > high_score:
json.dump(score, high_score_file)
else:
json.dump(high_score, high_score_file)
def high_score_page():
global high_score
high_score_check()
arcade.draw_text("The high score is " + str(high_score),dead_button[3][0], dead_button[3][1],
arcade.color.WHITE, 50, font_name='calibri', anchor_x="center", anchor_y="center")
def main_game():
grid_background()
snake()
apple()
stop_watch()
def start_screen():
global alive_button, SPEED
arcade.draw_text("Welcome to snake \n choose your level", (SCREEN_WIDTH//2), 3*(SCREEN_HEIGHT//4),
arcade.color.WHITE, 25, font_name='calibri', anchor_x="center", anchor_y="center")
# arcade.draw_text(str(current_time), (3 * SCREEN_WIDTH // 4), (SCREEN_HEIGHT//4),
# arcade.color.BLACK, 25, font_name='calibri', anchor_x="center", anchor_y="center")
for i in range (0, 4):
arcade.draw_xywh_rectangle_filled(alive_button[i][0],
alive_button[i][1],
alive_button[i][2],
alive_button[i][3],
arcade.color.WHITE)
arcade.draw_text(alive_button[i][4], alive_button[i][0] + (alive_button[i][2] // 2), alive_button[i][1] + (alive_button[i][3] // 2),
arcade.color.BLACK, 10, font_name='calibri', anchor_x="center", anchor_y="center")
def death_screen():
global dead_button, death_button_text, red, green, blue
high_score_page()
    advance_rainbow()
for i in range (2):
arcade.draw_text("You died rip lol", random.randint(50, SCREEN_WIDTH), random.randint(50, SCREEN_HEIGHT), (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
50, font_name='calibri', bold = True, anchor_x="center", anchor_y="center")
for i in range (0, 3):
arcade.draw_xywh_rectangle_filled(dead_button[i][0],
dead_button[i][1],
dead_button[i][2],
dead_button[i][3],
(red, blue, green))
arcade.draw_text(dead_button[i][4], dead_button[i][0] + (dead_button[i][2] // 2), dead_button[i][1] + (dead_button[i][3] // 2),
arcade.color.BLACK, 15, font_name='calibri', anchor_x="center", anchor_y="center")
def grid_background():
arcade.draw_texture_rectangle(SCREEN_WIDTH//2, SCREEN_HEIGHT//2, grid_texture.width, grid_texture.height, grid_texture, 0)
def snake_move():
global player_x, player_y, player_x_column, player_y_row
global snake_pos
global page, score
if (0 <= player_x_column < COLUMN_COUNT) and (0 <= player_y_row < ROW_COUNT):
if up:
player_y_row += 1
elif down:
player_y_row -= 1
elif right:
player_x_column += 1
elif left:
player_x_column -= 1
else:
page = 2
suicide_check = []
for position in snake_pos:
if position not in suicide_check:
suicide_check.append(position)
else:
page = 2
# Player coordinates
player_x = (MARGIN + WIDTH) * player_x_column + MARGIN + WIDTH // 2
player_y = (MARGIN + HEIGHT) * player_y_row + MARGIN + HEIGHT // 2
def restart():
global player_x_column, player_y_row, snake_len, body, snake_pos
global up, down, left, right
global page, score, time
global SPEED
player_x_column = 5
player_y_row = 5
snake_len = []
body = 1
snake_pos = []
up = False
down = False
left = False
right = False
page = 1
score = 0
time = 0
print ("You died", SPEED)
def snake():
global player_x_column, player_y_row, snake_len, body
global apple_x, apple_y
arcade.draw_rectangle_filled(player_x , player_y, WIDTH, HEIGHT, arcade.color.BLUE)
snake_len = [[player_x_column, player_y_row]]
snake_pos.append([player_x_column, player_y_row])
if body < len(snake_pos):
snake_pos.pop(0)
if (body > 1):
for num in range (1, body):
snake_len.append([snake_pos[num - 1][0], snake_pos[num - 1][1]])
for i in range (body):
arcade.draw_rectangle_filled(
(MARGIN + WIDTH) * snake_len[i][0] + MARGIN + WIDTH // 2,
(MARGIN + HEIGHT) * snake_len[i][1] + MARGIN + HEIGHT // 2 ,
WIDTH, HEIGHT, arcade.color.BLUE)
def apple():
global apple_x, apple_y, apple_x_coordinate, apple_y_coordinate, body, snake_len
global score
apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
if (player_x_column == apple_x) and (player_y_row == apple_y):
apple_display = False
body += 1
print ("hit")
else:
apple_display = True
if apple_display is True:
arcade.draw_rectangle_filled(apple_x_coordinate, apple_y_coordinate, WIDTH, HEIGHT, arcade.color.RED)
elif apple_display is False:
        apple_x = random.randint(0, COLUMN_COUNT - 1)
        apple_y = random.randint(0, ROW_COUNT - 1)
        # Make sure the apple doesn't respawn on the snake: reroll only when
        # BOTH coordinates match a body segment (sharing just a row or a
        # column is fine)
        for segment in range(len(snake_pos)):
            if apple_x == snake_pos[segment][0] and apple_y == snake_pos[segment][1]:
                apple_x = random.randint(0, COLUMN_COUNT - 1)
                apple_y = random.randint(0, ROW_COUNT - 1)
        apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
        apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
        score += 10
        apple_display = True
arcade.draw_text("Score is " + str(score), SCREEN_WIDTH - 75, SCREEN_HEIGHT - 50, arcade.color.GREEN,
25, font_name='calibri', bold = True, anchor_x="center", anchor_y="center")
def on_key_press(key, modifiers):
global up, down, left, right
if page == 1:
if (key == arcade.key.W) and (down == False):
up = True
down = False
right = False
left = False
elif (key == arcade.key.S) and (up == False):
down = True
up = False
right = False
left = False
elif (key == arcade.key.A) and (right == False):
left = True
up = False
down = False
right = False
elif (key == arcade.key.D) and (left == False):
right = True
up = False
down = False
left = False
def on_key_release(key, modifiers):
pass
def on_mouse_press(x, y, button, modifiers):
    global alive_button, dead_button, page
    global SPEED
if page == 0:
# For starting screen, check which button has been clicked
if (x > alive_button[0][0] and x < alive_button[0][0] + alive_button[0][2] and
y > alive_button[0][1] and y < alive_button[0][1] + alive_button[0][3]):
page += 1
SPEED = 5
arcade.schedule(on_update, 1/(SPEED))
print("noob")
elif (x > alive_button[1][0] and x < alive_button[1][0] + alive_button[1][2] and
y > alive_button[1][1] and y < alive_button[1][1] + alive_button[1][3]):
page += 1
SPEED = 10
arcade.schedule(on_update, 1/(SPEED))
print("normal")
elif (x > alive_button[2][0] and x < alive_button[2][0] + alive_button[2][2] and
y > alive_button[2][1] and y < alive_button[2][1] + alive_button[2][3]):
page += 1
SPEED = 15
arcade.schedule(on_update, 1/(SPEED))
print("hard")
elif (x > alive_button[3][0] and x < alive_button[3][0] + alive_button[3][2] and
y > alive_button[3][1] and y < alive_button[3][1] + alive_button[3][3]):
page += 1
SPEED = 25
arcade.schedule(on_update, 1/(SPEED))
print("expert")
else:
SPEED = 1
if page == 2:
if (x > dead_button[0][0] and x < dead_button[0][0] + dead_button[0][2] and
y > dead_button[0][1] and y < dead_button[0][1] + dead_button[0][3]):
restart()
print("try again")
elif (x > dead_button[1][0] and x < dead_button[1][0] + dead_button[1][2] and
y > dead_button[1][1] and y < dead_button[1][1] + dead_button[1][3]):
            page = 0  # back to the start screen; on_draw renders it next frame
print("main")
elif (x > dead_button[2][0] and x < dead_button[2][0] + dead_button[2][2] and
y > dead_button[2][1] and y < dead_button[2][1] + dead_button[2][3]):
print("exit")
arcade.close_window()
def setup():
global grid, SPEED
# SPEED = float(input("What fast do you want? \n Noob: Type 0.5 \n Normal: Type 1 \n Hard: Type 1.5 - 2 \n Expert: Type 2.5 or more \n *Changes the refresh rate* \n"))
arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, "snake")
arcade.set_background_color(arcade.color.BLACK)
arcade.schedule(on_update, 1/SPEED)
# Override arcade window methods
window = arcade.get_window()
window.on_draw = on_draw
window.on_key_press = on_key_press
window.on_key_release = on_key_release
window.on_mouse_press = on_mouse_press
arcade.run()
if __name__ == '__main__':
setup()

authors: [email protected]

======================================================================
blob 5c4983c27b4cef394e0a0bcb7f0283990cd5809d
path: /configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py
repo: shinianzhihou/ChangeDetection (refs/heads/master) | license: Apache-2.0 (permissive)
stars: 162 | forks: 29 | language: Python | encoding: UTF-8 | length: 1,118 bytes
======================================================================

_base_ = [
'../_base_/models/ocrnet_hr18.py',
'../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(decode_head=[
dict(
type='FCNHead',
in_channels=[18, 36, 72, 144],
channels=sum([18, 36, 72, 144]),
in_index=(0, 1, 2, 3),
input_transform='resize_concat',
kernel_size=1,
num_convs=1,
concat_input=False,
dropout_ratio=-1,
num_classes=21,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
dict(
type='OCRHead',
in_channels=[18, 36, 72, 144],
in_index=(0, 1, 2, 3),
input_transform='resize_concat',
channels=512,
ocr_channels=256,
dropout_ratio=-1,
num_classes=21,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
])
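
# Note: the two-element decode_head list re-declares the cascade heads from the
# base ocrnet_hr18 model so num_classes can be set to 21 for Pascal VOC: the
# FCNHead produces a coarse pixel prediction (auxiliary loss_weight=0.4) that
# the OCRHead then refines with object-contextual representations
# (loss_weight=1.0).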

authors: [email protected]

======================================================================
blob 9fa13f789486e633b64a9f1f7c8078dd81fdce4b
path: /python/spoonacular/com/spoonacular/client/model/inline_response20028.py
repo: jvenlin/spoonacular-api-clients (refs/heads/master) | license: MIT (permissive)
stars: 0 | forks: 0 | language: Python | encoding: UTF-8 | length: 16,485 bytes
======================================================================

# coding: utf-8
"""
spoonacular API
The spoonacular Nutrition, Recipe, and Food API allows you to access over 380,000 recipes, thousands of ingredients, 800,000 food products, and 100,000 menu items. Our food ontology and semantic recipe search engine makes it possible to search for recipes using natural language queries, such as \"gluten free brownies without sugar\" or \"low fat vegan cupcakes.\" You can automatically calculate the nutritional information for any recipe, analyze recipe costs, visualize ingredient lists, find recipes for what's in your fridge, find recipes based on special diets, nutritional requirements, or favorite ingredients, classify recipes into types and cuisines, convert ingredient amounts, or even compute an entire meal plan. With our powerful API, you can create many kinds of food and especially nutrition apps. Special diets/dietary requirements currently available include: vegan, vegetarian, pescetarian, gluten free, grain free, dairy free, high protein, whole 30, low sodium, low carb, Paleo, ketogenic, FODMAP, and Primal. # noqa: E501
The version of the OpenAPI document: 1.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class InlineResponse20028(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'int',
'title': 'str',
'badges': 'list[str]',
'important_badges': 'list[str]',
'breadcrumbs': 'list[str]',
'generated_text': 'str',
'image_type': 'str',
'ingredient_count': 'int',
'ingredient_list': 'str',
'ingredients': 'list[InlineResponse20028Ingredients]',
'likes': 'float',
'nutrition': 'InlineResponse20028Nutrition',
'price': 'float',
'servings': 'InlineResponse20028Servings',
'spoonacular_score': 'float'
}
attribute_map = {
'id': 'id',
'title': 'title',
'badges': 'badges',
'important_badges': 'importantBadges',
'breadcrumbs': 'breadcrumbs',
'generated_text': 'generatedText',
'image_type': 'imageType',
'ingredient_count': 'ingredientCount',
'ingredient_list': 'ingredientList',
'ingredients': 'ingredients',
'likes': 'likes',
'nutrition': 'nutrition',
'price': 'price',
'servings': 'servings',
'spoonacular_score': 'spoonacularScore'
}
def __init__(self, id=None, title=None, badges=None, important_badges=None, breadcrumbs=None, generated_text=None, image_type=None, ingredient_count=None, ingredient_list=None, ingredients=None, likes=None, nutrition=None, price=None, servings=None, spoonacular_score=None): # noqa: E501
"""InlineResponse20028 - a model defined in OpenAPI""" # noqa: E501
self._id = None
self._title = None
self._badges = None
self._important_badges = None
self._breadcrumbs = None
self._generated_text = None
self._image_type = None
self._ingredient_count = None
self._ingredient_list = None
self._ingredients = None
self._likes = None
self._nutrition = None
self._price = None
self._servings = None
self._spoonacular_score = None
self.discriminator = None
self.id = id
self.title = title
self.badges = badges
self.important_badges = important_badges
self.breadcrumbs = breadcrumbs
self.generated_text = generated_text
self.image_type = image_type
if ingredient_count is not None:
self.ingredient_count = ingredient_count
self.ingredient_list = ingredient_list
self.ingredients = ingredients
self.likes = likes
self.nutrition = nutrition
self.price = price
self.servings = servings
self.spoonacular_score = spoonacular_score
@property
def id(self):
"""Gets the id of this InlineResponse20028. # noqa: E501
:return: The id of this InlineResponse20028. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this InlineResponse20028.
:param id: The id of this InlineResponse20028. # noqa: E501
:type: int
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def title(self):
"""Gets the title of this InlineResponse20028. # noqa: E501
:return: The title of this InlineResponse20028. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this InlineResponse20028.
:param title: The title of this InlineResponse20028. # noqa: E501
:type: str
"""
if title is None:
raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501
if title is not None and len(title) < 1:
raise ValueError("Invalid value for `title`, length must be greater than or equal to `1`") # noqa: E501
self._title = title
@property
def badges(self):
"""Gets the badges of this InlineResponse20028. # noqa: E501
:return: The badges of this InlineResponse20028. # noqa: E501
:rtype: list[str]
"""
return self._badges
@badges.setter
def badges(self, badges):
"""Sets the badges of this InlineResponse20028.
:param badges: The badges of this InlineResponse20028. # noqa: E501
:type: list[str]
"""
if badges is None:
raise ValueError("Invalid value for `badges`, must not be `None`") # noqa: E501
self._badges = badges
@property
def important_badges(self):
"""Gets the important_badges of this InlineResponse20028. # noqa: E501
:return: The important_badges of this InlineResponse20028. # noqa: E501
:rtype: list[str]
"""
return self._important_badges
@important_badges.setter
def important_badges(self, important_badges):
"""Sets the important_badges of this InlineResponse20028.
:param important_badges: The important_badges of this InlineResponse20028. # noqa: E501
:type: list[str]
"""
if important_badges is None:
raise ValueError("Invalid value for `important_badges`, must not be `None`") # noqa: E501
self._important_badges = important_badges
@property
def breadcrumbs(self):
"""Gets the breadcrumbs of this InlineResponse20028. # noqa: E501
:return: The breadcrumbs of this InlineResponse20028. # noqa: E501
:rtype: list[str]
"""
return self._breadcrumbs
@breadcrumbs.setter
def breadcrumbs(self, breadcrumbs):
"""Sets the breadcrumbs of this InlineResponse20028.
:param breadcrumbs: The breadcrumbs of this InlineResponse20028. # noqa: E501
:type: list[str]
"""
if breadcrumbs is None:
raise ValueError("Invalid value for `breadcrumbs`, must not be `None`") # noqa: E501
self._breadcrumbs = breadcrumbs
@property
def generated_text(self):
"""Gets the generated_text of this InlineResponse20028. # noqa: E501
:return: The generated_text of this InlineResponse20028. # noqa: E501
:rtype: str
"""
return self._generated_text
@generated_text.setter
def generated_text(self, generated_text):
"""Sets the generated_text of this InlineResponse20028.
:param generated_text: The generated_text of this InlineResponse20028. # noqa: E501
:type: str
"""
if generated_text is None:
raise ValueError("Invalid value for `generated_text`, must not be `None`") # noqa: E501
if generated_text is not None and len(generated_text) < 1:
raise ValueError("Invalid value for `generated_text`, length must be greater than or equal to `1`") # noqa: E501
self._generated_text = generated_text
@property
def image_type(self):
"""Gets the image_type of this InlineResponse20028. # noqa: E501
:return: The image_type of this InlineResponse20028. # noqa: E501
:rtype: str
"""
return self._image_type
@image_type.setter
def image_type(self, image_type):
"""Sets the image_type of this InlineResponse20028.
:param image_type: The image_type of this InlineResponse20028. # noqa: E501
:type: str
"""
if image_type is None:
raise ValueError("Invalid value for `image_type`, must not be `None`") # noqa: E501
if image_type is not None and len(image_type) < 1:
raise ValueError("Invalid value for `image_type`, length must be greater than or equal to `1`") # noqa: E501
self._image_type = image_type
@property
def ingredient_count(self):
"""Gets the ingredient_count of this InlineResponse20028. # noqa: E501
:return: The ingredient_count of this InlineResponse20028. # noqa: E501
:rtype: int
"""
return self._ingredient_count
@ingredient_count.setter
def ingredient_count(self, ingredient_count):
"""Sets the ingredient_count of this InlineResponse20028.
:param ingredient_count: The ingredient_count of this InlineResponse20028. # noqa: E501
:type: int
"""
self._ingredient_count = ingredient_count
@property
def ingredient_list(self):
"""Gets the ingredient_list of this InlineResponse20028. # noqa: E501
:return: The ingredient_list of this InlineResponse20028. # noqa: E501
:rtype: str
"""
return self._ingredient_list
@ingredient_list.setter
def ingredient_list(self, ingredient_list):
"""Sets the ingredient_list of this InlineResponse20028.
:param ingredient_list: The ingredient_list of this InlineResponse20028. # noqa: E501
:type: str
"""
if ingredient_list is None:
raise ValueError("Invalid value for `ingredient_list`, must not be `None`") # noqa: E501
if ingredient_list is not None and len(ingredient_list) < 1:
raise ValueError("Invalid value for `ingredient_list`, length must be greater than or equal to `1`") # noqa: E501
self._ingredient_list = ingredient_list
@property
def ingredients(self):
"""Gets the ingredients of this InlineResponse20028. # noqa: E501
:return: The ingredients of this InlineResponse20028. # noqa: E501
:rtype: list[InlineResponse20028Ingredients]
"""
return self._ingredients
@ingredients.setter
def ingredients(self, ingredients):
"""Sets the ingredients of this InlineResponse20028.
:param ingredients: The ingredients of this InlineResponse20028. # noqa: E501
:type: list[InlineResponse20028Ingredients]
"""
if ingredients is None:
raise ValueError("Invalid value for `ingredients`, must not be `None`") # noqa: E501
self._ingredients = ingredients
@property
def likes(self):
"""Gets the likes of this InlineResponse20028. # noqa: E501
:return: The likes of this InlineResponse20028. # noqa: E501
:rtype: float
"""
return self._likes
@likes.setter
def likes(self, likes):
"""Sets the likes of this InlineResponse20028.
:param likes: The likes of this InlineResponse20028. # noqa: E501
:type: float
"""
if likes is None:
raise ValueError("Invalid value for `likes`, must not be `None`") # noqa: E501
self._likes = likes
@property
def nutrition(self):
"""Gets the nutrition of this InlineResponse20028. # noqa: E501
:return: The nutrition of this InlineResponse20028. # noqa: E501
:rtype: InlineResponse20028Nutrition
"""
return self._nutrition
@nutrition.setter
def nutrition(self, nutrition):
"""Sets the nutrition of this InlineResponse20028.
:param nutrition: The nutrition of this InlineResponse20028. # noqa: E501
:type: InlineResponse20028Nutrition
"""
if nutrition is None:
raise ValueError("Invalid value for `nutrition`, must not be `None`") # noqa: E501
self._nutrition = nutrition
@property
def price(self):
"""Gets the price of this InlineResponse20028. # noqa: E501
:return: The price of this InlineResponse20028. # noqa: E501
:rtype: float
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this InlineResponse20028.
:param price: The price of this InlineResponse20028. # noqa: E501
:type: float
"""
if price is None:
raise ValueError("Invalid value for `price`, must not be `None`") # noqa: E501
self._price = price
@property
def servings(self):
"""Gets the servings of this InlineResponse20028. # noqa: E501
:return: The servings of this InlineResponse20028. # noqa: E501
:rtype: InlineResponse20028Servings
"""
return self._servings
@servings.setter
def servings(self, servings):
"""Sets the servings of this InlineResponse20028.
:param servings: The servings of this InlineResponse20028. # noqa: E501
:type: InlineResponse20028Servings
"""
if servings is None:
raise ValueError("Invalid value for `servings`, must not be `None`") # noqa: E501
self._servings = servings
@property
def spoonacular_score(self):
"""Gets the spoonacular_score of this InlineResponse20028. # noqa: E501
:return: The spoonacular_score of this InlineResponse20028. # noqa: E501
:rtype: float
"""
return self._spoonacular_score
@spoonacular_score.setter
def spoonacular_score(self, spoonacular_score):
"""Sets the spoonacular_score of this InlineResponse20028.
:param spoonacular_score: The spoonacular_score of this InlineResponse20028. # noqa: E501
:type: float
"""
if spoonacular_score is None:
raise ValueError("Invalid value for `spoonacular_score`, must not be `None`") # noqa: E501
self._spoonacular_score = spoonacular_score
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse20028):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
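

if __name__ == "__main__":
    # Illustration added for this excerpt (not part of the generated client):
    # required fields reject None, so the first offending setter raises.
    try:
        InlineResponse20028(
            id=None, title="Pasta", badges=[], important_badges=[],
            breadcrumbs=[], generated_text="x", image_type="jpg",
            ingredient_count=2, ingredient_list="pasta; sauce", ingredients=[],
            likes=1.0, nutrition=None, price=2.5, servings=None,
            spoonacular_score=90.0)
    except ValueError as exc:
        print(exc)  # Invalid value for `id`, must not be `None`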

authors: [email protected]

======================================================================
blob 32a4fc72ae9516b70c68b7ae031027d437134130
path: /sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2022_02_01_preview/aio/operations/_component_versions_operations.py
repo: Azure/azure-sdk-for-python (refs/heads/main) | license: MIT (permissive)
detected licenses: 21 identifiers, including MIT, Apache-2.0, BSD-2-Clause,
  BSD-3-Clause, MPL-2.0, Python-2.0, PSF-2.0, HPND, ODbL-1.0, ZPL-2.1, assorted
  GPL/LGPL variants, and several LicenseRef-scancode references
stars: 4,046 | forks: 2,755 | language: Python | encoding: UTF-8 | length: 15,041 bytes
======================================================================

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._component_versions_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ComponentVersionsOperations:
"""ComponentVersionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
workspace_name: str,
name: str,
order_by: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[str] = None,
list_view_type: Optional[Union[str, "_models.ListViewType"]] = None,
**kwargs: Any
) -> AsyncIterable["_models.ComponentVersionResourceArmPaginatedResult"]:
"""List component versions.
List component versions.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Component name.
:type name: str
:param order_by: Ordering of list.
:type order_by: str
:param top: Maximum number of records to return.
:type top: int
:param skip: Continuation token for pagination.
:type skip: str
:param list_view_type: View type for including/excluding (for example) archived entities.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ComponentVersionResourceArmPaginatedResult or the
result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ComponentVersionResourceArmPaginatedResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ComponentVersionResourceArmPaginatedResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
order_by=order_by,
top=top,
skip=skip,
list_view_type=list_view_type,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
order_by=order_by,
top=top,
skip=skip,
list_view_type=list_view_type,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ComponentVersionResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
workspace_name: str,
name: str,
version: str,
**kwargs: Any
) -> None:
"""Delete version.
Delete version.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name.
:type name: str
:param version: Version identifier.
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions/{version}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
workspace_name: str,
name: str,
version: str,
**kwargs: Any
) -> "_models.ComponentVersionData":
"""Get version.
Get version.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name.
:type name: str
:param version: Version identifier.
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentVersionData, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersionData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ComponentVersionData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ComponentVersionData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions/{version}'} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
name: str,
version: str,
body: "_models.ComponentVersionData",
**kwargs: Any
) -> "_models.ComponentVersionData":
"""Create or update version.
Create or update version.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Container name.
:type name: str
:param version: Version identifier.
:type version: str
:param body: Version entity to create or update.
:type body: ~azure.mgmt.machinelearningservices.models.ComponentVersionData
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComponentVersionData, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ComponentVersionData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ComponentVersionData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'ComponentVersionData')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
version=version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ComponentVersionData', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ComponentVersionData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/components/{name}/versions/{version}'} # type: ignore
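
# A hedged usage sketch (illustrative only; the surrounding client plumbing and
# the attribute name below are assumptions -- in practice this class is
# instantiated for you by the generated service client):
#
#   ops = ml_rest_client.component_versions  # -> ComponentVersionsOperations
#   async for version in ops.list(resource_group_name="my-rg",
#                                 workspace_name="my-ws",
#                                 name="my_component"):
#       print(version)  # each item is a deserialized ComponentVersionData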

authors: [email protected]

======================================================================
blob d13d6b5acc64af0811538ebda36d881d4dcf63ca
path: /third_party/catapult/dashboard/dashboard/email_sheriff_test.py
repo: iridium-browser/iridium-browser (refs/heads/master) | license: BSD-3-Clause (permissive)
detected licenses: BSD-3-Clause, Apache-2.0, LGPL-2.0-or-later, MIT, GPL-1.0-or-later
stars: 341 | forks: 40 | language: Python | encoding: UTF-8 | length: 6,169 bytes
======================================================================

# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import unittest
import mock
from dashboard import email_sheriff
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import bug_label_patterns
from dashboard.models.subscription import Subscription
_SHERIFF_URL = 'http://chromium-build.appspot.com/p/chromium/sheriff_perf.js'
_SHERIFF_EMAIL = '[email protected]'
class EmailSheriffTest(testing_common.TestCase):
def _AddTestToStubDataStore(self):
"""Adds a test which will be used in the methods below."""
bug_label_patterns.AddBugLabelPattern('label1', '*/*/dromaeo/dom')
bug_label_patterns.AddBugLabelPattern('label2', '*/*/other/test')
testing_common.AddTests(['ChromiumPerf'], ['Win7'],
{'dromaeo': {
'dom': {}
}})
test = utils.TestKey('ChromiumPerf/Win7/dromaeo/dom').get()
test.improvement_direction = anomaly.DOWN
return test
def _GetDefaultMailArgs(self):
"""Adds an Anomaly and returns arguments for email_sheriff.EmailSheriff."""
test_entity = self._AddTestToStubDataStore()
subscription_url = Subscription(
name='Chromium Perf Sheriff URL',
rotation_url=_SHERIFF_URL,
bug_labels=['Performance-Sheriff-URL'])
subscription_email = Subscription(
name='Chromium Perf Sheriff Mail',
notification_email=_SHERIFF_EMAIL,
bug_labels=['Performance-Sheriff-Mail'])
anomaly_entity = anomaly.Anomaly(
median_before_anomaly=5.0,
median_after_anomaly=10.0,
start_revision=10002,
end_revision=10004,
subscription_names=[
subscription_url.name,
subscription_email.name,
],
subscriptions=[subscription_url, subscription_email],
test=utils.TestKey('ChromiumPerf/Win7/dromaeo/dom'))
return {
'subscriptions': [subscription_url, subscription_email],
'test': test_entity,
'anomaly': anomaly_entity
}
@mock.patch('google.appengine.api.urlfetch.fetch',
mock.MagicMock(
return_value=testing_common.FakeResponseObject(
200, 'document.write(\'sullivan\')')))
def testEmailSheriff_ContentAndRecipientAreCorrect(self):
email_sheriff.EmailSheriff(**self._GetDefaultMailArgs())
messages = self.mail_stub.get_sent_messages()
self.assertEqual(1, len(messages))
self.assertEqual('[email protected]', messages[0].sender)
self.assertEqual({'[email protected]', '[email protected]'},
{s.strip() for s in messages[0].to.split(',')})
name = 'dromaeo/dom on Win7'
expected_subject = '100.0%% regression in %s at 10002:10004' % name
self.assertEqual(expected_subject, messages[0].subject)
body = str(messages[0].body)
self.assertIn('10002 - 10004', body)
self.assertIn('100.0%', body)
self.assertIn('ChromiumPerf', body)
self.assertIn('Win7', body)
self.assertIn('dromaeo/dom', body)
html = str(messages[0].html)
self.assertIn('<b>10002 - 10004</b>', html)
self.assertIn('<b>100.0%</b>', html)
self.assertIn('<b>ChromiumPerf</b>', html)
self.assertIn('<b>Win7</b>', html)
self.assertIn('<b>dromaeo/dom</b>', html)
@mock.patch('google.appengine.api.urlfetch.fetch',
mock.MagicMock(
return_value=testing_common.FakeResponseObject(
200, 'document.write(\'sonnyrao, digit\')')))
def testEmailSheriff_MultipleSheriffs_AllGetEmailed(self):
email_sheriff.EmailSheriff(**self._GetDefaultMailArgs())
messages = self.mail_stub.get_sent_messages()
self.assertEqual(1, len(messages))
self.assertEqual('[email protected]', messages[0].sender)
self.assertEqual(
{
'[email protected]', '[email protected]',
'[email protected]'
}, {s.strip() for s in messages[0].to.split(',')})
def testEmail_NoSheriffUrl_EmailSentToSheriffRotationEmailAddress(self):
args = self._GetDefaultMailArgs()
args['subscriptions'][0].rotation_url = None
email_sheriff.EmailSheriff(**args)
messages = self.mail_stub.get_sent_messages()
self.assertEqual(1, len(messages))
# An email is only sent to the general sheriff rotation email;
# There is no other specific sheriff to send it to.
self.assertEqual('[email protected]', messages[0].to)
@mock.patch(
'google.appengine.api.urlfetch.fetch',
mock.MagicMock(
return_value=testing_common.FakeResponseObject(200, 'garbage')))
def testEmailSheriff_RotationUrlHasInvalidContent_EmailStillSent(self):
"""Tests the email to list when the rotation URL returns garbage."""
args = self._GetDefaultMailArgs()
email_sheriff.EmailSheriff(**args)
messages = self.mail_stub.get_sent_messages()
self.assertEqual(1, len(messages))
# An email is only sent to the general sheriff rotation email.
self.assertEqual('[email protected]', messages[0].to)
def testEmailSheriff_PercentChangeMaxFloat_ContentSaysAlertSize(self):
"""Tests the email content for "freakin huge" alert."""
args = self._GetDefaultMailArgs()
args['subscriptions'][0].rotation_url = None
args['anomaly'].median_before_anomaly = 0.0
email_sheriff.EmailSheriff(**args)
messages = self.mail_stub.get_sent_messages()
self.assertEqual(1, len(messages))
self.assertIn(anomaly.FREAKIN_HUGE, str(messages[0].subject))
self.assertNotIn(str(sys.float_info.max), str(messages[0].body))
self.assertIn(anomaly.FREAKIN_HUGE, str(messages[0].body))
self.assertNotIn(str(sys.float_info.max), str(messages[0].html))
self.assertIn(anomaly.FREAKIN_HUGE, str(messages[0].html))
if __name__ == '__main__':
unittest.main()

authors: [email protected]

======================================================================
blob bd455dab671cde472cc785727e4d195135c38a5d
path: /codes/CodeJamCrawler/16_3_1_neat/16_3_1_ipince_senate.py
repo: DaHuO/Supergraph (refs/heads/master) | license: none detected (no_license)
stars: 2 | forks: 0 | language: Python | encoding: UTF-8 | length: 1,445 bytes
======================================================================

#!/usr/bin/python
import operator
import sys
import random
ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';
# number -> party
def solve(senators):
evacs = []
tups = senators.items()
while True:
tups = sorted(tups, key=operator.itemgetter(1), reverse=True)
#print tups
if tups[0][1] is 0 and tups[1][1] is 0:
return evacs
m1 = tups[0]
m2 = tups[1]
if m1[1] is 1 and m2[1] is 1:
if len(tups) > 2 and tups[2][1] > 0: # 1 1 1 ....
evacs.append('' + m1[0])
tups[0] = (m1[0], m1[1] - 1)
else: # 1 1 0 ...
evacs.append('' + m1[0] + m2[0])
tups[0] = (m1[0], m1[1] - 1)
tups[1] = (m2[0], m2[1] - 1)
elif m1[1] - m2[1] is 0: # x x ...
evacs.append('' + m1[0] + m2[0])
tups[0] = (m1[0], m1[1] - 1)
tups[1] = (m2[0], m2[1] - 1)
else: # 4 2 ...
evacs.append('' + m1[0])
tups[0] = (m1[0], m1[1] - 1)
# check if all 0
if False:
print 50
for i in xrange(50):
s = random.randint(2, 26)
print s
nums = []
for j in xrange(s):
nums.append(str(random.randint(1, 1000)))
print ' '.join(nums)
sys.exit()
lines = iter(sys.stdin.readlines())
cases = int(lines.next())
for i in xrange(cases):
senators = {}
s = int(lines.next())
numbers = [int(num) for num in lines.next().split(' ')]
for j in xrange(s):
senators[ALPHA[j]] = numbers[j]
print "Case #%d: %s" % (i + 1, ' '.join(solve(senators)))

authors: [[email protected]]

======================================================================
blob 75240260a004d52337a86bfbe5cd8738addff1a1
path: /bafs/views/user.py
repo: schvin/freezingsaddles (refs/heads/master) | license: none detected (no_license)
stars: 0 | forks: 0 | language: Python | encoding: UTF-8 | length: 4,107 bytes
======================================================================

import json
import copy
import logging
from collections import defaultdict
from datetime import datetime, timedelta, date
from flask import render_template, redirect, url_for, current_app, request, Blueprint, session, g, jsonify
from sqlalchemy import text
from stravalib import Client
from stravalib import unithelper as uh
from bafs import app, db, data
from bafs.utils import gviz_api, auth
from bafs.model import Team, Athlete, RidePhoto, Ride, RideWeather
from bafs.utils.auth import requires_auth
from .people import people_list_users, people_show_person, ridedays
from .pointless import averagespeed, shortride, billygoat, tortoiseteam, weekendwarrior
def bt_jsonify(data):
    """
    Override jsonify to handle the raw lists expected by bootstrap-table.
    """
    return current_app.response_class(json.dumps(data, default=json_serializer), mimetype='application/json')
blueprint = Blueprint('user', __name__)
def json_serializer(obj):
if isinstance(obj, datetime):
return obj.isoformat()
else:
return str(obj)
@blueprint.route("/rides")
@requires_auth
def rides():
return render_template('user/rides.html')
@blueprint.route("/refetch_ride_photos", methods=['POST'])
@requires_auth
def ride_refetch_photos():
ride_id = request.form['id']
ride = db.session.query(Ride).filter(Ride.id==ride_id).filter(Ride.athlete_id==session.get('athlete_id')).one()
ride.photos_fetched = False
logging.info("Marking photos to be refetched for ride {}".format(ride))
db.session.commit()
return jsonify(success=True) # I don't really have anything useful to spit back.
@blueprint.route("/rides.json")
@requires_auth
def rides_data():
athlete_id = session.get('athlete_id')
rides_q = db.session.query(Ride).filter(Ride.athlete_id==athlete_id).order_by(Ride.start_date.desc())
results = []
for r in rides_q:
w = r.weather
if w:
avg_temp = w.ride_temp_avg
else:
avg_temp = None
results.append(dict(id=r.id,
private=r.private,
name=r.name,
start_date=r.start_date,
elapsed_time=r.elapsed_time,
moving_time=r.moving_time,
distance=r.distance,
photos_fetched=r.photos_fetched,
avg_temp=avg_temp
))
#rides = db.session.query(Ride).all()
return bt_jsonify(results)
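
# A sample element of the JSON array returned above (values illustrative;
# datetimes pass through json_serializer and come back as ISO-8601 strings):
#
#   {"id": 42, "private": false, "name": "Morning commute",
#    "start_date": "2018-01-15T08:01:00", "elapsed_time": 3600,
#    "moving_time": 3400, "distance": 25000.0, "photos_fetched": true,
#    "avg_temp": 28.5}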
# athlete_id = sa.Column(sa.BigInteger, sa.ForeignKey('athletes.id', ondelete='cascade'), nullable=False, index=True)
# elapsed_time = sa.Column(sa.Integer, nullable=False) # Seconds
# # in case we want to conver that to a TIME type ... (using time for interval is kinda mysql-specific brokenness, though)
# # time.strftime('%H:%M:%S', time.gmtime(12345))
# moving_time = sa.Column(sa.Integer, nullable=False, index=True) #
# elevation_gain = sa.Column(sa.Integer, nullable=True) # 269.6 (feet)
# average_speed = sa.Column(sa.Float) # mph
# maximum_speed = sa.Column(sa.Float) # mph
# start_date = sa.Column(sa.DateTime, nullable=False, index=True) # 2010-02-28T08:31:35Z
# distance = sa.Column(sa.Float, nullable=False, index=True) # 82369.1 (meters)
# location = sa.Column(sa.String(255), nullable=True)
#
# commute = sa.Column(sa.Boolean, nullable=True)
# trainer = sa.Column(sa.Boolean, nullable=True)
#
# efforts_fetched = sa.Column(sa.Boolean, default=False, nullable=False)
#
# timezone = sa.Column(sa.String(255), nullable=True)
#
# geo = orm.relationship("RideGeo", uselist=False, backref="ride", cascade="all, delete, delete-orphan")
# weather = orm.relationship("RideWeather", uselist=False, backref="ride", cascade="all, delete, delete-orphan")
# photos = orm.relationship("RidePhoto", backref="ride", cascade="all, delete, delete-orphan")
#
# photos_fetched = sa.Column(sa.Boolean, default=False, nullable=False)
# private = sa.Column(sa.Boolean, default=False, nullable=False)

authors: [email protected]

======================================================================
blob c16057f28176e9844ca0d10ae26e3b78d5f37bc3
path: /liste.py
repo: MarkHofstetter/20191018-wifi-python (refs/heads/master) | license: none detected (no_license)
stars: 2 | forks: 2 | language: Python | encoding: UTF-8 | length: 397 bytes
======================================================================

#    0  1  2  3  4  5  6
f = [1, 1, 2, 3, 5, 8, 13,]
print(f)
print(f[3])
print(f[-1])
print(f[0:3]) # excludes the element at the end index
print(f[3:])
# f[4] = 'Hallo'
# print(f)
print(len(f))
print(f[3], f[5])
d = [f[3], f[5]]
f.append(21)
f.insert(0,0) # position, value
print(f)
f.remove(1) # removes the first matching value from the list!
print(f)
del(f[0]) # removes by index
print(f)
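
# A short extension of the lesson (these lines are additions, not part of the
# original file):
print(f.pop())    # removes AND returns the last element -> 21
print(f[::2])     # every second element -> [1, 3, 8]
print(f[::-1])    # a reversed copy -> [13, 8, 5, 3, 2, 1]
g = f + [21, 34]  # + concatenates two lists into a new one
print(g)          # [1, 2, 3, 5, 8, 13, 21, 34]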

authors: [email protected]

======================================================================
blob acb2f22c10e5a983885d5d763b965af07ef47eb9
path: /0x0F-python-object_relational_mapping/14-model_city_fetch_by_state.py
repo: petehwu/holbertonschool-higher_level_programming (refs/heads/master) | license: none detected (no_license)
stars: 0 | forks: 0 | language: Python | encoding: UTF-8 | length: 746 bytes
======================================================================

#!/usr/bin/python3
"""python script to list all state objects
"""
import sys
from model_state import Base, State
from model_city import City
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'
.format(sys.argv[1], sys.argv[2],
sys.argv[3]), pool_pre_ping=True)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
cities = session.query(City).all()
for c in cities:
state = session.query(State).filter(State.id == c.state_id).first()
print("{:s}: ({:d}) {:s}".format(state.name, c.id, c.name))

authors: [email protected]

======================================================================
blob 2071d69796f799b401c9dc1bd122c662f52f5d0c
path: /python/brenpysandbox/fbx/fbx_import_settings_test_01.py
repo: brenainnJordan/brenpy-sandbox (refs/heads/master) | license: none detected (no_license)
stars: 0 | forks: 0 | language: Python | encoding: UTF-8 | length: 2,897 bytes
======================================================================

import fbx
import sys
import os
try:
from Qt import QtCore
from Qt import QtWidgets
from Qt import QtGui
except ImportError:
print "[ WARNING ] Cannot find Qt library, using PySide2 instead"
from PySide2 import QtCore
from PySide2 import QtWidgets
from PySide2 import QtGui
# QtCore.SIGNAL doesn't seem to exist
# TODO investigate why
try:
from PySide.QtCore import SIGNAL
except ImportError:
from PySide2.QtCore import SIGNAL
from brenfbx.utils import bfFbxUtils
from brenfbx.qt.property import bfQtPropertyValueWidgets
from brenfbx.qt.property import bfQtPropertyTreeWidgets
from brenfbx.qt.property import bfQtPropertyItems
from brenfbx.qt.property import bfQtPropertyModels
def inspect_child_properties(fbx_property, indent=0):
"""Recursively debug properties
"""
child_property = fbx_property.GetChild()
while child_property.IsValid():
print "-"*indent, child_property.GetName()
inspect_child_properties(child_property, indent=indent+1)
child_property = child_property.GetSibling()
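# Example call (assuming an FbxIOSettings object named settings, as in test_1 below):
#   inspect_child_properties(settings.FindProperty("Import"))
# prints each nested property name, indented with one dash per nesting level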
def test_1():
fbx_manager = fbx.FbxManager.Create()
settings = fbx.FbxIOSettings.Create(
fbx_manager, fbx.IOSROOT
)
print settings
root_properties = bfFbxUtils.get_root_properties(settings)
for property in root_properties:
print property.GetName()
inspect_child_properties(property, indent=1)
# fbx_property = settings.GetFirstProperty()
#
# while fbx_property.IsValid():
# print fbx_property.GetName()
#
# fbx_property = settings.GetNextProperty(fbx_property)
class Test2(object):
def __init__(self):
self.fbx_manager = fbx.FbxManager.Create()
self.settings = fbx.FbxIOSettings.Create(
self.fbx_manager, fbx.IOSROOT
)
item_manager = bfQtPropertyItems.FbxPropertyTreeItemManager(self.fbx_manager)
item_manager.set_debug_level(item_manager.LEVELS.mid())
item_manager.set_fbx_object(self.settings)
model = bfQtPropertyModels.BfFbxPropertyModel()
model.set_item_manager(item_manager)
import_property = self.settings.FindProperty("Import")
print import_property, import_property.IsValid()
model.set_root_fbx_property(import_property)
# self._properties_widget = bfQtPropertyWidgets.BfPropertiesWidget(self.fbx_manager)
self._properties_widget = bfQtPropertyTreeWidgets.BfPropertyTreeWidget()
self._properties_widget.set_property_model(model)
# self._properties_widget.set_fbx_object(self.settings)
# for child_property in bfUtils.get_child_properties(import_property):
# print child_property.GetName()
self._properties_widget.show()
if __name__ == "__main__":
# test_1()
app = QtWidgets.QApplication(sys.argv)
test = Test2()
sys.exit(app.exec_())
| [
"[email protected]"
]
| |
a30ea7479628945da6d1b7182c482c49399b5a02 | 397c9e2743c41cf591692c4fc37f43a9070119bd | /build/env/lib/python2.7/site-packages/windmill-1.6-py2.7.egg/windmill/tools/urlparse_25.py | e8341181f7a47001865f41e67e30002482632d13 | [
"Apache-2.0"
]
| permissive | bopopescu/myhue | cf41238c782d12b3a1a0ee9ef70196359bb67894 | 5f566970a5a1fa5af9f01832c9e9808c47634bc7 | refs/heads/master | 2022-11-18T05:37:24.467150 | 2019-11-23T16:16:22 | 2019-11-23T16:16:22 | 282,390,507 | 0 | 0 | Apache-2.0 | 2020-07-25T07:03:40 | 2020-07-25T07:03:39 | null | UTF-8 | Python | false | false | 12,066 | py | # From Python2.5 stdlib
"""Parse (absolute and relative) URLs.
See RFC 1808: "Relative Uniform Resource Locators", by R. Fielding,
UC Irvine, June 1995.
"""
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp']
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
"""Clear the parse cache."""
global _parse_cache
_parse_cache = {}
class BaseResult(tuple):
"""Base class for the parsed result objects.
This provides the attributes shared by the two derived result
objects as read-only properties. The derived classes are
responsible for checking the right number of arguments were
supplied to the constructor.
"""
__slots__ = ()
# Attributes that access the basic components of the URL:
@property
def scheme(self):
return self[0]
@property
def netloc(self):
return self[1]
@property
def path(self):
return self[2]
@property
def query(self):
return self[-2]
@property
def fragment(self):
return self[-1]
# Additional attributes that provide access to parsed-out portions
# of the netloc:
@property
def username(self):
netloc = self.netloc
if "@" in netloc:
userinfo = netloc.split("@", 1)[0]
if ":" in userinfo:
userinfo = userinfo.split(":", 1)[0]
return userinfo
return None
@property
def password(self):
netloc = self.netloc
if "@" in netloc:
userinfo = netloc.split("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)[1]
return None
@property
def hostname(self):
netloc = self.netloc
if "@" in netloc:
netloc = netloc.split("@", 1)[1]
if ":" in netloc:
netloc = netloc.split(":", 1)[0]
return netloc.lower() or None
@property
def port(self):
netloc = self.netloc
if "@" in netloc:
netloc = netloc.split("@", 1)[1]
if ":" in netloc:
port = netloc.split(":", 1)[1]
return int(port, 10)
return None
class SplitResult(BaseResult):
__slots__ = ()
def __new__(cls, scheme, netloc, path, query, fragment):
return BaseResult.__new__(
cls, (scheme, netloc, path, query, fragment))
def geturl(self):
return urlunsplit(self)
class ParseResult(BaseResult):
__slots__ = ()
def __new__(cls, scheme, netloc, path, params, query, fragment):
return BaseResult.__new__(
cls, (scheme, netloc, path, params, query, fragment))
@property
def params(self):
return self[3]
def geturl(self):
return urlunparse(self)
def urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
tuple = urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = tuple
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
return ParseResult(scheme, netloc, url, params, query, fragment)
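# For example:
#   urlparse('http://netloc/path;param?query=arg#frag')
#   -> ('http', 'netloc', '/path', 'param', 'query=arg', 'frag')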
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
if cached:
return cached
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
if url[:i] == 'http': # optimize the common case
scheme = url[:i].lower()
url = url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i+1:]
if scheme in uses_netloc and url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if allow_fragments and scheme in uses_fragment and '#' in url:
url, fragment = url.split('#', 1)
if scheme in uses_query and '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
def urlunparse((scheme, netloc, url, params, query, fragment)):
"""Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent)."""
if params:
url = "%s;%s" % (url, params)
return urlunsplit((scheme, netloc, url, query, fragment))
def urlunsplit((scheme, netloc, url, query, fragment)):
if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
if url and url[:1] != '/': url = '/' + url
url = '//' + (netloc or '') + url
if scheme:
url = scheme + ':' + url
if query:
url = url + '?' + query
if fragment:
url = url + '#' + fragment
return url
def urljoin(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter."""
if not base:
return url
if not url:
return base
bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
urlparse(base, '', allow_fragments)
scheme, netloc, path, params, query, fragment = \
urlparse(url, bscheme, allow_fragments)
if scheme != bscheme or scheme not in uses_relative:
return url
if scheme in uses_netloc:
if netloc:
return urlunparse((scheme, netloc, path,
params, query, fragment))
netloc = bnetloc
if path[:1] == '/':
return urlunparse((scheme, netloc, path,
params, query, fragment))
if not (path or params or query):
return urlunparse((scheme, netloc, bpath,
bparams, bquery, fragment))
segments = bpath.split('/')[:-1] + path.split('/')
# XXX The stuff below is bogus in various ways...
if segments[-1] == '.':
segments[-1] = ''
while '.' in segments:
segments.remove('.')
while 1:
i = 1
n = len(segments) - 1
while i < n:
if (segments[i] == '..'
and segments[i-1] not in ('', '..')):
del segments[i-1:i+1]
break
i = i+1
else:
break
if segments == ['', '..']:
segments[-1] = ''
elif len(segments) >= 2 and segments[-1] == '..':
segments[-2:] = ['']
return urlunparse((scheme, netloc, '/'.join(segments),
params, query, fragment))
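# For example, matching the table in test_input below:
#   urljoin('http://a/b/c/d', '../g') -> 'http://a/b/g'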
def urldefrag(url):
"""Removes any existing fragment from URL.
Returns a tuple of the defragmented URL and the fragment. If
the URL contained no fragments, the second element is the
empty string.
"""
if '#' in url:
s, n, p, a, q, frag = urlparse(url)
defrag = urlunparse((s, n, p, a, q, ''))
return defrag, frag
else:
return url, ''
test_input = """
http://a/b/c/d
g:h = <URL:g:h>
http:g = <URL:http://a/b/c/g>
http: = <URL:http://a/b/c/d>
g = <URL:http://a/b/c/g>
./g = <URL:http://a/b/c/g>
g/ = <URL:http://a/b/c/g/>
/g = <URL:http://a/g>
//g = <URL:http://g>
?y = <URL:http://a/b/c/d?y>
g?y = <URL:http://a/b/c/g?y>
g?y/./x = <URL:http://a/b/c/g?y/./x>
. = <URL:http://a/b/c/>
./ = <URL:http://a/b/c/>
.. = <URL:http://a/b/>
../ = <URL:http://a/b/>
../g = <URL:http://a/b/g>
../.. = <URL:http://a/>
../../g = <URL:http://a/g>
../../../g = <URL:http://a/../g>
./../g = <URL:http://a/b/g>
./g/. = <URL:http://a/b/c/g/>
/./g = <URL:http://a/./g>
g/./h = <URL:http://a/b/c/g/h>
g/../h = <URL:http://a/b/c/h>
http:g = <URL:http://a/b/c/g>
http: = <URL:http://a/b/c/d>
http:?y = <URL:http://a/b/c/d?y>
http:g?y = <URL:http://a/b/c/g?y>
http:g?y/./x = <URL:http://a/b/c/g?y/./x>
"""
def test():
import sys
base = ''
if sys.argv[1:]:
fn = sys.argv[1]
if fn == '-':
fp = sys.stdin
else:
fp = open(fn)
else:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
fp = StringIO(test_input)
while 1:
line = fp.readline()
if not line: break
words = line.split()
if not words:
continue
url = words[0]
parts = urlparse(url)
print '%-10s : %s' % (url, parts)
abs = urljoin(base, url)
if not base:
base = abs
wrapped = '<URL:%s>' % abs
print '%-10s = %s' % (url, wrapped)
if len(words) == 3 and words[1] == '=':
if wrapped != words[2]:
print 'EXPECTED', words[2], '!!!!!!!!!!'
if __name__ == '__main__':
test()
| [
"[email protected]"
]
| |
2e94f6588aa5777aec5c16eee7d4ea0ca17dd4ea | 7882860350c714e6c08368288dab721288b8d9db | /2407_조합.py | 255617b4bcd46fc1eb2cfadaf6a18e1d263ead7a | []
| no_license | park-seonju/Algorithm | 682fca984813a54b92a3f2ab174e4f05a95921a8 | 30e5bcb756e9388693624e8880e57bc92bfda969 | refs/heads/master | 2023-08-11T18:23:49.644259 | 2021-09-27T10:07:49 | 2021-09-27T10:07:49 | 388,741,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | n,m=map(int,input().split())
cnt=0
top=1
while cnt<m:  # top = n*(n-1)*...*(n-m+1), the falling factorial
    cnt+=1
    top*=n
    n-=1
for i in range(m,1,-1):  # divide by m! one factor at a time; every intermediate step stays an integer
    top//=i
print(top)
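# e.g. the input "5 2" prints 10, since 5 choose 2 = 10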
| [
"[email protected]"
]
| |
cb114711c20368e1af3bd2e23487fe45e5301fca | 47c01a12f776928a041aee309841d5efc1622e18 | /backend/dailigreen_3612/wsgi.py | 0497660c86729db74a3c245c4c7812b7635d8e36 | []
| no_license | crowdbotics-apps/dailigreen-3612 | 2aa28e1197db65be35fe30a6838efe28d1f9890c | a38bf4be6eacb40d05cf0eee998bb3f0fdaf451e | refs/heads/master | 2020-05-25T10:02:37.845216 | 2019-05-21T03:04:19 | 2019-05-21T03:04:19 | 187,751,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | """
WSGI config for dailigreen_3612 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dailigreen_3612.settings")
application = get_wsgi_application()
| [
"[email protected]"
]
| |
bd4d17989f1ebd69ef859132dbb57ac3f978ebb0 | 5abb52c3ee859ee5876601025479e9d3214f829f | /meiduo/meiduo/apps/contents/migrations/0001_initial.py | 797d2d8fd8153a0ac6f469e3cd7a43e02e2b0e1a | []
| no_license | RapperDream/meiduo-18 | 05ca46628f5575b31d6a0b2115786dd3f0e57f5a | d7f5aad879f0e420ac16e577d107236bdec816ee | refs/heads/master | 2020-04-22T02:39:01.099998 | 2019-02-23T14:53:39 | 2019-02-23T14:53:39 | 170,057,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,487 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-02-23 07:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Content',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('title', models.CharField(max_length=100, verbose_name='标题')),
('url', models.CharField(max_length=300, verbose_name='内容链接')),
('image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='图片')),
('text', models.TextField(blank=True, null=True, verbose_name='内容')),
('sequence', models.IntegerField(verbose_name='排序')),
('status', models.BooleanField(default=True, verbose_name='是否展示')),
],
options={
'verbose_name_plural': '广告内容',
'db_table': 'tb_content',
'verbose_name': '广告内容',
},
),
migrations.CreateModel(
name='ContentCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('name', models.CharField(max_length=50, verbose_name='名称')),
('key', models.CharField(max_length=50, verbose_name='类别键名')),
],
options={
'verbose_name_plural': '广告内容类别',
'db_table': 'tb_content_category',
'verbose_name': '广告内容类别',
},
),
migrations.AddField(
model_name='content',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contents.ContentCategory', verbose_name='类别'),
),
]
| [
"[email protected]"
]
| |
71fd47eacb61850cec0364459140c654afb663c1 | d9cc042eacfd65912781aad31a57a2709c1e27e4 | /platypush/backend/camera/pi.py | a595c399c9fe28c4578fef15bb5134b787c138c7 | [
"MIT"
]
| permissive | shineit/platypush | 914ddb50371cee63669c74f775baf5ae652c9075 | a67b301cd66235b41bff6bfb3de56b903bf9551d | refs/heads/master | 2020-03-15T04:08:02.349413 | 2018-05-03T00:17:31 | 2018-05-03T00:17:31 | 131,958,375 | 1 | 0 | MIT | 2018-05-03T07:31:56 | 2018-05-03T07:31:56 | null | UTF-8 | Python | false | false | 2,443 | py | import logging
import socket
import time
import picamera
from platypush.backend import Backend
class CameraPiBackend(Backend):
def __init__(self, listen_port, x_resolution=640, y_resolution=480,
framerate=24, hflip=False, vflip=False,
sharpness=0, contrast=0, brightness=50,
video_stabilization=False, ISO=0, exposure_compensation=0,
exposure_mode='auto', meter_mode='average', awb_mode='auto',
image_effect='none', color_effects=None, rotation=0,
crop=(0.0, 0.0, 1.0, 1.0), **kwargs):
""" See https://www.raspberrypi.org/documentation/usage/camera/python/README.md
for a detailed reference about the Pi camera options """
super().__init__(**kwargs)
self.listen_port = listen_port
self.server_socket = socket.socket()
self.server_socket.bind(('0.0.0.0', self.listen_port))
self.server_socket.listen(0)
self.camera = picamera.PiCamera()
self.camera.resolution = (x_resolution, y_resolution)
self.camera.framerate = framerate
self.camera.hflip = hflip
self.camera.vflip = vflip
self.camera.sharpness = sharpness
self.camera.contrast = contrast
self.camera.brightness = brightness
self.camera.video_stabilization = video_stabilization
self.camera.ISO = ISO
self.camera.exposure_compensation = exposure_compensation
self.camera.exposure_mode = exposure_mode
self.camera.meter_mode = meter_mode
self.camera.awb_mode = awb_mode
self.camera.image_effect = image_effect
self.camera.color_effects = color_effects
self.camera.rotation = rotation
self.camera.crop = crop
logging.info('Initialized Pi camera backend')
def send_message(self, msg):
pass
def run(self):
super().run()
while True:
connection = self.server_socket.accept()[0].makefile('wb')
try:
self.camera.start_recording(connection, format='h264')
while True:
self.camera.wait_recording(60)
except ConnectionError as e:
pass
finally:
try:
self.camera.stop_recording()
connection.close()
except:
pass
# vim:sw=4:ts=4:et:
| [
"[email protected]"
]
| |
9fbe0840c86114c15a28c5123e2a28710abe4740 | 2c32cf726e111b8625265c458feeaea436652e83 | /pramp-condility-3month/random_O1.py | 47221cf8aaec833a485f67d72f55e7c969e9e592 | []
| no_license | minhthe/practice-algorithms-and-data-structures | 6fa3bf98e8e2fe98f4e32419fb797b1df4400364 | 488a82dd3a0c797859a6c9e1195d6d579d676073 | refs/heads/master | 2021-05-16T23:01:20.026475 | 2020-09-23T04:17:13 | 2020-09-23T04:17:13 | 250,505,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | '''https://leetcode.com/problems/insert-delete-getrandom-o1'''
import random
class RandomizedSet:
def __init__(self):
"""
Initialize your data structure here.
"""
self.arr = []
self.mp = {}
self.cnt = 0
def insert(self, val: int) -> bool:
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
"""
if val in self.mp:
return False
else:
self.mp[val] = self.cnt
self.arr.append(val)
self.cnt +=1
# print(self.arr, self.mp, self.cnt )
return True
def remove(self, val: int) -> bool:
"""
Removes a value from the set. Returns true if the set contained the specified element.
"""
if val in self.mp:
            # swap val with the last element, fix that element's stored index, then pop
idx = self.mp[val]
tmp = self.arr[-1]
self.arr[idx] = tmp
self.mp[tmp] = idx
del self.mp[val]
self.arr.pop()
self.cnt-=1
return True
return False
def getRandom(self) -> int:
"""
Get a random element from the set.
"""
return self.arr[ random.randint(0, self.cnt-1) ]
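# A short walk-through of the expected behaviour:
#   rs = RandomizedSet()
#   rs.insert(1)   # True
#   rs.remove(2)   # False, 2 is not present
#   rs.insert(2)   # True
#   rs.getRandom() # returns 1 or 2 with equal probability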
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom() | [
"[email protected]"
]
| |
7ec4ecdfa91bbe1ffc8d0703684dbf39db4898ca | 535503dc18c38b92f8520289da5b4fa42b0a722a | /code/exp_control/sequencer/sequences/obsolete/general_pulses2.py | dc379fb427becc1d61c5d12714b867915e729148 | []
| no_license | jamesbate/phd_code | fbbbf7657c428a0a1f18768edca1dfce56801cc1 | 7e71d7f041835497fb421dd741c644ab5c8e3805 | refs/heads/master | 2023-05-07T10:31:22.168217 | 2021-05-26T15:00:40 | 2021-05-26T15:00:40 | 371,073,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | # 729 pulse experiment
# 16.2.06 TK
<VARIABLES>
# test-tom=self.set_variable("float","test-tom",9000.123456,0.01,2e7)
phase1=self.set_variable("float","phase1",0,-10,10)
phase2=self.set_variable("float","phase2",0,-10,10)
gl_cam_time=self.set_variable("float","gl_cam_time",5000.000000,0,2e7)
det_time=self.set_variable("float","det_time",5000.000000,0.01,2e7)
doppler_length=self.set_variable("float","doppler_length",3000.000000,1,2e5)
pump_length=self.set_variable("float","pump_length",100.000000,1,2e5)
pump_length_729=self.set_variable("float","pump_length_729",500.000000,1,2e5)
pulse_3=self.set_variable("bool","pulse_3",0)
pulse_4=self.set_variable("bool","pulse_4",0)
</VARIABLES>
# The save form specifies which data will be saved and how, when a scan is performed.
# If this is omitted a standard form is used
<SAVE FORM>
.dat ; %1.6f
meanExc; 0; %1.3f
parity; 0; %1.3f
pn; 1;elements; (0:N); %1.3f
StartTime; 0; %1.3f
StopTime; 0; %1.3f
PMTcounts; 1;elements; (0:N); %1.0f
</SAVE FORM>
<TRANSITIONS>
t_carr={1 : 1.0, 2: 1.0, 3 : 1.0}
#Carrier=transition(transition_name="Carrier",t_rabi=t_carr,
# frequency=freq,sweeprange=sspan,amplitude=power_dB,slope_type="blackman",
# slope_duration=slope_dur,amplitude2=-1,frequency2=0,port=port_nr)
#set_transition(Carrier,"729")
</TRANSITIONS>
# Here the sequence can override program parameters. Syntax follows from "Write Token to Params.vi"
<PARAMS OVERRIDE>
AcquisitionMode excitation
</PARAMS OVERRIDE>
<SEQUENCE>
incl.DopplerCooling40(doppler_length,repump_length)
if opt_pumping : incl.OpticalPumping40(pump_length)
if sb_cool : incl.SBCooling40(SBCool_time,SBCool_reps)
if opt_pump_729 : incl.OpticalPumping40_729(pump_length_729)
else : seq_wait(pump_length_729)
seq_wait(700)
if pulse_1 : rf_729(1,0.5,0,"carrier1")
if pulse_2 : rf_729(1,1,0,"gate")
seq_wait(wait_time)
if pulse_3 : rf_729(1,1,phase2*math.pi,"gate")
if pulse_4 : rf_729(1,0.5,phase1*math.pi,"carrier1")
incl.PMTDetection(det_time,gl_cam_time)
</SEQUENCE>
<AUTHORED BY LABVIEW>
3
</AUTHORED BY LABVIEW>
| [
"[email protected]"
]
| |
111e59e47e462a20ed1faf6392b577cdf83b5911 | df8466745f40b2966c69744a78dce62e46e69503 | /AULA7/exercicios/portas_006.py | b8e84668c02f01686b5f4f59a256b9507ef0ebb1 | []
| no_license | sandromelobrazil/YELLOW_aula1 | 42f0e8ffa673a5f95d6ee6330ef1d2b894339a14 | c078fbe235979f365f72c6f160462b394a44765c | refs/heads/master | 2023-02-05T00:52:43.719881 | 2021-08-10T16:00:03 | 2021-08-10T16:00:03 | 300,909,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | #!/usr/bin/python
portlist = set()
porteste1 = [ '11-443', 'iE100', 'XYZ','1000', '30', '111', '11', '11', '11', '25', '22', '21', '22', '22', 'Zzz' ]
def testaporta(_port):
if _port.isdigit():
portlist.add(int(_port))
elif '-' in _port:
portainicial, portafinal = _port.split('-')
for _rangeport in range(int(portainicial), int(portafinal)+1):
portlist.add(int(_rangeport))
else:
pass
for _port in porteste1:
testaporta(_port)
print('=' * 50)
print(portlist)
print('=' * 50)
print('=' * 50)
print('=' * 50)
portlist2 = [ str(port) for port in portlist ]
listport = ','.join(portlist2)
print(listport)
print('=' * 50)
print('=' * 50)
| [
"[email protected]"
]
| |
11c7d56d8a24de673cf13849d9d7c1be58c71029 | 73b158f51285300c1d3456b7af9163939ee206f2 | /DevOps/sprint03/t01_clear_words/clear_words.py | edc4ac2b62b175289d0754c33a8097cc31331b4f | []
| no_license | nnocturnnn/DevOps | 2e332b3552a5b294b36d2af7de854aa18f2da46f | 173c75938e65be8fbbb5c02c3d655d09df9a2931 | refs/heads/master | 2023-06-11T07:21:14.097930 | 2021-06-30T13:58:15 | 2021-06-30T13:58:15 | 352,070,911 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | import re
def re_sub_clean(string):
    # strip spaces and common punctuation from a single token
    return re.sub(r"[ ?!.:;,-]", "", string)
def clear_words(s):
    return list(map(re_sub_clean, s.split()))
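# For example, clear_words("Hello, world!") returns ['Hello', 'world']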
| [
"[email protected]"
]
| |
4d943666d27082a916fe19297f81966b1985dcc9 | 72dc7d124cdac8f2dcab3f72e95e9a646154a6a0 | /byceps/services/shop/order/models/order_item.py | bdf7f5515c1aaaa9557530f6c771a730c2c0e82d | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
]
| permissive | m-ober/byceps | e6569802ee76e8d81b892f1f547881010359e416 | 4d0d43446f3f86a7888ed55395bc2aba58eb52d5 | refs/heads/master | 2020-11-30T23:31:33.944870 | 2020-02-12T23:53:55 | 2020-02-12T23:56:04 | 40,315,983 | 0 | 0 | null | 2015-08-06T16:41:36 | 2015-08-06T16:41:36 | null | UTF-8 | Python | false | false | 2,501 | py | """
byceps.services.shop.order.models.order_item
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from decimal import Decimal
from .....database import db, generate_uuid
from ...article.models.article import Article
from ...article.transfer.models import ArticleNumber
from ..transfer.models import OrderItem as OrderItemTransferObject
from .order import Order
class OrderItem(db.Model):
"""An item that belongs to an order."""
__tablename__ = 'shop_order_items'
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
order_number = db.Column(db.UnicodeText, db.ForeignKey('shop_orders.order_number'), index=True, nullable=False)
order = db.relationship(Order, backref='items')
article_number = db.Column(db.UnicodeText, db.ForeignKey('shop_articles.item_number'), index=True, nullable=False)
article = db.relationship(Article, backref='order_items')
description = db.Column(db.UnicodeText, nullable=False)
unit_price = db.Column(db.Numeric(6, 2), nullable=False)
tax_rate = db.Column(db.Numeric(3, 3), nullable=False)
quantity = db.Column(db.Integer, db.CheckConstraint('quantity > 0'), nullable=False)
line_amount = db.Column(db.Numeric(7, 2), nullable=False)
shipping_required = db.Column(db.Boolean, nullable=False)
def __init__(
self,
order: Order,
article_number: ArticleNumber,
description: str,
unit_price: Decimal,
tax_rate: Decimal,
quantity: int,
line_amount: Decimal,
shipping_required: bool,
) -> None:
# Require order instance rather than order number as argument
# because order items are created together with the order – and
# until the order is created, there is no order number assigned.
self.order = order
self.article_number = article_number
self.description = description
self.unit_price = unit_price
self.tax_rate = tax_rate
self.quantity = quantity
self.line_amount = line_amount
self.shipping_required = shipping_required
def to_transfer_object(self) -> OrderItemTransferObject:
return OrderItemTransferObject(
self.order_number,
self.article_number,
self.description,
self.unit_price,
self.tax_rate,
self.quantity,
self.line_amount,
)
| [
"[email protected]"
]
| |
bcbfda9b43d7eb4d22855efabe65b5b75b4b22a1 | 1dcea2a511f14a43701994f6a7785afd21a20d74 | /Algorithm/389_FindTheDifference.py | 429149c653494ecf639efcc4a8d316ffea12735d | []
| no_license | lingtianwan/Leetcode2 | 66031e256a2928c6197516f83f14748c52e91b8c | 80a604cc09d5d2d62dd05157d8b829de675e4404 | refs/heads/master | 2021-01-13T11:17:18.238465 | 2017-02-09T01:43:38 | 2017-02-09T01:43:38 | 81,395,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # Given two strings s and t which consist of only lowercase letters.
#
# String t is generated by random shuffling string s and then add one more letter at a random position.
#
# Find the letter that was added in t.
#
# Example:
#
# Input:
# s = "abcd"
# t = "abcde"
#
# Output:
# e
#
# Explanation:
# 'e' is the letter that was added.
import collections
class Solution(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
s_map = collections.Counter(s)
t_map = collections.Counter(t)
for key, val in t_map.items():
if s_map.get(key, 0) != val:
return key
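# e.g. Solution().findTheDifference("abcd", "abcde") returns 'e'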
| [
"[email protected]"
]
| |
33a4fb3e793637a89656c533d9a60bdc7161a7de | 0f1849a97b00fefef26756884d1410df2bd3e484 | /app/color.py | ea0f4e4ffb55d6867d91250f186479d3bd4248f2 | [
"Apache-2.0"
]
| permissive | yumioi/ci_edit | 62170f8e429f7183cea4f122112c790e3851b210 | 5af80d643e7b16e5e3270771bdbc6b322255d460 | refs/heads/master | 2020-04-09T06:16:07.197122 | 2018-11-28T18:55:10 | 2018-11-28T18:55:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import app.prefs
colors = 256
cache__ = {}
def reset():
global cache__
cache__ = {}
def get(colorType, delta=0):
global cache__
if type(colorType) == type(0):
colorIndex = colorType
else:
colorIndex = app.prefs.color[colorType]
colorIndex = min(colors - 1, colorIndex + delta)
color = cache__.get(colorIndex) or curses.color_pair(colorIndex)
cache__[colorIndex] = color
if colorType in ('error', 'misspelling'):
color |= curses.A_BOLD | curses.A_REVERSE
return color
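# e.g. get('error') looks up app.prefs.color['error'] and returns that curses
# color pair with bold and reverse video applied; other color types come back plain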
| [
"[email protected]"
]
| |
6487f8748c0f588a0cf9da0085b7944535d7e7f5 | 0d298b3aff627883b2eed85516bed7e61f174d74 | /bin/delete_downstream.py | 1754ee3365c81b8aba8b74ef4d7d8a1bd33065c9 | [
"BSD-3-Clause"
]
| permissive | ellongley/TXPipe | be05bec21181a0d1e13cb2f3dcc2f55633a62369 | 785f93e876a00c528379b1bbacbbf5c42585b789 | refs/heads/master | 2023-08-21T13:53:50.732723 | 2021-10-04T14:28:06 | 2021-10-04T14:28:06 | 416,102,676 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | """
This script prints out the commands to delete all files generated by a pipeline,
downstream of a specified stage.
If one stage was wrong, and you need to re-run everything it affected, this script
will print out the commands to delete the relevant files, so that re-running the
pipeline with resume=True re-runs the correct stages.
"""
import sys
sys.path.append('.')
import ceci
import txpipe
import yaml
import collections
import os
# start from a config file and a stage to delete
config = yaml.safe_load(open(sys.argv[1]))
stage_to_delete = sys.argv[2]
# get the stages we need
stage_names = [s['name'] for s in config['stages']]
pipeline = ceci.Pipeline(config['stages'], None)
stages = [ceci.PipelineStage.get_stage(stage_name) for stage_name in stage_names]
# build the mapping tag => stages depending on that tag
dependencies = collections.defaultdict(list)
for stage in stages:
for tag in stage.input_tags():
dependencies[tag].append(stage)
# initialize with deleting the chosen stage and the tags it makes
stage_to_delete_cls = ceci.PipelineStage.get_stage(stage_to_delete)
tags_to_delete = list(stage_to_delete_cls.output_tags())
stages_to_delete = {stage_to_delete_cls}
# loop at most len(stages) times, enough to propagate the deletions to a fixed point
for i in range(len(stage_names)):
# take all tags we currently know we have to delete
for tag in tags_to_delete[:]:
# find out which stages to clear because they need
# this tag which we are deleting
deps = set(dependencies[tag])
for s in stages:
if s in deps:
# if we need to delete this stage,
# add its outputs to the tags to delete
tags_to_delete += s.output_tags()
# and it to the stages to delete
stages_to_delete.add(s)
tags_to_delete = list(set(tags_to_delete))
# now at the end we delete all tags output by stage to delete
for s in stages_to_delete:
for f in pipeline.find_outputs(s, config).values():
print(f"rm -f {f}")
| [
"[email protected]"
]
| |
b642949b0d7101af9212e049ba6f11ca4f2ae132 | 08f087f8d07aac2f93f5adccc48bfa7172e7ae6d | /trash/servers_action.py | a5b17eb341b9844e549ba21b6bf4cbc71a54c880 | []
| no_license | bsdpunk/trash | 78a49e2255dbffb212e227ea6fe1d5fbbbf60158 | 2a1f0f1eb4456d9c5ca418a34e94fa61a88f00b6 | refs/heads/master | 2020-04-12T09:34:35.638883 | 2017-01-05T02:53:26 | 2017-01-05T02:53:26 | 64,771,900 | 8 | 2 | null | 2016-09-02T19:52:41 | 2016-08-02T16:05:34 | Python | UTF-8 | Python | false | false | 4,432 | py | import sys
import requests
import json
import re
from pprint import pprint
def linode_list(api_key):
headers = {'content-type': 'application/json'}
endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.list"
r = requests.get(endpoint, headers=headers)
json_data = json.loads(r.text)
return(json_data)
def linode_list_ip(api_key, argument=0):
    headers = {'content-type': 'application/json'}
    ip_addy = re.compile(r'\d+\.\d+\.\d+\.\d+')
    lin_name = re.compile(r'(\d+)')
    if argument == 0:
        # no argument: list the IPs of every Linode on the account
        endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.ip.list"
        r = requests.get(endpoint, headers=headers)
        json_data = json.loads(r.text)
    elif re.match(ip_addy, argument) is not None:
        json_data = "not implemented yet"
    elif re.match(lin_name, argument) is not None:
        # numeric argument: treat it as a LinodeID and return its first IP address
        lin_id = re.match(lin_name, argument).group(1)
        endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.ip.list&LinodeID=" + lin_id
        r = requests.get(endpoint, headers=headers)
        json_data = json.loads(r.text)
        json_data = json_data["DATA"][0]["IPADDRESS"]
    else:
        json_data = "Invalid"
    return(json_data)
def linode_create(api_key, dc_id, plan_id, pay_term_id=0):
headers = {'content-type': 'application/json'}
endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.create&DatacenterID="+ dc_id +"&PlanID=" +plan_id
r = requests.get(endpoint, headers=headers)
json_data = json.loads(r.text)
return(json_data)
def linode_disk_create(api_key, l_id, size, dst_id=0, root=0, label=0, formatt=0, ro=0):
    # The endpoint below targets the legacy linode.disk.create action; the
    # parameter names (LinodeID/Label/Type/Size) are assumptions, so verify
    # them against the API docs before relying on this call.
    headers = {'content-type': 'application/json'}
    endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.disk.create&LinodeID=" + str(l_id) + "&Label=" + str(label) + "&Type=" + str(formatt) + "&Size=" + str(size)
    r = requests.get(endpoint, headers=headers)
    json_data = json.loads(r.text)
    return(json_data)
def linode_disk_dist(api_key, l_id, dst_id, label, size, root, ssh_key=0):
headers = {'content-type': 'application/json'}
if ssh_key == 0:
endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.disk.createfromdistribution&LinodeID="+ l_id +"&DistributionID=" +dst_id+"&Label="+label+"&Size="+size+"&rootPass="+root
r = requests.get(endpoint, headers=headers)
json_data = json.loads(r.text)
else:
json_data = "Invalid"
return(json_data)
#def linode_config_create(api_key, l_id, k_id, label, size, root, ssh_key=0):
# headers = {'content-type': 'application/json'}
# if ssh_key == 0:
# endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.disk.createfromdistribution&LinodeID="+ l_id +"&DistributionID=" +dst_id+"&Label="+label+"&Size="+size+"&rootPass="+root
# r = requests.get(endpoint, headers=headers)
# json_data = json.loads(r.text)
# else:
# json_data = "Invalid"
# return(json_data)
#def linode_disk_image(api_key, i_id, l_id, label, size, root, ssh_key=0):
# headers = {'content-type': 'application/json'}
# if ssh_key == 0:
# endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.disk.createfromdistribution&i_id="+i_id+"&LinodeID="+ l_id+"&Label="+label+"&Size="+size+"&rootPass="+root
#
# r = requests.get(endpoint, headers=headers)
# json_data = json.loads(r.text)
# else:
# json_data = "Invalid"
# return(json_data)
def list_images(api_key):
headers = {'content-type': 'application/json'}
endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=image.list"
r = requests.get(endpoint, headers=headers)
json_data = json.loads(r.text)
return(json_data)
def linode_shutdown(api_key, numeric_lin_id):
headers = {'content-type': 'application/json'}
endpoint = "https://api.linode.com/?api_key=" + api_key["Linode-API-Key"] + "&api_action=linode.shutdown&LinodeID="+ numeric_lin_id
r = requests.get(endpoint, headers=headers)
json_data = json.loads(r.text)
return(json_data)
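# Usage sketch (the key value and Linode ID are placeholders):
#   api_key = {"Linode-API-Key": "..."}
#   print(linode_list(api_key))
#   print(linode_list_ip(api_key, "12345"))  # first IP address of that Linode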
| [
"[email protected]"
]
| |
b696eccd4a16cc676cb7faa3e3ac14ad5a53df25 | d2da19b86a134c4213311af568c7166bd10fbc8a | /OGBL_Collab/utils/logger.py | 3daed2535eae606a66945ac26b1f14d17234d746 | [
"MIT"
]
| permissive | x-zho14/Unified-LTH-GNN | bc96f89e6bdff18c45cc050de2cbdee8d425fcbf | edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55 | refs/heads/main | 2023-07-19T22:06:57.653044 | 2021-08-22T03:44:53 | 2021-08-22T03:44:53 | 398,705,892 | 0 | 0 | MIT | 2021-08-22T03:33:27 | 2021-08-22T03:33:27 | null | UTF-8 | Python | false | false | 891 | py | import os
import shutil
import csv
def save_best_result(list_of_dict, file_name, dir_path='best_result'):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
print("Directory ", dir_path, " is created.")
csv_file_name = '{}/{}.csv'.format(dir_path, file_name)
with open(csv_file_name, 'a+') as csv_file:
csv_writer = csv.writer(csv_file)
for _ in range(len(list_of_dict)):
csv_writer.writerow(list_of_dict[_].values())
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.makedirs(path)
print('Experiment dir : {}'.format(path))
# if scripts_to_save is not None:
# os.mkdir(os.path.join(path, 'scripts'))
# for script in scripts_to_save:
# dst_file = os.path.join(path, 'scripts', os.path.basename(script))
# shutil.copyfile(script, dst_file)
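# e.g. create_exp_dir('logs/exp1') creates the directory (and parents) if missing;
# save_best_result([{'acc': 0.9}], 'run1') appends a row to best_result/run1.csv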
| [
"[email protected]"
]
| |
ce27d4f1720ab3ca03e03f54a6859697aa0b7d55 | 8cd098de61bc569e4247d15ca25bfbd0fb9ef793 | /01_Jump_to_Python/Chap04/149.py | a55e73979cb20208806cdd471eb9977af696e106 | []
| no_license | gaeunPark/Bigdata | 67b8df7a4988f130ed7b45750c5447cf0394c49b | 31388b2ac3a28306caae6d299bb5090aef849403 | refs/heads/master | 2020-03-15T09:27:23.537060 | 2018-11-22T02:18:45 | 2018-11-22T02:18:45 | 132,075,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | def sum_and_num(a,b):
return a+b, a*b
result = sum_and_num(3,4)
print(result)
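# result is the tuple (7, 12), i.e. 3+4 and 3*4; the next line unpacks it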
sum, mul = sum_and_num(3,4)  # note: this rebinds the built-in name sum
print("%d %d" %(sum, mul)) | [
"[email protected]"
]
| |
5a9bfe853d1596cafde41f489b55969c65a2a18f | 28c598bf75f3ab287697c7f0ff1fb13bebb7cf75 | /testgame.mmo/genesis/spawn/npc.py | 4d99f03468bc84923d3fb54f351f6becf353fe4a | []
| no_license | keaysma/solinia_depreciated | 4cb8811df4427261960af375cf749903d0ca6bd1 | 4c265449a5e9ca91f7acf7ac05cd9ff2949214ac | refs/heads/master | 2020-03-25T13:08:33.913231 | 2014-09-12T08:23:26 | 2014-09-12T08:23:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,595 | py | from genesis.dbdict import *
from mud.world.defines import *
from mud.world.spawn import SpawnSoundProfile
spawn = DBSpawn()
spawn.name = "Azala Dogooder"
spawn.flags = RPG_SPAWN_UNIQUE
spawn.race = "Human"
spawn.sex = "Female"
spawn.pclass = "Warrior"
spawn.plevel = 10
spawn.isMonster = False
spawn.realm = RPG_REALM_NEUTRAL
#Avatar Pack Examples
#Available from:
#http://www.mmoworkshop.com/trac/mom/wiki/Store
"""
spawn = DBSpawn()
spawn.name = "Lithian Treil"
spawn.flags = RPG_SPAWN_UNIQUE
spawn.race = "Elf"
spawn.sex = "Male"
spawn.pclass = "Warrior"
spawn.plevel = 25
spawn.isMonster = False
spawn.realm = RPG_REALM_NEUTRAL
spawn = DBSpawn()
spawn.name = "Thurgin Oakenshielf"
spawn.flags = RPG_SPAWN_UNIQUE
spawn.race = "Dwarf"
spawn.sex = "Male"
spawn.pclass = "Warrior"
spawn.plevel = 35
spawn.isMonster = False
spawn.realm = RPG_REALM_NEUTRAL
spawn = DBSpawn()
spawn.name = "Blingo Longbottom"
spawn.flags = RPG_SPAWN_UNIQUE
spawn.race = "Halfling"
spawn.sex = "Male"
spawn.pclass = "Warrior"
spawn.plevel = 60
spawn.isMonster = False
spawn.realm = RPG_REALM_NEUTRAL
spawn = DBSpawn()
spawn.name = "Azarek Relik"
spawn.flags = RPG_SPAWN_UNIQUE
spawn.race = "Gnome"
spawn.sex = "Female"
spawn.pclass = "Warrior"
spawn.plevel = 33
spawn.isMonster = False
spawn.realm = RPG_REALM_NEUTRAL
spawn = DBSpawn()
spawn.name = "Hemok Stonebreaker"
spawn.flags = RPG_SPAWN_UNIQUE
spawn.race = "Titan"
spawn.sex = "Male"
spawn.pclass = "Warrior"
spawn.plevel = 10
spawn.isMonster = False
spawn.realm = RPG_REALM_NEUTRAL
spawn = DBSpawn()
spawn.name = "Glarg Wickedblade"
spawn.flags = RPG_SPAWN_UNIQUE
spawn.race = "Orc"
spawn.sex = "Male"
spawn.pclass = "Warrior"
spawn.plevel = 50
spawn.isMonster = True
spawn = DBSpawn()
spawn.name = "Oshkosh Betosh"
spawn.flags = RPG_SPAWN_UNIQUE
spawn.race = "Ogre"
spawn.sex = "Male"
spawn.pclass = "Warrior"
spawn.plevel = 15
spawn.isMonster = True
spawn = DBSpawn()
spawn.name = "Cillik Neidle"
spawn.flags = RPG_SPAWN_UNIQUE
spawn.race = "Goblin"
spawn.sex = "Female"
spawn.pclass = "Warrior"
spawn.plevel = 54
spawn.isMonster = True
spawn = DBSpawn()
spawn.name = "Tor the Brute"
spawn.flags = RPG_SPAWN_UNIQUE
spawn.race = "Troll"
spawn.sex = "Male"
spawn.pclass = "Warrior"
spawn.plevel = 40
spawn.isMonster = True
spawn = DBSpawn()
spawn.name = "Isis Krarn"
spawn.flags = RPG_SPAWN_UNIQUE
spawn.race = "Drakken"
spawn.sex = "Female"
spawn.pclass = "Warrior"
spawn.plevel = 20
spawn.isMonster = True
"""
# VendorNPC for tools
spawn = DBSpawn()
spawn.name = "Tool Vendor"
spawn.realm = RPG_REALM_LIGHT
spawn.pclass = "Paladin"
spawn.sex = "Male"
spawn.race = "Human"
spawn.plevel = 30
spawn.isMonster = False
spawn.flags = RPG_SPAWN_UNIQUE
spawn.aggroRange = 10
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.desc = ""
spawn.vendor = "Tool Vendor"
spawn.addSkill("Dual Wield",1)
loot = DBLootProto()
loot.addItem("Longsword", RPG_FREQ_COMMON)
loot.addItem("Longsword", RPG_FREQ_COMMON)
loot.addItem("Broadsword", RPG_FREQ_COMMON)
loot.addItem("Magic Shield", RPG_FREQ_COMMON)
spawn.loot = loot
# VendorNPC for stones
spawn = DBSpawn()
spawn.name = "Stone Vendor"
spawn.realm = RPG_REALM_LIGHT
spawn.pclass = "Paladin"
spawn.sex = "Male"
spawn.race = "Human"
spawn.plevel = 30
spawn.isMonster = False
spawn.flags = RPG_SPAWN_UNIQUE
spawn.aggroRange = 10
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.dialog = "Stone Vendor Dialog"
spawn.desc = "He really sells and buys stones. So maybe he is a little stoned himself."
spawn.vendor = "Vendorman"
spawn.addSkill("Dual Wield",1)
loot = DBLootProto()
loot.addItem("Longsword", RPG_FREQ_COMMON)
loot.addItem("Longsword", RPG_FREQ_COMMON)
loot.addItem("Broadsword", RPG_FREQ_COMMON)
loot.addItem("Magic Shield", RPG_FREQ_COMMON)
spawn.loot = loot
# VendorNPC for scrolls
spawn = DBSpawn()
spawn.name = "Scroll Vendor"
spawn.realm = RPG_REALM_LIGHT
spawn.pclass = "Paladin"
spawn.sex = "Male"
spawn.race = "Human"
spawn.plevel = 30
spawn.isMonster = False
spawn.flags = RPG_SPAWN_UNIQUE
spawn.aggroRange = 10
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.dialog = "Scroll Vendor Dialog"
spawn.desc = "He sells and buys scrolls."
spawn.vendor = "Spell Dealer"
loot = DBLootProto()
loot.addItem("Magic Shield", RPG_FREQ_ALWAYS)
loot.addItem("Longsword", RPG_FREQ_ALWAYS)
spawn.loot = loot
# VendorNPC for weapons
spawn = DBSpawn()
spawn.name = "Weapon Vendor"
spawn.realm = RPG_REALM_LIGHT
spawn.pclass = "Paladin"
spawn.sex = "Male"
spawn.race = "Human"
spawn.plevel = 30
spawn.isMonster = False
spawn.flags = RPG_SPAWN_UNIQUE
spawn.aggroRange = 10
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.dialog = "Weapon Vendor Dialog"
spawn.desc = "He sells and buys Weapons."
spawn.vendor = "Weapon Vendor"
spawn.addSkill("Dual Wield",1)
loot = DBLootProto()
loot.addItem("Longsword", RPG_FREQ_ALWAYS)
loot.addItem("Longsword", RPG_FREQ_ALWAYS)
spawn.loot = loot
# TrainerNPC for skills
spawn = DBSpawn()
spawn.name = "Skill Trainer"
spawn.realm = RPG_REALM_LIGHT
spawn.pclass = "Paladin"
spawn.sex = "Male"
spawn.race = "Human"
spawn.plevel = 30
spawn.isMonster = False
spawn.flags = RPG_SPAWN_UNIQUE
spawn.aggroRange = 10
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.dialog = "Skill Trainer Dialog"
spawn.desc = "He trains skills."
loot = DBLootProto()
loot.addItem("Broadsword", RPG_FREQ_ALWAYS)
spawn.loot = loot
# NPCs for factions
spawn = DBSpawn()
spawn.name = "Follower of the Green"
spawn.realm = RPG_REALM_NEUTRAL
spawn.sex = "Male"
spawn.race = "Human"
spawn.pclass = "Warrior"
spawn.plevel = 1
spawn.isMonster = False
spawn.flags = RPG_SPAWN_NOASSIST
spawn.aggroRange = 20
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.desc = "He is for the Green."
spawn.addFaction("The Green")
spawn.addKillFaction("The Blue")
spawn.addKillFaction("The Red")
spawn = DBSpawn()
spawn.name = "Follower of the Red"
spawn.realm = RPG_REALM_NEUTRAL
spawn.sex = "Male"
spawn.race = "Human"
spawn.pclass = "Warrior"
spawn.plevel = 1
spawn.isMonster = False
spawn.flags = RPG_SPAWN_NOASSIST
spawn.aggroRange = 20
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.desc = "He is for the Red."
spawn.addFaction("The Red")
spawn.addKillFaction("The Blue")
spawn.addKillFaction("The Green")
spawn = DBSpawn()
spawn.name = "Follower of the Blue"
spawn.realm = RPG_REALM_NEUTRAL
spawn.sex = "Male"
spawn.race = "Human"
spawn.pclass = "Warrior"
spawn.plevel = 1
spawn.isMonster = False
spawn.flags = RPG_SPAWN_NOASSIST
spawn.aggroRange = 20
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.desc = "He is for the Blue."
spawn.addFaction("The Blue")
spawn.addKillFaction("The Green")
spawn.addKillFaction("The Red")
# VendorNPC for Food and Drinks
spawn = DBSpawn()
spawn.name = "Food Vendor"
spawn.realm = RPG_REALM_LIGHT
spawn.pclass = "Paladin"
spawn.sex = "Female"
spawn.race = "Human"
spawn.plevel = 30
spawn.isMonster = False
spawn.flags = RPG_SPAWN_UNIQUE
spawn.aggroRange = 10
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "B"
spawn.dialog = "Food Vendor Dialog"
spawn.desc = "Here you can get nice snacks."
spawn.vendor = "Snacks"
spawn.addSkill("Dual Wield",1)
loot = DBLootProto()
loot.addItem("Longsword", RPG_FREQ_COMMON)
loot.addItem("Longsword", RPG_FREQ_COMMON)
loot.addItem("Broadsword", RPG_FREQ_COMMON)
loot.addItem("Magic Shield", RPG_FREQ_COMMON)
spawn.loot = loot
# NPC for fighting
spawn = DBSpawn()
spawn.name = "Fighter"
spawn.realm = RPG_REALM_LIGHT
spawn.pclass = "Warrior"
spawn.sex = "Male"
spawn.race = "Human"
spawn.plevel = 10
spawn.isMonster = False
spawn.flags = 0
spawn.aggroRange = 20
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.desc = "A fighter."
# NPC for Quest
spawn = DBSpawn()
spawn.name = "Wolfman"
spawn.realm = RPG_REALM_LIGHT
spawn.pclass = "Paladin"
spawn.sex = "Male"
spawn.race = "Human"
spawn.plevel = 30
spawn.isMonster = False
spawn.flags = RPG_SPAWN_UNIQUE
spawn.aggroRange = 10
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.dialog = "The Wolf Quest"
spawn.desc = "He will give you a quest."
spawn.addSkill("Dual Wield",1)
loot = DBLootProto()
loot.addItem("Longsword", RPG_FREQ_COMMON)
loot.addItem("Longsword", RPG_FREQ_COMMON)
loot.addItem("Broadsword", RPG_FREQ_COMMON)
loot.addItem("Magic Shield", RPG_FREQ_COMMON)
spawn.loot = loot
# NPC and mobs for epic battle
#--- Taskmaster Duro
spawn = DBSpawn()
spawn.name = "Taskmaster Duro"
spawn.race = "Human"
spawn.sex = "Female"
spawn.pclass = "Doom Knight"
spawn.plevel = 40
spawn.difficultyMod = 2
spawn.isMonster = False
spawn.flags = RPG_SPAWN_UNIQUE
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.dialog = "HumanWolfSkirmishReward"
spawn.desc = ""
#--- Ororcali
spawn = spawn.clone()
spawn.name = "Ororcali"
spawn.sex = "Male"
spawn.pclass = "Warrior"
spawn.plevel = 38
spawn.difficultyMod = 1
spawn.dialog = ""
#--- Human Deathbinder
spawn = spawn.clone()
spawn.name = "Human Deathbinder"
spawn.pclass = "Assassin"
spawn.flags = 0
#--- Human Punisher
spawn = spawn.clone()
spawn.name = "Human Punisher"
spawn.pclass = "Wizard"
spawn.plevel = 36
#--- Human Destroyer
spawn = spawn.clone()
spawn.name = "Human Destroyer"
spawn.pclass = "Barbarian"
spawn.plevel = 34
#--- Captain Flamehorn
spawn = DBSpawn()
spawn.name = "Captain Flamehorn"
spawn.race = "Animal"
spawn.sex = "Male"
spawn.pclass = "Paladin"
spawn.plevel = 33
spawn.difficultyMod = 2
spawn.isMonster = False
spawn.flags = RPG_SPAWN_UNIQUE
spawn.model = "wolf/wolf.dts"
spawn.textureArms = ""
spawn.textureLegs = ""
spawn.textureBody = ""
spawn.textureHands = ""
spawn.textureFeet = ""
spawn.textureHead = ""
spawn.textureSingle = "wolf_grey"
sound = SpawnSoundProfile()
sound.sndAlert1 = "character/ZombieDog_Growl2.ogg"
sound.sndAlert2 = "character/ZombieDog_Growl3.ogg"
sound.sndAttack1 = "character/ZombieDog_Attack1.ogg"
sound.sndAttack2 = "character/ZombieDog_Attack2.ogg"
sound.sndAttack3 = "character/ZombieDog_Attack3.ogg"
sound.sndPain1 = "character/ZombieDog_Hurt1.ogg"
sound.sndPain2 = "character/ZombieDog_Hurt2.ogg"
sound.sndPain3 = "character/ZombieDog_Hurt3.ogg"
sound.sndDeath1 = "character/ZombieDog_Death1.ogg"
sound.sndDeath2 = "character/ZombieDog_Death2.ogg"
spawn.sndProfile = sound
spawn.vocalSet = ""
#--- Wolf Protector
spawn = spawn.clone()
spawn.name = "Wolf Protector"
spawn.pclass = "Warrior"
spawn.plevel = 28
spawn.difficultyMod = 1
spawn.flags = 0
#--- Wolf Crusader
spawn = spawn.clone()
spawn.name = "Wolf Crusader"
spawn.pclass = "Cleric"
spawn.plevel = 25
#--- Oak Tree
spawn = DBSpawn()
spawn.name = "Oak Tree"
spawn.pclass = "Harvest"
spawn.plevel = 1
spawn.race = "Tree"
spawn.requiresWeapon = "Foresting"
spawn.isMonster = True
spawn.scale = 2
spawn.aggroRange = 0
spawn.move = 0
spawn.model = "harvesting/oak.dts"
spawn.flags = RPG_SPAWN_RESOURCE|RPG_SPAWN_NOXP|RPG_SPAWN_PASSIVE|RPG_SPAWN_NORANDOMLOOT
spawn.textureBody = "harvesting/oak_bark"
spawn.textureHead = "harvesting/oak_branch"
#loot = DBLootProto()
#loot.addItem("Oak wood",RPG_FREQ_COMMON)
#spawn.loot = loot
spawn.addResistance(RPG_RESIST_PHYSICAL,1000)
spawn.addResistance(RPG_RESIST_MAGICAL,1000)
spawn.addResistance(RPG_RESIST_FIRE,1000)
spawn.addResistance(RPG_RESIST_COLD,1000)
spawn.addResistance(RPG_RESIST_POISON,1000)
spawn.addResistance(RPG_RESIST_DISEASE,1000)
spawn.addResistance(RPG_RESIST_ACID,1000)
spawn.addResistance(RPG_RESIST_ELECTRICAL,1000)
spawn.addResistance(RPG_RESIST_MINE,1000)
#--- Rock
spawn = DBSpawn()
spawn.name = "Rock"
spawn.pclass = "Harvest"
spawn.plevel = 1
spawn.race = "Rock"
spawn.requiresWeapon = "Mining"
spawn.isMonster = True
spawn.scale = 1
spawn.aggroRange = 0
spawn.move = 0
spawn.model = "harvesting/rock.dts"
spawn.flags = RPG_SPAWN_RESOURCE|RPG_SPAWN_NOXP|RPG_SPAWN_PASSIVE|RPG_SPAWN_NORANDOMLOOT
spawn.textureSingle = "harvesting/rock"
#loot = DBLootProto()
#loot.addItem("Rock Ore",RPG_FREQ_COMMON)
#spawn.loot = loot
spawn.addResistance(RPG_RESIST_PHYSICAL,1000)
spawn.addResistance(RPG_RESIST_MAGICAL,1000)
spawn.addResistance(RPG_RESIST_FIRE,1000)
spawn.addResistance(RPG_RESIST_COLD,1000)
spawn.addResistance(RPG_RESIST_POISON,1000)
spawn.addResistance(RPG_RESIST_DISEASE,1000)
spawn.addResistance(RPG_RESIST_ACID,1000)
spawn.addResistance(RPG_RESIST_ELECTRICAL,1000)
spawn.addResistance(RPG_RESIST_FOREST,1000)
# Auctioneer
spawn = DBSpawn()
spawn.name = "Auctioneer"
spawn.realm = RPG_REALM_LIGHT
spawn.pclass = "Paladin"
spawn.sex = "Male"
spawn.race = "Human"
spawn.plevel = 30
spawn.isMonster = False
spawn.flags = RPG_SPAWN_UNIQUE
spawn.aggroRange = 10
spawn.textureArms = "tset_0_arms"
spawn.textureLegs = "tset_0_legs"
spawn.textureBody = "tset_0_body"
spawn.textureHands = "tset_0_hands"
spawn.textureFeet = "tset_0_feet"
spawn.vocalSet = "A"
spawn.auctioneer = RPG_AUCTION_LIGHT
spawn.desc = "The auctioneer is here to allow you to trade some goods on the auction." | [
"[email protected]"
]
| |
fc0c1d5a27259452f63b323e79cbc3437ec29f5c | c95a30837ff6beaf228a42d05fed25301142f9d5 | /src/script/__init__.py | bc8a9fb8e7efce0582b61b660f29dfc9ddd4c266 | []
| no_license | fran-jo/engine.DM | a1e57d8c4158b33668afd184bee3dc279b07259c | da6eb7513b21a52566d1cbea15040c8a8e18bcec | refs/heads/master | 2020-03-23T21:04:19.781944 | 2018-07-24T00:09:31 | 2018-07-24T00:09:31 | 142,079,710 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | ''' package with script examples on how to use this engine ''' | [
"[email protected]"
]
| |
c46a1fd73901ca7b1756224e155dcb5e1ee69bf9 | 1625edfe28b4b0979fd32b4a3c5e55249a993fd5 | /baekjoon2953.py | dec2a6a3720be7c3939a31f421f2c940a1e4d1cc | []
| no_license | beOk91/baekjoon2 | b8bf504c506c6278899d4107ecfe51974ef13f5e | 39569f8effb8e32405a7d74d98bdabcab783ec56 | refs/heads/master | 2023-05-11T20:11:19.015113 | 2020-09-14T23:58:49 | 2020-09-14T23:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | arr=[[0]*5 for _ in range(5)]
arr2=[]
for i in range(5):
arr=list(map(int,input().strip().split()))
arr2.append(sum(arr))
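# e.g. if the five row sums are 15, 20, 13, 14, 10, this prints "2 20"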
print(arr2.index(max(arr2))+1,max(arr2)) | [
"[email protected]"
]
| |
63244ed5efe5b54db895159ffba1c706ff6d5916 | 1122ebc8eb5e481f3afee6e033fd71233d0b75cc | /grr/lib/blob_stores/local/__init__.py | 2d2226b948011a329065c049779b663a877c92f2 | [
"DOC",
"Apache-2.0"
]
| permissive | ethicalhackeragnidhra/Grr | 002f8f899f11676575bb6f74123cbdf37b2f3eea | 9ff9178396d9d16575e42dded33627cb09ac3af1 | refs/heads/master | 2022-12-24T07:20:45.938196 | 2017-07-28T14:36:30 | 2017-07-28T14:36:30 | 98,792,087 | 1 | 1 | Apache-2.0 | 2022-12-10T02:09:58 | 2017-07-30T11:09:01 | Python | UTF-8 | Python | false | false | 73 | py | #!/usr/bin/env python
"""This imports the site specific blob stores."""
| [
"[email protected]"
]
| |
f8edeab01ab18d1ec01ca83f1c589413af276fb3 | 2abefb9b5df4a5fdd782ac43cd1f6e94198fe43a | /Misc/HoWdArEyOu.py | b09e270fc5a17a017c97dade02711916257adef0 | []
| no_license | jb1361/Class-files-repo | e63d339fd9c9add23b78571b2258d6c836c329d4 | b29b80a93655348067a5146a6c7d2d5186ba184a | refs/heads/master | 2023-01-30T09:38:45.786561 | 2021-06-14T22:42:04 | 2021-06-14T22:42:04 | 92,340,613 | 0 | 0 | null | 2023-01-06T11:20:52 | 2017-05-24T22:05:32 | Python | UTF-8 | Python | false | false | 212 | py | word = input()
def howdareyou(s):
ret = ""
i = True # capitalize
for char in s:
if i:
ret += char.upper()
else:
ret += char.lower()
if char != ' ':
i = not i
return ret
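# e.g. howdareyou("how dare you") -> "HoW dArE yOu"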
print(howdareyou(word))
| [ "[email protected]" ] |
951292454e8c84b2863f62cb932ec16fb27245f0 | 6dc80929dc2ef3dfdbde4f95fece88e68d4aa4aa | /catalog/migrations/0004_auto_20190623_2357.py | 8bf172be2fb269d620f2dd42f01d270a9062bb65 | []
| no_license | shortnd/local_library | 87d912cd885919d4f9db73cbef058ed7bccd6d11 | 5a8753131193183c81c448d36064c7481843da4e | refs/heads/master | 2022-02-22T07:15:06.096747 | 2019-06-24T04:02:27 | 2019-06-24T04:02:27 | 190,504,303 | 0 | 0 | null | 2021-06-10T18:11:55 | 2019-06-06T03:00:23 | Python | UTF-8 | Python | false | false | 423 | py | # Generated by Django 2.2.2 on 2019-06-24 03:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catalog', '0003_bookinstance_borrower'),
]
operations = [
migrations.AlterModelOptions(
name='bookinstance',
options={'ordering': ['due_back'], 'permissions': (('can_mark_returned', 'Set book as returned'),)},
),
]
| [ "[email protected]" ] |
e14b578b7338dca6ead87edcb5371d81b75e155b | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/498245_LRU_and_LFU_cache_decorators/recipe-498245.py | 9dd9b93cee6c70ce3a39cb4159b952253e391c9e | [
"Python-2.0",
"MIT"
]
| permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 5,183 | py | import collections
import functools
from itertools import ifilterfalse
from heapq import nsmallest
from operator import itemgetter
class Counter(dict):
'Mapping where default values are zero'
def __missing__(self, key):
return 0
def lru_cache(maxsize=100):
'''Least-recently-used cache decorator.
Arguments to the cached function must be hashable.
Cache performance statistics stored in f.hits and f.misses.
Clear the cache with f.clear().
http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
'''
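    # The access queue records every use (duplicates included), so let it grow to 10x the
    # cache size before compacting it down to the most recent unique accesses.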
maxqueue = maxsize * 10
def decorating_function(user_function,
len=len, iter=iter, tuple=tuple, sorted=sorted, KeyError=KeyError):
cache = {} # mapping of args to results
queue = collections.deque() # order that keys have been used
refcount = Counter() # times each key is in the queue
sentinel = object() # marker for looping around the queue
kwd_mark = object() # separate positional and keyword args
# lookup optimizations (ugly but fast)
queue_append, queue_popleft = queue.append, queue.popleft
queue_appendleft, queue_pop = queue.appendleft, queue.pop
@functools.wraps(user_function)
def wrapper(*args, **kwds):
# cache key records both positional and keyword args
key = args
if kwds:
key += (kwd_mark,) + tuple(sorted(kwds.items()))
# record recent use of this key
queue_append(key)
refcount[key] += 1
# get cache entry or compute if not found
try:
result = cache[key]
wrapper.hits += 1
except KeyError:
result = user_function(*args, **kwds)
cache[key] = result
wrapper.misses += 1
# purge least recently used cache entry
if len(cache) > maxsize:
key = queue_popleft()
refcount[key] -= 1
while refcount[key]:
key = queue_popleft()
refcount[key] -= 1
del cache[key], refcount[key]
# periodically compact the queue by eliminating duplicate keys
# while preserving order of most recent access
if len(queue) > maxqueue:
refcount.clear()
queue_appendleft(sentinel)
for key in ifilterfalse(refcount.__contains__,
iter(queue_pop, sentinel)):
queue_appendleft(key)
refcount[key] = 1
return result
def clear():
cache.clear()
queue.clear()
refcount.clear()
wrapper.hits = wrapper.misses = 0
wrapper.hits = wrapper.misses = 0
wrapper.clear = clear
return wrapper
return decorating_function
def lfu_cache(maxsize=100):
'''Least-frequenty-used cache decorator.
Arguments to the cached function must be hashable.
Cache performance statistics stored in f.hits and f.misses.
Clear the cache with f.clear().
http://en.wikipedia.org/wiki/Least_Frequently_Used
'''
def decorating_function(user_function):
cache = {} # mapping of args to results
use_count = Counter() # times each key has been accessed
kwd_mark = object() # separate positional and keyword args
@functools.wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += (kwd_mark,) + tuple(sorted(kwds.items()))
use_count[key] += 1
# get cache entry or compute if not found
try:
result = cache[key]
wrapper.hits += 1
except KeyError:
result = user_function(*args, **kwds)
cache[key] = result
wrapper.misses += 1
# purge least frequently used cache entry
if len(cache) > maxsize:
for key, _ in nsmallest(maxsize // 10,
use_count.iteritems(),
key=itemgetter(1)):
del cache[key], use_count[key]
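                # Evicting the least-used ~10% in one pass amortizes the O(n) nsmallest scan,
                # instead of paying that cost on every insertion once the cache is full.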
return result
def clear():
cache.clear()
use_count.clear()
wrapper.hits = wrapper.misses = 0
wrapper.hits = wrapper.misses = 0
wrapper.clear = clear
return wrapper
return decorating_function
if __name__ == '__main__':
@lru_cache(maxsize=20)
def f(x, y):
return 3*x+y
domain = range(5)
from random import choice
for i in range(1000):
r = f(choice(domain), choice(domain))
print(f.hits, f.misses)
@lfu_cache(maxsize=20)
def f(x, y):
return 3*x+y
domain = range(5)
from random import choice
for i in range(1000):
r = f(choice(domain), choice(domain))
print(f.hits, f.misses)
| [ "[email protected]" ] |
60d9dd78e5b4036bf310616dd0cb6ccda32f0d3c | 767b5482f3c5b9c2c85575c711e37561f5b8f198 | /engine/plugins/TrojanCheckScript_yd.py | 914bad369a45d68fa5f1bc5d0bb50d9fc094b964 | []
| no_license | zhupite233/scaner | 8e39c903f295d06195be20067043087ec8baac4f | 7c29c02bca2247a82bcbb91cc86955cc27998c95 | refs/heads/master | 2020-05-18T03:23:03.459222 | 2019-04-15T04:29:10 | 2019-04-15T04:29:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,165 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re  # needed by the re.compile/re.match calls below
import MySQLdb
import MySQLdb.cursors
from engine.engineConfig import *
from engine.engine_utils.common import *
from engine.logger import scanLogger as logger
from urlparse import urlparse
from bs4 import BeautifulSoup
def find_friend_links(content):
friend_link_list = []
soup = BeautifulSoup(content, 'lxml')
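    # The regex below matches the Chinese phrases for "friendly links" (友情链接) and
    # "partners" (合作伙伴), which mark the friend-link section on many Chinese sites.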
friends = soup.find(text=re.compile(u'.*?(友情链接|合作伙伴).*?'))
if not friends:
return []
i = 0
while not friends.find_all('a') and i < 4:
try:
friends = friends.parent
except:
pass
i += 1
for friend in friends.find_all('a'):
friend_link = friend.get('href')
if friend_link:
net_loc = urlparse(friend_link).netloc
            if net_loc and not re.match('^(\d{1,3}\.){3}\d{1,3}(:\d{1,6})?$', net_loc):  # a bare IP address is not a friend link
friend_link_list.append(net_loc)
return friend_link_list
def run_domain(http, ob):
    '''
    Hidden/black-hat link detection plugin
    '''
result = []
try:
scheme = ob.get('scheme')
domain = ob.get('domain')
path = ob.get('path')
res, content = http.request('%s://%s%s' % (scheme, domain, path))
friend_link_list = find_friend_links(content)
friend_link_set = set(friend_link_list)
task_id = ob.get('taskId')
        # type = 1 means the URL's domain does not match the site currently being scanned
sql = "SELECT spider_url_other.url FROM spider_url_other WHERE task_id=%s AND TYPE=%s" % (task_id, 1)
db = MySQLdb.connect(SCANER_DB_HOST, SCANER_DB_USER, SCANER_DB_PASSWORD, SCANER_DB_DATABASE, cursorclass=MySQLdb.cursors.DictCursor)
cursor = db.cursor()
cursor.execute(sql)
other_url_list = cursor.fetchall()
if other_url_list:
            detail = 'External site links detected; if they are not friend links or from another known source, they may be hidden/black-hat links or other malicious links'
for other_url_dict in other_url_list:
other_url = other_url_dict.get('url')
other_domain = urlparse(other_url).netloc
other_domain = other_domain.split(':')[0]
                if other_domain.split('.', 1)[1] == domain.split('.', 1)[1]:  # subdomain of the scanned site
continue
                if other_domain not in friend_link_set:  # not among the friend links
result.append(getRecord(ob, other_url, ob['level'], detail, request=other_url, response=''))
except Exception, e:
logger.error("File:TrojanCheckScript_yd.py, run_domain function :%s" % (str(e)))
return result
# result = []
# domain = ob['domain']
# try:
# task_id = ob['taskId']
# # other_urls = db.session.query(SpiderUrlOther.url).filter(SpiderUrlOther.task_id == task_id, SpiderUrlOther.type == 1).all()
# sql = "SELECT spider_url_other.url FROM spider_url_other WHERE task_id=%s AND TYPE=%s" % (task_id, 1)
# db = MySQLdb.connect(SCANER_DB_HOST, SCANER_DB_USER, SCANER_DB_PASSWORD, SCANER_DB_DATABASE, cursorclass=MySQLdb.cursors.DictCursor)
# cursor = db.cursor()
# cursor.execute(sql)
# other_url_list = cursor.fetchmany()
# if other_url_list:
# for other_url_dict in other_url_list:
# other_url = other_url_dict.get('url')
# other_domain = urlparse(other_url).netloc
# other_domain = other_domain.split(':')[0]
# if domain.find(other_domain) == -1 and other_domain.find(domain) == -1 and domain.find(other_domain.split('.', 1)[1]) == -1:
#                     detail = 'External site links detected; if they are not friend links or from another known source, they may be hidden links, black-hat links, trojans or other malicious links'
# # res, content = http.request(other_url.url,"HEAD")
# # request = getRequest(other_url)
# result.append(getRecord(ob, other_url, ob['level'], detail, request=other_url, response=''))
# except Exception, e:
# logger.error("File:TrojanCheckScript_yd.py, run_domain function :%s" % (str(e)))
#
# return result
| [ "[email protected]" ] |
f639a2910f5084d10f088248ae867f33930c1c51 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/wiColorJ/pyr_Tcrop255_p60_j15/Sob_k23_s001/pyr_0s/L3/step10_a.py | 89256e72274b6f1679ebbb7044cb26830e0dc4c7 | []
| no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,983 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                             ### path of the step10 file currently being executed
code_exe_path_element = code_exe_path.split("\\")                      ### split the path so we can find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")                ### find which level kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])    ### locate the kong_model2 dir
import sys                                                             ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer         ### the -1 in the middle converts a length into an index
# print("    kong_to_py_layer:", kong_to_py_layer)
if  (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]    ### [7:] would strip the step1x_ prefix; later decided a meaningful name could stay, so changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]    ### [5:] would strip the mask_ prefix (added only because a Python module name cannot start with a digit); the automatic ordering turned out to be acceptable, so changed to 0
elif(kong_to_py_layer >  3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir)    ### example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_0side_L3 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
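### Drop this exp's own dir from sys.path and purge any cached step09 modules below, so each
### exp file re-imports its own step09_* variant instead of reusing a previously imported one.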
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the name of the folder one level above result_dir! A nested exp_dir is fine too~
For example: with exp_dir = "6_mask_unet/your_chosen_name", the result dirs all live in:
    6_mask_unet/your_chosen_name/result_a
    6_mask_unet/your_chosen_name/result_b
    6_mask_unet/your_chosen_name/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_MC
use_loss_obj = [G_sobel_k23_loss_info_builder.set_loss_target("UNet_Mask").copy()]  ### the z, y, x order corresponds to step07_b_0b_Multi_UNet
#############################################################
### Build an empty Exp_builder so that resul_analyze can draw blank placeholder figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_0side = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Press F5 directly, or run "python step10_b1_exp_obj_load_and_train_and_test.py" with nothing after it! That way execution will not fall through to the code below, which is meant for step10_b_subprocss.py~~~
ch032_0side.build().run()
# print('no argument')
sys.exit()
    ### The code below is for step10_b_subprocess.py; it is equivalent to running "python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()" from cmd
eval(sys.argv[1])
| [ "[email protected]" ] |
9ab9ef75853c91fce2d10e650eae0aec3515ce68 | 34792ccd315338b2bd12b4251cc8188967bfaf35 | /linkdump/migrations/versions/e510de2bd585_.py | ac2a3c5327709abe4232c408efd0c3f024090750 | []
| no_license | puhoy/linkdump | a32dec709e10e8dda68c1dfbb326d9177d64dd96 | ba03ef64c4286910ac1bb15fea8d4c431a738c2f | refs/heads/master | 2022-12-13T20:35:41.913012 | 2020-07-26T12:55:22 | 2020-07-26T12:55:22 | 245,454,321 | 0 | 0 | null | 2022-12-08T03:45:01 | 2020-03-06T15:29:05 | Python | UTF-8 | Python | false | false | 2,097 | py | """empty message
Revision ID: e510de2bd585
Revises:
Create Date: 2020-03-03 23:24:51.952624
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e510de2bd585'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('items',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('source', sa.String(), nullable=False),
sa.Column('date_added', sa.Date(), nullable=False),
sa.Column('title', sa.String(length=80), nullable=True),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('date_processing_started', sa.DateTime(), nullable=True),
sa.Column('date_processing_finished', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_items')),
sa.UniqueConstraint('source', 'date_added', name='_source_at_date')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=False),
sa.Column('email', sa.String(length=120), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('pk_users')),
sa.UniqueConstraint('email', name=op.f('uq_users_email')),
sa.UniqueConstraint('username', name=op.f('uq_users_username'))
)
op.create_table('bookmarks',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('item_id', sa.Integer(), nullable=True),
sa.Column('time_added', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['item_id'], ['items.id'], name=op.f('fk_bookmarks_item_id_items')),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], name=op.f('fk_bookmarks_user_id_users')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_bookmarks'))
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('bookmarks')
op.drop_table('users')
op.drop_table('items')
# ### end Alembic commands ###
| [ "[email protected]" ] |
b7ba385ad28955eb47603abdc1dda3b37f3712c6 | b5a29700c3516cf12f837e2284e3844546205d09 | /plugins/wywwzjj_web_plugin.py | dcec14bf5b7494fcb6a82521181cf0d1fd25b8e7 | []
| no_license | p1g3/Collect-Info-Research | f609823486f36460186cfde27f4be7c9c5a058ae | e8e7366677a8642c3bcf4b103e43378762e6673c | refs/heads/master | 2020-12-24T03:59:01.190032 | 2020-01-31T06:47:35 | 2020-01-31T06:47:35 | 237,374,792 | 37 | 12 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py |
import asyncio
import feedparser
import ssl
import pymongo
from loguru import logger
import datetime
from dateutil import parser
class wywwzjj_web_plugin:
def __init__(self,loop,collection,lock):
ssl._create_default_https_context = ssl._create_unverified_context
self.headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}
self.loop = loop
self.rss = 'https://wywwzjj.top/atom.xml'
self.collection = collection
self.type = 'web'
self.lock = lock
async def return_result(self):
logger.info("{} is running.",self.__class__.__name__)
future = self.loop.run_in_executor(None,feedparser.parse,self.rss)
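        # feedparser.parse blocks, so run it in the default executor and cap the wait at 30 seconds.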
try:
parse_result = await asyncio.wait_for(future, 30, loop=self.loop)
except:
logger.warning("{} parse time out".format(self.rss))
return
        if 'entries' in parse_result:  # dict-style membership test; has_key() no longer exists on Python 3
entries = parse_result['entries']
format_time = datetime.date.today()
for entrie in entries:
article_time = parser.parse(entrie['updated'])
if (article_time.year == format_time.year) and (article_time.month == format_time.month) and (article_time.day == format_time.day):
add_dict = {'type':self.type,'title':entrie['title'],'link':entrie['link'],'is_send':0}
try:
                        await self.lock.acquire()  # acquire the asyncio.Lock explicitly; bare "await lock" is unsupported on newer Python
if self.collection.count_documents({'link':entrie['link']}) < 1:
self.collection.insert_one(add_dict)
logger.info('[Web] {} {}'.format(entrie['title'],entrie['link']))
finally:
self.lock.release()
else:
logger.error('[Error Parse] {}',self.rss)
if __name__ == '__main__':
client = pymongo.MongoClient(host='localhost', port=27017)
db = client.info_collect
collection = db['infos']
lock = asyncio.Lock()
loop = asyncio.get_event_loop()
class_name = wywwzjj_web_plugin(loop,collection,lock)
loop.run_until_complete(class_name.return_result())
| [ "[email protected]" ] |
7f3012a25338bf5c6082fc3d3a1a539d859a756c | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_7/GDS/shamil_v3/purchase_report/report/purchase_total.py | cef7ba15b5a6a9cdb3f2d74d780ea138dbd3257e | []
| no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,820 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# NCTR, Nile Center for Technology Research
# Copyright (C) 2011-2012 NCTR (<http://www.nctr.sd>).
#
##############################################################################
import time
from report import report_sxw
# purchases total report
class purchase_total_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(purchase_total_report, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'line':self._getdata,
})
def _getdata(self,data):
product=data['form']['product']
if not product:
self.cr.execute("""
select
min(l.id) as id,
min(u.name) as uom_name,
min(l.name) as product_name,
sum(l.product_qty) as quantity,
count(*) as nbr,
(min(l.price_unit)*sum(l.product_qty))::decimal(16,2) as price_total
from purchase_order s
left join purchase_order_line l on (s.id=l.order_id)
left join product_product p on (l.product_id=p.id)
left join product_uom u on (u.id=l.product_uom)
where s.state='done'and
(to_char(s.date_approve,'YYYY-mm-dd')>=%s and to_char(s.date_approve,'YYYY-mm-dd')<=%s)
group by
l.product_id
""",(data['form']['from_date'],data['form']['to_date']))
else:
self.cr.execute("""
select
min(l.id) as id,
min(u.name) as uom_name,
min(l.name) as product_name,
sum(l.product_qty) as quantity,
count(*) as nbr,
(min(l.price_unit)*sum(l.product_qty))::decimal(16,2) as price_total
from purchase_order s
left join purchase_order_line l on (s.id=l.order_id)
left join product_product p on (l.product_id=p.id)
left join product_uom u on (u.id=l.product_uom)
where l.product_id is not null and s.state='done'and
(to_char(s.date_approve,'YYYY-mm-dd')>=%s and to_char(s.date_approve,'YYYY-mm-dd')<=%s)
and p.id = %s
group by
l.product_id
""",(data['form']['from_date'],data['form']['to_date'],product[0]))
res = self.cr.dictfetchall()
return res
report_sxw.report_sxw('report.purchase_total.report', 'purchase.order', 'addons/purchase_report/report/purchase_total.rml' ,parser=purchase_total_report )
| [ "[email protected]" ] |
a4767966b69c986e4306309968a867a645504a69 | 59de7788673ade984b9c9fbc33664a7cbdba67d3 | /res_bw/scripts/common/lib/unittest/test/test_setups.py | 4c69c7ff9a59911a974c712c942f518c101738a9 | []
| no_license | webiumsk/WOT-0.9.15-CT | 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | fbd194fbaa6bdece51c7a68fc35bbb5257948341 | refs/heads/master | 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 16,732 | py | # 2016.05.01 15:32:12 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/unittest/test/test_setups.py
import sys
from cStringIO import StringIO
import unittest
def resultFactory(*_):
return unittest.TestResult()
class TestSetups(unittest.TestCase):
def getRunner(self):
return unittest.TextTestRunner(resultclass=resultFactory, stream=StringIO())
def runTests(self, *cases):
suite = unittest.TestSuite()
for case in cases:
tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)
suite.addTests(tests)
runner = self.getRunner()
realSuite = unittest.TestSuite()
realSuite.addTest(suite)
suite.addTest(unittest.TestSuite())
realSuite.addTest(unittest.TestSuite())
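        # The empty nested suites above are added on purpose, so the runner's class/module
        # setup-teardown bookkeeping is also exercised on empty containers.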
return runner.run(realSuite)
def test_setup_class(self):
class Test(unittest.TestCase):
setUpCalled = 0
@classmethod
def setUpClass(cls):
Test.setUpCalled += 1
unittest.TestCase.setUpClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.setUpCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class(self):
class Test(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class_two_classes(self):
class Test(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test2.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(Test2.tearDownCalled, 1)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 0)
def test_error_in_setupclass(self):
class BrokenTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(BrokenTest)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'setUpClass (%s.BrokenTest)' % __name__)
def test_error_in_teardown_class(self):
class Test(unittest.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test2.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 2)
self.assertEqual(Test.tornDown, 1)
self.assertEqual(Test2.tornDown, 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'tearDownClass (%s.Test)' % __name__)
def test_class_not_torndown_when_setup_fails(self):
class Test(unittest.TestCase):
tornDown = False
@classmethod
def setUpClass(cls):
raise TypeError
@classmethod
def tearDownClass(cls):
Test.tornDown = True
raise TypeError('foo')
def test_one(self):
pass
self.runTests(Test)
self.assertFalse(Test.tornDown)
def test_class_not_setup_or_torndown_when_skipped(self):
class Test(unittest.TestCase):
classSetUp = False
tornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.tornDown = True
def test_one(self):
pass
Test = unittest.skip('hop')(Test)
self.runTests(Test)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.tornDown)
def test_setup_teardown_order_with_pathological_suite(self):
results = []
class Module1(object):
@staticmethod
def setUpModule():
results.append('Module1.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module1.tearDownModule')
class Module2(object):
@staticmethod
def setUpModule():
results.append('Module2.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module2.tearDownModule')
class Test1(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 1')
@classmethod
def tearDownClass(cls):
results.append('teardown 1')
def testOne(self):
results.append('Test1.testOne')
def testTwo(self):
results.append('Test1.testTwo')
class Test2(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 2')
@classmethod
def tearDownClass(cls):
results.append('teardown 2')
def testOne(self):
results.append('Test2.testOne')
def testTwo(self):
results.append('Test2.testTwo')
class Test3(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 3')
@classmethod
def tearDownClass(cls):
results.append('teardown 3')
def testOne(self):
results.append('Test3.testOne')
def testTwo(self):
results.append('Test3.testTwo')
Test1.__module__ = Test2.__module__ = 'Module'
Test3.__module__ = 'Module2'
sys.modules['Module'] = Module1
sys.modules['Module2'] = Module2
first = unittest.TestSuite((Test1('testOne'),))
second = unittest.TestSuite((Test1('testTwo'),))
third = unittest.TestSuite((Test2('testOne'),))
fourth = unittest.TestSuite((Test2('testTwo'),))
fifth = unittest.TestSuite((Test3('testOne'),))
sixth = unittest.TestSuite((Test3('testTwo'),))
suite = unittest.TestSuite((first,
second,
third,
fourth,
fifth,
sixth))
runner = self.getRunner()
result = runner.run(suite)
self.assertEqual(result.testsRun, 6)
self.assertEqual(len(result.errors), 0)
self.assertEqual(results, ['Module1.setUpModule',
'setup 1',
'Test1.testOne',
'Test1.testTwo',
'teardown 1',
'setup 2',
'Test2.testOne',
'Test2.testTwo',
'teardown 2',
'Module1.tearDownModule',
'Module2.setUpModule',
'setup 3',
'Test3.testOne',
'Test3.testTwo',
'teardown 3',
'Module2.tearDownModule'])
def test_setup_module(self):
class Module(object):
moduleSetup = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_setup_module(self):
class Module(object):
moduleSetup = 0
moduleTornDown = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
raise TypeError('foo')
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(Module.moduleTornDown, 0)
self.assertEqual(result.testsRun, 0)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'setUpModule (Module)')
def test_testcase_with_missing_module(self):
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules.pop('Module', None)
result = self.runTests(Test)
self.assertEqual(result.testsRun, 2)
return
def test_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
raise TypeError('foo')
class Test(unittest.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 4)
self.assertTrue(Test.classSetUp)
self.assertTrue(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'tearDownModule (Module)')
def test_skiptest_in_setupclass(self):
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
raise unittest.SkipTest('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
def test_skiptest_in_setupmodule(self):
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
class Module(object):
@staticmethod
def setUpModule():
raise unittest.SkipTest('foo')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpModule (Module)')
def test_suite_debug_executes_setups_and_teardowns(self):
ordering = []
class Module(object):
@staticmethod
def setUpModule():
ordering.append('setUpModule')
@staticmethod
def tearDownModule():
ordering.append('tearDownModule')
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
ordering.append('setUpClass')
@classmethod
def tearDownClass(cls):
ordering.append('tearDownClass')
def test_something(self):
ordering.append('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
suite.debug()
expectedOrder = ['setUpModule',
'setUpClass',
'test_something',
'tearDownClass',
'tearDownModule']
self.assertEqual(ordering, expectedOrder)
def test_suite_debug_propagates_exceptions(self):
class Module(object):
@staticmethod
def setUpModule():
if phase == 0:
raise Exception('setUpModule')
@staticmethod
def tearDownModule():
if phase == 1:
raise Exception('tearDownModule')
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
if phase == 2:
raise Exception('setUpClass')
@classmethod
def tearDownClass(cls):
if phase == 3:
raise Exception('tearDownClass')
def test_something(self):
if phase == 4:
raise Exception('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
_suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
suite = unittest.TestSuite()
suite.addTest(_suite)
messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
for phase, msg in enumerate(messages):
with self.assertRaisesRegexp(Exception, msg):
suite.debug()
if __name__ == '__main__':
unittest.main()
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\unittest\test\test_setups.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:32:13 Central Europe (Daylight Saving Time)
| [ "[email protected]" ] |
71ded94d8ec5f45f496da5eed363bb9ae2d74e09 | fc00b177802c49cf04dc6a8e430093bc14ae9b53 | /venv/Lib/site-packages/mypyc/test/test_tuplename.py | 7f3fd2000d29058fac183cb8157e9c118dc775c8 | []
| permissive | artisakov/vigilant-journey | 9c8264d36da5745374a0d08b0b0288a70f978a11 | 4fed9026071a64489d26422ba7cd1a9b9cb05e16 | refs/heads/master | 2022-11-16T03:10:06.418221 | 2020-07-16T07:33:06 | 2020-07-16T07:33:06 | 238,490,887 | 0 | 1 | MIT | 2020-03-01T10:12:22 | 2020-02-05T16:03:07 | HTML | UTF-8 | Python | false | false | 974 | py | import unittest
from mypyc.ir.rtypes import (
RTuple, object_rprimitive, int_rprimitive, bool_rprimitive, list_rprimitive,
RInstance, RUnion,
)
from mypyc.ir.class_ir import ClassIR
class TestTupleNames(unittest.TestCase):
def setUp(self) -> None:
self.inst_a = RInstance(ClassIR('A', '__main__'))
self.inst_b = RInstance(ClassIR('B', '__main__'))
def test_names(self) -> None:
assert RTuple([int_rprimitive, int_rprimitive]).unique_id == "T2II"
assert RTuple([list_rprimitive, object_rprimitive, self.inst_a]).unique_id == "T3OOO"
assert RTuple([list_rprimitive, object_rprimitive, self.inst_b]).unique_id == "T3OOO"
assert RTuple([]).unique_id == "T0"
assert RTuple([RTuple([]),
RTuple([int_rprimitive, int_rprimitive])]).unique_id == "T2T0T2II"
assert RTuple([bool_rprimitive,
RUnion([bool_rprimitive, int_rprimitive])]).unique_id == "T2CO"
| [ "[email protected]" ] |
ca252f50d847acea12ba55205c8fe4b0e04c4158 | 75a009b47851ebe8879a61d716bb64c0b1fc4a0d | /app/config/settings/dev.py | 2f02da674fdba7c55f4db0d65627fc5a10b2dc4e | []
| no_license | smallbee3/Airbnb | c55bf97f5ff16105328d44c485f6d2c017c90ea7 | e8f79c15055c53e51fa2b3be549f9896680f63a7 | refs/heads/master | 2020-03-07T13:37:04.668580 | 2018-04-01T22:51:15 | 2018-04-01T22:51:15 | 127,505,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from .base import *
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS += [
'django_extensions',
]
SECRETS_DEV = os.path.join(SECRETS_DIR, 'dev.json')
secrets_dev = json.loads(open(SECRETS_DEV, 'rt').read())
# DATABASES = secrets_dev['DATABASES']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
| [ "[email protected]" ] |
4dfefd806d30ea59c8d44aa2cec59209bc551910 | 5bb1ae9b9e6592def632b8a95def32b3a2d742d5 | /headfirst/ch6_sarah.py | 75490cb66f1cb9f23363d8e7c9377d5feff910a7 | []
| no_license | fiso0/my_python | af1132637a4ad92036ea0a949fa93df6f904b190 | 391def01ecdb97b8e3008235910a596bb5a9b52c | refs/heads/master | 2021-01-17T15:52:36.745999 | 2016-10-29T08:37:51 | 2016-10-29T08:37:51 | 58,641,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | def sanitize(time_string):
if '-' in time_string:
splitter = '-'
elif ':' in time_string:
splitter = ':'
else:
return(time_string)
(mins,secs) = time_string.split(splitter)
return(mins+'.'+secs)
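# sanitize() normalizes times like '2-34' or '2:34' to a uniform '2.34' form, so the
# strings compare consistently and duplicates collapse in the set() used below.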
def get_coach_data(file_name):
try:
with open(file_name) as f:
data = f.readline().strip().split(',')
mydict = {'name' : data.pop(0), \
'dob' : data.pop(0), \
'tops' : str(sorted(set([sanitize(t) for t in data]))[0:3])}
return(mydict)
except IOError as ioerr:
print('File error:' + str(ioerr))
return(None)
sarah = get_coach_data('sarah2.txt')
print(sarah['name'] + "'s fastest times are: " + sarah['tops'])
| [ "[email protected]" ] |
2693632938ac97d96d8d251bfd165cd7dca0f66f | 428ca6903cc085a0ff51d3d0d85e757bed412330 | /accounts/migrations/0011_auto_20190310_1744.py | 71f9a82d8c822d70c5f6b8dbda3ed92e671a997e | []
| no_license | privalytics/privalytics.io | a261603f51bcf7ec5c8946de88bb240ef1e76666 | 6f5121c798656bc6c6993e873ea56e77fa254a1d | refs/heads/master | 2021-08-07T02:47:50.708936 | 2021-06-20T08:55:28 | 2021-06-20T08:55:28 | 171,507,151 | 4 | 5 | null | 2021-06-20T08:55:47 | 2019-02-19T16:17:16 | CSS | UTF-8 | Python | false | false | 547 | py | # Generated by Django 2.1.7 on 2019-03-10 17:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0010_auto_20190308_0856'),
('subscriptions', '0001_initial')
]
operations = [
migrations.AlterField(
model_name='subscription',
name='subscription_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='subscriptions.SubscriptionType'),
),
]
| [ "[email protected]" ] |
b55934894d57fb4858d49c01cf2bdeeff5735d0b | 2eb6d57b4f97fe2ea2cd6ab78512dd2c7a6e6ecc | /chapter5/exercise_2.py | 52c87f388582138ef431ee7857c65c9bb4397dd3 | []
| no_license | afcarl/PythonDataStructures | 4ba98bca168f535dc9c8ed9392ed313592850101 | a620af0a1e0d707556a8883ecb5b79a6f1df56c7 | refs/heads/master | 2020-03-26T06:26:38.637461 | 2014-04-26T15:53:46 | 2014-04-26T15:53:46 | 144,605,553 | 1 | 0 | null | 2018-08-13T16:26:42 | 2018-08-13T16:26:42 | null | UTF-8 | Python | false | false | 482 | py | import math
def hypotenuse(a,b):
"""
>>> hypotenuse(3,4)
5.0
>>> hypotenuse(12,5)
13.0
>>> hypotenuse(7,24)
25.0
>>> hypotenuse(9,12)
15.0
"""
    if isinstance(a, (int, float)) and isinstance(b, (int, float)):
return math.sqrt(a**2 + b**2)
else:
return "distance function undefined for associated types!!"
if __name__ == '__main__':
import doctest
doctest.testmod()
| [ "[email protected]" ] |
56140d86cc58f32f4304d03463614711db1e4edb | 60cf862c8b63394d244fbf20f004ede014c26687 | /simple_calculator/Ex2 Simple calculator2.py | c4d92a8ea7016fd58b932f34f3023b9ef1ea5f88 | []
| no_license | bartoszmaleta/2nd-Self-instructed-week-exercises- | bda9af5eabf04f8fa07ebba00b5a382140941fb0 | 22c33b6cc047ad0e293b5b1e04647e741502f9d7 | refs/heads/master | 2020-08-12T01:33:57.630782 | 2019-10-14T09:08:34 | 2019-10-14T09:08:34 | 214,664,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | first_number = input('Enter a number (or a letter to exit): ')
if first_number.lstrip('-').isdigit():
    second_number = input('Enter another number: ')
    sign = input('Enter an operation: ')
    first_number_as_int = int(first_number)
    second_number_as_int = int(second_number)
    if sign == '+':
        print('Result: ' + str(first_number_as_int + second_number_as_int))
    elif sign == '-':
        print('Result: ' + str(first_number_as_int - second_number_as_int))
    elif sign == '*':
        print('Result: ' + str(first_number_as_int * second_number_as_int))
    elif sign == '/':
        if second_number_as_int == 0:
            print('Division by zero is not allowed.')
        else:
            print('Result: ' + str(first_number_as_int / second_number_as_int))
    else:
        print('Unknown operation: ' + sign)
else:
    print('Exiting.')
| [ "[email protected]" ] |
ea66a53f1ec41732f03fa530e7cff39b927d7b1b | 2da8bcfb9a72e507812a8723e38ad6d030c300f1 | /check_if_a_string_contains_all_binary_codes_of_size_k_1461.py | d908c35cb2d2ea1b12616cc16df50e74de78084a | []
| no_license | aditya-doshatti/Leetcode | 1a4e0f391a7d6ca2d7f8fdc35e535f4ec10fb634 | eed20da07896db471ea6582785335e52d4f04f85 | refs/heads/master | 2023-04-06T02:18:57.287263 | 2023-03-17T03:08:42 | 2023-03-17T03:08:42 | 218,408,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | '''
1461. Check If a String Contains All Binary Codes of Size K
Medium
Given a binary string s and an integer k.
Return True if every binary code of length k is a substring of s. Otherwise, return False.
Example 1:
Input: s = "00110110", k = 2
Output: true
Explanation: The binary codes of length 2 are "00", "01", "10" and "11". They can be all found as substrings at indicies 0, 1, 3 and 2 respectively.
https://leetcode.com/problems/check-if-a-string-contains-all-binary-codes-of-size-k/
'''
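# Sliding-window idea: there are 2**k distinct binary codes of length k, so scan each
# window of length k once, counting every substring the first time it is seen, and
# stop early as soon as all 2**k codes have appeared.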
class Solution:
def hasAllCodes(self, s: str, k: int) -> bool:
required = 2 ** k
done = set()
for i in range(k, len(s)+1):
temp = s[i-k:i]
if temp not in done:
done.add(temp)
required -=1
if required == 0:
return True
return False
# if len(s) < k:
# return False
# for i in range(2 **k):
# binary = str(bin(i)[2:])
# checkVal = '0'*(k-len(binary)) + binary
# if checkVal in s:
# continue
# else:
# return False
# return True
| [ "[email protected]" ] |
00b88efee486df786fb91d829273704caaab765d | 071017425dbb9a175c3b4c5e090501e35b31d4f9 | /docqa/allennlp_custom/modules/similarity_functions/constant_tri.py | 0aaee645c881037f642bef330bfe274c181a0823 | [
"Apache-2.0"
]
| permissive | debjitpaul/discourse-aware-semantic-self-attention | 9bf22f3a8dcf61cc85ba56a3cbbc4dd9b371e55d | 5851c95bbe761c980177b2c3e769c9e716551d5f | refs/heads/master | 2022-04-20T17:05:53.216321 | 2020-04-24T09:24:15 | 2020-04-24T09:24:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,087 | py | import numpy as np
import torch
from allennlp.common import Registrable, Params
from allennlp.modules import SimilarityFunction
from allennlp.nn import Activation
from typing import List
from torch.nn import Parameter
from functools import reduce
@SimilarityFunction.register("constant_tri")
class ConstantTriParams(SimilarityFunction):
"""
    This similarity function ignores the values of its three input tensors and always returns a fixed
    constant vector, tiled to match the leading dimensions of the first input. It is useful as a
    stub or baseline when an interface expects a three-tensor similarity function but the output
    should not depend on the inputs.
    Parameters
    ----------
    tensor_1_dim : ``int``
        The dimension of the first tensor, ``x``. This is ``x.size()[-1]``. Accepted only for
        interface compatibility; it is not used.
    tensor_2_dim : ``int``
        The dimension of the second tensor, ``y``. This is ``y.size()[-1]``. Accepted only for
        interface compatibility; it is not used.
    tensor_3_dim : ``int``
        The dimension of the third tensor, ``z``. This is ``z.size()[-1]``. Accepted only for
        interface compatibility; it is not used.
    output_constant : ``List[float]``
        The constant vector to return; its length determines the output dimension.
def __init__(self,
tensor_1_dim: int,
tensor_2_dim: int,
tensor_3_dim: int,
output_constant: List[float],
):
super().__init__()
output_constant = np.array(output_constant)
self._output_constant = torch.tensor(output_constant, requires_grad=False, dtype=torch.float32)
self._output_dim = self._output_constant.shape[-1]
def forward(self, tensor1:torch.LongTensor, tensor2:torch.LongTensor, tensor3:torch.LongTensor) -> torch.Tensor:
# pylint: disable=arguments-differ
"""
Takes two tensors of the same shape, such as ``(batch_size, length_1, length_2,
embedding_dim)``. Transforms both tensor to a target output dimensions and returns a sum tensor with same
number of dimensions, such as ``(batch_size, length, out_dim)``.
"""
tile_size = reduce(lambda x, y: x * y, tensor1.shape[:-1])
res_repr = self._output_constant.unsqueeze(0).repeat(tile_size, 1)
return res_repr
| [
"[email protected]"
]
| |
0671af8fd937da608bdee27126acbc05573e7a2b | c8cee25ecb60ca3e6ce5e24c37db57f82f9858f6 | /Fundamentos Python/ecuacion_gauss_2.py | 7e786edf7e6daa2a8e045011a83ff06ba796512f | []
| no_license | mecomontes/Python | a0b4a0b69ae33ad3623e908731710563392d1615 | daba4247cca90c43a979e3e3f292cd7b8951b3d0 | refs/heads/master | 2023-05-30T05:24:41.999196 | 2020-03-23T02:30:09 | 2020-03-23T02:30:09 | 249,317,310 | 1 | 0 | null | 2023-05-22T22:42:36 | 2020-03-23T02:29:38 | Python | UTF-8 | Python | false | false | 352 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# Sumatoria de los n primeros números
# Método propuesto por Gauss: PAra cualquier número
Numero = float(input("Ingrese el último Número a incluir en la sumatoria: "))
Sumatoria = ((1 + Numero)/2) * Numero
print("Sumatoria de los primeros: ", Numero, " numeros enteros: ", Sumatoria)
| [
"[email protected]"
]
| |
2b2530136d5c1176b142bb6e61260cf9c0562804 | 50604d2b98220ea485c1ada1d6e5b7c230a621db | /src/python/labbox_ephys/extensions/workspaceview/WorkspaceView/sortingExamples/mountainsort4_example.py | c6d31e33e8bec6b8c5f1bbdfe498d57d20bc2d05 | []
| no_license | stjordanis/labbox-ephys | e9f4ca783947be9c6ab4caf19267d75577cbba10 | 563d112346e4a557f1aa04a052d245b07a0e9ce4 | refs/heads/main | 2023-08-11T21:18:40.122003 | 2021-07-26T18:35:33 | 2021-07-26T18:35:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | import spikeextractors as se
import numpy as np
import labbox_ephys as le
from labbox_ephys import sorters
import kachery_client as kc
if __name__ == '__main__':
# adjust these values
workspace_uri = '{workspaceUri}'
recording_id = '{recordingId}' # {recordingLabel}
workspace = le.load_workspace(workspace_uri)
le_recording = workspace.get_recording(recording_id)
recording_object = le_recording['recordingObject']
sorting_object = sorters.mountainsort4(
recording_object=recording_object,
num_workers=1
)
sorting = le.LabboxEphysSortingExtractor(sorting_object)
S_id = workspace.add_sorting(
sorting=sorting,
recording_id=recording_id,
label='mountainsort4'
) | [
"[email protected]"
]
| |
7a6418956f1e4c4925ad1763e955ed832be64cbc | 96bea2075b44646bd84328c7fa012bdf111196dd | /build/rotors_evaluation/catkin_generated/pkg.develspace.context.pc.py | de859b70ad68dd94a7697427e28b80265b3fd7b2 | []
| no_license | leithw2/Drone_DDR_ROS | 0a139ffc603c4faa3805c64cabcd591546cba6c5 | c41da9602177135a6033f36d5c19b684702b7656 | refs/heads/master | 2023-03-07T15:25:43.982116 | 2021-02-18T22:21:33 | 2021-02-18T22:21:33 | 280,242,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rotors_evaluation"
PROJECT_SPACE_DIR = "/home/laptop/catkin_ws/devel/.private/rotors_evaluation"
PROJECT_VERSION = "2.0.1"
| [
"[email protected]"
]
| |
7739878c949d613861d03ac303411425ddf611ba | 4c4b4076f960a1e1d0203cc58621090cc3dc45f3 | /architect/manager/urls.py | 376aa0b86346aa1fad7b7d25624b89d0e2854bfb | []
| no_license | michaelkuty/architect-api | 6c62f8a8ada4dc9a62b12ea7b38ae9a70d94290e | 30ebfdafdbdcff4098bf080f00957949e20a71cf | refs/heads/master | 2021-05-02T16:17:53.016373 | 2018-02-08T22:08:24 | 2018-02-08T22:08:24 | 120,673,129 | 0 | 0 | null | 2018-02-07T21:20:43 | 2018-02-07T21:20:43 | null | UTF-8 | Python | false | false | 1,047 | py | from django.urls import path
from . import views
app_name = 'manager'
urlpatterns = [
path('v1', views.ManagerListView.as_view(),
name='manager_list'),
path('v1/manager-check',
views.ManagerCheckView.as_view(),
name='manager_check'),
path('v1/<manager_name>',
views.ManagerDetailView.as_view(),
name='manager_detail'),
path('v1/<manager_name>/sync',
views.ManagerSyncView.as_view(),
name='manager_sync'),
path('v1/<manager_name>/query/<query_name>',
views.ManagerQueryJSONView.as_view(),
name='manager_query'),
path('v1/<manager_name>/action/<resource_kind>/<resource_action>',
views.ManagerActionView.as_view(),
name='manager_action'),
path('v1/<manager_name>/resource/<resource_uid>',
views.ResourceDetailView.as_view(),
name='resource_detail'),
path('v1/<manager_name>/resource/<resource_uid>/<resource_action>',
views.ResourceActionView.as_view(),
name='resource_action'),
]
| [ "[email protected]" ] |
eea7f772f28657f31564888e9c1bda1ae6088163 | 71bc873c20fbc45bb5e13095d2474496818a23f9 | /code_video clean/query_url.py | 682d51ed8e0b8b6c493923ee3aed28198d7f1e10 | []
| no_license | 2877992943/lianyun | f31c44ea2e266bae51cae4fa464d1bae368c8d3f | a872d6cd1b2eff402bcccb326d33d086816d87af | refs/heads/master | 2021-01-20T16:17:20.226401 | 2017-05-10T06:49:31 | 2017-05-10T06:49:31 | 90,830,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,300 | py | #! -*- coding:utf-8 -*-
import urllib2,time,os,urllib
import pandas as pd
import json
import MySQLdb
import random,cPickle
import csv,re
from MySQLdb import cursors
import numpy as np
import sys,time,os
read_host='rr-2zeg40364h2thw9m6o.mysql.rds.aliyuncs.com'
write_host='rds5943721vp4so4j16ro.mysql.rds.aliyuncs.com'
db_product = MySQLdb.connect(host=read_host,
user='yunker',
passwd="yunker2016EP",
db="xddb",
use_unicode=True,
charset='utf8',
cursorclass=cursors.DictCursor)
## 测试库
db = MySQLdb.connect(host='rds0710650me01y6d3ogo.mysql.rds.aliyuncs.com',
user='yunker',
passwd="yunke2016",
db="yunketest",
use_unicode=True,
charset='utf8',
cursorclass=cursors.DictCursor)
def get_request_helloWorld():
full_url='http://localhost:9300'
data=urllib2.urlopen(full_url)
Data=data.read()
print Data
def get_request_testDB():
try:
full_url='http://localhost:9300/?userId=E8B63FF5BB1840EABB9BEB2F9DCCA731'
data=urllib2.urlopen(full_url)
Data=data.read()
return Data
except:
return None
def send_request(cid):
try:
full_url='http://yunkecn.com/xdapi/esData/updateAlreadyDownload.action?userAccount=%s&clueId=%s'%(companyCode,cid)
data=urllib2.urlopen(full_url)
Data=data.read()
return Data
except:
return None
def requst_crawler(comname):
try:
full_url='http://101.200.139.60:8088/crawler/QiDuoWeiSpider?companyName=%s'%(comname)
print full_url
data=urllib2.urlopen(full_url)
Data=data.read()
return Data
except:
return None
def requst_mp3(url,mp3Name):
try:
full_url=url
data=urllib.urlretrieve(url, mp3Name+".mp3")
return data
except:
return None
def query_call_by_att(att, companyCode):
sql1 ="""SELECT %s from crm_t_call_action c left join crm_t_portaluser p
on c.User_Id=p.User_Id
#WHERE c.recordFile LIKE '%s' AND c.record_from = 1
where c.record_from = 1
limit 10000
#where p.User_Company_Id in ('%s')
"""
sql ="""SELECT %s from crm_t_call_action c left join crm_t_portaluser p
on c.User_Id=p.User_Id
WHERE c.recordFile LIKE '%s' AND c.record_from = 1
limit 100000
"""
cur = db_product.cursor()
#cur.execute(sql % (att, "','".join(companyCode)))
#print sql % (att, '%http://yunke-pcfile.oss-cn-beijing%')
cur.execute(sql % (att, '%http://yunke-pcfile.oss-cn-beijing%'))
ret = {}
for r in cur.fetchall():
ret[r['Call_Action_Id']] = r #{id:{record},...}
return ret
def strDuration2second(duration):
duration=re.sub('[\s+]','',duration)
duration=re.sub('[\'\"]',' ',duration)
ll=duration.split(' ')
ll=[int(i) for i in ll if len(i)>=1]
#print ll
minute,second=ll
second+=60*minute
return second
if __name__=='__main__':
#### query
attList=['c.Call_Action_Id','c.recordFile','c.Call_Duration','c.Tip_Type','c.Tip_Name']
companyCode=['bjnbm3','jjjeva','vnraea','ffz3ai','invjvi','mmbnn3']
companyCode=companyCode[5:]
ret=query_call_by_att(','.join(attList),companyCode)
print len(ret)
pd.to_pickle(ret,'../data/ret')
##### get second >60 url
ret=pd.read_pickle('../data/ret')
url_second_feedback={}
for id,r in ret.items()[:]:
recordFile=r['recordFile']
duration=r['Call_Duration']
tiptype=r['Tip_Type']
tipname=r['Tip_Name']
second=strDuration2second(duration)
if second<=0:continue
url_second_feedback[recordFile]=[second,str(tiptype)+' '+tipname]
########
print len(url_second_feedback)
pd.to_pickle(url_second_feedback,'../data/url_second')
####
"""
record_from 录音来源 1app 2pc 3yunkecc 4电话盒子
现在yunkecc渠道的录音应该是没有提示音的,电话盒子还没上线,没数据。pc的应该是null,app的录音是安卓的,带提示音
"""
| [ "[email protected]" ] |
d011eb0d199ae63be8e855b4671fdd4a997bb6cd | 1a5b20adde928721bc42dd17e7460dcc880000a2 | /src/transformers/models/pix2struct/modeling_pix2struct.py | dd965fc35e37164595441639ab9e3d30d219ef96 | [
"Apache-2.0"
]
| permissive | kashif/transformers | de36d78c47dac5ea816c3e0812cd50af95ce6ce6 | abbc96a2148da0c91fa078bd021984b2cc10ef85 | refs/heads/main | 2023-09-01T02:52:49.782252 | 2023-04-17T11:41:52 | 2023-04-17T11:41:52 | 230,251,524 | 0 | 0 | Apache-2.0 | 2019-12-26T11:20:47 | 2019-12-26T11:20:47 | null | UTF-8 | Python | false | false | 82,239 | py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. & Google team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Pix2Struct modeling file"""
import math
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.utils.checkpoint import checkpoint
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import ALL_LAYERNORM_LAYERS
from ...utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_torch_fx_proxy,
logging,
replace_return_docstrings,
)
from .configuration_pix2struct import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "Pix2StructConfig"
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/pix2struct-textcaps-base",
"google/pix2struct-textcaps-large",
"google/pix2struct-base",
"google/pix2struct-large",
"google/pix2struct-ai2d-base",
"google/pix2struct-ai2d-large",
"google/pix2struct-widget-captioning-base",
"google/pix2struct-widget-captioning-large",
"google/pix2struct-screen2words-base",
"google/pix2struct-screen2words-large",
"google/pix2struct-docvqa-base",
"google/pix2struct-docvqa-large",
"google/pix2struct-ocrvqa-base",
"google/pix2struct-ocrvqa-large",
"google/pix2struct-chartqa-base",
"google/pix2struct-inforgraphics-vqa-base",
"google/pix2struct-inforgraphics-vqa-large",
# See all Pix2StructVision models at https://huggingface.co/models?filter=pix2struct
]
# Adapted from transformers.models.t5.modeling_t5.T5LayerNorm with T5->Pix2Struct
class Pix2StructLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization (https://arxiv.org/abs/1910.07467); thus the variance is calculated
# without the mean and there is no bias. Additionally, we want to make sure that the
# accumulation for half-precision inputs is done in fp32.
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
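# (sketch) the forward pass above is RMSNorm:
#   y = weight * x / sqrt(mean(x ** 2, dim=-1) + eps)
# e.g. for x = torch.full((1, 4), 2.0) and unit weights, mean(x ** 2) = 4, so y ~= 1.0 everywhere.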
try:
from apex.normalization import FusedRMSNorm
Pix2StructLayerNorm = FusedRMSNorm # noqa
logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of Pix2StructLayerNorm")
except ImportError:
# using the normal Pix2StructLayerNorm
pass
except Exception:
logger.warning("Discovered apex but it failed to load, falling back to Pix2StructLayerNorm")
pass
ALL_LAYERNORM_LAYERS.append(Pix2StructLayerNorm)
class Pix2StructVisionEmbeddings(nn.Module):
r"""
Construct the embeddings from patches. In `Pix2Struct` the input is different from classic Vision-Transformer models.
Here the input is a sequence of `seq_len` flattened patches that also includes padding patches (tokens). Each patch
is represented by a vector of `hidden_size` values.
"""
def __init__(self, config: Pix2StructConfig) -> None:
super().__init__()
self.patch_projection = nn.Linear(config.patch_embed_hidden_size, config.hidden_size)
self.row_embedder = nn.Embedding(config.seq_len, config.hidden_size)
self.column_embedder = nn.Embedding(config.seq_len, config.hidden_size)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, flattened_patches: torch.Tensor) -> torch.Tensor:
# the row and column indices are stored in the first and second position of the flattened_patches
# flattened_patches: `batch_size`, `seq_len`, `hidden_size` + 2
row_indices = flattened_patches[:, :, 0].long()
col_indices = flattened_patches[:, :, 1].long()
flattened_patches = flattened_patches[:, :, 2:]
embeddings = self.patch_projection(flattened_patches)
row_embeddings = self.row_embedder(row_indices)
col_embeddings = self.column_embedder(col_indices)
# sum all embeddings together
embeddings = embeddings + row_embeddings + col_embeddings
embeddings = self.dropout(embeddings)
return embeddings
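# (sketch) assumed input layout for a batch of one image with two patches and
# patch_embed_hidden_size = 4:
#   flattened_patches = [[[row, col, p0, p1, p2, p3],
#                         [row, col, p0, p1, p2, p3]]]
# columns 0 and 1 carry integer grid coordinates for the positional embeddings; the
# remaining values are the flattened patch pixels that `patch_projection` maps to `hidden_size`.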
class Pix2StructVisionAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.hidden_size = config.hidden_size
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_attention_heads
self.dropout = config.attention_dropout
self.inner_dim = self.n_heads * self.key_value_proj_dim
# Mesh TensorFlow initialization to avoid scaling before softmax
self.query = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
self.key = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
self.value = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
self.output = nn.Linear(self.inner_dim, self.hidden_size, bias=False)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
output_attentions=False,
):
"""
Self-attention block
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
def to_projection_shape(states):
"""projection"""
return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
# get query states
# (batch_size, n_heads, seq_length, dim_per_head)
query_states = to_projection_shape(self.query(hidden_states))
# get key/value states
key_states = to_projection_shape(self.key(hidden_states))
value_states = to_projection_shape(self.value(hidden_states))
# compute scores
# equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
scores = torch.matmul(query_states, key_states.transpose(3, 2))
if position_bias is None:
position_bias = torch.zeros(
(1, self.n_heads, seq_length, seq_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=scores.device, dtype=scores.dtype)
if attention_mask.dim() == 2:
position_bias = position_bias + attention_mask[:, None, :, None].to(position_bias.device)
else:
# (batch_size, n_heads, seq_length, key_length)
position_bias = position_bias + attention_mask.to(position_bias.device)
position_bias = 1 - position_bias
position_bias_masked = position_bias.masked_fill(position_bias == 1, torch.finfo(scores.dtype).min)
scores += position_bias_masked
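# (sketch) the block above turns a {1 = keep, 0 = pad} mask into an additive bias:
# kept entries become 1 - 1 = 0, padded entries become 1 - 0 = 1 and are then filled
# with the dtype minimum, so the softmax assigns them ~zero attention weight.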
scores = torch.max(scores, torch.tensor(torch.finfo(scores.dtype).min))
# (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).type_as(scores)
# (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = torch.matmul(attn_weights, value_states)
# (batch_size, seq_length, dim)
attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
attn_output = self.output(attn_output)
outputs = (attn_output,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5DenseGatedActDense->Pix2StructVisionMlp,T5Config->Pix2StructVisionConfig,config.d_model->config.hidden_size,dropout_rate->dropout_rate
class Pix2StructVisionMlp(nn.Module):
def __init__(self, config: Pix2StructVisionConfig):
super().__init__()
self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
# To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
# See https://github.com/huggingface/transformers/issues/20287
# we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None``
if (
isinstance(self.wo.weight, torch.Tensor)
and hidden_states.dtype != self.wo.weight.dtype
and self.wo.weight.dtype != torch.int8
):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
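# (sketch) Pix2StructVisionMlp above is a gated feed-forward block:
#   out = wo( act(wi_0(x)) * wi_1(x) )   with act = ACT2FN[config.dense_act_fn]
# one linear branch goes through the activation and gates the other, untouched branch.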
class Pix2StructVisionLayer(nn.Module):
def __init__(self, config: Pix2StructConfig) -> None:
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = Pix2StructVisionAttention(config)
self.mlp = Pix2StructVisionMlp(config)
self.pre_mlp_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pre_attention_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
residual = hidden_states
# in Pix2StructVision, layernorm is applied before self-attention
hidden_states = self.pre_attention_layer_norm(hidden_states)
self_attention_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=head_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
# first residual connection
hidden_states = attention_output + residual
# in Pix2StructVision, layernorm is also applied after self-attention
layer_output = self.pre_mlp_layer_norm(hidden_states)
layer_output = self.mlp(layer_output) + hidden_states # second residual connection
outputs = (layer_output,) + outputs
return outputs
class Pix2StructVisionEncoder(nn.Module):
def __init__(self, config: Pix2StructConfig) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList([Pix2StructVisionLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
)
else:
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class Pix2StructPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = Pix2StructConfig
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor # Used for testing weights initialization
if isinstance(module, Pix2StructLayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, Pix2StructTextDenseGatedActDense):
hidden_size = (
self.config.text_config.hidden_size
if isinstance(self.config, Pix2StructConfig)
else self.config.hidden_size
)
d_ff = self.config.text_config.d_ff if isinstance(self.config, Pix2StructConfig) else self.config.d_ff
module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
module.wi_0.bias.data.zero_()
module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
module.wi_1.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * ((d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, Pix2StructTextAttention):
# Mesh TensorFlow attention initialization to avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
hidden_size = (
self.config.text_config.hidden_size
if isinstance(self.config, Pix2StructConfig)
else self.config.hidden_size
)
key_value_proj_dim = (
self.config.text_config.d_kv if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size
)
n_heads = (
self.config.text_config.num_heads
if isinstance(self.config, Pix2StructConfig)
else self.config.num_heads
)
module.query.weight.data.normal_(mean=0.0, std=factor * ((hidden_size * key_value_proj_dim) ** -0.5))
module.key.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5))
module.value.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5))
module.output.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
elif isinstance(module, nn.Embedding):
hidden_size = (
self.config.text_config.hidden_size
if isinstance(self.config, Pix2StructConfig)
else self.config.hidden_size
)
module.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, Pix2StructTextModel):
hidden_size = (
self.config.text_config.hidden_size
if isinstance(self.config, Pix2StructConfig)
else self.config.hidden_size
)
module.lm_head.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
elif isinstance(module, (nn.Linear, nn.Conv2d)):
# Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
# `trunc_normal_cpu` not implemented in `half` issues
module.weight.data = nn.init.trunc_normal_(
module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
).to(module.weight.dtype)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, Pix2StructLayerNorm):
if module.weight is not None:
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->Pix2Struct
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert decoder_start_token_id is not None, (
"self.model.config.decoder_start_token_id has to be defined. In Pix2Struct it is usually set to the pad_token_id."
" See Pix2Struct docs for more information"
)
# shift inputs to the right
if is_torch_fx_proxy(input_ids):
# Item assignment is not supported natively for proxies.
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
else:
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
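# (sketch) behaviour of _shift_right above, assuming decoder_start_token_id = 0 and
# pad_token_id = 0:
#   labels  = [[17, -100, 42]]
#   shifted = [[ 0,   17, -100]] -> [[0, 17, 0]]   # start token prepended, last token
#                                                  # dropped, -100 replaced by pad_token_id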
PIX2STRUCT_VISION_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`Pix2StructConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
PIX2STRUCT_VISION_INPUTS_DOCSTRING = r"""
Args:
flattened_patches (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_channels x patch_height x patch_width)`):
Flattened and padded pixel values. These values can be obtained using [`AutoImageProcessor`]. See
[`Pix2StructVisionImageProcessor.__call__`] for details. Check the [original
paper](https://arxiv.org/abs/2210.03347) (figure 5) for more details.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Pix2StructVision Model transformer outputting raw hidden-states without any specific head on top.",
PIX2STRUCT_VISION_START_DOCSTRING,
)
class Pix2StructVisionModel(Pix2StructPreTrainedModel):
config_class = Pix2StructVisionConfig
main_input_name = "flattened_patches"
supports_gradient_checkpointing = True
_no_split_modules = ["Pix2StructVisionLayer"]
def __init__(self, config: Pix2StructConfig):
super().__init__(config)
self.config = config
self.embeddings = Pix2StructVisionEmbeddings(config)
self.encoder = Pix2StructVisionEncoder(config)
self.layernorm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# Initialize weights and apply final processing
self.post_init()
def _set_gradient_checkpointing(self, module: Pix2StructVisionEncoder, value: bool = False) -> None:
if isinstance(module, Pix2StructVisionEncoder):
module.gradient_checkpointing = value
def get_input_embeddings(self):
return self.embeddings.patch_projection
def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(PIX2STRUCT_VISION_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
def forward(
self,
flattened_patches: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPooling]:
r"""
Returns:
Example:
```python
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoProcessor, Pix2StructVisionModel
>>> image_processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
>>> model = Pix2StructVisionModel.from_pretrained("google/pix2struct-textcaps-base")
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 2048, 768]
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if flattened_patches is None:
raise ValueError("You have to specify flattened_patches")
if attention_mask is None:
# check where `flattened_patches` is not 0
attention_mask = (flattened_patches.sum(dim=-1) != 0).float()
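# (note) the processor pads with all-zero patch rows, so a nonzero row sum is
# used as a proxy for real content when no explicit mask is provided.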
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(flattened_patches)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
if not return_dict:
head_outputs = (sequence_output,)
return head_outputs + encoder_outputs[1:]
return BaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->Pix2StructText,d_model->hidden_size
class Pix2StructTextDenseGatedActDense(nn.Module):
def __init__(self, config: Pix2StructTextConfig):
super().__init__()
self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
# To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
# See https://github.com/huggingface/transformers/issues/20287
# we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None``
if (
isinstance(self.wo.weight, torch.Tensor)
and hidden_states.dtype != self.wo.weight.dtype
and self.wo.weight.dtype != torch.int8
):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
class Pix2StructTextLayerFF(nn.Module):
def __init__(self, config: Pix2StructTextConfig):
super().__init__()
self.DenseReluDense = Pix2StructTextDenseGatedActDense(config)
self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
# Copied from transformers.models.t5.modeling_t5.T5LayerFF.forward
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
class Pix2StructTextAttention(nn.Module):
def __init__(self, config: Pix2StructTextConfig, has_relative_attention_bias=False):
super().__init__()
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.hidden_size = config.hidden_size
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
# Mesh TensorFlow initialization to avoid scaling before softmax
self.query = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
self.key = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
self.value = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
self.output = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
self.gradient_checkpointing = False
@staticmethod
# Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
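# (sketch) worked example for _relative_position_bucket above, using the function
# defaults (num_buckets=32, max_distance=128) and bidirectional=False as in compute_bias:
#   relative_position =   +5 -> bucket 0    (future positions all collapse to bucket 0)
#   relative_position =   -3 -> bucket 3    (|d| < 16: one bucket per exact offset)
#   relative_position =  -40 -> bucket 23   (16 + floor(log(40/16) / log(128/16) * 16))
#   relative_position = -200 -> bucket 31   (everything beyond max_distance shares bucket 31)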
# Adapted from transformers.models.t5.modeling_t5.T5Attention.compute_bias
def compute_bias(self, query_length, key_length, device=None):
"""Compute binned relative position bias"""
if device is None:
device = self.relative_attention_bias.weight.device
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=False,
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
if len(past_key_value) != 2:
raise ValueError(
f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
)
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
def to_projection_shape(states):
"""projection"""
return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = to_projection_shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = to_projection_shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
elif past_key_value.shape[2] != key_value_states.shape[1]:
# checking that the `sequence_length` of the `past_key_value` is the same as
# the provided `key_value_states` to support prefix tuning
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = to_projection_shape(proj_layer(key_value_states))
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
# (batch_size, n_heads, seq_length, dim_per_head)
query_states = to_projection_shape(self.query(hidden_states))
# get key/value states
key_states = project(
hidden_states, self.key, key_value_states, past_key_value[0] if past_key_value is not None else None
)
value_states = project(
hidden_states, self.value, key_value_states, past_key_value[1] if past_key_value is not None else None
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
if mask is not None:
position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
if self.pruned_heads:
mask = torch.ones(position_bias.shape[1])
mask[list(self.pruned_heads)] = 0
position_bias_masked = position_bias[:, mask.bool()]
else:
position_bias_masked = position_bias
scores += position_bias_masked
# (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
# (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = torch.matmul(attn_weights, value_states)
# (batch_size, seq_length, dim)
attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
attn_output = self.output(attn_output)
present_key_value_state = (key_states, value_states) if use_cache else None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
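# (sketch) cache contract of the attention above: with use_cache=True each call returns
# present_key_value_state = (key_states, value_states), each of shape
# (batch_size, n_heads, seen_length, key_value_proj_dim); on the next decoding step
# `project` concatenates only the newest token's key/value onto these cached tensors.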
# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.SelfAttention->self.attention,config.d_model->config.hidden_size
class Pix2StructTextLayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.attention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.EncDecAttention->self.attention,config.d_model->config.hidden_size
class Pix2StructTextLayerCrossAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=False)
self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
query_length=None,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.attention(
normed_hidden_states,
mask=attention_mask,
key_value_states=key_value_states,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class Pix2StructTextBlock(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.self_attention = Pix2StructTextLayerSelfAttention(
config, has_relative_attention_bias=has_relative_attention_bias
)
self.encoder_decoder_attention = Pix2StructTextLayerCrossAttention(config)
self.mlp = Pix2StructTextLayerFF(config)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
layer_head_mask=None,
cross_attn_layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
return_dict=True,
):
if past_key_value is not None:
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
if len(past_key_value) != expected_num_past_key_values:
raise ValueError(
f"There should be {expected_num_past_key_values} past states. "
f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
f"Got {len(past_key_value)} past key / value states"
)
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attention_outputs = self.self_attention(
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
do_cross_attention = encoder_hidden_states is not None
if do_cross_attention:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
cross_attention_outputs = self.encoder_decoder_attention(
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = present_key_value_state + cross_attention_outputs[1]
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
hidden_states = self.mlp(hidden_states)
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if use_cache:
outputs = outputs + (present_key_value_state,) + attention_outputs
else:
outputs = outputs + attention_outputs
return outputs
PIX2STRUCT_START_DOCSTRING = r"""
The Pix2Struct model was proposed in [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language
Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu,
Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. It's an encoder-decoder
transformer pre-trained in an image-to-text setting.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config (Union[`Pix2StructConfig`, `Pix2StructTextConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
PIX2STRUCT_TEXT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Pix2StructText is a model with relative position
embeddings so you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for detail.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look at [Pix2StructText
Training](./t5#training).
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
PIX2STRUCT_INPUTS_DOCSTRING = r"""
Args:
flattened_patches (`torch.FloatTensor` of shape `(batch_size, seq_length, hidden_size)`):
Flattened pixel patches. The `hidden_size` is obtained by the following formula: `hidden_size` =
`num_channels` * `patch_size` * `patch_size`
The process of flattening the pixel patches is done by `Pix2StructProcessor`.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText
Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
`[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
of `inputs_embeds`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss for the decoder.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The standalone text decoder of Pix2Struct",
PIX2STRUCT_START_DOCSTRING,
)
class Pix2StructTextModel(Pix2StructPreTrainedModel):
config_class = Pix2StructTextConfig
_no_split_modules = ["Pix2StructTextBlock"]
supports_gradient_checkpointing = True
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (Pix2StructTextAttention, Pix2StructTextModel)):
module.gradient_checkpointing = value
def __init__(self, config):
super().__init__(config)
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
self.layer = nn.ModuleList(
[Pix2StructTextBlock(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
)
self.final_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
self.gradient_checkpointing = False
# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._reorder_cache
def _reorder_cache(self, past_key_values, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past_key_values is None:
logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
return past_key_values
reordered_decoder_past = ()
for layer_past_states in past_key_values:
# get the correct batch idx from layer past batch dim
# batch dim of `past` is at 2nd position
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
)
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
assert len(reordered_layer_past_states) == len(layer_past_states)
reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
return reordered_decoder_past
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
@add_start_docstrings_to_model_forward(PIX2STRUCT_TEXT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
labels=None,
return_dict=None,
**kwargs,
):
r"""
Returns:
Example:
```python
>>> from transformers import AutoProcessor, Pix2StructTextModel
>>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
>>> model = Pix2StructTextModel.from_pretrained("google/pix2struct-textcaps-base")
>>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> loss = outputs.loss
```
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
if attention_mask is None:
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
if encoder_attention_mask is None and encoder_hidden_states is not None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.layer)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(zip(self.layer, past_key_values)):
layer_head_mask = head_mask[i]
cross_attn_layer_head_mask = cross_attn_head_mask[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return tuple(module(*inputs, use_cache, output_attentions))
return custom_forward
layer_outputs = checkpoint(
create_custom_forward(layer_module),
hidden_states,
extended_attention_mask,
position_bias,
encoder_hidden_states,
encoder_extended_attention_mask,
encoder_decoder_position_bias,
layer_head_mask,
cross_attn_layer_head_mask,
None, # past_key_value is always None with gradient checkpointing
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
layer_head_mask=layer_head_mask,
cross_attn_layer_head_mask=cross_attn_layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
if use_cache is False:
layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
hidden_states, present_key_value_state = layer_outputs[:2]
# We share the position biases between the layers - the first layer store them
# layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
# (cross-attention position bias), (cross-attention weights)
position_bias = layer_outputs[2]
if encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (present_key_value_state,)
            if output_attentions:
                # With output_attentions the layer tuple is (hidden-states, present
                # key-value, self-attention position bias, self-attention weights,
                # cross-attention position bias, cross-attention weights), so the
                # attention weights sit at indices 3 and 5, not the bias slots.
                all_attentions = all_attentions + (layer_outputs[3],)
                all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
logits = self.lm_head(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
loss = None
if labels is not None:
loss_fct = nn.CrossEntropyLoss(ignore_index=-100, reduction="mean", label_smoothing=0.1)
masked_labels = labels.masked_fill(labels == self.config.pad_token_id, -100)
loss = loss_fct(logits.contiguous().view(-1, logits.size(-1)), masked_labels.contiguous().view(-1))
if not return_dict:
return tuple(
v
for v in [
loss,
logits,
present_key_value_states,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
@add_start_docstrings(
"A conditional generation model with a language modeling head. Can be used for sequence generation tasks.",
PIX2STRUCT_START_DOCSTRING,
)
class Pix2StructForConditionalGeneration(Pix2StructPreTrainedModel):
config_class = Pix2StructConfig
main_input_name = "flattened_patches"
_keys_to_ignore_on_load_missing = [
r"encoder.embed_tokens.weight",
r"decoder.embed_tokens.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder.layer.0.layer.1.EncDecAttention.relative_attention_bias.weight",
]
def __init__(self, config: Pix2StructConfig):
super().__init__(config)
self.encoder = Pix2StructVisionModel(config.vision_config)
self.decoder = Pix2StructTextModel(config.text_config)
self.is_vqa = config.is_vqa
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.decoder.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
self.decoder.set_input_embeddings(new_embeddings)
def get_output_embeddings(self) -> nn.Module:
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
self.decoder.set_output_embeddings(new_embeddings)
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
model_embeds = self.decoder.resize_token_embeddings(new_num_tokens)
# update vocab size
self.config.text_config.vocab_size = new_num_tokens
return model_embeds
def get_decoder(self):
return self.decoder
def get_encoder(self):
return self.encoder
@add_start_docstrings_to_model_forward(PIX2STRUCT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
flattened_patches: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
decoder_head_mask: Optional[torch.FloatTensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
labels: Optional[torch.LongTensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
r"""
Returns:
Example:
Inference:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration
>>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
>>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base")
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> # autoregressive generation
>>> generated_ids = model.generate(**inputs, max_new_tokens=50)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_text)
A stop sign is on a street corner.
```
Training:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration
>>> processor = AutoProcessor.from_pretrained("google/pix2struct-base")
>>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-base")
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "A stop sign is on the street corner."
>>> inputs = processor(images=image, return_tensors="pt")
>>> labels = processor(text=text, return_tensors="pt").input_ids
>>> # forward pass
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> print(f"{loss.item():.5f}")
5.23973
```"""
use_cache = use_cache if use_cache is not None else self.config.text_config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
flattened_patches=flattened_patches,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
decoder_attention_mask = (
decoder_attention_mask
if decoder_attention_mask is not None
else decoder_input_ids.ne(self.config.pad_token_id).float()
)
# Always attend to the first token
decoder_attention_mask[:, 0] = 1
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
labels=labels,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqLMOutput(
loss=decoder_outputs.loss,
logits=decoder_outputs.logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
flattened_patches: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
past_key_values=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs,
):
if decoder_attention_mask is None:
decoder_attention_mask = torch.ones_like(input_ids).to(input_ids.device)
# cut decoder_input_ids if past is used
if past_key_values is not None:
input_ids = input_ids[:, -1:]
return {
"flattened_patches": flattened_patches,
"decoder_input_ids": input_ids,
"past_key_values": past_key_values,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache,
}
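# Illustrative sketch (added commentary, not part of the upstream file): during
# `generate()`, `prepare_inputs_for_generation` is called once per decoding step.
# On the first step `past_key_values` is None, so the full prompt is passed; on
# every later step the cache is populated and only the newest token is fed to
# the decoder, i.e. `input_ids[:, -1:]` with shape (batch_size, 1).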
| ["[email protected]"] |
fa72a26b813d236bccb271ec23fb2cc08087103e | 3f3e2677907d650851e4900ac267c93d61635032 | midgard/config/config.py | 7b22af8ec02d5f742d7eae2a06391fad98f48aa1 | ["MIT"] | permissive | skjaeve/midgard | c4aac05550f06d98823a57ee77ed3f5b6b9b4760 | afcb1e30236436b63b1232bf54b6531b574676be | refs/heads/master | 2020-04-05T03:35:11.811805 | 2018-11-29T13:46:06 | 2018-11-29T13:46:06 | 156,520,195 | 0 | 0 | MIT | 2018-11-07T09:14:22 | 2018-11-07T09:14:22 | null | UTF-8 | Python | false | false | 37,908 | py |
"""Midgard library module for handling of configuration settings
Description:
------------
A Configuration consists of one or several sections. Each ConfigurationSection
consists of one or more entries. Each ConfigurationEntry consists of a key and
a value.
Examples:
---------
For basic use, an entry is looked up by simple attribute access. For instance
if `cfg` is a Configuration with the section `midgard` which has an entry `foo
= bar`:
>>> cfg = Configuration("config_name")
>>> cfg.update("midgard", "foo", "bar")
>>> cfg.midgard.foo
ConfigurationEntry(key='foo', value='bar')
ConfigurationEntry has several access methods that convert the entry to a given
data type:
>>> cfg.update("midgard", "foo_pi", 3.14, source="command line")
>>> cfg.midgard.foo_pi
ConfigurationEntry(key='foo_pi', value='3.14')
>>> cfg.midgard.foo_pi.float
3.14
>>> cfg.midgard.foo_pi.str
'3.14'
>>> cfg.midgard.foo_pi.tuple
('3.14',)
Sources:
--------
Each configuration entry records its source. That is, where that entry was
defined. Examples include being read from a file, set as a command line option,
or set programmatically from a dictionary. The source can be looked up on an
individual entry, or for all entries in a configuration.
>>> cfg.midgard.foo_pi.source
'command line'
>>> cfg.sources # doctest: +SKIP
{'/home/midgard/midgard.conf', 'command line'}
Profiles:
---------
A configuration can have several profiles, where a profile gives alternative
values for some entries. In configuration files, a profile is denoted by a
double underscore in the section name (for instance `[midgard__test]` for the
`test` profile of the `midgard` section). The prioritized list of profiles to
use is set through the `profiles` property:

>>> cfg.update("midgard", "foo", "baz", profile="test")
>>> cfg.profiles = ["test"]
>>> cfg.midgard.foo
ConfigurationEntry(key='foo', value='baz')

Fallback Configuration:
-----------------------
A configuration can have a fallback configuration, which is consulted when an
entry is not found in the configuration itself:

>>> fallback = Configuration("fallback")
>>> fallback.update("midgard", "version", "1.0")
>>> cfg.fallback_config = fallback
>>> cfg.get("version", section="midgard").str
'1.0'

Master Section:
---------------
A master section can be designated. Entries in the master section can then be
looked up directly on the configuration, without naming the section:

>>> cfg.master_section = "midgard"
>>> cfg.foo
ConfigurationEntry(key='foo', value='baz')

Replacement Variables:
----------------------
Entries may contain variables on the form `{$var}` that can be replaced with
configuration variables or explicitly given values:

>>> cfg.update("midgard", "greeting", "hello {$name}")
>>> cfg.midgard.greeting.replace(name="world").str
'hello world'

Help text and Type hints:
-------------------------
Entries can carry metadata such as help texts and type hints. In configuration
files these are given as separate `key:help` and `key:type` entries; when
updating programmatically they can be passed in the `meta` dictionary:

>>> cfg.update("midgard", "foo_pi", "3.14", meta={"type": "float", "help": "Pi"})
>>> cfg.midgard.foo_pi.type
'float'
"""
# Standard library imports
import builtins
from configparser import ConfigParser, BasicInterpolation, ExtendedInterpolation
from contextlib import contextmanager
import datetime as stdlib_datetime
import os.path
import pathlib
import re
import sys
from collections import UserDict
# Midgard imports
from midgard.dev import console
from midgard.collections import enums
from midgard.dev import exceptions
# Typing
from typing import Any, Callable, Dict, Generator, List, Optional, Set, Tuple, Union
ProfileName = Optional[str]
Sections = Dict[str, "ConfigurationSection"]
EntryName = str
ConfigVars = Dict[str, Any]
# Date and datetime formats
FMT_date = "%Y-%m-%d"
FMT_datetime = "%Y-%m-%d %H:%M:%S"
FMT_dt_file = "%Y%m%d-%H%M%S"
class CasedConfigParser(ConfigParser):
"""ConfigParser with case-sensitive keys"""
def optionxform(self, optionstr: str) -> str:
"""Do not turn optionstr (key) into lowercase"""
return optionstr
class Configuration:
"""Represents a Configuration"""
def __init__(self, name: str) -> None:
"""Initialize a Configuration
The name is used purely for representation and error messages.
Args:
name: Name of configuration.
"""
self.name = name
self.fallback_config = None
self.master_section = None
self._profiles: List[ProfileName] = [None]
self._profile_sections: Dict[ProfileName, Sections] = dict()
self._sections: Sections = dict()
self._vars_dict: ConfigVars = dict()
self._update_count: int = 0
@classmethod
def read_from_file(cls, cfg_name: str, *file_paths: Union[str, pathlib.Path]) -> "Configuration":
"""Read a configuration from one or more files
Args:
file_paths: File(s) that will be read.
Returns:
A Configuration representing the file(s).
"""
cfg = cls(cfg_name)
for file_path in file_paths[::-1]:
cfg.update_from_file(file_path)
return cfg
@classmethod
@contextmanager
def update_on_file(cls, file_path: Union[str, pathlib.Path], **as_str_args: Any) -> Generator:
"""Context manager for updating a configuration on file
"""
# Read config from file
cfg = cls.read_from_file("Temporary", file_path)
update_count_before = cfg._update_count
# Yield config so it can be updated
yield cfg
# Write config if it has been updated
if cfg._update_count > update_count_before:
cfg.write_to_file(file_path, **as_str_args)
def write_to_file(self, file_path: Union[str, pathlib.Path], **as_str_args: Any) -> None:
"""Write the configuration to a file
In addition to the file path, arguments can be specified and will be passed on to the as_str() function. See
`as_str()` for more information.
Todo: Use files.open_path
"""
file_path = pathlib.Path(file_path)
file_path.parent.mkdir(parents=True, exist_ok=True)
with open(file_path, mode="w") as fid:
fid.write(self.as_str(**as_str_args) + "\n")
@property
def section_names(self) -> List[str]:
"""Names of sections in Configuration"""
return list(self._sections.keys())
@property
def sections(self) -> List["ConfigurationSection"]:
"""Sections in Configuration"""
return list(self._sections.values())
@property
def sources(self) -> Set[str]:
"""Sources of entries in Configuration"""
return {s[k].source for s in self.sections for k in s.keys() if s[k].source}
@property
def profiles(self) -> List[ProfileName]:
"""List of profiles currently being used in Configuration"""
return self._profiles
@profiles.setter
def profiles(self, values: Union[None, List[ProfileName]]) -> None:
"""Set profiles that will be used in Configuration
The list of profiles should be a prioritized list where the first profile listed will be used if available.
None is used to indicate default values (no profile), and will be automatically appended at the end of the list
of profiles.
To not use any profiles, set `cfg.profiles = None`.
Args:
values: List of profiles to use.
"""
if values is None:
values = [None]
elif values[-1] is not None:
values.append(None)
self._profiles = values
self._set_sections_for_profiles()
def _set_sections_for_profiles(self) -> None:
"""Update sections according to profiles"""
self._sections.clear()
# Add values in reverse order so that the first profile is prioritized
for profile in self.profiles[::-1]:
for section_name, profile_section in self._profile_sections.get(profile, dict()).items():
self._sections.setdefault(section_name, ConfigurationSection(section_name))
for key, entry in profile_section.items():
self._sections[section_name][key] = entry
@property
def fallback_config(self) -> "Configuration":
"""The fallback configuration"""
if self._fallback_config is None:
raise exceptions.MissingConfigurationError(
f"Configuration '{self.name}' has not defined a fallback configuration"
)
return self._fallback_config
@fallback_config.setter
def fallback_config(self, cfg: Optional["Configuration"]) -> None:
"""Set the fallback configuration"""
self._fallback_config = cfg
@property
def master_section(self) -> "ConfigurationSection":
"""The master section"""
if self._master_section is None:
raise exceptions.MissingSectionError(f"Configuration {self.name!r} has not defined a master section")
if self._master_section not in self._sections:
raise exceptions.MissingSectionError(f"Master section {self._master_section!r} does not exist in"
f" configuration {self.name!r}")
return self._sections[self._master_section]
@master_section.setter
def master_section(self, section: Optional[str]) -> None:
"""Set the master section"""
self._master_section = section
def get(
self, key: str, value: Optional[str] = None, section: Optional[str] = None, default: Optional[str] = None
) -> "ConfigurationEntry":
"""Get an entry from a configuration with possibility for override and default value
A value for an entry is found using the following priorities:
1. An explicit value given in `value`. None is used as a marker for no value.
2. Looked up in the current configuration.
        3. Looked up in any fallback configurations that are defined.
4. The default value is used.
If `value` is not None, that value is simply returned as a `ConfigurationEntry`. If `default` is not given (is
None), and a value is not found in any other way, a MissingEntryError is raised.
Args:
key: Name of option (key in the configuration entry).
value: Value of entry. Used for overriding the configuration.
section: Section in the configuration in which to look up the key.
default: Default value that is returned if value is not found any other way.
Returns:
Entry representing the value.
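
        Example (illustrative):

            >>> cfg = Configuration("example")
            >>> cfg.update("midgard", "foo", "bar")
            >>> cfg.get("foo", section="midgard").str
            'bar'
            >>> cfg.get("missing", section="midgard", default="baz").str
            'baz'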
"""
if value is not None:
return ConfigurationEntry(key, value=value, source="method call", vars_dict=self.vars)
try:
section_value = self.master_section if section is None else self[section]
if isinstance(section_value, ConfigurationEntry):
return section_value
else:
return section_value[key]
except (exceptions.MissingSectionError, exceptions.MissingEntryError) as err:
try:
return self.fallback_config.get(key=key, section=section)
except (exceptions.MissingConfigurationError, exceptions.MissingEntryError):
if default is None:
# Raise original error
raise err
else:
return ConfigurationEntry(key, value=default, source="default value", vars_dict=self.vars)
def update(
self,
section: str,
key: str,
value: str,
*,
profile: ProfileName = None,
source: str = "unknown",
meta: Optional[Dict[str, str]] = None,
allow_new: bool = True,
_update_sections: bool = True,
) -> None:
"""Update a configuration section with a configuration entry
        If `allow_new` is False, the configuration entry must already exist. If it is True, the update is allowed to
        create a new section and a new entry if necessary.
The `_update_sections` flag can be used to not update the sections of the configuration, only the
profiles. This should typically not be done, but is used by some of the other update methods which update the
sections themselves.
Args:
section: Section to update.
key: Key of entry.
value: Value of entry.
profile: Profile to update.
source: Source of the update.
meta: Metadata like help text and type hints for the entry.
allow_new: Whether to allow the creation of a new section and entry.
"""
if not allow_new:
profile_str = "" if profile is None else f"(profile: '{profile}')"
if section not in self._sections:
raise exceptions.MissingSectionError(
f"Configuration '{self.name}' does not contain section '{section}' {profile_str}"
)
if key not in self._sections[section]:
raise exceptions.MissingEntryError(
f"Section '{section}' of configuration '{self.name}' does not contain entry '{key}' {profile_str}"
)
# Add entry to profile
source = source if profile is None else f"{source} ({profile})"
profile_sections = self._profile_sections.setdefault(profile, dict())
profile_sections.setdefault(section, ConfigurationSection(section))
# Record that configuration has been updated
if key not in profile_sections[section] or profile_sections[section][key]._value != value:
self._update_count += 1
profile_sections[section][key] = ConfigurationEntry(
key, value=value, source=source, meta=meta, vars_dict=self.vars
)
# Update sections
if _update_sections:
self._set_sections_for_profiles()
def update_from_file(
self,
file_path: Union[str, pathlib.Path],
allow_new: bool = True,
interpolate: bool = False,
case_sensitive: bool = False,
) -> None:
"""Update the configuration from a configuration file
The Python ConfigParser is used to read the file. The file format that is supported is described at
https://docs.python.org/library/configparser.html
        Different profiles in a configuration file are denoted by double underscores in the section names. For
        instance, the following configuration has a `foo` profile in the `spam` section (in addition to the default
        profile):
[spam]
...
[spam__foo]
...
If `interpolate` is set to True, ExtendedInterpolation of variables in the configuration file is used. See
https://docs.python.org/library/configparser.html#configparser.ExtendedInterpolation for details.
Args:
file_path: Path to the configuration file.
allow_new: Whether to allow the creation of new sections and entries.
interpolate: Whether to interpolate variables in the configuration file.
case_sensitive: Whether to read keys as case sensitive (or convert to lower case).
"""
# Use ConfigParser to read from file
cfg_parser_cls = CasedConfigParser if case_sensitive else ConfigParser
cfg_parser = cfg_parser_cls(
allow_no_value=True,
delimiters=("=",),
interpolation=ExtendedInterpolation() if interpolate else BasicInterpolation(),
)
cfg_parser.read(file_path)
# Add configuration entries
for cfg_section in cfg_parser.sections():
section, has_profile, profile = cfg_section.partition("__")
for key, value in cfg_parser[cfg_section].items():
# Handle meta-information
if ":" in key:
continue
meta = {k.partition(":")[-1]: v for k, v in cfg_parser[cfg_section].items() if k.startswith(f"{key}:")}
# Create a configuration entry
self.update(
section,
key,
value if value is None else value.replace("\n", " "),
profile=profile if has_profile else None,
source=str(file_path),
meta=meta,
allow_new=allow_new,
_update_sections=False,
)
self._set_sections_for_profiles()
def update_from_config_section(
self, other_section: "ConfigurationSection", section: Optional[str] = None, allow_new: bool = True
) -> None:
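        """Update the configuration with entries from another configuration section

        A minimal sketch (illustrative; `other_cfg` is assumed to be another Configuration
        with a `midgard` section):

            cfg.update_from_config_section(other_cfg.midgard)

        Args:
            other_section: Section whose entries are copied into this configuration.
            section:       Name of section to update. Defaults to the name of `other_section`.
            allow_new:     Whether to allow the creation of new sections and entries.
        """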
section = other_section.name if section is None else section
for key, entry in other_section.data.items():
self.update(
section,
key,
entry.str,
source=entry.source,
meta=entry.meta,
allow_new=allow_new,
_update_sections=False,
)
self._set_sections_for_profiles()
def update_from_options(
self,
options: Optional[List[str]] = None,
profile: ProfileName = None,
source: str = "command line",
allow_new: bool = False,
) -> None:
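        """Update the configuration from command line options

        Options are on the form `--name:section:key=value`, where the name and section
        parts are optional. If the section is left out, the master section is used.
        Options that do not match this pattern are ignored. A minimal sketch
        (illustrative):

            cfg.update_from_options(["--midgard:foo=baz"])

        Args:
            options:   List of command line options. Defaults to sys.argv[1:].
            profile:   Profile to update.
            source:    Source of the update.
            allow_new: Whether to allow the creation of new sections and entries.
        """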
if options is None:
options = sys.argv[1:]
for option in options:
if not (option.startswith("--") and "=" in option):
continue
# Parse config name, section, key and value of the form name:section:key=value
opt_key, _, opt_value = option[2:].partition("=")
opt_section, _, opt_key = opt_key.rpartition(":")
opt_name, _, opt_section = opt_section.rpartition(":")
# Update current configuration
if opt_name and opt_name != self.name:
continue
if not opt_section:
opt_section = self.master_section.name
self.update(
opt_section,
opt_key,
opt_value,
profile=profile,
source=f"{source} ({option})",
allow_new=allow_new,
_update_sections=False,
)
self._set_sections_for_profiles()
def update_from_dict(
self,
cfg_dict: Dict[str, Any],
section: Optional[str] = None,
source: str = "dictionary",
allow_new: bool = True,
) -> None:
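        """Update the configuration from a dictionary

        Each key/value pair in the dictionary becomes one entry in the given section.
        A minimal sketch (illustrative):

            cfg.update_from_dict({"foo": "bar"}, section="midgard")

        Args:
            cfg_dict:  Dictionary with keys and values to add to the configuration.
            section:   Section to update. Defaults to the master section.
            source:    Source of the update.
            allow_new: Whether to allow the creation of new sections and entries.
        """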
section = self.master_section.name if section is None else section
for key, value in cfg_dict.items():
self.update(section, key, value, source=source, allow_new=allow_new, _update_sections=False)
self._set_sections_for_profiles()
def clear(self) -> None:
"""Clear the configuration"""
self._sections.clear()
self.clear_vars()
@property
def vars(self) -> ConfigVars:
"""The configuration variables"""
return self._vars_dict
def clear_vars(self) -> None:
"""Clear the configuration variables"""
self._vars_dict.clear()
def update_vars(self, new_vars: ConfigVars) -> None:
"""Update the configuration variables"""
self._vars_dict.update(new_vars)
def as_str(
self, width: Optional[int] = None, key_width: int = 30, only_used: bool = False, metadata: bool = True
) -> str:
"""The configuration represented as a string
        This is similar to what is shown by `str(configuration)` (and implemented by `__str__`), but has more
flexibility.
Args:
width: Width of text for wrapping. Default is width of console.
key_width: Width of the key column. Default is 30 characters.
only_used: Only include configuration entries that has been used so far.
metadata: Include metadata like type and help text.
Returns:
String representation of the configuration.
"""
sections = self._sections.values()
section_strs = [
s.as_str(width=width, key_width=key_width, only_used=only_used, metadata=metadata) for s in sections
]
return "\n\n\n".join(s for s in section_strs if s)
def as_dict(
self, getters: Optional[Dict[str, Dict[str, str]]] = None, default_getter: str = "str"
) -> Dict[str, Dict[str, Any]]:
"""The configuration represented as a dictionary
Args:
getters: How to get the value of each entry in each section.
default_getter: How to get the value of entries not specified in getters.
Returns:
Representation of the configuration as a nested dictionary.
"""
getters = dict() if getters is None else getters
return {k: v.as_dict(getters=getters.get(k), default_getter=default_getter) for k, v in self._sections.items()}
def __getitem__(self, key: str) -> Union["ConfigurationSection", "ConfigurationEntry"]:
"""Get a section or entry from the master section from the configuration"""
if key in self.section_names:
return self._sections[key]
try:
return self.master_section[key]
except exceptions.MissingSectionError:
try:
return self.fallback_config[key]
except exceptions.MidgardException:
raise exceptions.MissingSectionError(f"Configuration {self.name!r} has no section {key!r}") from None
def __getattr__(self, key: str) -> Union["ConfigurationSection", "ConfigurationEntry"]:
"""Get a section or entry from the master section from the configuration"""
return self[key]
def __delitem__(self, key: str) -> None:
"""Delete a section from the configuration"""
del self._sections[key]
def __delattr__(self, key: str) -> None:
"""Delete a section from the configuration"""
del self._sections[key]
def __dir__(self) -> List[str]:
"""List attributes and sections in the configuration"""
try:
return list(super().__dir__()) + self.section_names + self.master_section.as_list()
except exceptions.MissingSectionError:
return list(super().__dir__()) + self.section_names
def __str__(self) -> str:
"""The configuration represented as a string
This string can be stored in a file and read back with `update_from_file`.
"""
return "\n\n".join(str(s) for s in self._sections.values())
def __repr__(self) -> str:
"""A simple string representation of the configuration"""
return f"{self.__class__.__name__}(name='{self.name}')"
class ConfigurationSection(UserDict):
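    """Represents one section of a Configuration

    A ConfigurationSection is a dict-like mapping from entry keys to
    ConfigurationEntry objects, with attribute access as a convenience:
    `section.foo` is equivalent to `section["foo"]`.
    """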
data: Dict[str, "ConfigurationEntry"]
def __init__(self, name: str) -> None:
super().__init__()
self.name: str = name
def as_str(
self, width: Optional[int] = None, key_width: int = 30, only_used: bool = False, metadata: bool = True
) -> str:
"""The configuration section represented as a string
        This is similar to what is shown by `str(section)` (and implemented by `__str__`), but has more flexibility.
Args:
width: Width of text for wrapping. Default is width of console.
key_width: Width of the key column. Default is 30 characters.
only_used: Only include configuration entries that has been used so far.
metadata: Include metadata like type and help text.
Returns:
String representation of the configuration section.
"""
lines = list()
for entry in self.data.values():
if only_used and not entry.is_used:
continue
lines.append(entry.entry_as_str(width=width, key_width=key_width, metadata=metadata))
if lines:
return f"[{self.name}]\n" + "\n".join(lines)
else:
return ""
def as_list(self) -> List[str]:
"""List of keys of entries in configuration section
Returns:
List of keys of entries in configuration section.
"""
return list(self.data.keys())
    def as_dict(self, getters: Optional[Dict[str, str]] = None, default_getter: str = "str") -> Dict[str, Any]:
"""The configuration section represented as a dictionary
Args:
getters: How to get the value of each entry in the section.
default_getter: How to get the value of entries not specified in getters.
Returns:
Representation of the configuration section as a dictionary.
"""
getters = dict() if getters is None else getters
getters = {k: getters.get(k, default_getter) for k in self.keys()}
return {k: getattr(e, getters[k]) for k, e in self.items()}
def __getitem__(self, key: str) -> "ConfigurationEntry":
"""Get an entry from the configuration section"""
try:
return self.data[key]
except KeyError:
raise exceptions.MissingEntryError(f"Configuration section '{self.name}' has no entry '{key}'") from None
def __getattr__(self, key: str) -> "ConfigurationEntry":
"""Get an entry from the configuration section"""
try:
return self.data[key]
except KeyError:
raise exceptions.MissingEntryError(f"Configuration section '{self.name}' has no entry '{key}'") from None
def __dir__(self) -> List[str]:
"""List attributes and entries in the configuration section"""
return list(super().__dir__()) + self.as_list()
def __str__(self) -> str:
"""The configuration section represented as a string"""
return f"[{self.name}]\n" + "\n".join(str(v) for v in self.data.values())
def __repr__(self) -> str:
"""A simple string representation of the configuration section"""
return f"{self.__class__.__name__}(name='{self.name}')"
class ConfigurationEntry:
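    """Represents one entry (key and value) in a configuration section

    The value is stored as a string and converted on demand by the accessor
    properties and `as_`-methods (`str`, `int`, `float`, `bool`, `date`,
    `datetime`, `path`, `list`, `tuple`, `dict`, `enum`). Each access also
    records how the entry has been used (see `is_used`).
    """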
_BOOLEAN_STATES = {
"0": False,
"1": True,
"false": False,
"true": True,
"no": False,
"yes": True,
"off": False,
"on": True,
}
def __init__(
self,
key: str,
value: Any,
*,
source: builtins.str = "",
meta: Optional[Dict[str, str]] = None,
vars_dict: Optional[ConfigVars] = None,
_used_as: Optional[Set[builtins.str]] = None,
) -> None:
self.source = source
self.meta = dict() if meta is None else meta
self._key = key
self._value = str(value)
self._vars_dict = dict() if vars_dict is None else vars_dict
self._used_as = set() if _used_as is None else _used_as
@property
def type(self) -> Optional[builtins.str]:
"""Type hint for the ConfigurationEntry"""
return self.meta.get("type", None)
@property
def help(self) -> builtins.str:
"""Help text for the ConfigurationEntry"""
return self.meta.get("help", "")
@property
def str(self) -> builtins.str:
"""Value of ConfigurationEntry as string"""
self._using("str")
return self._value
def as_str(self) -> builtins.str:
"""Value of ConfigurationEntry as string"""
return self.str
@property
def int(self) -> builtins.int:
"""Value of ConfigurationEntry converted to an integer"""
self._using("int")
try:
return int(self._value)
except ValueError:
raise ValueError(
f"Value '{self._value}' of '{self._key}' in {self.source} cannot be converted to an integer"
) from None
def as_int(self) -> builtins.int:
"""Value of ConfigurationEntry converted to an integer"""
return self.int
@property
def float(self) -> builtins.float:
"""Value of ConfigurationEntry converted to a float"""
self._using("float")
try:
return float(self._value)
except ValueError:
raise ValueError(
f"Value '{self._value}' of '{self._key}' in {self.source} cannot be converted to a float"
) from None
def as_float(self) -> builtins.float:
"""Value of ConfigurationEntry converted to a float"""
return self.float
@property
def bool(self) -> builtins.bool:
"""Value of ConfigurationEntry converted to a boolean
The conversion is done by looking up the string value of the entry in _BOOLEAN_STATES.
"""
self._using("bool")
try:
return self._BOOLEAN_STATES[self._value.lower()]
except KeyError:
raise ValueError(
f"Value '{self._value}' of '{self._key}' in {self.source} cannot be converted to a boolean"
) from None
def as_bool(self) -> builtins.bool:
"""Value of ConfigurationEntry converted to a boolean
The conversion is done by looking up the string value of the entry in _BOOLEAN_STATES.
"""
return self.bool
@property
def date(self) -> stdlib_datetime.date:
"""Value of ConfigurationEntry converted to a date object assuming format `FMT_date`"""
return self.as_date(format=FMT_date)
def as_date(self, format: builtins.str = FMT_date) -> stdlib_datetime.date:
"""Value of ConfigurationEntry converted to a date object
Args:
format (String): Format string, see strftime for information about the string.
Returns:
Date: Value of entry.
"""
self._using("date")
try:
return stdlib_datetime.datetime.strptime(self._value, format).date()
except ValueError:
raise ValueError(
f"Value '{self._value}' of '{self._key}' in {self.source} does not match the date format '{format}'"
) from None
@property
def datetime(self) -> stdlib_datetime.datetime:
"""Value of ConfigurationEntry converted to a datetime object assuming format `FMT_datetime`"""
return self.as_datetime(format=FMT_datetime)
def as_datetime(self, format: builtins.str = FMT_datetime) -> stdlib_datetime.datetime:
"""Value of ConfigurationEntry converted to a datetime object
Args:
format (String): Format string, see strftime for information about the string.
Returns:
Datetime: Value of entry.
"""
self._using("datetime")
try:
return stdlib_datetime.datetime.strptime(self._value, format)
except ValueError:
raise ValueError(
f"Value '{self._value}' of '{self._key}' in {self.source} does not match the date format '{format}'"
) from None
@property
def path(self) -> pathlib.Path:
"""Value of ConfigurationEntry interpreted as a path string"""
self._using("path")
path = self._value
if "~" in path:
path = os.path.expanduser(path)
return pathlib.Path(path)
def as_path(self) -> pathlib.Path:
"""Value of ConfigurationEntry interpreted as a path string"""
return self.path
@property
def list(self) -> List[builtins.str]:
"""Value of ConfigurationEntry converted to a list by splitting at commas and whitespace"""
self._using("list")
return self._value.replace(",", " ").split()
def as_list(
self, split_re: builtins.str = r"[\s,]", convert: Callable = builtins.str, maxsplit: builtins.int = 0
) -> List[Any]:
"""Value of ConfigurationEntry converted to a list
The entry is converted to a list by using the `split_re`-regular expression. By default the entry will be split
at commas and whitespace.
Args:
split_re: Regular expression used to split entry into list.
convert: Function used to convert each element of the list.
maxsplit: If nonzero, at most maxsplit splits occur.
Returns:
Value of entry as list.
"""
self._using("list")
return [convert(s) for s in re.split(split_re, self._value, maxsplit=maxsplit) if s]
@property
def list_of_lists(self) -> List[List[builtins.str]]:
self._using("list_of_lists")
raise NotImplementedError
def as_list_of_lists(
self,
split_res: Tuple[builtins.str, ...] = (r"[\s,]", r"[^_\w]"),
num_elements: Optional[builtins.int] = None,
convert: Callable = builtins.str,
) -> List[List[Any]]:
self._using("list_of_lists")
raise NotImplementedError
@property
def tuple(self) -> Tuple[builtins.str, ...]:
"""Value of ConfigurationEntry converted to tuple by splitting at commas and whitespace"""
self._using("tuple")
return tuple(self._value.replace(",", " ").split())
def as_tuple(
self, split_re: builtins.str = r"[\s,]", convert: Callable = builtins.str, maxsplit: builtins.int = 0
) -> Tuple[Any, ...]:
"""Value of ConfigurationEntry converted to a tuple
The entry is converted to a tuple by using the `split_re`-regular expression. By default the entry will be
split at commas and whitespace.
Args:
split_re: Regular expression used to split entry into tuple.
convert: Function used to convert each element of the tuple.
maxsplit: If nonzero, at most maxsplit splits occur.
Returns:
Value of entry as tuple.
"""
self._using("tuple")
return tuple([convert(s) for s in re.split(split_re, self._value, maxsplit=maxsplit) if s])
@property
def dict(self) -> Dict[builtins.str, builtins.str]:
"""Value of ConfigurationEntry converted to a dict"""
self._using("dict")
return dict(i.partition(":")[::2] for i in self.list)
def as_dict(
self,
item_split_re: builtins.str = r"[\s,]",
key_value_split_re: builtins.str = r"[:]",
convert: Callable = builtins.str,
maxsplit: builtins.int = 0,
) -> Dict[builtins.str, Any]:
"""Value of ConfigurationEntry converted to a dictionary
By default the dictionary is created by splitting items at commas and whitespace,
and key from value at colons.
Args:
item_split_re: Regular expression used to split entry into items.
key_value_split_re: Regular expression used to split items into keys and values.
convert: Function used to convert each value in the dictionary.
maxsplit: If nonzero, at most maxsplit splits occur when splitting entry into items.
Returns:
Value of entry as dict.
"""
self._using("dict")
items = [s for s in re.split(item_split_re, self._value, maxsplit=maxsplit) if s]
key_values = [re.split(key_value_split_re, i, maxsplit=1) for i in items]
return {k: convert(v) for k, v in key_values}
def as_enum(self, enum: builtins.str) -> enums.enum.Enum:
"""Value of ConfigurationEntry converted to an enumeration
Args:
enum (String): Name of Enum.
Returns:
Enum: Value of entry as Enum.
"""
self._using("enum")
return enums.get_value(enum, self._value)
@property
def replaced(self) -> "ConfigurationEntry":
"""Value of ConfigurationEntry with {$}-variables replaced"""
return self.replace()
def replace(self, default: Optional[builtins.str] = None, **replace_vars: builtins.str) -> "ConfigurationEntry":
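        """Replace `{$var}`-variables in the entry with values

        Variables on the form `{$var}` are replaced by the corresponding value in
        `replace_vars`, falling back to the configuration variables and finally to
        `default` if it is given. Example (illustrative):

            >>> ConfigurationEntry("path", "{$home}/config").replace(home="/home/midgard").str
            '/home/midgard/config'

        Args:
            default:      Value used for variables that are not otherwise defined.
            replace_vars: Values of variables used for the replacement.

        Returns:
            ConfigurationEntry: New entry with variables replaced.
        """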
replacement_vars = dict(self._vars_dict, **replace_vars)
replacement_value = self._value
replacements = list()
matches = re.findall(r"\{\$\w+\}", replacement_value)
for match in matches:
var = match.strip("${}")
replacement = str(replacement_vars.get(var, match if default is None else default))
replacements.append(f"{var}={replacement}")
replacement_value = replacement_value.replace(match, replacement)
return self.__class__(
key=self._key,
value=replacement_value,
source=self.source + " ({','.join(replacements)})",
_used_as=self._used_as,
)
@property
def is_used(self) -> builtins.bool:
return bool(self._used_as)
def entry_as_str(
self, width: Optional[builtins.int] = None, key_width: builtins.int = 30, metadata: builtins.bool = True
) -> builtins.str:
"""The configuration entry represented as a string
        This is similar to what is shown by `str(entry)` (and implemented by `__str__`), but has more flexibility.
Args:
width: Width of text for wrapping. Default is width of console.
key_width: Width of the key column. Default is 30 characters.
metadata: Include metadata like type and help text.
Returns:
String representation of the configuration entry.
"""
lines = list()
width = console.columns() if width is None else width
fill_args = dict(width=width, hanging=key_width + 3, break_long_words=False, break_on_hyphens=False)
# The entry itself
lines.append(console.fill(f"{self._key:<{key_width}} = {self._value}", **fill_args))
# Metadata, including help text and type hints
if metadata and self.meta:
for meta_key, meta_value in self.meta.items():
if meta_value is None:
lines.append(console.fill(f"{self._key}:{meta_key}", **fill_args))
else:
lines.append(console.fill(f"{f'{self._key}:{meta_key}':<{key_width}} = {meta_value}", **fill_args))
lines.append("")
return "\n".join(lines)
def _using(self, as_type: builtins.str) -> None:
"""Register that entry is used as a type
Args:
as_type: Name of type entry is used as.
"""
self._used_as.add(as_type)
def __add__(self, other: "ConfigurationEntry") -> "ConfigurationEntry":
if isinstance(other, self.__class__):
if self.source == other.source:
source = f"{self.source} (+)"
else:
source = f"{self.source} + {other.source}"
return self.__class__(
key=f"{self._key} + {other._key}", value=self.str + other.str, source=source, vars_dict=self._vars_dict
)
else:
return NotImplemented
def __bool__(self) -> builtins.bool:
"""A ConfigurationEntry is truthy if the value is not empty"""
return bool(self._value)
def __str__(self) -> builtins.str:
"""The configuration entry represented as a string"""
return self.entry_as_str()
def __repr__(self) -> builtins.str:
"""A simple string representation of the configuration entry"""
return f"{self.__class__.__name__}(key='{self._key}', value='{self._value}')"
| ["[email protected]"] |
eaddb2f9ccb5d75e4fedfba55196b323ca4778ac | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/beer-song/58a6d9e0b5c9402b8236f1b6d329c093.py | 6c7d5a0ab15761501e144b12e4ed2ee1df7d5554 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 714 | py |
def verse(number):
it = lambda n: "it" if n == 1 else "one"
suffix = lambda n: "no more bottles" if n < 1 else \
"1 bottle" if n < 2 else str(n) + " bottles"
if number == 0:
return "No more bottles of beer on the wall, no more bottles of beer.\n" \
"Go to the store and buy some more, 99 bottles of beer on the wall.\n"
verse = "{0} of beer on the wall, {0} of beer.\n" \
"Take {2} down and pass it around, {1} of beer on the wall.\n"
return verse.format(suffix(number), suffix(number-1), it(number))
def song(start, end=0):
return "\n".join([verse(number)
for number in range(start, end - 1, -1)]) + "\n"
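# Example output (illustrative):
#
#   >>> print(verse(2))
#   2 bottles of beer on the wall, 2 bottles of beer.
#   Take one down and pass it around, 1 bottle of beer on the wall.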
| ["[email protected]"] |
9ded9a3c3c4a360078096f349f3240a33087ae0c | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Vulkan/ImageCreateFlags.py | abb32be5019fc6f30604e199e47ef6e098a03eb5 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,209 | py |
# encoding: utf-8
# module gi.repository.Vulkan
# from /usr/lib64/girepository-1.0/Vulkan-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
class ImageCreateFlags(__gi.Struct):
# no doc
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(ImageCreateFlags), '__module__': 'gi.repository.Vulkan', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'ImageCreateFlags' objects>, '__weakref__': <attribute '__weakref__' of 'ImageCreateFlags' objects>, '__doc__': None})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(ImageCreateFlags)
| ["[email protected]"] |
3c0685a1924a7506375a295827526e4a6a28518b | 8015f1c62a2cb4efd21aa8938336913bf8117868 | bamap/ba0370.pngMap.py | f0078bf8408f0c2539214b78db3e6bea6445619a | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py |
ba0370.pngMap = [
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111001111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100011111111111111111111111111111111111111111111111111111111001111110',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111011111111001111111',
'11111111011111110011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111011111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111001111111111111111',
'11111111111111111111111111111111111111111001111111111111111111111111111111111111111111111111111111111111011111111111111111111111',
'11111111111111111111111111111111111111111001000011111111111111111111110011111111111101111111111111111110101111111111111111111111',
'11111111111111111111111111111111111111111111101100111111111011111111111111110111111111001111111111111111101111111011111111111111',
'11111111111111111111111111111111111110111111111110111111110110111111111111111111111111111111111111111111111111111101111111111111',
'11111111111111111111111111111111111010001111111111111111111000000010000000111111111111111111111111111111111111111000001111111111',
'11111111101111111111111111111111111111111111111111111111100100000000000001111111111111111111111111111111111111111100101111111111',
'11100111101111111111111111111111111111111111111111111111100000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111100111111111111111111111111111111111110000000000000000001111111111111111111111111111111111111111111111111110',
'11111111111111110111111111111111111101111111111111110100111001000000000000011111111111111111111100111111111111111111111111111111',
'11111111100001111100111111111111111111111111111111110011111111000000000000000111111111111111111111111111111111111111111110111111',
'11111111100011111111111111111111111101111110011111111111111100000000000000000000111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111011111111111111100000000000000000000111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111100000000000000001111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111000000000000100011111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111010000000000000001111111111111111011111111111111111111111111111111',
'01111111111111111111111111111111111111111111111111111111111111110000000000000000011111111111111011111111111111111111111111111111',
'10011111111111111111111111111111111111111111111111111111110111010001000000000000000000111111111111111111111111111111111111111111',
'11111111111011111111111111111111111111111111111111111111101111000010000000000000000010110000000101111111111110011111111111111111',
'11111111111111111111111111111101111100000000000001000001110000001000000000000000000000000000000010001111111110111111111111111111',
'11111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111',
'11111111111111111111111111111111111111101110000000000000000000000000000000000000000000000000000000000011111111111111111111111111',
'11111111111111111111111111111111111111111011000000000000000000000000000000000000000000000000000000000111111111111111111111110011',
'11111111111111111111111111111111111111111111111111111100111111110000111000000011000000000000000000000111111111111111111111111111',
'11111111111111111111111110001111111111111111111111110011111111111111110000000011000010000000000000000111111111111111111111111111',
'11111111111111111111111111111111111111110111111111111111111111111111111100001111111111111100000001111111111111111111111111111110',
'11111111111011111111111111111111111111110111111110111111111111111111111100001111111111111100011011111111111111111111111111111111',
'11111111111100111111111111111111111111111111111110001111111111111111111111111111111111110111110111111111111111111111111111111111',
'11111111111110111111111111111111001111111111111111100111111111111111111111111111111111110111111111111111111111001111111111111110',
'11111111111110011111111111111111011111110011111111110111111111111111111111110001111111111111111111111111111111001111111111111111',
'11111111111111111111111111111111111111111111111111111111111111110011111111110111111111111111111111111111111111001111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111001111110111111111111111111111111111111111111111111111111111111',
'00111111111111111111111111111111111111111111111111111111111111111111111001111111111111110011111111111111111111111111111101111111',
'00011111111111111111111111111111111111111111111111111111111111111111111110111111111111110011111111111111111111111111111000011110',
'11111111111111111111111111111111111011111111111111111111011111111111111011111111111111111111111111111111111111111111111111111111',
'11111111111111111011111111111111111101011111111111111110011111111110110111111100111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111000111110111111111111111111111111111111111111111111111111111',
'11111111111111001111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111101111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110',
'11111101111111111110111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110',
'11111011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110',
'11111111111111111111111111111111111111011111111111111111111111111111111111111111111111111111111111111111111111111111111111111110',
'11111111111111111111111111111111111111011111111111111111111111111111111111111111111111111111111111111111111111111111111111111110',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111110',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111011111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111110111111111111111111111111111101111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111011111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
]
| ["[email protected]"] |
a55b52f2531f32ec37a7670048412895cfa27205 | dca653bb975528bd1b8ab2547f6ef4f48e15b7b7 | tags/wxPy-2.8.7.1/wxPython/demo/Main.py | fea307faed9cebc60ab5a6e85e0b46aec0ffebfe | [] | no_license | czxxjtu/wxPython-1 | 51ca2f62ff6c01722e50742d1813f4be378c0517 | 6a7473c258ea4105f44e31d140ea5c0ae6bc46d8 | refs/heads/master | 2021-01-15T12:09:59.328778 | 2015-01-05T20:55:10 | 2015-01-05T20:55:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78,803 | py |
#!/bin/env python
#----------------------------------------------------------------------------
# Name: Main.py
# Purpose: Testing lots of stuff, controls, window types, etc.
#
# Author: Robin Dunn
#
# Created: A long time ago, in a galaxy far, far away...
# RCS-ID: $Id$
# Copyright: (c) 1999 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------------
# FIXME List:
# * Problems with flickering related to ERASE_BACKGROUND
# and the splitters. Might be a problem with this 2.5 beta...?
# UPDATE: can't see on 2.5.2 GTK - maybe just a faster machine :)
# * Demo Code menu?
# * Annoying switching between tabs and resulting flicker
# how to replace a page in the notebook without deleting/adding?
# Where is SetPage!? tried freeze...tried reparent of dummy panel....
# AG: It looks like this issue is fixed by Freeze()ing and Thaw()ing the
# main frame and not the notebook
# TODO List:
# * UI design more professional (is the new version more professional?)
# * save file positions (new field in demoModules) (@ LoadDemoSource)
# * Update main overview
# * Why don't we move _treeList into a separate module
import sys, os, time, traceback, types
import wx # This module uses the new wx namespace
import wx.aui
import wx.html
import images
# For debugging
##wx.Trap();
##print "wx.VERSION_STRING = %s (%s)" % (wx.VERSION_STRING, wx.USE_UNICODE and 'unicode' or 'ansi')
##print "pid:", os.getpid()
##raw_input("Press Enter...")
#---------------------------------------------------------------------------
USE_CUSTOMTREECTRL = False
ALLOW_AUI_FLOATING = False
DEFAULT_PERSPECTIVE = "Default Perspective"
#---------------------------------------------------------------------------
_demoPngs = ["overview", "recent", "frame", "dialog", "moredialog", "core",
"book", "customcontrol", "morecontrols", "layout", "process", "clipboard",
"images", "miscellaneous"]
_treeList = [
# new stuff
('Recent Additions/Updates', [
'RichTextCtrl',
'Treebook',
'Toolbook',
'BitmapFromBuffer',
'RawBitmapAccess',
'DragScroller',
'DelayedResult',
'ExpandoTextCtrl',
'ButtonPanel',
'FlatNotebook',
'CustomTreeCtrl',
'AboutBox',
'AlphaDrawing',
'GraphicsContext',
'CollapsiblePane',
'ComboCtrl',
'OwnerDrawnComboBox',
'BitmapComboBox',
'I18N',
'Img2PyArtProvider',
'SearchCtrl',
'SizedControls',
'AUI_MDI',
'TreeMixin',
'AdjustChannels',
'RendererNative',
]),
# managed windows == things with a (optional) caption you can close
('Frames and Dialogs', [
'AUI_DockingWindowMgr',
'AUI_MDI',
'Dialog',
'Frame',
'MDIWindows',
'MiniFrame',
'Wizard',
]),
# the common dialogs
('Common Dialogs', [
'AboutBox',
'ColourDialog',
'DirDialog',
'FileDialog',
'FindReplaceDialog',
'FontDialog',
'MessageDialog',
'MultiChoiceDialog',
'PageSetupDialog',
'PrintDialog',
'ProgressDialog',
'SingleChoiceDialog',
'TextEntryDialog',
]),
# dialogs from libraries
('More Dialogs', [
'ImageBrowser',
'ScrolledMessageDialog',
]),
# core controls
('Core Windows/Controls', [
'BitmapButton',
'Button',
'CheckBox',
'CheckListBox',
'Choice',
'ComboBox',
'Gauge',
'Grid',
'Grid_MegaExample',
'ListBox',
'ListCtrl',
'ListCtrl_virtual',
'ListCtrl_edit',
'Menu',
'PopupMenu',
'PopupWindow',
'RadioBox',
'RadioButton',
'SashWindow',
'ScrolledWindow',
'SearchCtrl',
'Slider',
'SpinButton',
'SpinCtrl',
'SplitterWindow',
'StaticBitmap',
'StaticBox',
'StaticText',
'StatusBar',
'StockButtons',
'TextCtrl',
'ToggleButton',
'ToolBar',
'TreeCtrl',
'Validator',
]),
('"Book" Controls', [
'AUI_Notebook',
'Choicebook',
'Listbook',
'Notebook',
'Toolbook',
'Treebook',
]),
('Custom Controls', [
'AnalogClock',
'ButtonPanel',
'ColourSelect',
'ComboTreeBox',
'CustomTreeCtrl',
'Editor',
'FlatNotebook',
'GenericButtons',
'GenericDirCtrl',
'LEDNumberCtrl',
'MultiSash',
'PopupControl',
'PyColourChooser',
'TreeListCtrl',
]),
# controls coming from other libraries
('More Windows/Controls', [
'ActiveX_FlashWindow',
'ActiveX_IEHtmlWindow',
'ActiveX_PDFWindow',
'BitmapComboBox',
'Calendar',
'CalendarCtrl',
'CheckListCtrlMixin',
'CollapsiblePane',
'ComboCtrl',
'ContextHelp',
'DatePickerCtrl',
'DynamicSashWindow',
'EditableListBox',
'ExpandoTextCtrl',
'FancyText',
'FileBrowseButton',
'FloatBar',
'FloatCanvas',
'FoldPanelBar',
'HtmlWindow',
'HyperLinkCtrl',
'IntCtrl',
'MVCTree',
'MaskedEditControls',
'MaskedNumCtrl',
'MediaCtrl',
'MultiSplitterWindow',
'OwnerDrawnComboBox',
'Pickers',
'PyCrust',
'PyPlot',
'PyShell',
'RichTextCtrl',
'ScrolledPanel',
'SplitTree',
'StyledTextCtrl_1',
'StyledTextCtrl_2',
'TablePrint',
'Throbber',
'Ticker',
'TimeCtrl',
'TreeMixin',
'VListBox',
]),
# How to lay out the controls in a frame/dialog
('Window Layout', [
'GridBagSizer',
'LayoutAnchors',
'LayoutConstraints',
'Layoutf',
'RowColSizer',
'ScrolledPanel',
'SizedControls',
'Sizers',
'XmlResource',
'XmlResourceHandler',
'XmlResourceSubclass',
]),
# ditto
('Process and Events', [
'DelayedResult',
'EventManager',
'KeyEvents',
'Process',
'PythonEvents',
'Threads',
'Timer',
##'infoframe', # needs better explanation and some fixing
]),
# Clipboard and DnD
('Clipboard and DnD', [
'CustomDragAndDrop',
'DragAndDrop',
'URLDragAndDrop',
]),
# Images
('Using Images', [
'AdjustChannels',
'AlphaDrawing',
'AnimateCtrl',
'ArtProvider',
'BitmapFromBuffer',
'Cursor',
'DragImage',
'Image',
'ImageAlpha',
'ImageFromStream',
'Img2PyArtProvider',
'Mask',
'RawBitmapAccess',
'Throbber',
]),
# Other stuff
('Miscellaneous', [
'AlphaDrawing',
'ColourDB',
##'DialogUnits', # needs more explanations
'DragScroller',
'DrawXXXList',
'FileHistory',
'FontEnumerator',
'GraphicsContext',
'GLCanvas',
'I18N',
'Joystick',
'MimeTypesManager',
'MouseGestures',
'OGL',
'PrintFramework',
'PseudoDC',
'RendererNative',
'ShapedWindow',
'Sound',
'StandardPaths',
'Unicode',
]),
('Check out the samples dir too', [
]),
]
#---------------------------------------------------------------------------
# Show how to derive a custom wxLog class
class MyLog(wx.PyLog):
def __init__(self, textCtrl, logTime=0):
wx.PyLog.__init__(self)
self.tc = textCtrl
self.logTime = logTime
def DoLogString(self, message, timeStamp):
#print message, timeStamp
#if self.logTime:
# message = time.strftime("%X", time.localtime(timeStamp)) + \
# ": " + message
if self.tc:
self.tc.AppendText(message + '\n')
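# MyLog is installed below via wx.Log_SetActiveTarget(MyLog(self.log));
# from that point on every wx.LogMessage() call is appended to the log
# text control docked at the bottom of the demo frame.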
class MyTP(wx.PyTipProvider):
def GetTip(self):
return "This is my tip"
#---------------------------------------------------------------------------
# A class to be used to simply display a message in the demo pane
# rather than running the sample itself.
class MessagePanel(wx.Panel):
def __init__(self, parent, message, caption='', flags=0):
wx.Panel.__init__(self, parent)
# Make widgets
if flags:
artid = None
if flags & wx.ICON_EXCLAMATION:
artid = wx.ART_WARNING
elif flags & wx.ICON_ERROR:
artid = wx.ART_ERROR
elif flags & wx.ICON_QUESTION:
artid = wx.ART_QUESTION
elif flags & wx.ICON_INFORMATION:
artid = wx.ART_INFORMATION
if artid is not None:
bmp = wx.ArtProvider.GetBitmap(artid, wx.ART_MESSAGE_BOX, (32,32))
icon = wx.StaticBitmap(self, -1, bmp)
else:
icon = (32,32) # make a spacer instead
if caption:
caption = wx.StaticText(self, -1, caption)
caption.SetFont(wx.Font(28, wx.SWISS, wx.NORMAL, wx.BOLD))
message = wx.StaticText(self, -1, message)
# add to sizers for layout
tbox = wx.BoxSizer(wx.VERTICAL)
if caption:
tbox.Add(caption)
tbox.Add((10,10))
tbox.Add(message)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((10,10), 1)
hbox.Add(icon)
hbox.Add((10,10))
hbox.Add(tbox)
hbox.Add((10,10), 1)
box = wx.BoxSizer(wx.VERTICAL)
box.Add((10,10), 1)
box.Add(hbox, 0, wx.EXPAND)
box.Add((10,10), 2)
self.SetSizer(box)
self.Fit()
#---------------------------------------------------------------------------
# A class to be used to display source code in the demo. Try using the
# wxSTC in the StyledTextCtrl_2 sample first, fall back to wxTextCtrl
# if there is an error, such as the stc module not being present.
#
try:
##raise ImportError # for testing the alternate implementation
from wx import stc
from StyledTextCtrl_2 import PythonSTC
class DemoCodeEditor(PythonSTC):
def __init__(self, parent):
PythonSTC.__init__(self, parent, -1, style=wx.BORDER_NONE)
self.SetUpEditor()
# Some methods to make it compatible with how the wxTextCtrl is used
def SetValue(self, value):
if wx.USE_UNICODE:
value = value.decode('iso8859_1')
self.SetText(value)
self.EmptyUndoBuffer()
self.SetSavePoint()
def IsModified(self):
return self.GetModify()
def Clear(self):
self.ClearAll()
def SetInsertionPoint(self, pos):
self.SetCurrentPos(pos)
self.SetAnchor(pos)
def ShowPosition(self, pos):
line = self.LineFromPosition(pos)
#self.EnsureVisible(line)
self.GotoLine(line)
def GetLastPosition(self):
return self.GetLength()
def GetPositionFromLine(self, line):
return self.PositionFromLine(line)
def GetRange(self, start, end):
return self.GetTextRange(start, end)
def GetSelection(self):
return self.GetAnchor(), self.GetCurrentPos()
def SetSelection(self, start, end):
self.SetSelectionStart(start)
self.SetSelectionEnd(end)
def SelectLine(self, line):
start = self.PositionFromLine(line)
end = self.GetLineEndPosition(line)
self.SetSelection(start, end)
def SetUpEditor(self):
"""
This method carries out the work of setting up the demo editor.
            It's separate so as not to clutter up the init code.
"""
import keyword
self.SetLexer(stc.STC_LEX_PYTHON)
self.SetKeyWords(0, " ".join(keyword.kwlist))
# Enable folding
self.SetProperty("fold", "1" )
# Highlight tab/space mixing (shouldn't be any)
self.SetProperty("tab.timmy.whinge.level", "1")
# Set left and right margins
self.SetMargins(2,2)
# Set up the numbers in the margin for margin #1
self.SetMarginType(1, wx.stc.STC_MARGIN_NUMBER)
# Reasonable value for, say, 4-5 digits using a mono font (40 pix)
self.SetMarginWidth(1, 40)
# Indentation and tab stuff
            self.SetIndent(4)                # Prescribed indent size for wx
            self.SetIndentationGuides(True)  # Show indent guides
            self.SetBackSpaceUnIndents(True) # Backspace unindents rather than delete 1 space
            self.SetTabIndents(True)         # Tab key indents
            self.SetTabWidth(4)              # Prescribed tab size for wx
            self.SetUseTabs(False)           # Use spaces rather than tabs, or
                                             # TabTimmy will complain!
# White space
self.SetViewWhiteSpace(False) # Don't view white space
# EOL: Since we are loading/saving ourselves, and the
# strings will always have \n's in them, set the STC to
# edit them that way.
self.SetEOLMode(wx.stc.STC_EOL_LF)
self.SetViewEOL(False)
# No right-edge mode indicator
self.SetEdgeMode(stc.STC_EDGE_NONE)
# Setup a margin to hold fold markers
self.SetMarginType(2, stc.STC_MARGIN_SYMBOL)
self.SetMarginMask(2, stc.STC_MASK_FOLDERS)
self.SetMarginSensitive(2, True)
self.SetMarginWidth(2, 12)
# and now set up the fold markers
self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_BOXPLUSCONNECTED, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_BOXMINUSCONNECTED, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_TCORNER, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_LCORNER, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_VLINE, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_BOXPLUS, "white", "black")
self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_BOXMINUS, "white", "black")
# Global default style
if wx.Platform == '__WXMSW__':
self.StyleSetSpec(stc.STC_STYLE_DEFAULT,
'fore:#000000,back:#FFFFFF,face:Courier New,size:9')
elif wx.Platform == '__WXMAC__':
# TODO: if this looks fine on Linux too, remove the Mac-specific case
# and use this whenever OS != MSW.
self.StyleSetSpec(stc.STC_STYLE_DEFAULT,
'fore:#000000,back:#FFFFFF,face:Monaco')
else:
self.StyleSetSpec(stc.STC_STYLE_DEFAULT,
'fore:#000000,back:#FFFFFF,face:Courier,size:9')
# Clear styles and revert to default.
self.StyleClearAll()
# Following style specs only indicate differences from default.
# The rest remains unchanged.
# Line numbers in margin
self.StyleSetSpec(wx.stc.STC_STYLE_LINENUMBER,'fore:#000000,back:#99A9C2')
# Highlighted brace
self.StyleSetSpec(wx.stc.STC_STYLE_BRACELIGHT,'fore:#00009D,back:#FFFF00')
# Unmatched brace
self.StyleSetSpec(wx.stc.STC_STYLE_BRACEBAD,'fore:#00009D,back:#FF0000')
# Indentation guide
self.StyleSetSpec(wx.stc.STC_STYLE_INDENTGUIDE, "fore:#CDCDCD")
# Python styles
self.StyleSetSpec(wx.stc.STC_P_DEFAULT, 'fore:#000000')
# Comments
self.StyleSetSpec(wx.stc.STC_P_COMMENTLINE, 'fore:#008000,back:#F0FFF0')
self.StyleSetSpec(wx.stc.STC_P_COMMENTBLOCK, 'fore:#008000,back:#F0FFF0')
# Numbers
self.StyleSetSpec(wx.stc.STC_P_NUMBER, 'fore:#008080')
# Strings and characters
self.StyleSetSpec(wx.stc.STC_P_STRING, 'fore:#800080')
self.StyleSetSpec(wx.stc.STC_P_CHARACTER, 'fore:#800080')
# Keywords
self.StyleSetSpec(wx.stc.STC_P_WORD, 'fore:#000080,bold')
# Triple quotes
self.StyleSetSpec(wx.stc.STC_P_TRIPLE, 'fore:#800080,back:#FFFFEA')
self.StyleSetSpec(wx.stc.STC_P_TRIPLEDOUBLE, 'fore:#800080,back:#FFFFEA')
# Class names
self.StyleSetSpec(wx.stc.STC_P_CLASSNAME, 'fore:#0000FF,bold')
# Function names
self.StyleSetSpec(wx.stc.STC_P_DEFNAME, 'fore:#008080,bold')
# Operators
self.StyleSetSpec(wx.stc.STC_P_OPERATOR, 'fore:#800000,bold')
# Identifiers. I leave this as not bold because everything seems
            # to be an identifier if it doesn't match the above criteria
self.StyleSetSpec(wx.stc.STC_P_IDENTIFIER, 'fore:#000000')
# Caret color
self.SetCaretForeground("BLUE")
# Selection background
self.SetSelBackground(1, '#66CCFF')
self.SetSelBackground(True, wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHT))
self.SetSelForeground(True, wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT))
def RegisterModifiedEvent(self, eventHandler):
self.Bind(wx.stc.EVT_STC_CHANGE, eventHandler)
except ImportError:
class DemoCodeEditor(wx.TextCtrl):
def __init__(self, parent):
wx.TextCtrl.__init__(self, parent, -1, style =
wx.TE_MULTILINE | wx.HSCROLL | wx.TE_RICH2 | wx.TE_NOHIDESEL)
def RegisterModifiedEvent(self, eventHandler):
self.Bind(wx.EVT_TEXT, eventHandler)
def SetReadOnly(self, flag):
self.SetEditable(not flag)
# NOTE: STC already has this method
def GetText(self):
return self.GetValue()
def GetPositionFromLine(self, line):
return self.XYToPosition(0,line)
def GotoLine(self, line):
pos = self.GetPositionFromLine(line)
self.SetInsertionPoint(pos)
self.ShowPosition(pos)
def SelectLine(self, line):
start = self.GetPositionFromLine(line)
end = start + self.GetLineLength(line)
self.SetSelection(start, end)
#---------------------------------------------------------------------------
# Constants for module versions
modOriginal = 0
modModified = 1
modDefault = modOriginal
#---------------------------------------------------------------------------
class DemoCodePanel(wx.Panel):
"""Panel for the 'Demo Code' tab"""
def __init__(self, parent, mainFrame):
wx.Panel.__init__(self, parent, size=(1,1))
if 'wxMSW' in wx.PlatformInfo:
self.Hide()
self.mainFrame = mainFrame
self.editor = DemoCodeEditor(self)
self.editor.RegisterModifiedEvent(self.OnCodeModified)
self.btnSave = wx.Button(self, -1, "Save Changes")
self.btnRestore = wx.Button(self, -1, "Delete Modified")
self.btnSave.Enable(False)
self.btnSave.Bind(wx.EVT_BUTTON, self.OnSave)
self.btnRestore.Bind(wx.EVT_BUTTON, self.OnRestore)
self.radioButtons = { modOriginal: wx.RadioButton(self, -1, "Original", style = wx.RB_GROUP),
modModified: wx.RadioButton(self, -1, "Modified") }
self.controlBox = wx.BoxSizer(wx.HORIZONTAL)
self.controlBox.Add(wx.StaticText(self, -1, "Active Version:"), 0,
wx.RIGHT | wx.LEFT | wx.ALIGN_CENTER_VERTICAL, 5)
for modID, radioButton in self.radioButtons.items():
self.controlBox.Add(radioButton, 0, wx.EXPAND | wx.RIGHT, 5)
radioButton.modID = modID # makes it easier for the event handler
radioButton.Bind(wx.EVT_RADIOBUTTON, self.OnRadioButton)
self.controlBox.Add(self.btnSave, 0, wx.RIGHT, 5)
self.controlBox.Add(self.btnRestore, 0)
self.box = wx.BoxSizer(wx.VERTICAL)
self.box.Add(self.controlBox, 0, wx.EXPAND)
self.box.Add(wx.StaticLine(self), 0, wx.EXPAND)
self.box.Add(self.editor, 1, wx.EXPAND)
self.box.Fit(self)
self.SetSizer(self.box)
# Loads a demo from a DemoModules object
def LoadDemo(self, demoModules):
self.demoModules = demoModules
if (modDefault == modModified) and demoModules.Exists(modModified):
demoModules.SetActive(modModified)
else:
demoModules.SetActive(modOriginal)
self.radioButtons[demoModules.GetActiveID()].Enable(True)
self.ActiveModuleChanged()
def ActiveModuleChanged(self):
self.LoadDemoSource(self.demoModules.GetSource())
self.UpdateControlState()
self.mainFrame.pnl.Freeze()
self.ReloadDemo()
self.mainFrame.pnl.Thaw()
def LoadDemoSource(self, source):
self.editor.Clear()
self.editor.SetValue(source)
self.JumpToLine(0)
self.btnSave.Enable(False)
def JumpToLine(self, line, highlight=False):
self.editor.GotoLine(line)
self.editor.SetFocus()
if highlight:
self.editor.SelectLine(line)
def UpdateControlState(self):
active = self.demoModules.GetActiveID()
# Update the radio/restore buttons
for moduleID in self.radioButtons:
btn = self.radioButtons[moduleID]
if moduleID == active:
btn.SetValue(True)
else:
btn.SetValue(False)
if self.demoModules.Exists(moduleID):
btn.Enable(True)
if moduleID == modModified:
self.btnRestore.Enable(True)
else:
btn.Enable(False)
if moduleID == modModified:
self.btnRestore.Enable(False)
def OnRadioButton(self, event):
radioSelected = event.GetEventObject()
modSelected = radioSelected.modID
if modSelected != self.demoModules.GetActiveID():
busy = wx.BusyInfo("Reloading demo module...")
self.demoModules.SetActive(modSelected)
self.ActiveModuleChanged()
def ReloadDemo(self):
if self.demoModules.name != __name__:
self.mainFrame.RunModule()
def OnCodeModified(self, event):
self.btnSave.Enable(self.editor.IsModified())
def OnSave(self, event):
if self.demoModules.Exists(modModified):
if self.demoModules.GetActiveID() == modOriginal:
overwriteMsg = "You are about to overwrite an already existing modified copy\n" + \
"Do you want to continue?"
dlg = wx.MessageDialog(self, overwriteMsg, "wxPython Demo",
wx.YES_NO | wx.NO_DEFAULT| wx.ICON_EXCLAMATION)
                result = dlg.ShowModal()
                dlg.Destroy()
                if result == wx.ID_NO:
                    return
self.demoModules.SetActive(modModified)
modifiedFilename = GetModifiedFilename(self.demoModules.name)
# Create the demo directory if one doesn't already exist
if not os.path.exists(GetModifiedDirectory()):
try:
os.makedirs(GetModifiedDirectory())
if not os.path.exists(GetModifiedDirectory()):
wx.LogMessage("BUG: Created demo directory but it still doesn't exist")
raise AssertionError
except:
wx.LogMessage("Error creating demo directory: %s" % GetModifiedDirectory())
return
else:
wx.LogMessage("Created directory for modified demos: %s" % GetModifiedDirectory())
# Save
f = open(modifiedFilename, "wt")
source = self.editor.GetText()
try:
f.write(source)
finally:
f.close()
busy = wx.BusyInfo("Reloading demo module...")
self.demoModules.LoadFromFile(modModified, modifiedFilename)
self.ActiveModuleChanged()
self.mainFrame.SetTreeModified(True)
def OnRestore(self, event): # Handles the "Delete Modified" button
modifiedFilename = GetModifiedFilename(self.demoModules.name)
self.demoModules.Delete(modModified)
os.unlink(modifiedFilename) # Delete the modified copy
busy = wx.BusyInfo("Reloading demo module...")
self.ActiveModuleChanged()
self.mainFrame.SetTreeModified(False)
#---------------------------------------------------------------------------
def opj(path):
"""Convert paths to the platform-specific separator"""
    st = os.path.join(*path.split('/'))
# HACK: on Linux, a leading / gets lost...
if path.startswith('/'):
st = '/' + st
return st
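# A small usage sketch: on Windows, opj("bitmaps/splash.png") yields
# "bitmaps\\splash.png", while on posix systems the path comes back
# unchanged (with a leading '/' preserved by the hack above).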
def GetDataDir():
"""
Return the standard location on this platform for application data
"""
sp = wx.StandardPaths.Get()
return sp.GetUserDataDir()
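# Note: the location is platform-dependent and derives from the app name
# set in MyApp.OnInit ("wxPyDemo"); e.g. on Linux this is typically
# ~/.wxPyDemo, while Windows uses a folder under the user's profile.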
def GetModifiedDirectory():
"""
Returns the directory where modified versions of the demo files
are stored
"""
return os.path.join(GetDataDir(), "modified")
def GetModifiedFilename(name):
"""
Returns the filename of the modified version of the specified demo
"""
if not name.endswith(".py"):
name = name + ".py"
return os.path.join(GetModifiedDirectory(), name)
def GetOriginalFilename(name):
"""
Returns the filename of the original version of the specified demo
"""
if not name.endswith(".py"):
name = name + ".py"
return name
def DoesModifiedExist(name):
"""Returns whether the specified demo has a modified copy"""
if os.path.exists(GetModifiedFilename(name)):
return True
else:
return False
def GetConfig():
if not os.path.exists(GetDataDir()):
os.makedirs(GetDataDir())
config = wx.FileConfig(
localFilename=os.path.join(GetDataDir(), "options"))
return config
def SearchDemo(name, keyword):
""" Returns whether a demo contains the search keyword or not. """
fid = open(GetOriginalFilename(name), "rt")
fullText = fid.read()
fid.close()
if type(keyword) is unicode:
fullText = fullText.decode('iso8859-1')
if fullText.find(keyword) >= 0:
return True
return False
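# For example, SearchDemo("Button", "wx.Button") is True whenever the
# Button.py sample mentions "wx.Button" anywhere in its source text.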
#---------------------------------------------------------------------------
class ModuleDictWrapper:
"""Emulates a module with a dynamically compiled __dict__"""
def __init__(self, dict):
self.dict = dict
def __getattr__(self, name):
if name in self.dict:
return self.dict[name]
else:
raise AttributeError
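# A minimal sketch of the idea (hypothetical names): the demo source is
# compiled and exec'd into a plain dict, and the wrapper then exposes that
# dict through attribute access, like an imported module:
#
#   ns = {}
#   exec compile("answer = 42", "<demo>", "exec") in ns
#   ModuleDictWrapper(ns).answer   # -> 42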
class DemoModules:
"""
Dynamically manages the original/modified versions of a demo
module
"""
def __init__(self, name):
self.modActive = -1
self.name = name
# (dict , source , filename , description , error information )
# ( 0 , 1 , 2 , 3 , 4 )
self.modules = [[None, "" , "" , "<original>" , None],
[None, "" , "" , "<modified>" , None]]
# load original module
self.LoadFromFile(modOriginal, GetOriginalFilename(name))
self.SetActive(modOriginal)
# load modified module (if one exists)
if DoesModifiedExist(name):
self.LoadFromFile(modModified, GetModifiedFilename(name))
def LoadFromFile(self, modID, filename):
self.modules[modID][2] = filename
file = open(filename, "rt")
self.LoadFromSource(modID, file.read())
file.close()
def LoadFromSource(self, modID, source):
self.modules[modID][1] = source
self.LoadDict(modID)
def LoadDict(self, modID):
if self.name != __name__:
source = self.modules[modID][1]
description = self.modules[modID][2]
description = description.encode(sys.getfilesystemencoding())
try:
self.modules[modID][0] = {}
code = compile(source, description, "exec")
exec code in self.modules[modID][0]
except:
self.modules[modID][4] = DemoError(sys.exc_info())
self.modules[modID][0] = None
else:
self.modules[modID][4] = None
def SetActive(self, modID):
if modID != modOriginal and modID != modModified:
raise LookupError
else:
self.modActive = modID
def GetActive(self):
dict = self.modules[self.modActive][0]
if dict is None:
return None
else:
return ModuleDictWrapper(dict)
def GetActiveID(self):
return self.modActive
def GetSource(self, modID = None):
if modID is None:
modID = self.modActive
return self.modules[modID][1]
def GetFilename(self, modID = None):
if modID is None:
modID = self.modActive
return self.modules[self.modActive][2]
def GetErrorInfo(self, modID = None):
if modID is None:
modID = self.modActive
return self.modules[self.modActive][4]
def Exists(self, modID):
return self.modules[modID][1] != ""
def UpdateFile(self, modID = None):
"""Updates the file from which a module was loaded
with (possibly updated) source"""
if modID is None:
modID = self.modActive
source = self.modules[modID][1]
filename = self.modules[modID][2]
try:
file = open(filename, "wt")
file.write(source)
finally:
file.close()
def Delete(self, modID):
if self.modActive == modID:
self.SetActive(0)
self.modules[modID][0] = None
self.modules[modID][1] = ""
self.modules[modID][2] = ""
#---------------------------------------------------------------------------
class DemoError:
"""Wraps and stores information about the current exception"""
def __init__(self, exc_info):
import copy
excType, excValue = exc_info[:2]
# traceback list entries: (filename, line number, function name, text)
self.traceback = traceback.extract_tb(exc_info[2])
# --Based on traceback.py::format_exception_only()--
if type(excType) == types.ClassType:
self.exception_type = excType.__name__
else:
self.exception_type = excType
# If it's a syntax error, extra information needs
# to be added to the traceback
if excType is SyntaxError:
try:
msg, (filename, lineno, self.offset, line) = excValue
except:
pass
else:
if not filename:
filename = "<string>"
line = line.strip()
self.traceback.append( (filename, lineno, "", line) )
excValue = msg
try:
self.exception_details = str(excValue)
except:
            self.exception_details = "<unprintable %s object>" % type(excValue).__name__
del exc_info
def __str__(self):
ret = "Type %s \n \
Traceback: %s \n \
Details : %s" % ( str(self.exception_type), str(self.traceback), self.exception_details )
return ret
#---------------------------------------------------------------------------
class DemoErrorPanel(wx.Panel):
"""Panel put into the demo tab when the demo fails to run due to errors"""
def __init__(self, parent, codePanel, demoError, log):
wx.Panel.__init__(self, parent, -1)#, style=wx.NO_FULL_REPAINT_ON_RESIZE)
self.codePanel = codePanel
self.nb = parent
self.log = log
self.box = wx.BoxSizer(wx.VERTICAL)
# Main Label
self.box.Add(wx.StaticText(self, -1, "An error has occurred while trying to run the demo")
, 0, wx.ALIGN_CENTER | wx.TOP, 10)
# Exception Information
boxInfo = wx.StaticBox(self, -1, "Exception Info" )
boxInfoSizer = wx.StaticBoxSizer(boxInfo, wx.VERTICAL ) # Used to center the grid within the box
boxInfoGrid = wx.FlexGridSizer(0, 2, 0, 0)
textFlags = wx.ALIGN_RIGHT | wx.LEFT | wx.RIGHT | wx.TOP
boxInfoGrid.Add(wx.StaticText(self, -1, "Type: "), 0, textFlags, 5 )
boxInfoGrid.Add(wx.StaticText(self, -1, str(demoError.exception_type)) , 0, textFlags, 5 )
boxInfoGrid.Add(wx.StaticText(self, -1, "Details: ") , 0, textFlags, 5 )
boxInfoGrid.Add(wx.StaticText(self, -1, demoError.exception_details) , 0, textFlags, 5 )
boxInfoSizer.Add(boxInfoGrid, 0, wx.ALIGN_CENTRE | wx.ALL, 5 )
self.box.Add(boxInfoSizer, 0, wx.ALIGN_CENTER | wx.ALL, 5)
# Set up the traceback list
# This one automatically resizes last column to take up remaining space
from ListCtrl import TestListCtrl
self.list = TestListCtrl(self, -1, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
self.list.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick)
self.list.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected)
self.list.InsertColumn(0, "Filename")
self.list.InsertColumn(1, "Line", wx.LIST_FORMAT_RIGHT)
self.list.InsertColumn(2, "Function")
self.list.InsertColumn(3, "Code")
self.InsertTraceback(self.list, demoError.traceback)
self.list.SetColumnWidth(0, wx.LIST_AUTOSIZE)
self.list.SetColumnWidth(2, wx.LIST_AUTOSIZE)
self.box.Add(wx.StaticText(self, -1, "Traceback:")
, 0, wx.ALIGN_CENTER | wx.TOP, 5)
self.box.Add(self.list, 1, wx.GROW | wx.ALIGN_CENTER | wx.ALL, 5)
self.box.Add(wx.StaticText(self, -1, "Entries from the demo module are shown in blue\n"
+ "Double-click on them to go to the offending line")
, 0, wx.ALIGN_CENTER | wx.BOTTOM, 5)
self.box.Fit(self)
self.SetSizer(self.box)
def InsertTraceback(self, list, traceback):
#Add the traceback data
for x in range(len(traceback)):
data = traceback[x]
list.InsertStringItem(x, os.path.basename(data[0])) # Filename
list.SetStringItem(x, 1, str(data[1])) # Line
list.SetStringItem(x, 2, str(data[2])) # Function
list.SetStringItem(x, 3, str(data[3])) # Code
# Check whether this entry is from the demo module
if data[0] == "<original>" or data[0] == "<modified>": # FIXME: make more generalised
self.list.SetItemData(x, int(data[1])) # Store line number for easy access
# Give it a blue colour
item = self.list.GetItem(x)
item.SetTextColour(wx.BLUE)
self.list.SetItem(item)
else:
self.list.SetItemData(x, -1) # Editor can't jump into this one's code
def OnItemSelected(self, event):
# This occurs before OnDoubleClick and can be used to set the
# currentItem. OnDoubleClick doesn't get a wxListEvent....
self.currentItem = event.m_itemIndex
event.Skip()
def OnDoubleClick(self, event):
# If double-clicking on a demo's entry, jump to the line number
line = self.list.GetItemData(self.currentItem)
if line != -1:
self.nb.SetSelection(1) # Switch to the code viewer tab
wx.CallAfter(self.codePanel.JumpToLine, line-1, True)
event.Skip()
#---------------------------------------------------------------------------
class DemoTaskBarIcon(wx.TaskBarIcon):
TBMENU_RESTORE = wx.NewId()
TBMENU_CLOSE = wx.NewId()
TBMENU_CHANGE = wx.NewId()
TBMENU_REMOVE = wx.NewId()
def __init__(self, frame):
wx.TaskBarIcon.__init__(self)
self.frame = frame
# Set the image
icon = self.MakeIcon(images.getWXPdemoImage())
self.SetIcon(icon, "wxPython Demo")
self.imgidx = 1
# bind some events
self.Bind(wx.EVT_TASKBAR_LEFT_DCLICK, self.OnTaskBarActivate)
self.Bind(wx.EVT_MENU, self.OnTaskBarActivate, id=self.TBMENU_RESTORE)
self.Bind(wx.EVT_MENU, self.OnTaskBarClose, id=self.TBMENU_CLOSE)
self.Bind(wx.EVT_MENU, self.OnTaskBarChange, id=self.TBMENU_CHANGE)
self.Bind(wx.EVT_MENU, self.OnTaskBarRemove, id=self.TBMENU_REMOVE)
def CreatePopupMenu(self):
"""
This method is called by the base class when it needs to popup
the menu for the default EVT_RIGHT_DOWN event. Just create
the menu how you want it and return it from this function,
the base class takes care of the rest.
"""
menu = wx.Menu()
menu.Append(self.TBMENU_RESTORE, "Restore wxPython Demo")
menu.Append(self.TBMENU_CLOSE, "Close wxPython Demo")
menu.AppendSeparator()
menu.Append(self.TBMENU_CHANGE, "Change the TB Icon")
menu.Append(self.TBMENU_REMOVE, "Remove the TB Icon")
return menu
def MakeIcon(self, img):
"""
The various platforms have different requirements for the
icon size...
"""
if "wxMSW" in wx.PlatformInfo:
img = img.Scale(16, 16)
elif "wxGTK" in wx.PlatformInfo:
img = img.Scale(22, 22)
        # wxMac can be any size up to 128x128, so leave the source img alone....
icon = wx.IconFromBitmap(img.ConvertToBitmap() )
return icon
def OnTaskBarActivate(self, evt):
if self.frame.IsIconized():
self.frame.Iconize(False)
if not self.frame.IsShown():
self.frame.Show(True)
self.frame.Raise()
def OnTaskBarClose(self, evt):
wx.CallAfter(self.frame.Close)
def OnTaskBarChange(self, evt):
names = [ "WXPdemo", "Mondrian", "Pencil", "Carrot" ]
name = names[self.imgidx]
getFunc = getattr(images, "get%sImage" % name)
self.imgidx += 1
if self.imgidx >= len(names):
self.imgidx = 0
icon = self.MakeIcon(getFunc())
self.SetIcon(icon, "This is a new icon: " + name)
def OnTaskBarRemove(self, evt):
self.RemoveIcon()
#---------------------------------------------------------------------------
class wxPythonDemo(wx.Frame):
overviewText = "wxPython Overview"
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, -1, title, size = (970, 720),
style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE)
self.SetMinSize((640,480))
# Use a panel under the AUI panes in order to work around a
# bug on PPC Macs
pnl = wx.Panel(self)
self.pnl = pnl
self.mgr = wx.aui.AuiManager()
self.mgr.SetManagedWindow(pnl)
self.loaded = False
self.cwd = os.getcwd()
self.curOverview = ""
self.demoPage = None
self.codePage = None
self.shell = None
self.firstTime = True
self.finddlg = None
icon = images.getWXPdemoIcon()
self.SetIcon(icon)
try:
self.tbicon = DemoTaskBarIcon(self)
except:
self.tbicon = None
self.otherWin = None
self.Bind(wx.EVT_IDLE, self.OnIdle)
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
self.Bind(wx.EVT_ICONIZE, self.OnIconfiy)
self.Bind(wx.EVT_MAXIMIZE, self.OnMaximize)
self.Centre(wx.BOTH)
self.CreateStatusBar(1, wx.ST_SIZEGRIP)
self.dying = False
self.skipLoad = False
def EmptyHandler(evt): pass
self.ReadConfigurationFile()
# Create a Notebook
self.nb = wx.Notebook(pnl, -1, style=wx.CLIP_CHILDREN)
imgList = wx.ImageList(16, 16)
for png in ["overview", "code", "demo"]:
bmp = images.catalog[png].getBitmap()
imgList.Add(bmp)
self.nb.AssignImageList(imgList)
self.BuildMenuBar()
self.finddata = wx.FindReplaceData()
self.finddata.SetFlags(wx.FR_DOWN)
# Create a TreeCtrl
leftPanel = wx.Panel(pnl, style=wx.TAB_TRAVERSAL|wx.CLIP_CHILDREN)
self.treeMap = {}
self.searchItems = {}
self.tree = wxPythonDemoTree(leftPanel)
self.filter = wx.SearchCtrl(leftPanel, style=wx.TE_PROCESS_ENTER)
self.filter.ShowCancelButton(True)
self.filter.Bind(wx.EVT_TEXT, self.RecreateTree)
self.filter.Bind(wx.EVT_SEARCHCTRL_CANCEL_BTN, self.OnSearchCancelBtn)
self.filter.Bind(wx.EVT_TEXT_ENTER, self.OnSearch)
searchMenu = wx.Menu()
item = searchMenu.AppendRadioItem(-1, "Sample Name")
self.Bind(wx.EVT_MENU, self.OnSearchMenu, item)
item = searchMenu.AppendRadioItem(-1, "Sample Content")
self.Bind(wx.EVT_MENU, self.OnSearchMenu, item)
self.filter.SetMenu(searchMenu)
self.RecreateTree()
self.tree.SetExpansionState(self.expansionState)
self.tree.Bind(wx.EVT_TREE_ITEM_EXPANDED, self.OnItemExpanded)
self.tree.Bind(wx.EVT_TREE_ITEM_COLLAPSED, self.OnItemCollapsed)
self.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged)
self.tree.Bind(wx.EVT_LEFT_DOWN, self.OnTreeLeftDown)
# Set up a wx.html.HtmlWindow on the Overview Notebook page
# we put it in a panel first because there seems to be a
# refresh bug of some sort (wxGTK) when it is directly in
# the notebook...
if 0: # the old way
self.ovr = wx.html.HtmlWindow(self.nb, -1, size=(400, 400))
self.nb.AddPage(self.ovr, self.overviewText, imageId=0)
else: # hopefully I can remove this hacky code soon, see SF bug #216861
panel = wx.Panel(self.nb, -1, style=wx.CLIP_CHILDREN)
self.ovr = wx.html.HtmlWindow(panel, -1, size=(400, 400))
self.nb.AddPage(panel, self.overviewText, imageId=0)
def OnOvrSize(evt, ovr=self.ovr):
ovr.SetSize(evt.GetSize())
panel.Bind(wx.EVT_SIZE, OnOvrSize)
panel.Bind(wx.EVT_ERASE_BACKGROUND, EmptyHandler)
if "gtk2" in wx.PlatformInfo:
self.ovr.SetStandardFonts()
self.SetOverview(self.overviewText, mainOverview)
# Set up a log window
self.log = wx.TextCtrl(pnl, -1,
style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL)
if wx.Platform == "__WXMAC__":
self.log.MacCheckSpelling(False)
# Set the wxWindows log target to be this textctrl
#wx.Log_SetActiveTarget(wx.LogTextCtrl(self.log))
# But instead of the above we want to show how to use our own wx.Log class
wx.Log_SetActiveTarget(MyLog(self.log))
# for serious debugging
#wx.Log_SetActiveTarget(wx.LogStderr())
#wx.Log_SetTraceMask(wx.TraceMessages)
self.Bind(wx.EVT_ACTIVATE, self.OnActivate)
wx.GetApp().Bind(wx.EVT_ACTIVATE_APP, self.OnAppActivate)
# add the windows to the splitter and split it.
leftBox = wx.BoxSizer(wx.VERTICAL)
leftBox.Add(self.tree, 1, wx.EXPAND)
leftBox.Add(wx.StaticText(leftPanel, label = "Filter Demos:"), 0, wx.TOP|wx.LEFT, 5)
leftBox.Add(self.filter, 0, wx.EXPAND|wx.ALL, 5)
leftPanel.SetSizer(leftBox)
# select initial items
self.nb.SetSelection(0)
self.tree.SelectItem(self.root)
# Load 'Main' module
self.LoadDemo(self.overviewText)
self.loaded = True
# select some other initial module?
if len(sys.argv) > 1:
arg = sys.argv[1]
if arg.endswith('.py'):
arg = arg[:-3]
selectedDemo = self.treeMap.get(arg, None)
if selectedDemo:
self.tree.SelectItem(selectedDemo)
self.tree.EnsureVisible(selectedDemo)
# Use the aui manager to set up everything
self.mgr.AddPane(self.nb, wx.aui.AuiPaneInfo().CenterPane().Name("Notebook"))
self.mgr.AddPane(leftPanel,
wx.aui.AuiPaneInfo().
Left().Layer(2).BestSize((240, -1)).
MinSize((160, -1)).
Floatable(ALLOW_AUI_FLOATING).FloatingSize((240, 700)).
Caption("wxPython Demos").
CloseButton(False).
Name("DemoTree"))
self.mgr.AddPane(self.log,
wx.aui.AuiPaneInfo().
Bottom().BestSize((-1, 150)).
MinSize((-1, 60)).
Floatable(ALLOW_AUI_FLOATING).FloatingSize((500, 160)).
Caption("Demo Log Messages").
CloseButton(False).
Name("LogWindow"))
self.auiConfigurations[DEFAULT_PERSPECTIVE] = self.mgr.SavePerspective()
self.mgr.Update()
self.mgr.SetFlags(self.mgr.GetFlags() ^ wx.aui.AUI_MGR_TRANSPARENT_DRAG)
def ReadConfigurationFile(self):
self.auiConfigurations = {}
self.expansionState = [0, 1]
config = GetConfig()
val = config.Read('ExpansionState')
if val:
self.expansionState = eval(val)
val = config.Read('AUIPerspectives')
if val:
self.auiConfigurations = eval(val)
def BuildMenuBar(self):
# Make a File menu
self.mainmenu = wx.MenuBar()
menu = wx.Menu()
item = menu.Append(-1, '&Redirect Output',
'Redirect print statements to a window',
wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU, self.OnToggleRedirect, item)
exitItem = wx.MenuItem(menu, -1, 'E&xit\tCtrl-Q', 'Get the heck outta here!')
exitItem.SetBitmap(images.catalog['exit'].getBitmap())
menu.AppendItem(exitItem)
self.Bind(wx.EVT_MENU, self.OnFileExit, exitItem)
wx.App.SetMacExitMenuItemId(exitItem.GetId())
self.mainmenu.Append(menu, '&File')
# Make a Demo menu
menu = wx.Menu()
for indx, item in enumerate(_treeList[:-1]):
menuItem = wx.MenuItem(menu, -1, item[0])
submenu = wx.Menu()
for childItem in item[1]:
mi = submenu.Append(-1, childItem)
self.Bind(wx.EVT_MENU, self.OnDemoMenu, mi)
menuItem.SetBitmap(images.catalog[_demoPngs[indx+1]].getBitmap())
menuItem.SetSubMenu(submenu)
menu.AppendItem(menuItem)
self.mainmenu.Append(menu, '&Demo')
# Make an Option menu
# If we've turned off floatable panels then this menu is not needed
if ALLOW_AUI_FLOATING:
menu = wx.Menu()
auiPerspectives = self.auiConfigurations.keys()
auiPerspectives.sort()
perspectivesMenu = wx.Menu()
item = wx.MenuItem(perspectivesMenu, -1, DEFAULT_PERSPECTIVE, "Load startup default perspective", wx.ITEM_RADIO)
self.Bind(wx.EVT_MENU, self.OnAUIPerspectives, item)
perspectivesMenu.AppendItem(item)
for indx, key in enumerate(auiPerspectives):
if key == DEFAULT_PERSPECTIVE:
continue
item = wx.MenuItem(perspectivesMenu, -1, key, "Load user perspective %d"%indx, wx.ITEM_RADIO)
perspectivesMenu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.OnAUIPerspectives, item)
menu.AppendMenu(wx.ID_ANY, "&AUI Perspectives", perspectivesMenu)
self.perspectives_menu = perspectivesMenu
item = wx.MenuItem(menu, -1, 'Save Perspective', 'Save AUI perspective')
item.SetBitmap(images.catalog['saveperspective'].getBitmap())
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.OnSavePerspective, item)
item = wx.MenuItem(menu, -1, 'Delete Perspective', 'Delete AUI perspective')
item.SetBitmap(images.catalog['deleteperspective'].getBitmap())
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.OnDeletePerspective, item)
menu.AppendSeparator()
item = wx.MenuItem(menu, -1, 'Restore Tree Expansion', 'Restore the initial tree expansion state')
item.SetBitmap(images.catalog['expansion'].getBitmap())
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.OnTreeExpansion, item)
self.mainmenu.Append(menu, '&Options')
# Make a Help menu
menu = wx.Menu()
findItem = wx.MenuItem(menu, -1, '&Find\tCtrl-F', 'Find in the Demo Code')
findItem.SetBitmap(images.catalog['find'].getBitmap())
if 'wxMac' not in wx.PlatformInfo:
findNextItem = wx.MenuItem(menu, -1, 'Find &Next\tF3', 'Find Next')
else:
findNextItem = wx.MenuItem(menu, -1, 'Find &Next\tCtrl-G', 'Find Next')
findNextItem.SetBitmap(images.catalog['findnext'].getBitmap())
menu.AppendItem(findItem)
menu.AppendItem(findNextItem)
menu.AppendSeparator()
shellItem = wx.MenuItem(menu, -1, 'Open Py&Shell Window\tF5',
                                'An interactive interpreter window with the demo app and frame objects in the namespace')
shellItem.SetBitmap(images.catalog['pyshell'].getBitmap())
menu.AppendItem(shellItem)
inspToolItem = wx.MenuItem(menu, -1, 'Open &Widget Inspector\tF6',
'A tool that lets you browse the live widgets and sizers in an application')
inspToolItem.SetBitmap(images.catalog['inspect'].getBitmap())
menu.AppendItem(inspToolItem)
if 'wxMac' not in wx.PlatformInfo:
menu.AppendSeparator()
helpItem = menu.Append(-1, '&About wxPython Demo', 'wxPython RULES!!!')
wx.App.SetMacAboutMenuItemId(helpItem.GetId())
self.Bind(wx.EVT_MENU, self.OnOpenShellWindow, shellItem)
self.Bind(wx.EVT_MENU, self.OnOpenWidgetInspector, inspToolItem)
self.Bind(wx.EVT_MENU, self.OnHelpAbout, helpItem)
self.Bind(wx.EVT_MENU, self.OnHelpFind, findItem)
self.Bind(wx.EVT_MENU, self.OnFindNext, findNextItem)
self.Bind(wx.EVT_FIND, self.OnFind)
self.Bind(wx.EVT_FIND_NEXT, self.OnFind)
self.Bind(wx.EVT_FIND_CLOSE, self.OnFindClose)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateFindItems, findItem)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateFindItems, findNextItem)
self.mainmenu.Append(menu, '&Help')
self.SetMenuBar(self.mainmenu)
if False:
# This is another way to set Accelerators, in addition to
# using the '\t<key>' syntax in the menu items.
aTable = wx.AcceleratorTable([(wx.ACCEL_ALT, ord('X'), exitItem.GetId()),
(wx.ACCEL_CTRL, ord('H'), helpItem.GetId()),
(wx.ACCEL_CTRL, ord('F'), findItem.GetId()),
                                      (wx.ACCEL_NORMAL, wx.WXK_F3, findNextItem.GetId()),
(wx.ACCEL_NORMAL, wx.WXK_F9, shellItem.GetId()),
])
self.SetAcceleratorTable(aTable)
#---------------------------------------------
def RecreateTree(self, evt=None):
# Catch the search type (name or content)
searchMenu = self.filter.GetMenu().GetMenuItems()
fullSearch = searchMenu[1].IsChecked()
if evt:
if fullSearch:
                # Do not scan all the demo files for every char
                # the user types; use wx.EVT_TEXT_ENTER instead
return
expansionState = self.tree.GetExpansionState()
current = None
item = self.tree.GetSelection()
if item:
prnt = self.tree.GetItemParent(item)
if prnt:
current = (self.tree.GetItemText(item),
self.tree.GetItemText(prnt))
self.tree.Freeze()
self.tree.DeleteAllItems()
self.root = self.tree.AddRoot("wxPython Overview")
self.tree.SetItemImage(self.root, 0)
self.tree.SetItemPyData(self.root, 0)
treeFont = self.tree.GetFont()
catFont = self.tree.GetFont()
# The old native treectrl on MSW has a bug where it doesn't
# draw all of the text for an item if the font is larger than
# the default. It seems to be clipping the item's label as if
# it was the size of the same label in the default font.
if 'wxMSW' not in wx.PlatformInfo or wx.GetApp().GetComCtl32Version() >= 600:
treeFont.SetPointSize(treeFont.GetPointSize()+2)
treeFont.SetWeight(wx.BOLD)
catFont.SetWeight(wx.BOLD)
self.tree.SetItemFont(self.root, treeFont)
firstChild = None
selectItem = None
filter = self.filter.GetValue()
count = 0
for category, items in _treeList:
count += 1
if filter:
if fullSearch:
items = self.searchItems[category]
else:
items = [item for item in items if filter.lower() in item.lower()]
if items:
child = self.tree.AppendItem(self.root, category, image=count)
self.tree.SetItemFont(child, catFont)
self.tree.SetItemPyData(child, count)
if not firstChild: firstChild = child
for childItem in items:
image = count
if DoesModifiedExist(childItem):
image = len(_demoPngs)
theDemo = self.tree.AppendItem(child, childItem, image=image)
self.tree.SetItemPyData(theDemo, count)
self.treeMap[childItem] = theDemo
if current and (childItem, category) == current:
selectItem = theDemo
self.tree.Expand(self.root)
if firstChild:
self.tree.Expand(firstChild)
if filter:
self.tree.ExpandAll()
elif expansionState:
self.tree.SetExpansionState(expansionState)
if selectItem:
self.skipLoad = True
self.tree.SelectItem(selectItem)
self.skipLoad = False
self.tree.Thaw()
self.searchItems = {}
def OnSearchMenu(self, event):
# Catch the search type (name or content)
searchMenu = self.filter.GetMenu().GetMenuItems()
fullSearch = searchMenu[1].IsChecked()
if fullSearch:
self.OnSearch()
else:
self.RecreateTree()
def OnSearch(self, event=None):
value = self.filter.GetValue()
if not value:
self.RecreateTree()
return
wx.BeginBusyCursor()
for category, items in _treeList:
self.searchItems[category] = []
for childItem in items:
if SearchDemo(childItem, value):
self.searchItems[category].append(childItem)
wx.EndBusyCursor()
self.RecreateTree()
def OnSearchCancelBtn(self, event):
self.filter.SetValue('')
self.OnSearch()
def SetTreeModified(self, modified):
item = self.tree.GetSelection()
if modified:
image = len(_demoPngs)
else:
image = self.tree.GetItemPyData(item)
self.tree.SetItemImage(item, image)
def WriteText(self, text):
if text[-1:] == '\n':
text = text[:-1]
wx.LogMessage(text)
def write(self, txt):
self.WriteText(txt)
#---------------------------------------------
def OnItemExpanded(self, event):
item = event.GetItem()
wx.LogMessage("OnItemExpanded: %s" % self.tree.GetItemText(item))
event.Skip()
#---------------------------------------------
def OnItemCollapsed(self, event):
item = event.GetItem()
wx.LogMessage("OnItemCollapsed: %s" % self.tree.GetItemText(item))
event.Skip()
#---------------------------------------------
def OnTreeLeftDown(self, event):
# reset the overview text if the tree item is clicked on again
        pt = event.GetPosition()
item, flags = self.tree.HitTest(pt)
if item == self.tree.GetSelection():
self.SetOverview(self.tree.GetItemText(item)+" Overview", self.curOverview)
event.Skip()
#---------------------------------------------
def OnSelChanged(self, event):
if self.dying or not self.loaded or self.skipLoad:
return
item = event.GetItem()
itemText = self.tree.GetItemText(item)
self.LoadDemo(itemText)
#---------------------------------------------
def LoadDemo(self, demoName):
try:
wx.BeginBusyCursor()
self.pnl.Freeze()
os.chdir(self.cwd)
self.ShutdownDemoModule()
if demoName == self.overviewText:
# User selected the "wxPython Overview" node
# ie: _this_ module
# Changing the main window at runtime not yet supported...
self.demoModules = DemoModules(__name__)
self.SetOverview(self.overviewText, mainOverview)
self.LoadDemoSource()
self.UpdateNotebook(0)
else:
if os.path.exists(GetOriginalFilename(demoName)):
wx.LogMessage("Loading demo %s.py..." % demoName)
self.demoModules = DemoModules(demoName)
self.LoadDemoSource()
else:
self.SetOverview("wxPython", mainOverview)
self.codePage = None
self.UpdateNotebook(0)
finally:
wx.EndBusyCursor()
self.pnl.Thaw()
#---------------------------------------------
def LoadDemoSource(self):
self.codePage = None
self.codePage = DemoCodePanel(self.nb, self)
self.codePage.LoadDemo(self.demoModules)
#---------------------------------------------
def RunModule(self):
"""Runs the active module"""
module = self.demoModules.GetActive()
self.ShutdownDemoModule()
overviewText = ""
        # o The runTest() for all samples must now return a window that can
        #   be placed in a tab in the main notebook.
# o If an error occurs (or has occurred before) an error tab is created.
if module is not None:
wx.LogMessage("Running demo module...")
if hasattr(module, "overview"):
overviewText = module.overview
try:
self.demoPage = module.runTest(self, self.nb, self)
except:
self.demoPage = DemoErrorPanel(self.nb, self.codePage,
DemoError(sys.exc_info()), self)
bg = self.nb.GetThemeBackgroundColour()
if bg:
self.demoPage.SetBackgroundColour(bg)
assert self.demoPage is not None, "runTest must return a window!"
else:
# There was a previous error in compiling or exec-ing
self.demoPage = DemoErrorPanel(self.nb, self.codePage,
self.demoModules.GetErrorInfo(), self)
self.SetOverview(self.demoModules.name + " Overview", overviewText)
if self.firstTime:
# change to the demo page the first time a module is run
self.UpdateNotebook(2)
self.firstTime = False
else:
# otherwise just stay on the same tab in case the user has changed to another one
self.UpdateNotebook()
#---------------------------------------------
def ShutdownDemoModule(self):
if self.demoPage:
# inform the window that it's time to quit if it cares
if hasattr(self.demoPage, "ShutdownDemo"):
self.demoPage.ShutdownDemo()
wx.YieldIfNeeded() # in case the page has pending events
self.demoPage = None
#---------------------------------------------
def UpdateNotebook(self, select = -1):
nb = self.nb
debug = False
self.pnl.Freeze()
def UpdatePage(page, pageText):
pageExists = False
pagePos = -1
for i in range(nb.GetPageCount()):
if nb.GetPageText(i) == pageText:
pageExists = True
pagePos = i
break
if page:
if not pageExists:
# Add a new page
nb.AddPage(page, pageText, imageId=nb.GetPageCount())
if debug: wx.LogMessage("DBG: ADDED %s" % pageText)
else:
if nb.GetPage(pagePos) != page:
# Reload an existing page
nb.DeletePage(pagePos)
nb.InsertPage(pagePos, page, pageText, imageId=pagePos)
if debug: wx.LogMessage("DBG: RELOADED %s" % pageText)
else:
# Excellent! No redraw/flicker
if debug: wx.LogMessage("DBG: SAVED from reloading %s" % pageText)
elif pageExists:
# Delete a page
nb.DeletePage(pagePos)
if debug: wx.LogMessage("DBG: DELETED %s" % pageText)
else:
if debug: wx.LogMessage("DBG: STILL GONE - %s" % pageText)
if select == -1:
select = nb.GetSelection()
UpdatePage(self.codePage, "Demo Code")
UpdatePage(self.demoPage, "Demo")
if select >= 0 and select < nb.GetPageCount():
nb.SetSelection(select)
self.pnl.Thaw()
#---------------------------------------------
def SetOverview(self, name, text):
self.curOverview = text
lead = text[:6]
if lead != '<html>' and lead != '<HTML>':
text = '<br>'.join(text.split('\n'))
if wx.USE_UNICODE:
text = text.decode('iso8859_1')
self.ovr.SetPage(text)
self.nb.SetPageText(0, name)
#---------------------------------------------
# Menu methods
def OnFileExit(self, *event):
self.Close()
def OnToggleRedirect(self, event):
app = wx.GetApp()
if event.Checked():
app.RedirectStdio()
print "Print statements and other standard output will now be directed to this window."
else:
app.RestoreStdio()
print "Print statements and other standard output will now be sent to the usual location."
def OnAUIPerspectives(self, event):
perspective = self.perspectives_menu.GetLabel(event.GetId())
self.mgr.LoadPerspective(self.auiConfigurations[perspective])
self.mgr.Update()
def OnSavePerspective(self, event):
dlg = wx.TextEntryDialog(self, "Enter a name for the new perspective:", "AUI Configuration")
dlg.SetValue(("Perspective %d")%(len(self.auiConfigurations)+1))
if dlg.ShowModal() != wx.ID_OK:
return
perspectiveName = dlg.GetValue()
menuItems = self.perspectives_menu.GetMenuItems()
for item in menuItems:
if item.GetLabel() == perspectiveName:
wx.MessageBox("The selected perspective name:\n\n%s\n\nAlready exists."%perspectiveName,
"Error", style=wx.ICON_ERROR)
return
item = wx.MenuItem(self.perspectives_menu, -1, dlg.GetValue(),
"Load user perspective %d"%(len(self.auiConfigurations)+1),
wx.ITEM_RADIO)
self.Bind(wx.EVT_MENU, self.OnAUIPerspectives, item)
self.perspectives_menu.AppendItem(item)
item.Check(True)
self.auiConfigurations.update({dlg.GetValue(): self.mgr.SavePerspective()})
def OnDeletePerspective(self, event):
menuItems = self.perspectives_menu.GetMenuItems()[1:]
lst = []
loadDefault = False
for item in menuItems:
lst.append(item.GetLabel())
dlg = wx.MultiChoiceDialog(self,
"Please select the perspectives\nyou would like to delete:",
"Delete AUI Perspectives", lst)
if dlg.ShowModal() == wx.ID_OK:
selections = dlg.GetSelections()
strings = [lst[x] for x in selections]
for sel in strings:
self.auiConfigurations.pop(sel)
item = menuItems[lst.index(sel)]
if item.IsChecked():
loadDefault = True
self.perspectives_menu.GetMenuItems()[0].Check(True)
self.perspectives_menu.DeleteItem(item)
lst.remove(sel)
if loadDefault:
self.mgr.LoadPerspective(self.auiConfigurations[DEFAULT_PERSPECTIVE])
self.mgr.Update()
def OnTreeExpansion(self, event):
self.tree.SetExpansionState(self.expansionState)
def OnHelpAbout(self, event):
from About import MyAboutBox
about = MyAboutBox(self)
about.ShowModal()
about.Destroy()
def OnHelpFind(self, event):
if self.finddlg != None:
return
self.nb.SetSelection(1)
self.finddlg = wx.FindReplaceDialog(self, self.finddata, "Find",
wx.FR_NOMATCHCASE | wx.FR_NOWHOLEWORD)
self.finddlg.Show(True)
def OnUpdateFindItems(self, evt):
evt.Enable(self.finddlg == None)
def OnFind(self, event):
editor = self.codePage.editor
self.nb.SetSelection(1)
end = editor.GetLastPosition()
textstring = editor.GetRange(0, end).lower()
findstring = self.finddata.GetFindString().lower()
backward = not (self.finddata.GetFlags() & wx.FR_DOWN)
if backward:
start = editor.GetSelection()[0]
loc = textstring.rfind(findstring, 0, start)
else:
start = editor.GetSelection()[1]
loc = textstring.find(findstring, start)
if loc == -1 and start != 0:
# string not found, start at beginning
if backward:
start = end
loc = textstring.rfind(findstring, 0, start)
else:
start = 0
loc = textstring.find(findstring, start)
if loc == -1:
dlg = wx.MessageDialog(self, 'Find String Not Found',
'Find String Not Found in Demo File',
wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
if self.finddlg:
if loc == -1:
self.finddlg.SetFocus()
return
else:
self.finddlg.Destroy()
self.finddlg = None
editor.ShowPosition(loc)
editor.SetSelection(loc, loc + len(findstring))
def OnFindNext(self, event):
if self.finddata.GetFindString():
self.OnFind(event)
else:
self.OnHelpFind(event)
def OnFindClose(self, event):
event.GetDialog().Destroy()
self.finddlg = None
def OnOpenShellWindow(self, evt):
if self.shell:
# if it already exists then just make sure it's visible
s = self.shell
if s.IsIconized():
s.Iconize(False)
s.Raise()
else:
# Make a PyShell window
from wx import py
namespace = { 'wx' : wx,
'app' : wx.GetApp(),
'frame' : self,
}
self.shell = py.shell.ShellFrame(None, locals=namespace)
self.shell.SetSize((640,480))
self.shell.Show()
# Hook the close event of the main frame window so that we
# close the shell at the same time if it still exists
def CloseShell(evt):
if self.shell:
self.shell.Close()
evt.Skip()
self.Bind(wx.EVT_CLOSE, CloseShell)
def OnOpenWidgetInspector(self, evt):
# Activate the widget inspection tool
from wx.lib.inspection import InspectionTool
if not InspectionTool().initialized:
InspectionTool().Init()
# Find a widget to be selected in the tree. Use either the
# one under the cursor, if any, or this frame.
wnd = wx.FindWindowAtPointer()
if not wnd:
wnd = self
InspectionTool().Show(wnd, True)
#---------------------------------------------
def OnCloseWindow(self, event):
self.dying = True
self.demoPage = None
self.codePage = None
self.mainmenu = None
if self.tbicon is not None:
self.tbicon.Destroy()
config = GetConfig()
config.Write('ExpansionState', str(self.tree.GetExpansionState()))
config.Write('AUIPerspectives', str(self.auiConfigurations))
config.Flush()
self.Destroy()
#---------------------------------------------
def OnIdle(self, event):
if self.otherWin:
self.otherWin.Raise()
self.demoPage = self.otherWin
self.otherWin = None
#---------------------------------------------
def ShowTip(self):
config = GetConfig()
showTipText = config.Read("tips")
if showTipText:
showTip, index = eval(showTipText)
else:
showTip, index = (1, 0)
if showTip:
tp = wx.CreateFileTipProvider(opj("data/tips.txt"), index)
##tp = MyTP(0)
showTip = wx.ShowTip(self, tp)
index = tp.GetCurrentTip()
config.Write("tips", str( (showTip, index) ))
config.Flush()
#---------------------------------------------
def OnDemoMenu(self, event):
try:
selectedDemo = self.treeMap[self.mainmenu.GetLabel(event.GetId())]
except:
selectedDemo = None
if selectedDemo:
self.tree.SelectItem(selectedDemo)
self.tree.EnsureVisible(selectedDemo)
#---------------------------------------------
def OnIconfiy(self, evt):
wx.LogMessage("OnIconfiy: %s" % evt.Iconized())
evt.Skip()
#---------------------------------------------
def OnMaximize(self, evt):
wx.LogMessage("OnMaximize")
evt.Skip()
#---------------------------------------------
def OnActivate(self, evt):
wx.LogMessage("OnActivate: %s" % evt.GetActive())
evt.Skip()
#---------------------------------------------
def OnAppActivate(self, evt):
wx.LogMessage("OnAppActivate: %s" % evt.GetActive())
evt.Skip()
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
class MySplashScreen(wx.SplashScreen):
def __init__(self):
bmp = wx.Image(opj("bitmaps/splash.png")).ConvertToBitmap()
wx.SplashScreen.__init__(self, bmp,
wx.SPLASH_CENTRE_ON_SCREEN | wx.SPLASH_TIMEOUT,
5000, None, -1)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.fc = wx.FutureCall(2000, self.ShowMain)
def OnClose(self, evt):
# Make sure the default handler runs too so this window gets
# destroyed
evt.Skip()
self.Hide()
# if the timer is still running then go ahead and show the
# main frame now
if self.fc.IsRunning():
self.fc.Stop()
self.ShowMain()
def ShowMain(self):
frame = wxPythonDemo(None, "wxPython: (A Demonstration)")
frame.Show()
if self.fc.IsRunning():
self.Raise()
wx.CallAfter(frame.ShowTip)
#---------------------------------------------------------------------------
from wx.lib.mixins.treemixin import ExpansionState
if USE_CUSTOMTREECTRL:
import wx.lib.customtreectrl as CT
TreeBaseClass = CT.CustomTreeCtrl
else:
TreeBaseClass = wx.TreeCtrl
class wxPythonDemoTree(ExpansionState, TreeBaseClass):
def __init__(self, parent):
TreeBaseClass.__init__(self, parent, style=wx.TR_DEFAULT_STYLE|
wx.TR_HAS_VARIABLE_ROW_HEIGHT)
self.BuildTreeImageList()
if USE_CUSTOMTREECTRL:
self.SetSpacing(10)
self.SetWindowStyle(self.GetWindowStyle() & ~wx.TR_LINES_AT_ROOT)
def AppendItem(self, parent, text, image=-1, wnd=None):
if USE_CUSTOMTREECTRL:
item = TreeBaseClass.AppendItem(self, parent, text, image=image, wnd=wnd)
else:
item = TreeBaseClass.AppendItem(self, parent, text, image=image)
return item
def BuildTreeImageList(self):
imgList = wx.ImageList(16, 16)
for png in _demoPngs:
imgList.Add(images.catalog[png].getBitmap())
# add the image for modified demos.
imgList.Add(images.catalog["custom"].getBitmap())
self.AssignImageList(imgList)
def GetItemIdentity(self, item):
return self.GetPyData(item)
#---------------------------------------------------------------------------
class MyApp(wx.App):
def OnInit(self):
"""
Create and show the splash screen. It will then create and show
the main frame when it is time to do so.
"""
wx.SystemOptions.SetOptionInt("mac.window-plain-transition", 1)
self.SetAppName("wxPyDemo")
# For debugging
#self.SetAssertMode(wx.PYAPP_ASSERT_DIALOG)
# Normally when using a SplashScreen you would create it, show
        # it and then continue on with the application's
# initialization, finally creating and showing the main
# application window(s). In this case we have nothing else to
# do so we'll delay showing the main frame until later (see
# ShowMain above) so the users can see the SplashScreen effect.
splash = MySplashScreen()
splash.Show()
return True
#---------------------------------------------------------------------------
def main():
try:
demoPath = os.path.dirname(__file__)
os.chdir(demoPath)
except:
pass
app = MyApp(False)
app.MainLoop()
#---------------------------------------------------------------------------
mainOverview = """<html><body>
<h2>wxPython</h2>
<p> wxPython is a <b>GUI toolkit</b> for the Python programming
language. It allows Python programmers to create programs with a
robust, highly functional graphical user interface, simply and easily.
It is implemented as a Python extension module (native code) that
wraps the popular wxWindows cross platform GUI library, which is
written in C++.
<p> Like Python and wxWindows, wxPython is <b>Open Source</b> which
means that it is free for anyone to use and the source code is
available for anyone to look at and modify. Or anyone can contribute
fixes or enhancements to the project.
<p> wxPython is a <b>cross-platform</b> toolkit. This means that the
same program will run on multiple platforms without modification.
Currently supported platforms are 32-bit Microsoft Windows, most Unix
or unix-like systems, and Macintosh OS X. Since the language is
Python, wxPython programs are <b>simple, easy</b> to write and easy to
understand.
<p> <b>This demo</b> is not only a collection of test cases for
wxPython, but is also designed to help you learn about wxPython and
how to use it. Each sample is listed in the tree control on the left.
When a sample is selected in the tree, a module is loaded and run
(usually in a tab of this notebook) and the source code of the module
is loaded in another tab for you to browse and learn from.
"""
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
if __name__ == '__main__':
__name__ = 'Main'
main()
#----------------------------------------------------------------------------
| [
"RD@c3d73ce0-8a6f-49c7-b76d-6d57e0e08775"
]
| RD@c3d73ce0-8a6f-49c7-b76d-6d57e0e08775 |
be0ea72d88b880a4422662c70a42cd30c368ac6d | 12a5b72982291ac7c074210afc2c9dfe2c389709 | /online_judges/Codeforces/271/B/code.py | 186d8f4e41e3bac292297f62acafa3f9103c1638 | []
| no_license | krantirk/Algorithms-and-code-for-competitive-programming. | 9b8c214758024daa246a1203e8f863fc76cfe847 | dcf29bf976024a9d1873eadc192ed59d25db968d | refs/heads/master | 2020-09-22T08:35:19.352751 | 2019-05-21T11:56:39 | 2019-05-21T11:56:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | def bin_search(x, lista):
    # Lower-bound binary search on a sorted list: returns the index of x,
    # or the insertion point for x if it is absent.
    meio = (len(lista)) / 2
    inicio = 0
    fim = len(lista)
    while inicio < fim:
        if lista[meio] < x: inicio = meio + 1
        elif lista[meio] > x: fim = meio
        else: return meio
        meio = (inicio + fim) / 2
    return fim
n = int(raw_input())
a = map(int,raw_input().split())
m = int(raw_input())
q = map(int,raw_input().split())
aux = []
soma_ac = 0
for e in a:
soma_ac += e
aux.append(soma_ac)
for e in q:
print bin_search(e,aux) + 1
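# Hedged aside (illustrative, not used above): for a strictly increasing list
# such as the prefix sums in aux, the hand-rolled lower-bound search matches
# the standard library's bisect module:
def bin_search_bisect(x, lista):
    import bisect
    return bisect.bisect_left(lista, x)  # same index bin_search(x, lista) returns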
| [
"[email protected]"
]
| |
56af6b193a092ebfd4f16874bd4ad48af30c534d | f92486e7a546112e9f783a612724463d60de33b8 | /line/bin/easy_install | 8ea5fd711863e1254b4048dae009777e2ed05a7a | []
| no_license | omatsu0/chatbot | 4f0a2bc3093f37068da7ffe685bfdb242b3535ad | dad854d40a0494300897376a22d0c123cada8a60 | refs/heads/master | 2022-12-20T15:51:58.457332 | 2020-09-12T08:58:20 | 2020-09-12T08:58:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | #!/Users/sss/Desktop/chatbot/line/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
3e58da2d6dd384b3d47fb8eba862f3472aae4e64 | 4aa6b7c3a5ae3817007e09ad1289c1e9f7a355c0 | /剑指offer/superJumpFloor.py | a4fe01802a8158a111a6560953ab7e0e854d2e99 | []
| no_license | liuhuipy/Algorithm-python | 8f5143e06cf5fa2de2c178e3ba9e5fd12b9bcdf7 | 4e92a0b874f956d1df84d1493f870a5d1f06cde2 | refs/heads/master | 2021-06-03T04:19:01.946149 | 2021-01-08T07:44:40 | 2021-01-08T07:44:40 | 99,838,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'liuhui'
'''
A frog can jump up 1 step at a time, or 2 steps, ... or up to n steps in one jump. Find the total number of distinct ways the frog can jump up a staircase of n steps.
'''
class Solution:
def jumpFloorII(self, number):
        # Each of the first number-1 steps can independently be a landing
        # point or be skipped, so there are 2 ** (number - 1) jump sequences.
        ans = 1
        if number >= 2:
            for i in range(number - 1):
                ans = ans * 2
        return ans
if __name__ == '__main__':
solut = Solution()
res = solut.jumpFloorII(10)
print(res)
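# Hedged cross-check (illustrative, not part of the original solution): the
# closed form 2 ** (n - 1) agrees with the direct recurrence
# f(n) = f(n-1) + f(n-2) + ... + f(1) + 1.
def jump_floor_dp(number):
    ways = [1]  # one way to reach step 1: a single jump
    for _ in range(number - 1):
        ways.append(sum(ways) + 1)  # arrive from any lower step, or jump directly
    return ways[-1]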
| [
"[email protected]"
]
| |
81464fa1fbd45533b5eca02d118798b2f058e87a | 54fdaa05078261180cbd7cc94c132527725b189d | /test/crab_ElectronPlots_newskim_eraF_70110.py | 6738548559045b4ba874512b81343c4f103a78bd | []
| no_license | psiddire/ZeeAnalyzer | e488d3b65108ca923bd459cda41e61f3bd746a5b | d94b1fd4f4de19f5cdeaf405e4c0d6629b889888 | refs/heads/master | 2021-09-07T12:20:36.554253 | 2018-02-22T18:31:52 | 2018-02-22T18:31:52 | 113,574,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # from https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCRAB3Tutorial
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'plots_Zee_newskim_eraF_70110'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'runElectronPlots_newSkim_eraF.py'
config.Data.inputDataset = '/DoubleEG/Run2017F-PromptReco-v1/MINIAOD'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 1000
config.Data.lumiMask = 'eraF.txt'
config.Data.runRange = '305044-306126'
#config.Data.totalUnits = 1
config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
config.Data.publication = True
config.Data.outputDatasetTag = 'Zee_ElectronPlots_newskim_eraF_70110'
config.Site.storageSite = 'T2_CH_CERN'
#all the configuration parameters https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3ConfigurationFile
#all crab commands https://twiki.cern.ch/twiki/bin/view/CMSPublic/CRAB3Commands
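# Hedged usage note (not part of this config): a CRAB3 configuration like this
# one is normally submitted from a CMSSW environment with
#   crab submit -c crab_ElectronPlots_newskim_eraF_70110.py
# and then followed with `crab status`; see the twiki links above for the full
# command reference.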
| [
"[email protected]"
]
| |
10b1c6ebba927444ab24d8082e2cd7350b1d7db2 | 6abc9b7e59aa2bc77d16bf0579bc2319db4fa20c | /miniverse/dataset/models.py | 8a5b1134e6babae592efd06546ed04c4a3163e1a | [
"MIT"
]
| permissive | IQSS/old-miniverse | b05823891fafd40a5b12f18894f3dff19404fe37 | daabcad2fbd6cc29cc05f0091f51157e4fe9e46a | refs/heads/master | 2021-01-21T03:15:54.392430 | 2014-06-27T16:05:55 | 2014-06-27T16:05:55 | 19,803,423 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,210 | py | import os
from hashlib import md5
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.db import models
from django.template.defaultfilters import slugify
from dataverse.models import Dataverse
from core.models import TimeStampedModel
class DatasetState(models.Model):
"""
Version states for the DatasetVersion object
DRAFT, IN REVIEW, RELEASED, ARCHIVED, DEACCESSIONED
"""
name = models.CharField(max_length=70)
sort_order = models.IntegerField()
slug = models.SlugField(blank=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(DatasetState, self).save(*args, **kwargs)
def __unicode__(self):
return self.name
class Meta:
ordering = ('sort_order', 'name',)
class Dataset(TimeStampedModel):
"""Expects a .zip file upload
Modify in the future for shapefiles loaded separately
"""
name = models.CharField(max_length=255)
dataverse = models.ForeignKey(Dataverse)
version_state = models.ForeignKey(DatasetState)
version_number = models.IntegerField(default=1)
minor_version_number = models.IntegerField(default=0)
description = models.TextField(blank=True)
md5 = models.CharField(max_length=40, blank=True, db_index=True, help_text='auto-filled on save')
#def get_geographic_metadata(self):
#GeographicMetadata.objects.select_related('datafile').all()
def get_dv_api_params(self):
if not self.id:
return {}
p = { 'dataset_id' : self.id\
, 'dataset_version_id' : self.version_number\
, 'dataset_name' : self.name\
, 'dataset_description' : self.description\
}
p.update(self.dataverse.get_dv_api_params())
return p
def save(self, *args, **kwargs):
if not self.id:
super(Dataset, self).save(*args, **kwargs)
self.md5 = md5('%s%s' % (self.id, self.name)).hexdigest()
super(Dataset, self).save(*args, **kwargs)
def natural_key(self):
return '%s-%s' % (self.name, self.dataverse)
def view_dataset_list(self):
lnk = reverse('view_dataset_list', kwargs={})
return '<a href="%s">view dataset</a>' % lnk
view_dataset_list.allow_tags = True
def get_files(self):
return self.datafile_set.all()
def __unicode__(self):
return self.name
class Meta:
ordering = ('name', )
#verbose_name = 'COA File Load Log'
class DataFile(TimeStampedModel):
"""Used for working with a selected shapefile, specifically using the extensions specified in WORLDMAP_MANDATORY_IMPORT_EXTENSIONS
"""
dataset_file = models.FileField(upload_to='datafile/%Y/%m/%d')# max_length=255)
dataset = models.ForeignKey(Dataset)
has_gis_data = models.BooleanField(default=False)
file_checksum = models.CharField(max_length=40, blank=True, db_index=True, help_text='auto-filled on save')
#mime_type = models.CharField(max_length=255, blank=True)
md5 = models.CharField(max_length=40, blank=True, db_index=True, help_text='auto-filled on save')
def get_geographic_metadata(self):
return self.geographicmetadata_set.filter(links_working=True)
def get_dv_api_params(self, request=None):
"""
Params to respond to API call from GeoConnect
"""
if not self.id:
return {}
# Params from Datafile
p = { 'datafile_id' : self.id\
, 'datafile_label': self.get_basename()\
#, 'has_gis_data' : self.has_gis_data
,'filename' : self.get_basename()\
,'filesize' : self.dataset_file.size\
,'created' : str(self.created)\
,'datafile_type': '--file-type--'\
,'datafile_expected_md5_checksum': self.file_checksum\
}
# Full url to file, if available
if request:
p['datafile_download_url'] = request.build_absolute_uri(self.dataset_file.url)
# Add params from owning Dataset and Dataverse
p.update(self.dataset.get_dv_api_params())
return p
def get_mapit_link(self):
return 'http://127.0.0.1:8000/shapefile/examine-dvn-file/%s/%s' % (self.dataset.id, self.id)
def dataverse_name(self):
return self.dataset.dataverse.name
dataverse_name.allow_tags = True
def get_basename(self):
return os.path.basename(self.dataset_file.name)
def save(self, *args, **kwargs):
if not self.id:
super(DataFile, self).save(*args, **kwargs)
self.md5 = md5('%s%s' % (self.id, self.dataset_file)).hexdigest()
self.file_checksum = self.md5 # fake, need to add real md5
super(DataFile, self).save(*args, **kwargs)
def __unicode__(self):
return self.get_basename()
class Meta:
ordering = ('dataset_file',)
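# Hedged sketch (illustrative, not wired in): the inline TODO above notes that
# file_checksum is fake. A real content checksum could be computed by streaming
# the uploaded file through hashlib's md5 (imported at the top of this module):
def _real_file_md5(django_file, chunk_size=8192):
    digest = md5()
    for chunk in django_file.chunks(chunk_size):  # Django's File.chunks API
        digest.update(chunk)
    return digest.hexdigest()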
| [
"[email protected]"
]
| |
81955d510e5da2c20cc455a0595ac502556be959 | d402525075cec8d8b1564eadf03024bcc45ada57 | /map_api/app.py | 82162354a9dad36de96f5d9e316d705302a8b7bd | []
| no_license | DevHerles/mapa_telesalud_api | 3d5e0589b88e7178d10edeb798e13d1745d33062 | de361f9857500b594cc16abffed7987777911673 | refs/heads/master | 2023-06-26T00:16:05.174034 | 2021-07-20T20:03:51 | 2021-07-20T22:37:15 | 387,901,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,924 | py | """APP
FastAPI app definition, initialization and definition of routes
"""
# # Installed # #
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi import status as statuscode
# # Package # #
from .models import *
from .exceptions import *
from .repositories import PeopleRepository, SymptomsRepository
from .middlewares import request_handler
from .settings import api_settings as settings
__all__ = ("app", "run")
app = FastAPI(title=settings.title)
app.middleware("http")(request_handler)
origins = [
"http://localhost",
"http://localhost:8080",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/imei/{imei}",
response_model=PersonRead,
description="Get a single person by its unique IMEI",
responses=get_exception_responses(PersonNotFoundException),
tags=["people"])
def _get_person_by_imei(imei: str):
return PeopleRepository.getByImei(imei)
@app.get("/people",
response_model=PeopleRead,
description="List all the available persons",
tags=["people"])
def _list_people():
# TODO Filters
return PeopleRepository.list()
@app.get("/people/{person_id}",
response_model=PersonRead,
description="Get a single person by its unique ID",
responses=get_exception_responses(PersonNotFoundException),
tags=["people"])
def _get_person(person_id: str):
return PeopleRepository.get(person_id)
@app.post("/people",
description="Create a new person",
response_model=PersonRead,
status_code=statuscode.HTTP_201_CREATED,
responses=get_exception_responses(PersonAlreadyExistsException),
tags=["people"])
def _create_person(create: PersonCreate):
return PeopleRepository.create(create)
@app.patch(
"/people/{person_id}",
description="Update a single person by its unique ID, providing the fields to update",
status_code=statuscode.HTTP_204_NO_CONTENT,
responses=get_exception_responses(PersonNotFoundException,
PersonAlreadyExistsException),
tags=["people"])
def _update_person(person_id: str, update: PersonUpdate):
PeopleRepository.update(person_id, update)
# Symtoms
@app.get("/person-symptoms/{person_id}",
response_model=SymptomsRead,
description="List all the available symptoms",
tags=["symptoms"])
def _list_person_symptoms(person_id: str):
    # TODO Filters
    return SymptomsRepository.list(person_id)
@app.get("/symptoms",
response_model=SymptomsRead,
description="List all the available symptoms",
tags=["symptoms"])
def _list_symptoms():
# TODO Filters
return SymptomsRepository.list()
@app.get("/symptoms/{symptom_id}",
response_model=SymptomRead,
description="Get a single symptom by its unique ID",
responses=get_exception_responses(SymptomNotFoundException),
tags=["symptoms"])
def _get_symptom(symptom_id: str):
    return SymptomsRepository.get(symptom_id)
@app.post("/symptoms",
description="Create a new symptom",
response_model=SymptomRead,
status_code=statuscode.HTTP_201_CREATED,
responses=get_exception_responses(SymptomAlreadyExistsException),
tags=["symptoms"])
def _create_symptom(create: SymptomCreate):
return SymptomsRepository.create(create)
@app.patch(
"/symptoms/{symptoms_id}",
description="Update a single symptom by its unique ID, providing the fields to update",
status_code=statuscode.HTTP_204_NO_CONTENT,
responses=get_exception_responses(SymptomNotFoundException,
SymptomAlreadyExistsException),
tags=["symptoms"])
def _update_symptom(symptom_id: str, update: SymptomUpdate):
    SymptomsRepository.update(symptom_id, update)
@app.delete("/symptoms/{symptom_id}",
description="Delete a single symptom by its unique ID",
status_code=statuscode.HTTP_204_NO_CONTENT,
responses=get_exception_responses(SymptomNotFoundException),
tags=["symptoms"])
def _delete_symptom(symptom_id: str):
SymptomsRepository.delete(symptom_id)
@app.delete("/people/{person_id}",
description="Delete a single person by its unique ID",
status_code=statuscode.HTTP_204_NO_CONTENT,
responses=get_exception_responses(PersonNotFoundException),
tags=["people"])
def _delete_person(person_id: str):
PeopleRepository.delete(person_id)
def run():
"""Run the API using Uvicorn"""
uvicorn.run(app,
host=settings.host,
port=settings.port,
log_level=settings.log_level.lower())
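# Hedged smoke-test sketch (illustrative only, never invoked here): FastAPI's
# TestClient can exercise the routes above in-process. A 200 on /people assumes
# a reachable database behind PeopleRepository.
def _smoke_test():
    from fastapi.testclient import TestClient  # ships with FastAPI
    client = TestClient(app)
    assert client.get("/people").status_code == 200  # list endpoint defined above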
| [
"[email protected]"
]
| |
1c4ffdb96e1a0dc36fd40511b6292b9147273e6f | d8b93e08fdf884ebed89a38831e26e3753efea72 | /recalculation.py | 5aaa0153de63bce8736dbec8ec35e696db6b8732 | []
| no_license | wolfbolin/Everyclass-Occam | 68398ece9f5812aa1a0a31946e98181d559cc7ec | de347014c4237c88e99207fa05cb7fecb5325d1d | refs/heads/master | 2022-12-14T08:25:30.484056 | 2020-04-27T07:30:26 | 2020-04-27T07:30:26 | 156,236,558 | 2 | 1 | null | 2022-12-08T03:37:45 | 2018-11-05T15:08:01 | Python | UTF-8 | Python | false | false | 768 | py | # coding=utf-8
import Util
import Room
import Config
import Course
import Student
import Teacher
import Preprocess
if __name__ == "__main__":
config = Config.load_config("./Config")
    # Recalculate the data for every semester
    for semester in config["schedule"]:
        Util.print_blue("Current semester: %s" % semester)
        # Recalculate the per-entity information
Room.update(config, semester, config["schedule"][semester])
Course.update(config, semester, config["schedule"][semester])
Student.update(config, semester, config["schedule"][semester])
Teacher.update(config, semester, config["schedule"][semester])
        # Redo the data preprocessing
Preprocess.lesson_data_oc(config, semester)
Preprocess.search_data(config)
| [
"[email protected]"
]
| |
7d77d28bee25dffd3bf67e2bbdf27ef676d79359 | 7d5075610b6358dd9fd57132c8876d533813807c | /bigtempo/processors/tests/simple_task_tests.py | cfe810b0931571c775d7c144cde8515c3a29a7ed | [
"MIT"
]
| permissive | rhlobo/bigtempo3 | 152e1f76c94939e0a2d69e264e0b66f24f007731 | 848eda5f07f7e61f7659bac335726c567b41083e | refs/heads/main | 2023-07-05T02:08:15.749854 | 2021-08-11T21:35:38 | 2021-08-11T21:35:38 | 394,079,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | # -*- coding: utf-8 -*-
import unittest
from mockito import mock, when, any as anyx, verify
import bigtempo.processors.simple_task as task
class TestModuleFunctions(unittest.TestCase):
def test_processingtask_factory_should_return_processing_task(self):
instance = mock()
registration = mock()
dependencies = mock()
result = task.factory(instance, registration, dependencies)
assert isinstance(result, task.SimpleDatasourceTask)
class TestSimpleDatasourceTask(unittest.TestCase):
def test_process_should_process_dependencies(self):
instance = mock()
registration = mock()
dependencies = {
'a': mock(task.SimpleDatasourceTask),
'b': mock(task.SimpleDatasourceTask),
'c': mock(task.SimpleDatasourceTask),
}
when(dependencies['a']).process(...).thenReturn(None)
when(dependencies['b']).process(...).thenReturn(None)
when(dependencies['c']).process(...).thenReturn(None)
task.SimpleDatasourceTask(instance, registration, dependencies).process()
verify(dependencies['a'], times=1).process()
verify(dependencies['b'], times=1).process()
verify(dependencies['c'], times=1).process()
verify(instance, times=1).evaluate(anyx(dict))
def test_process_should_receive_dependencies_process_results_as_context(self):
class DatasourceMock():
def evaluate(self, context):
assert isinstance(context, dict)
assert len(context) == 2
assert context['a'] == '1'
assert context['b'] == '2'
dependencies = {
'a': mock(task.SimpleDatasourceTask),
'b': mock(task.SimpleDatasourceTask),
}
when(dependencies['a']).process().thenReturn('1')
when(dependencies['b']).process().thenReturn('2')
task.SimpleDatasourceTask(DatasourceMock(), mock(), dependencies).process()
| [
"[email protected]"
]
| |
43fc2fd2befd914500fcf5023d14f97c2716dcb8 | 083373c1cb0a78bb0a9a2879c65f6079035cb5e9 | /django_env/bin/django-admin | d196305a5710761171142c474e0b401219ebb208 | []
| no_license | swadhikar/django | ae407ac2dc02739cf35d5eabad89ea7fc26d25fd | 9f2c3cb88ef42af65f4f3a230e34346fe7100c73 | refs/heads/master | 2020-04-21T09:08:58.131848 | 2019-05-01T10:46:21 | 2019-05-01T10:46:21 | 169,439,735 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | #!/home/swadhi/django/django_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"[email protected]"
]
| ||
86e215f91e3353583812af8653c235bb5db5369c | aafba3346120db47cf87ba67dee21848576c337f | /tests/block_tools.py | 1bc6e256219f8866d8bd71df7f25f5befba26fea | [
"Apache-2.0"
]
| permissive | beetseeds/beet-blockchain | 9f7fa9e221364bb865a8b9f60455afc82b4a022b | e5d93f1f9041c48dd0c38416d845c8675bf22738 | refs/heads/main | 2023-07-14T21:30:18.089664 | 2021-09-10T01:40:00 | 2021-09-10T01:40:00 | 401,708,903 | 5 | 3 | Apache-2.0 | 2021-09-05T09:26:51 | 2021-08-31T13:14:50 | Python | UTF-8 | Python | false | false | 80,044 | py | import copy
import logging
import os
import random
import shutil
import sys
import tempfile
import time
from argparse import Namespace
from dataclasses import replace
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Any
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
from chiabip158 import PyBIP158
from beet.cmds.init_funcs import create_all_ssl, create_default_beet_config
from beet.full_node.bundle_tools import (
best_solution_generator_from_template,
detect_potential_template_generator,
simple_solution_generator,
)
from beet.util.errors import Err
from beet.full_node.generator import setup_generator_args
from beet.full_node.mempool_check_conditions import GENERATOR_MOD
from beet.plotting.create_plots import create_plots
from beet.consensus.block_creation import unfinished_block_to_full_block
from beet.consensus.block_record import BlockRecord
from beet.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from beet.consensus.blockchain_interface import BlockchainInterface
from beet.consensus.coinbase import create_puzzlehash_for_pk, create_farmer_coin, create_pool_coin
from beet.consensus.constants import ConsensusConstants
from beet.consensus.default_constants import DEFAULT_CONSTANTS
from beet.consensus.deficit import calculate_deficit
from beet.consensus.full_block_to_block_record import block_to_block_record
from beet.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from beet.consensus.cost_calculator import NPCResult, calculate_cost_of_program
from beet.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_interval_iters,
calculate_sp_iters,
is_overflow_block,
)
from beet.consensus.vdf_info_computation import get_signage_point_vdf_info
from beet.full_node.signage_point import SignagePoint
from beet.plotting.plot_tools import PlotInfo, load_plots, parse_plot_info
from beet.types.blockchain_format.classgroup import ClassgroupElement
from beet.types.blockchain_format.coin import Coin, hash_coin_list
from beet.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo
from beet.types.blockchain_format.pool_target import PoolTarget
from beet.types.blockchain_format.proof_of_space import ProofOfSpace
from beet.types.blockchain_format.reward_chain_block import RewardChainBlockUnfinished
from beet.types.blockchain_format.sized_bytes import bytes32
from beet.types.blockchain_format.slots import (
ChallengeChainSubSlot,
InfusedChallengeChainSubSlot,
RewardChainSubSlot,
SubSlotProofs,
)
from beet.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from beet.types.blockchain_format.vdf import VDFInfo, VDFProof
from beet.types.condition_with_args import ConditionWithArgs
from beet.types.end_of_slot_bundle import EndOfSubSlotBundle
from beet.types.full_block import FullBlock
from beet.types.generator_types import BlockGenerator, CompressorArg
from beet.types.spend_bundle import SpendBundle
from beet.types.unfinished_block import UnfinishedBlock
from beet.types.name_puzzle_condition import NPC
from beet.util.bech32m import encode_puzzle_hash
from beet.util.block_cache import BlockCache
from beet.util.condition_tools import ConditionOpcode, conditions_by_opcode
from beet.util.config import load_config, save_config
from beet.util.hash import std_hash
from beet.util.ints import uint8, uint16, uint32, uint64, uint128
from beet.util.keychain import Keychain, bytes_to_mnemonic
from beet.util.merkle_set import MerkleSet
from beet.util.prev_transaction_block import get_prev_transaction_block
from beet.util.path import mkdir
from beet.util.vdf_prover import get_vdf_info_and_proof
from tests.wallet_tools import WalletTool
from beet.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_local_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
)
test_constants = DEFAULT_CONSTANTS.replace(
**{
"MIN_PLOT_SIZE": 18,
"MIN_BLOCKS_PER_CHALLENGE_BLOCK": 12,
"DIFFICULTY_STARTING": 2 ** 12,
"DISCRIMINANT_SIZE_BITS": 16,
"SUB_EPOCH_BLOCKS": 170,
"WEIGHT_PROOF_THRESHOLD": 2,
"WEIGHT_PROOF_RECENT_BLOCKS": 380,
"DIFFICULTY_CONSTANT_FACTOR": 33554432,
"NUM_SPS_SUB_SLOT": 16, # Must be a power of 2
"MAX_SUB_SLOT_BLOCKS": 50,
"EPOCH_BLOCKS": 340,
"BLOCKS_CACHE_SIZE": 340 + 3 * 50, # Coordinate with the above values
"SUB_SLOT_TIME_TARGET": 600, # The target number of seconds per slot, mainnet 600
"SUB_SLOT_ITERS_STARTING": 2 ** 10, # Must be a multiple of 64
"NUMBER_ZERO_BITS_PLOT_FILTER": 1, # H(plot signature of the challenge) must start with these many zeroes
"MAX_FUTURE_TIME": 3600
* 24
* 10, # Allows creating blockchains with timestamps up to 10 days in the future, for testing
"COST_PER_BYTE": 1337,
"MEMPOOL_BLOCK_BUFFER": 6,
"INITIAL_FREEZE_END_TIMESTAMP": 1000,
"NETWORK_TYPE": 1,
}
)
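# Hedged sanity-check sketch (illustrative, never invoked): the inline comments
# above couple several overrides together; a test could assert that coupling:
def _check_test_constants_coupling():
    # 340 + 3 * 50, per the "Coordinate with the above values" comment
    assert test_constants.BLOCKS_CACHE_SIZE == (
        test_constants.EPOCH_BLOCKS + 3 * test_constants.MAX_SUB_SLOT_BLOCKS
    )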
log = logging.getLogger(__name__)
class BlockTools:
"""
Tools to generate blocks for testing.
"""
def __init__(
self, constants: ConsensusConstants = test_constants, root_path: Optional[Path] = None, const_dict=None
):
self._tempdir = None
if root_path is None:
self._tempdir = tempfile.TemporaryDirectory()
root_path = Path(self._tempdir.name)
self.root_path = root_path
create_default_beet_config(root_path)
self.keychain = Keychain("testing-1.8.0", True)
self.keychain.delete_all_keys()
self.farmer_master_sk_entropy = std_hash(b"block_tools farmer key")
self.pool_master_sk_entropy = std_hash(b"block_tools pool key")
self.farmer_master_sk = self.keychain.add_private_key(bytes_to_mnemonic(self.farmer_master_sk_entropy), "")
self.pool_master_sk = self.keychain.add_private_key(bytes_to_mnemonic(self.pool_master_sk_entropy), "")
self.farmer_pk = master_sk_to_farmer_sk(self.farmer_master_sk).get_g1()
self.pool_pk = master_sk_to_pool_sk(self.pool_master_sk).get_g1()
self.farmer_ph: bytes32 = create_puzzlehash_for_pk(
master_sk_to_wallet_sk(self.farmer_master_sk, uint32(0)).get_g1()
)
self.pool_ph: bytes32 = create_puzzlehash_for_pk(
master_sk_to_wallet_sk(self.pool_master_sk, uint32(0)).get_g1()
)
self.init_plots(root_path)
create_all_ssl(root_path)
self.all_sks: List[PrivateKey] = [sk for sk, _ in self.keychain.get_all_private_keys()]
self.pool_pubkeys: List[G1Element] = [master_sk_to_pool_sk(sk).get_g1() for sk in self.all_sks]
self.farmer_pubkeys: List[G1Element] = [master_sk_to_farmer_sk(sk).get_g1() for sk in self.all_sks]
if len(self.pool_pubkeys) == 0 or len(self.farmer_pubkeys) == 0:
raise RuntimeError("Keys not generated. Run `beet generate keys`")
self.load_plots()
self.local_sk_cache: Dict[bytes32, Tuple[PrivateKey, Any]] = {}
self._config = load_config(self.root_path, "config.yaml")
self._config["logging"]["log_stdout"] = True
self._config["selected_network"] = "testnet0"
for service in ["harvester", "farmer", "full_node", "wallet", "introducer", "timelord", "pool"]:
self._config[service]["selected_network"] = "testnet0"
save_config(self.root_path, "config.yaml", self._config)
overrides = self._config["network_overrides"]["constants"][self._config["selected_network"]]
updated_constants = constants.replace_str_to_bytes(**overrides)
if const_dict is not None:
updated_constants = updated_constants.replace(**const_dict)
self.constants = updated_constants
def change_config(self, new_config: Dict):
self._config = new_config
overrides = self._config["network_overrides"]["constants"][self._config["selected_network"]]
updated_constants = self.constants.replace_str_to_bytes(**overrides)
self.constants = updated_constants
save_config(self.root_path, "config.yaml", self._config)
def load_plots(self):
_, loaded_plots, _, _ = load_plots({}, {}, self.farmer_pubkeys, self.pool_pubkeys, None, False, self.root_path)
self.plots: Dict[Path, PlotInfo] = loaded_plots
def init_plots(self, root_path: Path):
plot_dir = get_plot_dir()
mkdir(plot_dir)
temp_dir = get_plot_tmp_dir()
mkdir(temp_dir)
num_pool_public_key_plots = 15
num_pool_address_plots = 5
args = Namespace()
# Can't go much lower than 20, since plots start having no solutions and more buggy
args.size = 22
# Uses many plots for testing, in order to guarantee proofs of space at every height
args.num = num_pool_public_key_plots # Some plots created to a pool public key, and some to a pool puzzle hash
args.buffer = 100
args.farmer_public_key = bytes(self.farmer_pk).hex()
args.pool_public_key = bytes(self.pool_pk).hex()
args.pool_contract_address = None
args.tmp_dir = temp_dir
args.tmp2_dir = plot_dir
args.final_dir = plot_dir
args.plotid = None
args.memo = None
args.buckets = 0
args.stripe_size = 2000
args.num_threads = 0
args.nobitfield = False
args.exclude_final_dir = False
args.list_duplicates = False
test_private_keys = [
AugSchemeMPL.key_gen(std_hash(i.to_bytes(2, "big")))
for i in range(num_pool_public_key_plots + num_pool_address_plots)
]
try:
# No datetime in the filename, to get deterministic filenames and not re-plot
create_plots(
args,
root_path,
use_datetime=False,
test_private_keys=test_private_keys[:num_pool_public_key_plots],
)
# Create more plots, but to a pool address instead of public key
args.pool_public_key = None
args.pool_contract_address = encode_puzzle_hash(self.pool_ph, "xbt")
args.num = num_pool_address_plots
create_plots(
args,
root_path,
use_datetime=False,
test_private_keys=test_private_keys[num_pool_public_key_plots:],
)
except KeyboardInterrupt:
shutil.rmtree(plot_dir, ignore_errors=True)
sys.exit(1)
@property
def config(self) -> Dict:
return copy.deepcopy(self._config)
def get_plot_signature(self, m: bytes32, plot_pk: G1Element) -> G2Element:
"""
Returns the plot signature of the header data.
"""
farmer_sk = master_sk_to_farmer_sk(self.all_sks[0])
for _, plot_info in self.plots.items():
if plot_pk == plot_info.plot_public_key:
# Look up local_sk from plot to save locked memory
if plot_info.prover.get_id() in self.local_sk_cache:
local_master_sk, pool_pk_or_ph = self.local_sk_cache[plot_info.prover.get_id()]
else:
pool_pk_or_ph, _, local_master_sk = parse_plot_info(plot_info.prover.get_memo())
self.local_sk_cache[plot_info.prover.get_id()] = (local_master_sk, pool_pk_or_ph)
if isinstance(pool_pk_or_ph, G1Element):
include_taproot = False
else:
assert isinstance(pool_pk_or_ph, bytes32)
include_taproot = True
local_sk = master_sk_to_local_sk(local_master_sk)
agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_sk.get_g1(), include_taproot)
assert agg_pk == plot_pk
harv_share = AugSchemeMPL.sign(local_sk, m, agg_pk)
farm_share = AugSchemeMPL.sign(farmer_sk, m, agg_pk)
if include_taproot:
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(local_sk.get_g1(), farmer_sk.get_g1())
taproot_share: G2Element = AugSchemeMPL.sign(taproot_sk, m, agg_pk)
else:
taproot_share = G2Element()
return AugSchemeMPL.aggregate([harv_share, farm_share, taproot_share])
raise ValueError(f"Do not have key {plot_pk}")
def get_pool_key_signature(self, pool_target: PoolTarget, pool_pk: Optional[G1Element]) -> Optional[G2Element]:
# Returns the pool signature for the corresponding pk. If no pk is provided, returns None.
if pool_pk is None:
return None
for sk in self.all_sks:
sk_child = master_sk_to_pool_sk(sk)
if sk_child.get_g1() == pool_pk:
return AugSchemeMPL.sign(sk_child, bytes(pool_target))
raise ValueError(f"Do not have key {pool_pk}")
def get_farmer_wallet_tool(self) -> WalletTool:
return WalletTool(self.constants, self.farmer_master_sk)
def get_pool_wallet_tool(self) -> WalletTool:
return WalletTool(self.constants, self.pool_master_sk)
def get_consecutive_blocks(
self,
num_blocks: int,
block_list_input: List[FullBlock] = None,
farmer_reward_puzzle_hash: Optional[bytes32] = None,
pool_reward_puzzle_hash: Optional[bytes32] = None,
transaction_data: Optional[SpendBundle] = None,
seed: bytes = b"",
time_per_block: Optional[float] = None,
force_overflow: bool = False,
skip_slots: int = 0, # Force at least this number of empty slots before the first SB
guarantee_transaction_block: bool = False, # Force that this block must be a tx block
normalized_to_identity_cc_eos: bool = False,
normalized_to_identity_icc_eos: bool = False,
normalized_to_identity_cc_sp: bool = False,
normalized_to_identity_cc_ip: bool = False,
current_time: bool = False,
previous_generator: CompressorArg = None,
genesis_timestamp: Optional[uint64] = None,
force_plot_id: Optional[bytes32] = None,
) -> List[FullBlock]:
assert num_blocks > 0
if block_list_input is not None:
block_list = block_list_input.copy()
else:
block_list = []
constants = self.constants
transaction_data_included = False
if time_per_block is None:
time_per_block = float(constants.SUB_SLOT_TIME_TARGET) / float(constants.SLOT_BLOCKS_TARGET)
if farmer_reward_puzzle_hash is None:
farmer_reward_puzzle_hash = self.farmer_ph
if len(block_list) == 0:
if force_plot_id is not None:
raise ValueError("Cannot specify plot_id for genesis block")
initial_block_list_len = 0
genesis = self.create_genesis_block(
constants,
seed,
force_overflow=force_overflow,
skip_slots=skip_slots,
timestamp=(uint64(int(time.time())) if genesis_timestamp is None else genesis_timestamp),
)
log.info(f"Created block 0 iters: {genesis.total_iters}")
num_empty_slots_added = skip_slots
block_list = [genesis]
num_blocks -= 1
else:
initial_block_list_len = len(block_list)
num_empty_slots_added = uint32(0) # Allows forcing empty slots in the beginning, for testing purposes
if num_blocks == 0:
return block_list
height_to_hash, difficulty, blocks = load_block_list(block_list, constants)
latest_block: BlockRecord = blocks[block_list[-1].header_hash]
curr = latest_block
while not curr.is_transaction_block:
curr = blocks[curr.prev_hash]
start_timestamp = curr.timestamp
start_height = curr.height
curr = latest_block
blocks_added_this_sub_slot = 1
while not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
blocks_added_this_sub_slot += 1
finished_sub_slots_at_sp: List[EndOfSubSlotBundle] = [] # Sub-slots since last block, up to signage point
finished_sub_slots_at_ip: List[EndOfSubSlotBundle] = [] # Sub-slots since last block, up to infusion point
sub_slot_iters: uint64 = latest_block.sub_slot_iters # The number of iterations in one sub-slot
same_slot_as_last = True # Only applies to first slot, to prevent old blocks from being added
sub_slot_start_total_iters: uint128 = latest_block.ip_sub_slot_total_iters(constants)
sub_slots_finished = 0
pending_ses: bool = False
# Start at the last block in block list
# Get the challenge for that slot
while True:
slot_cc_challenge, slot_rc_challenge = get_challenges(
constants,
blocks,
finished_sub_slots_at_sp,
latest_block.header_hash,
)
prev_num_of_blocks = num_blocks
if num_empty_slots_added < skip_slots:
# If did not reach the target slots to skip, don't make any proofs for this sub-slot
num_empty_slots_added += 1
else:
# Loop over every signage point (Except for the last ones, which are used for overflows)
for signage_point_index in range(0, constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA):
curr = latest_block
while curr.total_iters > sub_slot_start_total_iters + calculate_sp_iters(
constants, sub_slot_iters, uint8(signage_point_index)
):
if curr.height == 0:
break
curr = blocks[curr.prev_hash]
if curr.total_iters > sub_slot_start_total_iters:
finished_sub_slots_at_sp = []
if same_slot_as_last:
if signage_point_index < latest_block.signage_point_index:
# Ignore this signage_point because it's in the past
continue
signage_point: SignagePoint = get_signage_point(
constants,
BlockCache(blocks),
latest_block,
sub_slot_start_total_iters,
uint8(signage_point_index),
finished_sub_slots_at_sp,
sub_slot_iters,
normalized_to_identity_cc_sp,
)
if signage_point_index == 0:
cc_sp_output_hash: bytes32 = slot_cc_challenge
else:
assert signage_point.cc_vdf is not None
cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()
qualified_proofs: List[Tuple[uint64, ProofOfSpace]] = self.get_pospaces_for_challenge(
constants,
slot_cc_challenge,
cc_sp_output_hash,
seed,
difficulty,
sub_slot_iters,
force_plot_id=force_plot_id,
)
for required_iters, proof_of_space in sorted(qualified_proofs, key=lambda t: t[0]):
if blocks_added_this_sub_slot == constants.MAX_SUB_SLOT_BLOCKS or force_overflow:
break
if same_slot_as_last:
if signage_point_index == latest_block.signage_point_index:
# Ignore this block because it's in the past
if required_iters <= latest_block.required_iters:
continue
assert latest_block.header_hash in blocks
additions = None
removals = None
if transaction_data_included:
transaction_data = None
if transaction_data is not None and not transaction_data_included:
additions = transaction_data.additions()
removals = transaction_data.removals()
assert start_timestamp is not None
if proof_of_space.pool_contract_puzzle_hash is not None:
if pool_reward_puzzle_hash is not None:
# The caller wants to be paid to a specific address, but this PoSpace is tied to an
# address, so continue until a proof of space tied to a pk is found
continue
pool_target = PoolTarget(proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
if pool_reward_puzzle_hash is not None:
pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0))
else:
pool_target = PoolTarget(self.pool_ph, uint32(0))
if transaction_data is not None:
if previous_generator is not None:
block_generator: Optional[BlockGenerator] = best_solution_generator_from_template(
previous_generator, transaction_data
)
else:
block_generator = simple_solution_generator(transaction_data)
aggregate_signature = transaction_data.aggregated_signature
else:
block_generator = None
aggregate_signature = G2Element()
full_block, block_record = get_full_block_and_block_record(
constants,
blocks,
sub_slot_start_total_iters,
uint8(signage_point_index),
proof_of_space,
slot_cc_challenge,
slot_rc_challenge,
farmer_reward_puzzle_hash,
pool_target,
start_timestamp,
start_height,
time_per_block,
block_generator,
aggregate_signature,
additions,
removals,
height_to_hash,
difficulty,
required_iters,
sub_slot_iters,
self.get_plot_signature,
self.get_pool_key_signature,
finished_sub_slots_at_ip,
signage_point,
latest_block,
seed,
normalized_to_identity_cc_ip,
current_time=current_time,
)
if block_record.is_transaction_block:
transaction_data_included = True
else:
if guarantee_transaction_block:
continue
if pending_ses:
pending_ses = False
block_list.append(full_block)
if full_block.transactions_generator is not None:
compressor_arg = detect_potential_template_generator(
full_block.height, full_block.transactions_generator
)
if compressor_arg is not None:
previous_generator = compressor_arg
blocks_added_this_sub_slot += 1
blocks[full_block.header_hash] = block_record
log.info(f"Created block {block_record.height} ove=False, iters " f"{block_record.total_iters}")
height_to_hash[uint32(full_block.height)] = full_block.header_hash
latest_block = blocks[full_block.header_hash]
finished_sub_slots_at_ip = []
num_blocks -= 1
if num_blocks == 0:
return block_list
# Finish the end of sub-slot and try again next sub-slot
# End of sub-slot logic
if len(finished_sub_slots_at_ip) == 0:
# Block has been created within this sub-slot
eos_iters: uint64 = uint64(sub_slot_iters - (latest_block.total_iters - sub_slot_start_total_iters))
cc_input: ClassgroupElement = latest_block.challenge_vdf_output
rc_challenge: bytes32 = latest_block.reward_infusion_new_challenge
else:
# No blocks were successfully created within this sub-slot
eos_iters = sub_slot_iters
cc_input = ClassgroupElement.get_default_element()
rc_challenge = slot_rc_challenge
cc_vdf, cc_proof = get_vdf_info_and_proof(
constants,
cc_input,
slot_cc_challenge,
eos_iters,
)
rc_vdf, rc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_challenge,
eos_iters,
)
eos_deficit: uint8 = (
latest_block.deficit if latest_block.deficit > 0 else constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
)
icc_eos_vdf, icc_ip_proof = get_icc(
constants,
uint128(sub_slot_start_total_iters + sub_slot_iters),
finished_sub_slots_at_ip,
latest_block,
blocks,
sub_slot_start_total_iters,
eos_deficit,
)
# End of slot vdf info for icc and cc have to be from challenge block or start of slot, respectively,
# in order for light clients to validate.
cc_vdf = VDFInfo(cc_vdf.challenge, sub_slot_iters, cc_vdf.output)
if normalized_to_identity_cc_eos:
_, cc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_vdf.challenge,
sub_slot_iters,
True,
)
if pending_ses:
sub_epoch_summary: Optional[SubEpochSummary] = None
else:
sub_epoch_summary = next_sub_epoch_summary(
constants,
BlockCache(blocks, height_to_hash),
latest_block.required_iters,
block_list[-1],
False,
)
pending_ses = True
if sub_epoch_summary is not None:
ses_hash = sub_epoch_summary.get_hash()
new_sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters
new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty
log.info(f"Sub epoch summary: {sub_epoch_summary}")
else:
ses_hash = None
new_sub_slot_iters = None
new_difficulty = None
if icc_eos_vdf is not None:
# Icc vdf (Deficit of latest block is <= 4)
if len(finished_sub_slots_at_ip) == 0:
# This means there are blocks in this sub-slot
curr = latest_block
while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
if curr.is_challenge_block(constants):
icc_eos_iters = uint64(sub_slot_start_total_iters + sub_slot_iters - curr.total_iters)
else:
icc_eos_iters = sub_slot_iters
else:
# This means there are no blocks in this sub-slot
icc_eos_iters = sub_slot_iters
icc_eos_vdf = VDFInfo(
icc_eos_vdf.challenge,
icc_eos_iters,
icc_eos_vdf.output,
)
if normalized_to_identity_icc_eos:
_, icc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
icc_eos_vdf.challenge,
icc_eos_iters,
True,
)
icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = InfusedChallengeChainSubSlot(icc_eos_vdf)
assert icc_sub_slot is not None
icc_sub_slot_hash = icc_sub_slot.get_hash() if latest_block.deficit == 0 else None
cc_sub_slot = ChallengeChainSubSlot(
cc_vdf,
icc_sub_slot_hash,
ses_hash,
new_sub_slot_iters,
new_difficulty,
)
else:
# No icc
icc_sub_slot = None
cc_sub_slot = ChallengeChainSubSlot(cc_vdf, None, ses_hash, new_sub_slot_iters, new_difficulty)
finished_sub_slots_at_ip.append(
EndOfSubSlotBundle(
cc_sub_slot,
icc_sub_slot,
RewardChainSubSlot(
rc_vdf,
cc_sub_slot.get_hash(),
icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
eos_deficit,
),
SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
)
)
finished_sub_slots_eos = finished_sub_slots_at_ip.copy()
latest_block_eos = latest_block
overflow_cc_challenge = finished_sub_slots_at_ip[-1].challenge_chain.get_hash()
overflow_rc_challenge = finished_sub_slots_at_ip[-1].reward_chain.get_hash()
additions = None
removals = None
if transaction_data_included:
transaction_data = None
if transaction_data is not None and not transaction_data_included:
additions = transaction_data.additions()
removals = transaction_data.removals()
sub_slots_finished += 1
log.info(
f"Sub slot finished. blocks included: {blocks_added_this_sub_slot} blocks_per_slot: "
f"{(len(block_list) - initial_block_list_len)/sub_slots_finished}"
)
blocks_added_this_sub_slot = 0 # Sub slot ended, overflows are in next sub slot
# Handle overflows: No overflows on new epoch
if new_sub_slot_iters is None and num_empty_slots_added >= skip_slots and new_difficulty is None:
for signage_point_index in range(
constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA,
constants.NUM_SPS_SUB_SLOT,
):
# note that we are passing in the finished slots which include the last slot
signage_point = get_signage_point(
constants,
BlockCache(blocks),
latest_block_eos,
sub_slot_start_total_iters,
uint8(signage_point_index),
finished_sub_slots_eos,
sub_slot_iters,
normalized_to_identity_cc_sp,
)
if signage_point_index == 0:
cc_sp_output_hash = slot_cc_challenge
else:
assert signage_point is not None
assert signage_point.cc_vdf is not None
cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()
# If did not reach the target slots to skip, don't make any proofs for this sub-slot
qualified_proofs = self.get_pospaces_for_challenge(
constants,
slot_cc_challenge,
cc_sp_output_hash,
seed,
difficulty,
sub_slot_iters,
force_plot_id=force_plot_id,
)
for required_iters, proof_of_space in sorted(qualified_proofs, key=lambda t: t[0]):
if blocks_added_this_sub_slot == constants.MAX_SUB_SLOT_BLOCKS:
break
assert start_timestamp is not None
if proof_of_space.pool_contract_puzzle_hash is not None:
if pool_reward_puzzle_hash is not None:
# The caller wants to be paid to a specific address, but this PoSpace is tied to an
# address, so continue until a proof of space tied to a pk is found
continue
pool_target = PoolTarget(proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
if pool_reward_puzzle_hash is not None:
pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0))
else:
pool_target = PoolTarget(self.pool_ph, uint32(0))
if transaction_data is not None:
if previous_generator is not None:
block_generator = best_solution_generator_from_template(
previous_generator, transaction_data
)
else:
block_generator = simple_solution_generator(transaction_data)
aggregate_signature = transaction_data.aggregated_signature
else:
block_generator = None
aggregate_signature = G2Element()
full_block, block_record = get_full_block_and_block_record(
constants,
blocks,
sub_slot_start_total_iters,
uint8(signage_point_index),
proof_of_space,
slot_cc_challenge,
slot_rc_challenge,
farmer_reward_puzzle_hash,
pool_target,
start_timestamp,
start_height,
time_per_block,
block_generator,
aggregate_signature,
additions,
removals,
height_to_hash,
difficulty,
required_iters,
sub_slot_iters,
self.get_plot_signature,
self.get_pool_key_signature,
finished_sub_slots_at_ip,
signage_point,
latest_block,
seed,
overflow_cc_challenge=overflow_cc_challenge,
overflow_rc_challenge=overflow_rc_challenge,
normalized_to_identity_cc_ip=normalized_to_identity_cc_ip,
current_time=current_time,
)
if block_record.is_transaction_block:
transaction_data_included = True
elif guarantee_transaction_block:
continue
if pending_ses:
pending_ses = False
block_list.append(full_block)
if full_block.transactions_generator is not None:
compressor_arg = detect_potential_template_generator(
full_block.height, full_block.transactions_generator
)
if compressor_arg is not None:
previous_generator = compressor_arg
blocks_added_this_sub_slot += 1
log.info(f"Created block {block_record.height } ov=True, iters " f"{block_record.total_iters}")
num_blocks -= 1
if num_blocks == 0:
return block_list
blocks[full_block.header_hash] = block_record
height_to_hash[uint32(full_block.height)] = full_block.header_hash
latest_block = blocks[full_block.header_hash]
finished_sub_slots_at_ip = []
finished_sub_slots_at_sp = finished_sub_slots_eos.copy()
same_slot_as_last = False
sub_slot_start_total_iters = uint128(sub_slot_start_total_iters + sub_slot_iters)
if num_blocks < prev_num_of_blocks:
num_empty_slots_added += 1
if new_sub_slot_iters is not None:
assert new_difficulty is not None
sub_slot_iters = new_sub_slot_iters
difficulty = new_difficulty
def create_genesis_block(
self,
constants: ConsensusConstants,
seed: bytes32 = b"",
timestamp: Optional[uint64] = None,
force_overflow: bool = False,
skip_slots: int = 0,
) -> FullBlock:
if timestamp is None:
timestamp = uint64(int(time.time()))
finished_sub_slots: List[EndOfSubSlotBundle] = []
unfinished_block: Optional[UnfinishedBlock] = None
ip_iters: uint64 = uint64(0)
sub_slot_total_iters: uint128 = uint128(0)
# Keep trying until we get a good proof of space that also passes sp filter
while True:
cc_challenge, rc_challenge = get_challenges(constants, {}, finished_sub_slots, None)
for signage_point_index in range(0, constants.NUM_SPS_SUB_SLOT):
signage_point: SignagePoint = get_signage_point(
constants,
BlockCache({}, {}),
None,
sub_slot_total_iters,
uint8(signage_point_index),
finished_sub_slots,
constants.SUB_SLOT_ITERS_STARTING,
)
if signage_point_index == 0:
cc_sp_output_hash: bytes32 = cc_challenge
else:
assert signage_point is not None
assert signage_point.cc_vdf is not None
cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()
# If did not reach the target slots to skip, don't make any proofs for this sub-slot
qualified_proofs: List[Tuple[uint64, ProofOfSpace]] = self.get_pospaces_for_challenge(
constants,
cc_challenge,
cc_sp_output_hash,
seed,
constants.DIFFICULTY_STARTING,
constants.SUB_SLOT_ITERS_STARTING,
)
# Try each of the proofs of space
for required_iters, proof_of_space in qualified_proofs:
sp_iters: uint64 = calculate_sp_iters(
constants,
uint64(constants.SUB_SLOT_ITERS_STARTING),
uint8(signage_point_index),
)
ip_iters = calculate_ip_iters(
constants,
uint64(constants.SUB_SLOT_ITERS_STARTING),
uint8(signage_point_index),
required_iters,
)
is_overflow = is_overflow_block(constants, uint8(signage_point_index))
if force_overflow and not is_overflow:
continue
if len(finished_sub_slots) < skip_slots:
continue
unfinished_block = create_test_unfinished_block(
constants,
sub_slot_total_iters,
constants.SUB_SLOT_ITERS_STARTING,
uint8(signage_point_index),
sp_iters,
ip_iters,
proof_of_space,
cc_challenge,
constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH,
PoolTarget(constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH, uint32(0)),
self.get_plot_signature,
self.get_pool_key_signature,
signage_point,
timestamp,
BlockCache({}),
seed=seed,
finished_sub_slots_input=finished_sub_slots,
)
assert unfinished_block is not None
if not is_overflow:
cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_challenge,
ip_iters,
)
cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters)
rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_challenge,
ip_iters,
)
assert unfinished_block is not None
total_iters_sp = uint128(sub_slot_total_iters + sp_iters)
return unfinished_block_to_full_block(
unfinished_block,
cc_ip_vdf,
cc_ip_proof,
rc_ip_vdf,
rc_ip_proof,
None,
None,
finished_sub_slots,
None,
BlockCache({}),
total_iters_sp,
constants.DIFFICULTY_STARTING,
)
if signage_point_index == constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA - 1:
# Finish the end of sub-slot and try again next sub-slot
cc_vdf, cc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_challenge,
constants.SUB_SLOT_ITERS_STARTING,
)
rc_vdf, rc_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_challenge,
constants.SUB_SLOT_ITERS_STARTING,
)
cc_slot = ChallengeChainSubSlot(cc_vdf, None, None, None, None)
finished_sub_slots.append(
EndOfSubSlotBundle(
cc_slot,
None,
RewardChainSubSlot(
rc_vdf,
cc_slot.get_hash(),
None,
uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK),
),
SubSlotProofs(cc_proof, None, rc_proof),
)
)
if unfinished_block is not None:
cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
finished_sub_slots[-1].challenge_chain.get_hash(),
ip_iters,
)
rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
finished_sub_slots[-1].reward_chain.get_hash(),
ip_iters,
)
total_iters_sp = uint128(
sub_slot_total_iters
+ calculate_sp_iters(
self.constants,
self.constants.SUB_SLOT_ITERS_STARTING,
unfinished_block.reward_chain_block.signage_point_index,
)
)
return unfinished_block_to_full_block(
unfinished_block,
cc_ip_vdf,
cc_ip_proof,
rc_ip_vdf,
rc_ip_proof,
None,
None,
finished_sub_slots,
None,
BlockCache({}),
total_iters_sp,
constants.DIFFICULTY_STARTING,
)
sub_slot_total_iters = uint128(sub_slot_total_iters + constants.SUB_SLOT_ITERS_STARTING)
def get_pospaces_for_challenge(
self,
constants: ConsensusConstants,
challenge_hash: bytes32,
signage_point: bytes32,
seed: bytes,
difficulty: uint64,
sub_slot_iters: uint64,
force_plot_id: Optional[bytes32] = None,
) -> List[Tuple[uint64, ProofOfSpace]]:
found_proofs: List[Tuple[uint64, ProofOfSpace]] = []
plots: List[PlotInfo] = [
plot_info for _, plot_info in sorted(list(self.plots.items()), key=lambda x: str(x[0]))
]
random.seed(seed)
for plot_info in plots:
plot_id: bytes32 = plot_info.prover.get_id()
if force_plot_id is not None and plot_id != force_plot_id:
continue
if ProofOfSpace.passes_plot_filter(constants, plot_id, challenge_hash, signage_point):
new_challenge: bytes32 = ProofOfSpace.calculate_pos_challenge(plot_id, challenge_hash, signage_point)
qualities = plot_info.prover.get_qualities_for_challenge(new_challenge)
for proof_index, quality_str in enumerate(qualities):
required_iters = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
quality_str,
plot_info.prover.get_size(),
difficulty,
signage_point,
)
if required_iters < calculate_sp_interval_iters(constants, sub_slot_iters):
proof_xs: bytes = plot_info.prover.get_full_proof(new_challenge, proof_index)
# Look up local_sk from plot to save locked memory
(
pool_public_key_or_puzzle_hash,
farmer_public_key,
local_master_sk,
) = parse_plot_info(plot_info.prover.get_memo())
local_sk = master_sk_to_local_sk(local_master_sk)
if isinstance(pool_public_key_or_puzzle_hash, G1Element):
include_taproot = False
else:
assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
include_taproot = True
plot_pk = ProofOfSpace.generate_plot_public_key(
local_sk.get_g1(), farmer_public_key, include_taproot
)
proof_of_space: ProofOfSpace = ProofOfSpace(
new_challenge,
plot_info.pool_public_key,
plot_info.pool_contract_puzzle_hash,
plot_pk,
plot_info.prover.get_size(),
proof_xs,
)
found_proofs.append((required_iters, proof_of_space))
random_sample = found_proofs
if len(found_proofs) >= 1:
if random.random() < 0.1:
# Removes some proofs of space to create "random" chains, based on the seed
random_sample = random.sample(found_proofs, len(found_proofs) - 1)
return random_sample
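# Hedged usage sketch (illustrative, never invoked here): tests typically build
# one BlockTools per session and ask it for a deterministic chain. The first
# run is slow because test plots are created and cached (see get_plot_dir below).
def _example_make_chain(num_blocks: int = 3) -> List[FullBlock]:
    bt = BlockTools(constants=test_constants)
    return bt.get_consecutive_blocks(num_blocks, guarantee_transaction_block=True)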
def get_signage_point(
constants: ConsensusConstants,
blocks: BlockchainInterface,
latest_block: Optional[BlockRecord],
sub_slot_start_total_iters: uint128,
signage_point_index: uint8,
finished_sub_slots: List[EndOfSubSlotBundle],
sub_slot_iters: uint64,
normalized_to_identity_cc_sp: bool = False,
) -> SignagePoint:
if signage_point_index == 0:
return SignagePoint(None, None, None, None)
sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
overflow = is_overflow_block(constants, signage_point_index)
sp_total_iters = uint128(
sub_slot_start_total_iters + calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
)
(
cc_vdf_challenge,
rc_vdf_challenge,
cc_vdf_input,
rc_vdf_input,
cc_vdf_iters,
rc_vdf_iters,
) = get_signage_point_vdf_info(
constants,
finished_sub_slots,
overflow,
latest_block,
blocks,
sp_total_iters,
sp_iters,
)
cc_sp_vdf, cc_sp_proof = get_vdf_info_and_proof(
constants,
cc_vdf_input,
cc_vdf_challenge,
cc_vdf_iters,
)
rc_sp_vdf, rc_sp_proof = get_vdf_info_and_proof(
constants,
rc_vdf_input,
rc_vdf_challenge,
rc_vdf_iters,
)
cc_sp_vdf = replace(cc_sp_vdf, number_of_iterations=sp_iters)
if normalized_to_identity_cc_sp:
_, cc_sp_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_sp_vdf.challenge,
sp_iters,
True,
)
return SignagePoint(cc_sp_vdf, cc_sp_proof, rc_sp_vdf, rc_sp_proof)
def finish_block(
constants: ConsensusConstants,
blocks: Dict[bytes32, BlockRecord],
height_to_hash: Dict[uint32, bytes32],
finished_sub_slots: List[EndOfSubSlotBundle],
sub_slot_start_total_iters: uint128,
signage_point_index: uint8,
unfinished_block: UnfinishedBlock,
required_iters: uint64,
ip_iters: uint64,
slot_cc_challenge: bytes32,
slot_rc_challenge: bytes32,
latest_block: BlockRecord,
sub_slot_iters: uint64,
difficulty: uint64,
normalized_to_identity_cc_ip: bool = False,
) -> Tuple[FullBlock, BlockRecord]:
is_overflow = is_overflow_block(constants, signage_point_index)
cc_vdf_challenge = slot_cc_challenge
if len(finished_sub_slots) == 0:
new_ip_iters = unfinished_block.total_iters - latest_block.total_iters
cc_vdf_input = latest_block.challenge_vdf_output
rc_vdf_challenge = latest_block.reward_infusion_new_challenge
else:
new_ip_iters = ip_iters
cc_vdf_input = ClassgroupElement.get_default_element()
rc_vdf_challenge = slot_rc_challenge
cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof(
constants,
cc_vdf_input,
cc_vdf_challenge,
new_ip_iters,
)
cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters)
if normalized_to_identity_cc_ip:
_, cc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
cc_ip_vdf.challenge,
ip_iters,
True,
)
deficit = calculate_deficit(
constants,
uint32(latest_block.height + 1),
latest_block,
is_overflow,
len(finished_sub_slots),
)
icc_ip_vdf, icc_ip_proof = get_icc(
constants,
unfinished_block.total_iters,
finished_sub_slots,
latest_block,
blocks,
uint128(sub_slot_start_total_iters + sub_slot_iters) if is_overflow else sub_slot_start_total_iters,
deficit,
)
rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
rc_vdf_challenge,
new_ip_iters,
)
assert unfinished_block is not None
sp_total_iters = uint128(
sub_slot_start_total_iters + calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
)
full_block: FullBlock = unfinished_block_to_full_block(
unfinished_block,
cc_ip_vdf,
cc_ip_proof,
rc_ip_vdf,
rc_ip_proof,
icc_ip_vdf,
icc_ip_proof,
finished_sub_slots,
latest_block,
BlockCache(blocks),
sp_total_iters,
difficulty,
)
block_record = block_to_block_record(constants, BlockCache(blocks), required_iters, full_block, None)
return full_block, block_record
def get_challenges(
constants: ConsensusConstants,
    blocks: Dict[bytes32, BlockRecord],
finished_sub_slots: List[EndOfSubSlotBundle],
prev_header_hash: Optional[bytes32],
) -> Tuple[bytes32, bytes32]:
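    """Return the current (challenge chain, reward chain) challenge hashes.

    If there are no finished sub slots, walk back from prev_header_hash to
    the last block that started a sub slot (or fall back to the genesis
    challenge); otherwise use the hashes of the last finished sub slot.
    """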
if len(finished_sub_slots) == 0:
if prev_header_hash is None:
return constants.GENESIS_CHALLENGE, constants.GENESIS_CHALLENGE
curr: BlockRecord = blocks[prev_header_hash]
while not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
assert curr.finished_challenge_slot_hashes is not None
assert curr.finished_reward_slot_hashes is not None
cc_challenge = curr.finished_challenge_slot_hashes[-1]
rc_challenge = curr.finished_reward_slot_hashes[-1]
else:
cc_challenge = finished_sub_slots[-1].challenge_chain.get_hash()
rc_challenge = finished_sub_slots[-1].reward_chain.get_hash()
return cc_challenge, rc_challenge
def get_plot_dir() -> Path:
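    """Return the test-plot directory, creating it if necessary, under the
    root named by the beet_ROOT environment variable (default ~/.beet/)."""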
cache_path = Path(os.path.expanduser(os.getenv("beet_ROOT", "~/.beet/"))) / "test-plots"
mkdir(cache_path)
return cache_path
def get_plot_tmp_dir():
return get_plot_dir() / "tmp"
def load_block_list(
block_list: List[FullBlock], constants: ConsensusConstants
) -> Tuple[Dict[uint32, bytes32], uint64, Dict[bytes32, BlockRecord]]:
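    """Replay a list of full blocks and return the height-to-hash map, the
    most recent difficulty, and the BlockRecords keyed by header hash."""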
difficulty = 0
height_to_hash: Dict[uint32, bytes32] = {}
    blocks: Dict[bytes32, BlockRecord] = {}
for full_block in block_list:
if full_block.height == 0:
difficulty = uint64(constants.DIFFICULTY_STARTING)
else:
difficulty = full_block.weight - block_list[full_block.height - 1].weight
if full_block.reward_chain_block.signage_point_index == 0:
challenge = full_block.reward_chain_block.pos_ss_cc_challenge_hash
sp_hash = challenge
else:
assert full_block.reward_chain_block.challenge_chain_sp_vdf is not None
challenge = full_block.reward_chain_block.challenge_chain_sp_vdf.challenge
sp_hash = full_block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
quality_str = full_block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants, challenge, sp_hash
)
required_iters: uint64 = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
quality_str,
full_block.reward_chain_block.proof_of_space.size,
uint64(difficulty),
sp_hash,
)
blocks[full_block.header_hash] = block_to_block_record(
constants,
BlockCache(blocks),
required_iters,
full_block,
None,
)
height_to_hash[uint32(full_block.height)] = full_block.header_hash
return height_to_hash, uint64(difficulty), blocks
def get_icc(
constants: ConsensusConstants,
vdf_end_total_iters: uint128,
finished_sub_slots: List[EndOfSubSlotBundle],
latest_block: BlockRecord,
blocks: Dict[bytes32, BlockRecord],
sub_slot_start_total_iters: uint128,
deficit: uint8,
) -> Tuple[Optional[VDFInfo], Optional[VDFProof]]:
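    """Compute the infused-challenge-chain VDF info and proof at the infusion
    point, or return (None, None) when the deficit rules mean no ICC exists
    for this block."""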
if len(finished_sub_slots) == 0:
prev_deficit = latest_block.deficit
else:
prev_deficit = finished_sub_slots[-1].reward_chain.deficit
if deficit == prev_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
# new slot / overflow sb to new slot / overflow sb
return None, None
if deficit == (prev_deficit - 1) == (constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1):
# new slot / overflow sb to challenge sb
return None, None
if len(finished_sub_slots) != 0:
last_ss = finished_sub_slots[-1]
assert last_ss.infused_challenge_chain is not None
assert finished_sub_slots[-1].reward_chain.deficit <= (constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)
return get_vdf_info_and_proof(
constants,
ClassgroupElement.get_default_element(),
last_ss.infused_challenge_chain.get_hash(),
uint64(vdf_end_total_iters - sub_slot_start_total_iters),
)
curr = latest_block # curr deficit is 0, 1, 2, 3, or 4
while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot:
curr = blocks[curr.prev_hash]
icc_iters = uint64(vdf_end_total_iters - latest_block.total_iters)
if latest_block.is_challenge_block(constants):
icc_input: Optional[ClassgroupElement] = ClassgroupElement.get_default_element()
else:
icc_input = latest_block.infused_challenge_vdf_output
assert icc_input is not None
if curr.is_challenge_block(constants): # Deficit 4
icc_challenge_hash = curr.challenge_block_info_hash
else:
assert curr.finished_infused_challenge_slot_hashes is not None
# First block in sub slot has deficit 0,1,2 or 3
icc_challenge_hash = curr.finished_infused_challenge_slot_hashes[-1]
return get_vdf_info_and_proof(
constants,
icc_input,
icc_challenge_hash,
icc_iters,
)
def get_full_block_and_block_record(
constants: ConsensusConstants,
    blocks: Dict[bytes32, BlockRecord],
sub_slot_start_total_iters: uint128,
signage_point_index: uint8,
proof_of_space: ProofOfSpace,
slot_cc_challenge: bytes32,
slot_rc_challenge: bytes32,
farmer_reward_puzzle_hash: bytes32,
pool_target: PoolTarget,
start_timestamp: uint64,
start_height: uint32,
time_per_block: float,
block_generator: Optional[BlockGenerator],
aggregate_signature: G2Element,
additions: Optional[List[Coin]],
removals: Optional[List[Coin]],
height_to_hash: Dict[uint32, bytes32],
difficulty: uint64,
required_iters: uint64,
sub_slot_iters: uint64,
get_plot_signature: Callable[[bytes32, G1Element], G2Element],
get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
finished_sub_slots: List[EndOfSubSlotBundle],
signage_point: SignagePoint,
prev_block: BlockRecord,
seed: bytes = b"",
    overflow_cc_challenge: Optional[bytes32] = None,
    overflow_rc_challenge: Optional[bytes32] = None,
normalized_to_identity_cc_ip: bool = False,
current_time: bool = False,
) -> Tuple[FullBlock, BlockRecord]:
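    """Create an unfinished block at the signage point, then immediately
    finish it, returning the FullBlock and its BlockRecord. Timestamps either
    follow the wall clock (current_time=True) or advance by time_per_block
    per block."""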
if current_time is True:
if prev_block.timestamp is not None:
timestamp = uint64(max(int(time.time()), prev_block.timestamp + int(time_per_block)))
else:
timestamp = uint64(int(time.time()))
else:
timestamp = uint64(start_timestamp + int((prev_block.height + 1 - start_height) * time_per_block))
sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
ip_iters = calculate_ip_iters(constants, sub_slot_iters, signage_point_index, required_iters)
unfinished_block = create_test_unfinished_block(
constants,
sub_slot_start_total_iters,
sub_slot_iters,
signage_point_index,
sp_iters,
ip_iters,
proof_of_space,
slot_cc_challenge,
farmer_reward_puzzle_hash,
pool_target,
get_plot_signature,
get_pool_signature,
signage_point,
timestamp,
BlockCache(blocks),
seed,
block_generator,
aggregate_signature,
additions,
removals,
prev_block,
finished_sub_slots,
)
if (overflow_cc_challenge is not None) and (overflow_rc_challenge is not None):
slot_cc_challenge = overflow_cc_challenge
slot_rc_challenge = overflow_rc_challenge
full_block, block_record = finish_block(
constants,
blocks,
height_to_hash,
finished_sub_slots,
sub_slot_start_total_iters,
signage_point_index,
unfinished_block,
required_iters,
ip_iters,
slot_cc_challenge,
slot_rc_challenge,
prev_block,
sub_slot_iters,
difficulty,
normalized_to_identity_cc_ip,
)
return full_block, block_record
def get_name_puzzle_conditions_test(generator: BlockGenerator, max_cost: int) -> NPCResult:
"""
This is similar to get_name_puzzle_conditions(), but it doesn't validate
the conditions. We rely on this in tests to create invalid blocks.
safe_mode is implicitly True in this call
"""
try:
block_program, block_program_args = setup_generator_args(generator)
clvm_cost, result = GENERATOR_MOD.run_safe_with_cost(max_cost, block_program, block_program_args)
npc_list: List[NPC] = []
for res in result.first().as_iter():
conditions_list: List[ConditionWithArgs] = []
spent_coin_parent_id: bytes32 = res.first().as_atom()
res = res.rest()
spent_coin_puzzle_hash: bytes32 = res.first().as_atom()
res = res.rest()
spent_coin_amount: uint64 = uint64(res.first().as_int())
res = res.rest()
spent_coin: Coin = Coin(spent_coin_parent_id, spent_coin_puzzle_hash, spent_coin_amount)
for cond in res.first().as_iter():
condition = cond.first().as_atom()
cvl = ConditionWithArgs(ConditionOpcode(condition), cond.rest().as_atom_list())
conditions_list.append(cvl)
conditions_dict = conditions_by_opcode(conditions_list)
if conditions_dict is None:
conditions_dict = {}
npc_list.append(
NPC(spent_coin.name(), spent_coin.puzzle_hash, [(a, b) for a, b in conditions_dict.items()])
)
return NPCResult(None, npc_list, uint64(clvm_cost))
except Exception:
return NPCResult(uint16(Err.GENERATOR_RUNTIME_ERROR.value), [], uint64(0))
def create_test_foliage(
constants: ConsensusConstants,
reward_block_unfinished: RewardChainBlockUnfinished,
block_generator: Optional[BlockGenerator],
aggregate_sig: G2Element,
additions: List[Coin],
removals: List[Coin],
prev_block: Optional[BlockRecord],
blocks: BlockchainInterface,
total_iters_sp: uint128,
timestamp: uint64,
farmer_reward_puzzlehash: bytes32,
pool_target: PoolTarget,
get_plot_signature: Callable[[bytes32, G1Element], G2Element],
get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
    seed: bytes = b"",
) -> Tuple[Foliage, Optional[FoliageTransactionBlock], Optional[TransactionsInfo]]:
"""
Creates a foliage for a given reward chain block. This may or may not be a tx block. In the case of a tx block,
the return values are not None. This is called at the signage point, so some of this information may be
tweaked at the infusion point.
Args:
constants: consensus constants being used for this chain
reward_block_unfinished: the reward block to look at, potentially at the signage point
block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transactions (or infinity element)
prev_block: the previous block at the signage point
blocks: dict from header hash to blocks, of all ancestor blocks
total_iters_sp: total iters at the signage point
timestamp: timestamp to put into the foliage block
farmer_reward_puzzlehash: where to pay out farming reward
pool_target: where to pay out pool reward
get_plot_signature: retrieve the signature corresponding to the plot public key
get_pool_signature: retrieve the signature corresponding to the pool public key
seed: seed to randomize block
"""
if prev_block is not None:
res = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
is_transaction_block: bool = res[0]
prev_transaction_block: Optional[BlockRecord] = res[1]
else:
# Genesis is a transaction block
prev_transaction_block = None
is_transaction_block = True
random.seed(seed)
# Use the extension data to create different blocks based on header hash
extension_data: bytes32 = random.randint(0, 100000000).to_bytes(32, "big")
if prev_block is None:
height: uint32 = uint32(0)
else:
height = uint32(prev_block.height + 1)
# Create filter
byte_array_tx: List[bytes32] = []
tx_additions: List[Coin] = []
tx_removals: List[bytes32] = []
pool_target_signature: Optional[G2Element] = get_pool_signature(
pool_target, reward_block_unfinished.proof_of_space.pool_public_key
)
foliage_data = FoliageBlockData(
reward_block_unfinished.get_hash(),
pool_target,
pool_target_signature,
farmer_reward_puzzlehash,
extension_data,
)
foliage_block_data_signature: G2Element = get_plot_signature(
foliage_data.get_hash(),
reward_block_unfinished.proof_of_space.plot_public_key,
)
prev_block_hash: bytes32 = constants.GENESIS_CHALLENGE
if height != 0:
assert prev_block is not None
prev_block_hash = prev_block.header_hash
generator_block_heights_list: List[uint32] = []
if is_transaction_block:
cost = uint64(0)
# Calculate the cost of transactions
if block_generator is not None:
generator_block_heights_list = block_generator.block_height_list()
result: NPCResult = get_name_puzzle_conditions_test(block_generator, constants.MAX_BLOCK_COST_CLVM)
cost = calculate_cost_of_program(block_generator.program, result, constants.COST_PER_BYTE)
removal_amount = 0
addition_amount = 0
for coin in removals:
removal_amount += coin.amount
for coin in additions:
addition_amount += coin.amount
spend_bundle_fees = removal_amount - addition_amount
# in order to allow creating blocks that mint coins, clamp the fee
# to 0, if it ends up being negative
if spend_bundle_fees < 0:
spend_bundle_fees = 0
else:
spend_bundle_fees = 0
reward_claims_incorporated = []
if height > 0:
assert prev_transaction_block is not None
assert prev_block is not None
curr: BlockRecord = prev_block
while not curr.is_transaction_block:
curr = blocks.block_record(curr.prev_hash)
assert curr.fees is not None
pool_coin = create_pool_coin(
curr.height, curr.pool_puzzle_hash, calculate_pool_reward(curr.height), constants.GENESIS_CHALLENGE
)
farmer_coin = create_farmer_coin(
curr.height,
curr.farmer_puzzle_hash,
uint64(calculate_base_farmer_reward(curr.height) + curr.fees),
constants.GENESIS_CHALLENGE,
)
assert curr.header_hash == prev_transaction_block.header_hash
reward_claims_incorporated += [pool_coin, farmer_coin]
if curr.height > 0:
curr = blocks.block_record(curr.prev_hash)
# Prev block is not genesis
while not curr.is_transaction_block:
pool_coin = create_pool_coin(
curr.height,
curr.pool_puzzle_hash,
calculate_pool_reward(curr.height),
constants.GENESIS_CHALLENGE,
)
farmer_coin = create_farmer_coin(
curr.height,
curr.farmer_puzzle_hash,
calculate_base_farmer_reward(curr.height),
constants.GENESIS_CHALLENGE,
)
reward_claims_incorporated += [pool_coin, farmer_coin]
curr = blocks.block_record(curr.prev_hash)
additions.extend(reward_claims_incorporated.copy())
for coin in additions:
tx_additions.append(coin)
byte_array_tx.append(bytearray(coin.puzzle_hash))
for coin in removals:
tx_removals.append(coin.name())
byte_array_tx.append(bytearray(coin.name()))
bip158: PyBIP158 = PyBIP158(byte_array_tx)
encoded = bytes(bip158.GetEncoded())
removal_merkle_set = MerkleSet()
addition_merkle_set = MerkleSet()
# Create removal Merkle set
for coin_name in tx_removals:
removal_merkle_set.add_already_hashed(coin_name)
# Create addition Merkle set
puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}
for coin in tx_additions:
if coin.puzzle_hash in puzzlehash_coin_map:
puzzlehash_coin_map[coin.puzzle_hash].append(coin)
else:
puzzlehash_coin_map[coin.puzzle_hash] = [coin]
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle, coins in puzzlehash_coin_map.items():
addition_merkle_set.add_already_hashed(puzzle)
addition_merkle_set.add_already_hashed(hash_coin_list(coins))
additions_root = addition_merkle_set.get_root()
removals_root = removal_merkle_set.get_root()
generator_hash = bytes32([0] * 32)
if block_generator is not None:
generator_hash = std_hash(block_generator.program)
generator_refs_hash = bytes32([1] * 32)
if generator_block_heights_list not in (None, []):
generator_ref_list_bytes = b"".join([bytes(i) for i in generator_block_heights_list])
generator_refs_hash = std_hash(generator_ref_list_bytes)
filter_hash: bytes32 = std_hash(encoded)
transactions_info: Optional[TransactionsInfo] = TransactionsInfo(
generator_hash,
generator_refs_hash,
aggregate_sig,
uint64(spend_bundle_fees),
cost,
reward_claims_incorporated,
)
if prev_transaction_block is None:
prev_transaction_block_hash: bytes32 = constants.GENESIS_CHALLENGE
else:
prev_transaction_block_hash = prev_transaction_block.header_hash
assert transactions_info is not None
foliage_transaction_block: Optional[FoliageTransactionBlock] = FoliageTransactionBlock(
prev_transaction_block_hash,
timestamp,
filter_hash,
additions_root,
removals_root,
transactions_info.get_hash(),
)
assert foliage_transaction_block is not None
foliage_transaction_block_hash: Optional[bytes32] = foliage_transaction_block.get_hash()
foliage_transaction_block_signature: Optional[G2Element] = get_plot_signature(
foliage_transaction_block_hash, reward_block_unfinished.proof_of_space.plot_public_key
)
assert foliage_transaction_block_signature is not None
else:
foliage_transaction_block_hash = None
foliage_transaction_block_signature = None
foliage_transaction_block = None
transactions_info = None
assert (foliage_transaction_block_hash is None) == (foliage_transaction_block_signature is None)
foliage = Foliage(
prev_block_hash,
reward_block_unfinished.get_hash(),
foliage_data,
foliage_block_data_signature,
foliage_transaction_block_hash,
foliage_transaction_block_signature,
)
return foliage, foliage_transaction_block, transactions_info
def create_test_unfinished_block(
constants: ConsensusConstants,
sub_slot_start_total_iters: uint128,
sub_slot_iters: uint64,
signage_point_index: uint8,
sp_iters: uint64,
ip_iters: uint64,
proof_of_space: ProofOfSpace,
slot_cc_challenge: bytes32,
farmer_reward_puzzle_hash: bytes32,
pool_target: PoolTarget,
get_plot_signature: Callable[[bytes32, G1Element], G2Element],
get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
signage_point: SignagePoint,
timestamp: uint64,
blocks: BlockchainInterface,
    seed: bytes = b"",
block_generator: Optional[BlockGenerator] = None,
aggregate_sig: G2Element = G2Element(),
additions: Optional[List[Coin]] = None,
removals: Optional[List[Coin]] = None,
prev_block: Optional[BlockRecord] = None,
    finished_sub_slots_input: Optional[List[EndOfSubSlotBundle]] = None,
) -> UnfinishedBlock:
"""
Creates a new unfinished block using all the information available at the signage point. This will have to be
modified using information from the infusion point.
Args:
constants: consensus constants being used for this chain
sub_slot_start_total_iters: the starting sub-slot iters at the signage point sub-slot
sub_slot_iters: sub-slot-iters at the infusion point epoch
signage_point_index: signage point index of the block to create
sp_iters: sp_iters of the block to create
ip_iters: ip_iters of the block to create
proof_of_space: proof of space of the block to create
slot_cc_challenge: challenge hash at the sp sub-slot
farmer_reward_puzzle_hash: where to pay out farmer rewards
pool_target: where to pay out pool rewards
get_plot_signature: function that returns signature corresponding to plot public key
get_pool_signature: function that returns signature corresponding to pool public key
signage_point: signage point information (VDFs)
timestamp: timestamp to add to the foliage block, if created
seed: seed to randomize chain
block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transactions (or infinity element)
additions: Coins added in spend_bundle
removals: Coins removed in spend_bundle
prev_block: previous block (already in chain) from the signage point
blocks: dictionary from header hash to SBR of all included SBR
finished_sub_slots_input: finished_sub_slots at the signage point
    Returns:
        the new unfinished block, to be completed later at the infusion point
"""
if finished_sub_slots_input is None:
finished_sub_slots: List[EndOfSubSlotBundle] = []
else:
finished_sub_slots = finished_sub_slots_input.copy()
overflow: bool = sp_iters > ip_iters
total_iters_sp: uint128 = uint128(sub_slot_start_total_iters + sp_iters)
is_genesis: bool = prev_block is None
new_sub_slot: bool = len(finished_sub_slots) > 0
cc_sp_hash: Optional[bytes32] = slot_cc_challenge
# Only enters this if statement if we are in testing mode (making VDF proofs here)
if signage_point.cc_vdf is not None:
assert signage_point.rc_vdf is not None
cc_sp_hash = signage_point.cc_vdf.output.get_hash()
rc_sp_hash = signage_point.rc_vdf.output.get_hash()
else:
if new_sub_slot:
rc_sp_hash = finished_sub_slots[-1].reward_chain.get_hash()
else:
if is_genesis:
rc_sp_hash = constants.GENESIS_CHALLENGE
else:
assert prev_block is not None
assert blocks is not None
curr = prev_block
while not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
assert curr.finished_reward_slot_hashes is not None
rc_sp_hash = curr.finished_reward_slot_hashes[-1]
signage_point = SignagePoint(None, None, None, None)
cc_sp_signature: Optional[G2Element] = get_plot_signature(cc_sp_hash, proof_of_space.plot_public_key)
rc_sp_signature: Optional[G2Element] = get_plot_signature(rc_sp_hash, proof_of_space.plot_public_key)
assert cc_sp_signature is not None
assert rc_sp_signature is not None
assert AugSchemeMPL.verify(proof_of_space.plot_public_key, cc_sp_hash, cc_sp_signature)
total_iters = uint128(sub_slot_start_total_iters + ip_iters + (sub_slot_iters if overflow else 0))
rc_block = RewardChainBlockUnfinished(
total_iters,
signage_point_index,
slot_cc_challenge,
proof_of_space,
signage_point.cc_vdf,
cc_sp_signature,
signage_point.rc_vdf,
rc_sp_signature,
)
if additions is None:
additions = []
if removals is None:
removals = []
(foliage, foliage_transaction_block, transactions_info,) = create_test_foliage(
constants,
rc_block,
block_generator,
aggregate_sig,
additions,
removals,
prev_block,
blocks,
total_iters_sp,
timestamp,
farmer_reward_puzzle_hash,
pool_target,
get_plot_signature,
get_pool_signature,
seed,
)
return UnfinishedBlock(
finished_sub_slots,
rc_block,
signage_point.cc_proof,
signage_point.rc_proof,
foliage,
foliage_transaction_block,
transactions_info,
block_generator.program if block_generator else None,
block_generator.block_height_list() if block_generator else [],
)
| [
"[email protected]"
]
| |
d03b33dff1747f43c63760a1a272b4708f3aca49 | c5c56d7c14b4518e53bcde2527b9cc6e53a7e1b9 | /custom_assert/tennis.py | 5df7831abac8b0ca8585aea91cdde3d40996ccc3 | []
| no_license | lancelote/pluralsight-unit-testing-python | 0402a39e3800eec49f2be529e684d028689d3b47 | fd5ce8264bc95ed66109c4fa575a177248c3d49a | refs/heads/master | 2021-01-10T08:06:39.605195 | 2016-03-23T08:15:25 | 2016-03-23T08:15:25 | 51,952,064 | 4 | 6 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | SCORE_NAMES = ('Love', 'Fifteen', 'Thirty', 'Forty')
def tennis_score(player1, player2):
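    """Return the spoken tennis score given each player's point count, used
    as an index 0-3 into SCORE_NAMES: equal counts give e.g. 'Thirty-All',
    otherwise e.g. tennis_score(1, 3) -> 'Fifteen-Forty'."""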
if player1 == player2:
return '%s-All' % SCORE_NAMES[player1]
else:
return '%s-%s' % (SCORE_NAMES[player1], SCORE_NAMES[player2])
| [
"[email protected]"
]
| |
bf9470c3ab98fc4a2ed3b629dd2537ada28fcb7e | 6cad5c613306789b9bd6387c2e7af02515b1c0ad | /django_document/inheritance/models/abstract_base_class.py | a42ba4fde37800e90813d8caad6d4f2461dfe01b | []
| no_license | Isaccchoi/django_document_project | ead5eb7b2e932ae5401d5a3cdb3672d3dfd8f9f5 | 980f25c98f99994e6148af16ed82ae4f12d50870 | refs/heads/master | 2021-05-08T06:12:51.261138 | 2017-10-13T05:14:58 | 2017-10-13T05:14:58 | 106,355,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | from django.db import models
__all__ = (
'School',
'CommonInfo',
'Student',
'Teacher',
)
class School(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class CommonInfo(models.Model):
    # When inherited from an abstract base class, a fixed related_name on a
    # ForeignKey raises an error: every subclass would share the same
    # reverse-accessor name. Using %(app_label)s and %(class)s keeps the
    # names distinct and removes the collision.
school = models.ForeignKey(School, blank=True, null=True, related_name='%(app_label)s_%(class)s_set')
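    # With an app label of, for example, 'inheritance' (illustrative only),
    # this expands to related_name='inheritance_student_set' on Student and
    # 'inheritance_teacher_set' on Teacher, so the reverse accessors differ.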
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
class Meta:
abstract = True
class Student(CommonInfo):
home_group = models.CharField(max_length=5)
def __str__(self):
return self.name
class Teacher(CommonInfo):
subject = models.CharField(max_length=30)
def __str__(self):
return self.name
| [
"[email protected]"
]
| |
6d1a08093446a147d751a66587036a6df1e2ebde | 142e8dbcd065e689dd7599f1f2b7ee23f2ae9616 | /validation_numeric_or_not.py | 59ce90e46cb34fe3fe57b6b83078bc38af6c18dd | []
| no_license | shanthivimalanataraajan01/Beginner | f62ef7ba9b4c99591ca61f5f68a75d542c4adeb1 | 0b45d623ae24b0896a1d3f91e01fc497c31edc1d | refs/heads/master | 2020-04-26T22:19:12.549521 | 2019-01-25T10:30:02 | 2019-01-25T10:30:02 | 173,869,297 | 0 | 0 | null | 2019-03-05T03:51:53 | 2019-03-05T03:51:53 | null | UTF-8 | Python | false | false | 64 | py | # keep the input as a string: int(input()) would crash on non-numeric
# input, and an int has no .isalpha() method
a = input()
if a.isalpha():
print("No")
else:
print("Yes")
| [
"[email protected]"
]
| |
adf1a0335935312323435fd90f890423097b9fad | e1fac9437a480e5d1ab9527a28c28f6ee3d7af6e | /skyrock/migrations/0014_auto_20190822_0913.py | db8d986ed406f1fb4ac05d1783a3beab3225eef2 | []
| no_license | LuLue7775/Skyrock-Backend-Training | 3e4c41bcc78fbfabb8a8c1114dd15ca94bc1055e | 3b6d3d697be1875442eeba5127c8798de1ca6499 | refs/heads/master | 2022-06-19T04:26:10.129558 | 2019-09-12T06:41:58 | 2019-09-12T06:41:58 | 207,708,169 | 1 | 1 | null | 2022-05-25T02:24:55 | 2019-09-11T02:45:41 | Python | UTF-8 | Python | false | false | 564 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2019-08-22 09:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('skyrock', '0013_auto_20190822_0909'),
]
operations = [
migrations.RemoveField(
model_name='student',
name='age',
),
migrations.AddField(
model_name='student',
name='birth_date',
field=models.DateTimeField(blank=True, null=True),
),
]
| [
"[email protected]"
]
| |
e2e1180c912988844c5fe890a9b70135731ea883 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /pQavNkBbdmvSMmx5x_2.py | 3582e4157098df37fe07a238039d40ead9325681 | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | """
Create a function that returns the **majority vote** in a list. A majority
vote is an element that occurs **> N/2** times in a list (where **N** is the
length of the list).
### Examples
majority_vote(["A", "A", "B"]) ➞ "A"
majority_vote(["A", "A", "A", "B", "C", "A"]) ➞ "A"
majority_vote(["A", "B", "B", "A", "C", "C"]) ➞ None
### Notes
* The frequency of the majority element must be **strictly greater** than 1/2.
* If there is no majority element, return `None`.
* If the list is empty, return `None`.
"""
def majority_vote(lst):
for i in lst:
if lst.count(i) > len(lst)/2:
            return i
    return None  # no majority element (also covers an empty list)
| [
"[email protected]"
]
| |
b55d40212c755128e12ddd2efb8d0f9d653c8573 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/sphinx/venv/Lib/site-packages/setuptools/msvc.py | 467c8192bd17c3c14a577e9b90a44ffaec5dc861 | [
"LicenseRef-scancode-other-permissive",
"BSD-3-Clause"
]
| permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:1df5ddb5b9a19b10195da6054f634166b5d3f12771ddf66587cc886e594b199d
size 51126
| [
"[email protected]"
]
| |
359f5287850dad160771b45c8dccdd1bd9ad768b | 59381d3e69e4a288cdeb4aeecc2e9c84a28759b2 | /selvbetjening/sadmin2/tests/ui/dashboard.py | 56194920cccb403216b896dfd25cd46000b0f2ab | [
"MIT"
]
| permissive | animekita/selvbetjening | 88cb75164f8ab0b3341a6ba4dd85d425c601ee4d | fee63d178fbd5ce2976c04d3a4b2dde6d8691892 | refs/heads/master | 2021-07-05T01:10:59.900369 | 2015-09-17T15:15:29 | 2015-09-17T15:15:29 | 4,826,342 | 0 | 1 | MIT | 2021-06-10T17:35:22 | 2012-06-28T22:17:15 | Python | UTF-8 | Python | false | false | 347 | py | from django.core.urlresolvers import reverse
from common import UITestCase
class DashboardTestCase(UITestCase):
fixtures = ['sdemo-example-site.json']
def test_load(self):
self.login_admin()
# Check that the dashboard is the first page we see after login
self.assertTrue(self.wd.is_text_present('Dashboard'))
| [
"[email protected]"
]
| |
271de39a148eac6bca2cf614057de6f6b38f1002 | 8cf427b0574e8e41e5201cc02c3e736f264a2000 | /original/yolo3_auto_label/FLIR_ws/build/catkin_generated/order_packages.py | 34f5c1ed3463d16400524b16c632308592765db4 | []
| no_license | Lin1225/Data_amplification_all | a88561b9cae481561683b32b6cede35461fa0e3e | e988990ea8dd53b28ed2da6046ea7aeeda6a01b6 | refs/heads/master | 2023-05-06T03:41:00.002786 | 2021-05-28T02:48:38 | 2021-05-28T02:48:38 | 296,257,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/lab/Documents/Data_reforement_code/yolo3_auto_label/FLIR_ws/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/home/lab/Desktop/NEW_hiwin_control/devel;/opt/ros/kinetic".split(';') if "/home/lab/Desktop/NEW_hiwin_control/devel;/opt/ros/kinetic" != "" else []
| [
"[email protected]"
]
| |
b80b2f3fb6d3d9ea31fe7f2a79ebb0112b4efb2a | 54ed8b1e0f9d0ae2d67cd86067fd920e82a4d441 | /litex_boards/platforms/gsd_butterstick.py | c1c2849cc2f012f8b4294bd7e6a8f1e8d075b2a5 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | doraemoncito/litex-boards | 88588260371666f23d17b3709794a020084dd7ff | 55ea71bd0199226e3e993fb7bd224b9c6d5d10ef | refs/heads/master | 2023-07-18T19:35:07.831531 | 2021-09-01T17:21:16 | 2021-09-01T17:21:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,072 | py | #
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Greg Davill <[email protected]>
# Copyright (c) 2021 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.lattice import LatticePlatform
from litex.build.lattice.programmer import OpenOCDJTAGProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io_r1_0 = [
# Clk
("clk30", 0, Pins("B12"), IOStandard("LVCMOS33")),
# Leds
("user_led", 0, Pins("C13"), IOStandard("LVCMOS33")),
("user_led", 1, Pins("D12"), IOStandard("LVCMOS33")),
("user_led", 2, Pins(" U2"), IOStandard("LVCMOS33")),
("user_led", 3, Pins(" T3"), IOStandard("LVCMOS33")),
("user_led", 4, Pins("D13"), IOStandard("LVCMOS33")),
("user_led", 5, Pins("E13"), IOStandard("LVCMOS33")),
("user_led", 6, Pins("C16"), IOStandard("LVCMOS33")),
("user_led_color", 0, Pins("T1 R1 U1"), IOStandard("LVCMOS33")),
# Buttons
("user_btn", 0, Pins("U16"), IOStandard("SSTL135_I")),
("user_btn", 1, Pins("T17"), IOStandard("SSTL135_I")),
# DDR3 SDRAM
("ddram", 0,
Subsignal("a", Pins(
"G16 E19 E20 F16 F19 E16 F17 L20 "
"M20 E18 G18 D18 H18 C18 D17 G20 "),
IOStandard("SSTL135_I")),
Subsignal("ba", Pins("H16 F20 H20"), IOStandard("SSTL135_I")),
Subsignal("ras_n", Pins("K18"), IOStandard("SSTL135_I")),
Subsignal("cas_n", Pins("J17"), IOStandard("SSTL135_I")),
Subsignal("we_n", Pins("G19"), IOStandard("SSTL135_I")),
Subsignal("cs_n", Pins("J20 J16"), IOStandard("SSTL135_I")),
Subsignal("dm", Pins("U20 L18"), IOStandard("SSTL135_I")),
Subsignal("dq", Pins(
"U19 T18 U18 R20 P18 P19 P20 N20",
"L19 L17 L16 R16 N18 R17 N17 P17"),
IOStandard("SSTL135_I"),
Misc("TERMINATION=75")),
Subsignal("dqs_p", Pins("T19 N16"), IOStandard("SSTL135D_I"),
Misc("TERMINATION=OFF"),
Misc("DIFFRESISTOR=100")),
Subsignal("clk_p", Pins("C20 J19"), IOStandard("SSTL135D_I")),
Subsignal("cke", Pins("F18 J18"), IOStandard("SSTL135_I")),
Subsignal("odt", Pins("K20 H17"), IOStandard("SSTL135_I")),
Subsignal("reset_n", Pins("E17"), IOStandard("SSTL135_I")),
Misc("SLEWRATE=FAST")
),
# RGMII Ethernet
("eth_clocks", 0,
Subsignal("tx", Pins("E15")),
Subsignal("rx", Pins("D11")),
IOStandard("LVCMOS33"),
Misc("SLEWRATE=FAST"),
),
("eth", 0,
Subsignal("rst_n", Pins("B20")),
Subsignal("mdio", Pins("D16")),
Subsignal("mdc", Pins("A19")),
Subsignal("rx_data", Pins("A16 C17 B17 A17")),
Subsignal("tx_ctl", Pins("D15")),
Subsignal("rx_ctl", Pins("B18")),
Subsignal("tx_data", Pins("C15 B16 A18 B19")),
IOStandard("LVCMOS33"),
Misc("SLEWRATE=FAST")
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors_r1_0 = []
# Platform -----------------------------------------------------------------------------------------
class Platform(LatticePlatform):
default_clk_name = "clk30"
default_clk_period = 1e9/30e6
def __init__(self, revision="1.0", device="85F", toolchain="trellis", **kwargs):
assert revision in ["1.0"]
self.revision = revision
io = {"1.0": _io_r1_0}[revision]
connectors = {"1.0": _connectors_r1_0}[revision]
LatticePlatform.__init__(self, f"LFE5UM5G-{device}-8BG381C", io, connectors, toolchain=toolchain, **kwargs)
def create_programmer(self):
return OpenOCDJTAGProgrammer("openocd_butterstick.cfg")
def do_finalize(self, fragment):
LatticePlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk30", loose=True), 1e9/30e6)
| [
"[email protected]"
]
| |
70d99d01332ce75b3a408d0d03328a3232c04d66 | 340b5d95c9dd0cfc3ff487a7bb927944ac40aa51 | /ch_3 (functions)/005_brote_force.py | 16d537d9bea57e70fa30621463bddcc0ce8d83bf | []
| no_license | sc-199/199 | 200970fb8bf0662755cda9c50599504392b3882f | 618d8e1136c188276135f9a685a878984c3ea644 | refs/heads/master | 2020-05-04T18:33:34.722852 | 2019-03-22T08:13:32 | 2019-03-22T08:13:32 | 167,672,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,375 | py | '''
Algorithms
====================
The brute-force method
(Brute force)
We have a domain (a search space) and a range of values.
For example: a specific number is given, and for that number there is a set
of candidate solutions. Following a fixed procedure, we try the candidates
from the solution set one at a time until one fits this specific number.
This is called the brute-force method.
Clearly, exactly one solution will be found in the range of values.
'''
# ----------------------
def is_simple_number(x):
    ''' The function determines whether the number x is prime or composite.
    x is a whole, positive (natural) number.
    Returns True if the number is prime, otherwise False.
'''
divizor = 2
while divizor <= x**0.5:
if x % divizor == 0:
return False
divizor += 1
return True
# -----------------------
def factorize_number(x):
    ''' Decomposes the number x into its prime factors.
    Prints the factors to the screen.
    x is a whole, positive (natural) number.
'''
divizor = 2
while x > 1:
if x % divizor == 0:
print(divizor, end =' ')
x //= divizor
else:
divizor += 1
# ========================
print("Is simple number:", is_simple_number(19))
print()
factorize_number(1024)
print()
factorize_number(999)
print()
# ------------------------
input("\nDone!..")
| [
"[email protected]"
]
| |
a1aec67ad9d26544e5297012f6c5d18c0ac75576 | 80225526644fa583e90e9bd319f2ed6666587515 | /grading/assign2/assign2_sol.py | 57663a0f470ed1485df6c7e0ba8a07aec603400d | [
"BSD-3-Clause"
]
| permissive | ChenQiang-CN/a301_2020 | 249776f46ebcb781e328805ac162642040300bff | 4bcb4a0181ea2d94207ede2536af51d671ba4930 | refs/heads/master | 2023-02-22T03:18:25.690639 | 2021-01-27T18:34:53 | 2021-01-27T18:34:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,600 | py | # ---
# jupyter:
# jupytext:
# notebook_metadata_filter: all,-language_info,-toc,-latex_envs
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# ## Assignment 2 - solutions
#
# - Chapter 2: A23, A24, A25, A26
# - Chapter 8: A2, A4
# %%
import numpy as np
import string
# %% [markdown]
# # 2-A23
#
# What product of number density times absorption cross section is needed in order for 50% of the incident radiation to be absorbed by airborne volcanic ash over the following path length (km)?
#
# Given Equation 2.31a:
#
# $$
# \begin{align}
# E_{\text {transmitted}}&=E_{\text {incident}} \cdot \mathrm{e}^{-n \cdot b \cdot \Delta s}\\
# t &= 0.5 = \mathrm{e}^{-n \cdot b \cdot \Delta s}
# \end{align}
# $$
# What product of number density $n$ times absorption cross section $b$ is needed in order for 50% of the incident radiation $E_{\text {incident}}$ to be absorbed by airborne volcanic ash over the following path length (km)?
#
# (Note how I use the zip function to zip together 3 lists, then unzip them in the for loop
# one at a time)
# %%
# path length in km
# 14 values
delta_s = (0.2,0.4,0.6,0.8,1.0,1.5,2.0,
2.5,3.0,3.5,4.0,4.5,5.0,7.0)
letters = string.ascii_lowercase[:len(delta_s)]
# compute the product n x b required for each path length
#
n_b= []
for the_delta in delta_s:
the_prod = -np.log(0.5)/the_delta
n_b.append(the_prod)
the_answer = zip(letters,delta_s,n_b)
for the_letter, the_delta, the_val in the_answer:
print(f"{the_letter}) delta_s={the_delta} km,"
f" --- nxb={the_val:5.3f} km^-1")
# %% [markdown]
# ## 2-A24
#
# Given optical depth $\tau$ in the equation 2.31c
#
# $$E_{\text {transmitted}}=E_{\text {incident}} \cdot \mathrm{e}^{-\tau}$$
#
# What fraction of incident radiation is transmitted through a volcanic ash cloud of optical depth:
# %%
tau =(0.2,0.5,0.7,1.0,1.5,3.0,
4.0,5.0,6.0,7.0,10.0,15.0,20.0)
letters = string.ascii_lowercase[:len(tau)]
frac_list=[]
for the_tau in tau:
the_frac = np.exp(-the_tau)
frac_list.append(the_frac)
the_answer = zip(letters,tau,frac_list)
for the_letter, the_tau, the_frac in the_answer:
print(f"{the_letter}) tau={the_tau},"
f" --- transmissivity={the_frac:5.3f}")
# %% [markdown]
# ## 2-A25
#
# given $\gamma = n \cdot b = k \cdot \rho$
#
# Find the visual range $\Delta s$ such that the transmissivity satisfies
#
#
# $$
# \begin{align}
# t &= 0.02 = \exp ( -\gamma \cdot \Delta s) \\
# \Delta s &= -\log(0.02)/\gamma\ (meters)
# \end{align}
# $$
# %%
gamma = (0.00001,0.00002,0.00005,0.0001,
0.0002,0.0005,0.001,0.002,0.005,
0.01,0.02,0.05)
gamma_km = np.array(gamma)*1.e3 #km^{-1}
letters = string.ascii_lowercase[:len(gamma_km)]
delta_list= []
for the_gamma in gamma_km:
delta_s = -np.log(0.02)/the_gamma
delta_list.append(delta_s)
the_answer = zip(letters,gamma_km,delta_list)
for the_letter, the_gamma, delta_s in the_answer:
print(f"{the_letter}) gamma ={the_gamma} km^{-1},"
f" --- Delta s={delta_s:5.3f} km")
# %% [markdown]
# ## 2-A26
#
# (i) What is the value of solar downward direct radiative flux reaching the surface at the city from exercise A5 at noon on 4 July, given 20% coverage of cumulus (low) clouds.
# %% [markdown]
# Eq. 2.5
#
# $$
# \delta_{S} \approx \Phi_{r} \cdot \cos \left[\frac{C \cdot\left(d-d_{r}\right)}{d_{y}}\right]
# $$
#
#
# Eq. 2.6:
#
#
# $$
# \begin{aligned}
# \sin (\Psi)=& \sin (\phi) \cdot \sin \left(\delta_{S}\right)-\\
# & \cos (\phi) \cdot \cos \left(\delta_{S}\right) \cdot \cos \left[\frac{C \cdot t_{U T C}}{t_{d}}+\lambda_{e}\right]
# \end{aligned}
# $$
# %% [markdown]
# ## Timezones/leap years/daylight savings time make dates/times complicated
#
# For example, look at [this headache for daylight savings time folding](https://www.python.org/dev/peps/pep-0495/).
# I would use the [arrow package](https://github.com/arrow-py/arrow) for real work.
# %%
import datetime as dt
from math import asin,sin,cos,pi
import numpy as np
try:
#
# use the ephem package to find exact summer solstice
#
import ephem
except ModuleNotFoundError:
pass
deg2rad=pi/180.
rad2deg=1./deg2rad
def find_deltas(the_date):
"""given a python datetime object (UTC)
find the solar declination angle in degrees
using Stull equation 2.5
Parameters
----------
the_date: datetime object with UTC timezone
Returns
-------
deltas: solar declination angle in degrees
"""
the_year=the_date.year
#
# find the length of the year (leap or regular) in days by subtracting
# two datetimes exactly 1 year apart -- jan 1, 0 hours, 0 minutes, 0 seconds
#
year_start=dt.datetime(the_year,1,1,0,0,0,tzinfo=dt.timezone.utc)
year_end=dt.datetime(the_year+1,1,1,0,0,0,tzinfo=dt.timezone.utc)
year_length=(year_end - year_start).days
print(f"this year has {year_length:6.3f} days")
phir=23.44 #axis tilt in degrees from stull
#
# run the following if you have the ephem package
# to get the exact solstice. Make sure you get the
# summer solstice by specifying solstice after May 31
#
try:
approx_solstice = dt.datetime(2020,5,31)
solstice=ephem.next_solstice(approx_solstice).datetime()
solstice = solstice.astimezone(dt.timezone.utc)
except:
#
# otherwise use june 21
#
solstice = dt.datetime(2020,6,21,0,0,0,tzinfo=dt.timezone.utc)
#number of days since the new year
the_day=(the_date - year_start).days
jan1=dt.datetime(the_date.year,1,1,0,0,0,tzinfo=dt.timezone.utc)
solstice_day=(solstice - jan1).days
#print('solstice has {} days'.format(solstice_day))
fraction=(the_day - solstice_day)/year_length
deltas=phir*cos(2*pi*fraction)
return deltas
# %%
def find_elevation(the_date,the_lat,the_lon):
"""find the solar elevation for a location in degrees
datetime object with a UTC timezone representing
local time, using Stull eqn. 2.6
Parameters
----------
the_date: datetime object
time in UTC
the_lat: float
degrees North
the_lon: float
degrees East
Returns
-------
elevation: float
solar elevation in degrees
"""
deltas=find_deltas(the_date)
deltas=deltas*deg2rad
phi= the_lat*deg2rad # latitude deg N
lambda_e = the_lon*deg2rad #longitude, deg E
#
# turn minutes into fractions of an hour
#
t_utc=the_date.hour + the_date.minute/60.
print(f"the longitude: {the_lon:5.2f} deg E, hour in utc {t_utc}")
#stull eqn 2.6
sin_psi=sin(phi)*sin(deltas) - cos(phi)*cos(deltas)*cos(2*pi*t_utc/24. + lambda_e)
elevation=asin(sin_psi)*rad2deg
#write 0 if under the horizon
if elevation < 0:
elevation=0.
return elevation
# %% [markdown]
# Start accumulating results in a dictionary for each part of A26. We will key on the city
# and each city will have its own dictionary.
# %%
#
# these time offsets are all standard time
# credit: Marjolein Ribberink
#
coords={
"Seattle":(47.6062, -122.3321,-8),
"Corvallis":(44.5646, -123.2620,-8),
"Boulder":(40.0150, -105.2705,-7),
"Norman":(35.2226, -97.4395,-6),
"Madison":(43.0731, -89.4012,-6),
"Toronto":(43.6532, -79.3832,-5),
"Montreal":(45.5017, -73.5673,-5),
"Boston":(42.3601, -71.0589,-5),
"NYC":(40.7128, -74.0060,-5),
"University Park":(40.8148, -77.8653,-5),
"Princeton":(40.3431, -74.6551,-5),
"Washington DC":(38.9072, -77.0369,-5),
"Raleigh":(35.7796, -78.6382,-5),
"Tallahassee":(30.4383, -84.2807,-5),
"Reading":(51.4543, -0.9781,0),
"Toulouse":(43.6047, 1.4442,1),
"Munchen":(48.1351, 11.5820,1),
"Bergen":(60.3913, 5.3221,1),
"Uppsala":(59.8586, 17.6389,1),
"DeBilt":(52.1093, 5.1810,1),
"Paris":(48.8566, 2.3522,1),
"Tokyo":(35.6804, 139.7690,8),
"Beijing":(39.9042, 116.4074,7),
"Warsaw":(52.2297, 21.0122,1),
"Madrid":(40.4168, 3.7038,1),
"Melbourne":(-37.8136, 144.9631,10),
"Vancouver":(49.2827, -123.1207,-8)
}
city_list = ['Vancouver','Reading','Norman']
results=dict()
for the_city in city_list:
print(f"\n{the_city}\n")
geocoords = coords[the_city]
the_lat, the_lon, tz_offset = geocoords
hour = 12 - tz_offset
the_date = dt.datetime(2020,6,21,hour,0,0,tzinfo=dt.timezone.utc)
elev=find_elevation(the_date,the_lat,the_lon)
print(f"lat {the_lat:5.2f} deg N, solar elev {elev:5.2f} deg")
results[the_city]={'elevation':elev}
print(f"\n{results}\n")
# %% [markdown]
# ### 2-A26a
#
# find the flux at the surface given Stull 2.35
#
# $$
# T_{r}=(0.6+0.2 \sin \Psi)\left(1-0.4 \sigma_{H}\right)\left(1-0.7 \sigma_{M}\right)\left(1-0.4 \sigma_{L}\right)
# $$
#
# with $\sigma_L=0.2$
# %%
def find_Tr(elevation,sigma_h,sigma_m,sigma_l):
"""
given a solar elevation and cloud fractions for 3 layers
find the effective solar flux transmission
Parameters
----------
elevation: float
solar elevation in degrees
sigma_h, sigma_m,sigma_l: floats
high, middle and low cloud fractions, 0 to 1
Returns
-------
Tr: float
the diffuse and direct flux transmission, 0 to 1
"""
import numpy as np
deg2rad = np.pi/180.
elevation = elevation*deg2rad
S0 = 1361
Tr = (0.6 + 0.2*np.sin(elevation))*(1-0.4*sigma_h)*(1-0.7*sigma_m)*(1-0.4*sigma_l)
print(f"cos(theta) factor is {np.sin(elevation):5.2f}")
return Tr
# %%
for the_city, results_dict in results.items():
print(f"\n{the_city}\n")
geocoords = coords[the_city]
the_lat, the_lon, tz_offset = geocoords
hour = 12 - tz_offset
the_date = dt.datetime(2020,6,21,hour,0,0,tzinfo=dt.timezone.utc)
elev=find_elevation(the_date,the_lat,the_lon)
sigma_h, sigma_m, sigma_l = 0, 0, 0.2
Tr = find_Tr(elev,sigma_h,sigma_m,sigma_l)
S0= -1361 #W/m^2
downward_flux = S0*Tr
print(f"high, middle, low cloud fractions: {sigma_h,sigma_m,sigma_l}")
print(f"the diffuse/direct transmissin is {Tr:5.3f}")
print(f"downward flux at the surface is {downward_flux:5.2f} W/m^2" )
results[the_city]['downward_flux'] = downward_flux
results[the_city]['high_med_low'] = (sigma_h,sigma_m,sigma_l)
print(f"\n{results}\n")
# %% [markdown]
# ## 2-A26b
#
# If the albedo is 0.5 in your town, what is the reflected solar flux at that same time?
#
# Stull 2.36
# $$K_{\uparrow} = -A \cdot K_{\downarrow}$$
# %%
for the_city, results_dict in results.items():
flux = results_dict['downward_flux']
albedo = 0.5
upward_flux = -albedo*flux
print(f"{the_city}: {upward_flux:5.2f} W/m^2")
results[the_city]['upward_sw'] = upward_flux
results[the_city]['albedo'] = albedo
print(f"\n{results}\n")
# %% [markdown]
# ### 2-A26c
#
# What is the approx net longwave flux at the surface, according to Stull 2.39?
#
# $$I^{*}=b \cdot\left(1-0.1 \sigma_{H}-0.3 \sigma_{M}-0.6 \sigma_{L}\right)$$
# %%
b = 98.5 #W/m^2
for the_city,results_dict in results.items():
sigma_h, sigma_m, sigma_l = results_dict['high_med_low']
Istar = b*(1 - 0.1*sigma_h -0.3*sigma_m - 0.6*sigma_l)
results[the_city]['Istar']=Istar
print(f"{the_city}: {Istar:5.2f} W/m^2")
print(f"\n{results}\n")
# %% [markdown]
# ### 2-A26d
#
# What is the net surface radiation according to Stull 2.40a?
#
# $$\mathbb{F}^{*}=-(1-A) \cdot S \cdot T_{r} \cdot \sin (\Psi)+I^{*}$$
#
# Note that in the loop below I can add things to `results_dict`, and the changes "stick"
# i.e. the for loop is giving me a reference to the original `results_dict`
# not a copy
# %%
for the_city, results_dict in results.items():
A = results_dict['albedo']
downward_flux = results_dict['downward_flux']
Istar = results_dict['Istar']
net_sfc_flux = -(1 - A)*downward_flux + Istar
print(f"{the_city}: {net_sfc_flux:5.2f} W/m^2")
results_dict['net_sfc_flux'] = net_sfc_flux
print(f"\n{results}\n")
# %% [markdown]
# ### 8-A2
#
# Find the blackbody radiance for the following sets of wavelength, temperature
# %%
from scipy.constants import c, h, k
#
# get Stull's c_1 and c_2 from fundamental constants
#
# c=2.99792458e+08 #m/s -- speed of light in vacuum
# h=6.62606876e-34 #J s -- Planck's constant
# k=1.3806503e-23 # J/K -- Boltzmann's constant
c1 = 2. * h * c**2.
c2 = h * c / k
sigma = 2. * np.pi**5. * k**4. / (15 * h**3. * c**2.)
def calc_radiance(wavel, Temp):
"""
Calculate the blackbody radiance
Parameters
----------
wavel: float or array
wavelength (meters)
Temp: float
temperature (K)
Returns
-------
Llambda: float or arr
monochromatic radiance (W/m^2/m/sr)
"""
Llambda_val = c1 / (wavel**5. * (np.exp(c2 / (wavel * Temp)) - 1))
return Llambda_val
def planck_invert(wavel, Lstar):
"""
Calculate the brightness temperature
Parameters
----------
wavel: float
wavelength (meters)
Lstar: float or array
Blackbody radiance (W/m^2/m/sr)
Returns
-------
Tbright: float or arr
brightness temperature (K)
"""
Tbright = c2 / (wavel * np.log(c1 / (wavel**5. * Lstar) + 1.))
return Tbright
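# %% [markdown]
# Quick consistency check: `calc_radiance` and `planck_invert` should be
# inverses, so converting a temperature to radiance and back must recover the
# original temperature. The wavelength and temperature below are arbitrary
# test values.
# %%
test_wavel = 10.e-6  # 10 micron wavelength, in meters
test_temp = 300.  # K
round_trip = planck_invert(test_wavel, calc_radiance(test_wavel, test_temp))
print(f"round-trip brightness temperature: {round_trip:8.3f} K "
      f"(should equal {test_temp} K)")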
# %%
probset={
"a":{'wavelen':14.7,'Tc':-60},
"b":{'wavelen':14.4,'Tc':-60},
"c":{'wavelen':14.0,'Tc':-30},
"d":{'wavelen':13.7,'Tc':0},
"e":{'wavelen':13.4,'Tc':5},
"f":{'wavelen':12.7,'Tc':15},
"g":{'wavelen':12.0,'Tc':25},
"h":{'wavelen':11.0,'Tc':-5},
"i":{'wavelen':9.7,'Tc':-15}
}
for letter, prob_vals in probset.items():
Tk = prob_vals['Tc'] + 273.15
wavelen = prob_vals['wavelen']*1.e-6
Lbb = calc_radiance(wavelen,Tk)
print(f"{letter}) Tk={Tk:5.2f} K, Lbb={Lbb*1.e-6:5.2f} W/m^2/sr/micron")
# %% [markdown]
# ### 8-A4
#
# Find the brightness temperature for the following wavelengths given a radiance of $10^{-15}$ W/m^2/sr/micron.
# %%
wavelen = (0.6,3.8,4.0,4.1,4.4,4.5,4.6,6.5,7.0,7.5)
letters = string.ascii_lowercase[:len(wavelen)]
Lstar=(10**(-15))*1.e6  # 1e-15 W/m^2/sr/micron converted to W/m^2/sr/m
for letter,wavel in zip(letters,wavelen):
wavel_meters=wavel*1.e-6
Tbright = planck_invert(wavel_meters,Lstar)
print(f"{letter}) Wavelength = {wavel:5.2f} microns, Tbright = {Tbright:5.2f} K")
| [
"[email protected]"
]
| |
4919d2fe56f781fbadaf6d51a0998a84e7c5aa3c | 491d2fd36f2ca26975b3eb302a3d5600415bf7c4 | /TensorFlow/computer_vision/Resnets/utils/logs/hooks_helper.py | 89b28ade2ae8aeaccd57914ccadefd223d2479c4 | [
"Apache-2.0"
]
| permissive | kmanchella-habana/Model-References | 9fa42654d57a867d82f417e9fff668946f9105f6 | 460d3b23ce75f30561e8f725ebcb21298897d163 | refs/heads/master | 2023-08-28T17:42:48.866251 | 2021-09-18T21:38:04 | 2021-09-18T21:38:04 | 411,371,667 | 0 | 0 | null | 2021-09-28T17:08:13 | 2021-09-28T17:08:13 | null | UTF-8 | Python | false | false | 6,251 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hooks helper to return a list of TensorFlow hooks for training by name.
More hooks can be added to this set. To add a new hook, 1) add the new hook to
the registry in HOOKS, 2) add a corresponding function that parses out necessary
parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from TensorFlow.computer_vision.Resnets.utils.logs import hooks
from TensorFlow.computer_vision.Resnets.utils.logs import logger
from TensorFlow.computer_vision.Resnets.utils.logs import metric_hook
_TENSORS_TO_LOG = dict((x, x) for x in ['learning_rate_1',
'cross_entropy_1',
'accuracy_1'])
PROFILE = False
def get_train_hooks(name_list, use_tpu=False, **kwargs):
"""Factory for getting a list of TensorFlow hooks for training by name.
Args:
name_list: a list of strings to name desired hook classes. Allowed:
LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined
as keys in HOOKS
use_tpu: Boolean of whether computation occurs on a TPU. This will disable
hooks altogether.
**kwargs: a dictionary of arguments to the hooks.
Returns:
list of instantiated hooks, ready to be used in a classifier.train call.
Raises:
ValueError: if an unrecognized name is passed.
"""
if not name_list:
return []
if use_tpu:
tf.compat.v1.logging.warning('hooks_helper received name_list `{}`, but a '
'TPU is specified. No hooks will be used.'
.format(name_list))
return []
train_hooks = [ tf.estimator.ProfilerHook(save_steps=50, output_dir=".") ] if PROFILE else []
for name in name_list:
hook_name = HOOKS.get(name.strip().lower())
if hook_name is None:
raise ValueError('Unrecognized training hook requested: {}'.format(name))
else:
train_hooks.append(hook_name(**kwargs))
return train_hooks
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs): # pylint: disable=unused-argument
"""Function to get LoggingTensorHook.
Args:
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
**kwargs: a dictionary of arguments to LoggingTensorHook.
Returns:
Returns a LoggingTensorHook with a standard set of tensors that will be
printed to stdout.
"""
if tensors_to_log is None:
tensors_to_log = _TENSORS_TO_LOG
return tf.estimator.LoggingTensorHook(
tensors=tensors_to_log,
every_n_iter=every_n_iter)
def get_profiler_hook(model_dir, save_steps=10, **kwargs): # pylint: disable=unused-argument
"""Function to get ProfilerHook.
Args:
model_dir: The directory to save the profile traces to.
save_steps: `int`, print profile traces every N steps.
**kwargs: a dictionary of arguments to ProfilerHook.
Returns:
Returns a ProfilerHook that writes out timelines that can be loaded into
profiling tools like chrome://tracing.
"""
return tf.estimator.ProfilerHook(save_steps=save_steps, output_dir=model_dir)
def get_examples_per_second_hook(every_n_steps=100,
batch_size=128,
warm_steps=5,
**kwargs): # pylint: disable=unused-argument
"""Function to get ExamplesPerSecondHook.
Args:
every_n_steps: `int`, print current and average examples per second every
N steps.
batch_size: `int`, total batch size used to calculate examples/second from
global time.
warm_steps: skip this number of steps before logging and running average.
**kwargs: a dictionary of arguments to ExamplesPerSecondHook.
Returns:
Returns a ProfilerHook that writes out timelines that can be loaded into
profiling tools like chrome://tracing.
"""
return hooks.ExamplesPerSecondHook(
batch_size=batch_size, every_n_steps=every_n_steps,
warm_steps=warm_steps, metric_logger=logger.get_benchmark_logger())
def get_logging_metric_hook(tensors_to_log=None,
every_n_secs=600,
**kwargs): # pylint: disable=unused-argument
"""Function to get LoggingMetricHook.
Args:
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
every_n_secs: `int`, the frequency for logging the metric. Default to every
10 mins.
**kwargs: a dictionary of arguments.
Returns:
Returns a LoggingMetricHook that saves tensor values in a JSON format.
"""
if tensors_to_log is None:
tensors_to_log = _TENSORS_TO_LOG
return metric_hook.LoggingMetricHook(
tensors=tensors_to_log,
metric_logger=logger.get_benchmark_logger(),
every_n_secs=every_n_secs)
def get_step_counter_hook(**kwargs):
"""Function to get StepCounterHook."""
del kwargs
return tf.estimator.StepCounterHook()
# A dictionary to map one hook name and its corresponding function
HOOKS = {
'loggingtensorhook': get_logging_tensor_hook,
'profilerhook': get_profiler_hook,
'examplespersecondhook': get_examples_per_second_hook,
'loggingmetrichook': get_logging_metric_hook,
'stepcounterhook': get_step_counter_hook
}
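# Example (illustrative): names are matched case-insensitively against the
# keys above, and extra keyword arguments are forwarded to every requested
# hook, e.g.
#
#   train_hooks = get_train_hooks(
#       ['LoggingTensorHook', 'ExamplesPerSecondHook'],
#       batch_size=128)
#   estimator.train(input_fn, hooks=train_hooks)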
| [
"[email protected]"
]
| |
adf629c13d106ceb433534d50425de94aee5c25f | 6c2ddf52efccdfa15ce073da0e74d3352d5108c4 | /idact/detail/config/validation/validate_bool.py | f5e1aea27bb8050ac7dc15bc157df871549484c8 | [
"MIT"
]
| permissive | intdata-bsc/idact | 4bff248e644629b7ec634b282d790c305fc6703d | 54cb65a711c145351e205970c27c83e6393cccf5 | refs/heads/develop | 2020-05-17T20:33:52.890970 | 2019-12-26T00:03:58 | 2019-12-26T00:03:58 | 183,949,088 | 0 | 0 | MIT | 2019-12-26T00:03:59 | 2019-04-28T19:18:58 | Python | UTF-8 | Python | false | false | 658 | py | """This module contains a function for validating a boolean config entry."""
from typing import Optional
from idact.detail.config.validation.validation_error_message import \
validation_error_message
def validate_bool(value, label: Optional[str] = None) -> bool:
"""Returns the parameter, if it's a :class:`bool`, otherwise raises
an exception.
:param value: Object to validate.
:param label: Object label for error message.
:raises TypeError: On wrong type.
"""
if isinstance(value, bool):
return value
raise TypeError(validation_error_message(
label=label,
value=value))
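# Example:
#
#   validate_bool(True, label='use_jupyter')   # -> True
#   validate_bool('yes', label='use_jupyter')  # -> raises TypeError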
| [
"[email protected]"
]
| |
d102b7d9ef0861fbbb91042145d024d4eedb4eab | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/references/discern/pusher.py | 43f5bb6436960491846009eeeb34fb37c8416c8e | [
"MIT"
]
| permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,797 | py | import rlkit.misc.hyperparameter as hyp
from experiments.murtaza.multiworld.skew_fit.reacher.generate_uniform_dataset import generate_uniform_dataset_reacher
from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.grill.launcher import grill_her_twin_sac_online_vae_full_experiment
import rlkit.torch.vae.vae_schedules as vae_schedules
from rlkit.torch.vae.conv_vae import imsize48_default_architecture
if __name__ == "__main__":
variant = dict(
double_algo=False,
online_vae_exploration=False,
imsize=48,
init_camera=sawyer_init_camera_zoomed_in,
env_id='SawyerPushNIPSEasy-v0',
grill_variant=dict(
use_discern_sampling=True,
save_video=True,
online_vae_beta=20,
save_video_period=50,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
vf_kwargs=dict(
hidden_sizes=[400, 300],
),
algo_kwargs=dict(
base_kwargs=dict(
num_epochs=1000,
num_steps_per_epoch=500,
num_steps_per_eval=500,
min_num_steps_before_training=10000,
batch_size=256,
max_path_length=50,
discount=0.99,
num_updates_per_env_step=2,
# collection_mode='online-parallel',
parallel_env_params=dict(
num_workers=1,
),
reward_scale=1,
),
her_kwargs=dict(
),
twin_sac_kwargs=dict(
train_policy_with_reparameterization=True,
soft_target_tau=1e-3, # 1e-2
policy_update_period=1,
target_update_period=1, # 1
use_automatic_entropy_tuning=True,
),
online_vae_kwargs=dict(
vae_training_schedule=vae_schedules.custom_schedule_2,
oracle_data=False,
vae_save_period=50,
parallel_vae_train=False,
),
diverse_kwargs=dict(
p_replace=.05,
p_add_non_diverse=.05,
goal_buffer_size=1024,
),
),
replay_buffer_kwargs=dict(
start_skew_epoch=10,
max_size=int(100000),
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
exploration_rewards_type='None',
vae_priority_type='vae_prob',
priority_function_kwargs=dict(
sampling_method='importance_sampling',
decoder_distribution='gaussian_identity_variance',
# decoder_distribution='bernoulli',
num_latents_to_sample=10,
),
power=.1,
),
normalize=False,
render=False,
exploration_noise=0.0,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
vae_wrapped_env_kwargs=dict(
sample_from_true_prior=True,
),
algorithm='ONLINE-VAE-SAC-BERNOULLI',
# generate_uniform_dataset_kwargs=dict(
# init_camera=sawyer_init_camera_zoomed_in,
# env_id='SawyerPushNIPS-v0',
# num_imgs=1000,
# use_cached_dataset=False,
# show=False,
# save_file_prefix='pusher',
# ),
# generate_uniform_dataset_fn=generate_uniform_dataset_reacher,
),
train_vae_variant=dict(
representation_size=4,
beta=20,
num_epochs=0,
dump_skew_debug_plots=False,
decoder_activation='gaussian',
# decoder_activation='sigmoid',
generate_vae_dataset_kwargs=dict(
N=40,
test_p=.9,
use_cached=True,
show=False,
oracle_dataset=True,
oracle_dataset_using_set_to_goal=True,
n_random_steps=100,
non_presampled_goal_img_is_garbage=True,
),
vae_kwargs=dict(
input_channels=3,
architecture=imsize48_default_architecture,
decoder_distribution='gaussian_identity_variance',
),
algo_kwargs=dict(
start_skew_epoch=5000,
is_auto_encoder=False,
batch_size=64,
lr=1e-3,
skew_config=dict(
method='vae_prob',
power=0,
),
skew_dataset=True,
priority_function_kwargs=dict(
decoder_distribution='gaussian_identity_variance',
sampling_method='importance_sampling',
# sampling_method='true_prior_sampling',
num_latents_to_sample=10,
),
use_parallel_dataloading=False,
),
save_period=25,
),
version='no force',
)
search_space = {
'grill_variant.algo_kwargs.diverse_kwargs.p_replace': [.01]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
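# For orientation, a minimal stand-in for what a deterministic sweeper might
# do: take the Cartesian product of the listed values and write each
# combination into a copy of the defaults along the dotted key path. This is
# an assumption about the helper's behavior, not its actual implementation.
import copy
import itertools

def iterate_hyperparameters_sketch(search_space, defaults):
    keys, value_lists = zip(*search_space.items())
    for combo in itertools.product(*value_lists):
        variant_copy = copy.deepcopy(defaults)
        for dotted_key, v in zip(keys, combo):
            node = variant_copy
            *path, last = dotted_key.split('.')
            for part in path:
                node = node[part]
            node[last] = v
        yield variant_copy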
# n_seeds = 1
# mode = 'local'
# exp_prefix = 'test'
n_seeds = 2
mode = 'gcp'
exp_prefix = 'steven-door-discern-new-visuals-comp'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
grill_her_twin_sac_online_vae_full_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
snapshot_gap=50,
snapshot_mode='gap_and_last',
num_exps_per_instance=3,
gcp_kwargs=dict(
zone='us-east1-c',
gpu_kwargs=dict(
gpu_model='nvidia-tesla-k80',
num_gpu=1,
)
)
)
| [
"[email protected]"
]
| |
f6495eb9f687d1146f820c4e46e6dcc9b4e71e7e | f64ac4dfdf43d5535c30fd7b58f9e80d45b884db | /GAN/conditional_gan/cifar100_cgan.py | 4a6f02fa7a5ab6843cc19a17df9baa77a8436d30 | [
"Unlicense"
]
| permissive | beckybai/generative-models | e13fe109d2a589e90cedba35c1c1614eaef12d03 | dc057e788b8b65ff2dfa7510cb717de6c1bd0b75 | refs/heads/master | 2021-01-20T01:50:03.585856 | 2017-07-08T17:43:03 | 2017-07-08T17:43:03 | 89,329,854 | 1 | 0 | null | 2017-04-25T07:18:31 | 2017-04-25T07:18:31 | null | UTF-8 | Python | false | false | 4,191 | py | import torch
import torch.autograd as autograd
import torch.optim as optim
import numpy as np
from datetime import datetime
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from torch.autograd import Variable
from tensorflow.examples.tutorials.mnist import input_data
import torch.nn as nn
import torch.nn.functional as F
import shutil,sys
import mutil
import model
import data_convert
import owntool
gpu = 2
ngpu = 2
torch.cuda.set_device(gpu)
# mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
cifar_d = data_convert.cifar100()
mb_size = 100 # mini-batch_size
Z_dim = 100
label_dim = 100
X_dim = 32
y_dim = 1
cnt = 0
num = '0'
out_dir = './cifar100_result/basic_{}_{}/'.format(datetime.now(), num)
out_dir = out_dir.replace(" ", "_")  # str.replace returns a new string; keep the result
if not os.path.exists(out_dir):
os.makedirs(out_dir)
shutil.copyfile(sys.argv[0], out_dir + '/training_script.py')
sys.stdout = mutil.Logger(out_dir)
in_channel=4
d_num = 3
G = model.G_Net_conv_32(ngpu,in_channel = Z_dim+label_dim, out_channel = 3).cuda()
D = model.D_Net_conv(ngpu,in_channel).cuda()
"""Weight Initialization"""
# def weights_init(m):
# classname = m.__class__.__name__
# if classname.find('Conv') != -1:
# m.weight.data.normal_(0.0, 0.02)
""" ===================== TRAINING ======================== """
d_num = 3
# avd_num = 1/d_num
G_solver = optim.Adam(G.parameters(), lr=1e-4)
D_solver = optim.Adam(D.parameters(), lr=1e-4)
ones_label = Variable(torch.ones(mb_size)).cuda()
zeros_label = Variable(torch.zeros(mb_size)).cuda()
criterion = nn.BCELoss()
c_label = np.array(range(100))
def reset_d_grad():
D.zero_grad()
def step_d_optim():
D_solver.step()
for it in range(100000):
# Sample data
z = Variable(torch.randn(mb_size, Z_dim)).cuda()
X, c = cifar_d.batch_next(mb_size)
X = Variable(torch.from_numpy(X)).cuda()
# label_m = np.nonzero(c)[1]
c_v = Variable(torch.from_numpy(model.set_label_ve_ma(c,100).astype('float32'))).cuda() # condition vector for the generator
label_m = model.set_label_cifar(c.astype('int'),mb_size,X_dim)
c = Variable(label_m).cuda()
# Dicriminator forward-loss-backward-update
D.zero_grad()
G.zero_grad()
x_g = torch.cat([z,c_v],1).t()
x_g.data.resize_(mb_size, Z_dim+label_dim, 1, 1)
G_sample = G(x_g).detach()
# X.data.resize_(mb_size, 1, X_dim, X_dim)
D_real = D(torch.cat([X,c],1))
D_fake = D(torch.cat([G_sample,c],1))
D_loss_fake = criterion(D_fake, zeros_label)
D_loss_real = criterion(D_real, ones_label)
D_loss_real.backward()
D_loss_fake.backward()
D_solver.step()
# step_d_optim()
# Housekeeping - reset gradient
D.zero_grad()
G.zero_grad()
# Generator forward-loss-backward-update
z = Variable(torch.randn(mb_size, Z_dim)).cuda()
x_g = torch.cat([z,c_v],1).t()
x_g.data.resize_(mb_size, Z_dim+ label_dim, 1, 1)
G_sample = G(x_g)
DG_loss = D(torch.cat([G_sample, c],1))
G_loss = criterion(DG_loss, ones_label)
G_loss.backward()
G_solver.step()
# Housekeeping - reset gradient
D.zero_grad()
G.zero_grad()
# Print and plot every now and then
if it % 500 == 0:
print('Iter-{}; D_loss_real/fake: {}/{}; G_loss: {}'.format(it, D_loss_real.data.tolist(),
D_loss_fake.data.tolist(), G_loss.data.tolist()))
c = c_label
c_v = Variable(torch.from_numpy(model.set_label_ve_ma(c,100).astype('float32'))).cuda()
x_g = torch.cat([z, c_v], 1).t()
x_g.data.resize_(mb_size, Z_dim + label_dim, 1, 1)
samples = G(x_g)
samples = samples.data.tolist()[:100]
output_path = out_dir + "{}.png".format(it)
owntool.save_color_picture_pixel(samples,output_path)
if it % 10000==0:
torch.save(G.state_dict(),'{}/G_{}.model'.format(out_dir,str(it)))
torch.save(D.state_dict(),'{}/D_{}.model'.format(out_dir,str(it)))
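# model.set_label_ve_ma is defined elsewhere; assuming it builds one-hot label
# vectors to concatenate with the noise z, a minimal equivalent sketch is:
#
#   def one_hot_labels(labels, num_classes):
#       out = np.zeros((len(labels), num_classes), dtype='float32')
#       out[np.arange(len(labels)), labels.astype(int)] = 1.0
#       return out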
| [
"[email protected]"
]
| |
3670ebd37aad77b33e512dbf343658f1219a3ec3 | 7ce076dd764fe4b5c7881734f157bc6f77a99ead | /tests/providers/google/ads/hooks/test_ads.py | a2c1a4f06188115057442b3817c57f2dd2819315 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Python-2.0"
]
| permissive | kaxil/airflow | db31c98e23f2e0d869d857484e56a7c58acef231 | 42f1da179db00491610946a0b089dd82269adc74 | refs/heads/master | 2023-04-28T04:46:38.478352 | 2020-09-28T20:51:16 | 2020-09-28T20:51:16 | 112,322,392 | 1 | 1 | Apache-2.0 | 2020-08-27T20:15:22 | 2017-11-28T10:42:19 | Python | UTF-8 | Python | false | false | 3,482 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mock
import pytest
from airflow.providers.google.ads.hooks.ads import GoogleAdsHook
API_VERSION = "api_version"
ADS_CLIENT = {"key": "value"}
SECRET = "secret"
EXTRAS = {
"extra__google_cloud_platform__keyfile_dict": SECRET,
"google_ads_client": ADS_CLIENT,
}
@pytest.fixture()
def mock_hook():
with mock.patch("airflow.hooks.base_hook.BaseHook.get_connection") as conn:
hook = GoogleAdsHook(api_version=API_VERSION)
conn.return_value.extra_dejson = EXTRAS
yield hook
class TestGoogleAdsHook:
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_get_customer_service(self, mock_client, mock_hook):
mock_hook._get_customer_service()
client = mock_client.load_from_dict
client.assert_called_once_with(mock_hook.google_ads_config)
client.return_value.get_service.assert_called_once_with("CustomerService", version=API_VERSION)
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_get_service(self, mock_client, mock_hook):
mock_hook._get_service()
client = mock_client.load_from_dict
client.assert_called_once_with(mock_hook.google_ads_config)
client.return_value.get_service.assert_called_once_with("GoogleAdsService", version=API_VERSION)
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_search(self, mock_client, mock_hook):
service = mock_client.load_from_dict.return_value.get_service.return_value
rows = ["row1", "row2"]
service.search.side_effect = rows  # Mock's attribute is side_effect (no trailing "s")
# Here we mock _extract_rows to assert calls and
# avoid additional __iter__ calls
mock_hook._extract_rows = list
query = "QUERY"
client_ids = ["1", "2"]
mock_hook.search(client_ids=client_ids, query="QUERY", page_size=2)
expected_calls = [mock.call(c, query=query, page_size=2) for c in client_ids]
service.search.assert_has_calls(expected_calls)
def test_extract_rows(self, mock_hook):
iterators = [[1, 2, 3], [4, 5, 6]]
assert mock_hook._extract_rows(iterators) == sum(iterators, [])
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_list_accessible_customers(self, mock_client, mock_hook):
accounts = ["a", "b", "c"]
service = mock_client.load_from_dict.return_value.get_service.return_value
service.list_accessible_customers.return_value = mock.MagicMock(resource_names=accounts)
result = mock_hook.list_accessible_customers()
service.list_accessible_customers.assert_called_once_with()
assert accounts == result
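# For orientation, the calls mocked above map onto hypothetical user code like
# the following (requires a real Airflow connection with Google Ads
# credentials; all values are placeholders):
#
#   hook = GoogleAdsHook(api_version="v2")
#   rows = hook.search(client_ids=["1234567890"],
#                      query="SELECT campaign.id FROM campaign",
#                      page_size=1000)
#   accounts = hook.list_accessible_customers()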
| [
"[email protected]"
]
| |
78fbb20b7e5c786eecdbb8bd9b07159de59b5096 | 75169b83f2b975bff8baf61f0cf1264cf4b71a28 | /learnpy/basis/module.py | fa8e2d00029a2f6399f12cf00ac497782c1fe7fe | []
| no_license | Pangpang2/Python | a27024587ae51923954deefaaff304a26e5a944f | 0b3bcfbdcaa71253c798090713c052fd397bff3f | refs/heads/master | 2022-12-01T10:03:23.214443 | 2020-08-26T00:21:15 | 2020-08-26T00:21:15 | 290,171,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | from collections.abc import Iterable  # moved out of `collections` in Python 3.10
print(isinstance('abc', Iterable))  # True: str is iterable
"[email protected]"
]
| |
68c38083ec1433c5550eab57e134154610ed105b | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /metapose/inference_time_optimization.py | 0944c4df83cb4b4fa70f165ee5f48dbd359db1eb | [
"CC-BY-4.0",
"Apache-2.0"
]
| permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 25,628 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for performing inference time optimization."""
# pylint: disable=invalid-name
import inspect
from typing import Sequence, Tuple, Callable, Mapping, Optional, Any # pylint: disable=g-importing-member,g-multiple-import
import tensorflow as tf
import tensorflow_probability as tfp
def procrustes(a,
b):
"""Computes rotation that aligns mean-centered `a` to mean-centered `b`."""
tf.debugging.assert_shapes([
(a, ('joints', 'dim')),
(b, ('joints', 'dim'))
])
a_m = tf.reduce_mean(a, axis=0)
b_m = tf.reduce_mean(b, axis=0)
a_c = a - a_m
b_c = b - b_m
A = tf.tensordot(a_c, b_c, axes=(0, 0))
_, U, V = tf.linalg.svd(A)
R = tf.tensordot(U, V, axes=(1, 1))
return R, a_m, b_m
def align_aba(
a,
b,
rescale = True
):
"""Produces `a` after optimal alignment to `b` and vice-versa."""
R, a_m, b_m = procrustes(a, b)
scale = tf.reduce_mean((tf.linalg.norm(b - b_m)) / tf.linalg.norm(a - a_m))
scale = scale if rescale else 1.0
a2b = (a - a_m) @ R * scale + b_m
b2a = (b - b_m) @ tf.transpose(R) / scale + a_m
return a2b, b2a, (a_m, b_m, R, scale)
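# Usage sketch (synthetic data, names local to this example): recover a known
# similarity transform between two point clouds.
#
#   a = tf.random.normal((17, 3))
#   rot_z = tf.constant([[0., 1., 0.], [-1., 0., 0.], [0., 0., 1.]])
#   b = 2.0 * (a @ tf.transpose(rot_z)) + tf.constant([0.1, -0.2, 0.5])
#   a2b, b2a, (a_m, b_m, R, scale) = align_aba(a, b)
#   # a2b matches b up to float error, and scale recovers ~2.0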
# [batch, 6] -> [batch, 3, 3]
# see “On the Continuity of Rotation Representations in Neural Networks”
# by Zhou et al. 2019
def vec6d_to_rot_mat(vec):
"""Converts a batch of 6D parameterized rots to rotation matrices."""
x, y = vec[:, :3], vec[:, 3:]
xn = tf.linalg.normalize(x, axis=-1)[0]
z = tf.linalg.cross(xn, y)
zn = tf.linalg.normalize(z, axis=-1)[0]
yn = tf.linalg.cross(zn, xn)
mat = tf.stack([xn, yn, zn], axis=1)
return mat
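# Sanity check (illustrative): every 6D vector maps to a proper rotation.
#
#   vec = tf.random.normal((8, 6))
#   R = vec6d_to_rot_mat(vec)                        # (8, 3, 3)
#   tf.reduce_max(tf.abs(tf.matmul(R, R, transpose_b=True) - tf.eye(3)))  # ~0
#   tf.linalg.det(R)                                 # all ~ +1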
def minimize_step_fn(func,
opt,
opt_arg_names,
args, kwargs
):
"""Minimizes the scalar-valued `func` using `opt` wrt `opt_arg_names`.
Produces a step_fn function that when called performs a single step of
minimizes a function `func`
Arguments:
func: a scalar-valued callable
opt: an instance of a tf.keras.optimizers.Optimizer
opt_arg_names: names of arguments to optimize with respect to
args: function argument list
kwargs: function argument dict
Returns:
A (step_fn, var_dict) pair: step_fn runs one optimizer step and returns
the pre-step loss; var_dict maps each name in opt_arg_names to its
tf.Variable.
The func must be such that `func(*args, **kwargs)` succeeds.
For names in in `opt_arg_names` corresponding arguments are used as
starting values.
Example:
def func(a, b, c): return a + b * c
opt = tf.keras.optimizers.Adam(...)
step_fn, var_dict = minimize_step_fn(
func, opt, ['b'], [1.0, 3.0], {'c': 5.0})
step_fn() # evaluates d func(a, b, c) / d b at (1.0, 3.0, 5.0)
print(var_dict['b']) # updated value
"""
full_kwargs = inspect.signature(func).bind(*args, **kwargs).arguments
extra_keys = set(opt_arg_names) - full_kwargs.keys()
if extra_keys:
raise ValueError('args %s are not in the original func' % extra_keys)
var_dict = {n: tf.Variable(full_kwargs[n], name=n) for n in opt_arg_names}
full_kwargs.update(var_dict)
loss_fn = lambda: func(**full_kwargs) # pylint: disable=unnecessary-lambda
def step_fn():
pre_loss = loss_fn()
opt.minimize(loss_fn, list(var_dict.values()))
return pre_loss
return step_fn, var_dict
def reparam(R, scale):
"""Reparameterize rotation (as 6D) and scale (as inv_softplus(scale))."""
R_reparam = R[:, :2, :]
scales_reparam = tf.math.log(tf.math.exp(scale) - 1.0)  # inverse softplus
return R_reparam, scales_reparam
def unreparam(R_re,
scale_re):
"""Un-reparameterize rotation (as 6D) and scale (as inv_softplus(scale))."""
R = vec6d_to_rot_mat(tf.reshape(R_re, (-1, 6)))
scale = tf.math.softplus(scale_re)
return R, scale
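# Round-trip sketch (illustrative): reparam and unreparam invert each other.
#
#   R0, s0 = tf.eye(3, batch_shape=[4]), tf.ones(4)
#   R1, s1 = unreparam(*reparam(R0, s0))
#   # R1 ~= R0 and s1 ~= s0 (softplus undoes the inverse-softplus encoding)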
def initial_epi_estimate(
multi_view_pose3d_preds
):
"""(Stage 1) Estimate initial pose and cameras from per-view monocular 3D.
Assumes that the zero's camera frame is canonical (R=I, scale=1, shift=(0,0)).
Procrustes aligns each pose to the pose in zero's camera frame.
Arguments:
multi_view_pose3d_preds: (n_cam, n_joints, n_dim) monocular
Returns:
mean_pose3d_centered: (n_joints, n_dim) initial pose
R: (n_cam, 3, 3) initial camera rots
scale: (n_cam, ) initial camera scales
shift: (n_cam, 2) initial camera shifts
"""
all_aligned_preds = []
params = []
for view_id in tf.range(1, tf.shape(multi_view_pose3d_preds)[0]):
pred = multi_view_pose3d_preds[view_id]
_, aligned_pred, view_params = align_aba(multi_view_pose3d_preds[0], pred)
all_aligned_preds.append(aligned_pred)
params.append(view_params)
view0_mean = params[0][0]
mean_pose3d_centered = tf.reduce_mean(all_aligned_preds, axis=0) - view0_mean
first_view_params = [tf.zeros(3), view0_mean, tf.eye(3), 1.0]
all_view_params = [first_view_params] + params
_, shift, R, scale = map(tf.stack, zip(*all_view_params))
return mean_pose3d_centered, (R, scale, shift)
def project3d_weak(pose_pred, R, scale,
shift):
"""Performs true weak projection of poses using given camera params."""
tf.debugging.assert_shapes([
(pose_pred, ('joints', 3)),
(shift, ('cams', 3)),
(R, ('cams', 3, 3)),
(scale, ('cams',)),
])
rot_views = tf.einsum('jd,kdo->kjo', pose_pred, R) # (k=4, j=17, d=o=3)
back_rot_preds = rot_views * scale[:, None, None] + shift[:, None, :]
# > (4, 17, 3)
return back_rot_preds
# [batch, 2], [batch, n_comp, 4] -> [batch, ]
def gaussian_mixture_log_prob(points,
params,
eps):
"""Computes the likelihood of `points` given GMM `params`."""
tf.debugging.assert_shapes([
(points, ('batch_size', 2)),
(params, ('batch_size', 'n_comp', 4)), # [comp_weight, mu_x, mu_y, cov]
(eps, ())
])
mix_probs = params[:, :, 0] # [b, c]
mu_s = params[:, :, 1:3] # [b, c, 2]
stds = tf.sqrt(params[:, :, 3])  # std devs (sqrt of the variances)  [b, c]
diag_s = tf.repeat(stds[:, :, None], repeats=2, axis=2)  # [b, c, 2]
norm = tfp.distributions.MultivariateNormalDiag(loc=mu_s, scale_diag=diag_s)
test_points = tf.repeat(points[:, None, :], params.shape[1], 1)
gauss_log_probs = norm.log_prob(tf.cast(test_points, tf.float64)) # [b, c]
log_gauss_plus_mix = gauss_log_probs + tf.math.log(mix_probs + eps) # [b, c]
final_log_probs = tf.reduce_logsumexp(log_gauss_plus_mix, axis=1) # [b, ]
return tf.cast(final_log_probs, tf.float32)
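# Single-component sanity check (illustrative): with one component of weight
# 1 and unit variance, the mixture log-prob reduces to a plain Gaussian
# log-pdf, i.e. -log(2*pi) ~ -1.8379 at the mean of a 2D unit Gaussian.
#
#   pts = tf.zeros((1, 2))
#   params = tf.constant([[[1.0, 0.0, 0.0, 1.0]]], tf.float64)  # [w, mu_x, mu_y, var]
#   gaussian_mixture_log_prob(pts, params, 1e-8)  # ~ [-1.8379]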
def total_frame_loss(pose_pred,
R,
scale,
shift,
mv_heatmaps_mixture):
"""The objective optimized by the full probabilistic iterative solver."""
tf.debugging.assert_shapes([
(pose_pred, ('joints', 3)),
(shift, ('cams', 3)),
(R, ('cams', 3, 3)),
(scale, ('cams',)),
(mv_heatmaps_mixture, ('cams', 'joints', 4, 4))
])
n_cam = mv_heatmaps_mixture.shape[0]
n_joint = pose_pred.shape[0]
views_preds = project3d_weak(pose_pred, R, scale, shift)
views_preds_2d_flat = tf.reshape(views_preds[:, :, :2], (n_cam * n_joint, 2))
mv_heatmap_flat = tf.reshape(mv_heatmaps_mixture, (n_cam * n_joint, 4, 4))
logp = gaussian_mixture_log_prob(views_preds_2d_flat, mv_heatmap_flat, 1e-8)
return -1 * tf.reduce_mean(logp)
h36m_edges = tf.convert_to_tensor(
[[0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [8, 14], [14, 15],
[15, 16], [11, 12], [12, 13], [0, 4], [0, 1], [1, 2], [2, 3],
[4, 5], [5, 6]])
def get_h36m_edge_lens(pose3d):
"""Computes an array of bone lenghts."""
flat_joints = tf.gather(pose3d, tf.reshape(h36m_edges, (-1,)))
edge_coords = tf.reshape(flat_joints, (-1, 2, 3))
edge_vecs = edge_coords[:, 0, :] - edge_coords[:, 1, :]
return tf.linalg.norm(edge_vecs, axis=-1)
def get_edge_len_loss(gt_edge_lens,
pose_pred):
"""Computes the scale-invariant bone length distance."""
pred_edge_lens = get_h36m_edge_lens(pose_pred)
norm_gt_edge_lens = gt_edge_lens / tf.reduce_mean(gt_edge_lens)
norm_pred_edge_lens = pred_edge_lens / tf.reduce_mean(pred_edge_lens)
len_err = tf.linalg.norm(norm_gt_edge_lens - norm_pred_edge_lens)
return len_err
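# Scale-invariance sketch (illustrative): a uniformly rescaled pose incurs
# zero bone-length loss against its own edge lengths.
#
#   pose = tf.random.normal((17, 3))
#   get_edge_len_loss(get_h36m_edge_lens(pose), 3.0 * pose)  # ~ 0.0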
def optimize_heatmap_logp(init_pose,
init_params,
heatmap_mix_arr,
gt_edge_lens,
edge_lens_lambda,
opt_steps,
report_n_results,
opt
):
"""Performs the full probabilistic iterative bundle adjustment.
Arguments:
init_pose: (n_joints, n_dim) initial human pose
init_params: a list or tuple of tensors [R, scale, shift]
as returned by `initial_epi_estimate`
heatmap_mix_arr: (n_cam, n_comp, 4) a tensor of GMM parameters
gt_edge_lens: (n_bones, ) the lenghts of all bones
edge_lens_lambda: the weight of the bone length loss
opt_steps: how many GD steps to take
report_n_results: how many steps to report
opt: an instance of a tf.keras.optimizers.Optimizer
Returns:
A tuple of six tensors:
steps_i: (report_n_results,)
losses: (report_n_results,)
pose_preds: (report_n_results, n_joints, 3)
Rs: (report_n_results, 3, 3)
scales: (report_n_results, 1)
shifts: (report_n_results, 2)
"""
def objective(pose_pred, R_re, scale_re, shift, heatmaps):
R, scale = unreparam(R_re, scale_re)
logp_loss = total_frame_loss(pose_pred, R, scale, shift, heatmaps)
losses = [logp_loss]
if gt_edge_lens is not None:
edge_len_loss = get_edge_len_loss(gt_edge_lens, pose_pred)
losses.append(edge_lens_lambda * edge_len_loss)
loss = sum(losses)
return loss
re_params_init = [*reparam(*init_params[:2]), init_params[2]]
opt_args = ['pose_pred', 'R_re', 'scale_re', 'shift']
init_argv = [init_pose] + re_params_init + [heatmap_mix_arr]
step_fn, var_dict = minimize_step_fn(objective, opt, opt_args, init_argv, {})
collect_every_n = opt_steps // (report_n_results - 1)
results = []
loss = objective(*init_argv)
for step_i in range(opt_steps):
if step_i % collect_every_n == 0 or step_i == (opt_steps - 1):
cur_re_params = [tf.identity(var_dict[v]) for v in opt_args]
cur_params = [cur_re_params[0],
*unreparam(*cur_re_params[1:3]),
cur_re_params[3]]
results.append([step_i, loss, *cur_params])
loss = step_fn()
result_arrs = list(map(tf.stack, zip(*results)))
return result_arrs
def convert_rec_pose2d_to_bbox_axis(
input_rec):
"""Converts coordinates / mixture params in a record from pixels to [0, 1]."""
# full keys list:
# 'pose3d', 'cam_pose3d', 'cam_rot', 'cam_intr', 'cam_kd', 'pose2d_gt',
# 'pose2d_repr', 'heatmaps', 'pose2d_pred', 'keys',
# 'bboxes', 'pose3d_epi_pred'
bboxes, pose2d_gt, pose2d_repr, mix_params, mean_pred2d = [
input_rec[x] for x in
['bboxes', 'pose2d_gt', 'pose2d_repr', 'heatmaps', 'pose2d_pred']]
sizes = tf.math.maximum(
bboxes[:, 1] - bboxes[:, 0], bboxes[:, 3] - bboxes[:, 2])
origins = tf.stack([bboxes[:, 2], bboxes[:, 0]], axis=-1)
sizes, origins = [tf.cast(x, tf.float64) for x in [sizes, origins]]
shifted_mixture_means = mix_params[:, :, :, 1:3] - origins[:, None, None, :]
mix_params_new = tf.concat([
mix_params[:, :, :, 0, None],
(shifted_mixture_means / sizes[:, None, None, None]),
mix_params[:, :, :, 3, None] / sizes[:, None, None, None]**2
], axis=-1)
pose2d_gt_new = (pose2d_gt - origins[:, None, :]) / sizes[:, None, None]
pose2d_repr_new = (pose2d_repr - origins[:, None, :]) / sizes[:, None, None]
mean_pred_new = (mean_pred2d - origins[:, None, :]) / sizes[:, None, None]
override_rec = {
'pose2d_gt': pose2d_gt_new,
'pose2d_repr': pose2d_repr_new,
'heatmaps': mix_params_new,
'pose2d_pred': mean_pred_new,
}
assert not(override_rec.keys() - input_rec.keys()), 'override has new keys'
return {**input_rec, **override_rec}
def take_camera_subset(input_rec,
subset = None
):
"""Returns a record with a subset of cameras."""
subset_idx = tf.convert_to_tensor(subset or list(range(4)))
subset_keys = ['cam_pose3d', 'cam_rot', 'cam_intr', 'cam_kd', 'pose2d_gt',
'pose2d_repr', 'heatmaps', 'pose2d_pred', 'keys',
'bboxes', 'pose3d_epi_pred']
override_rec = {
k: tf.gather(input_rec[k], subset_idx, axis=0) for k in subset_keys
}
assert not(override_rec.keys() - input_rec.keys()), 'override has new keys'
return {**input_rec, **override_rec, 'cam_subset': subset_idx}
def mean_norm(tensor, norm_axis, mean_axis):
return tf.reduce_mean(tf.linalg.norm(tensor, axis=norm_axis), axis=mean_axis)
def batch_pmpjpe(poses3d, pose3d_gt):
"""Batch procrustes aligned mean per joint position error."""
aligned_poses = [align_aba(pose, pose3d_gt)[0] for pose in poses3d]
aligned_poses = tf.convert_to_tensor(aligned_poses)
pose_err_norms = mean_norm(aligned_poses - pose3d_gt[None], -1, -1)
return pose_err_norms
def compute_opt_stats(input_rec,
iter_opt_results
):
"""Computes per-step metrics for the output of `optimize_heatmap_logp`.
See `run_inference_optimization` for the full spec of inputs and outputs.
Args:
input_rec: an input dict of heatmaps, monocular 3D poses, etc.
iter_opt_results: an output dict produced by optimize_heatmap_logp.
Returns:
A dict of tensors combining input record with predictions and metrics.
"""
pose3d_gt, pose2d_gt, mean_posenet_pred2d = [
tf.cast(input_rec[x], tf.float32)
for x in ['pose3d', 'pose2d_gt', 'pose2d_pred']
]
iters, losses, *opt_results = iter_opt_results
n_report = losses.shape[0]
# [n_report, 4, 17, 2]
opt_pose2d_preds = tf.convert_to_tensor([
project3d_weak(*[x[viz_id] for x in opt_results])[Ellipsis, :2]
for viz_id in range(n_report)
])
gt_aligned_projected2d = []
for viz_id in range(n_report):
gt_aligned_pose = align_aba(pose3d_gt, opt_results[0][viz_id])[0]
other_opt_params = [x[viz_id] for x in opt_results[1:]]
projected2d = project3d_weak(gt_aligned_pose, *other_opt_params)
gt_aligned_projected2d.append(projected2d[Ellipsis, :2])
gt_aligned_projected2d = tf.convert_to_tensor(
gt_aligned_projected2d, dtype=tf.float32)
iter_pmpjpe = batch_pmpjpe(opt_results[0], pose3d_gt)
iter_pose2d_err = mean_norm(opt_pose2d_preds - pose2d_gt, 3, (1, 2))
mean_posenet_gt_err = mean_norm(pose2d_gt - mean_posenet_pred2d, -1, None)
iter_mean_posenet_err = mean_norm(
opt_pose2d_preds - mean_posenet_pred2d, 3, (1, 2))
iter_gt2d_gt_aligned_proj_err = mean_norm(
gt_aligned_projected2d - pose2d_gt, 3, (1, 2))
augment_rec = {
'loss': losses,
'iters': iters,
'pose3d_opt_preds': opt_results[0],
'cam_rot_opt_preds': opt_results[1],
'scale_opt_preds': opt_results[2],
'shift_opt_preds': opt_results[3],
'pose2d_opt_preds': opt_pose2d_preds,
'pose3d_gt_aligned_pred_3d_proj': gt_aligned_projected2d,
'pose3d_pred_pmpjpe': iter_pmpjpe,
'pose2d_pred_err': iter_pose2d_err,
'pose2d_pred_vs_posenet_err': iter_mean_posenet_err,
'pose2d_gt_posenet_err_mean': mean_posenet_gt_err,
'pose3d_gt_backaligned_pose2d_gt_err': iter_gt2d_gt_aligned_proj_err,
}
assert not(augment_rec.keys() & input_rec.keys()), 'augment overrides keys'
return {**augment_rec, **input_rec}
def apply_distortion(xy, kd):
"""Apply full-perspective radial distortion."""
xx, yy = xy[:, 0], xy[:, 1]
k1, k2, p1, p2, k3 = [kd[i] for i in range(5)]
r_sq = xx**2 + yy**2 # r^2
c_radial = (1 + k1 * r_sq + k2 * r_sq**2 + k3 * r_sq**3)
x_kd = xx*c_radial + 2*p1*xx*yy + p2*(r_sq + 2*(xx**2))
y_kd = yy*c_radial + 2*p2*xx*yy + p1*(r_sq + 2*(yy**2))
xy = tf.stack([x_kd, y_kd], axis=1)
return xy
def project_3d_tf(
points,
cam_pose,
cam_rot,
ffpp,
kd,
weak = False,
eps = 1e-8):
"""Apply full-perspective projection. Use mean depth if weak=True."""
points_cent = (points - cam_pose[None])
cam_rot_mat_t = tf.transpose(cam_rot)
cam_frame_points = points_cent @ cam_rot_mat_t
xy_3d, zz = cam_frame_points[:, 0:2], cam_frame_points[:, 2, None]
zz_div = tf.reduce_mean(zz, keepdims=True) if weak else zz
k_mat = tf.cast(tf.convert_to_tensor([ffpp[:2]]), points.dtype)
pp = tf.cast(tf.convert_to_tensor([ffpp[2:]]), points.dtype)
# order of application as in:
# http://www.vision.caltech.edu/bouguetj/calib_doc/htmls/parameters.html
xy = xy_3d / (zz_div + eps)
xy = apply_distortion(xy, kd)
xy = xy * k_mat
xy = xy + pp
return xy, zz[:, 0]
def get_fake_gt_heatmaps(
input_rec, gt_err_std = 0.0):
"""Get "fake" GMM parameters corresponding to (GT + normal(0, std))."""
pose_gt = input_rec['pose2d_repr'] # ('cams', 'joints', 2)
n_cams, n_joints = pose_gt.shape[:2]
gt_noise = tf.random.normal(pose_gt.shape, 0, gt_err_std, dtype=pose_gt.dtype)
pose_gt_noisy = pose_gt + gt_noise
mix_params_new = tf.concat([
0.25 * tf.ones((n_cams, n_joints, 4, 1), dtype=pose_gt.dtype),
tf.repeat(pose_gt_noisy[:, :, None, :], repeats=4, axis=2),
0.0003 * tf.ones((n_cams, n_joints, 4, 1), dtype=pose_gt.dtype),
], axis=-1)
return mix_params_new
def recompute_repr_with_weak_proj(
data_rec):
"""Estimate what 2D GT would have been if the true camera model was weak."""
new_repr = []
for i in range(data_rec['cam_pose3d'].shape[0]):
new_repr.append(
project_3d_tf(
data_rec['pose3d'],
data_rec['cam_pose3d'][i],
data_rec['cam_rot'][i],
data_rec['cam_intr'][i],
data_rec['cam_kd'][i],
weak=True))
dt = data_rec['pose2d_repr'].dtype
return tf.convert_to_tensor([x[0] for x in new_repr], dtype=dt)
def get_single_fake_perfect_epi_init(
data_rec, cam_id):
"""Estimate what a perfect monocular 3D prediction would have been."""
cams = [data_rec[k][cam_id]
for k in ['cam_pose3d', 'cam_rot', 'cam_intr', 'cam_kd']]
bbox = data_rec['bboxes'][cam_id]
f_mean = cams[2][:2].numpy().mean()
proj_weak, zz = project_3d_tf(data_rec['pose3d'], *cams, weak=True)
mean_z = zz.numpy().mean()
proj_weak = proj_weak.numpy()
z_scaled = zz[:, None].numpy() / mean_z * f_mean
cam_frame_xyz = tf.concat([proj_weak, z_scaled], axis=1)
cam_frame_xyz = tf.cast(cam_frame_xyz, tf.float32)
size = tf.math.maximum(bbox[1] - bbox[0], bbox[3] - bbox[2])
origin = tf.stack([bbox[2], bbox[0], 0], axis=-1)
size, origin = [tf.cast(x, tf.float32) for x in [size, origin]]
unit_xyz = (cam_frame_xyz - origin) / size
return unit_xyz
def get_full_fake_gt_init(data_rec):
n_cam = data_rec['pose3d_epi_pred'].shape[0]
epi_init = [get_single_fake_perfect_epi_init(data_rec, cam_id)
for cam_id in range(n_cam)]
dt = data_rec['pose3d_epi_pred'].dtype
return tf.convert_to_tensor(epi_init, dtype=dt)
def run_inference_optimization(data_rec,
opt_steps = 100,
report_n_results = 50,
cam_subset = None,
edge_lens_lambda = 0.0,
fake_gt_heatmaps = False,
fake_gt_ht_std = 0.0,
recompute_weak_repr = False,
learning_rate = 1e-2,
fake_gt_init = False,
random_init = False
):
"""Perform full probabilistic bundle adjustment with ablations.
Arguments:
data_rec: a dict with the following signature
* heatmaps (4, 17, 4, 4) float64 - pre-view (4) per-joint (17) pixel
coordinates with uncertainties in the format
{(mean_x, mean_y, sigma, pi)}_k^K; obtained by fitting a K-component
(for K=4) spherical GMMs to join location heatmaps predicted by a
stacked hourglass 2D pose estimation net
* pose2d_pred (4, 17, 2) float64 - pre-view per-joint pixel location (x,y)
predictions estimated from these heatmaps by computing the expected
value of each GMM
* keys (4,) string - corresponding frames of the original H36M dataset
in the format 's%07d_f%07d_c%02d' % (sequence, frame, camera)
* bboxes (4, 4) int32 - human bounding box used to estimate
2D pose heatmaps
* pose3d_epi_pred (4, 17, 3) float32 - per-view predictions of a
pre-trained monocular 3D pose estimation network
* pose3d (17, 3) float64 - GT 3D pose in the reference frame (RF) attached
to the center of mass of the subject, rotated to align the y-axis
with the hip line, and re-scaled to meters
* cam_pose3d (4, 3) float64 - GT 3D camera poses in the same RF
* cam_rot (4, 3, 3) float64 - GT 3D camera rotations in the same RF
* cam_intr (4, 4) float64 - GT 3D camera intrinsic parameters
* cam_kd (4, 5) float64 - GT 3D camera distortion parameters
* pose2d_gt (4, 17, 2) float64 - GT 2D per-frame human pose
* pose2d_repr (4, 17, 2) float64 - reprojected GT poses
opt_steps: the number of gradient decent steps to take
report_n_results: the number of optimizer steps to report
cam_subset: None (all cameras) or a tuple of cam_ids to use, e.g. (2, 3)
edge_lens_lambda: the weight of the (personalized) bone lengths loss
fake_gt_heatmaps: whether to replace GMM parameters with GT GMM with noise
fake_gt_ht_std: if `fake_gt_heatmaps=True`, the std of the noise added to GT
recompute_weak_repr: whether to replace GT used to compute "fake GT GMMs"
with GT one would have had if the true camera model was weak
learning_rate: Adam learning rate
fake_gt_init: whether to use perfect (GT) initialization
random_init: whether to use completely random initialization
Returns:
A dict with the same keys as in data_rec and following additional keys:
* loss (51,) float32 - probabilistic bundle adjustment losses
* iters (51,) int32 - iteration numbers
* pose3d_opt_preds (51, 17, 3) float32 - 3D pose predictions
* cam_rot_opt_preds (51, 4, 3, 3) float32 - camera rotation predictions
* scale_opt_preds (51, 4) float32 - predictions for the scale weak
camera parameter
* shift_opt_preds (51, 4, 3) float32 - predictions for the shift
weak camera parameter
* pose2d_opt_preds (51, 4, 17, 2) float32 - predicted 3D pose reprojected
using predicted camera parameters
* pose3d_gt_aligned_pred_3d_proj (51, 4, 17, 2) float32 - predicted 3D
pose reprojected using predicted camera parameters and aligned to
GT 3D using Procrustes alignment
* pose3d_pred_pmpjpe (51,) float32 - Procrustes-aligned Mean Per Joint
Position Error
* pose2d_pred_err (51,) float32 - 2D reprojection error wrt GT
* pose2d_pred_vs_posenet_err (51,) float32 - 2D error of the stack
hourglass network
* pose2d_gt_posenet_err_mean () float32 - final 2D error
* pose3d_gt_backaligned_pose2d_gt_err (51,) float32 - 2D error of
the predicted 3D projected using GT camera parameters
* cam_subset (4,) int32 - a subset of cameras used for inference
"""
if recompute_weak_repr:
# in case we want to train with "ground truth" weak projections
# to _simulate_ zero camera model error; never use in production
data_rec['pose2d_repr'] = recompute_repr_with_weak_proj(data_rec)
if fake_gt_init:
# project ground truth to get perfect mono 3d estimates
data_rec['pose3d_epi_pred'] = get_full_fake_gt_init(data_rec)
if random_init:
data_init = data_rec['pose3d_epi_pred']
data_rec['pose3d_epi_pred'] = tf.random.normal(
data_init.shape, dtype=data_init.dtype)
data_rec = convert_rec_pose2d_to_bbox_axis(data_rec)
data_rec = take_camera_subset(data_rec, cam_subset)
cn_mean_pred, init_cam = initial_epi_estimate(data_rec['pose3d_epi_pred'])
if fake_gt_heatmaps:
# replace real predicted joint heatmaps with gaussians around ground truth
data_rec['heatmaps'] = get_fake_gt_heatmaps(data_rec, fake_gt_ht_std)
if edge_lens_lambda > 0:
gt_edge_lens = tf.cast(get_h36m_edge_lens(data_rec['pose3d']), tf.float32)
else:
gt_edge_lens = None
opt = tf.keras.optimizers.Adam(learning_rate)
iter_opt_results = optimize_heatmap_logp(
cn_mean_pred, init_cam, data_rec['heatmaps'],
gt_edge_lens, edge_lens_lambda,
report_n_results=report_n_results, opt=opt, opt_steps=opt_steps)
opt_stats = compute_opt_stats(data_rec, iter_opt_results)
opt_stats = {k: tf.convert_to_tensor(v).numpy()
for k, v in opt_stats.items()}
# for k, v in opt_stats.items():
# print(k, v.dtype)
return opt_stats
| [
"[email protected]"
]
| |
5d9a50036af925c52e137946772a68b72ec1bfc2 | f62fd455e593a7ad203a5c268e23129473d968b6 | /vitrage-1.5.2/vitrage/datasources/neutron/port/__init__.py | ba3d53285a9c3d37c9cc95bffce2f122fa6717ec | [
"Apache-2.0"
]
| permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 1,487 | py | # Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from vitrage.common.constants import UpdateMethod
NEUTRON_PORT_DATASOURCE = 'neutron.port'
OPTS = [
cfg.StrOpt('transformer',
default='vitrage.datasources.neutron.port.'
'transformer.PortTransformer',
help='Neutron port transformer class path',
required=True),
cfg.StrOpt('driver',
default='vitrage.datasources.neutron.port.driver.PortDriver',
help='Neutron port driver class path',
required=True),
cfg.StrOpt('update_method',
default=UpdateMethod.PUSH,
help='None: updates only via Vitrage periodic snapshots.'
'Pull: updates every [changes_interval] seconds.'
'Push: updates by getting notifications from the'
' datasource itself.',
required=True),
]
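# Registration sketch (assumption: the options are registered under a group
# named after the datasource; the exact group Vitrage uses is not shown here):
#
#   from oslo_config import cfg
#   cfg.CONF.register_opts(OPTS, group=NEUTRON_PORT_DATASOURCE)
#   cfg.CONF[NEUTRON_PORT_DATASOURCE].update_method  # -> 'push'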
| [
"[email protected]"
]
| |
213dd540eb8864eec2bd1888ba1fac73949e86c7 | 8200e9869cae6699d186a4cf9172800f95bede50 | /rainman/testing.py | 3158b7e3ba15826b2b6982e7f0379b854aa1e8d0 | []
| no_license | wickman/rainman | d3684f0b4fa834b8800650036caf69093ff0504a | 14dec93c76f8c3f902fe769f5e9d7a0ee1db95ca | refs/heads/master | 2016-09-05T12:57:25.794351 | 2014-08-13T22:27:19 | 2014-08-13T22:27:19 | 16,630,229 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,986 | py | from __future__ import print_function
import os
import random
import socket
from .client import Client
from .fileset import FileSet, Fileslice
from .fs import DISK
from .metainfo import MetaInfoBuilder
from .peer_id import PeerId
from .torrent import Torrent
from .scheduler import Scheduler
from tornado.testing import bind_unused_port
from twitter.common.dirutil import safe_mkdtemp, safe_mkdir
from twitter.common.quantity import Amount, Data, Time
class SocketClient(Client):
def __init__(self, sock, port, io_loop, peer_id=None, **kw):
self.__sock = sock
self.__port = port
super(SocketClient, self).__init__(peer_id or PeerId.generate(), io_loop=io_loop, **kw)
def listen(self):
self._port = self.__port
self.add_sockets([self.__sock])
def make_fileset(filelist, piece_size, fs=DISK):
"Given (filename, contents) list, return dir, FileSet pair."
td = safe_mkdtemp()
for filename, contents in filelist:
sl = Fileslice(os.path.join(td, filename), slice(0, len(contents)))
fs.fill(sl)
fs.write(sl, contents)
filelist = [(filename, len(contents)) for (filename, contents) in filelist]
return td, FileSet(filelist, piece_size)
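# Usage sketch (illustrative): materialize two small files in a temp dir and
# wrap them in a FileSet with 4-byte pieces.
#
#   td, fileset = make_fileset([('a.txt', b'hello'), ('b.txt', b'world!')],
#                              piece_size=4)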
def make_metainfo(filelist, piece_size, fs=DISK):
td, fileset = make_fileset(filelist, piece_size, fs=fs)
mib = MetaInfoBuilder(fileset.rooted_at(td), relpath=td)
return td, fileset, mib.build(fs)
def make_torrent(filelist, piece_size, tracker, fs=DISK):
td, fileset, metainfo = make_metainfo(filelist, piece_size, fs=fs)
torrent = Torrent()
torrent.info = metainfo
torrent.announce = tracker
return td, fileset, torrent
def random_stream(N):
return os.urandom(N)
def make_ensemble(
io_loop,
num_seeders=1,
num_leechers=1,
piece_size=16384,
max_filesize=32768,
total_filesize=1048576,
seed=31337,
scheduler_impl=Scheduler,
fs=DISK):
root = safe_mkdtemp()
seeder_sockets = [(PeerId.generate(), bind_unused_port()) for _ in range(num_seeders)]
leecher_sockets = [(PeerId.generate(), bind_unused_port()) for _ in range(num_leechers)]
tracker_info = os.path.join(root, 'tracker_info.txt')
with open(tracker_info, 'w') as fp:
for peer_id, (_, port) in seeder_sockets + leecher_sockets:
print('%s 127.0.0.1 %d' % (peer_id, port), file=fp)
tracker_info = 'file://' + tracker_info
random.seed(seed)
filelist = []
files = 0
while total_filesize > 0:
filesize = min(total_filesize, random.randrange(0, max_filesize))
total_filesize -= filesize
filename = '%x.txt' % files
filelist.append((filename, filesize))
content = random_stream(filesize)
for replica in ['dataset'] + ['seeder%d' % k for k in range(num_seeders)]:
safe_mkdir(os.path.join(root, replica))
real_path = os.path.join(root, replica, filename)
slice_ = Fileslice(real_path, slice(0, filesize))
fs.fill(slice_)
fs.write(slice_, content)
files += 1
fileset = FileSet(filelist, piece_size)
mib = MetaInfoBuilder(
fileset.rooted_at(os.path.join(root, 'dataset')),
relpath=os.path.join(root, 'dataset'),
piece_size=piece_size)
torrent = Torrent()
torrent.info = mib.build(fs)
torrent.announce = tracker_info
seeder_clients = []
leecher_clients = []
def make_peer(peer_id, listener, port, chroot):
client = SocketClient(listener, port, io_loop, peer_id, fs=fs)
scheduler = scheduler_impl(client, request_size=Amount(piece_size // 4, Data.BYTES))
client.listen()
client.register_torrent(torrent, root=chroot)
return scheduler
for index, (peer_id, (listener, port)) in enumerate(seeder_sockets):
seeder_clients.append(
make_peer(peer_id, listener, port, os.path.join(root, 'seeder%d' % index)))
for index, (peer_id, (listener, port)) in enumerate(leecher_sockets):
leecher_clients.append(
make_peer(peer_id, listener, port, os.path.join(root, 'leecher%d' % index)))
return torrent, seeder_clients, leecher_clients
| [
"[email protected]"
]
| |
dcee632cb5018883a234ec0cd01f4f23bcc5dca7 | 448c533feb888fd2b64d0d4f130ca4bc1c82e6c2 | /juparc/cli/select_cmd.py | 2f1e52e4d5465b1118e619e2e321a474887eaab1 | []
| no_license | gems-uff/jupyter-archaeology | e2433f745c70cbcc502cfc762127e75e3762ecb4 | 3bf3d6fdf672d2123c599fefa65292de3c3dc1c9 | refs/heads/main | 2023-03-24T21:23:06.999836 | 2021-03-12T00:40:50 | 2021-03-12T00:40:50 | 304,041,226 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,936 | py | """Select command: select notebooks"""
import json
import sys
import re
from ..extract import load, create_default
def value(original):
"""Convert value to int, float, tuple or str"""
if isinstance(original, (float, int, tuple)):
return original
if isinstance(original, str):
try:
return int(original)
except ValueError:
try:
return float(original)
except ValueError:
if "." in original:
try:
return tuple(int(x) for x in original.split('.'))
except ValueError:
pass
return original
return json.dumps(original)
def compare(notebook_arg, attr):
"""Compare argument to notebook value"""
if attr == "null":
return notebook_arg is None
nval = value(notebook_arg)
if attr.startswith(">"):
if attr.startswith(">="):
return nval >= value(attr[2:])
return nval > value(attr[1:])
if attr.startswith("<"):
if attr.startswith("<="):
return nval <= value(attr[2:])
return nval < value(attr[1:])
if attr.startswith("=="):
return nval == value(attr[2:].lstrip())
if attr.startswith("!="):
return nval != value(attr[2:].lstrip())
return re.match(attr, str(nval)) is not None
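# Illustrative semantics of compare(), grounded in the branches above:
#
#   compare(10, '>5')            # True -- numeric comparison
#   compare('3.7.1', '==3.7.1')  # True -- dotted strings compare as int tuples
#   compare(None, 'null')        # True -- 'null' matches missing values
#   compare('python3', 'py.*')   # True -- anything else is a regex match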
def select_cmd(args, _):
"""select cmd"""
if not args.notebooks:
lines = list(sys.stdin)
notebooks = json.loads("\n".join(lines))
else:
notebooks = [load(notebook) for notebook in args.notebooks]
attributes = create_default()
result = []
for notebook in notebooks:
add = True
for arg in attributes:
attr = getattr(args, arg, None)
if attr is None:
continue
attr = attr.strip()
if not compare(notebook[arg], attr):
add = False
continue
if add:
result.append(notebook)
if args.count:
print(len(result))
else:
print(json.dumps(result, indent=2))
def create_subparsers(
subparsers,
cmd='select',
helper='Select notebooks that match condition',
**defaults
):
"""create subcommands"""
parser = subparsers.add_parser(cmd, help=helper)
parser.set_defaults(func=select_cmd, command=parser)
parser.add_argument(
"-n", "--notebooks", default=None, nargs="*",
help="List of notebooks. If empty, it will read json from input"
)
parser.add_argument(
"-c", "--count", action="store_true",
help="Show count instead of notebooks"
)
attributes = create_default()
for attr in attributes:
default = defaults.get(attr, None)
parser.add_argument(
"--" + attr.replace('_', '-'), default=default,
help="Select " + attr
)
| [
"[email protected]"
]
| |
2a4a35023e91fbd52dc123b0da9b6c1f6dfcc06d | 1a2adb80f326435ec4f60bacfcc812566e687f12 | /unit_tests/bigquery/test_schema.py | 44cd8816e277b3a63a12bedf819d6675150fa099 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | runt18/gcloud-python | 66d92095c2b9541cffcb18531f5f5ad3d1adf37f | 3f836eb728488d78793f7aecaa573bd37f1f2d3c | refs/heads/master | 2021-01-23T15:41:53.642420 | 2016-09-06T20:36:13 | 2016-09-06T20:36:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,745 | py | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestSchemaField(unittest.TestCase):
def _getTargetClass(self):
from gcloud.bigquery.schema import SchemaField
return SchemaField
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
field = self._makeOne('test', 'STRING')
self.assertEqual(field.name, 'test')
self.assertEqual(field.field_type, 'STRING')
self.assertEqual(field.mode, 'NULLABLE')
self.assertEqual(field.description, None)
self.assertEqual(field.fields, None)
def test_ctor_explicit(self):
field = self._makeOne('test', 'STRING', mode='REQUIRED',
description='Testing')
self.assertEqual(field.name, 'test')
self.assertEqual(field.field_type, 'STRING')
self.assertEqual(field.mode, 'REQUIRED')
self.assertEqual(field.description, 'Testing')
self.assertEqual(field.fields, None)
def test_ctor_subfields(self):
field = self._makeOne('phone_number', 'RECORD',
fields=[self._makeOne('area_code', 'STRING'),
self._makeOne('local_number', 'STRING')])
self.assertEqual(field.name, 'phone_number')
self.assertEqual(field.field_type, 'RECORD')
self.assertEqual(field.mode, 'NULLABLE')
self.assertEqual(field.description, None)
self.assertEqual(len(field.fields), 2)
self.assertEqual(field.fields[0].name, 'area_code')
self.assertEqual(field.fields[0].field_type, 'STRING')
self.assertEqual(field.fields[0].mode, 'NULLABLE')
self.assertEqual(field.fields[0].description, None)
self.assertEqual(field.fields[0].fields, None)
self.assertEqual(field.fields[1].name, 'local_number')
self.assertEqual(field.fields[1].field_type, 'STRING')
self.assertEqual(field.fields[1].mode, 'NULLABLE')
self.assertEqual(field.fields[1].description, None)
self.assertEqual(field.fields[1].fields, None)
def test___eq___name_mismatch(self):
field = self._makeOne('test', 'STRING')
other = self._makeOne('other', 'STRING')
self.assertNotEqual(field, other)
def test___eq___field_type_mismatch(self):
field = self._makeOne('test', 'STRING')
other = self._makeOne('test', 'INTEGER')
self.assertNotEqual(field, other)
def test___eq___mode_mismatch(self):
field = self._makeOne('test', 'STRING', mode='REQUIRED')
other = self._makeOne('test', 'STRING', mode='NULLABLE')
self.assertNotEqual(field, other)
def test___eq___description_mismatch(self):
field = self._makeOne('test', 'STRING', description='Testing')
other = self._makeOne('test', 'STRING', description='Other')
self.assertNotEqual(field, other)
def test___eq___fields_mismatch(self):
sub1 = self._makeOne('sub1', 'STRING')
sub2 = self._makeOne('sub2', 'STRING')
field = self._makeOne('test', 'RECORD', fields=[sub1])
other = self._makeOne('test', 'RECORD', fields=[sub2])
self.assertNotEqual(field, other)
def test___eq___hit(self):
field = self._makeOne('test', 'STRING', mode='REQUIRED',
description='Testing')
other = self._makeOne('test', 'STRING', mode='REQUIRED',
description='Testing')
self.assertEqual(field, other)
def test___eq___hit_case_diff_on_type(self):
field = self._makeOne('test', 'STRING', mode='REQUIRED',
description='Testing')
other = self._makeOne('test', 'string', mode='REQUIRED',
description='Testing')
self.assertEqual(field, other)
def test___eq___hit_w_fields(self):
sub1 = self._makeOne('sub1', 'STRING')
sub2 = self._makeOne('sub2', 'STRING')
field = self._makeOne('test', 'RECORD', fields=[sub1, sub2])
other = self._makeOne('test', 'RECORD', fields=[sub1, sub2])
self.assertEqual(field, other)
| [
"[email protected]"
]
| |
291e27fa3dca7be5618fa79c258cbc506a56be91 | 4c535d2c7b76955b014ed87f06f5a9f078730fa7 | /10008_2.py | 20590de890ded052a6f4f8fdd8ae246f9e3df8de | []
| no_license | jennyChing/onlineJudge | cf750b5b68456f8949bed0429c3b4a328ca2eeea | 0d317db29c21a9faec7a7cf43de5fa03fda50b3e | refs/heads/master | 2020-04-06T06:56:34.850903 | 2016-08-21T23:38:08 | 2016-08-21T23:38:08 | 52,972,907 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | import re
from collections import Counter
n = int(input())
letters = []
for _ in range(n):
    letters += re.findall('[a-zA-Z]', input().strip())
counts = Counter(ch.upper() for ch in letters)
# Sort by descending frequency, breaking ties alphabetically, as the
# problem statement requires.
for letter, freq in sorted(counts.items(), key=lambda kv: (-kv[1], kv[0])):
    print(letter, freq)
| [
"[email protected]"
]
| |
de5ce76ee7e89d5a5155a27cb0dd2b9425b6015a | b18fb2f2f7955e8830ec1c615ab82c14cd52c8f5 | /server/src/app/http/mods/admin/admin_tools.py | aa5906344bb4426eb3e4a256528544d7997c31fa | []
| no_license | xyzmyall/spff_public | 260492c6ee917093ba3e54364d61e989d184c810 | 9a9a0973bce715de8767e4991ad24515311752c9 | refs/heads/master | 2022-10-13T20:47:08.467510 | 2020-06-10T09:25:30 | 2020-06-10T09:25:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,903 | py | import time
from werkzeug.security import check_password_hash, generate_password_hash
from app.http.http_tools.tools import http_tools
from data.server import Data
from common.common import common_tools as common, common_tools
class admin_tool():
@staticmethod
def upload_case(data,user_base):
content = common.get_base64(data['content'].encode('utf-8'))
content_md5 = common.get_md5(content)
user_id = user_base['id']
ctime = int(time.time())
event_time = common.str_to_time(data['event_time'] + ' 00:00:00')
title = common.get_base64(data['title'].encode('utf-8'))
if Data.find('case_info', [('title', '=', title)]) is not None:
# self.send_faild(error.ERROR_CASE_EXIST)
return
params = {
'user_id': user_id,
'c_time': ctime,
# 'content': content,
'content_md5': content_md5,
'event_time': event_time,
'title': title,
}
Data.insert('case_info', params)
# insert the main content
cond = [
('user_id', '=', user_id),
('c_time', '=', ctime),
('content_md5', '=', content_md5)
]
res = Data.find('case_info', cond)
params = {
'case_id': res['id'],
'content': content
}
Data.insert('case_content',params)
# fetch the new row's id and attach the content to the result
res['content'] = content
http_tools.split_case_info(res)
return res
@staticmethod
def update_case(case_id):
    # Stub: updating the case row identified by case_id is not implemented yet.
    return
@staticmethod
def check_pw(user_base,pw):
pw_md5 = common_tools.get_md5(pw)
return check_password_hash(user_base['pwhash'], pw_md5)
@staticmethod
def create_pw(pw):
pw_md5 = common_tools.get_md5(pw)
return generate_password_hash(pw_md5)
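# Round-trip sketch (illustrative): create_pw hashes an MD5 digest of the raw
# password with werkzeug's salted hash, and check_pw verifies it.
#
#   user_base = {'pwhash': admin_tool.create_pw('s3cret')}
#   admin_tool.check_pw(user_base, 's3cret')   # -> True
#   admin_tool.check_pw(user_base, 'wrong')    # -> False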
| [
"[email protected]"
]
| |
4b441cd8eaaa31d69a152fb2fafbe683a2a2aac3 | dbf7910f178ff895516c5df286c3cf13e1dd48ca | /dashboard/urls.py | 771e1e715e42ef2b30e0afc8c6b1f5e8ba8114fa | []
| no_license | nimadorostkar/crop_stocks | df5b5ef42e746c4fe66416d33534685eb2d33053 | b382f3d912ad4276882eb7bb47a6147f335e4faa | refs/heads/master | 2023-05-15T00:48:46.889463 | 2021-06-10T08:26:58 | 2021-06-10T08:26:58 | 353,626,010 | 1 | 0 | null | 2021-04-25T07:08:59 | 2021-04-01T08:16:16 | HTML | UTF-8 | Python | false | false | 280 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.dashboard, name='dashboard'),
#path('payment/', views.payment, name='payment'),
path('ticket/', views.ticket, name='ticket'),
path('money_req/', views.money_req, name='money_req'),
]
| [
"[email protected]"
]
| |
bf4a161f80efb1db0526dcffe7eb8a5ff38a2e0c | d41a7e63204b66b3d3b27dbf1e1a3266080c8396 | /customers/models.py | 32243ad6316aac73f2807db3cd38549951c7a132 | []
| no_license | SimonOkello/salesapp | 552aa3c607a90ad9fea19aa08194750dccda1eca | 3480965fed58c214177f56231e6bd835e47bb201 | refs/heads/main | 2023-03-23T12:08:08.354434 | 2021-03-23T12:48:19 | 2021-03-23T12:48:19 | 350,634,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from django.db import models
# Create your models here.
class Customer(models.Model):
name = models.CharField(max_length=120)
logo = models.ImageField(upload_to='customers', default='default.png')
def __str__(self):
return self.name | [
"[email protected]"
]
| |
d19f42f3b942444cd6f0918191526326b5240ab2 | 18631e9a657324ef1f83da58f4346e9f2c368d28 | /test/functional/rpc_spentindex.py | d0528a1eb21af3a8430c831774faf5aa8af868f7 | [
"MIT"
]
| permissive | thehomosapien/AMLBitcoin | 1e68bf6621d9ee055385ef420b45c9dc289b4f8c | f097ca52c2e8039761f1927d83a9fe0b4c355b1c | refs/heads/master | 2020-05-26T08:27:40.095034 | 2019-05-24T07:31:43 | 2019-05-24T07:31:43 | 188,166,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,158 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2017-2018 The AmlBitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test RPC addressindex generation and fetching
#
import time
from test_framework.test_framework import AmlBitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class SpentIndexTest(AmlBitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self):
self.add_nodes(4, [
# Nodes 0/1 are "wallet" nodes
["-debug"],
["-debug", "-spentindex"],
# Nodes 2/3 are used for testing
["-debug", "-spentindex"],
["-debug", "-spentindex", "-txindex"]])
self.start_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
self.sync_all()
def run_test(self):
print("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
# Check that the spent index is populated once the output is spent
print("Testing spent index...")
feeAmlBits = 10000
privkey = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
address = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
addressHash = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
scriptPubKey = CScript([OP_DUP, OP_HASH160, addressHash, OP_EQUALVERIFY, OP_CHECKSIG])
unspent = self.nodes[0].listunspent()
tx = CTransaction()
amount = int(unspent[0]["amount"] * 100000000 - feeAmlBits)
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
tx.vout = [CTxOut(amount, scriptPubKey)]
tx.rehash()
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.nodes[0].generate(1)
self.sync_all()
print("Testing getspentinfo method...")
# Check that the spentinfo works standalone
info = self.nodes[1].getspentinfo({"txid": unspent[0]["txid"], "index": unspent[0]["vout"]})
assert_equal(info["txid"], txid)
assert_equal(info["index"], 0)
assert_equal(info["height"], 106)
print("Testing getrawtransaction method...")
# Check that verbose raw transaction includes spent info
txVerbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentTxId"], txid)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentIndex"], 0)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentHeight"], 106)
# Check that verbose raw transaction includes input values
txVerbose2 = self.nodes[3].getrawtransaction(txid, 1)
assert_equal(float(txVerbose2["vin"][0]["value"]), (amount + feeAmlBits) / 100000000)
assert_equal(txVerbose2["vin"][0]["valueSat"], amount + feeAmlBits)
# Check that verbose raw transaction includes address values and input values
privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
addressHash2 = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(int(txid, 16), 0))]
amount = int(amount - feeAmlBits)
tx2.vout = [CTxOut(amount, scriptPubKey2)]
tx2.rehash()  # rehash the transaction that was just built
self.nodes[0].importprivkey(privkey)
signed_tx2 = self.nodes[0].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
txid2 = self.nodes[0].sendrawtransaction(signed_tx2["hex"], True)
# Check the mempool index
self.sync_all()
txVerbose3 = self.nodes[1].getrawtransaction(txid2, 1)
assert_equal(txVerbose3["vin"][0]["address"], address2)
assert_equal(txVerbose3["vin"][0]["valueSat"], amount + feeAmlBits)
assert_equal(float(txVerbose3["vin"][0]["value"]), (amount + feeAmlBits) / 100000000)
# Check the database index
block_hash = self.nodes[0].generate(1)
self.sync_all()
txVerbose4 = self.nodes[3].getrawtransaction(txid2, 1)
assert_equal(txVerbose4["vin"][0]["address"], address2)
assert_equal(txVerbose4["vin"][0]["valueSat"], amount + feeAmlBits)
assert_equal(float(txVerbose4["vin"][0]["value"]), (amount + feeAmlBits) / 100000000)
# Check block deltas
print("Testing getblockdeltas...")
block = self.nodes[3].getblockdeltas(block_hash[0])
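        # getblockdeltas returns one entry per transaction in the block:
        # index 0 is the coinbase (nothing indexed for it here), index 1 is
        # tx2 with its spent input and new output, as the assertions below check.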
assert_equal(len(block["deltas"]), 2)
assert_equal(block["deltas"][0]["index"], 0)
assert_equal(len(block["deltas"][0]["inputs"]), 0)
assert_equal(len(block["deltas"][0]["outputs"]), 0)
assert_equal(block["deltas"][1]["index"], 1)
assert_equal(block["deltas"][1]["txid"], txid2)
assert_equal(block["deltas"][1]["inputs"][0]["index"], 0)
assert_equal(block["deltas"][1]["inputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
assert_equal(block["deltas"][1]["inputs"][0]["AmlBits"], (amount + feeAmlBits) * -1)
assert_equal(block["deltas"][1]["inputs"][0]["prevtxid"], txid)
assert_equal(block["deltas"][1]["inputs"][0]["prevout"], 0)
assert_equal(block["deltas"][1]["outputs"][0]["index"], 0)
assert_equal(block["deltas"][1]["outputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
assert_equal(block["deltas"][1]["outputs"][0]["AmlBits"], amount)
print("Passed\n")
if __name__ == '__main__':
SpentIndexTest().main()
| [
"[email protected]"
]
| |
63e68fda343dd77d5274e64276af8f9dbe8a4cf3 | 18219d0fc95936ded56fe44f9a65ecb27f015232 | /195 processor function.py | 1bb14d07ad6b1b96ff846c371a8b2dc8eb157242 | []
| no_license | JDavid121/Script-Curso-Cisco-Python | 20a61b91b09376dcaef54f8ae5f86fe252de5c33 | 6d68c17ff3c3826e9fc609d110ce9d0e6ebf718b | refs/heads/master | 2021-05-18T04:54:59.948970 | 2020-03-29T20:19:53 | 2020-03-29T20:19:53 | 251,120,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 12:07:52 2020
processor function
@author: David
"""
from platform import processor
# The processor() function returns a string filled with
# the real processor name (if possible).
print(processor())
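# Hypothetical sample output (machine-dependent; processor() may also return
# an empty string when the name cannot be determined):
#   Intel64 Family 6 Model 158 Stepping 10, GenuineIntel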
| [
"[email protected]"
]
| |
5685f50ae8b3fe947479b8d83eeb57c297189aed | 584db1be8b6bdedaa56d186692ad72da5ee07164 | /patron/api/openstack/compute/contrib/floating_ip_dns.py | eb00f561cb048803c371d2722e3da287b968e117 | [
"Apache-2.0"
]
| permissive | casbin/openstack-patron | 66006f57725cf1c3d735cd5529d3459fd77384c8 | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | refs/heads/master | 2023-05-31T05:23:37.721768 | 2015-12-31T12:18:17 | 2015-12-31T12:18:17 | 382,054,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,958 | py | # Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_utils import netutils
import webob
from patron.api.openstack import extensions
from patron.api.openstack import wsgi
from patron import exception
from patron.i18n import _
from patron import network
authorize = extensions.extension_authorizer('compute', 'floating_ip_dns')
def _translate_dns_entry_view(dns_entry):
result = {}
result['ip'] = dns_entry.get('ip')
result['id'] = dns_entry.get('id')
result['type'] = dns_entry.get('type')
result['domain'] = dns_entry.get('domain')
result['name'] = dns_entry.get('name')
return {'dns_entry': result}
def _translate_dns_entries_view(dns_entries):
return {'dns_entries': [_translate_dns_entry_view(entry)['dns_entry']
for entry in dns_entries]}
def _translate_domain_entry_view(domain_entry):
result = {}
result['domain'] = domain_entry.get('domain')
result['scope'] = domain_entry.get('scope')
result['project'] = domain_entry.get('project')
result['availability_zone'] = domain_entry.get('availability_zone')
return {'domain_entry': result}
def _translate_domain_entries_view(domain_entries):
return {'domain_entries':
[_translate_domain_entry_view(entry)['domain_entry']
for entry in domain_entries]}
def _unquote_domain(domain):
"""Unquoting function for receiving a domain name in a URL.
Domain names tend to have .'s in them. Urllib doesn't quote dots,
but Routes tends to choke on them, so we need an extra level of
by-hand quoting here.
"""
return urllib.unquote(domain).replace('%2E', '.')
def _create_dns_entry(ip, name, domain):
return {'ip': ip, 'name': name, 'domain': domain}
def _create_domain_entry(domain, scope=None, project=None, av_zone=None):
return {'domain': domain, 'scope': scope, 'project': project,
'availability_zone': av_zone}
class FloatingIPDNSDomainController(object):
"""DNS domain controller for OpenStack API."""
def __init__(self):
self.network_api = network.API()
super(FloatingIPDNSDomainController, self).__init__()
def index(self, req):
"""Return a list of available DNS domains."""
context = req.environ['patron.context']
authorize(context)
try:
domains = self.network_api.get_dns_domains(context)
except NotImplementedError:
msg = _("Unable to get dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
domainlist = [_create_domain_entry(domain['domain'],
domain.get('scope'),
domain.get('project'),
domain.get('availability_zone'))
for domain in domains]
return _translate_domain_entries_view(domainlist)
def update(self, req, id, body):
"""Add or modify domain entry."""
context = req.environ['patron.context']
authorize(context)
fqdomain = _unquote_domain(id)
try:
entry = body['domain_entry']
scope = entry['scope']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
project = entry.get('project', None)
av_zone = entry.get('availability_zone', None)
if (scope not in ('private', 'public') or
project and av_zone or
scope == 'private' and project or
scope == 'public' and av_zone):
raise webob.exc.HTTPUnprocessableEntity()
if scope == 'private':
create_dns_domain = self.network_api.create_private_dns_domain
area_name, area = 'availability_zone', av_zone
else:
create_dns_domain = self.network_api.create_public_dns_domain
area_name, area = 'project', project
try:
create_dns_domain(context, fqdomain, area)
except NotImplementedError:
msg = _("Unable to create dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return _translate_domain_entry_view({'domain': fqdomain,
'scope': scope,
area_name: area})
def delete(self, req, id):
"""Delete the domain identified by id."""
context = req.environ['patron.context']
authorize(context)
domain = _unquote_domain(id)
# Delete the whole domain
try:
self.network_api.delete_dns_domain(context, domain)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except NotImplementedError:
msg = _("Unable to delete dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
class FloatingIPDNSEntryController(object):
"""DNS Entry controller for OpenStack API."""
def __init__(self):
self.network_api = network.API()
super(FloatingIPDNSEntryController, self).__init__()
def show(self, req, domain_id, id):
"""Return the DNS entry that corresponds to domain_id and id."""
context = req.environ['patron.context']
authorize(context)
domain = _unquote_domain(domain_id)
floating_ip = None
# Check whether id is a valid ipv4/ipv6 address.
if netutils.is_valid_ip(id):
floating_ip = id
try:
if floating_ip:
entries = self.network_api.get_dns_entries_by_address(
context, floating_ip, domain)
else:
entries = self.network_api.get_dns_entries_by_name(
context, id, domain)
except NotImplementedError:
msg = _("Unable to get dns entry")
raise webob.exc.HTTPNotImplemented(explanation=msg)
if not entries:
explanation = _("DNS entries not found.")
raise webob.exc.HTTPNotFound(explanation=explanation)
if floating_ip:
entrylist = [_create_dns_entry(floating_ip, entry, domain)
for entry in entries]
dns_entries = _translate_dns_entries_view(entrylist)
return wsgi.ResponseObject(dns_entries)
entry = _create_dns_entry(entries[0], id, domain)
return _translate_dns_entry_view(entry)
def update(self, req, domain_id, id, body):
"""Add or modify dns entry."""
context = req.environ['patron.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
try:
entry = body['dns_entry']
address = entry['ip']
dns_type = entry['dns_type']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
try:
entries = self.network_api.get_dns_entries_by_name(
context, name, domain)
if not entries:
# create!
self.network_api.add_dns_entry(context, address, name,
dns_type, domain)
else:
# modify!
self.network_api.modify_dns_entry(context, name, address,
domain)
except NotImplementedError:
msg = _("Unable to create dns entry")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return _translate_dns_entry_view({'ip': address,
'name': name,
'type': dns_type,
'domain': domain})
def delete(self, req, domain_id, id):
"""Delete the entry identified by req and id."""
context = req.environ['patron.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
try:
self.network_api.delete_dns_entry(context, name, domain)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except NotImplementedError:
msg = _("Unable to delete dns entry")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
class Floating_ip_dns(extensions.ExtensionDescriptor):
"""Floating IP DNS support."""
name = "FloatingIpDns"
alias = "os-floating-ip-dns"
namespace = "http://docs.openstack.org/ext/floating_ip_dns/api/v1.1"
updated = "2011-12-23T00:00:00Z"
def __init__(self, ext_mgr):
self.network_api = network.API()
super(Floating_ip_dns, self).__init__(ext_mgr)
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ip-dns',
FloatingIPDNSDomainController())
resources.append(res)
res = extensions.ResourceExtension('entries',
FloatingIPDNSEntryController(),
parent={'member_name': 'domain',
'collection_name': 'os-floating-ip-dns'})
resources.append(res)
return resources
| [
"[email protected]"
]
| |
f7c57842ae91adfdfd01bc187383d66e8676dec1 | 7c038e1d1309620bccbc6dc8c83434488b478d5b | /tests/test_rules_manager.py | cc4cb456e5e0142a853e960e34d88872c24f0635 | [
"CC0-1.0",
"BSD-3-Clause",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | boschresearch/pcg_gazebo | 7d2c9a44c840a4f5c27474316c24ebaadbc0d82c | 346394cdb720c9df5ca1d1df6dac7ad0b6e436fc | refs/heads/master | 2022-11-22T09:40:01.197416 | 2022-11-21T07:22:10 | 2022-11-21T07:22:10 | 227,847,711 | 61 | 24 | Apache-2.0 | 2022-11-21T07:18:24 | 2019-12-13T13:36:24 | Python | UTF-8 | Python | false | false | 9,735 | py | #!/usr/bin/env python
# # Copyright (c) 2020 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import random
from pcg_gazebo.generators.rules import create_rule, get_rule_parameters
from pcg_gazebo.collection_managers import ConstraintsManager, RulesManager
from pcg_gazebo.utils import generate_random_string
import numpy as np
WORKSPACE_CONSTRAINT = dict(
name='cool_workspace',
type='workspace',
frame='world',
geometry_type='area',
points=[
[-6, -4, 0],
[-3, -4, 0],
[-3, 0, 0],
[-6, 0, 0]
],
holes=[
dict(
type='circle',
center=[-5, 0, 0],
radius=0.2
)
]
)
DOF_TAGS = ['x', 'y', 'z', 'roll', 'pitch', 'yaw']
UNIFORM_RULE_SAMPLE = dict(
type='uniform',
dofs=dict(x=True, y=True),
mean=random.random(),
min=-10,
max=10
)
FIXED_VALUE_RULE_SAMPLE = dict(
type='value',
dofs=dict(x=True, y=True),
value=random.random()
)
FROM_SET_RULE_SAMPLE = dict(
type='from_set',
dofs=dict(x=True, y=True, z=True),
values=[random.random() for _ in range(5)]
)
RANDOM_RULE_SAMPLE = dict(
type='random',
dofs=dict(x=True, y=True, z=True),
scaling_factor=random.random(),
offset=random.random()
)
WORKSPACE_RULE_SAMPLE = dict(
type='workspace',
dofs=dict(x=True, y=True),
workspace=WORKSPACE_CONSTRAINT['name']
)
class TestRulesManager(unittest.TestCase):
def test_examples(self):
sample = get_rule_parameters('value')
self.assertIn('tag', sample)
self.assertIn('dofs', sample)
self.assertIn('value', sample)
sample = get_rule_parameters('from_set')
self.assertIn('tag', sample)
self.assertIn('dofs', sample)
self.assertIn('values', sample)
sample = get_rule_parameters('random')
self.assertIn('tag', sample)
self.assertIn('dofs', sample)
self.assertIn('scaling_factor', sample)
self.assertIn('offset', sample)
sample = get_rule_parameters('uniform')
self.assertIn('tag', sample)
self.assertIn('dofs', sample)
self.assertIn('mean', sample)
self.assertIn('min', sample)
self.assertIn('max', sample)
sample = get_rule_parameters('workspace')
self.assertIn('tag', sample)
self.assertIn('dofs', sample)
self.assertIn('workspace', sample)
def test_fixed_value_rule(self):
value = random.random()
rule = create_rule('value', value=value)
self.assertIsNotNone(rule)
self.assertEqual(rule.value, value)
for tag in rule.dofs:
self.assertFalse(rule.dofs[tag])
for tag in DOF_TAGS:
dofs = dict()
for t in DOF_TAGS:
dofs[t] = t == tag
rule.dofs = dofs
pose = rule.get_pose()
for t in DOF_TAGS:
if t == tag:
self.assertTrue(np.isclose(getattr(pose, t), value))
else:
if t in ['x', 'y', 'z']:
self.assertEqual(getattr(pose, t), 0)
def test_from_set_rule(self):
values = [random.random() for _ in range(5)]
rule = create_rule('from_set', values=values)
self.assertIsNotNone(rule)
self.assertEqual(rule.values, values)
for tag in rule.dofs:
self.assertFalse(rule.dofs[tag])
for tag in DOF_TAGS:
dofs = dict()
for t in DOF_TAGS:
dofs[t] = t == tag
rule.dofs = dofs
pose = rule.get_pose()
for t in DOF_TAGS:
if t == tag:
found_value = False
for v in values:
if np.isclose(v, getattr(pose, t)):
found_value = True
break
self.assertTrue(found_value)
else:
if t in ['x', 'y', 'z']:
self.assertEqual(getattr(pose, t), 0)
def test_random_rule(self):
scaling_factor = random.random()
offset = random.random()
rule = create_rule(
'random',
scaling_factor=scaling_factor,
offset=offset)
self.assertIsNotNone(rule)
self.assertEqual(rule.scaling_factor, scaling_factor)
self.assertEqual(rule.offset, offset)
for tag in rule.dofs:
self.assertFalse(rule.dofs[tag])
for tag in DOF_TAGS:
dofs = dict()
for t in DOF_TAGS:
dofs[t] = t == tag
rule.dofs = dofs
pose = rule.get_pose()
for t in DOF_TAGS:
if t == tag:
self.assertNotEqual(getattr(pose, t), 0)
else:
if t in ['x', 'y', 'z']:
self.assertEqual(getattr(pose, t), 0)
def test_uniform_rule(self):
mean = random.random()
min = mean - 1
max = mean + 1
rule = create_rule(
'uniform',
mean=mean,
min=min,
max=max)
self.assertIsNotNone(rule)
self.assertEqual(rule.mean, mean)
        self.assertEqual(rule.min, min)
self.assertEqual(rule.max, max)
for tag in rule.dofs:
self.assertFalse(rule.dofs[tag])
for tag in DOF_TAGS:
dofs = dict()
for t in DOF_TAGS:
dofs[t] = t == tag
rule.dofs = dofs
pose = rule.get_pose()
for t in DOF_TAGS:
if t == tag:
self.assertGreaterEqual(getattr(pose, t), min)
self.assertLessEqual(getattr(pose, t), max)
else:
if t in ['x', 'y', 'z']:
self.assertEqual(getattr(pose, t), 0)
def test_within_workspace_rule(self):
cm = ConstraintsManager.get_instance()
self.assertTrue(cm.add(**WORKSPACE_CONSTRAINT))
self.assertIn(WORKSPACE_CONSTRAINT['name'], cm.tags)
rule = create_rule(
'workspace',
workspace=WORKSPACE_CONSTRAINT['name'])
self.assertIsNotNone(rule)
pose = rule.get_pose()
constraint = cm.get(WORKSPACE_CONSTRAINT['name'])
self.assertTrue(constraint.contains_point([pose.x, pose.y]))
rule.dofs = dict(x=True, y=True, z=True)
pose = rule.get_pose()
constraint = cm.get(WORKSPACE_CONSTRAINT['name'])
self.assertTrue(constraint.contains_point([pose.x, pose.y]))
def test_add_rule_to_manager(self):
rm = RulesManager.get_instance()
rules = [
UNIFORM_RULE_SAMPLE,
FIXED_VALUE_RULE_SAMPLE,
FROM_SET_RULE_SAMPLE,
RANDOM_RULE_SAMPLE,
WORKSPACE_RULE_SAMPLE
]
for rule in rules:
name = generate_random_string(5)
rm.add(name=name, **rule)
self.assertIn(name, rm.tags)
self.assertEqual(len(rm.tags), len(rules))
def test_deprecated_rules_args(self):
DOFS = dict(x=True, y=True, z=True)
rule = create_rule(
policy=dict(
name='workspace',
args='my_workspace'
),
dofs=DOFS
)
self.assertIsNotNone(rule)
self.assertEqual(rule.name, 'workspace')
self.assertEqual(rule._workspace_tag, 'my_workspace')
rule = create_rule(
policy=dict(
name='uniform',
args=dict(
min=-1,
max=1
)
),
dofs=DOFS
)
self.assertIsNotNone(rule)
self.assertEqual(rule.name, 'uniform')
self.assertEqual(rule._min, -1)
self.assertEqual(rule._max, 1)
self.assertEqual(rule._mean, 0)
rule = create_rule(
policy=dict(
name='uniform',
args=dict(
min=3,
max=5,
mean=4
)
),
dofs=DOFS
)
self.assertIsNotNone(rule)
self.assertEqual(rule.name, 'uniform')
self.assertEqual(rule._min, 3)
self.assertEqual(rule._max, 5)
self.assertEqual(rule._mean, 4)
rule = create_rule(
policy=dict(
name='choice',
args=dict(
values=[1, 2, 3]
)
),
dofs=DOFS
)
self.assertIsNotNone(rule)
self.assertEqual(rule.name, 'from_set')
self.assertEqual(rule._values, [1, 2, 3])
rule = create_rule(
policy=dict(
name='value',
args=10
),
dofs=DOFS
)
self.assertIsNotNone(rule)
self.assertEqual(rule.name, 'value')
self.assertEqual(rule._value, 10)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
5054169d18b37fb296780346c9a5cbbc2117a629 | bf683eb4a6080cf67669de90d1afdad53fccb738 | /Lib/site-packages/django/db/migrations/loader.py | a6fe1afc15fb2e3670895b845a8b39c414222fbf | [
"MIT"
]
| permissive | mspgeek/Client_Portal | cd513308840aa4203554ebc1160f17f0dd4b17cf | 0267168bb90e8e9c85aecdd715972b9622b82384 | refs/heads/master | 2023-03-07T21:33:22.767108 | 2020-04-08T01:43:19 | 2020-04-08T01:43:19 | 253,946,635 | 6 | 0 | MIT | 2022-12-31T07:01:43 | 2020-04-08T00:43:07 | HTML | UTF-8 | Python | false | false | 14,863 | py | import os
import sys
from importlib import import_module, reload
from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from .exceptions import (
AmbiguityError, BadMigrationError, InconsistentMigrationHistory,
NodeNotFoundError,
)
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader:
"""
Load migration files from disk and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialization, this class will scan those directories, and open and
read the python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection, load=True, ignore_no_migrations=False):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
self.ignore_no_migrations = ignore_no_migrations
if load:
self.build_graph()
@classmethod
def migrations_module(cls, app_label):
"""
Return the path to the migrations module for the specified app_label
and a boolean indicating if the module is specified in
        settings.MIGRATION_MODULES.
"""
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label], True
else:
app_package_name = apps.get_app_config(app_label).name
return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False
def load_disk(self):
"""Load the migrations from all INSTALLED_APPS from disk."""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app_config in apps.get_app_configs():
# Get the migrations module directory
module_name, explicit = self.migrations_module(app_config.label)
if module_name is None:
self.unmigrated_apps.add(app_config.label)
continue
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if ((explicit and self.ignore_no_migrations) or (
not explicit and "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e))):
self.unmigrated_apps.add(app_config.label)
continue
raise
else:
# Empty directories are namespaces.
# getattr() needed on PY36 and older (replace w/attribute access).
if getattr(module, '__file__', None) is None:
self.unmigrated_apps.add(app_config.label)
continue
# Module is not a package (e.g. migrations.py).
if not hasattr(module, '__path__'):
self.unmigrated_apps.add(app_config.label)
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
reload(module)
self.migrated_apps.add(app_config.label)
directory = os.path.dirname(module.__file__)
# Scan for .py files
migration_names = set()
for name in os.listdir(directory):
if name.endswith(".py"):
import_name = name.rsplit(".", 1)[0]
if import_name[0] not in "_.~":
migration_names.add(import_name)
# Load them
for migration_name in migration_names:
migration_module = import_module("%s.%s" % (module_name, migration_name))
if not hasattr(migration_module, "Migration"):
raise BadMigrationError(
"Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
)
self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
migration_name,
app_config.label,
)
def get_migration(self, app_label, name_prefix):
"""Return the named migration or raise NodeNotFoundError."""
return self.graph.nodes[app_label, name_prefix]
def get_migration_by_prefix(self, app_label, name_prefix):
"""
Return the migration(s) which match the given app label and name_prefix.
"""
# Do the search
results = []
for migration_app_label, migration_name in self.disk_migrations:
if migration_app_label == app_label and migration_name.startswith(name_prefix):
results.append((migration_app_label, migration_name))
if len(results) > 1:
raise AmbiguityError(
"There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
)
elif len(results) == 0:
raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_migrations[results[0]]
def check_key(self, key, current_app):
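        """
        Resolve "__first__"/"__latest__" dependency keys to concrete graph
        nodes. Concrete keys (and keys already in the graph) pass through
        unchanged; same-app "__first__" references and unmigrated apps
        resolve to None.
        """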
if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
return key
# Special-case __first__, which means "the first migration" for
# migrated apps, and is ignored for unmigrated apps. It allows
# makemigrations to declare dependencies on apps before they even have
# migrations.
if key[0] == current_app:
# Ignore __first__ references to the same app (#22325)
return
if key[0] in self.unmigrated_apps:
# This app isn't migrated, but something depends on it.
# The models will get auto-added into the state, though
# so we're fine.
return
if key[0] in self.migrated_apps:
try:
if key[1] == "__first__":
return self.graph.root_nodes(key[0])[0]
else: # "__latest__"
return self.graph.leaf_nodes(key[0])[0]
except IndexError:
if self.ignore_no_migrations:
return None
else:
raise ValueError("Dependency on app with no migrations: %s" % key[0])
raise ValueError("Dependency on unknown app: %s" % key[0])
def add_internal_dependencies(self, key, migration):
"""
Internal dependencies need to be added first to ensure `__first__`
dependencies find the correct root node.
"""
for parent in migration.dependencies:
if parent[0] != key[0] or parent[1] == '__first__':
# Ignore __first__ references to the same app (#22325).
continue
self.graph.add_dependency(migration, key, parent, skip_validation=True)
def add_external_dependencies(self, key, migration):
for parent in migration.dependencies:
# Skip internal dependencies
if key[0] == parent[0]:
continue
parent = self.check_key(parent, key[0])
if parent is not None:
self.graph.add_dependency(migration, key, parent, skip_validation=True)
for child in migration.run_before:
child = self.check_key(child, key[0])
if child is not None:
self.graph.add_dependency(migration, child, key, skip_validation=True)
def build_graph(self):
"""
Build a migration dependency graph using both the disk and database.
You'll need to rebuild the graph if you apply migrations. This isn't
usually a problem as generally migration stuff runs in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
if self.connection is None:
self.applied_migrations = set()
else:
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# To start, populate the migration graph with nodes for ALL migrations
# and their dependencies. Also make note of replacing migrations at this step.
self.graph = MigrationGraph()
self.replacements = {}
for key, migration in self.disk_migrations.items():
self.graph.add_node(key, migration)
# Internal (aka same-app) dependencies.
self.add_internal_dependencies(key, migration)
# Replacing migrations.
if migration.replaces:
self.replacements[key] = migration
# Add external dependencies now that the internal ones have been resolved.
for key, migration in self.disk_migrations.items():
self.add_external_dependencies(key, migration)
# Carry out replacements where possible.
for key, migration in self.replacements.items():
# Get applied status of each of this migration's replacement targets.
applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
# Ensure the replacing migration is only marked as applied if all of
# its replacement targets are.
if all(applied_statuses):
self.applied_migrations.add(key)
else:
self.applied_migrations.discard(key)
# A replacing migration can be used if either all or none of its
# replacement targets have been applied.
if all(applied_statuses) or (not any(applied_statuses)):
self.graph.remove_replaced_nodes(key, migration.replaces)
else:
# This replacing migration cannot be used because it is partially applied.
# Remove it from the graph and remap dependencies to it (#25945).
self.graph.remove_replacement_node(key, migration.replaces)
# Ensure the graph is consistent.
try:
self.graph.validate_consistency()
except NodeNotFoundError as exc:
# Check if the missing node could have been replaced by any squash
# migration but wasn't because the squash migration was partially
# applied before. In that case raise a more understandable exception
# (#23556).
# Get reverse replacements.
reverse_replacements = {}
for key, migration in self.replacements.items():
for replaced in migration.replaces:
reverse_replacements.setdefault(replaced, set()).add(key)
# Try to reraise exception with more detail.
if exc.node in reverse_replacements:
candidates = reverse_replacements.get(exc.node, set())
is_replaced = any(candidate in self.graph.nodes for candidate in candidates)
if not is_replaced:
tries = ', '.join('%s.%s' % c for c in candidates)
raise NodeNotFoundError(
"Migration {0} depends on nonexistent node ('{1}', '{2}'). "
"Django tried to replace migration {1}.{2} with any of [{3}] "
"but wasn't able to because some of the replaced migrations "
"are already applied.".format(
exc.origin, exc.node[0], exc.node[1], tries
),
exc.node
) from exc
raise exc
def check_consistent_history(self, connection):
"""
Raise InconsistentMigrationHistory if any applied migrations have
unapplied dependencies.
"""
recorder = MigrationRecorder(connection)
applied = recorder.applied_migrations()
for migration in applied:
# If the migration is unknown, skip it.
if migration not in self.graph.nodes:
continue
for parent in self.graph.node_map[migration].parents:
if parent not in applied:
# Skip unapplied squashed migrations that have all of their
# `replaces` applied.
if parent in self.replacements:
if all(m in applied for m in self.replacements[parent].replaces):
continue
raise InconsistentMigrationHistory(
"Migration {}.{} is applied before its dependency "
"{}.{} on database '{}'.".format(
migration[0], migration[1], parent[0], parent[1],
connection.alias,
)
)
def detect_conflicts(self):
"""
Look through the loaded graph and detect any conflicts - apps
with more than one leaf migration. Return a dict of the app labels
that conflict with the migration names that conflict.
"""
seen_apps = {}
conflicting_apps = set()
for app_label, migration_name in self.graph.leaf_nodes():
if app_label in seen_apps:
conflicting_apps.add(app_label)
seen_apps.setdefault(app_label, set()).add(migration_name)
return {app_label: seen_apps[app_label] for app_label in conflicting_apps}
def project_state(self, nodes=None, at_end=True):
"""
Return a ProjectState object representing the most recent state
that the loaded migrations represent.
See graph.make_state() for the meaning of "nodes" and "at_end".
"""
return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
| [
"[email protected]"
]
| |
e241a958e88c7024a4e560a2e58089f989f61381 | c7b11c24c15baf12451868e3dc62ff75301d85db | /orders/templatetags/orders_tags.py | 1168531aaf2fc585d1231c6fe2012fe022321a74 | []
| no_license | sunny10031982/Foodie | 91632aa62bdcad1eb1311b8277a12c610cedae89 | 5d5a163e2e271d7d9c30e43210a64c6c0e43ea72 | refs/heads/master | 2021-06-01T19:18:10.315872 | 2016-08-31T04:00:30 | 2016-08-31T04:00:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | from django import template
register = template.Library()
@register.filter
def get_total_price(obj):
return obj.get_cost() | [
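# Template usage sketch (assumes the filtered object exposes get_cost(), e.g.
# an order from this project; the surrounding template is hypothetical):
#   {% load orders_tags %}
#   {{ order|get_total_price }}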
"[email protected]"
]
|