# ============================================================================
# Dataset schema (one row per file): hexsha | size | ext | lang |
# max_stars_repo_{path,name,head_hexsha,licenses} | max_stars_count |
# max_stars_repo_stars_event_{min,max}_datetime | the same fields for
# max_issues_* and max_forks_* | content | avg_line_length | max_line_length |
# alphanum_fraction | count/score pairs for classes, generators, decorators,
# async_functions, and documentation.
# ============================================================================
#!/usr/bin/env python3
# ============================================================================
# File: project.py | Repo: PetruSicoe/Python101-GameProject | License: CC-BY-4.0
# ============================================================================
from random import randrange
import random
import pygame, sys
from pygame.locals import *
import string
pygame.font.init()
MENU_WIDTH = 1000
MENU_HEIGHT = 1000
GUESS_WIDTH = 1000
GUESS_HEIGHT = 650
HANGMAN_WIDTH = 1300
HANGMAN_HEIGHT = 720
BLACK = (0,0,0)
WHITE = (255,255,255)
RED = (255,0,0)
GREEN = (0,255,0)
LIGHT_YELLOW = (255, 255, 102)
frame_rate = pygame.time.Clock()
back_ground = pygame.image.load("image_kids.jpg")
back_ground_guess = pygame.image.load("schoolboard.jpg")
class GameObject:
def __init__(self, position):
self.position = position
def input(self):
pass
def draw(self):
pass
class Menu(GameObject):
def __init__(self):
self.window = pygame.display.set_mode((MENU_WIDTH,MENU_HEIGHT))
pygame.display.set_caption('Meniu Joc')
#buttons that open each game's page
self.color_hang = (203, 195, 227)
self.color_hang_hover = (140,106,189)
self.left_hang = MENU_WIDTH / 4 + 100
self.top_hang = MENU_HEIGHT / 3
self.width_hang = 250
self.heigth_hang = 120
self.color_guess = (51, 255, 153)
self.color_guess_hover = (37, 186, 132)
self.left_guess = MENU_WIDTH / 4 + 20
self.top_guess = MENU_HEIGHT / 2 + 50
self.width_guess = 470
self.heigth_guess = 120
#[left, top, width, height]
self.hang_rect = pygame.Rect(self.left_hang, self.top_hang, self.width_hang, self.heigth_hang)
self.guess_rect = pygame.Rect(self.left_guess, self.top_guess, self.width_guess, self.heigth_guess)
def input(self):
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
mouse = pygame.mouse.get_pos()
if self.left_hang <= mouse[0] <= self.left_hang + self.width_hang and self.top_hang <= mouse[1] <= self.top_hang + self.heigth_hang:
hangman = Hangman()
hangman.run()
pygame.quit()
sys.exit()
elif self.left_guess <= mouse[0] <= self.left_guess + self.width_guess and self.top_guess <= mouse[1] <= self.top_guess + self.heigth_guess:
guess = GuessTheNumber()
guess.run()
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
def draw(self):
image_rect = back_ground.get_rect()
self.window.fill(BLACK)
self.window.blit(back_ground, image_rect)
#fonts
self.font = pygame.font.SysFont('Comic Sans MS',50)
#title
title_x_pos = MENU_WIDTH / 6 + 50
title_y_pos = MENU_HEIGHT / 6
self.img = self.font.render('Childhood\'s Gamechest', True, BLACK)
self.window.blit(self.img, (title_x_pos , title_y_pos ))
#draw the Hangman button; change its color on hover
mouse = pygame.mouse.get_pos()
if self.left_hang <= mouse[0] <= self.left_hang + self.width_hang and self.top_hang <= mouse[1] <= self.top_hang + self.heigth_hang:
pygame.draw.rect(self.window, self.color_hang_hover, self.hang_rect)
else:
pygame.draw.rect(self.window, self.color_hang, self.hang_rect)
#put the label on the button
self.hang_button = self.font.render('Hangman', True, BLACK)
self.window.blit(self.hang_button, (self.left_hang + 15, self.top_hang + 20))
#draw the Guess the Number button
if self.left_guess <= mouse[0] <= self.left_guess + self.width_guess and self.top_guess <= mouse[1] <= self.top_guess + self.heigth_guess:
pygame.draw.rect(self.window, self.color_guess_hover, self.guess_rect)
else:
pygame.draw.rect(self.window, self.color_guess, self.guess_rect)
#put the label on the button
self.guess_button = self.font.render('Guess the Number', True, BLACK)
self.window.blit(self.guess_button, (self.left_guess + 15, self.top_guess + 20))
pygame.display.update()
pygame.time.Clock().tick(60)
def run(self):
while True:
self.input()
self.draw()
class Hangman(GameObject):
def __init__(self):
self.window = pygame.display.set_mode((HANGMAN_WIDTH, HANGMAN_HEIGHT))
pygame.display.set_caption('Hangman')
self.text = ''
self.guess_text = ''
self.current_letter = ''
#fonts
self.input_font = pygame.font.SysFont('Comic Sans MS',100)
self.letters_font = pygame.font.SysFont('Comic Sans MS',35)
self.title_font = pygame.font.SysFont('Algerian',100)
#background image
self.hang_background = pygame.image.load("papyrus.jpg")
#load the images that show the hangman stage for each number of lives
self.zero_img = pygame.image.load("0.jpg")
self.zero_img = pygame.transform.scale(self.zero_img, (self.zero_img.get_size()[0] + 100, self.zero_img.get_size()[1] + 100))
self.three_img = pygame.image.load("3.jpg")
self.three_img = pygame.transform.scale(self.three_img, (self.three_img.get_size()[0] + 100, self.three_img.get_size()[1] + 100))
self.five_img = pygame.image.load("5.jpg")
self.five_img = pygame.transform.scale(self.five_img, (self.five_img.get_size()[0] + 100, self.five_img.get_size()[1] + 100))
self.six_img = pygame.image.load("6.jpg")
self.six_img = pygame.transform.scale(self.six_img, (self.six_img.get_size()[0] + 100, self.six_img.get_size()[1] + 100))
self.seven_img = pygame.image.load("7.jpg")
self.seven_img = pygame.transform.scale(self.seven_img, (self.seven_img.get_size()[0] + 100, self.seven_img.get_size()[1] + 100))
self.eight_img = pygame.image.load("8.jpg")
self.eight_img = pygame.transform.scale(self.eight_img, (self.eight_img.get_size()[0] + 100, self.eight_img.get_size()[1] + 100))
self.nine_img = pygame.image.load("9.jpg")
self.nine_img = pygame.transform.scale(self.nine_img, (self.nine_img.get_size()[0] + 100, self.nine_img.get_size()[1] + 100))
self.ten_img = pygame.image.load("10.jpg")
self.ten_img = pygame.transform.scale(self.ten_img, (self.ten_img.get_size()[0] + 100, self.ten_img.get_size()[1] + 100))
#box that holds the current letter
self.input_box = pygame.Rect(100, 400, 200, 200)
self.active_box = False
#colors for the input box
self.color_inactive = (64, 64, 64)
self.color_active = (224, 224, 224)
self.nr_lives = 6
self.won = False
self.lost = False
self.timer_index = 0
#file with the words to guess; pick one of them at random
with open("hangman_input.txt") as file:
lines = file.readlines()
words = lines[randrange(len(lines))].strip("\n")
words = words.split()
self.guess_text = words[randrange(len(words))]
print("de ghicit: " + self.guess_text)
#fac o lista de tupluri cu litera, spatiul ei dedicat si daca e ghicita sau nu
self.letters= []
for i in range(len(self.guess_text)):
self.letters.append( (self.guess_text[i], pygame.Rect(10 + 100 * i, 200, 50 , 50), False) )
#Menu Button
self.color_menu = (203, 195, 227)
self.color_menu_hover = (140, 106, 189)
self.left_menu = HANGMAN_WIDTH / 2 - 200
self.top_menu = HANGMAN_HEIGHT / 2 + 100
self.width_menu = 300
self.heigth_menu = 120
self.menu_rect = pygame.Rect(self.left_menu, self.top_menu, self.width_menu, self.heigth_menu)
def input(self):
#for closing the menu
for event in pygame.event.get():
#handle the window close button
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
menu = Menu()
menu.run()
pygame.quit()
sys.exit()
#the input box contents can only change while it is selected
if self.active_box:
if event.key == pygame.K_RETURN:
#on Enter, process the letter from the input box
if len(self.text) > 0:
if self.text in self.guess_text:
pos = self.guess_text.find(self.text)
while pos != -1:
self.letters[pos] = (self.letters[pos][0], self.letters[pos][1], True)
pos = self.guess_text.find(self.text, pos + 1, len(self.guess_text))
#check whether every box is filled in; if so, the game is won
just_won = True
for k in self.letters:
if not k[2]:
just_won = False
if just_won:
self.won = True
else:
self.nr_lives -= 1
if self.nr_lives == 0:
self.lost = True
self.text = ''
elif event.key == pygame.K_BACKSPACE:
self.text = self.text[:-1]
else:
self.current_letter = event.unicode.upper()
#only one letter may sit in the box
if len(self.text) >= 1:
self.text = self.text[:-1]
self.text += self.current_letter
if event.type == pygame.MOUSEBUTTONDOWN:
if self.input_box.collidepoint(event.pos):
#toggle the selection
self.active_box = not self.active_box
else:
self.active_box = False
if self.menu_rect.collidepoint(event.pos):
if self.won or self.lost:
menu = Menu()
menu.run()
pygame.quit()
sys.exit()
def draw(self):
image_rect = self.hang_background.get_rect()
self.window.fill(BLACK)
self.window.blit(self.hang_background, image_rect)
#draw the image representing the state for the current number of lives
if self.nr_lives == 6:
image_rect = self.zero_img.get_rect()
hang_img = self.zero_img
elif self.nr_lives == 5:
image_rect = self.five_img.get_rect()
hang_img = self.five_img
elif self.nr_lives == 4:
image_rect = self.six_img.get_rect()
hang_img = self.six_img
elif self.nr_lives == 3:
image_rect = self.seven_img.get_rect()
hang_img = self.seven_img
elif self.nr_lives == 2:
image_rect = self.eight_img.get_rect()
hang_img = self.eight_img
elif self.nr_lives == 1:
image_rect = self.nine_img.get_rect()
hang_img = self.nine_img
else:
image_rect = self.ten_img.get_rect()
hang_img = self.ten_img
image_rect.x = 880
image_rect.y = 300
self.window.blit(hang_img, image_rect)
#title
title_x_pos = HANGMAN_WIDTH / 3 - 30
title_y_pos = 30
self.title = self.title_font.render('HANGMAN', True, BLACK)
self.window.blit(self.title, (title_x_pos , title_y_pos ))
if not self.won and not self.lost:
#guessed / unguessed letters
for i in range(len(self.letters)):
pygame.draw.rect(self.window, self.color_inactive, self.letters[i][1])
text_surface = self.letters_font.render(self.letters[i][0],True, self.color_active)
if self.letters[i][2] == True:
self.window.blit(text_surface, (self.letters[i][1].x + 15, self.letters[i][1].y + 5))
if not self.active_box:
pygame.draw.rect(self.window, self.color_inactive, self.input_box)
else:
pygame.draw.rect(self.window, self.color_active, self.input_box)
if len(self.text) > 0:
text_surface = self.input_font.render(self.text,True, BLACK)
self.window.blit(text_surface, (self.input_box.x + 65, self.input_box.y + 20))
#win case
if self.won:
#show the message
if self.timer_index < 1:
self.timer_index += 0.01
text_surface = self.title_font.render("You win", True, GREEN)
self.window.blit(text_surface, (400, 400))
else:
#show the menu button once the timer runs out
mouse = pygame.mouse.get_pos()
if self.left_menu <= mouse[0] <= self.left_menu + self.width_menu and self.top_menu <= mouse[1] <= self.top_menu + self.heigth_menu:
pygame.draw.rect(self.window, self.color_menu_hover, self.menu_rect)
else:
pygame.draw.rect(self.window, self.color_menu, self.menu_rect)
#put the label on the button
self.menu_button = self.letters_font.render('Back to Menu', True, self.color_inactive)
self.window.blit(self.menu_button, (self.left_menu + 30, self.top_menu + 30))
#lose case
if self.lost:
#show the message
if self.timer_index < 1:
self.timer_index += 0.01
text_surface = self.title_font.render("You lost", True, RED)
self.window.blit(text_surface, (400, 400))
else:
#show the menu button once the timer runs out
mouse = pygame.mouse.get_pos()
if self.left_menu <= mouse[0] <= self.left_menu + self.width_menu and self.top_menu <= mouse[1] <= self.top_menu + self.heigth_menu:
pygame.draw.rect(self.window, self.color_menu_hover, self.menu_rect)
else:
pygame.draw.rect(self.window, self.color_menu, self.menu_rect)
#put the label on the button
self.menu_button = self.letters_font.render('Back to Menu', True, self.color_inactive)
self.window.blit(self.menu_button, (self.left_menu + 30, self.top_menu + 30))
pygame.display.update()
pygame.time.Clock().tick(60)
def run(self):
while True:
self.input()
self.draw()
class GuessTheNumber(GameObject):
def __init__(self):
self.window = pygame.display.set_mode((GUESS_WIDTH, GUESS_HEIGHT))
pygame.display.set_caption('Guess the Number')
self.index = 0
self.lives = 2
self.winner_text = ''
self.losing_text = ''
#fonts
self.intro_font = pygame.font.SysFont('Comic Sans MS', 50)
self.number_font = pygame.font.SysFont('Comic Sans MS', 30)
self.lives_font = pygame.font.SysFont('Comic Sans MS', 20)
self.message_font = pygame.font.SysFont('Comic Sans MS', 40)
#colors
self.card_color = (194, 175, 161)
self.card_hover = (175, 122, 90)
self.choice = -1
#init the number cards
#1
self.left_card_one = GUESS_WIDTH / 4 + 70
self.top_card_one = GUESS_HEIGHT / 3
self.width_card_one = 100
self.height_card_one = 70
self.card_one_rect = pygame.Rect(self.left_card_one, self.top_card_one, self.width_card_one, self.height_card_one)
self.rand_1 = randrange(5)
#2
self.left_card_two = GUESS_WIDTH / 4 + 320
self.top_card_two = GUESS_HEIGHT / 3
self.width_card_two = 100
self.height_card_two = 70
self.card_two_rect = pygame.Rect(self.left_card_two, self.top_card_two, self.width_card_two, self.height_card_two)
self.rand_2 = randrange(6, 10)
#3
self.left_card_three = GUESS_WIDTH / 4 + 70
self.top_card_three = GUESS_HEIGHT / 3 + 170
self.width_card_three = 100
self.height_card_three = 70
self.card_three_rect = pygame.Rect(self.left_card_three, self.top_card_three, self.width_card_three, self.height_card_three)
self.rand_3 = randrange(25, 35)
#4
self.left_card_four = GUESS_WIDTH / 4 + 320
self.top_card_four = GUESS_HEIGHT / 3 + 170
self.width_card_four = 100
self.height_card_four = 70
self.card_four_rect = pygame.Rect(self.left_card_four, self.top_card_four, self.width_card_four, self.height_card_four)
self.rand_4 = randrange(10, 20)
#put all the random cards in one list
self.randoms_list = ['button_1', 'button_2', 'button_3', 'button_4']
self.to_guess = random.choice(self.randoms_list)
#end-screen buttons
#REPLAY
self.left_replay = GUESS_WIDTH - 150
self.top_replay = GUESS_HEIGHT / 2 - 80
self.width_replay = 60
self.height_replay = 45
self.replay_rect = pygame.Rect(self.left_replay, self.top_replay, self.width_replay, self.height_replay)
#MENU
self.left_menu = GUESS_WIDTH - 150
self.top_menu = GUESS_HEIGHT / 2
self.width_menu = 60
self.height_menu = 45
self.menu_rect = pygame.Rect(self.left_menu, self.top_menu, self.width_menu, self.height_menu)
self.timer_index = 0
def input(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
#check which card the player clicked, or whether MENU/REPLAY was clicked
if self.card_one_rect.collidepoint(event.pos):
if self.randoms_list[0] == self.to_guess:
self.choice = 1
else:
self.choice = 0
self.lives -= 1
elif self.card_two_rect.collidepoint(event.pos):
if self.randoms_list[1] == self.to_guess:
self.choice = 1
else:
self.choice = 0
self.lives -= 1
elif self.card_three_rect.collidepoint(event.pos):
if self.randoms_list[2] == self.to_guess:
self.choice = 1
else:
self.choice = 0
self.lives -= 1
elif self.card_four_rect.collidepoint(event.pos):
if self.randoms_list[3] == self.to_guess:
self.choice = 1
else:
self.choice = 0
self.lives -= 1
elif self.menu_rect.collidepoint(event.pos):
menu = Menu()
menu.run()
pygame.quit()
sys.exit()
elif self.replay_rect.collidepoint(event.pos):
guess = GuessTheNumber()
guess.run()
pygame.quit()
sys.exit()
def draw(self):
image_rect = back_ground_guess.get_rect()
self.window.fill(BLACK)
self.window.blit(back_ground_guess, image_rect)
#draw the title
welcome_text = self.intro_font.render('Welcome to GuessTheNumber!', True, WHITE)
self.window.blit(welcome_text, (150, 25))
#draw the number of lives
lives_text = self.lives_font.render(f'lives: {self.lives}', True, LIGHT_YELLOW)
self.window.blit(lives_text, (680, 150))
mouse = pygame.mouse.get_pos()
#draw the cards
#1
if self.left_card_one <= mouse[0] <= self.left_card_one + self.width_card_one and self.top_card_one <= mouse[1] <= self.top_card_one + self.height_card_one:
pygame.draw.rect(self.window, self.card_hover, self.card_one_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.card_one_rect)
self.button_1 = self.number_font.render(str(self.rand_1), True, BLACK)
self.window.blit(self.button_1, (self.card_one_rect.x + 40, self.card_one_rect.y + 10))
#2
if self.left_card_two <= mouse[0] <= self.left_card_two + self.width_card_two and self.top_card_two <= mouse[1] <= self.top_card_two + self.height_card_two:
pygame.draw.rect(self.window, self.card_hover, self.card_two_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.card_two_rect)
self.button_2 = self.number_font.render(str(self.rand_2), True, BLACK)
self.window.blit(self.button_2, (self.card_two_rect.x + 40, self.card_two_rect.y + 10))
#3
if self.left_card_three <= mouse[0] <= self.left_card_three + self.width_card_three and self.top_card_three <= mouse[1] <= self.top_card_three + self.height_card_three:
pygame.draw.rect(self.window, self.card_hover, self.card_three_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.card_three_rect)
self.button_3 = self.number_font.render(str(self.rand_3), True, BLACK)
self.window.blit(self.button_3, (self.card_three_rect.x + 40, self.card_three_rect.y + 10))
#4
if self.left_card_four <= mouse[0] <= self.left_card_four + self.width_card_four and self.top_card_four <= mouse[1] <= self.top_card_four + self.height_card_four:
pygame.draw.rect(self.window, self.card_hover, self.card_four_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.card_four_rect)
self.button_4 = self.number_font.render(str(self.rand_4), True, BLACK)
self.window.blit(self.button_4, (self.card_four_rect.x + 40, self.card_four_rect.y + 10))
if self.choice == 1:
self.winner_text = self.message_font.render('Wow, you won!', True, LIGHT_YELLOW)
self.window.blit(self.winner_text, (400, 300))
#replay button
if self.left_replay <= mouse[0] <= self.left_replay + self.width_replay and self.top_replay <= mouse[1] <= self.top_replay + self.height_replay:
pygame.draw.rect(self.window, self.card_hover, self.replay_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.replay_rect)
self.replay_b = self.lives_font.render('Replay', True, BLACK)
self.window.blit(self.replay_b, (self.replay_rect.x + 1, self.replay_rect.y + 10))
#MENU button
if self.left_menu <= mouse[0] <= self.left_menu + self.width_menu and self.top_menu <= mouse[1] <= self.top_menu + self.height_menu:
pygame.draw.rect(self.window, self.card_hover, self.menu_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.menu_rect)
self.menu_b = self.lives_font.render('Menu', True, BLACK)
self.window.blit(self.menu_b, (self.menu_rect.x + 1, self.menu_rect.y + 10))
elif self.choice == 0:
mouse = pygame.mouse.get_pos()
if self.lives == 1:
if self.timer_index < 1:
self.losing_text = self.message_font.render('Oopsey, only one life left!', True, LIGHT_YELLOW)
self.window.blit(self.losing_text, (300, 300))
self.timer_index+=0.01
elif self.lives == 0:
self.losing_text = self.message_font.render('Game over ya loser', True, LIGHT_YELLOW)
self.window.blit(self.losing_text, (350, 300))
#replay button
if self.left_replay <= mouse[0] <= self.left_replay + self.width_replay and self.top_replay <= mouse[1] <= self.top_replay + self.height_replay:
pygame.draw.rect(self.window, self.card_hover, self.replay_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.replay_rect)
self.replay_b = self.lives_font.render('Replay', True, BLACK)
self.window.blit(self.replay_b, (self.replay_rect.x + 1, self.replay_rect.y + 10))
#MENU button
if self.left_menu <= mouse[0] <= self.left_menu + self.width_menu and self.top_menu <= mouse[1] <= self.top_menu + self.height_menu:
pygame.draw.rect(self.window, self.card_hover, self.menu_rect)
else:
pygame.draw.rect(self.window, self.card_color, self.menu_rect)
self.menu_b = self.lives_font.render('Menu', True, BLACK)
self.window.blit(self.menu_b, (self.menu_rect.x + 1, self.menu_rect.y + 10))
pygame.display.update()
pygame.time.Clock().tick(60)
def run(self):
while True:
self.input()
self.draw()
if __name__ == "__main__":
menu = Menu()
menu.run()
# ============================================================================
# File: Modules/CrossMapLRN.py | Repo: EmilPi/PuzzleLib | License: Apache-2.0
# ============================================================================
import numpy as np
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Backend.Dnn import crossMapLRN, crossMapLRNBackward
from PuzzleLib.Modules.LRN import LRN
class CrossMapLRN(LRN):
def __init__(self, N=5, alpha=1e-4, beta=0.75, K=2.0, name=None):
super().__init__(N, alpha, beta, K, name)
self.gradUsesOutData = True
def updateData(self, data):
self.data, self.workspace = crossMapLRN(data, N=self.N, alpha=self.alpha, beta=self.beta, K=self.K,
test=not self.train)
def updateGrad(self, grad):
self.grad = crossMapLRNBackward(self.inData, self.data, grad, self.workspace,
N=self.N, alpha=self.alpha, beta=self.beta, K=self.K)
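# For reference, the normalization applied here (as exercised by the unittest
# below) is, for channel i with a window of N neighbouring channels:
#     b_i = a_i / (K + (alpha / N) * sum_{j in window(i)} a_j**2) ** beta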
def unittest():
maps = 10
data = gpuarray.to_gpu(np.random.randn(1, maps, 1, 1).astype(np.float32))
crossMapLrn = CrossMapLRN()
crossMapLrn(data)
lookBehind = int((crossMapLrn.N - 1) / 2)
lookAhead = crossMapLrn.N - lookBehind
hostData = data.get().reshape(maps, ).astype(np.float32)
norms = np.empty((maps, ), dtype=np.float32)
for i in range(maps):
norm = 0.0
for j in range(max(0, i - lookBehind), min(maps, i + lookAhead)):
norm += hostData[j]**2
norms[i] = crossMapLrn.K + norm * crossMapLrn.alpha / crossMapLrn.N
hostOutData = hostData / norms**crossMapLrn.beta
assert np.allclose(hostOutData, crossMapLrn.data.get().reshape(maps, ).astype(np.float32))
grad = gpuarray.to_gpu(np.random.randn(1, maps, 1, 1).astype(np.float32))
crossMapLrn.backward(grad)
hostGrad = grad.get().reshape(maps, ).astype(np.float32)
hostInGrad = np.zeros((maps, ), dtype=np.float32)
k = 2.0 * crossMapLrn.alpha * crossMapLrn.beta / crossMapLrn.N
for i in range(maps):
hostInGrad[i] += hostGrad[i] / norms[i]**crossMapLrn.beta
for j in range(max(0, i - lookBehind), min(maps, i + lookAhead)):
hostInGrad[j] -= hostGrad[i] * k * hostData[i] * hostData[j] / norms[i]**(crossMapLrn.beta+1)
assert np.allclose(hostInGrad, crossMapLrn.grad.get().reshape(maps, ).astype(np.float32))
if __name__ == "__main__":
unittest()
# ============================================================================
# File: src/metrics.py | Repo: enryH/specpride | License: Apache-2.0
# ============================================================================
import copy
from typing import Iterable
import numba as nb
import numpy as np
import spectrum_utils.spectrum as sus
def dot(spectrum1: sus.MsmsSpectrum, spectrum2: sus.MsmsSpectrum,
fragment_mz_tolerance: float) -> float:
"""
Compute the dot product between the given spectra.
Parameters
----------
spectrum1 : sus.MsmsSpectrum
The first spectrum.
spectrum2 : sus.MsmsSpectrum
The second spectrum.
fragment_mz_tolerance : float
The fragment m/z tolerance used to match peaks.
Returns
-------
float
The dot product similarity between the given spectra.
"""
return _dot(spectrum1.mz, _norm_intensity(np.copy(spectrum1.intensity)),
spectrum2.mz, _norm_intensity(np.copy(spectrum2.intensity)),
fragment_mz_tolerance)
@nb.njit
def _norm_intensity(spectrum_intensity: np.ndarray) -> np.ndarray:
"""
Normalize spectrum peak intensities.
Parameters
----------
spectrum_intensity : np.ndarray
The spectrum peak intensities to be normalized.
Returns
-------
np.ndarray
The normalized peak intensities.
"""
return spectrum_intensity / np.linalg.norm(spectrum_intensity)
@nb.njit
def _dot(mz: np.ndarray, intensity: np.ndarray, mz_other: np.ndarray,
intensity_other: np.ndarray, fragment_mz_tol: float) -> float:
"""
Compute the dot product between the given spectra.
Note: Spectrum intensities should be normalized prior to computing the dot
product.
Parameters
----------
mz : np.ndarray
The first spectrum's m/z values.
intensity : np.ndarray
The first spectrum's intensity values.
mz_other : np.ndarray
The second spectrum's m/z values.
intensity_other : np.ndarray
The second spectrum's intensity values.
fragment_mz_tol : float
The fragment m/z tolerance used to match peaks in both spectra with
each other.
Returns
-------
float
The dot product between both spectra.
"""
fragment_i, fragment_other_i, score = 0, 0, 0.
for fragment_i in range(len(mz)):
while (fragment_other_i < len(mz_other) - 1 and
mz_other[fragment_other_i] < mz[fragment_i] - fragment_mz_tol):
fragment_other_i += 1
if (abs(mz[fragment_i] - mz_other[fragment_other_i]) <= fragment_mz_tol
and fragment_other_i < len(mz_other)):
score += intensity[fragment_i] * intensity_other[fragment_other_i]
fragment_other_i += 1
return score
def avg_dot(representative: sus.MsmsSpectrum,
cluster_spectra: Iterable[sus.MsmsSpectrum],
fragment_mz_tolerance: float) -> float:
"""
Compute the average dot product between the cluster representative and all
cluster members.
Parameters
----------
representative : sus.MsmsSpectrum
The cluster representative spectrum.
cluster_spectra : Iterable[sus.MsmsSpectrum]
The cluster member spectra.
fragment_mz_tolerance : float
Fragment m/z tolerance used during spectrum comparison.
Returns
-------
float
The average dot product between the cluster representative and all
cluster members.
"""
return np.mean([dot(representative, spectrum, fragment_mz_tolerance)
for spectrum in cluster_spectra])
def fraction_by(representative: sus.MsmsSpectrum,
cluster_spectra: Iterable[sus.MsmsSpectrum],
fragment_mz_tolerance: float) -> float:
"""
Compute the fraction of intensity that is explained by the b and y-ions of
the representative spectrum.
This will be 0 if no peptide sequence is associated with the representative
spectrum.
Parameters
----------
representative : sus.MsmsSpectrum
The cluster representative spectrum.
cluster_spectra : Iterable[sus.MsmsSpectrum]
The cluster member spectra. Ignored.
fragment_mz_tolerance : float
Fragment m/z tolerance used to annotate the peaks of the representative
spectrum.
Returns
-------
float
The fraction of intensity that is explained by the b and y-ions of the
representative spectrum.
"""
if representative.peptide is None:
return 0
representative = (copy.copy(representative)
.remove_precursor_peak(fragment_mz_tolerance, 'Da')
.annotate_peptide_fragments(fragment_mz_tolerance, 'Da'))
annotated_peaks = [i for i, annot in enumerate(representative.annotation)
if annot is not None]
return (representative.intensity[annotated_peaks].sum()
/ representative.intensity.sum())
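# A minimal sanity check for `_dot` on plain numpy arrays (a sketch; the m/z
# and intensity values below are made up, and the intensities are unit-
# normalized first, exactly as `dot` does before matching peaks):
if __name__ == "__main__":
    mz_a = np.array([100.0, 200.0, 300.0])
    int_a = _norm_intensity(np.array([1.0, 2.0, 2.0]))
    mz_b = np.array([100.01, 200.01, 400.0])
    int_b = _norm_intensity(np.array([2.0, 1.0, 2.0]))
    # the peaks near 100 and 200 match within the 0.02 Da tolerance, while
    # 300 vs 400 does not, so the expected score is
    # (1/3)*(2/3) + (2/3)*(1/3) = 4/9
    print(_dot(mz_a, int_a, mz_b, int_b, 0.02))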
# ============================================================================
# File: examples/multi-class_neural_network.py | Repo: sun1638650145/classicML
# License: Apache-2.0
# ============================================================================
"""
This example shows how to build a multi-class neural network with a BP (back-propagation) network.
"""
import sys
import classicML as cml
DATASET_PATH = './datasets/iris_dataset.csv'
CALLBACKS = [cml.callbacks.History(loss_name='categorical_crossentropy',
metric_name='accuracy')]
# Load the data
ds = cml.data.Dataset(label_mode='one-hot',
standardization=True,
name='iris')
ds.from_csv(DATASET_PATH)
# Build the neural network
model = cml.BPNN(seed=2021)
model.compile(network_structure=[4, 2, 3],
optimizer='sgd',
loss='categorical_crossentropy',
metric='accuracy')
# Train the neural network
model.fit(ds.x, ds.y, epochs=1000, verbose=True, callbacks=CALLBACKS)
# Visualize the training history (if you are on macOS, comment this out; the guard below exists for testing on CI).
if sys.platform != 'darwin':
cml.plots.plot_history(CALLBACKS[0])
# ============================================================================
# File: pool/serializer/PoolSerializer.py | Repo: salran40/POAP
# License: Apache-2.0
# ============================================================================
__author__ = "arunrajms"
from rest_framework import serializers
from pool.models import Pool
from rest_framework.validators import UniqueValidator
import re
TYPE_CHOICES = ['Integer','IP','IPv6','AutoGenerate','Vlan','MgmtIP']
PUT_TYPE_CHOICES = ['Integer','IP','IPv6','Vlan','MgmtIP']
SCOPE_CHOICES = ['global','fabric','switch']
class JSONSerializerField(serializers.Field):
""" Serializer for JSONField -- required to make field writable"""
def to_internal_value(self, data):
return data
def to_representation(self, value):
return value
class PoolRange(serializers.Serializer):
start = serializers.CharField(max_length=24, required=True)
end = serializers.CharField(max_length=24, required=True)
class PoolSerializer(serializers.Serializer):
id = serializers.IntegerField(read_only=True)
name = serializers.CharField(max_length=100,required=True, \
validators=[UniqueValidator(queryset=Pool.objects.all())])
type = serializers.ChoiceField(TYPE_CHOICES)
range = PoolRange(required=False, many=True)
available = serializers.IntegerField(read_only=True)
used = serializers.IntegerField(read_only=True)
scope = serializers.ChoiceField(SCOPE_CHOICES)
def create(self,validated_data):
'''
TBD
'''
return Pool.objects.create(**validated_data)
class PoolGetSerializer(serializers.Serializer):
id = serializers.IntegerField(read_only=True)
name = serializers.CharField(max_length=100,required=True)
type = serializers.CharField(max_length=100)
range = JSONSerializerField()#serializers.CharField(max_length=500)
used = serializers.IntegerField()
available = serializers.IntegerField()
scope = serializers.CharField()
class PoolGetDetailSerializer(serializers.Serializer):
value = serializers.CharField(max_length=100)
assigned = serializers.CharField(max_length=100,required=False)
lastmodified = serializers.CharField(max_length=100)
class PoolPutSerializer(serializers.Serializer):
id = serializers.IntegerField(read_only=True)
name = serializers.CharField(max_length=100,required=True)
type = serializers.ChoiceField(PUT_TYPE_CHOICES)
range = PoolRange(required=False, many=True)
#available = serializers.IntegerField(read_only=True)
#used = serializers.IntegerField(read_only=True)
#scope = serializers.ChoiceField(SCOPE_CHOICES)
def create(self,validated_data):
'''
TBD
'''
return Pool.objects.create(**validated_data)
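# A minimal usage sketch (kept as a comment; it assumes a configured Django
# project with the Pool model migrated, and the field values are illustrative):
#
#     payload = {
#         "name": "mgmt-pool",
#         "type": "IP",
#         "range": [{"start": "10.0.0.1", "end": "10.0.0.100"}],
#         "scope": "global",
#     }
#     serializer = PoolSerializer(data=payload)
#     if serializer.is_valid():
#         pool = serializer.save()  # dispatches to create() above
#     else:
#         print(serializer.errors)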
# ============================================================================
# File: yiffscraper/downloader.py | Repo: ScraperT/yiffscraper | License: MIT
# ============================================================================
import os
import platform
from datetime import datetime
import time
from pathlib import Path
import asyncio
from dateutil.parser import parse as parsedate
from dateutil import tz
import aiohttp
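# `longpath` below opts Windows paths out of the legacy 260-character MAX_PATH
# limit by prefixing the absolute path with \\?\ (a documented Win32 form); on
# other platforms the path is returned unchanged. For example, assuming the
# current directory is C:\work, longpath("data\\x.bin") yields
# \\?\C:\work\data\x.bin.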
def longpath(p):
if p is None or platform.system() != "Windows":
return Path(p)
return Path("\\\\?\\" + str(Path.cwd() / p))
class UrlItem:
__slots__ = ("url", "size", "lastModified", "path")
def __init__(self, url, size, lastModified, path=None):
self.url = url
self.size = size
self.lastModified = lastModified
self.path = longpath(path)
def needsUpdate(self):
if self.path is None:
return False
fileLastModified = getFileTime(self.path)
if self.lastModified is None or fileLastModified is None:
return True
return self.lastModified > fileLastModified
@classmethod
async def fetchMetadata(cls, session, url, path):
async with session.head(url, allow_redirects=True) as response:
try:
response.raise_for_status()
except aiohttp.ClientResponseError as e:
# I don't like returning Exceptions, but I can't find a better way to pass a single error in an async loop
return (None, e)
size = int(response.headers.get("content-length", 0))
lastModified = parsedateOrNone(response.headers.get("last-modified", None))
return (cls(url, size, lastModified, path), None)
async def download(self, session, update):
if self.path is None:
return
if update and not self.needsUpdate():  # needsUpdate() is a plain method, so it must not be awaited
return
self.path.parent.mkdir(parents=True, exist_ok=True)
async with session.get(self.url) as response:
try:
response.raise_for_status()
except aiohttp.ClientResponseError as e:
# I don't like returning Exceptions, but I can't find a better way to pass a single error in an async loop
return (self, e)
with open(self.path, "wb") as out_file:
while True:
chunk = await response.content.read(8192)
if not chunk:
break
out_file.write(chunk)
url_timestamp = getTimestamp(self.lastModified)
if url_timestamp is not None:  # the Last-Modified header may be absent
    os.utime(self.path, (url_timestamp, url_timestamp))
return (self, None)
@classmethod
async def fetchAllMetadata(cls, items):
async with newSession() as session:
tasks = [cls.fetchMetadata(session, i.url, i.path) for i in items]
for task in asyncio.as_completed(tasks):
urlitem = await task
yield urlitem
@classmethod
async def downloadAll(cls, urlitems, update):
async with newSession() as session:
tasks = [urlitem.download(session, update) for urlitem in urlitems]
for task in asyncio.as_completed(tasks):
yield await task
def __len__(self):
return self.size
def getFileTime(path):
try:
file_datetime = datetime.fromtimestamp(os.path.getmtime(path), tz=tz.tzutc())
except FileNotFoundError:
file_datetime = None
return file_datetime
def getTimestamp(t):
if t is None:
return None
timestamp = time.mktime(t.timetuple())
return timestamp
def parsedateOrNone(dateString):
if dateString is None:
return None
return parsedate(dateString)
def newSession():
connector = aiohttp.connector.TCPConnector(limit=25, limit_per_host=10)
timeout = aiohttp.ClientTimeout(total=None)
return aiohttp.ClientSession(connector=connector, timeout=timeout)
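# A minimal usage sketch for this module (kept as a comment; the URL and the
# target path are placeholders):
#
#     async def demo():
#         seeds = [UrlItem("https://example.com/file.bin", 0, None, "file.bin")]
#         fetched = []
#         async for item, err in UrlItem.fetchAllMetadata(seeds):
#             if err is None:
#                 fetched.append(item)
#         async for result in UrlItem.downloadAll(fetched, update=False):
#             if result is not None and result[1] is not None:
#                 print("download failed:", result[1])
#
#     asyncio.run(demo())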
# ============================================================================
# File: tests/cases/print.py | Repo: wisn/py2many | License: MIT
# ============================================================================
def main():
print(2)
print("b")
print(2, "b")
# ============================================================================
# File: linked_lists/find_loop.py | Repo: maanavshah/coding-interview
# License: MIT
# ============================================================================
# O(n) time | O(1) space
class LinkedList:
def __init__(self, value):
self.value = value
self.next = None
def findLoop(head):
slow = head.next
fast = head.next.next
while fast != slow:
slow = slow.next
fast = fast.next.next
fast = head
while fast != slow:
slow = slow.next
fast = fast.next
return fast
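# A small self-check (a sketch): build 1 -> 2 -> 3 -> 4 -> 5 with the node 5
# pointing back to 3; findLoop should return the node where the loop starts.
if __name__ == "__main__":
    nodes = [LinkedList(v) for v in range(1, 6)]
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    nodes[-1].next = nodes[2]  # close the loop at the third node
    print(findLoop(nodes[0]).value)  # expected: 3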
# ============================================================================
# File: src/tests/attention_test.py | Repo: feperessim/attention_keras
# License: MIT
# ============================================================================
import pytest
from layers.attention import AttentionLayer
from tensorflow.keras.layers import Input, GRU, Dense, Concatenate, TimeDistributed
from tensorflow.keras.models import Model
import tensorflow as tf
def test_attention_layer_standalone_fixed_b_fixed_t():
"""
Tests fixed batch size and time steps
Encoder and decoder has variable seq length and latent dim
"""
inp1 = Input(batch_shape=(5,10,15))
inp2 = Input(batch_shape=(5,15,25))
out, e_out = AttentionLayer()([inp1, inp2])
assert out.shape == tf.TensorShape([inp2.shape[0], inp2.shape[1], inp1.shape[2]])
assert e_out.shape == tf.TensorShape([inp1.shape[0], inp2.shape[1], inp1.shape[1]])
def check_tensorshape_equal(shape1, shape2):
print(shape1, shape2)
equal = []
for s1, s2 in zip(shape1, shape2):
if (s1 == s2) == None:
equal.append(True)
else:
equal.append(s1==s2)
return all(equal)
def test_attention_layer_standalone_none_b_fixed_t():
inp1 = Input(shape=(10, 15))
inp2 = Input(shape=(15, 25))
out, e_out = AttentionLayer()([inp1, inp2])
assert check_tensorshape_equal(out.shape, tf.TensorShape([None, inp2.shape[1], inp1.shape[2]]))
assert check_tensorshape_equal(e_out.shape, tf.TensorShape([None, inp2.shape[1], inp1.shape[1]]))
def test_attention_layer_standalone_none_b_none_t():
inp1 = Input(shape=(None, 15))
inp2 = Input(shape=(None, 25))
out, e_out = AttentionLayer()([inp1, inp2])
assert check_tensorshape_equal(out.shape, tf.TensorShape([None, None, inp1.shape[2]]))
assert check_tensorshape_equal(e_out.shape, tf.TensorShape([None, None, None]))
'''def test_attention_layer_nmt_none_b_fixed_t():
encoder_inputs = Input(shape=(12, 75), name='encoder_inputs')
decoder_inputs = Input(shape=(16 - 1, 80), name='decoder_inputs')
# Encoder GRU
encoder_gru = GRU(32, return_sequences=True, return_state=True, name='encoder_gru')
encoder_out, encoder_state = encoder_gru(encoder_inputs)
# Set up the decoder GRU, using `encoder_states` as initial state.
decoder_gru = GRU(32, return_sequences=True, return_state=True, name='decoder_gru')
decoder_out, decoder_state = decoder_gru(decoder_inputs, initial_state=encoder_state)
# Attention layer
attn_layer = AttentionLayer(name='attention_layer')
attn_out, attn_states = attn_layer([encoder_out, decoder_out])
# Concat attention input and decoder GRU output
decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])
# Dense layer
dense = Dense(80, activation='softmax', name='softmax_layer')
dense_time = TimeDistributed(dense, name='time_distributed_layer')
decoder_pred = dense_time(decoder_concat_input)
# Full model
full_model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)
full_model.compile(optimizer='adam', loss='categorical_crossentropy')
assert decoder_pred.shape == tf.TensorShape([])
def test_attention_layer_nmt_none_b_none_t():
pass'''
# ============================================================================
# File: Python/9248_Suffix_Array/9248_suffix_array_lcp_array.py
# Repo: ire4564/Baekjoon_Solutions | License: Apache-2.0
# ============================================================================
from itertools import zip_longest, islice
def to_int_keys_best(l):
seen = set()
ls = []
for e in l:
if not e in seen:
ls.append(e)
seen.add(e)
ls.sort()
index = {v: i for i, v in enumerate(ls)}
return [index[v] for v in l]
def suffix_array_best(s):
n = len(s)
k = 1
line = to_int_keys_best(s)
while max(line) < n - 1:
line = to_int_keys_best(
[a * (n + 1) + b + 1
for (a, b) in
zip_longest(line, islice(line, k, None),
fillvalue=-1)])
k <<= 1
return line
def lcp_array(s, sa):
n = len(s)
k = 0
lcp = [0] * n
rank = [0] * n
for i in range(n):
rank[sa[i]] = i
for i in range(n):
if rank[i] == n - 1:
k = 0
continue
j = sa[rank[i] + 1]
while i + k < n and j + k < n and s[i + k] == s[j + k]:
k += 1
lcp[rank[i]] = k
if k:
k -= 1
return lcp
def inverse_array(l):
n = len(l)
ans = [0] * n
for i in range(n):
ans[l[i]] = i
return ans
if __name__ == '__main__':
L = input()
inverse_suffix_array = suffix_array_best(L)
suffix_array = inverse_array(inverse_suffix_array)
for item in suffix_array:
print(item + 1, end=' ')
LCP = lcp_array(L, suffix_array)
LCP.pop()
LCP.insert(0, 'x')
print()
for item in LCP:
print(item, end=' ')
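# Worked example (a sketch, matching the BOJ 9248 output format): for the
# input "banana" the sorted suffixes are a, ana, anana, banana, na, nana, so
# the program prints the 1-indexed suffix array "6 4 2 1 5 3" on the first
# line and the LCP values "x 1 3 0 0 2" on the second (the leading "x" marks
# the undefined LCP of the first suffix).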
# -*- coding: utf-8 -*-
# ============================================================================
# File: lgr/tools/compare/utils.py | Repo: ron813c/lgr-core
# License: BSD-3-Clause
# ============================================================================
"""
utils.py - Definition of utility functions.
"""
from collections import namedtuple
from lgr.utils import format_cp
VariantProperties = namedtuple('VariantProperties', ['cp', 'type',
'when', 'not_when',
'comment'])
def display_variant(variant):
"""
Nicely display a variant.
:param variant: The variant to display.
"""
return "Variant {}: type={} - when={} - not-when={} - comment={}".format(
format_cp(variant.cp), variant.type,
variant.when, variant.not_when,
variant.comment)
def compare_objects(first, second, cmp_fct):
"""
Compare two objects, possibly None.
:param first: First object.
:param second: Second object.
:param cmp_fct: A comparison function.
:return: The "greatest" object according to `cmp_fct`,
None if both values are None.
>>> compare_objects(1, 2, max)
2
>>> compare_objects(1, 2, min)
1
>>> compare_objects(None, None, max) is None
True
>>> compare_objects(1, None, min)
1
>>> compare_objects(None, 1, min)
1
"""
if first is None:
return second
if second is None:
return first
return cmp_fct(first, second)
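# A small usage sketch (assuming lgr.utils.format_cp accepts a code point
# sequence, as the import above suggests; the values are illustrative):
if __name__ == "__main__":
    v = VariantProperties(cp=(0x0061,), type="blocked", when=None,
                          not_when=None, comment="example")
    print(display_variant(v))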
#!/usr/bin/env python3
# ============================================================================
# File: tests/test__file_object.py
# Repo: StateArchivesOfNorthCarolina/tomes_metadata | License: MIT
# ============================================================================
# import modules.
import sys; sys.path.append("..")
import hashlib
import json
import logging
import os
import plac
import unittest
import warnings
from tomes_packager.lib.directory_object import *
from tomes_packager.lib.file_object import *
# enable logging.
logging.basicConfig(level=logging.DEBUG)
class Test_FileObject(unittest.TestCase):
def setUp(self):
# set attributes.
self.sample_file = __file__
self.sample_dir = os.path.dirname(self.sample_file)
self.dir_obj = DirectoryObject(self.sample_dir)
self.file_obj = FileObject(self.sample_file, self.dir_obj, self.dir_obj, 0)
def test__mimetype(self):
""" Is the MIME type for @self.file_obj correct? """
# get mime via mimetypes.guess_type.
mime = mimetypes.guess_type(self.sample_file)[0]
# make sure the FileObject mimetype is the same.
self.assertEqual(mime, self.file_obj.mimetype())
def test__checksum(self):
""" Is the SHA-1 hash for @self.file_obj correct? """
# get SHA-1 value of @self.sample_file via hashlib.
sha1 = hashlib.sha1()
with open(self.sample_file, "rb") as f:
sha1.update(f.read())
sha1 = sha1.hexdigest()
# get FileObject SHA-1 hash and suppress ResourceWarning in unittest.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sha1_obj = self.file_obj.checksum("SHA-1")
# make sure hashes are equal.
self.assertEqual(sha1, sha1_obj)
# CLI.
def main(filepath:("file path")):
"Converts a file to a FolderObject and prints its attributes to screen as JSON.\
\nexample: `python3 test__file_object.py sample_files/sample_rdf.xlsx`"
# convert @filepath to a FileObject.
dir_obj = DirectoryObject(os.path.dirname(filepath))
file_obj = FileObject(filepath, dir_obj, dir_obj, 0)
# collect @file_obj attributes.
fdict = {}
for att in file_obj.__dict__:
if att[0] == "_":
continue
try:
val = getattr(file_obj, att)()
except TypeError:
val = getattr(file_obj, att)
fdict[att] = str(val)
# convert @fdict to JSON.
js = json.dumps(fdict, indent=2)
print(js)
if __name__ == "__main__":
plac.call(main)
# ============================================================================
# File: tests/test_helpers.py | Repo: c137digital/unv_web | License: MIT
# ============================================================================
from unv.web.helpers import url_with_domain, url_for_static
def test_url_with_domain():
assert url_with_domain('/path') == 'https://app.local/path'
def test_simple_static_url():
assert url_for_static('asd.txt') == '/static/asd.txt'
# ============================================================================
# File: game.py | Repo: IliketoTranslate/Pickaxe-clicker | License: MIT
# ============================================================================
import pygame
icon = pygame.image.load("diamond_pickaxe.png")
screen_weight = 1750
screen_height = 980
pygame.init()
window = pygame.display.set_mode((screen_weight, screen_height))
pygame.display.set_caption('Pickaxe clicker')
pygame.display.set_icon(icon)
# variables
wytrzymałość_kilofa = 50
max_wytrzymałość_kilofa = 50
dodaj2 = 1
record = 0
game_version = "0.2.2"
last_update = "28.01.2022"
x_for_kilof = 400
y_for_kilof = 400
x_for_button1 = 1030
y_for_button1 = 80
x_for_button2 = 1030
y_for_button2 = 800
boost = 1
doswiadczenie = 0
dodaj = 1
max_dodaj = 1
kilof_upgrade = 100
choosed_kilof = 1
# objects
kilof = pygame.image.load("Drewniany_kilof.png")
kilof2 = pygame.image.load("Kamienny_kilof.png")
kilof3 = pygame.image.load("Zelazny_kilof.png")
kilof4 = pygame.image.load("Zloty_kilof.png")
kilof5 = pygame.image.load("Diamentowy_kilof.png")
button_upgrade = pygame.image.load("Button_upgrade.png")
button_upgrade_clicked = pygame.image.load("Button_upgrade_clicked.png")
button_upgrade2 = pygame.image.load("Button_upgrade2.png")
button_upgrade2_clicked = pygame.image.load("Button_upgrade2_clicked.png")
button_restart = pygame.image.load("Button_restart.png")
tlo = pygame.image.load("tlo.png")
tlo = pygame.transform.scale(tlo, (screen_weight, screen_height)) # scale to the window size
# hitboxes
kilof_hitbox = pygame.rect.Rect(x_for_kilof, y_for_kilof, 160, 160) # creates the pickaxe hitbox
button_upgrade_hitbox = pygame.rect.Rect(x_for_button1, y_for_button1, 650, 100) # creates the button hitbox
button_upgrade2_hitbox = pygame.rect.Rect(x_for_button2, y_for_button2, 650, 100)
# functions
def draw_object(object, x, y) :
window.blit(object, (x, y)) # draw the object
def draw_hitbox(object) :
pygame.draw.rect(window, (93, 32, 32), object)
def zdarzenia_z_myszką() :
# mutate the module-level game state instead of re-initializing it on every call
global wytrzymałość_kilofa, max_wytrzymałość_kilofa, dodaj, dodaj2, doswiadczenie, boost, max_dodaj, kilof_upgrade, choosed_kilof
kilof_upgrade2 = kilof_upgrade - 1
if wytrzymałość_kilofa == 0 :
dodaj = 0
dodaj2 = 0
else :
dodaj2 = 1
dodaj = max_dodaj
if choosed_kilof > 0 and choosed_kilof < 5 :
if button_upgrade2_hitbox.collidepoint(pygame.mouse.get_pos()) and doswiadczenie > kilof_upgrade2 : # if the mouse is over the hitbox
if pygame.mouse.get_pressed()[0]: # if the left mouse button is pressed
doswiadczenie = doswiadczenie - kilof_upgrade
if wytrzymałość_kilofa == 0 :
choosed_kilof = 1
kilof_upgrade = 100
dodaj = 0
dodaj2 = 0
wytrzymałość_kilofa = max_wytrzymałość_kilofa
else :
dodaj2 = 1
choosed_kilof += 1
max_wytrzymałość_kilofa = max_wytrzymałość_kilofa * 2
kilof_upgrade = kilof_upgrade * 2
wytrzymałość_kilofa = max_wytrzymałość_kilofa
pygame.time.wait(50)
else :
max_wytrzymałość_kilofa = 800
kilof_upgrade = 10000000000
if button_upgrade2_hitbox.collidepoint(pygame.mouse.get_pos()) and doswiadczenie > kilof_upgrade2 : # if the mouse is over the hitbox
if pygame.mouse.get_pressed()[0]: # if the left mouse button is pressed
wytrzymałość_kilofa = max_wytrzymałość_kilofa
pygame.time.wait(50)
if kilof_hitbox.collidepoint(pygame.mouse.get_pos()):
if pygame.mouse.get_pressed()[0]:
pygame.time.wait(100)
doswiadczenie += dodaj
wytrzymałość_kilofa = wytrzymałość_kilofa - dodaj2
boost2 = boost - 1
if button_upgrade_hitbox.collidepoint(pygame.mouse.get_pos()) and doswiadczenie > boost2:
if pygame.mouse.get_pressed()[0]:
max_dodaj += choosed_kilof
doswiadczenie = doswiadczenie - boost
boost = boost * 2
pygame.time.wait(100)
if button_upgrade2_hitbox.collidepoint(pygame.mouse.get_pos()):
draw_object(button_upgrade2_clicked, x_for_button2, y_for_button2) # draw the button
draw_object(text_kilof, 1040, 840) # draw the text
else :
draw_object(button_upgrade2, x_for_button2, y_for_button2) # draw the button
draw_object(text_kilof, 1040, 840) # draw the text
if button_upgrade_hitbox.collidepoint(pygame.mouse.get_pos()):
draw_object(button_upgrade_clicked, x_for_button1, y_for_button1) # draw the button
draw_object(text_ulepszenie, 1040, 110) # draw the text
else :
draw_object(button_upgrade, x_for_button1, y_for_button1) # draw the button
draw_object(text_ulepszenie, 1040, 110) # draw the text
run = True
while run:
pygame.time.Clock().tick(100) # at most 100 fps
for event in pygame.event.get():
if event.type == pygame.QUIT: # if the player closes the window
run = False
keys = pygame.key.get_pressed()
if keys[pygame.K_ESCAPE] :
run = False
# on-screen texts
kilof_upgrade2 = kilof_upgrade - 1
text_wersja = pygame.font.Font.render(pygame.font.SysFont("Freemono", 50), f"Version : {game_version} | Last update : {last_update}", True, (255, 200, 100)) # render the text
text_doswiadczenie = pygame.font.Font.render(pygame.font.SysFont("Dyuthi", 72), f"Doswiadczenie : {doswiadczenie}", True, (100, 100, 100)) # render the text
text_kilof = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Kup kilof | Koszt : {kilof_upgrade}", True, (255, 255, 255)) # render the text
text_WIP = pygame.font.Font.render(pygame.font.SysFont("Waree", 25), f"W I P (WORK IN PROGRESS)", True, (255, 255, 255)) # render the text
text_wytrzymałość_kilofa = pygame.font.Font.render(pygame.font.SysFont("Dyuthi", 50), f"Wytrzymalosc kilofa : {wytrzymałość_kilofa}", True, (255, 255, 255)) # render the text
text_record = pygame.font.Font.render(pygame.font.SysFont("Liberation Serif", 50), f"Record : {record}", True, (150, 150, 150))
if choosed_kilof > 0 and choosed_kilof < 5 :
if doswiadczenie > kilof_upgrade2 :
text_kilof = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Kup kilof | Koszt : {kilof_upgrade}, Dostepne", True, (255, 255, 255)) # render the text
else :
text_kilof = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Kup kilof | Koszt : {kilof_upgrade}, Niedostepne", True, (255, 255, 255)) # render the text
elif choosed_kilof == 5 :
text_kilof = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Nie ma wiecej dostepnych kilofow", True, (255, 255, 255)) # render the text
boost2 = boost - 1
if doswiadczenie > boost2 :
text_ulepszenie = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Ulepszenie kilofa | Koszt : {boost}, Dostepne", True, (255, 255, 255)) # render the text
else :
text_ulepszenie = pygame.font.Font.render(pygame.font.SysFont("Sawasdee", 25), f"Ulepszenie kilofa | Koszt : {boost}, Niedostepne", True, (255, 255, 255)) # render the text
window.blit(tlo, (0, 0)) # draw the background
# draw the hitboxes
draw_hitbox(kilof_hitbox) # draw the pickaxe hitbox
draw_hitbox(button_upgrade_hitbox) # draw the upgrade button hitbox
draw_hitbox(button_upgrade2_hitbox) # draw the second upgrade button hitbox
# draw the objects
if choosed_kilof == 1 : draw_object(kilof, x_for_kilof, y_for_kilof) # draw the pickaxe
elif choosed_kilof == 2 : draw_object(kilof2, x_for_kilof, y_for_kilof)
elif choosed_kilof == 3 : draw_object(kilof3, x_for_kilof, y_for_kilof)
elif choosed_kilof == 4 : draw_object(kilof4, x_for_kilof, y_for_kilof)
elif choosed_kilof == 5 or choosed_kilof > 5 : draw_object(kilof5, x_for_kilof, y_for_kilof)
draw_object(button_upgrade, x_for_button1, y_for_button1) # draw the button
draw_object(button_upgrade2, x_for_button2, y_for_button2) # draw the second button
draw_object(button_restart, 0, 0)
draw_object(text_doswiadczenie, 224, 100) # draw the text
draw_object(text_ulepszenie, 1040, 110) # draw the text
draw_object(text_wersja, 10, 5) # draw the text
draw_object(text_kilof, 1040, 840)
draw_object(text_WIP, 1170, 750)
draw_object(text_wytrzymałość_kilofa, 250, 300)
draw_object(text_record, 1280, 0)
# sprawdzanie zdarzeń z myszką
zdarzenia_z_myszką()
# sprawdzanie
if doswiadczenie > record :
record = doswiadczenie
#if x_for_button1 > 80 :
#if x_for_button2 > 800 :
    # refresh the display
    pygame.display.update()
| 49.447489 | 296 | 0.576138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,020 | 0.18527
dedba85b4c2428f8778fd3f7f0d4d19fee14a759 | 4,383 | py | Python | tests/test_predictor.py | WeijieChen2017/pytorch-3dunet | 15c782481cb7bc3e2083a80bcc8b114cc8697c20 | ["MIT"] | 1 | 2021-08-04T04:03:37.000Z | 2021-08-04T04:03:37.000Z | tests/test_predictor.py | LalithShiyam/pytorch-3dunet | f6b6c13cb0bb6194e95976b0245b76aaa9e9a496 | ["MIT"] | null | null | null | tests/test_predictor.py | LalithShiyam/pytorch-3dunet | f6b6c13cb0bb6194e95976b0245b76aaa9e9a496 | ["MIT"] | 1 | 2022-03-14T04:43:24.000Z | 2022-03-14T04:43:24.000Z |
import os
from tempfile import NamedTemporaryFile
import h5py
import numpy as np
import torch
from skimage.metrics import adapted_rand_error
from torch.utils.data import DataLoader
from pytorch3dunet.datasets.hdf5 import StandardHDF5Dataset
from pytorch3dunet.datasets.utils import prediction_collate, get_test_loaders
from pytorch3dunet.predict import _get_output_file, _get_predictor
from pytorch3dunet.unet3d.model import get_model
from pytorch3dunet.unet3d.predictor import EmbeddingsPredictor
from pytorch3dunet.unet3d.utils import remove_halo
class FakePredictor(EmbeddingsPredictor):
def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, **kwargs):
super().__init__(model, loader, output_file, config, clustering, iou_threshold=iou_threshold, **kwargs)
def _embeddings_to_segmentation(self, embeddings):
return embeddings
class FakeModel:
def __call__(self, input):
return input
def eval(self):
pass
class TestPredictor:
def test_stanard_predictor(self, tmpdir, test_config):
# Add output dir
test_config['loaders']['output_dir'] = tmpdir
# create random dataset
tmp = NamedTemporaryFile(delete=False)
with h5py.File(tmp.name, 'w') as f:
shape = (32, 64, 64)
f.create_dataset('raw', data=np.random.rand(*shape))
# Add input file
test_config['loaders']['test']['file_paths'] = [tmp.name]
# Create the model with random weights
model = get_model(test_config)
# Create device and update config
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
test_config['device'] = device
model = model.to(device)
for test_loader in get_test_loaders(test_config):
output_file = _get_output_file(dataset=test_loader.dataset, output_dir=tmpdir)
predictor = _get_predictor(model, test_loader, output_file, test_config)
# run the model prediction on the entire dataset and save to the 'output_file' H5
predictor.predict()
def test_embeddings_predictor(self, tmpdir):
config = {
'model': {'output_heads': 1},
'device': torch.device('cpu')
}
slice_builder_config = {
'name': 'SliceBuilder',
'patch_shape': (64, 200, 200),
'stride_shape': (40, 150, 150)
}
transformer_config = {
'raw': [
{'name': 'ToTensor', 'expand_dims': False, 'dtype': 'long'}
]
}
gt_file = 'resources/sample_ovule.h5'
output_file = os.path.join(tmpdir, 'output_segmentation.h5')
dataset = StandardHDF5Dataset(gt_file, phase='test',
slice_builder_config=slice_builder_config,
transformer_config=transformer_config,
mirror_padding=None,
raw_internal_path='label')
loader = DataLoader(dataset, batch_size=1, num_workers=1, shuffle=False, collate_fn=prediction_collate)
predictor = FakePredictor(FakeModel(), loader, output_file, config, clustering='meanshift', bandwidth=0.5)
predictor.predict()
with h5py.File(gt_file, 'r') as f:
with h5py.File(output_file, 'r') as g:
gt = f['label'][...]
segm = g['segmentation/meanshift'][...]
arand_error = adapted_rand_error(gt, segm)[0]
assert arand_error < 0.1
def test_remove_halo(self):
patch_halo = (4, 4, 4)
shape = (128, 128, 128)
input = np.random.randint(0, 10, size=(1, 16, 16, 16))
index = (slice(0, 1), slice(12, 28), slice(16, 32), slice(16, 32))
u_patch, u_index = remove_halo(input, index, shape, patch_halo)
assert np.array_equal(input[:, 4:12, 4:12, 4:12], u_patch)
assert u_index == (slice(0, 1), slice(16, 24), slice(20, 28), slice(20, 28))
index = (slice(0, 1), slice(112, 128), slice(112, 128), slice(112, 128))
u_patch, u_index = remove_halo(input, index, shape, patch_halo)
assert np.array_equal(input[:, 4:16, 4:16, 4:16], u_patch)
assert u_index == (slice(0, 1), slice(116, 128), slice(116, 128), slice(116, 128))
| 35.346774 | 114 | 0.62423 | 3,823 | 0.872234 | 0 | 0 | 0 | 0 | 0 | 0 | 524 | 0.119553 |
dedbd6180bc5f6b44a69dd4d23b7983f144a3239 | 2,560 | py | Python | catalog/views.py | DigimundoTesca/Tv-Mundo | 09904759d1f4f9bf2d5c7c31b97af82c3c963bfd | ["MIT"] | null | null | null | catalog/views.py | DigimundoTesca/Tv-Mundo | 09904759d1f4f9bf2d5c7c31b97af82c3c963bfd | ["MIT"] | 6 | 2017-09-19T07:26:14.000Z | 2017-09-27T10:06:49.000Z | catalog/views.py | DigimundoTesca/Tv-Mundo | 09904759d1f4f9bf2d5c7c31b97af82c3c963bfd | ["MIT"] | null | null | null |
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from catalog.models import Videos, Category, Docs, Subscriber
@login_required
def home(request):
template = 'home.html'
category = Category.objects.all()
videos = Videos.objects.all()
grade = Subscriber.objects.get(user=request.user)
context = {
'grade': grade,
'videos': videos,
'title': "Tv Mundo",
'category' : category,
}
return render(request, template, context)
@login_required
def block(request, name):
template = 'block.html'
cat = Category.objects.all()
selCat = cat.get(title=name)
title = name
context = {
'title': title,
'category': cat,
'selCat': selCat,
}
return render(request, template, context)
@login_required
def catalog(request):
template = 'catalog.html'
category = Category.objects.all()
videos = Videos.objects.all()
docs = Docs.objects.all()
title = 'Catalogo'
context = {
'docs' : docs,
'videos' : videos,
'category' : category,
'title': title,
}
return render(request, template, context)
@login_required
def videos(request, name, pk=0):
template = 'videos.html'
videos = Videos.objects.filter(category__title=name).filter(status=True)
category = Category.objects.all()
docs = Docs.objects.filter(category__title=name)
title = name
if pk == '0':
s_vid = videos[:1].get()
else:
s_vid = videos.filter(pk=pk)
s_vid = s_vid[:1].get()
context = {
's_vid': s_vid,
'videos': videos,
'docs':docs,
'category': category,
'title': title,
}
return render(request, template, context)
@login_required
def images(request, name, pk=None):
template = 'images.html'
docs = Docs.objects.filter(category__title=name).filter(kind="IMG")
category = Category.objects.all()
title = name
context = {
'category': category,
'docs': docs,
'title': title,
}
return render(request, template, context)
@login_required
def docs(request, name, pk=None):
template = 'documents.html'
docs = Docs.objects.all().filter(category__title=name)
category = Category.objects.all()
title = name
context = {
'category': category,
'docs': docs,
'title': title,
}
return render(request, template, context)
| 24.380952 | 76 | 0.622656 | 0 | 0 | 0 | 0 | 2,311 | 0.902734 | 0 | 0 | 279 | 0.108984 |
dedc38f09d494832d839db3e999852609e6a45ac | 519 | py | Python | python/database/get_twitter_predict_by_order.py | visdata/DeepClue | 8d80ecd783919c97ba225db67664a0dfe5f3fb37 | ["Apache-2.0"] | 1 | 2020-12-06T08:04:32.000Z | 2020-12-06T08:04:32.000Z | python/database/get_twitter_predict_by_order.py | visdata/DeepClue | 8d80ecd783919c97ba225db67664a0dfe5f3fb37 | ["Apache-2.0"] | null | null | null | python/database/get_twitter_predict_by_order.py | visdata/DeepClue | 8d80ecd783919c97ba225db67664a0dfe5f3fb37 | ["Apache-2.0"] | null | null | null |
import MySQLdb
db = MySQLdb.connect('localhost', 'root', 'vis_2014', 'FinanceVis')
cursor = db.cursor()
sql = 'select predict_news_word from all_twitter where symbol=%s order by predict_news_word+0 desc'
cursor.execute(sql, ('AAPL', ))
results = cursor.fetchall()
file_twitter_predict = open('twitter_predict_AAPL.csv', 'w')  # text mode: str values are written below
for row in results:
predict = row[0]
if row[0] is None:
predict = 'NULL'
file_twitter_predict.write(predict+'\n')
file_twitter_predict.close()
cursor.close()
db.close()
| 25.95 | 99 | 0.714836 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.342967
dedd33f5b7d0869e4ad454abba7866e56edaacbb | 301 | py | Python | examples/matplotlib/mpl_plot_dot.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | ["Apache-2.0"] | 1,159 | 2018-04-03T08:50:54.000Z | 2022-03-31T18:03:52.000Z | examples/matplotlib/mpl_plot_dot.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | ["Apache-2.0"] | 1,656 | 2018-03-23T14:15:05.000Z | 2022-03-31T14:00:28.000Z | examples/matplotlib/mpl_plot_dot.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | ["Apache-2.0"] | 316 | 2018-04-03T14:25:52.000Z | 2022-03-25T10:41:29.000Z |
"""
Dot Plot
=========
_thumb: .2, .8
_example_title: Plot distribution.
"""
import matplotlib.pyplot as plt
import numpy as np
import arviz as az
az.style.use("arviz-darkgrid")
data = np.random.normal(0, 1, 1000)
az.plot_dot(data, dotcolor="C1", point_interval=True, figsize=(12, 6))
plt.show()
| 15.842105 | 70 | 0.69103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.322259 |
dedeaccf1b8d4bb294ba8b9e2278d86179d43f0e | 405 | py | Python | kattis/solutions/alphabetspam.py | yifeng-pan/competitive_programming | c59edb1e08aa2db2158a814e3d34f4302658d98e | ["Unlicense"] | null | null | null | kattis/solutions/alphabetspam.py | yifeng-pan/competitive_programming | c59edb1e08aa2db2158a814e3d34f4302658d98e | ["Unlicense"] | null | null | null | kattis/solutions/alphabetspam.py | yifeng-pan/competitive_programming | c59edb1e08aa2db2158a814e3d34f4302658d98e | ["Unlicense"] | null | null | null |
# https://open.kattis.com/problems/alphabetspam
import sys
import math
xs = input()
white = 0
lower = 0
higher =0
other = 0
for i in xs:
if i == '_':
white += 1
elif ('a' <= i) & (i <= 'z'):
lower += 1
elif ('A' <= i) & (i <= "Z"):
higher += 1
else:
other += 1
print(white / len(xs))
print(lower / len(xs))
print(higher /len(xs))
print(other / len(xs))
| 15.576923 | 47 | 0.511111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.153086
dee0061d48e6e49cac68657f95ed5ac4927eaa8e | 3,813 | py | Python | src/chain_orientation_three_vars_symbolic.py | Scriddie/Varsortability | 357213d5ceefb6362060c56e12c18b41dc689306 | ["MIT"] | 4 | 2021-12-08T07:54:00.000Z | 2022-03-09T07:55:21.000Z | src/chain_orientation_three_vars_symbolic.py | Scriddie/Varsortability | 357213d5ceefb6362060c56e12c18b41dc689306 | ["MIT"] | null | null | null | src/chain_orientation_three_vars_symbolic.py | Scriddie/Varsortability | 357213d5ceefb6362060c56e12c18b41dc689306 | ["MIT"] | 1 | 2022-03-09T07:55:43.000Z | 2022-03-09T07:55:43.000Z |
import numpy as np
from sympy import simplify, sqrt, symbols
from sympy.stats import Normal, covariance as cov, variance as var
def regcoeffs(x, y, z):
covxy = cov(x, y)
covyz = cov(y, z)
varx = var(x)
vary = var(y)
varz = var(z)
# forward
f1 = simplify(covxy / varx)
f2 = simplify(covyz / vary)
# backward
b1 = simplify(covyz / varz)
b2 = simplify(covxy / vary)
return f1, f2, b1, b2
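# Written out, these are the population least-squares coefficients for the
# two fit directions of the chain x -> y -> z computed above:
#
#   forward:  f1 = Cov(x, y) / Var(x),   f2 = Cov(y, z) / Var(y)
#   backward: b1 = Cov(y, z) / Var(z),   b2 = Cov(x, y) / Var(y)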
if __name__ == "__main__":
ab, bc, a, b, c = symbols([
"beta_{A_to_B}",
"beta_{B_to_C}",
"sigma_A",
"sigma_B",
"sigma_C"])
Na = Normal('Na', 0, 1)
Nb = Normal('Nb', 0, 1)
Nc = Normal('Nc', 0, 1)
# SEM
# A -> B -> C
# raw
A = a * Na
B = ab * A + b * Nb
C = bc * B + c * Nc
# standardized
As = A / sqrt(var(A))
Bs = B / sqrt(var(B))
Cs = C / sqrt(var(C))
# scale-harmonized
Am = a * Na
Bm = (ab / (ab**2 + 1)**(1/2)) * Am + b * Nb
Cm = (bc / (bc**2 + 1)**(1/2)) * Bm + c * Nc
# forward/backward coefficients in raw setting
f1, f2, b1, b2 = regcoeffs(A, B, C)
# forward/backward coefficients in standardized setting
f1s, f2s, b1s, b2s = regcoeffs(As, Bs, Cs)
# forward/backward coefficients in scale-harmonized setting
f1m, f2m, b1m, b2m = regcoeffs(Am, Bm, Cm)
for weight_range in [(0.5, 2),
(0.5, .9),
(.1, .9)]:
raw = {
'f1<f2,b1>b2': 0,
'f1>f2,b1<b2': 0,
'other': 0
}
std = {
'f1<f2,b1>b2': 0,
'f1>f2,b1<b2': 0,
'other': 0
}
moj = {
'f1<f2,b1>b2': 0,
'f1>f2,b1<b2': 0,
'other': 0
}
for _ in range(100000):
# draw model parameters
a_to_b, b_to_c = np.random.uniform(*weight_range, size=2)
sA, sB, sC = np.random.uniform(0.5, 2, size=3)
a_to_b *= np.random.choice([-1, 1])
b_to_c *= np.random.choice([-1, 1])
subs = {
ab: a_to_b,
bc: b_to_c,
a: sA,
b: sB,
c: sC,
}
# raw
if (abs(f1.subs(subs)) < abs(f2.subs(subs))
and abs(b1.subs(subs)) > abs(b2.subs(subs))):
raw['f1<f2,b1>b2'] += 1
elif (abs(f1.subs(subs)) > abs(f2.subs(subs))
and abs(b1.subs(subs)) < abs(b2.subs(subs))):
raw['f1>f2,b1<b2'] += 1
else:
raw['other'] += 1
# standardized
if (abs(f1s.subs(subs)) < abs(f2s.subs(subs))
and abs(b1s.subs(subs)) > abs(b2s.subs(subs))):
std['f1<f2,b1>b2'] += 1
elif (abs(f1s.subs(subs)) > abs(f2s.subs(subs))
and abs(b1s.subs(subs)) < abs(b2s.subs(subs))):
std['f1>f2,b1<b2'] += 1
else:
std['other'] += 1
# scale-harmonized
if (abs(f1m.subs(subs)) < abs(f2m.subs(subs))
and abs(b1m.subs(subs)) > abs(b2m.subs(subs))):
moj['f1<f2,b1>b2'] += 1
elif (abs(f1m.subs(subs)) > abs(f2m.subs(subs))
and abs(b1m.subs(subs)) < abs(b2m.subs(subs))):
moj['f1>f2,b1<b2'] += 1
else:
moj['other'] += 1
print('weight_range', weight_range)
raw['correct'] = raw['f1<f2,b1>b2'] + raw['other'] / 2
print('raw\t\t', raw)
std['correct'] = std['f1<f2,b1>b2'] + std['other'] / 2
print('standardized\t', std)
moj['correct'] = moj['f1<f2,b1>b2'] + moj['other'] / 2
print('Mooij-scaled\t', moj)
print()
| 28.455224 | 69 | 0.441385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 713 | 0.186992 |
dee00922a67f6dff4732cf526028648896d0fc92 | 2,290 | py | Python | Phototweet.py | sbamueller/RasperryPi_BildFeinstaub | 3666db384ead64893b3c548065aa31cef6c126af | ["Apache-2.0"] | null | null | null | Phototweet.py | sbamueller/RasperryPi_BildFeinstaub | 3666db384ead64893b3c548065aa31cef6c126af | ["Apache-2.0"] | null | null | null | Phototweet.py | sbamueller/RasperryPi_BildFeinstaub | 3666db384ead64893b3c548065aa31cef6c126af | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python2.7
# coding=<UTF-8>
# tweetpic.py take a photo with the Pi camera and tweet it
# by Alex Eames http://raspi.tv/?p=5918
import tweepy
from subprocess import call
from datetime import datetime
import requests
import json
i = datetime.now() #take time and date for filename
now = i.strftime('%Y%m%d-%H%M%S')
photo_name = now + '.jpg'
cmd = 'raspistill -t 500 -w 1024 -h 768 -o /home/pi/Pictures/' + photo_name
call ([cmd], shell=True) #shoot the photo
def pick_values(sensor):
    # Query sensor data for the SDS011 and DHT11
    # using the api from luftdaten.info
    # Peter Furle @Alpensichtung Hotzenwald 04 2017
r = requests.get(sensor)
json_string = r.text
parsed_json = json.loads(json_string)
    # pretty print, just to understand what is happening here
    # print json.dumps(parsed_json, sort_keys=True, indent=4, separators=(',', ': '))
l = len(parsed_json)-1
a = len(parsed_json[l]['sensordatavalues'])
if a == 1:
        result=(parsed_json[l]['sensordatavalues'][0]['value_type'])+": "+(parsed_json[l]['sensordatavalues'][0]['value'])
    if a == 2:
        result=(parsed_json[l]['sensordatavalues'][0]['value_type'])+": "+(parsed_json[l]['sensordatavalues'][0]['value'])
        result=result+" "+(parsed_json[l]['sensordatavalues'][1]['value_type'])+": "+(parsed_json[l]['sensordatavalues'][1]['value'])
return(result)
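# A minimal sketch of the JSON shape pick_values() expects from the
# luftdaten.info endpoint (field names follow the parsing code above; the
# concrete numbers are made up for illustration):
#
#   [ ...,
#     {"sensordatavalues": [{"value_type": "P1", "value": "12.3"},
#                           {"value_type": "P2", "value": "4.5"}]} ]
#
# For that last element, pick_values() would return "P1: 12.3 P2: 4.5".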
# Freiburg sensor by sbamueller
url = 'http://api.luftdaten.info/static/v1/sensor/534/'
tweet = pick_values(url)
url = 'http://api.luftdaten.info/static/v1/sensor/533/'
tweet = tweet + " " + pick_values(url)
# Trim the texts to fit 140-character tweets
tweet = tweet.replace('temperature: ','| Temp C:')
tweet = tweet.replace('P1:','| PM10:')
tweet = tweet.replace('P2:','PM2.5:')
#print(tweet)
# Consumer keys and access tokens, used for OAuth
CONSUMER_KEY = 'ihrKey'
CONSUMER_SECRET = 'ihrKey'
ACCESS_KEY = 'ihrKey'
ACCESS_SECRET = 'ihrKey'
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(CONSUMER_KEY , CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY , ACCESS_SECRET)
# Creation of the actual interface, using authentication
api = tweepy.API(auth)
# Send the tweet with photo
photo_path = '/home/pi/Pictures' + photo_name
status = 'Blick auf Freiburg mit Feinstaubwerten, Temp & Luftfeuchte ' + i.strf$
status = status + tweet
api.update_with_media(photo_path, status=status)
| 31.369863 | 80 | 0.691266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,174 | 0.512664 |
dee0dfeab71167aee2a17e14945c71c0e31e66be | 1,762 | py | Python | jaffalearn/logging.py | tqbl/jaffalearn | a5bb79fcb3e84fd6e17b6356429e5885386a5a58 | ["0BSD"] | null | null | null | jaffalearn/logging.py | tqbl/jaffalearn | a5bb79fcb3e84fd6e17b6356429e5885386a5a58 | ["0BSD"] | null | null | null | jaffalearn/logging.py | tqbl/jaffalearn | a5bb79fcb3e84fd6e17b6356429e5885386a5a58 | ["0BSD"] | null | null | null |
from pathlib import Path
import pandas as pd
from torch.utils.tensorboard import SummaryWriter
class Logger:
def __init__(self, system, log_dir, overwrite=False):
self.log_path = Path(log_dir) / 'history.csv'
self.system = system
self.tb_writer = None
# Remove any previous TensorBoard log files
if overwrite:
for path in self.log_path.parent.glob('*tfevents*'):
print(f'Deleting {path}')
path.unlink()
# Read from existing log file if applicable
if overwrite or not self.log_path.exists():
self.history = pd.DataFrame()
self.history.index.name = 'epoch'
else:
self.history = pd.read_csv(self.log_path, index_col=0)
def __call__(self):
self.step()
def step(self):
# Print results to stdout
results = self.system.summarize_results()
print(', '.join(['{}: {:.4f}'.format(k, v)
for k, v in results.items()]))
# Write results to TensorBoard log file
epoch = len(self.history)
if self.tb_writer is None:
self.tb_writer = SummaryWriter(self.log_path.parent)
for key, value in results.items():
self.tb_writer.add_scalar(key, value, epoch)
self.tb_writer.file_writer.flush()
# Write results to CSV file
self.history = self.history.append(results, ignore_index=True)
self.history.to_csv(self.log_path)
self.system.clear_results()
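    # Typical use, as a sketch (`system` is assumed to expose the
    # summarize_results()/clear_results() pair called above):
    #
    #   logger = Logger(system, log_dir='runs/exp1')
    #   for epoch in range(num_epochs):
    #       ...  # train and evaluate one epoch
    #       logger.step()
    #   logger.close()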
def truncate(self, epoch):
self.history = self.history.iloc[:epoch]
self.history.to_csv(self.log_path)
def close(self):
if self.tb_writer is not None:
self.tb_writer.close()
| 30.37931 | 70 | 0.605562 | 1,662 | 0.943246 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.137911 |
dee0ea830b4e14533eb75ccbf58b75a95766df8d | 3,369 | py | Python | python/soma_workflow/constants.py | denisri/soma-workflow | bc6f2f50d34437e86e850cb0d05ff26b041d560d | ["CECILL-B"] | null | null | null | python/soma_workflow/constants.py | denisri/soma-workflow | bc6f2f50d34437e86e850cb0d05ff26b041d560d | ["CECILL-B"] | 44 | 2018-10-30T16:57:10.000Z | 2022-03-15T10:54:57.000Z | python/soma_workflow/constants.py | populse/soma-workflow | e6d3e3c33ad41107ee3c959adc4832e6edd047f4 | ["CECILL-B"] | null | null | null |
# -*- coding: utf-8 -*-
'''
author: Soizic Laguitton
organization: I2BM, Neurospin, Gif-sur-Yvette, France
organization: CATI, France
organization: IFR 49
License: `CeCILL version 2 <http://www.cecill.info/licences/Licence_CeCILL_V2-en.html>`_
'''
#
# Soma-workflow constants #
#
'''
Job status:
'''
NOT_SUBMITTED = "not_submitted"
UNDETERMINED = "undetermined"
QUEUED_ACTIVE = "queued_active"
SYSTEM_ON_HOLD = "system_on_hold"
USER_ON_HOLD = "user_on_hold"
USER_SYSTEM_ON_HOLD = "user_system_on_hold"
RUNNING = "running"
SYSTEM_SUSPENDED = "system_suspended"
USER_SUSPENDED = "user_suspended"
USER_SYSTEM_SUSPENDED = "user_system_suspended"
DONE = "done"
FAILED = "failed"
DELETE_PENDING = "delete_pending"
KILL_PENDING = "kill_pending"
SUBMISSION_PENDING = "submission_pending"
WARNING = "warning"
JOB_STATUS = [NOT_SUBMITTED,
UNDETERMINED,
QUEUED_ACTIVE,
SYSTEM_ON_HOLD,
USER_ON_HOLD,
USER_SYSTEM_ON_HOLD,
RUNNING,
SYSTEM_SUSPENDED,
USER_SUSPENDED,
USER_SYSTEM_SUSPENDED,
DONE,
FAILED,
DELETE_PENDING,
KILL_PENDING,
SUBMISSION_PENDING,
WARNING]
'''
Exit job status:
'''
EXIT_UNDETERMINED = "exit_status_undetermined"
EXIT_ABORTED = "aborted"
EXIT_NOTRUN = "aborted_before_running"
FINISHED_REGULARLY = "finished_regularly"
FINISHED_TERM_SIG = "finished_signal"
FINISHED_UNCLEAR_CONDITIONS = "finished_unclear_condition"
USER_KILLED = "killed_by_user"
JOB_EXIT_STATUS = [EXIT_UNDETERMINED,
EXIT_ABORTED,
FINISHED_REGULARLY,
FINISHED_TERM_SIG,
FINISHED_UNCLEAR_CONDITIONS,
USER_KILLED,
EXIT_NOTRUN]
'''
File transfer status:
'''
FILES_DO_NOT_EXIST = "do not exist"
FILES_ON_CLIENT = "on client side"
FILES_ON_CR = "on computing resource side"
FILES_ON_CLIENT_AND_CR = "on both sides"
TRANSFERING_FROM_CLIENT_TO_CR = "transfering client->cr"
TRANSFERING_FROM_CR_TO_CLIENT = "transfering cr->client"
FILES_UNDER_EDITION = "under edition"
FILE_TRANSFER_STATUS = [FILES_DO_NOT_EXIST,
FILES_ON_CLIENT,
FILES_ON_CR,
FILES_ON_CLIENT_AND_CR,
TRANSFERING_FROM_CLIENT_TO_CR,
TRANSFERING_FROM_CR_TO_CLIENT,
FILES_UNDER_EDITION]
'''
Transfer type
'''
TR_FILE_C_TO_CR = "file transfer form client to cr"
TR_DIR_C_TO_CR = "dir transfer from client to cr"
TR_MFF_C_TO_CR = "multi file format from client to cr"
TR_FILE_CR_TO_C = "file transfer form cr to client"
TR_DIR_CR_TO_C = "dir transfer from cr to client"
TR_MFF_CR_TO_C = "multi file format from cr to client"
TRANSFER_TYPES = [TR_FILE_C_TO_CR,
TR_DIR_C_TO_CR,
TR_MFF_C_TO_CR,
TR_FILE_CR_TO_C,
TR_DIR_CR_TO_C,
TR_MFF_CR_TO_C]
'''
Workflow status:
'''
WORKFLOW_NOT_STARTED = "worklflow_not_started"
WORKFLOW_IN_PROGRESS = "workflow_in_progress"
WORKFLOW_DONE = "workflow_done"
WORKFLOW_STATUS = [WORKFLOW_NOT_STARTED,
WORKFLOW_IN_PROGRESS,
WORKFLOW_DONE,
DELETE_PENDING,
WARNING]
| 28.310924 | 88 | 0.655091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,168 | 0.34669 |
dee4241a76fbf19cf565aab66e0521ce2380cc65 | 250 | py | Python | test/tests/global_and_local.py | kevinxucs/pyston | bdb87c1706ac74a0d15d9bc2bae53798678a5f14 | ["Apache-2.0"] | 1 | 2015-11-06T03:39:51.000Z | 2015-11-06T03:39:51.000Z | test/tests/global_and_local.py | kevinxucs/pyston | bdb87c1706ac74a0d15d9bc2bae53798678a5f14 | ["Apache-2.0"] | null | null | null | test/tests/global_and_local.py | kevinxucs/pyston | bdb87c1706ac74a0d15d9bc2bae53798678a5f14 | ["Apache-2.0"] | null | null | null |
# expected: fail
# - this particular check isn't implemented yet
# I would have expected this to be valid, but cPython and pypy err out saying "name 'x' is local and global"
print "first"
x = 1
def f(x):
global x
print "calling"
f(2)
print x
| 16.666667 | 108 | 0.696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.748 |
dee46fc1a2825aedf140afa6a83cd03a303bce36 | 1,980 | py | Python | lab4_2/helpers/scanner.py | cinnamonbreakfast/flcd | f9168c1965976e9ae9477ee6b163a026f61acb1b | ["MIT"] | null | null | null | lab4_2/helpers/scanner.py | cinnamonbreakfast/flcd | f9168c1965976e9ae9477ee6b163a026f61acb1b | ["MIT"] | null | null | null | lab4_2/helpers/scanner.py | cinnamonbreakfast/flcd | f9168c1965976e9ae9477ee6b163a026f61acb1b | ["MIT"] | null | null | null |
res_words = []
seps = []
ops = []
def load_dom():
with open('data/tokens', 'r') as f:
for i in range(7):
separator = f.readline().strip()
if separator == "_": # Special case [SPACE]
separator = " "
seps.append(separator)
for i in range(15):
ops.append(f.readline().strip())
for i in range(21):
res_words.append(f.readline().strip())
def getStringToken(line, index):
token = ''
quotes = 0
while index < len(line) and quotes < 2:
if line[index] == '\'':
quotes += 1
token += line[index]
index += 1
return token, index
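# A quick example of getStringToken: for line = "x = 'abc' + y" and
# index = 4 it returns ("'abc'", 9), i.e. the quoted literal together
# with the index just past its closing quote.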
def isPartOfOperator(char):
for op in ops:
if char in op:
return True
return False
def getOperatorToken(line, index):
    token = ''
    # if the remainder of the line parses as an integer (e.g. a signed
    # number such as "-5"), consume it whole as a single token
    try:
        int(line[index:])
        token += line[index:]
        index = len(line)
        return token, index
    except ValueError:
        pass
    while index < len(line) and isPartOfOperator(line[index]):
        token += line[index]
        index += 1
    return token, index
def tokenize(line):
token = ''
index = 0
tokens = []
while index < len(line):
if isPartOfOperator(line[index]):
if token:
tokens.append(token)
token, index = getOperatorToken(line, index)
tokens.append(token)
token = ''
elif line[index] == '\'':
if token:
tokens.append(token)
token, index = getStringToken(line, index)
tokens.append(token)
token = ''
elif line[index] in seps:
if token:
tokens.append(token)
token, index = line[index], index + 1
tokens.append(token)
token = ''
else:
token += line[index]
index += 1
if token:
tokens.append(token)
    return tokens
| 22.5 | 74 | 0.491414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.032323
dee4deb771683414d1b0181d259bc1acc86fbf9f | 1,101 | py | Python | fastspider/item/item.py | coco369/fastspider | 464ba47176c005ed97005a79c5c4eee0bf0740b6 | ["MIT"] | 6 | 2021-08-09T01:35:44.000Z | 2022-02-15T08:14:29.000Z | fastspider/item/item.py | coco369/fastspider | 464ba47176c005ed97005a79c5c4eee0bf0740b6 | ["MIT"] | null | null | null | fastspider/item/item.py | coco369/fastspider | 464ba47176c005ed97005a79c5c4eee0bf0740b6 | ["MIT"] | 4 | 2021-08-13T06:41:13.000Z | 2021-12-07T15:53:56.000Z |
# encoding=utf-8
"""
Auth: coco369
Email: [email protected]
CreateTime: 2021/07/30
Desc: fastspider core code, the Item entity
"""
class BaseItemMetaClass(type):
def __new__(cls, name, bases, attrs):
attrs.setdefault("__name__", None)
attrs.setdefault("__table_name__", None)
attrs.setdefault("__update_key__", None)
attrs.setdefault("__unique_key__", None)
return type.__new__(cls, name, bases, attrs)
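# A small sketch of what the metaclass provides: any class created through
# BaseItemMetaClass gets the four dunder attributes above defaulted to None
# when its own body does not set them (DemoItem is a hypothetical example,
# not part of fastspider):
#
#   class DemoItem(metaclass=BaseItemMetaClass):
#       pass
#
#   DemoItem.__table_name__ is None   # -> True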
class Item(metaclass=BaseItemMetaClass):
"""
	Base class built on the metaclass: subclasses directly own the attributes the metaclass defines.
"""
def __init__(self):
pass
	def __setitem__(self, key, value):
		# store on the instance (a class __dict__ is a read-only mappingproxy),
		# so that to_dict() below can see the value
		self.__dict__[key] = value
	def __getitem__(self, key):
		return self.__dict__[key]
@property
def item_name(self):
return self.__class__.__name__
@property
def table_name(self):
return self.__table_name__
@table_name.setter
def table_name(self, name):
self.__table_name__ = name
@property
def to_dict(self):
item_property = {}
for key, values in self.__dict__.items():
if key not in ("__name__", "__table_name__", "__update_key__", "__unique_key__"):
item_property[key] = values
return item_property
| 20.018182 | 84 | 0.728429 | 1,018 | 0.882914 | 0 | 0 | 435 | 0.377277 | 0 | 0 | 316 | 0.274068 |
dee8b0a49fcef498a3468a8ea4df153befa037f5 | 26,370 | py | Python | src/third_party/wiredtiger/test/suite/run.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | ["Apache-2.0"] | null | null | null | src/third_party/wiredtiger/test/suite/run.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | ["Apache-2.0"] | null | null | null | src/third_party/wiredtiger/test/suite/run.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# [TEST_TAGS]
# ignored_file
# [END_TAGS]
#
# run.py
# Command line test runner
#
from __future__ import print_function
import glob, json, os, random, re, sys
if sys.version_info[0] <= 2:
print('WiredTiger requires Python version 3.0 or above')
sys.exit(1)
# Set paths
suitedir = sys.path[0]
wt_disttop = os.path.dirname(os.path.dirname(suitedir))
wt_3rdpartydir = os.path.join(wt_disttop, 'test', '3rdparty')
# Check for a local build that contains the wt utility. First check if the
# supplied an explicit build directory ('WT_BUILDDIR'), then the current
# working directory, and finally in the disttop directory.
# This isn't ideal - if a user has multiple builds in a tree we
# could pick the wrong one. We also need to account for the fact that there
# may be an executable 'wt' file the build directory.
env_builddir = os.getenv('WT_BUILDDIR')
curdir = os.getcwd()
if env_builddir and os.path.isfile(os.path.join(env_builddir, 'wt')):
wt_builddir = env_builddir
elif os.path.isfile(os.path.join(curdir, 'wt')):
wt_builddir = curdir
elif os.path.isfile(os.path.join(curdir, 'wt.exe')):
wt_builddir = curdir
elif os.path.isfile(os.path.join(wt_disttop, 'wt')):
wt_builddir = wt_disttop
elif os.path.isfile(os.path.join(wt_disttop, 'wt.exe')):
wt_builddir = wt_disttop
else:
print('Unable to find useable WiredTiger build')
sys.exit(1)
# Cannot import wiredtiger and supporting utils until we set up paths
# We want our local tree in front of any installed versions of WiredTiger.
# Don't change sys.path[0], it's the dir containing the invoked python script.
sys.path.insert(1, os.path.join(wt_builddir, 'lang', 'python'))
# Append to a colon separated path in the environment
def append_env_path(name, value):
path = os.environ.get(name)
if path == None:
v = value
else:
v = path + ':' + value
os.environ[name] = v
# If we built with libtool, explicitly put its install directory in our library
# search path. This only affects library loading for subprocesses, like 'wt'.
libsdir = os.path.join(wt_builddir, '.libs')
if os.path.isdir(libsdir):
append_env_path('LD_LIBRARY_PATH', libsdir)
if sys.platform == "darwin":
append_env_path('DYLD_LIBRARY_PATH', libsdir)
# Add all 3rd party directories: some have code in subdirectories
for d in os.listdir(wt_3rdpartydir):
for subdir in ('lib', 'python', ''):
if os.path.exists(os.path.join(wt_3rdpartydir, d, subdir)):
sys.path.insert(1, os.path.join(wt_3rdpartydir, d, subdir))
break
# unittest will be imported later, near to when it is needed.
unittest = None
def usage():
print('Usage:\n\
$ cd build\n\
$ python ../test/suite/run.py [ options ] [ tests ]\n\
\n\
Options:\n\
--asan run with an ASAN enabled shared library\n\
-b K/N | --batch K/N run batch K of N, 0 <= K < N. The tests\n\
are split into N batches and the Kth is run.\n\
-C file | --configcreate file create a config file for controlling tests\n\
-c file | --config file use a config file for controlling tests\n\
-D dir | --dir dir use dir rather than WT_TEST.\n\
dir is removed/recreated as a first step.\n\
-d | --debug run with \'pdb\', the python debugger\n\
-n | --dry-run perform a dry-run, listing all scenarios to\n\
be run without executing any.\n\
-g | --gdb all subprocesses (like calls to wt) use gdb\n\
-h | --help show this message\n\
| --hook name[=arg] set up hooks from hook_<name>.py, with optional arg\n\
-j N | --parallel N run all tests in parallel using N processes\n\
-l | --long run the entire test suite\n\
| --noremove do not remove WT_TEST or -D target before run\n\
-p | --preserve preserve output files in WT_TEST/<testname>\n\
-r N | --random-sample N randomly sort scenarios to be run, then\n\
execute every Nth (2<=N<=1000) scenario.\n\
-s N | --scenario N use scenario N (N can be symbolic, number, or\n\
list of numbers and ranges in the form 1,3-5,7)\n\
-t | --timestamp name WT_TEST according to timestamp\n\
-v N | --verbose N set verboseness to N (0<=N<=3, default=1)\n\
-i | --ignore-stdout dont fail on unexpected stdout or stderr\n\
-R | --randomseed run with random seeds for generates random numbers\n\
-S | --seed run with two seeds that generates random numbers, \n\
format "seed1.seed2", seed1 or seed2 can\'t be zero\n\
-z | --zstd run the zstd tests\n\
\n\
Tests:\n\
may be a file name in test/suite: (e.g. test_base01.py)\n\
may be a subsuite name (e.g. \'base\' runs test_base*.py)\n\
\n\
When -C or -c are present, there may not be any tests named.\n\
When -s is present, there must be a test named.\n\
')
# Find an executable of the given name in the execution path.
def which(name):
path = os.getenv('PATH')
for pathdir in path.split(os.path.pathsep):
fname = os.path.join(pathdir, name)
if os.path.exists(fname) and os.access(fname, os.X_OK):
return fname
return None
# Follow a symbolic link, returning the target
def follow_symlinks(pathname):
return os.path.realpath(pathname)
# Find all instances of a filename under a directory
def find(topdir, filename):
results = []
for root, dirs, files in os.walk(topdir, followlinks=True):
if filename in files:
results.append(os.path.join(root, filename))
return results
# Show an environment variable if verbose enough.
def show_env(verbose, envvar):
if verbose >= 2:
print(envvar + "=" + os.getenv(envvar))
# capture the category (AKA 'subsuite') part of a test name,
# e.g. test_util03 -> util
reCatname = re.compile(r"test_([^0-9]+)[0-9]*")
# Look for a list of the form 0-9,11,15-17.
def parse_int_list(str):
# Use a dictionary as the result set to avoid repeated list scans.
# (Only the keys are used; the values are ignored.)
ret = {}
# Divide the input into ranges separated by commas.
for r in str.split(","):
# Split the range we got (if it is one).
bounds = r.split("-")
if len(bounds) == 1 and bounds[0].isdigit():
# It's a single number with no dash.
scenario = int(bounds[0])
ret[scenario] = True
continue
if len(bounds) == 2 and bounds[0].isdigit() and bounds[1].isdigit():
# It's two numbers separated by a dash.
for scenario in range(int(bounds[0]), int(bounds[1]) + 1):
ret[scenario] = True
continue
# It's not valid syntax; give up.
return None
return ret
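# A quick sanity check of parse_int_list (only the keys of the returned
# dict matter; the True values are placeholders):
#
#   >>> parse_int_list("1,3-5,9")
#   {1: True, 3: True, 4: True, 5: True, 9: True}
#   >>> parse_int_list("1,x") is None
#   True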
def restrictScenario(testcases, restrict):
if restrict == '':
return testcases
else:
scenarios = parse_int_list(restrict)
if scenarios is not None:
return [t for t in testcases
if hasattr(t, 'scenario_number') and t.scenario_number in scenarios]
else:
return [t for t in testcases
if hasattr(t, 'scenario_name') and t.scenario_name == restrict]
def addScenarioTests(tests, loader, testname, scenario):
loaded = loader.loadTestsFromName(testname)
tests.addTests(restrictScenario(generate_scenarios(loaded), scenario))
def configRecord(cmap, tup):
"""
Records this tuple in the config. It is marked as None
(appearing as null in json), so it can be easily adjusted
in the output file.
"""
tuplen = len(tup)
pos = 0
for name in tup:
last = (pos == tuplen - 1)
pos += 1
if not name in cmap:
if last:
cmap[name] = {"run":None}
else:
cmap[name] = {"run":None, "sub":{}}
if not last:
cmap = cmap[name]["sub"]
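# For instance, recording the tuple ("util", "test_util01", "UtilTest",
# "test_copy") into an empty map builds this nested skeleton (the names
# here are made-up examples):
#
#   {"util": {"run": None, "sub":
#       {"test_util01": {"run": None, "sub":
#           {"UtilTest": {"run": None, "sub":
#               {"test_copy": {"run": None}}}}}}}}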
def configGet(cmap, tup):
"""
Answers the question, should we do this test, given this config file?
Following the values of the tuple through the map,
returning the first non-null value. If all values are null,
return True (handles tests that may have been added after the
config was generated).
"""
for name in tup:
if not name in cmap:
return True
run = cmap[name]["run"] if "run" in cmap[name] else None
if run != None:
return run
cmap = cmap[name]["sub"] if "sub" in cmap[name] else {}
return True
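# For example, with cmap = {"util": {"run": False, "sub": {...}}}, any
# tuple starting with "util" answers False at the first level; a tuple
# whose names never appear in cmap, or whose "run" entries are all null,
# falls through to the default answer of True.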
def configApplyInner(suites, configmap, configwrite):
newsuite = unittest.TestSuite()
for s in suites:
if type(s) is unittest.TestSuite:
newsuite.addTest(configApplyInner(s, configmap, configwrite))
else:
modname = s.__module__
catname = re.sub(reCatname, r"\1", modname)
classname = s.__class__.__name__
methname = s._testMethodName
tup = (catname, modname, classname, methname)
add = True
if configwrite:
configRecord(configmap, tup)
else:
add = configGet(configmap, tup)
if add:
newsuite.addTest(s)
return newsuite
def configApply(suites, configfilename, configwrite):
configmap = None
if not configwrite:
with open(configfilename, 'r') as f:
line = f.readline()
while line != '\n' and line != '':
line = f.readline()
configmap = json.load(f)
else:
configmap = {}
newsuite = configApplyInner(suites, configmap, configwrite)
if configwrite:
with open(configfilename, 'w') as f:
f.write("""# Configuration file for wiredtiger test/suite/run.py,
# generated with '-C filename' and consumed with '-c filename'.
# This shows the hierarchy of tests, and can be used to rerun with
# a specific subset of tests. The value of "run" controls whether
# a test or subtests will be run:
#
# true turn on a test and all subtests (overriding values beneath)
# false          turn off a test and all subtests (overriding values beneath)
# null           do not affect subtests
#
# If a test does not appear, or is marked as '"run": null' all the way down,
# then the test is run.
#
# The remainder of the file is in JSON format.
# !!! There must be a single blank line following this line!!!
""")
json.dump(configmap, f, sort_keys=True, indent=4)
return newsuite
def testsFromArg(tests, loader, arg, scenario):
# If a group of test is mentioned, do all tests in that group
# e.g. 'run.py base'
groupedfiles = glob.glob(suitedir + os.sep + 'test_' + arg + '*.py')
if len(groupedfiles) > 0:
for file in groupedfiles:
testsFromArg(tests, loader, os.path.basename(file), scenario)
return
# Explicit test class names
if not arg[0].isdigit():
if arg.endswith('.py'):
arg = arg[:-3]
addScenarioTests(tests, loader, arg, scenario)
return
# Deal with ranges
if '-' in arg:
start, end = (int(a) for a in arg.split('-'))
else:
start, end = int(arg), int(arg)
    for t in range(start, end+1):
addScenarioTests(tests, loader, 'test%03d' % t, scenario)
def error(exitval, prefix, msg):
print('*** ERROR: {}: {}'.format(prefix, msg.replace('\n', '\n*** ')))
sys.exit(exitval)
if __name__ == '__main__':
# Turn numbers and ranges into test module names
preserve = timestamp = debug = dryRun = gdbSub = lldbSub = longtest = zstdtest = ignoreStdout = False
removeAtStart = True
asan = False
parallel = 0
random_sample = 0
batchtotal = batchnum = 0
seed = seedw = seedz = 0
configfile = None
configwrite = False
dirarg = None
scenario = ''
verbose = 1
args = sys.argv[1:]
testargs = []
hook_names = []
while len(args) > 0:
arg = args.pop(0)
from unittest import defaultTestLoader as loader
# Command line options
if arg[0] == '-':
option = arg[1:]
if option == '-asan':
asan = True
continue
if option == '-batch' or option == 'b':
if batchtotal != 0 or len(args) == 0:
usage()
sys.exit(2)
# Batch expects an argument that has int slash int.
# For example "-b 4/12"
try:
left, right = args.pop(0).split('/')
batchnum = int(left)
batchtotal = int(right)
except:
print('batch argument should be nnn/nnn')
usage()
sys.exit(2)
if batchtotal <= 0 or batchnum < 0 or batchnum >= batchtotal:
usage()
sys.exit(2)
continue
if option == '-dir' or option == 'D':
if dirarg != None or len(args) == 0:
usage()
sys.exit(2)
dirarg = args.pop(0)
continue
if option == '-debug' or option == 'd':
debug = True
continue
if option == '-dry-run' or option == 'n':
dryRun = True
continue
if option == '-gdb' or option == 'g':
gdbSub = True
continue
if option == '-lldb':
lldbSub = True
continue
if option == '-help' or option == 'h':
usage()
sys.exit(0)
if option == '-hook':
if len(args) == 0:
usage()
sys.exit(2)
hook_names.append(args.pop(0))
continue
if option == '-long' or option == 'l':
longtest = True
continue
if option == '-zstd' or option == 'z':
zstdtest = True
continue
if option == '-noremove':
removeAtStart = False
continue
if option == '-random-sample' or option == 'r':
if len(args) == 0:
usage()
sys.exit(2)
random_sample = int(args.pop(0))
if random_sample < 2 or random_sample > 1000:
usage()
sys.exit(2)
continue
if option == '-parallel' or option == 'j':
if parallel != 0 or len(args) == 0:
usage()
sys.exit(2)
parallel = int(args.pop(0))
continue
if option == '-preserve' or option == 'p':
preserve = True
continue
if option == '-scenario' or option == 's':
if scenario != '' or len(args) == 0:
usage()
sys.exit(2)
scenario = args.pop(0)
continue
if option == '-timestamp' or option == 't':
timestamp = True
continue
if option == '-verbose' or option == 'v':
if len(args) == 0:
usage()
sys.exit(2)
verbose = int(args.pop(0))
if verbose > 3:
verbose = 3
if verbose < 0:
verbose = 0
continue
if option == '--ignore-stdout' or option == 'i':
ignoreStdout = True
continue
if option == '-config' or option == 'c':
if configfile != None or len(args) == 0:
usage()
sys.exit(2)
configfile = args.pop(0)
continue
if option == '-configcreate' or option == 'C':
if configfile != None or len(args) == 0:
usage()
sys.exit(2)
configfile = args.pop(0)
configwrite = True
continue
if option == '-randomseed' or option == 'R':
seedw = random.randint(1, 0xffffffff)
seedz = random.randint(1, 0xffffffff)
continue
if option == '-seed' or option == 'S':
if seed != 0 or len(args) == 0:
usage()
sys.exit(2)
seed = args.pop(0)
[seedw, seedz] = seed.split('.')
if seedw == 0 or seedz == 0:
usage()
sys.exit(2)
continue
print('unknown arg: ' + arg)
usage()
sys.exit(2)
testargs.append(arg)
if asan:
# To run ASAN, we need to ensure these environment variables are set:
# ASAN_SYMBOLIZER_PATH full path to the llvm-symbolizer program
# LD_LIBRARY_PATH includes path with wiredtiger shared object
# LD_PRELOAD includes the ASAN runtime library
#
# Note that LD_LIBRARY_PATH has already been set above. The trouble with
# simply setting these variables in the Python environment is that it's
# too late. LD_LIBRARY_PATH is commonly cached by the shared library
# loader at program startup, and that's already been done before Python
# begins execution. Likewise, any preloading indicated by LD_PRELOAD
# has already been done.
#
# Our solution is to set the variables as appropriate, and then restart
# Python with the same argument list. The shared library loader will
# have everything it needs on the second go round.
#
# Note: If the ASAN stops the program with the error:
# Shadow memory range interleaves with an existing memory mapping.
# ASan cannot proceed correctly.
#
# try rebuilding with the clang options:
# "-mllvm -asan-force-dynamic-shadow=1"
# and make sure that clang is used for all compiles.
#
# We'd like to show this as a message, but there's no good way to
# detect this error from here short of capturing/parsing all output
# from the test run.
ASAN_ENV = "__WT_TEST_SUITE_ASAN" # if set, we've been here before
ASAN_SYMBOLIZER_PROG = "llvm-symbolizer"
ASAN_SYMBOLIZER_ENV = "ASAN_SYMBOLIZER_PATH"
LD_PRELOAD_ENV = "LD_PRELOAD"
SO_FILE_NAME = "libclang_rt.asan-x86_64.so"
if not os.environ.get(ASAN_ENV):
if verbose >= 2:
print('Enabling ASAN environment and rerunning python')
os.environ[ASAN_ENV] = "1"
show_env(verbose, "LD_LIBRARY_PATH")
if not os.environ.get(ASAN_SYMBOLIZER_ENV):
os.environ[ASAN_SYMBOLIZER_ENV] = which(ASAN_SYMBOLIZER_PROG)
if not os.environ.get(ASAN_SYMBOLIZER_ENV):
            error(3, ASAN_SYMBOLIZER_ENV,
                  'symbolizer program not found in PATH')
show_env(verbose, ASAN_SYMBOLIZER_ENV)
if not os.environ.get(LD_PRELOAD_ENV):
symbolizer = follow_symlinks(os.environ[ASAN_SYMBOLIZER_ENV])
bindir = os.path.dirname(symbolizer)
sofiles = []
if os.path.basename(bindir) == 'bin':
libdir = os.path.join(os.path.dirname(bindir), 'lib')
sofiles = find(libdir, SO_FILE_NAME)
if len(sofiles) != 1:
if len(sofiles) == 0:
fmt = 'ASAN shared library file not found.\n' + \
'Set {} to the file location and rerun.'
error(3, SO_FILE_NAME, fmt.format(LD_PRELOAD_ENV))
else:
fmt = 'multiple ASAN shared library files found\n' + \
'under {}, expected just one.\n' + \
'Set {} to the correct file location and rerun.'
error(3, SO_FILE_NAME, fmt.format(libdir, LD_PRELOAD_ENV))
os.environ[LD_PRELOAD_ENV] = sofiles[0]
show_env(verbose, LD_PRELOAD_ENV)
# Restart python!
python = sys.executable
os.execl(python, python, *sys.argv)
elif verbose >= 2:
print('Python restarted for ASAN')
# We don't import wttest until after ASAN environment variables are set.
import wttest
# Use the same version of unittest found by wttest.py
unittest = wttest.unittest
tests = unittest.TestSuite()
from testscenarios.scenarios import generate_scenarios
import wthooks
hookmgr = wthooks.WiredTigerHookManager(hook_names)
# All global variables should be set before any test classes are loaded.
# That way, verbose printing can be done at the class definition level.
wttest.WiredTigerTestCase.globalSetup(preserve, removeAtStart, timestamp, gdbSub, lldbSub,
verbose, wt_builddir, dirarg, longtest, zstdtest,
ignoreStdout, seedw, seedz, hookmgr)
# Without any tests listed as arguments, do discovery
if len(testargs) == 0:
if scenario != '':
sys.stderr.write(
'run.py: specifying a scenario requires a test name\n')
usage()
sys.exit(2)
from discover import defaultTestLoader as loader
suites = loader.discover(suitedir)
# If you have an empty Python file, it comes back as an empty entry in suites
# and then the sort explodes. Drop empty entries first. Note: this converts
# suites to a list, but the sort does that anyway. Also note: there seems to be
# no way to count other than iteration; there's a count method but it also
# returns zero for test files that contain a test class with no test functions,
# and it's not clear that dropping those here is correct.
def isempty(s):
count = 0
for c in s:
count += 1
return (count == 0)
suites = [s for s in suites if not isempty(s)]
suites = sorted(suites, key=lambda c: str(list(c)[0]))
if configfile != None:
suites = configApply(suites, configfile, configwrite)
tests.addTests(restrictScenario(generate_scenarios(suites), ''))
else:
for arg in testargs:
testsFromArg(tests, loader, arg, scenario)
tests = hookmgr.filter_tests(tests)
# Shuffle the tests and create a new suite containing every Nth test from
# the original suite
if random_sample > 0:
random_sample_tests = []
for test in tests:
random_sample_tests.append(test)
random.shuffle(random_sample_tests)
tests = unittest.TestSuite(random_sample_tests[::random_sample])
if debug:
import pdb
pdb.set_trace()
if batchtotal != 0:
# For test batching, we want to split up all the tests evenly, and
# spread out the tests, so each batch contains tests of all kinds. We'd
# like to prioritize the lowest scenario numbers first, so if there's a
# failure, we won't have to do all X thousand of some test's scenarios
# before we see a failure in the next test. To that end, we define a
# sort function that sorts by scenario first, and test name second.
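        # Example: with batchtotal=3, the sorted list below is sliced as
        # all_tests[0::3], all_tests[1::3] and all_tests[2::3], so each
        # batch gets roughly every third scenario of every test.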
hugetests = set()
def get_sort_keys(test):
s = 0
name = test.simpleName()
if hasattr(test, 'scenario_number'):
s = test.scenario_number
if s > 1000:
hugetests.add(name) # warn for too many scenarios
return (s, test.simpleName()) # sort by scenario number first
all_tests = sorted(tests, key = get_sort_keys)
if not longtest:
for name in hugetests:
print("WARNING: huge test " + name + " has > 1000 scenarios.\n" +
"That is only appropriate when using the --long option.\n" +
"The number of scenarios for the test should be pruned")
# At this point we have an ordered list of all the tests.
# Break it into just our batch.
tests = unittest.TestSuite(all_tests[batchnum::batchtotal])
if dryRun:
for line in tests:
print(line)
else:
result = wttest.runsuite(tests, parallel)
sys.exit(0 if result.wasSuccessful() else 1)
sys.exit(0)
| 40.631741 | 105 | 0.573834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11,091 | 0.420592 |
dee9227b4b6629ca39d002a84205390a69b06f7b | 29,997 | py | Python | Code/Minner/SumDialog.py | lizhangjie316/Minner | f6aebd51cef981d726b53db8d62d1b1703fe2649 | ["MIT"] | 1 | 2020-11-05T07:11:33.000Z | 2020-11-05T07:11:33.000Z | Code/Minner0827/ui/SumDialog.py | lizhangjie316/Minner | f6aebd51cef981d726b53db8d62d1b1703fe2649 | ["MIT"] | null | null | null | Code/Minner0827/ui/SumDialog.py | lizhangjie316/Minner | f6aebd51cef981d726b53db8d62d1b1703fe2649 | ["MIT"] | 1 | 2020-11-05T07:19:44.000Z | 2020-11-05T07:19:44.000Z |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SumDialog.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from SumAllTable import DataGridAll
class Sum_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(1214, 675)
Dialog.setStyleSheet("background-color:#555555\n"
"")
self.gridLayout_2 = QtWidgets.QGridLayout(Dialog)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_20 = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_20.sizePolicy().hasHeightForWidth())
self.label_20.setSizePolicy(sizePolicy)
self.label_20.setMinimumSize(QtCore.QSize(0, 30))
self.label_20.setMaximumSize(QtCore.QSize(16777215, 30))
self.label_20.setStyleSheet("QLabel{\n"
" border:none;\n"
" font-size:16px;\n"
" font-weight:400;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,stop:0 #777777,stop:1 #000000);\n"
" color:white;\n"
" }")
self.label_20.setObjectName("label_20")
self.gridLayout_2.addWidget(self.label_20, 0, 0, 1, 2)
self.widget = QtWidgets.QWidget(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setMaximumSize(QtCore.QSize(16777215, 740))
self.widget.setStyleSheet("background-color:#444444")
self.widget.setObjectName("widget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label = QtWidgets.QLabel(self.widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMinimumSize(QtCore.QSize(0, 30))
self.label.setMaximumSize(QtCore.QSize(16777215, 30))
self.label.setStyleSheet("QLabel {\n"
" border-radius: 3px;\n"
" color:white;\n"
" background:#444444;\n"
"}")
self.label.setObjectName("label")
self.verticalLayout_2.addWidget(self.label)
self.widget_2 = QtWidgets.QWidget(self.widget)
self.widget_2.setMinimumSize(QtCore.QSize(0, 250))
self.widget_2.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_2.setObjectName("widget_2")
self.widget_3 = QtWidgets.QWidget(self.widget_2)
self.widget_3.setGeometry(QtCore.QRect(40, 20, 381, 41))
self.widget_3.setObjectName("widget_3")
self.label_2 = QtWidgets.QLabel(self.widget_3)
self.label_2.setGeometry(QtCore.QRect(20, 10, 72, 15))
self.label_2.setStyleSheet("QLabel {\n"
" border-radius: 3px;\n"
" color:white;\n"
"}")
self.label_2.setObjectName("label_2")
self.comboBox = QtWidgets.QComboBox(self.widget_3)
self.comboBox.setGeometry(QtCore.QRect(170, 10, 161, 22))
self.comboBox.setStyleSheet("background-color:#444444;")
self.comboBox.setObjectName("comboBox")
self.widget_4 = QtWidgets.QWidget(self.widget_2)
self.widget_4.setGeometry(QtCore.QRect(40, 70, 381, 41))
self.widget_4.setObjectName("widget_4")
self.label_3 = QtWidgets.QLabel(self.widget_4)
self.label_3.setGeometry(QtCore.QRect(20, 10, 91, 16))
self.label_3.setStyleSheet("QLabel {\n"
" border-radius: 3px;\n"
" color:white;\n"
"}")
self.label_3.setObjectName("label_3")
self.comboBox_2 = QtWidgets.QComboBox(self.widget_4)
self.comboBox_2.setGeometry(QtCore.QRect(170, 10, 161, 22))
self.comboBox_2.setStyleSheet("background-color:#444444;")
self.comboBox_2.setObjectName("comboBox_2")
self.pushButton = QtWidgets.QPushButton(self.widget_2)
self.pushButton.setGeometry(QtCore.QRect(170, 180, 93, 28))
self.pushButton.setStyleSheet("QPushButton{\n"
" border:none;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,stop:0 rgb(96,157,200),stop:1 rgb(0,94,150));\n"
" font-size:15px;\n"
" color:white;\n"
" width:120px;\n"
" height:40px;\n"
" text-align:center;\n"
" border-radius:5px;\n"
" }\n"
"QPushButton:hover{\n"
" color:#0caaff\n"
"}\n"
"QPushButton:pressed{\n"
"background-color: rgb(50, 88, 138)\n"
"}\n"
"QPushButton:disabled{\n"
"color:rgb(172, 172, 172);\n"
"background-color:rgb(93, 93, 93)\n"
"}")
self.pushButton.setObjectName("pushButton")
self.verticalLayout_2.addWidget(self.widget_2)
self.gridLayout_2.addWidget(self.widget, 1, 0, 1, 1)
self.widget_11 = QtWidgets.QWidget(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_11.sizePolicy().hasHeightForWidth())
self.widget_11.setSizePolicy(sizePolicy)
self.widget_11.setMinimumSize(QtCore.QSize(720, 0))
self.widget_11.setMaximumSize(QtCore.QSize(16777215, 740))
self.widget_11.setStyleSheet("background-color:#444444")
self.widget_11.setObjectName("widget_11")
self.gridLayout_8 = QtWidgets.QGridLayout(self.widget_11)
self.gridLayout_8.setObjectName("gridLayout_8")
self.label_63 = QtWidgets.QLabel(self.widget_11)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_63.sizePolicy().hasHeightForWidth())
self.label_63.setSizePolicy(sizePolicy)
self.label_63.setMinimumSize(QtCore.QSize(0, 30))
self.label_63.setMaximumSize(QtCore.QSize(16777215, 30))
self.label_63.setStyleSheet("QLabel {\n"
" border-radius: 3px;\n"
" color:white;\n"
" background:#444444;\n"
"}")
self.label_63.setObjectName("label_63")
self.gridLayout_8.addWidget(self.label_63, 0, 0, 1, 1)
self.widget_15 = QtWidgets.QWidget(self.widget_11)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_15.sizePolicy().hasHeightForWidth())
self.widget_15.setSizePolicy(sizePolicy)
self.widget_15.setMinimumSize(QtCore.QSize(0, 480))
self.widget_15.setStyleSheet("QWidget{\n"
"background-color:#555555\n"
"}\n"
"QPushButton{\n"
" border:none;\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,stop:0 rgb(96,157,200),stop:1 rgb(0,94,150));\n"
" font-size:15px;\n"
" color:white;\n"
" width:120px;\n"
" height:40px;\n"
" text-align:center;\n"
" border-radius:5px;\n"
" }\n"
"QPushButton:hover{\n"
" color:#0caaff\n"
"}\n"
"QPushButton:pressed{\n"
"background-color: rgb(50, 88, 138)\n"
"}\n"
"QPushButton:disabled{\n"
"color:rgb(172, 172, 172);\n"
"background-color:rgb(93, 93, 93)\n"
"}\n"
"\n"
"QLable{\n"
"color:white;\n"
"background:#222222;\n"
"}")
self.widget_15.setObjectName("widget_15")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget_15)
self.verticalLayout.setObjectName("verticalLayout")
self.datagridall=DataGridAll()
self.verticalLayout.addWidget(self.datagridall)
self.gridLayout_8.addWidget(self.widget_15, 1, 0, 1, 1)
self.gridLayout_2.addWidget(self.widget_11, 1, 1, 2, 1)
self.widget_5 = QtWidgets.QWidget(Dialog)
self.widget_5.setStyleSheet("background-color:#444444")
self.widget_5.setObjectName("widget_5")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.widget_5)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.tabWidget = QtWidgets.QTabWidget(self.widget_5)
self.tabWidget.setStyleSheet("QTabWidget{\n"
"background-color:#666666;\n"
"}\n"
"QTabWidget::pane{\n"
"\n"
"border-top: 1px solid;\n"
"\n"
"border-color: #333333;\n"
"\n"
"}\n"
"\n"
"QTabBar::tab {\n"
"min-width:80px;\n"
"min-height:30px;\n"
"color: white;\n"
"background:#666666;\n"
"\n"
"border: 0px solid;\n"
"\n"
"}\n"
"\n"
"QTabBar::tab:selected{\n"
"min-width:85px;\n"
"min-height:30px;\n"
"color: white;\n"
"background:#009DE2;\n"
"border: 0px solid;\n"
"border-bottom: 2px solid;\n"
"}\n"
"")
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.tab)
self.horizontalLayout.setObjectName("horizontalLayout")
self.widget_6 = QtWidgets.QWidget(self.tab)
self.widget_6.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_6.setObjectName("widget_6")
self.gridLayout = QtWidgets.QGridLayout(self.widget_6)
self.gridLayout.setObjectName("gridLayout")
self.label_5 = QtWidgets.QLabel(self.widget_6)
self.label_5.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 1, 0, 1, 1)
self.label_14 = QtWidgets.QLabel(self.widget_6)
self.label_14.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_14.setObjectName("label_14")
self.gridLayout.addWidget(self.label_14, 5, 1, 1, 1)
self.label_12 = QtWidgets.QLabel(self.widget_6)
self.label_12.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_12.setObjectName("label_12")
self.gridLayout.addWidget(self.label_12, 3, 1, 1, 1)
self.label_8 = QtWidgets.QLabel(self.widget_6)
self.label_8.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 4, 0, 1, 1)
self.label_13 = QtWidgets.QLabel(self.widget_6)
self.label_13.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_13.setObjectName("label_13")
self.gridLayout.addWidget(self.label_13, 4, 1, 1, 1)
self.label_11 = QtWidgets.QLabel(self.widget_6)
self.label_11.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_11.setObjectName("label_11")
self.gridLayout.addWidget(self.label_11, 2, 1, 1, 1)
self.label_7 = QtWidgets.QLabel(self.widget_6)
self.label_7.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 3, 0, 1, 1)
self.label_10 = QtWidgets.QLabel(self.widget_6)
self.label_10.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_10.setObjectName("label_10")
self.gridLayout.addWidget(self.label_10, 1, 1, 1, 1)
self.label_4 = QtWidgets.QLabel(self.widget_6)
self.label_4.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 0, 0, 1, 1)
self.label_6 = QtWidgets.QLabel(self.widget_6)
self.label_6.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 2, 0, 1, 1)
self.label_9 = QtWidgets.QLabel(self.widget_6)
self.label_9.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 5, 0, 1, 1)
self.horizontalLayout.addWidget(self.widget_6)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.widget_7 = QtWidgets.QWidget(self.tab_2)
self.widget_7.setGeometry(QtCore.QRect(10, 10, 431, 171))
self.widget_7.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_7.setObjectName("widget_7")
self.gridLayout_3 = QtWidgets.QGridLayout(self.widget_7)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_15 = QtWidgets.QLabel(self.widget_7)
self.label_15.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_15.setObjectName("label_15")
self.gridLayout_3.addWidget(self.label_15, 0, 0, 1, 1)
self.label_16 = QtWidgets.QLabel(self.widget_7)
self.label_16.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_16.setObjectName("label_16")
self.gridLayout_3.addWidget(self.label_16, 1, 0, 1, 1)
self.label_17 = QtWidgets.QLabel(self.widget_7)
self.label_17.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_17.setObjectName("label_17")
self.gridLayout_3.addWidget(self.label_17, 1, 1, 1, 1)
self.label_18 = QtWidgets.QLabel(self.widget_7)
self.label_18.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_18.setObjectName("label_18")
self.gridLayout_3.addWidget(self.label_18, 2, 0, 1, 1)
self.label_19 = QtWidgets.QLabel(self.widget_7)
self.label_19.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_19.setObjectName("label_19")
self.gridLayout_3.addWidget(self.label_19, 2, 1, 1, 1)
self.label_21 = QtWidgets.QLabel(self.widget_7)
self.label_21.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_21.setObjectName("label_21")
self.gridLayout_3.addWidget(self.label_21, 3, 0, 1, 1)
self.label_22 = QtWidgets.QLabel(self.widget_7)
self.label_22.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_22.setObjectName("label_22")
self.gridLayout_3.addWidget(self.label_22, 3, 1, 1, 1)
self.label_23 = QtWidgets.QLabel(self.widget_7)
self.label_23.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_23.setObjectName("label_23")
self.gridLayout_3.addWidget(self.label_23, 4, 0, 1, 1)
self.label_24 = QtWidgets.QLabel(self.widget_7)
self.label_24.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_24.setObjectName("label_24")
self.gridLayout_3.addWidget(self.label_24, 4, 1, 1, 1)
self.label_25 = QtWidgets.QLabel(self.widget_7)
self.label_25.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_25.setObjectName("label_25")
self.gridLayout_3.addWidget(self.label_25, 5, 0, 1, 1)
self.label_26 = QtWidgets.QLabel(self.widget_7)
self.label_26.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_26.setObjectName("label_26")
self.gridLayout_3.addWidget(self.label_26, 5, 1, 1, 1)
self.tabWidget.addTab(self.tab_2, "")
self.tab_5 = QtWidgets.QWidget()
self.tab_5.setObjectName("tab_5")
self.widget_8 = QtWidgets.QWidget(self.tab_5)
self.widget_8.setGeometry(QtCore.QRect(10, 10, 431, 171))
self.widget_8.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_8.setObjectName("widget_8")
self.gridLayout_4 = QtWidgets.QGridLayout(self.widget_8)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_27 = QtWidgets.QLabel(self.widget_8)
self.label_27.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_27.setObjectName("label_27")
self.gridLayout_4.addWidget(self.label_27, 0, 0, 1, 1)
self.label_28 = QtWidgets.QLabel(self.widget_8)
self.label_28.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_28.setObjectName("label_28")
self.gridLayout_4.addWidget(self.label_28, 1, 0, 1, 1)
self.label_29 = QtWidgets.QLabel(self.widget_8)
self.label_29.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_29.setObjectName("label_29")
self.gridLayout_4.addWidget(self.label_29, 1, 1, 1, 1)
self.label_30 = QtWidgets.QLabel(self.widget_8)
self.label_30.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_30.setObjectName("label_30")
self.gridLayout_4.addWidget(self.label_30, 2, 0, 1, 1)
self.label_31 = QtWidgets.QLabel(self.widget_8)
self.label_31.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_31.setObjectName("label_31")
self.gridLayout_4.addWidget(self.label_31, 2, 1, 1, 1)
self.label_32 = QtWidgets.QLabel(self.widget_8)
self.label_32.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_32.setObjectName("label_32")
self.gridLayout_4.addWidget(self.label_32, 3, 0, 1, 1)
self.label_33 = QtWidgets.QLabel(self.widget_8)
self.label_33.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_33.setObjectName("label_33")
self.gridLayout_4.addWidget(self.label_33, 3, 1, 1, 1)
self.label_34 = QtWidgets.QLabel(self.widget_8)
self.label_34.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_34.setObjectName("label_34")
self.gridLayout_4.addWidget(self.label_34, 4, 0, 1, 1)
self.label_35 = QtWidgets.QLabel(self.widget_8)
self.label_35.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_35.setObjectName("label_35")
self.gridLayout_4.addWidget(self.label_35, 4, 1, 1, 1)
self.label_36 = QtWidgets.QLabel(self.widget_8)
self.label_36.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_36.setObjectName("label_36")
self.gridLayout_4.addWidget(self.label_36, 5, 0, 1, 1)
self.label_37 = QtWidgets.QLabel(self.widget_8)
self.label_37.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_37.setObjectName("label_37")
self.gridLayout_4.addWidget(self.label_37, 5, 1, 1, 1)
self.tabWidget.addTab(self.tab_5, "")
self.tab_6 = QtWidgets.QWidget()
self.tab_6.setObjectName("tab_6")
self.widget_9 = QtWidgets.QWidget(self.tab_6)
self.widget_9.setGeometry(QtCore.QRect(10, 10, 431, 171))
self.widget_9.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_9.setObjectName("widget_9")
self.gridLayout_5 = QtWidgets.QGridLayout(self.widget_9)
self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
self.gridLayout_5.setObjectName("gridLayout_5")
self.label_38 = QtWidgets.QLabel(self.widget_9)
self.label_38.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_38.setObjectName("label_38")
self.gridLayout_5.addWidget(self.label_38, 0, 0, 1, 1)
self.label_39 = QtWidgets.QLabel(self.widget_9)
self.label_39.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_39.setObjectName("label_39")
self.gridLayout_5.addWidget(self.label_39, 1, 0, 1, 1)
self.label_40 = QtWidgets.QLabel(self.widget_9)
self.label_40.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_40.setObjectName("label_40")
self.gridLayout_5.addWidget(self.label_40, 1, 1, 1, 1)
self.label_41 = QtWidgets.QLabel(self.widget_9)
self.label_41.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_41.setObjectName("label_41")
self.gridLayout_5.addWidget(self.label_41, 2, 0, 1, 1)
self.label_42 = QtWidgets.QLabel(self.widget_9)
self.label_42.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_42.setObjectName("label_42")
self.gridLayout_5.addWidget(self.label_42, 2, 1, 1, 1)
self.label_43 = QtWidgets.QLabel(self.widget_9)
self.label_43.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_43.setObjectName("label_43")
self.gridLayout_5.addWidget(self.label_43, 3, 0, 1, 1)
self.label_44 = QtWidgets.QLabel(self.widget_9)
self.label_44.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_44.setObjectName("label_44")
self.gridLayout_5.addWidget(self.label_44, 3, 1, 1, 1)
self.label_45 = QtWidgets.QLabel(self.widget_9)
self.label_45.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_45.setObjectName("label_45")
self.gridLayout_5.addWidget(self.label_45, 4, 0, 1, 1)
self.label_46 = QtWidgets.QLabel(self.widget_9)
self.label_46.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_46.setObjectName("label_46")
self.gridLayout_5.addWidget(self.label_46, 4, 1, 1, 1)
self.label_47 = QtWidgets.QLabel(self.widget_9)
self.label_47.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_47.setObjectName("label_47")
self.gridLayout_5.addWidget(self.label_47, 5, 0, 1, 1)
self.label_48 = QtWidgets.QLabel(self.widget_9)
self.label_48.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_48.setObjectName("label_48")
self.gridLayout_5.addWidget(self.label_48, 5, 1, 1, 1)
self.tabWidget.addTab(self.tab_6, "")
self.tab_7 = QtWidgets.QWidget()
self.tab_7.setObjectName("tab_7")
self.widget_10 = QtWidgets.QWidget(self.tab_7)
self.widget_10.setGeometry(QtCore.QRect(10, 10, 431, 171))
self.widget_10.setStyleSheet("background-color: rgb(85, 85, 85);")
self.widget_10.setObjectName("widget_10")
self.gridLayout_6 = QtWidgets.QGridLayout(self.widget_10)
self.gridLayout_6.setContentsMargins(0, 0, 0, 0)
self.gridLayout_6.setObjectName("gridLayout_6")
self.label_49 = QtWidgets.QLabel(self.widget_10)
self.label_49.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_49.setObjectName("label_49")
self.gridLayout_6.addWidget(self.label_49, 0, 0, 1, 1)
self.label_50 = QtWidgets.QLabel(self.widget_10)
self.label_50.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_50.setObjectName("label_50")
self.gridLayout_6.addWidget(self.label_50, 1, 0, 1, 1)
self.label_51 = QtWidgets.QLabel(self.widget_10)
self.label_51.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_51.setObjectName("label_51")
self.gridLayout_6.addWidget(self.label_51, 1, 1, 1, 1)
self.label_52 = QtWidgets.QLabel(self.widget_10)
self.label_52.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_52.setObjectName("label_52")
self.gridLayout_6.addWidget(self.label_52, 2, 0, 1, 1)
self.label_53 = QtWidgets.QLabel(self.widget_10)
self.label_53.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_53.setObjectName("label_53")
self.gridLayout_6.addWidget(self.label_53, 2, 1, 1, 1)
self.label_54 = QtWidgets.QLabel(self.widget_10)
self.label_54.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_54.setObjectName("label_54")
self.gridLayout_6.addWidget(self.label_54, 3, 0, 1, 1)
self.label_55 = QtWidgets.QLabel(self.widget_10)
self.label_55.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_55.setObjectName("label_55")
self.gridLayout_6.addWidget(self.label_55, 3, 1, 1, 1)
self.label_56 = QtWidgets.QLabel(self.widget_10)
self.label_56.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_56.setObjectName("label_56")
self.gridLayout_6.addWidget(self.label_56, 4, 0, 1, 1)
self.label_57 = QtWidgets.QLabel(self.widget_10)
self.label_57.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_57.setObjectName("label_57")
self.gridLayout_6.addWidget(self.label_57, 4, 1, 1, 1)
self.label_58 = QtWidgets.QLabel(self.widget_10)
self.label_58.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_58.setObjectName("label_58")
self.gridLayout_6.addWidget(self.label_58, 5, 0, 1, 1)
self.label_59 = QtWidgets.QLabel(self.widget_10)
self.label_59.setStyleSheet("QLabel {\n"
" color:white;\n"
"}")
self.label_59.setObjectName("label_59")
self.gridLayout_6.addWidget(self.label_59, 5, 1, 1, 1)
self.tabWidget.addTab(self.tab_7, "")
self.verticalLayout_3.addWidget(self.tabWidget)
self.gridLayout_2.addWidget(self.widget_5, 2, 0, 1, 1)
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label_20.setText(_translate("Dialog", " 分割统计"))
self.label.setText(_translate("Dialog", "统计设置"))
self.label_2.setText(_translate("Dialog", "摄像头ID:"))
self.label_3.setText(_translate("Dialog", "摄像头点位:"))
self.pushButton.setText(_translate("Dialog", "统计"))
self.label_63.setText(_translate("Dialog", "统计设置"))
self.label_5.setText(_translate("Dialog", "(1)20+:"))
self.label_14.setText(_translate("Dialog", "%"))
self.label_12.setText(_translate("Dialog", "%"))
self.label_8.setText(_translate("Dialog", "(4)2+:"))
self.label_13.setText(_translate("Dialog", "%"))
self.label_11.setText(_translate("Dialog", "%"))
self.label_7.setText(_translate("Dialog", "(3)5+:"))
self.label_10.setText(_translate("Dialog", "%"))
self.label_4.setText(_translate("Dialog", "平均矿石尺寸占比:"))
self.label_6.setText(_translate("Dialog", "(2)10+:"))
self.label_9.setText(_translate("Dialog", "(5)平均尺寸:"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Dialog", "1分钟"))
self.label_15.setText(_translate("Dialog", "平均矿石尺寸占比:"))
self.label_16.setText(_translate("Dialog", "(1)20+:"))
self.label_17.setText(_translate("Dialog", "%"))
self.label_18.setText(_translate("Dialog", "(2)10+:"))
self.label_19.setText(_translate("Dialog", "%"))
self.label_21.setText(_translate("Dialog", "(3)5+:"))
self.label_22.setText(_translate("Dialog", "%"))
self.label_23.setText(_translate("Dialog", "(4)2+:"))
self.label_24.setText(_translate("Dialog", "%"))
self.label_25.setText(_translate("Dialog", "(5)平均尺寸:"))
self.label_26.setText(_translate("Dialog", "%"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Dialog", "5分钟"))
self.label_27.setText(_translate("Dialog", "平均矿石尺寸占比:"))
self.label_28.setText(_translate("Dialog", "(1)20+:"))
self.label_29.setText(_translate("Dialog", "%"))
self.label_30.setText(_translate("Dialog", "(2)10+:"))
self.label_31.setText(_translate("Dialog", "%"))
self.label_32.setText(_translate("Dialog", "(3)5+:"))
self.label_33.setText(_translate("Dialog", "%"))
self.label_34.setText(_translate("Dialog", "(4)2+:"))
self.label_35.setText(_translate("Dialog", "%"))
self.label_36.setText(_translate("Dialog", "(5)平均尺寸:"))
self.label_37.setText(_translate("Dialog", "%"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate("Dialog", "10分钟"))
self.label_38.setText(_translate("Dialog", "平均矿石尺寸占比:"))
self.label_39.setText(_translate("Dialog", "(1)20+:"))
self.label_40.setText(_translate("Dialog", "%"))
self.label_41.setText(_translate("Dialog", "(2)10+:"))
self.label_42.setText(_translate("Dialog", "%"))
self.label_43.setText(_translate("Dialog", "(3)5+:"))
self.label_44.setText(_translate("Dialog", "%"))
self.label_45.setText(_translate("Dialog", "(4)2+:"))
self.label_46.setText(_translate("Dialog", "%"))
self.label_47.setText(_translate("Dialog", "(5)平均尺寸:"))
self.label_48.setText(_translate("Dialog", "%"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_6), _translate("Dialog", "15分钟"))
self.label_49.setText(_translate("Dialog", "平均矿石尺寸占比:"))
self.label_50.setText(_translate("Dialog", "(1)20+:"))
self.label_51.setText(_translate("Dialog", "%"))
self.label_52.setText(_translate("Dialog", "(2)10+:"))
self.label_53.setText(_translate("Dialog", "%"))
self.label_54.setText(_translate("Dialog", "(3)5+:"))
self.label_55.setText(_translate("Dialog", "%"))
self.label_56.setText(_translate("Dialog", "(4)2+:"))
self.label_57.setText(_translate("Dialog", "%"))
self.label_58.setText(_translate("Dialog", "(5)平均尺寸:"))
self.label_59.setText(_translate("Dialog", "%"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_7), _translate("Dialog", "30分钟"))
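# A minimal launch sketch (added for illustration; it is not part of the generated
# file). pyuic5 output like the setupUi/retranslateUi pair above is normally bound
# to a plain QDialog at runtime. The class name Ui_Dialog is an assumption here,
# since the class header lies outside this excerpt.
#
#   if __name__ == "__main__":
#       import sys
#       app = QtWidgets.QApplication(sys.argv)
#       dialog = QtWidgets.QDialog()
#       ui = Ui_Dialog()
#       ui.setupUi(dialog)
#       dialog.show()
#       sys.exit(app.exec_())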
# --- file: models/t_complex_gateway.py (repo: THM-MA/XSDATA-waypoint, license: MIT) ---
from dataclasses import dataclass, field
from typing import Optional
from .t_expression import TExpression
from .t_gateway import TGateway
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class TComplexGateway(TGateway):
class Meta:
name = "tComplexGateway"
activation_condition: Optional[TExpression] = field(
default=None,
metadata={
"name": "activationCondition",
"type": "Element",
"namespace": "http://www.omg.org/spec/BPMN/20100524/MODEL",
}
)
default: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
}
)
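# Usage sketch (hypothetical values; TExpression is assumed to be another
# xsdata-generated dataclass that can be constructed without arguments):
#
#   gateway = TComplexGateway(activation_condition=TExpression(), default="flow_1")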
# --- file: fixture/orm.py (repo: NovikovMA/python_training_mantis, license: Apache-2.0) ---
# -*- coding: utf-8 -*-
__author__ = 'M.Novikov'
from model.project import Project        # Mantis projects
from pony.orm import *                   # Database access
from pymysql.converters import decoders  # Data type conversion
class ORMFixture:
    db = Database()
    # Structure of the objects mapped to the database
    class ORMProject(db.Entity):                           # A Mantis project
        _table_ = "mantis_project_table"                   # Name of the mapped database table
        id = PrimaryKey(int, column="id")                  # Identifier
        name = Optional(str, column="name")                # Name
        description = Optional(str, column="description")  # Description
    # Create the database fixture
    def __init__(self, host, name, user, password):
        self.db.bind("mysql", host=host, database=name, user=user, password=password, conv=decoders)  # Connect to the database, allowing conversion of data into known formats
        self.db.generate_mapping()  # Map the entity classes onto the database tables
        #sql_debug(True)  # Print the generated SQL queries to the console
    # Tear down the database fixture
    def destroy(self):
        pass
    # Convert ORMProject objects (internal format) into model objects (shared format)
    def convert_project_to_model(self, projects):
        def convert(project):  # Convert a single ORM Mantis project object into the Mantis project model format
            return Project(id=project.id, name=project.name, description=project.description)
        return list(map(convert, projects))  # The converted list of projects
    # Get the list of projects
    @db_session  # Run the function within a single database session
    def get_project_list(self):
        return self.convert_project_to_model(select(p for p in ORMFixture.ORMProject))  # Query the database for the list of projects
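# A minimal usage sketch (connection parameters are hypothetical):
#
#   fixture = ORMFixture(host="127.0.0.1", name="bugtracker", user="root", password="secret")
#   for project in fixture.get_project_list():
#       print(project.id, project.name, project.description)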
# --- file: volDB/migrations/0001_initial.py (repo: leg2015/CSCapstone19Volunteers, license: MIT) ---
# Generated by Django 2.1.7 on 2019-02-23 18:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('addressID', models.AutoField(db_column='addressID', primary_key=True, serialize=False)),
('street', models.CharField(db_column='street', max_length=100)),
('city', models.CharField(db_column='city', max_length=20)),
('state', models.CharField(db_column='state', max_length=20)),
('zipCode', models.IntegerField(db_column='zipCode')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('category', models.CharField(db_column='category', max_length=20)),
('categoryID', models.AutoField(db_column='categoryID', primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Email',
fields=[
('email', models.EmailField(db_column='email', max_length=254)),
('emailID', models.AutoField(db_column='emailID', primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Location',
fields=[
('location', models.CharField(db_column='location', max_length=20)),
('locationID', models.AutoField(db_column='locationID', primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Organization',
fields=[
('name', models.CharField(db_column='orgName', max_length=100)),
('orgID', models.AutoField(db_column='orgID', primary_key=True, serialize=False)),
('mission', models.TextField(db_column='missionStatement')),
('opportunities', models.TextField(db_column='volOpportunities')),
('website', models.URLField(db_column='volURL')),
('notes', models.TextField(db_column='notes')),
],
),
migrations.CreateModel(
name='Phone',
fields=[
('phoneID', models.AutoField(db_column='phoneID', primary_key=True, serialize=False)),
('phone', models.CharField(db_column='phone', max_length=10)),
('orgid', models.ForeignKey(db_column='orgID', on_delete=django.db.models.deletion.DO_NOTHING, to='volDB.Organization')),
],
),
migrations.AddField(
model_name='location',
name='orgID',
field=models.ForeignKey(db_column='orgID', on_delete=django.db.models.deletion.DO_NOTHING, to='volDB.Organization'),
),
migrations.AddField(
model_name='email',
name='orgID',
field=models.ForeignKey(db_column='orgID', on_delete=django.db.models.deletion.DO_NOTHING, to='volDB.Organization'),
),
migrations.AddField(
model_name='category',
name='orgID',
field=models.ForeignKey(db_column='orgID', on_delete=django.db.models.deletion.DO_NOTHING, to='volDB.Organization'),
),
migrations.AddField(
model_name='address',
name='orgID',
field=models.ForeignKey(db_column='orgID', on_delete=django.db.models.deletion.DO_NOTHING, to='volDB.Organization'),
),
]
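# Applied like any generated migration (standard Django CLI; the app label volDB
# is taken from the ForeignKey targets above): python manage.py migrate volDB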
# --- file: crawler/page_fetcher.py (repo: AssisRaphael/PageColector, license: MIT) ---
from bs4 import BeautifulSoup
from threading import Thread
import requests
from urllib.parse import urlparse,urljoin
from urllib import parse
class PageFetcher(Thread):
def __init__(self, obj_scheduler):
self.obj_scheduler = obj_scheduler
def request_url(self,obj_url):
"""
Faz a requisição e retorna o conteúdo em binário da URL passada como parametro
obj_url: Instancia da classe ParseResult com a URL a ser requisitada.
"""
url = parse.urlunparse(obj_url)
if "http" not in url:
url = "http:" + url
        # Send the user agent with the request; setting it on the response object
        # after the call (as the original did) has no effect on the request.
        response = requests.get(url, headers={'User-Agent': self.obj_scheduler.str_usr_agent})
        if response.headers.get('content-type', '').find('text/html') == -1:
return None
return response.content
def discover_links(self,obj_url,int_depth,bin_str_content):
"""
Retorna os links do conteúdo bin_str_content da página já requisitada obj_url
"""
soup = BeautifulSoup(bin_str_content,features="lxml")
for link in soup.select('a'):
try:
obj_new_url = urlparse(link['href'])
            except (KeyError, ValueError):  # anchor without an href, or a malformed URL
                continue
if obj_new_url.netloc == '':
if "http" in obj_new_url.path:
obj_new_url = urlparse(obj_new_url.path)
else:
obj_new_url = urlparse(urljoin(parse.urlunparse(obj_url), parse.urlunparse(obj_new_url)))
# print('rrr: ', obj_new_url.netloc+obj_new_url.path)
if obj_new_url.netloc != obj_url.netloc:
int_new_depth = 0
else:
int_new_depth = int_depth + 1
yield obj_new_url,int_new_depth
def crawl_new_url(self):
"""
Coleta uma nova URL, obtendo-a do escalonador
"""
obj_url, int_depth = self.obj_scheduler.get_next_url()
bin_str_content = self.request_url(obj_url)
if bin_str_content is not None:
#print(obj_url)
multi_obj = self.discover_links(obj_url, int_depth, bin_str_content)
while True:
try:
url, depth = next(multi_obj)
#print(url)
print(parse.urlunparse(url))
self.obj_scheduler.add_new_page(url, depth)
except StopIteration:
break
def run(self):
"""
Executa coleta enquanto houver páginas a serem coletadas
"""
while not self.obj_scheduler.has_finished_crawl():
self.crawl_new_url()
# --- file: app/celery_worker.py (repo: cjarv/celery_dev, license: MIT) ---
from factories.celery import create_celery
from factories.application import create_application
celery = create_celery(create_application())
# --- file: simublocks/dialog/importCodeDialog.py (repo: bentoavb/simublocks, license: MIT) ---
# MIT License
#
# Copyright (c) 2020 Anderson Vitor Bento
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tkinter as tk
from tkinter.scrolledtext import ScrolledText
from simublocks.dialog.dialogTools import dialogTools
class importCodeDialog(object):
def __init__(self, code):
root = self.root = tk.Tk()
root.resizable(0,0)
root.title("Import Code and Packages")
self.inputCode = ScrolledText(root, height=5,width=50)
self.inputCode.insert(tk.END, code)
self.inputCode.grid(row=0, column=0,columnspan=2)
tk.Button(root, width=11, text="Save", command=self.save_button).grid(row=1, column=0)
tk.Button(root, width=11, text="Cancel", command=self.cancel_button).grid(row=1, column=1)
dialogTools.center(root)
def save_button(self):
self.returning = {
'code': self.inputCode.get(1.0, tk.END),
'status': 'ok'
}
self.root.quit()
def cancel_button(self):
self.returning = {
'status': 'cancel'
}
self.root.quit() | 38.851852 | 98 | 0.704957 | 860 | 0.409914 | 0 | 0 | 0 | 0 | 0 | 0 | 1,168 | 0.556721 |
def0f90a3cae5abac2b0927d079c001b98668c18 | 1,365 | py | Python | python-socket-mult-thread/server.py | Programmer-Edilson/min-projects | 62dfa55e8875b3d0d3e6cc9cb504c3f3f7da064e | [
"MIT"
]
| 1 | 2021-02-28T17:33:59.000Z | 2021-02-28T17:33:59.000Z | python-socket-mult-thread/server.py | Programmer-Edilson/min-projects | 62dfa55e8875b3d0d3e6cc9cb504c3f3f7da064e | [
"MIT"
]
| null | null | null | python-socket-mult-thread/server.py | Programmer-Edilson/min-projects | 62dfa55e8875b3d0d3e6cc9cb504c3f3f7da064e | [
"MIT"
]
| null | null | null | import socket
import os
from _thread import start_new_thread
ip = "localhost"
port = 1234
global number_of_connections
number_of_connections = 0
server = socket.socket()
server.bind((ip, port))
server.listen(5)
def handle_client(socket_client):
global number_of_connections
msg = "You are connected!"
socket_client.send(msg.encode())
name = socket_client.recv(1024)
print("[#] client name:", name.decode())
while True:
        request = socket_client.recv(1024)
        if not request:  # an empty read means the client disconnected
            print("[#] {} disconnected".format(name.decode()))
            number_of_connections -= 1
            print("[#] clients connected : ", number_of_connections)
            break
        print("[{}] : ".format(name.decode()), request.decode())
        msg = request.decode()
        msg = msg.upper()
        socket_client.send(msg.encode())
socket_client.close()
def engine():
global number_of_connections
print("[#] waiting for clients...")
while True:
client, address = server.accept()
connection = (address[0], address[1])
print("[#] new connection {} : {}".format(address[0], address[1]))
start_new_thread(handle_client, (client, ))
number_of_connections += 1
print("[#] clients connected : ", number_of_connections)
server.close()
engine()
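# A matching client sketch (an assumption read off the protocol above: receive the
# greeting, send a name once, then send messages and read back the uppercased
# reply). It is not part of this server file.
#
#   import socket
#   client = socket.socket()
#   client.connect(("localhost", 1234))
#   print(client.recv(1024).decode())  # "You are connected!"
#   client.send("alice".encode())      # handle_client expects the name first
#   client.send("hello".encode())
#   print(client.recv(1024).decode())  # "HELLO"
#   client.close()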
# --- file: delete_unuse_callkit.py (repo: eyolo2021/ios-ui-sdk-set, license: MIT) ---
# coding=utf-8
import os
delete_files=["RCCall.mm","RCCXCall.m"]
start_key = "RCCallKit_Delete_Start"
end_key = "RCCallKit_Delete_end"
def delete_used(file_path):
print(file_path)
f = open(file_path,"r")
lines = f.readlines()
f.close()
# print(lines)
result = []
flag = False
for l in lines:
if start_key in l:
flag = True
elif end_key in l:
flag = False
if flag is True:
continue
result.append(l)
f = open(file_path,"w")
f.writelines(result)
f.close()
for root,dirs,files in os.walk("./CallKit"):
for file in files:
if file in delete_files:
print("will delete %s" % file)
delete_used(os.path.join(root,file))
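# A small self-check sketch of the marker semantics implemented by delete_used
# above (added for illustration; the original script never calls it):
def _marker_demo():
    # The start-marker line is dropped together with everything after it, up to
    # (but not including) the end-marker line, which is kept because the flag is
    # cleared before the append check.
    sample = [
        "keep\n",
        "// RCCallKit_Delete_Start\n",
        "drop\n",
        "// RCCallKit_Delete_end\n",
        "keep too\n",
    ]
    flag, kept = False, []
    for l in sample:
        if start_key in l:
            flag = True
        elif end_key in l:
            flag = False
        if flag is True:
            continue
        kept.append(l)
    assert kept == ["keep\n", "// RCCallKit_Delete_end\n", "keep too\n"]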
# --- file: global_setting.py (repo: aixiwang/mqtt_datajs, license: BSD-3-Clause) ---
#!/usr/bin/python
#-*- coding: utf-8 -*-
#---------------------------------------
#
# Copyright(c) Aixi Wang 2014-2015
#---------------------------------------
# v1 -- initial version
#---------------------------------------
#-----------------------
# mail
#-----------------------
global mail_sender,mail_smtpserver,mail_username,mail_password
global mail_enable,mail_to
mail_to = '[email protected]'
mail_username = '[email protected]'
mail_password = 'xxx'
mail_smtpserver = 'xxx.xxx.xxx'
mail_sender = '[email protected]'
mail_enable = 1
# --- file: Chatbot_Rest/urls.py (repo: chenpocufa/Chatbot_CN, license: Apache-2.0) ---
# -*- coding: utf-8 -*-
"""
@author:charlesXu
@file: urls.py
@desc: 接口url
@time: 2019/05/10
"""
# ===============
#
# apis 下面的路由
#
# ===============
from django.urls import path
from intent_rest_controller import intent_controller
from entity_extraction_controller import entity_ext_controller
from bot_controller import get_chat_msg # 聊天
from time_convert_server import time_convert # 时间转换器
urlpatterns = [
path('entity', entity_ext_controller), # 实体抽取
path('intent', intent_controller), # 意图识别
path('chat', get_chat_msg), # chatbot接口
path('time_convert', time_convert) # 时间转换器
]
# --- file: tests/frameworks/test_wsgi.py (repo: tirkarthi/python-sensor, license: MIT) ---
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
from __future__ import absolute_import
import time
import urllib3
import unittest
import tests.apps.flask_app
from ..helpers import testenv
from instana.singletons import agent, tracer
class TestWSGI(unittest.TestCase):
def setUp(self):
""" Clear all spans before a test run """
self.http = urllib3.PoolManager()
self.recorder = tracer.recorder
self.recorder.clear_spans()
time.sleep(0.1)
def tearDown(self):
""" Do nothing for now """
return None
def test_vanilla_requests(self):
response = self.http.request('GET', testenv["wsgi_server"] + '/')
spans = self.recorder.queued_spans()
self.assertEqual(1, len(spans))
self.assertIsNone(tracer.active_span)
self.assertEqual(response.status, 200)
def test_get_request(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', testenv["wsgi_server"] + '/')
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
self.assertIsNone(tracer.active_span)
wsgi_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert response
self.assertEqual(200, response.status)
assert 'X-INSTANA-T' in response.headers
assert(int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(response.headers['X-INSTANA-T'], wsgi_span.t)
assert 'X-INSTANA-S' in response.headers
assert(int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(response.headers['X-INSTANA-S'], wsgi_span.s)
assert 'X-INSTANA-L' in response.headers
self.assertEqual(response.headers['X-INSTANA-L'], '1')
assert 'Server-Timing' in response.headers
server_timing_value = "intid;desc=%s" % wsgi_span.t
self.assertEqual(response.headers['Server-Timing'], server_timing_value)
# Same traceId
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, wsgi_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(wsgi_span.p, urllib3_span.s)
self.assertIsNone(wsgi_span.sy)
self.assertIsNone(urllib3_span.sy)
self.assertIsNone(test_span.sy)
# Error logging
self.assertIsNone(test_span.ec)
self.assertIsNone(urllib3_span.ec)
self.assertIsNone(wsgi_span.ec)
# wsgi
self.assertEqual("wsgi", wsgi_span.n)
self.assertEqual('127.0.0.1:' + str(testenv['wsgi_port']), wsgi_span.data["http"]["host"])
self.assertEqual('/', wsgi_span.data["http"]["url"])
self.assertEqual('GET', wsgi_span.data["http"]["method"])
self.assertEqual(200, wsgi_span.data["http"]["status"])
self.assertIsNone(wsgi_span.data["http"]["error"])
self.assertIsNone(wsgi_span.stack)
def test_synthetic_request(self):
headers = {
'X-INSTANA-SYNTHETIC': '1'
}
with tracer.start_active_span('test'):
response = self.http.request('GET', testenv["wsgi_server"] + '/', headers=headers)
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
self.assertIsNone(tracer.active_span)
wsgi_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
self.assertTrue(wsgi_span.sy)
self.assertIsNone(urllib3_span.sy)
self.assertIsNone(test_span.sy)
def test_complex_request(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', testenv["wsgi_server"] + '/complex')
spans = self.recorder.queued_spans()
self.assertEqual(5, len(spans))
self.assertIsNone(tracer.active_span)
spacedust_span = spans[0]
asteroid_span = spans[1]
wsgi_span = spans[2]
urllib3_span = spans[3]
test_span = spans[4]
assert response
self.assertEqual(200, response.status)
assert 'X-INSTANA-T' in response.headers
assert(int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(response.headers['X-INSTANA-T'], wsgi_span.t)
assert 'X-INSTANA-S' in response.headers
assert(int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(response.headers['X-INSTANA-S'], wsgi_span.s)
assert 'X-INSTANA-L' in response.headers
self.assertEqual(response.headers['X-INSTANA-L'], '1')
assert 'Server-Timing' in response.headers
server_timing_value = "intid;desc=%s" % wsgi_span.t
self.assertEqual(response.headers['Server-Timing'], server_timing_value)
# Same traceId
trace_id = test_span.t
self.assertEqual(trace_id, urllib3_span.t)
self.assertEqual(trace_id, wsgi_span.t)
self.assertEqual(trace_id, asteroid_span.t)
self.assertEqual(trace_id, spacedust_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(wsgi_span.p, urllib3_span.s)
self.assertEqual(asteroid_span.p, wsgi_span.s)
self.assertEqual(spacedust_span.p, asteroid_span.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertIsNone(urllib3_span.ec)
self.assertIsNone(wsgi_span.ec)
self.assertIsNone(asteroid_span.ec)
self.assertIsNone(spacedust_span.ec)
# wsgi
self.assertEqual("wsgi", wsgi_span.n)
self.assertEqual('127.0.0.1:' + str(testenv['wsgi_port']), wsgi_span.data["http"]["host"])
self.assertEqual('/complex', wsgi_span.data["http"]["url"])
self.assertEqual('GET', wsgi_span.data["http"]["method"])
self.assertEqual(200, wsgi_span.data["http"]["status"])
self.assertIsNone(wsgi_span.data["http"]["error"])
self.assertIsNone(wsgi_span.stack)
def test_custom_header_capture(self):
# Hack together a manual custom headers list
agent.options.extra_http_headers = [u'X-Capture-This', u'X-Capture-That']
request_headers = {}
request_headers['X-Capture-This'] = 'this'
request_headers['X-Capture-That'] = 'that'
with tracer.start_active_span('test'):
response = self.http.request('GET', testenv["wsgi_server"] + '/', headers=request_headers)
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
self.assertIsNone(tracer.active_span)
wsgi_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert response
self.assertEqual(200, response.status)
assert 'X-INSTANA-T' in response.headers
assert(int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(response.headers['X-INSTANA-T'], wsgi_span.t)
assert 'X-INSTANA-S' in response.headers
assert(int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(response.headers['X-INSTANA-S'], wsgi_span.s)
assert 'X-INSTANA-L' in response.headers
self.assertEqual(response.headers['X-INSTANA-L'], '1')
assert 'Server-Timing' in response.headers
server_timing_value = "intid;desc=%s" % wsgi_span.t
self.assertEqual(response.headers['Server-Timing'], server_timing_value)
# Same traceId
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, wsgi_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(wsgi_span.p, urllib3_span.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertIsNone(urllib3_span.ec)
self.assertIsNone(wsgi_span.ec)
# wsgi
self.assertEqual("wsgi", wsgi_span.n)
self.assertEqual('127.0.0.1:' + str(testenv['wsgi_port']), wsgi_span.data["http"]["host"])
self.assertEqual('/', wsgi_span.data["http"]["url"])
self.assertEqual('GET', wsgi_span.data["http"]["method"])
self.assertEqual(200, wsgi_span.data["http"]["status"])
self.assertIsNone(wsgi_span.data["http"]["error"])
self.assertIsNone(wsgi_span.stack)
assert "X-Capture-This" in wsgi_span.data["http"]["header"]
self.assertEqual("this", wsgi_span.data["http"]["header"]["X-Capture-This"])
assert "X-Capture-That" in wsgi_span.data["http"]["header"]
self.assertEqual("that", wsgi_span.data["http"]["header"]["X-Capture-That"])
def test_secret_scrubbing(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', testenv["wsgi_server"] + '/?secret=shhh')
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
self.assertIsNone(tracer.active_span)
wsgi_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert response
self.assertEqual(200, response.status)
assert 'X-INSTANA-T' in response.headers
assert(int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(response.headers['X-INSTANA-T'], wsgi_span.t)
assert 'X-INSTANA-S' in response.headers
assert(int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(response.headers['X-INSTANA-S'], wsgi_span.s)
assert 'X-INSTANA-L' in response.headers
self.assertEqual(response.headers['X-INSTANA-L'], '1')
assert 'Server-Timing' in response.headers
server_timing_value = "intid;desc=%s" % wsgi_span.t
self.assertEqual(response.headers['Server-Timing'], server_timing_value)
# Same traceId
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, wsgi_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(wsgi_span.p, urllib3_span.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertIsNone(urllib3_span.ec)
self.assertIsNone(wsgi_span.ec)
# wsgi
self.assertEqual("wsgi", wsgi_span.n)
self.assertEqual('127.0.0.1:' + str(testenv['wsgi_port']), wsgi_span.data["http"]["host"])
self.assertEqual('/', wsgi_span.data["http"]["url"])
self.assertEqual('secret=<redacted>', wsgi_span.data["http"]["params"])
self.assertEqual('GET', wsgi_span.data["http"]["method"])
self.assertEqual(200, wsgi_span.data["http"]["status"])
self.assertIsNone(wsgi_span.data["http"]["error"])
self.assertIsNone(wsgi_span.stack)
def test_with_incoming_context(self):
request_headers = dict()
request_headers['X-INSTANA-T'] = '0000000000000001'
request_headers['X-INSTANA-S'] = '0000000000000001'
response = self.http.request('GET', testenv["wsgi_server"] + '/', headers=request_headers)
assert response
self.assertEqual(200, response.status)
spans = self.recorder.queued_spans()
self.assertEqual(1, len(spans))
wsgi_span = spans[0]
self.assertEqual(wsgi_span.t, '0000000000000001')
self.assertEqual(wsgi_span.p, '0000000000000001')
assert 'X-INSTANA-T' in response.headers
assert(int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(response.headers['X-INSTANA-T'], wsgi_span.t)
assert 'X-INSTANA-S' in response.headers
assert(int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(response.headers['X-INSTANA-S'], wsgi_span.s)
assert 'X-INSTANA-L' in response.headers
self.assertEqual(response.headers['X-INSTANA-L'], '1')
assert 'Server-Timing' in response.headers
server_timing_value = "intid;desc=%s" % wsgi_span.t
self.assertEqual(response.headers['Server-Timing'], server_timing_value)
def test_with_incoming_mixed_case_context(self):
request_headers = dict()
request_headers['X-InSTANa-T'] = '0000000000000001'
request_headers['X-instana-S'] = '0000000000000001'
response = self.http.request('GET', testenv["wsgi_server"] + '/', headers=request_headers)
assert response
self.assertEqual(200, response.status)
spans = self.recorder.queued_spans()
self.assertEqual(1, len(spans))
wsgi_span = spans[0]
self.assertEqual(wsgi_span.t, '0000000000000001')
self.assertEqual(wsgi_span.p, '0000000000000001')
assert 'X-INSTANA-T' in response.headers
assert(int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(response.headers['X-INSTANA-T'], wsgi_span.t)
assert 'X-INSTANA-S' in response.headers
assert(int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(response.headers['X-INSTANA-S'], wsgi_span.s)
assert 'X-INSTANA-L' in response.headers
self.assertEqual(response.headers['X-INSTANA-L'], '1')
assert 'Server-Timing' in response.headers
server_timing_value = "intid;desc=%s" % wsgi_span.t
self.assertEqual(response.headers['Server-Timing'], server_timing_value)
def test_response_headers(self):
with tracer.start_active_span('test'):
response = self.http.request('GET', testenv["wsgi_server"] + '/')
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
self.assertIsNone(tracer.active_span)
wsgi_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert response
self.assertEqual(200, response.status)
assert 'X-INSTANA-T' in response.headers
assert(int(response.headers['X-INSTANA-T'], 16))
self.assertEqual(response.headers['X-INSTANA-T'], wsgi_span.t)
assert 'X-INSTANA-S' in response.headers
assert(int(response.headers['X-INSTANA-S'], 16))
self.assertEqual(response.headers['X-INSTANA-S'], wsgi_span.s)
assert 'X-INSTANA-L' in response.headers
self.assertEqual(response.headers['X-INSTANA-L'], '1')
assert 'Server-Timing' in response.headers
server_timing_value = "intid;desc=%s" % wsgi_span.t
self.assertEqual(response.headers['Server-Timing'], server_timing_value)
# --- file: api/yolo_minimal/utils.py (repo: simonsmh/www, license: MIT) ---
import math
import os
import random
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
def xyxy2xywh(x):
# Transform box coordinates from [x1, y1, x2, y2] (where xy1=top-left, xy2=bottom-right) to [x, y, w, h]
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Transform box coordinates from [x, y, w, h] to [x1, y1, x2, y2] (where xy1=top-left, xy2=bottom-right)
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
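# Round-trip sanity sketch (illustrative): the two converters above invert each other.
# >>> _b = np.array([[50., 50., 20., 10.]])  # one box: center (50, 50), 20 wide, 10 tall
# >>> np.allclose(xyxy2xywh(xywh2xyxy(_b)), _b)
# True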
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
gain = max(img1_shape) / max(img0_shape) # gain = old / new
pad = (
(img1_shape[1] - img0_shape[1] * gain) / 2,
(img1_shape[0] - img0_shape[0] * gain) / 2,
) # wh padding
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
coords[:, 2] -= coords[:, 0] # xyxy2xywh
coords[:, 3] -= coords[:, 1]
return coords
def clip_coords(boxes, img_shape):
# Clip bounding xyxy bounding boxes to image shape (height, width)
boxes[:, 0].clamp_(0, img_shape[1]) # x1
boxes[:, 1].clamp_(0, img_shape[0]) # y1
boxes[:, 2].clamp_(0, img_shape[1]) # x2
boxes[:, 3].clamp_(0, img_shape[0]) # y2
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.t()
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * (
torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)
).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
union = (w1 * h1 + 1e-16) + w2 * h2 - inter
iou = inter / union # iou
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(
b1_x1, b2_x1
) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + 1e-16 # convex area
return iou - (c_area - union) / c_area # GIoU
if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
# convex diagonal squared
c2 = cw ** 2 + ch ** 2 + 1e-16
# centerpoint distance squared
rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + (
(b2_y1 + b2_y2) - (b1_y1 + b1_y2)
) ** 2 / 4
if DIoU:
return iou - rho2 / c2 # DIoU
elif (
CIoU
): # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(
torch.atan(w2 / h2) - torch.atan(w1 / h1), 2
)
with torch.no_grad():
alpha = v / (1 - iou + v)
return iou - (rho2 / c2 + v * alpha) # CIoU
return iou
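# Usage sketch: box1 is a single xyxy box, box2 is nx4; identical boxes give IoU 1.
# >>> _b = torch.tensor([0., 0., 10., 10.])
# >>> float(bbox_iou(_b, _b.unsqueeze(0)))  # 1.0 (up to the 1e-16 stabiliser)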
def box_iou(box1, box2):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
box1 (Tensor[N, 4])
box2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.t())
area2 = box_area(box2.t())
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
inter = (
(
torch.min(box1[:, None, 2:], box2[:, 2:])
- torch.max(box1[:, None, :2], box2[:, :2])
)
.clamp(0)
.prod(2)
)
return inter / (
area1[:, None] + area2 - inter
) # iou = inter / (area1 + area2 - inter)
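# Doctest-style sketch: two disjoint unit boxes have zero overlap.
# >>> box_iou(torch.tensor([[0., 0., 1., 1.]]), torch.tensor([[2., 2., 3., 3.]]))
# tensor([[0.]])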
def wh_iou(wh1, wh2):
# Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
wh1 = wh1[:, None] # [N,1,2]
wh2 = wh2[None] # [1,M,2]
inter = torch.min(wh1, wh2).prod(2) # [N,M]
return inter / (
wh1.prod(2) + wh2.prod(2) - inter
) # iou = inter / (area1 + area2 - inter)
def non_max_suppression(
prediction,
conf_thres=0.1,
iou_thres=0.6,
multi_label=True,
classes=None,
agnostic=False,
):
"""
Performs Non-Maximum Suppression on inference results
Returns detections with shape:
nx6 (x1, y1, x2, y2, conf, cls)
"""
# Box constraints
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
method = "merge"
nc = prediction[0].shape[1] - 5 # number of classes
multi_label &= nc > 1 # multiple labels per box
output = [None] * len(prediction)
for xi, x in enumerate(prediction): # image index, image inference
# Apply conf constraint
x = x[x[:, 4] > conf_thres]
# Apply width-height constraint
x = x[((x[:, 2:4] > min_wh) & (x[:, 2:4] < max_wh)).all(1)]
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[..., 5:] *= x[..., 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero().t()
x = torch.cat((box[i], x[i, j + 5].unsqueeze(1), j.float().unsqueeze(1)), 1)
else: # best class only
conf, j = x[:, 5:].max(1)
x = torch.cat((box, conf.unsqueeze(1), j.float().unsqueeze(1)), 1)
# Filter by class
if classes:
x = x[(j.view(-1, 1) == torch.tensor(classes, device=j.device)).any(1)]
# Apply finite constraint
if not torch.isfinite(x).all():
x = x[torch.isfinite(x).all(1)]
# If none remain process next image
n = x.shape[0] # number of boxes
if not n:
continue
# Sort by confidence
# if method == 'fast_batch':
# x = x[x[:, 4].argsort(descending=True)]
# Batched NMS
c = x[:, 5] * 0 if agnostic else x[:, 5] # classes
boxes, scores = (
x[:, :4].clone() + c.view(-1, 1) * max_wh,
x[:, 4],
) # boxes (offset by class), scores
if method == "merge": # Merge NMS (boxes merged using weighted mean)
i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
if n < 1e4: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
# weights = (box_iou(boxes, boxes).tril_() > iou_thres) * scores.view(-1, 1) # box weights
# weights /= weights.sum(0) # normalize
# x[:, :4] = torch.mm(weights.T, x[:, :4])
weights = (box_iou(boxes[i], boxes) > iou_thres) * scores[
None
] # box weights
x[i, :4] = torch.mm(
weights / weights.sum(1, keepdim=True), x[:, :4]
).float() # merged boxes
elif method == "vision":
i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
elif method == "fast": # FastNMS from https://github.com/dbolya/yolact
iou = box_iou(boxes, boxes).triu_(diagonal=1) # upper triangular iou matrix
i = iou.max(0)[0] < iou_thres
output[xi] = x[i]
return output
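# Sketch of a typical call site (thresholds are illustrative; `model`, `img` and
# the original frame `im0` are assumed to exist elsewhere in the project):
#
#   pred = model(img)[0]                                     # raw inference output
#   det = non_max_suppression(pred, conf_thres=0.3, iou_thres=0.5)[0]
#   if det is not None:
#       det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape)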
def model_info(model, verbose=False):
# Plots a line-by-line description of a PyTorch model
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(
x.numel() for x in model.parameters() if x.requires_grad
) # number gradients
if verbose:
print(
"%5s %40s %9s %12s %20s %10s %10s"
% ("layer", "name", "gradient", "parameters", "shape", "mu", "sigma")
)
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace("module_list.", "")
print(
"%5g %40s %9s %12g %20s %10.3g %10.3g"
% (
i,
name,
p.requires_grad,
p.numel(),
list(p.shape),
p.mean(),
p.std(),
)
)
try: # FLOPS
from thop import profile
macs, _ = profile(model, inputs=(torch.zeros(1, 3, 480, 640),))
fs = ", %.1f GFLOPS" % (macs / 1e9 * 2)
except:
fs = ""
if verbose:
print(
"Model Summary: %g layers, %g parameters, %g gradients%s"
% (len(list(model.parameters())), n_p, n_g, fs)
)
def fuse_conv_and_bn(conv, bn):
# https://tehnokv.com/posts/fusing-batchnorm-and-conv/
with torch.no_grad():
# init
fusedconv = torch.nn.Conv2d(
conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
bias=True,
)
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
# prepare spatial bias
if conv.bias is not None:
b_conv = conv.bias
else:
b_conv = torch.zeros(conv.weight.size(0))
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(
torch.sqrt(bn.running_var + bn.eps)
)
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
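# Equivalence sketch: in eval mode the fused layer should reproduce conv -> bn.
# >>> conv = nn.Conv2d(3, 8, 3, bias=False)
# >>> bn = nn.BatchNorm2d(8).eval()
# >>> x = torch.randn(1, 3, 16, 16)
# >>> torch.allclose(fuse_conv_and_bn(conv, bn)(x), bn(conv(x)), atol=1e-6)
# True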
def scale_img(img, ratio=1.0, same_shape=True): # img(16,3,256,416), r=ratio
# scales img(bs,3,y,x) by ratio
h, w = img.shape[2:]
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode="bilinear", align_corners=False) # resize
if not same_shape: # pad/crop img
gs = 64 # (pixels) grid size
h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
def parse_model_cfg(path):
    # Parse the yolo *.cfg file and return module definitions. The path may be 'cfg/yolov3.cfg', 'yolov3.cfg', or 'yolov3'.
if not path.endswith(".cfg"): # add .cfg suffix if omitted
path += ".cfg"
if not os.path.exists(path) and os.path.exists(
"cfg" + os.sep + path
): # add cfg/ prefix if omitted
path = "cfg" + os.sep + path
with open(path, "r") as f:
lines = f.read().split("\n")
lines = [x for x in lines if x and not x.startswith("#")]
lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
mdefs = [] # module definitions
for line in lines:
if line.startswith("["): # This marks the start of a new block
mdefs.append({})
mdefs[-1]["type"] = line[1:-1].rstrip()
if mdefs[-1]["type"] == "convolutional":
mdefs[-1][
"batch_normalize"
] = 0 # pre-populate with zeros (may be overwritten later)
else:
key, val = line.split("=")
key = key.rstrip()
if key == "anchors": # return nparray
mdefs[-1][key] = np.array([float(x) for x in val.split(",")]).reshape(
(-1, 2)
) # np anchors
elif (key in ["from", "layers", "mask"]) or (
key == "size" and "," in val
): # return array
mdefs[-1][key] = [int(x) for x in val.split(",")]
else:
val = val.strip()
if val.isnumeric(): # return int or float
mdefs[-1][key] = (
int(val) if (int(val) - float(val)) == 0 else float(val)
)
else:
mdefs[-1][key] = val # return string
# Check all fields are supported
supported = [
"type",
"batch_normalize",
"filters",
"size",
"stride",
"pad",
"activation",
"layers",
"groups",
"from",
"mask",
"anchors",
"classes",
"num",
"jitter",
"ignore_thresh",
"truth_thresh",
"random",
"stride_x",
"stride_y",
"weights_type",
"weights_normalization",
"scale_x_y",
"beta_nms",
"nms_kind",
"iou_loss",
"iou_normalizer",
"cls_normalizer",
"iou_thresh",
]
f = [] # fields
    for x in mdefs[1:]:
        for k in x:  # collect field names in order of first appearance
            if k not in f:
                f.append(k)
u = [x for x in f if x not in supported] # unsupported fields
assert not any(u), (
"Unsupported fields %s in %s. See https://github.com/ultralytics/yolov3/issues/631"
% (u, path)
)
return mdefs
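# Illustrative shape of the result: a cfg that begins
#   [net] ... [convolutional] batch_normalize=1 filters=32 size=3 ...
# parses to a list of dicts such as
#   [{'type': 'net', ...},
#    {'type': 'convolutional', 'batch_normalize': 1, 'filters': 32, 'size': 3, ...}]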
def letterbox(
img,
new_shape=(416, 416),
color=(114, 114, 114),
auto=True,
scaleFill=False,
scaleup=True,
):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(
img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color
) # add border
return img, ratio, (dw, dh)
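# Minimal usage sketch: a 480x640 frame letterboxed to new_shape=416 with
# auto=True is resized to 312x416 and height-padded to the next 64-multiple,
# yielding a (352, 416, 3) image; ratio and (dw, dh) are returned so that
# detections can later be mapped back onto the original frame.
def _demo_letterbox():
    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for cv2.imread(...)
    img, ratio, (dw, dh) = letterbox(frame, new_shape=416)
    print(img.shape, ratio, (dw, dh))  # (352, 416, 3) (0.65, 0.65) (0.0, 20.0)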
def get_file_location(path):
if not os.path.exists(path) and os.path.exists(
os.path.split(os.path.realpath(__file__))[0] + os.sep + path
): # add $PWD/ prefix if omitted
return os.path.split(os.path.realpath(__file__))[0] + os.sep + path
    else:
        return path  # fall back to the path as given instead of returning None
| 34.49467 | 117 | 0.52627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,764 | 0.294474 |
def8727d101b934efb5715bc01f3842eeeee3ee3 | 4,934 | py | Python | ec2stack/__init__.py | sureshanaparti/cloudstack-ec2stack | 8e07435d3d04357995f2a5d337adef62ecbfdd8d | [
"Apache-2.0"
]
| 13 | 2015-05-06T13:38:13.000Z | 2021-11-09T21:39:01.000Z | ec2stack/__init__.py | sureshanaparti/cloudstack-ec2stack | 8e07435d3d04357995f2a5d337adef62ecbfdd8d | [
"Apache-2.0"
]
| 3 | 2015-08-21T17:31:20.000Z | 2021-07-07T08:39:11.000Z | ec2stack/__init__.py | sureshanaparti/cloudstack-ec2stack | 8e07435d3d04357995f2a5d337adef62ecbfdd8d | [
"Apache-2.0"
]
| 17 | 2015-07-24T06:00:59.000Z | 2021-11-09T21:38:52.000Z | #!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module creates the flask application.
"""
import os
import sys
import argparse
from alembic import command
from alembic.config import Config as AlembicConfig
from flask import Flask
from ConfigParser import SafeConfigParser
from ec2stack.controllers import *
from ec2stack.core import DB
from ec2stack.models import User
def create_app(settings=None):
"""
Creates a flask application.
@param settings: Settings override object.
@return: The flask application.
"""
app = Flask(__name__)
if settings:
app.config.from_object(settings)
else:
args = _generate_args()
profile = args.pop('profile')
app.config['DEBUG'] = args.pop('debug')
config_file = _load_config_file()
database_uri = _load_database()
_config_from_config_profile(config_file, profile, app)
app.config['SQLALCHEMY_DATABASE_URI'] = database_uri
DB.init_app(app)
default_controller = __import__(
'ec2stack.controllers.' + 'default', None, None, 'DEFAULT'
)
default_controller = getattr(default_controller, 'DEFAULT')
app.register_blueprint(default_controller)
return app
def _generate_args():
"""
    Parse command line arguments for running ec2stack.
@return: args.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-p',
'--profile',
required=False,
help='The profile to run ec2stack with, default is initial',
default='initial'
)
parser.add_argument(
'-d',
'--debug',
required=False,
help='Turn debug on for application',
default=False
)
args = parser.parse_args()
return vars(args)
def _load_config_file():
"""
Checks that the user's configuration file exists and returns its path.
@return: The path to the user's configuration file.
"""
config_file = os.path.join(
os.path.expanduser('~'),
'.ec2stack/ec2stack.conf'
)
if not os.path.exists(config_file):
sys.exit('No configuration found, please run ec2stack-configure')
return config_file
def _config_from_config_profile(config_file, profile, app):
"""
Configures ec2stack app based on configuration profile.
@param config_file: current config file configuration.
@param profile: the profile to set the attribute in.
"""
config = SafeConfigParser()
config.read(config_file)
if not config.has_section(profile):
sys.exit('No profile matching ' + profile +
' found in configuration, please run ec2stack-configure -p ' + profile)
for attribute in config.options(profile):
app.config[attribute.upper()] = config.get(profile, attribute)
instance_type_map = {}
instance_section = profile + "instancemap"
if config.has_section(instance_section):
for attribute in config.options(instance_section):
instance_type_map[attribute] = config.get(
instance_section, attribute)
app.config['INSTANCE_TYPE_MAP'] = instance_type_map
resource_type_map = {}
resource_section = profile + "resourcemap"
if config.has_section(resource_section):
for attribute in config.options(resource_section):
resource_type_map[attribute] = config.get(
resource_section, attribute)
    app.config['RESOURCE_TYPE_MAP'] = resource_type_map
def _load_database():
"""
Checks that the user's database exists and returns its uri.
@return: The uri to the user's database.
"""
database_file = os.path.join(
os.path.expanduser('~'),
'.ec2stack/ec2stack.sqlite'
)
if not os.path.exists(database_file):
directory = os.path.join(os.path.dirname(__file__), '../migrations')
config = AlembicConfig(os.path.join(
directory,
'alembic.ini'
))
config.set_main_option('script_location', directory)
command.upgrade(config, 'head', sql=False, tag=None)
return 'sqlite:///' + database_file
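# Minimal usage sketch, assuming ec2stack-configure has already written
# ~/.ec2stack/ec2stack.conf and created the sqlite database:
#     app = create_app()
#     app.run(host='0.0.0.0', port=5000)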
| 28.356322 | 88 | 0.676125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,086 | 0.422781 |
def8fdb574ef8e8309feae4ee72edbfe9a0a3beb | 926 | py | Python | schematics_proto3/unset.py | mlga/schematics-proto3 | 588fe5bc212e203688166638a1c52dfeda931403 | [
"MIT"
]
| null | null | null | schematics_proto3/unset.py | mlga/schematics-proto3 | 588fe5bc212e203688166638a1c52dfeda931403 | [
"MIT"
]
| 11 | 2020-04-09T13:33:54.000Z | 2020-08-19T17:38:26.000Z | schematics_proto3/unset.py | mlga/schematics-proto3 | 588fe5bc212e203688166638a1c52dfeda931403 | [
"MIT"
]
| null | null | null | # -*- coding:utf-8 -*-
"""
Test module docstring.
"""
import threading
from typing import Type
class UnsetType:
"""
Test docstring.
"""
__slots__ = []
_instance: 'UnsetType' = None
_lock: threading.Lock = threading.Lock()
def __str__(self):
return 'Unset'
def __repr__(self):
return 'Unset'
def __eq__(self, other):
return self is other
def __ne__(self, other):
return self is not other
def __len__(self):
return 0
def __bool__(self):
return False
def __hash__(self):
return hash(self._lock)
__nonzero__ = __bool__
def __new__(cls: Type['UnsetType']):
if cls._instance is None:
with cls._lock:
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
Unset = UnsetType() # pylint: disable=invalid-name
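# Minimal usage sketch: construction always yields the same falsy object, so
# identity checks such as ``value is Unset`` are safe anywhere in a codebase.
if __name__ == '__main__':
    assert UnsetType() is Unset
    assert not Unset and len(Unset) == 0
    print(Unset)  # -> Unset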
| 17.807692 | 56 | 0.580994 | 774 | 0.835853 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.160907 |
def92f706f99835e10af1d0e6310107f2432dbe5 | 193 | py | Python | aws_lambda_powertools/event_handler/__init__.py | nayaverdier/aws-lambda-powertools-python | cd15ee97746356a84c6f196dbd2d26a34ea50411 | [
"Apache-2.0",
"MIT-0"
]
| 1,208 | 2020-05-20T19:06:29.000Z | 2022-03-30T14:17:47.000Z | aws_lambda_powertools/event_handler/__init__.py | nayaverdier/aws-lambda-powertools-python | cd15ee97746356a84c6f196dbd2d26a34ea50411 | [
"Apache-2.0",
"MIT-0"
]
| 859 | 2020-05-22T09:59:54.000Z | 2022-03-31T08:31:30.000Z | aws_lambda_powertools/event_handler/__init__.py | nayaverdier/aws-lambda-powertools-python | cd15ee97746356a84c6f196dbd2d26a34ea50411 | [
"Apache-2.0",
"MIT-0"
]
| 163 | 2020-05-18T21:08:25.000Z | 2022-03-28T12:03:37.000Z | """
Event handler decorators for common Lambda events
"""
from .api_gateway import ApiGatewayResolver
from .appsync import AppSyncResolver
__all__ = ["AppSyncResolver", "ApiGatewayResolver"]
| 21.444444 | 51 | 0.797927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.487047 |
def98cf0f4126cdcda2bee2e5c8d96a01bc4937b | 1,351 | py | Python | solutions/5/guillaume/LookAhead.py | larsbratholm/champs_kaggle | fda4f213d02fd5e0138a86c52b4140c9f94fec6e | [
"MIT"
]
| 9 | 2020-08-14T23:11:16.000Z | 2021-08-09T16:23:43.000Z | solutions/5/guillaume/LookAhead.py | larsbratholm/champs_kaggle | fda4f213d02fd5e0138a86c52b4140c9f94fec6e | [
"MIT"
]
| 1 | 2020-11-19T09:29:14.000Z | 2020-11-19T09:29:14.000Z | solutions/5/guillaume/LookAhead.py | larsbratholm/champs_kaggle | fda4f213d02fd5e0138a86c52b4140c9f94fec6e | [
"MIT"
]
| 2 | 2020-09-09T02:53:57.000Z | 2020-12-06T08:20:52.000Z | import itertools as it
from torch.optim import Optimizer
class LookAhead(Optimizer):
def __init__(self, base_optimizer,alpha=0.5, k=6):
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
self.optimizer = base_optimizer
self.param_groups = self.optimizer.param_groups
self.alpha = alpha
self.k = k
for group in self.param_groups:
group["step_counter"] = 0
self.slow_weights = [[p.clone().detach() for p in group['params']]
for group in self.param_groups]
for w in it.chain(*self.slow_weights):
w.requires_grad = False
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
        self.optimizer.step()  # fast-weights update; keep the closure's loss instead of overwriting it with None
for group,slow_weights in zip(self.param_groups,self.slow_weights):
group['step_counter'] += 1
if group['step_counter'] % self.k != 0:
continue
for p,q in zip(group['params'],slow_weights):
if p.grad is None:
continue
                q.data.add_(p.data - q.data, alpha=self.alpha)  # slow weights drift toward fast weights
p.data.copy_(q.data)
return loss
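# Minimal usage sketch (the model and inner optimizer below are illustrative):
# LookAhead wraps a fast inner optimizer; every k calls to step() the slow
# weights are pulled a fraction alpha toward the fast weights.
if __name__ == '__main__':
    import torch
    net = torch.nn.Linear(4, 1)
    inner = torch.optim.Adam(net.parameters(), lr=1e-3)
    lookahead = LookAhead(inner, alpha=0.5, k=6)
    for _ in range(12):
        inner.zero_grad()
        loss = net(torch.randn(8, 4)).pow(2).mean()
        loss.backward()
        lookahead.step()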
| 37.527778 | 75 | 0.559585 | 1,293 | 0.957069 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.092524 |
defcc91baa71d0c94f476ef6cc3d35765b3516a0 | 2,263 | py | Python | addexp.py | Shajm44n/Expense | db3355d4d81d5dd57ceea81b1170724b8893e523 | [
"MIT"
]
| null | null | null | addexp.py | Shajm44n/Expense | db3355d4d81d5dd57ceea81b1170724b8893e523 | [
"MIT"
]
| null | null | null | addexp.py | Shajm44n/Expense | db3355d4d81d5dd57ceea81b1170724b8893e523 | [
"MIT"
]
| null | null | null | from tkinter import *
# import expdate
import mysql.connector
db_connect=mysql.connector.connect(host="localhost",user="root",password="maan",database="expense")
db_cursor=db_connect.cursor()
def add_expense(day,month,year):
print("add exp")
window=Tk()
window.title("Expense list")
l_message=Label(window)
l_msg=Label(window)
print(day)
print(month)
print(year)
l_trans=Label(window,text="Transport :")
e_trans=Entry(window)
l_food=Label(window,text="Food :")
e_food=Entry(window)
l_home=Label(window,text="Home :")
e_home=Entry(window)
l_ent=Label(window,text="Entertainment :")
e_ent=Entry(window)
l_utl=Label(window,text="Utilities :")
e_utl=Entry(window)
l_health=Label(window,text="Health :")
e_health=Entry(window)
l_oth=Label(window,text="Others :")
e_oth=Entry(window)
def enter_data():
trans=int(e_trans.get())
food=int(e_food.get())
home=int(e_home.get())
ent=int(e_ent.get())
utl=int(e_utl.get())
health=int(e_health.get())
other=int(e_oth.get())
total=trans+food+home+ent+utl+health+other
print(total)
db_cursor.execute(f"insert into daily(day,month,year,Transport,Food,Home,Entertainment,Utilities,Health,Others,Total)values('{day}','{month}','{year}','{trans}','{food}','{home}','{ent}','{utl}','{health}','{other}','{total}')")
db_connect.commit()
db_connect.close()
l_msg.config(text=" Data has been Updated!")
add_exp= Button(window, text= "add expense", command= enter_data)
add_exp.pack(pady=30)
l_trans.place(x =20,y=50)
e_trans.place(x =120,y=50)
l_food.place(x =20,y=70)
e_food.place(x =120,y=70)
l_home.place(x =20,y=90)
e_home.place(x =120,y=90)
l_ent.place(x =20,y=110)
e_ent.place(x =120,y=110)
l_utl.place(x =20,y=130)
e_utl.place(x =120,y=130)
l_health.place(x =20,y=150)
e_health.place(x =120,y=150)
l_oth.place(x =20,y=170)
e_oth.place(x =120,y=170)
l_message.place(x=50,y=100)
l_msg.place(x=120,y=170)
exit_button = Button(window, text="Exit", command=window.destroy)
exit_button.pack(pady=200)
window.geometry("800x800")
window.mainloop()
| 32.797101 | 236 | 0.643836 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.182059 |
defd13835bf657af58b23494bf16c9abcbbae2e8 | 1,217 | py | Python | BlackJack/UserInterface/BlackJackUI.py | Kasyx709/BlackJack | a99cd9327e466ed51dadbf4b5407c2370f998b82 | [
"MIT"
]
| null | null | null | BlackJack/UserInterface/BlackJackUI.py | Kasyx709/BlackJack | a99cd9327e466ed51dadbf4b5407c2370f998b82 | [
"MIT"
]
| null | null | null | BlackJack/UserInterface/BlackJackUI.py | Kasyx709/BlackJack | a99cd9327e466ed51dadbf4b5407c2370f998b82 | [
"MIT"
]
| null | null | null | from __future__ import division
from BlackJack.UserInterface import tk
from BlackJack.UserInterface import tkFont
from BlackJack.UserInterface import BlackJackWindows
from BlackJack.UserInterface import SelectGameType
from BlackJack.UserInterface import Helpers
class BlackJackUI(object):
def __init__(self):
super().__init__()
self.root = tk.Tk()
self.root.font = tkFont.Font(family='Garamond', size=16, weight='bold')
self.root.option_add("*Font", self.root.font)
self.root.wm_minsize(1312, 558)
self.root.wm_resizable(width=1, height=1)
self.root.wm_title('BlackJack - 2019')
# self.root.overrideredirect(True)
self.root.geometry("+250+250")
self.root.wm_attributes("-topmost", True)
self.root.configure(background='Green')
self.root.withdraw()
SelectGameType(parent=BlackJackWindows(self.root))
self.root.bind("<ButtonPress-1>", lambda event: Helpers.start_xy(event, self.root))
self.root.bind("<ButtonRelease-1>", lambda event: Helpers.stop_xy(event, self.root))
self.root.bind("<B1-Motion>", lambda event: Helpers.configure_xy(event, self.root))
self.root.mainloop()
| 40.566667 | 92 | 0.696795 | 951 | 0.78143 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.124076 |
defd4d718f41568b76388eb0230161d0d48bb24e | 263 | py | Python | winecasino/core/entities/__init__.py | harlov/winecasino | ae29b2c8f75bfd05ad141fd3b596f1db7c103690 | [
"MIT"
]
| null | null | null | winecasino/core/entities/__init__.py | harlov/winecasino | ae29b2c8f75bfd05ad141fd3b596f1db7c103690 | [
"MIT"
]
| null | null | null | winecasino/core/entities/__init__.py | harlov/winecasino | ae29b2c8f75bfd05ad141fd3b596f1db7c103690 | [
"MIT"
]
| null | null | null | from .country import Country
from .game import Game
from .game import Bid
from .user import User
from .grape import Grape
from .wine import Wine
from .base import new_id
__all__ = [
"new_id",
"Country",
"Game",
"Grape",
"User",
"Wine",
]
| 15.470588 | 28 | 0.653992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.159696 |
defdbd583ad5f6b3a08353cba72476c7dbaff00c | 295 | py | Python | download-deveres/para-execicios-curso-em-video/exe019.py | Hugo-Oliveira-RDO11/meus-deveres | b5e41015e2cb95946262678e82197e5f47d56271 | [
"MIT"
]
| null | null | null | download-deveres/para-execicios-curso-em-video/exe019.py | Hugo-Oliveira-RDO11/meus-deveres | b5e41015e2cb95946262678e82197e5f47d56271 | [
"MIT"
]
| null | null | null | download-deveres/para-execicios-curso-em-video/exe019.py | Hugo-Oliveira-RDO11/meus-deveres | b5e41015e2cb95946262678e82197e5f47d56271 | [
"MIT"
]
| null | null | null | import random
p = str(input('enter the name of the first student: '))
s = str(input('the name of the second student: '))
t = str(input('the name of the third student: '))
q = str(input('the name of the fourth student: '))
lista = [p, s, t, q]
aluno = random.choice(lista)
print('the student drawn was {}'.format(aluno))
| 29.5 | 51 | 0.657627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.474576 |
defde4b16a7fe68a1c0b7ba26a303a5bb6a695bc | 12,389 | py | Python | cma-evolve.py | simondlevy/CMA-Gym | ce0056873d42eae2b6769fe22fcf872459694f30 | [
"Apache-2.0"
]
| null | null | null | cma-evolve.py | simondlevy/CMA-Gym | ce0056873d42eae2b6769fe22fcf872459694f30 | [
"Apache-2.0"
]
| null | null | null | cma-evolve.py | simondlevy/CMA-Gym | ce0056873d42eae2b6769fe22fcf872459694f30 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
import gym
import torch
import numpy as np
import multiprocessing as mp
import os
import pickle
import sys
import time
import logging
import cma
import argparse
from torchmodel import StandardFCNet
def _makedir(name):
if not os.path.exists(name):
os.makedirs(name)
def get_logger():
_makedir('log')
_makedir('data')
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s: %(message)s')
logger = logging.getLogger('MAIN')
logger.setLevel(logging.DEBUG)
return logger
class Task:
def __init__(self, envname, hidden_size, max_steps, target, pop_size, reps, test_reps, weight_decay, noise_std, sigma):
self.task = envname
self.env_fn = lambda: gym.make(self.task)
self.repetitions = reps
self.test_repetitions = test_reps
env = self.env_fn()
self.action_dim = env.action_space.shape[0]
self.state_dim = env.observation_space.shape[0]
self.reward_to_fitness = lambda r: r
self.max_steps = max_steps
self.pop_size = pop_size
self.num_workers = mp.cpu_count()
self.action_clip = lambda a: np.clip(a, -1, 1)
self.target = target
self.hidden_size = hidden_size
self.model_fn = lambda: StandardFCNet(self.state_dim, self.action_dim, self.hidden_size)
model = self.model_fn()
self.initial_weight = model.get_weight()
self.weight_decay = weight_decay
self.action_noise_std = noise_std
self.sigma = sigma
self.tag = 'CMA-%d' % (hidden_size)
class BaseModel:
def get_weight(self):
weight = []
for param in self.parameters():
weight.append(param.data.numpy().flatten())
weight = np.concatenate(weight, 0)
return weight
def set_weight(self, solution):
offset = 0
for param in self.parameters():
param_shape = param.data.numpy().shape
param_size = np.prod(param_shape)
src_param = solution[offset: offset + param_size]
if len(param_shape) > 1:
src_param = src_param.reshape(param_shape)
param.data = torch.FloatTensor(src_param)
offset += param_size
assert offset == len(solution)
class Normalizer:
def __init__(self, filter_mean=True):
self.m = 0
self.v = 0
self.n = 0.
self.filter_mean = filter_mean
def state_dict(self):
return {'m': self.m,
'v': self.v,
'n': self.n}
def load_state_dict(self, saved):
self.m = saved['m']
self.v = saved['v']
self.n = saved['n']
def __call__(self, o):
self.m = self.m * (self.n / (self.n + 1)) + o * 1 / (1 + self.n)
self.v = self.v * (self.n / (self.n + 1)) + (o - self.m) ** 2 * 1 / (1 + self.n)
self.std = (self.v + 1e-6) ** .5 # std
self.n += 1
if self.filter_mean:
o_ = (o - self.m) / self.std
else:
o_ = o / self.std
return o_
class StaticNormalizer:
def __init__(self, o_size):
self.offline_stats = SharedStats(o_size)
self.online_stats = SharedStats(o_size)
def __call__(self, o_):
o = torch.FloatTensor([o_] if np.isscalar(o_) else o_)
self.online_stats.feed(o)
if self.offline_stats.n[0] == 0:
return o_
std = (self.offline_stats.v + 1e-6) ** .5
o = (o - self.offline_stats.m) / std
o = o.numpy()
if np.isscalar(o_):
o = np.asscalar(o)
else:
o = o.reshape(o_.shape)
return o
class SharedStats:
def __init__(self, o_size):
self.m = torch.zeros(o_size)
self.v = torch.zeros(o_size)
self.n = torch.zeros(1)
self.m.share_memory_()
self.v.share_memory_()
self.n.share_memory_()
def feed(self, o):
n = self.n[0]
new_m = self.m * (n / (n + 1)) + o / (n + 1)
self.v.copy_(self.v * (n / (n + 1)) + (o - self.m) * (o - new_m) / (n + 1))
self.m.copy_(new_m)
self.n.add_(1)
def zero(self):
self.m.zero_()
self.v.zero_()
self.n.zero_()
def load(self, stats):
self.m.copy_(stats.m)
self.v.copy_(stats.v)
self.n.copy_(stats.n)
def merge(self, B):
A = self
n_A = self.n[0]
n_B = B.n[0]
n = n_A + n_B
delta = B.m - A.m
m = A.m + delta * n_B / n
v = A.v * n_A + B.v * n_B + delta * delta * n_A * n_B / n
v /= n
self.m.copy_(m)
self.v.copy_(v)
self.n.add_(B.n)
def state_dict(self):
return {'m': self.m.numpy(),
'v': self.v.numpy(),
'n': self.n.numpy()}
def load_state_dict(self, saved):
self.m = torch.FloatTensor(saved['m'])
self.v = torch.FloatTensor(saved['v'])
self.n = torch.FloatTensor(saved['n'])
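# Note on merge(): it is the standard parallel-variance combination, so two
# running (mean, var, count) summaries merge exactly as if one accumulator
# had seen every sample; this is what lets each worker keep local stats.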
def fitness_shift(x):
x = np.asarray(x).flatten()
ranks = np.empty(len(x))
ranks[x.argsort()] = np.arange(len(x))
ranks /= (len(x) - 1)
ranks -= .5
return ranks
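# Worked example: fitness_shift([10.0, -3.0, 7.0]) -> array([ 0.5, -0.5,  0. ]);
# replacing raw fitness values with centred ranks makes the CMA-ES update
# invariant to the scale of the episode rewards.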
class Worker(mp.Process):
def __init__(self, id, task_q, result_q, stop):
mp.Process.__init__(self)
self.id = id
self.task_q = task_q
self.result_q = result_q
self.stop = stop
def run(self):
np.random.seed()
while not self.stop.value:
if self.task_q.empty():
continue
id, solution = self.task_q.get()
fitness, steps = self.evalfun(solution)
self.result_q.put([id, fitness, steps])
class Evaluator:
def __init__(self, config, state_normalizer):
self.model = config.model_fn()
self.repetitions = config.repetitions
self.env = config.env_fn()
self.state_normalizer = state_normalizer
self.config = config
def eval(self, solution):
self.model.set_weight(solution)
rewards = []
steps = []
for i in range(self.repetitions):
reward, step = self.single_run()
rewards.append(reward)
steps.append(step)
return -np.mean(rewards), np.sum(steps)
def single_run(self):
state = self.env.reset()
total_reward = 0
steps = 0
while True:
state = self.state_normalizer(state)
action = self.model(np.stack([state])).data.numpy().flatten()
action += np.random.randn(len(action)) * self.config.action_noise_std
action = self.config.action_clip(action)
state, reward, done, info = self.env.step(action)
steps += 1
total_reward += reward
if done:
return total_reward, steps
class CMAWorker(Worker):
def __init__(self, id, state_normalizer, task_q, result_q, stop, config):
Worker.__init__(self, id, task_q, result_q, stop)
self.evalfun = Evaluator(config, state_normalizer).eval
def train(config, logger):
task_queue = mp.SimpleQueue()
result_queue = mp.SimpleQueue()
stop = mp.Value('i', False)
stats = SharedStats(config.state_dim)
normalizers = [StaticNormalizer(config.state_dim) for _ in range(config.num_workers)]
for normalizer in normalizers:
normalizer.offline_stats.load(stats)
workers = [CMAWorker(id, normalizers[id], task_queue, result_queue, stop, config) for id in range(config.num_workers)]
for w in workers: w.start()
opt = cma.CMAOptions()
opt['tolfun'] = -config.target
opt['popsize'] = config.pop_size
opt['verb_disp'] = 0
opt['verb_log'] = 0
opt['maxiter'] = sys.maxsize
es = cma.CMAEvolutionStrategy(config.initial_weight, config.sigma, opt)
total_steps = 0
initial_time = time.time()
training_rewards = []
training_steps = []
training_timestamps = []
test_mean, test_std = test(config, config.initial_weight, stats)
logger.info('total steps %8d, %+4.0f(%+4.0f)' % (total_steps, test_mean, test_std))
training_rewards.append(test_mean)
training_steps.append(0)
training_timestamps.append(0)
while True:
solutions = es.ask()
for id, solution in enumerate(solutions):
task_queue.put((id, solution))
while not task_queue.empty():
continue
result = []
while len(result) < len(solutions):
if result_queue.empty():
continue
result.append(result_queue.get())
result = sorted(result, key=lambda x: x[0])
total_steps += np.sum([r[2] for r in result])
cost = [r[1] for r in result]
best_solution = solutions[np.argmin(cost)]
elapsed_time = time.time() - initial_time
test_mean, test_std = test(config, best_solution, stats)
best = -np.min(cost)
        logger.info('total steps = %8d test = %+4.0f (%4.0f) best = %+4.0f (%+4.0f) elapsed time = %4.0f sec' %
(total_steps, test_mean, test_std, best, config.target, elapsed_time))
training_rewards.append(test_mean)
training_steps.append(total_steps)
training_timestamps.append(elapsed_time)
#with open('data/%s-best_solution_%s.bin' % (TAG, config.task), 'wb') as f: # XXX gets stuck
# pickle.dump(solutions[np.argmin(result)], f)
if best > config.target:
logger.info('Best score of %f exceeds target %f' % (best, config.target))
break
if config.max_steps and total_steps > config.max_steps:
logger.info('Maximum number of steps exceeded')
stop.value = True
break
cost = fitness_shift(cost)
es.tell(solutions, cost)
# es.disp()
for normalizer in normalizers:
stats.merge(normalizer.online_stats)
normalizer.online_stats.zero()
for normalizer in normalizers:
normalizer.offline_stats.load(stats)
stop.value = True
for w in workers: w.join()
return [training_rewards, training_steps, training_timestamps]
def test(config, solution, stats):
normalizer = StaticNormalizer(config.state_dim)
normalizer.offline_stats.load_state_dict(stats.state_dict())
evaluator = Evaluator(config, normalizer)
evaluator.model.set_weight(solution)
rewards = []
for i in range(config.test_repetitions):
reward, _ = evaluator.single_run()
rewards.append(reward)
return np.mean(rewards), np.std(rewards) / config.repetitions
def multi_runs(task, logger, runs=1):
if not os.path.exists('log'):
os.makedirs('log')
fh = logging.FileHandler('log/%s-%s.txt' % (task.tag, task.task))
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
stats = []
for run in range(runs):
logger.info('Run %3d/%3d' % (run+1, runs))
stats.append(train(task, logger))
with open('data/%s-stats-%s.bin' % (task.tag, task.task), 'wb') as f:
pickle.dump(stats, f)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', type=str, default='Pendulum-v0')
parser.add_argument('--nhid', help='# of hidden units', type=int, default=64)
parser.add_argument('--target', help='reward goal', type=float, default=-np.inf)
parser.add_argument('--max-steps', help='maximum number of steps', type=int, default=int(2e7))
parser.add_argument('--pop-size', help='population size', type=int, default=64)
parser.add_argument('--reps', help='repetitions', type=int, default=10)
parser.add_argument('--test-reps', help='test repetitions', type=int, default=10)
parser.add_argument('--weight-decay', help='weight decay', type=float, default=0.005)
parser.add_argument('--noise-std', help='noise standard deviation', type=float, default=0)
parser.add_argument('--sigma', help='sigma', type=float, default=1)
args = parser.parse_args()
task = Task(args.env, args.nhid, args.max_steps, args.target, args.pop_size, args.reps, args.test_reps,
args.weight_decay, args.noise_std, args.sigma)
logger = get_logger()
p = mp.Process(target=multi_runs, args=(task,logger))
p.start()
p.join()
if __name__ == '__main__':
main()
| 33.574526 | 123 | 0.600291 | 6,318 | 0.509969 | 0 | 0 | 0 | 0 | 0 | 0 | 909 | 0.073372 |
defec38e9abb5a9b6de6de6949355eb8f83f8c74 | 342 | py | Python | src/arcclimate/temperature.py | youworks/arcclimate | 62a9eece267e42ccddfc5145e8ee50776470f7bf | [
"MIT"
]
| null | null | null | src/arcclimate/temperature.py | youworks/arcclimate | 62a9eece267e42ccddfc5145e8ee50776470f7bf | [
"MIT"
]
| null | null | null | src/arcclimate/temperature.py | youworks/arcclimate | 62a9eece267e42ccddfc5145e8ee50776470f7bf | [
"MIT"
]
| 1 | 2022-03-08T01:04:47.000Z | 2022-03-08T01:04:47.000Z | """
Module for temperature-related functions.
"""
import numpy as np
def get_corrected_TMP(TMP: np.ndarray, ele_gap: float) -> np.ndarray:
    """Elevation correction of air temperature
    Args:
        TMP (np.ndarray): air temperature [℃]
        ele_gap (np.ndarray): elevation difference [m]
    Returns:
        np.ndarray: elevation-corrected air temperature [℃]
    Notes:
        Assumes a mean temperature lapse rate of 0.0065 ℃/m.
    """
return TMP + ele_gap * -0.0065
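# Minimal usage example: with an elevation difference of +100 m, an observed
# 15.2 [℃] corrects to 15.2 - 100 * 0.0065 = 14.55 [℃]:
#     get_corrected_TMP(np.array([15.2]), 100.0)  # -> array([14.55])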
| 15.545455 | 69 | 0.564327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.693548 |
defeff29d76d14fa0aceaad7cd54a55164f7136c | 2,386 | py | Python | rastervision/data/label_store/default.py | carderne/raster-vision | 915fbcd3263d8f2193e65c2cd0eb53e050a47a01 | [
"Apache-2.0"
]
| 4 | 2019-03-11T12:38:15.000Z | 2021-04-06T14:57:52.000Z | rastervision/data/label_store/default.py | carderne/raster-vision | 915fbcd3263d8f2193e65c2cd0eb53e050a47a01 | [
"Apache-2.0"
]
| null | null | null | rastervision/data/label_store/default.py | carderne/raster-vision | 915fbcd3263d8f2193e65c2cd0eb53e050a47a01 | [
"Apache-2.0"
]
| 1 | 2019-10-29T09:22:09.000Z | 2019-10-29T09:22:09.000Z | from abc import (ABC, abstractmethod)
import os
import rastervision as rv
class LabelStoreDefaultProvider(ABC):
@staticmethod
@abstractmethod
def is_default_for(task_type):
"""Returns True if this label store is the default for this tasks_type"""
pass
@staticmethod
@abstractmethod
def handles(task_type, s):
"""Returns True of this provider is a default for this task_type and string"""
pass
    @staticmethod
    @abstractmethod
    def construct(s=None):
        """Constructs a default LabelStore based on the string.
        """
        pass
class ObjectDetectionGeoJSONStoreDefaultProvider(LabelStoreDefaultProvider):
@staticmethod
def is_default_for(task_type):
return task_type == rv.OBJECT_DETECTION
@staticmethod
def handles(task_type, uri):
if task_type == rv.OBJECT_DETECTION:
ext = os.path.splitext(uri)[1]
return ext.lower() in ['.json', '.geojson']
return False
@staticmethod
def construct(uri=None):
b = rv.LabelStoreConfig.builder(rv.OBJECT_DETECTION_GEOJSON)
if uri:
b = b.with_uri(uri)
return b.build()
class ChipClassificationGeoJSONStoreDefaultProvider(LabelStoreDefaultProvider):
@staticmethod
def is_default_for(task_type):
return task_type == rv.CHIP_CLASSIFICATION
@staticmethod
def handles(task_type, uri):
if task_type == rv.CHIP_CLASSIFICATION:
ext = os.path.splitext(uri)[1]
return ext.lower() in ['.json', '.geojson']
return False
@staticmethod
def construct(uri=None):
b = rv.LabelStoreConfig.builder(rv.CHIP_CLASSIFICATION_GEOJSON)
if uri:
b = b.with_uri(uri)
return b.build()
class SemanticSegmentationRasterStoreDefaultProvider(
LabelStoreDefaultProvider):
@staticmethod
def is_default_for(task_type):
return task_type == rv.SEMANTIC_SEGMENTATION
@staticmethod
def handles(task_type, uri):
if task_type == rv.SEMANTIC_SEGMENTATION:
ext = os.path.splitext(uri)[1]
return ext.lower() in ['.tiff', '.tif']
return False
@staticmethod
def construct(uri=None):
b = rv.LabelStoreConfig.builder(rv.SEMANTIC_SEGMENTATION_RASTER)
if uri:
b = b.with_uri(uri)
return b.build()
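# Minimal usage sketch: pick the first default provider whose ``handles``
# check accepts the task type and URI, then build a LabelStoreConfig from it
# (the helper below is local to this example).
def _default_label_store_for(task_type, uri):
    for provider in (ObjectDetectionGeoJSONStoreDefaultProvider,
                     ChipClassificationGeoJSONStoreDefaultProvider,
                     SemanticSegmentationRasterStoreDefaultProvider):
        if provider.handles(task_type, uri):
            return provider.construct(uri)
    raise ValueError('No default label store for %s' % uri)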
| 27.425287 | 86 | 0.65088 | 2,299 | 0.963537 | 0 | 0 | 1,950 | 0.817267 | 0 | 0 | 264 | 0.110645 |
720247461650041909a7ce79fd85da841234a38b | 6,425 | py | Python | app.py | aniketjana03/TimeSheet | 519b3bdad79dedb7210747906bf4b8e24e64691a | [
"Apache-2.0"
]
| null | null | null | app.py | aniketjana03/TimeSheet | 519b3bdad79dedb7210747906bf4b8e24e64691a | [
"Apache-2.0"
]
| null | null | null | app.py | aniketjana03/TimeSheet | 519b3bdad79dedb7210747906bf4b8e24e64691a | [
"Apache-2.0"
]
| null | null | null | import sys
import os
from flask import Flask, flash, redirect, render_template, request, url_for, session
from flaskext.mysql import MySQL
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
from flask_session import Session
from database import Database
from makedb import MakeDB
from helpers import generate_weekID
import pymysql
from boto.s3.connection import S3Connection
# init flask app
app = Flask(__name__)
app.config['TESTING'] = False
# session secret key
app.secret_key = os.environ.get('SECRETKEYFLASK')
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Password Hashing
bcrypt = Bcrypt(app)
# init MySQL database
mysql = MySQL()
mysql.init_app(app)
#Make database if not EXISTS
MakeDB()
@app.route("/", methods=["GET", "POST"])
def index():
#Call DB class to init database
db=Database()
if not session.get("user_id"):
return redirect("/login")
current_user = session["user_id"]
if request.method == "POST":
weekDates={}
dayStatus={}
days=["statusSaturday", "statusSunday", "statusMonday", "statusTuesday", "statusWednesday", "statusThursday", "statusFriday"]
for i in range(0,7):
weekDates[i]=request.form.get(f'td{i+1}')
dayStatus[i]=request.form.get(days[i])
if not weekDates[i]:
flash("Please select date from datepicker!")
return redirect(url_for("index"))
if not dayStatus[i]:
flash("Please select day status for all dates!")
return redirect(url_for("index"))
# DEBUG
print(weekDates, file=sys.stderr)
print(dayStatus, file=sys.stderr)
weekID = generate_weekID(weekDates)
try:
db.timesheet_target(current_user, dayStatus, weekID)
finally:
db.close_cursor()
flash(f'Timesheet successfully submitted for the week starting on {weekDates[0]}!')
return redirect(url_for("index"))
else:
return render_template("timesheet.html", user = current_user)
@app.route("/login", methods=["GET", "POST"])
def login():
""" User logs in here """
error = None
# Forget past user ids
session.clear()
#Call DB class to init database
db=Database()
# If user reached route via post method
if request.method == "POST":
email = request.form.get("email")
email = email.strip()
password = request.form.get("password")
# print(username, password)
if not email:
# Return error message TODO
error = "Please enter Email ID!"
# return render_template('login.html', error=error)
flash("Please enter Email ID!")
return redirect(url_for("login"))
elif not password:
# Return error message TODO
error = "Please enter password!"
# return render_template('login.html', error=error)
flash("Please enter password!")
return redirect(url_for("/"))
# Check login credentials from database
try:
rows = db.check_credentials_from_email(email)
#DEBUG
print(rows, file=sys.stderr)
if len(rows)!=1 or not (bcrypt.check_password_hash(rows[0]["user_password"],password) or not (rows[0]["email_id"]==email)):
error = "Invalid credentials"
flash("Invalid credentials")
# return redirect(url_for("login"))
return render_template("login.html", error=error)
# Remember which user has logged in
session["user_id"] = rows[0]["EmployeeID"]
finally:
db.close_cursor()
# redirect to main page
flash('You were successfully logged in')
return redirect("/")
# User reached route via get method
else:
return render_template("login.html")
@app.route("/logout")
def logout():
# session.pop('username', None)
session.clear()
# redirect to main page
return redirect("/")
@app.route("/register", methods=['GET', 'POST'])
def register():
error = None
db=Database()
if request.method == "POST":
email = request.form.get("email")
email=email.strip()
password = request.form.get("password")
confirm = request.form.get("confirm")
if not request.form.get("first"):
# Return error message TODO
error = "First Name field missing"
return error
elif not request.form.get("last"):
# Return error message TODO
error = "Last Name field missing"
return error
elif not email:
# Return error message TODO
error = "Email field missing"
return error
elif not password:
# Return error message TODO
error = "Password field missing"
return error
elif not confirm:
# Return error message TODO
error = "Confirm password field missing"
return error
elif not request.form.get("dob"):
# Return error message TODO
error = "D.O.B. field missing"
return error
elif password != confirm:
# Return error message # TODO
error = "Passwords do not match"
flash(error)
return redirect(url_for("register"))
try:
# Check if user is in our database
rows = db.check_exist(email)
if len(rows)!=0:
error = "User already exists"
flash(error)
return redirect(url_for("register"))
else:
# insert user details in database
pw_hash = bcrypt.generate_password_hash(password)
db.insert_user(email, pw_hash)
emp_id = db.return_emp_id(email)
db.insert_employee_details(emp_id, request.form.get("first").strip(), request.form.get("last").strip(), request.form.get("dob"))
finally:
db.close_cursor()
flash("You have successfully registered!")
return redirect(url_for("login"))
else:
return render_template("register.html")
if __name__ == "__main__":
app.run()
| 32.125 | 144 | 0.598599 | 0 | 0 | 0 | 0 | 5,525 | 0.859922 | 0 | 0 | 2,088 | 0.324981 |
7202ced44b536e7785d48d42a3fe09355e98fc12 | 448 | py | Python | guestbook/models.py | Bespolezniy/geek-world | 8fbaf451b4e87e48e73eb289035ec0ea68ea0e68 | [
"MIT"
]
| null | null | null | guestbook/models.py | Bespolezniy/geek-world | 8fbaf451b4e87e48e73eb289035ec0ea68ea0e68 | [
"MIT"
]
| null | null | null | guestbook/models.py | Bespolezniy/geek-world | 8fbaf451b4e87e48e73eb289035ec0ea68ea0e68 | [
"MIT"
]
| null | null | null | from django.db import models
# Create your models here.
class GuestBook(models.Model):
user = models.CharField(max_length=15, verbose_name="User")
date = models.DateTimeField(db_index=True, auto_now_add=True, verbose_name="Published")
content = models.TextField(verbose_name="Content")
class Meta:
ordering = ["-date"]
verbose_name = "Guest book entry"
verbose_name_plural = "Guest book entries" | 37.333333 | 92 | 0.694196 | 386 | 0.861607 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.21875 |
72043f3633eddba64964dbbdb6f17d84cf1d6267 | 34,859 | py | Python | PA1/PA1_Q2/P21CS007_VGG16.py | aryachiranjeev/Dependable-AI | 750570572c1baaa2590a89c0982e2f71b15b48b9 | [
"MIT"
]
| null | null | null | PA1/PA1_Q2/P21CS007_VGG16.py | aryachiranjeev/Dependable-AI | 750570572c1baaa2590a89c0982e2f71b15b48b9 | [
"MIT"
]
| null | null | null | PA1/PA1_Q2/P21CS007_VGG16.py | aryachiranjeev/Dependable-AI | 750570572c1baaa2590a89c0982e2f71b15b48b9 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
import pandas as pd
import random
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense,Flatten,GlobalAveragePooling2D,Input,Lambda
from tensorflow.keras.models import Model,load_model
import tensorflow.keras.backend as K
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.vgg16 import preprocess_input
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import accuracy_score,confusion_matrix
from skimage.color import rgb2gray
import cv2
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
# In[110]:
def brute_vgg16():
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_train = to_categorical(y_train_without_one_hot)
y_test = to_categorical(y_test_without_one_hot)
x_train,x_valid,y_train,y_valid = train_test_split(x_train,y_train,test_size = 0.2,shuffle=True,random_state = 42)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
print(x_valid.shape)
print(y_valid.shape)
vgg16 = VGG16(include_top=True,weights = None, input_shape = (32,32,3))
out = Dense(10,activation='softmax',name = 'fc3')(vgg16.get_layer('fc2').output)
brute_model = Model(inputs = vgg16.input,outputs = out)
epochs = 20
learning_rate = 0.1
decay_rate = learning_rate/epochs
sgd = tf.keras.optimizers.SGD(lr=learning_rate, decay=decay_rate, momentum=0.9, nesterov=False)
    brute_model.compile(loss = 'categorical_crossentropy',optimizer = sgd,metrics=['accuracy'])  # use the configured SGD instance; alternative: tf.keras.optimizers.Adam(learning_rate=0.0001)
history = brute_model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=epochs, batch_size=16)
brute_model.save("vgg16_cifar10")
y_pred_train = brute_model.predict(x_train)
predictions_train = np.argmax(y_pred_train,axis=1)
print("training accuracy:",accuracy_score(np.argmax(y_train,axis=1),predictions_train))
y_pred_test = brute_model.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
print("test accuracy:",accuracy_score(np.argmax(y_test,axis=1),prediction_test))
# plot loss during training
plt.subplot(211)
plt.title('Loss')
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
# plot accuracy during training
plt.subplot(212)
plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='test')
plt.legend()
plt.show()
return brute_model
# In[150]:
def test_brute_model_on_gray_scale_test_images(brute_model):
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_test = to_categorical(y_test_without_one_hot)
def gray_images(x_test):
gray_x_test = []
for i in x_test:
gray_scale = rgb2gray(i)
gray_x_test.append(np.dstack((gray_scale,gray_scale,gray_scale)))
gray_x_test = np.array(gray_x_test)
print(gray_x_test.shape)
return gray_x_test
gray_x_test = gray_images(x_test)
y_pred_test = brute_model.predict(gray_x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
print("test accuracy:",accuracy_score(np.argmax(y_test,axis=1),prediction_test)*100,"%")
print("gray scale confusion matrix:\n",confusion_matrix(np.argmax(y_test,axis=1),prediction_test))
# In[112]:
def class_wise_accuracy(models):
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_test = to_categorical(y_test_without_one_hot)
y_pred_test = models.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
print("test accuracy:",accuracy_score(np.argmax(y_test,axis=1),prediction_test)*100,"%")
confus_matrix = confusion_matrix(np.argmax(y_test,axis=1),prediction_test)
print("confusion_matrix:\n",confus_matrix)
class_accuracy = []
class_TP = []
for i in range(confus_matrix.shape[0]):
for j in range(confus_matrix.shape[1]):
if i == j:
TP = confus_matrix[i][j]
class_TP.append(TP)
for k in range(confus_matrix.shape[1]):
ca = (class_TP[k] / confus_matrix[:,k].sum())*100
class_accuracy.append(ca)
print("class ",k," accuracy ",labels[k]," :",ca,"%")
class_accuracy = np.array(class_accuracy)
return class_accuracy
# In[113]:
def bias_metrics(class_accuracy,models):
dob = np.std(class_accuracy)
print("Degree of Bias:",dob)
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_pred_test = models.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
from sklearn.metrics import confusion_matrix
confuse_matrix = confusion_matrix(y_test_without_one_hot, prediction_test)
print("confusion_matrix:\n",confuse_matrix)
FP = confuse_matrix.sum(axis=0) - np.diag(confuse_matrix)
FN = confuse_matrix.sum(axis=1) - np.diag(confuse_matrix)
TP = np.diag(confuse_matrix)
TN = confuse_matrix.sum() - (FP+FN+TP)
FP=FP.astype(float)
TP=TP.astype(float)
FN=FN.astype(float)
TN=TN.astype(float)
FNR = FN/(TP+FN)
    FPR = FP/(FP+TN)  # false positive rate = FP / (FP + TN)
print("FPR:",FPR)
print("FNR:",FNR)
AFR = ((FPR.sum()/10)+(FNR.sum()/10))/2
print("AFR:",AFR)
# In[115]:
#brute model
print("/nbrute model/n")
brute_model = brute_vgg16()
test_brute_model_on_gray_scale_test_images(brute_model)
class_accuracy_brute_model = class_wise_accuracy(brute_model)
bias_metrics(class_accuracy_brute_model,brute_model)
# In[40]:
def create_results(brute_model):
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_train = to_categorical(y_train_without_one_hot)
y_test = to_categorical(y_test_without_one_hot)
y_pred_test = brute_model.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
df = pd.DataFrame(np.hstack((y_test_without_one_hot,prediction_test.reshape(len(prediction_test),1))),columns=['y_test','y_test_pred'],index=None)
print(df.head())
df.to_csv("y_test_prediction_test.csv",index=False)
correct_idxes = []
incorrect_idxes = []
for i in range(len(prediction_test)):
if y_test_without_one_hot[i] == prediction_test[i]:
correct_idxes.append(i)
elif y_test_without_one_hot[i] != prediction_test[i]:
incorrect_idxes.append(i)
cv2.imwrite("correct"+str(int(y_test_without_one_hot[correct_idxes[0]][0]))+".jpg",x_test[correct_idxes[0]])
cv2.imwrite("correct"+str(int(y_test_without_one_hot[correct_idxes[1]][0]))+".jpg",x_test[correct_idxes[1]])
cv2.imwrite("incorrect"+str(int(y_test_without_one_hot[incorrect_idxes[0]][0]))+".jpg",x_test[incorrect_idxes[0]])
cv2.imwrite("incorrect"+str(int(y_test_without_one_hot[incorrect_idxes[1]][0]))+".jpg",x_test[incorrect_idxes[1]])
# In[68]:
class GradCAM:
def __init__(self, model, classIdx, layerName=None):
self.model = model
self.classIdx = classIdx
self.layerName = layerName
if self.layerName is None:
self.layerName = self.find_target_layer()
def find_target_layer(self):
for layer in reversed(self.model.layers):
if len(layer.output_shape) == 4:
return layer.name
raise ValueError("Could not find 4D layer. Cannot apply GradCAM.")
def compute_heatmap(self, image, eps=1e-8):
gradModel = Model(inputs=[self.model.inputs],outputs=[self.model.get_layer(self.layerName).output, self.model.output])
with tf.GradientTape() as tape:
inputs = tf.cast(image, tf.float32)
(convOutputs, predictions) = gradModel(inputs)
loss = predictions[:, tf.argmax(predictions[0])]
grads = tape.gradient(loss, convOutputs)
castConvOutputs = tf.cast(convOutputs > 0, "float32")
castGrads = tf.cast(grads > 0, "float32")
guidedGrads = castConvOutputs * castGrads * grads
convOutputs = convOutputs[0]
guidedGrads = guidedGrads[0]
weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)
(w, h) = (image.shape[2], image.shape[1])
heatmap = cv2.resize(cam.numpy(), (w, h))
numer = heatmap - np.min(heatmap)
denom = (heatmap.max() - heatmap.min()) + eps
heatmap = numer / denom
heatmap = (heatmap * 255).astype("uint8")
return heatmap
def overlay_heatmap(self, heatmap, image, alpha=0.5,colormap=cv2.COLORMAP_VIRIDIS):
heatmap = cv2.applyColorMap(heatmap, colormap)
output = cv2.addWeighted(image, alpha, heatmap, 1 - alpha, 0)
return (heatmap, output)
def make_gradCAM(img_path,brute_model,classified,layer_name="block5_conv3"):
image = cv2.imread(img_path)
image = cv2.resize(image, (32, 32))
image = np.expand_dims(image, axis=0)
preds = brute_model.predict(image)
i = np.argmax(preds[0])
icam = GradCAM(brute_model, i,layer_name)
heatmap = icam.compute_heatmap(image)
heatmap = cv2.resize(heatmap, (32, 32))
image = cv2.imread(img_path)
image = cv2.resize(image, (32, 32))
(heatmap, output) = icam.overlay_heatmap(heatmap, image, alpha=0.5)
fig, ax = plt.subplots(1, 3)
ax[0].imshow(heatmap)
ax[1].imshow(image)
ax[2].imshow(output)
plt.savefig("GradCAM_"+ str(classified)+str(img_path[-5])+".jpg")
plt.show()
plt.close()
layer_names = ["block5_conv3","block4_conv2"]
for l in layer_names:
print("layer name:",l)
make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual7pred7.jpg",brute_model,classified="correct",layer_name=l)
make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual8pred8.jpg",brute_model,classified="correct",layer_name=l)
make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual3pred0.jpg",brute_model,classified="incorrect",layer_name=l)
make_gradCAM("/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual5pred4.jpg",brute_model,classified="incorrect",layer_name =l)
# In[161]:
def grad_cam_pp(model, img,layer_name="block5_conv3", label_name=None,category_id=None):
img_tensor = np.expand_dims(img, axis=0)
conv_layer = model.get_layer(layer_name)
heatmap_model = Model([model.inputs], [conv_layer.output, model.output])
with tf.GradientTape() as gtape1:
with tf.GradientTape() as gtape2:
with tf.GradientTape() as gtape3:
conv_output, predictions = heatmap_model(img_tensor)
                if category_id is None:
category_id = np.argmax(predictions[0])
output = predictions[:, category_id]
conv_first_grad = gtape3.gradient(output, conv_output)
conv_second_grad = gtape2.gradient(conv_first_grad, conv_output)
conv_third_grad = gtape1.gradient(conv_second_grad, conv_output)
global_sum = np.sum(conv_output, axis=(0, 1, 2))
alpha_num = conv_second_grad[0]
alpha_denom = conv_second_grad[0]*2.0 + conv_third_grad[0]*global_sum
alpha_denom = np.where(alpha_denom != 0.0, alpha_denom, 1e-10)
alphas = alpha_num/alpha_denom
alpha_normalization_constant = np.sum(alphas, axis=(0,1))
alphas /= alpha_normalization_constant
weights = np.maximum(conv_first_grad[0], 0.0)
deep_linearization_weights = np.sum(weights*alphas, axis=(0,1))
grad_CAM_map = np.sum(deep_linearization_weights*conv_output[0], axis=2)
heatmap = np.maximum(grad_CAM_map, 0)
max_heat = np.max(heatmap)
if max_heat == 0:
max_heat = 1e-10
heatmap /= max_heat
return heatmap
def superimpose(img, cam):
heatmap = cv2.resize(cam, (img.shape[1], img.shape[0]))
heatmap = np.uint8(255 * heatmap)
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
heatmap = cv2.cvtColor(heatmap,cv2.COLOR_BGR2RGB)
superimposed_img = heatmap * .5 + img * .5
superimposed_img = np.minimum(superimposed_img, 255.0).astype(np.uint8)
return img, heatmap, superimposed_img
def plot(img,cam):
img = cv2.resize(img, (32, 32))
img, heatmap, superimposed_img = superimpose(img, cam)
fig, axs = plt.subplots(ncols=3, figsize=(9, 4))
axs[0].imshow(img)
axs[0].set_title('original image')
axs[0].axis('off')
axs[1].imshow(heatmap)
axs[1].set_title('heatmap')
axs[1].axis('off')
axs[2].imshow(superimposed_img)
axs[2].set_title('superimposed image')
axs[2].axis('off')
plt.show()
plt.close()
layer_names = ["block5_conv3","block4_conv2"]
for l in layer_names:
print("layer name:",l)
img_path1 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual7pred7.jpg"
img_path2 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/correct_actual8pred8.jpg"
img_path3 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual3pred0.jpg"
img_path4 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q2/incorrect_actual5pred4.jpg"
img1 = cv2.imread(img_path1)
cam1 = grad_cam_pp(brute_model, img1,layer_name=l, label_name=labels,category_id=int(img_path1[-5]))
plot(img1,cam1)
img2 = cv2.imread(img_path2)
cam2 = grad_cam_pp(brute_model, img2,layer_name=l, label_name=labels,category_id=int(img_path2[-5]))
plot(img2,cam2)
img3 = cv2.imread(img_path3)
cam3 = grad_cam_pp(brute_model, img3,layer_name=l, label_name=labels,category_id=int(img_path3[-5]))
plot(img3,cam3)
img4 = cv2.imread(img_path4)
cam4 = grad_cam_pp(brute_model, img4,layer_name=l, label_name=labels,category_id=int(img_path4[-5]))
plot(img4,cam4)
# In[162]:
def preprocessed_data_model():
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_train = to_categorical(y_train_without_one_hot)
y_test = to_categorical(y_test_without_one_hot)
x_train,x_valid,y_train,y_valid = train_test_split(x_train,y_train,test_size = 0.2,shuffle=True,random_state = 42)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
print(x_valid.shape)
print(y_valid.shape)
train_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True,horizontal_flip=True, rotation_range=20)
train_datagen.fit(x_train)
valid_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True)
valid_datagen.fit(x_valid)
test_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True)
test_datagen.fit(x_test)
train_generator = train_datagen.flow(x_train, y_train, batch_size=16)
valid_generator = valid_datagen.flow(x_valid, y_valid, batch_size=16)
test_generator = test_datagen.flow(x_test, y_test, batch_size=16)
vgg16 = VGG16(include_top=True,weights = None, input_shape = (32,32,3))
out = Dense(10,activation='softmax',name = 'fc3')(vgg16.get_layer('fc2').output)
preprocessed_model = Model(inputs = vgg16.input,outputs = out)
epochs = 20
learning_rate = 0.1
decay_rate = learning_rate/epochs
sgd = tf.keras.optimizers.SGD(lr=learning_rate, decay=decay_rate, momentum=0.9, nesterov=False)
    preprocessed_model.compile(loss = 'categorical_crossentropy',optimizer = sgd,metrics=['accuracy'])  # use the configured SGD instance
history = preprocessed_model.fit(x=train_generator,steps_per_epoch=len(train_generator),validation_data=valid_generator,validation_steps=len(valid_generator),epochs=epochs)
# model evaluation
_, test_accuracy = preprocessed_model.evaluate_generator(test_generator, steps=len(test_generator),verbose=0)
print("test accuracy:",test_accuracy)
train_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True,horizontal_flip=True, rotation_range=20)
train_datagen.fit(x_train)
test_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True)
test_datagen.fit(x_test)
train_generator = train_datagen.flow(x_train, y_train, batch_size=16)
test_generator = test_datagen.flow(x_test, y_test, batch_size=16)
_, train_accuracy = preprocessed_model.evaluate_generator(train_generator, steps=len(train_generator),verbose=0)
print("train accuracy:",train_accuracy)
y_pred_test = preprocessed_model.predict(x=test_generator, steps=len(test_generator))
predictions_test = np.argmax(y_pred_test, axis=1)
preprocessed_model.save("vgg16_cifar10_preprocessed_rot_new")
plt.subplot(211)
plt.title('Loss')
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.savefig("loss_preprocess_flip_rot.png")
plt.close()
plt.subplot(212)
plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='test')
plt.legend()
plt.savefig("accuracy_preprocess_flip_rot.png")
plt.close()
return preprocessed_model
# In[38]:
def preprocess_helper():
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_test = to_categorical(y_test_without_one_hot)
test_datagen = ImageDataGenerator(featurewise_center=True,featurewise_std_normalization=True)
test_datagen.fit(x_test)
test_generator = test_datagen.flow(x_test, y_test, batch_size=16)
x_test_preprocessed = []
y_test_preprocessed = []
for i in range(len(test_generator)):
for img in test_generator[i][0]:
x_test_preprocessed.append(img)
for lb in test_generator[i][1]:
y_test_preprocessed.append(lb)
x_test_preprocessed = np.array(x_test_preprocessed)
y_test_preprocessed = np.array(y_test_preprocessed)
return x_test_preprocessed,y_test_preprocessed
def class_wise_accuracy_preprocess(models,x_test,y_test):
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
y_test_without_one_hot = np.argmax(y_test,axis=1)
y_pred_test = models.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
print("test accuracy:",accuracy_score(np.argmax(y_test,axis=1),prediction_test)*100,"%")
confus_matrix = confusion_matrix(np.argmax(y_test,axis=1),prediction_test)
print("confusion_matrix:\n",confus_matrix)
class_accuracy = []
class_TP = []
for i in range(confus_matrix.shape[0]):
for j in range(confus_matrix.shape[1]):
if i == j:
TP = confus_matrix[i][j]
class_TP.append(TP)
for k in range(confus_matrix.shape[1]):
ca = (class_TP[k] / confus_matrix[:,k].sum())*100
class_accuracy.append(ca)
print("class ",k," accuracy ",labels[k]," :",ca,"%")
class_accuracy = np.array(class_accuracy)
return class_accuracy
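# Note: the nested loop above just extracts the diagonal, i.e.
# class_TP == list(np.diag(confus_matrix)); each ca divides a diagonal entry
# by its column sum (the number of predictions assigned to that class).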
def bias_metrics_preprocess(class_accuracy,models,x_test,y_test):
dob = np.std(class_accuracy)
print("Degree of Bias:",dob)
y_test_without_one_hot = np.argmax(y_test,axis=1)
y_pred_test = models.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
from sklearn.metrics import confusion_matrix
confuse_matrix = confusion_matrix(y_test_without_one_hot, prediction_test)
print("confusion_matrix:\n",confuse_matrix)
FP = confuse_matrix.sum(axis=0) - np.diag(confuse_matrix)
FN = confuse_matrix.sum(axis=1) - np.diag(confuse_matrix)
TP = np.diag(confuse_matrix)
TN = confuse_matrix.sum() - (FP+FN+TP)
FP=FP.astype(float)
TP=TP.astype(float)
FN=FN.astype(float)
TN=TN.astype(float)
FNR = FN/(TP+FN)
    FPR = FP/(TN+FP)
print("FPR:",FPR)
print("FNR:",FNR)
AFR = ((FPR.sum()/10)+(FNR.sum()/10))/2
print("AFR:",AFR)
def create_results_preprocess(models,x_test,y_test):
y_test_without_one_hot = np.argmax(y_test,axis=1)
print(y_test.shape)
print(x_test.shape)
y_pred_test = models.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
df = pd.DataFrame(np.hstack((y_test_without_one_hot.reshape(len(y_test_without_one_hot),1),prediction_test.reshape(len(prediction_test),1))),columns=['y_test','y_test_pred'],index=None)
print(df.head())
df.to_csv("y_test_prediction_test.csv",index=False)
correct_idxes = []
incorrect_idxes = []
for i in range(len(prediction_test)):
if y_test_without_one_hot[i] == prediction_test[i]:
correct_idxes.append(i)
elif y_test_without_one_hot[i] != prediction_test[i]:
incorrect_idxes.append(i)
# In[39]:
#preprocess model
print("\npreprocess model\n")
preprocessed_model = preprocessed_data_model()
x_test_preprocessed,y_test_preprocessed = preprocess_helper()
preprocessed_model1 = tf.keras.models.load_model("/home/euclid/Desktop/Chiranjeev/DAI/vgg16_cifar10_preprocessed_rot_new")
class_accuracy_preprocessed_model1 = class_wise_accuracy_preprocess(preprocessed_model1, x_test_preprocessed,y_test_preprocessed)
bias_metrics_preprocess(class_accuracy_preprocessed_model1,preprocessed_model1, x_test_preprocessed,y_test_preprocessed)
create_results_preprocess(preprocessed_model1, x_test_preprocessed,y_test_preprocessed)
# In[118]:
def method_model():
(x_train, y_train_without_one_hot), (x_test, y_test_without_one_hot) = tf.keras.datasets.cifar10.load_data()
y_train = to_categorical(y_train_without_one_hot)
y_test = to_categorical(y_test_without_one_hot)
x_train,x_valid,y_train,y_valid = train_test_split(x_train,y_train,test_size = 0.2,shuffle=True,random_state = 42)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
print(x_valid.shape)
print(y_valid.shape)
vgg16 = VGG16(include_top=True,weights = None, input_shape = (32,32,3))
out = Dense(10,activation='softmax',name = 'fc3')(vgg16.get_layer('fc2').output)
kl_model = Model(inputs = vgg16.input,outputs = out)
epochs = 20
learning_rate = 0.01
decay_rate = learning_rate/epochs
sgd = tf.keras.optimizers.SGD(lr=learning_rate, decay=decay_rate, momentum=0.9, nesterov=False)
    kl_model.compile(loss = 'kullback_leibler_divergence',optimizer = sgd,metrics=['accuracy']) #tf.keras.optimizers.Adam(learning_rate=0.0001)
history = kl_model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=epochs, batch_size=16)
kl_model.save("vgg16_cifar10_method")
y_pred_train = kl_model.predict(x_train)
predictions_train = np.argmax(y_pred_train,axis=1)
print("training accuracy:",accuracy_score(np.argmax(y_train,axis=1),predictions_train))
y_pred_test = kl_model.predict(x_test)
prediction_test = np.argmax(y_pred_test,axis=1)
print("test accuracy:",accuracy_score(np.argmax(y_test,axis=1),prediction_test))
plt.subplot(211)
plt.title('Loss')
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.subplot(212)
plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='test')
plt.legend()
plt.show()
return kl_model
# In[119]:
#method model
print("\nmethod model\n")
kl_model = method_model()
class_accuracy_kl_model = class_wise_accuracy(kl_model)
bias_metrics(class_accuracy_kl_model,kl_model)
create_results(kl_model)
# In[120]:
print("\npreprocessed model\n")
class_accuracy_preprocessed = class_wise_accuracy(preprocessed_model)
print("each class accuracies preprocessed",class_accuracy_preprocessed)
bias_metrics(class_accuracy_preprocessed,preprocessed_model)
print("\nmethod model\n")
class_accuracy_method = class_wise_accuracy(kl_model)
print("each class accuracies method",class_accuracy_method)
bias_metrics(class_accuracy_method,kl_model)
# In[187]:
def check_bias_by_counting(filename):
df = pd.read_csv(filename)
#gender 0 (g1) male
race1_correct_g1 = 0
race2_correct_g1 = 0
race3_correct_g1 = 0
race4_correct_g1 = 0
age_0_28_correct_g1 = 0
age_29_56_correct_g1 = 0
age_57_84_correct_g1 = 0
age_85_116_correct_g1 = 0
race1_incorrect_g1 = 0
race2_incorrect_g1 = 0
race3_incorrect_g1 = 0
race4_incorrect_g1 = 0
age_0_28_incorrect_g1 = 0
age_29_56_incorrect_g1 = 0
age_57_84_incorrect_g1 = 0
age_85_116_incorrect_g1 = 0
#gender 1 (g2) female
race1_correct_g2 = 0
race2_correct_g2 = 0
race3_correct_g2 = 0
race4_correct_g2 = 0
age_0_28_correct_g2 = 0
age_29_56_correct_g2 = 0
age_57_84_correct_g2 = 0
age_85_116_correct_g2 = 0
race1_incorrect_g2 = 0
race2_incorrect_g2 = 0
race3_incorrect_g2 = 0
race4_incorrect_g2 = 0
age_0_28_incorrect_g2 = 0
age_29_56_incorrect_g2 = 0
age_57_84_incorrect_g2 = 0
age_85_116_incorrect_g2 = 0
df_np = df.iloc[:,:].values
for i in range(len(df_np)):
#correct predictions
if df_np[i][2] == df_np[i][4]:
#male
if df_np[i][2] == 0:
# age groups
if df_np[i][1] == 0:
age_0_28_correct_g1 += 1
elif df_np[i][1] == 1:
age_29_56_correct_g1+=1
elif df_np[i][1] == 2:
age_57_84_correct_g1 += 1
elif df_np[i][1] == 3:
age_85_116_correct_g1 += 1
#race groups
if df_np[i][3] == 0:
race1_correct_g1 += 1
elif df_np[i][3] == 1:
race2_correct_g1+=1
elif df_np[i][3] == 2:
race3_correct_g1 += 1
elif df_np[i][3] == 3:
race4_correct_g1 += 1
#female
elif df_np[i][2] == 1:
# age groups
if df_np[i][1] == 0:
age_0_28_correct_g2 += 1
elif df_np[i][1] == 1:
age_29_56_correct_g2+=1
elif df_np[i][1] == 2:
age_57_84_correct_g2 += 1
elif df_np[i][1] == 3:
age_85_116_correct_g2 += 1
#race groups
if df_np[i][3] == 0:
race1_correct_g2 += 1
elif df_np[i][3] == 1:
race2_correct_g2+=1
elif df_np[i][3] == 2:
race3_correct_g2 += 1
elif df_np[i][3] == 3:
race4_correct_g2 += 1
elif df_np[i][2] != df_np[i][4]:
#male
if df_np[i][2] == 0:
# age groups
if df_np[i][1] == 0:
age_0_28_incorrect_g1 += 1
elif df_np[i][1] == 1:
age_29_56_incorrect_g1+=1
elif df_np[i][1] == 2:
age_57_84_incorrect_g1 += 1
elif df_np[i][1] == 3:
age_85_116_incorrect_g1 += 1
#race groups
if df_np[i][3] == 0:
race1_incorrect_g1 += 1
elif df_np[i][3] == 1:
race2_incorrect_g1+=1
elif df_np[i][3] == 2:
race3_incorrect_g1 += 1
elif df_np[i][3] == 3:
race4_incorrect_g1 += 1
#female
elif df_np[i][2] == 1:
# age groups
if df_np[i][1] == 0:
age_0_28_incorrect_g2 += 1
elif df_np[i][1] == 1:
age_29_56_incorrect_g2+=1
elif df_np[i][1] == 2:
age_57_84_incorrect_g2 += 1
elif df_np[i][1] == 3:
age_85_116_incorrect_g2 += 1
#race groups
if df_np[i][3] == 0:
race1_incorrect_g2 += 1
elif df_np[i][3] == 1:
race2_incorrect_g2+=1
elif df_np[i][3] == 2:
race3_incorrect_g2 += 1
elif df_np[i][3] == 3:
race4_incorrect_g2 += 1
print("DoB")
#gender 1
race1_accuracy_g1 = (race1_correct_g1/(race1_correct_g1+race1_incorrect_g1))*100
race2_accuracy_g1 = (race2_correct_g1/(race2_correct_g1+race2_incorrect_g1))*100
race3_accuracy_g1 = (race3_correct_g1/(race3_correct_g1+race3_incorrect_g1))*100
race4_accuracy_g1 = (race4_correct_g1/(race4_correct_g1+race4_incorrect_g1))*100
print("race1_accuracy_g1:",race1_accuracy_g1)
print("race2_accuracy_g1:",race2_accuracy_g1)
print("race3_accuracy_g1:",race3_accuracy_g1)
print("race4_accuracy_g1:",race4_accuracy_g1)
age_0_28_accuracy_g1 = (age_0_28_correct_g1/(age_0_28_correct_g1+age_0_28_incorrect_g1))*100
age_29_56_accuracy_g1 = (age_29_56_correct_g1/(age_29_56_correct_g1+age_29_56_incorrect_g1))*100
age_57_84_accuracy_g1 = (age_57_84_correct_g1/(age_57_84_correct_g1+age_57_84_incorrect_g1))*100
age_85_116_accuracy_g1 = (age_85_116_correct_g1/(age_85_116_correct_g1+age_85_116_incorrect_g1))*100
print("age_0_28_accuracy_g1:",age_0_28_accuracy_g1)
print("age_29_56_accuracy_g1:",age_29_56_accuracy_g1)
print("age_57_84_accuracy_g1:",age_57_84_accuracy_g1)
print("age_85_116_accuracy_g1:",age_85_116_accuracy_g1)
#gender2
race1_accuracy_g2 = (race1_correct_g2/(race1_correct_g2+race1_incorrect_g2))*100
race2_accuracy_g2 = (race2_correct_g2/(race2_correct_g2+race2_incorrect_g2))*100
race3_accuracy_g2 = (race3_correct_g2/(race3_correct_g2+race3_incorrect_g2))*100
race4_accuracy_g2 = (race4_correct_g2/(race4_correct_g2+race4_incorrect_g2))*100
print("race1_accuracy_g2:",race1_accuracy_g2)
print("race2_accuracy_g2:",race2_accuracy_g2)
print("race3_accuracy_g2:",race3_accuracy_g2)
print("race4_accuracy_g2:",race4_accuracy_g2)
age_0_28_accuracy_g2 = (age_0_28_correct_g2/(age_0_28_correct_g2+age_0_28_incorrect_g2))*100
age_29_56_accuracy_g2 = (age_29_56_correct_g2/(age_29_56_correct_g2+age_29_56_incorrect_g2))*100
age_57_84_accuracy_g2 = (age_57_84_correct_g2/(age_57_84_correct_g2+age_57_84_incorrect_g2))*100
age_85_116_accuracy_g2 = (age_85_116_correct_g2/(age_85_116_correct_g2+age_85_116_incorrect_g2))*100
print("age_0_28_accuracy_g2:",age_0_28_accuracy_g2)
print("age_29_56_accuracy_g2:",age_29_56_accuracy_g2)
print("age_57_84_accuracy_g2:",age_57_84_accuracy_g2)
print("age_85_116_accuracy_g2:",age_85_116_accuracy_g2)
print("DoB across race")
dob_across_race1 = np.std(np.array([race1_accuracy_g1,race1_accuracy_g2]))
dob_across_race2 = np.std(np.array([race2_accuracy_g1,race2_accuracy_g2]))
dob_across_race3 = np.std(np.array([race3_accuracy_g1,race3_accuracy_g2]))
dob_across_race4 = np.std(np.array([race4_accuracy_g1,race4_accuracy_g2]))
dob_across_race_overall = (dob_across_race1+dob_across_race2+dob_across_race3+dob_across_race4)/4
print("dob_across_race_overall:",dob_across_race_overall)
print("DoB across age")
dob_across_age_0_28 = np.std(np.array([age_0_28_accuracy_g1,age_0_28_accuracy_g2]))
dob_across_age_29_56 = np.std(np.array([age_29_56_accuracy_g1,age_29_56_accuracy_g2]))
dob_across_age_57_84 = np.std(np.array([age_57_84_accuracy_g1,age_57_84_accuracy_g2]))
dob_across_age_85_116 = np.std(np.array([age_85_116_accuracy_g1,age_85_116_accuracy_g2]))
dob_across_age_overall = (dob_across_age_0_28+dob_across_age_29_56+dob_across_age_57_84+dob_across_age_85_116)/4
print("dob_across_age_overall:",dob_across_age_overall)
return dob_across_race1,dob_across_race2,dob_across_race3,dob_across_race4,dob_across_age_0_28,dob_across_age_29_56,dob_across_age_57_84,dob_across_age_85_116,dob_across_race_overall,dob_across_age_overall
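# A compact pandas equivalent of the counting above (sketch only; assumes the
# CSV columns are positionally [_, age_group, gender, race, gender_pred], as
# indexed in the loop):
#   df['correct'] = df.iloc[:, 2] == df.iloc[:, 4]
#   race_acc = df.groupby([df.iloc[:, 2], df.iloc[:, 3]])['correct'].mean() * 100
#   age_acc  = df.groupby([df.iloc[:, 2], df.iloc[:, 1]])['correct'].mean() * 100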
# In[200]:
print("cross entropy loss")
filename1 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q1/categorical_cross_entropy/test_gender_across_race_age_y_test_pred2_optimizer2_45.csv"
dob_across_race1,dob_across_race2,dob_across_race3,dob_across_race4,dob_across_age_0_28,dob_across_age_29_56,dob_across_age_57_84,dob_across_age_85_116,dob_across_race_overall,dob_across_age_overall = check_bias_by_counting(filename1)
print("\nfocal loss")
filename2 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q1/focal_loss/test_gender_across_race_age_y_test_pred2_optimizer2_45_focal_loss.csv"
dob_across_race1,dob_across_race2,dob_across_race3,dob_across_race4,dob_across_age_0_28,dob_across_age_29_56,dob_across_age_57_84,dob_across_age_85_116,dob_across_race_overall,dob_across_age_overall = check_bias_by_counting(filename2)
print("\nLinearsvm")
filename3 = "/home/euclid/Desktop/Chiranjeev/DAI/Assignment_1_Q1/svm/test_gender_across_race_age_y_test_pred2_optimizer2_svm.csv"
dob_across_race1,dob_across_race2,dob_across_race3,dob_across_race4,dob_across_age_0_28,dob_across_age_29_56,dob_across_age_57_84,dob_across_age_85_116,dob_across_race_overall,dob_across_age_overall = check_bias_by_counting(filename3)
| 33.16746 | 235 | 0.685304 | 1,889 | 0.05419 | 0 | 0 | 0 | 0 | 0 | 0 | 3,936 | 0.112912 |
72045094280bf8b19ef8956f47fe38ea87d738b3 | 1,027 | py | Python | notebooks/general.py | transientlunatic/grasshopper | 1d3822427970d200341ff9d2823949fb4b27e001 | [
"0BSD"
]
| 3 | 2020-09-26T01:27:13.000Z | 2020-09-30T05:47:42.000Z | notebooks/general.py | transientlunatic/gravpy | 1d3822427970d200341ff9d2823949fb4b27e001 | [
"0BSD"
]
| null | null | null | notebooks/general.py | transientlunatic/gravpy | 1d3822427970d200341ff9d2823949fb4b27e001 | [
"0BSD"
]
| null | null | null | import numpy as np
import astropy.units as u
def snr(signal, detector):
"""
Calculate the SNR of a signal in a given detector,
assuming that it has been detected with an optimal filter.
See e.g. arxiv.org/abs/1408.0740
Parameters
----------
signal : Source
A Source object which describes the source producing the
signal, e.g. a CBC.
detector : Detector
A Detector object describing the instrument making the observation
e.g. aLIGO.
Returns
-------
SNR : float
The signal-to-noise ratio of the signal in the detector.
"""
if signal.ncycles():
ncycles = np.sqrt(2*signal.ncycles(detector.frequencies))
else:
ncycles = 1
noise = detector.psd(detector.frequencies)
ampli = signal.raw_strain(detector.frequencies) * ncycles
fraction = 4*(np.abs(ampli)**2 / noise)
fraction[np.isnan(fraction)]=0
return np.sqrt(np.trapz(fraction, x=detector.frequencies, dx=0.01*u.hertz))
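# Usage sketch (SomeSource/SomeDetector are hypothetical stand-ins for objects
# exposing ncycles()/raw_strain() and psd()/frequencies respectively):
#     source = SomeSource(...)
#     detector = SomeDetector(...)
#     print(snr(source, detector))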
| 30.205882 | 79 | 0.635833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 558 | 0.54333 |
72082ffdc0eb8ab81095d7d094328792a40cbcea | 6,898 | py | Python | dlfairness/original_code/FairALM/Experiments-CelebA/results/quantitative_results/plot_celeba.py | lin-tan/fairness-variance | 7f6aee23160707ffe78f429e5d960022ea1c9fe4 | [
"BSD-3-Clause"
]
| null | null | null | dlfairness/original_code/FairALM/Experiments-CelebA/results/quantitative_results/plot_celeba.py | lin-tan/fairness-variance | 7f6aee23160707ffe78f429e5d960022ea1c9fe4 | [
"BSD-3-Clause"
]
| null | null | null | dlfairness/original_code/FairALM/Experiments-CelebA/results/quantitative_results/plot_celeba.py | lin-tan/fairness-variance | 7f6aee23160707ffe78f429e5d960022ea1c9fe4 | [
"BSD-3-Clause"
]
| null | null | null | '''
Script to plot the accuracy and the fairness measures for different algorithms
from the log files
'''
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
import os
print(os.getcwd())
import numpy as np
plt.style.use('ggplot')
def create_acc_lists(filepath):
train_acc = []
train_ddp = []
train_deo = []
valid_acc = []
valid_ddp = []
valid_deo = []
with open(filepath) as fp:
line = fp.readline()
cnt = 1
while line:
#if 'Epoch: 040/100' in line:
# break
if 'Train Acc' in line:
line = line.strip()
linesegs = line.split(' | ')
train_acc.append(float(linesegs[1].split(': ')[1].strip('%')))
train_ddp.append(float(linesegs[2].split(': ')[1].strip('%')))
train_deo.append(float(linesegs[3].split(': ')[1].strip('%')))
elif 'Valid Acc' in line:
line = line.strip()
linesegs = line.split(' | ')
valid_acc.append(float(linesegs[1].split(': ')[1].strip('%')))
valid_ddp.append(float(linesegs[2].split(': ')[1].strip('%')))
valid_deo.append(float(linesegs[3].split(': ')[1].strip('%')))
line = fp.readline()
cnt += 1
return train_acc, train_ddp, train_deo, valid_acc, valid_ddp, valid_deo
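# The parser above assumes ' | '-separated log lines roughly of the form
# (hypothetical example):
#   Epoch: 001/100 | Train Acc: 85.20% | Train DDP: 3.10% | Train DEO: 4.50%
# with matching 'Valid Acc' lines for the validation metrics.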
def color(R, G, B):
return (float(R)/255, float(G)/255, float(B)/255)
def BLUE():
return color(0, 77, 128)
def RED():
return color(181, 23, 0)
def make_plot_helper(arr, legends, xlabel, ylabel, outname):
epoch_list = np.arange(1, arr.shape[1] + 1)
fig, axs = plt.subplots(1, 1, figsize=(5,4), sharey=False)
fig.patch.set_visible(False)
axs.set_facecolor(color(240, 240, 240))
axs.tick_params(axis='x', colors='black')
axs.tick_params(axis='y', colors='black')
axs.xaxis.label.set_color('black')
axs.yaxis.label.set_color('black')
axs.set_ylim([0, arr.max() + 15])
#plt.gca().set_color_cycle(['red', 'blue', 'green', 'yellow'])
colors=[RED(), BLUE()]
for value, legend, c in zip(arr, legends, colors):
plt.plot(epoch_list, value, label=legend, color=c)
axs.set_xlabel(xlabel, fontweight='bold')
axs.set_ylabel(ylabel, fontweight='bold')
title = ylabel.replace("%", "").upper()
#plt.title(title, fontweight='bold')#, x=0.7, y=0.1)
leg = axs.legend(loc='upper right', frameon=False)
for line in leg.get_lines():
line.set_linewidth(4.0)
fig.tight_layout()
    outname = outname.replace('$', '_')
fig.savefig(outname, bbox_inches='tight')
print('Plotted ' + outname)
def make_plot(list1, list2, legend1, legend2, plot_type, suffix=None):
arr1 = np.array(list1)
arr2 = np.array(list2)
legend = [legend1, legend2]
arr = np.array([arr1, arr2])
xlabel = 'Epochs'
if plot_type == 'acc':
arr = 100 - arr
ylabel = 'Error %' if plot_type == 'acc' else 'DEO'
legend1 = '_'.join(legend1.split(' '))
legend2 = '_'.join(legend2.split(' '))
#pdb.set_trace()
if 'penalty' in legend2:
legend2 = 'l2_penalty'
if 'penalty' in legend1:
legend1 = 'l2_penalty'
outname = '_'.join([legend1, legend2, plot_type])
if suffix is not None:
outname += '_' + suffix
make_plot_helper(arr, legend, xlabel, ylabel, outname)
def gen_main_plots():
# Used in the main paper for generating plots
file_name = 'no_1p_lr0p01.txt'
_, _, _, no_acc, _, no_deo = create_acc_lists(file_name)
file_name = 'with_1p_fairalm_eta60_inner5_lr0p01.txt'
_, _, _, fair_acc, _, fair_deo = create_acc_lists(file_name)
file_name = 'with_1e_L2_PENALTY_eta0p01_lr0p01.txt'
_, _, _, l2_acc, _, l2_deo = create_acc_lists(file_name)
MEDIUM_SIZE = 12
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
make_plot(no_acc, fair_acc, 'Unconstrained', 'FairALM', 'acc')
make_plot(no_deo, fair_deo, 'Unconstrained', 'FairALM', 'deo')
make_plot(no_acc, l2_acc, 'Unconstrained', '$\ell_2$ penalty', 'acc')
make_plot(no_deo, l2_deo, 'Unconstrained', '$\ell_2$ penalty', 'deo')
def gen_fair_alm_plots(no_filename, fair_alm_filename, suffix):
_, _, _, no_acc, _, no_deo = create_acc_lists(no_filename)
_, _, _, fair_acc, _, fair_deo = create_acc_lists(fair_alm_filename)
make_plot(no_acc, fair_acc, 'Unconstrained', 'FairALM', 'acc', suffix)
make_plot(no_deo, fair_deo, 'Unconstrained', 'FairALM', 'deo', suffix)
def gen_l2_plots(no_filename, l2_filename, suffix):
_, _, _, no_acc, _, no_deo = create_acc_lists(no_filename)
_, _, _, l2_acc, _, l2_deo = create_acc_lists(l2_filename)
make_plot(no_acc, l2_acc, 'Unconstrained', "$\ell_2$ penalty", 'acc', suffix)
make_plot(no_deo, l2_deo, 'Unconstrained', "$\ell_2$ penalty", 'deo', suffix)
def gen_l2_fair_alm_plots(l2_filename, fair_alm_filename, suffix):
_, _, _, l2_acc, _, l2_deo = create_acc_lists(l2_filename)
_, _, _, fair_acc, _, fair_deo = create_acc_lists(fair_alm_filename)
make_plot(l2_acc, fair_acc, "$\ell_2$ penalty", 'FairALM', 'acc', suffix)
make_plot(l2_deo, fair_deo, "$\ell_2$ penalty", 'FairALM', 'deo', suffix)
def gen_all_plots():
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)
file_name = 'no_1p_lr0p01.txt'
fair_alm_filenames = {'eta60': 'FAIR_ALM_eta60_inner5_lr0p01.txt',
'eta40': 'FAIR_ALM_eta40_inner5_lr0p01.txt',
'eta45': 'FAIR_ALM_eta45_lr0p01.txt',
'eta50': 'FAIR_ALM_eta50_lr0p01.txt',
'eta80': 'FAIR_ALM_eta80_inner5_lr0p01.txt',
'eta20': 'FAIR_ALM_eta20_inner5_lr0p01.txt'}
l2_filenames = {'eta0p01': 'L2_PENALTY_eta0p01_lr0p01.txt',
'eta0p001': 'L2_PENALTY_eta0p001_lr0p01.txt',
'eta0p1': 'L2_PENALTY_eta0p1_lr0p01.txt',
'eta1': 'L2_PENALTY_eta1_lr0p01.txt'}
for eta, name in fair_alm_filenames.items():
gen_fair_alm_plots(file_name, name, eta)
for eta, name in l2_filenames.items():
gen_l2_plots(file_name, name, eta)
for l2_eta, l2_name in l2_filenames.items():
for alm_eta, alm_name in fair_alm_filenames.items():
gen_l2_fair_alm_plots(l2_name, alm_name, l2_eta+'_'+alm_eta)
if __name__ == "__main__":
gen_all_plots()
| 36.691489 | 81 | 0.621919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,634 | 0.23688 |
72085eb6f35c638ad1743b5ae7bd6a8de18fc6f3 | 682 | py | Python | conqueror/scraper/base_yandex.py | piotrmaslanka/yandex-conqueror | cd0b50a43e25551f91150e0bee4f9cd307e4adce | [
"MIT"
]
| 12 | 2022-03-01T22:45:05.000Z | 2022-03-16T05:46:24.000Z | conqueror/scraper/base_yandex.py | piotrmaslanka/yandex-conqueror | cd0b50a43e25551f91150e0bee4f9cd307e4adce | [
"MIT"
]
| 1 | 2022-03-02T10:18:05.000Z | 2022-03-02T11:03:52.000Z | conqueror/scraper/base_yandex.py | piotrmaslanka/yandex-conqueror | cd0b50a43e25551f91150e0bee4f9cd307e4adce | [
"MIT"
]
| 1 | 2022-03-02T10:18:35.000Z | 2022-03-02T10:18:35.000Z | import requests
from satella.coding.decorators import retry
@retry(3, exc_classes=requests.RequestException)
def get_yandex_request(url, arguments) -> dict:
"""
Return a JSON object querying Yandex at provided parameters.
Handling CSRF will be done automatically.
:param url: URL to ask
:param arguments: dictionary of arguments to add
:return: object returned via endpoint
"""
resp = requests.get(url, params=arguments)
resp.raise_for_status()
data = resp.json()
if list(data.keys()) == ['csrfToken']:
arguments['csrfToken'] = data['csrfToken']
return get_yandex_request(url, arguments)
else:
return data
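# Usage sketch (the endpoint and parameters are illustrative, not a real
# Yandex API path):
#     data = get_yandex_request('https://yandex.example/api/search', {'text': 'foo'})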
| 28.416667 | 64 | 0.692082 | 0 | 0 | 0 | 0 | 619 | 0.907625 | 0 | 0 | 279 | 0.409091 |
72097fdf43f5937088d329748fec0dc61447255f | 6,142 | py | Python | engine/azbatchengine.py | asedighi/azure_realtime_batch | c2cf4c8edc2bbded8377842fcad6370fd35af44e | [
"MIT"
]
| 3 | 2020-05-08T16:20:07.000Z | 2021-10-06T11:16:10.000Z | engine/azbatchengine.py | asedighi/azure_realtime_batch | c2cf4c8edc2bbded8377842fcad6370fd35af44e | [
"MIT"
]
| null | null | null | engine/azbatchengine.py | asedighi/azure_realtime_batch | c2cf4c8edc2bbded8377842fcad6370fd35af44e | [
"MIT"
]
| null | null | null | # Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# @author: asedighi
import asyncio
import sys
sys.path.append('.')
sys.path.append('..')
sys.path.append('/mnt/resource/batch/tasks/shared/')
sys.path.append('/mnt/resource/batch/tasks/shared/engine')
sys.path.append('/mnt/resource/batch/tasks/shared/batchwrapper')
sys.path.append('/mnt/resource/batch/tasks/shared/tasks')
from batchwrapper.config import getRandomizer
from batchwrapper.config import AzureCredentials
from batchwrapper.config import ReadConfig
from batchwrapper.config import TaskConfig
from batchwrapper.config import find_file_path
import argparse
import ntpath
from engine.taskengine import task_loop
from subprocess import *
from azure.storage.blob import BlobServiceClient
from azure.servicebus import ServiceBusClient
import os
class AzureBatchEngine():
def __init__(self):
os.chdir('/mnt/resource/batch/tasks/shared/engine')
configuration = AzureCredentials()
self.storage_string = configuration.getStorageConnectionString()
self.servicebus_string = configuration.get_service_bus_connection_string()
self.blob_service_client = BlobServiceClient.from_connection_string(self.storage_string)
self.service_bus_client = ServiceBusClient.from_connection_string(self.servicebus_string)
task = TaskConfig()
self.container_name = task.getOutputContainer()
paged_cont = self.blob_service_client.list_containers(name_starts_with=self.container_name)
counter = 0
for i in paged_cont:
counter += 1
if counter == 0:
self.blob_container_client = self.blob_service_client.create_container(self.container_name)
print("\tCreated {}... ".format(self.container_name))
else:
self.blob_container_client = self.blob_service_client.get_container_client(self.container_name)
print("\tContainer {} exists already... ".format(self.container_name))
print("Output Container to be used is: {}... ".format(self.container_name))
self.file_list_to_upload = list()
self.result_to_upload = ''
def getOutputContainer(self):
return self.container_name
def readJsonConfigFile(self, name=''):
if name == '':
return
return ReadConfig(name)
def java_runner(self, args) -> list:
        #print("argument is of type in java runner", type(args))
        #print("argument is ", args)
        os.chdir('/mnt/resource/batch/tasks/shared/tasks')
        process = Popen(args, stdout=PIPE, stderr=PIPE)
        ret = []
        while process.poll() is None:
            line = process.stdout.readline()
            if line != b'' and line.endswith(b'\n'):
                ret.append(line[:-1].decode('utf-8'))
        stdout, stderr = process.communicate()
        # decode the remaining output so the result is a uniform list of str
        ret += [l.decode('utf-8') for l in stdout.split(b'\n')]
        if stderr != b'':
            ret += [l.decode('utf-8') for l in stderr.split(b'\n')]
        # drop empty lines left over from splitting on newlines
        ret = [l for l in ret if l]
return ret
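    # Usage sketch (jar name and arguments are hypothetical):
    #     engine.java_runner(['java', '-jar', 'task.jar', '--input', 'data.txt'])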
def do(self):
#in_data = ' '.join(args[1:])
#in_data = args[1:]
#print("setting arguments to: ", in_data)
#task_command = (args[0], in_data)
task_loop(self, "../tasks")
#self.uploadResultData()
self.uploadFiles()
def do_action(self, *args):
pass
def addFileToUpload(self, file_name=''):
#/mnt/batch/tasks/workitems/<job id>/job-<#>/<task id>/wd
#/mnt/batch/tasks/shared
name = find_file_path(file_name, "../")
print("Found file to upload: {}".format(name))
if name != '':
self.file_list_to_upload.extend([name])
print("Will upload: {}".format(self.file_list_to_upload))
def dataToUpload(self, data: str =''):
if data != '':
self.result_to_upload = data
self.uploadResultData()
def uploadResultData(self):
##print("the current working directory for uploading results is: {}".format(os.getcwd()))
filen = "result_" + getRandomizer() + ".txt"
if self.result_to_upload != '':
text_file = open(filen, "w")
n = text_file.write(self.result_to_upload)
text_file.close()
self.addFileToUpload(filen)
def uploadFiles(self):
        # iterate over a copy so removing uploaded entries from the list is safe
        for output_file in list(self.file_list_to_upload):
print('Uploading file {} to container [{}]...'.format(output_file, self.container_name))
self.blob_client = self.blob_service_client.get_blob_client(container=self.container_name, blob=ntpath.basename(output_file))
# Upload the created file
with open(output_file, "rb") as data:
self.blob_client.upload_blob(data)
self.file_list_to_upload.remove(output_file)
if __name__ == '__main__':
print("Starting engine ...")
#all_input = sys.argv[1:];
#data_input = ' '.join(all_input[1:])
#foo = (all_input[0], data_input)
#print(foo)
#exit(1)
engine = AzureBatchEngine()
engine.do()
| 29.38756 | 137 | 0.667209 | 3,986 | 0.648974 | 0 | 0 | 0 | 0 | 0 | 0 | 2,214 | 0.360469 |
720a41d918f83d5bbf26dfd204b04b9dc1b4ac43 | 1,090 | py | Python | j.py | chirag127/Language-Translator-Using-Tkinter-in-Python | c790a0672c770cf703559d99c74ad581643f4d2f | [
"MIT"
]
| null | null | null | j.py | chirag127/Language-Translator-Using-Tkinter-in-Python | c790a0672c770cf703559d99c74ad581643f4d2f | [
"MIT"
]
| null | null | null | j.py | chirag127/Language-Translator-Using-Tkinter-in-Python | c790a0672c770cf703559d99c74ad581643f4d2f | [
"MIT"
]
| null | null | null | import tkinter as tk
import sys
class PrintLogger(): # create file like object
def __init__(self, textbox): # pass reference to text widget
self.textbox = textbox # keep ref
def write(self, text):
self.textbox.insert(tk.END, text) # write text to textbox
# could also scroll to end of textbox here to make sure always visible
def flush(self): # needed for file like object
pass
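# As the comment in write() notes, auto-scrolling could be added with
# self.textbox.see(tk.END) after each insert (see() is a standard tkinter
# Text method that scrolls the widget until the given index is visible).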
if __name__ == '__main__':
while True:
try:
def do_something():
print('i did something')
# root.after(1000, do_something)
print("qiaulfskhdnliukf")
root = tk.Tk()
t = tk.Text()
t.pack()
# create instance of file like object
pl = PrintLogger(t)
# replace sys.stdout with our object
sys.stdout = pl
# now we can print to stdout or file
print('hello world')
print('hello world')
root.mainloop()
        except Exception as e:
            print("exception:", e)
| 24.772727 | 82 | 0.542202 | 395 | 0.362385 | 0 | 0 | 0 | 0 | 0 | 0 | 411 | 0.377064 |
720b01f5be1444386ad583c605e2465546f819c4 | 2,695 | py | Python | byteweiser.py | urbanware-org/byteweiser | fc90d17b51ead44af53401dc9c8ca5f0efc5e72e | [
"MIT"
]
| 3 | 2017-11-27T00:35:04.000Z | 2017-12-13T22:41:31.000Z | byteweiser.py | urbanware-org/byteweiser | fc90d17b51ead44af53401dc9c8ca5f0efc5e72e | [
"MIT"
]
| 1 | 2017-03-08T19:04:49.000Z | 2017-03-08T19:04:49.000Z | byteweiser.py | urbanware-org/byteweiser | fc90d17b51ead44af53401dc9c8ca5f0efc5e72e | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ============================================================================
# ByteWeiser - Byte comparison and replacement tool
# Main script
# Copyright (C) 2021 by Ralf Kilian
# Distributed under the MIT License (https://opensource.org/licenses/MIT)
#
# GitHub: https://github.com/urbanware-org/byteweiser
# GitLab: https://gitlab.com/urbanware-org/byteweiser
# ============================================================================
import os
import sys
def main():
from core import clap
from core import common
from core import main
try:
p = clap.Parser()
except Exception as e:
print("%s: error: %s" % (os.path.basename(sys.argv[0]), e))
sys.exit(1)
p.set_description("Compare two files and replace different bytes.")
p.set_epilog("Further information and usage examples can be found "
"inside the documentation file for this script.")
# Required arguments
p.add_avalue("-i", "--input-file", "source file where to read the data "
"from", "input_file", None, True)
p.add_avalue("-o", "--output-file", "destination file where to write "
"data into", "output_file", None, True)
# Optional arguments
p.add_avalue("-b", "--buffer-size", "buffer size in bytes", "buffer_size",
4096, False)
p.add_switch(None, "--no-hashes", "do not use file hash comparison",
"no_hash", True, False)
p.add_switch(None, "--no-progress", "do not display the process "
"percentage", "no_progress", True, False)
p.add_switch("-q", "--quiet", "disable output", "quiet", True, False)
p.add_switch("-s", "--simulate", "do not change the output file",
"simulate", True, False)
p.add_switch(None, "--version", "print the version number and exit", None,
True, False)
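    # Example invocation (hypothetical file names):
    #     python byteweiser.py -i source.bin -o target.bin -b 8192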
if len(sys.argv) == 1:
p.error("At least one required argument is missing.")
elif ("-h" in sys.argv) or ("--help" in sys.argv):
p.print_help()
sys.exit(0)
elif "--version" in sys.argv:
print(common.get_version())
sys.exit(0)
args = p.parse_args()
try:
hashes = not args.no_hash
progress = not args.no_progress
verbose = not args.quiet
byteweiser = main.ByteWeiser()
byteweiser.compare_and_replace(args.input_file, args.output_file,
args.buffer_size, args.simulate,
verbose, progress, hashes)
except Exception as e:
p.error(e)
if __name__ == "__main__":
main()
# EOF
| 34.551282 | 78 | 0.562152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,234 | 0.457885 |
720b83b3d481df1e875ae4b17eade77f3a7f0679 | 9,798 | py | Python | scripts/st_dashboard.py | rsmith49/simple-budget-pld | 1bee5a26f53aa4a5b0aab49ee4c158b5ecb7c743 | [
"Apache-2.0"
]
| 1 | 2022-01-01T14:44:40.000Z | 2022-01-01T14:44:40.000Z | scripts/st_dashboard.py | rsmith49/simple-budget-pld | 1bee5a26f53aa4a5b0aab49ee4c158b5ecb7c743 | [
"Apache-2.0"
]
| null | null | null | scripts/st_dashboard.py | rsmith49/simple-budget-pld | 1bee5a26f53aa4a5b0aab49ee4c158b5ecb7c743 | [
"Apache-2.0"
]
| null | null | null | import altair as alt
import os
import pandas as pd
import streamlit as st
import sys
from datetime import datetime
from dateutil.relativedelta import relativedelta
from dotenv import load_dotenv
from plaid.api_client import ApiClient
from plaid.exceptions import ApiException
from pathlib import Path
from traceback import format_exc
from urllib.error import URLError
sys.path.append(os.getcwd())
load_dotenv()
from src.budget import Budget
from src.transactions import get_transactions_df
from src.user_modifications import transform_pipeline
from src.views import top_vendors
EXISTING_TRANSACTIONS_FILE = f"{Path.home()}/.ry-n-shres-budget-app/all_transactions.csv"
TRANSACTION_GRACE_BUFFER = relativedelta(days=10) # How far before latest transaction to pull from
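# Streamlit cannot hash Plaid's ApiClient, so hash_funcs maps it to a constant
# (0), effectively excluding the client from the cache key.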
@st.cache(
hash_funcs={ApiClient: lambda *args, **kwargs: 0}
)
def get_transaction_data():
try:
existing_df = pd.read_csv(EXISTING_TRANSACTIONS_FILE)
existing_df['date'] = existing_df['date'].astype(str)
except FileNotFoundError:
existing_df = None
# Get Plaid output
now = datetime.now().strftime('%Y-%m-%d')
if existing_df is not None:
latest_date = existing_df['date'].max()
start_date = (datetime.strptime(latest_date, '%Y-%m-%d') - TRANSACTION_GRACE_BUFFER).strftime('%Y-%m-%d')
latest_transactions_df = get_transactions_df(start_date, now)
latest_transactions_df['date'] = latest_transactions_df['date'].astype(str)
all_transactions_df = pd.concat([
existing_df[existing_df['date'] < start_date],
latest_transactions_df
])
else:
all_transactions_df = get_transactions_df(
'2016-01-01',
now
)
os.makedirs(EXISTING_TRANSACTIONS_FILE[:EXISTING_TRANSACTIONS_FILE.rfind("/")], exist_ok=True)
all_transactions_df.to_csv(EXISTING_TRANSACTIONS_FILE, index=False)
# Fix for Streamlit Cache issues
all_transactions_df = all_transactions_df.drop(
['payment_meta', 'location'],
axis=1
)
all_transactions_df['category'] = all_transactions_df['category'].astype(str)
return all_transactions_df
def write_df(df: pd.DataFrame):
"""Helper function to st.write a DF with amount stylized to dollars"""
st.dataframe(
df.style.format({
col_name: "{:,.2f}"
for col_name in ["amount", "Total Spent"]
})
)
# TODO: Make non-budgeted columns show up on bar chart, just without ticks
# TODO: Make all-time a budget period option (figure out what to do about this - maybe it only shows up for one month?)
# TODO: Allow you to set custom start date for your budget period (i.e. make your monthly spending start on the 3rd)
# TODO: Fix the duplicate charge issue with pending charges
def single_inc_spending_summary(df: pd.DataFrame, date_inc_key: str, curr_date: str, is_current: bool = False) -> None:
"""Creates display for a single date increment
Parameters
----------
df
Transactions Dataframe
date_inc_key
The key for date increment (one of week, month, year)
curr_date
The selected date increment value
is_current
Whether the date represents the most recent date increment
"""
budget = Budget(df)
curr_df = df[df[date_inc_key] == curr_date]
total_spending_str = f"{curr_df['amount'].sum():,.2f}"
if budget.budget_plan:
show_budget = st.checkbox("Budget View", value=True)
total_budget = budget.total_limit(date_inc_key)
if budget.budget_plan and show_budget:
metric_col1, metric_col2 = st.columns(2)
with metric_col1:
st.metric(f"Total Spending", total_spending_str)
with metric_col2:
st.metric(f"Total Budget", f"{total_budget:,.2f}")
simple_summary = budget.simple_summary(date_inc_key, curr_date)
bar = alt.Chart(simple_summary).mark_bar().encode(
y="category",
x="spent",
tooltip=alt.Tooltip(field="spent", aggregate="sum", type="quantitative"),
).properties(
height=alt.Step(60)
)
ticks = alt.Chart(simple_summary).mark_tick(
color="red",
thickness=3,
size=60 * 0.9,
).encode(
y="category",
x="total_budget",
tooltip=alt.Tooltip(field="total_budget", aggregate="sum", type="quantitative")
)
if is_current:
ticks += alt.Chart(simple_summary).mark_tick(
color="white",
thickness=2,
size=60 * 0.9,
).encode(
y="category",
x="projected_budget",
)
st.altair_chart(bar + ticks, use_container_width=True)
else:
st.metric(f"Total Spending", total_spending_str)
chart = alt.Chart(curr_df).mark_bar().encode(
x=alt.X("sum(amount)", axis=alt.Axis(title='Spent')),
y=alt.Y("category_1", axis=alt.Axis(title="Category")),
tooltip=alt.Tooltip(field="amount", aggregate="sum", type="quantitative"),
).properties(
height=alt.Step(40),
)
st.altair_chart(chart, use_container_width=True)
with st.expander("Largest Transactions"):
write_df(
curr_df[["date", "amount", "name", "category_1", "category_2"]].sort_values(
by="amount",
ascending=False
)
)
def df_for_certain_categories(df: pd.DataFrame) -> pd.DataFrame:
"""Helper function to get a DF filtered by any user selected categories"""
categories = st.multiselect(
f"Select any categories to only see spending for",
options=sorted(df['category_1'].unique()),
default=[],
)
if len(categories) > 0:
bool_key = df['category_1'] == 'NOT_A CATEGORY'
for cat in categories:
bool_key = bool_key | (df['category_1'] == cat)
df = df[bool_key]
return df
def main():
try:
st.set_page_config(initial_sidebar_state="collapsed")
try:
df = get_transaction_data().copy()
except ApiException as e:
# TODO: Check e for if it is item expiration
st.write("Error accessing Plaid - using old transaction data for now")
st.error(f"{e}")
try:
df = pd.read_csv(EXISTING_TRANSACTIONS_FILE)
except FileNotFoundError:
st.write("Could not find existing transactions file - cannot run this app")
raise e
df = transform_pipeline(df)
# Organizing Page
st.write("# Budget Display")
date_inc = st.sidebar.selectbox(
f"Select the timespan (week, month, year) that you would like to use to view your spending by",
["Month", "Week", "Year"],
)
date_inc_key = date_inc.lower()
date_inc_label = date_inc[0].upper() + date_inc[1:]
categories_to_ignore = st.sidebar.multiselect(
"Any categories to ignore in calculations",
options=sorted(df["category_1"].unique()),
default=["Income"]
)
start_date = st.sidebar.select_slider(
f"Enter a Start Date for viewing your spending",
sorted(df["date"].unique())
)
end_date = st.sidebar.select_slider(
f"Enter an End Date to view your spending until",
sorted(df["date"].unique()),
value=df["date"].max()
)
if start_date is not None:
df = df[df['date'] >= start_date]
if end_date is not None:
df = df[df['date'] <= end_date]
# Preprocessing
if len(categories_to_ignore):
for category in categories_to_ignore:
df = df[df['category_1'] != category]
df['week'] = df['date'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d').strftime('%Y-%V'))
if 'month' not in df:
df['month'] = df['date'].apply(lambda x: x[:7])
df['year'] = df['date'].apply(lambda x: x[:4])
# Data Viz
st.write(f"## Single {date_inc_label} in Spending")
available_date_incs = sorted(df[date_inc_key].unique(), reverse=True)
curr_date = st.selectbox(
f"Pick a {date_inc_label}",
options=available_date_incs,
format_func=lambda label: f"{label} ({df[df[date_inc_key] == label]['amount'].sum():,.2f})"
)
single_inc_spending_summary(
df,
date_inc_key,
curr_date,
is_current=curr_date == max(available_date_incs)
)
st.write(f"## {date_inc_label}ly Spending History")
history_df = df_for_certain_categories(df)
st.bar_chart(history_df.groupby(date_inc_key).sum("amount").sort_index(ascending=False))
st.write(f"## Most Expensive Single {date_inc} Categories")
write_df(top_vendors(df, groupby=[date_inc_key, 'category_1']))
st.write("## All Transactions")
write_df(df)
# TODO: Figure out how we want to show the various conflicting budget periods
# - Do we want the triple layered bar chart still? (spending / projected / limit)
# - Do we just want 2 views? How can we give category level info well
return
except URLError as e:
st.error(
"""
**This demo requires internet access.**
Connection error: %s
"""
% e.reason
)
except Exception as e:
st.error(f"""
Something Broke :(
Error: {e}
Traceback: {format_exc()}
""")
if __name__ == "__main__":
main()
| 33.101351 | 119 | 0.610431 | 0 | 0 | 0 | 0 | 1,400 | 0.142813 | 0 | 0 | 3,021 | 0.308171 |
720ee96617fe84100cbf9c9517c56d368835bd2c | 16,818 | py | Python | scripts/devvnet_manager.py | spmckenney/Devv-Core | eb30ae3a092e3fe0f9f756f5f31bdce4f6215b98 | [
"MIT"
]
| null | null | null | scripts/devvnet_manager.py | spmckenney/Devv-Core | eb30ae3a092e3fe0f9f756f5f31bdce4f6215b98 | [
"MIT"
]
| null | null | null | scripts/devvnet_manager.py | spmckenney/Devv-Core | eb30ae3a092e3fe0f9f756f5f31bdce4f6215b98 | [
"MIT"
]
| null | null | null | import yaml
import argparse
import sys
import os
import subprocess
import time
def get_devvnet(filename):
with open(filename, "r") as f:
buf = ''.join(f.readlines())
conf = yaml.load(buf, Loader=yaml.Loader)
# Set bind_port values
port = conf['devvnet']['base_port']
for a in conf['devvnet']['shards']:
for b in a['process']:
port = port + 1
b['bind_port'] = port
return(conf['devvnet'])
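# Illustrative sketch of the expected YAML shape (keys taken from the accessors
# below; values are hypothetical):
#
# devvnet:
#   base_port: 56550
#   working_dir: /tmp/devvnet/${name}/s${shard_index}/n${node_index}
#   config_file: basic_shard.conf
#   password_file: default_pass.conf
#   host: localhost
#   host_index_map: {0: localhost, 1: localhost}
#   shards:
#     - shard_index: 0
#       t2: shard-zero
#       process:
#         - name: validator
#           subscribe:
#             - {name: announcer, node_index: ${node_index}}
#         - name: announcer
#         - name: repeater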
class Devvnet(object):
_base_port = 0
_password_file = ""
_working_dir = ""
_config_file = ""
_host = ""
_host_index_map = {}
def __init__(self, devvnet):
self._devvnet = devvnet
self._shards = []
self._host_index_map = devvnet['host_index_map']
try:
self._base_port = devvnet['base_port']
self._working_dir = devvnet['working_dir']
self._password_file = devvnet['password_file']
self._config_file = devvnet['config_file']
self._host = devvnet['host']
except KeyError:
pass
current_port = self._base_port
for i in self._devvnet['shards']:
print("Adding shard {}".format(i['shard_index']))
s = Shard(i, self._host_index_map, self._config_file, self._password_file)
current_port = s.initialize_bind_ports(current_port)
s.evaluate_hostname(self._host)
s.connect_shard_nodes()
self._shards.append(s)
for i,shard in enumerate(self._shards):
print("shard: "+ str(shard))
for i2,node in enumerate(shard.get_nodes()):
node.grill_raw_subs(shard.get_index())
for rsub in node.get_raw_subs():
print("Getting for shard/name/node_index {}/{}/{}".format(rsub.get_shard_index(), rsub._name, rsub._node_index))
n = self.get_shard(rsub.get_shard_index()).get_node(rsub._name, rsub._node_index)
node.add_subscriber(n.get_host(), n.get_port())
node.add_working_dir(self._working_dir)
def __str__(self):
s = "Devvnet\n"
s += "base_port : "+str(self._base_port)+"\n"
s += "working_dir : "+str(self._working_dir)+"\n"
for shard in self._shards:
s += str(shard)
return s
def get_shard(self, index):
return self._shards[index]
def get_shards(self):
return self._shards
def get_num_nodes(self):
count = 0
for shard in self._shards:
count += shard.get_num_nodes()
return count
class Shard(object):
_shard_index = 0;
_working_dir = ""
_shard = None
_nodes = []
_host = ""
def __init__(self, shard, host_index_map, config_file, password_file):
self._shard = shard
self._nodes = get_nodes(shard, host_index_map)
self._shard_index = self._shard['shard_index']
try:
self._host = self._shard['host']
except:
pass
try:
self._config_file = self._shard['config_file']
except:
self._config_file = config_file
try:
self._password_file = self._shard['password_file']
except:
self._password_file = password_file
try:
self._name = self._shard['t1']
self._type = "T1"
except:
try:
self._name = self._shard['t2']
self._type = 'T2'
except:
                print("Error: Shard type is neither Tier1 (t1) nor Tier2 (t2)")
for n in self._nodes:
n.set_config_file(self._config_file)
n.set_password_file(self._password_file)
n.set_type(self._type)
#self._connect_shard_nodes()
def __str__(self):
s = "type: " + self._type + "\n"
s += "index: " + str(self._shard_index) + "\n"
for node in self._nodes:
s += " " + str(node) + "\n"
return s
def initialize_bind_ports(self, port_num):
current_port = port_num
for node in self._nodes:
node.set_port(current_port)
current_port = current_port + 1
return current_port
def connect_shard_nodes(self):
v_index = [i for i,x in enumerate(self._nodes) if x.is_validator()]
a_index = [i for i,x in enumerate(self._nodes) if x.is_announcer()]
r_index = [i for i,x in enumerate(self._nodes) if x.is_repeater()]
for i in v_index:
host = self._nodes[i].get_host()
port = self._nodes[i].get_port()
#print("setting port to {}".format(port))
for j in v_index:
if i == j:
continue
self._nodes[j].add_subscriber(host, port)
for k in a_index:
announcer = self._nodes[k]
if self._nodes[i].get_index() == announcer.get_index():
self._nodes[i].add_subscriber(announcer.get_host(), announcer.get_port())
break
for l in r_index:
#print(type(self._nodes[i].get_index()))
#if self._nodes[i].get_index() == self._nodes[l].get_index():
self._nodes[l].add_subscriber(host, port)
def evaluate_hostname(self, host):
if self._host == "":
self._host = host
for node in self._nodes:
node.set_host(node.get_host().replace("${node_index}", str(node.get_index())))
if node.get_host().find("format") > 0:
#print("formatting")
node.set_host(eval(node.get_host()))
node.evaluate_hostname(self._host)
def get_nodes(self):
return self._nodes
def get_num_nodes(self):
return len(self._nodes)
def get_node(self, name, index):
node = [x for x in self._nodes if (x.get_name() == name and x.get_index() == index)]
if len(node) == 0:
return None
if len(node) != 1:
            raise RuntimeError("WOOP: identical nodes?")
return node[0]
#node = [y for y in nodes if y.get_index() == index
#shard1_validators = [x for x in conf['devvnet']['shards'][1]['process'] if x['name'] == 'validator']
def get_index(self):
return self._shard_index
class RawSub():
def __init__(self, name, shard_index, node_index):
self._name = name
self._shard_index = shard_index
self._node_index = node_index
def __str__(self):
sub = "({}:{}:{})".format(self._name, self._shard_index, self._node_index)
return sub
def get_shard_index(self):
return self._shard_index
def substitute_node_index(self, node_index):
if self._node_index == "${node_index}":
self._node_index = int(node_index)
else:
print("WARNING: not subbing "+str(self._node_index)+" with "+str(node_index))
return
class Sub():
def __init__(self, host, port):
self._host = host
self._port = port
def __str__(self):
sub = "({}:{})".format(self.get_host(), str(self.get_port()))
return sub
def __eq__(self, other):
if self._host != other.get_host():
return False
if self._port != other.get_port():
return False
return True
def get_host(self):
return self._host
def set_host(self, hostname):
self._host = hostname
def get_port(self):
return self._port
def set_port(self, port):
self._port = port
class Node():
def __init__(self, shard_index, index, name, host, port = 0):
self._name = name
self._type = ""
self._shard_index = int(shard_index)
self._index = int(index)
self._host = host
self._bind_port = int(port)
self._subscriber_list = []
self._raw_sub_list = []
self._working_dir = ""
def __str__(self):
subs = "s["
for sub in self._subscriber_list:
subs += str(sub)
subs += "]"
rawsubs = "r["
for rawsub in self._raw_sub_list:
rawsubs += str(rawsub)
rawsubs += "]"
s = "node({}:{}:{}:{}:{}) {} {}".format(self._name, self._index, self._host, self._bind_port, self._working_dir, subs, rawsubs)
return s
def add_working_dir(self, directory):
wd = directory.replace("${name}", self._name)
wd = wd.replace("${shard_index}", str(self._shard_index))
wd = wd.replace("${node_index}", str(self.get_index()))
self._working_dir = wd
def is_validator(self):
return(self._name == "validator")
def is_announcer(self):
return(self._name == "announcer")
def is_repeater(self):
return(self._name == "repeater")
def add_subscriber(self, host, port):
self._subscriber_list.append(Sub(host,port))
def add_raw_sub(self, name, shard_index, node_index):
rs = RawSub(name,shard_index, node_index)
#print("adding rawsub: "+str(rs))
self._raw_sub_list.append(rs)
def evaluate_hostname(self, host):
for sub in self._subscriber_list:
sub.set_host(sub.get_host().replace("${node_index}", str(self.get_index())))
if sub.get_host().find("format") > 0:
print("formatting")
sub.set_host(eval(sub.get_host()))
def grill_raw_subs(self, shard_index):
for sub in self._raw_sub_list:
sub.substitute_node_index(self._index)
#d = subs.replace("${node_index}", str(self._index))
print("up "+str(sub))
def get_raw_subs(self):
return self._raw_sub_list
def get_type(self):
return self._type
def set_type(self, type):
self._type = type
def get_name(self):
return self._name
def get_shard_index(self):
return self._shard_index
def get_index(self):
return self._index
def get_host(self):
return self._host
def set_host(self, host):
self._host = host
def get_port(self):
return self._bind_port
def set_port(self, port):
self._bind_port = port
def get_config_file(self):
return self._config_file
def set_config_file(self, config):
self._config_file = config
def get_password_file(self):
return self._password_file
def set_password_file(self, file):
self._password_file = file
def get_subscriber_list(self):
return self._subscriber_list
def get_working_dir(self):
return self._working_dir
def set_working_dir(self, working_dir):
self._working_dir = working_dir
def get_nodes(yml_dict, host_index_map):
nodes = []
shard_index = yml_dict['shard_index']
try:
host_index_map = yml_dict['host_index_map']
print("Using shard's {} for shard {}".format(host_index_map, shard_index))
except:
print("Using devvnet host_index_map ({}) for shard {}".format(host_index_map, shard_index))
for proc in yml_dict['process']:
try:
print("creating {} {} processes".format(len(host_index_map), proc['name']))
for node_index in host_index_map:
node = Node(shard_index, node_index, proc['name'], host_index_map[node_index], proc['bind_port'])
try:
rawsubs = proc['subscribe']
for sub in proc['subscribe']:
try:
si = sub['shard_index']
except:
si = yml_dict['shard_index']
node.add_raw_sub(sub['name'], si, sub['node_index'])
except:
pass
nodes.append(node)
except:
            # fall back to a single explicitly-hosted process (index 0 assumed)
            nodes.append(Node(shard_index, 0, proc['name'], proc['host'], proc['bind_port']))
print("creating a "+proc['name']+" process")
return nodes
def run_validator(node):
# ./devcash --node-index 0 --config ../opt/basic_shard.conf --config ../opt/default_pass.conf --host-list tcp://localhost:56551 --host-list tcp://localhost:56552 --host-list tcp://localhost:57550 --bind-endpoint tcp://*:56550
cmd = []
cmd.append("./devcash")
cmd.extend(["--shard-index", str(node.get_shard_index())])
cmd.extend(["--node-index", str(node.get_index())])
cmd.extend(["--num-consensus-threads", "1"])
cmd.extend(["--num-validator-threads", "1"])
cmd.extend(["--config", node.get_config_file()])
cmd.extend(["--config", node.get_password_file()])
cmd.extend(["--bind-endpoint", "tcp://*:" + str(node.get_port())])
for sub in node.get_subscriber_list():
cmd.extend(["--host-list", "tcp://" + sub.get_host() + ":" + str(sub.get_port())])
return cmd
def run_announcer(node):
# ./announcer --node-index 0 --shard-index 1 --mode T2 --stop-file /tmp/stop-devcash-announcer.ctl --inn-keys ../opt/inn.key --node-keys ../opt/node.key --bind-endpoint 'tcp://*:50020' --working-dir ../../tmp/working/input/laminar4/ --key-pass password --separate-ops true
cmd = []
cmd.append("./pb_announcer")
cmd.extend(["--shard-index", str(node.get_shard_index())])
cmd.extend(["--node-index", str(node.get_index())])
cmd.extend(["--config", node.get_config_file()])
cmd.extend(["--config", node.get_password_file()])
cmd.extend(["--mode", node.get_type()])
cmd.extend(["--bind-endpoint", "tcp://*:" + str(node.get_port())])
cmd.extend(["--separate-ops", "true"])
cmd.extend(["--start-delay", str(30)])
cmd.extend(["--protobuf-endpoint", "tcp://*:" + str(node.get_port() + 100)])
return cmd
def run_repeater(node):
# ./repeater --node-index 0 --shard-index 1 --mode T2 --stop-file /tmp/stop-devcash-repeater.ctl --inn-keys ../opt/inn.key --node-keys ../opt/node.key --working-dir ../../tmp/working/output/repeater --host-list tcp://localhost:56550 --key-pass password
cmd = []
cmd.append("./repeater")
cmd.extend(["--shard-index", str(node.get_shard_index())])
cmd.extend(["--node-index", str(node.get_index())])
cmd.extend(["--num-consensus-threads", "1"])
cmd.extend(["--num-validator-threads", "1"])
cmd.extend(["--mode", node.get_type()])
cmd.extend(["--working-dir", node.get_working_dir()])
cmd.extend(["--protobuf-endpoint", "tcp://*:" + str(node.get_port() + 200)])
for sub in node.get_subscriber_list():
cmd.extend(["--host-list", "tcp://" + sub.get_host() + ":" + str(sub.get_port())])
return cmd
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Launch a devvnet.')
parser.add_argument('--logdir', action="store", dest='logdir', help='Directory to log output')
parser.add_argument('--start-processes', action="store_true", dest='start', default=True, help='Start the processes')
    parser.add_argument('--hostname', action="store", dest='hostname', default=None, help='Only launch processes whose host matches this hostname')
    parser.add_argument('--debug', action="store_true", dest='debug', default=False, help='Debugging output')
parser.add_argument('devvnet', action="store", help='YAML file describing the devvnet')
args = parser.parse_args()
print(args)
print("logdir: " + args.logdir)
print("start: " + str(args.start))
print("hostname: " + str(args.hostname))
print("devvnet: " + args.devvnet)
devvnet = get_devvnet(args.devvnet)
d = Devvnet(devvnet)
num_nodes = d.get_num_nodes()
logfiles = []
cmds = []
for s in d.get_shards():
for n in s.get_nodes():
if args.hostname and (args.hostname != n.get_host()):
continue
if n.get_name() == 'validator':
cmds.append(run_validator(n))
elif n.get_name() == 'repeater':
cmds.append(run_repeater(n))
elif n.get_name() == 'announcer':
cmds.append(run_announcer(n))
logfiles.append(os.path.join(args.logdir,
n.get_name()+"_s"+
str(n.get_shard_index())+"_n"+
str(n.get_index())+"_output.log"))
ps = []
for index,cmd in enumerate(cmds):
print("Node " + str(index) + ":")
print(" Command: ", *cmd)
print(" Logfile: ", logfiles[index])
if args.start:
with open(logfiles[index], "w") as outfile:
ps.append(subprocess.Popen(cmd, stdout=outfile, stderr=outfile))
time.sleep(1.5)
if args.start:
for p in ps:
print("Waiting for nodes ... ctl-c to exit.")
p.wait()
print("Goodbye.")
| 33.171598 | 276 | 0.576347 | 10,288 | 0.611726 | 0 | 0 | 0 | 0 | 0 | 0 | 3,179 | 0.189024 |
72103568b2899de2bb48ee1f49834b293ab3bb81 | 5,896 | py | Python | run_qasm.py | t-imamichi/qiskit-utility | 2e71d0457bba0e6eb91daa9dbb32f52d87fe9f0b | [
"Apache-2.0"
]
| 6 | 2019-02-27T11:53:18.000Z | 2022-03-02T21:28:05.000Z | run_qasm.py | t-imamichi/qiskit-utility | 2e71d0457bba0e6eb91daa9dbb32f52d87fe9f0b | [
"Apache-2.0"
]
| null | null | null | run_qasm.py | t-imamichi/qiskit-utility | 2e71d0457bba0e6eb91daa9dbb32f52d87fe9f0b | [
"Apache-2.0"
]
| 2 | 2019-05-03T23:52:03.000Z | 2020-12-22T12:12:38.000Z | #!/usr/bin/env python
# coding: utf-8
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
'''
This tool submits a QASM file to any backend and show the result.
It requires 'Qconfig.py' to set a token of IBM Quantum Experience.
It supports the following backends:
ibmqx2(5 qubits), ibmqx4(5 qubits), ibmqx5(16 qubits), simulator(32 qubits).
see https://quantumexperience.ng.bluemix.net/qx/devices for more details of the backends.
Examples:
$ python run_qasm.py -b # show backend information
$ python run_qasm.py -c # show remaining credits
$ python run_qasm.py -l 10 # show job list (10 jobs)
$ python run_qasm.py -j (job id) # show the result of a job
$ python run_qasm.py -q (qasm file) # submit a qasm file
$ python run_qasm.py -z -l 10 # show job list (10 jobs) of qconsole
$ python run_qasm.py -z -d ibmq_20_tokyo -q (qasm file) # submit a qasm file to ibmq_20_tokyo
'''
import json
import time
from argparse import ArgumentParser
from IBMQuantumExperience import IBMQuantumExperience
try:
import Qconfig
except ImportError:
raise RuntimeError('You need "Qconfig.py" with a token in the same directory.')
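# Qconfig.py is expected to provide per-site credentials, roughly (placeholder
# values shown):
#     APItoken = {'qx': '<token>', 'qconsole': '<token>'}
#     config = {'qx': {'url': '<api url>'}, 'qconsole': {'url': '<api url>'}}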
def options():
parser = ArgumentParser()
parser.add_argument('-q', '--qasm', action='store', help='QASM file')
parser.add_argument('-d', '--device', action='store', default='sim',
help='choose a device to run the input (sim [default], qx2, qx4, qx5, hpc)')
parser.add_argument('-s', '--shots', action='store', default=1000, type=int,
help='Number of shots (default: 1000)')
parser.add_argument('-i', '--interval', action='store', default=2, type=int,
help='Interval time to poll a result (default: 2)')
parser.add_argument('-l', '--job-list', action='store', default=10, type=int,
help='Number of jobs to show')
parser.add_argument('-j', '--jobid', action='store', type=str, help='Get job information')
parser.add_argument('-z', '--qconsole', action='store_true', help='Use qconsole instead of QX')
parser.add_argument('-b', '--backends', action='store_true', help='Show backends information')
parser.add_argument('-m', '--disable-multishotopt', action='store_true', help='Disable multi-shot optimization')
parser.add_argument('-c', '--credits', action='store_true', help='Show my credits')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose')
args = parser.parse_args()
if args.verbose:
print('options:', args)
return args
class JobManager:
def __init__(self, qconsole=False):
site = 'qconsole' if qconsole else 'qx'
self._api = IBMQuantumExperience(Qconfig.APItoken[site], Qconfig.config[site])
@staticmethod
def read_asm(infilename):
with open(infilename) as infile:
return ''.join(infile.readlines())
def run_qasm(self, qasm, device='sim', shots=1000, verbose=True, interval=2, multishotopt=True):
qasms = [{'qasm': qasm}]
devices = {'sim': 'ibmq_qasm_simulator',
'qx2': 'ibmqx2', 'qx4': 'ibmqx4', 'qx5': 'ibmqx5'}
if device in devices:
dev = devices[device]
else:
dev = device
hpc = None
if dev == 'ibmq_qasm_simulator':
hpc = {'multishot_optimization': multishotopt, 'omp_num_threads': 1}
out = self._api.run_job(job=qasms, backend=dev, shots=shots, max_credits=5, hpc=hpc)
if 'error' in out:
print(out['error']['message'])
return None
jobid = out['id']
print('job id:', jobid)
results = self._api.get_job(jobid)
if verbose:
print(results['status'])
while results['status'] == 'RUNNING':
time.sleep(interval)
results = self._api.get_job(jobid)
if verbose:
print(results['status'])
return results
def get_job_list(self, n_jobs):
jobs = self._api.get_jobs(limit=n_jobs)
tab = {}
for v in jobs:
job_id = v['id']
status = v['status']
cdate = v['creationDate']
tab[cdate] = (status, job_id)
for cdate, v in sorted(tab.items()):
print('{}\t{}\t{}'.format(cdate, *v))
def get_job(self, job_id):
result = self._api.get_job(job_id)
print(json.dumps(result, sort_keys=True, indent=2))
def get_credits(self):
print('credits :', self._api.get_my_credits())
def available_backends(self, verbose=False):
tab = {}
for e in self._api.available_backends() + self._api.available_backend_simulators():
status = self._api.backend_status(e['name'])
try:
tab[e['name']] = [':', str(e['nQubits']) + ' qubits,', e['description'], status]
except KeyError:
tab[e['name']] = [':', status]
if verbose:
tab[e['name']].append(e)
for k, v in sorted(tab.items()):
print(k, *v)
def main():
args = options()
jm = JobManager(args.qconsole)
if args.backends:
jm.available_backends(args.verbose)
if args.credits:
jm.get_credits()
if args.qasm:
qasm = jm.read_asm(args.qasm)
interval = max(1, args.interval)
results = jm.run_qasm(qasm=qasm, device=args.device, shots=args.shots, interval=interval,
multishotopt=not args.disable_multishotopt)
print(json.dumps(results, indent=2, sort_keys=True))
elif args.jobid:
jm.get_job(args.jobid)
elif args.job_list > 0:
jm.get_job_list(args.job_list)
if __name__ == '__main__':
main()
72118299f37a0a55a9f0a207024fcdd8ae01fcd7 | 445 | py | Python | alembic/versions/175f5441bd46_adding_usrname_column.py | thiere18/fastapi-boilerplate | ["MIT"] | stars: 5
"""adding usrname column
Revision ID: 175f5441bd46
Revises: 186abcf43cae
Create Date: 2021-11-20 22:54:04.157131
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '175f5441bd46'
down_revision = '186abcf43cae'
branch_labels = None
depends_on = None
def upgrade():
    op.add_column('users', sa.Column('username', sa.String(255), nullable=False))
def downgrade():
    # Revert the upgrade by dropping the column again.
    op.drop_column('users', 'username')
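# For reference, the upgrade above corresponds roughly to this SQL (the
# exact statement depends on the configured database dialect):
#
#   ALTER TABLE users ADD COLUMN username VARCHAR(255) NOT NULL;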
7211ad9fb739bb9a8cf35bb0752773293df5ab6b | 2,356 | py | Python | api/teams/models.py | wepickheroes/wepickheroes.github.io | ["MIT"] | stars: 3
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from nucleus.models import (
AbstractBaseModel,
EmailRecord,
TeamMember,
)
User = get_user_model()
class Team(AbstractBaseModel):
name = models.CharField(max_length=255)
logo_url = models.CharField(max_length=255, null=True, blank=True)
players = models.ManyToManyField(User, through='nucleus.TeamMember', related_name='teams')
captain = models.ForeignKey(User, null=True, blank=True, related_name='teams_captain_of',
on_delete=models.SET_NULL)
creator = models.ForeignKey(User, null=True, blank=True, related_name='teams_created',
on_delete=models.SET_NULL)
def save(self, *args, **kwargs):
adding = self._state.adding
super().save(*args, **kwargs)
if adding:
if self.captain:
TeamMember.objects.create(team=self, player=self.captain)
elif self.creator:
TeamMember.objects.create(team=self, player=self.creator)
def __str__(self):
return self.name
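# Minimal usage sketch (assumes a configured Django project; the user
# credentials below are hypothetical):
#
#   captain = User.objects.create_user('alice', '[email protected]', 'secret')
#   team = Team.objects.create(name='Dire Wolves', captain=captain)
#   # Team.save() auto-creates the captain's TeamMember row, so the
#   # captain now appears in team.players.all().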
INVITE_TEMPLATE = """Hello,
You've been invited to join a team on push.gg. Click the link below to sign up:
{signup_link}
- Push League
"""
class TeamInvite(AbstractBaseModel):
team = models.ForeignKey('teams.Team', on_delete=models.CASCADE)
player_email = models.EmailField()
player = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL)
def save(self, *args, **kwargs):
try:
previous_self = TeamInvite.objects.get(pk=self.pk)
except TeamInvite.DoesNotExist:
previous_self = None
new_instance = not previous_self
super().save(*args, **kwargs)
if new_instance:
self.send_email()
    def send_email(self):
        subject = "You have been invited to a team on push.gg"
        email_body = INVITE_TEMPLATE.format(
            signup_link="",
        )
        if self.player:
            # self.player is nullable; only email invites linked to an account,
            # and reuse the subject defined above instead of repeating the literal.
            self.player.email_user(
                subject,
                email_body,
            )
EmailRecord.objects.create(
to=self.player_email,
from_address=settings.DEFAULT_FROM_EMAIL,
subject=subject,
text_content=email_body
)
7211e7dcde6526670f3ae011a8fd15606f93b81e | 1,826 | py | Python | tables_io/lazy_modules.py | LSSTDESC/tables_io | ["MIT"] | stars: 1
""" Lazy loading modules """
import sys
import importlib.util
class DeferredModuleError:
""" Class to throw an error if you try to use a modules that wasn't loaded """
def __init__(self, moduleName):
self._moduleName = moduleName
@property
def moduleName(self):
""" Return the name of the module this is associated to """
return self._moduleName
def __getattr__(self, item):
raise ImportError("Module %s was not loaded, so call to %s.%s fails" %
(self.moduleName, self.moduleName, item))
def lazyImport(modulename):
""" This will allow us to lazy import various modules
Parameters
----------
modulename : `str`
The name of the module in question
Returns
-------
module : `importlib.LazyModule`
A lazy loader for the module in question
"""
try:
return sys.modules[modulename]
except KeyError:
        spec = importlib.util.find_spec(modulename)
        if spec is None:
            print("Can't find module %s" % modulename)
            return DeferredModuleError(modulename)
        module = importlib.util.module_from_spec(spec)
        loader = importlib.util.LazyLoader(spec.loader)
        # Make the module with proper locking and insert it into sys.modules,
        # so the lookup at the top of this function can reuse it later.
        sys.modules[modulename] = module
        loader.exec_module(module)
        try:
            # dir() forces the deferred import, so broken modules fail here
            # (the lazy-load machinery reports such problems as ValueError).
            _ = dir(module)
        except ValueError:
            pass
        return module
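# Usage sketch: callers get either the real module or a DeferredModuleError
# placeholder that raises ImportError on first attribute access, e.g.:
#
#   pq = lazyImport('pyarrow.parquet')
#   if HAS_PQ:
#       ...  # safe to use pq here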
tables = lazyImport('tables')
apTable = lazyImport('astropy.table')
fits = lazyImport('astropy.io.fits')
h5py = lazyImport('h5py')
pd = lazyImport('pandas')
pq = lazyImport('pyarrow.parquet')
# lazyImport never returns None: a missing module comes back as a
# DeferredModuleError placeholder, so test for that instead of None.
HAS_TABLES = not isinstance(tables, DeferredModuleError)
HAS_PQ = not isinstance(pq, DeferredModuleError)
HAS_FITS = not isinstance(fits, DeferredModuleError)
HAS_ASTROPY = not isinstance(apTable, DeferredModuleError)
HAS_HDF5 = not isinstance(h5py, DeferredModuleError)
HAS_PANDAS = not isinstance(pd, DeferredModuleError)
721392272e51a8013f6d83d05f9c457dc8ce2f53 | 4,811 | py | Python | print_results.py | MicImbriani/Keras-PRBX | ["CC0-1.0"] | stars: 1
import numpy as np
from keras.optimizers import Adam, SGD
from tensorflow.keras.metrics import AUC
import metrics
from networks.unet_nn import unet
from networks.unet_res_se_nn import unet_res_se
from networks.focus import get_focusnetAlpha
from networks.resnet import get_res
from data_processing.generate_new_dataset import generate_targets
from tensorflow.keras.applications.resnet50 import preprocess_input
########### SEGMENTATION ###########
# U-Net
model = unet(batch_norm=False)
model.load_weights("/var/tmp/mi714/NEW/models/UNET/unet10/unet10_weights.h5")
# U-Net BatchNorm
# model = unet(batch_norm=True)
# model.load_weights("/var/tmp/mi714/NEW/models/UNET_BN/unet_bn10/unet_bn10_weights.h5")
# U-Net Res SE
# model = unet_res_se()
# model.load_weights("/var/tmp/mi714/NEW/models/UNET_RES_SE/unet_res_se10/unet_res_se10_weights.h5")
#Focusnet
# model = get_focusnetAlpha()
# model.load_weights("/var/tmp/mi714/NEW/models/FOCUS/focusnet10/focusnet10_weights.h5")
########### CLASSIFICATION ###########
# model = get_res()
# Original
# model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_OG/resnet_og10/resnet_og10_weights.h5")
# U-Net
# model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_UNET_BN/resnet_unet10/resnet_unet10_weights.h5")
# U-Net BatchNorm
# model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_UNET_BN/resnet_unet_bn10/resnet_unet_bn10_weights.h5")
# Res SE U-Net
# model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_UNET_RES_SE/resnet_unet_res_se10/resnet_unet_res_se10_weights.h5")
# FocusNet
# model.load_weights("/var/tmp/mi714/NEW/models/RESNETS/RESNET_FOCUSNET/resnet_focusnet7/resnet_focusnet7_weights.h5")
# Data, Masks & Classification target labels
# trainData = np.load('/var/tmp/mi714/test_new_npy2/data.npy')
# valData = np.load('/var/tmp/mi714/test_new_npy2/dataval.npy')
testData = np.load('/var/tmp/mi714/NEW/npy_dataset/datatest.npy')
# Segmentation masks
# trainMask = np.load('/var/tmp/mi714/test_new_npy2/dataMask.npy')
# valMask = np.load('/var/tmp/mi714/test_new_npy2/dataMaskval.npy')
testMask = np.load('/var/tmp/mi714/NEW/npy_dataset/dataMasktest.npy')
########### SEGMENTATION ###########
X = testData
y = testMask
X = X.astype('float32')
y = y.astype('float32')  # cast first so the in-place division below is valid
y /= 255.  # scale masks to [0, 1]
my_adam = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
model.compile(optimizer=my_adam,
loss=metrics.focal_loss,
metrics=[metrics.dice_coef_loss,
metrics.jaccard_coef_loss,
metrics.true_positive,
metrics.true_negative,
])
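# For reference, the overlap metrics evaluated below are, for prediction P
# and ground-truth mask G:
#   Dice(P, G) = 2|P ∩ G| / (|P| + |G|)
#   Jaccard(P, G) = |P ∩ G| / |P ∪ G|
# so lower values of their loss counterparts indicate better segmentations.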
score = model.evaluate(X, y, verbose=1)
dice_coef_loss = score[1]
jac_indx_loss = score[2]
true_positive = score[3]
true_negative = score[4]
print(f"""
RESULTS:
Dice Coefficient Loss: {dice_coef_loss}
Jaccard Index Loss: {jac_indx_loss}
True Positive: {true_positive}
True Negative: {true_negative}
""")
########### CLASSIFICATION ###########
# # Classification data
# # x_train = np.concatenate((trainData,)*3, axis=-1)
# # x_train = preprocess_input(x_train)
# # x_val = np.concatenate((valData,)*3, axis=-1)
# # x_val = preprocess_input(x_val)
# x_test = np.concatenate((testData,)*3, axis=-1)
# x_test = preprocess_input(x_test)
# # Classification target labels
# path = "/var/tmp/mi714/NEW/aug_dataset/"
# # y_train = generate_targets(path + "ISIC-2017_Training_Data",
# # path + "ISIC-2017_Training_Part3_GroundTruth.csv")
# # y_val = generate_targets(path + "ISIC-2017_Validation_Data",
# # path + "ISIC-2017_Validation_Part3_GroundTruth.csv")
# y_test = generate_targets(path + "ISIC-2017_Test_v2_Data",
# path + "ISIC-2017_Test_v2_Part3_GroundTruth.csv")
# X = x_test
# y = y_test
# my_adam = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
# # Compile model and print summary
# rocauc = AUC(num_thresholds=200,
# curve="ROC",
# summation_method="interpolation",
# name=None,
# dtype=None,
# thresholds=None,
# multi_label=False,
# label_weights=None,
# )
# model.compile(loss='categorical_crossentropy',
# optimizer=my_adam,
# metrics=[metrics.sensitivity,
# metrics.specificity,
# rocauc,
# 'acc'
# ])
# score = model.evaluate(X, y, verbose=1)
# binary_ce = score[0]
# sensitivity = score[1]
# specificity = score[2]
# rocauc = score[3]
# acc = score[4]
# print(f"""
# RESULTS:
# Binary Cross-Entropy Loss: {binary_ce}
# Sensitivity: {sensitivity}
# Specificity: {specificity}
# AUC ROC: {rocauc}
# Accuracy: {acc}
# """)
72140b20f916fb997edbec8a00bb1402df3614ca | 9,466 | py | Python | game.py | distortedsignal/bohnanza | ["Apache-2.0"] | stars: null
"""
An implementation of Bohnanza
@author: David Kelley, 2018
"""
import random
from collections import defaultdict
class Card:
"""Card Object
Name and point thresholds are the only properties. The point thresholds
    are organized the way they are on the card - to get 1 point, you need the
    number of cards listed first in the point_thresholds, 2 for the 2nd, ...
"""
types = {'garden':[2, 2, 3], 'red': [2, 3, 4, 5],
'black-eyed': [2, 4, 5, 6], 'soy': [2, 4, 6, 7],
'green': [3, 5, 6, 7], 'stink': [3, 5, 7, 8],
'chili': [3, 6, 8, 9], 'blue': [4, 6, 8, 10]}
def __init__(self,name):
self.name = name
self.point_thresholds = self.types[self.name]
def __repr__(self):
return self.name
def __eq__(self, card2):
return self.name == card2.name
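# Worked example: Card('blue').point_thresholds == [4, 6, 8, 10], meaning a
# harvested blue-bean field is worth 1 point at 4 cards, 2 at 6, 3 at 8
# and 4 at 10.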
class Deck:
types = {'garden': 6, 'red': 8, 'black-eyed': 10, 'soy': 12,
'green': 14, 'stink': 16, 'chili': 18, 'blue': 20}
def __init__(self):
cards = [Card(name) for name in
self.types.keys() for i in range(0, self.types[name]) ]
self.draw_order = random.sample(cards, len(cards))
self.discard_order = []
self.completed_rounds = 0
def __repr__(self):
str_out = ("Deck with:\n Draw pile: " +
str(len(self.draw_order)) + " cards\n Discard pile: " +
str(len(self.discard_order)) + " cards\n")
return str_out
def draw(self, nCards):
"""Get nCards from the deck
If there are no cards left, you get only as many as are available
"""
out = []
for iC in range(nCards):
out.append(self.draw_single())
return out
def draw_single(self):
"""Get a single card from the deeck"""
if len(self.draw_order) == 0:
# Shuffle the discard pile if the draw pile is empty
self.draw_order = random.sample(
self.discard_order, len(self.discard_order))
self.discard_order = []
self.completed_rounds += 1
if len(self.draw_order) == 0:
return []
else:
return self.draw_order.pop(0)
def discard(self, c):
if isinstance(c, list):
for iCard in c:
self.discard_order.append(iCard)
else:
self.discard_order.append(c)
class Player:
"""Generic player class. Should be subtyped later for new strategies.
Each player type must implement the following methods:
plant: takes an array of cards and plants them
"""
def __init__(self, seat, strat):
self.hand = []
self.fields = [[], []]
self.points = 0
self.point_discards = []
self.seat = seat
self.strategy = strat
def __repr__(self):
names = []
for iField in range(2):
if len(self.fields[iField]) == 0:
names.append("[Empty]")
else:
names.append(str(self.fields[iField][0]) + \
"(" + str(len(self.fields[iField])) + ")")
return ("Player " + str(self.seat+1) + ".\nHand: " +
str(self.hand)
+ "\nField 1: " + names[0]
+ "\nField 2: " + names[1] + "\n")
def plant_from_hand(self, game_state):
"""Get strategy's choice and execute"""
if len(self.hand) == 0:
return
field_to_plant, cards = self.strategy.plant_from_hand(self)
for (iField, iCard) in zip(field_to_plant, cards):
self.plant_field(iField, iCard, game_state)
self.hand.pop(0)
def plant_from_draw(self, cards, game_state):
"""Get strategy's choice and execute"""
field_to_plant, cards = \
self.strategy.plant_from_trade(self, cards)
for (iField, iCard) in zip(field_to_plant, cards):
self.plant_field(iField, iCard, game_state)
def plant_field(self, field_num, card, game_state):
"""Put card down on field, harvest if neccessary"""
if len(self.fields[field_num]) > 0 and \
card != self.fields[field_num][0]:
self.harvest_field(field_num, game_state)
self.fields[field_num].append(card)
def harvest_field(self, field_num, game_state):
"""Get points, discard cards to correct place"""
nBeans = len(self.fields[field_num])
if nBeans == 0:
return []
nPoints = sum([i <= nBeans for i in self.fields[field_num][0].point_thresholds])
self.points += nPoints
for_discard = self.fields[field_num][0:(nBeans-nPoints)]
for_points = self.fields[field_num][(nBeans-nPoints):]
if len(for_discard) + len(for_points) != nBeans:
print('error')
raise AssertionError("Improper harvest.")
# Handle cards from field
game_state._deck.discard(for_discard)
self.point_discards.extend(for_points)
# Empty the field
self.fields[field_num] = []
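# Worked example of the harvest above: a field of five chili beans
# (thresholds [3, 6, 8, 9]) earns sum(t <= 5) == 1 point, so one card is
# kept in point_discards and the remaining four go to the deck's discard pile.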
class Game:
def __init__(self, player_strats):
self._deck = Deck()
self._players = [Player(i, player_strats[i]) for i in range(len(player_strats))]
self.deal_game(len(self._players))
def __repr__(self):
return "Bohnanza game with \n" + str(self._players) + " players."
def run(self):
active_player = 0
round_number = 1
empty_deck = False
while not (self.game_over() or empty_deck):
empty_deck = self.turn(active_player)
active_player += 1
if active_player >= len(self._players):
active_player = 0
round_number += 1
points = [p.points for p in self._players]
return points
# print("GAME OVER\nPlayer points: " + \
# str([p.points for p in self._players]))
def deal_game(self, nPlayers):
"""Initial game setup"""
for iPlayer in range(0,nPlayers):
self._players[iPlayer].hand.extend(self._deck.draw(5))
def game_over(self):
"""The game is over after completing 3 times through the deck"""
return len(self._deck.draw_order) == 0 and \
self._deck.completed_rounds >= 2
def turn(self, player_num):
"""Have a player take a turn"""
self.gamestate_is_valid()
# Step 1: Plant fron hand
self._players[player_num].plant_from_hand(self)
self.gamestate_is_valid()
# Step 2: Draw new cards & trade
faceup_cards = self._deck.draw(2)
if (len(faceup_cards) != 2) or any([not card for card in faceup_cards]):
self._deck.discard(faceup_cards)
return 1
# trade_spec = self._strategy[player_num].trade(faceup_cards)
# self.execute_trade(trade_spec)
self.gamestate_is_valid(faceup_cards)
# Step 3: Plant new cards
self._players[player_num].plant_from_draw(faceup_cards, self)
self.gamestate_is_valid()
# Step 4: Draw new cards
new_cards = self._deck.draw(3)
if (len(new_cards) != 3) or any([not card for card in new_cards]):
self._deck.discard(new_cards)
return 1
self._players[player_num].hand.extend(new_cards)
self.gamestate_is_valid()
    def gamestate_is_valid(self, addl_cards=(), throw_exception=False):
"""
If !throw_exception, returns a boolean of if the game state is valid
If throw_exception, throws an exception if the game state is not valid
"""
original_types = Deck.types
current_cards = defaultdict(int)
for card in self._deck.draw_order:
current_cards[card.name] += 1
for card in self._deck.discard_order:
current_cards[card.name] += 1
for card in addl_cards:
current_cards[card.name] += 1
for player in self._players:
for card in player.hand:
current_cards[card.name] += 1
for card in player.point_discards:
current_cards[card.name] += 1
for field in player.fields:
for card in field:
current_cards[card.name] += 1
for key in original_types:
if original_types[key] != current_cards[key]:
if not throw_exception:
return False
raise AssertionError("not all cards are present")
return True
class Strategy:
def __init__(self, seat):
self.name = "Generic"
def __repr__(self):
return self.name + " player."
    def plant_from_hand(self, player):
        """Return which fields to plant in and which cards to plant for the
        given player (matches Player.plant_from_hand's call signature).
        """
        pass
    def plant_from_trade(self, player, cards):
"""Return a list of which field to put cards in for the given player
"""
pass
def trade(self, cards):
"""Trade with other players. Still working out what the mechanics of
this are
"""
pass
72155749ca290c85d0fa365110369fcce2862271 | 1,872 | py | Python | pytype/tests/test_calls.py | JelleZijlstra/pytype | ["Apache-2.0"] | stars: 11
"""Tests for calling other functions, and the corresponding checks."""
from pytype import utils
from pytype.tests import test_inference
class CallsTest(test_inference.InferenceTest):
"""Tests for checking function calls."""
def testOptional(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x: int, y: int = ..., z: int = ...) -> int
""")
self.assertNoErrors("""\
import mod
mod.foo(1)
mod.foo(1, 2)
mod.foo(1, 2, 3)
""", pythonpath=[d.path])
def testMissing(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "missing-parameter")])
def testExtraneous(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1, 2, 3)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "wrong-arg-count")])
def testMissingKwOnly(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y, *, z) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1, 2)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "missing-parameter", r"\bz\b")])
def testExtraKeyword(self):
with utils.Tempdir() as d:
d.create_file("mod.pyi", """
def foo(x, y) -> int
""")
_, errors = self.InferAndCheck("""\
import mod
mod.foo(1, 2, z=3)
""", pythonpath=[d.path])
self.assertErrorLogIs(errors, [(2, "wrong-keyword-args")])
if __name__ == "__main__":
test_inference.main()
7216c0aa91d2cb7e990847e2823233ead4e36ab3 | 724 | py | Python | test/test_learning_00.py | autodrive/NAIST_DeepLearning | ["Apache-2.0"] | stars: 1
import unittest
import lecture1_code00 as dl
from sklearn.datasets import make_blobs  # samples_generator module was removed in modern scikit-learn
class TestDeepLearning(unittest.TestCase):
def setUp(self):
self.X, self.Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
def tearDown(self):
del self.X
del self.Y
def test_linear_model_00(self):
x = [1.0, -1.0]
w = [1, 1, 0]
result = dl.linear_model(w, x)
self.assertAlmostEqual(x[0]*w[0] + x[1]*w[1] + w[2] * 1, result,)
x3 = [1.0, -1.0, 1.0]
w3 = [1, 2, 1, 0.5]
result3 = dl.linear_model(w3, x3)
self.assertAlmostEqual(x3[0]*w3[0] + x3[1]*w3[1] + x3[2] * w3[2] + w3[3] * 1.0, result3,)
7217f6133fa71477eb286daa69250fadb04142e7 | 2,389 | py | Python | edumediaitem/views_manage.py | shagun30/djambala-2 | ["BSD-3-Clause"] | stars: null
# -*- coding: utf-8 -*-
"""
/dms/edumediaitem/views_manage.py
.. enthaelt den View fuer die Management-Ansicht des Medienpaketes
Django content Management System
Hans Rauch
[email protected]
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.01 11.09.2007 Beginn der Arbeit
"""
from django.utils.translation import ugettext as _
from dms.queries import get_site_url
from dms.roles import require_permission
from dms.roles import UserEditPerms
from dms.folder.views_manage import do_manage
from dms_ext.extension import *  # override dms functions
# -----------------------------------------------------
@require_permission('perm_edit_folderish')
def edumediaitem_manage(request, item_container):
""" Pflegemodus des Medienpakets """
user_perms = UserEditPerms(request.user.username, request.path)
add_ons = {}
add_ons[0] = [ { 'url' : get_site_url(item_container,
'index.html/add/edufileitem/'),
'info': _(u'Datei')},
{ 'url' : get_site_url(item_container, 'index.html/add/edutextitem/'),
'info': _(u'Textdokument')},
{ 'url' : get_site_url(item_container, 'index.html/add/edulinkitem/'),
'info': _(u'Verweis')},
]
add_ons[1] = [
{ 'url' : get_site_url(item_container,
'index.html/add/imagethumb/?' + \
'max_width=120&max_height=80'),
'info': _(u'Minibild für Verweise etc.')},
{ 'url' : get_site_url(item_container,
'index.html/add/image/'),
'info': _(u'Bild, Foto, Grafik')},
]
add_ons[2] = [ { 'url' : get_site_url(item_container, 'index.html/add/userfolder/'),
'info': _(u'Community-Mitglieder eintragen, löschen, Rechte ändern ...')}, ]
add_ons[3] = []
app_name = 'edumediaitem'
my_title = _(u'Medienpaket pflegen')
my_title_own = _(u'Eigene Ressourcen etc. pflegen')
dont = { 'navigation_left_mode': False, }
return do_manage(request, item_container, user_perms, add_ons, app_name,
my_title, my_title_own, dont)
721813c43ddcb76146e7ed608cacf427665451b5 | 414 | py | Python | crop_yield_prediction/models/deep_gaussian_process/__init__.py | facebookresearch/Context-Aware-Representation-Crop-Yield-Prediction | ["MIT"] | stars: 12
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .feature_engineering import get_features_for_deep_gaussian
from .convnet import ConvModel
from .rnn import RNNModel
__all__ = ['get_features_for_deep_gaussian',
'ConvModel',
'RNNModel']
7218bf5a5c0b747c4d1438e666c01d7117d13c58 | 3,546 | py | Python | projector_server/projects/views.py | changyang-liu/Projector | ["MIT"] | stars: null
from django.http import HttpResponse, JsonResponse, Http404
from django.core.exceptions import PermissionDenied
from rest_framework.parsers import JSONParser, FormParser, MultiPartParser
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework import generics
from rest_framework.response import Response
from projects.models import Project
from projects.serializers import ProjectSerializer
from projects.permissions import IsOwnerOrReadOnly
from django.contrib.auth.models import User
class BaseProjectView(generics.ListCreateAPIView):
"""
View all projects (GET) or create a new project (POST)
"""
parser_classes = [FormParser, MultiPartParser]
permission_classes = [IsAuthenticatedOrReadOnly]
queryset = Project.objects.all()
serializer_class = ProjectSerializer
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class DetailedProjectView(generics.RetrieveUpdateDestroyAPIView):
"""
View a particular project (GET), edit a project (PUT) or delete a project (DELETE)
"""
parser_classes = [FormParser, MultiPartParser]
permission_classes = [IsOwnerOrReadOnly]
queryset = Project.objects.all()
serializer_class = ProjectSerializer
class JoinProjectView(generics.UpdateAPIView):
"""
Update a particular project's join request list or member list (PATCH)
"""
permission_classes = [IsAuthenticatedOrReadOnly]
queryset = Project.objects.all()
serializer_class = ProjectSerializer
def update(self, request, *args, **kwargs):
project = self.get_object()
user = request.data.get('user')
updateType = request.data.get('type')
userObject = User.objects.get(id__exact=user['id'])
if updateType == "Join Request":
# Only add user to join request list if not already in it
# Users cannot join for each other
if request.user not in project.join_requests.all():
project.join_requests.add(request.user)
elif updateType == "Cancel Request":
# Only project owner or requester can cancel request
if request.user != project.owner and request.user != userObject:
raise PermissionDenied
# Only remove user from join request list if already in it
if userObject in project.join_requests.all():
project.join_requests.remove(userObject)
elif updateType == "Accept":
# Only owner can accept
if request.user != project.owner:
raise PermissionDenied
project.join_requests.remove(userObject)
project.members.add(userObject)
else:
content = {"error": "Invalid update type"}
return Response(content, status=400)
return super().update(request, *args, **kwargs)
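# Example PATCH payload handled by JoinProjectView.update (the user id is
# hypothetical):
#
#   {"user": {"id": 7}, "type": "Join Request"}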
class LikesView(generics.UpdateAPIView):
"""
View for liking a project
"""
permission_classes = [IsAuthenticatedOrReadOnly]
queryset = Project.objects.all()
serializer_class = ProjectSerializer
def update(self, request, *args, **kwargs):
project = self.get_object()
if request.user not in project.liked_by.all():
project.liked_by.add(request.user)
project.likes = project.likes + 1
project.save()
else:
content = {"error": "Already liked this project"}
return Response(content, status=400)
return super().update(request, *args, **kwargs)
721a5ce052e7d21ea063652b0a161c21042f7f06 | 1,089 | py | Python | tests/test_muduapiclient.py | hanqingliu/mudu-api-python-client | ["Apache-2.0"] | stars: null
import ddt
import mock
from unittest import TestCase
from muduapiclient.client import MuduApiClient, gen_signed_params
import time
@ddt.ddt
class MuduApiClientTests(TestCase):
@ddt.unpack
@ddt.data(
('ACCESS_KEY', 'SECRET_KEY', {'page':1, 'live_status':2}),
)
def test_gen_signed_params(self, ak, sk, kwargs):
original_time = time.time
time.time = mock.Mock(return_value='1234567890')
signed_params = gen_signed_params(ak, sk, kwargs)
time.time = original_time
self.assertIn('sign', signed_params)
self.assertEqual(signed_params['sign'], 'af7470c6f59d051c633401d1fd0b86fd1aa05352')
self.assertNotIn('secret_key', signed_params)
@ddt.unpack
@ddt.data(
('ACCESS_KEY', 'SECRET_KEY', {'page':1, 'live_status':2}),
('507cfcdfe351e13e6f1c8ba87b80969f', 'SECRET_KEY', {'page':1, 'live_status':4}),
)
def test_call_live(self, ak, sk, kwargs):
api = MuduApiClient(ak, sk)
response = api.call('POST', 'live', 'List', **kwargs)
self.assertIn('code', response)
721b25bd54ec37339248810e92f0fd66777d24b1 | 586 | py | Python | Projectreview/Projectreview/urls.py | bestgunman/Gitwaxingproduct | ["MIT"] | stars: 1
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from productapp.views import product_list
from reviewapp.views import index
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^admin/', admin.site.urls),
url(r'^review/', include('reviewapp.urls')),
url(r'^product/', include('productapp.urls')),
url(r'^account/', include('accountapp.urls')),
]
if settings.DEBUG:
urlpatterns += static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
    )
721e9bba1e7ea66054b20c27b7571b65855aeaa1 | 5,970 | py | Python | ttt.py | YukkuriC/PyTicTacToe | ["MIT"] | stars: null
__doc__ = '''
Tic-tac-toe infrastructure.
Contains the board class and the kernel for running a single game.
'''
from threading import Thread
from time import process_time
if 'enums':
    OK = 0  # game continues
    ENDGAME = 1  # three in a row formed
    DRAW = 2  # board full, draw
    INVALID = -1  # illegal return value (wrong type / out of bounds)
    CONFILCT = -2  # conflicting move (dropped on an occupied square)
    ERROR = -3  # player code raised an exception
    TIMEOUT = -4  # player code timed out
class Board:
"""
基础棋盘类,用于计算局面情形+发放双方玩家所用局面
使用数字1、2分别代表不同方玩家落子
"""
def __init__(self):
self.pool = {} # 仅填充1/2的字典
self.history = [] # 落子历史
def get_board(self, plr: int):
"""
为指定玩家编号返回其局面字典
字典键为2长度元组,每位数字(0,1,2)分别代表行号与列号
返回对象中包含3*3棋盘位置,对应值均为字符串,含义如下:
"S": 我方落子
"F": 对方落子
"E": 空
"""
res = {}
for x in range(3):
for y in range(3):
if (x, y) in self.pool:
res[x, y] = 'S' if self.pool[x, y] == plr else 'F'
else:
res[x, y] = 'E'
return res
def drop(self, plr, pos):
"""
指定玩家编号plr在指定位置pos落子
返回落子结果
"""
if self._drop_data_check(pos): # 非法落子检查
self.history.append('INVALID')
return INVALID
self.history.append(pos)
if pos in self.pool: # 冲突落子检查
return CONFILCT
self.pool[pos] = plr # 落子,检查游戏结束状态
return self._check_endgame()
def _drop_data_check(self, pos):
"""
检验落子位置对象是否符合要求
要求:
* 必须为列表或元组
* 长度必须为2
* 每位均为int,取值只可为0,1,2
"""
if not isinstance(pos, (list, tuple)):
return INVALID
if len(pos) != 2:
return INVALID
for i in pos:
if not (isinstance(i, int) and 0 <= i <= 2):
return INVALID
return OK
def _check_endgame(self):
""" 检查游戏状态是否结束 """
for x in range(3):
if self._3_equal(self.pool.get((x, i))
for i in range(3)): # axis 0
return ENDGAME
if self._3_equal(self.pool.get((i, x))
for i in range(3)): # axis 1
return ENDGAME
if self._3_equal(self.pool.get((i, i)) for i in range(3)): # 正对角线
return ENDGAME
if self._3_equal(self.pool.get((i, 2 - i)) for i in range(3)): # 反对角线
return ENDGAME
return OK # 不执行平局判断
def _3_equal(self, row):
""" 辅助函数:检查一行3数(非空)相等状态 """
row = iter(row)
n1 = next(row)
if not n1:
return False
for n in row:
if n != n1:
return False
return True
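def _demo_board():
    """A minimal sanity sketch for Board.drop (illustrative moves only):
    player 1 fills row 0 and the final drop reports ENDGAME."""
    b = Board()
    assert b.drop(1, (0, 0)) == OK
    assert b.drop(2, (1, 0)) == OK
    assert b.drop(1, (0, 1)) == OK
    assert b.drop(2, (1, 1)) == OK
    assert b.drop(1, (0, 2)) == ENDGAME  # row 0 is now 1-1-1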
class Game:
"""
井字棋游戏对象
接收运行双方代码并收集结果
codes:
双方代码模块,其中包含play函数,可接收Board.get_board结果作为参数并返回落子位置
names:
双方代码模块名称
timeout:
时间限制
"""
def __init__(self, codes, names=['code1', 'code2'], timeout=10):
self.codes = codes
self.names = names
self.timeout = timeout
@staticmethod
def _stringfy_error(e):
return '%s: %s' % (
type(e).__name__,
e,
)
@staticmethod
def _thread_wrap(code, board, thr_output: dict):
"""
线程内运行代码,输出结果
输入:
code: 待运行模块
board: 当前局面
output: 容纳返回值的字典
"result": 模块play函数运行结果
"error": 捕捉的运行异常
"dt": 运行用时
"""
res = {
"result": None,
"error": None,
}
try:
t1 = process_time()
output = code.play(board)
t2 = process_time()
res['result'] = output
except Exception as e:
t2 = process_time()
res['error'] = Game._stringfy_error(e)
res['dt'] = t2 - t1
thr_output.update(res)
def _get_result(self, winner, reason, extra=None):
"""
构造比赛结果字典
"orders": 该局落子顺序
"winner": 胜者
0 - 先手胜
1 - 后手胜
None - 平局
"reason": 终局原因序号
"extra": 额外描述
"timeouts": 双方使用时间历史
"""
return {
'names': self.names,
'orders': self.board.history,
'winner': winner,
'reason': reason,
'extra': extra,
'timeouts': self.timeout_history,
}
def match(self):
"""
运行一场比赛
返回值: 比赛结果字典
"""
self.board = Board()
timeouts = [self.timeout] * 2
self.timeout_history = []
for nround in range(9):
# 构造当局进程
plr_idx = nround % 2
thread_output = {}
frame = self.board.get_board(plr_idx + 1)
thr = Thread(target=self._thread_wrap,
args=(self.codes[plr_idx], frame, thread_output))
# 限时运行
thr.start()
thr.join(timeouts[plr_idx])
# 判断线程死循环
if thr.is_alive():
return self._get_result(1 - plr_idx, TIMEOUT, '死循环')
# 计时统计,判断超时
timeouts[plr_idx] -= thread_output['dt']
if timeouts[plr_idx] < 0:
return self._get_result(1 - plr_idx, TIMEOUT)
self.timeout_history.append(timeouts.copy())
# 判断报错
if thread_output['error']:
return self._get_result(
1 - plr_idx,
ERROR,
thread_output['error'],
)
# 落子判断
res = self.board.drop(plr_idx + 1, thread_output['result'])
if res == OK: # 继续循环
continue
return self._get_result(
plr_idx if res == ENDGAME else 1 - plr_idx,
res,
)
return self._get_result(None, DRAW) # 平局
if __name__ == '__main__':
import codes.dumb_ordered as plr1, codes.dumb_random as plr2
game = Game([plr1, plr2])
print(game.match())
721ec82c86e8517afd6fcd583254496b9ad3500f | 400 | py | Python | cursoemvideo/ex008.py | rafaelsantosmg/cev_python3 | ["MIT"] | stars: 1
"""Write a program that reads a value in meters and displays it converted to centimeters and millimeters"""
from utilidadescev.dado import leia_real
n = leia_real('Enter the length in meters: ')
km = n / 1000
hec = n / 100
dam = n / 10
dec = n * 10
cent = n * 100
mil = n * 1000
print(f'{km:.3f}km')
print(f'{hec:.2f}hm')
print(f'{dam:.1f}dam')
print(f'{dec:.0f}dm')
print(f'{cent:.0f}cm')
print(f'{mil:.0f}mm')
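# Sample run: entering 2 prints 0.002km, 0.02hm, 0.2dam, 20dm, 200cm, 2000mm.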
72207e110b7ba0434449b56ad831fee21813b6dc | 1,015 | py | Python | Minor Project/Weather GUI/pyowm_helper.py | ComputerScientist-01/Technocolabs-Internship-Project | ["MIT"] | stars: 4
import os
import pyowm
from datetime import datetime
from timezone_conversion import gmt_to_eastern
#API_KEY = os.environ['API_KEY']
owm = pyowm.OWM('0833f103dc7c2924da06db624f74565c')
mgr = owm.weather_manager()
def get_temperature():
days = []
dates = []
temp_min = []
temp_max = []
forecaster = mgr.forecast_at_place('New York, US', '3h')
    forecast = forecaster.forecast
for weather in forecast:
day = gmt_to_eastern(weather.reference_time())
date = day.date()
if date not in dates:
dates.append(date)
temp_min.append(None)
temp_max.append(None)
days.append(date)
temperature = weather.temperature('fahrenheit')['temp']
if not temp_min[-1] or temperature < temp_min[-1]:
temp_min[-1] = temperature
if not temp_max[-1] or temperature > temp_max[-1]:
temp_max[-1] = temperature
return(days, temp_min, temp_max)
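# Shape note: the three lists returned above are parallel; days[i] is a
# datetime.date and temp_min[i]/temp_max[i] are its forecast extremes (°F).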
if __name__ == '__main__':
get_temperature()
7221d6876591c7703ef947738f8354cdcf1efa5d | 82,647 | py | Python | MCDR.py | Qltan/MCDR | ["MIT"] | stars: null
import copy
import datetime
import json
import math
import multiprocessing
import numpy as np
import os
import pandas as pd
import pydotplus
import random
import re
import time
from math import *
from sklearn import metrics
_CLUSTER_DATA = './bike_sharing_data/mydata'
RATEDATA = './bike_sharing_data/mydata/'
rateName = 'rental_return_rate_cluster_6_month_678_timedelta_5.json'
# STATION_STATUS = './station_status_by_id'
def getMonthCluster():
cluster = '6'
month = 678
return month, cluster
def getCluster():
with open(os.path.join(_CLUSTER_DATA, 'clusters.json'), 'r') as f:
clusters = json.load(f)
del clusters['5']['402']
del clusters['5']['491']
return clusters
def getRateData():
with open(os.path.join(RATEDATA, rateName), 'r') as f:
rateData = json.load(f)
return rateData
def getPositionAndStations_id():
clusters = getCluster()
month, cluster = getMonthCluster()
use_cluster = clusters[cluster]
stations_id = []
position = {}
for key, values in use_cluster.items():
stations_id.append(key)
position[key] = values['position']
return position, stations_id
def getInitialInfo():
month, cluster = getMonthCluster()
pattern2 = re.compile('^cluster_[0-9]+_')
filelist2 = os.listdir(_CLUSTER_DATA)
for filename in filelist2:
if filename == 'cluster_6_month_678_initialStationInfo.json':
cluster1 = filename.split('_')[1]
month1 = filename.split('_')[3]
if cluster1 == str(cluster) and month1 == str(month):
print(filename)
with open(os.path.join(_CLUSTER_DATA, filename), 'r') as f:
initialInfo = json.load(f)
return initialInfo
def haversine_array(lat1, lng1, lat2, lng2):
lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))
# print lat1, lng1, lat2, lng2
a = lat1 - lat2
b = lng1 - lng2
s = 2 * asin(sqrt(pow(sin(a / 2), 2) + cos(lat1) * cos(lat2) * pow(sin(b / 2), 2)))
earth_radius = 6378.137
s = s * earth_radius
    # s is non-negative after the haversine formula, but keep the guard;
    # the unreachable `return h` (h was never defined) has been dropped.
    if s < 0:
        return round(-s, 3)
    else:
        return round(s, 3)
def manhattan_distance(lat1, lng1, lat2, lng2):
a = haversine_array(lat1, lng1, lat1, lng2)
b = haversine_array(lat1, lng1, lat2, lng1)
return a + b
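# Quick illustration: with the haversine above, the L1 street distance is the
# north-south leg plus the east-west leg; for two hypothetical midtown points,
# manhattan_distance(40.75, -73.99, 40.76, -73.98) is roughly 1.9-2.0 km.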
def getNeighbor(stations_id, position):
neighbor = {}
maxDis = 0
for station_id1 in stations_id:
nei = []
for station_id2 in stations_id:
d = manhattan_distance(position[str(station_id1)][0], position[str(station_id1)][1],
position[str(station_id2)][0], position[str(station_id2)][1])
if 0.6 > d > 0:
nei.append(str(station_id2))
if d > maxDis:
maxDis = d
neighbor[str(station_id1)] = nei
return neighbor
def getOlderNeighbor(stations_id, position):
neighbor = {}
maxDis = 0
for station_id1 in stations_id:
nei = []
for station_id2 in stations_id:
d = manhattan_distance(position[str(station_id1)][0], position[str(station_id1)][1],
position[str(station_id2)][0], position[str(station_id2)][1])
if 0.9 > d > 0:
nei.append(str(station_id2))
if d > maxDis:
maxDis = d
neighbor[str(station_id1)] = nei
return neighbor
def getMonthDayAndHour(): # month, day and hour used in this experiment
mon = 8
day = 99
hour = 7
return mon, day, hour
def getStation_status():
monDay = {'6': 30, '7': 31, '8': 31}
mon, day, hour = getMonthDayAndHour()
initialByDay = {}
totalDocksDict = {}
initialInfo = getInitialInfo()
position, stations_id = getPositionAndStations_id()
for station_id, values in initialInfo.items():
totD = values['totalDocks']
totalDocksDict[str(station_id)] = totD
for day in range(0, monDay[str(mon)]):
sta = {}
for station_id, values in initialInfo.items():
inf = values['info']
monInf = inf[str(mon)]
sta[str(station_id)] = monInf[day]
initialByDay[str(day + 1)] = sta
station_status = {}
for day in range(0, monDay[str(mon)]):
station_status1 = {}
for station_id in stations_id:
stationInf = initialByDay[str(day + 1)][str(station_id)][str(day + 1)][str(hour)]
station_status1[str(station_id)] = stationInf
station_status[str(day + 1)] = station_status1
return station_status, totalDocksDict
###########################
# MCTS algorithm
class BikeSystem(object):
def __init__(self, availStations=[]):
self.availStations = copy.deepcopy(availStations)
def update(self, station_id):
self.availStations.remove(str(station_id))
class MCTS(object):
def __init__(self, availStations, time=6, max_actions=1000):
self.availStations = availStations
self.calculation_time = float(time)
self.max_actions = max_actions
self.confident = 8
self.equivalence = 10000 # calc beta
self.max_depth = 1
self.fileCount = 0
def get_action(self, rootStationId, starttime, neighbor, rateData, station_status, totalDocksDict, day,
olderNeighbor): # rootStationId: current truck parking station
position, stations_id = getPositionAndStations_id()
if len(self.availStations) == 1:
return self.availStations[0]
self.visited_times = {} # key: station_id, value: visited times
simulations = 0
begin = time.time()
Q = {str(sta_id): -99999 for sta_id in self.availStations} # recalculation Q value
balanceBikeNums = {str(sta_id): 0 for sta_id in self.availStations}
countmax = 0
count = 0
expandStaSet = set()
# self.fileCount = 0
while simulations < self.max_actions + 1:
availStations_copy = copy.deepcopy(self.availStations)
countmax, count = self.run_simulation(availStations_copy, rootStationId, Q, starttime, balanceBikeNums,
neighbor,
simulations,
expandStaSet, countmax, count, rateData, station_status,
totalDocksDict, day, olderNeighbor, position)
simulations += 1
# select the station with the maximum Q value
maxQ, selected_station_id = self.select_one_station(Q, starttime, rateData, totalDocksDict, station_status, day,
rootStationId)
print("total simulations=", simulations)
print("Time spent in the simulation process:", str(time.time() - begin))
print('Maximum number of access to uct:' + str(countmax))
print('Total number of access to uct:' + str(count))
print('Maximum Q:', maxQ)
print('Maximum depth searched:', self.max_depth)
return selected_station_id
def select_one_station(self, Q, starttime, rateData, totalDocksDict, station_status, day, rootStationId):
notInServiceLevalStas = []
t_interval = starttime / 5
mon = 8
hour = 7
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
for sta in self.availStations:
rateDict = rateData[str(sta)]
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
availableBikes = station_status[str(day)][str(sta)]['availableBikes']
availableDocks = station_status[str(day)][str(sta)]['availableDocks']
totalDocks = totalDocksDict[str(sta)]
for i in np.arange(0,
int(t_interval)): # real-time bikes docks
deltaNum = rental_rate_0[i] - return_rate_0[i]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
realBikes = availableBikes
realDocks = availableDocks
totalDocks = totalDocksDict[str(sta)]
serviceLevel = []
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(int(t_interval), int(t_interval) + 24):
if j >= 48:
break
else:
deltaNum = rental_rate_0[j] - return_rate_0[j]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
if serviceLevel == [] or float(realBikes) < min(serviceLevel) or float(realBikes) > max(serviceLevel):
notInServiceLevalStas.append(sta)
if not notInServiceLevalStas:
maxQ, sta_id = max((Q[str(sta_id)], sta_id) for sta_id in self.availStations)
else:
maxQ, sta_id = max((Q[str(sta_id)], sta_id) for sta_id in notInServiceLevalStas)
if maxQ == -99999:
minDis = 10000
print(notInServiceLevalStas)
position, stations_id = getPositionAndStations_id()
for sta in notInServiceLevalStas:
dis = manhattan_distance(position[str(rootStationId)][0], position[str(rootStationId)][1],
position[str(sta)][0], position[str(sta)][1])
if dis < minDis:
minDis = dis
sta_id = sta
# maxQ, sta_id = max((Q[str(sta_id)], sta_id) for sta_id in self.availStations)
if sta_id == '238':
print(Q)
print('Q[238]:' + str(Q['238']))
return maxQ, sta_id
def run_simulation(self, availStations, rootStationId,
Q, starttime, balanceBikeNums, neighbor, simulations, expandStaSet, countmax,
count2, rateData, station_status, totalDocksDict, day, olderNeighbor,
position): # conduct run_simulation and get a path
"""
MCTS main process
"""
visited_times = self.visited_times
# availStations = bikeSystem.availStations
visited_paths = []
cumulativeDis = [] # The total travel distance of the truck
expand = True
selectedSta = rootStationId
dropNum = 0
pickNum = 0
# simulation
count = 0
countRequestFlag = 0
neiStaQ = []
for t in range(1, self.max_actions + 1):
lastStation = selectedSta
if all(visited_times.get(station_id) for station_id in availStations): # UCB
log_total = log(sum(visited_times[str(sta_id)] for sta_id in availStations))
value, sta_id = max((
Q[str(sta_id)] + sqrt(self.confident * log_total / visited_times[str(sta_id)]),
sta_id)
for sta_id in
availStations)
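                # UCB1 selection: exploit the running mean Q and add the
                # exploration bonus sqrt(c * ln(N) / n_i), with N the total
                # visits over candidates and n_i this station's visit count.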
selectedSta = sta_id
count += 1
count2 += 1
else:
                availNeighbor = [sta_id for sta_id in neighbor[str(lastStation)] if sta_id in availStations]
                # NOTE: random.random() < 0 is never true, so the
                # neighbor-biased choice below is effectively disabled.
                if len(availNeighbor) and random.random() < 0:
selectedSta = random.choice(availNeighbor)
else:
selectedSta = random.choice(availStations)
# bikeSystem.update(selectedSta)
availStations.remove(str(selectedSta))
# Expand
if expand is True and str(selectedSta) not in visited_times:
expand = False
visited_times[str(selectedSta)] = 0
expandStaSet.add(str(selectedSta))
if t > self.max_depth:
self.max_depth = t
visited_paths.append(selectedSta)
is_full = not len(availStations)
isRequest, endtime, dropNum0, pickNum0, real_bikes, real_docks = self.getRequest(lastStation, selectedSta,
Q, starttime,
cumulativeDis, rateData,
station_status,
totalDocksDict, day,
position)
starttime = endtime
if isRequest:
availselectedStaNeighbor = [sta_id for sta_id in olderNeighbor[str(selectedSta)] if
sta_id in availStations]
# neiStaQ = {str(sta):0 for sta in availselectedStaNeighbor}
for neiSta in availselectedStaNeighbor:
cumulativeDisCopy = copy.deepcopy(cumulativeDis)
diss = []
dis = manhattan_distance(position[str(selectedSta)][0], position[str(selectedSta)][1],
position[str(neiSta)][0], position[str(neiSta)][1])
# cumulativeDisCopy.append(dis)
cumulativeDisCopy.append(dis)
                v = 7  # truck speed in m/s (about 25 km/h)
t = dis * 1000 / v
t_arrive = starttime + round(t / 60)
t_interval = round(t_arrive / 5)
serviceLevel, real_bikess, real_dockss = self.getServiceLevel(neiSta, t_interval,
rateData, station_status,
totalDocksDict, day)
dropNum = 0
pickNum = 0
if not serviceLevel: # return>>rental
pickNum = real_bikes
else:
minBikes = min(serviceLevel)
maxBikes = max(serviceLevel)
if minBikes <= real_bikes <= maxBikes:
pass
else:
if real_bikes < minBikes:
dropNum = minBikes - real_bikes # TN
if real_bikes > maxBikes:
pickNum = real_bikes - maxBikes
balanceBikeNumss = dropNum + pickNum
flag = -1
if dropNum > 0:
flag = 0
elif pickNum > 0:
flag = 1
neiStaQ.append(self.getScore(cumulativeDisCopy, balanceBikeNumss, real_bikess, real_dockss, flag))
if is_full or isRequest:
break
if count > countmax:
countmax = count
# Back-propagation
balanceBikeNums[str(selectedSta)] = dropNum0 + pickNum0
flag = -1
if dropNum0 > 0:
flag = 0
elif pickNum0 > 0:
flag = 1
# if selectedSta=='229':
# print('real_docks:'+str(real_docks))
for sta_id in visited_paths:
if sta_id not in visited_times:
continue
visited_times[str(sta_id)] += 1
if isRequest:
if not neiStaQ:
neiStaQ.append(0)
score = self.getScore(cumulativeDis, balanceBikeNums[str(selectedSta)],
real_bikes, real_docks, flag) + np.mean(neiStaQ)
Q[str(sta_id)] = (abs(Q[str(sta_id)]) * (visited_times[str(sta_id)] - 1) +
score) / visited_times[str(sta_id)]
Q[str(sta_id)] = round(Q[str(sta_id)], 4)
log_dir = './bike_sharing_data/mydata/log/' + str(self.fileCount + 1)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
with open(log_dir + '/' + str(simulations + 1) + '.json',
'w') as f:
json.dump(Q, f)
return countmax, count2
def getScore(self, cumulativeDis, balanceNums, real_bikes, real_docks, flag):
disScore = 0
numScore = 0
score = 0
if sum(cumulativeDis) <= 300:
disScore = 10
elif sum(cumulativeDis) <= 600:
disScore = 6
elif sum(cumulativeDis) <= 1000:
disScore = 4
elif sum(cumulativeDis) <= 1500:
disScore = 2
elif sum(cumulativeDis) <= 2000:
disScore = 0
else:
disScore = -5
# dis = sum(cumulativeDis)
# if dis>=3000:
# disScore = -10
# elif dis>=2000:
# disScore = 20-10*(dis/1000)
# elif dis>=0:
# disScore = 10-5*(dis/1000)
if balanceNums == 0:
numScore = 0
elif balanceNums <= 3:
numScore = 2
elif balanceNums <= 6:
numScore = 4
elif balanceNums <= 10:
numScore = 6
else:
numScore = 10
# if balanceNums >=10:
# numScore = 10
# else:
# numScore = balanceNums
urgencyScore = 0
if flag == 0 and real_bikes <= 1:
urgencyScore = 10
elif flag == 1 and real_docks <= 1:
urgencyScore = 10
elif flag == -1:
return 0
score = 0.5 * disScore + 0.5 * numScore + urgencyScore
return score
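    # Worked example for getScore: a 500 m trip (disScore 6) rebalancing
    # 5 bikes (numScore 4) at a pick-up whose station has <= 1 free dock
    # (urgencyScore 10) scores 0.5*6 + 0.5*4 + 10 = 15.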
def getRequest(self, lastStation, selectedSta, Q, starttime, cumulativeDis, rateData, station_status,
totalDocksDict, day, position):
dis = manhattan_distance(position[str(lastStation)][0], position[str(lastStation)][1],
position[str(selectedSta)][0], position[str(selectedSta)][1])
cumulativeDis.append(round(dis * 1000, 3))
noise = abs(np.random.normal(loc=0.0, scale=2))
        v = 7  # truck speed in m/s (about 25 km/h)
        t = dis * 1000 / v  # travel time in seconds; dis is in km
t_arrive = starttime + round(t / 60)
t_interval = round(t_arrive / 5)
serviceLevel, real_bikes, real_docks = self.getServiceLevel(selectedSta, t_interval, rateData, station_status,
totalDocksDict, day)
dropNum = 0
pickNum = 0
endtime = t_arrive
        if not serviceLevel:  # no feasible level (returns dominate rentals): pick up all bikes
endtime = t_arrive + real_bikes * 0.3 + noise
pickNum = real_bikes
return True, endtime, dropNum, pickNum, real_bikes, real_docks
else:
minBikes = min(serviceLevel)
maxBikes = max(serviceLevel)
if minBikes <= real_bikes <= maxBikes:
endtime = t_arrive + noise
return False, endtime, dropNum, pickNum, real_bikes, real_docks
else:
if real_bikes < minBikes:
dropNum = minBikes - real_bikes
endtime = t_arrive + dropNum * 0.3 + noise # drop/take time (30s)
if real_bikes > maxBikes:
pickNum = real_bikes - maxBikes
endtime = t_arrive + pickNum * 0.3 + noise
return True, endtime, dropNum, pickNum, real_bikes, real_docks
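    # Units in the timing math above: manhattan_distance() is treated as
    # returning kilometres throughout this file, so dis * 1000 / v yields
    # seconds at v = 7 m/s. A hypothetical 0.84 km leg costs
    # 840 / 7 = 120 s, i.e. round(120 / 60) = 2 min, and
    # t_interval = round(t_arrive / 5) then indexes the 5-minute rate
    # slots used by getServiceLevel().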
def getServiceLevel(self, selectedSta, t_interval, rateData, station_status, totalDocksDict, day):
# mon,day,hour = getMonthDayAndHour()
mon = 8
hour = 7
rateDict = rateData[str(selectedSta)]
t_intervalFlag = 0
if hour == 7:
t_intervalFlag = 0
elif hour == 8:
t_intervalFlag = 12
elif hour == 9:
t_intervalFlag = 24
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
iniBikes = station_status[str(day)][str(selectedSta)]['availableBikes']
iniDocks = station_status[str(day)][str(selectedSta)]['availableDocks']
totalDocks = totalDocksDict[str(selectedSta)]
serviceLevel = []
availableBikes = iniBikes
# print('selectedSta:'+str(selectedSta))
availableDocks = iniDocks
# print('selectedSta:'+str(selectedSta))
# print('iniBikes:'+str(iniBikes))
for i in np.arange(int(t_intervalFlag), int(t_interval) + int(t_intervalFlag)): # real-time bikes docks
            deltaNum = rental_rate_0[i] - return_rate_0[i]
if float(availableBikes) < 1.0:
pass # rental_lost += deltNum
if float(availableDocks) < 1.0:
pass # return_lost += deltNum
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
realBikes = availableBikes
realDocks = availableDocks
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(int(t_intervalFlag) + int(t_interval), int(t_interval) + int(t_intervalFlag) + 24):
deltaNum = 0
if j >= 48:
break
else:
                    try:
                        deltaNum = rental_rate_0[j] - return_rate_0[j]
                    except Exception:
                        print('rateData error! j:' + str(j))
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
# print('availableBikes:'+str(availableBikes))
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
# print('availaableDocks:'+str(availableDocks))
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
# print('availableDocks:'+str(availableDocks))
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
# print('availableBikes:'+str(availableBikes))
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
return serviceLevel, math.floor(float(realBikes)), math.floor(float(realDocks))
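    # Reading the result above: serviceLevel holds every opening bike
    # count (totalDocks - docks) that survives the 24-slot (two-hour)
    # look-ahead without dropping to <= 1 bike or <= 1 free dock, so
    # min()/max() of the list bound the acceptable inventory that
    # getRequest() rebalances toward.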
def start(availStations, neighbor, lostNums, visitedPath, cumulativeDis, startStation, balanceNum, mutex, realtimeBikes,
day, olderNeighbor):
print("start running, the process number is %d" % (os.getpid()))
mcts = MCTS(availStations)
selectedSta = startStation
starttime = 0
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
# visitedPath = []
# cumulativeDis = []
info = {}
visitedPath.append(selectedSta)
totalLost = 0
print('start station:' + str(selectedSta))
# lostNums = {}
isRequest, starttime, dropNum, pickNum, rentalLost, returnLost, realbikes = getRequest(selectedSta, selectedSta,
starttime,
cumulativeDis, rateData,
station_status,
totalDocksDict, day)
lostNums[str(selectedSta)] = float(rentalLost) + float(returnLost)
totalLost += lostNums[str(selectedSta)]
info['time'] = starttime
info['realbikes'] = realbikes
realtimeBikes[str(selectedSta)] = info
if int(dropNum) > 0:
balanceNum[str(selectedSta)] = -int(dropNum)
elif int(pickNum) > 0:
balanceNum[str(selectedSta)] = int(pickNum)
else:
balanceNum[str(selectedSta)] = 0
if isRequest:
print('sub-process:pid=%d' % os.getpid())
print('balance station:' + str(selectedSta) + ' dropNum:' + str(dropNum) + ' pickNum:' + str(pickNum))
print('customer loss:' + str(lostNums[str(selectedSta)]))
print('current time:' + str(starttime) + ' min')
print('travel distance:')
print(cumulativeDis)
# bikeSystem.update(selectedSta)
availStations.remove(str(selectedSta))
mcts.fileCount = 0
    while True:
lastSta = selectedSta
info = {}
mutex.acquire()
if not len(availStations):
print('There are no stations need to be balanced')
lostNums['totalLost'] = totalLost
mutex.release()
break
selectedSta = mcts.get_action(lastSta, starttime, neighbor, rateData, station_status, totalDocksDict, day,
olderNeighbor)
mcts.fileCount += 1
print('through station:' + str(selectedSta))
# bikeSystem.update(selectedSta)
availStations.remove(str(selectedSta))
mutex.release()
visitedPath.append(selectedSta)
isRequest, starttime, dropNum, pickNum, rentalLost, returnLost, realbikes = getRequest(lastSta, selectedSta,
starttime,
cumulativeDis, rateData,
station_status,
totalDocksDict, day)
lostNums[str(selectedSta)] = float(rentalLost) + float(returnLost)
totalLost += lostNums[str(selectedSta)]
info['time'] = starttime
info['realbikes'] = realbikes
realtimeBikes[str(selectedSta)] = info
if int(dropNum) > 0:
balanceNum[str(selectedSta)] = -int(dropNum)
elif int(pickNum) > 0:
balanceNum[str(selectedSta)] = int(pickNum)
else:
balanceNum[str(selectedSta)] = 0
if isRequest:
print('sub-process:pid=%d' % os.getpid())
print('balance station:' + str(selectedSta) + ' dropNum:' + str(dropNum) + ' pickNum:' + str(pickNum))
print('customer loss:' + str(lostNums[str(selectedSta)]))
print('current time:' + str(starttime) + ' min')
print('travel distance:')
print(cumulativeDis)
if not len(availStations):
print('There are no stations need to be balanced')
lostNums['totalLost'] = totalLost
break
print('****************************************************')
def getRequest(lastStation, selectedSta, starttime, cumulativeDis, rateData, station_status, totalDocksDict, day):
position, stations_id = getPositionAndStations_id()
dis = manhattan_distance(position[str(lastStation)][0], position[str(lastStation)][1],
position[str(selectedSta)][0],
position[str(selectedSta)][1])
cumulativeDis.append(round(dis * 1000, 3))
    noise = abs(np.random.normal(loc=0.0, scale=2))
    v = 7  # truck speed in m/s (about 25 km/h)
    t = dis * 1000 / v
t_arrive = starttime + t // 60
t_interval = t_arrive // 5
dropNum = 0
pickNum = 0
realbikes = 0
serviceLevel, real_bikes, real_docks, rentalLost, returnLost = getServiceLevel(selectedSta, t_interval, rateData,
station_status, totalDocksDict, day)
    if not serviceLevel:  # no feasible level (returns dominate rentals): pick up all bikes
print('serviceLevel is null')
endtime = t_arrive + real_bikes * 0.3 + noise
pickNum = real_bikes
realbikes = 0
        return True, endtime, dropNum, pickNum, rentalLost, returnLost, realbikes
else:
minBikes = min(serviceLevel)
maxBikes = max(serviceLevel)
endtime = t_arrive
if minBikes <= real_bikes <= maxBikes:
endtime = t_arrive + noise
if selectedSta == '127':
print('dropNum:' + str(dropNum))
print('pickNum:' + str(pickNum))
realbikes = real_bikes
return False, endtime, dropNum, pickNum, rentalLost, returnLost, realbikes
else:
if real_bikes < minBikes:
dropNum = minBikes - real_bikes
endtime = t_arrive + dropNum * 0.3 + noise
if real_bikes > maxBikes:
pickNum = real_bikes - maxBikes
endtime = t_arrive + pickNum * 0.3 + noise
if selectedSta == '127':
print('dropNum:' + str(dropNum))
print('pickNum:' + str(pickNum))
if pickNum != 0:
realbikes = maxBikes
elif dropNum != 0:
realbikes = minBikes
return True, endtime, dropNum, pickNum, rentalLost, returnLost, realbikes
def getServiceLevel(selectedSta, t_interval, rateData, station_status, totalDocksDict, day):
# mon,day,hour = getMonthDayAndHour()
mon = 8
hour = 7
rateDict = rateData[str(selectedSta)]
t_intervalFlag = 0
if hour == 7:
t_intervalFlag = 0
elif hour == 8:
t_intervalFlag = 12
elif hour == 9:
t_intervalFlag = 24
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
iniBikes = station_status[str(day)][str(selectedSta)]['availableBikes']
iniDocks = station_status[str(day)][str(selectedSta)]['availableDocks']
totalDocks = totalDocksDict[str(selectedSta)]
serviceLevel = []
availableBikes = iniBikes
availableDocks = iniDocks
if selectedSta == '127':
print('iniBikes:' + str(availableBikes))
print('iniDocks:' + str(availableDocks))
print('t_interval:' + str(t_interval))
print(totalDocks)
rentalLost = 0
returnLost = 0
for i in np.arange(int(t_intervalFlag), int(t_interval) + int(t_intervalFlag)): # real-time bikes docks
        deltaNum = rental_rate_0[i] - return_rate_0[i]
        if float(availableBikes) < 1.0 and deltaNum > 0:
            rentalLost += deltaNum
        if float(availableDocks) < 1.0 and deltaNum < 0:
            returnLost += abs(deltaNum)
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
if selectedSta == '127':
print('realBikes:' + str(availableBikes))
print('realDocks:' + str(availableDocks))
realBikes = availableBikes
realDocks = availableDocks
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(int(t_intervalFlag) + int(t_interval), int(t_interval) + int(t_intervalFlag) + 24):
deltaNum = 0
if j >= 48:
break
else:
deltaNum = rental_rate_0[j] - return_rate_0[j]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
# print('availableBikes:'+str(availableBikes))
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
# print('availableDocks:'+str(availableDocks))
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
# print('availableDocks:'+str(availableDocks))
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
# print('availableBikes:'+str(availableBikes))
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
if selectedSta == '127':
print(serviceLevel)
return serviceLevel, math.floor(float(realBikes)), math.floor(float(realDocks)), rentalLost, returnLost
def mctsAlgorithm():
experiment_path = './bike_sharing_data/mydata/experiment_result2'
# month, day, hour = getMonthDayAndHour()
month = 8
hour = 7
day1 = [i for i in range(1, 32)]
day2 = [5, 6, 12, 13, 19, 20, 26, 27] # The weekend of August!
days = [i for i in day1 if i not in day2]
# 11 -> 1
for day in days:
        position, stations_id = getPositionAndStations_id()
        manager = multiprocessing.Manager()
        availStations = manager.list(stations_id)
        realtimeBikes = manager.dict()
        lostNums1 = manager.dict()
        visitedPath1 = manager.list()
        cumulativeDis1 = manager.list()
        balanceNum1 = manager.dict()
        lostNums2 = manager.dict()
        visitedPath2 = manager.list()
        cumulativeDis2 = manager.list()
        balanceNum2 = manager.dict()
neighbor = getNeighbor(stations_id, position)
olderNeighbor = getOlderNeighbor(stations_id, position)
startStation1 = '237'
startStation2 = '369'
mutex = multiprocessing.Lock()
p1 = multiprocessing.Process(target=start, args=(
availStations, neighbor, lostNums1, visitedPath1, cumulativeDis1, startStation1, balanceNum1, mutex,
realtimeBikes, day, olderNeighbor))
p2 = multiprocessing.Process(target=start, args=(
availStations, neighbor, lostNums2, visitedPath2, cumulativeDis2, startStation2, balanceNum2, mutex,
realtimeBikes, day, olderNeighbor))
p1.start()
p2.start()
p1.join()
p2.join()
print('customer loss:' + str(lostNums1))
print('through station:' + str(visitedPath1))
print('balanced number:' + str(balanceNum1))
print('travel distance:' + str(cumulativeDis1))
print('customer loss:' + str(lostNums2))
print('through station:' + str(visitedPath2))
print('balanced number:' + str(balanceNum2))
print('travel distance:' + str(cumulativeDis2))
print('pre-process:pid=%d' % os.getpid())
print('real status of stations:' + str(realtimeBikes))
filename = 'result_month_' + str(month) + '_day_' + str(day) + '_hour_' + str(hour) + '.json'
realtimeBikes1 = {}
for sta, dicts in realtimeBikes.items():
realtimeBikes1[str(sta)] = dicts
experimentResult = {}
resultTruck1 = {}
resultTruck2 = {}
lostNums11 = {}
balanceNum11 = {}
for sta, num in lostNums1.items():
lostNums11[str(sta)] = num
for sta, num in balanceNum1.items():
balanceNum11[str(sta)] = num
resultTruck1['lostUsers'] = lostNums11
resultTruck1['visitedPath'] = list(visitedPath1)
resultTruck1['balanceNum'] = balanceNum11
resultTruck1['travelDis'] = list(cumulativeDis1)
lostNums22 = {}
balanceNum22 = {}
for sta, num in lostNums2.items():
lostNums22[str(sta)] = num
for sta, num in balanceNum2.items():
balanceNum22[str(sta)] = num
resultTruck2['lostUsers'] = lostNums22
resultTruck2['visitedPath'] = list(visitedPath2)
resultTruck2['balanceNum'] = balanceNum22
resultTruck2['travelDis'] = list(cumulativeDis2)
experimentResult['truck1'] = resultTruck1
experimentResult['truck2'] = resultTruck2
experimentResult['afterBalanceRealBikes'] = realtimeBikes1
experiment_path = './bike_sharing_data/mydata/experiment_result2/epsilon_0'
if not os.path.exists(experiment_path):
os.makedirs(experiment_path)
with open(os.path.join(experiment_path, filename), 'w') as f:
json.dump(experimentResult, f)
        print('day ' + str(day) + ' finished!')
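# For reference, each per-day file written by mctsAlgorithm() has roughly
# this shape (field names taken from the dict assembled above):
#
#     {
#       "truck1": {"lostUsers": {...}, "visitedPath": [...],
#                  "balanceNum": {...}, "travelDis": [...]},
#       "truck2": {...},
#       "afterBalanceRealBikes": {"<station_id>": {"time": ...,
#                                                  "realbikes": ...}}
#     }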
def noRepositionStart(lostNums):
starttime = 0
position, stations_id = getPositionAndStations_id()
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
# mon,day2,hour = getMonthDayAndHour()
mon = 8
for day in range(1, 32):
totalLost = 0
lost = {}
for station_id in stations_id:
rateDict = rateData[str(station_id)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
iniBikes = station_status[str(day)][str(station_id)]['availableBikes']
iniDocks = station_status[str(day)][str(station_id)]['availableDocks']
totalDocks = totalDocksDict[str(station_id)]
availableBikes = iniBikes
availableDocks = iniDocks
rentalLost = 0
returnLost = 0
for i in np.arange(0, 48):
                deltaNum = rental_rate_0[i] - return_rate_0[i]
                if deltaNum > 0 and (deltaNum - float(availableBikes)) > 0:
                    rentalLost += (deltaNum - float(availableBikes))
                if deltaNum < 0 and (abs(deltaNum) - float(availableDocks)) > 0:
                    returnLost += (abs(deltaNum) - float(availableDocks))
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
lost[str(station_id)] = rentalLost + returnLost
totalLost += lost[str(station_id)]
lost['totalLost'] = totalLost
print(totalLost)
lostNums[str(day)] = lost
def noReposition():
experiment_path = './bike_sharing_data/mydata/noReposition'
if not os.path.exists(experiment_path):
os.makedirs(experiment_path)
# month,day,hour = getMonthDayAndHour()
month = 8
hour = 7
lostNums = {}
noRepositionStart(lostNums)
print(lostNums)
    filename = 'noRepositionLost_month_' + str(month) + '_hour_' + str(78910) + '.json'  # 78910 appears to label the 7-10 AM hour block
with open(os.path.join(experiment_path, filename), 'w') as f:
json.dump(lostNums, f)
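# noReposition() above is the do-nothing baseline: it simulates all 48
# five-minute demand slots from each station's recorded opening inventory
# and sums the rental/return losses, which is the figure the
# repositioning strategies in this file are compared against.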
def staticRepositionStart(lostNums):
position, stations_id = getPositionAndStations_id()
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
mon, day, hour = getMonthDayAndHour()
for day in range(1, 32):
totalLost = 0
lost = {}
for station_id in stations_id:
rateDict = rateData[str(station_id)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
totalDocks = totalDocksDict[str(station_id)]
serviceLevel = []
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(0, 19):
                    deltaNum = rental_rate_0[j] - return_rate_0[j]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
# print('availableBikes:'+str(availableBikes))
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
# print('availableDocks:'+str(availableDocks))
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
# print('availableDocks:'+str(availableDocks))
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
# print('availableBikes:'+str(availableBikes))
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
            iniBikes = random.choice(serviceLevel)
iniDocks = int(totalDocks) - iniBikes
availableBikes = iniBikes
availableDocks = iniDocks
# if station_id == '127':
# print('iniBikes:' + str(availableBikes))
# print('iniDocks:' + str(availableDocks))
# print(totalDocks)
rentalLost = 0
returnLost = 0
for i in np.arange(0, 48): # real-time bikes docks
                deltaNum = rental_rate_0[i] - return_rate_0[i]
                if deltaNum > 0 and (deltaNum - float(availableBikes)) > 0:
                    rentalLost += (deltaNum - float(availableBikes))
                if deltaNum < 0 and (abs(deltaNum) - float(availableDocks)) > 0:
                    returnLost += (abs(deltaNum) - float(availableDocks))
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
lost[str(station_id)] = rentalLost + returnLost
totalLost += lost[str(station_id)]
lost['totalLost'] = totalLost
print(totalLost)
lostNums[str(day)] = lost
def staticReposition():
experiment_path = './bike_sharing_data/mydata/staticReposition'
if not os.path.exists(experiment_path):
os.makedirs(experiment_path)
month, day, hour = getMonthDayAndHour()
lostNums = {}
staticRepositionStart(lostNums)
print(lostNums)
filename = 'staticRepositionLost_month_' + str(month) + '_hour_' + str(78910) + '.json'
with open(os.path.join(experiment_path, filename), 'w') as f:
json.dump(lostNums, f)
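# staticReposition() above models an idealised overnight rebalance: every
# station is re-seeded to a randomly chosen feasible service level before
# the day starts, then losses accrue over the 48 slots with no trucks
# operating during the period.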
def nearestNeihborRepositionStart(startStation, availStations, mutex, realtimeBikes, day, beforeBalancedTotalLost):
position, stations_id = getPositionAndStations_id()
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
# mon, day, hour = getMonthDayAndHour()
mon = 8
hour = 7
dropStation = []
pickStation = []
balanceStas = []
for sta in availStations:
iniBikes = station_status[str(day)][str(sta)]['availableBikes']
iniDocks = station_status[str(day)][str(sta)]['availableDocks']
if int(iniBikes) < 5:
dropStation.append(str(sta))
balanceStas.append(str(sta))
if int(iniDocks) < 5:
pickStation.append(str(sta))
balanceStas.append(str(sta))
# balanceSta = startStation
starttime = 0
    v = 7  # truck speed in m/s
while True:
        if starttime > 80:  # stop the sweep after roughly 80 minutes of simulated truck time
break
info = {}
diss = []
        minDis = 10  # nearest-station search bound (km)
pickNum = 0
dropNum = 0
print('balanceStas' + str(balanceStas))
if not balanceStas:
break
        mutex.acquire()
        balanceStas = [s for s in balanceStas if s in availStations]
        if not balanceStas:
            mutex.release()
            break
for sta in balanceStas:
dis = manhattan_distance(position[str(startStation)][0], position[str(startStation)][1], position[sta][0],
position[sta][1])
if dis < minDis:
minDis = dis
balanceSta = sta
startStation = balanceSta
availStations.remove(str(balanceSta))
mutex.release()
rateDict = rateData[str(balanceSta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
totalDocks = totalDocksDict[str(balanceSta)]
t_travel = dis * 1000 / v
t_min = math.ceil(t_travel / 60)
t = starttime + t_min
t_interval = t / 5
availableBikes = station_status[str(day)][str(balanceSta)]['availableBikes']
availableDocks = station_status[str(day)][str(balanceSta)]['availableDocks']
rentalLost = 0
returnLost = 0
for i in np.arange(0, int(t_interval)): # real-time bikes docks
            deltaNum = rental_rate_0[i] - return_rate_0[i]
            if float(availableBikes) < 1.0 and deltaNum > 0:
                rentalLost += deltaNum
            if float(availableDocks) < 1.0 and deltaNum < 0:
                returnLost += abs(deltaNum)
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
realBikes = availableBikes
realDocks = availableDocks
        mutex.acquire()
        beforeBalancedTotalLost.value = beforeBalancedTotalLost.value + returnLost + rentalLost
        mutex.release()
noise = abs(np.random.normal(loc=0.0, scale=2))
if balanceSta in dropStation:
if float(realBikes) >= 12:
endtime = t + noise
dropNum = 0
info['realbikes'] = realBikes
else:
dropNum = 12 - int(realBikes)
endtime = t + dropNum * 0.3 + noise
info['realbikes'] = 12
if balanceSta in pickStation:
if float(realDocks) >= 12:
endtime = t + noise
pickNum = 0
info['realbikes'] = float(totalDocks) - float(realDocks)
else:
pickNum = 12 - int(realDocks)
endtime = t + pickNum * 0.3 + noise
info['realbikes'] = float(totalDocks) - 12
info['time'] = endtime
realtimeBikes[str(balanceSta)] = info
starttime = endtime
print('drop:' + str(dropNum))
print('pick:' + str(pickNum))
print('distance:' + str(minDis))
print('starttime:' + str(starttime))
print(realtimeBikes)
balanceStas = []
pickStation = []
dropStation = []
for sta in availStations:
t_interval = starttime / 5
iniBikes = station_status[str(day)][str(sta)]['availableBikes']
            iniDocks = station_status[str(day)][str(sta)]['availableDocks']
            totalDocks = totalDocksDict[str(sta)]
availableBikes = iniBikes
availableDocks = iniDocks
rateDict = rateData[str(sta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
for i in np.arange(0, int(t_interval)): # real-time bikes docks
deltaNum = rental_rate_0[i] - return_rate_0[i]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
realBikes = availableBikes
realDocks = availableDocks
if float(realBikes) < 5:
dropStation.append(str(sta))
balanceStas.append(str(sta))
if float(realDocks) < 5:
pickStation.append(str(sta))
balanceStas.append(str(sta))
# getNearestNeighborLost(realtimeBikes,rateData,totalDocksDict,lostNums,station_status)
# print(dropStation)
# print(pickStation)
# print(diss)
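# The heuristic above is a greedy nearest-neighbour sweep: the truck
# repeatedly drives to the closest station within minDis (10, in the
# kilometre units used with manhattan_distance()) whose bikes or docks
# have fallen below 5, restores it to 12 bikes or 12 free docks, and then
# rebuilds the candidate list from the simulated post-visit inventories.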
def getNearestNeighborLost(realtimeBikes, day):
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
# mon,day,hour = getMonthDayAndHour()
mon = 8
hour = 7
position, stations_id = getPositionAndStations_id()
balancedSta = []
totalLost = 0
lostNums = {}
for sta, values in realtimeBikes.items():
balancedSta.append(sta)
rentalLost = 0
returnLost = 0
time = values['time']
realbikes = values['realbikes']
time_interval = time / 5
rateDict = rateData[str(sta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
totalDocks = int(totalDocksDict[str(sta)])
availableBikes = realbikes
availableDocks = float(totalDocks) - float(realbikes)
for i in np.arange(int(time_interval), 48): # real-time bikes docks
            deltaNum = rental_rate_0[i] - return_rate_0[i]
            if deltaNum > 0 and (deltaNum - float(availableBikes)) > 0:
                rentalLost += (deltaNum - float(availableBikes))
            if deltaNum < 0 and (abs(deltaNum) - float(availableDocks)) > 0:
                returnLost += (abs(deltaNum) - float(availableDocks))
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
lostNums[str(sta)] = rentalLost + returnLost
totalLost += lostNums[str(sta)]
leftStations = [sta for sta in stations_id if sta not in balancedSta]
for sta in leftStations:
rateDict = rateData[str(sta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
iniBikes = station_status[str(day)][str(sta)]['availableBikes']
iniDocks = station_status[str(day)][str(sta)]['availableDocks']
totalDocks = totalDocksDict[str(sta)]
availableBikes = iniBikes
availableDocks = iniDocks
rentalLost = 0
returnLost = 0
for i in np.arange(0, 48): # real-time bikes docks
            deltaNum = rental_rate_0[i] - return_rate_0[i]
            if deltaNum > 0 and (deltaNum - float(availableBikes)) > 0:
                rentalLost += (deltaNum - float(availableBikes))
            if deltaNum < 0 and (abs(deltaNum) - float(availableDocks)) > 0:
                returnLost += (abs(deltaNum) - float(availableDocks))
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
lostNums[str(sta)] = rentalLost + returnLost
totalLost += lostNums[str(sta)]
lostNums['totalLost'] = totalLost
print(totalLost)
return lostNums
def nearestNeihborReposition():
experiment_path = './bike_sharing_data/mydata/nearestNeihborReposition'
if not os.path.exists(experiment_path):
os.makedirs(experiment_path)
# month, day, hour = getMonthDayAndHour()
mon = 8
hour = 7
for day in range(1, 32):
        manager = multiprocessing.Manager()
        realtimeBikes = manager.dict()
        position, stations_id = getPositionAndStations_id()
        availStations = manager.list(stations_id)
beforeBalancedTotalLost = multiprocessing.Value("d", 0)
startStation1 = '237'
startStation2 = '369'
lostNums = {}
mutex = multiprocessing.Lock()
p1 = multiprocessing.Process(target=nearestNeihborRepositionStart, args=(
startStation1, availStations, mutex, realtimeBikes, day, beforeBalancedTotalLost))
p2 = multiprocessing.Process(target=nearestNeihborRepositionStart, args=(
startStation2, availStations, mutex, realtimeBikes, day, beforeBalancedTotalLost))
p1.start()
p2.start()
p1.join(9)
p2.join(9)
print(realtimeBikes)
lostNums = getNearestNeighborLost(realtimeBikes, day)
lostNums['totalLost'] += beforeBalancedTotalLost.value
print(lostNums)
filename = 'nearestNeihborRepositionLost_month_' + str(mon) + '_day_' + str(day) + '_hour_' + str(
78910) + '.json'
with open(os.path.join(experiment_path, filename), 'w') as f:
json.dump(lostNums, f)
        print('day ' + str(day) + ' finished!')
def nearestNeihborBaseServiceLevelRepositionStart(startStation, availStations, mutex, realtimeBikes,
visitedPath, visitedDis, balanceNum, beforeBalancedTotalLost, day):
position, stations_id = getPositionAndStations_id()
rateData = getRateData()
station_status, totalDocksDict = getStation_status()
# mon, day, hour = getMonthDayAndHour()
mon = 8
hour = 7
dropStation = []
pickStation = []
balanceStas = []
for sta in stations_id:
iniBikes = station_status[str(day)][str(sta)]['availableBikes']
iniDocks = station_status[str(day)][str(sta)]['availableDocks']
totalDocks = totalDocksDict[str(sta)]
serviceLevel = []
rateDict = rateData[str(sta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(0, 19):
deltaNum = 0
if j >= 48:
break
else:
deltaNum = rental_rate_0[j] - return_rate_0[j]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
if not serviceLevel:
pickStation.append(str(sta))
balanceStas.append(str(sta))
else:
if float(iniBikes) < min(serviceLevel):
dropStation.append(str(sta))
balanceStas.append(str(sta))
            if float(iniBikes) > max(serviceLevel):
pickStation.append(str(sta))
balanceStas.append(str(sta))
# balanceSta = startStation
visitedPath.append(startStation)
starttime = 0
v = 7
while True:
info = {}
        minDis = 10  # nearest-station search bound (km)
pickNum = 0
dropNum = 0
print('balanceStas' + str(balanceStas))
if not balanceStas:
break
        mutex.acquire()
        balanceStas = [s for s in balanceStas if s in availStations]
        if not balanceStas:
            mutex.release()
            break
for sta in balanceStas:
dis = manhattan_distance(position[str(startStation)][0], position[str(startStation)][1], position[sta][0],
position[sta][1])
if dis < minDis:
minDis = dis
balanceSta = sta
startStation = balanceSta
availStations.remove(str(balanceSta))
mutex.release()
rateDict = rateData[str(balanceSta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
totalDocks = totalDocksDict[str(balanceSta)]
t_travel = dis * 1000 / v
t_min = math.ceil(t_travel / 60)
t = starttime + t_min
t_interval = t / 5
availableBikes = station_status[str(day)][str(balanceSta)]['availableBikes']
availableDocks = station_status[str(day)][str(balanceSta)]['availableDocks']
rentalLost = 0
returnLost = 0
for i in np.arange(0, int(t_interval)): # real-time bikes docks
deltaNum = rental_rate_0[i] - return_rate_0[i]
            if float(availableBikes) < 1.0 and deltaNum > 0:
                rentalLost += deltaNum
            if float(availableDocks) < 1.0 and deltaNum < 0:
                returnLost += abs(deltaNum)
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
mutex.acquire()
beforeBalancedTotalLost.value = beforeBalancedTotalLost.value + rentalLost + returnLost
mutex.release()
realBikes = availableBikes
realDocks = availableDocks
totalDocks = totalDocksDict[str(balanceSta)]
serviceLevel = []
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(0, 19):
if j >= 48:
break
else:
deltaNum = rental_rate_0[j] - return_rate_0[j]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
# print('availableBikes:'+str(availableBikes))
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
noise = abs(np.random.normal(loc=0.0, scale=2))
if balanceSta in dropStation:
if min(serviceLevel) <= float(realBikes) <= max(serviceLevel):
endtime = t + noise
dropNum = 0
info['realbikes'] = realBikes
else:
dropNum = min(serviceLevel) - math.floor(float(realBikes))
endtime = t + dropNum * 0.3 + noise
info['realbikes'] = min(serviceLevel)
if balanceSta in pickStation:
if float(realBikes) <= max(serviceLevel):
endtime = t + noise
pickNum = 0
info['realbikes'] = float(realBikes)
else:
pickNum = math.floor(float(realBikes)) - max(serviceLevel)
endtime = t + pickNum * 0.3 + noise
info['realbikes'] = max(serviceLevel)
info['time'] = endtime
realtimeBikes[str(balanceSta)] = info
starttime = endtime
print('balanceSta:' + str(balanceSta))
print('drop:' + str(dropNum))
print('pick:' + str(pickNum))
print('distance:' + str(minDis))
print('starttime:' + str(starttime))
print(realtimeBikes)
visitedPath.append(balanceSta)
visitedDis.append(minDis)
if int(dropNum) > 0:
balanceNum[str(balanceSta)] = -dropNum
elif int(pickNum) > 0:
balanceNum[str(balanceSta)] = pickNum
else:
balanceNum[str(balanceSta)] = 0
balanceStas = []
pickStation = []
dropStation = []
for sta in availStations:
t_interval = starttime / 5
iniBikes = station_status[str(day)][str(sta)]['availableBikes']
iniDocks = station_status[str(day)][str(sta)]['availableDocks']
availableBikes = iniBikes
availableDocks = iniDocks
totalDocks = totalDocksDict[str(sta)]
rateDict = rateData[str(sta)]
month = str(mon) if int(mon) >= 10 else '0' + str(mon)
day1 = str(day) if int(day) >= 10 else '0' + str(day)
date = '2017-' + str(month) + '-' + str(day1)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
if date.weekday() < 5:
rental_rate_0 = rateDict['rental_rate_0']
return_rate_0 = rateDict['return_rate_0']
elif date.weekday() < 7:
rental_rate_0 = rateDict['rental_rate_1']
return_rate_0 = rateDict['return_rate_1']
for i in np.arange(0, int(t_interval)): # real-time bikes docks
deltaNum = rental_rate_0[i] - return_rate_0[i]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes < 0:
availableBikes = 0
availableDocks = float(availableDocks) + deltaNum
if availableDocks > float(totalDocks):
availableBikes = 0
availableDocks = float(totalDocks)
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks < 0:
availableDocks = 0
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes > float(totalDocks):
availableDocks = 0
availableBikes = float(totalDocks)
realBikes = availableBikes
realDocks = availableDocks
serviceLevel = []
for docks in range(1, int(totalDocks)):
availableBikes = int(totalDocks) - docks
availableDocks = docks
flag = 0
for j in np.arange(0, 19):
deltaNum = 0
if j >= 48:
break
else:
deltaNum = rental_rate_0[j] - return_rate_0[j]
if deltaNum > 0:
availableBikes = float(availableBikes) - deltaNum
if availableBikes <= 1:
flag = 1
break
availableDocks = float(availableDocks) + deltaNum
if availableDocks >= float(totalDocks) - 1:
flag = 1
break
else:
availableDocks = float(availableDocks) - abs(deltaNum)
if availableDocks <= 1:
flag = 1
break
availableBikes = float(availableBikes) + abs(deltaNum)
if availableBikes >= float(totalDocks) - 1:
# print('availableBikes:'+str(availableBikes))
flag = 1
break
if flag == 0:
serviceLevel.append(int(totalDocks) - int(docks))
if not serviceLevel:
pickStation.append(str(sta))
balanceStas.append(str(sta))
else:
if float(realBikes) < min(serviceLevel):
dropStation.append(str(sta))
balanceStas.append(str(sta))
if float(realBikes) > max(serviceLevel):
pickStation.append(str(sta))
balanceStas.append(str(sta))
print('dropStation:' + str(dropStation))
print('pickStation:' + str(pickStation))
def nearestNeihborBaseServiceLevelReposition():
experiment_path = './bike_sharing_data/mydata/nearestNeihborRepositionBasedService'
if not os.path.exists(experiment_path):
os.makedirs(experiment_path)
# month, day, hour = getMonthDayAndHour()
mon = 8
hour = 7
for day in range(1, 32):
        manager = multiprocessing.Manager()
        realtimeBikes = manager.dict()
        visitedPath1 = manager.list()
        visitedDis1 = manager.list()
        balanceNum1 = manager.dict()
        visitedPath2 = manager.list()
        visitedDis2 = manager.list()
        balanceNum2 = manager.dict()
        position, stations_id = getPositionAndStations_id()
        availStations = manager.list(stations_id)
beforeBalancedTotalLost = multiprocessing.Value("d", 0)
startStation1 = '237'
startStation2 = '369'
lostNums = {}
mutex = multiprocessing.Lock()
p1 = multiprocessing.Process(target=nearestNeihborBaseServiceLevelRepositionStart,
args=(
startStation1, availStations, mutex, realtimeBikes, visitedPath1, visitedDis1,
balanceNum1, beforeBalancedTotalLost, day))
p2 = multiprocessing.Process(target=nearestNeihborBaseServiceLevelRepositionStart,
args=(
startStation2, availStations, mutex, realtimeBikes, visitedPath2, visitedDis2,
balanceNum2, beforeBalancedTotalLost, day))
p1.start()
p2.start()
p1.join(8)
p2.join(8)
print(realtimeBikes)
lostNums = getNearestNeighborLost(realtimeBikes, day)
experimentResult = {}
truck1 = {}
truck2 = {}
truck1['visitedPath'] = list(visitedPath1)
truck1['visitedDis'] = list(visitedDis1)
balanceNum11 = {}
for sta, values in balanceNum1.items():
balanceNum11[str(sta)] = values
truck1['balanceNum'] = balanceNum11
truck2['visitedPath'] = list(visitedPath2)
truck2['visitedDis'] = list(visitedDis2)
balanceNum22 = {}
for sta, values in balanceNum2.items():
balanceNum22[str(sta)] = values
truck2['balanceNum'] = balanceNum22
experimentResult['truck1'] = truck1
experimentResult['truck2'] = truck2
lostNums['totalLost'] += beforeBalancedTotalLost.value
experimentResult['lost'] = lostNums
print('through stations:' + str(visitedPath1))
print('balanced number:' + str(balanceNum1))
print('travel distance:' + str(visitedDis1))
print('through stations:' + str(visitedPath2))
print('balanced number:' + str(balanceNum2))
print('travel distance:' + str(visitedDis2))
print("beforeBalancedTotalLost:" + str(beforeBalancedTotalLost.value))
print('total customer loss:' + str(lostNums['totalLost']))
print('lostNums:' + str(lostNums))
filename = 'nearestNeihborRepositionResult_month_' + str(mon) + '_day_' + str(day) + '_hour_' + str(
78910) + '.json'
with open(os.path.join(experiment_path, filename), 'w') as f:
json.dump(experimentResult, f)
        print('day ' + str(day) + ' finished!')
if __name__ == '__main__':
mctsAlgorithm()
# noReposition()
# staticReposition()
# nearestNeihborReposition()
# nearestNeihborBaseServiceLevelReposition()
| 42.77795 | 121 | 0.527799 | 21,888 | 0.264837 | 0 | 0 | 0 | 0 | 0 | 0 | 7,757 | 0.093857 |
7222707469c1717bc369a16b35dc8703f4ba96c7 | 4,692 | py | Python | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Components/Energy/Storages/Batteries/Constant_Mass/Lithium_Ion_LiFePO4_18650.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
]
| null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Components/Energy/Storages/Batteries/Constant_Mass/Lithium_Ion_LiFePO4_18650.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
]
| null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Components/Energy/Storages/Batteries/Constant_Mass/Lithium_Ion_LiFePO4_18650.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
]
| null | null | null | ## @ingroup Components-Energy-Storages-Batteries-Constant_Mass
# Lithium_Ion_LiFePO4_18650.py
#
# Created: Feb 2020, M. Clarke
# Modified: Sep 2021, R. Erhard
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
# suave imports
from SUAVE.Core import Units
from .Lithium_Ion import Lithium_Ion
# package imports
import numpy as np
## @ingroup Components-Energy-Storages-Batteries-Constant_Mass
class Lithium_Ion_LiFePO4_18650(Lithium_Ion):
""" Specifies discharge/specific energy characteristics specific
18650 lithium-iron-phosphate-oxide battery cells.
Assumptions:
N/A
Source:
# Cell Information
Saw, L. H., Yonghuang Ye, and A. A. O. Tay. "Electrochemical–thermal analysis of
18650 Lithium Iron Phosphate cell." Energy Conversion and Management 75 (2013):
162-174.
# Electrode Area
Muenzel, Valentin, et al. "A comparative testing study of commercial
18650-format lithium-ion battery cells." Journal of The Electrochemical
Society 162.8 (2015): A1592.
# Cell Thermal Conductivities
(radial)
Murashko, Kirill A., Juha Pyrhönen, and Jorma Jokiniemi. "Determination of the
through-plane thermal conductivity and specific heat capacity of a Li-ion cylindrical
cell." International Journal of Heat and Mass Transfer 162 (2020): 120330.
(axial)
Saw, L. H., Yonghuang Ye, and A. A. O. Tay. "Electrochemical–thermal analysis of
18650 Lithium Iron Phosphate cell." Energy Conversion and Management 75 (2013):
162-174.
Inputs:
None
Outputs:
None
Properties Used:
N/A
"""
def __defaults__(self):
self.tag = 'Lithium_Ion_LiFePO4_Cell'
self.cell.diameter = 0.0185 # [m]
self.cell.height = 0.0653 # [m]
self.cell.mass = 0.03 * Units.kg # [kg]
self.cell.surface_area = (np.pi*self.cell.height*self.cell.diameter) + (0.5*np.pi*self.cell.diameter**2) # [m^2]
self.cell.volume = np.pi*(0.5*self.cell.diameter)**2*self.cell.height # [m^3]
self.cell.density = self.cell.mass/self.cell.volume # [kg/m^3]
self.cell.electrode_area = 0.0342 # [m^2] # estimated
self.cell.max_voltage = 3.6 # [V]
self.cell.nominal_capacity = 1.5 # [Amp-Hrs]
self.cell.nominal_voltage = 3.6 # [V]
self.cell.charging_voltage = self.cell.nominal_voltage # [V]
self.watt_hour_rating = self.cell.nominal_capacity * self.cell.nominal_voltage # [Watt-hours]
self.specific_energy = self.watt_hour_rating*Units.Wh/self.cell.mass # [J/kg]
self.specific_power = self.specific_energy/self.cell.nominal_capacity # [W/kg]
self.ragone.const_1 = 88.818 * Units.kW/Units.kg
self.ragone.const_2 = -.01533 / (Units.Wh/Units.kg)
self.ragone.lower_bound = 60. * Units.Wh/Units.kg
self.ragone.upper_bound = 225. * Units.Wh/Units.kg
self.resistance = 0.022 # [Ohms]
self.specific_heat_capacity = 1115 # [J/kgK]
self.cell.specific_heat_capacity = 1115 # [J/kgK]
        self.cell.radial_thermal_conductivity   = 0.475                                                    # [W/(m*K)]
        self.cell.axial_thermal_conductivity    = 37.6                                                     # [W/(m*K)]
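        # With the defaults above, the derived ratings work out to
        # 1.5 Ah * 3.6 V = 5.4 Wh per cell and
        # 5.4 Wh / 0.03 kg = 180 Wh/kg specific energy (648 kJ/kg).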
return | 53.931034 | 136 | 0.449915 | 4,187 | 0.89142 | 0 | 0 | 0 | 0 | 0 | 0 | 1,953 | 0.415797 |
7222d418de71e1a6408735de5ac964a29e9e3865 | 83,182 | py | Python | motor/__init__.py | globocom/motor | a1c91ab7b223bb1ada742605e2e8d11a39fe5f1e | [
"Apache-2.0"
]
| null | null | null | motor/__init__.py | globocom/motor | a1c91ab7b223bb1ada742605e2e8d11a39fe5f1e | [
"Apache-2.0"
]
| null | null | null | motor/__init__.py | globocom/motor | a1c91ab7b223bb1ada742605e2e8d11a39fe5f1e | [
"Apache-2.0"
]
| null | null | null | # Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Motor, an asynchronous driver for MongoDB and Tornado."""
import collections
import functools
import inspect
import socket
import time
import warnings
import weakref
from tornado import ioloop, iostream, gen, stack_context
from tornado.concurrent import Future
import greenlet
import bson
import pymongo
import pymongo.auth
import pymongo.common
import pymongo.database
import pymongo.errors
import pymongo.mongo_client
import pymongo.mongo_replica_set_client
import pymongo.son_manipulator
from pymongo.pool import _closed, SocketInfo
import gridfs
from pymongo.database import Database
from pymongo.collection import Collection
from pymongo.cursor import Cursor, _QUERY_OPTIONS
from gridfs import grid_file
import util
__all__ = ['MotorClient', 'MotorReplicaSetClient', 'Op']
version_tuple = (0, 1, '+')
def get_version_string():
if isinstance(version_tuple[-1], basestring):
return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1]
return '.'.join(map(str, version_tuple))
version = get_version_string()
"""Current version of Motor."""
# TODO: ensure we're handling timeouts as efficiently as possible; test the
# performance hit of registering and cancelling timeouts.
HAS_SSL = True
try:
import ssl
except ImportError:
ssl = None
HAS_SSL = False
def check_deprecated_kwargs(kwargs):
if 'safe' in kwargs:
raise pymongo.errors.ConfigurationError(
"Motor does not support 'safe', use 'w'")
if 'slave_okay' in kwargs or 'slaveok' in kwargs:
raise pymongo.errors.ConfigurationError(
"Motor does not support 'slave_okay', use read_preference")
callback_type_error = TypeError("callback must be a callable")
def motor_sock_method(method):
"""Wrap a MotorSocket method to pause the current greenlet and arrange
for the greenlet to be resumed when non-blocking I/O has completed.
"""
@functools.wraps(method)
def _motor_sock_method(self, *args, **kwargs):
child_gr = greenlet.getcurrent()
main = child_gr.parent
assert main, "Should be on child greenlet"
timeout_object = None
if self.timeout:
def timeout_err():
# Running on the main greenlet. If a timeout error is thrown,
# we raise the exception on the child greenlet. Closing the
# IOStream removes callback() from the IOLoop so it isn't
# called.
self.stream.set_close_callback(None)
self.stream.close()
child_gr.throw(socket.timeout("timed out"))
timeout_object = self.stream.io_loop.add_timeout(
time.time() + self.timeout, timeout_err)
# This is run by IOLoop on the main greenlet when operation
# completes; switch back to child to continue processing
def callback(result=None):
self.stream.set_close_callback(None)
if timeout_object:
self.stream.io_loop.remove_timeout(timeout_object)
child_gr.switch(result)
# Run on main greenlet
def closed():
if timeout_object:
self.stream.io_loop.remove_timeout(timeout_object)
# The child greenlet might have died, e.g.:
# - An operation raised an error within PyMongo
# - PyMongo closed the MotorSocket in response
# - MotorSocket.close() closed the IOStream
# - IOStream scheduled this closed() function on the loop
# - PyMongo operation completed (with or without error) and
# its greenlet terminated
# - IOLoop runs this function
if not child_gr.dead:
child_gr.throw(socket.error("error"))
self.stream.set_close_callback(closed)
try:
kwargs['callback'] = callback
# method is MotorSocket.open(), recv(), etc. method() begins a
# non-blocking operation on an IOStream and arranges for
# callback() to be executed on the main greenlet once the
# operation has completed.
method(self, *args, **kwargs)
# Pause child greenlet until resumed by main greenlet, which
# will pass the result of the socket operation (data for recv,
# number of bytes written for sendall) to us.
return main.switch()
except socket.error: # TODO: delete
raise
except IOError, e:
# If IOStream raises generic IOError (e.g., if operation
# attempted on closed IOStream), then substitute socket.error,
# since socket.error is what PyMongo's built to handle. For
# example, PyMongo will catch socket.error, close the socket,
# and raise AutoReconnect.
raise socket.error(str(e))
return _motor_sock_method
class MotorSocket(object):
"""Replace socket with a class that yields from the current greenlet, if
we're on a child greenlet, when making blocking calls, and uses Tornado
IOLoop to schedule child greenlet for resumption when I/O is ready.
We only implement those socket methods actually used by pymongo.
"""
def __init__(
self, sock, io_loop, use_ssl,
certfile, keyfile, ca_certs, cert_reqs):
self.use_ssl = use_ssl
self.certfile = certfile
self.keyfile = keyfile
self.ca_certs = ca_certs
self.cert_reqs = cert_reqs
self.timeout = None
if self.use_ssl:
# TODO: use full SSL options.
self.stream = iostream.SSLIOStream(sock, io_loop=io_loop)
else:
self.stream = iostream.IOStream(sock, io_loop=io_loop)
def setsockopt(self, *args, **kwargs):
self.stream.socket.setsockopt(*args, **kwargs)
def settimeout(self, timeout):
# IOStream calls socket.setblocking(False), which does settimeout(0.0).
# We must not allow pymongo to set timeout to some other value (a
# positive number or None) or the socket will start blocking again.
# Instead, we simulate timeouts by interrupting ourselves with
# callbacks.
self.timeout = timeout
@motor_sock_method
def connect(self, pair, callback):
"""
:Parameters:
- `pair`: A tuple, (host, port)
"""
self.stream.connect(pair, callback)
def sendall(self, data):
assert greenlet.getcurrent().parent, "Should be on child greenlet"
try:
self.stream.write(data)
except IOError, e:
# PyMongo is built to handle socket.error here, not IOError
raise socket.error(str(e))
if self.stream.closed():
# Something went wrong while writing
raise socket.error("write error")
@motor_sock_method
def recv(self, num_bytes, callback):
self.stream.read_bytes(num_bytes, callback)
def close(self):
sock = self.stream.socket
try:
self.stream.close()
except KeyError:
# Tornado's _impl (epoll, kqueue, ...) has already removed this
# file descriptor from its dict.
pass
finally:
# Sometimes necessary to avoid ResourceWarnings in Python 3:
# specifically, if the fd is closed from the OS's view, then
# stream.close() throws an exception, but the socket still has an
# fd and so will print a ResourceWarning. In that case, calling
# sock.close() directly clears the fd and does not raise an error.
if sock:
sock.close()
def fileno(self):
return self.stream.socket.fileno()
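# A minimal usage sketch (hypothetical, for illustration only): MotorSocket
# methods must run on a child greenlet so @motor_sock_method can switch
# back to the main greenlet while Tornado waits on the fd:
#
#     def worker():
#         sock = MotorSocket(
#             socket.socket(), io_loop, use_ssl=False, certfile=None,
#             keyfile=None, ca_certs=None, cert_reqs=None)
#         sock.connect(('localhost', 27017))  # greenlet pauses here
#         sock.sendall(request_bytes)         # hypothetical payload
#         header = sock.recv(16)              # resumes when bytes arrive
#
#     greenlet.greenlet(worker).switch()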
class MotorPool(object):
def __init__(
self, io_loop, pair, max_size, net_timeout, conn_timeout, use_ssl,
use_greenlets,
ssl_keyfile=None, ssl_certfile=None,
ssl_cert_reqs=None, ssl_ca_certs=None,
wait_queue_timeout=None, wait_queue_multiple=None):
"""
A pool of MotorSockets.
:Parameters:
- `io_loop`: An IOLoop instance
- `pair`: a (hostname, port) tuple
- `max_size`: The maximum number of open sockets. Calls to
`get_socket` will block if this is set, this pool has opened
`max_size` sockets, and there are none idle. Set to `None` to
disable.
- `net_timeout`: timeout in seconds for operations on open connection
- `conn_timeout`: timeout in seconds for establishing connection
- `use_ssl`: bool, if True use an encrypted connection
- `use_greenlets`: ignored.
- `ssl_keyfile`: The private keyfile used to identify the local
            connection against mongod. If included with the ``ssl_certfile``
            then only the ``ssl_certfile`` is needed. Implies ``ssl=True``.
- `ssl_certfile`: The certificate file used to identify the local
connection against mongod. Implies ``ssl=True``.
- `ssl_cert_reqs`: Specifies whether a certificate is required from
the other side of the connection, and whether it will be validated
if provided. It must be one of the three values ``ssl.CERT_NONE``
(certificates ignored), ``ssl.CERT_OPTIONAL``
(not required, but validated if provided), or ``ssl.CERT_REQUIRED``
(required and validated). If the value of this parameter is not
``ssl.CERT_NONE``, then the ``ssl_ca_certs`` parameter must point
to a file of CA certificates. Implies ``ssl=True``.
- `ssl_ca_certs`: The ca_certs file contains a set of concatenated
"certification authority" certificates, which are used to validate
certificates passed from the other end of the connection.
Implies ``ssl=True``.
- `wait_queue_timeout`: (integer) How long (in milliseconds) a
callback will wait for a socket from the pool if the pool has no
free sockets.
- `wait_queue_multiple`: (integer) Multiplied by max_pool_size to
give the number of callbacks allowed to wait for a socket at one
time.
"""
self.io_loop = io_loop
self.sockets = set()
self.pair = pair
self.max_size = max_size
self.net_timeout = net_timeout
self.conn_timeout = conn_timeout
self.wait_queue_timeout = wait_queue_timeout
self.wait_queue_multiple = wait_queue_multiple
self.use_ssl = use_ssl
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.ssl_cert_reqs = ssl_cert_reqs
self.ssl_ca_certs = ssl_ca_certs
# Keep track of resets, so we notice sockets created before the most
# recent reset and close them.
self.pool_id = 0
if HAS_SSL and use_ssl and not ssl_cert_reqs:
self.ssl_cert_reqs = ssl.CERT_NONE
self.motor_sock_counter = 0
self.queue = collections.deque()
# Timeout handles to expire waiters after wait_queue_timeout.
self.waiter_timeouts = {}
if self.wait_queue_multiple is None:
self.max_waiters = None
else:
self.max_waiters = self.max_size * self.wait_queue_multiple
def reset(self):
self.pool_id += 1
sockets, self.sockets = self.sockets, set()
for sock_info in sockets:
sock_info.close()
def create_connection(self, pair):
"""Connect to `pair` and return the socket object.
This is a modified version of create_connection from
CPython >=2.6.
"""
host, port = pair or self.pair
# Check if dealing with a unix domain socket
if host.endswith('.sock'):
if not hasattr(socket, "AF_UNIX"):
raise pymongo.errors.ConnectionFailure(
"UNIX-sockets are not supported on this system")
addrinfos = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, host)]
else:
# Don't try IPv6 if we don't support it. Also skip it if host
# is 'localhost' (::1 is fine). Avoids slow connect issues
# like PYTHON-356.
family = socket.AF_INET
if socket.has_ipv6 and host != 'localhost':
family = socket.AF_UNSPEC
# TODO: use Tornado 3's async resolver.
addrinfos = [
(af, socktype, proto, sa) for af, socktype, proto, dummy, sa in
socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)]
err = None
for res in addrinfos:
af, socktype, proto, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
motor_sock = MotorSocket(
sock, self.io_loop, use_ssl=self.use_ssl,
certfile=self.ssl_certfile, keyfile=self.ssl_keyfile,
ca_certs=self.ssl_ca_certs, cert_reqs=self.ssl_cert_reqs)
if af != getattr(socket, 'AF_UNIX', None):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
motor_sock.settimeout(self.conn_timeout or 20.0)
# Important to increment the count before beginning to connect.
self.motor_sock_counter += 1
# MotorSocket pauses this greenlet and resumes when connected.
motor_sock.connect(sa)
return motor_sock
except socket.error, e:
self.motor_sock_counter -= 1
err = e
if sock is not None:
sock.close()
if err is not None:
raise err
else:
# This likely means we tried to connect to an IPv6 only
# host with an OS/kernel or Python interpreter that doesn't
# support IPv6. The test case is Jython2.5.1 which doesn't
# support IPv6 at all.
raise socket.error('getaddrinfo failed')
def connect(self, pair):
"""Connect to Mongo and return a new connected MotorSocket. Note that
the pool does not keep a reference to the socket -- you must call
maybe_return_socket() when you're done with it.
"""
child_gr = greenlet.getcurrent()
main = child_gr.parent
assert main, "Should be on child greenlet"
if self.max_size and self.motor_sock_counter >= self.max_size:
if self.max_waiters and len(self.queue) >= self.max_waiters:
raise self._create_wait_queue_timeout()
waiter = stack_context.wrap(child_gr.switch)
self.queue.append(waiter)
if self.wait_queue_timeout is not None:
deadline = time.time() + self.wait_queue_timeout
timeout = self.io_loop.add_timeout(
deadline,
functools.partial(
child_gr.throw,
pymongo.errors.ConnectionFailure,
self._create_wait_queue_timeout()))
self.waiter_timeouts[waiter] = timeout
# Yield until maybe_return_socket passes spare socket in.
return main.switch()
else:
motor_sock = self.create_connection(pair)
motor_sock.settimeout(self.net_timeout)
return SocketInfo(motor_sock, self.pool_id)
def get_socket(self, pair=None, force=False):
"""Get a socket from the pool.
Returns a :class:`SocketInfo` object wrapping a connected
:class:`MotorSocket`, and a bool saying whether the socket was from
the pool or freshly created.
:Parameters:
- `pair`: optional (hostname, port) tuple
- `force`: optional boolean, forces a connection to be returned
without blocking, even if `max_size` has been reached.
"""
forced = False
if force:
# If we're doing an internal operation, attempt to play nicely with
# max_size, but if there is no open "slot" force the connection
# and mark it as forced so we don't decrement motor_sock_counter
# when it's returned.
if self.motor_sock_counter >= self.max_size:
forced = True
if self.sockets:
sock_info, from_pool = self.sockets.pop(), True
sock_info = self._check(sock_info, pair)
else:
sock_info, from_pool = self.connect(pair), False
sock_info.forced = forced
sock_info.last_checkout = time.time()
return sock_info
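    # Checkout/return usage sketch (runs on a child greenlet; the names here
    # are illustrative, not part of a public API):
    #
    #     sock_info = pool.get_socket()
    #     try:
    #         pass  # send and receive on sock_info.sock
    #     finally:
    #         pool.maybe_return_socket(sock_info)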
def async_get_socket(self, pair=None):
"""Get a ``Future`` which will resolve to a socket."""
loop = self.io_loop
future = Future()
def _get_socket():
# Runs on child greenlet.
try:
result = self.get_socket(pair)
loop.add_callback(functools.partial(future.set_result, result))
except Exception, e:
loop.add_callback(functools.partial(future.set_exception, e))
# Start running the operation on a greenlet.
greenlet.greenlet(_get_socket).switch()
return future
def start_request(self):
raise NotImplementedError("Motor doesn't implement requests")
in_request = end_request = start_request
def discard_socket(self, sock_info):
"""Close and discard the active socket."""
sock_info.close()
def maybe_return_socket(self, sock_info):
"""Return the socket to the pool.
In PyMongo this method only returns the socket if it's not the request
socket, but Motor doesn't do requests.
"""
if sock_info.closed:
if not sock_info.forced:
self.motor_sock_counter -= 1
return
# Give it to the greenlet at the head of the line, or return it to the
# pool, or discard it.
if self.queue:
waiter = self.queue.popleft()
if waiter in self.waiter_timeouts:
self.io_loop.remove_timeout(self.waiter_timeouts.pop(waiter))
with stack_context.NullContext():
self.io_loop.add_callback(functools.partial(waiter, sock_info))
elif (len(self.sockets) < self.max_size
and sock_info.pool_id == self.pool_id):
self.sockets.add(sock_info)
else:
sock_info.close()
if not sock_info.forced:
self.motor_sock_counter -= 1
if sock_info.forced:
sock_info.forced = False
def _check(self, sock_info, pair):
"""This side-effecty function checks if this pool has been reset since
the last time this socket was used, or if the socket has been closed by
some external network error, and if so, attempts to create a new socket.
If this connection attempt fails we reset the pool and reraise the
error.
Checking sockets lets us avoid seeing *some*
:class:`~pymongo.errors.AutoReconnect` exceptions on server
hiccups, etc. We only do this if it's been > 1 second since
the last socket checkout, to keep performance reasonable - we
can't avoid AutoReconnects completely anyway.
"""
error = False
if sock_info.closed:
error = True
elif self.pool_id != sock_info.pool_id:
sock_info.close()
error = True
elif time.time() - sock_info.last_checkout > 1:
if _closed(sock_info.sock):
sock_info.close()
error = True
if not error:
return sock_info
else:
try:
return self.connect(pair)
except socket.error:
self.reset()
raise
def __del__(self):
# Avoid ResourceWarnings in Python 3.
for sock_info in self.sockets:
sock_info.close()
def _create_wait_queue_timeout(self):
return pymongo.errors.ConnectionFailure(
'Timed out waiting for socket from pool with max_size %r and'
' wait_queue_timeout %r' % (
self.max_size, self.wait_queue_timeout))
def callback_from_future(future):
"""Return a callback that sets a Future's result or exception"""
def callback(result, error):
if error:
future.set_exception(error)
else:
future.set_result(result)
return callback
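# Bridging sketch: adapt any (result, error) callback-style call to a Future
# that can be yielded from a coroutine. `some_async_method` is a hypothetical
# stand-in, not a real Motor API.
#
#     future = Future()
#     some_async_method(callback=callback_from_future(future))
#     result = yield future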
def asynchronize(motor_class, sync_method, has_write_concern, doc=None):
"""Decorate `sync_method` so it accepts a callback or returns a Future.
The method runs on a child greenlet and calls the callback or resolves
the Future when the greenlet completes.
:Parameters:
- `motor_class`: Motor class being created, e.g. MotorClient.
- `sync_method`: Bound method of pymongo Collection, Database,
MongoClient, or Cursor
- `has_write_concern`: Whether the method accepts getLastError options
- `doc`: Optionally override sync_method's docstring
"""
@functools.wraps(sync_method)
def method(self, *args, **kwargs):
check_deprecated_kwargs(kwargs)
loop = self.get_io_loop()
callback = kwargs.pop('callback', None)
if callback:
if not callable(callback):
raise callback_type_error
future = None
else:
future = Future()
def call_method():
# Runs on child greenlet.
# TODO: ew, performance?
try:
result = sync_method(self.delegate, *args, **kwargs)
if callback:
# Schedule callback(result, None) on main greenlet.
loop.add_callback(functools.partial(
callback, result, None))
else:
# Schedule future to be resolved on main greenlet.
loop.add_callback(functools.partial(
future.set_result, result))
except Exception, e:
if callback:
loop.add_callback(functools.partial(
callback, None, e))
else:
loop.add_callback(functools.partial(
future.set_exception, e))
# Start running the operation on a greenlet.
greenlet.greenlet(call_method).switch()
return future
# This is for the benefit of motor_extensions.py, which needs this info to
# generate documentation with Sphinx.
method.is_async_method = True
method.has_write_concern = has_write_concern
name = sync_method.__name__
if name.startswith('__') and not name.endswith("__"):
# Mangle, e.g. Cursor.__die -> Cursor._Cursor__die
classname = motor_class.__delegate_class__.__name__
name = '_%s%s' % (classname, name)
method.pymongo_method_name = name
if doc is not None:
method.__doc__ = doc
return method
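# The methods produced by asynchronize support both calling styles. A sketch,
# assuming `collection` is an opened MotorCollection:
#
#     # Future style, inside a gen.coroutine:
#     document = yield collection.find_one({'_id': 1})
#
#     # Callback style:
#     def on_found(result, error):
#         if error:
#             raise error
#     collection.find_one({'_id': 1}, callback=on_found)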
class MotorAttributeFactory(object):
"""Used by Motor classes to mark attributes that delegate in some way to
PyMongo. At module import time, each Motor class is created, and MotorMeta
calls create_attribute() for each attr to create the final class attribute.
"""
def create_attribute(self, cls, attr_name):
raise NotImplementedError
class Async(MotorAttributeFactory):
def __init__(self, attr_name, has_write_concern):
"""A descriptor that wraps a PyMongo method, such as insert or remove,
and returns an asynchronous version of the method, which accepts a
callback or returns a Future.
:Parameters:
- `attr_name`: The name of the attribute on the PyMongo class, if
different from attribute on the Motor class
- `has_write_concern`: Whether the method accepts getLastError options
"""
super(Async, self).__init__()
self.attr_name = attr_name
self.has_write_concern = has_write_concern
def create_attribute(self, cls, attr_name):
name = self.attr_name or attr_name
if name.startswith('__'):
# Mangle: __simple_command becomes _MongoClient__simple_command.
name = '_%s%s' % (cls.__delegate_class__.__name__, name)
method = getattr(cls.__delegate_class__, name)
return asynchronize(cls, method, self.has_write_concern)
def wrap(self, original_class):
return WrapAsync(self, original_class)
def unwrap(self, motor_class):
return UnwrapAsync(self, motor_class)
class WrapBase(MotorAttributeFactory):
def __init__(self, prop):
super(WrapBase, self).__init__()
self.property = prop
class WrapAsync(WrapBase):
def __init__(self, prop, original_class):
"""Like Async, but before it executes the callback or resolves the
Future, checks if result is a PyMongo class and wraps it in a Motor
class. E.g., Motor's map_reduce should pass a MotorCollection instead
of a PyMongo Collection to the Future. Uses the wrap() method on the
owner object to do the actual wrapping. E.g.,
Database.create_collection returns a Collection, so MotorDatabase has:
create_collection = AsyncCommand().wrap(Collection)
Once Database.create_collection is done, Motor calls
MotorDatabase.wrap() on its result, transforming the result from
Collection to MotorCollection, which is passed to the callback or
Future.
:Parameters:
- `prop`: An Async, the async method to call before wrapping its result
in a Motor class.
"""
super(WrapAsync, self).__init__(prop)
self.original_class = original_class
def create_attribute(self, cls, attr_name):
async_method = self.property.create_attribute(cls, attr_name)
original_class = self.original_class
@functools.wraps(async_method)
def wrapper(self, *args, **kwargs):
callback = kwargs.pop('callback', None)
def done_callback(result, error):
if error:
callback(None, error)
return
# Don't call isinstance(), not checking subclasses.
if result.__class__ == original_class:
# Delegate to the current object to wrap the result.
new_object = self.wrap(result)
else:
new_object = result
callback(new_object, None)
if callback:
if not callable(callback):
raise callback_type_error
async_method(self, *args, callback=done_callback, **kwargs)
else:
future = Future()
# The final callback run from inside done_callback.
callback = callback_from_future(future)
async_method(self, *args, callback=done_callback, **kwargs)
return future
return wrapper
class UnwrapAsync(WrapBase):
def __init__(self, prop, motor_class):
"""Like Async, but checks if arguments are Motor classes and unwraps
them. E.g., Motor's drop_database takes a MotorDatabase, unwraps it,
and passes a PyMongo Database instead.
"""
super(UnwrapAsync, self).__init__(prop)
self.motor_class = motor_class
def create_attribute(self, cls, attr_name):
f = self.property.create_attribute(cls, attr_name)
motor_class = self.motor_class
def _unwrap_obj(obj):
if isinstance(motor_class, basestring):
# Delayed reference - e.g., drop_database is defined before
# MotorDatabase is, so it was initialized with
# unwrap('MotorDatabase') instead of unwrap(MotorDatabase).
actual_motor_class = globals()[motor_class]
else:
actual_motor_class = motor_class
# Don't call isinstance(), not checking subclasses.
if obj.__class__ == actual_motor_class:
return obj.delegate
else:
return obj
@functools.wraps(f)
def _f(*args, **kwargs):
# Call _unwrap_obj on each arg and kwarg before invoking f.
args = [_unwrap_obj(arg) for arg in args]
kwargs = dict([
(key, _unwrap_obj(value)) for key, value in kwargs.items()])
return f(*args, **kwargs)
return _f
class AsyncRead(Async):
def __init__(self, attr_name=None):
"""A descriptor that wraps a PyMongo read method like find_one() that
returns a Future.
"""
Async.__init__(self, attr_name=attr_name, has_write_concern=False)
class AsyncWrite(Async):
def __init__(self, attr_name=None):
"""A descriptor that wraps a PyMongo write method like update() that
accepts getLastError options and returns a Future.
"""
Async.__init__(self, attr_name=attr_name, has_write_concern=True)
class AsyncCommand(Async):
def __init__(self, attr_name=None):
"""A descriptor that wraps a PyMongo command like copy_database() that
returns a Future and does not accept getLastError options.
"""
Async.__init__(self, attr_name=attr_name, has_write_concern=False)
def check_delegate(obj, attr_name):
if not obj.delegate:
raise pymongo.errors.InvalidOperation(
"Call open() on %s before accessing attribute '%s'" % (
obj.__class__.__name__, attr_name))
class ReadOnlyPropertyDescriptor(object):
def __init__(self, attr_name):
self.attr_name = attr_name
def __get__(self, obj, objtype):
if obj:
check_delegate(obj, self.attr_name)
return getattr(obj.delegate, self.attr_name)
else:
# We're accessing this property on a class, e.g. when Sphinx wants
# MotorGridOut.md5.__doc__
return getattr(objtype.__delegate_class__, self.attr_name)
def __set__(self, obj, val):
raise AttributeError
class ReadOnlyProperty(MotorAttributeFactory):
"""Creates a readonly attribute on the wrapped PyMongo object"""
def create_attribute(self, cls, attr_name):
return ReadOnlyPropertyDescriptor(attr_name)
DelegateMethod = ReadOnlyProperty
"""A method on the wrapped PyMongo object that does no I/O and can be called
synchronously"""
class ReadWritePropertyDescriptor(ReadOnlyPropertyDescriptor):
def __set__(self, obj, val):
check_delegate(obj, self.attr_name)
setattr(obj.delegate, self.attr_name, val)
class ReadWriteProperty(MotorAttributeFactory):
"""Creates a mutable attribute on the wrapped PyMongo object"""
def create_attribute(self, cls, attr_name):
return ReadWritePropertyDescriptor(attr_name)
class MotorMeta(type):
"""Initializes a Motor class, calling create_attribute() on all its
MotorAttributeFactories to create the actual class attributes.
"""
def __new__(cls, class_name, bases, attrs):
new_class = type.__new__(cls, class_name, bases, attrs)
# If new_class has no __delegate_class__, then it's a base like
# MotorClientBase; don't try to update its attrs, we'll use them
# for its subclasses like MotorClient.
if getattr(new_class, '__delegate_class__', None):
for base in reversed(inspect.getmro(new_class)):
# Turn attribute factories into real methods or descriptors.
for name, attr in base.__dict__.items():
if isinstance(attr, MotorAttributeFactory):
new_class_attr = attr.create_attribute(new_class, name)
setattr(new_class, name, new_class_attr)
return new_class
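# Sketch of the pattern MotorMeta enables: a Motor class declares attribute
# factories, and the metaclass turns each into a real method or descriptor at
# class-creation time (SomeDelegate is a hypothetical PyMongo class):
#
#     class MotorExample(MotorBase):
#         __delegate_class__ = SomeDelegate
#         find_one = AsyncRead()          # becomes an asynchronized method
#         name = ReadOnlyProperty()       # becomes a read-only descriptor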
class MotorBase(object):
__metaclass__ = MotorMeta
def __eq__(self, other):
if (
isinstance(other, self.__class__)
and hasattr(self, 'delegate')
and hasattr(other, 'delegate')
):
return self.delegate == other.delegate
return NotImplemented
name = ReadOnlyProperty()
get_document_class = ReadOnlyProperty()
set_document_class = ReadOnlyProperty()
document_class = ReadWriteProperty()
read_preference = ReadWriteProperty()
tag_sets = ReadWriteProperty()
secondary_acceptable_latency_ms = ReadWriteProperty()
write_concern = ReadWriteProperty()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.delegate)
class MotorOpenable(object):
"""Base class for Motor objects that can be initialized in two stages:
basic setup in __init__ and setup requiring I/O in open(), which
creates the delegate object.
"""
__metaclass__ = MotorMeta
__delegate_class__ = None
def __init__(self, delegate, io_loop, *args, **kwargs):
if io_loop:
if not isinstance(io_loop, ioloop.IOLoop):
raise TypeError(
"io_loop must be instance of IOLoop, not %r" % io_loop)
self.io_loop = io_loop
else:
self.io_loop = ioloop.IOLoop.current()
self.delegate = delegate
# Store args and kwargs for when open() is called
self._init_args = args
self._init_kwargs = kwargs
def get_io_loop(self):
return self.io_loop
def open(self, callback=None):
"""Actually initialize. Takes an optional callback, or returns a Future
that resolves to self when opened.
:Parameters:
- `callback`: Optional function taking parameters (self, error)
"""
if callback and not callable(callback):
raise callback_type_error
if callback:
self._open(callback=callback)
else:
future = Future()
self._open(callback=callback_from_future(future))
return future
def _open(self, callback):
if self.delegate:
            callback(self, None)  # Already open.
            return
def _connect():
# Run on child greenlet
try:
args, kwargs = self._delegate_init_args()
self.delegate = self.__delegate_class__(*args, **kwargs)
callback(self, None)
except Exception, e:
callback(None, e)
# Actually connect on a child greenlet
gr = greenlet.greenlet(_connect)
gr.switch()
def _delegate_init_args(self):
"""Return args, kwargs to create a delegate object"""
return self._init_args, self._init_kwargs
class MotorClientBase(MotorOpenable, MotorBase):
"""MotorClient and MotorReplicaSetClient common functionality.
"""
database_names = AsyncRead()
server_info = AsyncRead()
alive = AsyncRead()
close_cursor = AsyncCommand()
drop_database = AsyncCommand().unwrap('MotorDatabase')
disconnect = DelegateMethod()
tz_aware = ReadOnlyProperty()
close = DelegateMethod()
is_primary = ReadOnlyProperty()
is_mongos = ReadOnlyProperty()
max_bson_size = ReadOnlyProperty()
max_pool_size = ReadOnlyProperty()
get_default_database = DelegateMethod()
_ensure_connected = AsyncCommand()
def __init__(self, delegate, io_loop, *args, **kwargs):
check_deprecated_kwargs(kwargs)
super(MotorClientBase, self).__init__(
delegate, io_loop, *args, **kwargs)
def open_sync(self):
"""Synchronous open(), returning self.
Under the hood, this method creates a new Tornado IOLoop, runs
:meth:`open` on the loop, and deletes the loop when :meth:`open`
completes.
"""
if self.connected:
return self
# Run a private IOLoop until connected or error
private_loop = ioloop.IOLoop()
standard_loop, self.io_loop = self.io_loop, private_loop
try:
private_loop.run_sync(self.open)
return self
finally:
# Replace the private IOLoop with the default loop
self.io_loop = standard_loop
if self.delegate:
self.delegate.pool_class = functools.partial(
MotorPool, self.io_loop)
for pool in self._get_pools():
pool.io_loop = self.io_loop
pool.reset()
# Clean up file descriptors.
private_loop.close()
def sync_client(self):
"""Get a PyMongo MongoClient / MongoReplicaSetClient with the same
configuration as this MotorClient / MotorReplicaSetClient.
"""
return self.__delegate_class__(
*self._init_args, **self._init_kwargs)
def __getattr__(self, name):
if not self.connected:
msg = (
"Can't access attribute '%s' on %s before calling open()"
" or open_sync()" % (name, self.__class__.__name__))
raise pymongo.errors.InvalidOperation(msg)
return MotorDatabase(self, name)
__getitem__ = __getattr__
@property
def connected(self):
"""True after :meth:`open` or :meth:`open_sync` completes"""
return self.delegate is not None
@gen.coroutine
def _copy_database(
self, _callback, from_name, to_name, from_host, username,
password):
pool, sock_info = None, None
try:
if not isinstance(from_name, basestring):
raise TypeError("from_name must be an instance "
"of %s" % (basestring.__name__,))
if not isinstance(to_name, basestring):
raise TypeError("to_name must be an instance "
"of %s" % (basestring.__name__,))
pymongo.database._check_name(to_name)
yield self._ensure_connected(True)
pool = self._get_primary_pool()
sock_info = yield self._async_get_socket(pool)
copydb_command = bson.SON([
('copydb', 1),
('fromdb', from_name),
('todb', to_name)])
if from_host is not None:
copydb_command['fromhost'] = from_host
if username is not None:
getnonce_command = bson.SON(
[('copydbgetnonce', 1), ('fromhost', from_host)])
response, ms = yield self._simple_command(
sock_info, 'admin', getnonce_command)
nonce = response['nonce']
copydb_command['username'] = username
copydb_command['nonce'] = nonce
copydb_command['key'] = pymongo.auth._auth_key(
nonce, username, password)
result = yield self._simple_command(
sock_info, 'admin', copydb_command)
if _callback:
_callback(result, None)
else:
raise gen.Return(result)
except Exception, e:
if _callback:
_callback(None, e)
else:
raise
finally:
if pool and sock_info:
pool.maybe_return_socket(sock_info)
def copy_database(
self, from_name, to_name, from_host=None, username=None,
password=None, callback=None):
"""Copy a database, potentially from another host.
Accepts an optional callback, and returns a ``Future``.
Raises :class:`~pymongo.errors.InvalidName` if `to_name` is
not a valid database name.
If `from_host` is ``None`` the current host is used as the
source. Otherwise the database is copied from `from_host`.
If the source database requires authentication, `username` and
`password` must be specified.
:Parameters:
- `from_name`: the name of the source database
- `to_name`: the name of the target database
- `from_host` (optional): host name to copy from
- `username` (optional): username for source database
- `password` (optional): password for source database
- `callback`: Optional function taking parameters (response, error)
"""
# PyMongo's implementation uses requests, so rewrite for Motor.
if callback:
if not callable(callback):
raise callback_type_error
future = self._copy_database(
callback, from_name, to_name, from_host, username, password)
if not callback:
return future
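    # Usage sketch, inside a gen.coroutine (host and credentials are
    # placeholders):
    #
    #     yield client.copy_database(
    #         'source_db', 'target_db', from_host='other.example.com',
    #         username='user', password='pass')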
def _delegate_init_args(self):
"""Override MotorOpenable._delegate_init_args to ensure
auto_start_request is False and _pool_class is MotorPool.
"""
kwargs = self._init_kwargs.copy()
kwargs['auto_start_request'] = False
kwargs['_pool_class'] = functools.partial(MotorPool, self.io_loop)
return self._init_args, kwargs
class MotorClient(MotorClientBase):
__delegate_class__ = pymongo.mongo_client.MongoClient
kill_cursors = AsyncCommand()
fsync = AsyncCommand()
unlock = AsyncCommand()
nodes = ReadOnlyProperty()
host = ReadOnlyProperty()
port = ReadOnlyProperty()
_simple_command = AsyncCommand(attr_name='__simple_command')
def __init__(self, *args, **kwargs):
"""Create a new connection to a single MongoDB instance at *host:port*.
:meth:`open` or :meth:`open_sync` must be called before using a new
MotorClient. No property access is allowed before the connection
is opened.
MotorClient takes the same constructor arguments as
`MongoClient`_, as well as:
:Parameters:
- `io_loop` (optional): Special :class:`tornado.ioloop.IOLoop`
instance to use instead of default
.. _MongoClient: http://api.mongodb.org/python/current/api/pymongo/mongo_client.html
"""
super(MotorClient, self).__init__(
None, kwargs.pop('io_loop', None), *args, **kwargs)
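    # Usage sketch of the two-stage initialization:
    #
    #     client = MotorClient('localhost', 27017)  # no I/O yet
    #     client.open_sync()                        # blocking, at startup
    #     # ... or, inside a gen.coroutine:
    #     client = yield MotorClient().open()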
def _get_pools(self):
# TODO: expose the PyMongo pool, or otherwise avoid this
return [self.delegate._MongoClient__pool]
def _get_primary_pool(self):
# TODO: expose the PyMongo pool, or otherwise avoid this
return self.delegate._MongoClient__pool
def _async_get_socket(self, pool):
"""Return a ``Future`` that will resolve to a socket."""
# MongoClient passes host and port into the pool for each call to
# get_socket.
return pool.async_get_socket((self.host, self.port))
class MotorReplicaSetClient(MotorClientBase):
__delegate_class__ = pymongo.mongo_replica_set_client.MongoReplicaSetClient
primary = ReadOnlyProperty()
secondaries = ReadOnlyProperty()
arbiters = ReadOnlyProperty()
hosts = ReadOnlyProperty()
seeds = ReadOnlyProperty()
close = DelegateMethod()
_simple_command = AsyncCommand(attr_name='__simple_command')
def __init__(self, *args, **kwargs):
"""Create a new connection to a MongoDB replica set.
:meth:`open` or :meth:`open_sync` must be called before using a new
MotorReplicaSetClient. No property access is allowed before the
connection is opened.
MotorReplicaSetClient takes the same constructor arguments as
`MongoReplicaSetClient`_, as well as:
:Parameters:
- `io_loop` (optional): Special :class:`tornado.ioloop.IOLoop`
instance to use instead of default
.. _MongoReplicaSetClient: http://api.mongodb.org/python/current/api/pymongo/mongo_replica_set_client.html
"""
super(MotorReplicaSetClient, self).__init__(
None, kwargs.pop('io_loop', None), *args, **kwargs)
def open_sync(self):
"""Synchronous open(), returning self.
Under the hood, this method creates a new Tornado IOLoop, runs
:meth:`open` on the loop, and deletes the loop when :meth:`open`
completes.
"""
super(MotorReplicaSetClient, self).open_sync()
# We need to wait for open_sync() to complete and restore the
# original IOLoop before starting the monitor.
self.delegate._MongoReplicaSetClient__monitor.start_motor(self.io_loop)
return self
def open(self, callback=None):
"""Actually initialize. Takes an optional callback, or returns a Future
that resolves to self when opened.
:Parameters:
- `callback`: Optional function taking parameters (self, error)
"""
if callback and not callable(callback):
raise callback_type_error
def opened(self, error):
if error:
callback(None, error)
else:
try:
monitor = self.delegate._MongoReplicaSetClient__monitor
monitor.start_motor(self.io_loop)
except Exception, e:
callback(None, e)
else:
callback(self, None) # No errors
if callback:
super(MotorReplicaSetClient, self)._open(callback=opened)
else:
future = Future()
# The final callback run from inside opened
callback = callback_from_future(future)
super(MotorReplicaSetClient, self)._open(callback=opened)
return future
def _delegate_init_args(self):
# This _monitor_class will be passed to PyMongo's
# MongoReplicaSetClient when we create it.
args, kwargs = super(
MotorReplicaSetClient, self)._delegate_init_args()
kwargs['_monitor_class'] = MotorReplicaSetMonitor
return args, kwargs
def _get_pools(self):
# TODO: expose the PyMongo RSC members, or otherwise avoid this.
rs_state = self.delegate._MongoReplicaSetClient__rs_state
return [member.pool for member in rs_state._members]
def _get_primary_pool(self):
# TODO: expose the PyMongo RSC members, or otherwise avoid this.
rs_state = self.delegate._MongoReplicaSetClient__rs_state
if rs_state.primary_member:
return rs_state.primary_member.pool
def _async_get_socket(self, pool):
"""Return a ``Future`` that will resolve to a socket."""
# MongoReplicaSetClient sets pools' host and port when it creates them.
return pool.async_get_socket()
# PyMongo uses a background thread to regularly inspect the replica set and
# monitor it for changes. In Motor, use a periodic callback on the IOLoop to
# monitor the set.
class MotorReplicaSetMonitor(pymongo.mongo_replica_set_client.Monitor):
def __init__(self, rsc):
msg = (
"First argument to MotorReplicaSetMonitor must be"
" MongoReplicaSetClient, not %r" % rsc)
assert isinstance(
rsc, pymongo.mongo_replica_set_client.MongoReplicaSetClient), msg
# Super makes two MotorGreenletEvents: self.event and self.refreshed.
# We only use self.refreshed.
pymongo.mongo_replica_set_client.Monitor.__init__(
self, rsc, event_class=util.MotorGreenletEvent)
self.timeout_obj = None
self.started = False
def shutdown(self, dummy=None):
if self.timeout_obj:
self.io_loop.remove_timeout(self.timeout_obj)
self.stopped = True
def refresh(self):
assert greenlet.getcurrent().parent, "Should be on child greenlet"
try:
self.rsc.refresh()
except pymongo.errors.AutoReconnect:
pass
# RSC has been collected or there
# was an unexpected error.
except:
return
finally:
# Switch to greenlets blocked in wait_for_refresh().
self.refreshed.set(self.io_loop)
self.timeout_obj = self.io_loop.add_timeout(
time.time() + self._refresh_interval, self.async_refresh)
def async_refresh(self):
greenlet.greenlet(self.refresh).switch()
def start(self):
"""No-op: PyMongo thinks this starts the monitor, but Motor starts
the monitor separately to ensure it uses the right IOLoop."""
pass
def start_sync(self):
"""No-op: PyMongo thinks this starts the monitor, but Motor starts
the monitor separately to ensure it uses the right IOLoop."""
pass
def start_motor(self, io_loop):
self.io_loop = io_loop
self.started = True
self.timeout_obj = self.io_loop.add_timeout(
time.time() + self._refresh_interval, self.async_refresh)
def schedule_refresh(self):
self.refreshed.clear()
if self.io_loop and self.async_refresh:
if self.timeout_obj:
self.io_loop.remove_timeout(self.timeout_obj)
self.io_loop.add_callback(self.async_refresh)
def join(self, timeout=None):
# PyMongo calls join() after shutdown() -- this is not a thread, so
# shutdown works immediately and join is unnecessary
pass
def wait_for_refresh(self, timeout_seconds):
# self.refreshed is a util.MotorGreenletEvent.
self.refreshed.wait(self.io_loop, timeout_seconds)
def is_alive(self):
return self.started and not self.stopped
isAlive = is_alive
class MotorDatabase(MotorBase):
__delegate_class__ = Database
set_profiling_level = AsyncCommand()
reset_error_history = AsyncCommand()
add_user = AsyncCommand()
remove_user = AsyncCommand()
logout = AsyncCommand()
command = AsyncCommand()
authenticate = AsyncCommand()
eval = AsyncCommand()
create_collection = AsyncCommand().wrap(Collection)
drop_collection = AsyncCommand().unwrap('MotorCollection')
validate_collection = AsyncRead().unwrap('MotorCollection')
collection_names = AsyncRead()
current_op = AsyncRead()
profiling_level = AsyncRead()
profiling_info = AsyncRead()
error = AsyncRead()
last_status = AsyncRead()
previous_error = AsyncRead()
dereference = AsyncRead()
incoming_manipulators = ReadOnlyProperty()
incoming_copying_manipulators = ReadOnlyProperty()
outgoing_manipulators = ReadOnlyProperty()
outgoing_copying_manipulators = ReadOnlyProperty()
def __init__(self, connection, name):
if not isinstance(connection, MotorClientBase):
raise TypeError("First argument to MotorDatabase must be "
"MotorClientBase, not %r" % connection)
self.connection = connection
self.delegate = Database(connection.delegate, name)
def __getattr__(self, name):
return MotorCollection(self, name)
__getitem__ = __getattr__
def wrap(self, collection):
# Replace pymongo.collection.Collection with MotorCollection
return self[collection.name]
def add_son_manipulator(self, manipulator):
"""Add a new son manipulator to this database.
Newly added manipulators will be applied before existing ones.
:Parameters:
- `manipulator`: the manipulator to add
"""
# We override add_son_manipulator to unwrap the AutoReference's
# database attribute.
if isinstance(manipulator, pymongo.son_manipulator.AutoReference):
db = manipulator.database
if isinstance(db, MotorDatabase):
manipulator.database = db.delegate
self.delegate.add_son_manipulator(manipulator)
def get_io_loop(self):
return self.connection.get_io_loop()
class MotorCollection(MotorBase):
__delegate_class__ = Collection
create_index = AsyncCommand()
drop_indexes = AsyncCommand()
drop_index = AsyncCommand()
drop = AsyncCommand()
ensure_index = AsyncCommand()
reindex = AsyncCommand()
rename = AsyncCommand()
find_and_modify = AsyncCommand()
map_reduce = AsyncCommand().wrap(Collection)
update = AsyncWrite()
insert = AsyncWrite()
remove = AsyncWrite()
save = AsyncWrite()
index_information = AsyncRead()
count = AsyncRead()
options = AsyncRead()
group = AsyncRead()
distinct = AsyncRead()
inline_map_reduce = AsyncRead()
find_one = AsyncRead()
aggregate = AsyncRead()
uuid_subtype = ReadWriteProperty()
full_name = ReadOnlyProperty()
def __init__(self, database, name=None, *args, **kwargs):
if isinstance(database, Collection):
            # Short cut: wrap an existing PyMongo Collection directly.
            self.delegate = database
elif not isinstance(database, MotorDatabase):
raise TypeError("First argument to MotorCollection must be "
"MotorDatabase, not %r" % database)
else:
self.database = database
self.delegate = Collection(self.database.delegate, name)
def __getattr__(self, name):
# dotted collection name, like foo.bar
return MotorCollection(
self.database,
self.name + '.' + name
)
def find(self, *args, **kwargs):
"""Create a :class:`MotorCursor`. Same parameters as for
PyMongo's `find`_.
Note that :meth:`find` does not take a `callback` parameter, nor does
it return a Future, because :meth:`find` merely creates a
:class:`MotorCursor` without performing any operations on the server.
:class:`MotorCursor` methods such as :meth:`~MotorCursor.to_list` or
:meth:`~MotorCursor.count` perform actual operations.
.. _find: http://api.mongodb.org/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find
"""
if 'callback' in kwargs:
raise pymongo.errors.InvalidOperation(
"Pass a callback to each, to_list, or count, not to find.")
cursor = self.delegate.find(*args, **kwargs)
return MotorCursor(cursor, self)
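    # Usage sketch -- creating the cursor does no I/O; iteration does:
    #
    #     cursor = collection.find({'i': {'$gt': 2}})  # no server round trip
    #     docs = yield cursor.to_list(length=10)       # runs the query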
def wrap(self, collection):
# Replace pymongo.collection.Collection with MotorCollection
return self.database[collection.name]
def get_io_loop(self):
return self.database.get_io_loop()
class MotorCursorChainingMethod(MotorAttributeFactory):
def create_attribute(self, cls, attr_name):
cursor_method = getattr(Cursor, attr_name)
@functools.wraps(cursor_method)
def return_clone(self, *args, **kwargs):
cursor_method(self.delegate, *args, **kwargs)
return self
# This is for the benefit of motor_extensions.py
return_clone.is_motorcursor_chaining_method = True
return_clone.pymongo_method_name = attr_name
return return_clone
class MotorCursor(MotorBase):
__delegate_class__ = Cursor
count = AsyncRead()
distinct = AsyncRead()
explain = AsyncRead()
_refresh = AsyncRead()
cursor_id = ReadOnlyProperty()
alive = ReadOnlyProperty()
batch_size = MotorCursorChainingMethod()
add_option = MotorCursorChainingMethod()
remove_option = MotorCursorChainingMethod()
limit = MotorCursorChainingMethod()
skip = MotorCursorChainingMethod()
max_scan = MotorCursorChainingMethod()
sort = MotorCursorChainingMethod()
hint = MotorCursorChainingMethod()
where = MotorCursorChainingMethod()
_Cursor__die = AsyncCommand()
def __init__(self, cursor, collection):
"""You don't construct a MotorCursor yourself, but acquire one from
:meth:`MotorCollection.find`.
.. note::
There is no need to manually close cursors; they are closed
by the server after being fully iterated
with :meth:`to_list`, :meth:`each`, or :meth:`fetch_next`, or
automatically closed by the client when the :class:`MotorCursor` is
cleaned up by the garbage collector.
"""
self.delegate = cursor
self.collection = collection
self.started = False
self.closed = False
def _get_more(self, callback):
"""
Get a batch of data asynchronously, either performing an initial query
or getting more data from an existing cursor.
:Parameters:
- `callback`: function taking parameters (batch_size, error)
"""
if not self.alive:
raise pymongo.errors.InvalidOperation(
"Can't call get_more() on a MotorCursor that has been"
" exhausted or killed.")
self.started = True
self._refresh(callback=callback)
@property
def fetch_next(self):
"""A Future used with `gen.coroutine`_ to asynchronously retrieve the
next document in the result set, fetching a batch of documents from the
server if necessary. Resolves to ``False`` if there are no more
documents, otherwise :meth:`next_object` is guaranteed to return a
document.
.. _`gen.coroutine`: http://tornadoweb.org/en/stable/gen.html
.. testsetup:: fetch_next
MongoClient().test.test_collection.remove()
collection = MotorClient().open_sync().test.test_collection
.. doctest:: fetch_next
>>> @gen.coroutine
... def f():
... yield collection.insert([{'_id': i} for i in range(5)])
... cursor = collection.find().sort([('_id', 1)])
... while (yield cursor.fetch_next):
... doc = cursor.next_object()
... sys.stdout.write(str(doc['_id']) + ', ')
... print 'done'
...
>>> IOLoop.current().run_sync(f)
0, 1, 2, 3, 4, done
.. note:: While it appears that fetch_next retrieves each document from
the server individually, the cursor actually fetches documents
efficiently in `large batches`_.
.. _`large batches`: http://docs.mongodb.org/manual/core/read-operations/#cursor-behaviors
"""
future = Future()
if not self._buffer_size() and self.alive:
if self.delegate._Cursor__empty:
# Special case, limit of 0
future.set_result(False)
return future
def cb(batch_size, error):
if error:
future.set_exception(error)
else:
future.set_result(bool(batch_size))
self._get_more(cb)
return future
elif self._buffer_size():
future.set_result(True)
return future
else:
# Dead
future.set_result(False)
return future
def next_object(self):
"""Get a document from the most recently fetched batch, or ``None``.
See :attr:`fetch_next`.
"""
# __empty is a special case: limit of 0
if self.delegate._Cursor__empty or not self._buffer_size():
return None
return self.delegate.next()
def each(self, callback):
"""Iterates over all the documents for this cursor.
`each` returns immediately, and `callback` is executed asynchronously
for each document. `callback` is passed ``(None, None)`` when iteration
is complete.
Cancel iteration early by returning ``False`` from the callback. (Only
``False`` cancels iteration: returning ``None`` or 0 does not.)
.. testsetup:: each
MongoClient().test.test_collection.remove()
collection = MotorClient().open_sync().test.test_collection
.. doctest:: each
>>> from tornado.ioloop import IOLoop
       >>> collection = MotorClient().open_sync().test.test_collection
>>> def inserted(result, error):
... global cursor
... if error:
... raise error
... cursor = collection.find().sort([('_id', 1)])
... cursor.each(callback=each)
...
>>> def each(result, error):
... if error:
... raise error
... elif result:
... sys.stdout.write(str(result['_id']) + ', ')
... else:
... # Iteration complete
... IOLoop.current().stop()
... print 'done'
...
>>> collection.insert(
... [{'_id': i} for i in range(5)], callback=inserted)
>>> IOLoop.current().start()
0, 1, 2, 3, 4, done
.. note:: Unlike other Motor methods, ``each`` requires a callback and
does not return a Future, so it cannot be used with
       ``gen.coroutine``. :meth:`to_list` or :attr:`fetch_next` are much
easier to use.
:Parameters:
- `callback`: function taking (document, error)
"""
if not callable(callback):
raise callback_type_error
self._each_got_more(callback, None, None)
def _each_got_more(self, callback, batch_size, error):
if error:
callback(None, error)
return
add_callback = self.get_io_loop().add_callback
while self._buffer_size() > 0:
try:
doc = self.delegate.next() # decrements self.buffer_size
except StopIteration:
# Special case: limit of 0
add_callback(functools.partial(callback, None, None))
self.close()
return
# Quit if callback returns exactly False (not None). Note we
# don't close the cursor: user may want to resume iteration.
if callback(doc, None) is False:
return
if self.alive and (self.cursor_id or not self.started):
self._get_more(functools.partial(self._each_got_more, callback))
else:
# Complete
add_callback(functools.partial(callback, None, None))
def to_list(self, length, callback=None):
"""Get a list of documents.
.. testsetup:: to_list
MongoClient().test.test_collection.remove()
collection = MotorClient().open_sync().test.test_collection
.. doctest:: to_list
>>> @gen.coroutine
... def f():
... yield collection.insert([{'_id': i} for i in range(4)])
... cursor = collection.find().sort([('_id', 1)])
... docs = yield cursor.to_list(length=2)
... while docs:
... print docs
... docs = yield cursor.to_list(length=2)
...
... print 'done'
...
>>> ioloop.IOLoop.current().run_sync(f)
[{u'_id': 0}, {u'_id': 1}]
[{u'_id': 2}, {u'_id': 3}]
done
:Parameters:
- `length`: maximum number of documents to return for this call
- `callback` (optional): function taking (document, error)
If a callback is passed, returns None, else returns a Future.
.. versionchanged:: 0.2
`length` parameter is no longer optional.
"""
length = pymongo.common.validate_positive_integer('length', length)
if (self.delegate._Cursor__query_flags
& _QUERY_OPTIONS['tailable_cursor']):
raise pymongo.errors.InvalidOperation(
"Can't call to_list on tailable cursor")
if callback and not callable(callback):
raise callback_type_error
# Special case: limit of 0.
if self.delegate._Cursor__empty:
if callback:
callback([], None)
else:
future = Future()
future.set_result([])
return future
the_list = []
if callback:
self._to_list_got_more(callback, the_list, length, None, None)
else:
future = Future()
self._to_list_got_more(
callback_from_future(future), the_list, length, None, None)
return future
def _to_list_got_more(self, callback, the_list, length, batch_size, error):
if error:
callback(None, error)
return
collection = self.collection
fix_outgoing = collection.database.delegate._fix_outgoing
if length is None:
# No maximum length, get all results, apply outgoing manipulators
results = (fix_outgoing(data, collection) for data in self.delegate._Cursor__data)
the_list.extend(results)
self.delegate._Cursor__data.clear()
else:
while self._buffer_size() > 0 and len(the_list) < length:
the_list.append(fix_outgoing(self.delegate._Cursor__data.popleft(), collection))
if (not self.delegate._Cursor__killed
and (self.cursor_id or not self.started)
and (length is None or length > len(the_list))):
get_more_cb = functools.partial(
self._to_list_got_more, callback, the_list, length)
self._get_more(callback=get_more_cb)
else:
callback(the_list, None)
def clone(self):
"""Get a clone of this cursor."""
return MotorCursor(self.delegate.clone(), self.collection)
def rewind(self):
"""Rewind this cursor to its unevaluated state."""
self.delegate.rewind()
self.started = False
return self
def get_io_loop(self):
return self.collection.get_io_loop()
def close(self, callback=None):
"""Explicitly kill this cursor on the server. If iterating with
:meth:`each`, cease.
:Parameters:
- `callback` (optional): function taking (result, error).
If a callback is passed, returns None, else returns a Future.
"""
self.closed = True
return self._Cursor__die(callback=callback)
def __getitem__(self, index):
"""Get a slice of documents from this cursor.
Raises :class:`~pymongo.errors.InvalidOperation` if this
cursor has already been used.
To get a single document use an integral index, e.g.:
.. testsetup:: getitem
MongoClient().test.test_collection.remove()
collection = MotorClient().open_sync().test.test_collection
.. doctest:: getitem
>>> @gen.coroutine
... def fifth_item():
... yield collection.insert([{'i': i} for i in range(10)])
... cursor = collection.find().sort([('i', 1)])[5]
... yield cursor.fetch_next
... doc = cursor.next_object()
... print doc['i']
...
>>> IOLoop.current().run_sync(fifth_item)
5
Any limit previously applied to this cursor will be ignored.
The cursor returns ``None`` if the index is greater than or equal to
the length of the result set.
.. doctest:: getitem
>>> @gen.coroutine
... def one_thousandth_item():
... cursor = collection.find().sort([('i', 1)])[1000]
... yield cursor.fetch_next
... print cursor.next_object()
...
>>> IOLoop.current().run_sync(one_thousandth_item)
None
To get a slice of documents use a slice index like
``cursor[start:end]``.
.. doctest:: getitem
>>> @gen.coroutine
... def second_through_fifth_item():
... cursor = collection.find().sort([('i', 1)])[2:6]
... while (yield cursor.fetch_next):
... doc = cursor.next_object()
... sys.stdout.write(str(doc['i']) + ', ')
... print 'done'
...
>>> IOLoop.current().run_sync(second_through_fifth_item)
2, 3, 4, 5, done
This will apply a skip of 2 and a limit of 4 to the cursor. Using a
slice index overrides prior limits or skips applied to this cursor
(including those applied through previous calls to this method).
        Raises :class:`IndexError` when the slice has a step,
a negative start value, or a stop value less than or equal to
the start value.
:Parameters:
- `index`: An integer or slice index to be applied to this cursor
"""
self._check_not_started()
if isinstance(index, slice):
# Slicing a cursor does no I/O - it just sets skip and limit - so
# we can slice it immediately.
self.delegate[index]
return self
else:
if not isinstance(index, (int, long)):
raise TypeError("index %r cannot be applied to MotorCursor "
"instances" % index)
# Get one document, force hard limit of 1 so server closes cursor
# immediately
return self[self.delegate._Cursor__skip + index:].limit(-1)
def _buffer_size(self):
# TODO: expose so we don't have to use double-underscore hack
return len(self.delegate._Cursor__data)
def _check_not_started(self):
if self.started:
raise pymongo.errors.InvalidOperation(
"MotorCursor already started")
def __copy__(self):
return MotorCursor(self.delegate.__copy__(), self.collection)
def __deepcopy__(self, memo):
return MotorCursor(self.delegate.__deepcopy__(memo), self.collection)
def __del__(self):
# This MotorCursor is deleted on whatever greenlet does the last
# decref, or (if it's referenced from a cycle) whichever is current
# when the GC kicks in. We may need to send the server a killCursors
# message, but in Motor only direct children of the main greenlet can
# do I/O. First, do a quick check whether the cursor is still alive on
# the server:
if self.cursor_id and self.alive:
if greenlet.getcurrent().parent:
# We're on a child greenlet, send the message.
self.delegate.close()
else:
# We're on the main greenlet, start the operation on a child.
self.close()
class MotorGridOut(MotorOpenable):
"""Class to read data out of GridFS.
Application developers should generally not need to
instantiate this class directly - instead see the methods
provided by :class:`~motor.MotorGridFS`.
"""
__delegate_class__ = gridfs.GridOut
__getattr__ = DelegateMethod()
_id = ReadOnlyProperty()
filename = ReadOnlyProperty()
name = ReadOnlyProperty()
content_type = ReadOnlyProperty()
length = ReadOnlyProperty()
chunk_size = ReadOnlyProperty()
upload_date = ReadOnlyProperty()
aliases = ReadOnlyProperty()
metadata = ReadOnlyProperty()
md5 = ReadOnlyProperty()
tell = DelegateMethod()
seek = DelegateMethod()
read = AsyncRead()
readline = AsyncRead()
def __init__(
self, root_collection, file_id=None, file_document=None,
io_loop=None
):
if isinstance(root_collection, grid_file.GridOut):
# Short cut
super(MotorGridOut, self).__init__(root_collection, io_loop)
else:
if not isinstance(root_collection, MotorCollection):
raise TypeError(
"First argument to MotorGridOut must be "
"MotorCollection, not %r" % root_collection)
assert io_loop is None, \
"Can't override IOLoop for MotorGridOut"
super(MotorGridOut, self).__init__(
None, root_collection.get_io_loop(), root_collection.delegate,
file_id, file_document)
@gen.coroutine
def stream_to_handler(self, request_handler):
"""Write the contents of this file to a
:class:`tornado.web.RequestHandler`. This method will call `flush` on
the RequestHandler, so ensure all headers have already been set.
For a more complete example see the implementation of
:class:`~motor.web.GridFSHandler`.
Returns a Future.
.. code-block:: python
class FileHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self, filename):
db = self.settings['db']
fs = yield motor.MotorGridFS(db()).open()
try:
gridout = yield fs.get_last_version(filename)
except gridfs.NoFile:
raise tornado.web.HTTPError(404)
self.set_header("Content-Type", gridout.content_type)
self.set_header("Content-Length", gridout.length)
yield gridout.stream_to_handler(self)
self.finish()
.. seealso:: Tornado `RequestHandler <http://tornadoweb.org/en/stable/web.html#request-handlers>`_
"""
written = 0
while written < self.length:
# Reading chunk_size at a time minimizes buffering
chunk = yield self.read(self.chunk_size)
# write() simply appends the output to a list; flush() sends it
# over the network and minimizes buffering in the handler.
request_handler.write(chunk)
request_handler.flush()
written += len(chunk)
class MotorGridIn(MotorOpenable):
__delegate_class__ = gridfs.GridIn
__getattr__ = DelegateMethod()
closed = ReadOnlyProperty()
close = AsyncCommand()
write = AsyncCommand().unwrap(MotorGridOut)
writelines = AsyncCommand().unwrap(MotorGridOut)
_id = ReadOnlyProperty()
md5 = ReadOnlyProperty()
filename = ReadOnlyProperty()
name = ReadOnlyProperty()
content_type = ReadOnlyProperty()
length = ReadOnlyProperty()
chunk_size = ReadOnlyProperty()
upload_date = ReadOnlyProperty()
def __init__(self, root_collection, **kwargs):
"""
Class to write data to GridFS. If instantiating directly, you must call
:meth:`open` before using the `MotorGridIn` object. However,
application developers should not generally need to instantiate this
class - see :meth:`~motor.MotorGridFS.new_file`.
Any of the file level options specified in the `GridFS Spec
<http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
keyword arguments. Any additional keyword arguments will be
set as additional fields on the file document. Valid keyword
arguments include:
- ``"_id"``: unique ID for this file (default:
:class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
not have already been used for another file
- ``"filename"``: human name for the file
- ``"contentType"`` or ``"content_type"``: valid mime-type
for the file
- ``"chunkSize"`` or ``"chunk_size"``: size of each of the
chunks, in bytes (default: 256 kb)
- ``"encoding"``: encoding used for this file. In Python 2,
any :class:`unicode` that is written to the file will be
converted to a :class:`str`. In Python 3, any :class:`str`
that is written to the file will be converted to
:class:`bytes`.
:Parameters:
- `root_collection`: A :class:`MotorCollection`, the root collection
to write to
- `**kwargs` (optional): file level options (see above)
"""
if isinstance(root_collection, grid_file.GridIn):
# Short cut
MotorOpenable.__init__(
self, root_collection, kwargs.pop('io_loop', None))
else:
if not isinstance(root_collection, MotorCollection):
raise TypeError(
"First argument to MotorGridIn must be "
"MotorCollection, not %r" % root_collection)
assert 'io_loop' not in kwargs, (
"Can't override IOLoop for MotorGridIn")
MotorOpenable.__init__(
self, None, root_collection.get_io_loop(),
root_collection.delegate, **kwargs)
MotorGridIn.set = asynchronize(
MotorGridIn, gridfs.GridIn.__setattr__, False, doc="""
Set an arbitrary metadata attribute on the file. Stores value on the server
as a key-value pair within the file document once the file is closed. If
the file is already closed, calling `set` will immediately update the file
document on the server.
Metadata set on the file appears as attributes on a :class:`~MotorGridOut`
object created from the file.
:Parameters:
- `name`: Name of the attribute, will be stored as a key in the file
document on the server
- `value`: Value of the attribute
- `callback`: Optional callback to execute once attribute is set.
""")
class MotorGridFS(MotorOpenable):
__delegate_class__ = gridfs.GridFS
new_file = AsyncRead().wrap(grid_file.GridIn)
get = AsyncRead().wrap(grid_file.GridOut)
get_version = AsyncRead().wrap(grid_file.GridOut)
get_last_version = AsyncRead().wrap(grid_file.GridOut)
list = AsyncRead()
exists = AsyncRead()
delete = AsyncCommand()
def __init__(self, database, collection="fs"):
"""
An instance of GridFS on top of a single Database. You must call
:meth:`open` before using the `MotorGridFS` object.
:Parameters:
- `database`: a :class:`MotorDatabase`
- `collection` (optional): A string, name of root collection to use,
such as "fs" or "my_files"
.. mongodoc:: gridfs
"""
if not isinstance(database, MotorDatabase):
raise TypeError("First argument to MotorGridFS must be "
"MotorDatabase, not %r" % database)
self.collection = database[collection]
MotorOpenable.__init__(
self, None, database.get_io_loop(), database.delegate, collection)
@gen.coroutine
def _put(self, data, _callback, **kwargs):
try:
grid_file = yield MotorGridIn(self.collection, **kwargs).open()
# w >= 1 necessary to avoid running 'filemd5' command before
# all data is written, especially with sharding.
if 0 == self.collection.write_concern.get('w'):
raise pymongo.errors.ConfigurationError(
"Motor does not allow unacknowledged put() to GridFS")
try:
yield grid_file.write(data)
finally:
yield grid_file.close()
except Exception, e:
if _callback:
_callback(None, e)
else:
raise
else:
if _callback:
_callback(grid_file._id, None)
else:
raise gen.Return(grid_file._id)
def put(self, data, callback=None, **kwargs):
"""Put data into GridFS as a new file.
Equivalent to doing:
.. code-block:: python
@gen.coroutine
def f(data, **kwargs):
try:
f = yield my_gridfs.new_file(**kwargs)
yield f.write(data)
finally:
yield f.close()
`data` can be either an instance of :class:`str` (:class:`bytes`
in python 3) or a file-like object providing a :meth:`read` method.
If an `encoding` keyword argument is passed, `data` can also be a
:class:`unicode` (:class:`str` in python 3) instance, which will
be encoded as `encoding` before being written. Any keyword arguments
will be passed through to the created file - see
:meth:`~MotorGridIn` for possible arguments. Returns the
``"_id"`` of the created file.
If the ``"_id"`` of the file is manually specified, it must
not already exist in GridFS. Otherwise
:class:`~gridfs.errors.FileExists` is raised.
:Parameters:
- `data`: data to be written as a file.
- `callback`: Optional function taking parameters (_id, error)
- `**kwargs` (optional): keyword arguments for file creation
"""
# PyMongo's implementation uses requests, so rewrite for Motor.
if callback and not callable(callback):
raise callback_type_error
future = self._put(data, _callback=callback, **kwargs)
if not callback:
return future
def wrap(self, obj):
if obj.__class__ is grid_file.GridIn:
return MotorGridIn(obj, io_loop=self.get_io_loop())
elif obj.__class__ is grid_file.GridOut:
return MotorGridOut(obj, io_loop=self.get_io_loop())
def Op(fn, *args, **kwargs):
"""Obsolete; here for backwards compatibility with Motor 0.1.
Op had been necessary for ease-of-use with Tornado 2 and @gen.engine. But
Motor 0.2 is built for Tornado 3, @gen.coroutine, and Futures, so motor.Op
is deprecated.
"""
msg = "motor.Op is deprecated, simply call %s and yield its Future." % (
fn.__name__)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
result = fn(*args, **kwargs)
assert isinstance(result, Future)
return result
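# Migration sketch from the deprecated Op wrapper to plain Futures:
#
#     # Motor 0.1 / Tornado 2 style (deprecated):
#     document = yield motor.Op(collection.find_one, {'_id': 1})
#
#     # Motor 0.2 / Tornado 3 style:
#     document = yield collection.find_one({'_id': 1})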
| 36.936945 | 118 | 0.608112 | 73,020 | 0.877834 | 4,509 | 0.054206 | 13,003 | 0.15632 | 0 | 0 | 36,141 | 0.434481 |
72230a4712ff2722d5fd895c22c3d235aabfdf44 | 3,544 | py | Python | del_dupli_in_fasta.py | ba1/BioParsing | 8a0257d4765a7bc86fef7688762abbeaaf3cef07 | ["MIT"] | 1 | 2017-06-19T15:15:26.000Z | 2017-06-19T15:15:26.000Z | del_dupli_in_fasta.py | ba1/BioParsing | 8a0257d4765a7bc86fef7688762abbeaaf3cef07 | ["MIT"] | null | null | null | del_dupli_in_fasta.py | ba1/BioParsing | 8a0257d4765a7bc86fef7688762abbeaaf3cef07 | ["MIT"] | null | null | null |
'''
Created on Oct 20, 2015
@author: bardya
'''
import os
import argparse
from Bio import SeqIO
def parse_args():
    parser = argparse.ArgumentParser(description='Delete all duplicate entries (identical header and sequence) in a fasta file. If only the header is identical, a count suffix is appended to make it unique.')
parser.add_argument('-i', dest='infilepath', metavar='<fasta_file_path>', type=argparse.FileType('rt'),
help='path to an fasta file')
parser.add_argument('-o', dest='outfilepath', metavar='<fasta_file_path>', type=argparse.FileType('w'),
help='path to desired output fasta file')
parser.add_argument('-m', dest='mode', metavar='<header|sequence>', type=str, choices=["header", "Header", "sequence", "Sequence"],
default="header", help='mode headers checks for "headers and then sequence". Mode sequence searches only for sequence duplicates')
parser.add_argument('-k', dest='keep_flag', action="store_true",
help='with this options nothing gets deleted. Headers get count number attached to end of the line to make them unique.')
parser.add_argument('-rn', dest='rename_flag', action="store_true",
help='with this options nothing gets deleted. Headers get replaced by an integer reflecting the count')
parser.add_argument('--version', action='version', version='0.12')
return parser.parse_args()
def readfasta(seqdbfile, keep_flag=False, rename_flag=False):
from collections import Counter
try:
seqs = SeqIO.parse(seqdbfile, "fasta")
except:
seqs = SeqIO.parse(seqdbfile, "clustal")
seqlst = []
dupcount = 0
modcount = 0
    for seq in seqs:
        currIDlst = [e.id for e in seqlst]
        if rename_flag:
            # Replace the header with the running count; SeqIO adds the ">" on output.
            seq.id = str(len(seqlst) + 1)
            modcount += 1
            seqlst.append(seq)
            continue
        if seq.id in currIDlst:
            ind = currIDlst.index(seq.id)
            # Count previous occurrences of this id to build a unique suffix.
            occurrences = Counter(currIDlst)[seq.id]
            if keep_flag:
                seq.id = str(seq.id) + "_" + str(occurrences + 1)
                modcount += 1
                seqlst.append(seq)
                continue
            if seqlst[ind].seq == seq.seq:
                dupcount += 1
                continue
            else:
                seq.id = str(seq.id) + "_" + str(occurrences + 1)
                modcount += 1
        seqlst.append(seq)
stats_dict = {"delentries":dupcount, "numofseqs":len(seqlst), "modentries":modcount}
return seqlst, stats_dict
def printStats(stats_dict):
outp = """
#Entries remaining in output:\t{numofseqs}
#Entries deleted:\t{delentries}
#Headers modified:\t{modentries}
""".format(**stats_dict)
print(outp)
def writefasta(outfile, seqlst):
count = SeqIO.write(seqlst, outfile, "fasta")
outfile.close()
if __name__ == '__main__':
args = parse_args()
try:
inputfile = open(args.infilepath.name, 'r')
outputfile = open(args.outfilepath.name, 'w')
# if not os.path.basename(args.outfilepath.name) == "basename":
# outputfile = open(args.outfilepath.name, 'w')
# else:
# outputfile = open(os.path.join(os.path.dirname(args.outfilepath.name),os.path.basename(args.infilepath.name) + '_consensus.faa'), 'w')
    except IOError:
        print('IOError occurred')
        raise
seqlst, stats_dict = readfasta(args.infilepath.name, keep_flag=args.keep_flag, rename_flag=args.rename_flag)
printStats(stats_dict)
writefasta(outputfile, seqlst) | 35.79798 | 165 | 0.615406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,285 | 0.362585 |
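# Example invocations of the CLI defined above (file names are hypothetical):
#
#     python del_dupli_in_fasta.py -i seqs.faa -o dedup.faa          # drop exact duplicates
#     python del_dupli_in_fasta.py -i seqs.faa -o kept.faa -k        # keep all, suffix repeated headers
#     python del_dupli_in_fasta.py -i seqs.faa -o numbered.faa -rn   # replace headers with counts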
7224268eb003eeb3fc96967b78416eccf0509110 | 491 | py | Python | lclpy/aidfunc/logging.py | nobody1570/lspy | 1cf6efbafbbf8ddb54ba7a875e82c562f010edd1 | [
"MIT"
]
| 3 | 2021-11-27T22:11:38.000Z | 2022-02-10T11:42:06.000Z | lclpy/aidfunc/logging.py | nobody1570/lspy | 1cf6efbafbbf8ddb54ba7a875e82c562f010edd1 | [
"MIT"
]
| null | null | null | lclpy/aidfunc/logging.py | nobody1570/lspy | 1cf6efbafbbf8ddb54ba7a875e82c562f010edd1 | [
"MIT"
]
| null | null | null |
def log_improvement(value):
"""function to log improvements to the command line.
Parameters
----------
value : int or float
The value for the improvement
"""
print("Improvement : " + str(value))
def log_passed_worse(value):
"""function to log the passing of worse solutions to the command line.
Parameters
----------
value : int or float
        The value of the accepted worse solution
"""
print("Passed worse: " + str(value))
| 18.185185 | 74 | 0.592668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 368 | 0.749491 |
72244921b06692f1b6e6b261aabc73be9e8ccb0e | 897 | py | Python | tests/test_as_class_methods.py | pokidovea/immobilus | 42f115a13b4aa060b7ed186e81fe56e1a07c4b2d | [
"Apache-2.0"
]
| 13 | 2016-11-26T16:13:11.000Z | 2021-12-21T11:10:50.000Z | tests/test_as_class_methods.py | pokidovea/immobilus | 42f115a13b4aa060b7ed186e81fe56e1a07c4b2d | [
"Apache-2.0"
]
| 20 | 2017-03-06T00:50:22.000Z | 2019-08-26T20:12:39.000Z | tests/test_as_class_methods.py | pokidovea/immobilus | 42f115a13b4aa060b7ed186e81fe56e1a07c4b2d | [
"Apache-2.0"
]
| 6 | 2017-08-28T07:23:54.000Z | 2021-12-03T13:03:50.000Z | # https://github.com/pokidovea/immobilus/issues/30
from immobilus import immobilus # noqa
from immobilus.logic import fake_time, fake_localtime, fake_gmtime, fake_strftime, fake_mktime
from datetime import datetime
class SomeClass(object):
method = None
def test_fake_time():
SomeClass.method = fake_time
instance = SomeClass()
instance.method()
def test_fake_localtime():
SomeClass.method = fake_localtime
instance = SomeClass()
instance.method(12345)
def test_fake_gmtime():
SomeClass.method = fake_gmtime
instance = SomeClass()
instance.method(12345)
def test_fake_strftime():
SomeClass.method = fake_strftime
instance = SomeClass()
instance.method('%H:%M')
def test_fake_mktime():
SomeClass.method = fake_mktime
timetuple = datetime.utcnow().timetuple()
instance = SomeClass()
instance.method(timetuple)
| 18.6875 | 94 | 0.723523 | 42 | 0.046823 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.070234 |
722517ddb7cf57ba0cdaeeaa839501f03c9155b4 | 322 | py | Python | plotter/plotter.py | kalinkinisaac/modular | 301d26ad222a5ef3278aaf251908e0a8537bb58f | [
"MIT"
]
| null | null | null | plotter/plotter.py | kalinkinisaac/modular | 301d26ad222a5ef3278aaf251908e0a8537bb58f | [
"MIT"
]
| null | null | null | plotter/plotter.py | kalinkinisaac/modular | 301d26ad222a5ef3278aaf251908e0a8537bb58f | [
"MIT"
]
| null | null | null | import abc
class Plotter(abc.ABC):
def __init__(self, ax=None, bokeh_fig=None):
if not ax and not bokeh_fig:
raise ValueError('ax or bokeh_fig should be provided.')
self._ax = ax
self._bokeh_fig = bokeh_fig
def plot(self, *args, **kwargs):
raise NotImplementedError
| 23 | 67 | 0.63354 | 308 | 0.956522 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.114907 |
72279efb6ba56531335b2f093691a4196e8f4923 | 2,531 | py | Python | ardupilot/Tools/autotest/param_metadata/wikiemit.py | quadrotor-IITKgp/emulate_GPS | 3c888d5b27b81fb17e74d995370f64bdb110fb65 | [
"MIT"
]
| 1 | 2021-07-17T11:37:16.000Z | 2021-07-17T11:37:16.000Z | ardupilot/Tools/autotest/param_metadata/wikiemit.py | arl-kgp/emulate_GPS | 3c888d5b27b81fb17e74d995370f64bdb110fb65 | [
"MIT"
]
| null | null | null | ardupilot/Tools/autotest/param_metadata/wikiemit.py | arl-kgp/emulate_GPS | 3c888d5b27b81fb17e74d995370f64bdb110fb65 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import re
from param import *
from emit import Emit
# Emit docs in a form acceptable to the APM wiki site
class WikiEmit(Emit):
def __init__(self):
wiki_fname = 'Parameters.wiki'
self.f = open(wiki_fname, mode='w')
preamble = '''#summary Dynamically generated list of documented parameters
= Table of Contents =
<wiki:toc max_depth="4" />
= Vehicles =
'''
self.f.write(preamble)
def close(self):
self.f.close
def camelcase_escape(self, word):
if re.match(r"([A-Z][a-z]+[A-Z][a-z]*)", word.strip()):
return "!"+word
else:
return word
def wikichars_escape(self, text):
for c in "*,{,},[,],_,=,#,^,~,!,@,$,|,<,>,&,|,\,/".split(','):
text = re.sub("\\"+c, '`'+c+'`', text)
return text
def emit_comment(self, s):
self.f.write("\n\n=" + s + "=\n\n")
def start_libraries(self):
self.emit_comment("Libraries")
def emit(self, g, f):
t = "\n\n== %s Parameters ==\n" % (self.camelcase_escape(g.name))
for param in g.params:
if hasattr(param, 'DisplayName'):
t += "\n\n=== %s (%s) ===" % (self.camelcase_escape(param.DisplayName),self.camelcase_escape(param.name))
else:
t += "\n\n=== %s ===" % self.camelcase_escape(param.name)
if hasattr(param, 'Description'):
t += "\n\n_%s_\n" % self.wikichars_escape(param.Description)
else:
t += "\n\n_TODO: description_\n"
for field in param.__dict__.keys():
if field not in ['name', 'DisplayName', 'Description', 'User'] and field in known_param_fields:
if field == 'Values' and Emit.prog_values_field.match(param.__dict__[field]):
t+= " * Values \n"
values = (param.__dict__[field]).split(',')
t+="|| *Value* || *Meaning* ||\n"
for value in values:
v = value.split(':')
t+="|| "+v[0]+" || "+self.camelcase_escape(v[1])+" ||\n"
else:
t += " * %s: %s\n" % (self.camelcase_escape(field), self.wikichars_escape(param.__dict__[field]))
#print t
self.f.write(t)
| 34.671233 | 121 | 0.468589 | 2,393 | 0.945476 | 0 | 0 | 0 | 0 | 0 | 0 | 646 | 0.255235 |
722ad974ef9283199399d93bbd17a334c7d31249 | 1,038 | py | Python | master.py | iAzurel/thepicturesorter | 21a3aee26adcfca0838db63be1434f7c49cd9548 | [
"MIT"
]
| null | null | null | master.py | iAzurel/thepicturesorter | 21a3aee26adcfca0838db63be1434f7c49cd9548 | [
"MIT"
]
| null | null | null | master.py | iAzurel/thepicturesorter | 21a3aee26adcfca0838db63be1434f7c49cd9548 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
from PIL import Image
import os, os.path
import cv2
import sys
# Detect faces; returns the rectangles found, so callers can count them.
def detect_face(image_path, face_cascade):
img = cv2.imread(image_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Change the values based on needs.
faces = face_cascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=7,
minSize=(30, 30),
flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
return faces
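# Usage sketch for detect_face (paths are illustrative): it returns the array of
# (x, y, w, h) rectangles from detectMultiScale, so callers can test len(faces).
#
#     cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#     faces = detect_face('/tmp/photo.jpg', cascade)
#     if len(faces) == 0:
#         print('no faces found')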
# Moves pictures based on detection of faces.
def imagesChecker():
imgs_path = '/home/murtaza/Documents/thepicturesorter/Pictures/'
nofacesdir = '/home/murtaza/Documents/thepicturesorter/NoFaces/'
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
imgs = os.listdir(imgs_path)
    for i in range(len(imgs)):
faces = detect_face(imgs_path + '/' + imgs[i], face_cascade)
if len(faces) == 0:
os.rename(os.path.abspath(imgs_path + imgs[i]), nofacesdir + imgs[i])
def main():
imagesChecker()
if __name__ == "__main__":
main() | 23.590909 | 76 | 0.716763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 299 | 0.288054 |
722c1e45a1768734b80ecc9c4958f9182d017de1 | 1,241 | py | Python | src/si/supervised/Ensemble.py | pg42862/Sistemas_Inteligentes_para_a_MBIONF | 5dd5b487da5b3c6a0989274598c911cc639138a3 | [
"Apache-2.0"
]
| null | null | null | src/si/supervised/Ensemble.py | pg42862/Sistemas_Inteligentes_para_a_MBIONF | 5dd5b487da5b3c6a0989274598c911cc639138a3 | [
"Apache-2.0"
]
| null | null | null | src/si/supervised/Ensemble.py | pg42862/Sistemas_Inteligentes_para_a_MBIONF | 5dd5b487da5b3c6a0989274598c911cc639138a3 | [
"Apache-2.0"
]
| 1 | 2021-11-15T16:15:50.000Z | 2021-11-15T16:15:50.000Z | from.Model import Model
import numpy as np
def majority(values):
    return max(set(values), key=values.count)  # the most common value
def average(values):
    return sum(values)/len(values)  # or the average
class Ensemble(Model):
def __init__(self, models, fvote, score):
super(Ensemble, self).__init__()
        self.models = models  # list of models
        self.fvote = fvote  # majority or average
self.score = score
def fit(self, dataset):
self.dataset = dataset
for model in self.models:
model.fit(dataset)
self.is_fitted = True
def predict(self, x):
assert self.is_fitted, 'Model not fitted'
        preds = [model.predict(x) for model in self.models]  # one prediction of x from each model in the list
        vote = self.fvote(preds)  # apply majority or average over the predictions to choose the final value
return vote
def cost(self, X=None, Y=None):
X = X if X is not None else self.dataset.X
Y = Y if Y is not None else self.dataset.Y
        Y_pred = np.ma.apply_along_axis(self.predict, axis=0, arr=X.T)  # arr is masked
        # Y_pred is our masked prediction array
        return self.score(Y, Y_pred)  # e.g. accuracy_score
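# Usage sketch (models and dataset are hypothetical; any objects exposing
# fit/predict work, and `score` can be e.g. sklearn's accuracy_score):
#
#     ens = Ensemble(models=[model_a, model_b], fvote=majority, score=accuracy_score)
#     ens.fit(dataset)
#     y_hat = ens.predict(x_new)
#     print(ens.cost())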
| 32.657895 | 140 | 0.65834 | 1,044 | 0.841257 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.235294 |
722cd36e985871c8a2f5b1f558365071a7104a73 | 11,915 | py | Python | pygto900.py | marissakotze/timDIMM | dde00a3bb6ca7c3d9b71e24f9363350a0e2a323f | [
"BSD-3-Clause"
]
| 1 | 2021-06-06T15:26:36.000Z | 2021-06-06T15:26:36.000Z | pygto900.py | marissakotze/timDIMM | dde00a3bb6ca7c3d9b71e24f9363350a0e2a323f | [
"BSD-3-Clause"
]
| null | null | null | pygto900.py | marissakotze/timDIMM | dde00a3bb6ca7c3d9b71e24f9363350a0e2a323f | [
"BSD-3-Clause"
]
| 3 | 2015-07-29T15:16:35.000Z | 2017-12-01T13:02:36.000Z | #!/usr/bin/env python
"""pygto900 contains commands to interface with a astro-physics gto900 mount
"""
import serial
import io
import sys
import string
import math
import time
from binutils import *
from datetime import datetime
from astropy.coordinates import Angle
def airmass(a):
ang = Angle(90, unit='degree') - a
return 1.0/math.cos(ang.radian)
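# Worked example for the plane-parallel airmass above: at altitude a = 30 deg the
# zenith angle is 60 deg, so airmass = 1/cos(60 deg) = 2.0:
#
#     airmass(Angle(30, unit='degree'))  # -> 2.0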
class GTO900:
def __init__(self, port="/dev/ttyUSB1"):
'''
we use the py27-serial package to implement
communication.to the telescope. The port may change
'''
self.ser = serial.Serial(port,
baudrate=9600,
bytesize=8,
timeout=3)
self.catalog={}
def __enter__(self):
self.clear()
self.long_format()
self.set_current_date()
self.set_local_time()
return self
def __exit__(self, errtype, value, traceback):
self.close()
if errtype is not None:
raise
def command(self, command):
self.ser.write(command)
def read(self):
resp = self.ser.readline()
return resp.strip('#')
def close(self):
self.clear()
self.ser.close()
#check the result
def check(self):
result = self.ser.read(1)
self.clear()
return result
#clear all the values
def clear(self):
self.command('#')
#set the format as long
def long_format(self):
self.command("#:U#")
# set the offset from greenwich mean time
    def set_gmt_offset(self, hrs):
self.command("#:SG %s#" % hrs)
return self.check()
# set the current longitude
    def set_longitude(self, d, m, s):
long_str = "%d*%02d:%02d" % (d, m, s)
self.command("#:Sg %s#" % long_str )
return self.check()
# set the current latitude
    def set_latitude(self, d, m, s):
lat_str = "%d*%02d:%02d" % (d, m, s)
self.command("#:St %s#" % lat_str)
return self.check()
# set current date (MM/DD/YY)
def set_current_date(self):
ut = datetime.utcnow()
date_str = ut.strftime("%m/%d/%Y")
self.command("#:SC %s#" % date_str)
        self.read()  # discard the first response line
return self.read()
# set local time (HH:MM:SS)
def set_local_time(self):
sast = datetime.now()
time_str = sast.strftime("%H:%M:%S")
self.command("#:SL %s#" % time_str)
return self.check()
# get UTC offset from GTO900
def get_UTC_offset(self):
self.command("#:GG#")
output = self.read()
#sign, h, m, s, ds = string.scan(/(.)(\d+):(\d+):(\d+).(\d+)/)[0]
return output
# get current site longitude
def get_longitude(self):
self.command("#:Gg#")
output = self.read()
# sign, d, m, s = string.scan(/(.)(\d+)\*(\d+):(\d+)/)[0]
# output = sign + d + ":" + m + ":" + s
return output.replace('*', ':')
# get current site latitude
def get_latitude(self):
self.command("#:Gt#")
output = self.read()
return output.replace('*', ':')
# get local time
def get_local_time(self):
self.command("#:GL#")
output = self.read()
return output
# get local date
def get_local_date(self):
self.command("#:GC#")
output = self.read()
output = output.replace(':', '/')
return output
# get LST
def lst(self):
self.command("#:GS#")
return self.read()
#get RA
def ra(self):
self.command("#:GR#")
return self.read()
#get dec
def dec(self):
self.command("#:GD#")
d = self.read()
d = d.replace('*', ':')
return d
# read the current altitude from the GTO900
def alt(self):
self.command("#:GA#")
output = self.read()
return output.replace('*', ':')
# calculate current GTO900 airmass
def airmass(self):
a = Angle(self.alt(), unit='degree')
return airmass(a)
# read the current azimuth from the GTO900
def az(self):
self.command("#:GZ#")
output = self.read()
if output:
return output.replace('*',':')
else:
return None
# command the GTO900 to slew to the target RA and Dec
def slew(self):
self.command("#:MS#")
result=self.read()
if result:
print 'Slewing...'
else:
print result
# command the GTO900 to move in the given direction at
# the current guide or centering rate
def move(self, mdir):
if mdir in ["e","n","s","w"]:
self.command("#:M%s#" % mdir)
# command the GTO900 to move in the given direction at
# the current guide or centering rate
# for a given number of ms
def move_ms(self,mdir, ms):
if mdir in ["e","n","s","w"]:
self.command("#:M%s%s#" % (mdir, ms))
# swap north-south buttons
def swap_ns(self):
self.command("#:NS#")
# swap east-west buttons
def swap_ew(self):
self.command("#:EW#")
# command the GTO900 to stop motion in the given direction
def halt(self, mdir):
if mdir in ["e","n","s","w"]:
self.command("#:Q%s#" % mdir)
# command the GTO900 to halt all mount motion
def haltall(self):
self.command("#:Q#")
# select guide rate
def select_guide_rate(self, rate=''):
if rate in ['', '0', '1', '2']:
self.command("#:RG%s#" % rate)
# select centering rate
def select_center_rate(self, rate=''):
if rate in ['', '0', '1', '2']:
self.command("#:RC%s#" % rate)
# set centering rate
def set_center_rate(self, rate):
if rate > 0 and rate < 256:
self.command("#:Rc%s#" % rate)
# select slew rate
def select_slew_rate(self, rate=''):
if rate in ['', '0', '1', '2']:
self.command("#:RS%s#" % rate)
# set slew rate
def set_slew_rate(self, rate):
if rate > 0 and rate <= 1200:
self.command("#:Rs %s#" % rate)
# select tracking rate
def select_tracking_rate(self, rate=''):
if rate in ['', '0', '1', '2', '9']:
self.command("#:RS%s#" % rate)
# set RA tracking rate
def set_RA_rate(self, rate):
self.command("#:RR %s#" % rate)
# set Dec tracking rate
def set_Dec_rate(self,rate):
self.command("#:RD %s#" % rate)
# set amount of Dec backlash compensation (in seconds)
def set_Dec_backlash(self,sec):
self.command("#:Bd 00*00:%s#" % sec)
return self.read()
# set amount of RA backlash compensation (in seconds)
def set_RA_backlash(self, sec):
self.command("#:Br 00:00:%s#" %sec)
return self.read()
# invoke parked mode
def park_mode(self):
self.command("#:KA#")
# park off
def park_off(self):
self.command("#PO:#")
# query pier
def pier(self):
self.command("#:pS#")
return self.read()
# sync mount
def sync(self):
self.command("#:CM#")
return self.read()
# re-cal mount
def recal(self):
self.command("#:CMR#")
return self.read()
# define commanded RA
def command_ra(self, h, m, s):
r_str = "%02d:%02d:%02d" % (h,m,s)
self.command("#:Sr %s#" % r_str)
return self.check()
def command_ra_raw(self, r):
self.command("#:Sr %s#" % r)
return self.check()
# define commanded Dec
def command_dec(self, d, m, s):
d_str = "%d*%02d:%02d" % (d,m,s)
self.command("#:Sd %s#" % d_str)
return self.check()
def command_dec_raw(self, d):
self.command("#:Sd %s#" % d)
return self.check()
# define commanded Alt
def command_alt(self, d, m, s):
alt_str = "+%d*%02d:%02d" % (d,m,s)
self.command("#:Sa %s#" % alt_str)
return self.check()
# define commanded Az
def command_az(self, d, m, s):
az_str = "%d*%02d:%02d" % (d,m,s)
self.command("#:Sz %s#" % az_str)
return self.check()
# increase reticle brightness
def increase_reticle_brightness(self):
self.command("#:B+#")
# decrease reticle brightness
def decrease_reticle_brightness(self):
self.command("#:B-#")
# command the focuser to move inward (toward the primary)
def focus_in(self):
self.command("#:F+#")
# command the focuser to move outward (away from the primary)
def focus_out(self):
self.command("#:F-#")
# focus fast
def focus_fast(self):
self.command("#:FF#")
# focus slow
def focus_slow(self):
self.command("#FS#")
# halt all focuser motion
def focus_halt(self):
self.command("#:FQ#")
# get telescope firmware
def get_firmware(self):
self.command("#:V#")
return self.read()
# startup procedure
def startup(self):
self.clear()
self.clear()
self.clear()
self.long_format()
def status(g):
"""Chck the values for the telescope"""
ra = g.ra()
dec = g.dec()
lst = g.lst()
ha = Angle('%s hour' % lst) - Angle('%s hour' % ra)
alt = g.alt()
az = g.az()
a = Angle(alt, unit='degree')
z = airmass(a)
p = g.pier()
return ra,dec,ha,lst,alt,az,z,p
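# Usage sketch (the serial port path is site-specific):
#
#     with GTO900('/dev/ttyUSB1') as g:
#         ra, dec, ha, lst, alt, az, z, p = status(g)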
def slew(g, ra, dec, niter=100):
"""Slew to a location
    Parameters
    ----------
ra: astropy.coordinates.Angle
Right Ascension of source
dec: astropy.coordinates.Angle
Declination of source
niter: int
Number of loops to attempt if monitoring progress
"""
g.command_ra(ra.hms[0], ra.hms[1], ra.hms[2])
g.command_dec(dec.dms[0], dec.dms[1], dec.dms[2])
g.slew()
for i in range(niter):
try:
r = Angle(g.ra(), unit='hour')
d = Angle(g.dec(), unit='degree')
        except Exception as e:
print e
continue
dist = ((r.degree - ra.degree)**2 + (d.degree-dec.degree)**2)**0.5
        if dist < 1.0/60.0:  # converged to within 1 arcminute of the target
print 'Done Slewing'
return
else:
print '%5.2f degrees to go until target' % dist
return
def init(g):
"""Initialize the telescope"""
print "Initializing mount...."
g.startup()
return
def nudge(g, mdir):
"""Nudge the telescope in one direction"""
g.set_center_rate(10)
g.move(mdir)
time.sleep(1)
g.halt(mdir)
time.sleep(1)
def usage():
"""Print the usage string"""
usage_str = """
Usage for pygto900:
python pygto900 [init/status/log/move/nudge/slew/sync/park/help] [optional]
"""
print usage_str
if __name__=='__main__':
    if len(sys.argv) < 2:
        usage()
        exit()
    task = sys.argv[1].lower()
if task in ['help']:
usage()
exit()
with GTO900() as g:
if task == 'status':
results = status(g)
output ="At RA = %s, Dec = %s, HA = %s, LST = %s, Alt = %s, Az = %s, secz = %.2f, on the %s side of the pier" % results
print output
elif task == 'log':
results = status(g)
print '%s %s %s %s %s %s %.2f %s' % results
elif task == 'init':
init(g)
elif task == 'park':
g.park_mode()
elif task == 'park_off':
g.park_off()
elif task == 'sync':
g.sync()
elif task == 'move':
g.move(sys.argv[2])
elif task == 'nudge':
nudge(g, sys.argv[2])
elif task == 'slew':
ra = Angle(sys.argv[2], unit='hour')
dec = Angle(sys.argv[3], unit='degree')
slew(g, ra, dec)
elif task == 'help':
usage()
else:
usage()
#y=GTO900()
#print y.ra(), y.dec()
#y.lst(), y.get_local_time(), y.get_local_date()
#print y.get_UTC_offset(), y.get_longitude(), y.get_latitude()
#print y.command_ra(12,01,34)
#print y.command_dec(-37,01,34)
#print y.slew()
#print y.ra(), y.dec()
#print y.alt(), y.az()
#print y.move('e')
#print y.alt(), y.az()
| 24.56701 | 130 | 0.544943 | 8,531 | 0.715988 | 0 | 0 | 0 | 0 | 0 | 0 | 3,834 | 0.321779 |
7230fd2e2774f3460096d023d321613a2a314e63 | 2,850 | py | Python | webscripts/plotlygraphs.py | KathrynDH/DataDashboard | 1bf61497480f778a1c7cc9ce9fc7fb48b3067606 | [
"MIT"
]
| null | null | null | webscripts/plotlygraphs.py | KathrynDH/DataDashboard | 1bf61497480f778a1c7cc9ce9fc7fb48b3067606 | [
"MIT"
]
| null | null | null | webscripts/plotlygraphs.py | KathrynDH/DataDashboard | 1bf61497480f778a1c7cc9ce9fc7fb48b3067606 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 15:56:55 2021
@author: Kathryn Haske
Create plotly graphs for webpage
"""
import pandas as pd
import plotly.graph_objs as go
def line_graph(x_list, df, name_col, y_cols, chart_title, x_label, y_label):
"""
Function to create plotly line graph
Args:
x_list (list): graph x values
df (Pandas DataFrame): dataframe to use for series and y-values
name_col (string): df column to use for series names
y_cols (int or slice object): df column numbers to use for y-values
chart_title (string): title for chart
x_label (string): label for x-axis
y_label (string): label for y-axis
Returns:
dictionary for plotly line graph
"""
graph = []
for index, row in df.iterrows():
graph.append(go.Scatter(
x = x_list,
y = row.tolist()[y_cols],
mode = 'lines',
name = row[name_col]
))
graph_layout = dict(title = chart_title,
xaxis = dict(title = x_label),
yaxis = dict(title = y_label),
)
return dict(data=graph, layout=graph_layout)
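# Usage sketch for line_graph with toy data (the column layout is an assumption:
# the name column first, then one column per x value):
#
#     df = pd.DataFrame({'country': ['A', 'B'], '2000': [1, 2], '2010': [3, 4]})
#     figure = line_graph(['2000', '2010'], df, 'country', slice(1, 3),
#                         'Title', 'Year', 'Value')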
def scatter_plot(x_vals, y_vals, names, chart_title, x_label, y_label):
"""
Function to create plotly scatter plot
Args:
x_vals (list): graph x values
y_vals (list): graph y values
names (list of strings): title for each marker
chart_title (string): title for chart
x_label (string): label for x-axis
y_label (string): label for y-axis
Returns:
dictionary for plotly scatter plot
"""
graph= [go.Scatter(
x = x_vals,
y = y_vals,
mode = 'markers',
text=names,
marker=dict(
color=y_vals, #set color equal to a variable
colorscale='Viridis' # plotly colorscale
)
)]
graph_layout = dict(title = chart_title,
xaxis = dict(title = x_label),
yaxis = dict(title = y_label),
)
return dict(data=graph, layout=graph_layout)
def bar_chart(x_vals, y_vals, chart_title, x_label, y_label):
"""
Function to create plotly bar graph
Args:
x_vals (list): graph x values
y_vals (list): graph y values
chart_title (string): title for chart
x_label (string): label for x-axis
y_label (string): label for y-axis
Returns:
dictionary for plotly bar graph
"""
graph = [go.Bar(
x = x_vals,
y = y_vals
)]
graph_layout = dict(title = chart_title,
xaxis = dict(title = x_label),
yaxis = dict(title = y_label),
)
return dict(data=graph, layout=graph_layout)
| 27.403846 | 76 | 0.567018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,431 | 0.502105 |
72314feeba462045a5c4c66db5b70dc7ce89e3a1 | 2,505 | py | Python | jsl/experimental/seql/agents/bfgs_agent.py | AdrienCorenflos/JSL | 8a3ba27179a2bd90207214fccb81df884b05c3d0 | [
"MIT"
]
| null | null | null | jsl/experimental/seql/agents/bfgs_agent.py | AdrienCorenflos/JSL | 8a3ba27179a2bd90207214fccb81df884b05c3d0 | [
"MIT"
]
| null | null | null | jsl/experimental/seql/agents/bfgs_agent.py | AdrienCorenflos/JSL | 8a3ba27179a2bd90207214fccb81df884b05c3d0 | [
"MIT"
]
| null | null | null | import jax.numpy as jnp
from jax import vmap
from jax.scipy.optimize import minimize
import chex
import typing_extensions
from typing import Any, NamedTuple
import warnings
from jsl.experimental.seql.agents.agent_utils import Memory
from jsl.experimental.seql.agents.base import Agent
from jsl.experimental.seql.utils import posterior_noise, mse
Params = Any
class ModelFn(typing_extensions.Protocol):
def __call__(self,
params: chex.Array,
inputs: chex.Array):
...
class ObjectiveFn(typing_extensions.Protocol):
def __call__(self,
params: chex.Array,
inputs: chex.Array,
outputs: chex.Array,
model_fn: ModelFn):
...
class BeliefState(NamedTuple):
params: Params
class Info(NamedTuple):
# True if optimization succeeded
success: bool
'''
0 means converged (nominal)
1=max BFGS iters reached
3=zoom failed
4=saddle point reached
5=max line search iters reached
-1=undefined
'''
status: int
# final function value.
loss: float
def bfgs_agent(objective_fn: ObjectiveFn = mse,
model_fn: ModelFn = lambda mu, x: x @ mu,
obs_noise: float = 0.01,
buffer_size: int = jnp.inf,
threshold: int = 1):
assert threshold <= buffer_size
memory = Memory(buffer_size)
def init_state(x: chex.Array):
return BeliefState(jnp.squeeze(x))
def update(belief: BeliefState,
x: chex.Array,
y: chex.Array):
assert buffer_size >= len(x)
x_, y_ = memory.update(x, y)
if len(x_) < threshold:
warnings.warn("There should be more data.", UserWarning)
info = Info(False, -1, jnp.inf)
return belief, info
optimize_results = minimize(objective_fn,
belief.params,
(x_, y_, model_fn),
method="BFGS")
info = Info(optimize_results.success,
optimize_results.status,
optimize_results.fun)
return BeliefState(optimize_results.x), info
def predict(belief: BeliefState,
x: chex.Array):
d, *_ = x.shape
noise = obs_noise * jnp.eye(d)
return model_fn(belief.params, x), noise
return Agent(init_state, update, predict) | 26.09375 | 68 | 0.578842 | 744 | 0.297006 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.103393 |
72320fd783db7905693b184e50b586992cf4d02b | 2,379 | py | Python | abusech/urlhaus.py | threatlead/abusech | 6c62f51f773cb17ac6943d87fb697ce1e9dae049 | [
"MIT"
]
| null | null | null | abusech/urlhaus.py | threatlead/abusech | 6c62f51f773cb17ac6943d87fb697ce1e9dae049 | [
"MIT"
]
| null | null | null | abusech/urlhaus.py | threatlead/abusech | 6c62f51f773cb17ac6943d87fb697ce1e9dae049 | [
"MIT"
]
| null | null | null | from .abusech import AbuseCh
from collections import namedtuple
from datetime import datetime
class UrlHaus(AbuseCh):
base_url = 'https://urlhaus.abuse.ch'
urls = namedtuple('UrlHaus', ['id', 'date_added', 'url', 'url_status', 'threat', 'tags', 'urlhaus_link', 'reporter'])
payloads = namedtuple('Payload', ['timestamp', 'url', 'type', 'md5', 'sha256', 'signature'])
def parse_url_csv(self, urllist):
data = []
for row in urllist:
data.append(self.urls(
id=int(row[0].strip('"')),
date_added=datetime.strptime(row[1].strip('"'), self.date_format),
url=row[2].strip('"'),
url_status=row[3].strip('"'),
threat=row[4].strip('"'),
tags=row[5].strip('"'),
urlhaus_link=row[6].strip('"'),
reporter=row[7].strip('"')
))
return data
def get_data_dump(self):
response = self.get_url(url='{0}/downloads/csv/'.format(self.base_url))
urllist = self.parse_validate_csv(response=response, columns=8)
return self.parse_url_csv(urllist=urllist)
def get_recent_urls(self):
response = self.get_url(url='{0}/downloads/csv_recent/'.format(self.base_url))
urllist = self.parse_validate_csv(response=response, columns=8)
return self.parse_url_csv(urllist=urllist)
def get_online_urls(self):
response = self.get_url(url='{0}/downloads/csv_online/'.format(self.base_url))
urllist = self.parse_validate_csv(response=response, columns=8)
return self.parse_url_csv(urllist=urllist)
def get_payloads(self):
response = self.get_url(url='{0}/downloads/payloads/'.format(self.base_url))
urllist = self.parse_validate_csv(response=response, columns=6)
data = []
for row in urllist:
data.append(self.payloads(
timestamp=datetime.strptime(row[0].strip('"'), self.date_format),
url=row[1].strip('"'),
type=row[2].strip('"').lower(),
md5=row[3].strip('"') if len(row[3].strip('"')) == 32 else None,
sha256=row[4].strip('"') if len(row[4].strip('"')) == 64 else None,
signature=None if row[5].strip('"').lower() == "none" else row[5].strip('"').lower(),
))
return data
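# Usage sketch (requires network access; the AbuseCh base class is assumed to
# provide the get_url/parse_validate_csv helpers used above):
#
#     feed = UrlHaus()
#     for entry in feed.get_online_urls():
#         print(entry.url, entry.threat, entry.tags)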
| 43.254545 | 121 | 0.584279 | 2,282 | 0.959227 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.133249 |
7232736c521560e15f88e41ebeb5b1e597203059 | 114 | py | Python | ufmt/tests/__init__.py | pmeier/ufmt | 29385731d3399d065968921b7502d321acf6faef | [
"MIT"
]
| null | null | null | ufmt/tests/__init__.py | pmeier/ufmt | 29385731d3399d065968921b7502d321acf6faef | [
"MIT"
]
| null | null | null | ufmt/tests/__init__.py | pmeier/ufmt | 29385731d3399d065968921b7502d321acf6faef | [
"MIT"
]
| null | null | null | # Copyright 2021 John Reese
# Licensed under the MIT license
from .cli import CliTest
from .core import CoreTest
| 19 | 32 | 0.789474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.517544 |
7233257f2eb3efc2be88861adf3e83bb76f78498 | 442 | py | Python | tests/components/eafm/conftest.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
]
| 2 | 2021-05-19T19:05:08.000Z | 2021-06-06T06:51:05.000Z | tests/components/eafm/conftest.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
]
| 56 | 2020-08-03T07:30:54.000Z | 2022-03-31T06:02:04.000Z | tests/components/eafm/conftest.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
]
| 2 | 2020-12-25T16:31:22.000Z | 2020-12-30T20:53:56.000Z | """eafm fixtures."""
import pytest
from tests.async_mock import patch
@pytest.fixture()
def mock_get_stations():
"""Mock aioeafm.get_stations."""
with patch("homeassistant.components.eafm.config_flow.get_stations") as patched:
yield patched
@pytest.fixture()
def mock_get_station():
"""Mock aioeafm.get_station."""
with patch("homeassistant.components.eafm.sensor.get_station") as patched:
yield patched
| 22.1 | 84 | 0.719457 | 0 | 0 | 328 | 0.742081 | 364 | 0.823529 | 0 | 0 | 189 | 0.427602 |
7233678cd98a3bf61296f7c1aa2006b01024a6ac | 5,894 | py | Python | thorbanks/checks.py | Jyrno42/django-thorbanks | a8e2daf20b981aecb0c8ee76b0474b6c8e2baad1 | [
"BSD-3-Clause"
]
| 6 | 2015-06-15T12:47:05.000Z | 2019-04-24T01:32:12.000Z | thorbanks/checks.py | Jyrno42/django-thorbanks | a8e2daf20b981aecb0c8ee76b0474b6c8e2baad1 | [
"BSD-3-Clause"
]
| 13 | 2015-12-23T14:29:26.000Z | 2021-02-18T18:35:56.000Z | thorbanks/checks.py | Jyrno42/django-thorbanks | a8e2daf20b981aecb0c8ee76b0474b6c8e2baad1 | [
"BSD-3-Clause"
]
| 3 | 2016-08-08T10:35:39.000Z | 2020-12-29T23:10:55.000Z | import os
from django.conf import settings
from django.core.checks import Error, register
from thorbanks.settings import configure, parse_banklinks
@register
def check_model_settings(app_configs, **kwargs):
issues = []
manual_models = getattr(settings, "THORBANKS_MANUAL_MODELS", None)
if manual_models is None: # No manual models
# If no manual models then we need to ensure that `thorbanks_models` is configured correctly
if "thorbanks_models" not in settings.INSTALLED_APPS:
issues.append(
Error(
"thorbanks_models must be added to settings.INSTALLED_APPS when not using THORBANKS_MANUAL_MODELS",
id="thorbanks.E001",
)
)
migration_modules = getattr(settings, "MIGRATION_MODULES", {})
if not migration_modules.get("thorbanks_models", ""):
issues.append(
Error(
"Thorbanks is missing from settings.MIGRATION_MODULES",
hint="Add it to your settings like this - `MIGRATION_MODULES = "
'{ "thorbanks_models": "shop.thorbanks_migrations" }.',
id="thorbanks.E002",
)
)
else:
if manual_models is not None and not isinstance(manual_models, dict):
issues.append(
Error(
"settings.THORBANKS_MANUAL_MODELS must be a dict",
hint="See docstring of thorbanks.settings.get_model.",
id="thorbanks.E003",
)
)
if "thorbanks_models" in settings.INSTALLED_APPS:
issues.append(
Error(
"thorbanks_models should not be added to "
"settings.INSTALLED_APPS when using THORBANKS_MANUAL_MODELS",
id="thorbanks.E011",
)
)
return issues
@register
def check_banklink_settings(app_configs, **kwargs):
issues = []
links = parse_banklinks(getattr(settings, "BANKLINKS", None))
if links and isinstance(links, dict):
# Verify it contains valid data
for bank_name, data in links.items():
if len(bank_name) > 16:
issues.append(
Error(
"settings.BANKLINKS keys are limited to 16 characters ({})".format(
bank_name
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E005",
)
)
if not isinstance(data, dict):
issues.append(
Error(
"settings.BANKLINKS['{}'] must be a dict with settings for the bank".format(
bank_name
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E006",
)
)
continue
required_keys = [
"REQUEST_URL",
"PRIVATE_KEY",
"PUBLIC_KEY",
"CLIENT_ID",
"BANK_ID",
"PROTOCOL",
"PRINTABLE_NAME",
"IMAGE_PATH",
"TYPE",
"ORDER",
]
if data["PROTOCOL"] == "ipizza":
for key in required_keys:
if key not in data or data[key] is None:
issues.append(
Error(
"settings.BANKLINKS['{}']: {} is required".format(
bank_name, key
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E007",
)
)
if data["PUBLIC_KEY"] is not None and not os.path.isfile(
data["PUBLIC_KEY"]
):
issues.append(
Error(
"settings.BANKLINKS['{}']: PUBLIC_KEY file `{}` does not exist".format(
bank_name, data["PUBLIC_KEY"]
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E008",
)
)
if data["PRIVATE_KEY"] is not None and not os.path.isfile(
data["PRIVATE_KEY"]
):
issues.append(
Error(
"settings.BANKLINKS['{}']: PRIVATE_KEY file `{}` does not exist".format(
bank_name, data["PRIVATE_KEY"]
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E009",
)
)
else:
issues.append(
Error(
"settings.BANKLINKS['{}']: PROTOCOL must be ipizza".format(
bank_name
),
hint="See docstring of thorbanks.settings.parse_banklinks.",
id="thorbanks.E010",
)
)
else:
issues.append(
Error(
"settings.BANKLINKS must be a dict",
hint="See docstring of thorbanks.settings.parse_banklinks for reference.",
id="thorbanks.E004",
)
)
configure()
return issues
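# A hedged example of settings that would satisfy the ipizza checks above;
# every value is a placeholder, not a working configuration:
#
# BANKLINKS = {
#     "swedbank": {
#         "PROTOCOL": "ipizza",
#         "REQUEST_URL": "https://example.com/banklink",
#         "PRIVATE_KEY": "/etc/keys/swedbank_private.pem",
#         "PUBLIC_KEY": "/etc/keys/swedbank_public.pem",
#         "CLIENT_ID": "uid100001",
#         "BANK_ID": "SWEDBANK",
#         "PRINTABLE_NAME": "Swedbank",
#         "IMAGE_PATH": "img/banklinks/swedbank.png",
#         "TYPE": "banklink",
#         "ORDER": 1,
#     },
# }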
| 35.293413 | 119 | 0.449779 | 0 | 0 | 0 | 0 | 5,738 | 0.973532 | 0 | 0 | 1,868 | 0.316932 |
72349a7b999fcd7724c457b5d3ee54f95ec82969 | 36,028 | py | Python | otk/trains.py | draustin/otk | c6e91423ec79b85b380ee9385f6d27c91f92503d | [
"MIT"
]
| 7 | 2020-05-17T14:26:42.000Z | 2022-02-14T04:52:54.000Z | otk/trains.py | uamhforever/otk | c6e91423ec79b85b380ee9385f6d27c91f92503d | [
"MIT"
]
| 17 | 2020-04-10T22:50:00.000Z | 2020-06-18T04:54:19.000Z | otk/trains.py | uamhforever/otk | c6e91423ec79b85b380ee9385f6d27c91f92503d | [
"MIT"
]
| 1 | 2022-02-14T04:52:45.000Z | 2022-02-14T04:52:45.000Z | """Defining and analysing axisymmetric optical systems."""
import itertools
from functools import singledispatch
from dataclasses import dataclass
from abc import ABC, abstractmethod
from typing import Sequence, Tuple, Mapping
import numpy as np
import scipy.optimize
from . import abcd, paraxial, functions, ri
from .functions import calc_sphere_sag
# TODO Make Interface composition of Surface and refractive indices.
@dataclass
class Interface:
"""An interface between two media in an axisymmetric optical system.
Attributes:
n1: Refractive index on first side.
n2: Index on second side.
roc (scalar): Radius of curvature.
radius (scalar): Radius/half-diagonal of interface.
"""
n1: ri.Index
n2: ri.Index
roc: float
radius: float
def __post_init__(self):
self.sag = functions.calc_sphere_sag(self.roc, self.radius)
def __str__(self):
return '%s - ROC %.3g mm, radius %.3g mm - %s'%(self.n1, self.roc*1e3, self.radius*1e3, self.n2)
#
# def __repr__(self):
# return 'Interface(n1=%r, n2=%r, roc=%g, radius=%g)'%(self.n1, self.n2, self.roc, self.radius)
def get_abcd(self, lamb):
return abcd.curved_interface(self.n1(lamb), self.n2(lamb), self.roc)
def make_parameter_string(self):
if np.isfinite(self.roc):
return 'ROC %.3f mm'%(self.roc*1e3)
else:
return 'plane'
def reverse(self):
return Interface(self.n2, self.n1, -self.roc, self.radius)
def get_points(self, num_points=32):
"""Returns (num_points, 2) array."""
x = np.linspace(-self.radius, self.radius, num_points)
# TODO use calc_sag.
h = calc_sphere_sag(self.roc, x)
xys = np.c_[x, h]
return xys
def calc_sag(self, rho, derivative: bool = False):
"""Calculate sag of surface.
Positive ROC means positive sag.
Args:
rho: Distance from center.
derivative: If True, derivative is returned as well.
"""
sag = functions.calc_sphere_sag(self.roc, rho)
if derivative:
grad_sag = functions.calc_sphere_sag(self.roc, rho, True)
return sag, grad_sag
else:
return sag
def calc_aperture(self, x, y, shape: str):
"""Returns binary aperture mask at (x, y)."""
if shape == 'circle':
            return x**2 + y**2 <= self.radius**2
elif shape == 'square':
half_side = self.radius/2**0.5
return (abs(x) <= half_side) & (abs(y) <= half_side)
else:
raise ValueError(f'Unknown shape {shape}.')
def calc_mask(self, lamb, rho, derivative: bool = False):
n1 = self.n1(lamb)
n2 = self.n2(lamb)
jdeltak = 2.j*np.pi/lamb*(n1 - n2)
sag, grad_sag = self.calc_sag(rho, True)
f = np.exp(jdeltak*sag)
if derivative:
gradf = jdeltak*grad_sag*f
return f, gradf
else:
return f
@dataclass
class ConicInterface(Interface):
"""An Interface with conic and aspheric terms.
We take kappa as defined in Spencer and Murty, JOSA 52(6) p 672, 1962. Note that in some other contexts,
k = kappa - 1 is used as the conic constant. This is the case in Zemax i.e. kappa here equals Zemax conic constant
plus 1.
Useful links:
https://www.iap.uni-jena.de/iapmedia/de/Lecture/Advanced+Lens+Design1393542000/ALD13_Advanced+Lens+Design+7+_+Aspheres+and+freeforms.pdf
Args:
n1: Index on first side.
n2: Index on second side.
roc (scalar): Radius of curvature.
kappa (scalar): Conic parameter. Special values:
kappa < 0: Hyperboloid.
kappa = 0: Paraboloid.
0 < kappa < 1: Elipsoid of revolution about major axis.
kappa = 1: Sphere
kappa > 1: Elipsoid of revolution about minor axis.
alphas (sequence): Second and higher order coefficients.
"""
kappa: float
alphas: Sequence
def __post_init__(self):
self.alphas = np.asarray(self.alphas)
def __str__(self):
        return '%s - ROC %g mm, radius %g mm, kappa %.3f, alphas %s - %s'%(
            self.n1, self.roc*1e3, self.radius*1e3, self.kappa, ', '.join('%g'%v for v in self.alphas), self.n2)
#
# def __repr__(self):
# return 'ConicInterface(%r, %r, %r, %r, %r, %r)'%(
# self.n1, self.n2, self.roc, self.radius, self.kappa, self.alphas)
def reverse(self):
return ConicInterface(self.n2, self.n1, -self.roc, self.radius, self.kappa, -self.alphas)
def calc_sag(self, rho, derivative: bool = False):
"""Calculate sag of surface.
Positive ROC means positive sag.
Args:
rho: Distance from center.
derivative: If True, tuple of sag and its derivative is returned.
"""
sag = functions.calc_conic_sag(self.roc, self.kappa, self.alphas, rho, False)
if derivative:
grad_sag = functions.calc_conic_sag(self.roc, self.kappa, self.alphas, rho, True)
return sag, grad_sag
else:
return sag
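# With the kappa convention above (Spencer & Murty), the conic part of the sag is
#     z(rho) = (rho**2/R) / (1 + sqrt(1 - kappa*rho**2/R**2)),
# which reduces to the sphere sag for kappa = 1; the alphas add polynomial terms
# on top (the exact powers are defined by functions.calc_conic_sag).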
@dataclass
class Train:
"""Immutable class representing a sequence of Interfaces with defined spaces in between and at the ends.
    Systems that begin and/or end on an interface are handled as special cases by setting the first and/or last element
of spaces to zero.
TODO If needed, many methods are candidates for memoization.
TODO make frozen
TODO add make method instead of correcting in post_init (cleaner type checking).
"""
interfaces: Tuple[Interface]
spaces: Tuple[float]
def __post_init__(self):
assert len(self.spaces) == len(self.interfaces) + 1
# assert interfaces[0].n1 is None
# assert interfaces[-1].n2 is None
for i0, i1 in zip(self.interfaces[:-1], self.interfaces[1:]):
assert i0.n2 == i1.n1
self.interfaces = tuple(self.interfaces)
self.spaces = tuple(float(space) for space in self.spaces)
self.length = sum(self.spaces)
# self.mf = self._calc_abcd() # self.mb = np.linalg.inv(self.mf)
def __str__(self):
def format_space(s):
return f'{s*1e3:.3g} mm'
return format_space(self.spaces[0]) + ' / ' + ' / '.join(f'{i} / {format_space(s)}' for i, s in zip(self.interfaces, self.spaces[1:]))
def make_parameter_strings(self):
strs = ['total length %.3f mm'%(self.length*1e3)]
interface = self.interfaces[0]
strs.append('%s, thickness %.3f mm'%(interface.n1, self.spaces[0]*1e3))
for interface, space in zip(self.interfaces, self.spaces[1:]):
strs.append(interface.make_parameter_string())
strs.append('%s, thickness %.3f mm'%(interface.n2, space*1e3))
return tuple(strs)
def pad(self, space0, space1=None):
if space1 is None:
space1 = space0
spaces = (self.spaces[0] + space0,) + self.spaces[1:-1] + (self.spaces[-1] + space1,)
return Train(self.interfaces, spaces)
def pad_to_transform(self, lamb=None):
"""Pad first and last space so train performs Fourier transform.
Returns:
train: Same as original except that first and last space are modified.
"""
        return self.pad(*self.get_working_distances(lamb))
def calc_abcd(self, lamb=None):
m = abcd.propagation(self.spaces[0])
for interface, space in zip(self.interfaces, self.spaces[1:]):
# Apply interface.
m = np.matmul(interface.get_abcd(lamb), m)
# Propagate to next surface (or exit surface).
m = np.matmul(abcd.propagation(space), m)
return m
def calc_abcd_bi(self, lamb=None):
mf = self.calc_abcd(lamb)
mb = np.linalg.inv(mf)
return mf, mb
def get_focal_lengths(self, lamb=None):
"""Rear and front focal lengths."""
mf, mb = self.calc_abcd_bi(lamb)
return np.asarray((1/mb[1, 0], -1/mf[1, 0]))
def get_effective_focal_length(self, lamb=None):
focal_lengths = self.get_focal_lengths(lamb)
efl = focal_lengths[0]/self.interfaces[0].n1(lamb)
assert np.isclose(focal_lengths[1]/self.interfaces[-1].n2(lamb), efl)
return efl
def get_working_distances(self, lamb=None):
# Move this into abcd module?
mf, mb = self.calc_abcd_bi(lamb)
wf = -mf[0, 0]/mf[1, 0]
wb = mb[0, 0]/mb[1, 0]
return wb, wf
def get_principal_planes(self, lamb=None):
"""Calculate principal planes.
The distances are defined from the ends of the system, including the first and last spaces. They are positive to
the right.
Returns:
ppb, ppf: Distances to principal planes before and after the system.
"""
fb, ff = self.get_focal_lengths(lamb)
wb, wf = self.get_working_distances(lamb)
ppb = fb - wb
ppf = wf - ff
return ppb, ppf
def get_petzval_sum(self, lamb=None):
"""Calculate Petzval sum of train.
Args:
lamb (scalar): Wavelength.
Returns:
scalar: Petzval sum.
"""
ns = itertools.chain((i.n1 for i in self.interfaces), [self.interfaces[-1].n2])
n0s = [n(lamb) for n in ns]
rocs = [i.roc for i in self.interfaces]
return paraxial.calc_multi_element_lens_petzval_sum(n0s, rocs)
@classmethod
def make_singlet_transform1(cls, n: ri.Index, ws: Tuple[float, float], f: float, radius: float, lamb: float = None,
interface_class=Interface, interface_args=None, interface_kwargs=None, n_ext: ri.Index = ri.vacuum) -> 'Train':
"""Make a singlet Fourier transform given working distances and focal length.
Args:
n (ri.Index): Defines refractive index.
            ws (scalar or pair of scalars): Working distance(s).
f (scalar): Transform focal length.
lamb (scalar): Design wavelength.
        Returns:
            Train: the transform train, with the working distances as first and last spaces.
        """
try:
ws = ws[0], ws[1]
except TypeError:
ws = ws, ws
if interface_args is None:
interface_args = (), ()
if interface_kwargs is None:
interface_kwargs = {}, {}
rocs, d = paraxial.design_singlet_transform(n(lamb), ws, f, n_ext(lamb))
interfaces = (interface_class(n_ext, n, rocs[0], radius, *interface_args[0], **interface_kwargs[0]),
interface_class(n, n_ext, rocs[1], radius, *interface_args[1], **interface_kwargs[1]))
train = cls(interfaces, (ws[0], d, ws[1]))
return train
@classmethod
def make_singlet_transform2(cls, n, f, shape, thickness, radius, lamb=None):
"""Make singlet transform given shape and thickness.
Args:
n: Refractive index.
f: Focal length.
shape: Coddington shape factor. 0 = symmetric. +1 is convex-plano, -1 is plano-convex.
thickness: Center thickness.
lamb (scalar): Design wavelength.
Returns:
Train object, including working distances.
"""
n0 = n(lamb)
rocs = paraxial.design_singlet(n0, f, shape, thickness)
f, h1, h2 = paraxial.calc_thick_spherical_lens(n0, *rocs, thickness)
interfaces = Interface(ri.vacuum, n, rocs[0], radius), Interface(n, ri.vacuum, rocs[1], radius)
train = Train(interfaces, (f - h1, thickness, f + h2))
assert np.allclose(train.get_focal_lengths(lamb), (f, f))
assert np.allclose(train.get_working_distances(lamb), (0, 0))
return train
@classmethod
def design_singlet(cls, n:ri.Index, f:float, shape:float, thickness:float, radius:float, lamb=None, ne=ri.vacuum):
"""Make train representing a singlet lens.
Args:
n: Lens refractive index.
f: Focal length, inverse of lens power. Sometimes called effective focal length. Distance to
focus of parallel rays divided by external refractive index.
shape: Coddington shape factor. 0 = symmetric. +1 is convex-plano, -1 is plano-convex.
thickness: Center thickness.
radius: Radius/half-diagonal.
lamb: Design wavelength.
ne: External refractive index.
Returns:
Train: First and last space is zero.
"""
n0 = n(lamb)
ne0 = ne(lamb)
rocs = paraxial.design_singlet(n0, f, shape, thickness, ne0)
f_, h1, h2 = paraxial.calc_thick_spherical_lens(n0, *rocs, thickness, ne0)
assert np.isclose(f_, f)
interfaces = Interface(ne, n, rocs[0], radius), Interface(n, ne, rocs[1], radius)
train = Train(interfaces, (0, thickness, 0))
assert np.allclose(train.get_focal_lengths(lamb)/ne0, f)
return train
    @classmethod
    def make_singlet_transform_conic(cls, n, ws, f, radius, lamb=None, kappas=(1, 1), alphass=((), ())):
        return cls.make_singlet_transform1(n, ws, f, radius, lamb, ConicInterface, list(zip(kappas, alphass)))
def reverse(self):
interfaces = tuple(i.reverse() for i in self.interfaces[::-1])
return Train(interfaces, self.spaces[::-1])
def __add__(self, other):
interfaces = self.interfaces + other.interfaces
if len(self.interfaces) > 0 and len(other.interfaces) > 0:
assert self.interfaces[-1].n2 == other.interfaces[0].n1
spaces = self.spaces[:-1] + (self.spaces[-1] + other.spaces[0],) + other.spaces[1:]
return Train(interfaces, spaces)
def pad_to_half_transform(self, lamb: float = None, inner_space: float = None, f: float = None):
"""Adjust input and output space to perform half of a lens.
Only one of inner_space and f should be given.
Args:
lamb: Design wavelength.
inner_space: Space added between self and reversed self.
f: Focal length of resulting transform.
Returns:
train
"""
if inner_space is None:
# Need to add the inner principal plane distances.
inner_space = paraxial.infer_combined_lens_separation(*self.get_focal_lengths(lamb), f) + 2* \
self.get_principal_planes(lamb)[1]
else:
assert f is None
pair = (self + Train([], [inner_space]) + self.reverse()).pad_to_transform(lamb)
train = Train(self.interfaces, (pair.spaces[0],) + self.spaces[1:-1] + (self.spaces[-1] + inner_space/2,))
return train
def make_html_table(self, doc, lamb=None):
"""Make HTML table list spaces and interfaces.
Args:
doc (yattag.Doc object): Table is added to this.
"""
with doc.tag('table', cellspacing=10):
doc.line('caption', 'List of material, its thickness, and profile of the following interface')
with doc.tag('tr'):
doc.line('th', 'Color')
doc.line('th', 'Material')
doc.line('th', 'Index')
doc.line('th', 'Thickness (mm)')
doc.line('th', 'ROC (mm)')
n = self.interfaces[0].n1
for space, interface in zip(self.spaces, self.interfaces + (None,)):
with doc.tag('tr'):
color = n.section_color
if color is None:
color = (1, 1, 1)
# doc.line('td', ' ', bgcolor='rgb(%d,%d,%d)'%tuple(int(round(c*255)) for c in color), width=100)
doc.line('td', ' ', style='background-color:rgb(%d,%d,%d)'%tuple(int(round(c*255)) for c in color),
width=100)
doc.line('td', n.name)
doc.line('td', n(lamb))
doc.line('td', '%.3f'%(space*1e3))
if interface is not None:
doc.line('td', '%.3f mm'%(interface.roc*1e3))
n = interface.n2
def subset(self, start: int = None, stop: int = None):
"""Make train consisting of interval subset of interfaces."""
if start is None:
start = 0
if stop is None:
stop = len(self.interfaces)
if stop < 0:
spaces_stop = stop + len(self.spaces)
else:
spaces_stop = stop + 1
return Train(self.interfaces[start:stop], self.spaces[start:spaces_stop])
def crop_to_finite(self) -> 'Train':
# Crop to exclude surfaces of infinite thickness.
infs = [n for n, space in enumerate(self.spaces) if np.isinf(space)]
if len(infs) == 0:
cropped = self
else:
if len(infs) == 1:
inf = infs[0]
if inf < len(self.spaces)/2:
spaces = (0.,) + self.spaces[inf + 1:]
interfaces = self.interfaces[inf:]
else:
spaces = self.spaces[:inf] + (0.,)
interfaces = self.interfaces[:inf]
else:
raise ValueError(f"Don't know how to handle {len(infs)} surfaces with infinite thickness.")
cropped = Train(interfaces, spaces)
return cropped
def consolidate(self) -> 'Train':
"""Remove interfaces with same material on either side."""
# Remove same material interfaces
space = self.spaces[0]
n = self.interfaces[0].n1
spaces = []
interfaces = []
for next_space, interface in zip(self.spaces[1:], self.interfaces):
if interface.n2 == n:
space += next_space
else:
spaces.append(space)
interfaces.append(interface)
space = next_space
n = interface.n2
spaces.append(space)
return Train(interfaces, spaces)
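# Usage sketch for Train.design_singlet, assuming `glass` is an otk.ri.Index
# instance for the lens material (its construction is not shown in this module):
#
#     train = Train.design_singlet(glass, f=100e-3, shape=0, thickness=3e-3,
#                                  radius=12.5e-3, lamb=587.6e-9)
#     fb, ff = train.get_focal_lengths(587.6e-9)  # both ~100 mm in vacuum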
class Surface(ABC):
"""An axisymmetric surface between two media.
TODO define radius. Is sag constant outside this i.e. is sag(rho) = sag(radius) for rho > radius?"""
roc: float
radius: float
def __init__(self, roc: float, radius: float):
self.roc = float(roc)
self.radius = float(radius)
def __eq__(self, other):
return type(self) is type(other) and self.roc == other.roc and self.radius == other.radius
def isclose(self, other):
return np.isclose(self.roc, other.roc) and np.isclose(self.radius, other.radius)
@property
@abstractmethod
def sag_range(self) -> np.ndarray:
"""Returns minimum and maximum sag.
TODO must be tight bound? Convert to method so can specify rho interval?"""
pass
@abstractmethod
def to_interface(self, n1: ri.Index, n2: ri.Index):
pass
@abstractmethod
def reverse(self) -> 'Surface':
pass
@abstractmethod
def calc_sag(self, rho, derivative: bool = False):
"""Calculate sag of surface.
Positive ROC means positive sag.
Args:
rho: Distance from center.
derivative: If True, derivative is returned as well.
"""
pass
class SphericalSurface(Surface):
@property
def sag_range(self) -> np.ndarray:
# TODO this assumes monotonicity
sag = self.calc_sag(self.radius)
return np.asarray((min(sag, 0), max(sag, 0)))
def to_interface(self, n1, n2):
return Interface(n1, n2, self.roc, self.radius)
def reverse(self) -> 'SphericalSurface':
return SphericalSurface(-self.roc, self.radius)
def __repr__(self):
return f'SphericalSurface(roc={self.roc}, radius={self.radius})'
# TODO move to rt1
# def make_profile(self):
# if np.isfinite(self.roc):
# return rt1.SphericalProfile(self.roc)
# else:
# return rt1.PlanarProfile()
def calc_sag(self, rho, derivative: bool = False):
"""Calculate sag of surface.
Positive ROC means positive sag.
Args:
rho: Distance from center.
derivative: If True, derivative is returned as well.
"""
sag = functions.calc_sphere_sag(self.roc, rho)
if derivative:
grad_sag = functions.calc_sphere_sag(self.roc, rho, True)
return sag, grad_sag
else:
return sag
@singledispatch
def to_surface(obj, *args, **kwargs) -> Surface:
raise NotImplementedError
@to_surface.register
def _(obj: Interface, radius: float=None) -> Surface:
if radius is None:
radius = obj.radius
return SphericalSurface(obj.roc, radius)
class ConicSurface(Surface):
def __init__(self, roc: float, radius: float, kappa: float, alphas: Sequence[float] = None):
Surface.__init__(self, roc, radius)
self.kappa = kappa
if alphas is None:
alphas = []
self.alphas = np.asarray(alphas)
def to_interface(self, n1, n2):
return ConicInterface(n1, n2, self.roc, self.radius, self.kappa, self.alphas)
def reverse(self) -> 'ConicSurface':
return ConicSurface(-self.roc, self.radius, self.kappa, -self.alphas)
# TODO move to rt1
# def make_profile(self):
# return rt1.ConicProfile(self.roc, self.kappa, self.alphas)
@property
def sag_range(self) -> np.ndarray:
s = self.calc_sag(self.radius)
return np.array((min(s, 0), max(s, 0)))
def __repr__(self):
return f'ConicSurface(roc={self.roc}, radius={self.radius}, kappa={self.kappa}, alphas={self.alphas})'
def __eq__(self, other):
return Surface.__eq__(self, other) and self.kappa == other.kappa and np.array_equal(self.alphas, other.alphas)
def isclose(self, other):
return Surface.isclose(self, other) and np.isclose(self.kappa, other.kappa) and np.allclose(self.alphas, other.alphas)
def calc_sag(self, rho, derivative: bool = False):
"""Calculate sag of surface.
Positive ROC means positive sag.
Args:
rho: Distance from center.
derivative: If True, tuple of sag and its derivative is returned.
"""
sag = functions.calc_conic_sag(self.roc, self.kappa, self.alphas, rho, False)
if derivative:
grad_sag = functions.calc_conic_sag(self.roc, self.kappa, self.alphas, rho, True)
return sag, grad_sag
else:
return sag
@to_surface.register
def _(obj: ConicInterface, radius: float=None) -> ConicSurface:
if radius is None:
radius = obj.radius
return ConicSurface(obj.roc, radius, obj.kappa, obj.alphas)
class SegmentedInterface(Interface):
def __init__(self, n1, n2, segments: Sequence[Surface], sags: Sequence[float]):
radius = sum(s.radius for s in segments)
Interface.__init__(self, n1, n2, segments[0].roc, radius)
if sags is None:
sags = np.zeros((len(segments),))
sags = np.asarray(sags)
assert len(sags) == len(segments)
self.segments = segments
self.sags = sags
def __repr__(self):
return 'SegmentedInterface(%r, %r, %r, %r)'%(self.n1, self.n2, self.segments, self.sags)
def __str__(self):
# TODO improve
return '%s - (%s) - %s'%(self.n1, ', '.join(str(s) for s in self.segments), self.n2)
def reverse(self):
segments = [s.reverse() for s in self.segments]
return SegmentedInterface(self.n2, self.n1, segments, -self.sags)
# TODO move to rt1
# def make_profile(self):
# # TODO move to single dispatch in rt
# assert len(self.segments) == 2
# profiles = [s.make_profile() for s in self.segments]
# boundary = rt1.CircleBoundary(self.segments[0].radius*2)
# return rt1.BinaryProfile(profiles, boundary)
def calc_sag(self, rho, derivative: bool = False):
raise NotImplementedError()
# TODO make radius of each segment the outer radius rather than the incremental radius.
class SegmentedSurface(Surface):
def __init__(self, segments: Sequence[Surface], sags: Sequence[float]):
radius = sum(s.radius for s in segments)
Surface.__init__(self, segments[0].roc, radius)
sags = np.asarray(sags)
assert len(sags) == len(segments)
self.segments = tuple(segments)
self.sags = sags
def __repr__(self):
return f'SegmentedSurface({self.segments}, {self.sags})'
#def __str__(self):
# return ', '.join(str(s) for s in self.segments)
def reverse(self) -> 'SegmentedSurface':
segments = [s.reverse() for s in self.segments]
        return SegmentedSurface(segments, -self.sags)
def to_interface(self, n1, n2):
return SegmentedInterface(n1, n2, self.segments, self.sags)
def calc_sag(self, rho: float, derivative: bool = False):
rho = float(rho)
rho0 = 0
for segment in self.segments:
rho0 += segment.radius
if rho <= rho0:
return segment.calc_sag(rho)
# TODO clamp?
return self.segments[-1].calc_sag(rho)
@property
def sag_range(self) -> np.ndarray:
rngs = [segment.sag_range for segment in self.segments]
return np.array((min(rng[0] for rng in rngs), max(rng[1] for rng in rngs)))
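# Sketch of the piecewise lookup done by SegmentedSurface.calc_sag. The radii below are
# illustrative and, per the TODO above, each segment's radius is incremental:
#
# inner = SphericalSurface(0.05, 0.01) # covers rho in [0, 0.01]
# outer = SphericalSurface(np.inf, 0.01) # covers rho in (0.01, 0.02]
# surface = SegmentedSurface((inner, outer), sags=(0, 0))
# surface.calc_sag(0.005) # delegated to inner
# surface.calc_sag(0.015) # delegated to outer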
@to_surface.register
def _(obj: SegmentedInterface, radius: float=None) -> SegmentedSurface:
# TODO sort this out
if radius is None:
radius = obj.radius
return SegmentedSurface(obj.segments, obj.sags)
class Singlet:
"""A singlet.
Attributes:
surfaces: Front and back surfaces.
thickness: Center thickness.
n: Defines internal refractive index.
"""
surfaces: Tuple[Surface, Surface]
thickness: float
n: ri.Index
radius: float
def __init__(self, surfaces: Tuple[Surface, Surface], thickness: float, n: ri.Index):
self.surfaces = surfaces
self.thickness = float(thickness)
self.n = n
self.radius = surfaces[0].radius
assert surfaces[1].radius == self.radius
def make_interfaces(self, n1, n2=None):
if n2 is None:
n2 = n1
return self.surfaces[0].to_interface(n1, self.n), self.surfaces[1].to_interface(self.n, n2)
def __repr__(self):
return f'Singlet(surfaces=({self.surfaces[0]}, {self.surfaces[1]}), thickness={self.thickness}, n={self.n})'
def reverse(self) -> 'Singlet':
return Singlet((self.surfaces[1].reverse(), self.surfaces[0].reverse()), self.thickness, self.n)
@classmethod
def from_focal_length(cls, f: float, n: ri.Index, center_thickness: float, radius: float, shape_factor: float = 0,
n_external: ri.Index = ri.vacuum, lamb: float = None):
rocs = paraxial.design_singlet(n(lamb), f, shape_factor, center_thickness, n_external(lamb))
return cls.from_rocs(rocs, n, center_thickness, radius)
@classmethod
def from_rocs(cls, rocs: Tuple[float, float], n: ri.Index, center_thickness: float, radius: float):
return cls(tuple(SphericalSurface(roc, radius) for roc in rocs), center_thickness, n)
def to_train(self, n1: ri.Index, n2: ri.Index = None) -> Train:
"""Convert singlet to train.
Args:
n1: Initial external refractive index.
n2: Final external refractive index. Defaults to n1.
Returns:
self as a train, with zero initial and final spaces.
"""
if n2 is None:
n2 = n1
interfaces = self.surfaces[0].to_interface(n1, self.n), self.surfaces[1].to_interface(self.n, n2)
return Train(interfaces, (0, self.thickness, 0))
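# Hedged usage sketch for Singlet: design from a focal length, then convert to a train.
# The numbers are illustrative and ri.fused_silica is an assumed index object from the
# ri module (any ri.Index would do):
#
# lens = Singlet.from_focal_length(f=0.1, n=ri.fused_silica, center_thickness=3e-3,
#     radius=12.5e-3, lamb=587.6e-9)
# train = lens.to_train(ri.vacuum) # zero initial and final spaces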
class SingletSequence:
def __init__(self, singlets: Sequence[Singlet], spaces: Sequence[float], n_external: ri.Index = ri.vacuum):
"""A sequence of singlets in a homogeneous medium with external and internal spaces.
The first and last spaces must be included i.e. len(spaces) = len(singlets) + 1.
"""
assert len(spaces) == len(singlets) + 1
self.singlets = tuple(singlets)
self.spaces = tuple(spaces)
self.n_external = n_external
self.center_length = sum(self.spaces) + sum(s.thickness for s in self.singlets)
def to_train(self):
interfaces = [i for singlet in self.singlets for i in singlet.make_interfaces(self.n_external)]
spaces = self.spaces[:1] + tuple(
s for singlet, space in zip(self.singlets, self.spaces[1:]) for s in (singlet.thickness, space))
return Train(interfaces, spaces)
def __add__(self, other):
assert self.n_external == other.n_external
spaces = self.spaces[:-1] + (self.spaces[-1] + other.spaces[0],) + other.spaces[1:]
return SingletSequence(self.singlets + other.singlets, spaces, self.n_external)
def __repr__(self):
return f'SingletSequence(({", ".join(repr(e) for e in self.singlets)}), ({", ".join("%.3f mm"%(s*1e3) for s in self.spaces)}), {self.n_external})'
def __str__(self):
def format_space(s):
return f'{s*1e3:.3g} mm'
return '(' + format_space(self.spaces[0]) + ' / ' + ' / '.join(f'{singlet} / {format_space(space)}' for singlet, space in zip(self.singlets, self.spaces[1:])) + ') in ' + str(self.n_external)
def reverse(self):
singlets = tuple(s.reverse() for s in self.singlets[::-1])
return SingletSequence(singlets, self.spaces[::-1], self.n_external)
def split(self, index: int, frac: float = 0.5) -> Tuple['SingletSequence', 'SingletSequence']:
before = SingletSequence(self.singlets[:index], self.spaces[:index] + (self.spaces[index]*frac,), self.n_external)
after = SingletSequence(self.singlets[index:], (self.spaces[index]*(1 - frac),) + self.spaces[index+1:], self.n_external)
return before, after
@classmethod
def from_train(cls, train: Train, radii='equal'):
assert len(train.interfaces)%2 == 0
n_external = train.interfaces[0].n1
assert all(i.n1 == n_external for i in train.interfaces[::2])
assert all(i.n2 == n_external for i in train.interfaces[1::2])
singlets = []
spaces = []
for i1, i2, space1, space2 in zip(train.interfaces[::2], train.interfaces[1::2], train.spaces[::2],
train.spaces[1::2]):
spaces.append(space1)
n = i1.n2
thickness = space2
if radii == 'max':
radius = max((i1.radius, i2.radius))
elif radii == 'equal':
radius = i1.radius
assert np.isclose(radius, i2.radius)
else:
radius = radii
singlets.append(Singlet((to_surface(i1, radius=radius), to_surface(i2, radius=radius)), thickness, n))
spaces.append(train.spaces[-1])
return cls(singlets, spaces, n_external)
@classmethod
def from_train2(cls, train: Train, radii_mode: str = 'equal'):
singlets = []
spaces = []
space = train.spaces[0]
index0 = 0
n_external = train.interfaces[0].n1
while index0 < len(train.interfaces):
interface0 = train.interfaces[index0]
if interface0.n1 != interface0.n2 and interface0.n2 != n_external:
spaces.append(space)
space = 0
radius = interface0.radius
for index1 in range(index0 + 1, len(train.interfaces)):
interface1 = train.interfaces[index1]
if radii_mode == 'max':
radius = max(radius, interface1.radius)
elif radii_mode == 'equal':
if radius != interface1.radius:
raise ValueError(f'Singlet that starts at interface {index0} has radius {radius} but interface {index1} has radius {interface1.radius}.')
else:
raise ValueError(f'Unknown radii_mode {radii_mode}.')
if interface1.n2 != interface0.n2:
thickness = sum(train.spaces[index0+1 : index1+1])
# TODO tidy this up after reimplementing Interface as composition of Surface.
def pad(i: Interface):
outer_radius = radius - i.radius
if outer_radius > 1e-6: # TODO civilize
outer_surface = SphericalSurface(np.inf, outer_radius)
outer_sag = i.calc_sag(i.radius)
i = SegmentedInterface(i.n1, i.n2, (to_surface(i), outer_surface), (0, outer_sag))
return i
interface0 = pad(interface0)
interface1 = pad(interface1)
singlets.append(Singlet((to_surface(interface0, radius=radius), to_surface(interface1, radius=radius)), thickness, interface0.n2))
break
else:
raise ValueError(f'Singlet that starts at interface {index0} does not finish.')
index0 = index1
else:
index0 += 1
space += train.spaces[index0]
spaces.append(space)
return cls(singlets, spaces, n_external)
@classmethod
def design_symmetric_singlet_transform(cls, n: ri.Index, f_transform: float, working_distance: float,
min_thickness: float, field_radius: float, shape_factor: float = 0, n_external: ri.Index = ri.vacuum,
lamb: float = None) -> 'SingletSequence':
"""Design a symmetric Fourier transform consisting of a pair of singlets.
Args:
n: Refractive index of singlets.
f_transform: Focal length of transform.
working_distance: Distance from focal planes to first piece of glass.
min_thickness: Minimum thickness of singlets i.e. edge thickness since they are positive.
field_radius: Radius of input field - determines lens sizes.
shape_factor: Coddington shape factor - positive means convex-plano.
n_external: Index of external medium.
lamb: Design wavelength.
Returns:
Half of the transform i.e. first space is on-axis working distance and last space is
to midpoint of transform pair.
"""
tan_theta = field_radius/f_transform
def make_half_transform_train(curvature):
rocs = paraxial.calc_rocs(curvature, shape_factor)
edge_propagation_distance = working_distance + functions.calc_sphere_sag(max(rocs[0], 0), field_radius)
radius = field_radius + edge_propagation_distance*tan_theta
thickness = paraxial.calc_center_thickness(rocs, radius, min_thickness)
singlet = Singlet(tuple(SphericalSurface(roc, radius) for roc in rocs), thickness, n)
sequence = SingletSequence((singlet,), (0, 0), n_external)
train = sequence.to_train()
half_transform_train = train.pad_to_half_transform(lamb, f=f_transform)
return half_transform_train
def calc_error(curvature):
train = make_half_transform_train(curvature)
space0 = working_distance - functions.calc_sphere_sag(min(train.interfaces[0].roc, 0), field_radius)
error = train.spaces[0] - space0
return error
curvature_guess = 1/((n(lamb) - n_external(lamb))*f_transform)
curvature = scipy.optimize.fsolve(calc_error, curvature_guess)
train = make_half_transform_train(curvature)
sequence = cls.from_train(train)
return sequence
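# Hedged usage sketch for the symmetric singlet Fourier transform designer. All values
# are illustrative and ri.fused_silica is an assumed index object:
#
# half = SingletSequence.design_symmetric_singlet_transform(n=ri.fused_silica,
#     f_transform=0.1, working_distance=5e-3, min_thickness=2e-3, field_radius=5e-3,
#     lamb=587.6e-9)
# full = half + half.reverse() # full transform; note len(spaces) == len(singlets) + 1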
| 39.634763 | 199 | 0.601865 | 34,771 | 0.96511 | 0 | 0 | 26,073 | 0.723687 | 0 | 0 | 10,545 | 0.292689 |
723547959ebc4a91f17440d870c4a23f152e86d1 | 4,705 | py | Python | rm_protection/rm_p.py | https-waldoww90-wadewilson-com/rm-protection | 4dcc678fa687373fb4439c5c4409f7649e653084 | ["MIT"] | 490 | 2017-02-03T14:15:50.000Z | 2022-03-31T02:57:20.000Z | rm_protection/rm_p.py | https-waldoww90-wadewilson-com/rm-protection | 4dcc678fa687373fb4439c5c4409f7649e653084 | ["MIT"] | 8 | 2017-02-03T16:13:53.000Z | 2017-05-28T05:20:45.000Z | rm_protection/rm_p.py | alanzchen/rm-protection | 4dcc678fa687373fb4439c5c4409f7649e653084 | ["MIT"] | 41 | 2017-02-04T15:13:26.000Z | 2021-12-19T08:58:38.000Z |
from sys import argv, exit
from os.path import expanduser as expu, expandvars as expv
from os.path import basename, dirname, abspath, isdir, exists
from subprocess import Popen, PIPE
from builtins import input
from rm_protection.config import Config
c = Config()
evaledpaths = []
def pprint(msg):
global c
print(c.rm_prefix + msg)
def ask(evalpath, parent=False):
global evaledpaths
if evalpath in evaledpaths:
return True
else:
with open(evalpath, "r") as f:
question = f.readline().rstrip("\n")
answer = f.readline().rstrip("\n")
try:
flags = f.readline().rstrip("\n")
except Exception:
flags = ''
if parent and 'R' not in flags:
pprint(original_path(evalpath) + ' is protected but flag "R" is missing')
evaledpaths.append(evalpath)
return True
else:
if parent:
pprint('The parent directory ' + original_path(evalpath) + ' is protected')
pprint(original_path(evalpath) + ": " + question)
if input("Answer: ") == answer:
evaledpaths.append(evalpath)
return True
else:
if parent:
return False
else:
pprint("Wrong answer! " + original_path(evalpath) + " will not be removed")
pprint("The answer is stored in " + evalpath)
return False
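# The protection file parsed above is plain text: line 1 is the question, line 2 the
# expected answer, and the optional line 3 holds flags ('R' lets rm pass through a
# protected parent directory). An assumed example (the real file name uses c.suffix):
#
# $ cat .secrets.rm-protection
# What is the answer to life, the universe and everything?
# 42
# R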
def original_path(evalpath):
global c
basepath = dirname(evalpath)
filename = basename(evalpath)[1:-len(c.suffix)]
if basepath == '/':
return basepath + filename
else:
return basepath + '/' + filename
def ask_in(q, a):
return bool(input(q) in a)
def gen_evalpaths(path):
paths = {}
path = dirname(path)
while path != '/':
evalpath = gen_eval(path)
paths[path] = evalpath
path = dirname(path)
return paths
def gen_eval(path):
global c
basedir = dirname(path)
if basedir == '/':
basedir = ''
return basedir + "/." + basename(path) + c.suffix
def parent_clear(file_evalpaths, path):
for filepath in file_evalpaths:
parent_eval = file_evalpaths[filepath]
if exists(parent_eval):
if not ask(parent_eval, parent=True):
pprint(path + ' will not be removed')
return False
return True
def rm(rm_args=None):
global c
global evaledpaths
args = ''
paths = []
evalpaths = []
option_end = False
if not rm_args:
rm_args = argv[1:]
for arg in rm_args:
if arg == '--':
option_end = True
elif (arg.startswith("-") and not option_end) or arg in c.invalid:
pass
else:
path = abspath(expv(expu(arg)))
file_evalpaths = gen_evalpaths(path)
evalpath = gen_eval(path)
if c.suffix in arg:
pprint(path + " is a protection file")
if ask_in(q="Do you want to remove it? (y/n) ", a="Yesyes"):
args += arg + ' '
else:
pprint(path + " will not be removed")
continue
if exists(evalpath):
if ask(evalpath):
paths.append(path)
evalpaths.append(evalpath)
else:
continue
if not parent_clear(file_evalpaths, path):
continue
if isdir(path):
find_exec = "find " + path + " -name " + "\".*" + c.suffix + "\"" + " -print"
out, err = Popen(find_exec, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True).communicate()
for pfile in iter(out.splitlines()):
pprint("A protected file or directory is found inside " + path)
if not ask(pfile):
pprint("Terminated due to potentially dangerous action")
exit(1)
args += bash_path(arg) + ' '
Popen("rm " + args, shell=True).wait()
remove_protection_files = ''
for evalpath, path in zip(evalpaths, paths):
if exists(evalpath) and not exists(path):
remove_protection_files += bash_path(evalpath) + ' '
if remove_protection_files:
Popen("rm " + remove_protection_files, shell=True).wait()
evaledpaths = []
def bash_path(path):
for sym in "\\#;,\'\"|{}[]() *&?@<>=!":
path = ("\\"+sym).join(path.split(sym))
return path
if __name__ == "__main__":
rm()
| 30.953947 | 120 | 0.530287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 504 | 0.10712 |
72381b6de058125b33932e8f4cd988e19b104ff7 | 6,856 | py | Python | src/text_normalizer/tokenization/_tokenize.py | arkataev/text_normalizer | a99326e31012157980d014c9730ac94bd1d18c1d | ["MIT"] | null | null | null | src/text_normalizer/tokenization/_tokenize.py | arkataev/text_normalizer | a99326e31012157980d014c9730ac94bd1d18c1d | ["MIT"] | null | null | null | src/text_normalizer/tokenization/_tokenize.py | arkataev/text_normalizer | a99326e31012157980d014c9730ac94bd1d18c1d | ["MIT"] | null | null | null |
"""Module for creating and working with tokens."""
import logging
import re
import string
from enum import IntEnum
from functools import lru_cache
from typing import Tuple, Iterator
from nltk.corpus import stopwords
from nltk.tokenize import ToktokTokenizer
from nltk.tokenize.api import TokenizerI
from ..config import RegexConfigType, PipelineConfigType, load_regex_conf, load_conf
__all__ = [
'sent_tokenize',
'TokTok',
'token_type',
'to_token',
'TokenType',
'iTokenTuple',
'russian_stopwords',
'replace_bigrams',
'KILO_POSTFIX',
'init_cache',
'cache_clear',
'get_tokenizer'
]
logger = logging.getLogger('rtn')
# Character the tokenizer uses to mark tokens carrying a "thousands" (kilo) suffix (e.g. 5к, 5 к)
KILO_POSTFIX = '%'
russian_stopwords = stopwords.words("russian")
_spaces = string.whitespace
_punct = set(f'{string.punctuation}{"«»…=#-——–``"}{string.whitespace}')
_isolating_punct = {'"', "'", '{', '}', '[', ']', '(', ')', '«', '»'}
_synonyms = load_conf(PipelineConfigType.SYNONIMS)
_regex_time = load_regex_conf(RegexConfigType.TIME)
class TokenType(IntEnum):
"""
Token types.
NB! IntEnum allows fast checks of token-type equality:
>>> TokenType.NUM == TokenType.NUM
True
>>> [TokenType.TXT, TokenType.PUNKT] == [TokenType.TXT, TokenType.PUNKT]
True
"""
NONE = 0
TXT = 1
PUNKT = 2
DATE = 3
NUM = 4
TIME = 5
PHONE = 6
EMOJI = 7
URL = 8
EMAIL = 9
PUNKT_ISO = 10 # isolating punctuation (e.g. "", (), [], etc.)
SPACE = 11
CARDNUM = 12
class iTokenTuple(Tuple):
"""
Interface for creating and working with tokens.
NB! This class should NOT be used as a constructor, since that would significantly slow down
object creation. It is more efficient for functions implementing this interface to return
plain tuples whose elements have the required types in the required order.
"""
_value: str
_type: TokenType
class RegexTokenType:
"""
Token type detector based on regular expressions.
Matches a token against a fixed list of regular expressions. If a match is found,
the corresponding token type is returned; otherwise the special type TokenType.NONE is returned.
>>> tok_rextype = RegexTokenType()
>>> tok_rextype('20.10.2020')
TokenType.DATE
>>> tok_rextype('[email protected]')
TokenType.EMAIL
>>> tok_rextype('https://pypi.org/')
TokenType.URL
"""
def __init__(self):
self.regex = {
TokenType.DATE: load_regex_conf(RegexConfigType.DATE),
TokenType.EMAIL: load_regex_conf(RegexConfigType.EMAIL),
TokenType.URL: load_regex_conf(RegexConfigType.URL),
TokenType.TIME: load_regex_conf(RegexConfigType.TIME),
}
def __call__(self, token: str) -> TokenType:
r = self.regex
for key in r:
if r[key].match(token):
return key
return TokenType.NONE
class TokTok(TokenizerI):
"""
Based on a set of regular expressions and a simplified string-processing algorithm
from the `TokTok <https://www.nltk.org/api/nltk.tokenize.html#module-nltk.tokenize.toktok>`_ tokenizer.
"""
def __init__(self):
self._regexes = ToktokTokenizer.TOKTOK_REGEXES[:]
self._regexes[2] = (_regex_time, r"(\1)")
self._regexes.insert(3, (re.compile(r"(?<![а-яА-Я])([а-яА-Я]{1})(\/)([а-яА-Я]{1})"), r"\1\3 "))
self._regexes.insert(4, (re.compile(r"(\d)(-)([а-яА-Я]+)"), r"\1\3 "))
self._regexes.append((re.compile(r"(-«»)"), r" \1 "))
self._regexes.append((re.compile(r"\s+(-)(\w+)"), r" \1 \2 "))
self._regexes.append((re.compile(r"(\w+)(-)\s"), r" \1 \2 "))
self._regexes.append((re.compile(r"(?<=[а-яА-я])([/\\])"), r" \1 "))
self._regexes.append((re.compile(r"([=…№\-——'\s]+)(\d+)([=…№\-——'\s]+)"), r" \1 \2 \3"))
# Mark tokens carrying a "thousands" (kilo) suffix (e.g. 5к, 5 к)
self._regexes.append((re.compile(r"(\d)\s?[кk]"), rf"{KILO_POSTFIX}\1{KILO_POSTFIX}"))
self._regexes.append(ToktokTokenizer.FUNKY_PUNCT_2)
def tokenize(self, text: str) -> [str]:
for regexp, substitution in self._regexes:
text = regexp.sub(substitution, text)
text = text.strip()
return text.split()
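# Illustrative tokenization sketch (output not verified; it depends on the full regex
# list inherited from NLTK's TokTok). Note how KILO_POSTFIX wraps the kilo-suffixed number:
#
# tok = TokTok()
# tok.tokenize('цена 5к') # -> ['цена', '%5%']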
@lru_cache(maxsize=1)
def get_tokenizer() -> TokenizerI:
return TokTok()
@lru_cache(maxsize=1)
def get_regex_type() -> RegexTokenType:
return RegexTokenType()
def sent_tokenize(sentence: str, tokenizer: TokenizerI) -> Iterator[iTokenTuple]:
"""
Create an iterator of (token, token type) tuples from a sentence.
:param sentence: the sentence
:param tokenizer: a tokenizer supporting the NLTK TokenizerI interface
"""
return map(to_token, tokenizer.tokenize(sentence))
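# A sketch of typical output (exact token boundaries depend on the tokenizer's regexes):
#
# tokenizer = get_tokenizer()
# list(sent_tokenize('привет, мир', tokenizer))
# # -> [('привет', TokenType.TXT), (',', TokenType.PUNKT), ('мир', TokenType.TXT)]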
def token_type(token_string: str) -> TokenType:
"""Определить тип токена"""
if not token_string:
return TokenType.NONE
if token_string in _spaces: # "in" works faster than calling the method ' '.isspace()
return TokenType.SPACE
elif token_string in _isolating_punct:
return TokenType.PUNKT_ISO
elif token_string in _punct:
return TokenType.PUNKT
elif token_string.isnumeric():
return TokenType.NUM
rextype = get_regex_type()
type_ = rextype(token_string)
if type_ is not TokenType.NONE:
return type_
return TokenType.TXT
def to_token(token_string: str) -> iTokenTuple:
"""
Create a token from a string.
>>> to_token('.')
('.', TokenType.PUNKT)
>>> to_token('1ый')
('1ый', TokenType.TXT)
>>> to_token('[email protected]')
('[email protected]', TokenType.EMAIL)
:param token_string: a string without whitespace
"""
return token_string, token_type(token_string)
def replace_bigrams(tokens: Iterator[iTokenTuple]) -> Iterator[iTokenTuple]:
"""
Replace bigrams with tokens from the synonyms dictionary.
Used to quickly replace token pairs such as "когда то" with "когда-то", as well as other bigrams.
>>> from text_normalizer.tokenization import replace_bigrams
>>> list(replace_bigrams(iter([('окко', TokenType.TXT), ('тв', TokenType.TXT)])))
[('окко-тв', TokenType.TXT)]
"""
crnt = None
buffer = []
for token, _type in tokens:
crnt, prev = token, crnt
synonym = _synonyms.get(f'{crnt}', crnt)
if prev:
bigram = _synonyms.get(f'{prev} {crnt}')
if bigram:
buffer[-1] = (bigram, _type)
continue
buffer.append((synonym, _type))
yield from buffer
def init_cache():
get_regex_type()
get_tokenizer()
logger.debug('Cache initiated')
def cache_clear():
get_regex_type.cache_clear()
get_tokenizer.cache_clear()
logger.debug('Cache cleared')
| 27.534137 | 107 | 0.641774 | 3,939 | 0.497411 | 898 | 0.113398 | 165 | 0.020836 | 0 | 0 | 4,001 | 0.505241 |
723871eadc62b9694db68243f51e537122c22e01 | 299 | py | Python | tests/data/test_to_array.py | maki-nage/rxsci | 64c9956752cbdd4c65aa9f054b6b28318a056625 | ["MIT"] | 3 | 2021-05-03T13:40:46.000Z | 2022-03-06T07:59:30.000Z | tests/data/test_to_array.py | maki-nage/rxsci | 64c9956752cbdd4c65aa9f054b6b28318a056625 | ["MIT"] | 9 | 2020-10-22T21:08:10.000Z | 2021-08-05T09:01:26.000Z | tests/data/test_to_array.py | maki-nage/rxsci | 64c9956752cbdd4c65aa9f054b6b28318a056625 | ["MIT"] | 2 | 2021-01-05T16:48:54.000Z | 2021-08-07T12:51:01.000Z |
from array import array
import rx
import rxsci as rs
def test_to_array():
actual_result = []
source = [1, 2, 3, 4]
rx.from_(source).pipe(
rs.data.to_array('d')
).subscribe(
on_next=actual_result.append
)
assert actual_result == [array('d', [1, 2, 3, 4])]
| 17.588235 | 54 | 0.595318 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.020067 |
7239365caa1436583482800c75a7cb1d2a4fbe35 | 18,942 | py | Python | pi/los.py | Coding-Badly/Little-Oven | 3d1178f495aea1180e25bddbb4f139d8e37e6a65 | ["Apache-2.0"] | null | null | null | pi/los.py | Coding-Badly/Little-Oven | 3d1178f495aea1180e25bddbb4f139d8e37e6a65 | ["Apache-2.0"] | null | null | null | pi/los.py | Coding-Badly/Little-Oven | 3d1178f495aea1180e25bddbb4f139d8e37e6a65 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
"""=============================================================================
los for Little-Oven. los (Little Oven Setup) prepares a Raspberry Pi for
Little-Oven development. This module does the actual work. los (no
extension) is a bash script that creates a service that runs this code.
Running the following puts the whole mess in motion...
curl -s "https://raw.githubusercontent.com/Coding-Badly/Little-Oven/master/pi/los" | bash
journalctl -u los.service
----------------------------------------------------------------------------
Copyright 2019 Brian Cook (aka Coding-Badly)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
============================================================================="""
import grp
import json
import os
import pathlib
import pwd
import requests
import stat
import subprocess
import time
import uuid
class CurrentStepManager():
def __init__(self):
self._path_step = pathlib.Path('los.step')
self._current_step = None
def get_current_step(self):
if self._current_step is None:
try:
current_step_text = self._path_step.read_text()
self._current_step = int(current_step_text)
except FileNotFoundError:
self._current_step = 1
return self._current_step
def increment_current_step(self):
_ = self.get_current_step()
self._current_step += 1
self._path_step.write_text(str(self._current_step))
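# Minimal sketch of how CurrentStepManager persists progress across reboots:
#
# csm = CurrentStepManager()
# csm.get_current_step() # 1 on a fresh system (los.step does not exist yet)
# csm.increment_current_step() # writes "2" to los.step for the next service run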
class DirectoryMaker():
def __init__(self, default_final_mode=0o700):
self._default_final_mode = default_final_mode
self._uid = pwd.getpwnam("pi").pw_uid
self._gid = grp.getgrnam("pi").gr_gid
def mkdir(self, path, parents=False, final_mode=None):
final_mode = self._default_final_mode if final_mode is None else final_mode
path.mkdir(mode=0o777, parents=parents, exist_ok=True)
os.chown(str(path), self._uid, self._gid)
path.chmod(final_mode)
def chown(self, path):
os.chown(str(path), self._uid, self._gid)
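# Sketch: create a pi-owned directory with restrictive permissions (the path below is
# illustrative only):
#
# dm = DirectoryMaker()
# dm.mkdir(pathlib.Path('/home/pi/.config/little-oven'), parents=True, final_mode=0o700)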
def wall(text):
subprocess.run(['wall',text], check=True)
def wall_and_print(text, step=None):
if step is not None:
text = 'Step #{}: {}'.format(int(step), text)
wall(text)
print(text)
def update_then_upgrade():
time.sleep(5.0)
wall('Update the APT package list.')
subprocess.run(['apt-get','-y','update'], check=True)
wall('Upgrade APT packages.')
subprocess.run(['apt-get','-y','upgrade'], check=True)
def simple_get(source_url, destination_path):
r = requests.get(source_url, stream=True)
r.raise_for_status()
with destination_path.open('wb') as f:
for chunk in r.iter_content(64*1024):
f.write(chunk)
def check_global_config():
global global_config
if path_los_json.exists():
with path_los_json.open() as f:
global_config = json.load(f)
else:
global_config = dict()
csm = CurrentStepManager()
path_los_json = pathlib.Path('los.json')
check_global_config()
MODE_EXECUTABLE = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH
need_reboot = False
go_again = True
while go_again:
go_again = False
if csm.get_current_step() == 1:
wall_and_print('Ensure the operating system is up-to-date.', csm.get_current_step())
update_then_upgrade()
need_reboot = True
csm.increment_current_step()
elif csm.get_current_step() == 2:
wall_and_print('Install Git.', csm.get_current_step())
subprocess.run(['apt-get','-y','install','git'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 3:
wall_and_print('Install Python development.', csm.get_current_step())
subprocess.run(['apt-get','-y','install','python3-dev'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 4:
wall_and_print('Ensure the operating system is up-to-date again.', csm.get_current_step())
update_then_upgrade()
need_reboot = True
csm.increment_current_step()
elif csm.get_current_step() == 5:
wall_and_print('Install pip.', csm.get_current_step())
path_get_pip = pathlib.Path('get-pip.py')
simple_get('https://bootstrap.pypa.io/get-pip.py', path_get_pip)
subprocess.run(['python3',str(path_get_pip)], check=True)
path_get_pip.unlink()
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 6:
wall_and_print('Install Python modules required by this module.', csm.get_current_step())
subprocess.run(['pip','install', 'xkcdpass'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 7:
wall_and_print('Get the global configuration file.', csm.get_current_step())
base_url = os.environ.get('LOS_BASE_URL', 'https://raw.githubusercontent.com/Coding-Badly/Little-Oven/master/pi')
get_this = base_url + '/' + 'los.json'
try:
simple_get(get_this, path_los_json)
except requests.exceptions.HTTPError:
pass
check_global_config()
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 8:
wall_and_print('Set the password using the https://xkcd.com/936/ technique.', csm.get_current_step())
from xkcdpass import xkcd_password as xp
wordfile = xp.locate_wordfile()
mywords = xp.generate_wordlist(wordfile=wordfile, min_length=5, max_length=8)
new_password = xp.generate_xkcdpassword(mywords, delimiter=',', numwords=3)
wall_and_print(' The new password is...')
wall_and_print(' {}'.format(new_password))
# fix: Send the new password to a repository.
new_password = 'whatever' # rmv
pi_new_password = ('pi:' + new_password).encode('ascii')
subprocess.run("chpasswd", input=pi_new_password, check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 9:
wall_and_print('Change the hostname.', csm.get_current_step())
path_hostname = pathlib.Path('/etc/hostname')
path_hostname.write_text('Little-Oven\n')
subprocess.run(['sed','-i',"s/raspberrypi/Little-Oven/",'/etc/hosts'], check=True)
need_reboot = True
csm.increment_current_step()
elif csm.get_current_step() == 10:
wall_and_print('Change the timezone.', csm.get_current_step())
# Why localtime has to be removed...
# https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1554806
# date "+%Z %z"
pathlib.Path('/etc/timezone').write_text('America/Chicago\n')
pathlib.Path('/etc/localtime').unlink()
subprocess.run(['dpkg-reconfigure','-f','noninteractive','tzdata'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 11:
wall_and_print('Change the keyboard layout.', csm.get_current_step())
# debconf-get-selections | grep keyboard-configuration
# The top entry is suspect. "gb" was the value after changing
# keyboards using dpkg-reconfigure.
keyboard_conf = """
keyboard-configuration\tkeyboard-configuration/xkb-keymap\tselect\tus
keyboard-configuration\tkeyboard-configuration/layoutcode\tstring\tus
keyboard-configuration\tkeyboard-configuration/layout\tselect\tEnglish (US)
keyboard-configuration\tkeyboard-configuration/variant\tselect\tEnglish (US)
""".encode("ascii")
subprocess.run("debconf-set-selections", input=keyboard_conf, check=True)
subprocess.run(['dpkg-reconfigure','-f','noninteractive','keyboard-configuration'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 12:
wall_and_print('Change the locale.', csm.get_current_step())
# locale
locale_conf = """
locales\tlocales/locales_to_be_generated\tmultiselect\ten_US.UTF-8 UTF-8
locales\tlocales/default_environment_locale\tselect\ten_US.UTF-8
""".encode("ascii")
subprocess.run("debconf-set-selections", input=locale_conf, check=True)
subprocess.run(['sed','-i',"s/^# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/",'/etc/locale.gen'], check=True)
subprocess.run(['dpkg-reconfigure','-f','noninteractive','locales'], check=True)
subprocess.run(['update-locale','LANG=en_US.UTF-8'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 13:
wall_and_print('Configure Git.', csm.get_current_step())
this_mac = format(uuid.getnode(), 'X')
config_by_this_mac = global_config.get(this_mac, None)
config_github = config_by_this_mac.get('github', None) if config_by_this_mac else None
if config_github:
# Set basic Git configuration.
git_user_name = config_github.get('user.name', 'Git User Name Goes Here')
git_user_email = config_github.get('user.email', '[email protected]')
git_core_editor = config_github.get('core.editor', 'nano')
subprocess.run(['git','config','--system','user.name',git_user_name], check=True)
subprocess.run(['git','config','--system','user.email',git_user_email], check=True)
subprocess.run(['git','config','--system','core.editor',git_core_editor], check=True)
# Ensure the .ssh directory exists.
path_dot_ssh = pathlib.Path('/home/pi/.ssh')
# https://superuser.com/questions/215504/permissions-on-private-key-in-ssh-folder
dm = DirectoryMaker()
dm.mkdir(path_dot_ssh)
# Add a Github section to the .ssh/config file.
path_ssh_config = path_dot_ssh / 'config'
with path_ssh_config.open('at') as f:
f.write('Host github.com\n')
f.write(' User git\n')
f.write(' Hostname github.com\n')
f.write(' PreferredAuthentications publickey\n')
f.write(' IdentityFile ~/.ssh/github/id_rsa\n')
dm.chown(path_ssh_config)
# Create a github subdirectory for the Github key pair.
path_github = path_dot_ssh / 'github'
dm.mkdir(path_github)
# Generate the Github key pair.
path_id_rsa = path_github / 'id_rsa'
# ssh-keygen -t rsa -C "[email protected]" -b 1024 -N '' -f ~/.ssh/github/id_rsa
subprocess.run(['ssh-keygen','-t','rsa','-C',git_user_email,'-b','4096','-N','','-f',str(path_id_rsa)], check=True)
dm.chown(path_id_rsa)
dm.chown(path_id_rsa.with_suffix('.pub'))
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 14:
# wall_and_print('Install PiFace Digital 2 packages from GitHub.', csm.get_current_step())
# # Common
# subprocess.run(['git','clone','git://github.com/piface/pifacecommon.git','/home/pi/python-things/pifacecommon'], check=True)
# subprocess.run(['python3','/home/pi/python-things/pifacecommon/setup.py','install'], cwd='/home/pi/python-things/pifacecommon/', check=True)
# #subprocess.run(['rm','-rf','/home/pi/python-things/pifacecommon'], check=True)
# # Digital I/O
# subprocess.run(['git','clone','git://github.com/piface/pifacedigitalio.git','/home/pi/python-things/pifacedigitalio'], check=True)
# subprocess.run(['python3','/home/pi/python-things/pifacedigitalio/setup.py','install'], cwd='/home/pi/python-things/pifacedigitalio/', check=True)
# #subprocess.run(['rm','-rf','/home/pi/python-things/pifacedigitalio'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 15:
# wall_and_print('Install python-dispatch package from GitHub.', csm.get_current_step())
# subprocess.run(['git','clone','https://github.com/Coding-Badly/python-dispatch.git','/home/pi/python-things/python-dispatch'], check=True)
# subprocess.run(['python3','/home/pi/python-things/python-dispatch/setup.py','install'], cwd='/home/pi/python-things/python-dispatch/', check=True)
# #subprocess.run(['rm','-rf','/home/pi/python-dispatch'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 16:
wall_and_print('Clone the Little Oven.', csm.get_current_step())
# git clone [email protected]:Coding-Badly/Little-Oven.git /home/pi/Little-Oven
# git clone https://github.com/Coding-Badly/Little-Oven.git /home/pi/Little-Oven
subprocess.run(['git','clone','https://github.com/Coding-Badly/Little-Oven.git','/home/pi/Little-Oven'], check=True)
try:
subprocess.run(['git','checkout','-t','remotes/origin/master'], cwd='/home/pi/Little-Oven', stderr=subprocess.PIPE, check=True)
except subprocess.CalledProcessError as exc:
if not "already exists" in exc.stderr.decode("utf-8"):
raise
# Change the remote url to use ssh.
# git remote set-url origin [email protected]:Coding-Badly/Little-Oven.git
subprocess.run(['git','remote','set-url','origin','[email protected]:Coding-Badly/Little-Oven.git'], cwd='/home/pi/Little-Oven', check=True)
# Use pip to install dependencies.
path_requirements = pathlib.Path('/home/pi/Little-Oven/requirements.txt')
if path_requirements.exists():
subprocess.run(['pip','install','-U','-r',str(path_requirements)], check=True)
# Fix ownership of the Little-Oven repository.
subprocess.run(['chown','-R','pi:pi','/home/pi/Little-Oven'], check=True)
# Prepare the cache directory.
dm = DirectoryMaker(default_final_mode=0o755)
path_cache = pathlib.Path('/var/cache/Rowdy Dog Software/Little-Oven/pans')
dm.mkdir(path_cache, parents=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 17:
# wall_and_print('Install PiFace Digital 2 initialization service.', csm.get_current_step())
# subprocess.run(['cp','/home/pi/Little-Oven/pi/init_PiFace_Digital_2.service','/etc/systemd/system/init_PiFace_Digital_2.service'], check=True)
# subprocess.run(['systemctl','enable','init_PiFace_Digital_2.service'], check=True)
# need_reboot = True
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 18:
wall_and_print('Configure Rust to be easily installed.', csm.get_current_step())
# Download rustup.sh to a common location and make it Read + Execute
# for everyone. Writable for the owner (root).
path_rustup_sh = pathlib.Path('/usr/local/bin/rustup.sh')
simple_get('https://sh.rustup.rs', path_rustup_sh)
path_rustup_sh.chmod(MODE_EXECUTABLE)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 19:
wall_and_print('Install FUSE (support for VeraCrypt).', csm.get_current_step())
subprocess.run(['apt-get','-y','install','fuse'], check=True)
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 20:
wall_and_print('Configure VeraCrypt to be easily installed.', csm.get_current_step())
# Prepare a directory for the VeraCrypt files.
dm = DirectoryMaker(default_final_mode=0o755)
path_temp = pathlib.Path('./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW')
dm.mkdir(path_temp, parents=True)
# Download the install script
path_tar_bz2 = path_temp / 'veracrypt-setup.tar.bz2'
simple_get('https://launchpad.net/veracrypt/trunk/1.21/+download/veracrypt-1.21-raspbian-setup.tar.bz2', path_tar_bz2)
# Extract the contents
subprocess.run(['tar','xvfj',str(path_tar_bz2),'-C',str(path_temp)], check=True)
path_src = path_temp / 'veracrypt-1.21-setup-console-armv7'
path_dst = pathlib.Path('/usr/local/bin/veracrypt-setup')
# Copy the console setup to a location on the PATH
subprocess.run(['cp',str(path_src),str(path_dst)], check=True)
# Remove the temporary directory
subprocess.run(['rm','-rf',str(path_temp)], check=True)
# Run the install script
#subprocess.run(['bash',str(path_setup),'--quiet'], check=True)
# mkdir veracrypt_CErQ2nnwvZCVeKQHhLV24TWW
# wget --output-document=./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-setup.tar.bz2 https://launchpad.net/veracrypt/trunk/1.21/+download/veracrypt-1.21-raspbian-setup.tar.bz2
# tar xvfj ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-setup.tar.bz2 -C ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW
# ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-1.21-setup-console-armv7 --check
# ./veracrypt_CErQ2nnwvZCVeKQHhLV24TWW/veracrypt-1.21-setup-console-armv7 --quiet
# rm -rf veracrypt_CErQ2nnwvZCVeKQHhLV24TWW
go_again = True
csm.increment_current_step()
elif csm.get_current_step() == 21:
wall_and_print('Check for Rust and VeraCrypt after login.', csm.get_current_step())
# Write the following to /etc/profile.d/check_for_rust_and_veracrypt.sh and make it
# executable.
check_for_rust_and_veracrypt = """#!/bin/bash
if [ ! -e $HOME/.cargo ]; then
rustup.sh -y
fi
if ! command -v veracrypt; then
veracrypt-setup
fi
"""
path_check_for = pathlib.Path('/etc/profile.d/check_for_rust_and_veracrypt.sh')
path_check_for.write_text(check_for_rust_and_veracrypt)
path_check_for.chmod(MODE_EXECUTABLE)
go_again = True
csm.increment_current_step()
#elif csm.get_current_step() == 20:
# wall_and_print('One last reboot for good measure.', csm.get_current_step())
# need_reboot = True
# csm.increment_current_step()
# fix: Configure Little-Oven to automatically run on boot.
else:
wall_and_print('Little-Oven installed. Disabling the los service.')
subprocess.run(['systemctl','disable','los.service'], check=True)
if need_reboot:
wall_and_print('REBOOT!')
time.sleep(5.0)
subprocess.run(['reboot'], check=True)
| 50.244032 | 184 | 0.658853 | 1,208 | 0.063774 | 0 | 0 | 0 | 0 | 0 | 0 | 8,598 | 0.453912 |