id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses 1 value)
---|---|---|
3400620
|
<reponame>ihalseide/brick-rain
#!/usr/bin/env python3
import pygame
import sys
import random
import time
from base_scene import BaseScene
from game_over_scene import GameOverScene
from piece import Piece, SHAPES, COLORS
from game_resources import IMAGES, SOUNDS
from clouds import Cloud
BOX_SIZE = 20 # how big each square is
X_MARGIN = 15
Y_MARGIN = 115 # padding between game board and SCREEN edge
BOARD_SIZE = (10, 20) # columns and rows (width, height) of game board
BIG_FONT = pygame.font.Font('freesansbold.ttf', 25)
NORMAL_FONT = pygame.font.Font('freesansbold.ttf', 15)
BG_COLOR = (0, 0, 0)
BOARD_COLOR = (50, 50, 200)
BORDER_COLOR = (255, 255, 255)
TEXT_COLOR = (255, 255, 255)
MOVEMENT_FREQ = .2 # how quickly the player can move the pieces
def calculate_lvl_and_freq(score):
level = int(score / 10) + 1
fall_freq = .27 - (level * .02) + .18
return level, fall_freq
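# Worked example (from the formula above): score 0 -> level 1, fall_freq 0.43;
# score 20 -> level 3, fall_freq 0.39 (the fall delay shrinks by 0.02 s per level).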
class GameScene(BaseScene):
def __init__(self, board_size=BOARD_SIZE):
super().__init__()
# choose random song
if False: #random.randint(0,1): I like song2 better
pygame.mixer.music.load('assets/song.ogg')
else:
pygame.mixer.music.load('assets/song 2.ogg')
pygame.mixer.music.play(-1)
# this array is for the blocks on the board
self.board_width, self.board_height = board_size
self.board = self.get_empty_board()
self.last_move_down_time = time.time()
self.last_move_sideways_time = time.time()
self.last_fall_time = time.time()
self.moving_down = False
self.moving_left = False
self.moving_right = False
self.score = 0
self.level, self.fall_freq = calculate_lvl_and_freq(self.score)
self.next_piece = self.new_piece()
self.falling_piece = self.new_piece()
# visual please
self.clouds = pygame.sprite.Group()
self.last_cloud_time = time.time()
self.cloud_wait = 0
        # buttons are the same distance from the game board as the game board...
        # ... is from the edge of the screen
button_x = (X_MARGIN * 2) + (BOX_SIZE * self.board_width)
self.next_rect = pygame.Rect(button_x, (Y_MARGIN + 25), 100, 100)
self.pause_rect = pygame.Rect(button_x, (25 + self.next_rect.bottomleft[1]), 100, 100)
self.help_rect = pygame.Rect(button_x, (25 + self.pause_rect.bottomleft[1]), 100, 100)
self.paused = False
self.helping = False
def get_new_cloud(self):
        # pick a random pixel y on the board for the cloud to spawn at
y = random.randint(self.to_pixel_coords(0,0)[1], self.to_pixel_coords(0,self.board_height)[1])
return Cloud((0, y), 0, pygame.display.get_surface().get_rect().width)
def to_pixel_coords(self, box_x, box_y):
x = X_MARGIN + (box_x * BOX_SIZE)
y = Y_MARGIN + (box_y * BOX_SIZE)
return x, y
def get_empty_board(self):
return [[None for x in range(self.board_width)] for y in range(self.board_height)]
def is_on_board(self, x, y):
return 0 <= x < self.board_width and y < self.board_height
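        # Note: negative y is allowed on purpose so a freshly spawned piece can sit
        # partly above the visible board (new_piece starts at y = -2).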
def new_piece(self):
        # pick a random shape (converted to its array form) and a random color
shape = Piece.to_array(random.choice(list(SHAPES.values())))
color = random.choice(COLORS)
p = Piece(self, int(self.board_width / 2), -2, shape, color, BOX_SIZE)
for i in range(random.randint(0,3)):
p.rotate()
return p
def process_inputs(self, events, pressed_keys):
for event in events:
if event.type == pygame.MOUSEBUTTONDOWN:
if self.pause_rect.collidepoint(pygame.mouse.get_pos()) and not self.helping:
self.paused = not self.paused
elif self.help_rect.collidepoint(pygame.mouse.get_pos()):
self.helping = not self.helping
self.paused = self.helping
elif event.type == pygame.KEYDOWN:
k = event.key
if k == pygame.K_p:
# toggle pause
self.paused = not self.paused
# don't process keys if paused
if self.paused or self.helping:
return
if k == pygame.K_UP:
# rotate
fp = self.falling_piece
rotated = Piece(fp.parent, fp.x, fp.y, fp.shape, fp.color, fp.box_size) # make a copy of falling piece
rotated.rotate()
if self.is_valid_position(rotated):
self.falling_piece = rotated
#SOUNDS['rotate'].play() # doesn't feel right
elif k == pygame.K_DOWN:
# move downwards faster
self.moving_down = True
if self.is_valid_position(self.falling_piece, adj_y=1):
self.falling_piece.y += 1
self.last_move_down_time = time.time()
elif k == pygame.K_LEFT and self.is_valid_position(self.falling_piece, adj_x=-1):
# move left
self.falling_piece.x -= 1
self.moving_left = True
self.moving_right = False
self.last_move_sideways_time = time.time()
elif k == pygame.K_RIGHT and self.is_valid_position(self.falling_piece, adj_x=1):
# move right
self.falling_piece.x += 1
self.moving_left = False
self.moving_right = True
self.last_move_sideways_time = time.time()
elif k == pygame.K_SPACE:
# drop to bottom
self.moving_down = False
self.moving_left = False
self.moving_right = False
y = 0
for i in range(1, self.board_height):
if not self.is_valid_position(self.falling_piece, adj_y=i):
break
y = i
self.falling_piece.y += y
elif event.type == pygame.KEYUP:
k = event.key
if k == pygame.K_LEFT:
self.moving_left = False
elif k == pygame.K_RIGHT:
self.moving_right = False
elif k == pygame.K_DOWN:
self.moving_down = False
def generate_clouds(self):
if time.time() - self.last_cloud_time >= self.cloud_wait:
self.clouds.add(self.get_new_cloud())
self.last_cloud_time = time.time()
self.cloud_wait = random.uniform(.5, 3)
def update(self):
#don't update if paused
if self.paused or self.helping:
pygame.mixer.music.pause()
return
else:
pygame.mixer.music.unpause()
# do updating:
# no more input, now for updating
# user move it sideways
if (self.moving_left or self.moving_right) and time.time() - self.last_move_sideways_time > MOVEMENT_FREQ:
if self.moving_left and self.is_valid_position(self.falling_piece, adj_x=-1):
self.falling_piece.x -= 1
if self.moving_right and self.is_valid_position(self.falling_piece, adj_x=1):
self.falling_piece.x += 1
self.last_move_sideways_time = time.time()
# user move down
if self.moving_down and time.time() - self.last_move_down_time > MOVEMENT_FREQ and self.is_valid_position(self.falling_piece, adj_y=1):
self.falling_piece.y += 1
self.last_move_down_time = time.time()
        # natural fall
if time.time() - self.last_fall_time > self.fall_freq:
# landed?
if not self.is_valid_position(self.falling_piece, adj_y=1):
self.add_to_board(self.falling_piece)
# use list from removing lines to change score and animate
removed_lines = self.remove_complete_lines()
self.score += removed_lines
self.level, self.fall_freq = calculate_lvl_and_freq(self.score)
self.falling_piece = None
# only play 1 sound
if removed_lines:
SOUNDS['break'].play()
else:
SOUNDS['rotate'].play()
else:
self.falling_piece.y += 1
self.last_fall_time = time.time()
# tick falling piece or detect lose
if self.falling_piece is None:
self.falling_piece = self.next_piece
self.next_piece = self.new_piece()
self.last_fall_time = time.time()
# check top blocks to determine GAME OVER
if not self.is_valid_position(self.falling_piece):
self.switch_to_scene(GameOverScene(self.score, self.level, GameScene( (self.board_width, self.board_height) )))
return
for x in range(3, self.board_width-4):
if self.board[0][x] is not None:
self.switch_to_scene(GameOverScene(self.score, self.level, GameScene( (self.board_width, self.board_height) )))
return
# clouds
self.generate_clouds()
self.clouds.update()
def display(self, screen):
# don't draw crucial game info if help or pause is shown
screen.fill((0,0,0))
self.draw_board(screen)
self.draw_space(screen)
self.draw_next_piece(screen)
self.draw_buttons(screen)
if self.falling_piece is not None and not (self.paused or self.helping): # there might not be a current falling piece
self.falling_piece.draw(screen)
self.draw_status(screen)
if self.helping:
self.show_help(screen)
elif self.paused:
self.show_pause(screen)
def draw_buttons(self, screen):
pygame.draw.rect(screen, (0, 0, 0), self.pause_rect, 5)
pygame.draw.rect(screen, (125, 255, 0), self.pause_rect)
surf = NORMAL_FONT.render('Pause', True, TEXT_COLOR)
surf_rect = surf.get_rect()
surf_rect.center = self.pause_rect.center
screen.blit(surf, surf_rect)
pygame.draw.rect(screen, (0, 0, 0), self.help_rect, 5)
pygame.draw.rect(screen, (255, 125, 0), self.help_rect)
surf = NORMAL_FONT.render('Help', True, TEXT_COLOR)
surf_rect = surf.get_rect()
surf_rect.center = self.help_rect.center
screen.blit(surf, surf_rect)
def draw_space(self, screen):
screen_rect = screen.get_rect()
screen.fill(BG_COLOR, (0, 0, screen_rect.width, Y_MARGIN-2))
screen.fill(BG_COLOR, (0, self.to_pixel_coords(0,self.board_height)[1]+2, screen_rect.width, 100))
screen.fill(BG_COLOR, (0, Y_MARGIN-2, X_MARGIN-2, screen_rect.height))
x = self.to_pixel_coords(self.board_width,0)[0]+2
screen.fill(BG_COLOR, ( x, Y_MARGIN-2, 140, screen_rect.height))
screen.blit(IMAGES['title'], (0,0))
def draw_box(self, screen, box_x, box_y, color, pixel_x=None, pixel_y=None, draw_blank=False):
# pixel args override box coords
if pixel_x is None and pixel_y is None:
pixel_x, pixel_y = self.to_pixel_coords(box_x, box_y)
the_rect = (pixel_x, pixel_y, BOX_SIZE, BOX_SIZE)
if color is None:
if draw_blank:
pygame.draw.rect(screen, BOARD_COLOR, the_rect)
else:
return
else:
screen.blit(IMAGES[color+' brick'], the_rect)
def draw_board(self, screen):
# draw border
pygame.draw.rect(screen, BORDER_COLOR, (X_MARGIN, Y_MARGIN, self.board_width*BOX_SIZE, self.board_height*BOX_SIZE), 5)
# draw background
pygame.draw.rect(screen, BOARD_COLOR, (X_MARGIN, Y_MARGIN, self.board_width*BOX_SIZE, self.board_height*BOX_SIZE))
self.clouds.draw(screen)
if self.paused or self.helping:
return
for x in range(self.board_width):
for y in range(self.board_height):
cell = self.board[y][x]
self.draw_box(screen, x, y, cell)
def add_to_board(self, piece):
for x in range(piece.width):
for y in range(piece.height):
if piece.get_at(x, y) is not None:
self.board[y + piece.y][x + piece.x] = piece.color
def is_complete_line(self, y):
for x in range(self.board_width):
if self.board[y][x] is None:
return False
return True
def remove_complete_lines(self):
lines_removed = 0
y = self.board_height - 1 # start from bottom
while y >= 0:
if self.is_complete_line(y):
lines_removed += 1
for pull_down_y in range(y, 0, -1):
for x in range(self.board_width):
self.board[pull_down_y][x] = self.board[pull_down_y-1][x]
# clear top line
for x in range(self.board_width):
self.board[0][x] = None
else:
y -= 1
return lines_removed
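        # Example of the scan above: if rows 18 and 19 are both complete, row 19 is cleared
        # first and y stays put, so the row pulled down into 19 (old row 18) is re-checked
        # before y moves up.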
def is_valid_position(self, piece, adj_x=0, adj_y=0):
for x in range(piece.width):
for y in range(piece.height):
is_above_board = y + piece.y + adj_y < 0
if is_above_board or piece.get_at(x, y) is None:
continue
if not self.is_on_board(x + piece.x + adj_x, y + piece.y + adj_y):
return False
if self.board[y + piece.y + adj_y][x + piece.x + adj_x] is not None:
return False
return True
def draw_status(self, screen):
score_surf = NORMAL_FONT.render("Score: %s"%self.score, True, TEXT_COLOR)
score_rect = score_surf.get_rect()
score_rect.bottomleft = (X_MARGIN, Y_MARGIN-5)
screen.blit(score_surf, score_rect)
level_surf = NORMAL_FONT.render("Level: %s"%self.level, True, TEXT_COLOR)
level_rect = level_surf.get_rect()
level_rect.bottomleft = (2*X_MARGIN+BOX_SIZE*self.board_width, Y_MARGIN-5)
screen.blit(level_surf, level_rect)
def draw_next_piece(self, screen):
next_area = self.next_rect
pygame.draw.rect(screen, BORDER_COLOR, next_area, 5)
pygame.draw.rect(screen, BOARD_COLOR, next_area)
surf = NORMAL_FONT.render("Next:", True, TEXT_COLOR)
rect = surf.get_rect()
rect.topleft = (next_area.topleft[0], next_area.topleft[1]-25)
screen.blit(surf, rect)
        # don't draw the next piece if not playing
if self.paused or self.helping:
return
center_x = BOX_SIZE * self.next_piece.width / 2
center_y = BOX_SIZE * self.next_piece.height / 2
self.next_piece.draw(screen, pixel_x=next_area.center[0]-center_x, pixel_y=next_area.center[1]-center_y)
def show_pause(self, screen):
screen_height = screen.get_rect().height
surf = BIG_FONT.render("PAUSED", True, TEXT_COLOR)
surf_rect = surf.get_rect()
surf_rect.center = (X_MARGIN+(BOX_SIZE*self.board_width/2), screen_height/2)
screen.blit(surf, surf_rect)
def show_help(self, screen):
screen_height = screen.get_rect().height
x = X_MARGIN + (BOX_SIZE * self.board_width / 2)
y = screen_height/2
surf = BIG_FONT.render("HELP", True, TEXT_COLOR)
surf_rect = surf.get_rect()
surf_rect.center = (x, y)
screen.blit(surf, surf_rect)
for i, text in enumerate(['Move piece = Arrow keys', 'Rotate piece = Up arrow key', 'Drop piece = Space key']):
surf = NORMAL_FONT.render(text, True, TEXT_COLOR)
surf_rect = surf.get_rect()
surf_rect.center = (x, y + (i+1)*30)
screen.blit(surf, surf_rect)
|
StarcoderdataPython
|
1820397
|
<reponame>ollien/Screenshot-Uploader
class ConfigReader():
def __init__(self,name="config.txt"):
self.keys={}
self.name = name
#Read Keys from file
def readKeys(self):
keysFile=open(self.name,"r")
fileLines=keysFile.readlines()
keysFile.close()
self.keys.clear()
for item in fileLines:
#If last char is \n
if (item[-1]=='\n'):
item=item[:-1]
            #Skip blank lines (after stripping, a bare newline becomes an empty string)
            if (item==''):
                continue
            #If a commented line
            elif (item[0]=='#'):
                continue
else:
#Get Position of equal sign
pos=item.find('=')
if pos != -1:
                    #Name of the key is [0:pos], value of the key is [pos+1:] (the trailing \n was already stripped above)
self.keys[item[0:pos]]=item[pos+1:]
    #Return the keys; pass read=False to reuse the cached keys without re-reading the file.
def getKeys(self,read=True):
if read:
self.readKeys()
return self.keys
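# Minimal usage sketch (assuming a config.txt containing lines like "api_key=abc123"):
#   reader = ConfigReader("config.txt")
#   keys = reader.getKeys()  # -> {'api_key': 'abc123'}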
|
StarcoderdataPython
|
4808527
|
<filename>tests/sensor/test_bno055.py
import time
import unittest
import pigpio
from pisat.handler import PigpioI2CHandler
from pisat.sensor import Bno055
from pisat.tester.sensor import SensorTestor
ADDRESS_BNO055 = 0x28
class TestBNO055(unittest.TestCase):
def setUp(self) -> None:
pi = pigpio.pi()
handler = PigpioI2CHandler(pi, ADDRESS_BNO055)
self.bno055 = Bno055(handler, name="bno055")
self.bno055.change_operation_mode(Bno055.OperationMode.NDOF)
self.testor = SensorTestor(self.bno055)
def test_bench_mark(self):
result = self.testor.exec_benchmark()
print(f"time to read 100 times: {result}")
def test_remap(self):
print("Current Axis Map")
print("----------------")
print(f"x: {self.bno055.axis_x}, sign: {self.bno055.sign_x}")
print(f"y: {self.bno055.axis_y}, sign: {self.bno055.sign_y}")
print(f"z: {self.bno055.axis_z}, sign: {self.bno055.sign_z}")
print()
self.bno055.remap_axis(self.bno055.Axis.Y, self.bno055.Axis.X, self.bno055.Axis.Z)
self.bno055.remap_sign(x=self.bno055.AxisSign.NEGATIVE)
print("Axes remapped.", end="\n\n")
self.bno055._read_map_config()
self.bno055._read_map_sign()
print("New Axis Map")
print("----------------")
print(f"x: {self.bno055.axis_x}, sign: {self.bno055.sign_x}")
print(f"y: {self.bno055.axis_y}, sign: {self.bno055.sign_y}")
print(f"z: {self.bno055.axis_z}, sign: {self.bno055.sign_z}")
print()
# reset
self.bno055.reset_axis()
self.bno055.reset_sign()
def test_calibration(self):
print()
print("Calibration status")
print("------------------")
self.bno055.load_calib_stat()
print(f"sys: {self.bno055.calib_stat_sys}")
print(f"acc: {self.bno055.calib_stat_acc}")
print(f"mag: {self.bno055.calib_stat_mag}")
print(f"gyro: {self.bno055.calib_stat_gyro}")
def test_observe(self):
self.bno055.remap_axis(self.bno055.Axis.Y, self.bno055.Axis.X, self.bno055.Axis.Z)
self.bno055.remap_sign(z=self.bno055.AxisSign.NEGATIVE)
self.testor.observe()
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
11201182
|
__version__ = '0.6'
from .antenna import *
from .topica import TopicaResult
from .digital_twin import DigitalTwin
|
StarcoderdataPython
|
8120301
|
<reponame>pavlanovak/Iskanje-besed-UVP
<<<<<<< HEAD
PRAVILNO_MESTO_IN_CRKA = 'x'
NEPRAVILO_MESTO_IN_CRKA = '-'
=======
>>>>>>> 80858d2af68bead28a54e3714cc7cd06b1d21285
import random
import json
<<<<<<< HEAD
class Igra:
def __init__(self, beseda, ugibanja, stanje, tocke):
self.beseda = beseda
self.ugibanja = []
self.stanje = ZACETEK, IGRA, ZADEL, PORAZ
'self.tocke = '
def pokazi_crke_in_dolzino(self):
crke = []
for i in beseda:
if i not in crke:
crke.append(i)
else:
continue
return crke
=======
# ALL STATES
ZACETEK = "Z"
KONEC = "K"
ZMAGA = "w"
PORAZ = "L"
IGRANJE = 'I'
ZACETNE_TOCKE = 100
PRAVILNO_MESTO_IN_CRKA = 'x'
NEPRAVILO_MESTO_IN_CRKA = '-'
#
class Igra:
def __init__(self, beseda, ugibanja=None, stanje=ZACETEK, tocke=ZACETNE_TOCKE):
self.beseda = beseda
self.ugibanja = ugibanja or []
self.stanje = stanje
self.tocke = tocke
def pokazi_crke_in_dolzino(self):
crke = []
for i in self.beseda:
if i not in crke:
crke.append(i)
return (crke, len(self.beseda))
def ugibaj(self, ugibana_beseda):
ugibana_beseda = ugibana_beseda.upper()
zadetki = 0
for i in range(len(self.beseda)):
if ugibana_beseda[i] == self.beseda[i]:
zadetki += 1
self.ugibanja.append((ugibana_beseda, zadetki))
self.tocke -= 10
if ugibana_beseda == self.beseda:
self.stanje = ZMAGA
return ZMAGA
elif self.tocke < 0:
self.stanje = PORAZ
return PORAZ
else:
self.stanje = IGRANJE
return IGRANJE
>>>>>>> 80858d2af68bead28a54e3714cc7cd06b1d21285
with open('/Users/pavlanovak/Desktop/uvp 2021/Iskanje-besed-UVP/Besede.txt', 'r') as f:
bazen_besed = [beseda.strip().upper() for beseda in f.readlines()]
def nova_igra():
beseda = random.choice(bazen_besed)
<<<<<<< HEAD
return Igra(beseda, [])
=======
return Igra(beseda)
class Ugibanje:
def __init__(self, stanje, datoteka_z_besedami):
self.igre = {}
self.datoteka_s_stanjem = stanje
self.datoteka_z_besedami = datoteka_z_besedami
def prost_id_igre(self):
if self.igre == {}:
return 0
else:
return max(self.igre.keys()) + 1
def nova_igra(self):
self.nalozi_igre_iz_datoteke()
with open(self.datoteka_z_besedami, 'r') as f:
bazen_besed = [beseda.strip().upper() for beseda in f.readlines()]
igra = nova_igra()
beseda = random.choice(bazen_besed)
igra = Igra(beseda)
id_igre = self.prost_id_igre()
self.igre[id_igre] = (igra, ZACETEK)
self.zapisi_igro_v_datoteko()
return id_igre
def zapisi_igro_v_datoteko(self):
with open(self.datoteka_s_stanjem, 'w') as f:
igre_predelano = {id_igre : ((igra.beseda, igra.ugibanja, igra.stanje, igra.tocke), stanje) for (id_igre, (igra, stanje)) in self.igre.items()}
json.dump(igre_predelano, f)
def nalozi_igre_iz_datoteke(self):
with open(self.datoteka_s_stanjem, 'r') as f:
igre_predelano = json.load(f)
self.igre = { int(id_igre): (Igra(beseda, ugibanja, stanje, tocke), stanje) for (id_igre, ((beseda, ugibanja, stanje, tocke), stanje)) in igre_predelano.items()}
def ugibaj(self, id_igre, beseda):
self.nalozi_igre_iz_datoteke()
(igra, _) = self.igre[id_igre]
stanje = igra.ugibaj(beseda)
self.igre[id_igre] = (igra, stanje)
self.zapisi_igro_v_datoteko()
>>>>>>> 80858d2af68bead28a54e3714cc7cd06b1d21285
|
StarcoderdataPython
|
5152498
|
"""Abstract Handler with helper methods."""
from clang.cindex import CursorKind, TypeKind
from ctypeslib.codegen import typedesc
from ctypeslib.codegen.util import log_entity
import logging
log = logging.getLogger('handler')
class CursorKindException(TypeError):
"""When a child node of a VAR_DECL is parsed as an initialization value,
    even though it is not actually part of that initialization value."""
pass
class InvalidDefinitionError(TypeError):
"""When a structure is invalid in the source code, sizeof, alignof returns
    negative values. We detect it and do our best."""
pass
class InvalidTranslationUnitException(TypeError):
"""When a translation unit is invalid"""
pass
class DuplicateDefinitionException(KeyError):
"""When we encounter a duplicate declaration/definition name."""
pass
################################################################
class ClangHandler(object):
"""
Abstract class for handlers.
"""
def __init__(self, parser):
self.parser = parser
self._unhandled = []
def register(self, name, obj):
return self.parser.register(name, obj)
def get_registered(self, name):
return self.parser.get_registered(name)
def is_registered(self, name):
return self.parser.is_registered(name)
def remove_registered(self, name):
return self.parser.remove_registered(name)
def set_location(self, obj, cursor):
""" Location is also used for codegeneration ordering."""
if (hasattr(cursor, 'location') and cursor.location is not None and
cursor.location.file is not None):
obj.location = (cursor.location.file.name, cursor.location.line)
return
def set_comment(self, obj, cursor):
""" If a comment is available, add it to the typedesc."""
if isinstance(obj, typedesc.T):
obj.comment = cursor.brief_comment
return
def make_python_name(self, name):
"""Transforms an USR into a valid python name."""
# FIXME see cindex.SpellingCache
for k, v in [('<', '_'), ('>', '_'), ('::', '__'), (',', ''), (' ', ''),
("$", "DOLLAR"), (".", "DOT"), ("@", "_"), (":", "_"),
('-', '_')]:
if k in name: # template
name = name.replace(k, v)
# FIXME: test case ? I want this func to be neutral on C valid
# names.
if name.startswith("__"):
return "_X" + name
if len(name) == 0:
pass
elif name[0] in "01234567879":
return "_" + name
return name
def _make_unknown_name(self, cursor):
        '''Creates a name for an unnamed type'''
parent = cursor.lexical_parent
pname = self.get_unique_name(parent)
log.debug('_make_unknown_name: Got parent get_unique_name %s',pname)
# we only look at types declarations
_cursor_decl = cursor.type.get_declaration()
        # we use the field index from the parent record to differentiate
        # between unnamed siblings of the same struct
_i = 0
found = False
# Look at the parent fields to find myself
for m in parent.get_children():
# FIXME: make the good indices for fields
log.debug('_make_unknown_name child %d %s %s %s',_i,m.kind, m.type.kind,m.location)
if m.kind not in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL,
CursorKind.CLASS_DECL]:#,
#CursorKind.FIELD_DECL]:
continue
if m == _cursor_decl:
found = True
break
_i+=1
if not found:
raise NotImplementedError("_make_unknown_name BUG %s" % cursor.location)
# truncate parent name to remove the first part (union or struct)
_premainer = '_'.join(pname.split('_')[1:])
name = '%s_%d'%(_premainer,_i)
return name
def get_unique_name(self, cursor):
"""get the spelling or create a unique name for a cursor"""
name = ''
if cursor.kind in [CursorKind.UNEXPOSED_DECL]:
return ''
# covers most cases
name = cursor.spelling
if cursor.kind == CursorKind.CXX_BASE_SPECIFIER:
name = cursor.type.spelling
# if its a record decl or field decl and its type is unnamed
if name == '':
# a unnamed object at the root TU
if (cursor.semantic_parent
and cursor.semantic_parent.kind == CursorKind.TRANSLATION_UNIT):
name = self.make_python_name(cursor.get_usr())
log.debug('get_unique_name: root unnamed type kind %s',cursor.kind)
elif cursor.kind in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL,
CursorKind.CLASS_DECL,CursorKind.FIELD_DECL]:
name = self._make_unknown_name(cursor)
log.debug('Unnamed cursor type, got name %s',name)
else:
log.debug('Unnamed cursor, No idea what to do')
#import code
#code.interact(local=locals())
return ''
if cursor.kind in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL,
CursorKind.CLASS_DECL, CursorKind.CXX_BASE_SPECIFIER]:
names= {CursorKind.STRUCT_DECL: 'struct',
CursorKind.UNION_DECL: 'union',
CursorKind.CLASS_DECL: 'struct',
CursorKind.TYPE_REF: '',
CursorKind.CXX_BASE_SPECIFIER: 'struct'
}
name = '%s_%s'%(names[cursor.kind],name)
log.debug('get_unique_name: name "%s"',name)
return name
def is_fundamental_type(self, t):
return (not self.is_pointer_type(t) and
t.kind in self.parser.ctypes_typename.keys())
def is_pointer_type(self, t):
return t.kind == TypeKind.POINTER
def is_array_type(self, t):
return (t.kind == TypeKind.CONSTANTARRAY or
t.kind == TypeKind.INCOMPLETEARRAY or
t.kind == TypeKind.VARIABLEARRAY or
t.kind == TypeKind.DEPENDENTSIZEDARRAY)
def is_unexposed_type(self, t):
return t.kind == TypeKind.UNEXPOSED
def is_literal_cursor(self, t):
return (t.kind == CursorKind.INTEGER_LITERAL or
t.kind == CursorKind.FLOATING_LITERAL or
t.kind == CursorKind.IMAGINARY_LITERAL or
t.kind == CursorKind.STRING_LITERAL or
t.kind == CursorKind.CHARACTER_LITERAL)
def get_literal_kind_affinity(self, literal_kind):
        ''' return the list of fundamental types for which this
        literal_kind is adequate'''
if literal_kind == CursorKind.INTEGER_LITERAL:
return [TypeKind.USHORT, TypeKind.UINT, TypeKind.ULONG,
TypeKind.ULONGLONG, TypeKind.UINT128,
TypeKind.SHORT, TypeKind.INT, TypeKind.LONG,
TypeKind.LONGLONG, TypeKind.INT128, ]
elif literal_kind == CursorKind.STRING_LITERAL:
return [TypeKind.CHAR16, TypeKind.CHAR32, TypeKind.CHAR_S,
TypeKind.SCHAR, TypeKind.WCHAR] # DEBUG
elif literal_kind == CursorKind.CHARACTER_LITERAL:
return [TypeKind.CHAR_U, TypeKind.UCHAR]
elif literal_kind == CursorKind.FLOATING_LITERAL:
return [TypeKind.FLOAT, TypeKind.DOUBLE, TypeKind.LONGDOUBLE]
elif literal_kind == CursorKind.IMAGINARY_LITERAL:
return []
return []
def get_ctypes_name(self, typekind):
return self.parser.get_ctypes_name(typekind)
def get_ctypes_size(self, typekind):
return self.parser.get_ctypes_size(typekind)
def parse_cursor(self, cursor):
return self.parser.parse_cursor(cursor)
def parse_cursor_type(self, _cursor_type):
return self.parser.parse_cursor_type(_cursor_type)
################################
# do-nothing element handlers
@log_entity
def _pass_through_children(self, node, **args):
for child in node.get_children():
self.parser.startElement(child)
return True
def _do_nothing(self, node, **args):
name = self.get_unique_name(node)
#import code
# code.interact(local=locals())
log.warning('_do_nothing for %s/%s',node.kind.name, name)
return True
###########################################
# TODO FIXME: 100% cursor/type Kind coverage
def __getattr__(self, name, **args):
if name not in self._unhandled:
log.warning('%s is not handled',name)
self._unhandled.append(name)
return self._do_nothing
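        # e.g. a lookup of an unhandled handler name such as self.SOME_KIND(node) logs
        # "SOME_KIND is not handled" once and then falls through to _do_nothing.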
|
StarcoderdataPython
|
4929451
|
<filename>tests/vendin_machine/test_vending_machine.py
import pytest
from vending_machine.hoge.vending_machine import VendingMachine
# Test that money can be inserted into the vending machine
def test_insert_money():
vending_machine = VendingMachine()
vending_machine.insert(100)
# [Vending machine features] 10 yen, 100 yen, XX
## Test: accept the specified amounts and raise an error for anything else
insert_money_list = [
(10),
(50),
(100),
(500),
(1000),
]
@pytest.mark.parametrize("money", insert_money_list)
def test_valid_money(money):
    """
    Test function confirming that a valid amount is accepted
    """
    # Call the vending machine's function that checks the inserted amount
result = VendingMachine().check_insert_money(money)
assert result
def test_invalid_money():
    """
    Test function confirming that an invalid amount is rejected
    """
insert_money = 200
    # Call the vending machine's function that checks the inserted amount
result = VendingMachine().check_insert_money(insert_money)
assert not result
def test_multi_insert_money():
"""
    Insert money multiple times
"""
vending_machine = VendingMachine()
money = {10: 1, 50: 0, 100: 1, 500: 0, 1000: 0}
vending_machine.insert(money)
assert True
def test_total_money():
"""
    Total of the inserted money
"""
vending_machine = VendingMachine()
money = {10: 1, 50: 0, 100: 1, 500: 0, 1000: 0}
vending_machine.insert(money)
actual = vending_machine.get_total()
expected = 0
assert actual == expected
# Multiple insertions
# Insertion and total of the inserted amounts
# insert_money = [100,100]
# def test_get_num_money():
# assert count(insert_mon)
# def test_get_total_money():
# assert sum(insert_money)
# Allow the insert method to be called multiple times; test that money is totaled across insertions
##
# Refund XX
# Test that refunds work and that the inserted amount is returned
|
StarcoderdataPython
|
3585720
|
# Generated by Django 4.0.1 on 2022-01-19 05:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('management', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='employee',
name='emp_id',
field=models.CharField(default='emp211057', max_length=70),
),
]
|
StarcoderdataPython
|
5027374
|
from appversion.views import VersionAPIView
from django.conf.urls import url
from ifns.views import GetIfnsRequisitesByCode
urlpatterns = [
url(
r'^get_ifns_requisites_by_code/(?P<code>[A-Za-z0-9]+)/$',
GetIfnsRequisitesByCode.as_view(),
name='get_ifns_requisites_by_code'
),
url(r'^version/$', VersionAPIView.as_view(), name='version'),
]
|
StarcoderdataPython
|
6549041
|
import itertools as it
from typing import Any, Iterable, List, Tuple, Union

import numpy as np

# NOTE: `hp` (used below as hp.insert_into_tuple) is assumed to be this project's own
# helper module; its actual import is not shown in this fragment.

def diagonals_inds(dim: int, size: int) -> List[Tuple]:
    # e.g. in 2 dimensions with size = 3:
# 1,1 : 3,3
# 1,3 : 3,1
# 3,1 : 1,3
# 3,3 : 1,1
    # get a list of all corners whose index in the first dimension is 0
corners_all = it.product([0, size - 1], repeat = dim)
corners_0 = [corner for corner in corners_all if corner[0] == 0]
diagonals = []
for corner in corners_0:
diagonal = []
diagonal.append(corner)
# add rest of diagonal
for i in range(1, size):
tmp = tuple(c - i for c in corner)
inds = tuple(abs(t) for t in tmp)
diagonal.append(inds)
diagonals.append(diagonal)
return diagonals
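# Worked example of the function above (derived by hand from the loop):
#   diagonals_inds(2, 3) -> [[(0, 0), (1, 1), (2, 2)], [(0, 2), (1, 1), (2, 0)]]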
def lines_inds(dim: int, size: int, flatten: bool = True) -> \
Tuple[Union[List[Tuple[int]], List[List[Tuple[int]]]], int]:
lines = []
count = 0
    # loop over the number of dimensions in which the line varies
for i in range(dim):
diagonals = diagonals_inds(i + 1, size)
        # loop over all possible choices of the i+1 dimensions that the line spans
for j in it.combinations(range(dim), r = i + 1):
for diagonal in diagonals:
                # the remaining dimensions can each take any fixed position
for position in it.product(range(size), repeat = dim - i - 1):
                    # these are the other dimensions
od = set(range(dim)) - set(j)
# for each cell in diagonal
diag_ext = []
for c in diagonal:
diag_ext.append(hp.insert_into_tuple(c, od, position))
lines.append(diag_ext)
#lines.extend(diag_ext) if flatten else lines.append(diag_ext)
return lines, count
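# e.g. lines_inds(2, 3) enumerates the 8 winning lines of a 3x3 board
# (3 rows, 3 columns, 2 diagonals); note that `count` is returned unchanged (0) here.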
def get_lines_inds(lines: List[np.ndarray], dim: int) -> List[Tuple[Tuple[int]]]:
# assume flat list of lines
size = lines[0].size
shape = [size] * dim
lines_inds = []
for line in lines:
line_inds = []
for j in range(size):
cell_inds = np.unravel_index(line[j], shape)
line_inds.append(cell_inds)
lines_inds.append(tuple(line_inds))
return lines_inds
def insert_into_tuple(tup: Tuple, pos: Union[int, Iterable[int]],
val: Union[Any, Iterable[Any]]) -> Tuple:
""" Insert values into a tuple.
Parameters
----------
tup : tuple
the tuple into which values are to be inserted
pos : Union[int, Iterable[int]]
The positions into which values are to be inserted
val : Union[Any, Iterable[Any]]
The values corresponding to the positions in `pos`
Returns
-------
Tuple:
A copy of `tup` with values inserted.
Raises
------
ValueError
If length of `pos` is not equal to length of `val`
See Also
--------
list.insert
Notes
-----
`tup` is converted to a list and the list.insert method is used to
insert values. the list is then converted to a tuple and returned.
Examples
--------
>>> tup = (0, 1, 2, 3)
>>> pos = (5, 1)
>>> val = (9, 8)
>>> insert_into_tuple(tup, pos, val)
(0, 8, 1, 2, 3, 9)
>>> insert_into_tuple(tup, (), ())
(0, 1, 2, 3)
"""
tl = list(tup)
try:
# first assume pos and val are iterable and not single integers
if len(pos) != len(val):
raise ValueError("pos and val must be of the same length")
if len(pos) == 0:
return tup
        # sort pos from low to high; sort val correspondingly
stl = list(zip(*sorted(zip(pos, val))))
for p, v in zip(stl[0], stl[1]):
tl.insert(p, v)
except:
        # perhaps pos and val are integers
tl.insert(pos, val)
return tuple(tl)
def unique(it: Iterable[Any]) -> bool:
    """ check if all elements of an iterable are unique
Parameters
----------
it : Iterable[Any]
The iterable to be checked for unique elements
Returns
-------
bool:
        True if all elements of `it` are unique; False otherwise
Notes
-----
Iterates over every element until a match is found (or not
found if all elements are unique).
If the elements of `it` are hashable then code such as
    len(it) == len(set(it)) is more efficient.
Examples
--------
>>> it = [[0, 1], [0,2], [0,1]]
>>> unique(it)
False
>>> it = [[0, 1], [0,2], [1,2]]
>>> unique(it)
True
"""
seen = []
return not any(i in seen or seen.append(i) for i in it)
|
StarcoderdataPython
|
144317
|
<reponame>shivp950/InnerEye-DeepLearning
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import os
import subprocess
import sys
from pathlib import Path
from typing import List
def run_mypy(files: List[str]) -> int:
"""
Runs mypy on the specified files, printing whatever is sent to stdout (i.e. mypy errors).
Because of an apparent bug in mypy, we run mypy in --verbose mode, so that log lines are printed to
stderr. We intercept these, and assume that any files mentioned in them have been processed.
We run mypy repeatedly on the files that were not mentioned until there are none remaining, or until
no further files are mentioned in the logs.
:param files: list of .py files to check
:return: maximum return code from any of the mypy runs
"""
return_code = 0
iteration = 1
while files:
dirs = sorted(set(os.path.dirname(file) or "." for file in files))
print(f"Iteration {iteration}: running mypy on {len(files)} files in {len(dirs)} directories")
# Set of files we are hoping to see mentioned in the mypy log.
files_to_do = set(files)
for index, dir in enumerate(dirs, 1):
# Adding "--no-site-packages" might be necessary if there are errors in site packages,
# but it may stop inconsistencies with site packages being spotted.
command = ["mypy", "--config=mypy.ini", "--verbose", dir]
print(f"Processing directory {index:2d} of {len(dirs)}: {dir}")
# We pipe stdout and then print it, otherwise lines can appear in the wrong order in builds.
process = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
return_code = max(return_code, process.returncode)
for line in process.stdout.split("\n"):
if line and not line.startswith("Success: "):
print(line)
# Remove from files_to_do every Python file that's reported as processed in the log.
for line in process.stderr.split("\n"):
tokens = line.split()
if len(tokens) == 4 and tokens[0] == "LOG:" and tokens[1] == "Parsing":
name = tokens[2]
elif len(tokens) == 7 and tokens[:4] == ["LOG:", "Metadata", "fresh", "for"]:
name = tokens[-1]
else:
continue
if name.endswith(".py"):
if name.startswith("./") or name.startswith(".\\"):
name = name[2:]
files_to_do.discard(name)
# If we didn't manage to discard any files, there's no point continuing. This should not occur, but if
# it does, we don't want to continue indefinitely.
if len(files_to_do) == len(files):
print("No further files appear to have been checked! Unchecked files are:")
for file in sorted(files_to_do):
print(f" {file}")
return_code = max(return_code, 1)
break
files = sorted(files_to_do)
iteration += 1
return return_code
def main() -> int:
"""
Runs mypy on the files in the argument list, or every *.py file under the current directory if there are none.
"""
current_dir = Path(".")
if sys.argv[1:]:
file_list = [Path(arg) for arg in sys.argv[1:] if arg.endswith(".py")]
else:
# We don't want to check the files in the submodule if any, partly because they should already have
# been checked in the original repo, and partly because we don't want the module name clashes mypy would
# otherwise report.
submodule_name = "innereye-deeplearning"
files = set(current_dir.glob('*.py'))
for path in current_dir.glob('*'):
if path.name != submodule_name:
files.update(path.rglob('*.py'))
file_list = list(files)
return run_mypy(sorted(str(file) for file in file_list))
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
8002146
|
<gh_stars>10-100
import concurrent
import geocoder
import json
import s3fs
import pandas as pd
import urllib
import logging
import os
from datetime import datetime
from sqlalchemy import create_engine
import zlib
from itertools import zip_longest
import time
FORMAT = '%(asctime)-15s %(levelname)-6s %(message)s'
DATE_FORMAT = '%b %d %H:%M:%S'
formatter = logging.Formatter(fmt=FORMAT, datefmt=DATE_FORMAT)
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
GOOGLE_MAPS_API_KEY = os.getenv('GOOGLE_MAPS_API_KEY')
engine = create_engine(os.getenv('CRAIGGER_CONN'))
def geocode(mapaddress, geo_region, post_hood,
min_confidence=9, mykey=GOOGLE_MAPS_API_KEY):
try:
mapaddress2 = mapaddress if ' near ' not in mapaddress else mapaddress.split('near')[0]
post_hood2 = post_hood.replace('(', '').replace(')', '')
start = time.process_time()
q = '{0} {1} {2}'.format(mapaddress2, geo_region, post_hood2)
g = geocoder.google(q, key=mykey)
#logger.info("processed_time: {0} || {1} || google address: {2}, confidence: {3}".format(
# round(time.process_time()-start,4), q, g.address, g.confidence))
return {'mapaddress':mapaddress, 'geo.region':geo_region,'post_hood':post_hood, 'address':g.address, 'quality':g.quality, 'lat':g.lat, 'lng':g.lng, 'zip':g.postal, 'craig_address_hash':address_hash((mapaddress,geo_region,post_hood)), 'gconfidence': g.confidence}
except:
logger.exception(post_hood)
def process_chunk(chunk):
def geo_helper(x):
if x is None:
return
try:
if all(x):
return geocode(x[0], x[1], x[2])
else:
logger.info("None in geo input: {}".format(x))
except:
logger.exception('geo ehhh')
results = []
#not threaded ok.
#for x in chunk:
# result = geo_helper(x)
# if result:
# results.append(result)
#logger.info('chunk done! size was: {}'.format(len(chunk)))
#queue.full ....
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
for result in executor.map(geo_helper, chunk):
if result:
results.append(result)
geo_results = pd.DataFrame(results)
geo_results = geo_results.dropna()
ts = time.process_time()
# psycopg2 connections are thread safe but not across processes "level 2 thread safe"
little_engine = create_engine(os.getenv('CRAIGGER_CONN'))
geo_results.to_sql('geocoder_results', little_engine, if_exists='append', index=False)
logger.info('wrote {0} geocoder results. time: {1}'.format(len(geo_results), round(time.process_time()-ts,5)))
return geo_results
def build_address_map(limit=5):
df = pd.read_sql('SELECT mapaddress, "geo.region", post_hood, data_accuracy from cragprod WHERE "geo.region" = \'US-CA\' and data_accuracy > 9 LIMIT {}'.format(limit), engine)
df = df[df['mapaddress'].notnull()]
df = df.drop_duplicates()
try:
geo_already_done = pd.read_sql('SELECT * FROM geocoder_results', engine)
except:
geo_already_done = None
cache_hits = 0
geo_input = set()
for idx,row in df.iterrows():
try:
craig_hash = address_hash((row['mapaddress'], row['geo.region'], row['post_hood']))
if geo_already_done is None or craig_hash not in geo_already_done['craig_address_hash'].values:
geo_input.add((row['mapaddress'], row['geo.region'], row['post_hood']))
else:
cache_hits += 1
logger.debug('cache hit: {}'.format(craig_hash))
except KeyError:
logger.exception('eh')
geo_chunks = chunks(geo_input, 25)
logger.warning('starting google geocoder, df.shape: {0} rows: {1} cache_hits: {2}'.format(df.shape, len(geo_input), cache_hits))
results = []
with concurrent.futures.ProcessPoolExecutor(max_workers=3) as pexecutor:
for result in pexecutor.map(process_chunk, geo_chunks):
try:
if len(result) > 0:
results.append(result)
except:
logger.exception('??')
if not results:
return geo_already_done
geo_results = pd.concat(results)
if geo_already_done is not None:
logger.info('stacking cached')
geo_results = pd.concat([geo_results, geo_already_done])
print(geo_results)
return geo_results
def address_hash(x):
try:
return zlib.adler32((x[0]+x[1]+x[2]).encode('utf-8'))
except:
return 0
def chunks(iterable, n):
"""Yield successive n-sized chunks from iterable"""
return zip_longest(*[iter(iterable)]*n, fillvalue=None)
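# e.g. chunks([1, 2, 3, 4, 5], 2) yields (1, 2), (3, 4), (5, None); the trailing None
# fill values are skipped by geo_helper inside process_chunk.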
def main():
# this uses geocoder api but saves to my db
dfgeo = build_address_map(limit=45000)
print(dfgeo)
# query the postprocessed db
logger.info('query the postprocessed db...')
dfrent = pd.read_sql('SELECT post_price, post_date, post_bedroom_count, post_sqft, price_per_sqft, netloc, housing, furnished, crawl_date, mapaddress, "geo.region", post_hood from cragprod WHERE "geo.region" = \'US-CA\' and data_accuracy > 9', engine)
dfrent = dfrent.drop_duplicates()
dfrent['craig_address_hash'] = dfrent[['mapaddress', 'geo.region', 'post_hood']].apply(address_hash,axis=1)
logger.info('dfg = pd.merge(dfgeo,dfrent)')
dfg = pd.merge(dfgeo,dfrent)
print(dfg)
logger.info("dfg.to_sql('joined_results', engine, if_exists='append', index=False)")
dfg.to_sql('joined_results', engine, if_exists='append', index=False)
return 0
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
89624
|
<gh_stars>0
import subprocess
import click
@click.command()
def cli():
"""
Stop all services.
:return: Subprocess call result
"""
cmd = 'pkill honcho && docker-compose stop'
return subprocess.call(cmd, shell=True)
|
StarcoderdataPython
|
381011
|
<filename>20211202/4.py
# [0: icID, 1: cardType, 2:tradeType, 3: UpLine, 4: UpTime, 5: UpStation,
# 6: DownLine, 7: DownTime, 8: DownStation]
import time
data_file = r"20211202\Subway_20180301\Subway_20180301.txt"
# data_file = r"20211202\Subway_20180301\Subway_20190301_top100000.txt"
# data_file = "test.txt"
out_file = r"20211202\PeopleInSubwayTime.txt"
timeslot = {}
# strip the leading "SUB," and trailing ",NM\n" markers from a record line
# (note: lstrip/rstrip remove character sets, not exact prefixes/suffixes)
def lineProcess(l: str):
lst = l.lstrip('SUB,').rstrip(',NM\n').split(',')
return lst
# calculate ride time, return None if data is invalid
def rideTime(l: list):
if (l[4][:8] != "20180301") or (l[7][:8] != "20180301"):
print(l[4], l[4][:8], l[7], l[7][:8])
# print("wrong date")
return None
upTime = time.mktime(time.strptime(l[4],"%Y%m%d%H%M%S"))
downTime = time.mktime(time.strptime(l[7],"%Y%m%d%H%M%S"))
timeUsed = downTime - upTime
    if (timeUsed <= 0):
# print("wrong time")
return None
return timeUsed
# convert second to minutes with rounding off
def sec2min(s: float):
return round(s / 60)
# record ride time to its corresponding dictionary item
def record(t: int):
global timeslot
if t > 120:
t = 120
if t in timeslot.keys():
timeslot[t] += 1
else:
timeslot[t] = 1
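# e.g. sec2min(95) -> 2 (rounded), and record() bins any ride longer than 120 minutes
# into the 120 slot.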
with open(data_file, "r") as f:
line = f.readline() # Discard first line
line = f.readline() # Read in second line for processing
while line != '':
lineLst = lineProcess(line)
rtime = rideTime(lineLst)
if rtime:
record(sec2min(rtime))
line = f.readline()
with open(out_file, "w") as f:
for i in range(121):
if i in timeslot.keys():
f.write("{} {}\n".format(i, timeslot[i]))
|
StarcoderdataPython
|
9776790
|
number = list(map(int, input().strip().split()))
ss = ".|."
string = "WELCOME"
num2, num3 = 1, number[0] - 2
for num in range(1, number[0] + 1):
if int((number[0] - 1) / 2) > num - 1:
print("-" * int((number[1] - (len(ss) * num2)) / 2) + ss * (1 * num2) + "-" * int((number[1] - (len(ss) * num2)) / 2))
num2 += 2
if int((number[0] - 1) / 2) == num:
print("-" * int((number[1] - (len(string) - 1)) / 2) + string + "-" * int((number[1] - (len(string) - 1)) / 2))
if int((number[0] - 1) / 2) < num + 1:
print("-" * int((number[1] - (len(ss) * num3)) / 2) + ss * (1 * num3) + "-" * int((number[1] - (len(ss) * num3)) / 2))
num3 -= 2
if num3 <= 0:
break
|
StarcoderdataPython
|
1645375
|
stages = ['''
+---+
| |
O |
/|\ |
/ \ |
|
=========
''', '''
+---+
| |
O |
/|\ |
/ |
|
=========
''', '''
+---+
| |
O |
/|\ |
|
|
=========
''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========
''', '''
+---+
| |
O |
|
|
|
=========
''', '''
+---+
| |
|
|
|
|
=========
''']
from random import choice
word_list = ["aardvark", "baboon", "camel"]
chosen_word = choice(word_list)
# Creating a display
display = []
for _ in range(len(chosen_word)):
display += "_"
#print(display)
lives = 6
game_over = False
while not game_over:
guess = input('Guess a letter: ')
# Show the letter chosen on display
for position in range(len(chosen_word)):
letter = chosen_word[position]
if letter == guess:
display[position] = letter
if guess not in chosen_word:
lives -= 1
if lives == 0:
game_over = True
print('You Lose')
#Join all the elements in the list and turn it into a String.
print(f"{' '.join(display)}")
# Condition of stop for the loop
if "_" not in display:
game_over = True
print('You Win')
print(stages[lives])
|
StarcoderdataPython
|
11399935
|
<gh_stars>1-10
from transformers import AutoTokenizer, DataCollatorWithPadding
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader
import datasets
from loguru import logger
from typing import List, Union, Dict
from functools import partial
def _collate_fn(features, tokenizer: AutoTokenizer):
encoder_features = [
{
key[len("encoder_") :]: value
for key, value in feat.items()
if key.startswith("encoder_")
}
for feat in features
]
decoder_features = [
{
key[len("decoder_") :]: value
for key, value in feat.items()
if key.startswith("decoder_")
}
for feat in features
]
encoder_features = tokenizer.pad(
encoder_features, padding=True, return_tensors="pt"
)
decoder_features = tokenizer.pad(
decoder_features, padding=True, return_tensors="pt"
)
decoder_features["input_ids"] = torch.where(
decoder_features["input_ids"] == tokenizer.pad_token_id,
-100,
decoder_features["input_ids"],
)
batch = {
**{"encoder_" + key: value for key, value in encoder_features.items()},
**{"decoder_" + key: value for key, value in decoder_features.items()},
}
return batch
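# Illustration of the collation above: two features whose encoder_input_ids have lengths
# 5 and 7 are padded up to length 7, while padded decoder positions are set to -100 so
# the loss ignores them; keys keep their "encoder_"/"decoder_" prefixes in the batch.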
class MultiNewsLightningDataModule(pl.LightningDataModule):
def __init__(
self,
tokenizer: AutoTokenizer,
batch_size: int,
num_workers: int,
max_length: int,
):
super().__init__()
self.tokenizer = tokenizer
self.batch_size = batch_size
self.num_workers = num_workers
self.max_length = max_length
self.collate_fn = partial(_collate_fn, tokenizer=self.tokenizer)
def prepare_data(self):
logger.info("Multi_news dataset loading....")
self.dataset = datasets.load_dataset("multi_news")
logger.info("Loading of multi_news datasets completed.")
self.train, self.validation, self.test = (
self.dataset["train"],
self.dataset["validation"],
self.dataset["test"],
)
self.columns = [
"encoder_input_ids",
"encoder_attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
self.train = self._data_processing(self.train, "Training")
self.validation = self._data_processing(self.validation, "Validation")
self.test = self._data_processing(self.test, "Testing")
def _data_processing(self, dataset: datasets.arrow_dataset.Dataset, name: str):
logger.info(f"{name} data transformation...")
dataset = dataset.map(self._transform)
dataset.set_format(type="torch", columns=self.columns)
logger.info(f"{name} data transformation completed.")
return dataset
def _transform(self, item):
doc, summary = item["document"], item["summary"]
doc_output = self.tokenizer(doc, truncation=True)
summary_output = self.tokenizer(summary, truncation=True)
output = {
"encoder_input_ids": doc_output["input_ids"],
"encoder_attention_mask": doc_output["attention_mask"],
"decoder_input_ids": summary_output["input_ids"],
"decoder_attention_mask": summary_output["attention_mask"],
}
return output
def train_dataloader(
self,
) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]:
return DataLoader(
self.train,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=True,
collate_fn=self.collate_fn,
)
def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.validation,
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
)
def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.test,
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
)
class SQuADLightningDataModule(pl.LightningDataModule):
def __init__(
self,
tokenizer: AutoTokenizer,
batch_size: int,
num_workers: int,
max_length: int,
):
super().__init__()
self.tokenizer = tokenizer
self.batch_size = batch_size
self.num_workers = num_workers
self.max_length = max_length
self.collate_fn = partial(_collate_fn, tokenizer=self.tokenizer)
self.prefix = "question: "
def _transform(self, item):
context, question, answer = (
item["context"],
item["question"],
item["answers"]["text"][0],
)
input_text = self.prefix + answer + self.tokenizer.cls_token + context
context = self.tokenizer(input_text, truncation=True)
question = self.tokenizer(question, truncation=True)
output = {
"encoder_input_ids": context["input_ids"],
"encoder_attention_mask": context["attention_mask"],
"decoder_input_ids": question["input_ids"],
"decoder_attention_mask": question["attention_mask"],
}
return output
def _data_processing(self, dataset: datasets.arrow_dataset.Dataset, name: str):
logger.info(f"{name} data transformation...")
dataset = dataset.map(self._transform)
dataset.set_format(type="torch", columns=self.columns)
logger.info(f"{name} data transformation completed.")
return dataset
def prepare_data(self) -> None:
logger.info("Loading SQuAD dataset...")
self.dataset = datasets.load_dataset("squad")
self.train, self.validation = self.dataset["train"], self.dataset["validation"]
self.columns = [
"encoder_input_ids",
"encoder_attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
self.train = self._data_processing(self.train, "Training")
self.validation = self._data_processing(self.validation, "Validation")
def train_dataloader(
self,
) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]:
return DataLoader(
self.train,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
)
def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.validation,
batch_size=self.batch_size,
num_workers=self.num_workers,
collate_fn=self.collate_fn,
)
def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
return self.val_dataloader()
|
StarcoderdataPython
|
3292046
|
"""
Every valid email consists of a local name and a domain name, separated by the '@' sign.
Besides lowercase letters, the local name may contain one or more '.' or '+' characters:
periods in the local name are ignored, and everything after the first '+' is ignored.
Find the number of unique email addresses after these rules are applied.
"""
from typing import List
class Solution:
def numUniqueEmails(self, emails: List[str]) -> int:
def validemail(email: str):
local_name = email.split("@")[0]
domain_name = email.split("@")[1]
res = ""
if local_name:
for c in local_name:
if c.isalpha():
res += c
if c == "+":
break
return res + "@" + domain_name
else:
return None
ans = set()
for email in emails:
ans.add(validemail(email))
# print(ans)
return len(ans)
emails = ["<EMAIL>","<EMAIL>+<EMAIL>","<EMAIL>+<EMAIL>"]
emails = ["<EMAIL>","<EMAIL>","<EMAIL>"]
s = Solution()
print(s.numUniqueEmails(emails))
|
StarcoderdataPython
|
111234
|
<filename>aiida/orm/implementation/django/calculation/job/__init__.py
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
import datetime
from django.db import transaction, IntegrityError
from django.db.models import Q
from aiida.common.utils import str_timedelta
from aiida.common.datastructures import sort_states, calc_states
from aiida.common.exceptions import ModificationNotAllowed, DbContentError
from aiida.backends.djsite.utils import get_automatic_user
from aiida.orm.group import Group
from aiida.orm.implementation.django.calculation import Calculation
from aiida.orm.implementation.general.calculation.job import (
AbstractJobCalculation)
from aiida.common.old_pluginloader import from_type_to_pluginclassname
from aiida.utils import timezone
class JobCalculation(AbstractJobCalculation, Calculation):
def _set_state(self, state):
"""
Set the state of the calculation.
Set it in the DbCalcState to have also the uniqueness check.
Moreover (except for the IMPORTED state) also store in the 'state'
attribute, useful to know it also after importing, and for faster
querying.
.. todo:: Add further checks to enforce that the states are set
in order?
:param state: a string with the state. This must be a valid string,
from ``aiida.common.datastructures.calc_states``.
:raise: ModificationNotAllowed if the given state was already set.
"""
from aiida.common.datastructures import sort_states
from aiida.backends.djsite.db.models import DbCalcState
if not self.is_stored:
raise ModificationNotAllowed("Cannot set the calculation state "
"before storing")
if state not in calc_states:
raise ValueError(
"'{}' is not a valid calculation status".format(state))
old_state = self.get_state()
if old_state:
state_sequence = [state, old_state]
# sort from new to old: if they are equal, then it is a valid
# advance in state (otherwise, we are going backwards...)
if sort_states(state_sequence) != state_sequence:
raise ModificationNotAllowed("Cannot change the state from {} "
"to {}".format(old_state, state))
try:
with transaction.atomic():
new_state = DbCalcState(dbnode=self.dbnode, state=state).save()
except IntegrityError:
raise ModificationNotAllowed(
"Calculation pk= {} already transited through "
"the state {}".format(self.pk, state))
# For non-imported states, also set in the attribute (so that, if we
        # export, we can still see the original state the calculation had).
if state != calc_states.IMPORTED:
self._set_attr('state', state)
def get_state(self, from_attribute=False):
"""
Get the state of the calculation.
.. note:: this method returns the NOTFOUND state if no state
is found in the DB.
.. note:: the 'most recent' state is obtained using the logic in the
``aiida.common.datastructures.sort_states`` function.
.. todo:: Understand if the state returned when no state entry is found
in the DB is the best choice.
:param from_attribute: if set to True, read it from the attributes
(the attribute is also set with set_state, unless the state is set
to IMPORTED; in this way we can also see the state before storing).
:return: a string. If from_attribute is True and no attribute is found,
return None. If from_attribute is False and no entry is found in the
DB, return the "NOTFOUND" state.
"""
from aiida.backends.djsite.db.models import DbCalcState
if from_attribute:
return self.get_attr('state', None)
else:
if not self.is_stored:
return calc_states.NEW
else:
this_calc_states = DbCalcState.objects.filter(
dbnode=self).values_list('state', flat=True)
if not this_calc_states:
return None
else:
try:
most_recent_state = sort_states(this_calc_states)[0]
except ValueError as e:
raise DbContentError("Error in the content of the "
"DbCalcState table ({})".format(
e.message))
return most_recent_state
@classmethod
def _list_calculations_old(cls, states=None, past_days=None, group=None,
group_pk=None, all_users=False, pks=[],
relative_ctime=True):
"""
Return a string with a description of the AiiDA calculations.
.. todo:: does not support the query for the IMPORTED state (since it
checks the state in the Attributes, not in the DbCalcState table).
Decide which is the correct logic and implement the correct query.
:param states: a list of string with states. If set, print only the
calculations in the states "states", otherwise shows all.
Default = None.
:param past_days: If specified, show only calculations that were
created in the given number of past days.
:param group: If specified, show only calculations belonging to a
user-defined group with the given name.
Can use colons to separate the group name from the type,
as specified in :py:meth:`aiida.orm.group.Group.get_from_string`
method.
:param group_pk: If specified, show only calculations belonging to a
user-defined group with the given PK.
:param pks: if specified, must be a list of integers, and only
calculations within that list are shown. Otherwise, all
calculations are shown.
If specified, sets state to None and ignores the
value of the ``past_days`` option.")
:param relative_ctime: if true, prints the creation time relative from now.
(like 2days ago). Default = True
:param all_users: if True, list calculation belonging to all users.
Default = False
:return: a string with description of calculations.
"""
# I assume that calc_states are strings. If this changes in the future,
# update the filter below from dbattributes__tval to the correct field.
from aiida.backends.djsite.db.models import DbAuthInfo, DbAttribute
from aiida.daemon.timestamps import get_last_daemon_timestamp
if states:
for state in states:
if state not in calc_states:
return "Invalid state provided: {}.".format(state)
warnings_list = []
now = timezone.now()
if pks:
q_object = Q(pk__in=pks)
else:
q_object = Q()
if group is not None:
g_pk = Group.get_from_string(group).pk
q_object.add(Q(dbgroups__pk=g_pk), Q.AND)
if group_pk is not None:
q_object.add(Q(dbgroups__pk=group_pk), Q.AND)
if not all_users:
q_object.add(Q(user=get_automatic_user()), Q.AND)
if states is not None:
q_object.add(Q(dbattributes__key='state',
dbattributes__tval__in=states, ), Q.AND)
if past_days is not None:
now = timezone.now()
n_days_ago = now - datetime.timedelta(days=past_days)
q_object.add(Q(ctime__gte=n_days_ago), Q.AND)
calc_list_pk = list(
cls.query(q_object).distinct().values_list('pk', flat=True))
calc_list = cls.query(pk__in=calc_list_pk).order_by('ctime')
scheduler_states = dict(
DbAttribute.objects.filter(dbnode__pk__in=calc_list_pk,
key='scheduler_state').values_list(
'dbnode__pk', 'tval'))
# I do the query now, so that the list of pks gets cached
calc_list_data = list(
calc_list.filter(
# dbcomputer__dbauthinfo__aiidauser=F('user')
).distinct().order_by('ctime').values(
'pk', 'dbcomputer__name', 'ctime',
'type', 'dbcomputer__enabled',
'dbcomputer__pk',
'user__pk'))
list_comp_pk = [i['dbcomputer__pk'] for i in calc_list_data]
list_aiduser_pk = [i['user__pk']
for i in calc_list_data]
enabled_data = DbAuthInfo.objects.filter(
dbcomputer__pk__in=list_comp_pk, aiidauser__pk__in=list_aiduser_pk
).values_list('dbcomputer__pk', 'aiidauser__pk', 'enabled')
enabled_auth_dict = {(i[0], i[1]): i[2] for i in enabled_data}
states = {c.pk: c._get_state_string() for c in calc_list}
scheduler_lastcheck = dict(DbAttribute.objects.filter(
dbnode__in=calc_list,
key='scheduler_lastchecktime').values_list('dbnode__pk', 'dval'))
## Get the last daemon check
try:
last_daemon_check = get_last_daemon_timestamp('updater',
when='stop')
except ValueError:
last_check_string = ("# Last daemon state_updater check: "
"(Error while retrieving the information)")
else:
if last_daemon_check is None:
last_check_string = "# Last daemon state_updater check: (Never)"
else:
last_check_string = ("# Last daemon state_updater check: "
"{} ({})".format(
str_timedelta(now - last_daemon_check,
negative_to_zero=True),
timezone.localtime(last_daemon_check).strftime(
"at %H:%M:%S on %Y-%m-%d")))
disabled_ignorant_states = [
None, calc_states.FINISHED, calc_states.SUBMISSIONFAILED,
calc_states.RETRIEVALFAILED, calc_states.PARSINGFAILED,
calc_states.FAILED
]
if not calc_list:
return last_check_string
else:
# first save a matrix of results to be printed
res_str_list = [last_check_string]
str_matrix = []
title = ['# Pk', 'State', 'Creation',
'Sched. state', 'Computer', 'Type']
str_matrix.append(title)
len_title = [len(i) for i in title]
for calcdata in calc_list_data:
remote_state = "None"
calc_state = states[calcdata['pk']]
remote_computer = calcdata['dbcomputer__name']
try:
sched_state = scheduler_states.get(calcdata['pk'], None)
if sched_state is None:
remote_state = "(unknown)"
else:
remote_state = '{}'.format(sched_state)
if calc_state == calc_states.WITHSCHEDULER:
last_check = scheduler_lastcheck.get(calcdata['pk'],
None)
if last_check is not None:
when_string = " {}".format(
str_timedelta(now - last_check, short=True,
negative_to_zero=True))
verb_string = "was "
else:
when_string = ""
verb_string = ""
remote_state = "{}{}{}".format(verb_string,
sched_state,
when_string)
except ValueError:
raise
calc_module = \
from_type_to_pluginclassname(calcdata['type']).rsplit(".", 1)[0]
prefix = 'calculation.job.'
prefix_len = len(prefix)
if calc_module.startswith(prefix):
calc_module = calc_module[prefix_len:].strip()
if relative_ctime:
calc_ctime = str_timedelta(now - calcdata['ctime'],
negative_to_zero=True,
max_num_fields=1)
else:
calc_ctime = " ".join([timezone.localtime(
calcdata['ctime']).isoformat().split('T')[0],
timezone.localtime(calcdata[
'ctime']).isoformat().split(
'T')[1].split('.')[
0].rsplit(":", 1)[0]])
the_state = states[calcdata['pk']]
# decide if it is needed to print enabled/disabled information
# By default, if the computer is not configured for the
# given user, assume it is user_enabled
user_enabled = enabled_auth_dict.get(
(calcdata['dbcomputer__pk'],
calcdata['user__pk']), True)
global_enabled = calcdata["dbcomputer__enabled"]
enabled = "" if (user_enabled and global_enabled or
the_state in disabled_ignorant_states) else " [Disabled]"
str_matrix.append([calcdata['pk'],
the_state,
calc_ctime,
remote_state,
remote_computer + "{}".format(enabled),
calc_module
])
# prepare a formatted text of minimal row length (to fit in terminals!)
rows = []
for j in range(len(str_matrix[0])):
rows.append([len(str(i[j])) for i in str_matrix])
line_lengths = [str(max(max(rows[i]), len_title[i])) for i in
range(len(rows))]
fmt_string = "{:<" + "}|{:<".join(line_lengths) + "}"
for row in str_matrix:
res_str_list.append(fmt_string.format(*[str(i) for i in row]))
res_str_list += ["# {}".format(_) for _ in warnings_list]
return "\n".join(res_str_list)
|
StarcoderdataPython
|
1896290
|
<gh_stars>1-10
a = float(input('Enter the length of the 1st segment: '))
b = float(input('Enter the 2nd length: '))
c = float(input('Enter the 3rd length: '))
# | b - c | < a < b + c
# | a - c | < b < a + c
# | a - b | < c < a + b
if b - c < a < b + c:
    if a - c < b < a + c:
        if a - b < c < a + b:
            print('The segments {}, {} and {} form a TRIANGLE'.format(a, b, c))
        else:
            print('The segments do NOT form a TRIANGLE')
    else:
        print('The segments do NOT form a TRIANGLE')
else:
    print('The segments do NOT form a TRIANGLE')
# SIMPLER FORM
print('-=' * 100)
if a < b + c and b < c + a and c < b + a:
    print('THE SEGMENTS FORM A TRIANGLE')
else:
    print('IT IS NOT A TRIANGLE')
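# --- Editor's illustrative sketch: the same test wrapped in a reusable helper,
# using the strict triangle inequality on all three sides.
def is_triangle(a, b, c):
    return a < b + c and b < a + c and c < a + b

# is_triangle(3.0, 4.0, 5.0) -> True
# is_triangle(1.0, 2.0, 3.0) -> False (degenerate: the points are collinear)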
|
StarcoderdataPython
|
8045464
|
<gh_stars>1000+
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains common helpers for working with Android manifests."""
import hashlib
import os
import re
import shlex
import sys
import xml.dom.minidom as minidom
from util import build_utils
from xml.etree import ElementTree
ANDROID_NAMESPACE = 'http://schemas.android.com/apk/res/android'
TOOLS_NAMESPACE = 'http://schemas.android.com/tools'
DIST_NAMESPACE = 'http://schemas.android.com/apk/distribution'
EMPTY_ANDROID_MANIFEST_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'AndroidManifest.xml'))
# When normalizing for expectation matching, wrap these tags when they are long
# or else they become very hard to read.
_WRAP_CANDIDATES = (
'<manifest',
'<application',
'<activity',
'<provider',
'<receiver',
'<service',
)
# Don't wrap lines shorter than this.
_WRAP_LINE_LENGTH = 100
_xml_namespace_initialized = False
def _RegisterElementTreeNamespaces():
global _xml_namespace_initialized
if _xml_namespace_initialized:
return
_xml_namespace_initialized = True
ElementTree.register_namespace('android', ANDROID_NAMESPACE)
ElementTree.register_namespace('tools', TOOLS_NAMESPACE)
ElementTree.register_namespace('dist', DIST_NAMESPACE)
def ParseManifest(path):
"""Parses an AndroidManifest.xml using ElementTree.
Registers required namespaces, creates application node if missing, adds any
missing namespaces for 'android', 'tools' and 'dist'.
Returns tuple of:
doc: Root xml document.
manifest_node: the <manifest> node.
app_node: the <application> node.
"""
_RegisterElementTreeNamespaces()
doc = ElementTree.parse(path)
# ElementTree.find does not work if the required tag is the root.
if doc.getroot().tag == 'manifest':
manifest_node = doc.getroot()
else:
manifest_node = doc.find('manifest')
app_node = doc.find('application')
if app_node is None:
app_node = ElementTree.SubElement(manifest_node, 'application')
return doc, manifest_node, app_node
def SaveManifest(doc, path):
with build_utils.AtomicOutput(path) as f:
f.write(ElementTree.tostring(doc.getroot(), encoding='UTF-8'))
def GetPackage(manifest_node):
return manifest_node.get('package')
def AssertUsesSdk(manifest_node,
min_sdk_version=None,
target_sdk_version=None,
max_sdk_version=None,
fail_if_not_exist=False):
"""Asserts values of attributes of <uses-sdk> element.
Unless |fail_if_not_exist| is true, this only asserts when both the passed
value is not None and the attribute exists. If |fail_if_not_exist| is true,
it fails when the passed value is not None but the attribute does not exist.
"""
uses_sdk_node = manifest_node.find('./uses-sdk')
if uses_sdk_node is None:
return
for prefix, sdk_version in (('min', min_sdk_version), ('target',
target_sdk_version),
('max', max_sdk_version)):
value = uses_sdk_node.get('{%s}%sSdkVersion' % (ANDROID_NAMESPACE, prefix))
if fail_if_not_exist and not value and sdk_version:
assert False, (
'%sSdkVersion in Android manifest does not exist but we expect %s' %
(prefix, sdk_version))
if not value or not sdk_version:
continue
assert value == sdk_version, (
'%sSdkVersion in Android manifest is %s but we expect %s' %
(prefix, value, sdk_version))
def AssertPackage(manifest_node, package):
"""Asserts that manifest package has desired value.
Will only assert if both |package| is not None and the package is set in the
manifest.
"""
package_value = GetPackage(manifest_node)
if package_value is None or package is None:
return
assert package_value == package, (
'Package in Android manifest is %s but we expect %s' % (package_value,
package))
def _SortAndStripElementTree(root):
# Sort alphabetically with two exceptions:
# 1) Put <application> node last (since it's giant).
# 2) Put android:name before other attributes.
def element_sort_key(node):
if node.tag == 'application':
return 'z'
ret = ElementTree.tostring(node)
# ElementTree.tostring inserts namespace attributes for any that are needed
# for the node or any of its descendants. Remove them so as to prevent a
# change to a child that adds/removes a namespace usage from changing sort
# order.
return re.sub(r' xmlns:.*?".*?"', '', ret.decode('utf8'))
name_attr = '{%s}name' % ANDROID_NAMESPACE
def attribute_sort_key(tup):
return ('', '') if tup[0] == name_attr else tup
def helper(node):
for child in node:
if child.text and child.text.isspace():
child.text = None
helper(child)
# Sort attributes (requires Python 3.8+).
node.attrib = dict(sorted(node.attrib.items(), key=attribute_sort_key))
# Sort nodes
node[:] = sorted(node, key=element_sort_key)
helper(root)
def _SplitElement(line):
"""Parses a one-line xml node into ('<tag', ['a="b"', ...]], '/>')."""
# Shlex splits nicely, but removes quotes. Need to put them back.
def restore_quotes(value):
return value.replace('=', '="', 1) + '"'
# Simplify restore_quotes by separating />.
assert line.endswith('>'), line
end_tag = '>'
if line.endswith('/>'):
end_tag = '/>'
line = line[:-len(end_tag)]
# Use shlex to avoid having to re-encode ", etc.
parts = shlex.split(line)
start_tag = parts[0]
attrs = parts[1:]
return start_tag, [restore_quotes(x) for x in attrs], end_tag
def _CreateNodeHash(lines):
"""Computes a hash (md5) for the first XML node found in |lines|.
Args:
lines: List of strings containing pretty-printed XML.
Returns:
Truncated (8 hex character) md5 digest of the node (including children).
"""
target_indent = lines[0].find('<')
tag_closed = False
for i, l in enumerate(lines[1:]):
cur_indent = l.find('<')
if cur_indent != -1 and cur_indent <= target_indent:
tag_lines = lines[:i + 1]
break
if not tag_closed and 'android:name="' in l:
# To reduce noise when node tags change, use android:name as the
# basis for the hash, since names are usually unique.
tag_lines = [l]
break
tag_closed = tag_closed or '>' in l
else:
assert False, 'Did not find end of node:\n' + '\n'.join(lines)
# Insecure and truncated hash as it only needs to be unique vs. its neighbors.
return hashlib.md5(('\n'.join(tag_lines)).encode('utf8')).hexdigest()[:8]
def _IsSelfClosing(lines):
"""Given pretty-printed xml, returns whether first node is self-closing."""
for l in lines:
idx = l.find('>')
if idx != -1:
return l[idx - 1] == '/'
raise RuntimeError('Did not find end of tag:\n%s' % '\n'.join(lines))
def _AddDiffTags(lines):
# When multiple identical tags appear sequentially, XML diffs can look like:
# + </tag>
# + <tag>
# rather than:
# + <tag>
# + </tag>
# To reduce confusion, add hashes to tags.
# This also ensures changed tags show up with outer <tag> elements rather than
# showing only changed attributes.
hash_stack = []
for i, l in enumerate(lines):
stripped = l.lstrip()
# Ignore non-indented tags and lines that are not the start/end of a node.
if l[0] != ' ' or stripped[0] != '<':
continue
# Ignore self-closing nodes that fit on one line.
if l[-2:] == '/>':
continue
# Ignore <application> since diff tag changes with basically any change.
if stripped.lstrip('</').startswith('application'):
continue
# Check for the closing tag (</foo>).
if stripped[1] != '/':
cur_hash = _CreateNodeHash(lines[i:])
if not _IsSelfClosing(lines[i:]):
hash_stack.append(cur_hash)
else:
cur_hash = hash_stack.pop()
lines[i] += ' # DIFF-ANCHOR: {}'.format(cur_hash)
assert not hash_stack, 'hash_stack was not empty:\n' + '\n'.join(hash_stack)
def NormalizeManifest(manifest_contents):
_RegisterElementTreeNamespaces()
# This also strips comments and sorts node attributes alphabetically.
root = ElementTree.fromstring(manifest_contents)
package = GetPackage(root)
app_node = root.find('application')
if app_node is not None:
# android:debuggable is added when !is_official_build. Strip it out to avoid
# expectation diffs caused by not adding is_official_build. Play store
# blocks uploading apps with it set, so there's no risk of it slipping in.
debuggable_name = '{%s}debuggable' % ANDROID_NAMESPACE
if debuggable_name in app_node.attrib:
del app_node.attrib[debuggable_name]
# Trichrome's static library version number is updated daily. To avoid
# frequent manifest check failures, we remove the exact version number
# during normalization.
for node in app_node:
if (node.tag in ['uses-static-library', 'static-library']
and '{%s}version' % ANDROID_NAMESPACE in node.keys()
and '{%s}name' % ANDROID_NAMESPACE in node.keys()):
node.set('{%s}version' % ANDROID_NAMESPACE, '$VERSION_NUMBER')
# We also remove the exact package name (except the one at the root level)
# to avoid noise during manifest comparison.
def blur_package_name(node):
for key in node.keys():
node.set(key, node.get(key).replace(package, '$PACKAGE'))
for child in node:
blur_package_name(child)
# We only blur the package names of non-root nodes because they generate a lot
# of diffs when doing manifest checks for upstream targets. We still want to
# have 1 piece of package name not blurred just in case the package name is
# mistakenly changed.
for child in root:
blur_package_name(child)
_SortAndStripElementTree(root)
# Fix up whitespace/indentation.
dom = minidom.parseString(ElementTree.tostring(root))
out_lines = []
for l in dom.toprettyxml(indent=' ').splitlines():
if not l or l.isspace():
continue
if len(l) > _WRAP_LINE_LENGTH and any(x in l for x in _WRAP_CANDIDATES):
indent = ' ' * l.find('<')
start_tag, attrs, end_tag = _SplitElement(l)
out_lines.append('{}{}'.format(indent, start_tag))
for attribute in attrs:
out_lines.append('{} {}'.format(indent, attribute))
out_lines[-1] += '>'
# Heuristic: Do not allow multi-line tags to be self-closing since these
# can generally be allowed to have nested elements. When diffing, it adds
# noise if the base file is self-closing and the non-base file is not
# self-closing.
if end_tag == '/>':
out_lines.append('{}{}>'.format(indent, start_tag.replace('<', '</')))
else:
out_lines.append(l)
# Make output more diff-friendly.
_AddDiffTags(out_lines)
return '\n'.join(out_lines) + '\n'
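# --- Editor's illustrative sketch: what NormalizeManifest does to a tiny
# manifest. The manifest string below is an assumed example, and running it
# requires the same environment as this module (e.g. util.build_utils).
_EXAMPLE_MANIFEST = '''<manifest xmlns:android="http://schemas.android.com/apk/res/android"
    package="org.example.app">
  <application android:debuggable="true"/>
</manifest>'''

if __name__ == '__main__':
  # android:debuggable is stripped, attributes/nodes are sorted, the output is
  # re-indented two spaces per level, and DIFF-ANCHOR comments are appended to
  # long multi-line nodes.
  print(NormalizeManifest(_EXAMPLE_MANIFEST))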
|
StarcoderdataPython
|
3287550
|
<filename>ib2/settings.py<gh_stars>1-10
"""
Django settings for ib2 project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from vitalis.max30100 import MAX30100
from vitalis import max30100
# A button is a good approximation for what we need,
# a digital active-low trigger
from gpiozero import Button
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = <KEY>
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'*',
]
mx30 = None
mx30_error = ""
try:
mx30 = MAX30100(max_buffer_len=1000)
mx30.enable_spo2()
# Set up a trigger to fire when the FIFO buffer (on the MAX30100) fills up.
# You could also use INTERRUPT_HR to get a trigger on every measurement.
mx30.enable_interrupt(max30100.INTERRUPT_HR)
interrupt = Button(16) # Pick a pin
interrupt.when_activated = mx30.read_sensor # Connect the interrupt
print("MAX30100 configurado")
except Exception as e:
print(e)
mx30_error = str(e)
# mx30 = None
# Application definition
INSTALLED_APPS = [
'vitalis.apps.VitalisConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ib2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ib2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
StarcoderdataPython
|
6529632
|
n, k = map(int,input().split())
num = list(map(int,input().split()))
c = 0
for i in range(0,n):
for j in range(0,n):
if i!=j:
if (num[i]+num[j])%k == 0:
c+=1
print(c//2)
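# --- Editor's illustrative addition: the same count in O(n + k) using
# remainder buckets instead of the quadratic double loop above.
cnt = [0] * k
for x in num:
    cnt[x % k] += 1
pairs = cnt[0] * (cnt[0] - 1) // 2
for r in range(1, k // 2 + 1):
    if r == k - r:
        pairs += cnt[r] * (cnt[r] - 1) // 2
    else:
        pairs += cnt[r] * cnt[k - r]
print(pairs)  # should match the answer printed above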
|
StarcoderdataPython
|
163045
|
<reponame>ShunranSasaki/furport-back
# Generated by Django 3.0.8 on 2020-07-08 05:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("furport", "0002_auto_20200707_1931"),
]
operations = [
migrations.CreateModel(
name="TagGroup",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"name",
models.CharField(
default="", max_length=255, verbose_name="タググループ名"
),
),
(
"color",
models.CharField(default="", max_length=255, verbose_name="タグ色"),
),
("priority", models.IntegerField(default=0, verbose_name="優先度")),
(
"description",
models.TextField(blank=True, default="", verbose_name="詳細"),
),
],
options={"ordering": ("name",)},
),
migrations.RemoveField(model_name="event", name="image_url"),
migrations.AddField(
model_name="event",
name="stared_by",
field=models.ManyToManyField(
blank=True, related_name="stared_by", to=settings.AUTH_USER_MODEL
),
),
migrations.AddField(
model_name="event",
name="twitter_id",
field=models.CharField(
blank=True, default="", max_length=255, verbose_name="TwitterId"
),
),
migrations.AlterField(
model_name="event",
name="created_by",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="created_by",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="tag",
name="group",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="furport.TagGroup"
),
),
]
|
StarcoderdataPython
|
6415414
|
<reponame>xpsurgery/customer-base<gh_stars>1-10
class CustomerBase:
def __init__(self):
self.customers = []
def add(self, customer):
self.customers.append(customer)
def findByLastName(self, lastName):
result = []
for customer in self.customers:
if customer.lastName == lastName:
result.append(customer)
return result
def findByFirstAndLastName(self, firstName, lastName):
result = []
for customer in self.customers:
if customer.firstName == firstName and customer.lastName == lastName:
result.append(customer)
return result
def findByCreditGreaterThan(self, credit):
result = []
for customer in self.customers:
if customer.credit > credit:
result.append(customer)
return result
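# --- Editor's illustrative sketch: minimal usage, assuming customer objects
# expose the firstName, lastName and credit attributes the finders rely on.
if __name__ == '__main__':
    from collections import namedtuple
    Customer = namedtuple('Customer', ['firstName', 'lastName', 'credit'])
    base = CustomerBase()
    base.add(Customer('Ada', 'Lovelace', 300))
    base.add(Customer('Alan', 'Turing', 150))
    print([c.firstName for c in base.findByCreditGreaterThan(200)])  # ['Ada']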
|
StarcoderdataPython
|
1710057
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2019 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name,line-too-long
bash_rpm_info = """Name : bash
Version : 4.2.46
Release : 21.el7_3
Architecture: x86_64
Install Date: Thu 11 Jan 2018 12:32:51 PM EET
Group : System Environment/Shells
Size : 3663714
License : GPLv3+
Signature : RSA/SHA256, Wed 07 Dec 2016 02:11:28 AM EET, Key ID <KEY>
Source RPM : bash-4.2.46-21.el7_3.src.rpm
Build Date : Wed 07 Dec 2016 01:21:54 AM EET
Build Host : c1bm.rdu2.centos.org
Relocations : (not relocatable)
Packager : CentOS BuildSystem <http://bugs.centos.org>
Vendor : CentOS
URL : http://www.gnu.org/software/bash
Summary : The GNU Bourne Again shell
Description :
The GNU Bourne Again shell (Bash) is a shell or command language
interpreter that is compatible with the Bourne shell (sh). Bash
incorporates useful features from the Korn shell (ksh) and the C shell
(csh). Most sh scripts can be run by bash without modification.
"""
basesystem_rpm_info = """Name : basesystem
Version : 10.0
Release : 7.el7.centos
Architecture: noarch
Install Date: Fri 01 Apr 2016 11:47:25 AM EEST
Group : System Environment/Base
Size : 0
License : Public Domain
Signature : RSA/SHA256, Fri 04 Jul 2014 03:46:57 AM EEST, Key ID <KEY>
Source RPM : basesystem-10.0-7.el7.centos.src.rpm
Build Date : Fri 27 Jun 2014 01:37:10 PM EEST
Build Host : worker1.bsys.centos.org
Relocations : (not relocatable)
Packager : CentOS BuildSystem <http://bugs.centos.org>
Vendor : CentOS
Summary : The skeleton package which defines a simple CentOS Linux system
Description :
Basesystem defines the components of a basic CentOS Linux
system (for example, the package installation order to use during
bootstrapping). Basesystem should be in every installation of a system,
and it should never be removed.
"""
centos_logos_rpm_info = u"""Name : centos-logos
Version : 70.0.6
Release : 3.el7.centos
Architecture: noarch
License : Copyright © 2014 The CentOS Project. All rights reserved.
"""
conntrack_tools_rpm_info = """Name : conntrack-tools
Version : 1.4.4
Release : 3.el7_3
Architecture: x86_64
Install Date: Thu 11 Jan 2018 12:39:20 PM EET
Group : System Environment/Base
Size : 562826
License : GPLv2
Signature : RSA/SHA256, Thu 29 Jun 2017 03:36:05 PM EEST, Key ID <KEY>
Source RPM : conntrack-tools-1.4.4-3.el7_3.src.rpm
Build Date : Thu 29 Jun 2017 03:18:42 AM EEST
Build Host : c1bm.rdu2.centos.org
Relocations : (not relocatable)
Packager : CentOS BuildSystem <http://bugs.centos.org>
Vendor : CentOS
URL : http://netfilter.org
Summary : Manipulate netfilter connection tracking table and run High Availability
Description :
With conntrack-tools you can setup a High Availability cluster and
synchronize conntrack state between multiple firewalls.
The conntrack-tools package contains two programs:
- conntrack: the command line interface to interact with the connection
tracking system.
- conntrackd: the connection tracking userspace daemon that can be used to
deploy highly available GNU/Linux firewalls and collect
statistics of the firewall use.
conntrack is used to search, list, inspect and maintain the netfilter
connection tracking subsystem of the Linux kernel.
Using conntrack, you can dump a list of all (or a filtered selection of)
currently tracked connections, delete connections from the state table,
and even add new ones.
In addition, you can also monitor connection tracking events, e.g.
show an event message (one line) per newly established connection.
"""
cpp_rpm_info = """Name : cpp
Version : 4.8.5
Release : 11.el7
Architecture: x86_64
Install Date: Thu 11 Jan 2018 12:37:55 PM EET
Group : Development/Languages
Size : 15632501
License : GPLv3+ and GPLv3+ with exceptions and GPLv2+ with exceptions and LGPLv2+ and BSD
Signature : RSA/SHA256, Sun 20 Nov 2016 07:27:00 PM EET, Key ID <KEY>
Source RPM : gcc-4.8.5-11.el7.src.rpm
Build Date : Fri 04 Nov 2016 06:01:22 PM EET
Build Host : worker1.bsys.centos.org
Relocations : (not relocatable)
Packager : CentOS BuildSystem <http://bugs.centos.org>
Vendor : CentOS
URL : http://gcc.gnu.org
Summary : The C Preprocessor
Description :
Cpp is the GNU C-Compatible Compiler Preprocessor.
Cpp is a macro processor which is used automatically
by the C compiler to transform your program before actual
compilation. It is called a macro processor because it allows
you to define macros, abbreviations for longer
constructs.
The C preprocessor provides four separate functionalities: the
inclusion of header files (files of declarations that can be
substituted into your program); macro expansion (you can define macros,
and the C preprocessor will replace the macros with their definitions
throughout the program); conditional compilation (using special
preprocessing directives, you can include or exclude parts of the
program according to various conditions); and line control (if you use
a program to combine or rearrange source files into an intermediate
file which is then compiled, you can use line control to inform the
compiler about where each source line originated).
You should install this package if you are a C programmer and you use
macros.
""" # noqa, PEP-8 disabled because of example output has trailing spaces
dejavu_fonts_common_rpm_info = """Name : dejavu-fonts-common
Version : 2.33
Release : 6.el7
Architecture: noarch
Install Date: Wed Feb 7 13:49:27 2018
Group : User Interface/X
Size : 130455
License : Bitstream Vera and Public Domain
Signature : RSA/SHA256, Fri Jul 4 01:06:50 2014, Key ID <KEY>
Source RPM : dejavu-fonts-2.33-6.el7.src.rpm
Build Date : Mon Jun 9 21:34:30 2014
Build Host : worker1.bsys.centos.org
Relocations : (not relocatable)
Packager : CentOS BuildSystem <http://bugs.centos.org>
Vendor : CentOS
URL : http://dejavu-fonts.org/
Summary : Common files for the Dejavu font set
Description :
The DejaVu font set is based on the “Bitstream Vera” fonts, release 1.10. Its
purpose is to provide a wider range of characters, while maintaining the
original style, using an open collaborative development process.
This package consists of files used by other DejaVu packages.
"""
usbredir_rpm_info = """Name : usbredir
Version : 0.7.1
Release : 1.el7
Architecture: x86_64
Install Date: Wed Feb 7 13:49:24 2018
Group : System Environment/Libraries
Size : 108319
License : LGPLv2+
Signature : RSA/SHA256, Sun Nov 20 20:56:49 2016, Key ID <KEY>
Source RPM : usbredir-0.7.1-1.el7.src.rpm
Build Date : Sat Nov 5 18:33:15 2016
Build Host : worker1.bsys.centos.org
Relocations : (not relocatable)
Packager : CentOS BuildSystem <http://bugs.centos.org>
Vendor : CentOS
URL : http://spice-space.org/page/UsbRedir
Summary : USB network redirection protocol libraries
Description :
The usbredir libraries allow USB devices to be used on remote and/or virtual
hosts over TCP. The following libraries are provided:
usbredirparser:
A library containing the parser for the usbredir protocol
usbredirhost:
A library implementing the USB host side of a usbredir connection.
All that an application wishing to implement a USB host needs to do is:
* Provide a libusb device handle for the device
* Provide write and read callbacks for the actual transport of usbredir data
* Monitor for usbredir and libusb read/write events and call their handlers
"""
perl_compress_rpm_info = """Name : perl-Compress-Raw-Zlib
Epoch : 1
Version : 2.061
Release : 4.el7
Architecture: x86_64
Install Date: Sat Jan 26 20:05:50 2019
Group : Development/Libraries
Size : 139803
License : GPL+ or Artistic
Signature : RSA/SHA256, Fri Jul 4 04:15:33 2014, Key ID <KEY>
Source RPM : perl-Compress-Raw-Zlib-2.061-4.el7.src.rpm
Build Date : Tue Jun 10 01:12:08 2014
Build Host : worker1.bsys.centos.org
Relocations : (not relocatable)
Packager : CentOS BuildSystem <http://bugs.centos.org>
Vendor : CentOS
URL : http://search.cpan.org/dist/Compress-Raw-Zlib/
Summary : Low-level interface to the zlib compression library
Description :
The Compress::Raw::Zlib module provides a Perl interface to the zlib
compression library, which is used by IO::Compress::Zlib.
Obsoletes :
""" # noqa, PEP-8 disabled because of example output has trailing spaces
|
StarcoderdataPython
|
11272723
|
#!/usr/bin/python
# Find the minimum-area bounding box of a set of 2D points
#
# The input is a 2D convex hull, in an Nx2 numpy array of x-y co-ordinates.
# The first and last points points must be the same, making a closed polygon.
# This program finds the rotation angles of each edge of the convex polygon,
# then tests the area of a bounding box aligned with the unique angles in
# 90 degrees of the 1st Quadrant.
# Returns the
#
# Tested with Python 2.6.5 on Ubuntu 10.04.4
# Results verified using Matlab
# Copyright (c) 2013, <NAME>, University of Queensland
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import math  # needed for math.atan2 / math.cos / math.pi below
from numpy import *
import sys  # for sys.maxsize
def minBoundingRect(hull_points_2d):
#print("Input convex hull points: ")
#print(hull_points_2d)
# Compute edges (x2-x1,y2-y1)
edges = zeros( (len(hull_points_2d)-1,2) ) # empty 2 column array
for i in range( len(edges) ):
edge_x = hull_points_2d[i+1,0] - hull_points_2d[i,0]
edge_y = hull_points_2d[i+1,1] - hull_points_2d[i,1]
edges[i] = [edge_x,edge_y]
#print("Edges: \n", edges)
# Calculate edge angles atan2(y/x)
edge_angles = zeros( (len(edges)) ) # empty 1 column array
for i in range( len(edge_angles) ):
edge_angles[i] = math.atan2( edges[i,1], edges[i,0] )
#print("Edge angles: \n", edge_angles)
# Check for angles in 1st quadrant
for i in range( len(edge_angles) ):
edge_angles[i] = abs( edge_angles[i] % (math.pi/2) ) # want strictly positive answers
#print("Edge angles in 1st Quadrant: \n", edge_angles)
# Remove duplicate angles
edge_angles = unique(edge_angles)
#print("Unique edge angles: \n", edge_angles)
# Test each angle to find bounding box with smallest area
min_bbox = (0, sys.maxsize, 0, 0, 0, 0, 0, 0) # rot_angle, area, width, height, min_x, max_x, min_y, max_y
#print("Testing", len(edge_angles), "possible rotations for bounding box... \n")
for i in range( len(edge_angles) ):
# Create rotation matrix to shift points to baseline
# R = [ cos(theta) , cos(theta-PI/2)
# cos(theta+PI/2) , cos(theta) ]
R = array([ [ math.cos(edge_angles[i]), math.cos(edge_angles[i]-(math.pi/2)) ], [ math.cos(edge_angles[i]+(math.pi/2)), math.cos(edge_angles[i]) ] ])
#print("Rotation matrix for ", edge_angles[i], " is \n", R)
# Apply this rotation to convex hull points
rot_points = dot(R, transpose(hull_points_2d) ) # 2x2 * 2xn
#print("Rotated hull points are \n", rot_points)
# Find min/max x,y points
min_x = nanmin(rot_points[0], axis=0)
max_x = nanmax(rot_points[0], axis=0)
min_y = nanmin(rot_points[1], axis=0)
max_y = nanmax(rot_points[1], axis=0)
#print("Min x:", min_x, " Max x: ", max_x, " Min y:", min_y, " Max y: ", max_y)
# Calculate height/width/area of this bounding rectangle
width = max_x - min_x
height = max_y - min_y
area = width*height
#print("Potential bounding box ", i, ": width: ", width, " height: ", height, " area: ", area )
# Store the smallest rect found first (a simple convex hull might have 2 answers with same area)
if (area < min_bbox[1]):
min_bbox = ( edge_angles[i], area, width, height, min_x, max_x, min_y, max_y )
# Bypass, return the last found rect
#min_bbox = ( edge_angles[i], area, width, height, min_x, max_x, min_y, max_y )
# Re-create rotation matrix for smallest rect
angle = min_bbox[0]
R = array([ [ math.cos(angle), math.cos(angle-(math.pi/2)) ], [ math.cos(angle+(math.pi/2)), math.cos(angle) ] ])
#print("Projection matrix: \n", R)
# Project convex hull points onto rotated frame
proj_points = dot(R, transpose(hull_points_2d) ) # 2x2 * 2xn
#print("Project hull points are \n", proj_points)
# min/max x,y points are against baseline
min_x = min_bbox[4]
max_x = min_bbox[5]
min_y = min_bbox[6]
max_y = min_bbox[7]
#print("Min x:", min_x, " Max x: ", max_x, " Min y:", min_y, " Max y: ", max_y)
# Calculate center point and project onto rotated frame
center_x = (min_x + max_x)/2
center_y = (min_y + max_y)/2
center_point = dot( [ center_x, center_y ], R )
#print("Bounding box center point: \n", center_point)
# Calculate corner points and project onto rotated frame
corner_points = zeros( (5,2) ) # empty 2 column array
corner_points[0] = dot( [ max_x, min_y ], R )
corner_points[1] = dot( [ min_x, min_y ], R )
corner_points[2] = dot( [ min_x, max_y ], R )
corner_points[3] = dot( [ max_x, max_y ], R )
corner_points[4] = dot( [ max_x, min_y ], R )
#print("Bounding box corner points: \n", corner_points)
#print("Angle of rotation: ", angle, "rad ", angle * (180/math.pi), "deg")
return (angle, min_bbox[1], min_bbox[2], min_bbox[3], center_point, corner_points) # rot_angle, area, width, height, center_point, corner_points
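# --- Editor's illustrative sketch: calling minBoundingRect on an assumed
# closed convex hull (first point repeated as the last, as required above).
if __name__ == '__main__':
    hull = array([[0.0, 0.0], [2.0, 0.0], [2.0, 1.0], [0.0, 1.0], [0.0, 0.0]])
    angle, area, width, height, center, corners = minBoundingRect(hull)
    print(area)  # 2.0 for this axis-aligned 2x1 rectangle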
|
StarcoderdataPython
|
315162
|
"""
Shell sort
2020-12-06: 11:02.30;
"""
from sort import validatetool
def sort(data):
l = len(data)
gap = l // 2
while gap > 0:
for i in range(gap, l):
val = data[i]
j = i - gap
while j >= 0 and val < data[j]:
data[j + gap] = data[j]
j -= gap
data[j + gap] = val
gap //= 2
return data
if __name__ == '__main__':
validatetool.validate(sort)
|
StarcoderdataPython
|
8123819
|
from pyramid.config import Configurator
from pyramid.response import Response
from time import sleep
from waitress import serve
def see_home(request):
return Response('''\
<html>
<head>
</head>
<body>
<div id="ping"></div>
<div id="x"></div>
<script>
const eventSource = new EventSource('/echoes')
eventSource.onmessage = function(event) {
console.log(event)
const { data } = event
console.log(data)
switch(data) {
case '':
// ping
e = document.getElementById('ping')
e.innerHTML += 'ping '
break
case '*':
// refresh
location.reload()
break
default:
// update
d = JSON.parse(data)
e = document.getElementById(d['#'])
e.innerHTML = JSON.stringify(d['?'])
}
}
</script>
</body>
</html>''')
def send_echoes(request):
response = Response(headerlist=[
('Content-Type', 'text/event-stream'),
('Cache-Control', 'no-cache'),
])
response.app_iter = yield_echoes()
return response
def yield_echoes():
print('ping')
yield 'data:\n\n'.encode()
sleep(3)
print('update')
yield 'data: {"#": "x", "?": {"a": 1}}\n\n'.encode()
sleep(3)
print('refresh')
yield 'data: *\n\n'.encode()
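# --- Editor's illustrative sketch: the server-sent-events framing used by
# yield_echoes -- each message is a "data:" line followed by a blank line,
# encoded to bytes before it is handed to the response iterator.
def sse_message(payload=''):
    return 'data: {}\n\n'.format(payload).encode()
# sse_message('*') would be the refresh event; the ping above uses a bare 'data:\n\n'.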
with Configurator() as config:
config.add_route('home', '/')
config.add_route('echoes', '/echoes')
config.add_view(see_home, route_name='home')
config.add_view(send_echoes, route_name='echoes')
app = config.make_wsgi_app()
serve(app, port=8000)
|
StarcoderdataPython
|
3564636
|
<reponame>br-paypaldev/Donate<gh_stars>0
import re
from django.conf import settings
from django.conf.urls.defaults import patterns, url, include
from django.core.exceptions import ImproperlyConfigured
urlpatterns = []
# only serve non-fqdn URLs
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^(?P<path>.*)$', 'django.contrib.staticfiles.views.serve'),
)
def staticfiles_urlpatterns(prefix=None):
"""
Helper function to return a URL pattern for serving static files.
"""
if not settings.DEBUG:
return []
if prefix is None:
prefix = settings.STATICFILES_URL
if not prefix:
raise ImproperlyConfigured(
"The prefix for the 'staticfiles_urlpatterns' helper is empty. "
"Make sure the STATICFILES_URL setting is set correctly.")
if '://' in prefix:
raise ImproperlyConfigured(
"The STATICFILES_URL setting is a full URL, not a path and "
"can't be used with the 'staticfiles_urlpatterns' helper.")
if prefix.startswith("/"):
prefix = prefix[1:]
return patterns('',
url(r'^%s' % re.escape(prefix), include(urlpatterns)),)
|
StarcoderdataPython
|
3520569
|
from datetime import datetime
from django import template
from handypackages.datetime_conv import fmt
register = template.Library()
@register.filter(name="persian_datetime")
def datetime_conv(date_time, string_format="%y/%m/%d %h:%M:%s"):
"""
Convert datetime to persian datetime
example(datetime=datetime.datetime(2019, 5, 28, 1, 10, 33)):
{% load date_time_conv %}
{{ datetime|datetime_conv:"%y/%m/%d %h:%M:%s" }}
output => 1398/3/7 1:10:33
"""
if not isinstance(date_time, datetime):
raise template.TemplateSyntaxError(
"datetime_conv first argument must be datetime instance.",
)
return fmt(date_time, string_format)
|
StarcoderdataPython
|
4938124
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from typing import Any, Callable, Dict, Mapping, Optional, Type, Union
import torch
import torch.nn as nn
from flash.audio.speech_recognition.backbone import SPEECH_RECOGNITION_BACKBONES
from flash.audio.speech_recognition.collate import DataCollatorCTCWithPadding
from flash.audio.speech_recognition.data import SpeechRecognitionBackboneState
from flash.core.data.process import Serializer
from flash.core.data.states import CollateFn
from flash.core.model import Task
from flash.core.registry import FlashRegistry
from flash.core.utilities.imports import _AUDIO_AVAILABLE
if _AUDIO_AVAILABLE:
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
class SpeechRecognition(Task):
backbones: FlashRegistry = SPEECH_RECOGNITION_BACKBONES
required_extras = "audio"
def __init__(
self,
backbone: str = "facebook/wav2vec2-base-960h",
loss_fn: Optional[Callable] = None,
optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,
learning_rate: float = 1e-5,
serializer: Optional[Union[Serializer, Mapping[str, Serializer]]] = None,
):
os.environ["TOKENIZERS_PARALLELISM"] = "TRUE"
# disable HF thousand warnings
warnings.simplefilter("ignore")
# set os environ variable for multiprocesses
os.environ["PYTHONWARNINGS"] = "ignore"
model = (
self.backbones.get(backbone)() if backbone in self.backbones else Wav2Vec2ForCTC.from_pretrained(backbone)
)
super().__init__(
model=model,
loss_fn=loss_fn,
optimizer=optimizer,
learning_rate=learning_rate,
serializer=serializer,
)
self.save_hyperparameters()
self.set_state(SpeechRecognitionBackboneState(backbone))
self.set_state(CollateFn(DataCollatorCTCWithPadding(Wav2Vec2Processor.from_pretrained(backbone))))
def forward(self, batch: Dict[str, torch.Tensor]):
return self.model(batch["input_values"])
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
return self(batch)
def step(self, batch: Any, batch_idx: int, metrics: nn.ModuleDict) -> Any:
out = self.model(batch["input_values"], labels=batch["labels"])
out["logs"] = {"loss": out.loss}
return out
|
StarcoderdataPython
|
6496531
|
<reponame>whitfin/spack<filename>var/spack/repos/builtin/packages/muster/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Muster(CMakePackage):
"""The Muster library provides implementations of sequential and
parallel K-Medoids clustering algorithms. It is intended as a
general framework for parallel cluster analysis, particularly
for performance data analysis on systems with very large
numbers of processes.
"""
homepage = "https://github.com/llnl/muster"
url = "https://github.com/llnl/muster/archive/v1.0.tar.gz"
version('1.0.1', 'd709787db7e080447afb6571ac17723c')
version('1.0', '2<PASSWORD>')
depends_on('boost')
depends_on('mpi')
depends_on('[email protected]:', type='build')
|
StarcoderdataPython
|
381027
|
from typing import List
class Solution:
def peakIndexInMountainArray(self, arr: List[int]) -> int:
def bin_search(low, hi):
mid = low + int((hi - low)/2)
if arr[mid] > arr[mid-1] and arr[mid] > arr[mid+1]:
return mid
elif arr[mid] > arr[mid-1]:
low = mid
return bin_search(low, hi)
elif arr[mid] > arr[mid+1]:
hi = mid
return bin_search(low, hi)
return bin_search(0, len(arr))
def peakIndexInMountainArray2(self, arr: List[int]) -> int:
for i in range(1,len(arr) - 1):
if arr[i] > arr[i-1] and arr[i] > arr[i+1]:
return i
arr = [0,1,0]
arr = [0,2,1,0]
arr = [3,4,5,1]
arr = [24,69,100,99,79,78,67,36,26,19]
arr = [24,69,81,100,99,79,78,67,36,26,19]
s = Solution()
print(s.peakIndexInMountainArray(arr))
|
StarcoderdataPython
|
8100443
|
# Generated by Django 2.2.1 on 2019-05-21 19:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('professors', '0007_auto_20190522_0110'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='communication',
field=models.FloatField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], default=0),
),
migrations.AlterField(
model_name='comment',
name='marking',
field=models.FloatField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], default=0),
),
migrations.AlterField(
model_name='comment',
name='objectivity',
field=models.FloatField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], default=0),
),
migrations.AlterField(
model_name='comment',
name='quality',
field=models.FloatField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], default=0),
),
]
|
StarcoderdataPython
|
3351033
|
<filename>tests/local/test_argument.py
import sys
from unittest.mock import patch
import pytest
from mnamer.argument import ArgLoader
from mnamer.setting_spec import SettingSpec
from mnamer.types import SettingType
pytestmark = pytest.mark.local
@pytest.mark.parametrize(
"settings_type",
(SettingType.DIRECTIVE, SettingType.PARAMETER, SettingType.POSITIONAL),
ids=("directive", "parameter", "positional"),
)
def test_arg_loader__add_spec(settings_type: SettingType):
arg_loader = ArgLoader()
spec = SettingSpec(group=settings_type, flags=["-f"], help="foo")
actions = getattr(arg_loader, f"_{settings_type.value}_group")
assert len(actions._group_actions) == 0
arg_loader += spec
assert len(actions._group_actions) == 1
def test_arg_loader__add_spec_other():
arg_loader = ArgLoader()
spec = SettingSpec(
group=SettingType.CONFIGURATION, flags=["-f"], help="foo"
)
with pytest.raises(RuntimeError):
arg_loader += spec
def test_arg_loader__format_help():
arg_loader = ArgLoader()
for spec in (
SettingSpec(SettingType.POSITIONAL, flags=["--foo1"], help="foo1"),
SettingSpec(SettingType.POSITIONAL, flags=["--foo2"], help="foo2"),
SettingSpec(SettingType.POSITIONAL, flags=["--foo3"], help="foo3"),
SettingSpec(SettingType.PARAMETER, flags=["--bar1"], help="bar1"),
SettingSpec(SettingType.PARAMETER, flags=["--bar2"], help="bar2"),
SettingSpec(SettingType.PARAMETER, flags=["--bar3"], help="bar3"),
SettingSpec(SettingType.DIRECTIVE, flags=["--baz1"], help="baz1"),
SettingSpec(SettingType.DIRECTIVE, flags=["--baz2"], help="baz2"),
SettingSpec(SettingType.DIRECTIVE, flags=["--baz3"], help="baz3"),
):
arg_loader._add_spec(spec)
assert (
arg_loader.format_help()
== """
USAGE: mnamer [preferences] [directives] target [targets ...]
POSITIONAL:
foo1
foo2
foo3
PARAMETERS:
The following flags can be used to customize mnamer's behaviour. Their long
forms may also be set in a '.mnamer-v2.json' config file, in which case cli
arguments will take precedence.
bar1
bar2
bar3
DIRECTIVES:
Directives are one-off arguments that are used to perform secondary tasks
like overriding media detection. They can't be used in '.mnamer-v2.json'.
baz1
baz2
baz3
Visit https://github.com/jkwill87/mnamer for more information.
"""
)
def test_arg_parser__load__valid_parameter():
spec = SettingSpec(
group=SettingType.PARAMETER, flags=["-f"], help="foo", type=int
)
arg_parser = ArgLoader(spec)
with patch.object(sys, "argv", ["mnamer", "-f", "01"]):
assert arg_parser.load() == {"f": 1}
def test_arg_parser__load__valid_directive():
spec = SettingSpec(
group=SettingType.DIRECTIVE, flags=["-f"], help="foo", type=int
)
arg_parser = ArgLoader(spec)
with patch.object(sys, "argv", ["mnamer", "-f", "01"]):
assert arg_parser.load() == {"f": 1}
def test_arg_parser__load__valid_positional():
spec = SettingSpec(
group=SettingType.POSITIONAL,
flags=["f"],
help="foo",
type=int,
)
arg_parser = ArgLoader(spec)
with patch.object(sys, "argv", ["mnamer", "01"]):
assert arg_parser.load() == {"f": 1}
def test_arg_parser__load__invalid_configuration():
spec = SettingSpec(
group=SettingType.CONFIGURATION, flags=["-f"], help="foo"
)
arg_parser = ArgLoader(spec)
with patch.object(sys, "argv", ["mnamer", "-f", "1"]):
with pytest.raises(RuntimeError):
arg_parser.load()
def test_arg_parser__missing_help():
spec = SettingSpec(group=SettingType.DIRECTIVE, flags=["-f"])
with pytest.raises(RuntimeError):
ArgLoader(spec)
|
StarcoderdataPython
|
329855
|
<filename>pygate_grpc/client.py
import grpc
from pygate_grpc import buildinfo, faults, ffs, health, net, wallet
from pygate_grpc.errors import ErrorHandlerMeta
class PowerGateClient(object, metaclass=ErrorHandlerMeta):
def __init__(self, host_name, is_secure=False):
self.channel = (
grpc.secure_channel(host_name, grpc.ssl_channel_credentials())
if is_secure
else grpc.insecure_channel(host_name)
)
self.health = health.HealthClient(self.channel)
self.faults = faults.FaultsClient(self.channel)
self.buildinfo = buildinfo.BuildinfoClient(self.channel)
self.ffs = ffs.FfsClient(self.channel)
self.wallet = wallet.WalletClient(self.channel)
self.net = net.NetClient(self.channel)
|
StarcoderdataPython
|
9753859
|
from unittest import TestCase
import rerldo
class TestYup(TestCase):
def test_is_string(self):
s = rerldo.yup()
self.assertTrue(isinstance(s, str))
|
StarcoderdataPython
|
6564290
|
<filename>utils/errors.py<gh_stars>0
from typing import Any, Union
from discord.ext import commands
class UserInputErrors(commands.UserInputError):
def __init__(self, message: str, *arg: Any):
super().__init__(message=message, *arg)
|
StarcoderdataPython
|
362519
|
<gh_stars>0
# Generated by Django 3.0.5 on 2020-09-21 12:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0019_user_review'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='review',
),
]
|
StarcoderdataPython
|
4914111
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class DeviceInfo(object):
def __init__(self):
self._device_id = None
self._device_type = None
self._dv_sn = None
self._manufacturer = None
self._product_model = None
@property
def device_id(self):
return self._device_id
@device_id.setter
def device_id(self, value):
self._device_id = value
@property
def device_type(self):
return self._device_type
@device_type.setter
def device_type(self, value):
self._device_type = value
@property
def dv_sn(self):
return self._dv_sn
@dv_sn.setter
def dv_sn(self, value):
self._dv_sn = value
@property
def manufacturer(self):
return self._manufacturer
@manufacturer.setter
def manufacturer(self, value):
self._manufacturer = value
@property
def product_model(self):
return self._product_model
@product_model.setter
def product_model(self, value):
self._product_model = value
def to_alipay_dict(self):
params = dict()
if self.device_id:
if hasattr(self.device_id, 'to_alipay_dict'):
params['device_id'] = self.device_id.to_alipay_dict()
else:
params['device_id'] = self.device_id
if self.device_type:
if hasattr(self.device_type, 'to_alipay_dict'):
params['device_type'] = self.device_type.to_alipay_dict()
else:
params['device_type'] = self.device_type
if self.dv_sn:
if hasattr(self.dv_sn, 'to_alipay_dict'):
params['dv_sn'] = self.dv_sn.to_alipay_dict()
else:
params['dv_sn'] = self.dv_sn
if self.manufacturer:
if hasattr(self.manufacturer, 'to_alipay_dict'):
params['manufacturer'] = self.manufacturer.to_alipay_dict()
else:
params['manufacturer'] = self.manufacturer
if self.product_model:
if hasattr(self.product_model, 'to_alipay_dict'):
params['product_model'] = self.product_model.to_alipay_dict()
else:
params['product_model'] = self.product_model
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = DeviceInfo()
if 'device_id' in d:
o.device_id = d['device_id']
if 'device_type' in d:
o.device_type = d['device_type']
if 'dv_sn' in d:
o.dv_sn = d['dv_sn']
if 'manufacturer' in d:
o.manufacturer = d['manufacturer']
if 'product_model' in d:
o.product_model = d['product_model']
return o
|
StarcoderdataPython
|
11386941
|
# coding=utf-8
from unittest import TestCase
from zeeguu.api.test.api_test_mixin import APITestMixin
from zeeguu.api.api.feeds import (
STOP_FOLLOWING_FEED,
FOLLOWED_FEEDS,
START_FOLLOWING_FEED,
INTERESTING_FEEDS,
RECOMMENDED_FEEDS,
)
from zeeguu.core.model import RSSFeedRegistration
from zeeguu.core.content_retriever.article_downloader import download_from_feed
from zeeguu.core.test.rules.rss_feed_rule import RSSFeedRule
import zeeguu.core
class FeedTests(APITestMixin, TestCase):
def setUp(self):
super().setUp()
self.feed1 = RSSFeedRule().feed1
RSSFeedRegistration.find_or_create(
zeeguu.core.db.session, self.user, self.feed1
)
def test_stop_following_feed(self):
feed_id = self.feed1.id
initial_feed_count = len(self.json_from_api_get(f"{FOLLOWED_FEEDS}"))
# if we stop following one, we'll follow only one
form_data = {"source_id": feed_id}
assert self.api_post(f"{STOP_FOLLOWING_FEED}", form_data).data == b"OK"
feeds = self.json_from_api_get(f"{FOLLOWED_FEEDS}")
assert len(feeds) == initial_feed_count - 1
def test_start_following_feed(self):
new_feed_id = RSSFeedRule().feed.id
# When
form_data = {"source_id": new_feed_id}
self.api_post(f"/{START_FOLLOWING_FEED}", form_data)
# Then
followed_feed_ids = [
each["id"] for each in self.json_from_api_get(f"/{FOLLOWED_FEEDS}")
]
assert new_feed_id in followed_feed_ids
def test_get_interesting_feeds(self):
lang_code = self.feed1.language.code
interesting_feeds = self.json_from_api_get(f"{INTERESTING_FEEDS}/{lang_code}")
assert len(interesting_feeds) > 0
def test_non_subscribed_feeds(self):
lang_code = self.feed1.language.code
non_subscribed_feeds = self.json_from_api_get(
f"{RECOMMENDED_FEEDS}/{lang_code}"
)
initial_non_subscribed_count = len(non_subscribed_feeds)
self.test_stop_following_feed()
non_subscribed_feeds = self.json_from_api_get(
f"{RECOMMENDED_FEEDS}/{lang_code}"
)
final_non_subscribed_count = len(non_subscribed_feeds)
assert final_non_subscribed_count > initial_non_subscribed_count
def test_multiple_stop_following_same_feed(self):
feed_id = self.feed1.id
form_data = {"source_id": feed_id}
# if we stop following one it'll be ok
assert self.api_post(f"{STOP_FOLLOWING_FEED}", form_data).data == b"OK"
# if we stop following it once more, not ok
assert not (self.api_post(f"{STOP_FOLLOWING_FEED}", form_data).data == b"OK")
def test_get_feed_items_with_metrics(self):
download_from_feed(self.feed1, zeeguu.core.db.session, 3)
feed_items = self.json_from_api_get(
f"get_feed_items_with_metrics/{self.feed1.id}"
)
assert len(feed_items) > 0
assert feed_items[0]["title"]
assert feed_items[0]["summary"]
assert feed_items[0]["published"]
assert feed_items[0]["metrics"]
|
StarcoderdataPython
|
29638
|
<reponame>Sofia190/book_store_app<gh_stars>0
from django.conf import settings
from django.db import models
from django.utils import timezone
# Create your models here.
class SearchQuery(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.CASCADE)
query = models.CharField(max_length=570)
timestamp = models.DateField(auto_now=False, auto_now_add=False, default=timezone.now)  # pass the callable so the default is evaluated per save, not at definition time
|
StarcoderdataPython
|
6688755
|
from .voc import VOCSegmentation, VOCSegmentationIncremental
from .ade import AdeSegmentation, AdeSegmentationIncremental
from .isprs import VaihingenDataset, PotsdamDataset, VaihingenIncremental, PotsdamIncremental
|
StarcoderdataPython
|
4961050
|
<reponame>dmakhno/contrib-python-qubell-client<filename>qubellclient/tests/base.py
# Copyright (c) 2013 Qubell Inc., http://qubell.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, Qubell.com"
__license__ = "Apache"
__version__ = "1.0.1"
__email__ = "<EMAIL>"
import testtools
import nose.plugins.attrib
from qubellclient.private.platform import QubellPlatform, Context
from qubellclient.private.manifest import Manifest
import os
from qubellclient.tools import rand
import logging as log
user = os.environ.get('QUBELL_USER')
password = os.environ.get('QUBELL_PASSWORD')
api = os.environ.get('QUBELL_API')
org = os.environ.get('QUBELL_ORG')
prefix = os.environ.get('QUBELL_PREFIX')
if not user: log.error('No username provided. Set QUBELL_USER env')
if not password: log.error('No password provided. Set QUBELL_PASSWORD env')
if not api: log.error('No api url provided. Set QUBELL_API env')
if not org: log.error('No organization name provided. Set QUBELL_ORG env')
def attr(*args, **kwargs):
"""A decorator which applies the nose and testtools attr decorator
"""
def decorator(f):
f = testtools.testcase.attr(args)(f)
if not 'skip' in args:
return nose.plugins.attrib.attr(*args, **kwargs)(f)
# TODO: Should do something if test is skipped
return decorator
class BaseTestCasePrivate(testtools.TestCase):
## TODO: Main preparation should be here
""" Here we prepare global env. (load config, etc)
"""
@classmethod
def setUpClass(cls):
cls.prefix = prefix or rand()
cls.context = Context(user=user, password=password, api=api)
# Initialize platform and check access
cls.platform = QubellPlatform(context=cls.context)
assert cls.platform.authenticate()
# Set default manifest for app creation
cls.manifest = Manifest(file=os.path.join(os.path.dirname(__file__), 'default.yml'), name='BaseTestManifest')
# Initialize organization
cls.organization = cls.platform.organization(name=org)
# Initialize environment
cls.environment = cls.organization.environment(name='default')
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
# Run before each test
super(BaseTestCasePrivate, self).setUp()
pass
def tearDown(self):
# Run after each test
super(BaseTestCasePrivate, self).tearDown()
pass
|
StarcoderdataPython
|
11325624
|
import json
from .api_detail import APIDetail
class File(APIDetail):
def __init__(self, session, node=None, location=None, name=None, data=None, wb_data=None, auth=None):
super().__init__(session=session, data=data)
if wb_data is not None:
self._update_from_wb(wb_data=wb_data, auth=auth)
elif data is None:
self.name = name
self.location = location
self.type = "file"
self.node = node
self.session = session
def _update_from_wb(self, wb_data, auth=None):
auth = auth or self.session.auth
wb_attributes = wb_data['data']['attributes']
if wb_attributes['provider'] == 'osfstorage':
osf_url = "{}v2/files{}".format(self.session.api_base_url,
wb_attributes['path'])
else:
osf_url = "{}v2/nodes/{}/files/{}{}?info".format(
self.session.api_base_url,
wb_attributes['resource'],
wb_attributes['provider'],
wb_attributes['path']
)
response = self.session.get(url=osf_url, auth=auth)
self._update(response=response)
def get(self, url=None, query_parameters=None, auth=None):
if url:
self.location = url
elif self.links.self:
self.location = self.links.self
# todo elif node, location, and name
response = self.session.get(
url=self.location, query_parameters=query_parameters, auth=auth)
self._update(response=response)
def download(self, query_parameters=None, auth=None):
url = self.links.download
return self.session.get(url=url, query_parameters=query_parameters, auth=auth)
def upload(self, data, query_parameters=None, auth=None):
url = self.links.upload
query_parameters = query_parameters or {}
upload_query_parameters = {
'kind': 'file',
}
combined_query_parameters = {
**query_parameters, **upload_query_parameters}
return self.session.put(url=url, query_parameters=combined_query_parameters, raw_body=data, auth=auth)
def _move_or_copy(self, to_folder, action, rename=None, conflict=None, query_parameters=None, auth=None):
body = {
'action': action,
'path': to_folder.path,
'resource': to_folder.relationships.node['data']['id'],
'provider': to_folder.provider,
}
if rename:
body['rename'] = rename
if conflict:
body['conflict'] = conflict
raw_body = json.JSONEncoder().encode(body)
url = self.links.move
return self.session.post(url=url, raw_body=raw_body, query_parameters=query_parameters, auth=auth)
def move(self, to_folder, rename=None, conflict=None, query_parameters=None, auth=None):
moved_file = self._move_or_copy(to_folder=to_folder, action='move', rename=rename, conflict=conflict,
query_parameters=query_parameters, auth=auth)
self._update_from_wb(wb_data=moved_file, auth=auth)
def copy(self, to_folder, rename=None, conflict=None, query_parameters=None, auth=None):
new_file = self._move_or_copy(to_folder=to_folder, action='copy', rename=rename, conflict=conflict,
query_parameters=query_parameters, auth=auth)
return File(session=self.session, wb_data=new_file, auth=auth)
def delete(self, query_parameters=None, auth=None):
url = self.links.delete
return self.session.delete(url=url, item_type=self.type, query_parameters=query_parameters, auth=auth)
def rename(self, name, query_parameters=None, auth=None):
body = {
'action': 'rename',
'rename': name
}
raw_body = json.JSONEncoder().encode(body)
url = self.links.move
response = self.session.post(
url=url, raw_body=raw_body, query_parameters=query_parameters, auth=auth)
self._update(response=response)
|
StarcoderdataPython
|
1817187
|
<reponame>HIT-SCIR-xuanxuan/OpenKS
#!/usr/bin/env python
# encoding: utf-8
# File Name: graph_encoder.py
# Author: <NAME>
# Create Time: 2019/12/31 18:42
# TODO:
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import Set2Set
from ...model import TorchModel
from .gat import UnsupervisedGAT
from .gin import UnsupervisedGIN
from .mpnn import UnsupervisedMPNN
from .gcn import UnsupervisedGCN
@TorchModel.register("GraphEncoder", "PyTorch")
class GraphEncoder(nn.Module):
"""
MPNN from
`Neural Message Passing for Quantum Chemistry <https://arxiv.org/abs/1704.01212>`__
Parameters
----------
node_input_dim : int
Dimension of input node feature, default to be 15.
edge_input_dim : int
Dimension of input edge feature, default to be 15.
output_dim : int
Dimension of prediction, default to be 12.
node_hidden_dim : int
Dimension of node feature in hidden layers, default to be 64.
edge_hidden_dim : int
Dimension of edge feature in hidden layers, default to be 128.
num_step_message_passing : int
Number of message passing steps, default to be 6.
num_step_set2set : int
Number of set2set steps
num_layer_set2set : int
Number of set2set layers
"""
def __init__(
self,
positional_embedding_size=32,
max_node_freq=8,
max_edge_freq=8,
max_degree=128,
freq_embedding_size=32,
degree_embedding_size=32,
output_dim=32,
node_hidden_dim=32,
edge_hidden_dim=32,
num_layers=6,
num_heads=4,
num_step_set2set=6,
num_layer_set2set=3,
norm=False,
gnn_model="mpnn",
degree_input=False,
lstm_as_gate=False,
):
super(GraphEncoder, self).__init__()
if degree_input:
node_input_dim = positional_embedding_size + degree_embedding_size + 1
else:
node_input_dim = positional_embedding_size + 1
# node_input_dim = (
# positional_embedding_size + freq_embedding_size + degree_embedding_size + 3
# )
edge_input_dim = freq_embedding_size + 1
if gnn_model == "mpnn":
self.gnn = UnsupervisedMPNN(
output_dim=output_dim,
node_input_dim=node_input_dim,
node_hidden_dim=node_hidden_dim,
edge_input_dim=edge_input_dim,
edge_hidden_dim=edge_hidden_dim,
num_step_message_passing=num_layers,
lstm_as_gate=lstm_as_gate,
)
elif gnn_model == "gat":
self.gnn = UnsupervisedGAT(
node_input_dim=node_input_dim,
node_hidden_dim=node_hidden_dim,
edge_input_dim=edge_input_dim,
num_layers=num_layers,
num_heads=num_heads,
)
elif gnn_model == "gcn":
self.gnn = UnsupervisedGCN(
hidden_size=node_input_dim,
num_layer=num_layers
)
elif gnn_model == "gin":
self.gnn = UnsupervisedGIN(
num_layers=num_layers,
num_mlp_layers=2,
input_dim=node_input_dim,
hidden_dim=node_hidden_dim,
output_dim=output_dim,
final_dropout=0.5,
learn_eps=False,
graph_pooling_type="sum",
neighbor_pooling_type="sum",
use_selayer=False,
)
self.gnn_model = gnn_model
self.max_node_freq = max_node_freq
self.max_edge_freq = max_edge_freq
self.max_degree = max_degree
self.degree_input = degree_input
# self.node_freq_embedding = nn.Embedding(
# num_embeddings=max_node_freq + 1, embedding_dim=freq_embedding_size
# )
if degree_input:
self.degree_embedding = nn.Embedding(
num_embeddings=max_degree + 1, embedding_dim=degree_embedding_size
)
# self.edge_freq_embedding = nn.Embedding(
# num_embeddings=max_edge_freq + 1, embedding_dim=freq_embedding_size
# )
self.set2set = Set2Set(node_hidden_dim, num_step_set2set, num_layer_set2set)
self.lin_readout = nn.Sequential(
nn.Linear(2 * node_hidden_dim, node_hidden_dim),
nn.ReLU(),
nn.Linear(node_hidden_dim, output_dim),
)
self.norm = norm
def forward(self, g, return_all_outputs=False):
"""Predict molecule labels
Parameters
----------
g : DGLGraph
Input DGLGraph for molecule(s)
n_feat : tensor of dtype float32 and shape (B1, D1)
Node features. B1 for number of nodes and D1 for
the node feature size.
e_feat : tensor of dtype float32 and shape (B2, D2)
Edge features. B2 for number of edges and D2 for
the edge feature size.
Returns
-------
res : Predicted labels
"""
# nfreq = g.ndata["nfreq"]
if self.degree_input:
device = g.ndata["seed"].device
degrees = g.in_degrees()
if device != torch.device("cpu"):
degrees = degrees.cuda(device)
n_feat = torch.cat(
(
g.ndata["pos_undirected"],
self.degree_embedding(degrees.clamp(0, self.max_degree)),
g.ndata["seed"].unsqueeze(1).float(),
),
dim=-1,
)
else:
n_feat = torch.cat(
(
g.ndata["pos_undirected"],
# g.ndata["pos_directed"],
# self.node_freq_embedding(nfreq.clamp(0, self.max_node_freq)),
# self.degree_embedding(degrees.clamp(0, self.max_degree)),
g.ndata["seed"].unsqueeze(1).float(),
# nfreq.unsqueeze(1).float() / self.max_node_freq,
# degrees.unsqueeze(1).float() / self.max_degree,
),
dim=-1,
)
# efreq = g.edata["efreq"]
# e_feat = torch.cat(
# (
# self.edge_freq_embedding(efreq.clamp(0, self.max_edge_freq)),
# efreq.unsqueeze(1).float() / self.max_edge_freq,
# ),
# dim=-1,
# )
e_feat = None
if self.gnn_model == "gin":
x, all_outputs = self.gnn(g, n_feat, e_feat)
else:
x, all_outputs = self.gnn(g, n_feat, e_feat), None
x = self.set2set(g, x)
x = self.lin_readout(x)
if self.norm:
x = F.normalize(x, p=2, dim=-1, eps=1e-5)
if return_all_outputs:
return x, all_outputs
else:
return x
if __name__ == "__main__":
model = GraphEncoder(gnn_model="gin")
print(model)
g = dgl.DGLGraph()
g.add_nodes(3)
g.add_edges([0, 0, 1, 2], [1, 2, 2, 1])
g.ndata["pos_directed"] = torch.rand(3, 16)
g.ndata["pos_undirected"] = torch.rand(3, 16)
g.ndata["seed"] = torch.zeros(3, dtype=torch.long)
g.ndata["nfreq"] = torch.ones(3, dtype=torch.long)
g.edata["efreq"] = torch.ones(4, dtype=torch.long)
g = dgl.batch([g, g, g])
y = model(g)
print(y.shape)
print(y)
|
StarcoderdataPython
|
1870088
|
from asyncmqtt import MQTTException
from asyncmqtt.packet import MQTTFixedHeader, MQTTVariableHeader, MQTTPayload, MQTTPacket, SUBACK, PacketIDVariableHeader
from asyncmqtt.util import *
class SubackPayload(MQTTPayload):
RETURN_CODE_00 = 0x00
RETURN_CODE_01 = 0x01
RETURN_CODE_02 = 0x02
RETURN_CODE_80 = 0x80
    def __init__(self, return_codes=None):
        # Avoid a mutable default argument: each payload gets its own list.
        self.return_codes = return_codes if return_codes is not None else []
def __repr__(self):
return type(self).__name__ + '(return_codes={0})'.format(repr(self.return_codes))
def to_bytes(self, fixed_header: MQTTFixedHeader, variable_header: MQTTVariableHeader):
out = b''
for return_code in self.return_codes:
out += int_to_bytes(return_code, 1)
return out
@classmethod
def from_bytes(cls, buffer: bytearray, fixed_header: MQTTFixedHeader,
variable_header: MQTTVariableHeader):
return_codes = []
length = fixed_header.remaining_length - variable_header.bytes_length
for i in range(0, length):
return_codes.append(buffer[i])
return cls(return_codes)
class SubackPacket(MQTTPacket):
VARIABLE_HEADER = PacketIDVariableHeader
PAYLOAD = SubackPayload
def __init__(self, fixed: MQTTFixedHeader=None, variable_header: PacketIDVariableHeader=None, payload=None):
if fixed is None:
header = MQTTFixedHeader(SUBACK, 0x00)
else:
            if fixed.packet_type != SUBACK:
raise MQTTException("Invalid fixed packet type %s for SubackPacket init" % fixed.packet_type)
header = fixed
super().__init__(header)
self.variable_header = variable_header
self.payload = payload
@property
def return_codes(self):
return self.payload.return_codes
    @return_codes.setter
    def return_codes(self, return_codes):
        self.payload.return_codes = return_codes
@classmethod
def build(cls, packet_id, return_codes):
variable_header = cls.VARIABLE_HEADER(packet_id)
payload = cls.PAYLOAD(return_codes)
return cls(variable_header=variable_header, payload=payload)
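# Minimal usage sketch of the classes above (illustrative values only; serialising the packet
# to bytes would additionally rely on the MQTTPacket machinery imported from asyncmqtt).
if __name__ == '__main__':
    packet = SubackPacket.build(
        packet_id=10,
        return_codes=[SubackPayload.RETURN_CODE_01, SubackPayload.RETURN_CODE_80],
    )
    print(packet.return_codes)  # -> [1, 128]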
|
StarcoderdataPython
|
168952
|
<gh_stars>0
import numpy as np
import theano
import theano.tensor as T
from data import noteStateSingleToInputForm
class OutputFormToInputFormOp(theano.Op):
# Properties attribute
__props__ = ()
def make_node(self, state, time):
state = T.as_tensor_variable(state)
time = T.as_tensor_variable(time)
return theano.Apply(self, [state, time], [T.bmatrix()])
# Python implementation:
def perform(self, node, inputs_storage, output_storage):
state, time = inputs_storage
output_storage[0][0] = np.array(noteStateSingleToInputForm(state, time), dtype='int8')
|
StarcoderdataPython
|
11290500
|
<filename>tests/test_middleware.py
import django
from django.core.cache import cache
from django.contrib.auth.models import AnonymousUser, User, Group
from django.test import TestCase
from mock import Mock
import mock
from groups_cache.compat import is_authenticated
from groups_cache.middleware import GroupsCacheMiddleware
from groups_cache import signals
class TestMiddleware(TestCase):
def setUp(self):
self.gcm = GroupsCacheMiddleware()
self.request = Mock()
self.user = Mock(id=123, name='bob')
if django.VERSION < (1, 10):
self.user.is_authenticated.return_value = True
else:
self.user.is_authenticated = True
def test_request_should_not_cache_anonymous(self):
self.request.user = Mock()
if django.VERSION < (1, 10):
self.request.user.is_authenticated.return_value = False
else:
self.request.user.is_authenticated = False
self.assertEqual(self.gcm.process_request(self.request), None)
self.assertIsNone(self.request.groups_cache)
cache.clear()
def test_request_should_cache_authenticated_user(self):
self.request.user = self.user
self.user.groups.all.return_value.values_list.return_value = Group.objects.none()
self.assertEqual(self.gcm.process_request(self.request), None)
self.assertIsInstance(self.request.groups_cache, type(Group.objects.none()))
self.assertEqual(len(self.request.groups_cache), 0)
cache.clear()
def test_request_should_cache_one_group(self):
Group.objects.create(name='revelers')
self.user.groups.all.return_value.values_list.return_value = Group.objects.all()
self.request.user = self.user
self.assertEqual(self.gcm.process_request(self.request), None)
self.assertIsInstance(self.request.groups_cache, type(Group.objects.none()))
self.assertEqual(len(self.request.groups_cache), 1)
def test_request_should_hit_cached_one_group(self):
self.request.user = self.user
self.assertEqual(self.gcm.process_request(self.request), None)
self.assertIsInstance(self.request.groups_cache, type(Group.objects.none()))
self.assertEqual(len(self.request.groups_cache), 1)
|
StarcoderdataPython
|
1774548
|
"""
get/put functions that consume/produce Python lists using Pickle to serialize
"""
from __future__ import absolute_import
from .compatibility import pickle
from .encode import Encode
from functools import partial
def concat(lists):
return sum(lists, [])
Pickle = partial(Encode,
partial(pickle.dumps, protocol=pickle.HIGHEST_PROTOCOL),
pickle.loads,
concat)
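# Hypothetical usage sketch, assuming this module lives in a partd-style package where
# `Encode` wraps another key/value store and the `concat` above joins retrieved chunks:
#
# from partd import File
# with Pickle(File('/tmp/pickle_demo')) as store:
#     store.append({'key': [1, 2, 3]})
#     store.append({'key': [4, 5]})
#     store.get('key')   # -> [1, 2, 3, 4, 5]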
|
StarcoderdataPython
|
382817
|
#1. Import libraries:
# The two installs below use notebook shell syntax; run them in a notebook cell or from the
# command line ("pip install keras-bert bert-tensorflow") before executing this script.
# !pip install keras-bert
# !pip install bert-tensorflow
import sys
import codecs
import numpy as np
from bert import tokenization
from keras_bert import load_trained_model_from_checkpoint
from keras.models import Model
from keras import layers
from keras.layers import Input, Dense, BatchNormalization
from keras_pos_embd import PositionEmbedding
import OpenTextbot.src.Algebra as Algebra
import OpenTextbot.src.Compressor as Compressor
import OpenTextbot.src.Tokenizer as Tokenizer
import OpenTextbot.src.Voronoi as Voronoi
#2. Import compressed token vectors and their centroids:
folder = '/content/drive/My Drive/RuBERT'
Filename = folder+'/ArrayCentroids.txt'
ArrayCentroidsImported = Compressor.ImportCentroids(Filename, ListClusterSize=256, SubvectorSize=4)
Filename2 = folder+'/ArrayCompressedWE.txt'
ArrayCompressedWEImported = Compressor.ImportCompressedWE(Filename2)
#3. Method for creating word vector from token vectors for one sentence:
def GetWordWE(sentence):
sentence = sentence.replace(' [MASK] ','[MASK]')
sentence = sentence.replace('[MASK] ','[MASK]')
sentence = sentence.replace(' [MASK]','[MASK]')
sentence = sentence.split('[MASK]')
tokens = ['[CLS]']
for i in range(len(sentence)):
if i == 0:
tokens = tokens + tokenizer.tokenize(sentence[i])
else:
tokens = tokens + ['[MASK]'] + tokenizer.tokenize(sentence[i])
tokens = tokens + ['[SEP]']
token_input = tokenizer.convert_tokens_to_ids(tokens)
ListCompressedWE = list()
for i in range(len(token_input)):
ListCompressedWE.append(ArrayCompressedWEImported[token_input[i]])
ArrayCompressedWE = np.asarray(ListCompressedWE)
ArrayWE = Compressor.DecompressListWE(ArrayCompressedWE, ArrayCentroidsImported, 768)
ListWordWE = list()
ListListWE = list()
CurrentListWE = list()
for t in range(0, len(tokens)):
if '##' in tokens[t]:
CurrentListWE.append(ArrayWE[t])
if (t == (len(tokens) - 1)):
ListListWE.append(CurrentListWE)
ListWordWE.append(Algebra.Mean(CurrentListWE, Type='Cluster'))
else:
if(len(CurrentListWE) == 0):
CurrentListWE.append(ArrayWE[t])
else:
ListListWE.append(CurrentListWE)
ListWordWE.append(Algebra.Mean(CurrentListWE, Type='Cluster'))
CurrentListWE.clear()
CurrentListWE.append(ArrayWE[t])
ArrayWordWE = np.asarray(ListWordWE)
Length = len(ListWordWE)
Array768 = np.zeros(768)
for i in range(512-len(ListWordWE)):
ListWordWE.append(Array768)
ArrayWordWE = np.asarray(ListWordWE)
ArrayWordWE = ArrayWordWE.reshape([1, 512, 768])
return ArrayWordWE, Length
#4. Create Keras-bert model for WordBERT (you can make the same with TF-2, TF-Keras or PyTorch):
folder = '/content/drive/My Drive/RuBERT'
config_path = folder + '/bert_config.json'
checkpoint_path = folder + '/bert_model.ckpt'
vocab_path = folder + '/vocab.txt'
tokenizer = tokenization.FullTokenizer(vocab_file=vocab_path, do_lower_case=False)
model = load_trained_model_from_checkpoint(config_path, checkpoint_path, training=True)
ListLayer = list()
a = Input(shape=(512,768), name='InputEmbedding')
ListLayer.append(a)
ListLayer.append(model.layers[5](ListLayer[len(ListLayer) - 1])) #PositionEmbedding
ListLayer.append(model.layers[6](ListLayer[len(ListLayer) - 1])) #Dropout
ListLayer.append(model.layers[7](ListLayer[len(ListLayer) - 1])) #LayerNormalization
for i in range(0, 12):
ListLayer.append(model.layers[8 + 8*i](ListLayer[len(ListLayer) - 1]))
ListLayer.append(model.layers[9 + 8*i](ListLayer[len(ListLayer) - 1]))
ListLayer.append(model.layers[10 + 8*i]([ListLayer[len(ListLayer) - 3], ListLayer[len(ListLayer) - 1]]))
ListLayer.append(model.layers[11 + 8*i](ListLayer[len(ListLayer) - 1]))
ListLayer.append(model.layers[12 + 8*i](ListLayer[len(ListLayer) - 1]))
ListLayer.append(model.layers[13 + 8*i](ListLayer[len(ListLayer) - 1]))
ListLayer.append(model.layers[14 + 8*i]([ListLayer[len(ListLayer) - 3], ListLayer[len(ListLayer) - 1]]))
ListLayer.append(model.layers[15 + 8*i](ListLayer[len(ListLayer) - 1]))
ListLayer.append(model.layers[104](ListLayer[len(ListLayer) - 1]))
ListLayer.append(model.layers[105](ListLayer[len(ListLayer) - 1]))
WordModel = Model(inputs=a, outputs=ListLayer[len(ListLayer) - 1])
for layer in WordModel.layers:
layer.trainable = False
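#5. Hypothetical usage sketch (assumes the RuBERT files above are present and steps 1-4 have
#   been executed in order; the output shape depends on the reused masked-LM head layers):
#sentence = 'Пример предложения с токеном [MASK] .'
#ArrayWordWE, Length = GetWordWE(sentence)
#predictions = WordModel.predict(ArrayWordWE)
#print(predictions.shape)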
|
StarcoderdataPython
|
1758751
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Skills
- SkillLine.dbc
- SkillLineAbility.dbc (spell lookups)
"""
from .. import *
from ..globalstrings import *
class Skill(Model):
@classmethod
def getTypeText(self):
return {
self.MINOR: MINOR_GLYPH,
self.MAJOR: MAJOR_GLYPH,
self.PRIME: PRIME_GLYPH,
}.get(self.obj.type, "")
class SkillProxy(object):
"""
WDBC proxy for skills
"""
def __init__(self, cls):
from pywow import wdbc
self.__file = wdbc.get("SkillLine.dbc", build=-1)
self.spells = wdbc.get("SkillLineAbility.dbc", build=-1)
def get(self, id):
return self.__file[id]
def getSpells(self, row):
from ..spells import Spell
lookups = row.skilllineability__skill
return [Spell(k._raw("spell")) for k in lookups]
Skill.initProxy(SkillProxy)
|
StarcoderdataPython
|
3428574
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
from ..constants import Constants
from .observatory import Observatory
__all__ = ["SDSS"]
c = Constants.LIGHTSPEED
class SDSS(Observatory):
def __init__(self):
super().__init__(
name="Sloan Digital Sky Survey",
acronym="SDSS",
filterNames=["u", "g", "r", "i", "z"],
filterEffectiveLambdas=np.array([
3.543e-7,
4.770e-7,
6.231e-7,
7.625e-7,
9.134e-7,
])
)
|
StarcoderdataPython
|
3470701
|
<reponame>KaShing96/hackerrank-challenges
# === Imports ===
import pytest
import json
import os
import functions as fnc
from func_timeout import func_timeout as to
from func_timeout.exceptions import FunctionTimedOut
from datetime import datetime
from colorama import Fore
from colorama import Style
# === Constants ===
TIMEOUT = 10
TERMINAL_COL = os.get_terminal_size().columns
# === Test function ===
__time = None
def clock(reset=False):
""" Clock function."""
global __time
if not reset:
if __time is None:
__time = datetime.now()
result = __time - __time
else:
result = datetime.now() - __time
__time = datetime.now()
return result
else:
__time = None
return datetime.now() - datetime.now()
def sclock(reset=False):
"""Clock function with return as string."""
return str(clock(reset))
def DEBUG(*args, **kwargs):
"""Debug print function."""
print(*args, **kwargs)
def CLOCKBUG(*args, **kwargs):
"""Debug with clock function."""
args = list(args)
args.append(sclock())
args = tuple(args)
print(*args, **kwargs)
def run_test(f):
"""Basic function to handle tests."""
# Filename
DEBUG()
for _ in range(TERMINAL_COL):
DEBUG("-", end='')
DEBUG("FILENAME:", f)
# Reset clock
sclock(True)
# Open file
with open(f, "r") as fr:
tests = json.load(fr)
CLOCKBUG("File loading:")
# List of all exceptions
all_exceptions = []
# Running tests
for tx, t in enumerate(tests):
exceptions = []
tx
# Extracting argument and answers
json_args = t['arg'].split(' ')
json_ans = t['ans']
DEBUG("Arguments:", str(json_args)[:TERMINAL_COL//2])
args = json_args
ans = json_ans
try:
result = to(
TIMEOUT,
fnc.commonChild,
(
args[0],
args[1]
)
)
except FunctionTimedOut as e:
exceptions.append(e)
CLOCKBUG("Runtime:")
# Check answer
try:
assert result == ans, args
except Exception as e:
exceptions.append(e)
if len(exceptions) > 0:
all_exceptions.append(exceptions)
DEBUG()
for ex in all_exceptions:
for e in ex:
assert False, e
# if len(all_exceptions) > 0:
# assert False, all_exceptions
# errors = []
# for tx, t in enumerate(tests):
# tx
# args = t["arg"].split(' ')
# ans = t["ans"]
# __start = datetime.now()
# try:
# result = to(
# TIMEOUT,
# fnc.commonChild,
# (
# args[0],
# args[1]
# )
# )
# except FunctionTimedOut as e:
# errors.append(e, TIMEOUT)
# continue
# print(datetime.now() - __start)
# try:
# assert result == ans, args
# except Exception as e:
# errors.append(e, datetime.now() - __start)
# for e in errors:
# assert False, e
# === Tests ===
def test_test_values():
"""Tests more basic tests."""
run_test("tests.json")
def test_verification_values():
run_test("verifications.json")
# TODO Unit tests for functions in functions.py
|
StarcoderdataPython
|
5025458
|
from uia import scrape, unidata
from time import sleep
import pickle
from os import path
from datetime import datetime
from dateutil.relativedelta import relativedelta
PICKLE_DIR = 'pickles/'
studies = unidata.studies
# lazy prototyping means infinite loops instead of cronjobs
while True:
for study in studies:
try:
file_time = datetime.fromtimestamp(path.getmtime(PICKLE_DIR + study))
except FileNotFoundError:
file_time = datetime.fromtimestamp(0)
if file_time < datetime.now() - relativedelta(hours=6):
table = {'timetable': scrape.get_timetable(studies[study]['courses']),
'title': studies[study]['title']}
with open(PICKLE_DIR + study, 'wb') as f:
pickle.dump(table, f)
print('pickled {}!'.format(studies[study]['title']))
sleep(21600)
|
StarcoderdataPython
|
3568896
|
#load packages
from xml.etree import cElementTree as ET
import uuid
import os
import json
import sys
'''
get relevant xml attributes and convert to a list of dictionaries
:param root: xml root for image
:param labels: set collecting all label names seen so far
:return: obs: list with one dictionary per annotated object in the image
'''
def get_attributes(root, labels):
    obs = []
    for obj in root.findall('object'):
        #get image size for scaling
        size = root.find('size')
        image_width = size.find('width').text
        image_height = size.find('height').text
        #get attributes
        label = obj.find('name').text
        labels.add(label)
        bndbox = obj.find('bndbox')
        xmin = bndbox.find('xmin').text
        ymin = bndbox.find('ymin').text
        xmax = bndbox.find('xmax').text
        ymax = bndbox.find('ymax').text
        #TODO: build in check and error handling for missing data
        # collect every annotated object instead of keeping only the last one
        obs.append({'x': float(xmin) / float(image_width),
                    'x2': float(xmax) / float(image_width),
                    'y': float(ymin) / float(image_height),
                    'y2': float(ymax) / float(image_height),
                    'id': str(uuid.uuid1()),
                    'label': label})
    return obs
'''
This funtion converts annotations information from many xml files to one json
'''
def main():
# initialize variable
labels = set()
annotation = {}
# configure
version = '1.0'
type = "localization"
rootdir = sys.argv[1]
foldername = sys.argv[2]
#get annotation attrinutes for ech xml file
for _, _, files in os.walk(rootdir):
for file in files:
if file.endswith('.xml'):
path = '/'.join([rootdir, file])
root = ET.parse(path).getroot()
annotation['/'.join([foldername, root.find('filename').text])] = get_attributes(root, labels)
# only write/overwrite when there was some information from xmls found
if annotation:
#write results in json
annotations = {'version': version, 'type': type, 'labels': list(labels), 'annotations': annotation}
with open('/'.join([rootdir, '_annotations.json']), 'w') as outfile:
json.dump(annotations, outfile)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
6509182
|
# -*- coding: utf-8 -*-
from .utils import fetch_menicka, parse_menicka
NAME = "<NAME>"
URL = "https://www.menicka.cz/5335-spravne-misto.html"
RESTAURANT_ID = "5335"
def parse_menu():
menicka_html = fetch_menicka(RESTAURANT_ID)
return parse_menicka(menicka_html)
|
StarcoderdataPython
|
5197328
|
# -*- coding: utf-8 -*-
"""Exception classes."""
class APISpecError(Exception):
"""Base class for all apispec-related errors."""
pass
class PluginError(APISpecError):
"""Raised when a plugin cannot be found or is invalid."""
pass
class SwaggerError(APISpecError):
"""Raised when a swagger validation fails"""
pass
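# Minimal usage sketch of the hierarchy above: catching the base class also covers the
# more specific plugin and swagger errors.
if __name__ == '__main__':
    try:
        raise PluginError('plugin could not be loaded')
    except APISpecError as err:
        print('caught: {0}'.format(err))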
|
StarcoderdataPython
|
11240994
|
<reponame>dashhudson/go-links<filename>server/src/migrations/versions/1_11880ac0ca4a_add_lookup_key.py
"""Add lookup key
Revision ID: 11880ac0ca4a
Revises: <KEY>
Create Date: 2020-08-05 22:52:17.165548
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '11880ac0ca4a'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('short_link', sa.Column('key', sa.String(length=500), nullable=False))
op.create_index(op.f('ix_short_link_key'), 'short_link', ['key'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_short_link_key'), table_name='short_link')
op.drop_column('short_link', 'key')
# ### end Alembic commands ###
|
StarcoderdataPython
|
3567591
|
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
class SchemaDatabaseSchemaEditor(DatabaseSchemaEditor):
def _create_fk_sql(self, model, field, suffix):
"""Support of our hackish foo\".\"bar table names on FK.
Base copy/paste of base _create_fk_sql with character replacement added
"""
from_table = model._meta.db_table
from_column = field.column
to_table = field.target_field.model._meta.db_table
to_column = field.target_field.column
suffix = suffix % {
"to_table": to_table.replace('"."', "__"),
"to_column": to_column.replace('"."', "__"),
}
return self.sql_create_fk % {
"table": self.quote_name(from_table),
"name": self.quote_name(self._create_index_name(
model, [from_column], suffix=suffix)),
"column": self.quote_name(from_column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
"deferrable": self.connection.ops.deferrable_sql(),
}
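# Hypothetical wiring sketch (not part of the original file): Django picks up a custom schema
# editor through the backend's DatabaseWrapper, e.g. in a custom backend package such as
# myproject/db_backend/base.py:
#
# from django.db.backends.postgresql import base as postgresql_base
# from .schema import SchemaDatabaseSchemaEditor
#
# class DatabaseWrapper(postgresql_base.DatabaseWrapper):
#     SchemaEditorClass = SchemaDatabaseSchemaEditor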
|
StarcoderdataPython
|
11365087
|
# coding: utf-8
# Author: <NAME>
# Contact: <EMAIL>
# Python modules
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import QThread, pyqtSignal
import time
# Wizard modules
from wizard.core import environment
from wizard.core import repository
from wizard.core import launch
from wizard.core import image
from wizard.core import assets
from wizard.core import project
from wizard.vars import ressources
# Wizard gui modules
from wizard.gui import gui_utils
from wizard.gui import gui_server
class locks_widget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(locks_widget, self).__init__(parent)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.ToolTip)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.work_env_ids = dict()
self.build_ui()
self.connect_functions()
def connect_functions(self):
self.unlock_all_button.clicked.connect(self.unlock_all)
def leaveEvent(self, event):
self.hide()
def build_ui(self):
self.setMinimumWidth(550)
self.setMinimumHeight(500)
self.main_widget_layout = QtWidgets.QHBoxLayout()
self.main_widget_layout.setContentsMargins(12, 12, 12, 12)
self.setLayout(self.main_widget_layout)
self.main_widget = QtWidgets.QFrame()
self.main_widget.setObjectName('round_frame')
self.main_layout = QtWidgets.QVBoxLayout()
self.main_layout.setContentsMargins(0,0,0,0)
self.main_layout.setSpacing(6)
self.main_widget.setLayout(self.main_layout)
self.main_widget_layout.addWidget(self.main_widget)
self.shadow = QtWidgets.QGraphicsDropShadowEffect()
self.shadow.setBlurRadius(12)
self.shadow.setColor(QtGui.QColor(0, 0, 0, 190))
self.shadow.setXOffset(0)
self.shadow.setYOffset(0)
self.main_widget.setGraphicsEffect(self.shadow)
self.header_widget = QtWidgets.QWidget()
self.header_widget.setObjectName('dark_widget')
self.header_widget.setStyleSheet('#dark_widget{border-top-left-radius:8px;border-top-right-radius:8px;}')
self.header_layout = QtWidgets.QHBoxLayout()
self.header_layout.setContentsMargins(10,10,10,10)
self.header_layout.setSpacing(6)
self.header_widget.setLayout(self.header_layout)
self.title = QtWidgets.QLabel('Locked work environments')
self.header_layout.addWidget(self.title)
self.header_layout.addSpacerItem(QtWidgets.QSpacerItem(0,0,QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed))
self.unlock_all_button = QtWidgets.QPushButton('Unlock all')
self.unlock_all_button.setStyleSheet('padding:3px;')
self.unlock_all_button.setIcon(QtGui.QIcon(ressources._lock_icons_[0]))
self.header_layout.addWidget(self.unlock_all_button)
self.main_layout.addWidget(self.header_widget)
self.info_widget = gui_utils.info_widget(transparent=1)
self.info_widget.setVisible(0)
self.main_layout.addWidget(self.info_widget)
self.work_envs_scrollArea = QtWidgets.QScrollArea()
self.work_envs_scrollArea.setObjectName('transparent_widget')
self.work_envs_scrollBar = self.work_envs_scrollArea.verticalScrollBar()
self.work_envs_scrollArea_widget = QtWidgets.QWidget()
self.work_envs_scrollArea_widget.setObjectName('transparent_widget')
self.work_envs_scrollArea_layout = QtWidgets.QVBoxLayout()
self.work_envs_scrollArea_layout.setContentsMargins(10,10,10,10)
self.work_envs_scrollArea_layout.setSpacing(3)
self.work_envs_scrollArea_widget.setLayout(self.work_envs_scrollArea_layout)
self.work_envs_scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.work_envs_scrollArea.setWidgetResizable(True)
self.work_envs_scrollArea.setWidget(self.work_envs_scrollArea_widget)
self.work_envs_scrollArea_layout.addSpacerItem(QtWidgets.QSpacerItem(0,0,QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding))
self.main_layout.addWidget(self.work_envs_scrollArea)
def refresh(self):
user_id = repository.get_user_row_by_name(environment.get_user(), 'id')
work_env_rows = project.get_user_locks(user_id)
project_work_env_ids = []
for work_env_row in work_env_rows:
project_work_env_ids.append(work_env_row['id'])
if work_env_row['id'] not in self.work_env_ids.keys():
widget = work_env_widget(work_env_row, self)
self.work_envs_scrollArea_layout.addWidget(widget)
self.work_env_ids[work_env_row['id']] = widget
work_env_id_list = list(self.work_env_ids.keys())
for work_env_id in work_env_id_list:
if work_env_id not in project_work_env_ids:
self.remove_work_env(work_env_id)
self.refresh_info_widget()
def unlock_all(self):
project.unlock_all()
gui_server.refresh_team_ui()
def remove_work_env(self, work_env_id):
if work_env_id in self.work_env_ids.keys():
self.work_env_ids[work_env_id].setParent(None)
self.work_env_ids[work_env_id].deleteLater()
del self.work_env_ids[work_env_id]
def refresh_info_widget(self):
if len(self.work_env_ids) == 0:
self.work_envs_scrollArea.setVisible(0)
self.info_widget.setVisible(1)
self.info_widget.setText("No locked\nwork environments !")
self.info_widget.setImage(ressources._nothing_info_)
else:
self.info_widget.setVisible(0)
self.work_envs_scrollArea.setVisible(1)
def toggle(self):
if self.isVisible():
if not self.isActiveWindow():
self.show()
self.raise_()
gui_utils.move_ui(self)
else:
self.hide()
else:
self.show()
self.raise_()
gui_utils.move_ui(self)
class work_env_widget(QtWidgets.QFrame):
def __init__(self, work_env_row, parent=None):
super(work_env_widget, self).__init__(parent)
self.work_env_row = work_env_row
self.build_ui()
self.fill_ui()
self.connect_functions()
def fill_ui(self):
software = project.get_software_data(self.work_env_row['software_id'], 'name')
icon = ressources._sofwares_icons_dic_[software]
self.software_icon.setPixmap(QtGui.QIcon(icon).pixmap(26))
work_env_label = assets.instance_to_string(('work_env', self.work_env_row['id']))
self.work_env_label.setText(work_env_label)
def connect_functions(self):
self.unlock_button.clicked.connect(self.unlock)
def unlock(self):
project.toggle_lock(self.work_env_row['id'])
gui_server.refresh_team_ui()
def build_ui(self):
self.setObjectName('item_widget_frame')
self.main_layout = QtWidgets.QHBoxLayout()
self.main_layout.setContentsMargins(10, 10, 10, 10)
self.main_layout.setSpacing(6)
self.setLayout(self.main_layout)
self.software_icon = QtWidgets.QLabel()
self.software_icon.setFixedSize(26,26)
self.main_layout.addWidget(self.software_icon)
self.work_env_label = QtWidgets.QLabel()
self.main_layout.addWidget(self.work_env_label)
self.unlock_button = QtWidgets.QPushButton()
self.unlock_button.setObjectName('locked_button')
self.unlock_button.setFixedSize(26,26)
self.unlock_button.setIcon(QtGui.QIcon(ressources._lock_icons_[1]))
gui_utils.application_tooltip(self.unlock_button, "Unlock software instance")
self.main_layout.addWidget(self.unlock_button)
|
StarcoderdataPython
|
1820589
|
<filename>exerc3_sec6.py<gh_stars>1-10
print("CONTAGEM REGRESSIVA:")
n = 10
while n > 0:
print(n,"!")
n = n - 1
print("FIM!")
|
StarcoderdataPython
|
11384961
|
from .login import LoginForm # noqa
|
StarcoderdataPython
|
3586972
|
<gh_stars>0
import unittest
from pyiron.lammps.control import LammpsControl
class TestLammps(unittest.TestCase):
def test_generate_seed_from_job(self):
lc = LammpsControl()
job_hash_dict = {'job_0_0': lc.generate_seed_from_job(job_name='job_0', seed=0),
'job_0_1': lc.generate_seed_from_job(job_name='job_0', seed=1),
'job_0_2': lc.generate_seed_from_job(job_name='job_0', seed=2),
'job_1_0': lc.generate_seed_from_job(job_name='job_1', seed=0),
'job_1_1': lc.generate_seed_from_job(job_name='job_1', seed=1),
'job_1_2': lc.generate_seed_from_job(job_name='job_1', seed=2)}
self.assertEqual(job_hash_dict['job_0_0'], 94639)
self.assertEqual(job_hash_dict['job_0_1'], 84051)
self.assertEqual(job_hash_dict['job_0_2'], 50062)
self.assertEqual(job_hash_dict['job_1_0'], 84649)
self.assertEqual(job_hash_dict['job_1_1'], 99268)
self.assertEqual(job_hash_dict['job_1_2'], 45752)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1799261
|
<reponame>RiccardoVaccari/Groovy2.0
from src.database.model import DiscordServer, Track, Radio
from pony.orm import db_session, select
class Server:
@db_session
def __init__(self, guild):
self.id = str(guild.id)
self.name = guild.name
if not self.get():
new_server = DiscordServer(
ID = self.id,
name = self.name,
index=0,
radio_index=0
)
@db_session
def get(self):
return DiscordServer.get(ID=self.id)
@db_session
def set_value(self, value):
server = self.get()
server.set(**value)
return server
@db_session
def get_queue(self):
queue = select(track for track in Track if track.DiscordServerID.ID == self.id)[:]
queue.sort(key=lambda track: track.position)
return queue
@db_session
def delete_track(self, track_position):
track = Track.get(position=track_position, DiscordServerID=self.get())
track.delete()
@db_session
def load_track(self, new_track):
t = Track(**new_track)
@db_session
def get_track(self, position):
t = Track.get(DiscordServerID = self.id, position=position)
return t
@db_session
def load_radio(self, radio):
radio = Radio.get(ID=radio.ID)
self.set_value({
"radio": radio,
"radio_index": radio.ID
})
@property
def radio_on(self):
return self.get().radio
|
StarcoderdataPython
|
6652747
|
"""
File: Runner.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/jplehr/pira/LICENSE.txt
Description: Module to run the target software.
"""
import sys
sys.path.append('..')
import lib.Utility as U
import lib.Logging as L
import lib.FunctorManagement as F
import lib.Measurement as M
import lib.DefaultFlags as D
import lib.ProfileSink as S
from lib.Configuration import PiraConfig, TargetConfig, InstrumentConfig, InvocationConfig
import typing
class Runner:
pass
class LocalBaseRunner(Runner):
"""
The base class for execution on the same machine. It implements the basic *run* method, which invokes the target.
"""
def __init__(self, configuration: PiraConfig, sink):
""" Runner are initialized once with a PiraConfiguration """
self._config = configuration
self._sink = sink
def has_sink(self) -> bool:
if self._sink is None:
return False
if isinstance(self._sink, S.NopSink):
return False
return True
def get_sink(self):
return self._sink
def run(self, target_config: TargetConfig, instrument_config: InstrumentConfig, ) -> float:
""" Implements the actual invocation """
functor_manager = F.FunctorManager()
run_functor = functor_manager.get_or_load_functor(target_config.get_build(), target_config.get_target(),
target_config.get_flavor(), 'run')
default_provider = D.BackendDefaults()
kwargs = default_provider.get_default_kwargs()
kwargs['util'] = U
kwargs['LD_PRELOAD'] = default_provider.get_MPI_wrap_LD_PRELOAD()
runtime = .0
if run_functor.get_method()['active']:
run_functor.active(target_config.get_target(), **kwargs)
L.get_logger().log('For the active functor we can barely measure runtime', level='warn')
runtime = 1.0
try:
U.change_cwd(target_config.get_place())
invoke_arguments = target_config.get_args_for_invocation()
kwargs['args'] = invoke_arguments
if invoke_arguments is not None:
L.get_logger().log('LocalBaseRunner::run: (args) ' + str(invoke_arguments))
command = run_functor.passive(target_config.get_target(), **kwargs)
_, runtime = U.shell(command, time_invoc=True)
L.get_logger().log(
'LocalBaseRunner::run::passive_invocation -> Returned runtime: ' + str(runtime), level='debug')
except Exception as e:
L.get_logger().log('LocalBaseRunner::run Exception\n' + str(e), level='error')
raise RuntimeError('LocalBaseRunner::run caught exception. ' + str(e))
# TODO: Insert the data into the database
return runtime
class LocalRunner(LocalBaseRunner):
"""
The LocalRunner invokes the target application with the first argument string given in the config.
For scalability studies, i.e., iterate over all given input sizes, use the LocalScalingRunner.
"""
def __init__(self, configuration: PiraConfig, sink):
""" Runner are initialized once with a PiraConfiguration """
super().__init__(configuration, sink)
self._num_repetitions = InvocationConfig.get_instance().get_num_repetitions()
def get_num_repetitions(self) -> int:
return self._num_repetitions
def do_baseline_run(self, target_config: TargetConfig) -> M.RunResult:
L.get_logger().log('LocalRunner::do_baseline_run')
accu_runtime = .0
if not target_config.has_args_for_invocation():
L.get_logger().log('LocalRunner::do_baseline_run: BEGIN not target_config.has_args_for_invocation()')
# This runner only takes into account the first argument string (if not already set)
args = self._config.get_args(target_config.get_build(), target_config.get_target())
L.get_logger().log('LocalRunner::do_baseline_run: args: ' + str(args))
target_config.set_args_for_invocation(args[0])
L.get_logger().log('LocalRunner::do_baseline_run: END not target_config.has_args_for_invocation()')
# TODO Better evaluation of the obtained timings.
time_series = M.RunResultSeries(reps=self.get_num_repetitions())
for y in range(0, self.get_num_repetitions()):
L.get_logger().log('LocalRunner::do_baseline_run: Running iteration ' + str(y), level='debug')
l_runtime = self.run(target_config, InstrumentConfig())
accu_runtime += l_runtime
time_series.add_values(l_runtime, self.get_num_repetitions())
run_result = M.RunResult(accu_runtime, self.get_num_repetitions())
L.get_logger().log('[Vanilla][RUNTIME] Vanilla avg: ' + str(run_result.get_average()) + '\n', level='perf')
L.get_logger().log('[Vanilla][RTSeries] Average: ' + str(time_series.get_average()), level='perf')
L.get_logger().log('[Vanilla][RTSeries] Median: ' + str(time_series.get_median()), level='perf')
L.get_logger().log('[Vanilla][RTSeries] Stdev: ' + str(time_series.get_stdev()), level='perf')
L.get_logger().log('[Vanilla][REPETITION SUM] Vanilla sum: ' + str(time_series.get_accumulated_runtime()), level='perf')
return time_series
def do_profile_run(self,
target_config: TargetConfig,
instr_iteration: int) -> M.RunResult:
L.get_logger().log(
'LocalRunner::do_profile_run: Received instrumentation file: ' + target_config.get_instr_file(), level='debug')
scorep_helper = M.ScorepSystemHelper(self._config)
instrument_config = InstrumentConfig(True, instr_iteration)
scorep_helper.set_up(target_config, instrument_config)
runtime = .0
if not target_config.has_args_for_invocation():
# This runner only takes into account the first argument string (if not already set)
args = self._config.get_args(target_config.get_build(), target_config.get_target())
target_config.set_args_for_invocation(args[0])
time_series = M.RunResultSeries(reps=self.get_num_repetitions())
for y in range(0, self._num_repetitions):
L.get_logger().log('LocalRunner::do_profile_run: Running instrumentation iteration ' + str(y), level='debug')
l_runtime = self.run(target_config, instrument_config)
runtime += l_runtime
time_series.add_values(l_runtime, self.get_num_repetitions())
# Enable further processing of the resulting profile
self._sink.process(scorep_helper.get_exp_dir(), target_config, instrument_config)
run_result = M.RunResult(runtime, self.get_num_repetitions())
L.get_logger().log(
'[Instrument][RUNTIME] $' + str(instr_iteration) + '$ ' + str(run_result.get_average()), level='perf')
L.get_logger().log('[Instrument][RTSeries] Average: ' + str(time_series.get_average()), level='perf')
L.get_logger().log('[Instrument][RTSeries] Median: ' + str(time_series.get_median()), level='perf')
L.get_logger().log('[Instrument][RTSeries] Stdev: ' + str(time_series.get_stdev()), level='perf')
return time_series
class LocalScalingRunner(LocalRunner):
"""
The LocalScalingRunner performs measurements related to Extra-P modelling.
The arguments given in the configuration are treated as the different input sizes, i.e.,
the first string is the smallest input configuration, the second is the next larger configuration, etc.
"""
def __init__(self, configuration: PiraConfig, sink):
super().__init__(configuration, sink)
def do_profile_run(self,
target_config: TargetConfig,
instr_iteration: int) -> M.RunResult:
L.get_logger().log('LocalScalingRunner::do_profile_run')
# We run as many experiments as we have input data configs
# TODO: How to handle the model parameter <-> input parameter relation, do we care?
args = self._config.get_args(target_config.get_build(), target_config.get_target())
# TODO: How to handle multiple MeasurementResult items? We get a vector of these after this function.
#run_result = M.RunResult()
run_result = M.RunResultSeries(reps=self.get_num_repetitions(), num_data_sets=5)
for arg_cfg in args:
# Call the runner method with the correct arguments.
target_config.set_args_for_invocation(arg_cfg)
rr = super().do_profile_run(target_config, instr_iteration)
run_result.add_from(rr)
# At this point we have all the data we need to construct an Extra-P model
return run_result
def do_baseline_run(self, target_config: TargetConfig) -> M.RunResult:
L.get_logger().log('LocalScalingRunner::do_baseline_run')
args = self._config.get_args(target_config.get_build(), target_config.get_target())
#run_result = M.RunResult()
run_result = M.RunResultSeries(reps=self.get_num_repetitions(), num_data_sets=5)
for arg_cfg in args:
target_config.set_args_for_invocation(arg_cfg)
rr = super().do_baseline_run(target_config)
run_result.add_from(rr)
return run_result
class SlurmRunner(Runner):
""" TODO This runner executes the measurements on a slurm allocated job. """
pass
|
StarcoderdataPython
|
9668199
|
# Graph of positive tests by age over time, per age category
# <NAME>, (@rcsmit) - MIT Licence
# IN: table with the number of positive tests and the total number of tests per week, categorised by age,
# copied manually from Table 14 of the weekly RIVM report
# "Wekelijkse update epidemiologische situatie COVID-19 in Nederland"
# https://www.rivm.nl/coronavirus-covid-19/actueel/wekelijkse-update-epidemiologische-situatie-covid-19-in-nederland
# Challenges: copying and editing Table 14; three different age groupings; everything cumulative
# until Dec. 2020; the x-axis of the graph
# TODO : - add a few more weeks preceding the current start of the data file (these were weeks with few infections).
#        - integrate into the dashboard
#        - add 'Total reported'
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def save_df(df,name):
""" _ _ _ """
OUTPUT_DIR = 'C:\\Users\\rcxsm\\Documents\\phyton_scripts\\covid19_seir_models\\output\\'
name_ = OUTPUT_DIR + name+'.csv'
compression_opts = dict(method=None,
archive_name=name_)
df.to_csv(name_, index=False,
compression=compression_opts)
print ("--- Saving "+ name_ + " ---" )
def main():
#url = "C:\\Users\\rcxsm\\Documents\\phyton_scripts\\covid19_seir_models\\input\\covid19_seir_models\input\pos_test_leeftijdscat_wekelijks.csv"
url= "https://raw.githubusercontent.com/rcsmit/COVIDcases/main/pos_test_leeftijdscat_wekelijks.csv"
to_show_in_graph = [ "18-24", "25-29", "30-39", "40-49", "50-59", "60-69", "70+"]
#id;datum;leeftijdscat;methode;mannen_pos;mannen_getest;vrouwen_pos ;vrouwen_getest ;
# totaal_pos;totaal_getest;weeknr2021;van2021;tot2021
df = pd.read_csv(url,
delimiter=";",
low_memory=False)
df["datum"]=pd.to_datetime(df["datum"], format='%d-%m-%Y')
list_dates = df["datum"].unique()
cat_oud = [ "0-4", "05-09", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39",
"40-44", "45-49", "50-54", "55-59", "60-64", "65-69", "70-74", "75-79", "80-84", "85-89", "90-94", "95+" ]
cat_vervanging = [ "0-4", "05-09", "10-14", "15-19", "20-29", "20-29", "30-39", "30-39",
"40-49", "40-49", "50-59", "50-59", "60-69", "60-69", "70+", "70+", "70+", "70+", "70+", "70+" ]
cat_nieuw = [ "0-12", "13-17", "18-24", "25-29", "30-39", "40-49", "50-59", "60-69", "70+", "Niet vermeld"]
cat_nieuwst_code =["A", "B", "C", "D", "E", "F" "G", "H", "I", "J", "K"]
cat_nieuwst= ["0-3", "04-12", "13-17", "18-24", "25-29", "30-39", "40-49", "50-59", "60-69", "70+", "Niet vermeld"]
    # These categories are the ones that ultimately end up in the graph
cat_nieuwstx= ["0-12", "0-03", "04-12", "13-17", "18-24", "25-29", "30-39", "40-49", "50-59", "60-69", "70+", "Niet vermeld"]
#####################################################
df_new= pd.DataFrame({'date': [],'cat_oud': [],
'cat_nieuw': [], "positief_testen": [],"totaal_testen": [], "methode":[]})
for i in range(len(df)):
d = df.loc[i, "datum"]
for x in range(len(cat_oud)-1):
c_o,c,p,t,m = None,None,None,None,None
if df.loc[i, "methode"] == "oud":
# print (df.loc[i, "leeftijdscat"])
# print (f"----{df.loc[i, 'leeftijdscat']}----{cat_oud[x]}----")
if df.loc[i, "leeftijdscat"] == cat_oud[x]:
c_o = cat_oud[x]
c = cat_vervanging[x]
# print (f"{c} - {i} - {x} ")
# print (f"----{df.loc[i, 'leeftijdscat']}----{cat_oud[x]}----")
p =df.loc[i, "totaal_pos"]
t = df.loc[i, "totaal_getest"]
m = df.loc[i, "methode"] == "oud"
df_new = df_new.append({ 'date': d, 'cat_oud': c_o, 'cat_nieuw': c, "positief_testen": p,"totaal_testen":t, "methode": m}, ignore_index= True)
c_o,c,p,t,m = None,None,None,None,None
elif (
x <= len(cat_nieuwstx) - 1
and df.loc[i, "leeftijdscat"] == cat_nieuwstx[x]
):
c_o = df.loc[i, "leeftijdscat"]
c = df.loc[i, "leeftijdscat"]
p =df.loc[i, "totaal_pos"]
t = df.loc[i, "totaal_getest"]
m = df.loc[i, "methode"]
df_new = df_new.append({ 'date': d, 'cat_oud': c_o, 'cat_nieuw': c, "positief_testen": p,"totaal_testen":t, "methode": m}, ignore_index= True)
c_o,c,p,t,m = None,None,None,None,None
df_new = df_new.groupby(['date','cat_nieuw'], sort=True).sum().reset_index()
df_new['percentage'] = round((df_new['positief_testen']/df_new['totaal_testen']*100),1)
show_from = "2020-1-1"
show_until = "2030-1-1"
startdate = pd.to_datetime(show_from).date()
enddate = pd.to_datetime(show_until).date()
datumveld = 'date'
mask = (df_new[datumveld].dt.date >= startdate) & (df_new[datumveld].dt.date <= enddate)
df_new = df_new.loc[mask]
    print (f'Total number of positive tests : {df_new["positief_testen"].sum()}')
    print (f'Total number of tests : {df_new["totaal_testen"].sum()}')
    print (f'Percentage positive : { round (( 100 * df_new["positief_testen"].sum() / df_new["totaal_testen"].sum() ),2) }')
list_age_groups = df_new["cat_nieuw"].unique()
fig1x, ax = plt.subplots(1,1)
for l in to_show_in_graph:
df_temp = df_new[df_new['cat_nieuw']==l]
list_percentage = df_temp["percentage"].tolist()
list_dates = df_temp["date"].tolist()
plt.plot(list_dates, list_percentage, label = l)
ax.text(1, 1.1, 'Created by <NAME> — @rcsmit',
transform=ax.transAxes, fontsize='xx-small', va='top', ha='right')
plt.title("Percentage Positieve testen per agegroup" , fontsize=10)
plt.legend(bbox_to_anchor=(1.3, 1),loc="best")
plt.tight_layout()
plt.show()
main()
|
StarcoderdataPython
|
9752967
|
<filename>metpy/calc/thermo.py<gh_stars>1-10
# Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import division
import numpy as np
import scipy.integrate as si
from ..package_tools import Exporter
from ..constants import epsilon, kappa, P0, Rd, Lv, Cp_d
from ..units import atleast_1d, concatenate, units
exporter = Exporter(globals())
sat_pressure_0c = 6.112 * units.millibar
@exporter.export
def potential_temperature(pressure, temperature):
r'''Calculate the potential temperature.
    Uses the Poisson equation to calculate the potential temperature
given `pressure` and `temperature`.
Parameters
----------
pressure : array_like
The total atmospheric pressure
temperature : array_like
The temperature
Returns
-------
array_like
        The potential temperature corresponding to the temperature and
pressure.
See Also
--------
dry_lapse
Notes
-----
Formula:
.. math:: \Theta = T (P_0 / P)^\kappa
Examples
--------
>>> from metpy.units import units
>>> metpy.calc.potential_temperature(800. * units.mbar, 273. * units.kelvin)
290.9814150577374
'''
return temperature * (P0 / pressure)**kappa
@exporter.export
def dry_lapse(pressure, temperature):
r'''Calculate the temperature at a level assuming only dry processes
operating from the starting point.
This function lifts a parcel starting at `temperature`, conserving
potential temperature. The starting pressure should be the first item in
the `pressure` array.
Parameters
----------
pressure : array_like
The atmospheric pressure level(s) of interest
temperature : array_like
The starting temperature
Returns
-------
array_like
The resulting parcel temperature at levels given by `pressure`
See Also
--------
moist_lapse : Calculate parcel temperature assuming liquid saturation
processes
parcel_profile : Calculate complete parcel profile
potential_temperature
'''
return temperature * (pressure / pressure[0])**kappa
@exporter.export
def moist_lapse(pressure, temperature):
r'''
Calculate the temperature at a level assuming liquid saturation processes
operating from the starting point.
This function lifts a parcel starting at `temperature`. The starting
pressure should be the first item in the `pressure` array. Essentially,
this function is calculating moist pseudo-adiabats.
Parameters
----------
pressure : array_like
The atmospheric pressure level(s) of interest
temperature : array_like
The starting temperature
Returns
-------
array_like
        The temperature corresponding to the starting temperature and
pressure levels.
See Also
--------
dry_lapse : Calculate parcel temperature assuming dry adiabatic processes
parcel_profile : Calculate complete parcel profile
Notes
-----
This function is implemented by integrating the following differential
equation:
.. math:: \frac{dT}{dP} = \frac{1}{P} \frac{R_d T + L_v r_s}
{C_{pd} + \frac{L_v^2 r_s \epsilon}{R_d T^2}}
This equation comes from [1]_.
References
----------
.. [1] <NAME>. and <NAME>, 2013: Saturated Pseudoadiabats--A
Noniterative Approximation. J. Appl. Meteor. Clim., 52, 5-15.
'''
def dt(t, p):
t = units.Quantity(t, temperature.units)
p = units.Quantity(p, pressure.units)
rs = mixing_ratio(saturation_vapor_pressure(t), p)
return (1. / p) * ((Rd * t + Lv * rs) /
(Cp_d + (Lv * Lv * rs * epsilon / (Rd * t * t))))
return units.Quantity(si.odeint(dt, atleast_1d(temperature).squeeze(),
pressure.squeeze()).T.squeeze(), temperature.units)
@exporter.export
def lcl(pressure, temperature, dewpt, max_iters=50, eps=1e-2):
    r'''Calculate the lifted condensation level (LCL) from the starting
point.
The starting state for the parcel is defined by `temperature`, `dewpoint`,
and `pressure`.
Parameters
----------
pressure : array_like
The starting atmospheric pressure
temperature : array_like
The starting temperature
dewpt : array_like
The starting dew point
Returns
-------
array_like
The LCL
Other Parameters
----------------
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-2.
See Also
--------
parcel_profile
Notes
-----
This function is implemented using an iterative approach to solve for the
LCL. The basic algorithm is:
1. Find the dew point from the LCL pressure and starting mixing ratio
2. Find the LCL pressure from the starting temperature and dewpoint
3. Iterate until convergence
    The function is guaranteed to finish by virtue of the `max_iters` counter.
'''
w = mixing_ratio(saturation_vapor_pressure(dewpt), pressure)
p = pressure
eps = units.Quantity(eps, p.units)
while max_iters:
td = dewpoint(vapor_pressure(p, w))
new_p = pressure * (td / temperature) ** (1. / kappa)
if np.abs(new_p - p).max() < eps:
break
p = new_p
max_iters -= 1
return new_p
@exporter.export
def parcel_profile(pressure, temperature, dewpt):
r'''Calculate the profile a parcel takes through the atmosphere, lifting
from the starting point.
The parcel starts at `temperature`, and `dewpt`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile.
Parameters
----------
pressure : array_like
The atmospheric pressure level(s) of interest. The first entry should be the starting
point pressure.
temperature : array_like
The starting temperature
dewpt : array_like
The starting dew point
Returns
-------
array_like
The parcel temperatures at the specified pressure levels.
See Also
--------
lcl, moist_lapse, dry_lapse
'''
# Find the LCL
l = lcl(pressure[0], temperature, dewpt).to(pressure.units)
# Find the dry adiabatic profile, *including* the LCL
press_lower = concatenate((pressure[pressure > l], l))
t1 = dry_lapse(press_lower, temperature)
# Find moist pseudo-adiabatic profile starting at the LCL
press_upper = concatenate((l, pressure[pressure < l]))
t2 = moist_lapse(press_upper, t1[-1]).to(t1.units)
    # Return the concatenated profile, dropping the duplicated LCL point
return concatenate((t1[:-1], t2[1:]))
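# Illustrative usage sketch of the parcel functions above (arbitrary values, following the
# module's convention of working with pint quantities in absolute temperature units):
#
# >>> import numpy as np
# >>> from metpy.units import units
# >>> pres = np.array([1000., 900., 800., 700.]) * units.mbar
# >>> temp = 298.15 * units.kelvin
# >>> dewpt = 291.15 * units.kelvin
# >>> prof = parcel_profile(pres, temp, dewpt)  # parcel temperatures at the given levels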
@exporter.export
def vapor_pressure(pressure, mixing):
r'''Calculate water vapor (partial) pressure
Given total `pressure` and water vapor `mixing` ratio, calculates the
partial pressure of water vapor.
Parameters
----------
pressure : array_like
total atmospheric pressure
mixing : array_like
dimensionless mass mixing ratio
Returns
-------
array_like
The ambient water vapor (partial) pressure in the same units as
`pressure`.
See Also
--------
saturation_vapor_pressure, dewpoint
'''
return pressure * mixing / (epsilon + mixing)
@exporter.export
def saturation_vapor_pressure(temperature):
r'''Calculate the saturation water vapor (partial) pressure
Parameters
----------
temperature : array_like
The temperature
Returns
-------
array_like
The saturation water vapor (partial) pressure
See Also
--------
vapor_pressure, dewpoint
Notes
-----
Instead of temperature, dewpoint may be used in order to calculate
the actual (ambient) water vapor (partial) pressure.
The formula used is that from Bolton 1980 [2] for T in degrees Celsius:
    .. math:: 6.112 e^{\frac{17.67T}{T + 243.5}}
References
----------
.. [2] <NAME>., 1980: The Computation of Equivalent Potential
Temperature. Mon. Wea. Rev., 108, 1046-1053.
'''
# Converted from original in terms of C to use kelvin. Using raw absolute values of C in
# a formula plays havoc with units support.
return sat_pressure_0c * np.exp(17.67 * (temperature - 273.15 * units.kelvin) /
(temperature - 29.65 * units.kelvin))
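# Quick sanity check of the Bolton (1980) fit (illustrative, approximate values):
# at T = 25 degC, 6.112 * exp(17.67 * 25 / (25 + 243.5)) is about 31.7 hPa, which
# agrees with tabulated saturation vapor pressure to within a few tenths of a hPa.
#   >>> saturation_vapor_pressure(units.Quantity(25., 'degC'))   # ~31.7 hPa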
@exporter.export
def dewpoint_rh(temperature, rh):
r'''Calculate the ambient dewpoint given air temperature and relative
humidity.
Parameters
----------
temperature : array_like
Air temperature
rh : array_like
Relative humidity expressed as a ratio in the range [0, 1]
Returns
-------
array_like
The dew point temperature
See Also
--------
dewpoint, saturation_vapor_pressure
'''
return dewpoint(rh * saturation_vapor_pressure(temperature))
@exporter.export
def dewpoint(e):
r'''Calculate the ambient dewpoint given the vapor pressure.
Parameters
----------
e : array_like
Water vapor partial pressure
Returns
-------
array_like
Dew point temperature
See Also
--------
dewpoint_rh, saturation_vapor_pressure, vapor_pressure
Notes
-----
This function inverts the Bolton 1980 [3] formula for saturation vapor
    pressure to instead calculate the temperature. This yields the following
formula for dewpoint in degrees Celsius:
    .. math:: T = \frac{243.5 \log(e / 6.112)}{17.67 - \log(e / 6.112)}
References
----------
.. [3] <NAME>., 1980: The Computation of Equivalent Potential
Temperature. Mon. Wea. Rev., 108, 1046-1053.
'''
val = np.log(e / sat_pressure_0c)
return 0. * units.degC + 243.5 * units.delta_degC * val / (17.67 - val)
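# Round-trip sketch (illustrative): `dewpoint` inverts the Bolton fit used in
# `saturation_vapor_pressure`, so chaining the two should recover the input
# temperature to within floating-point error.
#   >>> t = units.Quantity(15., 'degC')
#   >>> dewpoint(saturation_vapor_pressure(t))   # ~15 degC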
@exporter.export
def mixing_ratio(part_press, tot_press):
    r'''Calculate the mixing ratio of a gas given its partial pressure
    and the total pressure of the air.
There are no required units for the input arrays, other than that
they have the same units.
Parameters
----------
part_press : array_like
Partial pressure of the constituent gas
tot_press : array_like
Total air pressure
Returns
-------
array_like
        The (mass) mixing ratio, dimensionless (e.g. kg/kg or g/g)
See Also
--------
vapor_pressure
'''
return epsilon * part_press / (tot_press - part_press)
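# Worked example (approximate): with a water-vapor partial pressure of 20 hPa in
# 1000 hPa of air and epsilon ~ 0.622, w = 0.622 * 20 / (1000 - 20), i.e. roughly
# 0.0127 kg/kg (about 12.7 g/kg).
#   >>> mixing_ratio(units.Quantity(20., 'hPa'), units.Quantity(1000., 'hPa'))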
|
StarcoderdataPython
|
6554562
|
<reponame>karolyi/forum-django
from django.urls.conf import path
from .views.frontend import (
TopicCommentListingView, TopicExpandCommentsDownView,
TopicExpandCommentsUpView, TopicExpandRepliesUpRecursive, TopicListView)
urlpatterns_base = [
path(route=r'', view=TopicListView.as_view(), name='topic-listing'),
path(
route='topic/<slug:topic_slug>/',
view=TopicCommentListingView.as_view(), name='topic-comment-listing'),
path(
route='topic/<slug:topic_slug>/<int:comment_pk>/',
view=TopicCommentListingView.as_view(), name='topic-comment-listing'),
path(
route='comments-up-recursive/<slug:topic_slug>/<int:comment_pk>'
'/<int:scroll_to_pk>/', view=TopicExpandRepliesUpRecursive.as_view(),
name='comments-up-recursive'),
path(
route=(
'comments-up/<slug:topic_slug>/'
'<int:comment_pk>/<int:scroll_to_pk>/'),
view=TopicExpandCommentsUpView.as_view(), name='comments-up'),
path(
route=(
'comments-down/<slug:topic_slug>/'
'<int:comment_pk>/<int:scroll_to_pk>/'),
view=TopicExpandCommentsDownView.as_view(), name='comments-down'),
]
|
StarcoderdataPython
|
47260
|
tutor = "codi"
print(tutor)
|
StarcoderdataPython
|
5078709
|
# Copyright (c) 2019 Works Applications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import re
from queue import PriorityQueue
from . import categorytype
class CharacterCategory(object):
class Range(object):
def __lt__(self, other):
return self.high < other.high
def __init__(self, low=0, high=0, categories=None):
self.low = low
self.high = high
self.categories = categories or []
def contains(self, cp):
return self.low <= cp < self.high
def containing_length(self, text):
for i in range(len(text)):
c = ord(text[i])
if c < self.low or c > self.high:
return i
return len(text)
def lower(self, cp):
return self.high <= cp
def higher(self, cp):
return self.low > cp
def match(self, other):
return self.low == other.low and self.high == other.high
def __init__(self):
self.range_list = []
def _compile(self) -> None:
"""
        _compile transforms self.range_list into a non-overlapping range list
        so that binary search can be applied in get_category_types
:return:
"""
self.range_list.sort(key=lambda x: x.high)
self.range_list.sort(key=lambda x: x.low)
new_range_list = []
left_chain = PriorityQueue()
right_chain = self.range_list
states = []
pivot = 0
while True:
if left_chain.empty():
if not right_chain:
break
right = right_chain.pop(0)
left_chain.put(right)
pivot = right.low
states.extend(right.categories)
continue
left = left_chain.get()
right = right_chain[0] if right_chain else None
left_end = left.high
right_begin = right.low if right else math.inf
if left_end <= right_begin:
new_range_list.append(self.Range(pivot, left_end, set(states)))
pivot = left_end
for cat in left.categories:
states.remove(cat)
continue
else:
new_range_list.append(self.Range(pivot, right_begin, set(states)))
pivot = right_begin
states.extend(right.categories)
left_chain.put(right)
left_chain.put(left)
right_chain.pop(0)
self.range_list = []
_range = new_range_list[0]
for irange in new_range_list[1:]:
if irange.low == _range.high and irange.categories == _range.categories:
_range = self.Range(_range.low, irange.high, _range.categories)
else:
self.range_list.append(_range)
_range = irange
self.range_list.append(_range)
def get_category_types(self, code_point):
begin = 0
n = len(self.range_list)
end = n
pivot = (begin + end) // 2
while 0 <= pivot < n:
range_ = self.range_list[pivot]
if range_.contains(code_point):
return range_.categories
if range_.lower(code_point):
begin = pivot
else: # range_.higher(code_point)
end = pivot
new_pivot = (begin + end) // 2
if new_pivot == pivot:
break
pivot = new_pivot
return {categorytype.CategoryType.DEFAULT}
def read_character_definition(self, char_def=None):
"""
:param char_def: path
"""
if char_def is not None:
f = open(char_def, 'r', encoding="utf-8")
else:
f = open("char.def", 'r', encoding="utf-8")
for i, line in enumerate(f.readlines()):
line = line.rstrip()
if re.fullmatch(r"\s*", line) or re.match("#", line):
continue
cols = re.split(r"\s+", line)
if len(cols) < 2:
f.close()
raise AttributeError("invalid format at line {}".format(i))
if not re.match("0x", cols[0]):
continue
range_ = self.Range()
r = re.split("\\.\\.", cols[0])
range_.low = int(r[0], 16)
range_.high = range_.low + 1
if len(r) > 1:
range_.high = int(r[1], 16) + 1
if range_.low >= range_.high:
f.close()
raise AttributeError("invalid range at line {}".format(i))
for j in range(1, len(cols)):
if re.match("#", cols[j]) or cols[j] == '':
break
type_ = categorytype.CategoryType.get(cols[j])
if type_ is None:
f.close()
raise AttributeError("{} is invalid type at line {}".format(cols[j], i))
range_.categories.append(type_)
self.range_list.append(range_)
f.close()
self._compile()
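# Illustrative usage sketch (the path and the queried character are assumptions):
# after read_character_definition(), _compile() has turned range_list into sorted,
# non-overlapping ranges, so get_category_types() can answer point queries with a
# binary search.
#   >>> cc = CharacterCategory()
#   >>> cc.read_character_definition('char.def')
#   >>> cc.get_category_types(ord('A'))   # a set of CategoryType members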
|
StarcoderdataPython
|
6572120
|
<gh_stars>0
import datetime
import boto3
import botocore
import os
import re
def find_tag(key, tags):
return next((tag['Value'] for tag in tags if tag['Key'] == key), None)
def ipv4_ptr_fqdn(address):
return '%s.in-addr.arpa.' % ('.'.join(reversed(address.split('.'))))
def ipv4_amazon_name(address):
return 'ip-%s' % (address.replace('.','-'))
dns_label_regexp = re.compile(r'\A(?!-)[a-zA-Z0-9-]{1,63}(?<!-)\Z')
def is_valid_dns_label(name):
return dns_label_regexp.match(name)
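# Small examples of the helpers above (illustrative):
#   ipv4_ptr_fqdn('10.0.1.23')    -> '23.1.0.10.in-addr.arpa.'
#   ipv4_amazon_name('10.0.1.23') -> 'ip-10-0-1-23'
#   is_valid_dns_label('web-01')  matches; '-leading' and 'bad_label' do not.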
class AwsAdapter():
def __init__(self, region, vpc_dns_name_tag='Name'):
self.ec2 = boto3.client('ec2', region)
self.r53 = boto3.client('route53', 'us-east-1')
self.vpc_dns_name_tag = vpc_dns_name_tag
self.vpcs_cache = {}
self.cache_duration = 600
def prefetch_vpc_names(self,vpc_ids):
now = datetime.datetime.now().replace(tzinfo=None)
vpc_ids_to_fetch = [x for x in vpc_ids if self.vpcs_cache.get(x) == None or (now - self.vpcs_cache[x]['ts']).total_seconds() > self.cache_duration]
if len(vpc_ids_to_fetch) == 0:
return
print("VPC FETCH: %s" % (vpc_ids_to_fetch))
vpcs = self.ec2.describe_vpcs(VpcIds=vpc_ids_to_fetch)['Vpcs']
for vpc in vpcs:
name = find_tag(self.vpc_dns_name_tag, vpc.get('Tags', []))
if name:
if not is_valid_dns_label(name):
print("! Invalid VPC name: %s" % (name))
return
self.vpcs_cache[vpc['VpcId']] = {'ts': now, 'name': name}
def fetch_vpc_name(self, vpc_id):
self.prefetch_vpc_names([vpc_id])
cache_entry = self.vpcs_cache[vpc_id]
if cache_entry:
return cache_entry['name']
else:
return None
def ec2_instances_by_name(self, name):
print("ec2:DescribeInstances (name): %s" % (name))
pager = self.ec2.get_paginator('describe_instances').paginate(Filters=[{'Name': 'tag:Name','Values': [name]}])
return [i for r in pager.search('Reservations') for i in r['Instances']]
def ec2_instances_by_ids(self, ids):
print("ec2:DescribeInstances (IDs): %s" % (ids))
pager = self.ec2.get_paginator('describe_instances').paginate(InstanceIds=list(ids))
return [i for r in pager.search('Reservations') for i in r['Instances']]
def r53_get_rrset(self, zone, name, rtype):
res = self.r53.list_resource_record_sets(
HostedZoneId=zone,
StartRecordName=name,
StartRecordType=rtype,
MaxItems='1',
)
if len(res['ResourceRecordSets']) == 0:
return None
rrset = res['ResourceRecordSets'][0]
if rrset['Name'] != name:
return None
return rrset
def r53_change_rrsets(self, zone, changes, ignore_invalid_change_batch=False):
print("R53 change(%s):" % (zone))
for change in changes:
rrset = change['ResourceRecordSet']
print("R53 change(%s) [%s] %s %s %s" % (zone, change['Action'], rrset['Name'], rrset['Type'], rrset['ResourceRecords']))
try:
return self.r53.change_resource_record_sets(HostedZoneId=zone, ChangeBatch={'Comment': 'ec2-r53', 'Changes': changes})
except self.r53.exceptions.InvalidChangeBatch:
if ignore_invalid_change_batch:
return None
raise
class MockAdapter():
def __init__(self):
self.vpc_names = {}
self.ec2_instances = []
self.r53_received_change_sets = {}
self.r53_error_on_rrset_deletion = False
def prefetch_vpc_names(self, vpc_ids):
return
def fetch_vpc_name(self, vpc_id):
return self.vpc_names[vpc_id]
def ec2_instances_by_name(self, name):
return [i for i in self.ec2_instances if find_tag('Name', i.get('Tags', [])) == name]
def ec2_instances_by_ids(self, instance_ids):
ids = set(instance_ids)
return [i for i in self.ec2_instances if i['InstanceId'] in ids]
def r53_get_rrset(self, zone, name, rtype):
# FIXME:
return None
def r53_change_rrsets(self, zone, changes, ignore_invalid_change_batch=False):
if self.r53_error_on_rrset_deletion:
for x in changes:
if x['Action'] == 'DELETE':
if ignore_invalid_change_batch:
return
raise boto3.client('route53', 'us-east-1', aws_access_key_id='dummy', aws_secret_access_key='dummy').exceptions.InvalidChangeBatch({}, '')
if self.r53_received_change_sets.get(zone) == None:
self.r53_received_change_sets[zone] = []
self.r53_received_change_sets[zone].append(changes)
return
class InstanceStateProcessor():
def __init__(self, handler, instance_id, state):
self.handler = handler
self.engine = handler.engine
self.instance_id = instance_id
self.state = state
self._instance = None
def instance(self):
if self._instance != None:
return self._instance
self._instance = self.engine.adapter.ec2_instances_by_ids([self.instance_id])[0]
return self._instance
def run(self):
i = self.instance()
address = i.get('PrivateIpAddress')
vpc_id = i.get('VpcId')
if address == None or vpc_id == None:
return
fqdn = "%s.%s.%s" % (ipv4_amazon_name(address), self.engine.adapter.fetch_vpc_name(vpc_id), self.engine.domain)
rrset = self.engine.adapter.r53_get_rrset(self.engine.zone, fqdn, 'CNAME')
if rrset == None:
self.handler.change_rrset(self.engine.zone, {
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': fqdn,
'Type': 'A',
'TTL': 60,
'ResourceRecords': [
{'Value': address},
],
},
})
name = find_tag('Name', i.get('Tags', []))
if name == None:
ptr_zone = self.engine.lookup_ptr_zone(address)
if ptr_zone:
self.handler.change_rrset(ptr_zone, {
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': ipv4_ptr_fqdn(address),
'Type': 'PTR',
'TTL': 5,
'ResourceRecords': [
{'Value': fqdn},
],
},
})
else:
NameTagProcessor(self.handler, 'CreateTags', name, [self.instance()['InstanceId']]).run()
class NameTagProcessor():
def __init__(self, handler, event_name, name, instance_ids):
self.handler = handler
self.engine = handler.engine
self.event_name = event_name
self.name = name
self.target_instance_ids = set(instance_ids)
self._named_instances = None
self._target_instances = None
self._upsert_instances = None
self._deletions = None
self._vpc_ids = None
def named_instances(self):
if self._named_instances != None:
return self._named_instances
self._named_instances = self.engine.adapter.ec2_instances_by_name(self.name)
return self._named_instances
def target_instances(self):
if self._target_instances != None:
return self._target_instances
self._target_instances = self.engine.adapter.ec2_instances_by_ids(self.target_instance_ids)
return self._target_instances
def vpc_ids(self):
if self._vpc_ids != None:
return self._vpc_ids
vpc_ids = set()
processed = 0
for i in self.named_instances():
if i.get('VpcId') == None:
continue
if i['InstanceId'] in self.target_instance_ids:
processed += 1
vpc_ids.add(i['VpcId'])
if processed == len(self.target_instance_ids):
self._target_instances = self._named_instances
else:
for i in self.target_instances():
if i.get('VpcId') == None:
continue
vpc_ids.add(i['VpcId'])
self._vpc_ids = vpc_ids
return vpc_ids
def upsert_instances(self):
if self._upsert_instances != None:
return self._upsert_instances
instances = {}
for i in self.named_instances():
if i.get('VpcId') not in self.vpc_ids():
continue
if i['State']['Name'] == 'terminated':
continue
instance = instances.get(i['VpcId'])
if instance == None or (instance.get('LaunchTime') and i.get('LaunchTime') and instance['LaunchTime'] > i['LaunchTime']):
instances[i['VpcId']] = i
self._upsert_instances = instances
return instances
def deletions(self):
if self._deletions != None:
return self._deletions
info = []
seen = set([i['InstanceId'] for i in self.named_instances()])
unnamed_instance_ids = set([x for x in self.target_instance_ids if x not in seen])
if len(unnamed_instance_ids) > 0:
for i in self.target_instances():
if i['InstanceId'] not in unnamed_instance_ids:
continue
if i.get('VpcId') == None:
continue
delete_a = self.upsert_instances().get(i['VpcId'], None) == None
info.append({'Instance': i, 'DeleteARecord': delete_a})
self._deletions = info
return info
def run(self):
if not is_valid_dns_label(self.name):
print("! Invalid name: %s" % (self.name))
return
self.engine.adapter.prefetch_vpc_names(self.vpc_ids())
for i in self.target_instances():
name = find_tag('Name', i.get('Tags', []))
if self.event_name == 'CreateTags':
if name != self.name:
print("! Skipping because this function is launched for Name=%s but instance %s is currently Name=%s" % (self.name, i['InstanceId'], name))
return
if self.event_name == 'DeleteTags':
if name != None:
print("! Skipping because this function is launched for Name deletion but instance %s is currently Name=%s" % (i['InstanceId'], name))
return
for vpc_id,i in self.upsert_instances().items():
fqdn = "%s.%s.%s" % (self.name, self.engine.adapter.fetch_vpc_name(vpc_id), self.engine.domain)
address = i.get('PrivateIpAddress')
if address:
self.handler.change_rrset(self.engine.zone, {
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': fqdn,
'Type': 'A',
'TTL': 60,
'ResourceRecords': [
{'Value': address},
],
},
})
ptr_zone = self.engine.lookup_ptr_zone(address)
if ptr_zone:
self.handler.change_rrset(ptr_zone, {
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': ipv4_ptr_fqdn(address),
'Type': 'PTR',
'TTL': 60,
'ResourceRecords': [
{'Value': fqdn},
],
},
})
else:
print("! %s (%s) no PrivateIpAddress" % (fqdn, i['InstanceId']))
for deletion in self.deletions():
i = deletion['Instance']
vpc_name = self.engine.adapter.fetch_vpc_name(i['VpcId'])
fqdn = "%s.%s.%s" % (self.name, vpc_name, self.engine.domain)
address = i.get('PrivateIpAddress')
if address:
if deletion['DeleteARecord']:
self.handler.change_rrset(self.engine.zone, {
'Action': 'DELETE',
'ResourceRecordSet': {
'Name': fqdn,
'Type': 'A',
'TTL': 60,
'ResourceRecords': [
{'Value': address},
],
},
})
ptr_zone = self.engine.lookup_ptr_zone(address)
if ptr_zone:
if self.engine.default_to_amazon_name:
fqdn = "%s.%s.%s" % (ipv4_amazon_name(address), vpc_name, self.engine.domain)
self.handler.change_rrset(ptr_zone, {
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': ipv4_ptr_fqdn(address),
'Type': 'PTR',
'TTL': 60,
'ResourceRecords': [
{'Value': fqdn},
],
},
})
else:
self.handler.change_rrset(ptr_zone, {
'Action': 'DELETE',
'ResourceRecordSet': {
'Name': ipv4_ptr_fqdn(address),
'Type': 'PTR',
'TTL': 60,
'ResourceRecords': [
{'Value': fqdn},
],
},
})
class Handler():
def __init__(self, engine):
self.engine = engine
self.rrset_change_sets = {}
self.rrset_deletion_sets = {}
def run(self, event):
if event.get('detail-type') == 'EC2 Instance State-change Notification':
self.handle_instance_state(event['detail']['instance-id'], event['detail']['state'])
return
if event['detail'].get('eventType') == 'AwsApiCall':
event_name = event['detail']['eventName']
if not (event_name == 'CreateTags' or event_name == 'DeleteTags'):
print("Ignoring %s" % (event_name))
return
instance_ids = [rs['resourceId'] for rs in event['detail']['requestParameters']['resourcesSet']['items'] if rs['resourceId'] and rs['resourceId'].startswith('i-')]
tags = event['detail']['requestParameters']['tagSet']['items']
print("%s for instances %s (%s)" % (event_name, instance_ids, tags))
self.handle_tag_event(event_name, instance_ids, tags)
return
def change_rrset(self, zone, change):
if change['Action'] == 'DELETE':
sets = self.rrset_deletion_sets
else:
sets = self.rrset_change_sets
if sets.get(zone) == None:
sets[zone] = []
sets[zone].append(change)
def lookup_ptr_zone(self, address):
return self.engine.lookup_ptr_zone(address)
def handle_instance_state(self, instance_id, state):
if state == 'terminated':
# Cannot support removing safely
return
print("Instance state %s: %s" % (instance_id, state))
self.ensure_records_for_instance(instance_id, state)
def handle_tag_event(self, event_name, instance_ids, tags):
for tag in tags:
if tag['key'] == 'Alias':
self.ensure_records_for_alias_tag(tag['value'], instance_ids)
if tag['key'] == 'Name':
self.ensure_records_for_name_tag(event_name, tag['value'], instance_ids)
def ensure_records_for_instance(self, instance_id, state):
return InstanceStateProcessor(self, instance_id, state).run()
def ensure_records_for_name_tag(self, event_name, name, target_instance_ids):
print("Ensuring records for Name=%s" % (name))
return NameTagProcessor(self, event_name, name, target_instance_ids).run()
    def ensure_records_for_alias_tag(self, alias, instance_ids):
        # Alias handling is not implemented yet; accept the same arguments as the
        # call site in handle_tag_event so it does not raise a TypeError.
        # print("Ensuring records for Alias=%s" % (alias))
        return
class Engine():
def __init__(self, adapter, domain, zone, ptr_zones, default_to_amazon_name=False):
self.adapter = adapter
self.domain = domain
self.zone = zone
self.ptr_zones = ptr_zones
self.default_to_amazon_name = default_to_amazon_name
def handle(self, event):
handler = Handler(self)
handler.run(event)
for zone_id, changes in handler.rrset_change_sets.items():
self.adapter.r53_change_rrsets(zone_id, changes)
for zone_id, changes in handler.rrset_deletion_sets.items():
for change in changes:
self.adapter.r53_change_rrsets(zone_id, [change], ignore_invalid_change_batch=True)
def lookup_ptr_zone(self, address):
for x in self.ptr_zones:
if address.startswith(x['prefix']):
return x['zone']
return None
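# Illustrative wiring sketch (hedged; the domain/zone identifiers are assumptions,
# not values from this module):
#   adapter = MockAdapter()
#   engine = Engine(adapter, domain='example.internal', zone='ZONEID',
#                   ptr_zones=[{'prefix': '10.0.', 'zone': 'PTRZONEID'}])
#   engine.handle(event)   # an EC2 state-change or CloudTrail CreateTags/DeleteTags event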
|
StarcoderdataPython
|
9705508
|
from .progeny import *
|
StarcoderdataPython
|
9763081
|
#!/usr/bin/env python
from __future__ import print_function
import unittest
from ruffus import transform, Pipeline, pipeline_run, regex, inputs
import ruffus
import sys
"""
test_inputs_with_multiple_args_raising_exception.py
inputs with multiple arguments should raise an exception
"""
import os
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0])) + "/"
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
try:
@transform(None, regex(tempdir + "b"), inputs(tempdir + "a", tempdir + "b"), "task_1.output")
def task_1(i, o):
for f in o:
open(f, 'w')
except ruffus.ruffus_exceptions.error_task_transform_inputs_multiple_args:
print("\tExpected exception thrown 1")
except ruffus.ruffus_exceptions.error_inputs_multiple_args:
print("\tExpected exception thrown 2")
def task_2(i, o):
for f in o:
open(f, 'w')
class Test_task_mkdir(unittest.TestCase):
def setUp(self):
"""
"""
pass
def tearDown(self):
"""
"""
pass
def test_no_re_match(self):
try:
pipeline_run(multiprocess=10, verbose=0, pipeline="main")
except:
return
raise Exception(
"Inputs(...) with multiple arguments should have thrown an exception")
def test_newstyle_no_re_match(self):
try:
test_pipeline = Pipeline("test")
test_pipeline.transform(task_func=task_2,
input=None,
filter=regex(tempdir + "b"),
replace_inputs=inputs(
tempdir + "a", tempdir + "b"),
output="task_1.output")
test_pipeline.run(multiprocess=10, verbose=0)
except ruffus.ruffus_exceptions.error_task_transform_inputs_multiple_args:
print("\tExpected exception thrown 1")
return
except ruffus.ruffus_exceptions.error_inputs_multiple_args:
print("\tExpected exception thrown 2")
return
raise Exception(
"Inputs(...) with multiple arguments should have thrown an exception")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3437031
|
from problem.models import Problem
from .models import CodeforcesProblemSet
def get_parent(problem):
try:
return CodeforcesProblemSet.objects.get(child=problem).parent
except:
return problem
def check(problem1, problem2):
return get_parent(problem1).id == get_parent(problem2).id
def join(problem1, problem2):
if not check(problem1, problem2):
parent = get_parent(problem1)
child = get_parent(problem2)
CodeforcesProblemSet.objects.filter(parent=child).update(parent=parent)
CodeforcesProblemSet(parent=parent, child=child).save()
def get_similar_problems(problem):
    parent = get_parent(problem)
    children = list(CodeforcesProblemSet.objects.filter(parent=parent)
                    .values_list('child', flat=True))
    if not children:
        return []
    children.append(parent.id)
    problem_qs = Problem.objects.filter(id__in=children)\
        .exclude(id=problem.id)
    return problem_qs
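# Illustrative sketch of the grouping above (hedged): join() links two problems under
# a single parent, check() tests whether they already share one, and
# get_similar_problems() returns every other problem in that group.
#   join(problem_a, problem_b)        # afterwards check(problem_a, problem_b) is True
#   get_similar_problems(problem_a)   # queryset containing problem_b (and any others)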
|
StarcoderdataPython
|
207416
|
"""
# Utilities that aid in the design and analysis of piping networks
"""
from lib.pypeflow.utils.pump_curve import PumpCurve
|
StarcoderdataPython
|
1725813
|
<filename>djexperience/service/admin.py
from django.contrib import admin
from .models import Service, TypeService, Protest
@admin.register(Service)
class ServiceAdmin(admin.ModelAdmin):
list_display = ('__str__', )
search_fields = ('title',)
@admin.register(TypeService)
class TypeServiceAdmin(admin.ModelAdmin):
list_display = ('__str__', 'service')
search_fields = ('title',)
@admin.register(Protest)
class ProtestAdmin(admin.ModelAdmin):
list_display = (
'__str__',
'service',
'type_service',
)
search_fields = (
'typeservice__service__title',
'typeservice__title',
)
list_filter = ('typeservice__service', )
def service(self, obj):
return obj.typeservice.service
service.admin_order_field = 'title'
service.short_description = 'Serviço'
def type_service(self, obj):
return obj.typeservice
type_service.admin_order_field = 'title'
type_service.short_description = 'Tipo de Serviço'
|
StarcoderdataPython
|
21380
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for utility classes."""
import datetime
import sys
import unittest
from absl import app
from absl.testing import absltest
from grr_response_core.lib import rdfvalue
from grr.test_lib import test_lib
long_string = (
"迎欢迎\n"
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi luctus "
"ex sed dictum volutpat. Integer maximus, mauris at tincidunt iaculis, "
"felis magna scelerisque ex, in scelerisque est odio non nunc. "
"Suspendisse et lobortis augue. Donec faucibus tempor massa, sed dapibus"
" erat iaculis ut. Vestibulum eu elementum nulla. Nullam scelerisque "
"hendrerit lorem. Integer vitae semper metus. Suspendisse accumsan "
"dictum felis. Etiam viverra, felis sed ullamcorper vehicula, libero "
"nisl tempus dui, a porta lacus erat et erat. Morbi mattis elementum "
"efficitur. Pellentesque aliquam placerat mauris non accumsan.")
class RDFValueTest(absltest.TestCase):
"""RDFValue tests."""
def testStr(self):
"""Test RDFValue.__str__."""
self.assertEqual(str(rdfvalue.RDFInteger(1)), "1")
self.assertEqual(str(rdfvalue.RDFString(long_string)), long_string)
# TODO(hanuszczak): Current implementation of `repr` for RDF values is broken
# and not in line with Python guidelines. For example, `repr` should be
# unambiguous whereas current implementation will trim long representations
# with `...`. Moreover, the representation for most types is questionable at
# best.
#
# The implementation should be fixed and proper tests should be written.
class RDFBytesTest(absltest.TestCase):
def testFromHumanReadable(self):
string = u"zażółć gęślą jaźń"
result = rdfvalue.RDFBytes.FromHumanReadable(string)
expected = rdfvalue.RDFBytes.FromSerializedBytes(string.encode("utf-8"))
self.assertEqual(result, expected)
class RDFStringTest(absltest.TestCase):
def testFromHumanReadable(self):
string = u"pchnąć w tę łódź jeża lub ośm skrzyń fig"
result = rdfvalue.RDFString.FromHumanReadable(string)
self.assertEqual(str(result), string)
def testEqualWithBytes(self):
self.assertEqual(rdfvalue.RDFString(u"foo"), b"foo")
self.assertNotEqual(rdfvalue.RDFString(u"foo"), b"\x80\x81\x82")
def testLessThanWithBytes(self):
self.assertLess(rdfvalue.RDFString(u"abc"), b"def")
self.assertGreater(rdfvalue.RDFString(u"xyz"), b"ghi")
self.assertLess(rdfvalue.RDFString(u"012"), b"\x80\x81\x81")
# TODO: Python on Windows ships with UCS-2 by default, which does
# not properly support unicode.
@unittest.skipIf(
sys.maxunicode <= 65535,
"Your Python installation does not properly support Unicode (likely: "
"Python with no UCS4 support on Windows.")
def testLenOfEmoji(self):
self.assertLen(rdfvalue.RDFString("🚀🚀"), 2)
class RDFIntegerTest(absltest.TestCase):
def testFromHumanReadable(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"42")
self.assertEqual(result, rdfvalue.RDFInteger(42))
def testFromHumanReadablePositive(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"+108")
self.assertEqual(result, rdfvalue.RDFInteger(108))
def testFromHumanReadableNegative(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"-1337")
self.assertEqual(result, rdfvalue.RDFInteger(-1337))
def testFromHumanReadableZero(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"0")
self.assertEqual(result, rdfvalue.RDFInteger(0))
def testFromHumanReadableRaisesOnNonInteger(self):
with self.assertRaises(ValueError):
rdfvalue.RDFInteger.FromHumanReadable(u"12.3")
def testFromHumanReadableRaisesOnNonDecimal(self):
with self.assertRaises(ValueError):
rdfvalue.RDFInteger.FromHumanReadable(u"12A")
class RDFDateTimeTest(absltest.TestCase):
def testLerpMiddle(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
end_time = start_time + rdfvalue.Duration.From(10, rdfvalue.DAYS)
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.5, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time,
start_time + rdfvalue.Duration.From(5, rdfvalue.DAYS))
def testLerpZero(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.0, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time, start_time)
def testLerpOne(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
lerped_time = rdfvalue.RDFDatetime.Lerp(
1.0, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time, end_time)
def testLerpQuarter(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = start_time + rdfvalue.Duration.From(4, rdfvalue.DAYS)
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.25, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time,
start_time + rdfvalue.Duration.From(1, rdfvalue.DAYS))
def testLerpRaisesTypeErrorIfTimesAreNotRDFDatetime(self):
now = rdfvalue.RDFDatetime.Now()
with self.assertRaisesRegex(TypeError, "non-datetime"):
rdfvalue.RDFDatetime.Lerp(0.0, start_time=10, end_time=now)
with self.assertRaisesRegex(TypeError, "non-datetime"):
rdfvalue.RDFDatetime.Lerp(
0.0,
start_time=now,
end_time=rdfvalue.Duration.From(1, rdfvalue.DAYS))
def testLerpRaisesValueErrorIfProgressIsNotNormalized(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2011-01-01")
with self.assertRaises(ValueError):
rdfvalue.RDFDatetime.Lerp(1.5, start_time=start_time, end_time=end_time)
with self.assertRaises(ValueError):
rdfvalue.RDFDatetime.Lerp(-0.5, start_time=start_time, end_time=end_time)
def testFloorToMinutes(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(60, rdfvalue.SECONDS)), expected)
def testFloorToHours(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:00")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(1, rdfvalue.HOURS)), expected)
def testFloorToDays(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(1, rdfvalue.DAYS)), expected)
def testFloorExact(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
self.assertEqual(dt.Floor(rdfvalue.Duration.From(1, rdfvalue.SECONDS)), dt)
class RDFDatetimeSecondsTest(absltest.TestCase):
def testFromDatetime_withMicroSeconds(self):
dt_with_micros = datetime.datetime(2000, 1, 1, microsecond=5000)
dt = datetime.datetime(2000, 1, 1)
self.assertEqual(
rdfvalue.RDFDatetimeSeconds.FromDatetime(dt_with_micros),
rdfvalue.RDFDatetimeSeconds.FromDatetime(dt))
def testBug122716179(self):
d = rdfvalue.RDFDatetimeSeconds.FromSecondsSinceEpoch(1)
self.assertEqual(d.AsMicrosecondsSinceEpoch(), 1000000)
diff = rdfvalue.RDFDatetimeSeconds(10) - rdfvalue.Duration("3s")
self.assertEqual(diff.AsMicrosecondsSinceEpoch(), 7000000)
class DurationSecondsTest(absltest.TestCase):
def testPublicAttributes(self):
duration = rdfvalue.DurationSeconds.FromHumanReadable("1h")
self.assertEqual(duration.ToInt(rdfvalue.SECONDS), 3600)
self.assertEqual(duration.ToInt(rdfvalue.MILLISECONDS), 3600 * 1000)
self.assertEqual(duration.microseconds, 3600 * 1000 * 1000)
def testFromDays(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(2, rdfvalue.DAYS),
rdfvalue.DurationSeconds.FromHumanReadable("2d"))
self.assertEqual(
rdfvalue.DurationSeconds.From(31, rdfvalue.DAYS),
rdfvalue.DurationSeconds.FromHumanReadable("31d"))
def testFromHours(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(48, rdfvalue.HOURS),
rdfvalue.DurationSeconds.FromHumanReadable("48h"))
self.assertEqual(
rdfvalue.DurationSeconds.From(24, rdfvalue.HOURS),
rdfvalue.DurationSeconds.FromHumanReadable("24h"))
def testFromSeconds(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(1337,
rdfvalue.SECONDS).ToInt(rdfvalue.SECONDS),
1337)
def testFromMicroseconds(self):
duration = rdfvalue.DurationSeconds.From(3000000, rdfvalue.MICROSECONDS)
self.assertEqual(duration.microseconds, 3000000)
self.assertEqual(duration.ToInt(rdfvalue.SECONDS), 3)
def testFloatConstructorRaises(self):
with self.assertRaises(TypeError):
rdfvalue.DurationSeconds(3.14)
def testSerializeToBytes(self):
self.assertEqual(
b"0",
rdfvalue.DurationSeconds.From(0, rdfvalue.WEEKS).SerializeToBytes())
self.assertEqual(
b"1",
rdfvalue.DurationSeconds.From(1, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"2",
rdfvalue.DurationSeconds.From(2, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"999",
rdfvalue.DurationSeconds.From(999, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"1000",
rdfvalue.DurationSeconds.From(1000,
rdfvalue.SECONDS).SerializeToBytes())
def testFromWireFormat(self):
for i in [0, 7, 1337]:
val = rdfvalue.DurationSeconds.FromWireFormat(i)
self.assertEqual(i, val.ToInt(rdfvalue.SECONDS))
val2 = rdfvalue.DurationSeconds.FromWireFormat(
val.SerializeToWireFormat())
self.assertEqual(val, val2)
MAX_UINT64 = 18446744073709551615
class DurationTest(absltest.TestCase):
def testInitializationFromMicroseconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(i, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} us".format(i)))
self.assertEqual(val, rdfvalue.Duration(i))
def testInitializationFromMilliseconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 1000]:
val = rdfvalue.Duration.From(i, rdfvalue.MILLISECONDS)
self.assertEqual(i * 1000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} ms".format(i)))
def testInitializationFromSeconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 1000000]:
val = rdfvalue.Duration.From(i, rdfvalue.SECONDS)
self.assertEqual(i * 1000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} s".format(i)))
def testInitializationFromMinutes(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 60000000]:
val = rdfvalue.Duration.From(i, rdfvalue.MINUTES)
self.assertEqual(i * 60000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} m".format(i)))
def testInitializationFromHours(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 3600000000]:
val = rdfvalue.Duration.From(i, rdfvalue.HOURS)
self.assertEqual(i * 3600000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} h".format(i)))
def testInitializationFromDays(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 86400000000]:
val = rdfvalue.Duration.From(i, rdfvalue.DAYS)
self.assertEqual(i * 86400000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} d".format(i)))
def testInitializationFromWeeks(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 604800000000]:
val = rdfvalue.Duration.From(i, rdfvalue.WEEKS)
self.assertEqual(i * 604800000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} w".format(i)))
def testConversionToInt(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(val.ToInt(rdfvalue.MICROSECONDS), i)
self.assertEqual(val.ToInt(rdfvalue.MILLISECONDS), i // 1000)
self.assertEqual(val.ToInt(rdfvalue.SECONDS), i // (1000 * 1000))
self.assertEqual(val.ToInt(rdfvalue.MINUTES), i // (60 * 1000 * 1000))
self.assertEqual(val.ToInt(rdfvalue.HOURS), i // (60 * 60 * 1000 * 1000))
self.assertEqual(
val.ToInt(rdfvalue.DAYS), i // (24 * 60 * 60 * 1000 * 1000))
self.assertEqual(
val.ToInt(rdfvalue.WEEKS), i // (7 * 24 * 60 * 60 * 1000 * 1000))
def testConversionToFractional(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertAlmostEqual(val.ToFractional(rdfvalue.MICROSECONDS), i)
self.assertAlmostEqual(val.ToFractional(rdfvalue.MILLISECONDS), i / 1000)
self.assertAlmostEqual(
val.ToFractional(rdfvalue.SECONDS), i / (1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.MINUTES), i / (60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.HOURS), i / (60 * 60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.DAYS), i / (24 * 60 * 60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.WEEKS),
i / (7 * 24 * 60 * 60 * 1000 * 1000))
def testStringDeserialization(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(
rdfvalue.Duration.FromSerializedBytes(val.SerializeToBytes()), val)
def testHumanReadableStringSerialization(self):
self.assertEqual("0 us", str(rdfvalue.Duration.From(0, rdfvalue.WEEKS)))
self.assertEqual("1 us",
str(rdfvalue.Duration.From(1, rdfvalue.MICROSECONDS)))
self.assertEqual("2 us",
str(rdfvalue.Duration.From(2, rdfvalue.MICROSECONDS)))
self.assertEqual("999 us",
str(rdfvalue.Duration.From(999, rdfvalue.MICROSECONDS)))
self.assertEqual("1 ms",
str(rdfvalue.Duration.From(1000, rdfvalue.MICROSECONDS)))
self.assertEqual("1 ms",
str(rdfvalue.Duration.From(1, rdfvalue.MILLISECONDS)))
self.assertEqual(
"{} us".format(MAX_UINT64),
str(rdfvalue.Duration.From(MAX_UINT64, rdfvalue.MICROSECONDS)))
self.assertEqual("3 s", str(rdfvalue.Duration.From(3, rdfvalue.SECONDS)))
self.assertEqual("3 m", str(rdfvalue.Duration.From(3, rdfvalue.MINUTES)))
self.assertEqual("3 h", str(rdfvalue.Duration.From(3, rdfvalue.HOURS)))
self.assertEqual("3 d", str(rdfvalue.Duration.From(3, rdfvalue.DAYS)))
self.assertEqual("3 w", str(rdfvalue.Duration.From(21, rdfvalue.DAYS)))
def testSerializeToBytes(self):
self.assertEqual(
b"0",
rdfvalue.Duration.From(0, rdfvalue.WEEKS).SerializeToBytes())
self.assertEqual(
b"1",
rdfvalue.Duration.From(1, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"2",
rdfvalue.Duration.From(2, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"999",
rdfvalue.Duration.From(999, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"1000",
rdfvalue.Duration.From(1000, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
str(MAX_UINT64).encode("utf-8"),
rdfvalue.Duration.From(MAX_UINT64,
rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"3000000",
rdfvalue.Duration.From(3, rdfvalue.SECONDS).SerializeToBytes())
def testAdditionOfDurationsIsEqualToIntegerAddition(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64 // 2]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64 // 2]:
self.assertEqual(
rdfvalue.Duration(a) + rdfvalue.Duration(b),
rdfvalue.Duration(a + b))
def testSubtractionOfDurationsIsEqualToIntegerSubtraction(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64]:
self.assertEqual(
rdfvalue.Duration(a) - rdfvalue.Duration(min(a, b)),
rdfvalue.Duration(a - min(a, b)))
def testFromWireFormat(self):
for i in [0, 7, 1337, MAX_UINT64]:
val = rdfvalue.Duration.FromWireFormat(i)
self.assertEqual(i, val.microseconds)
def testSubtractionFromDateTimeIsEqualToIntegerSubtraction(self):
for a in [0, 1, 7, 60, 1337]:
for b in [0, 1, 7, 60, 1337]:
lhs = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(a)
rhs = rdfvalue.Duration(min(a, b))
result = lhs - rhs
self.assertEqual(result.AsMicrosecondsSinceEpoch(), a - min(a, b))
def testAdditionToDateTimeIsEqualToIntegerAddition(self):
for a in [0, 1, 7, 60, 1337]:
for b in [0, 1, 7, 60, 1337]:
lhs = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(a)
rhs = rdfvalue.Duration(b)
result = lhs + rhs
self.assertEqual(result.AsMicrosecondsSinceEpoch(), a + b)
def testComparisonIsEqualToIntegerComparison(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64 - 1, MAX_UINT64]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64 - 1, MAX_UINT64]:
dur_a = rdfvalue.Duration(a)
dur_b = rdfvalue.Duration(b)
if a > b:
self.assertGreater(dur_a, dur_b)
if a >= b:
self.assertGreaterEqual(dur_a, dur_b)
if a == b:
self.assertEqual(dur_a, dur_b)
if a <= b:
self.assertLessEqual(dur_a, dur_b)
if a < b:
self.assertLess(dur_a, dur_b)
if a != b:
self.assertNotEqual(dur_a, dur_b)
class DocTest(test_lib.DocTest):
module = rdfvalue
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
StarcoderdataPython
|
342463
|
<gh_stars>0
# defines a function that takes two arguments
def cheese_and_crackers(cheese_count, boxes_of_crackers):
# prints a string with the first argument passed into the function inserted into the output
print(f"You have {cheese_count} cheeses!")
# prints a string with the second argument passed into the function inserted into the output
print(f"You have {boxes_of_crackers} boxes of crackers!")
# prints a string
print("Man that's enough for a party!")
# prints a string
print("Get a blanket.\n")
# prints a string
print("We can just give the function numbers directly:")
# calls the cheese_and_crackers function with 20 and 30 as the arguments being passed in
cheese_and_crackers(20,30)
# prints a string
print("OR, we can use variables from our script:")
# stores a value of 10 in the variable amount_of_cheese
amount_of_cheese = 10
# stores a value of 50 in the variable amount_of_crackers
amount_of_crackers = 50
# calls the cheese_and_crackers function, passing in the values stored in the variables amount_of_cheese and amount_of_crackers
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
# prints a string
print("We can even do math inside too:")
# calls the cheese_and_crackers function with the results of 1 + 20 and 5 + 6 passed in as the arguments.
cheese_and_crackers(10 + 20, 5 + 6)
# prints a string
print("And we can combine the two, variables and math:")
# calls the cheese_and_crackers function with the results of adding 100 to the value stored in amount_of_cheese and 1000 to the value stored in amount_of_crackers as the arguments
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
|
StarcoderdataPython
|
6563162
|
<reponame>Halftruth08/Game_AI<filename>demo1.py
#demo 1: in command line, enter:
# python3 demo1.py
import codenames.model_building as cmb
model = cmb.make_full_model()
import codenames.game_player as cgp
cgp.codemaster(model)
|
StarcoderdataPython
|
3371070
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 17:02:59 2018
@author: bruce
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
def correlation_matrix(corr_mx, cm_title):
from matplotlib import pyplot as plt
from matplotlib import cm as cm
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
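    # e.g. for temp = [[0.2, 0.9], [0.7, 0.4]] this mask is [[False, True], [True, False]]:
    # one True per row, marking that row's best match (illustrative values only)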
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cs = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cs)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rank(corr_mx, cm_title):
temp = corr_mx
#output = (temp == temp.max(axis=1)[:,None]) # along row
output = temp.rank(axis=1, ascending=False)
fig, ax1 = plt.subplots()
im1 = ax1.matshow(output, cmap=plt.cm.Wistia)
#cs = ax1.matshow(output)
fig.colorbar(im1)
ax1.grid(False)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.title(cm_title)
# show digit in matrix
output = np.asarray(output)
for i in range(22):
for j in range(22):
c = output[j,i]
ax1.text(i, j, int(c), va='center', ha='center')
plt.show()
def correlation_matrix_comb(corr_mx, cm_title):
fig, (ax2, ax3) = plt.subplots(1, 2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
'''
# graph 1 grayscale
im1 = ax1.matshow(corr_mx, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im1, ax=ax1)
ax1.grid(False)
ax1.set_title(cm_title)
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
'''
# graph 2 yellowscale
corr_mx_rank = corr_mx.rank(axis=1, ascending=False)
cmap_grey = LinearSegmentedColormap.from_list('mycmap', ['white', 'black'])
im2 = ax2.matshow(corr_mx, cmap='viridis')
# colorbar need numpy version 1.13.1
fig.colorbar(im2, ax=ax2)
ax2.grid(False)
ax2.set_title(cm_title)
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
# show digit in matrix
corr_mx_rank = np.asarray(corr_mx_rank)
for i in range(22):
for j in range(22):
c = corr_mx_rank[j,i]
ax2.text(i, j, int(c), va='center', ha='center')
# graph 3
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
im3 = ax3.matshow(output, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im3, ax=ax3)
ax3.grid(False)
ax3.set_title(cm_title)
ax3.set_xticks(np.arange(len(xlabels)))
ax3.set_yticks(np.arange(len(ylabels)))
ax3.set_xticklabels(xlabels,fontsize=6)
ax3.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# shrink value for correlation matrix
# in order to use colormap -> 10 scale
def shrink_value_03_1(corr_in1):
corr_out1 = corr_in1.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i in range (22):
for j in range(22):
if corr_in1.iloc[i, j] < 0.3:
corr_out1.iloc[i, j] = 0.3
return corr_out1
def shrink_value_05_1(corr_in2):
corr_out2 = corr_in2.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i2 in range (22):
for j2 in range(22):
if corr_in2.iloc[i2, j2] < 0.5:
corr_out2.iloc[i2, j2] = 0.5
return corr_out2
# not used!!!!!!!!!!!!
# normalize the complex signal series
def normalize_complex_arr(a):
    a_oo = a - a.real.min() - 1j*a.imag.min()  # shift so the origin sits at the minimum real/imag values
return a_oo/np.abs(a_oo).max()
# import the pkl file
#pkl_file=pd.read_pickle('/Users/bruce/Documents/uOttawa/Project/audio_brainstem_response/Data_BruceSunMaster_Studies/study2/study2DataFrame.pkl')
df_EFR=pd.read_pickle('/home/bruce/Dropbox/Project/4.Code for Linux/df_EFR.pkl')
# remove DC offset
df_EFR_detrend = pd.DataFrame()
for i in range(1408):
# combine next two rows later
df_EFR_detrend_data = pd.DataFrame(signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
df_EFR_detrend = df_EFR_detrend.append(pd.concat([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_detrend = df_EFR_detrend.reset_index(drop=True)
df_EFR = df_EFR_detrend
# Define window function
win_kaiser = signal.kaiser(1024, beta=14)
win_hamming = signal.hamming(1024)
# average the df_EFR
df_EFR_avg = pd.DataFrame()
df_EFR_avg_win = pd.DataFrame()
# average test1 and test2
for i in range(704):
# combine next two rows later
df_EFR_avg_t = pd.DataFrame(df_EFR.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
# without window function
df_EFR_avg_t = pd.DataFrame(df_EFR_avg_t.iloc[0,:].values.reshape(1,1024)) # without window function
# implement the window function
df_EFR_avg_t_window = pd.DataFrame((df_EFR_avg_t.iloc[0,:] * win_hamming).values.reshape(1,1024))
df_EFR_label = pd.DataFrame(df_EFR.iloc[2*i, 1024:1031].values.reshape(1,7))
df_EFR_avg = df_EFR_avg.append(pd.concat([df_EFR_avg_t, df_EFR_label], axis=1, ignore_index=True))
df_EFR_avg_win = df_EFR_avg_win.append(pd.concat([df_EFR_avg_t_window, df_EFR_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg = df_EFR_avg.sort_values(by=["Condition", "Subject"])
df_EFR_avg = df_EFR_avg.reset_index(drop=True)
df_EFR_avg_win.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_avg_win = df_EFR_avg_win.sort_values(by=["Condition", "Subject"])
df_EFR_avg_win = df_EFR_avg_win.reset_index(drop=True)
# average all the subjects , test and retest and keep one sound levels
# filter by 'a vowel and 85Db'
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
df_EFR_avg_win_sorted = df_EFR_avg_win.sort_values(by=["Sound Level", "Vowel","Condition", "Subject"])
df_EFR_avg_win_sorted = df_EFR_avg_win_sorted.reset_index(drop=True)
# filter55 65 75 sound levels and keep 85dB
# keep vowel condition and subject
df_EFR_avg_85 = pd.DataFrame(df_EFR_avg_sorted.iloc[528:, :])
df_EFR_avg_85 = df_EFR_avg_85.reset_index(drop=True)
df_EFR_avg_win_85 = pd.DataFrame(df_EFR_avg_win_sorted.iloc[528:, :])
df_EFR_avg_win_85 = df_EFR_avg_win_85.reset_index(drop=True)
# this part was replaced by the section above, based on what was needed
'''
# average all the subjects , test and retest, different sound levels
# filter by 'a vowel and 85Db'
df_EFR_avg_sorted = df_EFR_avg.sort_values(by=["Vowel","Condition", "Subject", "Sound Level"])
df_EFR_avg_sorted = df_EFR_avg_sorted.reset_index(drop=True)
# average sound levels and
# keep vowel condition and subject
df_EFR_avg_vcs = pd.DataFrame()
for i in range(176):
# combine next two rows later
df_EFR_avg_vcs_t = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i: 4*i+4, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
df_EFR_avg_vcs_label = pd.DataFrame(df_EFR_avg_sorted.iloc[4*i, 1024:1031].values.reshape(1,7))
df_EFR_avg_vcs = df_EFR_avg_vcs.append(pd.concat([df_EFR_avg_vcs_t, df_EFR_avg_vcs_label], axis=1, ignore_index=True), ignore_index=True)
# set the title of columns
df_EFR_avg_vcs.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
#df_EFR_avg_vcs = df_EFR_avg_vcs.sort_values(by=["Condition", "Subject"])
'''
'''
# filter by 'a vowel and 85Db'
df_EFR_a_85_test1 = df_EFR[(df_EFR['Vowel'] == 'a vowel') & (df_EFR['Sound Level'] == '85')]
df_EFR_a_85_test1 = df_EFR_a_85_test1.reset_index(drop=True)
df_EFR_a_85_avg = pd.DataFrame()
# average test1 and test2
for i in range(44):
df_EFR_a_85_avg_t = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024))
df_EFR_a_85_label = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i, 1024:1031].values.reshape(1,7))
df_EFR_a_85_avg = df_EFR_a_85_avg.append(pd.concat([df_EFR_a_85_avg_t, df_EFR_a_85_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_a_85_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_a_85_avg = df_EFR_a_85_avg.sort_values(by=["Condition", "Subject"])
df_EFR_a_85_avg = df_EFR_a_85_avg.reset_index(drop=True)
'''
##################################################
# Frequency Domain
# parameters
sampling_rate = 9606 # fs
# sampling_rate = 9596.623
n = 1024
k = np.arange(n)
T = n/sampling_rate # time of signal
frq = k/T
freq = frq[range(int(n/2))]
n2 = 96060
k2 = np.arange(n2)
T2 = n2/sampling_rate
frq2 = k2/T2
freq2 = frq2[range(int(n2/2))]
n3 = 40960
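# Added note (illustration): the frequency resolution of the axes above is fs/n,
# so without zero padding (n = 1024) each FFT bin spans about 9606/1024 ~= 9.38 Hz,
# while padding to n2 = 96060 samples gives 9606/96060 = 0.1 Hz per bin; this is
# why bins 1000, 2000, ..., 7000 further below correspond to 100, 200, ..., 700 Hz.
freq_resolution_raw = sampling_rate / n      # ~9.38 Hz per bin
freq_resolution_padded = sampling_rate / n2  # 0.1 Hz per bin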
# zero padding
# for df_EFR
df_EFR_data = df_EFR.iloc[:, :1024]
df_EFR_label = df_EFR.iloc[:, 1024:]
df_EFR_mid = pd.DataFrame(np.zeros((1408, 95036)))
df_EFR_withzero = pd.concat([df_EFR_data, df_EFR_mid, df_EFR_label], axis=1)
# rename columns
df_EFR_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# for df_EFR_avg_85
df_EFR_avg_85_data = df_EFR_avg_85.iloc[:, :1024]
df_EFR_avg_85_label = df_EFR_avg_85.iloc[:, 1024:]
df_EFR_avg_85_mid = pd.DataFrame(np.zeros((176, 95036)))
df_EFR_avg_85_withzero = pd.concat([df_EFR_avg_85_data, df_EFR_avg_85_mid, df_EFR_avg_85_label], axis=1)
# rename columns
df_EFR_avg_85_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# df_EFR_avg_win_85
df_EFR_avg_win_85_data = df_EFR_avg_win_85.iloc[:, :1024]
df_EFR_avg_win_85_label = df_EFR_avg_win_85.iloc[:, 1024:]
df_EFR_avg_win_85_mid = pd.DataFrame(np.zeros((176, 95036)))
df_EFR_avg_win_85_withzero = pd.concat([df_EFR_avg_win_85_data, df_EFR_avg_win_85_mid, df_EFR_avg_win_85_label], axis=1)
df_EFR_avg_win_85_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# concatenate AENU
temp1 = pd.concat([df_EFR_avg_85.iloc[0:44, 0:1024].reset_index(drop=True),df_EFR_avg_85.iloc[44:88, 0:1024].reset_index(drop=True)], axis=1)
temp2 = pd.concat([df_EFR_avg_85.iloc[88:132, 0:1024].reset_index(drop=True), df_EFR_avg_85.iloc[132:176, 0:1024].reset_index(drop=True)], axis=1)
df_EFR_avg_85_aenu = pd.concat([temp1, temp2], axis=1, ignore_index=True)
df_EFR_avg_85_aenu_withzero = pd.concat([df_EFR_avg_85_aenu, pd.DataFrame(np.zeros((44, 36864)))] , axis=1)
'''
# test##############
# test(detrend)
temp_test = np.asarray(df_EFR_avg_85_data.iloc[0, 0:1024])
temp_test_detrend = signal.detrend(temp_test)
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(temp_test)
plt.subplot(2, 1, 2)
plt.plot(temp_test_detrend)
plt.show()
# the raw data is already DC removed
# test(zero padding)
temp_EFR_1 = df_EFR_withzero.iloc[0, 0:1024]
temp_EFR_2 = df_EFR_withzero.iloc[0, 0:9606]  # zero padded out to one second (9606 samples)
temp_amplitude_spectrum_1 = np.abs((fftpack.fft(temp_EFR_1)/n)[range(int(n/2))])
temp_amplitude_spectrum_2 = np.abs((fftpack.fft(temp_EFR_2)/9606)[range(int(9606/2))])
plt.figure()
plt.subplot(2, 1, 1)
markers1 = [11, 21, 32, 43, 53, 64, 75]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_1, '-D', markevery=markers1)
plt.xlim(0, 100)
plt.title('without zero padding')
plt.subplot(2, 1, 2)
#markers2 = [100, 200, 300, 400, 500, 600, 700]
markers2 = [99, 199, 299, 399, 499, 599, 699]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_2, '-D', markevery=markers2)
plt.xlim(0, 1000)
# plt.xscale('linear')
plt.title('with zero padding')
plt.show()
# #################
'''
# Calculate the amplitude spectrum
# create new dataframes holding the zero-padded amplitude spectra
'''
# for df_EFR
df_as_7= pd.DataFrame()
for i in range(1408):
temp_EFR = df_EFR_avg_85_withzero.iloc[i, 0:96060]
temp_as = np.abs((fftpack.fft(temp_EFR)/n2)[range(int(n2/2))])
#df_as_7 = pd.concat([df_as_7, temp_as_7_t], axis=0)
df_as_7 = df_as_7.append(pd.DataFrame(np.array([temp_as[1000], temp_as[2000], temp_as[3000], temp_as[4000], \
temp_as[5000], temp_as[6000], temp_as[7000]]).reshape(1,7)), ignore_index = True)
df_as_7 = pd.concat([df_as_7, df_EFR_label], axis=1) # add labels on it
# filter by 'a vowel and 85Db'
df_as_7_test1 = df_as_7[(df_as_7['Vowel'] == 'a vowel') & (df_as_7['Sound Level'] == '85')]
df_as_7_test1 = df_as_7_test1.reset_index(drop=True)
'''
# for df_EFR_avg_vcs_withzero
df_as_85= pd.DataFrame()
df_as7_85= pd.DataFrame()
df_as_win_85= pd.DataFrame()
df_as7_win_85= pd.DataFrame()
for i in range(176):
#temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606]
temp_as = np.abs((fftpack.fft(df_EFR_avg_85_withzero.iloc[i, 0:96060])/n2)[range(int(n2/2))])
df_as_85 = df_as_85.append(pd.DataFrame(temp_as.reshape(1,48030)), ignore_index = True)
df_as7_85 = df_as7_85.append(pd.DataFrame(np.array([temp_as[1000], temp_as[2000], temp_as[3000], temp_as[4000], \
temp_as[5000], temp_as[6000], temp_as[7000]]).reshape(1,7)), ignore_index = True)
temp_as_win = np.abs((fftpack.fft(df_EFR_avg_win_85_withzero.iloc[i, 0:96060])/n2)[range(int(n2/2))])
df_as_win_85 = df_as_win_85.append(pd.DataFrame(temp_as_win.reshape(1,48030)), ignore_index = True)
df_as7_win_85 = df_as7_win_85.append(pd.DataFrame(np.array([temp_as_win[1000], temp_as_win[2000], temp_as_win[3000], temp_as_win[4000], \
temp_as_win[5000], temp_as_win[6000], temp_as_win[7000]]).reshape(1,7)), ignore_index = True)
df_as_85 = pd.concat([df_as_85, df_EFR_avg_85_label], axis=1) # add labels on it
df_as7_85 = pd.concat([df_as7_85, df_EFR_avg_85_label], axis=1) # add labels on it
df_as_win_85 = pd.concat([df_as_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it
df_as7_win_85 = pd.concat([df_as7_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it
# for efr_aenu
df_aenu_as_85= pd.DataFrame()
for i in range(44):
#temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606]
temp_as2 = np.abs((fftpack.fft(df_EFR_avg_85_aenu.iloc[i, 0:4096])/4096)[range(int(4096/2))])
df_aenu_as_85 = df_aenu_as_85.append(pd.DataFrame(temp_as2.reshape(1,2048)), ignore_index = True)
#df_aenu_as_85 = pd.concat([df_aenu_as_85, df_EFR_avg_85_label], axis=1) # add labels on it
'''
# average test1 and test2
df_as_7_avg = pd.DataFrame()
for i in range(44):
df_as_7_avg1 = pd.DataFrame(df_as_7_test1.iloc[2*i: 2*i+2, 0:7].mean(axis=0).values.reshape(1,7))
df_as_7_label = pd.DataFrame(df_as_7_test1.iloc[2*i, 7:14].values.reshape(1,7))
df_as_7_avg_t = pd.concat([df_as_7_avg1, df_as_7_label], axis=1, ignore_index=True)
df_as_7_avg = df_as_7_avg.append(df_as_7_avg_t)
# set the title of columns
df_as_7_avg.columns = np.append(np.arange(7), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_as_7_avg = df_as_7_avg.sort_values(by=["Condition", "Subject"])
df_as_7_avg = df_as_7_avg.reset_index(drop=True)
'''
'''
# set a normalized AS
df_as_7_avg_data= pd.DataFrame(df_as_7_avg.iloc[:, 0:7].astype(float))
df_as_7_avg_sum= pd.DataFrame(df_as_7_avg.iloc[:, 0:7]).sum(axis=1)
df_as_7_avg_label= pd.DataFrame(df_as_7_avg.iloc[:, 7:14])
# normalize
df_as_7_avg_norm = df_as_7_avg_data.div(df_as_7_avg_sum, axis=0)
# add label
df_as_7_avg_norm = pd.concat([df_as_7_avg_norm, df_as_7_avg_label], axis=1, ignore_index=True)
'''
# Calculate correlation
# EFR
corr_EFR_avg_85_a = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_e = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_n = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_u = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_aenu = df_EFR_avg_85_aenu.iloc[:, 0:4096].T.corr(method='pearson').iloc[22:44, 0:22]
'''
corr_EFR_avg_85_a_t = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_e_t = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_n_t = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_u_t = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_a_re = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_e_re = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_n_re = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_u_re = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
'''
# AS
corr_as_win_85_a = df_as_win_85.iloc[0:44, 0:12000].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_e = df_as_win_85.iloc[44:88, 0:12000].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_n = df_as_win_85.iloc[88:132, 0:12000].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_u = df_as_win_85.iloc[132:176, 0:12000].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_aenu = df_aenu_as_85.iloc[0:44, 0:2048].T.corr(method='pearson').iloc[22:44, 0:22]
# EFR + AS
df_EFR_avg_85_aenu_norm = df_EFR_avg_85_aenu.div((df_EFR_avg_85_aenu.max(axis=1) - df_EFR_avg_85_aenu.min(axis=1)), axis=0)
df_aenu_as_85_norm = df_aenu_as_85.div((df_aenu_as_85.max(axis=1) - df_aenu_as_85.min(axis=1)), axis=0)
df_aenu_sum_85 = pd.concat([df_EFR_avg_85_aenu_norm, df_aenu_as_85_norm.iloc[:, 0:535]], axis=1)
#corr_sum_85_aenu = df_aenu_sum_85.iloc[0:44, 0:2048].T.corr(method='pearson').iloc[22:44, 0:22]
'''
corr_as_85_a = df_as_85.iloc[0:44, 0:48030].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_e = df_as_85.iloc[44:88, 0:48030].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_n = df_as_85.iloc[88:132, 0:48030].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_u = df_as_85.iloc[132:176, 0:48030].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_a_t = df_as_85.iloc[0:44, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_e_t = df_as_85.iloc[44:88, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_n_t = df_as_85.iloc[88:132, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_u_t = df_as_85.iloc[132:176, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_a_re = df_as_85.iloc[0:44, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_e_re = df_as_85.iloc[44:88, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_n_re = df_as_85.iloc[88:132, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_u_re = df_as_85.iloc[132:176, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
'''
#AS7
corr_as7_85_a = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_e = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_n = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_u = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
'''
corr_as7_85_a_t = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_e_t = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_n_t = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_u_t = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_a_re = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_e_re = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_n_re = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_u_re = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
'''
'''
# auto-correlation
corr_EFR_a_85_retest = df_EFR_a_85_avg.iloc[0:22, 0:7].T.corr(method='pearson')
corr_EFR_a_85_test = df_EFR_a_85_avg.iloc[22:44, 0:7].T.corr(method='pearson')
# cross-correlation
corr_EFR_a_85_r_t = df_EFR_a_85_avg.iloc[:, 0:7].T.corr(method='pearson')
# correlation matrix of test and retest
corr_EFR_a_85_r_t_part = corr_EFR_a_85_r_t.iloc[22:44, 0:22]
# auto-correlation
corr_as_retest = df_as_7_avg.iloc[0:22, 0:7].T.corr(method='pearson')
corr_as_test = df_as_7_avg.iloc[22:44, 0:7].T.corr(method='pearson')
# cross-correlation
corr_as_r_t = df_as_7_avg.iloc[:, 0:7].T.corr(method='pearson')
# Calculate correlation(normalized)
# auto-correlation
corr_as_norm_retest = df_as_7_avg_norm.iloc[0:22, 0:7].T.corr(method='pearson')
corr_as_norm_test = df_as_7_avg_norm.iloc[22:44, 0:7].T.corr(method='pearson')
# cross-correlation
corr_as_norm_r_t = df_as_7_avg_norm.iloc[:, 0:7].T.corr(method='pearson')
'''
# shrink
# shrink the correlation range from 0.3 to 1
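# Added illustration (assumption): shrink_value_03_1 / shrink_value_05_1 are defined
# earlier in this script and are not shown here. One plausible reading of
# "shrink the correlation range from 0.3 to 1" is to clip correlations below the
# lower bound and rescale the remaining [lower, 1] range linearly onto [0, 1];
# a minimal sketch of such a helper (renamed to avoid clashing) could be:
def _shrink_value_example(corr_df, lower=0.3):
    # clip values below `lower`, then map [lower, 1] linearly onto [0, 1]
    return (corr_df.clip(lower=lower) - lower) / (1.0 - lower)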
# EFR
'''
corr_EFR_avg_85_a_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_a)
corr_EFR_avg_85_e_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_e)
corr_EFR_avg_85_n_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_n)
corr_EFR_avg_85_u_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_u)
'''
corr_EFR_avg_85_aenu_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_aenu)
# AS
'''
corr_as_win_85_a_shrink_03_1 = shrink_value_03_1(corr_as_win_85_a)
corr_as_win_85_e_shrink_03_1 = shrink_value_03_1(corr_as_win_85_e)
corr_as_win_85_n_shrink_03_1 = shrink_value_03_1(corr_as_win_85_n)
corr_as_win_85_u_shrink_03_1 = shrink_value_03_1(corr_as_win_85_u)
'''
corr_as_85_aenu_shrink_03_1 = shrink_value_03_1(corr_as_85_aenu)
# shrink the correlation range from 0.5 to 1
# EFR
corr_EFR_avg_85_aenu_shrink_05_1 = shrink_value_05_1(corr_EFR_avg_85_aenu)
# AS
corr_as_85_aenu_shrink_05_1 = shrink_value_05_1(corr_as_85_aenu)
# test
# sum of the time- and frequency-domain correlation matrices
corr_sum_avg_85_aenu = (corr_EFR_avg_85_aenu + corr_as_85_aenu).copy()
# element-wise max of the time- and frequency-domain correlation matrices
# corr_max_avg_85_aenu = np.maximum(corr_EFR_avg_85_aenu, corr_as_85_aenu)
# plot the figure
# in time and freq domain
df_EFR_avg_85_a = df_EFR_avg_85.iloc[0:44, 0:1024]
df_EFR_avg_85_e = df_EFR_avg_85.iloc[44:88, 0:1024]
df_EFR_avg_85_n = df_EFR_avg_85.iloc[88:132, 0:1024]
df_EFR_avg_85_u = df_EFR_avg_85.iloc[132:176, 0:1024]
r2_EFR_avg_85_aenu = pd.concat([df_EFR_avg_85_a.iloc[1, :], df_EFR_avg_85_e.iloc[1, :],
df_EFR_avg_85_n.iloc[1, :], df_EFR_avg_85_u.iloc[1, :]], axis=0)
t2_EFR_avg_85_aenu = pd.concat([df_EFR_avg_85_a.iloc[23, :], df_EFR_avg_85_e.iloc[23, :],
df_EFR_avg_85_n.iloc[23, :], df_EFR_avg_85_u.iloc[23, :]], axis=0)
r9_EFR_avg_85_aenu = pd.concat([df_EFR_avg_85_a.iloc[7, :], df_EFR_avg_85_e.iloc[7, :],
df_EFR_avg_85_n.iloc[7, :], df_EFR_avg_85_u.iloc[7, :]], axis=0)
t9_EFR_avg_85_aenu = pd.concat([df_EFR_avg_85_a.iloc[29, :], df_EFR_avg_85_e.iloc[29, :],
df_EFR_avg_85_n.iloc[29, :], df_EFR_avg_85_u.iloc[29, :]], axis=0)
# frequency domain
df_as_85_a = df_as_85.iloc[0:44, 0:48030]
df_as_85_e = df_as_85.iloc[44:88, 0:48030]
df_as_85_n = df_as_85.iloc[88:132, 0:48030]
df_as_85_u = df_as_85.iloc[132:176, 0:48030]
r1_as_85_aenu = df_aenu_as_85_norm.iloc[0,:]
r2_as_85_aenu = df_aenu_as_85_norm.iloc[1,:]
t2_as_85_aenu = df_aenu_as_85_norm.iloc[23, :]
r4_as_85_aenu = df_aenu_as_85_norm.iloc[3,:]
r7_as_85_aenu = df_aenu_as_85_norm.iloc[5,:]
r9_as_85_aenu = df_aenu_as_85_norm.iloc[7,:]
t9_as_85_aenu = df_aenu_as_85_norm.iloc[29, :]
r12_as_85_aenu = df_aenu_as_85_norm.iloc[9,:]
t12_as_85_aenu = df_aenu_as_85_norm.iloc[31, :]
r13_as_85_aenu = df_aenu_as_85_norm.iloc[10, :]
r14_as_85_aenu = df_aenu_as_85_norm.iloc[11,:]
t14_as_85_aenu = df_aenu_as_85_norm.iloc[33, :]
r15_as_85_aenu = df_aenu_as_85_norm.iloc[12,:]
t15_as_85_aenu = df_aenu_as_85_norm.iloc[34, :]
r16_as_85_aenu = df_aenu_as_85_norm.iloc[13, :]
r20_as_85_aenu = df_aenu_as_85_norm.iloc[17,:]
t20_as_85_aenu = df_aenu_as_85_norm.iloc[39, :]
r21_as_85_aenu = df_aenu_as_85_norm.iloc[18,:]
t21_as_85_aenu = df_aenu_as_85_norm.iloc[40, :]
# subject 2
# EFR
# subject2 -> 1 & 23
subject_num1 = 1
subject_num2 = 23
'''
x1_label = np.arange(0, 1024, 1)
x1_label_concat = np.arange(0, 4096, 1)
fig1, axs1 = plt.subplots(3, 2)
axs1[0,0].plot(x1_label, df_EFR_avg_85_a.iloc[subject_num1, :], label='retest')
axs1[0,0].plot(x1_label, df_EFR_avg_85_a.iloc[subject_num2, :], label='test')
axs1[0,0].legend(loc='upper right')
axs1[0,0].set_title('s2_EFR_avg_85_a')
axs1[0,1].plot(x1_label, df_EFR_avg_85_e.iloc[subject_num1, :], x1_label, df_EFR_avg_85_e.iloc[subject_num2, :])
axs1[0,1].set_title('s2_EFR_avg_85_e')
axs1[1,0].plot(x1_label, df_EFR_avg_85_n.iloc[subject_num1, :], x1_label, df_EFR_avg_85_n.iloc[subject_num2, :])
axs1[1,0].set_title('s2_EFR_avg_85_n')
axs1[1,1].plot(x1_label, df_EFR_avg_85_u.iloc[subject_num1, :], x1_label, df_EFR_avg_85_u.iloc[subject_num2, :])
axs1[1,1].set_title('s2_EFR_avg_85_u')
axs1[2,0].plot(x1_label_concat, r2_EFR_avg_85_aenu, x1_label_concat, t2_EFR_avg_85_aenu)
axs1[2,0].set_title('s2_EFR_avg_85_aenu')
plt.show()
'''
#AS
x2_label = np.arange(0, 48030, 1)
x2_label_concat = np.arange(0,2048, 1)
fig2, axs2 = plt.subplots(3, 2)
axs2[0,0].plot(x2_label, df_as_85_a.iloc[subject_num1, :], label='retest')
axs2[0,0].plot(x2_label, df_as_85_a.iloc[subject_num2, :], label='test')
axs2[0,0].set_xlim(0,13000) # 0 to 1300Hz
axs2[0,0].legend(loc='upper right')
axs2[0,0].set_title('s2_as_85_a')
axs2[0,1].plot(x2_label, df_as_85_e.iloc[subject_num1, :], x2_label, df_as_85_e.iloc[subject_num2, :])
axs2[0,1].set_xlim(0,13000) # 0 to 1300Hz
axs2[0,1].set_title('s2_as_85_e')
axs2[1,0].plot(x2_label, df_as_85_n.iloc[subject_num1, :], x2_label, df_as_85_n.iloc[subject_num2, :])
axs2[1,0].set_xlim(0,13000) # 0 to 1300Hz
axs2[1,0].set_title('s2_as_85_n')
axs2[1,1].plot(x2_label, df_as_85_u.iloc[subject_num1, :], x2_label, df_as_85_u.iloc[subject_num2, :])
axs2[1,1].set_xlim(0,13000) # 0 to 1300Hz
axs2[1,1].set_title('s2_as_85_u')
axs2[2,0].plot(x2_label_concat, r2_as_85_aenu, x2_label_concat, t2_as_85_aenu)
axs2[2,0].set_title('s2_as_85_aenu')
axs2[2,0].set_xlim(0,535) # 0 to 1300 Hz
axs2[2,1].plot(x2_label_concat, t2_as_85_aenu, label='test2')
axs2[2,1].plot(x2_label_concat, r16_as_85_aenu, label='retest16')
axs2[2,1].set_title('t2_as_85_aenu vs r16_as_85_aenu (rank#1 in freq domain)')
axs2[2,1].set_xlim(0,535) # 0 to 1300 Hz
plt.show()
# subject 9
subject_num1 = 7
subject_num2 = 29
# EFR
'''
x1_label = np.arange(0, 1024, 1)
x1_label_concat = np.arange(0, 4096, 1)
fig3, axs3 = plt.subplots(3, 2)
axs3[0,0].plot(x1_label, df_EFR_avg_85_a.iloc[subject_num1, :], label='retest')
axs3[0,0].plot(x1_label, df_EFR_avg_85_a.iloc[subject_num2, :], label='test')
axs3[0,0].legend(loc='upper right')
axs3[0,0].set_title('s9_EFR_avg_85_a')
axs3[0,1].plot(x1_label, df_EFR_avg_85_e.iloc[subject_num1, :], x1_label, df_EFR_avg_85_e.iloc[subject_num2, :])
axs3[0,1].set_title('s9_EFR_avg_85_e')
axs3[1,0].plot(x1_label, df_EFR_avg_85_n.iloc[subject_num1, :], x1_label, df_EFR_avg_85_n.iloc[subject_num2, :])
axs3[1,0].set_title('s9_EFR_avg_85_n')
axs3[1,1].plot(x1_label, df_EFR_avg_85_u.iloc[subject_num1, :], x1_label, df_EFR_avg_85_u.iloc[subject_num2, :])
axs3[1,1].set_title('s9_EFR_avg_85_u')
axs3[2,0].plot(x1_label_concat, r9_EFR_avg_85_aenu, label='retest9')
axs3[2,0].plot(x1_label_concat, t9_EFR_avg_85_aenu, label='test9')
axs3[2,0].legend(loc='upper right')
axs3[2,0].set_title('s9_EFR_avg_85_aenu')
plt.show()
'''
#AS
x2_label = np.arange(0, 48030, 1)
x2_label_concat = np.arange(0,2048, 1)
fig4, axs4 = plt.subplots(3, 2)
axs4[0,0].plot(x2_label, df_as_85_a.iloc[subject_num1, :], label='retest')
axs4[0,0].plot(x2_label, df_as_85_a.iloc[subject_num2, :], label='test')
axs4[0,0].set_xlim(0,13000) # 0 to 1300Hz
axs4[0,0].legend(loc='upper right')
axs4[0,0].set_title('s9_as_85_a')
axs4[0,1].plot(x2_label, df_as_85_e.iloc[subject_num1, :], x2_label, df_as_85_e.iloc[subject_num2, :])
axs4[0,1].set_xlim(0,13000) # 0 to 1300Hz
axs4[0,1].set_title('s9_as_85_e')
axs4[1,0].plot(x2_label, df_as_85_n.iloc[subject_num1, :], x2_label, df_as_85_n.iloc[subject_num2, :])
axs4[1,0].set_xlim(0,13000) # 0 to 1300Hz
axs4[1,0].set_title('s9_as_85_n')
axs4[1,1].plot(x2_label, df_as_85_u.iloc[subject_num1, :], x2_label, df_as_85_u.iloc[subject_num2, :])
axs4[1,1].set_xlim(0,13000) # 0 to 1300Hz
axs4[1,1].set_title('s9_as_85_u')
axs4[2,0].plot(x2_label_concat, r9_as_85_aenu, label='retest9')
axs4[2,0].plot(x2_label_concat, t9_as_85_aenu, label='test9')
axs4[2,0].legend(loc='upper right')
axs4[2,0].set_title('s9_as_85_aenu')
axs4[2,0].set_xlim(0,535) # 0 to 1300 Hz
plt.show()
# frequency-domain cases where the matching test/retest pair is not rank #1 in the correlation matrix
# Subject 2 9 12
x2_label = np.arange(0, 48030, 1)
x2_label_concat = np.arange(0,2048, 1)
fig5, axs5 = plt.subplots(3, 2)
#T2R2 # T2R16
axs5[0,0].plot(x2_label_concat, r2_as_85_aenu, label='retest2')
axs5[0,0].plot(x2_label_concat, t2_as_85_aenu, label='test2')
axs5[0,0].legend(loc='upper right')
axs5[0,0].set_title('s2_as_85_aenu (0.866496, rank#3)')
axs5[0,0].set_xlim(0,535) # 0 to 1300 Hz
axs5[0,1].plot(x2_label_concat, r16_as_85_aenu, label='retest16')
axs5[0,1].plot(x2_label_concat, t2_as_85_aenu, label='test2')
axs5[0,1].legend(loc='upper right')
axs5[0,1].set_title('r16_as_85_aenu vs t2_as_85_aenu (0.871411, rank#1)')
axs5[0,1].set_xlim(0,535) # 0 to 1300 Hz
# R9T9 R13T9
axs5[1,0].plot(x2_label_concat, r9_as_85_aenu, label='retest9')
axs5[1,0].plot(x2_label_concat, t9_as_85_aenu, label='test9')
axs5[1,0].legend(loc='upper right')
axs5[1,0].set_title('s9_as_85_aenu (0.824085, rank#4)')
axs5[1,0].set_xlim(0,535) # 0 to 1300 Hz
axs5[1,1].plot(x2_label_concat, r13_as_85_aenu, label='retest13')
axs5[1,1].plot(x2_label_concat, t9_as_85_aenu, label='test9')
axs5[1,1].legend(loc='upper right')
axs5[1,1].set_title('r13_as_85_aenu vs. t9_as_85_aenu (0.838825, rank#1)')
axs5[1,1].set_xlim(0,535) # 0 to 1300 Hz
# T12R12 T12R4
axs5[2,0].plot(x2_label_concat, r12_as_85_aenu, label='retest12')
axs5[2,0].plot(x2_label_concat, t12_as_85_aenu, label='test12')
axs5[2,0].legend(loc='upper right')
axs5[2,0].set_title('s12_as_85_aenu (0.868822, rank#4)')
axs5[2,0].set_xlim(0,535) # 0 to 1300 Hz
axs5[2,1].plot(x2_label_concat, r4_as_85_aenu, label='retest4')
axs5[2,1].plot(x2_label_concat, t12_as_85_aenu, label='test12')
axs5[2,1].legend(loc='upper right')
axs5[2,1].set_title('r4_as_85_aenu vs. t12_as_85_aenu (0.892162, rank#1)')
axs5[2,1].set_xlim(0,535) # 0 to 1300 Hz
plt.show()
# Subject 14 15
x2_label = np.arange(0, 48030, 1)
x2_label_concat = np.arange(0,4800, 2.34375)
fig6, axs6 = plt.subplots(4, 1)
#T14R14 T14R9
axs6[0].plot(x2_label_concat, r14_as_85_aenu, label='retest14')
axs6[0].plot(x2_label_concat, t14_as_85_aenu, label='test14')
axs6[0].legend(loc='upper right')
axs6[0].set_title('s14_as_85_aenu (0.832793, rank#3)')
axs6[0].set_xlim(0,1300) # 0 to 1300 Hz
axs6[1].plot(x2_label_concat, r9_as_85_aenu, label='retest9')
axs6[1].plot(x2_label_concat, t14_as_85_aenu, label='test14')
axs6[1].legend(loc='upper right')
axs6[1].set_title('r9_as_85_aenu vs t14_as_85_aenu (0.835228, rank#1)')
axs6[1].set_xlim(0,1300) # 0 to 1300 Hz
# T15R15 T15R1
axs6[2].plot(x2_label_concat, r15_as_85_aenu, label='retest15')
axs6[2].plot(x2_label_concat, t15_as_85_aenu, label='test15')
axs6[2].legend(loc='upper right')
axs6[2].set_title('s15_as_85_aenu (0.845387, rank#4)')
axs6[2].set_xlim(0,1300) # 0 to 1300 Hz
axs6[3].plot(x2_label_concat, r1_as_85_aenu, label='retest1')
axs6[3].plot(x2_label_concat, t15_as_85_aenu, label='test15')
axs6[3].legend(loc='upper right')
axs6[3].set_title('r1_as_85_aenu vs. t15_as_85_aenu (0.89756, rank#1)')
axs6[3].set_xlim(0,1300) # 0 to 1300 Hz
plt.setp(axs6[0].get_xticklabels(), visible=False)
plt.setp(axs6[1].get_xticklabels(), visible=False)
plt.setp(axs6[2].get_xticklabels(), visible=False)
plt.show()
# Subject 20 21
fig7, axs7 = plt.subplots(4, 1)
# T20R20 T20R1
axs7[0].plot(x2_label_concat, r20_as_85_aenu, label='retest20')
axs7[0].plot(x2_label_concat, t20_as_85_aenu, label='test20')
axs7[0].legend(loc='upper right')
axs7[0].set_title('s20_as_85_aenu (0.844889, rank#7)')
axs7[0].set_xlim(0,1300) # 0 to 1300 Hz
axs7[1].plot(x2_label_concat, r1_as_85_aenu, label='retest1')
axs7[1].plot(x2_label_concat, t20_as_85_aenu, label='test20')
axs7[1].legend(loc='upper right')
axs7[1].set_title('r1_as_85_aenu vs t20_as_85_aenu (0.905874, rank#1)')
axs7[1].set_xlim(0,1300) # 0 to 1300 Hz
# T21R21 T21R7
axs7[2].plot(x2_label_concat, r21_as_85_aenu, label='retest21')
axs7[2].plot(x2_label_concat, t21_as_85_aenu, label='test21')
axs7[2].legend(loc='upper right')
axs7[2].set_title('s21_as_85_aenu (0.815956, rank#2)')
axs7[2].set_xlim(0,1300) # 0 to 1300 Hz
axs7[3].plot(x2_label_concat, r7_as_85_aenu, label='retest7')
axs7[3].plot(x2_label_concat, t21_as_85_aenu, label='test21')
axs7[3].legend(loc='upper right')
axs7[3].set_title('r7_as_85_aenu vs. t21_as_85_aenu (0.820878, rank#1)')
axs7[3].set_xlim(0,1300) # 0 to 1300 Hz
plt.setp(axs7[0].get_xticklabels(), visible=False)
plt.setp(axs7[1].get_xticklabels(), visible=False)
plt.setp(axs7[2].get_xticklabels(), visible=False)
plt.show()
# subject
'''
# Correlation Matrix
# EFR
correlation_matrix(corr_EFR_avg_85_a, 'cross correlation of 85dB a_vowel in time domain')
correlation_matrix(corr_EFR_avg_85_e, 'cross correlation of 85dB e_vowel in time domain')
correlation_matrix(corr_EFR_avg_85_n, 'cross correlation of 85dB n_vowel in time domain')
correlation_matrix(corr_EFR_avg_85_u, 'cross correlation of 85dB u_vowel in time domain')
# AS
correlation_matrix(corr_as_85_a, 'cross correlation of 85dB a_vowel in frequency domain')
correlation_matrix(corr_as_85_e, 'cross correlation of 85dB e_vowel in frequency domain')
correlation_matrix(corr_as_85_n, 'cross correlation of 85dB n_vowel in frequency domain')
correlation_matrix(corr_as_85_u, 'cross correlation of 85dB u_vowel in frequency domain')
# AS7
correlation_matrix(corr_as7_85_a, 'cross correlation of 85dB a_vowel in frequency domain 7')
correlation_matrix(corr_as7_85_e, 'cross correlation of 85dB e_vowel in frequency domain 7')
correlation_matrix(corr_as7_85_n, 'cross correlation of 85dB n_vowel in frequency domain 7')
correlation_matrix(corr_as7_85_u, 'cross correlation of 85dB u_vowel in frequency domain 7')
# Correlation Matrix witn 0 and 1
# EFR
correlation_matrix_01(corr_EFR_avg_85_a, 'cross correlation of 85dB a_vowel in time domain')
#correlation_matrix_tt_01(corr_EFR_avg_85_a_t, 'cross correlation of 85dB a_vowel in time domain')
#correlation_matrix_rr_01(corr_EFR_avg_85_a_re, 'cross correlation of 85dB a_vowel in time domain')
correlation_matrix_01(corr_EFR_avg_85_e, 'cross correlation of 85dB e_vowel in time domain')
#correlation_matrix_tt_01(corr_EFR_avg_85_e_t, 'cross correlation of 85dB e_vowel in time domain')
#correlation_matrix_rr_01(corr_EFR_avg_85_e_re, 'cross correlation of 85dB e_vowel in time domain')
correlation_matrix_01(corr_EFR_avg_85_n, 'cross correlation of 85dB n_vowel in time domain')
#correlation_matrix_tt_01(corr_EFR_avg_85_n_t, 'cross correlation of 85dB n_vowel in time domain')
#correlation_matrix_rr_01(corr_EFR_avg_85_n_re, 'cross correlation of 85dB n_vowel in time domain')
correlation_matrix_01(corr_EFR_avg_85_u, 'cross correlation of 85dB u_vowel in time domain')
#correlation_matrix_tt_01(corr_EFR_avg_85_u_t, 'cross correlation of 85dB u_vowel in time domain')
#correlation_matrix_rr_01(corr_EFR_avg_85_u_re, 'cross correlation of 85dB u_vowel in time domain')
# Amplitude Spectrum
correlation_matrix_01(corr_as_85_a, 'cross correlation of 85dB a_vowel in frequency domain')
#correlation_matrix_tt_01(corr_as_85_a_t, 'cross correlation of 85dB a_vowel in frequency domain')
#correlation_matrix_rr_01(corr_as_85_a_re, 'cross correlation of 85dB a_vowel in frequency domain')
correlation_matrix_01(corr_as_85_e, 'cross correlation of 85dB e_vowel in frequency domain')
#correlation_matrix_tt_01(corr_as_85_e_t, 'cross correlation of 85dB e_vowel in frequency domain')
#correlation_matrix_rr_01(corr_as_85_e_re, 'cross correlation of 85dB e_vowel in frequency domain')
correlation_matrix_01(corr_as_85_n, 'cross correlation of 85dB n_vowel in frequency domain')
#correlation_matrix_tt_01(corr_as_85_n_t, 'cross correlation of 85dB n_vowel in frequency domain')
#correlation_matrix_rr_01(corr_as_85_n_re, 'cross correlation of 85dB n_vowel in frequency domain')
correlation_matrix_01(corr_as_85_u, 'cross correlation of 85dB u_vowel in frequency domain')
#correlation_matrix_tt_01(corr_as_85_u_t, 'cross correlation of 85dB u_vowel in frequency domain')
#correlation_matrix_rr_01(corr_as_85_u_re, 'cross correlation of 85dB u_vowel in frequency domain')
# Amplitude Spectrum 7 points
correlation_matrix_01(corr_as7_85_a, 'cross correlation of 85dB a_vowel in frequency domain 7')
#correlation_matrix_tt_01(corr_as7_85_a_t, 'cross correlation of 85dB a_vowel in frequency domain 7')
#correlation_matrix_rr_01(corr_as7_85_a_re, 'cross correlation of 85dB a_vowel in frequency domain 7')
correlation_matrix_01(corr_as7_85_e, 'cross correlation of 85dB e_vowel in frequency domain 7')
#correlation_matrix_tt_01(corr_as7_85_e_t, 'cross correlation of 85dB e_vowel in frequency domain 7')
#correlation_matrix_rr_01(corr_as7_85_e_re, 'cross correlation of 85dB e_vowel in frequency domain 7')
correlation_matrix_01(corr_as7_85_n, 'cross correlation of 85dB n_vowel in frequency domain 7')
#correlation_matrix_tt_01(corr_as7_85_n_t, 'cross correlation of 85dB n_vowel in frequency domain 7')
#correlation_matrix_rr_01(corr_as7_85_n_re, 'cross correlation of 85dB n_vowel in frequency domain 7')
correlation_matrix_01(corr_as7_85_u, 'cross correlation of 85dB u_vowel in frequency domain 7')
#correlation_matrix_tt_01(corr_as7_85_u_t, 'cross correlation of 85dB u_vowel in frequency domain 7')
#correlation_matrix_rr_01(corr_as7_85_u_re, 'cross correlation of 85dB u_vowel in frequency domain 7')
'''
# Correlation Matrix_both
# EFR
'''
correlation_matrix_comb(corr_EFR_avg_85_a, 'cross correlation of 85dB a_vowel in time domain')
correlation_matrix_comb(corr_EFR_avg_85_e, 'cross correlation of 85dB e_vowel in time domain')
correlation_matrix_comb(corr_EFR_avg_85_n, 'cross correlation of 85dB n_vowel in time domain')
correlation_matrix_comb(corr_EFR_avg_85_u, 'cross correlation of 85dB u_vowel in time domain')
'''
# figure 1
#correlation_matrix_comb(corr_EFR_avg_85_aenu, 'cross correlation of 85dB aenu in time domain')
#correlation_matrix_comb(corr_EFR_avg_85_aenu_shrink_03_1, 'cross correlation of shrinked(0.3, 1) 85dB aenu in time domain')
#correlation_matrix_comb(corr_EFR_avg_85_aenu_shrink_05_1, 'cross correlation of shrinked(0.5, 1) 85dB aenu in time domain')
# AS
'''
correlation_matrix_comb(corr_as_win_85_a, 'cross correlation of 85dB a_vowel in frequency domain')
correlation_matrix_comb(corr_as_win_85_e, 'cross correlation of 85dB e_vowel in frequency domain')
correlation_matrix_comb(corr_as_win_85_n, 'cross correlation of 85dB n_vowel in frequency domain')
correlation_matrix_comb(corr_as_win_85_u, 'cross correlation of 85dB u_vowel in frequency domain')
'''
#correlation_matrix_comb(corr_as_85_aenu_shrink_03_1, 'cross correlation of shrinked(0.3, 1) 85dB aenu in frequency domain')
#correlation_matrix_comb(corr_as_85_aenu_shrink_05_1, 'cross correlation of shrinked(0.5, 1) 85dB aenu in frequency domain')
# sum of EFR and AS
#correlation_matrix_comb(corr_sum_avg_85_aenu, 'cross correlation of sum 85dB aenu in time and freq domain')
# AS7
'''
correlation_matrix_comb(corr_as7_85_a, 'cross correlation of 85dB a_vowel in frequency domain 7')
correlation_matrix_comb(corr_as7_85_e, 'cross correlation of 85dB e_vowel in frequency domain 7')
correlation_matrix_comb(corr_as7_85_n, 'cross correlation of 85dB n_vowel in frequency domain 7')
correlation_matrix_comb(corr_as7_85_u, 'cross correlation of 85dB u_vowel in frequency domain 7')
'''
'''
# original test
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.matshow(corr_EFR_a_85_r_t_part, cmap='gray') # cmap=plt.cm.gray
plt.title('cross correlation of test and retest')
plt.colorbar() # show the color bar on the right side of the figure
ax1.grid(False)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
'''
'''
#plt.subplot(1,3,1)
plt.matshow(corr_as_test)# cmap=plt.cm.gray
plt.title('cross correlation of test subject')
plt.colorbar() # show the color bar on the right side of the figure
#plt.subplot(1,3,2)
plt.matshow(corr_as_retest) # cmap=plt.cm.gray
plt.title('cross correlation of retest subject')
plt.colorbar() # show the color bar on the right side of the figure
#plt.subplot(1,3,3)
plt.matshow(corr_as_r_t) # cmap=plt.cm.gray
plt.title('cross correlation of test and retest')
plt.colorbar() # show the color bar on the right side of the figure
plt.matshow(corr_as_norm_test)# cmap=plt.cm.gray
plt.title('auto correlation of normalized test subject')
plt.colorbar() # show the color bar on the right side of the figure
#plt.subplot(1,3,2)
plt.matshow(corr_as_norm_retest) # cmap=plt.cm.gray
plt.title('auto correlation of normalized retest subject')
plt.colorbar() # show the color bar on the right side of the figure
#plt.subplot(1,3,3)
plt.matshow(corr_as_norm_r_t) # cmap=plt.cm.gray
plt.title('cross correlation of normalized test and retest')
plt.colorbar() # show the color bar on the right side of the figure
'''
|
StarcoderdataPython
|
4978584
|
image_extensions = (
'3fr', 'ari', 'arw', 'bay', 'bmp', 'cap', 'cr2', 'cr3', 'crw',
'dcr', 'dcs', 'dds', 'dib', 'dng', 'drf', 'eip', 'emf', 'erf',
'fff', 'gif', 'ico', 'ief', 'iiq', 'jfif', 'jpe', 'jpeg', 'jpg',
'jxr', 'k25', 'kdc', 'mef', 'mos', 'mrw', 'nef', 'nrw', 'orf',
'ori', 'pbm', 'pef', 'pgm', 'png', 'pnm', 'ppm', 'ptx', 'pxn',
'raf', 'ras', 'raw', 'rgb', 'rw2', 'rwl', 'sr2', 'srf', 'srw',
'svg', 'tif', 'tiff', 'wdp', 'webp', 'wmf', 'x3f', 'xbm', 'xpm',
'xwd'
)
audio_extensions = (
'aac', 'ac3', 'adt', 'adts', 'aif', 'aifc', 'aiff', 'au', 'ec3',
'flac', 'lpcm', 'm3u', 'm4a', 'mid', 'midi', 'mka', 'mp2', 'mp3',
'mpa', 'oga', 'ogg', 'opus', 'ra', 'rmi', 'snd', 'wav', 'wax',
'weba', 'wma'
)
common_file_extensions = (
'csv', 'doc', 'docm', 'docx', 'htm', 'html', 'ods', 'odt', 'pdf',
'pps', 'ppsm', 'ppsx', 'ppt', 'pptm', 'pptx', 'rtf', 'txt', 'wps',
'xlr', 'xls', 'xlsb', 'xlsm', 'xlsx', 'xlw', 'xml', 'xps'
)
disallowed_file_extensions = (
'ade', 'adp', 'apk', 'appx', 'appxbundle', 'bat', 'cab', 'chm',
'cmd', 'com', 'cpl', 'dll', 'dmg', 'ex', 'ex_', 'exe', 'hta',
'ins', 'iso', 'isp', 'jar', 'js', 'jse', 'lib', 'lnk', 'mde',
'msc', 'msi', 'msix', 'msixbundle', 'msp', 'mst', 'nsh', 'pif',
'ps1', 'scr', 'sct', 'shb', 'sys', 'vb', 'vbe', 'vbs', 'vxd',
'wsc', 'wsf', 'wsh'
)
|
StarcoderdataPython
|
3255980
|
<filename>openbook_posts/management/commands/migrate_post_images.py
from django.core.management.base import BaseCommand
import logging
from django.db import transaction
from openbook_common.utils.model_loaders import get_post_model, get_post_media_model
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Migrates Post.image fields to PostMedia items'
def handle(self, *args, **options):
Post = get_post_model()
PostMedia = get_post_media_model()
posts_to_migrate = Post.objects.filter(image__isnull=False, media__isnull=True)
migrated_posts = 0
for post in posts_to_migrate.iterator():
with transaction.atomic():
post_image = post.image
PostMedia.create_post_media(type=PostMedia.MEDIA_TYPE_IMAGE,
content_object=post_image,
post_id=post.pk, order=0)
post_image.save()
logger.info('Migrated post with id:' + str(post.pk))
migrated_posts = migrated_posts + 1
logger.info('Migrated %d posts' % migrated_posts)
|
StarcoderdataPython
|
131400
|
<gh_stars>1-10
"""Init file for backend App"""
# pylint: disable=invalid-name
import sys
import logging
import redis
from logging.handlers import RotatingFileHandler
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_bootstrap import WebCDN
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_caching import Cache
from flask_mail import Mail
from config import config, Config
from celery import Celery
from .watchdog import Watchdog
from werkzeug.contrib.fixers import ProxyFix
bootstrap = Bootstrap()
db = SQLAlchemy()
mail = Mail()
cache = Cache(config={
'CACHE_TYPE': 'redis',
'CACHE_KEY_PREFIX': 'fcache',
'CACHE_REDIS_HOST': 'redis',
'CACHE_REDIS_PORT': '6379',
'CACHE_REDIS_URL': 'redis://redis:6379'})
celery = Celery(__name__, broker=Config.CELERY_BROKER_URL)
login_manager = LoginManager()
login_manager.session_protection = 'basic'
login_manager.login_view = 'auth.login'
logger = logging.getLogger('backend INFO Logger')
logger.setLevel(logging.INFO)
info_file_handler = RotatingFileHandler(
filename='info_log.log',
maxBytes=30 * 1024 * 1024,
backupCount=7)
info_file_handler.setLevel(logging.INFO)
info_formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s '
'[in %(pathname)s: line %(lineno)d]')
info_file_handler.setFormatter(info_formatter)
logger.addHandler(info_file_handler)
watchdog = Watchdog(timeout=10, cache=cache)
def create_app(config_name):
"""
Creates an instance of the Backend App
Args:
config_name: is the configuration for the type of Backend the
user wants to run
Returns:
Backend, which starts up the app
"""
app = Flask(__name__)
# WARNING It is a security issue to use this middleware in a non-proxy
# setup that enforces using https as it will blindly trust the incoming
# headers which could be forged by a malicious client. See also the
# following: http://flask.pocoo.org/docs/1.0/deploying/wsgi-standalone/
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object(config[config_name])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
config[config_name].init_app(app)
login_manager.init_app(app)
bootstrap.init_app(app)
app.extensions['bootstrap']['cdns']['jquery'] = WebCDN(
'//ajax.googleapis.com/ajax/libs/jquery/3.2.1/'
)
mail.init_app(app)
db.init_app(app)
cache.init_app(app)
celery.conf.update(app.config)
# Clear cache at startup, Redis defaults to clear the whole DB if that
# happens. NOTE: I am doing this to create a consistent startup
# state, however, any replicas if they spun back up would clear the
# cache after initializing, so we may want to do this elsewhere.
with app.app_context():
try:
cache.clear()
except redis.ConnectionError as e:
print(e)
sys.exit(-1)
# Backend Warning/Error Logger
if not app.config['TESTING']:
error_file_handler = RotatingFileHandler(
filename=app.config['ERROR_LOGGING_LOCATION'],
maxBytes=app.config['ERROR_LOGGING_MAX_BYTES'],
backupCount=app.config['ERROR_LOGGING_BACKUP_COUNT'])
formatter = logging.Formatter(app.config['LOGGING_FORMAT'])
error_file_handler.setFormatter(formatter)
app.logger.setLevel(app.config['ERROR_LOGGING_LEVEL'])
app.logger.addHandler(error_file_handler)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .api_0_1 import api_0_1 as api_0_1_blueprint
app.register_blueprint(api_0_1_blueprint, url_prefix='/api/v0.1')
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app
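# Minimal usage sketch (illustration only): 'default' is an assumed key of the
# `config` mapping imported above, and running the app requires the Redis and
# database services referenced in that configuration.
if __name__ == '__main__':
    app = create_app('default')
    app.run()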
|
StarcoderdataPython
|
1876099
|
<filename>dcms/media/views.py
from rest_framework import views, viewsets
from rest_framework.parsers import FileUploadParser
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework import status
from config.authentication import default_authentication_classes
from content.permissions import IsAuthorOrAdminOtherwiseReadOnly
from .serializers import *
class ImageViewSet(viewsets.ModelViewSet):
queryset = Image.objects.all()
serializer_class = ImageSerializer
authentication_classes = default_authentication_classes
class FileViewSet(viewsets.ModelViewSet):
queryset = File.objects.all()
serializer_class = FileSerializer
authentication_classes = default_authentication_classes
# alternative to the POST method in ImageViewSet/FileViewSet
class ImageUploadView(views.APIView):
parser_classes = (FileUploadParser,)
def put(self, request, filename, format=None):
file_obj = request.FILES['file']
image = Image.objects.create(path=file_obj)
return Response(data=ImageSerializer(image).data, status=status.HTTP_201_CREATED)
class FileUploadView(views.APIView):
parser_classes = (FileUploadParser,)
def put(self, request, filename, format=None):
file_obj = request.FILES['file']
file = File.objects.create(path=file_obj)
return Response(data=FileSerializer(file).data, status=status.HTTP_201_CREATED)
|
StarcoderdataPython
|
4818964
|
<filename>eit_app/test_cam.py
import cv2
frameWidth = 640
frameHeight = 480
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cap.set(3, frameWidth)   # property 3: frame width
cap.set(4, frameHeight)  # property 4: frame height
cap.set(10, 150)         # property 10: brightness
while cap.isOpened():
success, img = cap.read()
if success:
cv2.imshow("Result", img)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
|
StarcoderdataPython
|
3520153
|
<reponame>evinus/My-appproch-One<gh_stars>0
from scipy.stats.stats import mode
import tensorflow.keras as keras
from tensorflow.python.keras import activations
#from tensorflow.python.keras import callbacks
import metrics as met
import cv2
import os
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow import config
from sklearn.metrics import roc_auc_score , roc_curve
gpus = config.experimental.list_physical_devices('GPU')
config.experimental.set_memory_growth(gpus[0], True)
bilder = list()
for folder in os.listdir("data//avenue//testing//frames"):
path = os.path.join("data//avenue//testing//frames",folder)
for img in os.listdir(path):
bild = os.path.join(path,img)
bilder.append(cv2.imread(bild))
bilder = np.array(bilder)
bilder = bilder.reshape(bilder.shape[0],bilder.shape[1],bilder.shape[2],bilder.shape[3],1)
bilder = bilder.astype('float32') / 255
labels = np.load("data/frame_labels_avenue.npy")
#labels = np.reshape(labels,labels.shape[1])
X_train, X_test, Y_train, Y_test = train_test_split(bilder,labels,test_size=0.2, random_state= 100)
batch_size = 16
model = keras.Sequential()
model.add(keras.layers.Conv3D(input_shape =(240, 360, 3, 1),activation="relu",filters=6,kernel_size=3,padding="same"))
model.add(keras.layers.SpatialDropout3D(0.5))
model.add(keras.layers.MaxPooling3D(pool_size=(2,2,1)))
#model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv3D(activation="relu",filters=6,kernel_size=3,padding="same"))
model.add(keras.layers.SpatialDropout3D(0.5))
model.add(keras.layers.MaxPooling3D(pool_size=(2,2,1)))
model.add(keras.layers.Conv3D(activation="relu",filters=6,kernel_size=2,padding="same"))
model.add(keras.layers.SpatialDropout3D(0.5))
model.add(keras.layers.MaxPooling3D(pool_size=(2,2,1)))
#model.add(keras.layers.Dense(64,activation="relu"))
#model.add(keras.layers.GlobalAveragePooling3D())
model.add(keras.layers.Flatten())
#model.add(keras.layers.Dense(256,activation="relu"))
model.add(keras.layers.Dense(50,activation="relu"))
model.add(keras.layers.Dense(10,activation="relu"))
model.add(keras.layers.Dense(1,activation="sigmoid"))
metrics = [keras.metrics.categorical_crossentropy,keras.metrics.binary_accuracy,keras.metrics.Precision,met.f1_m]
#model.compile(optimizer="adam",metrics=["acc",met.f1_m,met.precision_m,met.recall_m],loss="binary_crossentropy")
model.compile(optimizer=keras.optimizers.Adam(),metrics=["binary_accuracy","AUC","Precision","Recall","TruePositives","TrueNegatives","FalsePositives","FalseNegatives"],loss="binary_crossentropy")
model.summary()
filepath = 'model3Davenue-ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}'
#callbacks = [keras.callbacks.EarlyStopping(monitor="val_loss", patience=3, mode="min")]#,keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')]
callbacks = [keras.callbacks.EarlyStopping(monitor="val_loss", patience=3, mode="min"),keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')]
model.fit(X_train,Y_train,batch_size=batch_size,verbose=1,epochs=30,callbacks=callbacks,validation_data=(X_test,Y_test))
model.save("model3Davenue2")
#reconstructed_model = keras.models.load_model("model3Davenue2")
#np.testing.assert_allclose(model.predict(X_test), reconstructed_model.predict(X_test))
#np.testing.assert_allclose(model.evaluate(X_test,Y_test,batch_size=batch_size), reconstructed_model.evaluate(X_test,Y_test,batch_size=batch_size))
#model.evaluate(X_test,Y_test,batch_size=batch_size)
#y_score = model.predict(X_test,batch_size=batch_size)
#auc = roc_auc_score(Y_test,y_score=y_score)
#print('AUC: ', auc*100, '%')
|
StarcoderdataPython
|
9778183
|
<gh_stars>0
from __future__ import annotations
from math import log, sqrt
from typing import Dict, Optional, Tuple
import parameters
class TreeNode:
player_reward = {
1: 1,
2: -1
}
def __init__(self, state: Tuple[int, ...], parent: Optional[TreeNode] = None) -> None:
self.state = state
self.parent = parent
self.children: Dict[int, TreeNode] = {}
self.score = 0
self.visits = 0
self.c = -parameters.UCT_C if state[0] == 1 else parameters.UCT_C
self.policy_function = max if self.state[0] == 1 else min
@property
def UCT(self) -> float:
if self.visits == 0:
return self.c * float("inf")
exploitation = self.score / self.visits
exploration = self.c * sqrt(2 * log(self.parent.visits) / (self.visits))
return exploitation + exploration
def tree_policy(self) -> int:
return self.policy_function(self.children.keys(), key=lambda key: self.children[key].UCT)
@property
def is_not_leaf(self) -> bool:
return bool(self.children)
def add_reward(self, winner: int) -> None:
self.score += TreeNode.player_reward[winner]
def increment_visit_count(self) -> None:
self.visits += 1
def add_node(self, action: int, state: Tuple[int, ...]) -> TreeNode:
child_node = TreeNode(state, self)
self.children[action] = child_node
return child_node
def __eq__(self, o: TreeNode) -> bool:
return self.state == o.state
def __hash__(self) -> int:
return hash(self.state)
|
StarcoderdataPython
|
9752073
|
import os
import csv
class Formatter():
DEL_COM = ','
LINE_LF = '\n'
UTF_8 = 'utf-8'
DBL_QUOTE = '"'
def __init__(self, *args, **kwargs):
"""
delimiter: delimiter of column
line_sep: separater of line as record
"""
self.delimiter = kwargs['delimiter'] if 'delimiter' in kwargs else Formatter.DEL_COM
self.quotechar = kwargs['quotechar'] if 'quotechar' in kwargs else Formatter.DBL_QUOTE
self.line_sep = kwargs['line_sep'] if 'line_sep' in kwargs else Formatter.LINE_LF
self.charset = kwargs['charset'] if 'charset' in kwargs else Formatter.UTF_8
class Writer():
def __init__(self):
"""
Not implemented yet
"""
pass
def write(self, formatter):
pass
class Reader():
def __init__(self):
"""
Not implemented yet
"""
pass
class Csv():
def __init__(self, formatter=None):
"""
delimiter: delimiter of column
line_sep: separater of line as record
"""
if formatter is None:
self.form = Formatter()
else:
self.form = formatter
def write(self, file_path, contents, headers=None):
""" write file """
is_additional = os.path.isfile(file_path)
with open(file_path, 'a', newline=self.form.line_sep) as csv_file:
writer = csv.writer(
csv_file,
delimiter=self.form.delimiter,
quotechar=self.form.quotechar,
quoting=csv.QUOTE_MINIMAL
)
if headers is not None and not is_additional:
writer.writerow(headers)
for content in contents:
writer.writerow(content)
return True
def read(self, file_path, headers=None):
""" read file """
contents = []
with open(file_path, "r", newline=self.form.line_sep) as csv_file:
reader = csv.reader(
csv_file,
delimiter=self.form.delimiter,
quotechar=self.form.quotechar,
quoting=csv.QUOTE_MINIMAL
)
for n, row in enumerate(reader):
if headers is not None and n == 0 :
continue
contents.append(row)
return contents
|
StarcoderdataPython
|
8079626
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 07:10:48 2019
@author: gbrunkhorst
"""
import gbxltable
|
StarcoderdataPython
|
11332096
|
<filename>website_multi_company_sale/__manifest__.py
# Copyright 2017-2018 <NAME> <https://it-projects.info/team/yelizariev>
# Copyright 2018 <NAME> <https://it-projects.info/team/ilmir-k>
# Copyright 2018 <NAME> <https://it-projects.info/team/iledarn>
# Copyright 2019 <NAME> <https://it-projects.info/team/KolushovAlexandr>
# License MIT (https://opensource.org/licenses/MIT).
{
"name": """Multi Ecommerce""",
"summary": """Multi Website support in eCommerce""",
"category": "eCommerce",
# "live_test_url": "http://apps.it-projects.info/shop/product/website-multi-company?version=12.0",
"images": ["images/website_multi_company_sale_main.png"],
"version": "192.168.127.12.2",
"application": False,
"author": "IT-Projects LLC, <NAME>",
"support": "<EMAIL>",
"website": "https://it-projects.info/team/yelizariev",
"license": "Other OSI approved licence", # MIT
"price": 9.00,
"currency": "EUR",
"depends": ["website_multi_company", "website_sale", "ir_rule_website"],
"external_dependencies": {"python": [], "bin": []},
"data": [
"views/product_public_category_views.xml",
"views/res_config_views.xml",
"views/product_template_views.xml",
"views/payment_views.xml",
"views/sale_views.xml",
"security/website_multi_company_sale_security.xml",
],
"qweb": [],
"demo": [],
"post_load": None,
"pre_init_hook": None,
"post_init_hook": None,
"uninstall_hook": None,
"auto_install": False,
"installable": False,
}
|
StarcoderdataPython
|
299815
|
def login():
return 'login info'
a = 18
num1 = 30
num2 = 10
num2 = 20
|
StarcoderdataPython
|
3381420
|
<gh_stars>10-100
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
$ python setup.py register sdist upload
First Time register project on pypi
https://pypi.org/manage/projects/
Pypi Release
$ pip3 install twine
$ python3 setup.py sdist
$ twine upload dist/keri-0.0.1.tar.gz
Create release git:
$ git tag -a v0.4.2 -m "bump version"
$ git push --tags
$ git checkout -b release_0.4.2
$ git push --set-upstream origin release_0.4.2
$ git checkout master
Best practices for setup.py and requirements.txt
https://caremad.io/posts/2013/07/setup-vs-requirement/
"""
from glob import glob
from os.path import basename
from os.path import splitext
from setuptools import find_packages, setup
setup(
name='keri',
version='0.4.7', # also change in src/keri/__init__.py
license='Apache Software License 2.0',
description='Key Event Receipt Infrastructure',
long_description="KERI Decentralized Key Management Infrastructure",
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/decentralized-identity/keripy',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
# uncomment if you test on these interpreters:
# 'Programming Language :: Python :: Implementation :: PyPy',
# 'Programming Language :: Python :: Implementation :: IronPython',
# 'Programming Language :: Python :: Implementation :: Jython',
# 'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Utilities',
],
project_urls={
'Documentation': 'https://keri.readthedocs.io/',
'Changelog': 'https://keri.readthedocs.io/en/latest/changelog.html',
'Issue Tracker': 'https://github.com/decentralized-identity/keripy/issues',
},
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
python_requires='>=3.9.5',
install_requires=[
'lmdb>=1.2.1',
'pysodium>=0.7.8',
'blake3>=0.1.8',
'msgpack>=1.0.2',
'cbor2>=5.3.0',
'multidict>=5.1.0',
'orderedset>=2.0.3',
'hio>=0.3.9',
'multicommand>=0.0.8',
],
extras_require={
},
tests_require=['coverage>=5.5', 'pytest>=6.2.4'],
setup_requires=[
],
entry_points={
'console_scripts': [
'keri_bob = keri.demo.demo_bob:main',
'keri_eve = keri.demo.demo_eve:main',
'keri_sam = keri.demo.demo_sam:main',
'kli = keri.kli.kli:main',
]
},
)
|
StarcoderdataPython
|
3432589
|
<reponame>ciaranjordan/webex_bot
import logging
from abc import ABC, abstractmethod
log = logging.getLogger(__name__)
CALLBACK_KEYWORD_KEY = 'callback_keyword'
class Command(ABC):
def __init__(self, command_keyword, card, help_message=None, delete_previous_message=False):
self.command_keyword = command_keyword
self.help_message = help_message
self.card = card
self.pre_card_callback = self.execute
self.card_callback = self.execute
self.card_callback_keyword = None
self.delete_previous_message = delete_previous_message
# Now, if this card has an Action.Submit action, read its callback keyword,
# or add one if it doesn't exist.
# Only the first action is handled for now. Maybe support multiple actions in future.
if card is not None:
if 'actions' in card:
if len(card['actions']) > 0:
first_action = card['actions'][0]
if 'type' in first_action and first_action['type'] == 'Action.Submit':
if 'data' in first_action and len(first_action['data']) > 0:
data = first_action['data']
if CALLBACK_KEYWORD_KEY in data:
self.card_callback_keyword = first_action['data'][CALLBACK_KEYWORD_KEY]
else:
log.warning(
f"card actions data but no entry for '{CALLBACK_KEYWORD_KEY}' for {command_keyword}")
self.set_default_card_callback_keyword()
else:
log.warning(
f"no card actions data so no entry for '{CALLBACK_KEYWORD_KEY}' for {command_keyword}")
self.set_default_card_callback_keyword()
else:
log.info(f"No actions defined in this card. command_keyword={command_keyword}")
def set_default_card_callback_keyword(self):
if self.card_callback_keyword is None:
if 'data' not in self.card['actions'][0]:
self.card['actions'][0]['data'] = {}
self.card_callback_keyword = f"callback___{self.command_keyword}"
self.card['actions'][0]['data'][CALLBACK_KEYWORD_KEY] = self.card_callback_keyword
log.info(
f"Added default action for '{self.command_keyword}' {CALLBACK_KEYWORD_KEY}={self.card_callback_keyword}")
def pre_card_load_reply(self, message, attachment_actions, activity):
pass
def pre_execute(self, message, attachment_actions, activity):
pass
@abstractmethod
def execute(self, message, attachment_actions, activity):
pass
|
StarcoderdataPython
|
229799
|
<gh_stars>0
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sh import ErrorReturnCode_1
from functools import partial
from test_base import TestBase
from docker_host import DockerHost
from utils import retry_until_success
class Ipv6MultiHostMainline(TestBase):
def run_ipv6_multi_host(self, default_as=None, per_node_as=None):
"""
Run a mainline multi-host test with IPv6.
Almost identical in function to the vagrant coreOS demo.
"""
host1 = DockerHost('host1', as_num=per_node_as)
host2 = DockerHost('host2', as_num=per_node_as)
if default_as:
host1.calicoctl("default-node-as 12345")
ip1 = "fd80:24e2:f998:72d6::1:1"
ip2 = "fd80:24e2:f998:72d6::1:2"
ip3 = "fd80:24e2:f998:72d6::1:3"
ip4 = "fd80:24e2:f998:72d6::1:4"
ip5 = "fd80:24e2:f998:72d6::1:5"
# We use this image here because busybox doesn't have ping6.
workload1 = host1.create_workload("workload1", ip1, image="phusion/baseimage:0.9.16")
workload2 = host1.create_workload("workload2", ip2, image="phusion/baseimage:0.9.16")
workload3 = host1.create_workload("workload3", ip3, image="phusion/baseimage:0.9.16")
workload4 = host2.create_workload("workload4", ip4, image="phusion/baseimage:0.9.16")
workload5 = host2.create_workload("workload5", ip5, image="phusion/baseimage:0.9.16")
host1.calicoctl("profile add PROF_1_3_5")
host1.calicoctl("profile add PROF_2")
host1.calicoctl("profile add PROF_4")
host1.calicoctl("profile PROF_1_3_5 member add %s" % workload1)
host1.calicoctl("profile PROF_2 member add %s" % workload2)
host1.calicoctl("profile PROF_1_3_5 member add %s" % workload3)
host2.calicoctl("profile PROF_4 member add %s" % workload4)
host2.calicoctl("profile PROF_1_3_5 member add %s" % workload5)
self.assert_connectivity(pass_list=[workload1, workload3, workload5],
fail_list=[workload2, workload4])
self.assert_connectivity(pass_list=[workload2],
fail_list=[workload1, workload3, workload4, workload5])
self.assert_connectivity(pass_list=[workload4],
fail_list=[workload1, workload2, workload3, workload5])
def test_ipv6_multi_host(self):
self.run_ipv6_multi_host()
def test_ipv6_multi_host_default_as(self):
self.run_ipv6_multi_host(default_as=64512)
def test_ipv6_multi_host_per_node_as(self):
self.run_ipv6_multi_host(per_node_as=64513)
def test_ipv6_multi_host_default_and_per_node_as(self):
self.run_ipv6_multi_host(default_as=64514, per_node_as=64515)
|
StarcoderdataPython
|
9795093
|
from glowworm import gso
from deap import benchmarks
if __name__ == '__main__':
    def fitness(candidate):
        # GSO maximises the objective, so the minimisation benchmark is inverted;
        # the +1 in the denominator avoids division by zero at the optimum (f = 0).
        # return 1 / (benchmarks.schwefel(candidate)[0] + 1)
        return 1 / (benchmarks.ackley(candidate)[0] + 1)

    gso(agents_number=80, dim=10, func_obj=fitness, epochs=300, step_size=0.5,
        random_step=True, virtual_individual=True, dims_lim=[-15, 15])
import pytest
from adlib27.autodiff import AutoDiff as AD
import numpy as np
# Testing the getters and setters
def test_getters():
x = AD(val=[10])
value = x.val
derivative = x.der
assert value == pytest.approx([10], rel=1e-4)
for d in derivative:
assert d == pytest.approx([1])
def test_setters():
x = AD(val=[10])
x.val[0] = 5
x.der[0][0] = 0
assert x.val == pytest.approx([5])
for d in x.der:
assert d == pytest.approx([0])
# Testing the comparison operations
def test_ne_const():
x = AD(val=[10])
y = 10
assert x != y
def test_ne_different_AD():
x = AD(val=[10], index=0, magnitude=2)
y = AD(val=[20], index=1, magnitude=2)
assert x != y
def test_eq_AD():
x1 = AD(val=[10])
x2 = AD(val=[10])
assert x1 == x2
def test_ne_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
assert x != y
def test_eq_AD_many_val():
x1 = AD(val=[10, 1, 3, 4])
x2 = AD(val=[10, 1, 3, 4])
assert x1 == x2
# Testing the Unary operations
def test_neg():
x = AD(val=[10])
y = -x
assert y.val == pytest.approx([-10])
for d in y.der:
assert d == pytest.approx([-1])
def test_neg_different_AD():
x = AD(val=[10], index=0, magnitude=2)
y = AD(val=[20], index=1, magnitude=2)
z = -(x + y)
assert z.val == pytest.approx([-30])
for d in z.der:
assert d == pytest.approx([-1])
def test_neg_many_val():
x = AD(val=[10, 1, 3, 4])
y = -x
assert y.val == pytest.approx([-10, -1, -3, -4])
for d in y.der:
assert d == pytest.approx([-1, -1, -1, -1])
def test_neg_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
z = -(x + y)
assert z.val == pytest.approx([-8, 1, -4.2, -104])
for d in z.der:
assert d == pytest.approx([-1, -1, -1, -1])
def test_pos():
x = AD(val=[-15])
y = +x
assert y.val == pytest.approx([-15])
for d in y.der:
assert d == pytest.approx([1])
def test_pos_different_AD():
x = AD(val=[10], index=0, magnitude=2)
y = AD(val=[-20], index=1, magnitude=2)
z = +(x + y)
assert z.val == pytest.approx([-10])
for d in z.der:
assert d == pytest.approx([1])
def test_pos_many_val():
x = AD(val=[10, 1, 3, 4])
y = +x
assert y.val == pytest.approx([10, 1, 3, 4])
for d in y.der:
assert d == pytest.approx([1, 1, 1, 1])
def test_pos_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
z = +(x + y)
assert z.val == pytest.approx([8, -1, 4.2, 104])
for d in z.der:
assert d == pytest.approx([1, 1, 1, 1])
# Testing the basic operations (+, -, *, /)
# Testing the add and radd
def test_add_const():
x = AD(val=[10])
y = x + 22
assert y.val == pytest.approx([32])
for d in y.der:
assert d == pytest.approx([1])
def test_add_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x + 17
assert y.val == pytest.approx([16, 17, 22, 27])
for d in y.der:
assert d == pytest.approx([1, 1, 1, 1])
def test_add_AD():
x = AD(val=[3])
y = x + x
assert y.val == pytest.approx([6])
for d in y.der:
assert d == pytest.approx([2])
def test_add_AD_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x + x
assert y.val == pytest.approx([-2, 0, 10, 20])
for d in y.der:
assert d == pytest.approx([2, 2, 2, 2])
def test_add_different_AD():
x = AD(val=[3], index=0, magnitude=2)
y = AD(val=[4], index=1, magnitude=2)
z = x + y
assert z.val == pytest.approx([7])
for d in z.der:
assert d == pytest.approx([1])
def test_add_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
z = x + y
assert z.val == pytest.approx([8, -1, 4.2, 104])
for d in z.der:
assert d == pytest.approx([1, 1, 1, 1])
def test_radd_const():
x = AD(val=[10])
y = 5 + x
assert y.val == pytest.approx([15])
for d in y.der:
assert d == pytest.approx([1])
def test_radd_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = 17 + x
assert y.val == pytest.approx([16, 17, 22, 27])
for d in y.der:
assert d == pytest.approx([1, 1, 1, 1])
# Testing the sub and rsub
def test_sub_const():
x = AD(val=[10])
y = x - 3
assert y.val == pytest.approx([7])
for d in y.der:
assert d == pytest.approx([1])
def test_sub_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x - 3
assert y.val == pytest.approx([-4, -3, 2, 7])
for d in y.der:
assert d == pytest.approx([1, 1, 1, 1])
def test_sub_AD():
x = AD(val=[14])
y = x - x
assert y.val == pytest.approx([0])
for d in y.der:
assert d == pytest.approx([0])
def test_sub_AD_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x - x
assert y.val == pytest.approx([0, 0, 0, 0])
for d in y.der:
assert d == pytest.approx([0, 0, 0, 0])
def test_sub_different_AD():
x = AD(val=[3], index=0, magnitude=2)
y = AD(val=[4], index=1, magnitude=2)
z = x - y
assert z.val == pytest.approx([-1])
assert z.der[0] == pytest.approx([1])
assert z.der[1] == pytest.approx([-1])
def test_sub_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
z = x - y
assert z.val == pytest.approx([12, -1, 2.2, -96])
assert z.der[0] == pytest.approx([1, 1, 1, 1])
assert z.der[1] == pytest.approx([-1, -1, -1, -1])
def test_rsub_const():
x = AD(val=[1])
y = 7 - x
assert y.val == pytest.approx([6])
for d in y.der:
assert d == pytest.approx([-1])
def test_rsub_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = 17 - x
assert y.val == pytest.approx([18, 17, 12, 7])
for d in y.der:
assert d == pytest.approx([-1, -1, -1, -1])
# Testing the mul and rmul
def test_mul_const():
x = AD(val=[10])
y = x * 3
assert y.val == pytest.approx([30])
for d in y.der:
assert d == pytest.approx([3])
def test_mul_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x * 3
assert y.val == pytest.approx([-3, 0, 15, 30])
for d in y.der:
assert d == pytest.approx([3, 3, 3, 3])
def test_mul_AD():
x = AD(val=[4])
y = x * x
assert y.val == pytest.approx([16])
for d in y.der:
assert d == pytest.approx([8])
def test_mul_AD_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x * x
assert y.val == pytest.approx([1, 0, 25, 100])
for d in y.der:
assert d == pytest.approx([-2, 0, 10, 20])
def test_mul_different_AD():
x = AD(val=[3], index=0, magnitude=2)
y = AD(val=[4], index=1, magnitude=2)
z = x * y
assert z.val == pytest.approx([12])
assert z.der[0] == pytest.approx([4])
assert z.der[1] == pytest.approx([3])
def test_mul_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
z = x * y
assert z.val == pytest.approx([-20, 0, 3.2, 400])
assert z.der[0] == pytest.approx([-2, 0, 1, 100])
assert z.der[1] == pytest.approx([10, -1, 3.2, 4])
def test_rmul_const():
x = AD(val=[1])
y = 7 * x
assert y.val == pytest.approx([7])
for d in y.der:
assert d == pytest.approx([7])
def test_rmul_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = 7 * x
assert y.val == pytest.approx([-7, 0, 35, 70])
for d in y.der:
assert d == pytest.approx([7, 7, 7, 7])
# Testing the div and rdiv
def test_div_const():
x = AD(val=[20])
y = x / 4
assert y.val == pytest.approx([5])
for d in y.der:
assert d == pytest.approx([0.25])
def test_div_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x / 4
assert y.val == pytest.approx([-0.25, 0, 1.25, 2.5])
for d in y.der:
assert d == pytest.approx([0.25, 0.25, 0.25, 0.25])
def test_div_AD():
x = AD(val=[4])
y = x / x
assert y.val == pytest.approx([1])
for d in y.der:
assert d == pytest.approx([0])
def test_div_AD_many_val():
x = AD(val=[-1, 1, 5, 10])
y = x / x
assert y.val == pytest.approx([1, 1, 1, 1])
for d in y.der:
assert d == pytest.approx([0, 0, 0, 0])
def test_div_different_AD():
x = AD(val=[2], index=0, magnitude=2)
y = AD(val=[4], index=1, magnitude=2)
z = x / y
assert z.val == pytest.approx([0.5])
assert z.der[0] == pytest.approx([0.25])
assert z.der[1] == pytest.approx([-0.125])
def test_div_different_AD_many_val():
x = AD(val=[-2, 4,10, 100], index=0, magnitude=2)
y = AD(val=[1, 2, 2, 4], index=1, magnitude=2)
z = x / y
assert z.val == pytest.approx([-2, 2, 5, 25])
assert z.der[0] == pytest.approx([1, 0.5, 0.5, 0.25])
assert z.der[1] == pytest.approx([2, -1, -2.5, -6.25])
def test_rdiv_const():
x = AD(val=[1])
y = 7 / x
assert y.val == pytest.approx([7])
for d in y.der:
assert d == pytest.approx([-7])
def test_rdiv_const_many_val():
x = AD(val=[-1, 1, 7, 14])
y = 7 / x
assert y.val == pytest.approx([-7, 7, 1, 0.5])
for d in y.der:
assert d == pytest.approx([-7, -7, -0.14285714285714285, -0.03571428571428571])
# Testing the power and rpower
def test_pow_const():
x = AD(val=[5])
y = x**2
assert y.val == pytest.approx([25])
for d in y.der:
assert d == pytest.approx([10])
def test_pow_const_many_val():
x = AD(val=[-1, 0, 1, 3])
y = x ** 3
assert y.val == pytest.approx([-1, 0, 1, 27])
for d in y.der:
assert d == pytest.approx([3, 0, 3, 27])
def test_pow_AD():
x = AD(val=[2])
y = x ** x
assert y.val == pytest.approx([4])
for d in y.der:
assert d == pytest.approx([6.772588722239782])
def test_pow_AD_many_val():
x = AD(val=[1, 2, 5, 10])
y = x ** x
assert y.val == pytest.approx([1, 4, 3125, 10000000000])
for d in y.der:
assert d == pytest.approx([1, 6.772588722239782, 8154.493476356564, 33025850929.94046])
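
# Illustrative cross-check (not part of the original suite): the derivative
# constants expected in the two x ** x tests above follow from the identity
# d/dx x**x = x**x * (ln(x) + 1), evaluated here with plain math and no autodiff.
def test_pow_AD_derivative_identity():
    import math
    assert 2 ** 2 * (math.log(2) + 1) == pytest.approx(6.772588722239782)
    assert 5 ** 5 * (math.log(5) + 1) == pytest.approx(8154.493476356564)
    assert 10 ** 10 * (math.log(10) + 1) == pytest.approx(33025850929.94046)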
def test_pow_different_AD():
x = AD(val=[2], index=0, magnitude=2)
y = AD(val=[3], index=1, magnitude=2)
z = x ** y
assert z.val == pytest.approx([8])
assert z.der[0] == pytest.approx([12])
assert z.der[1] == pytest.approx([5.545177444479562])
def test_pow_different_AD_many_val():
x = AD(val=[1, 2, 3, 4], index=0, magnitude=2)
y = AD(val=[4, 3, 2, 1], index=1, magnitude=2)
z = x ** y
assert z.val == pytest.approx([1, 8, 9, 4])
assert z.der[0] == pytest.approx([4, 12, 6, 1])
assert z.der[1] == pytest.approx([0, 5.545177444479562, 9.887510598012987, 5.545177444479562])
def test_rpow_const():
x = AD(val=[1])
y = 2 ** x
assert y.val == pytest.approx([2])
for d in y.der:
assert d == pytest.approx([1.3862943611198906])
def test_rpow_const_many_val():
x = AD(val=[-1, 0, 1, 2])
y = 2 ** x
assert y.val == pytest.approx([0.5, 1, 2, 4])
for d in y.der:
assert d == pytest.approx([0.34657359027997264, 0.6931471805599453, 1.3862943611198906, 2.772588722239781])
# Generated by Django 2.2.11 on 2020-05-30 16:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("patients", "0011_auto_20200530_2150"),
]
operations = [
migrations.RemoveField(model_name="patient", name="patient_search_id",),
]
# gui/ui/interbasin_dialog.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\interbasin_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_interbasin_dialog(object):
def setupUi(self, interbasin_dialog):
interbasin_dialog.setObjectName("interbasin_dialog")
interbasin_dialog.resize(478, 346)
self.gridLayout = QtWidgets.QGridLayout(interbasin_dialog)
self.gridLayout.setObjectName("gridLayout")
self.int_basin_name_display = QtWidgets.QLineEdit(interbasin_dialog)
self.int_basin_name_display.setEnabled(False)
self.int_basin_name_display.setObjectName("int_basin_name_display")
self.gridLayout.addWidget(self.int_basin_name_display, 1, 0, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(interbasin_dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 1, 1, 1, 1)
self.frame = QtWidgets.QFrame(interbasin_dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setMaximumSize(QtCore.QSize(16777215, 289))
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.formLayout = QtWidgets.QFormLayout(self.frame)
self.formLayout.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName("formLayout")
self.IB_description_box = QtWidgets.QGroupBox(self.frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.IB_description_box.sizePolicy().hasHeightForWidth())
self.IB_description_box.setSizePolicy(sizePolicy)
self.IB_description_box.setMaximumSize(QtCore.QSize(250, 16777215))
self.IB_description_box.setObjectName("IB_description_box")
self.gridLayout_2 = QtWidgets.QGridLayout(self.IB_description_box)
self.gridLayout_2.setObjectName("gridLayout_2")
self.interbasin_id_display = QtWidgets.QLabel(self.IB_description_box)
self.interbasin_id_display.setObjectName("interbasin_id_display")
self.gridLayout_2.addWidget(self.interbasin_id_display, 0, 1, 1, 1)
self.interbasin_id_label = QtWidgets.QLabel(self.IB_description_box)
self.interbasin_id_label.setObjectName("interbasin_id_label")
self.gridLayout_2.addWidget(self.interbasin_id_label, 0, 0, 1, 1)
self.drainage_area_label = QtWidgets.QLabel(self.IB_description_box)
self.drainage_area_label.setObjectName("drainage_area_label")
self.gridLayout_2.addWidget(self.drainage_area_label, 2, 0, 1, 1)
self.drainage_area_edit = QtWidgets.QLineEdit(self.IB_description_box)
self.drainage_area_edit.setObjectName("drainage_area_edit")
self.gridLayout_2.addWidget(self.drainage_area_edit, 2, 1, 1, 1)
self.interbasin_name_label = QtWidgets.QLabel(self.IB_description_box)
self.interbasin_name_label.setObjectName("interbasin_name_label")
self.gridLayout_2.addWidget(self.interbasin_name_label, 1, 0, 1, 1)
self.interbasin_name_edit = QtWidgets.QLineEdit(self.IB_description_box)
self.interbasin_name_edit.setObjectName("interbasin_name_edit")
self.gridLayout_2.addWidget(self.interbasin_name_edit, 1, 1, 1, 1)
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.IB_description_box)
self.input_method_box = QtWidgets.QGroupBox(self.frame)
self.input_method_box.setObjectName("input_method_box")
self.formLayout_2 = QtWidgets.QFormLayout(self.input_method_box)
self.formLayout_2.setObjectName("formLayout_2")
self.table_radio = QtWidgets.QRadioButton(self.input_method_box)
self.table_radio.setObjectName("table_radio")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.table_radio)
self.file_radio = QtWidgets.QRadioButton(self.input_method_box)
self.file_radio.setObjectName("file_radio")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.file_radio)
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.input_method_box)
self.ave_flow_box = QtWidgets.QGroupBox(self.frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ave_flow_box.sizePolicy().hasHeightForWidth())
self.ave_flow_box.setSizePolicy(sizePolicy)
self.ave_flow_box.setMaximumSize(QtCore.QSize(16777215, 150))
self.ave_flow_box.setObjectName("ave_flow_box")
self.gridLayout_3 = QtWidgets.QGridLayout(self.ave_flow_box)
self.gridLayout_3.setObjectName("gridLayout_3")
self.flow_file_edit = QtWidgets.QLineEdit(self.ave_flow_box)
self.flow_file_edit.setEnabled(False)
self.flow_file_edit.setObjectName("flow_file_edit")
self.gridLayout_3.addWidget(self.flow_file_edit, 1, 1, 1, 1)
self.file_button = QtWidgets.QPushButton(self.ave_flow_box)
self.file_button.setObjectName("file_button")
self.gridLayout_3.addWidget(self.file_button, 1, 0, 1, 1)
self.ave_flows_table = QtWidgets.QTableWidget(self.ave_flow_box)
self.ave_flows_table.setObjectName("ave_flows_table")
self.ave_flows_table.setColumnCount(1)
self.ave_flows_table.setRowCount(1)
item = QtWidgets.QTableWidgetItem()
self.ave_flows_table.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.ave_flows_table.setHorizontalHeaderItem(0, item)
self.gridLayout_3.addWidget(self.ave_flows_table, 0, 0, 1, 2)
self.formLayout.setWidget(1, QtWidgets.QFormLayout.SpanningRole, self.ave_flow_box)
self.gridLayout.addWidget(self.frame, 0, 0, 1, 2)
self.drainage_area_label.setBuddy(self.drainage_area_edit)
self.interbasin_name_label.setBuddy(self.interbasin_name_edit)
self.retranslateUi(interbasin_dialog)
self.buttonBox.accepted.connect(interbasin_dialog.accept)
self.buttonBox.rejected.connect(interbasin_dialog.reject)
QtCore.QMetaObject.connectSlotsByName(interbasin_dialog)
interbasin_dialog.setTabOrder(self.interbasin_name_edit, self.drainage_area_edit)
interbasin_dialog.setTabOrder(self.drainage_area_edit, self.ave_flows_table)
interbasin_dialog.setTabOrder(self.ave_flows_table, self.int_basin_name_display)
interbasin_dialog.setTabOrder(self.int_basin_name_display, self.buttonBox)
def retranslateUi(self, interbasin_dialog):
_translate = QtCore.QCoreApplication.translate
interbasin_dialog.setWindowTitle(_translate("interbasin_dialog", "Interbasin"))
self.IB_description_box.setTitle(_translate("interbasin_dialog", "Description"))
self.interbasin_id_display.setText(_translate("interbasin_dialog", "1"))
self.interbasin_id_label.setText(_translate("interbasin_dialog", "Interbasin ID"))
self.drainage_area_label.setText(_translate("interbasin_dialog", "Drainage Area"))
self.interbasin_name_label.setText(_translate("interbasin_dialog", "Interbasin Name"))
self.input_method_box.setTitle(_translate("interbasin_dialog", "Input Method"))
self.table_radio.setText(_translate("interbasin_dialog", "Table"))
self.file_radio.setText(_translate("interbasin_dialog", "Input File"))
self.ave_flow_box.setTitle(_translate("interbasin_dialog", "Average Flow"))
self.file_button.setText(_translate("interbasin_dialog", "Select File"))
item = self.ave_flows_table.verticalHeaderItem(0)
item.setText(_translate("interbasin_dialog", "A"))
item = self.ave_flows_table.horizontalHeaderItem(0)
item.setText(_translate("interbasin_dialog", "1"))