metadata | text
---|---
{
"source": "4301104065/TextSummarization",
"score": 2
} |
#### File: src/models/predict_model.py
```python
import argparse
import numpy as np
import re
import nltk
from typing import List
import pickle
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
from bs4 import BeautifulSoup
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from nltk.corpus import stopwords
from tensorflow.keras.layers import Input, LSTM, Embedding, Dense, Concatenate, TimeDistributed
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping
import warnings
import sys
from tensorflow.keras.models import load_model
from attention import AttentionLayer
sys.path.append("E:\Slide subject\Text mining\TextSummarization\src")
from features.split_data_tokenization import tokenization
max_text_len=30
max_summary_len=8
x_tr, y_tr, x_val, y_val, x_tokenizer, y_tokenizer, x_voc, y_voc = tokenization()
reverse_target_word_index=y_tokenizer.index_word
reverse_source_word_index=x_tokenizer.index_word
target_word_index=y_tokenizer.word_index
path = Path(__file__).parent / "../../models/encoder_model_inference.h5"
encoder_model = load_model(path, custom_objects={'AttentionLayer': AttentionLayer})
path = Path(__file__).parent / "../../models/decoder_model_inference.h5"
decoder_model = load_model(path, custom_objects={'AttentionLayer': AttentionLayer})
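# Inference uses two separately saved models: the encoder turns the padded input sequence into its
# outputs and final LSTM states (e_out, e_h, e_c), and the decoder is then stepped one token at a
# time in decode_sequence below, feeding each predicted token back in until 'eostok' is produced or
# max_summary_len-1 words have been emitted.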
def decode_sequence(input_seq):
# Encode the input as state vectors.
e_out, e_h, e_c = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1,1))
# Populate the first word of target sequence with the start word.
target_seq[0, 0] = target_word_index['sostok']
stop_condition = False
decoded_sentence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict([target_seq] + [e_out, e_h, e_c])
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_token = reverse_target_word_index[sampled_token_index]
if(sampled_token!='eostok'):
decoded_sentence += ' '+sampled_token
# Exit condition: either hit max length or find stop word.
if (sampled_token == 'eostok' or len(decoded_sentence.split()) >= (max_summary_len-1)):
stop_condition = True
# Update the target sequence (of length 1).
target_seq = np.zeros((1,1))
target_seq[0, 0] = sampled_token_index
# Update internal states
e_h, e_c = h, c
return decoded_sentence
def seq2summary(input_seq):
newString=''
for i in input_seq:
if((i!=0 and i!=target_word_index['sostok']) and i!=target_word_index['eostok']):
newString=newString+reverse_target_word_index[i]+' '
return newString
def seq2text(input_seq):
newString=''
for i in input_seq:
if(i!=0):
newString=newString+reverse_source_word_index[i]+' '
return newString
def main():
for i in range(0,10):
print("Review:",seq2text(x_val[i]))
print("Original summary:",seq2summary(y_val[i]))
print("Predicted summary:",decode_sequence(x_val[i].reshape(1,max_text_len)))
print("\n")
if __name__ == "__main__":
main()
``` |
{
"source": "43061b4a/phone_number_to_words",
"score": 4
} |
#### File: 43061b4a/phone_number_to_words/number_converter.py
```python
import time
from functools import lru_cache
from trie import PrefixTree
class NumberConverter(object):
def __init__(self):
self.trie = PrefixTree()
with open('words_en.txt') as file:
lines = [line.rstrip('\n') for line in file]
for line in lines:
self.trie.insert(line)
def number_to_valid_phone_words(self, num):
if '1' in num or '0' in num:
raise Exception('Numbers containing 1 or 0 are currently not supported.')
# 1: Find all words of length equivalent to given string that can be formed
words = []
for prefix in self.num_to_chars(num[0]):
words.extend(self.trie.starts_with(prefix, len(num)))
# 2: Convert words to number equivalents eg 'cat' -> '228'
possible_words = []
for word in words:
converted_num = self.words_to_nums(word)
# 3: We add this word to results if this is equivalent to given number
if num == converted_num:
possible_words.append(word)
return possible_words
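# Worked example: for '228' the first digit maps to the prefixes 'a', 'b', 'c'; trie words of
# length 3 starting with those letters (presumably what starts_with(prefix, 3) returns, e.g. 'cat')
# are mapped back to digits ('cat' -> '228') and kept only when they match the input number.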
@staticmethod
def num_to_chars(num):
keymap = {'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z']}
return keymap[num] if num in keymap else None
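# words_to_nums below is memoized with lru_cache; since it is an instance method, the cache key
# includes self, so the cache also keeps a reference to the converter instance.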
@lru_cache(maxsize=10000)
def words_to_nums(self, word):
keymap = {
'a': '2', 'b': '2', 'c': '2',
'd': '3', 'e': '3', 'f': '3',
'g': '4', 'h': '4', 'i': '4',
'j': '5', 'k': '5', 'l': '5',
'm': '6', 'n': '6', 'o': '6',
'p': '7', 'q': '7', 'r': '7', 's': '7',
't': '8', 'u': '8', 'v': '8',
'w': '9', 'x': '9', 'y': '9', 'z': '9'
}
for char, num in keymap.items():
word = word.replace(char, num)
return word
converter = NumberConverter()
print('****First Run****')
for n in ['228', '888', '2382']:
start = time.time()
print(n, converter.number_to_valid_phone_words(n))
end = time.time()
print('Processing time in milliseconds:', int((end - start) * 1000))
print('****Second Run****')
for n in ['228', '888', '2382']:
start = time.time()
print(n, converter.number_to_valid_phone_words(n))
end = time.time()
print('Processing time in milliseconds:', int((end - start) * 1000))
```
#### File: 43061b4a/phone_number_to_words/test_trie.py
```python
import unittest
from trie import PrefixTree
class TrieTest(unittest.TestCase):
def setUp(self):
self.trie = PrefixTree()
def test_trie_size(self):
self.trie.insert('apple')
self.assertEqual(self.trie.size(), 6)
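# 'apple' has 5 characters; size() evidently counts the root node as well, hence 6
# (consistent with test_bigger_size below: root + 'bad'/'bat' + 'cat'/'cage' nodes = 10).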
def test_prefix_not_found_as_whole_word(self):
self.trie.insert('apple')
self.trie.insert('appreciate')
self.assertEqual(self.trie.find('app'), None)
def test_prefix_is_also_whole_word(self):
self.trie.insert('apple')
self.trie.insert('appreciate')
self.trie.insert('app')
# 10: [app], [appr], [appre], [apprec], [appreci], [apprecia]
# [appreciat], [appreciate], [appl], and [apple]
self.assertEqual(self.trie.size(self.trie.find('app')), 10)
self.assertEqual(self.trie.find('app').is_word, True)
def test_starts_with(self):
self.trie.insert('apple')
self.trie.insert('appreciate')
self.trie.insert('aposematic')
self.trie.insert('apoplectic')
self.trie.insert('appendix')
result = self.trie.starts_with('app')
for v in ['apple', 'appreciate', 'appendix']:
self.assertIn(v, result)
def test_starts_with_self(self):
self.trie.insert('app')
self.assertEqual(self.trie.starts_with('app'), ['app'])
def test_bigger_size(self):
self.trie.insert('bad')
self.trie.insert('bat')
self.trie.insert('cat')
self.trie.insert('cage')
self.assertEqual(self.trie.size(), 10)
def test_starts_with_empty_and_no_words(self):
self.assertEqual(self.trie.starts_with(''), [])
def test_starts_with_empty_returns_all_words(self):
values = ['bad', 'bat', 'cat', 'cage']
for v in values:
self.trie.insert(v)
result = self.trie.starts_with('')
for v in values:
self.assertIn(v, result)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "43061b4a/TicTacToeGame",
"score": 3
} |
#### File: TicTacToeGame/GameLib/GameRunner.py
```python
import random
from GameLib.TicTacToe import TicTacToe
class GameRunner:
def __init__(self):
super().__init__()
self.game = TicTacToe()
self.player_count = 2
self.current_player = random.randint(0, self.player_count - 1)
self.symbols = ['✕', '◉']
def game_over(self):
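# Note: this only reports whether the board is full; a win before the board fills is not
# detected here, so callers should also consult get_winner().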
return self.game.all_positions_taken()
def get_current_player(self):
return self.current_player
def get_current_player_symbol(self):
return self.symbols[self.current_player]
def take_turn(self, position):
if not self.game.all_positions_taken():
self.game.turn(position, self.symbols[self.current_player])
self.current_player = (self.current_player + 1) % self.player_count
return True
else:
return False
def get_winner(self):
for i in range(self.player_count):
if self.game.check(self.symbols[i]):
return i + 1
return None
``` |
{
"source": "4310V343k/Hacknet-Pathfinder",
"score": 2
} |
#### File: Hacknet-Pathfinder/PathfinderInstaller/PathfinderInstaller.py
```python
import os
import platform
import stat
import subprocess
from tkinter import *
from tkinter import filedialog
from tkinter.ttk import *
import shutil
from threading import Thread
import requests
from zipfile import ZipFile
from io import BytesIO
import re
import pathlib
if platform.system() == 'Windows':
from winreg import *
def install_pathfinder(gen_event_callback, hacknet_directory):
for asset in requests.get('https://api.github.com/repos/Arkhist/Hacknet-Pathfinder/releases').json()[0]['assets']:
if 'Pathfinder.Release' in asset['name']:
url = asset['browser_download_url']
break
with ZipFile(BytesIO(requests.get(url).content)) as pathfinder_zip:
pathfinder_zip.extractall(path=hacknet_directory)
patcher_exe = os.path.join(hacknet_directory, 'PathfinderPatcher.exe')
if platform.system() == "Linux":
os.chmod(patcher_exe, stat.S_IRWXU)
completed = subprocess.run([patcher_exe], cwd=hacknet_directory)
if completed.returncode != 0:
gen_event_callback('<<InstallFailure>>')
return
try:
os.remove(patcher_exe)
os.remove(os.path.join(hacknet_directory, 'Mono.Cecil.dll'))
hacknet_exe = os.path.join(hacknet_directory, 'Hacknet.exe')
os.rename(hacknet_exe, os.path.join(hacknet_directory, 'HacknetOld.exe'))
os.rename(os.path.join(hacknet_directory, 'HacknetPathfinder.exe'), hacknet_exe)
except OSError:
gen_event_callback('<<InstallFailure>>')
return
gen_event_callback('<<InstallComplete>>')
def try_find_hacknet_dir():
def get_library_folders(vdf_path):
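# libraryfolders.vdf lists additional Steam library paths as '"<index>" "<path>"' entries;
# note that re.search only returns the first match's groups, so at most one extra library is found.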
with open(vdf_path) as vdf:
match = re.search(r'^\s*"[0-9]+"\s*"(.+)"', vdf.read(), flags=re.MULTILINE)
if match is None:
return []
return match.groups()
hacknet_dir = ''
folders = []
if platform.system() == 'Windows':
try:
registry = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
key = OpenKey(registry, r'SOFTWARE\Wow6432Node\Valve\Steam')
root_steamapps = os.path.join(QueryValueEx(key, 'InstallPath')[0], 'steamapps')
folders.append(root_steamapps)
libraries = get_library_folders(os.path.join(root_steamapps, 'libraryfolders.vdf'))
folders.extend([os.path.join(library, 'steamapps') for library in libraries])
except OSError:
return hacknet_dir
else:
home = pathlib.Path.home()
steam_root = None
possible_roots = [
os.path.join(home, '.local', 'share', 'Steam'),
os.path.join(home, '.steam', 'steam'),
os.path.join(home, '.steam', 'root'),
os.path.join(home, '.steam'),
os.path.join(home, '.var', 'app', 'com.valvesoftware.Steam', '.local', 'share', 'steam'),
os.path.join(home, '.var', 'app', 'com.valvesoftware.Steam', '.steam', 'steam'),
os.path.join(home, '.var', 'app', 'com.valvesoftware.Steam', '.steam', 'root'),
os.path.join(home, '.var', 'com.valvesoftware.Steam', '.steam')
]
for dir in possible_roots:
if not os.path.exists(dir) or not os.path.exists(os.path.join(dir, 'steam.sh')):
continue
steam_root = dir
break
if steam_root is None:
return hacknet_dir
possible_steamapps = [
os.path.join(steam_root, 'steamapps'),
os.path.join(steam_root, 'steam', 'steamapps'),
os.path.join(steam_root, 'root', 'steamapps')
]
root_steamapps = None
for possible_steamapp in possible_steamapps:
if os.path.exists(possible_steamapp):
root_steamapps = possible_steamapp
break
if root_steamapps is None:
return hacknet_dir
folders.append(root_steamapps)
libraries = get_library_folders(os.path.join(root_steamapps, 'libraryfolders.vdf'))
for library in libraries:
for possible_steamapp in possible_steamapps:
if os.path.exists(os.path.join(library, possible_steamapp)):
folders.append(possible_steamapp)
for folder in folders:
hacknet_acf = os.path.join(folder, 'appmanifest_365450.acf')
if not os.path.exists(hacknet_acf):
continue
hacknet_dir_candidate = os.path.join(folder, 'common', 'Hacknet')
hacknet_exe = os.path.join(hacknet_dir_candidate, 'Hacknet.exe')
if not os.path.exists(hacknet_dir_candidate) or not os.path.exists(hacknet_exe):
continue
hacknet_dir = hacknet_dir_candidate
return hacknet_dir
class App(Frame):
def __init__(self, master: Tk):
super().__init__(master)
self.master = master
self.master.bind('<<InstallComplete>>', self.install_complete)
self.master.bind('<<InstallFailure>>', self.install_failure)
self.content = Frame(self.master)
self.file_frame = Frame(self.content)
self.dir_label = Label(self.file_frame, text='Hacknet Folder')
self.hacknet_directory = StringVar()
self.hacknet_directory.set(try_find_hacknet_dir())
self.dir = Entry(self.file_frame, textvariable=self.hacknet_directory)
self.reopen_button = Button(self.file_frame, text='Open Directory Select', command=self.open_dir)
self.button_frame = Frame(self.content)
self.install_button = Button(self.button_frame, text='Install', command=self.install)
self.uninstall_button = Button(self.button_frame, text='Uninstall', command=self.uninstall)
self.setup_grid()
self.progress = None
def setup_grid(self):
self.master.title('Pathfinder Installer')
self.master.geometry("750x75")
self.master.resizable(FALSE, FALSE)
self.content.grid(column=0, row=0, sticky='NSEW')
self.file_frame.grid(column=0, row=0, sticky='NSEW')
self.dir_label.grid(column=0, row=0, padx=(5, 0))
self.dir.grid(column=1, row=0, columnspan=2, padx=5, sticky='EW')
self.reopen_button.grid(column=3, row=0, padx=(0, 5))
self.button_frame.grid(column=0, row=1, pady=(0, 5))
self.install_button.grid(column=0, row=0, padx=(0, 20))
self.uninstall_button.grid(column=1, row=0, padx=(20, 0))
self.master.columnconfigure(0, weight=1)
self.master.rowconfigure(0, weight=1)
self.content.columnconfigure(0, weight=1)
self.content.rowconfigure(0, weight=1)
self.content.rowconfigure(1, weight=1)
self.file_frame.columnconfigure(1, weight=1)
self.file_frame.rowconfigure(0, weight=1)
def open_dir(self):
self.hacknet_directory.set(filedialog.askdirectory())
def install(self):
hacknet_dir = self.hacknet_directory.get()
if not self.valid_directory(hacknet_dir):
return
self.progress = Progressbar(self.button_frame, orient=HORIZONTAL, length=500, mode='indeterminate')
self.progress.grid(column=0, row=0, columnspan=2)
self.progress.start()
Thread(target=install_pathfinder, args=(self.master.event_generate, hacknet_dir)).start()
def install_complete(self, event):
self.make_message_box('Installation Complete!', title='Success')
self.progress.destroy()
self.progress = None
def install_failure(self, event):
self.make_message_box('Installation failed, this may have left an unfinished installation in your Hacknet folder!', title='Failure')
self.progress.destroy()
self.progress = None
return
def uninstall(self):
hacknet_dir = self.hacknet_directory.get()
if not self.valid_directory(hacknet_dir):
return
hacknet_exe_path = os.path.join(hacknet_dir, 'Hacknet.exe')
old_hacknet_path = os.path.join(hacknet_dir, 'HacknetOld.exe')
if not os.path.exists(old_hacknet_path):
self.make_message_box('Could not find OldHacknet.exe, are you sure Pathfinder is installed (and was installed by this installer)?', title='Error!')
return
try:
os.remove(hacknet_exe_path)
os.rename(old_hacknet_path, hacknet_exe_path)
shutil.rmtree(os.path.join(hacknet_dir, 'BepInEx'), ignore_errors=True)
except OSError:
self.make_message_box('Failed to clean up all files, you may be left with an incomplete uninstall!', title='Error!')
self.make_message_box('Pathfinder successfully uninstalled', title='Success')
def valid_directory(self, directory):
valid = True
if not os.path.exists(directory):
valid = False
self.make_message_box(f'The directory {directory} does not exist!', title='Error!')
elif not os.path.exists(os.path.join(directory, 'Hacknet.exe')):
valid = False
self.make_message_box(f'The directory {directory} does not contain a file called Hacknet.exe!', title='Error!')
return valid
def make_message_box(self, message, title='Message'):
message_box = Toplevel(self.master)
message_box.resizable(FALSE, FALSE)
message_box.title(title)
message_frame = Frame(message_box)
message_frame.grid()
Label(message_frame, text=message).grid(column=0, row=0, padx=5, pady=5)
Button(message_frame, text='Ok', command=message_box.destroy).grid(column=0, row=1, pady=5)
root = Tk()
app = App(root)
root.mainloop()
``` |
{
"source": "4321ba/Galaxy_Jukebox",
"score": 3
} |
#### File: 4321ba/Galaxy_Jukebox/builder.py
```python
from vector import Vector
instrument_name = [
"harp",
"bass",
"basedrum",
"snare",
"hat",
"guitar",
"flute",
"bell",
"chime",
"xylophone",
"iron_xylophone",
"cow_bell",
"didgeridoo",
"bit",
"banjo",
"pling",
]
instrument_material = [
"lapis_block",
"jungle_wood",
"black_concrete",
"pink_concrete_powder",
"purple_stained_glass",
"magenta_wool",
"clay",
"gold_block",
"packed_ice",
"bone_block",
"iron_block",
"soul_sand",
"pumpkin",
"emerald_block",
"hay_block",
"glowstone",
]
building_material = [
"blue_concrete",
"green_concrete",
"black_concrete",
"pink_concrete",
"purple_concrete",
"magenta_concrete",
"light_gray_concrete",
"yellow_concrete",
"light_blue_concrete",
"white_concrete",
"cyan_concrete",
"brown_concrete",
"orange_concrete",
"lime_concrete",
"red_concrete",
"gray_concrete",
]
even_delay_buildblock = "polished_andesite"
odd_delay_buildblock = "polished_granite"
start_line_buildblock = "polished_diorite"
def cardinal_direction(v):
assert v.y==0 and ((abs(v.x)==1 and v.z==0) or (v.x==0 and abs(v.z)==1))
if v.x == 1:
return "east"
if v.x == -1:
return "west"
if v.z == 1:
return "south"
if v.z == -1:
return "north"
# these 3 functions are used throughout split_lines and here in build_delay, exclusively, to create blocks:
def setblock(schem, v, block):
schem.setblock(v.x, v.y, v.z, block)
def block_and_redstone(schem, v, buildblock, powered=False):
schem.setblock(v.x, v.y+0, v.z, buildblock)
schem.setblock(v.x, v.y+1, v.z, f"redstone_wire[east=side,north=side,power={15 if powered else 0},south=side,west=side]")
def block_and_repeater(schem, v, buildblock, facing_direction, delay=1, locked=False, powered=False):
assert delay in [1, 2, 3, 4], f"Cannot create a repeater with a delay of {delay}!"
schem.setblock(v.x, v.y+0, v.z, buildblock)
schem.setblock(v.x, v.y+1, v.z, f"repeater[delay={delay},facing={cardinal_direction(-facing_direction)},locked={locked},powered={powered}]")
# return the space/blocks needed for the delay and md pair
def get_delay_length(delay, md):
class DummySchematic:
def setblock(self, x, y, z, block):
pass
schem = DummySchematic()
v = Vector(0, 0, 0)
forward = Vector(0, 0, 1)
build_delay(schem, "", v, forward, delay, md)
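# build_delay advances v in place (its docstring below notes that v is modified), so v.z ends up
# equal to the number of blocks the delay occupies along the forward (z) axis.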
return v.z
"""
creates the delay in the form:
<> : repeater
- : redstone
≤≥ : repeater or redstone
█ : block
? : something
from the side:
>>>>>>█
██████-
≤????<█
█????█
the signal goes in on the top, and after delay time, it comes back at the bottom
these can be stacked one after another, creating the heart of the whole contraption
if loopback is false, the redstone at the end won't get placed, making it useful for turning sideway
v is modified to represent the actual position!
"""
def build_delay(schem, buildblock, v, forward, delay, md, loopback=True):
# helper functions for e.g.: placing a redstone down and a repeater up
# these additionally move v forward, as it is always needed after placing these
def d_redstone_u_repeater(schem, buildblock, v, forward, u_delay):
block_and_redstone(schem, v, buildblock)
block_and_repeater(schem, v + Vector(0, 2, 0), buildblock, forward, delay=u_delay)
v += forward
def d_repeater_u_repeater(schem, buildblock, v, forward, d_delay, u_delay):
block_and_repeater(schem, v, buildblock, -forward, delay=d_delay)
block_and_repeater(schem, v + Vector(0, 2, 0), buildblock, forward, delay=u_delay)
v += forward
def d_block_u_repeater(schem, buildblock, v, forward, u_delay):
setblock(schem, v + Vector(0, 1, 0), buildblock)
block_and_repeater(schem, v + Vector(0, 2, 0), buildblock, forward, delay=u_delay)
v += forward
def d_loopback_u_block(schem, buildblock, v, forward, loopback):
if loopback:
block_and_redstone(schem, v + Vector(0, 1, 0), buildblock)
else:
setblock(schem, v + Vector(0, 1, 0), buildblock)
setblock(schem, v + Vector(0, 3, 0), buildblock)
v += forward
# md: minimum of the delays in the entire line afterwards, determines how much delay we can put onto the repeaters
# we can't put 2 repeaters after one another on the bottom line with md=2 because of this bug:
# https://bugs.mojang.com/browse/MC-54711
# because of this, with even md, we can't end with a repeater with delay=md//2 on the bottom
# also related is that with md6 e.g.:
# a 3 tick repeater can only go after a 1 tick one if the pulse is already 3 tick long, it doesn't work if the pulse is shorter
# 1 tick repeaters everywhere, repeater chaining only at the top
def create_delay_md2(schem, buildblock, v, forward, delay, loopback):
if delay % 3 != 2: # 0 or 1 is the remainder
delay -= 1
d_redstone_u_repeater(schem, buildblock, v, forward, 1)
if delay % 3 == 2: # 0 or 2 was the remainder originally
delay -= 2
d_repeater_u_repeater(schem, buildblock, v, forward, 1, 1)
while delay > 0:
delay -= 3
d_block_u_repeater(schem, buildblock, v, forward, 1)
d_repeater_u_repeater(schem, buildblock, v, forward, 1, 1)
assert delay == 0, f"There shouldn't be any delay remaining, but it is {delay}!"
d_loopback_u_block(schem, buildblock, v, forward, loopback)
# 1 tick repeaters everywhere, repeater chaining at the bottom and the top too
def create_delay_md3(schem, buildblock, v, forward, delay, loopback):
if delay % 2 == 1:
delay -= 1
d_redstone_u_repeater(schem, buildblock, v, forward, 1)
while delay > 0:
delay -= 2
d_repeater_u_repeater(schem, buildblock, v, forward, 1, 1)
assert delay == 0, f"There shouldn't be any delay remaining, but it is {delay}!"
d_loopback_u_block(schem, buildblock, v, forward, loopback)
# 2 tick repeaters everywhere, repeater chaining at the bottom needs to end with a 1 tick repeater
# this is becoming a bit of a pattern, but here it still may be better written out explicit
# I'll generalize with md6
def create_delay_md4(schem, buildblock, v, forward, delay, loopback):
if delay == 4:
delay -= 4
d_repeater_u_repeater(schem, buildblock, v, forward, 2, 2)
elif delay % 4 == 0:
delay -= 4
d_repeater_u_repeater(schem, buildblock, v, forward, 1, 1)
d_repeater_u_repeater(schem, buildblock, v, forward, 1, 1)
elif delay % 4 == 1:
delay -= 5
d_repeater_u_repeater(schem, buildblock, v, forward, 1, 1)
d_repeater_u_repeater(schem, buildblock, v, forward, 1, 2)
elif delay % 4 == 2:
delay -= 2
d_repeater_u_repeater(schem, buildblock, v, forward, 1, 1)
elif delay % 4 == 3:
delay -= 3
d_repeater_u_repeater(schem, buildblock, v, forward, 1, 2)
while delay > 0:
delay -= 4
d_repeater_u_repeater(schem, buildblock, v, forward, 2, 2)
assert delay == 0, f"There shouldn't be any delay remaining, but it is {delay}!"
d_loopback_u_block(schem, buildblock, v, forward, loopback)
# same as md4, except we can chain 2 tick repeaters everywhere
def create_delay_md5(schem, buildblock, v, forward, delay, loopback):
if delay % 4 == 0:
while delay > 0:
delay -= 4
d_repeater_u_repeater(schem, buildblock, v, forward, 2, 2)
assert delay == 0, f"There shouldn't be any delay remaining, but it is {delay}!"
d_loopback_u_block(schem, buildblock, v, forward, loopback)
else:
create_delay_md4(schem, buildblock, v, forward, delay, loopback)
# 3 tick repeaters everywhere, repeater chaining at the bottom needs to end with a 2 or 1 tick repeater
def create_delay_md6(schem, buildblock, v, forward, delay, loopback):
if delay == 6:
delay -= 6
d_repeater_u_repeater(schem, buildblock, v, forward, 3, 3)
elif delay % 6 in [0, 1]:
rem = delay % 6
delay -= (6 + rem)
d_repeater_u_repeater(schem, buildblock, v, forward, 1, 1)
d_repeater_u_repeater(schem, buildblock, v, forward, 1 + rem, 3)
elif delay % 6 in [2, 3, 4, 5]:
rem = delay % 6
delay -= rem
d_delay = 2 if rem == 5 else 1
d_repeater_u_repeater(schem, buildblock, v, forward, d_delay, rem - d_delay)
while delay > 0:
delay -= 6
d_repeater_u_repeater(schem, buildblock, v, forward, 3, 3)
assert delay == 0, f"There shouldn't be any delay remaining, but it is {delay}!"
d_loopback_u_block(schem, buildblock, v, forward, loopback)
# same as md6, except we can chain 3 tick repeaters everywhere
def create_delay_md7(schem, buildblock, v, forward, delay, loopback):
if delay % 6 == 0:
while delay > 0:
delay -= 6
d_repeater_u_repeater(schem, buildblock, v, forward, 3, 3)
assert delay == 0, f"There shouldn't be any delay remaining, but it is {delay}!"
d_loopback_u_block(schem, buildblock, v, forward, loopback)
else:
create_delay_md6(schem, buildblock, v, forward, delay, loopback)
# 4 tick repeaters everywhere, repeater chaining at the bottom needs to end with a <4 tick repeater
def create_delay_md8(schem, buildblock, v, forward, delay, loopback):
if delay == 8:
delay -= 8
d_repeater_u_repeater(schem, buildblock, v, forward, 4, 4)
elif delay % 8 in [0, 1]:
rem = delay % 8
delay -= (8 + rem)
d_repeater_u_repeater(schem, buildblock, v, forward, 1, 1)
d_repeater_u_repeater(schem, buildblock, v, forward, 2 + rem, 4)
elif delay % 8 in [2, 3, 4, 5, 6, 7]:
rem = delay % 8
delay -= rem
d_delay = rem - 4 if rem in [6, 7] else 1
d_repeater_u_repeater(schem, buildblock, v, forward, d_delay, rem - d_delay)
while delay > 0:
delay -= 8
d_repeater_u_repeater(schem, buildblock, v, forward, 4, 4)
assert delay == 0, f"There shouldn't be any delay remaining, but it is {delay}!"
d_loopback_u_block(schem, buildblock, v, forward, loopback)
# same as md8, except we can chain 4 tick repeaters everywhere
def create_delay_md9_or_above(schem, buildblock, v, forward, delay, loopback):
if delay % 8 == 0:
while delay > 0:
delay -= 8
d_repeater_u_repeater(schem, buildblock, v, forward, 4, 4)
assert delay == 0, f"There shouldn't be any delay remaining, but it is {delay}!"
d_loopback_u_block(schem, buildblock, v, forward, loopback)
else:
create_delay_md8(schem, buildblock, v, forward, delay, loopback)
delay_functions = {
2: create_delay_md2,
3: create_delay_md3,
4: create_delay_md4,
5: create_delay_md5,
6: create_delay_md6,
7: create_delay_md7,
8: create_delay_md8,
9: create_delay_md9_or_above
}
# actually executing the needed thing:
delay_functions[min(md, 9)](schem, buildblock, v, forward, delay, loopback)
``` |
{
"source": "4322vipul/mnist_digit_classifier_website",
"score": 3
} |
#### File: mnist_digit_classifier_website/mnistwebsite/models.py
```python
from django.db import models
# Create your models here.
class given_image(models.Model):
image_given=models.ImageField(max_length=64)
class image_name(models.Model):
name_of_image=models.CharField(max_length=64)
def __str__(self):
return self.name_of_image
class predicted_label(models.Model):
label=models.CharField(max_length=8)
def __str__(self):
return self.label
``` |
{
"source": "435236402/dome",
"score": 2
} |
#### File: untitled2/Book/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
# def session_set(requset):
# request.session['name'] = 'ithei
def set_session(request):
'''Set session values.'''
request.session['username'] = 'smart'
request.session['age'] = 18
# Set the session id expiry time
# request.session.set_expiry(5)
return HttpResponse('session set')
def session_get(request):
name = request.session['name']
return HttpResponse(name)
``` |
{
"source": "435981572/visual-servoing-MSIM",
"score": 2
} |
#### File: 435981572/visual-servoing-MSIM/vrep_cnn.py
```python
import numpy as np
import sys
import matplotlib.pyplot as plt
import os
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms
from torchvision.utils import make_grid
from PIL import Image
import torch.nn as nn
from torchvision.transforms import transforms
from vrep_test import *
import math
import cv2
import vrep
from creat_data import *
def connect(port, message):
# connect to server
vrep.simxFinish(-1) # just in case, close all opened connections
clientID = vrep.simxStart('127.0.0.1', 19999, True, True, 5000, 5) # start a connection
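# Note: the 'port' argument is not used here; the connection always targets 127.0.0.1:19999.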
if clientID != -1:
print("Connected to remote API server")
print(message)
else:
print("Not connected to remote API server")
sys.exit("Could not connect")
return clientID
def getObjectsHandles(clientID, objects):
handles = {}
for obj_idx in range(len(objects)):
err_code, handles[objects[obj_idx]] = vrep.simxGetObjectHandle(clientID, objects[obj_idx], vrep.simx_opmode_blocking)
if err_code:
print("Failed to get a handle for object: {}, got error code: {}".format( objects[obj_idx], err_code))
break
return handles
def getLightHandles(clientID, lights):
handles = {}
for obj_idx in range(len(lights)):
err_code, handles[lights[obj_idx]] = vrep.simxGetObjectHandle(clientID, lights[obj_idx], vrep.simx_opmode_blocking)
if err_code:
print("Failed to get a handle for object: {}, got error code: {}".format(lights[obj_idx], err_code))
break
return handles
def setCameraInitialPose(clientID, obj):
# print(obj)
errPos, position = vrep.simxGetObjectPosition(clientID, obj, -1, vrep.simx_opmode_oneshot_wait)
# print("1 error", err_code)
# print("Position", position)
errOrient, orientation = vrep.simxGetObjectOrientation(clientID, obj, -1, vrep.simx_opmode_oneshot_wait)
# print("2 error", err_code)
#print("Orientation", orientation)
if errPos :
print("Failed to get position for object: {}, got error code: {}".format(obj, errPos))
elif errOrient:
print("Failed to get orientation for object: {}, got error code: {}".format(obj, errOrient))
else:
return np.array([position, orientation])
def generateCameraRandomPose(clientID, obj, oldPose):
# import matplotlib.pyplot as mlp
print("old pose is :",oldPose)
randPose = np.asarray(np.random.random([2, 3]))
print("randPose is :",randPose)
# print(np.shape(randPose))
center = np.array([[0.01, 0.01, 0.01], np.deg2rad([-5, -5, -10])])
variance = np.array([[0.01, 0.01, 0.01], np.deg2rad([5, 5, 10])])
print("variance",variance)
std = np.sqrt(variance)
print("std is :",std)
newPose = np.multiply(randPose, std) - std/2 + oldPose
#print(np.shape(std))
#print(oldPose)
print("newpose shape is :",newPose)
return newPose
def setCameraRandomPose(clientID, obj, newPose):
# print(obj)
errPos= vrep.simxSetObjectPosition(clientID, obj, -1, newPose[0,:], vrep.simx_opmode_oneshot_wait)
# print("1 error", err_code)
# print("Position", position)
errOrient= vrep.simxSetObjectOrientation(clientID, obj, -1, newPose[1,:], vrep.simx_opmode_oneshot_wait)
# print("2 error", err_code)
# print("Orientation", orientation)
if errPos :
print("Failed to set position for object: {}, got error code: {}".format(obj, errPos))
elif errOrient:
print("Failed to set orientation for object: {}, got error code: {}".format(obj, errOrient))
else:
return newPose
transform = transforms.Compose([transforms.Resize(size=256,interpolation=2),transforms.ToTensor()])
baseName = 'UR5'
jointName = 'UR5_joint'
RAD2DEG =180 / math.pi
jointNum = 6
## globals
SRV_PORT = 19999
CAMERA = "Vision_sensor"
IMAGE_PLANE = "Plane0"
N_BASE_IMGS=50
CAPTURED_IMGS_PATH= 'vrep_cnn\\processing\\'
testTarget1="testTarget1"
time = 0
df = 2
dt =0.01
i = 0
err = []
v_all = []
w_all = []
time_intervals = []
traces = []
if __name__ == '__main__':
clientID = connect(SRV_PORT, "Data generation started")
objects_names = [CAMERA, IMAGE_PLANE, testTarget1]
object_handles = getObjectsHandles(clientID, objects_names)
## Get the initial pose; conversions between Euler angles, dual quaternions and rotation matrices are handled below
initPose = setCameraInitialPose(clientID,object_handles[testTarget1])  # Euler angles
print('initpose is:',initPose.reshape(-1))
dq_AR = eulerTR2dualpq(initPose.reshape(-1))
u_AR, theta_AR, R_AR, t_AR = dualq2uthetaRt(dq_AR)
X_AR = np.vstack((np.hstack([R_AR, t_AR.reshape(t_AR.shape[0], 1)]), np.array([0, 0, 0, 1]))) ##
#euler_AR = dualpq2eulerTR(dq_AR)
euler_AR = initPose.reshape(2, 3)
pointA = euler_AR[0].reshape(-1)
print('euler_AR is:',euler_AR)
## Desired pose: pick one from the ground-truth labels.
all_desired_pose = np.loadtxt('./lable.txt')
index = np.random.randint(0, 4999)
print('all_desired_pose[index] is',all_desired_pose[index])
dq_BR = eulerTR2dualpq(all_desired_pose[index].reshape(-1))  ## Euler angles to dual quaternion
u_BR, theta_BR, R_BR, t_BR = dualq2uthetaRt(dq_BR)
X_BR = np.vstack((np.hstack([R_BR, t_BR.reshape(t_BR.shape[0], 1)]), np.array([0, 0, 0, 1])))
euler_BR = dualpq2eulerTR(dq_BR)
#euler_BR = all_desired_pose[index].reshape(2,3)
pointB = euler_BR[0].reshape(-1)
#euler_BR = all_desired_pose[index].reshape(2,3)
print('euler_BR is:', euler_BR,index)
# load model
model = torch.load('model.kpl')
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
model.eval()  ## the model contains dropout; eval() fixes it for inference
torch.no_grad()
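# NOTE: torch.no_grad() as a bare statement has no effect; it must be used as a context manager
# ('with torch.no_grad():') or decorator to actually disable gradient tracking.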
while time < df:
error_dq_AB = muldualpq(conjdualqsimple(dq_BR), dq_AR)
u_AB, theta_AB, R_AB, t_AB = dualq2uthetaRt(error_dq_AB)
err.append(error_dq_AB)
print('error_dq_AB is:',error_dq_AB)
setCameraRandomPose(clientID, object_handles[testTarget1], euler_AR)
## Control Law
lambdax = 5
v = -lambdax * np.dot(R_AB.T, t_AB)
w = -lambdax * theta_AB * u_AB
control_law_AB = np.hstack([v, w])
####Convert Control Law
T_BR = skew(t_BR)
A = np.vstack([np.hstack([R_BR, np.dot(T_BR, R_BR)]), np.hstack([np.zeros((3, 3)), R_BR])])
control_law_AR = np.dot(A, control_law_AB.T)
v = control_law_AR[0:3]
w = control_law_AR[3:6]
v_all.append(v)
w_all.append(w)
theta = np.linalg.norm(w)
if theta == 0:
u = np.array([0, 0, 1])
else:
u = w / np.linalg.norm(w)
## Capture the current image and predict the pose
fname = CAPTURED_IMGS_PATH + "img" + '{0:06d}'.format(i) + ".jpg"
if i == 0:
sleeptime1 = 0.05
img = renderSensorImage(clientID,object_handles[CAMERA],fname,sleeptime1)
else:
sleeptime1 = 0
img = renderSensorImage(clientID, object_handles[CAMERA], fname,sleeptime1)
i = i + 1
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
img = transform(img).unsqueeze(0)
img_ = img.to(device)
#output = model(img_)
#output = output.cuda().data.cpu().numpy().reshape(6,)
#print('output',output)
####
update_dq_AR = uthetat2dq(u, dt * theta, dt * v)
dq_AR = muldualpq(update_dq_AR, dq_AR)
euler_AR = dualpq2eulerTR(dq_AR)
print('time is:',time)
time = time + dt
time_intervals.append(time)
traces.append(euler_AR[0].reshape(-1))
err = np.asarray(err)
v_all = np.asarray(v_all)
w_all = np.asarray(w_all)
time_intervals = np.asarray(time_intervals)
###plot 3D the trajectory of the camera frame
traces1 = np.asarray(traces)
x = traces1[:,0]
y = traces1[:,1]
z = traces1[:,2]
fig = plt.figure()
ax = fig.gca(projection='3d')
# set figure information
ax.set_title("3D_Curve")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.scatter(pointA[0], pointA[1], pointA[2], c='g', marker='+')
ax.scatter(pointB[0], pointB[1], pointB[2], c='r', marker='^')
figure = ax.plot(x, y, z, c='r')
##velocity
fig = plt.figure()
plt.plot(time_intervals, v_all[:,0],c='r',label='v1')
plt.plot(time_intervals, v_all[:,1],c='b',label='v2')
plt.plot(time_intervals, v_all[:,2],c='g',label='v3')
##orientation
plt.plot(time_intervals, w_all[:,0],c='y',ls='-.',label='w1')
plt.plot(time_intervals, w_all[:,1],c='k',ls='-.',label='w2')
plt.plot(time_intervals, w_all[:,2],c='m',ls='-.',marker='o',ms=1,label='w3')
plt.legend()  # show the legend
plt.xlabel('time_intervals')
#plt.show()
###err
fig = plt.figure()
plt.plot(time_intervals, err[:,0],c='r',label='r1')
plt.plot(time_intervals, err[:,1],c='g',label='r2')
plt.plot(time_intervals, err[:,2],c='k',label='r3')
plt.plot(time_intervals, err[:,3],c='m',label='r4')
plt.plot(time_intervals, err[:,4],c='b',label='d1')
plt.plot(time_intervals, err[:,5],c='g',label='d2')
plt.plot(time_intervals, err[:,6],c='y',label='d3')
plt.plot(time_intervals, err[:,7],c='m',label='d4')
plt.legend()  # show the legend
plt.xlabel('time_intervals')
plt.show()
``` |
{
"source": "435vic/python_perceptron",
"score": 4
} |
#### File: 435vic/python_perceptron/perceptron.py
```python
"perceptron module"
import numpy as np
class Perceptron:
"Main class for module"
def __init__(self):
# Randomizes the weights to value between -1 and 1
self.weights = 2 * np.random.random((2, 1)) - 1
def _sigmoid(self, _x, deriv=False):
if deriv:
return _x * (1 - _x)
return 1 / (1 + np.exp(-_x))
def predict(self, tinputs):
"Predicts a new value based on an input"
return self._sigmoid(np.dot(tinputs, self.weights))
def train(self, tinputs, toutputs, iterations):
"Trains the model and updates its weights through back propagation using gradient descent"
for iteration in range(iterations):
output = self.predict(tinputs)
error = toutputs - output
adjustment = np.dot(tinputs.T, error * self._sigmoid(output, 1))
self.weights += adjustment
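# Minimal usage sketch (illustrative data, not from the original project):
#   p = Perceptron()
#   tinputs = np.array([[0, 1], [1, 0], [1, 1]])   # shape (N, 2) to match the (2, 1) weight matrix
#   toutputs = np.array([[0], [1], [1]])           # target: follow the first input
#   p.train(tinputs, toutputs, 10000)
#   p.predict(np.array([1, 0]))                    # tends towards 1 after training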
``` |
{
"source": "4379711/easyrequest",
"score": 3
} |
#### File: easyrequest/commands/stop_spider.py
```python
from os.path import exists
from subprocess import Popen, PIPE
import psutil
import os
import platform
import signal
import time
def stop_spider(spider_name, path=None):
"""
Stop a running spider .
"""
if path is None:
base_path = os.getcwd()
else:
base_path = path
to_read_file = str(spider_name) + '.pid'
file_path = os.path.join(base_path, to_read_file)
if not exists(file_path):
print('Spider may not be running, or you forgot to set [ RECORD_PID = True ] in settings.py !')
return
with open(file_path, 'r', encoding='utf-8') as f:
pid = int(f.readline())
pid_list = psutil.pids()
if pid not in pid_list:
print('Spider may not be running !')
return
os.remove(file_path)
# signal can only be used on linux .
if platform.system() == 'Windows':
command = f'taskkill /pid {pid} -f'
pp = Popen(command,
shell=True,
universal_newlines=True,
stdin=PIPE,
stderr=PIPE,
stdout=PIPE)
pp.communicate()
else:
os.killpg(os.getpgid(int(pid)), signal.SIGKILL)
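# os.killpg sends SIGKILL to the spider's whole process group (POSIX only).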
print('Checking result ...\n')
time.sleep(0.5)
# CHECK RESULT
pid_list = psutil.pids()
if int(pid) not in pid_list:
print('\tSpider stopped successfully !')
else:
print('\tStopping the spider may have failed !')
```
#### File: easyrequest/error/__init__.py
```python
class LoadError(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return 'Cannot load %s . Check that the spider files <Apps.xxx.py , Models.xxxItems.py> are correct !' % self.name
__repr__ = __str__
class ReturnTypeError(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return 'Return Type must be %s .' % self.name.__name__
__repr__ = __str__
class ParameterError(Exception):
def __init__(self, whos, name):
self.name = name
self.whos = whos
def __str__(self):
return '%s parameter must be %s .' % (self.whos.__name__, self.name.__name__)
__repr__ = __str__
class RetryError(Exception):
def __init__(self, times, url, err):
self.url = url
self.times = times
self.err = err
def __str__(self):
return 'Request <%s> Retry <%d> times still failed ,error:\n\n %s' % (self.url, self.times, self.err)
__repr__ = __str__
class ConfigError(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return 'Error of %s config in settings .' % self.name
__repr__ = __str__
class RequestUrl(Exception):
def __str__(self):
return 'URL must be type str !'
__repr__ = __str__
```
#### File: easyrequest/utils/__init__.py
```python
import re
import os
import sys
from hashlib import md5
from os.path import join, exists
from threading import Lock
from easyrequest.error import RequestUrl
from .format_print import pprint
from .load_module import *
def average_number_of_groups(m, n):
"""
Split a data into N parts of approximate size .
:param m: Total length of data to be split .
:param n: Need to be divided into several portions .
:return: list ,index +1 that should be split .
"""
base_num = m // n
over_num = m % n
result = [base_num for _ in range(n)]
for i in range(over_num):
result[i] = result[i] + 1
for i in range(n - 1):
result[i + 1] = result[i] + result[i + 1]
return result
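# e.g. average_number_of_groups(10, 3) -> [4, 7, 10]: split after items 4 and 7, giving chunks of sizes 4, 3 and 3.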
def split_urls_by_group(urls, n):
aa = average_number_of_groups(len(urls), n)
for i in range(len(aa)):
if i == 0:
yield (urls[:aa[i]])
else:
yield (urls[aa[i - 1]:aa[i]])
def write_process_pid(spider_name):
# record process id to file
base_path = os.getcwd()
to_write_file = str(spider_name) + '.pid'
file_path = os.path.join(base_path, to_write_file)
pid = os.getpid()
with open(file_path, 'w', encoding='utf-8') as f:
f.write(str(pid))
f.flush()
def check_spider_name(spider_name):
if not re.search(r'^[_a-zA-Z]\w*$', spider_name):
print('\033[32mError: spider names must begin with a letter and contain only\n'
'letters, numbers and underscores\033[0m')
return False
else:
return True
def check_project_file(spider_name):
cmd_path = os.getcwd()
sys.path.insert(0, cmd_path)
spider_file_name = f'{spider_name}.py'
if not exists(join(cmd_path, 'Apps', spider_file_name)):
print(f'\033[32mError: Spider "{spider_name}" does not exist\033[0m')
return False
if not exists(join(cmd_path, 'settings.py')):
print(f'\033[32mError: Check that your project path is correct! \033[0m')
pprint('You must execute the RunSpider command in the project directory')
return False
return True
def get_md5(url, kwargs):
if not isinstance(url, str):
raise RequestUrl
md5_str = md5((url + str(kwargs)).encode('utf-8')).hexdigest()
return md5_str
class RecordTaskInfo:
def __init__(self):
# self._all_request = set()
# self._all_parse = set()
self._all_request = []
self._request_success = 0
self._request_failed = 0
self._parse_success = 0
self._parse_failed = 0
self._save_failed = 0
self._save_success = 0
self._lock0 = Lock()
self._lock1 = Lock()
self._lock2 = Lock()
self._lock3 = Lock()
self._lock4 = Lock()
self._lock5 = Lock()
self._lock6 = Lock()
self._lock7 = Lock()
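# one Lock per counter so worker threads can update different counters without contending on a single lock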
# When _all_request is a set() .
# def request_add(self, value):
# with self._lock0:
# self._all_request.add(value)
# def parse_add(self, value):
# with self._lock1:
# self._all_parse.add(value)
def request_add(self, value):
with self._lock0:
self._all_request.append(value)
def parse_add(self, value):
with self._lock0:
self._all_request.remove(value)
def request_success_plus(self):
with self._lock2:
self._request_success += 1
def request_failed_plus(self):
with self._lock3:
self._request_failed += 1
def parse_success_plus(self):
with self._lock4:
self._parse_success += 1
def parse_failed_plus(self):
with self._lock5:
self._parse_failed += 1
def save_failed_plus(self):
with self._lock6:
self._save_failed += 1
def save_success_plus(self):
with self._lock7:
self._save_success += 1
def is_in_set(self, value):
return value in self._all_request
# @property
# def two_set_same(self):
# return self._all_request == self._all_parse
@property
def requests_is_empty(self):
return self._all_request == []
@property
def info(self):
return (self._request_success,
self._request_failed,
self._parse_success,
self._parse_failed,
self._save_success,
self._save_failed
)
```
#### File: easyrequest/utils/template.py
```python
import os
import re
from string import Template
class MyTemplate(Template):
delimiter = '>>'
def render_template_file(path, **kwargs):
with open(path, 'rb') as fp:
raw = fp.read().decode('utf-8')
content = MyTemplate(raw).substitute(**kwargs)
render_path = path[:-len('.template')] if path.endswith('.template') else path
with open(render_path, 'wb') as fp:
fp.write(content.encode('utf-8'))
if path.endswith('.template'):
os.remove(path)
CAMELCASE_INVALID_CHARS = re.compile(r'[^a-zA-Z\d]')
def string_camelcase(string):
""" Convert a word to its CamelCase version and remove invalid chars
>>> string_camelcase('miss-you')
'MissYou'
>>> string_camelcase('miss_you')
'MissYou'
"""
return CAMELCASE_INVALID_CHARS.sub('', string.title())
``` |
{
"source": "4379711/functools_lyl",
"score": 3
} |
#### File: geeker/functions/timeout.py
```python
import ctypes
import inspect
import time
import threading
from functools import wraps
# import traceback
# import sys
class MyThread(threading.Thread):
def __init__(self, target, args=None, kwargs=None):
super().__init__()
self.func = target
self.args = args
self.kwargs = kwargs
self.result = '<_^&^_@**@__what fuck!__@**@_^&^_>'
self.exitcode = False
self.exception = None
# self.exc_traceback = None
def _run(self):
self.result = self.func(*self.args, **self.kwargs)
def run(self): # Overwrite run() method
try:
self._run()
except Exception as e:
self.exitcode = True
self.exception = e
# self.exc_traceback = sys.exc_info()
@property
def get_result(self):
return self.result
@staticmethod
def _async_raise(tid, exctype):
"""raises the exception, performs cleanup if needed"""
# tid = ctypes.c_long(tid)
if not inspect.isclass(exctype):
exctype = type(exctype)
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
# res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
# if res == 0:
# raise ValueError("invalid thread id")
# elif res != 1:
# ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
# raise SystemError("PyThreadState_SetAsyncExc failed !")
def stop_thread(self, ident=None):
if ident:
self._async_raise(ident, SystemExit)
else:
# self._async_raise(threading.get_ident(), SystemExit)
for fuck in threading.enumerate():
if fuck is self:
self._async_raise(fuck.ident, SystemExit)
break
class TimeOut:
def __init__(self, limit_time=1):
if not (isinstance(limit_time, (int, float)) and limit_time > 0):
raise ValueError('Parameter <limit_time> must be an int or float greater than 0!')
self.limit = int(limit_time * 10)
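# limit counts 0.1-second polling intervals (limit_time seconds * 10), matched by time.sleep(0.1) in the wrapper below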
def __raise_error(self, th):
# exec_type = th.exc_traceback[0]
# tmp_str = traceback.format_exception(th.exc_traceback[0], th.exc_traceback[1], th.exc_traceback[2])
# str_ = ''.join(tmp_str[1:])
#
# th.stop_thread()
#
# # raise exec_type('\n'+str_)
raise th.exception
def __call__(self, func):
@wraps(func)
def warp_(*args, **kwargs):
warp_.__name__ = func.__name__
th = MyThread(target=func, args=args, kwargs=kwargs)
th.daemon = True
th.start()
# Add 0.1 second here
for _ in range(self.limit + 2):
if th.exitcode:
self.__raise_error(th)
is_result = th.get_result
if is_result != '<_^&^_@**@__what fuck!__@**@_^&^_>':
return is_result
if _ == self.limit:
# kill the thread by itself
th.stop_thread()
raise TimeoutError('Task did not finish within the specified time !')
time.sleep(0.1)
return warp_
```
#### File: geeker/mylog/log_config.py
```python
import logging
import logging.handlers
from logging.handlers import TimedRotatingFileHandler
import gzip
import os
import time
from geeker.functions import Singleton
class GzTimedRotatingFileHandler(TimedRotatingFileHandler):
def __init__(self, filename, when, interval, **kwargs):
super(GzTimedRotatingFileHandler, self).__init__(filename, when, interval, **kwargs)
@staticmethod
def do_gzip(old_log):
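# compress the rotated log into a .gz file (dropping the first '.log' from its name) and delete the uncompressed original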
with open(old_log, 'rb') as old:
with gzip.open(old_log.replace('.log', '', 1) + '.gz', 'wb') as comp_log:
comp_log.writelines(old)
os.remove(old_log)
# overwrite
def doRollover(self):
if self.stream:
self.stream.close()
self.stream = None
current_time = int(time.time())
dst_now = time.localtime(current_time)[-1]
t = self.rolloverAt - self.interval
if self.utc:
time_tuple = time.gmtime(t)
else:
time_tuple = time.localtime(t)
dst_then = time_tuple[-1]
if dst_now != dst_then:
if dst_now:
addend = 3600
else:
addend = -3600
time_tuple = time.localtime(t + addend)
dfn = self.baseFilename + "." + time.strftime(self.suffix, time_tuple)
if os.path.exists(dfn):
os.remove(dfn)
if os.path.exists(self.baseFilename):
os.rename(self.baseFilename, dfn)
self.do_gzip(dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
new_rollover_at = self.computeRollover(current_time)
while new_rollover_at <= current_time:
new_rollover_at = new_rollover_at + self.interval
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
ds_att_rollover = time.localtime(new_rollover_at)[-1]
if dst_now != ds_att_rollover:
if not dst_now: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
new_rollover_at += addend
self.rolloverAt = new_rollover_at
class LogBase(Singleton):
def __init__(self, dir_path='./logs/',
logger_name='special_log_name',
info_name='info.log',
error_name='error.log',
warning_name='warning.log',
debug_name='debug.log',
interval=7,
detail=False,
debug=False,
info=True,
error=True,
warning=True,
):
self.info_name = info_name
self.error_name = error_name
self.warning_name = warning_name
self.debug_name = debug_name
self.path = dir_path
self.interval = interval
self._logger = logging.getLogger(logger_name)
self._debug = debug
self._warning = warning
self._error = error
self._info = info
self._detail = detail
def __handler(self, log_name):
handler = GzTimedRotatingFileHandler(self.path + log_name,
when='D',
interval=self.interval,
backupCount=3,
encoding='utf-8')
return handler
def __filter_message(self, handler, log_level):
"""
Filter out records of other levels so the handler only keeps messages at its own log level.
:param handler: handler
:param log_level: numeric log level (e.g. logging.INFO)
:return: handler
"""
if self._detail:
formatter = logging.Formatter("%(asctime)s - %(filename)s - %(funcName)s - %(lineno)d - %(message)s",
"%Y%m%d %H:%M:%S")
else:
formatter = logging.Formatter("%(asctime)s - %(message)s", "%Y%m%d %H:%M:%S")
_filter = logging.Filter()
handler.suffix = "%Y%m%d.log"
handler.setFormatter(formatter)
handler.setLevel(log_level)
_filter.filter = lambda record: record.levelno == log_level
handler.addFilter(_filter)
return handler
def _get_logger(self):
# add handlers only once, to prevent duplicate log records
if not self._logger.handlers:
# set the logger level, DEBUG by default
self._logger.setLevel(logging.DEBUG)
levels = [self._debug, self._info, self._warning, self._error]
log_names = [self.debug_name, self.info_name, self.warning_name, self.error_name]
levels_ = [10, 20, 30, 40]
for i, lev in enumerate(levels):
if lev:
_handler = self.__handler(log_names[i])
_handler = self.__filter_message(_handler, levels_[i])
# attach the handler to the logger
self._logger.addHandler(_handler)
return self._logger
``` |
{
"source": "4383/anishot",
"score": 3
} |
#### File: 4383/anishot/anishot.py
```python
__author__ = '<NAME>'
import os
import sys
import gflags
import imageio
import numpy
from PIL import Image
from PIL.ImageDraw import Draw
gflags.DEFINE_string('inp', None, 'Input screenshot image')
gflags.DEFINE_string('out', None, 'Output antimated GIF')
gflags.DEFINE_integer('h', 0, 'Window height')
gflags.DEFINE_integer('maxspeed', 200, 'Max speed on scroll px/frame')
gflags.DEFINE_list('stops', [], 'List of stops for scrolling')
gflags.DEFINE_integer('zoom', 0, 'Number of steps on initial zoom in')
gflags.DEFINE_float('zoom_frac', .3, 'Fraction of screenshot to see on zoomout')
gflags.register_validator('inp', os.path.exists, 'Input screenshot required')
gflags.register_validator('h', lambda v: v > 0, 'Window height required')
F = gflags.FLAGS
def add_frame(frames, frame, duration):
frames.append((prettify(frame), duration))
def prettify(frame):
off = 5
h, w = frame.shape[:2]
pretty = Image.new('RGB', (w + off, h + off), '#ffffff')
draw = Draw(pretty)
draw.rectangle([off, off, w + off, h + off], '#cccccc', '#cccccc')
pretty.paste(Image.fromarray(frame), (0, 0))
draw.rectangle([0, 0, w, h], outline='#666666')
return numpy.array(pretty)
def make_zoomin(image, frames):
h, w = image.shape[:2]
scale = F.h / float(h) / F.zoom_frac
step = (1 - scale) / (F.zoom + 1)
original = Image.fromarray(image)
for i in range(F.zoom):
small = original.resize(
(int(w * scale + .5), int(h * scale + .5)), Image.LANCZOS)
scale += step
small_w = small.size[0]
frame = Image.new('RGB', (w, F.h), '#ffffff')
off = (w - small_w) // 2
frame.paste(small, (off, 0))
draw = Draw(frame)
draw.rectangle([off, 0, off + small_w, F.h], outline='#666666')
add_frame(frames, numpy.array(frame), .2 if i > 0 else 1)
def make_scroll(image, frames):
h, w = image.shape[:2]
stops = [0] + list(map(int, F.stops)) + [h - F.h]
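# scroll positions: top of the page, any user-supplied stops, then the last offset where a full window still fits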
add_frame(frames, image[stops[0]:stops[0] + F.h, :], 2)
for i in range(len(stops) - 1):
s0, s1 = stops[i:i + 2]
speed = 10
y = s0 + speed
while y < s1:
add_frame(frames, image[y:y + F.h, :], .01)
y += speed
speed = min(speed * 2, F.maxspeed)
add_frame(frames, image[s1:s1 + F.h, :], 2)
def main(argv):
try:
F(argv)
image = imageio.imread(F.inp)
frames = []
if F.zoom:
make_zoomin(image, frames)
make_scroll(image, frames)
imageio.mimwrite(F.out,
map(lambda f: f[0], frames),
duration=list(map(lambda f: f[1], frames)))
except gflags.FlagsError as e:
print('e: ', e)
print('Usage: %s' % F)
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv))
``` |
{
"source": "4383/beagle",
"score": 2
} |
#### File: beagle/beagle/openstack.py
```python
from cliff.formatters import base
DEFAULT_URL = 'http://codesearch.openstack.org'
class OSLinkFormatter(base.ListFormatter):
"OpenStack cgit link formatter"
def add_argument_group(self, parser):
pass
def emit_list(self, column_names, data, stdout, parsed_args):
fmt = 'http://git.openstack.org/cgit/openstack/{Repository}/tree/{Filename}#n{Line} : {Text}\n'
for row in data:
row_d = {
c: r
for c, r in zip(column_names, row)
}
if parsed_args.context_lines:
before = row_d['Before'].machine_readable()
write_lines_with_offset(
fmt,
row_d,
before,
-1 * len(before),
stdout,
)
stdout.write(fmt.format(**row_d))
if parsed_args.context_lines:
write_lines_with_offset(
fmt,
row_d,
row_d['After'].machine_readable(),
1,
stdout,
)
``` |
{
"source": "4383/dogpile.cache",
"score": 2
} |
#### File: dogpile/util/compat.py
```python
import sys
py2k = sys.version_info < (3, 0)
py3k = sys.version_info >= (3, 0)
py32 = sys.version_info >= (3, 2)
py27 = sys.version_info >= (2, 7)
jython = sys.platform.startswith('java')
win32 = sys.platform.startswith('win')
try:
import threading
except ImportError:
import dummy_threading as threading # noqa
if py3k: # pragma: no cover
string_types = str,
text_type = str
string_type = str
if py32:
callable = callable
else:
def callable(fn):
return hasattr(fn, '__call__')
def u(s):
return s
def ue(s):
return s
import configparser
import io
import _thread as thread
else:
string_types = basestring,
text_type = unicode
string_type = str
def u(s):
return unicode(s, "utf-8")
def ue(s):
return unicode(s, "unicode_escape")
import ConfigParser as configparser # noqa
import StringIO as io # noqa
callable = callable # noqa
import thread # noqa
if py3k:
import collections
ArgSpec = collections.namedtuple(
"ArgSpec",
["args", "varargs", "keywords", "defaults"])
from inspect import getfullargspec as inspect_getfullargspec
def inspect_getargspec(func):
return ArgSpec(
*inspect_getfullargspec(func)[0:4]
)
else:
from inspect import getargspec as inspect_getargspec # noqa
if py3k or jython:
import pickle
else:
import cPickle as pickle # noqa
if py3k:
def read_config_file(config, fileobj):
return config.read_file(fileobj)
else:
def read_config_file(config, fileobj):
return config.readfp(fileobj)
def timedelta_total_seconds(td):
if py27:
return td.total_seconds()
else:
return (td.microseconds + (
td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
``` |
{
"source": "4383/openstacker",
"score": 2
} |
#### File: openstacker/vigo/config.py
```python
from pathlib import Path
path = Path.home()
vigo_dir = path / ".vigo"
governance_url = "https://github.com/openstack/governance"
projects = vigo_dir / "governance" / "reference" / "projects.yaml"
class GeneralConfig:
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = object.__new__(cls)
return cls._instance
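# classic singleton: every construction returns the same object; note that __init__ still runs
# each time, so debug/verbose are re-assigned on every call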
def __init__(self, debug=False, verbose=False):
"""Initialize general configuration."""
self.debug = debug
self.verbose = verbose
``` |
{
"source": "4383/poc-bidon",
"score": 2
} |
#### File: poc-bidon/bidon/__init__.py
```python
def main():
print("I'm nothing")
``` |
{
"source": "4383/pymemcache",
"score": 3
} |
#### File: pymemcache/client/rendezvous.py
```python
from pymemcache.client.murmur3 import murmur3_32
class RendezvousHash(object):
"""
Implements the Highest Random Weight (HRW) hashing algorithm most
commonly referred to as rendezvous hashing.
Originally developed as part of python-clandestined.
Copyright (c) 2014 <NAME>
"""
def __init__(self, nodes=None, seed=0, hash_function=murmur3_32):
"""
Constructor.
"""
self.nodes = []
self.seed = seed
if nodes is not None:
self.nodes = nodes
self.hash_function = lambda x: hash_function(x, seed)
def add_node(self, node):
if node not in self.nodes:
self.nodes.append(node)
def remove_node(self, node):
if node in self.nodes:
self.nodes.remove(node)
else:
raise ValueError("No such node %s to remove" % (node))
def get_node(self, key):
high_score = -1
winner = None
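# Highest Random Weight: score each node by hashing '<node>-<key>'; the highest score wins,
# with ties broken in favour of the lexicographically larger node name.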
for node in self.nodes:
score = self.hash_function(
"%s-%s" % (node, key))
if score > high_score:
(high_score, winner) = (score, node)
elif score == high_score:
(high_score, winner) = (score, max(str(node), str(winner)))
return winner
``` |
{
"source": "4383/recalbox-manager",
"score": 2
} |
#### File: api/views/support.py
```python
from project.api.views import ApiBaseJsonView
from project.utils.cli_process import SimpleCaller
class SupportScriptView(ApiBaseJsonView):
_default_state = 'pending'
def get(self, request, *args, **kwargs):
self.call()
return super(SupportScriptView, self).get(request, *args, **kwargs)
def call(self):
call = SimpleCaller('.')
version = call('git', 'describe', '.')
print(version)
return version.strip()
```
#### File: project/assets_cartographer/parser.py
```python
import os
from django.conf import settings
from django.template import Context
from django.template.loader import get_template as loader_get_template
from django.contrib.staticfiles import finders
class AssetMapError(Exception):
pass
class StaticfileAssetNotFound(Exception):
pass
class AssetTagsManagerBase(object):
"""
    Base class for managing assets using a given asset map.

    Takes an asset map, resolves its files and renders their HTML "loader"
    fragments. It does not compress/minify/uglify assets; it only renders the
    tags needed to load them from your templates.
@assets_map: file maps for an asset kind (not the full asset map)
"""
def __init__(self, assets_map):
self.assets_map = assets_map
def render_fragment(self, template, context=None):
"""
Render fragment using given django template
"""
return template.render(context)
def static_url(self, filepath):
"""
        Return the static file url for the given relative file path.

        If ASSETS_STRICT is enabled, first check that the file exists in any
        static directory and raise StaticfileAssetNotFound when it does not.
"""
if settings.ASSETS_STRICT:
if not finders.find(filepath):
raise StaticfileAssetNotFound("Asset file cannot be finded in any static directory: {}".format(filepath))
return os.path.join(settings.STATIC_URL, filepath)
def get_files(self, name):
"""
Find and return asset file url given package name
"""
try:
file_paths = self.assets_map[name]
except KeyError:
if settings.ASSETS_STRICT:
raise AssetMapError("Asset key '{}' does not exists in your asset map".format(name))
else:
if settings.ASSETS_PACKAGED:
return [self.static_url(name)]
else:
return [self.static_url(item) for item in file_paths]
return []
def render(self, names, template):
"""
Return rendered given template for each asset files of each package names
"""
tags = []
for name in names:
asset_files = self.get_files(name)
for item in filter(None, asset_files):
tags.append( self.render_fragment(template, context=Context({"ASSET_URL": item})) )
return '\n'.join(tags)
class AssetTagsManagerFromManifest(AssetTagsManagerBase):
"""
Override AssetTagsManagerBase to implement management from the whole
manifest
"""
def __init__(self, manifest):
self.manifest = manifest # full asset map from settings
self.templates = self.get_templates()
def get_templates(self):
"""
        Load the tag templates declared in settings.ASSETS_TAG_TEMPLATES
"""
templates = {}
for k,v in settings.ASSETS_TAG_TEMPLATES.items():
templates[k] = loader_get_template(v)
return templates
def render_for_kind(self, names, kind):
self.assets_map = self.manifest[kind]
return self.render(names, self.templates[kind])
```
#### File: project/utils/__init__.py
```python
def keynat(string):
"""
A natural sort helper function for sort() and sorted()
without using regular expressions or exceptions.
>>> items = ('Z', 'a', '10th', '1st', '9')
>>> sorted(items)
['10th', '1st', '9', 'Z', 'a']
>>> sorted(items, key=keynat)
['1st', '9', '10th', 'a', 'Z']
:type string: string
:param string: String to compare
:rtype: int
:return: Position
"""
it = type(1)
r = []
for c in string:
if c.isdigit():
d = int(c)
if r and type( r[-1] ) == it:
r[-1] = r[-1] * 10 + d
else:
r.append(d)
else:
r.append(c.lower())
return r
``` |
{
"source": "4383/rogue",
"score": 2
} |
#### File: api/config/add.py
```python
from rogue.api.config import file as configfile
def add(path, options):
cfg = configfile.read(path)
section, key = options[0].split('.')
value = options[1]
if section not in cfg.sections():
cfg.add_section(section)
cfg.set(section, key, value)
configfile.write(cfg, path)
```
#### File: api/config/file.py
```python
import configparser
def read(configfile):
config = configparser.ConfigParser()
config.read(configfile)
return config
def write(config, cfgfile):
with open(cfgfile, "w+") as configfile:
config.write(configfile)
```
#### File: api/profil/controler.py
```python
import click
from cookiecutter import main as cookiecutter
default_template = 'https://github.com/audreyr/cookiecutter-pypackage'
def create(context={}):
"""Create a new project"""
if not context['template']:
context['template'] = default_template
cookiecutter.cookiecutter(
        template=context['template'],
extra_context=context,
output_dir='test',
checkout="master",
no_input=True
)
```
#### File: api/todo/controler.py
```python
import os
import sqlite3
BASE_PATH = os.path.dirname(__file__)
USER_PATH = os.path.expanduser('~')
TODO_PATH = os.path.join(USER_PATH, '.rogue')
DATABASE_PATH = os.path.join(TODO_PATH, 'todo.db')
def sort(tasks):
sorted_tasks = {
'hight': [],
'low': [],
'normal': []
}
for el in tasks:
priority = humanize(el[5])
sorted_tasks[priority].append(el)
return sorted_tasks
def convert_for_database(priority):
binding = {'hight': 'H', 'low': 'L', 'normal': 'N'}
return binding[priority]
def humanize(priority):
binding = {'H': 'hight', 'L': 'low', 'N': 'normal'}
return binding[priority]
class Todo():
connection = None
cursor = None
def __init__(self):
if not self.__can_store():
self.__connect()
self.__initialize()
else:
self.__connect()
def __del__(self):
self.__disconnect()
def __connect(self):
self.connection = sqlite3.connect(DATABASE_PATH)
self.cursor = self.connection.cursor()
def __disconnect(self):
self.connection.close()
def __can_store(self):
if not os.path.isfile(DATABASE_PATH):
return False
return True
def __initialize(self):
if not os.path.isdir(TODO_PATH):
os.makedirs(TODO_PATH)
self.cursor.execute('''
CREATE TABLE tasks
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
content TEXT,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
finished_at DATETIME,
active BOOLEAN DEFAULT 1 CHECK (active IN (0, 1)),
priority TEXT DEFAULT 'N' CHECK (priority IN ('H', 'L', 'N'))
)
''')
self.connection.commit()
def add(self, task, priority):
self.cursor.execute('''
INSERT INTO tasks (content, priority) VALUES (?, ?)
'''
, [task, convert_for_database(priority)])
self.connection.commit()
def ls(self, active=False):
return self.cursor.execute('''
SELECT * FROM tasks
''')
def info(self, identifier):
request = '''
SELECT * FROM tasks WHERE id IN (?)
'''
return self.cursor.execute(request, [identifier])
def infos(self, identifiers):
request = '''
SELECT * FROM tasks WHERE id IN ({})
'''.format(','.join(['?']*len(identifiers)))
return self.cursor.execute(request, identifiers)
def done(self, identifier):
self.cursor.execute('''
UPDATE tasks SET
active = 0
WHERE id=?
''', [identifier])
self.connection.commit()
def delete(self, identifier):
self.cursor.execute('''
DELETE FROM tasks
WHERE id=?
''', [identifier])
self.connection.commit()
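# Illustrative usage sketch (not part of the original module): tasks are kept in
# an SQLite database under ~/.rogue, so running this touches the home directory.
# Note that __connect() runs before __initialize() creates ~/.rogue, so the
# directory must already exist on a fresh machine.
if __name__ == "__main__":
    todo = Todo()
    todo.add('write the changelog', 'hight')   # 'hight' is the key used above
    for row in todo.ls():
        identifier, content, created, finished, active, priority = row
        print(identifier, humanize(priority), content)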
```
#### File: rogue/commands/todo.py
```python
import os
import os.path
import shutil
import time
import click
import rogue.api.todo.controler as api_todo
import rogue.api.config.controler as api_config
from rogue.utilities import console
from rogue.utilities import system
from terminaltables import SingleTable
@click.group()
def todo(args=None):
"""Manage todo list"""
@todo.command()
@click.argument('task', type=str)
@click.option('-p', '--priority', default="normal",
type=click.Choice(['hight', 'low', 'normal']))
@click.option('--config', default=".rogue.cfg",
type=click.File())
def add(task, priority, config):
"""Add a new task to the todo list"""
store = api_todo.Todo()
store.add(task, priority)
@todo.command()
@click.option('--config', default=".rogue.cfg",
type=click.File())
def list(config):
"""List the todo list"""
store = api_todo.Todo()
#tasks = api_sort(store.ls())
tasks = store.ls()
headers = ['id', 'Priority', 'done', 'description']
data = []
for el in tasks:
identifier, content, _, _, active, priority = el
data.append([identifier, priority, "" if active else "X", content])
console.show_table(data, headers, 'tasks')
@todo.command()
@click.argument('task', type=int, nargs=+1)
@click.option('--config', default=".rogue.cfg",
type=click.File())
def delete(task, config):
"""Delete a task from the todo list"""
store = api_todo.Todo()
store.delete(task)
@todo.command()
@click.argument('task', nargs=+1)
@click.option('--config', default=".rogue.cfg",
type=click.File())
def done(task, config):
"""Mark a task is done from the todo list"""
store = api_todo.Todo()
store.done(task)
@todo.command()
@click.argument('task', type=str)
@click.option('--config', default=".rogue.cfg",
type=click.File())
def info(task, config):
"""Get task information from the todo list"""
store = api_todo.Todo()
result = store.info(task).fetchone()
print("Id: {}".format(result[0]))
print("Description: {}".format(result[1]))
print("Created: {}".format(result[2]))
if __name__ == "__main__":
    todo()
``` |
{
"source": "4383/sagacity",
"score": 2
} |
#### File: sagacity/sagacity/git.py
```python
import sys
from sagacity import execute
SEP = "ø"
def fetch(remote='--all'):
cmd = ['git', 'fetch', remote]
outs, errs = execute.execute(cmd)
if errs:
print(errs)
sys.exit(200)
def log(since=None, until=None,
pretty=f'%cd{SEP}%an{SEP}%ae{SEP}%H', no_merges=True, fetching=True,
remote_to_fetch='origin', branch=None,
path=None):
if fetching:
fetch(remote_to_fetch)
cmd = ['git', 'log']
if since:
cmd.extend(['--since', since.strftime("%b %d %Y")])
if until:
cmd.extend(['--until', until.strftime("%b %d %Y")])
if pretty:
cmd.append(f'--pretty=format:{pretty}')
if no_merges:
cmd.append('--no-merges')
if branch:
cmd.append(branch)
if path:
cmd.extend(['--', path])
outs, errs = execute.execute(cmd, shell=False)
if errs:
print(errs)
sys.exit(201)
return outs.decode('utf-8'), errs.decode('utf-8')
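# Illustrative usage sketch (not part of the original module): every log line is
# formatted as date<SEP>author<SEP>email<SEP>hash, so splitting on SEP recovers
# the fields. Run it from inside a git checkout.
if __name__ == "__main__":
    import datetime
    since = datetime.date.today() - datetime.timedelta(days=30)
    out, _ = log(since=since, fetching=False)   # skip 'git fetch' for the demo
    for line in out.splitlines():
        date, author, email, commit = line.split(SEP)
        print(commit[:8], author)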
``` |
{
"source": "4383/tobiko",
"score": 2
} |
#### File: tobiko/cmd/base.py
```python
from __future__ import absolute_import
import os
import logging
import argparse
from oslo_log import log
from tobiko.common.managers import ansible
from tobiko import config
LOG = log.getLogger(__name__)
class TobikoCMD(object):
"""Manages different command line utilities."""
def __init__(self):
config.CONF.tobiko.use_stderr = True
log.setup(config.CONF.tobiko, 'tobiko')
self.parser = self.get_parser()
self.args = (self.parser).parse_args()
curr_dir = os.path.dirname(__file__)
self.playbooks_dir = os.path.join(curr_dir,
"../tests/scenario/playbooks")
self.ansibleManager = ansible.AnsibleManager(self.playbooks_dir)
def get_parser(self):
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('--verbose', '-v', action='count',
help='Make the output more verbose, incremental.')
parser.add_argument('--quiet', '-q', action='count',
help='Make the output less verbose, incremental.')
return parser
def set_stream_handler_logging_level(self):
num_quiet = self.args.quiet or 0
num_verb = self.args.verbose or 0
level = logging.WARNING - (num_verb * 10) + (num_quiet * 10)
root_logger = logging.getLogger()
for handler in root_logger.handlers:
if isinstance(handler, logging.StreamHandler):
handler.setLevel(level)
```
#### File: tobiko/cmd/run.py
```python
from __future__ import absolute_import
import argparse
import sys
import paramiko
from oslo_log import log
LOG = log.getLogger(__name__)
class Tobiko():
def __init__(self):
self.parser = self.get_parser()
self.args = (self.parser).parse_args()
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
def get_parser(self):
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument(
'--host',
help="The name of the host where your cloud is deployed.\n")
parser.add_argument(
'--key', '-k',
help="They SSH key to use to connect the host.")
return parser
def verify_connection(self):
"""Verifies it's able to connect the host provided by the user."""
try:
self.ssh.connect(self.args.host)
except paramiko.ssh_exception.AuthenticationException:
LOG.error("Unable to connect %r", self.args.host)
def main():
"""Run CLI main entry."""
tobiko = Tobiko()
tobiko.verify_connection()
# run.discover_environment()
if __name__ == '__main__':
sys.exit(main())
```
#### File: openstack/heat/_stack.py
```python
from __future__ import absolute_import
import collections
import time
import typing # noqa
from heatclient import exc
from oslo_log import log
import tobiko
from tobiko import config
from tobiko.openstack.heat import _client
from tobiko.openstack.heat import _template
LOG = log.getLogger(__name__)
# Status
INIT_IN_PROGRESS = 'INIT_IN_PROGRESS'
INIT_COMPLETE = 'INIT_COMPLETE'
INIT_FAILED = 'INIT_FAILED'
CREATE_IN_PROGRESS = 'CREATE_IN_PROGRESS'
CREATE_COMPLETE = 'CREATE_COMPLETE'
CREATE_FAILED = 'CREATE_FAILED'
DELETE_IN_PROGRESS = 'DELETE_IN_PROGRESS'
DELETE_COMPLETE = 'DELETE_COMPLETE'
DELETE_FAILED = 'DELETE_FAILED'
TEMPLATE_FILE_SUFFIX = '.yaml'
def heat_stack_parameters(obj, stack=None):
if isinstance(obj, HeatStackParametersFixture):
parameters = obj
elif obj is None or isinstance(obj, collections.Mapping):
parameters = HeatStackParametersFixture(stack, obj)
else:
parameters = tobiko.get_fixture(obj)
tobiko.check_valid_type(parameters, HeatStackParametersFixture)
if stack:
parameters.stack = parameters.stack or stack
tobiko.check_valid_type(parameters.stack, type(None), HeatStackFixture)
return parameters
class HeatStackFixture(tobiko.SharedFixture):
"""Manages Heat stacks."""
client = None
retry_create_stack = 1
wait_interval = 5
stack_name = None # type: str
template = None # type: _template.HeatTemplateFixture
stack = None
parameters = None # type: HeatStackParametersFixture
def __init__(self, stack_name=None, template=None, parameters=None,
wait_interval=None, client=None):
super(HeatStackFixture, self).__init__()
self.stack_name = stack_name = (stack_name or
self.stack_name or
self.fixture_name)
self.template = _template.heat_template(template or self.template)
self.parameters = heat_stack_parameters(
stack=self, obj=(parameters or self.parameters))
self.client = client or self.client
if config.get_bool_env('TOBIKO_PREVENT_CREATE'):
self.retry_create_stack = 0
if wait_interval:
self.wait_interval = wait_interval
def _get_retry_value(self, retry):
if retry is None:
retry = self.retry_create_stack
if retry is None:
retry = 1
return int(retry)
def setup_fixture(self):
self.setup_template()
self.setup_client()
self.setup_stack()
def setup_template(self):
tobiko.setup_fixture(self.template)
def setup_client(self):
self.client = _client.heat_client(self.client)
def setup_stack(self):
self.create_stack()
def get_stack_parameters(self):
return tobiko.reset_fixture(self.parameters).values
def create_stack(self, retry=None):
"""Creates stack based on passed parameters."""
created_stack_ids = set()
retry = self._get_retry_value(retry)
while True:
stack = self.wait_for_stack_status(
expected_status={CREATE_COMPLETE, CREATE_FAILED,
CREATE_IN_PROGRESS, DELETE_COMPLETE,
DELETE_FAILED})
stack_status = getattr(stack, 'stack_status', DELETE_COMPLETE)
expected_status = {CREATE_COMPLETE, CREATE_IN_PROGRESS}
if stack_status in expected_status:
LOG.debug('Stack created: %r (id=%r)', self.stack_name,
stack.id)
for stack_id in created_stack_ids:
if self.stack.id != stack_id:
LOG.warning("Concurrent stack creation: delete "
"duplicated stack is %r (id=%r).",
self.stack_name, stack_id)
self.delete_stack(stack_id)
return stack
if not retry:
status_reason = getattr(stack, 'stack_status_reason', None)
raise HeatStackCreationFailed(name=self.stack_name,
observed=stack_status,
expected=expected_status,
status_reason=status_reason)
retry -= 1
if stack_status.endswith('_FAILED'):
LOG.debug('Delete existing failed stack: %r (id=%r)',
self.stack_name, stack.id)
self.delete_stack()
stack = self.wait_for_stack_status(
expected_status={DELETE_COMPLETE})
# Cleanup cached objects
self.stack = self._outputs = self._resources = None
# Compile template parameters
parameters = self.get_stack_parameters()
try:
LOG.debug('Creating stack %r (re-tries left %d)...',
self.stack_name, retry)
stack_id = self.client.stacks.create(
stack_name=self.stack_name,
template=self.template.template_yaml,
parameters=parameters)['stack']['id']
except exc.HTTPConflict:
LOG.debug('Stack %r already exists.', self.stack_name)
else:
created_stack_ids.add(stack_id)
LOG.debug('Creating stack %r (id=%r)...', self.stack_name,
stack_id)
_resources = None
@tobiko.fixture_property
def resources(self):
resources = self._resources
if not self._resources:
self._resources = resources = HeatStackResourceFixture(self)
return resources
def cleanup_fixture(self):
self.setup_client()
self.cleanup_stack()
def cleanup_stack(self):
self.delete_stack()
def delete_stack(self, stack_id=None):
"""Deletes stack."""
if not stack_id:
stack_id = self.stack_id
self.stack = self._outputs = self._resources = None
try:
self.client.stacks.delete(stack_id)
except exc.NotFound:
LOG.debug('Stack already deleted: %r (id=%r)', self.stack_name,
stack_id)
else:
LOG.debug('Deleting stack %r (id=%r)...', self.stack_name,
stack_id)
@property
def stack_id(self):
stack = self.stack
if stack:
return stack.id
else:
return self.stack_name
def get_stack(self, resolve_outputs=False):
"""Returns stack ID."""
try:
self.stack = stack = self.client.stacks.get(
self.stack_name, resolve_outputs=resolve_outputs)
except exc.HTTPNotFound:
self.stack = stack = None
finally:
self._outputs = None
return stack
def wait_for_create_complete(self, check=True):
return self.wait_for_stack_status(expected_status={CREATE_COMPLETE},
check=check)
def wait_for_delete_complete(self, check=True):
return self.wait_for_stack_status(expected_status={DELETE_COMPLETE},
check=check)
def wait_for_stack_status(self, expected_status, check=True):
"""Waits for the stack to reach the given status."""
stack = self.stack or self.get_stack()
while (stack and stack.stack_status.endswith('_IN_PROGRESS') and
stack.stack_status not in expected_status):
LOG.debug("Waiting for %r (id=%r) stack status "
"(observed=%r, expected=%r)", self.stack_name,
stack.id, stack.stack_status, expected_status)
time.sleep(self.wait_interval)
stack = self.get_stack()
if check:
if stack is None:
if DELETE_COMPLETE not in expected_status:
raise HeatStackNotFound(name=self.stack_name)
else:
check_stack_status(stack, expected_status)
return stack
_outputs = None
def get_stack_outputs(self):
outputs = self._outputs
if not outputs:
self._outputs = outputs = HeatStackOutputsFixture(self)
return outputs
outputs = tobiko.fixture_property(get_stack_outputs)
def __getattr__(self, name):
try:
return self.get_stack_outputs().get_value(name)
except HeatStackOutputKeyError:
pass
message = "Object {!r} has no attribute {!r}".format(self, name)
raise AttributeError(message)
class HeatStackKeyError(tobiko.TobikoException):
message = "key {key!r} not found in stack {name!r}"
class HeatStackResourceKeyError(HeatStackKeyError):
message = "resource key {key!r} not found in stack {name!r}"
class HeatStackParameterKeyError(HeatStackKeyError):
message = "parameter key {key!r} not found in stack {name!r}"
class HeatStackOutputKeyError(HeatStackKeyError):
message = "output key {key!r} not found in stack {name!r}"
class HeatStackNamespaceFixture(tobiko.SharedFixture):
key_error = HeatStackKeyError
_keys = None
_values = None
def __init__(self, stack):
super(HeatStackNamespaceFixture, self).__init__()
if stack and not isinstance(stack, HeatStackFixture):
message = "Object {!r} is not an HeatStackFixture".format(stack)
raise TypeError(message)
self.stack = stack
def setup_fixture(self):
self.setup_keys()
self.setup_values()
def setup_keys(self):
keys = self._keys
if keys is None:
self._keys = keys = self.get_keys()
self.addCleanup(self.cleanup_keys)
return keys
keys = tobiko.fixture_property(setup_keys)
def get_keys(self):
raise NotImplementedError
def cleanup_keys(self):
del self._keys
def setup_values(self):
values = self._values
if values is None:
self._values = values = self.get_values()
self.addCleanup(self.cleanup_values)
return values
values = tobiko.fixture_property(setup_values)
def get_values(self):
raise NotImplementedError
def cleanup_values(self):
del self._values
def get_value(self, key):
# Match template outputs definition before getting value
if key in self.keys:
try:
return self.values[key]
except KeyError:
LOG.error('Key %r not found in stack %r', key,
self.stack.stack_name)
else:
LOG.error('Key %r not found in template for stack %r', key,
self.stack.stack_name)
raise self.key_error(name=self.stack.stack_name, key=key)
def set_value(self, key, value):
# Match template outputs definition before setting value
if key in self.keys:
self.values[key] = value
else:
LOG.error('Key %r not found in template for stack %r', key,
self.stack.stack_name)
raise self.key_error(name=self.stack.stack_name, key=key)
def __getattr__(self, name):
try:
return self.get_value(name)
except self.key_error:
pass
message = "Object {!r} has no attribute {!r}".format(self, name)
raise AttributeError(message)
class HeatStackParametersFixture(HeatStackNamespaceFixture):
key_error = HeatStackParameterKeyError
def __init__(self, stack, parameters=None):
super(HeatStackParametersFixture, self).__init__(stack)
self.parameters = parameters and dict(parameters) or {}
def get_keys(self):
template = tobiko.setup_fixture(self.stack.template)
return frozenset(template.parameters or [])
def get_values(self):
values = dict(self.parameters)
missing_keys = sorted(self.keys - set(values))
for key in missing_keys:
value = getattr(self.stack, key, None)
if value is not None:
values[key] = value
return values
class HeatStackOutputsFixture(HeatStackNamespaceFixture):
key_error = HeatStackOutputKeyError
def get_keys(self):
template = tobiko.setup_fixture(self.stack.template)
return frozenset(template.outputs or [])
def get_values(self):
# Can't get output values before stack creation is complete
self.stack.wait_for_create_complete()
outputs = self.stack.get_stack(resolve_outputs=True).outputs
return {o['output_key']: o['output_value']
for o in outputs}
def check_stack_status(stack, expected):
observed = stack.stack_status
if observed not in expected:
if observed == CREATE_FAILED:
error_class = HeatStackCreationFailed
elif observed == DELETE_FAILED:
error_class = HeatStackDeletionFailed
else:
error_class = InvalidHeatStackStatus
raise error_class(name=stack.stack_name,
observed=observed,
expected=expected,
status_reason=stack.stack_status_reason)
class HeatStackNotFound(tobiko.TobikoException):
message = "stack {name!r} not found"
class InvalidHeatStackStatus(tobiko.TobikoException):
message = ("stack {name!r} status {observed!r} not in {expected!r}\n"
"{status_reason!s}")
class HeatStackCreationFailed(InvalidHeatStackStatus):
pass
class HeatStackDeletionFailed(InvalidHeatStackStatus):
pass
class HeatStackResourceFixture(HeatStackNamespaceFixture):
key_error = HeatStackResourceKeyError
def get_keys(self):
template = tobiko.setup_fixture(self.stack.template)
return frozenset(template.resources or [])
def get_values(self):
self.stack.wait_for_create_complete()
client = self.stack.client
resources = client.resources.list(self.stack.stack_id)
return {r.resource_name: r for r in resources}
@property
def fixture_name(self):
        return self.stack.stack_name + '.resources'
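# Illustrative sketch (not part of the original module): a concrete stack is
# normally declared by subclassing HeatStackFixture; the class, template and
# parameter names below are hypothetical.
#
#     class MyStackFixture(HeatStackFixture):
#         stack_name = 'my-stack'
#         template = ...   # a _template.HeatTemplateFixture for my-stack.yaml
#         parameters = {'flavor': 'm1.tiny'}
#
# tobiko.setup_fixture(MyStackFixture) then drives create_stack(), and the
# template outputs become attributes of the fixture through __getattr__ above.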
```
#### File: openstack/keystone/_credentials.py
```python
from __future__ import absolute_import
import collections
import os
import sys
from oslo_log import log
import yaml
import tobiko
LOG = log.getLogger(__name__)
def get_keystone_credentials(obj=None):
if not obj:
return default_keystone_credentials()
if tobiko.is_fixture(obj):
obj = tobiko.get_fixture(obj)
if isinstance(obj, KeystoneCredentialsFixture):
obj = tobiko.setup_fixture(obj).credentials
if isinstance(obj, KeystoneCredentials):
return obj
message = "Can't get {!r} object from {!r}".format(
KeystoneCredentials, obj)
raise TypeError(message)
def default_keystone_credentials():
credentials = tobiko.setup_fixture(DefaultKeystoneCredentialsFixture
).credentials
tobiko.check_valid_type(credentials, KeystoneCredentials)
return credentials
class KeystoneCredentials(collections.namedtuple(
'KeystoneCredentials', ['api_version',
'auth_url',
'username',
'password',
'project_name',
'domain_name',
'user_domain_name',
'project_domain_name',
'project_domain_id',
'trust_id'])):
def to_dict(self):
return {k: v
for k, v in self._asdict().items()
if v is not None}
def __repr__(self):
params = self.to_dict()
if 'password' in params:
params['password'] = '***'
return 'keystone_credentials({!s})'.format(
", ".join("{!s}={!r}".format(k, v)
for k, v in sorted(params.items())))
required_params = ('auth_url', 'username', 'password', 'project_name')
def validate(self, required_params=None):
required_params = required_params or self.required_params
missing_params = [p
for p in required_params
if not getattr(self, p)]
if missing_params:
reason = "undefined parameters: {!s}".format(
', '.join(missing_params))
raise InvalidKeystoneCredentials(credentials=self, reason=reason)
def keystone_credentials(api_version=None,
auth_url=None,
username=None,
                         password=None,
project_name=None,
domain_name=None,
user_domain_name=None,
project_domain_name=None,
project_domain_id=None,
trust_id=None,
cls=KeystoneCredentials):
return cls(api_version=api_version,
auth_url=auth_url,
username=username,
password=password,
project_name=project_name,
domain_name=domain_name,
user_domain_name=user_domain_name,
project_domain_name=project_domain_name,
project_domain_id=project_domain_id,
trust_id=trust_id)
class InvalidKeystoneCredentials(tobiko.TobikoException):
message = "invalid Keystone credentials; {reason!s}; {credentials!r}"
class KeystoneCredentialsFixture(tobiko.SharedFixture):
credentials = None
def __init__(self, credentials=None):
super(KeystoneCredentialsFixture, self).__init__()
if credentials:
self.credentials = credentials
def setup_fixture(self):
self.setup_credentials()
def setup_credentials(self):
credentials = self.credentials
if not self.credentials:
credentials = self.get_credentials()
if credentials:
try:
credentials.validate()
except InvalidKeystoneCredentials as ex:
LOG.info("No such valid credentials from %r (%r)",
self, ex)
else:
self.addCleanup(self.cleanup_credentials)
self.credentials = credentials
def cleanup_credentials(self):
del self.credentials
def get_credentials(self):
return self.credentials
class EnvironKeystoneCredentialsFixture(KeystoneCredentialsFixture):
environ = None
def __init__(self, credentials=None, environ=None):
super(EnvironKeystoneCredentialsFixture, self).__init__(
credentials=credentials)
if environ is not None:
self.environ = environ
def setup_fixture(self):
if self.environ is None:
self.environ = self.get_environ()
super(EnvironKeystoneCredentialsFixture, self).setup_fixture()
def get_environ(self):
return os.environ
def get_credentials(self):
auth_url = self.get_env('OS_AUTH_URL')
if not auth_url:
LOG.debug("OS_AUTH_URL environment variable not defined")
return None
api_version = (
self.get_int_env('OS_IDENTITY_API_VERSION') or
api_version_from_url(auth_url))
username = (
self.get_env('OS_USERNAME') or
self.get_env('OS_USER_ID'))
password = self.get_env('OS_PASSWORD')
project_name = (
self.get_env('OS_PROJECT_NAME') or
self.get_env('OS_TENANT_NAME') or
self.get_env('OS_PROJECT_ID') or
self.get_env('OS_TENANT_ID'))
if api_version == 2:
return keystone_credentials(
api_version=api_version,
auth_url=auth_url,
username=username,
password=password,
project_name=project_name)
else:
domain_name = (
self.get_env('OS_DOMAIN_NAME') or
self.get_env('OS_DOMAIN_ID'))
user_domain_name = (
self.get_env('OS_USER_DOMAIN_NAME') or
self.get_env('OS_USER_DOMAIN_ID'))
project_domain_name = (
self.get_env('OS_PROJECT_DOMAIN_NAME'))
project_domain_id = (
self.get_env('OS_PROJECT_DOMAIN_ID'))
trust_id = self.get_env('OS_TRUST_ID')
return keystone_credentials(
api_version=api_version,
auth_url=auth_url,
username=username,
password=password,
project_name=project_name,
domain_name=domain_name,
user_domain_name=user_domain_name,
project_domain_name=project_domain_name,
project_domain_id=project_domain_id,
trust_id=trust_id)
def get_env(self, name):
return self.environ.get(name, None)
def get_int_env(self, name):
value = self.get_env(name=name)
if value is not None:
value = int(value)
return value
class ConfigKeystoneCredentialsFixture(KeystoneCredentialsFixture):
def get_credentials(self):
from tobiko import config
conf = config.CONF.tobiko.keystone
auth_url = conf.auth_url
if not auth_url:
LOG.debug("auth_url option not defined in 'keystone' section of "
"tobiko.conf")
return None
api_version = (conf.api_version or
api_version_from_url(auth_url))
if api_version == 2:
return keystone_credentials(
api_version=api_version,
auth_url=auth_url,
username=conf.username,
                password=conf.password,
project_name=conf.project_name)
else:
return keystone_credentials(
api_version=api_version,
auth_url=auth_url,
username=conf.username,
                password=conf.password,
project_name=conf.project_name,
domain_name=conf.domain_name,
user_domain_name=conf.user_domain_name,
project_domain_name=conf.project_domain_name,
project_domain_id=conf.project_domain_id,
trust_id=conf.trust_id)
DEFAULT_KEYSTONE_CREDENTIALS_FIXTURES = [
EnvironKeystoneCredentialsFixture,
ConfigKeystoneCredentialsFixture]
class DefaultKeystoneCredentialsFixture(KeystoneCredentialsFixture):
fixtures = DEFAULT_KEYSTONE_CREDENTIALS_FIXTURES
def get_credentials(self):
for fixture in self.fixtures:
try:
credentials = tobiko.setup_fixture(fixture).credentials
except Exception:
LOG.exception("Error setting up fixture %r", fixture)
continue
if credentials:
LOG.info("Got default credentials from fixture %r: %r",
fixture, credentials)
return credentials
def api_version_from_url(auth_url):
if auth_url.endswith('/v2.0'):
LOG.debug('Got Keystone API version 2 from auth_url: %r', auth_url)
return 2
elif auth_url.endswith('/v3'):
LOG.debug('Got Keystone API version 3 from auth_url: %r', auth_url)
return 3
else:
LOG.warning('Unable to get Keystone API version from auth_url: %r',
auth_url)
return None
def print_credentials():
credentials = default_keystone_credentials()
yaml.dump(dict(credentials.to_dict()),
sys.stdout,
indent=4,
sort_keys=True)
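# Illustrative sketch (not part of the original module): the Keystone API
# version is inferred from the auth_url suffix, which both credential fixtures
# above fall back to when no explicit version is given (URLs are placeholders).
if __name__ == "__main__":
    assert api_version_from_url('http://keystone.example.com:5000/v2.0') == 2
    assert api_version_from_url('http://keystone.example.com:5000/v3') == 3
    assert api_version_from_url('http://keystone.example.com:5000/') is None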
```
#### File: openstack/keystone/_services.py
```python
from __future__ import absolute_import
import tobiko
from tobiko.openstack.keystone import _client
class ServiceListFixture(tobiko.SharedFixture):
client = None
services = None
def setup_fixture(self):
self.services = _client.list_services()
def has_service(self, **attributes):
services = self.services
if services and attributes:
services = services.with_attributes(**attributes)
return bool(services)
def has_service(**attributes):
fixture = tobiko.setup_fixture(ServiceListFixture)
return fixture.has_service(**attributes)
def is_service_missing(**params):
return not has_service(**params)
def skip_if_missing_service(**params):
return tobiko.skip_if('missing service: {!r}'.format(params),
is_service_missing, **params)
```
#### File: openstack/neutron/_client.py
```python
from __future__ import absolute_import
import collections
import netaddr
from neutronclient.v2_0 import client as neutronclient
import tobiko
from tobiko.openstack import _client
class NeutronClientFixture(_client.OpenstackClientFixture):
def init_client(self, session):
return neutronclient.Client(session=session)
class NeutronClientManager(_client.OpenstackClientManager):
def create_client(self, session):
return NeutronClientFixture(session=session)
CLIENTS = NeutronClientManager()
def neutron_client(obj):
if not obj:
return get_neutron_client()
if isinstance(obj, neutronclient.Client):
return obj
fixture = tobiko.setup_fixture(obj)
if isinstance(fixture, NeutronClientFixture):
return fixture.client
message = "Object {!r} is not a NeutronClientFixture".format(obj)
raise TypeError(message)
def get_neutron_client(session=None, shared=True, init_client=None,
manager=None):
manager = manager or CLIENTS
client = manager.get_client(session=session, shared=shared,
init_client=init_client)
tobiko.setup_fixture(client)
return client.client
_RAISE_ERROR = object()
def find_network(client=None, unique=False, default=_RAISE_ERROR,
**attributes):
"""Look for a network matching some property values"""
networks = list_networks(client=client, **attributes)
if default is _RAISE_ERROR or networks:
if unique:
return networks.unique
else:
return networks.first
else:
return default
def find_port(client=None, unique=False, default=_RAISE_ERROR, **attributes):
"""Look for a port matching some property values"""
ports = list_ports(client=client, **attributes)
if default is _RAISE_ERROR or ports:
if unique:
return ports.unique
else:
return ports.first
else:
return default
def find_subnet(client=None, unique=False, default=_RAISE_ERROR, **attributes):
"""Look for a subnet matching some property values"""
subnets = list_subnets(client=client, **attributes)
if default is _RAISE_ERROR or subnets:
if unique:
return subnets.unique
else:
return subnets.first
else:
return default
def list_networks(client=None, **params):
networks = neutron_client(client).list_networks(**params)['networks']
return tobiko.select(networks)
def list_ports(client=None, **params):
ports = neutron_client(client).list_ports(**params)['ports']
return tobiko.select(ports)
def list_subnets(client=None, **params):
subnets = neutron_client(client).list_subnets(**params)
if isinstance(subnets, collections.Mapping):
subnets = subnets['subnets']
return tobiko.select(subnets)
def list_agents(client=None, **params):
agents = neutron_client(client).list_agents(**params)
if isinstance(agents, collections.Mapping):
agents = agents['agents']
return tobiko.select(agents)
def list_subnet_cidrs(client=None, **params):
return tobiko.select(netaddr.IPNetwork(subnet['cidr'])
for subnet in list_subnets(client=client, **params))
def get_floating_ip(floating_ip, client=None, **params):
floating_ip = neutron_client(client).show_floatingip(floating_ip, **params)
return floating_ip['floatingip']
def get_network(network, client=None, **params):
return neutron_client(client).show_network(network, **params)['network']
def get_port(port, client=None, **params):
return neutron_client(client).show_port(port, **params)['port']
def get_router(router, client=None, **params):
return neutron_client(client).show_router(router, **params)['router']
def get_subnet(subnet, client=None, **params):
return neutron_client(client).show_subnet(subnet, **params)['subnet']
def list_l3_agent_hosting_routers(router, client=None, **params):
agents = neutron_client(client).list_l3_agent_hosting_routers(
router, **params)
if isinstance(agents, collections.Mapping):
agents = agents['agents']
return tobiko.select(agents)
def find_l3_agent_hosting_router(router, client=None, unique=False,
default=_RAISE_ERROR, **params):
agents = list_l3_agent_hosting_routers(router=router, client=client,
**params)
if default is _RAISE_ERROR or agents:
if unique:
return agents.unique
else:
return agents.first
else:
return default
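# Illustrative usage sketch (not part of the original module): requires valid
# Keystone credentials so a Neutron client session can be built; the network
# name 'public' is a placeholder that may not exist on your cloud.
if __name__ == "__main__":
    for cidr in list_subnet_cidrs():
        print(cidr)
    network = find_network(name='public', default=None)
    print(network)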
```
#### File: openstack/octavia/config.py
```python
from __future__ import absolute_import
import itertools
from oslo_config import cfg
GROUP_NAME = 'octavia'
OPTIONS = [
cfg.IntOpt('check_interval',
default=5,
help='Interval to check for status changes, in seconds.'),
cfg.IntOpt('check_timeout',
default=360,
help='Timeout, in seconds, to wait for a status change.'),
]
def register_tobiko_options(conf):
conf.register_opts(group=cfg.OptGroup(GROUP_NAME), opts=OPTIONS)
def list_options():
return [(GROUP_NAME, itertools.chain(OPTIONS))]
```
#### File: openstack/os_faults/_cloud.py
```python
from __future__ import absolute_import
import os_faults
from oslo_log import log
import tobiko
from tobiko.openstack.os_faults import _config_file
LOG = log.getLogger(__name__)
def get_os_fault_cloud_managenemt(config_filename=None):
fixture = OsFaultsCloudManagementFixture(config_filename=config_filename)
return tobiko.setup_fixture(fixture).cloud_management
class OsFaultsCloudManagementFixture(tobiko.SharedFixture):
"""Responsible for executing faults."""
config_filename = None
cloud_management = None
def __init__(self, config_filename=None, cloud_management=None):
super(OsFaultsCloudManagementFixture, self).__init__()
if config_filename:
self.config_filename = config_filename
if cloud_management:
self.cloud_management = cloud_management
def setup_fixture(self):
self.connect()
def connect(self):
"""Connect to the cloud using os-faults."""
cloud_management = self.cloud_management
if cloud_management is None:
config_filename = self.config_filename
if config_filename is None:
self.config_filename = config_filename = (
_config_file.get_os_fault_config_filename())
LOG.info("OS-Faults: connecting with config filename %s",
config_filename)
self.cloud_management = cloud_management = os_faults.connect(
config_filename=config_filename)
return cloud_management
```
#### File: shell/sh/_command.py
```python
from __future__ import absolute_import
import subprocess
import six
def shell_command(command):
if isinstance(command, ShellCommand):
return command
elif isinstance(command, six.string_types):
return ShellCommand(command.split())
elif command:
return ShellCommand(str(a) for a in command)
else:
return ShellCommand()
class ShellCommand(tuple):
def __repr__(self):
return "ShellCommand([{!s}])".format(', '.join(self))
def __str__(self):
return subprocess.list2cmdline(self)
def __add__(self, other):
other = shell_command(other)
return shell_command(tuple(self) + other)
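# Illustrative usage sketch (not part of the original module): ShellCommand is a
# tuple subclass, so '+' concatenates argument lists and str() renders a quoted
# command line via subprocess.list2cmdline().
if __name__ == "__main__":
    base = shell_command('ls -l')
    full = base + ['/tmp/my dir']
    assert tuple(full) == ('ls', '-l', '/tmp/my dir')
    print(str(full))   # ls -l "/tmp/my dir"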
```
#### File: shell/ssh/_command.py
```python
from __future__ import absolute_import
import subprocess
import six
from tobiko.shell.ssh import _config
def ssh_login(hostname, username=None, port=None):
login = hostname
if port:
login += ':' + str(port)
if username:
login = username + '@' + login
return login
def ssh_command(host, username=None, port=None, command=None,
config_files=None, host_config=None, proxy_command=None,
key_filename=None, **options):
host_config = host_config or _config.ssh_host_config(
host=host, config_files=config_files)
command = command or host_config.default.command.split()
if isinstance(command, six.string_types):
command = command.split()
hostname = host_config.hostname
username = username or host_config.username
command += [ssh_login(hostname=hostname, username=username)]
# if host_config.default.debug:
# command += ['-vvvvvv']
port = port or host_config.port
if port:
command += ['-p', str(port)]
if key_filename:
command += ['-i', key_filename]
if proxy_command:
if not isinstance(proxy_command, six.string_types):
proxy_command = subprocess.list2cmdline([str(a)
for a in proxy_command])
options['ProxyCommand'] = proxy_command
for name, value in host_config.host_config.items():
if name not in {'hostname', 'port', 'user'}:
options.setdefault(name, value)
options.setdefault('UserKnownHostsFile', '/dev/null')
options.setdefault('StrictHostKeyChecking', 'no')
options.setdefault('LogLevel', 'quiet')
options.setdefault('ConnectTimeout', int(host_config.timeout))
options.setdefault('ConnectionAttempts', host_config.connection_attempts)
if options:
for name, value in sorted(options.items()):
name = name.replace('_', '')
command += ['-o', '{!s}={!s}'.format(name, value)]
return command
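# Illustrative sketch (not part of the original module): ssh_login() assembles
# the user@host:port target string used by ssh_command() above (the host name
# is a placeholder).
if __name__ == "__main__":
    assert ssh_login('gateway.example.org') == 'gateway.example.org'
    assert (ssh_login('gateway.example.org', username='stack', port=2222) ==
            'stack@gateway.example.org:2222')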
```
#### File: shell/ssh/_forward.py
```python
from __future__ import absolute_import
import collections
import contextlib
import socket
from oslo_log import log
import six
from six.moves import urllib
import sshtunnel
import tobiko
from tobiko.shell.ssh import _client
LOG = log.getLogger(__name__)
def get_forward_port_address(address, ssh_client=None, manager=None):
if ssh_client is None:
ssh_client = _client.ssh_proxy_client()
manager = manager or DEFAULT_SSH_PORT_FORWARD_MANAGER
return manager.get_forward_port_address(address, ssh_client=ssh_client)
def get_forward_url(url, ssh_client=None, manager=None):
url = parse_url(url)
if ssh_client is None:
ssh_client = _client.ssh_proxy_client()
manager = manager or DEFAULT_SSH_PORT_FORWARD_MANAGER
address = binding_address(url)
forward_address = get_forward_port_address(address, ssh_client=ssh_client,
manager=manager)
return binding_url(forward_address)
class SSHPortForwardManager(object):
def __init__(self):
self.forward_addresses = {}
self.forwarders = {}
def get_forward_port_address(self, address, ssh_client):
try:
return self.forward_addresses[address, ssh_client]
except KeyError:
pass
forwarder = self.get_forwarder(address, ssh_client=ssh_client)
if forwarder:
forward_address = forwarder.get_forwarding(address)
else:
forward_address = address
self.forward_addresses[address, ssh_client] = forward_address
return forward_address
def get_forwarder(self, address, ssh_client):
try:
return self.forwarders[address, ssh_client]
except KeyError:
pass
if ssh_client:
tobiko.check_valid_type(ssh_client, _client.SSHClientFixture)
forwarder = SSHTunnelForwarderFixture(ssh_client=ssh_client)
forwarder.put_forwarding(address)
tobiko.setup_fixture(forwarder)
else:
forwarder = None
self.forwarders[address, ssh_client] = forwarder
return forwarder
DEFAULT_SSH_PORT_FORWARD_MANAGER = SSHPortForwardManager()
class SSHTunnelForwarderFixture(tobiko.SharedFixture):
forwarder = None
def __init__(self, ssh_client):
super(SSHTunnelForwarderFixture, self).__init__()
self.ssh_client = ssh_client
self._forwarding = collections.OrderedDict()
def put_forwarding(self, remote, local=None):
if not local:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with contextlib.closing(sock):
sock.bind(('127.0.0.1', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
hostname, port = sock.getsockname()
local = hostname, port
return self._forwarding.setdefault(remote, local)
def get_forwarding(self, remote):
return self._forwarding.get(remote)
def setup_fixture(self):
self.setup_forwarder()
def setup_forwarder(self):
forwarder = self.forwarder
if not forwarder:
remote_bind_addresses = list(self._forwarding.keys())
local_bind_addresses = list(self._forwarding.values())
self.forwarder = forwarder = SSHTunnelForwarder(
ssh_client=self.ssh_client,
local_bind_addresses=local_bind_addresses,
remote_bind_addresses=remote_bind_addresses)
self.addCleanup(self.cleanup_forwarder)
forwarder.start()
self.ssh_client.addCleanup(self)
return forwarder
def cleanup_forwarder(self):
forwarder = self.forwarder
if forwarder:
del self.forwarder
forwarder.stop()
# pylint: disable=protected-access
class SSHUnixForwardHandler(sshtunnel._ForwardHandler):
transport = None
def handle(self):
uid = sshtunnel.get_connection_id()
self.info = '#{0} <-- {1}'.format(uid, self.client_address or
self.server.local_address)
remote_address = self.remote_address
assert isinstance(remote_address, six.string_types)
command = 'sudo nc -U "{}"'.format(remote_address)
chan = self.transport.open_session()
chan.exec_command(command)
self.logger.log(sshtunnel.TRACE_LEVEL,
'{0} connected'.format(self.info))
try:
self._redirect(chan)
except socket.error:
# Sometimes a RST is sent and a socket error is raised, treat this
# exception. It was seen that a 3way FIN is processed later on, so
# no need to make an ordered close of the connection here or raise
# the exception beyond this point...
self.logger.log(sshtunnel.TRACE_LEVEL,
'{0} sending RST'.format(self.info))
except Exception as e:
self.logger.log(sshtunnel.TRACE_LEVEL,
'{0} error: {1}'.format(self.info, repr(e)))
finally:
chan.close()
self.request.close()
self.logger.log(sshtunnel.TRACE_LEVEL,
'{0} connection closed.'.format(self.info))
# pylint: enable=protected-access
class SSHTunnelForwarder(sshtunnel.SSHTunnelForwarder):
daemon_forward_servers = True #: flag tunnel threads in daemon mode
daemon_transport = True #: flag SSH transport thread in daemon mode
def __init__(self, ssh_client, **kwargs):
self.ssh_client = ssh_client
params = self._merge_parameters(self._get_connect_parameters(),
**kwargs)
super(SSHTunnelForwarder, self).__init__(**params)
def _merge_parameters(self, *dicts, **kwargs):
result = {}
for d in dicts + (kwargs,):
if d:
result.update((k, v) for k, v in d.items() if v is not None)
return result
@staticmethod
    def _consolidate_auth(ssh_password=None,
ssh_pkey=None,
ssh_pkey_password=None,
allow_agent=True,
host_pkey_directories=None,
logger=None):
return None, None
def _get_connect_parameters(self):
parameters = self.ssh_client.setup_connect_parameters()
return dict(ssh_address_or_host=parameters['hostname'],
ssh_username=parameters.get('username'),
ssh_password=parameters.get('password'),
ssh_pkey=parameters.get('pkey'),
ssh_port=parameters.get('port'),
ssh_private_key_password=parameters.get('passphrase'),
compression=parameters.get('compress'),
allow_agent=parameters.get('allow_agent'))
def _connect_to_gateway(self):
# pylint: disable=attribute-defined-outside-init
self._transport = self._get_transport()
def _get_transport(self):
return self.ssh_client.connect().get_transport()
def _stop_transport(self):
if self.is_active:
del self._transport
assert not self.is_active
super(SSHTunnelForwarder, self)._stop_transport()
@staticmethod
def _get_binds(bind_address, bind_addresses, is_remote=False):
addr_kind = 'remote' if is_remote else 'local'
if not bind_address and not bind_addresses:
if is_remote:
raise ValueError("No {0} bind addresses specified. Use "
"'{0}_bind_address' or '{0}_bind_addresses'"
" argument".format(addr_kind))
else:
return []
elif bind_address and bind_addresses:
raise ValueError("You can't use both '{0}_bind_address' and "
"'{0}_bind_addresses' arguments. Use one of "
"them.".format(addr_kind))
if bind_address:
bind_addresses = [bind_address]
if not is_remote:
# Add random port if missing in local bind
for (i, local_bind) in enumerate(bind_addresses):
if isinstance(local_bind, tuple) and len(local_bind) == 1:
bind_addresses[i] = (local_bind[0], 0)
# check_addresses(bind_addresses, is_remote)
return bind_addresses
def _make_ssh_forward_handler_class(self, remote_address_):
"""
Make SSH Handler class
"""
if isinstance(remote_address_, tuple):
return super(
SSHTunnelForwarder, self)._make_ssh_forward_handler_class(
remote_address_)
class Handler(SSHUnixForwardHandler):
transport = self._transport
remote_address = remote_address_
logger = self.logger
return Handler
def parse_url(url):
if isinstance(url, urllib.parse.ParseResult):
return url
else:
return urllib.parse.urlparse(url)
def binding_address(url):
url = parse_url(url)
if url.netloc:
# Retains only scheme and netloc
return (url.hostname, url.port)
elif url.path:
# Retains only scheme and path
return url.path
raise ValueError('Invalid URL: {!r}'.format(url))
def binding_url(address):
if isinstance(address, tuple):
try:
hostname, = address
except ValueError:
hostname, port = address
return 'tcp://{hostname}:{port}'.format(hostname=hostname,
port=port)
elif isinstance(address, six.string_types):
return 'unix://{path}'.format(path=address)
raise TypeError('Invalid address type: {!r}'.format(address))
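# Illustrative sketch (not part of the original module): binding_address() and
# binding_url() convert between URL strings and the (host, port) / unix-path
# forms used by the port forward manager above (addresses are placeholders).
if __name__ == "__main__":
    assert binding_address('http://192.0.2.10:8080/path') == ('192.0.2.10', 8080)
    assert binding_address('unix:///var/run/foo.sock') == '/var/run/foo.sock'
    assert binding_url(('192.0.2.10', 8080)) == 'tcp://192.0.2.10:8080'
    assert binding_url('/var/run/foo.sock') == 'unix:///var/run/foo.sock'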
```
#### File: functional/openstack/test_keystone.py
```python
from __future__ import absolute_import
from keystoneclient.v2_0 import client as v2_client
from keystoneclient.v3 import client as v3_client
from oslo_log import log
import testtools
import yaml
import tobiko
from tobiko.openstack import keystone
from tobiko.shell import sh
LOG = log.getLogger(__name__)
CLIENT_CLASSES = v2_client.Client, v3_client.Client
class TobikoKeystoneCredentialsCommandTest(testtools.TestCase):
def test_execute(self):
with sh.local_process('tobiko-keystone-credentials') as process:
actual = yaml.full_load(process.stdout)
process.check_exit_status()
expected = keystone.default_keystone_credentials().to_dict()
self.assertEqual(expected, actual)
class KeystoneClientAPITest(testtools.TestCase):
def test_get_keystone_client(self):
client = keystone.get_keystone_client()
        self.assertIsInstance(client, CLIENT_CLASSES)
def test_list_services(self):
services = keystone.list_services()
self.assertTrue(services)
def test_list_services_by_name(self):
services = keystone.list_services(name='keystone')
self.assertTrue(services)
for s in services:
self.assertEqual('keystone', s.name)
def test_list_services_by_type(self):
services = keystone.list_services(type='identity')
self.assertTrue(services)
for s in services:
self.assertEqual('identity', s.type)
def test_find_service(self):
service = keystone.find_service()
self.assertTrue(service.id)
def test_find_service_with_unique(self):
self.assertRaises(tobiko.MultipleObjectsFound,
keystone.find_service,
unique=True)
def test_find_service_not_found(self):
self.assertRaises(tobiko.ObjectNotFound,
keystone.find_service,
name='never-never-land')
    def test_find_service_with_default(self):
service = keystone.find_service(name='never-never-land',
default=None)
self.assertIsNone(service)
def test_find_service_by_name(self):
service = keystone.find_service(name='keystone')
self.assertEqual('keystone', service.name)
def test_find_service_by_type(self):
service = keystone.find_service(type='identity')
self.assertEqual('identity', service.type)
def test_list_endpoints(self):
service = keystone.find_service(name='keystone')
endpoints = keystone.list_endpoints()
self.assertIn(service.id, [e.service_id for e in endpoints])
def test_list_endpoints_by_service(self):
service = keystone.find_service(name='keystone')
endpoints = keystone.list_endpoints(service=service)
self.assertTrue(endpoints)
self.assertEqual([service.id] * len(endpoints),
[e.service_id for e in endpoints])
def test_list_endpoints_by_service_id(self):
service = keystone.find_service(name='keystone')
endpoints = keystone.list_endpoints(service_id=service.id)
self.assertTrue(endpoints)
for e in endpoints:
self.assertEqual(service.id, e.service_id)
def test_list_endpoints_by_interface(self):
endpoints = keystone.list_endpoints(interface='public')
self.assertTrue(endpoints)
for e in endpoints:
self.assertEqual('public', e.interface)
def test_list_endpoints_by_url(self):
url = keystone.list_endpoints()[-1].url
endpoints = keystone.list_endpoints(url=url)
self.assertTrue(endpoints)
for e in endpoints:
self.assertEqual(url, e.url)
def test_find_endpoint(self):
endpoint = keystone.find_endpoint()
self.assertTrue(endpoint.id)
def test_find_endpoint_with_unique(self):
self.assertRaises(tobiko.MultipleObjectsFound,
keystone.find_endpoint,
unique=True)
def test_find_endpoint_not_found(self):
self.assertRaises(tobiko.ObjectNotFound,
keystone.find_endpoint,
service='never-never-land')
def test_find_endpoint_with_default(self):
service = keystone.find_endpoint(service='never-never-land',
default=None)
self.assertIsNone(service)
def test_find_endpoint_by_service(self):
service = keystone.find_service(name='keystone')
endpoint = keystone.find_endpoint(service=service)
self.assertEqual(endpoint.service_id, service.id)
def test_find_endpoint_by_service_id(self):
service = keystone.find_service(name='keystone')
endpoint = keystone.find_endpoint(service_id=service.id)
self.assertEqual(endpoint.service_id, service.id)
def test_find_endpoint_by_url(self):
url = keystone.list_endpoints()[-1].url
endpoint = keystone.find_endpoint(url=url)
self.assertEqual(url, endpoint.url)
def test_find_service_endpoint(self):
service = keystone.find_service(name='keystone')
endpoint = keystone.find_service_endpoint(name='keystone')
self.assertEqual(service.id, endpoint.service_id)
self.assertEqual('public', endpoint.interface)
self.assertTrue(endpoint.enabled)
@keystone.skip_if_missing_service(name='octavia')
def test_find_octavia_service_endpoint(self):
service = keystone.find_service(name='octavia')
endpoint = keystone.find_service_endpoint(name='octavia')
self.assertEqual(service.id, endpoint.service_id)
self.assertEqual('public', endpoint.interface)
self.assertTrue(endpoint.enabled)
```
#### File: functional/tripleo/test_overcloud.py
```python
from __future__ import absolute_import
import os
import netaddr
import pandas as pd
import six
import testtools
from tobiko import config
from tobiko.openstack import nova
from tobiko.tripleo import overcloud
from tobiko.tripleo import pacemaker
from tobiko.tripleo import services
from tobiko.tripleo import processes
import tobiko
CONF = config.CONF
@overcloud.skip_if_missing_overcloud
class OvercloudSshConnectionTest(testtools.TestCase):
def test_fetch_overcloud_credentials(self):
env = overcloud.load_overcloud_rcfile()
self.assertTrue(env['OS_AUTH_URL'])
self.assertTrue(env.get('OS_USERNAME') or env.get('OS_USER_ID'))
self.assertTrue(env['OS_PASSWORD'])
self.assertTrue(env.get('OS_TENANT_NAME') or
env.get('OS_PROJECT_NAME') or
env.get('OS_TENANT_ID') or
env.get('OS_PROJECT_ID'))
@overcloud.skip_if_missing_overcloud
class OvercloudNovaApiTest(testtools.TestCase):
def test_list_overcloud_nodes(self):
nodes = overcloud.list_overcloud_nodes()
self.assertTrue(nodes)
for node in nodes:
node_ip = nova.find_server_ip_address(server=node,
check_connectivity=True)
self.assertIsInstance(node_ip, netaddr.IPAddress)
def test_find_overcloud_nodes(self):
node = overcloud.find_overcloud_node()
node_ip = nova.find_server_ip_address(server=node,
check_connectivity=True)
self.assertIsInstance(node_ip, netaddr.IPAddress)
def test_get_overcloud_node_ip_address(self):
overcloud_node_ip = overcloud.overcloud_node_ip_address()
self.assertIsInstance(overcloud_node_ip, netaddr.IPAddress)
def test_overcloud_host_config(self):
hostname = overcloud.find_overcloud_node().name
host_config = tobiko.setup_fixture(
overcloud.overcloud_host_config(hostname=hostname))
self.assertEqual(hostname, host_config.host)
self.assertIsInstance(host_config.hostname, six.string_types)
netaddr.IPAddress(host_config.hostname)
self.assertEqual(CONF.tobiko.tripleo.overcloud_ssh_port,
host_config.port)
self.assertEqual(CONF.tobiko.tripleo.overcloud_ssh_username,
host_config.username)
key_filename = os.path.expanduser(
CONF.tobiko.tripleo.overcloud_ssh_key_filename)
self.assertEqual(key_filename, host_config.key_filename)
self.assertTrue(os.path.isfile(key_filename))
self.assertTrue(os.path.isfile(key_filename + '.pub'))
def test_overcloud_ssh_client_connection(self):
hostname = overcloud.find_overcloud_node().name
ssh_client = overcloud.overcloud_ssh_client(hostname=hostname)
ssh_client.connect()
@overcloud.skip_if_missing_overcloud
class OvercloudPacemakerTest(testtools.TestCase):
"""
Assert that all pacemaker resources are in
healthy state
"""
def test_get_pacemaker_resource_table(self):
resource_table = pacemaker.get_pcs_resources_table()
self.assertIsInstance(resource_table, pd.DataFrame)
def test_pacemaker_resources_health(self):
pcs_health = pacemaker.PacemakerResourcesStatus()
self.assertTrue(pcs_health.all_healthy)
@overcloud.skip_if_missing_overcloud
class OvercloudServicesTest(testtools.TestCase):
"""
Assert that a subset of overcloud services are in running state
across the overcloud nodes
"""
def test_get_services_resource_table(self):
oss = services.OvercloudServicesStatus()
self.assertIsInstance(oss.oc_services_df,
pd.DataFrame)
def test_overcloud_services(self):
oss = services.OvercloudServicesStatus()
self.assertTrue(oss.basic_overcloud_services_running)
def test_get_overcloud_nodes_running_pcs_resource(self):
nodes_list = pacemaker.get_overcloud_nodes_running_pcs_resource(
resource_type='(ocf::heartbeat:rabbitmq-cluster):',
resource_state='Started')
self.assertIsInstance(nodes_list, list)
@overcloud.skip_if_missing_overcloud
class OvercloudProcessesTest(testtools.TestCase):
"""
Assert that a subset of overcloud processes are in running state
across the overcloud nodes
"""
def test_get_processes_resource_table(self):
ops = processes.OvercloudProcessesStatus()
self.assertIsInstance(ops.oc_procs_df,
pd.DataFrame)
def test_overcloud_processes(self):
ops = processes.OvercloudProcessesStatus()
self.assertTrue(ops.basic_overcloud_processes_running)
```
#### File: scenario/neutron/test_floating_ip.py
```python
from __future__ import absolute_import
import testtools
import tobiko
from tobiko import config
from tobiko.shell import ping
from tobiko.shell import sh
from tobiko.openstack import neutron
from tobiko.openstack import stacks
CONF = config.CONF
class FloatingIPTest(testtools.TestCase):
"""Tests connectivity via floating IPs"""
#: Resources stack with floating IP and Nova server
stack = tobiko.required_setup_fixture(stacks.CirrosServerStackFixture)
def test_stack_create_complete(self):
self.stack.key_pair_stack.wait_for_create_complete()
self.stack.network_stack.wait_for_create_complete()
self.stack.wait_for_create_complete()
def test_ssh(self):
"""Test SSH connectivity to floating IP address"""
hostname = sh.get_hostname(ssh_client=self.stack.ssh_client)
self.assertEqual(self.stack.server_name.lower(), hostname)
def test_ping(self):
"""Test ICMP connectivity to floating IP address"""
ping.ping_until_received(
self.stack.floating_ip_address).assert_replied()
# --- test port-security extension ---------------------------------------
@neutron.skip_if_missing_networking_extensions('port-security')
def test_port_security_enabled_port_attribute(self):
"""Test port security enabled port attribute"""
self.assertEqual(self.expected_port_security_enabled,
self.observed_port_security_enabled)
@property
def expected_port_security_enabled(self):
"""Expected port security enabled value"""
return self.stack.port_security_enabled
@property
def observed_port_security_enabled(self):
"""Actual MTU value for internal network"""
return self.stack.outputs.port_security_enabled
# --- test security_group extension --------------------------------------
@neutron.skip_if_missing_networking_extensions('security-group')
def test_security_groups_port_attribute(self):
"""Test security groups port attribute"""
self.assertEqual(self.expected_security_groups,
self.observed_security_groups)
@property
def expected_security_groups(self):
"""Expected port security groups"""
return set(self.stack.security_groups)
@property
def observed_security_groups(self):
"""Actual port security group"""
return set(self.stack.outputs.security_groups)
# --- test net-mtu and net-mtu-writable extensions ------------------------
@ping.skip_if_missing_fragment_ping_option
@neutron.skip_if_missing_networking_extensions('net-mtu')
def test_ping_with_net_mtu(self):
"""Test connectivity to floating IP address with MTU sized packets"""
# Wait until it can reach remote port with maximum-sized packets
ping.ping(self.stack.floating_ip_address,
until=ping.RECEIVED,
packet_size=self.observed_net_mtu,
fragmentation=False).assert_replied()
# Verify it can't reach remote port with over-sized packets
ping.ping(self.stack.floating_ip_address,
packet_size=self.observed_net_mtu + 1,
fragmentation=False,
count=5,
check=False).assert_not_replied()
@property
def observed_net_mtu(self):
"""Actual MTU value for internal network"""
return self.stack.network_stack.outputs.mtu
# --- test l3_ha extension ------------------------------------------------
@neutron.skip_if_missing_networking_extensions('l3-ha')
def test_l3_ha(self):
"""Test 'mtu' network attribute"""
gateway = self.stack.network_stack.gateway_details
self.assertEqual(self.stack.network_stack.ha,
gateway['ha'])
# --- Test with port security enabled -----------------------------------------
@neutron.skip_if_missing_networking_extensions('port-security',
'security-group')
class FloatingIPWithPortSecurityFixture(stacks.CirrosServerStackFixture):
"""Heat stack for testing a floating IP instance with port security"""
#: Resources stack with security group to allow ping Nova servers
security_groups_stack = tobiko.required_setup_fixture(
stacks.SecurityGroupsFixture)
#: Enable port security on internal network
port_security_enabled = True
@property
def security_groups(self):
"""List with ICMP security group"""
return [self.security_groups_stack.ssh_security_group_id]
@neutron.skip_if_missing_networking_extensions('port-security',
'security-group')
class FloatingIPWithPortSecurityTest(FloatingIPTest):
"""Tests connectivity via floating IPs with port security"""
#: Resources stack with floating IP and Nova server with port security
stack = tobiko.required_setup_fixture(FloatingIPWithPortSecurityFixture)
def test_ping(self):
"""Test connectivity to floating IP address"""
# Wait for server instance to get ready by logging in
self.stack.ssh_client.connect()
# Check can't reach secured port via floating IP
ping.ping(self.stack.floating_ip_address,
count=5,
check=False).assert_not_replied()
@ping.skip_if_missing_fragment_ping_option
@neutron.skip_if_missing_networking_extensions('net-mtu')
def test_ping_with_net_mtu(self):
"""Test connectivity to floating IP address"""
# Wait for server instance to get ready by logging in
tobiko.setup_fixture(self.stack.ssh_client)
self.stack.ssh_client.connect()
# Verify it can't reach secured port with maximum-sized packets
ping.ping(self.stack.floating_ip_address,
packet_size=self.observed_net_mtu,
fragmentation=False,
count=5,
check=False).assert_not_replied()
# Verify it can't reach secured port with over-sized packets
ping.ping(self.stack.floating_ip_address,
packet_size=self.observed_net_mtu + 1,
fragmentation=False,
count=5,
check=False).assert_not_replied()
# --- Test with ICMP security group -------------------------------------------
class FloatingIPWithICMPSecurityGroupFixture(
FloatingIPWithPortSecurityFixture):
"""Heat stack for testing a floating IP instance with security groups"""
@property
def security_groups(self):
"""List with ICMP security group"""
return [self.security_groups_stack.ssh_security_group_id,
self.security_groups_stack.icmp_security_group_id]
@neutron.skip_if_missing_networking_extensions('port-security',
'security-group')
class FloatingIPWithICMPSecurityGroupTest(FloatingIPTest):
"""Tests connectivity via floating IP with security ICMP security group"""
#: Resources stack with floating IP and Nova server to ping
stack = tobiko.required_setup_fixture(
FloatingIPWithICMPSecurityGroupFixture)
# --- Test net-mtu-write extension --------------------------------------------
@neutron.skip_if_missing_networking_extensions('net-mtu-writable')
class FloatingIPWithNetMtuWritableFixture(stacks.CirrosServerStackFixture):
"""Heat stack for testing floating IP with a custom MTU network value"""
#: Heat stack for creating internal network with custom MTU value
network_stack = tobiko.required_setup_fixture(
stacks.NetworkWithNetMtuWriteStackFixture)
@neutron.skip_if_missing_networking_extensions('net-mtu-writable')
class FloatingIpWithMtuWritableTest(FloatingIPTest):
"""Tests connectivity via floating IP with a custom MTU value"""
#: Resources stack with floating IP and Nova server
stack = tobiko.required_setup_fixture(FloatingIPWithNetMtuWritableFixture)
def test_net_mtu_write(self):
"""Test 'mtu' network attribute"""
self.assertEqual(self.expected_net_mtu, self.observed_net_mtu)
@property
def expected_net_mtu(self):
"""Expected MTU value for internal network"""
return self.stack.network_stack.custom_mtu_size
# --- Test l3-ha extension ----------------------------------------------------
@neutron.skip_if_missing_networking_extensions('l3-ha')
@neutron.skip_if_missing_networking_agents(binary='neutron-l3-agent',
count=2)
class FloatingIpWithL3HATest(FloatingIPTest):
#: Resources stack with floating IP and Nova server
stack = tobiko.required_setup_fixture(stacks.L3haServerStackFixture)
```
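
Throughout test_floating_ip.py above, each scenario variant is assembled the same way: subclass an existing Heat stack fixture, override a single attribute, and bind a test class to the new fixture so every inherited test method runs against it. A minimal sketch of that pattern — the two class names below are hypothetical, not part of the suite:

```python
# Illustrative sketch only: 'stacks.CirrosServerStackFixture',
# 'tobiko.required_setup_fixture' and 'FloatingIPTest' are the real helpers
# used above; the two classes defined here are made-up examples.
class ExampleVariantFixture(stacks.CirrosServerStackFixture):
    #: the single knob changed relative to the base fixture
    port_security_enabled = True


class ExampleVariantTest(FloatingIPTest):
    #: pointing 'stack' at the variant fixture re-runs every inherited
    #: test method (test_ping, test_ssh, ...) against the new stack
    stack = tobiko.required_setup_fixture(ExampleVariantFixture)
```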
#### File: scenario/neutron/test_port.py
```python
from __future__ import absolute_import
import netaddr
import testtools
import tobiko
from tobiko.shell import ping
from tobiko.shell import ip
from tobiko.openstack import neutron
from tobiko.openstack import stacks
class PortTest(testtools.TestCase):
"""Test Neutron ports"""
#: Resources stack with Nova server to send messages to
stack = tobiko.required_setup_fixture(stacks.CirrosServerStackFixture)
def test_port_ips(self):
server_ips = ip.list_ip_addresses(scope='global',
ssh_client=self.stack.ssh_client)
port_ips = neutron.list_port_ip_addresses(port=self.stack.port_details)
self.assertFalse(set(port_ips) - set(server_ips))
def test_port_network(self):
self.assertEqual(self.stack.network_stack.network_id,
self.stack.port_details['network_id'])
def test_port_subnets(self):
port_subnets = [fixed_ip['subnet_id']
for fixed_ip in self.stack.port_details['fixed_ips']]
network_subnets = self.stack.network_stack.network_details['subnets']
self.assertEqual(set(network_subnets), set(port_subnets))
def test_ping_subnet_gateways(self):
network_id = self.stack.network_stack.network_id
subnets = neutron.list_subnets(network_id=network_id)
gateway_ips = [netaddr.IPAddress(subnet['gateway_ip'])
for subnet in subnets]
ping.assert_reachable_hosts(gateway_ips,
ssh_client=self.stack.ssh_client)
def test_ping_port(self, network_id=None, device_id=None):
network_id = network_id or self.stack.network_stack.network_id
device_id = device_id or self.stack.server_id
ports = neutron.list_ports(network_id=network_id,
device_id=device_id)
port_ips = set()
for port in ports:
self.assertEqual(network_id, port['network_id'])
self.assertEqual(device_id, port['device_id'])
port_ips.update(neutron.list_port_ip_addresses(port=port))
ping.assert_reachable_hosts(port_ips,
ssh_client=self.stack.ssh_client)
def test_ping_inner_gateway_ip(self):
if not self.stack.network_stack.has_gateway:
self.skip('Server network has no gateway router')
self.test_ping_port(device_id=self.stack.network_stack.gateway_id)
# --- Test l3-ha extension ----------------------------------------------------
@neutron.skip_if_missing_networking_extensions('l3-ha')
@neutron.skip_if_missing_networking_agents(binary='neutron-l3-agent',
count=2)
class L3HAPortTest(PortTest):
#: Resources stack with floating IP and Nova server
stack = tobiko.required_setup_fixture(stacks.L3haServerStackFixture)
@neutron.skip_if_missing_networking_extensions('l3-ha')
@neutron.skip_if_missing_networking_agents(binary='neutron-l3-agent',
count=2)
class CentosServerL3HAPortTestWith(PortTest):
#: Resources stack with floating IP and Nova server
stack = tobiko.required_setup_fixture(stacks.L3haCentosServerStackFixture)
@neutron.skip_if_missing_networking_extensions('l3-ha')
@neutron.skip_if_missing_networking_agents(binary='neutron-l3-agent',
count=2)
class UbuntuServerL3HAPortTestWith(PortTest):
#: Resources stack with floating IP and Nova server
stack = tobiko.required_setup_fixture(stacks.L3haUbuntuServerStackFixture)
```
#### File: scenario/octavia/test_traffic.py
```python
from __future__ import absolute_import
import time
from oslo_log import log
import tobiko
from tobiko import config
from tobiko.openstack import keystone
from tobiko.openstack import octavia
from tobiko.openstack import stacks
from tobiko.shell import ssh
from tobiko.shell import sh
from tobiko.tests import base
LOG = log.getLogger(__name__)
CONF = config.CONF
CURL_OPTIONS = "-f --connect-timeout 2 -g"
class OctaviaOtherServerStackFixture(
stacks.OctaviaServerStackFixture):
pass
class OctaviaOtherMemberServerStackFixture(
stacks.OctaviaMemberServerStackFixture):
server_stack = tobiko.required_setup_fixture(
OctaviaOtherServerStackFixture)
class RequestException(tobiko.TobikoException):
message = ("Error while sending request to server "
"(command was '{command}'): {error}")
class TimeoutException(tobiko.TobikoException):
message = "Timeout exception: {reason}"
@keystone.skip_if_missing_service(name='octavia')
class OctaviaBasicTrafficScenarioTest(base.TobikoTest):
"""Octavia traffic scenario test.
Create a load balancer with 2 members that run a server application,
Create a client that is connected to the load balancer VIP port,
    Generate network traffic from the client to the load balancer.
"""
loadbalancer_stack = tobiko.required_setup_fixture(
stacks.OctaviaLoadbalancerStackFixture)
listener_stack = tobiko.required_setup_fixture(
stacks.OctaviaListenerStackFixture)
member1_stack = tobiko.required_setup_fixture(
stacks.OctaviaMemberServerStackFixture)
member2_stack = tobiko.required_setup_fixture(
OctaviaOtherMemberServerStackFixture)
client_stack = tobiko.required_setup_fixture(
stacks.OctaviaClientServerStackFixture)
members_count = 2
def setUp(self):
super(OctaviaBasicTrafficScenarioTest, self).setUp()
# Wait for members
self._check_member(self.member1_stack)
self._check_member(self.member2_stack)
# Check if load balancer is functional
self._check_loadbalancer()
def _request(self, client_stack, server_ip_address, protocol, server_port):
"""Perform a request on a server.
        Returns the response in case of success, raises a RequestException
otherwise.
"""
if ':' in server_ip_address:
# Add square brackets around IPv6 address to please curl
server_ip_address = "[{}]".format(server_ip_address)
cmd = "curl {} {}://{}:{}/id".format(
CURL_OPTIONS, protocol.lower(), server_ip_address, server_port)
ssh_client = ssh.ssh_client(
client_stack.floating_ip_address,
username=client_stack.image_fixture.username)
ret = sh.ssh_execute(ssh_client, cmd)
if ret.exit_status != 0:
raise RequestException(command=cmd,
error=ret.stderr)
return ret.stdout
def _wait_resource_operating_status(self, resource_type, operating_status,
resource_get, *args):
start = time.time()
while time.time() - start < CONF.tobiko.octavia.check_timeout:
res = resource_get(*args)
if res['operating_status'] == operating_status:
return
time.sleep(CONF.tobiko.octavia.check_interval)
raise TimeoutException(
reason=("Cannot get operating_status '{}' from {} {} "
"within the timeout period.".format(
operating_status, resource_type, args)))
def _wait_lb_operating_status(self, lb_id, operating_status):
LOG.debug("Wait for loadbalancer {} to have '{}' "
"operating_status".format(lb_id, operating_status))
self._wait_resource_operating_status("loadbalancer",
operating_status,
octavia.get_loadbalancer,
lb_id)
def _wait_for_request_data(self, client_stack, server_ip_address,
server_protocol, server_port):
"""Wait until a request on a server succeeds
Throws a TimeoutException after CONF.tobiko.octavia.check_timeout
if the server doesn't reply.
"""
start = time.time()
while time.time() - start < CONF.tobiko.octavia.check_timeout:
try:
ret = self._request(client_stack, server_ip_address,
server_protocol, server_port)
except Exception as e:
LOG.warning("Received exception {} while performing a "
"request".format(e))
else:
return ret
time.sleep(CONF.tobiko.octavia.check_interval)
raise TimeoutException(
reason=("Cannot get data from {} on port {} with "
"protocol {} within the timeout period.".format(
server_ip_address, server_port,
server_protocol)))
def _check_loadbalancer(self):
"""Wait until the load balancer is functional."""
# Check load balancer status
loadbalancer_id = self.loadbalancer_stack.loadbalancer_id
self._wait_lb_operating_status(loadbalancer_id, 'ONLINE')
loadbalancer_vip = self.loadbalancer_stack.loadbalancer_vip
loadbalancer_port = self.listener_stack.lb_port
loadbalancer_protocol = self.listener_stack.lb_protocol
self._wait_for_request_data(self.client_stack,
loadbalancer_vip,
loadbalancer_protocol,
loadbalancer_port)
def _check_member(self, member_stack):
"""Wait until a member server is functional."""
member_ip = member_stack.server_stack.floating_ip_address
member_port = member_stack.application_port
member_protocol = self.listener_stack.pool_protocol
self._wait_for_request_data(self.client_stack, member_ip,
member_protocol, member_port)
def _check_members_balanced(self):
"""Check if traffic is properly balanced between members."""
replies = {}
loadbalancer_vip = self.loadbalancer_stack.loadbalancer_vip
loadbalancer_port = self.listener_stack.lb_port
loadbalancer_protocol = self.listener_stack.lb_protocol
for _ in range(20):
content = self._request(self.client_stack, loadbalancer_vip,
loadbalancer_protocol, loadbalancer_port)
if content not in replies:
replies[content] = 0
replies[content] += 1
# wait one second (required when using cirros' nc fake webserver)
time.sleep(1)
LOG.debug("Replies from load balancer: {}".format(
replies))
# assert that 'members_count' servers replied
self.assertEqual(len(replies), self.members_count)
if self.listener_stack.lb_algorithm == 'ROUND_ROBIN':
# assert that requests have been fairly dispatched (each server
# received the same number of requests)
self.assertEqual(len(set(replies.values())), 1)
def test_traffic(self):
self._check_members_balanced()
```
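
Both `_wait_resource_operating_status` and `_wait_for_request_data` above are instances of the same poll-until-timeout loop. A self-contained sketch of that pattern, with illustrative timeout values in place of the `CONF.tobiko.octavia` options the test actually reads:

```python
import time


def wait_for(predicate, timeout=300.0, interval=5.0):
    """Poll `predicate` until it returns a truthy value or `timeout` expires."""
    start = time.time()
    while time.time() - start < timeout:
        result = predicate()
        if result:
            return result
        time.sleep(interval)
    raise TimeoutError("condition not met within {} seconds".format(timeout))


# Hypothetical usage: wait until some status getter reports 'ONLINE'.
# wait_for(lambda: get_status() == 'ONLINE', timeout=60, interval=2)
```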
#### File: openstack/keystone/test_clouds_file.py
```python
from __future__ import absolute_import
import json
import os
import tempfile
import typing # noqa
import yaml
import tobiko
from tobiko.openstack import keystone
from tobiko.openstack.keystone import _clouds_file
from tobiko.tests.unit import openstack
from tobiko.tests.unit.openstack.keystone import test_credentials
def make_clouds_content(cloud_name, api_version=None, auth=None):
content = {}
if api_version is not None:
content['identity_api_version'] = api_version
if auth is not None:
content['auth'] = auth
return {'clouds': {cloud_name: content}}
class CloudsFileFixture(tobiko.SharedFixture):
cloud_name = None # type: str
api_version = None # type: str
auth = None # type: typing.Dict[str, typing.Any]
clouds_content = None
clouds_file = None
suffix = '.yaml'
create_file = True
def __init__(self, cloud_name=None, api_version=None, auth=None,
clouds_file=None, suffix=None, create_file=None,
clouds_content=None):
super(CloudsFileFixture, self).__init__()
if cloud_name is not None:
self.cloud_name = cloud_name
if api_version is not None:
self.api_version = api_version
if auth is not None:
self.auth = auth
if clouds_file is not None:
self.clouds_file = clouds_file
if suffix is not None:
self.suffix = suffix
if create_file is not None:
self.create_file = create_file
if clouds_content is not None:
self.clouds_content = clouds_content
def setup_fixture(self):
clouds_content = self.clouds_content
if clouds_content is None:
self.clouds_content = clouds_content = make_clouds_content(
cloud_name=self.cloud_name, api_version=self.api_version,
auth=self.auth)
if self.create_file:
clouds_file = self.clouds_file
if clouds_file is None:
fd, clouds_file = tempfile.mkstemp(suffix=self.suffix)
self.addCleanup(os.remove, clouds_file)
self.clouds_file = clouds_file
clouds_stream = os.fdopen(fd, 'wt')
else:
                clouds_stream = open(clouds_file, 'wt')
try:
if self.suffix in _clouds_file.JSON_SUFFIXES:
json.dump(clouds_content, clouds_stream)
elif self.suffix in _clouds_file.YAML_SUFFIXES:
yaml.safe_dump(clouds_content, clouds_stream)
finally:
clouds_stream.close()
class V2CloudsFileFixture(CloudsFileFixture):
cloud_name = 'V2-TEST_CLOUD'
auth = test_credentials.V2_PARAMS
class V3CloudsFileFixture(CloudsFileFixture):
cloud_name = 'V3-TEST_CLOUD'
auth = test_credentials.V3_PARAMS
class CloudsFileKeystoneCredentialsFixtureTest(openstack.OpenstackTest):
config = tobiko.required_setup_fixture(
_clouds_file.DefaultCloudsFileConfig)
def test_init(self):
fixture = keystone.CloudsFileKeystoneCredentialsFixture()
self.assertEqual(self.config.cloud_name, fixture.cloud_name)
self.assertIsNone(fixture.clouds_content)
self.assertIsNone(fixture.clouds_file)
self.assertEqual(self.config.clouds_files, fixture.clouds_files)
def test_init_with_cloud_name(self):
fixture = keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name='cloud-name')
self.assertEqual('cloud-name', fixture.cloud_name)
def test_init_with_clouds_content(self):
fixture = keystone.CloudsFileKeystoneCredentialsFixture(
clouds_content={})
self.assertEqual({}, fixture.clouds_content)
def test_init_with_clouds_file(self):
fixture = keystone.CloudsFileKeystoneCredentialsFixture(
clouds_file='cloud-file')
self.assertEqual('cloud-file', fixture.clouds_file)
def test_init_with_clouds_files(self):
fixture = keystone.CloudsFileKeystoneCredentialsFixture(
clouds_files=['a', 'b', 'd'])
self.assertEqual(['a', 'b', 'd'], fixture.clouds_files)
def test_setup_from_default_clouds_files(self):
file_fixture = self.useFixture(V3CloudsFileFixture())
self.patch(self.config, 'clouds_files',
['/a', file_fixture.clouds_file, '/c'])
credentials_fixture = self.useFixture(
keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name=file_fixture.cloud_name))
self.assertEqual(file_fixture.clouds_content,
credentials_fixture.clouds_content)
self.assertEqual(test_credentials.V3_PARAMS,
credentials_fixture.credentials.to_dict())
def test_setup_from_json(self):
file_fixture = self.useFixture(V3CloudsFileFixture(suffix='.json'))
credentials_fixture = self.useFixture(
keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name=file_fixture.cloud_name,
clouds_file=file_fixture.clouds_file))
self.assertEqual(file_fixture.clouds_content,
credentials_fixture.clouds_content)
self.assertEqual(test_credentials.V3_PARAMS,
credentials_fixture.credentials.to_dict())
def test_setup_from_yaml(self):
file_fixture = self.useFixture(V3CloudsFileFixture(suffix='.yaml'))
credentials_fixture = self.useFixture(
keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name=file_fixture.cloud_name,
clouds_file=file_fixture.clouds_file))
self.assertEqual(file_fixture.clouds_content,
credentials_fixture.clouds_content)
self.assertEqual(test_credentials.V3_PARAMS,
credentials_fixture.credentials.to_dict())
def test_setup_from_yml(self):
file_fixture = self.useFixture(V3CloudsFileFixture(suffix='.yml'))
credentials_fixture = self.useFixture(
keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name=file_fixture.cloud_name,
clouds_file=file_fixture.clouds_file))
self.assertEqual(file_fixture.clouds_content,
credentials_fixture.clouds_content)
self.assertEqual(test_credentials.V3_PARAMS,
credentials_fixture.credentials.to_dict())
def test_setup_v2_credentials(self):
file_fixture = self.useFixture(V2CloudsFileFixture())
credentials_fixture = self.useFixture(
keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name=file_fixture.cloud_name,
clouds_file=file_fixture.clouds_file))
self.assertEqual(file_fixture.clouds_content,
credentials_fixture.clouds_content)
self.assertEqual(test_credentials.V2_PARAMS,
credentials_fixture.credentials.to_dict())
def test_setup_with_cloud_name(self):
file_fixture = self.useFixture(V3CloudsFileFixture())
credentials_fixture = keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name='cloud-name',
clouds_file=file_fixture.clouds_file)
ex = self.assertRaises(ValueError, tobiko.setup_fixture,
credentials_fixture)
self.assertEqual("No such cloud with name 'cloud-name' in file " +
repr(file_fixture.clouds_file), str(ex))
def test_setup_with_cloud_name_from_env(self):
self.patch(self.config, 'cloud_name', None)
file_fixture = self.useFixture(V2CloudsFileFixture())
self.patch(os, 'environ', {'OS_CLOUD': file_fixture.cloud_name})
credentials_fixture = keystone.CloudsFileKeystoneCredentialsFixture(
clouds_file=file_fixture.clouds_file)
self.assertIsNone(credentials_fixture.cloud_name)
tobiko.setup_fixture(credentials_fixture)
self.assertEqual(file_fixture.cloud_name,
credentials_fixture.cloud_name)
def test_setup_with_empty_cloud_name(self):
file_fixture = self.useFixture(V2CloudsFileFixture())
credentials_fixture = keystone.CloudsFileKeystoneCredentialsFixture(
clouds_file=file_fixture.clouds_file,
cloud_name='')
self.assertIsNone(credentials_fixture.credentials)
self.assertEqual('', credentials_fixture.cloud_name)
tobiko.setup_fixture(credentials_fixture)
self.assertIsNone(credentials_fixture.credentials)
self.assertEqual('', credentials_fixture.cloud_name)
def test_setup_with_empty_cloud_name_from_env(self):
self.patch(self.config, 'cloud_name', None)
file_fixture = self.useFixture(V2CloudsFileFixture())
self.patch(os, 'environ', {'OS_CLOUD': ''})
credentials_fixture = keystone.CloudsFileKeystoneCredentialsFixture(
clouds_file=file_fixture.clouds_file)
self.assertIsNone(credentials_fixture.credentials)
self.assertIsNone(credentials_fixture.cloud_name)
tobiko.setup_fixture(credentials_fixture)
self.assertIsNone(credentials_fixture.credentials)
self.assertIsNone(credentials_fixture.cloud_name)
def test_setup_with_no_cloud_name(self):
self.patch(self.config, 'cloud_name', None)
file_fixture = self.useFixture(V2CloudsFileFixture())
credentials_fixture = keystone.CloudsFileKeystoneCredentialsFixture(
clouds_file=file_fixture.clouds_file)
self.assertIsNone(credentials_fixture.credentials)
self.assertIsNone(credentials_fixture.cloud_name)
tobiko.setup_fixture(credentials_fixture)
self.assertIsNone(credentials_fixture.credentials)
self.assertIsNone(credentials_fixture.cloud_name)
def test_setup_with_no_clouds_section(self):
fixture = keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name='cloud-name', clouds_content={'other_data': None},
clouds_file='clouds-file')
ex = self.assertRaises(ValueError, tobiko.setup_fixture, fixture)
self.assertEqual('cloud-name', fixture.cloud_name)
self.assertEqual({'other_data': None}, fixture.clouds_content)
self.assertEqual("'clouds' section not found in clouds file "
"'clouds-file'", str(ex))
def test_setup_with_empty_clouds_content(self):
fixture = keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name='cloud-name', clouds_content={})
ex = self.assertRaises(ValueError, tobiko.setup_fixture, fixture)
self.assertEqual('cloud-name', fixture.cloud_name)
self.assertEqual({}, fixture.clouds_content)
self.assertEqual('Invalid clouds file content: {}', str(ex))
def test_setup_with_no_auth(self):
clouds_content = make_clouds_content('cloud-name')
fixture = keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name='cloud-name',
clouds_content=clouds_content,
clouds_file='cloud-file')
ex = self.assertRaises(ValueError, tobiko.setup_fixture, fixture)
self.assertEqual('cloud-name', fixture.cloud_name)
self.assertEqual(
"No such 'auth' section in cloud file 'cloud-file' for cloud "
"name 'cloud-name'", str(ex))
def test_setup_with_no_auth_url(self):
clouds_content = make_clouds_content('cloud-name', auth={})
fixture = keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name='cloud-name',
clouds_content=clouds_content,
clouds_file='cloud-file')
ex = self.assertRaises(ValueError, tobiko.setup_fixture, fixture)
self.assertEqual('cloud-name', fixture.cloud_name)
self.assertEqual(
"No such 'auth_url' in file 'cloud-file' for cloud name "
"'cloud-name'", str(ex))
def test_setup_without_clouds_file(self):
self.patch(self.config, 'clouds_files', ['/a', '/b', '/c'])
fixture = keystone.CloudsFileKeystoneCredentialsFixture(
cloud_name='cloud-name')
ex = self.assertRaises(_clouds_file.CloudsFileNotFoundError,
tobiko.setup_fixture, fixture)
self.assertEqual('cloud-name', fixture.cloud_name)
self.assertEqual("No such clouds file(s): /a, /b, /c", str(ex))
def test_setup_with_non_existing_clouds_file(self):
fixture = keystone.CloudsFileKeystoneCredentialsFixture(
clouds_file='/a.yaml',
cloud_name='cloud-name')
ex = self.assertRaises(_clouds_file.CloudsFileNotFoundError,
tobiko.setup_fixture, fixture)
self.assertEqual("No such clouds file(s): /a.yaml", str(ex))
```
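
For reference, the fixtures above write the clouds file from the dictionary returned by `make_clouds_content`. A small sketch of what that content looks like once serialized; the cloud name and auth values are illustrative only:

```python
import yaml

# Same shape as make_clouds_content('example-cloud', api_version='3', auth=...)
content = {
    'clouds': {
        'example-cloud': {
            'identity_api_version': '3',
            'auth': {
                'auth_url': 'http://keystone.example.com:5000/v3',
                'username': 'demo',
                'project_name': 'demo',
                'password': 'secret',
            },
        }
    }
}

# This is what a '.yaml'/'.yml' clouds file produced by the fixture contains;
# for a '.json' suffix the fixture uses json.dump() instead.
print(yaml.safe_dump(content))
```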
#### File: openstack/neutron/test_client.py
```python
from __future__ import absolute_import
from neutronclient.v2_0 import client as neutronclient
from tobiko.openstack import keystone
from tobiko.openstack import neutron
from tobiko.tests.unit import openstack
from tobiko.tests.unit.openstack import test_client
class NeutronClientFixtureTest(test_client.OpenstackClientFixtureTest):
def create_client(self, session=None):
return neutron.NeutronClientFixture(session=session)
class GetNeutronClientTest(openstack.OpenstackTest):
def test_get_neutron_client(self, session=None, shared=True):
client1 = neutron.get_neutron_client(session=session, shared=shared)
client2 = neutron.get_neutron_client(session=session, shared=shared)
if shared:
self.assertIs(client1, client2)
else:
self.assertIsNot(client1, client2)
self.assertIsInstance(client1, neutronclient.Client)
self.assertIsInstance(client2, neutronclient.Client)
def test_get_neutron_client_with_not_shared(self):
self.test_get_neutron_client(shared=False)
def test_get_neutron_client_with_session(self):
session = keystone.get_keystone_session()
self.test_get_neutron_client(session=session)
class NeutronClientTest(openstack.OpenstackTest):
def test_neutron_client_with_none(self):
default_client = neutron.get_neutron_client()
client = neutron.neutron_client(None)
self.assertIsInstance(client, neutronclient.Client)
self.assertIs(default_client, client)
def test_neutron_client_with_client(self):
default_client = neutron.get_neutron_client()
client = neutron.neutron_client(default_client)
self.assertIsInstance(client, neutronclient.Client)
self.assertIs(default_client, client)
def test_neutron_client_with_fixture(self):
fixture = neutron.NeutronClientFixture()
client = neutron.neutron_client(fixture)
self.assertIsInstance(client, neutronclient.Client)
self.assertIs(client, fixture.client)
```
#### File: tests/unit/test_config.py
```python
from __future__ import absolute_import
import os
import mock
from tobiko.tests import unit
from tobiko import config
CONF = config.CONF
class HttpProxyFixtureTest(unit.TobikoUnitTest):
MY_HTTP_PROXY = 'http://my-server:8080'
MY_NO_PROXY = '127.0.0.1'
def setUp(self):
super(HttpProxyFixtureTest, self).setUp()
self.patch(os, 'environ', {})
self.patch(CONF.tobiko, 'http',
http_proxy=None, https_proxy=None, no_proxy=None)
def test_init(self):
fixture = config.HttpProxyFixture()
self.assertIsNone(fixture.http_proxy)
self.assertEqual({}, os.environ)
def test_setup(self):
fixture = config.HttpProxyFixture()
fixture.setUp()
self.assertIsNone(fixture.http_proxy)
self.assertEqual({}, os.environ)
def test_setup_from_environ_http_proxy(self):
os.environ['http_proxy'] = self.MY_HTTP_PROXY
fixture = config.HttpProxyFixture()
fixture.setUp()
self.assertEqual({'http_proxy': self.MY_HTTP_PROXY}, os.environ)
self.assertEqual(self.MY_HTTP_PROXY, fixture.http_proxy)
def test_setup_from_environ_https_proxy(self):
os.environ['https_proxy'] = self.MY_HTTP_PROXY
fixture = config.HttpProxyFixture()
fixture.setUp()
self.assertEqual({'https_proxy': self.MY_HTTP_PROXY}, os.environ)
self.assertEqual(self.MY_HTTP_PROXY, fixture.https_proxy)
def test_setup_from_environ_no_proxy(self):
os.environ['no_proxy'] = self.MY_NO_PROXY
fixture = config.HttpProxyFixture()
fixture.setUp()
self.assertEqual({'no_proxy': self.MY_NO_PROXY}, os.environ)
self.assertEqual(self.MY_NO_PROXY, fixture.no_proxy)
def test_setup_from_tobiko_conf_http_proxy(self):
self.patch(CONF.tobiko.http, 'http_proxy', self.MY_HTTP_PROXY)
fixture = config.HttpProxyFixture()
fixture.setUp()
self.assertEqual(self.MY_HTTP_PROXY, fixture.http_proxy)
self.assertEqual({'http_proxy': self.MY_HTTP_PROXY}, os.environ)
def test_setup_from_tobiko_conf_https_proxy(self):
self.patch(CONF.tobiko.http, 'https_proxy', self.MY_HTTP_PROXY)
fixture = config.HttpProxyFixture()
fixture.setUp()
self.assertEqual(self.MY_HTTP_PROXY, fixture.https_proxy)
self.assertEqual({'https_proxy': self.MY_HTTP_PROXY}, os.environ)
def test_setup_from_tobiko_conf_no_proxy(self):
self.patch(CONF.tobiko.http, 'http_proxy', self.MY_HTTP_PROXY)
self.patch(CONF.tobiko.http, 'no_proxy', self.MY_NO_PROXY)
fixture = config.HttpProxyFixture()
fixture.setUp()
self.assertEqual(self.MY_NO_PROXY, fixture.no_proxy)
self.assertEqual(self.MY_HTTP_PROXY, fixture.http_proxy)
self.assertEqual({'no_proxy': self.MY_NO_PROXY,
'http_proxy': self.MY_HTTP_PROXY}, os.environ)
def test_get_bool_env(self):
env_option = "TEST_OPTION"
true_values = ['True', 'true', 'TRUE', 'TrUe', '1']
false_values = ['False', 'false', 'FALSE', 'FaLsE', '0']
invalid_values = [None, 'something else', '']
for value in true_values:
with mock.patch.dict('os.environ', {env_option: value}):
self.assertIs(True, config.get_bool_env(env_option))
for value in false_values:
with mock.patch.dict('os.environ', {env_option: value}):
self.assertIs(False, config.get_bool_env(env_option))
for value in invalid_values:
with mock.patch.dict('os.environ', {env_option: value}):
self.assertIsNone(config.get_bool_env(env_option))
```
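
The last test above pins down the expected semantics of `config.get_bool_env`: true-ish strings map to `True`, false-ish strings to `False`, and anything else (including an unset variable) maps to `None`. One possible implementation consistent with those expectations — the real tobiko implementation may differ in detail:

```python
import os


def get_bool_env(name):
    """Illustrative sketch matching the behaviour asserted in test_get_bool_env."""
    value = os.environ.get(name)
    if value is None:
        return None
    if value.lower() in ('true', '1'):
        return True
    if value.lower() in ('false', '0'):
        return False
    return None
```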
#### File: tests/unit/test_fixture.py
```python
from __future__ import absolute_import
import os
import sys
import fixtures
import mock
import testtools
import tobiko
from tobiko.tests import unit
def canonical_name(cls):
return __name__ + '.' + cls.__name__
class MyBaseFixture(tobiko.SharedFixture):
def __init__(self):
super(MyBaseFixture, self).__init__()
self.setup_fixture = mock.Mock(
specs=tobiko.SharedFixture.setup_fixture)
self.cleanup_fixture = mock.Mock(
specs=tobiko.SharedFixture.cleanup_fixture)
class MySkyppingFixture(tobiko.SharedFixture):
def setup_fixture(self):
tobiko.skip('some-reason')
def cleanup_fixture(self):
tobiko.skip('some-reason')
class MyFixture(MyBaseFixture):
pass
class GetFixtureTest(unit.TobikoUnitTest):
def test_by_name(self):
self._test_get_fixture(canonical_name(MyFixture))
def test_by_type(self):
self._test_get_fixture(MyFixture)
def test_by_instance(self):
self._test_get_fixture(MyFixture())
def _test_get_fixture(self, obj):
fixture = tobiko.get_fixture(obj)
self.assertIsInstance(fixture, MyFixture)
self.assertIs(fixture, tobiko.get_fixture(obj))
if isinstance(obj, fixtures.Fixture):
self.assertIs(obj, fixture)
else:
self.assertIs(fixture, tobiko.get_fixture(
canonical_name(MyFixture)))
fixture.setup_fixture.assert_not_called()
fixture.cleanup_fixture.assert_not_called()
class GetFixtureNameTest(unit.TobikoUnitTest):
def test_with_instance(self):
fixture = MyFixture()
result = tobiko.get_fixture_name(fixture)
self.assertEqual(canonical_name(MyFixture), result)
def test_with_other_type(self):
obj = object()
ex = self.assertRaises(TypeError, tobiko.get_fixture_name, obj)
self.assertEqual('Object {obj!r} is not a fixture.'.format(obj=obj),
str(ex))
class GetFixtureClassTest(unit.TobikoUnitTest):
def test_with_name(self):
result = tobiko.get_fixture_class(canonical_name(MyFixture))
self.assertIs(MyFixture, result)
def test_with_type(self):
result = tobiko.get_fixture_class(MyFixture)
self.assertIs(MyFixture, result)
def test_with_instance(self):
result = tobiko.get_fixture_class(MyFixture())
self.assertIs(MyFixture, result)
class GetFixtureDirTest(unit.TobikoUnitTest):
expected_dir = os.path.dirname(__file__)
def test_with_name(self):
actual_dir = tobiko.get_fixture_dir(canonical_name(MyFixture))
self.assertEqual(self.expected_dir, actual_dir)
def test_with_type(self):
actual_dir = tobiko.get_fixture_dir(MyFixture)
self.assertEqual(self.expected_dir, actual_dir)
def test_with_instance(self):
actual_dir = tobiko.get_fixture_dir(MyFixture())
self.assertEqual(self.expected_dir, actual_dir)
class RemoveFixtureTest(unit.TobikoUnitTest):
def test_with_name(self):
self._test_remove_fixture(canonical_name(MyFixture))
def test_with_type(self):
self._test_remove_fixture(MyFixture)
def _test_remove_fixture(self, obj):
fixture = tobiko.get_fixture(obj)
result = tobiko.remove_fixture(obj)
self.assertIs(fixture, result)
self.assertIsNot(fixture, tobiko.get_fixture(obj))
fixture.setup_fixture.assert_not_called()
fixture.cleanup_fixture.assert_not_called()
class SetupFixtureTest(unit.TobikoUnitTest):
def test_with_name(self):
self._test_setup_fixture(canonical_name(MyFixture))
def test_with_type(self):
self._test_setup_fixture(MyFixture)
def test_with_instance(self):
self._test_setup_fixture(MyFixture2())
def _test_setup_fixture(self, obj):
result = tobiko.setup_fixture(obj)
self.assertIs(tobiko.get_fixture(obj), result)
result.setup_fixture.assert_called_once_with()
result.cleanup_fixture.assert_not_called()
class ResetFixtureTest(unit.TobikoUnitTest):
def test_with_name(self):
self._test_reset_fixture(canonical_name(MyFixture))
def test_with_type(self):
self._test_reset_fixture(MyFixture)
def test_with_instance(self):
self._test_reset_fixture(MyFixture2())
def test_after_setup(self):
fixture = MyFixture2()
fixture.setUp()
fixture.setup_fixture.reset_mock()
self._test_reset_fixture(fixture)
def test_after_cleanup(self):
fixture = MyFixture2()
fixture.cleanUp()
self._test_reset_fixture(fixture)
def _test_reset_fixture(self, obj, should_clean=True):
result = tobiko.reset_fixture(obj)
self.assertIs(tobiko.get_fixture(obj), result)
result.setup_fixture.assert_called_once_with()
if should_clean:
result.cleanup_fixture.assert_called_once_with()
else:
result.cleanup_fixture.assert_not_called()
class FailingFixture(tobiko.SharedFixture):
def setup_fixture(self):
raise RuntimeError('raised by setup_fixture')
def cleanup_fixture(self):
raise RuntimeError('raised by cleanup_fixture')
class FailingSetupFixtureWhenFailingTest(unit.TobikoUnitTest):
def test_with_name(self):
self._test_setup_fixture(canonical_name(FailingFixture))
def test_with_type(self):
self._test_setup_fixture(FailingFixture)
def test_with_instance(self):
self._test_setup_fixture(FailingFixture())
def _test_setup_fixture(self, obj):
ex = self.assertRaises(RuntimeError, tobiko.setup_fixture, obj)
self.assertEqual('raised by setup_fixture', str(ex))
class CleanupFixtureTest(unit.TobikoUnitTest):
def test_with_name(self):
self._test_cleanup_fixture(canonical_name(MyFixture))
def test_with_type(self):
self._test_cleanup_fixture(MyFixture)
def test_with_instance(self):
self._test_cleanup_fixture(MyFixture())
def _test_cleanup_fixture(self, obj):
result = tobiko.cleanup_fixture(obj)
self.assertIs(tobiko.get_fixture(obj), result)
result.setup_fixture.assert_not_called()
result.cleanup_fixture.assert_called_once_with()
class MyFixtureWithProperty(MyBaseFixture):
@tobiko.fixture_property
def some_property(self):
return id(self)
class FixturePropertyTest(unit.TobikoUnitTest):
def test_with_instance(self):
fixture = tobiko.get_fixture(MyFixtureWithProperty)
self.assertEqual(id(fixture), fixture.some_property)
def test_without_instance(self):
fixture = tobiko.get_fixture(MyFixtureWithProperty)
self.assertEqual(id(fixture), MyFixtureWithProperty.some_property)
class MyFixture2(MyBaseFixture):
pass
class MyRequiredFixture(MyBaseFixture):
pass
class MyRequiredSetupFixture(MyBaseFixture):
pass
class ListRequiredFixtureTest(unit.TobikoUnitTest):
required_fixture = tobiko.required_fixture(MyRequiredFixture)
required_setup_fixture = tobiko.required_setup_fixture(
MyRequiredSetupFixture)
def test_with_module(self):
module = sys.modules[__name__]
result = tobiko.list_required_fixtures([module])
self.assertEqual([], result)
def test_with_module_name(self):
result = tobiko.list_required_fixtures([__name__])
self.assertEqual([], result)
def test_with_testcase_type(self):
result = tobiko.list_required_fixtures([ListRequiredFixtureTest])
self.assertEqual([canonical_name(MyRequiredFixture),
canonical_name(MyRequiredSetupFixture)], result)
def test_with_testcase_name(self):
result = tobiko.list_required_fixtures(
[canonical_name(ListRequiredFixtureTest)])
self.assertEqual([canonical_name(MyRequiredFixture),
canonical_name(MyRequiredSetupFixture)], result)
def test_with_unbound_method(self, fixture=MyFixture, fixture2=MyFixture2):
result = tobiko.list_required_fixtures(
[ListRequiredFixtureTest.test_with_unbound_method])
self.assertEqual([canonical_name(fixture),
canonical_name(fixture2),
canonical_name(MyRequiredFixture),
canonical_name(MyRequiredSetupFixture)], result)
def test_with_bound_method(self, fixture=MyFixture, fixture2=MyFixture2):
result = tobiko.list_required_fixtures([self.test_with_bound_method])
self.assertEqual([canonical_name(fixture),
canonical_name(fixture2),
canonical_name(MyRequiredFixture),
canonical_name(MyRequiredSetupFixture)], result)
def test_with_method_name(self, fixture=MyFixture, fixture2=MyFixture2):
result = tobiko.list_required_fixtures([self.id()])
self.assertEqual([canonical_name(fixture),
canonical_name(fixture2),
canonical_name(MyRequiredFixture),
canonical_name(MyRequiredSetupFixture)], result)
def test_with_fixture_name(self):
result = tobiko.list_required_fixtures([canonical_name(MyFixture)])
self.assertEqual([canonical_name(MyFixture)], result)
def test_with_fixture(self):
result = tobiko.list_required_fixtures([MyFixture()])
self.assertEqual([canonical_name(MyFixture)], result)
def test_with_fixture_type(self):
result = tobiko.list_required_fixtures([MyFixture])
self.assertEqual([canonical_name(MyFixture)], result)
def test_required_fixture_property(self):
fixture = self.required_fixture
self.assertIsInstance(fixture, MyRequiredFixture)
fixture.setup_fixture.assert_not_called()
fixture.cleanup_fixture.assert_not_called()
def test_required_setup_fixture_property(self):
fixture = self.required_setup_fixture
self.assertIsInstance(fixture, MyRequiredSetupFixture)
fixture.setup_fixture.assert_called_once_with()
fixture.cleanup_fixture.assert_not_called()
class SharedFixtureTest(unit.TobikoUnitTest):
def setUp(self):
super(SharedFixtureTest, self).setUp()
tobiko.remove_fixture(MyFixture)
def test_init(self):
fixture = MyFixture()
fixture.setup_fixture.assert_not_called()
fixture.cleanup_fixture.assert_not_called()
def test_get(self):
fixture = MyFixture.get()
self.assertIs(tobiko.get_fixture(MyFixture), fixture)
def test_use_fixture(self):
fixture = MyFixture()
self.addCleanup(fixture.cleanup_fixture.assert_called_once_with)
self.useFixture(fixture)
fixture.setup_fixture.assert_called_once_with()
fixture.cleanup_fixture.assert_not_called()
self.useFixture(fixture)
fixture.setup_fixture.assert_called_once_with()
fixture.cleanup_fixture.assert_not_called()
def test_add_cleanup(self):
fixture = MyFixture()
self.addCleanup(fixture.cleanup_fixture.assert_called_once_with)
self.addCleanup(fixture.cleanUp)
self.addCleanup(fixture.cleanUp)
def test_setup(self):
fixture = MyFixture()
fixture.setUp()
fixture.setup_fixture.assert_called_once_with()
def test_setup_twice(self):
fixture = MyFixture()
fixture.setUp()
fixture.setUp()
fixture.setup_fixture.assert_called_once_with()
def test_setup_when_skipping(self):
fixture = MySkyppingFixture()
self.assertRaises(testtools.MultipleExceptions, fixture.setUp)
self.assertRaises(testtools.MultipleExceptions, fixture.setUp)
def test_cleanup(self):
fixture = MyFixture()
fixture.cleanUp()
fixture.cleanup_fixture.assert_called_once_with()
def test_cleanup_twice(self):
fixture = MyFixture()
fixture.cleanUp()
fixture.cleanUp()
fixture.cleanup_fixture.assert_called_once_with()
def test_cleanup_when_skipping(self):
fixture = MySkyppingFixture()
self.assertRaises(tobiko.SkipException, fixture.cleanUp)
self.assertRaises(testtools.MultipleExceptions, fixture.cleanUp)
def test_lifecycle(self):
fixture = MyFixture()
for call_count in range(3):
fixture.setUp()
fixture.setup_fixture.assert_has_calls([mock.call()] * call_count)
fixture.setUp()
fixture.setup_fixture.assert_has_calls([mock.call()] * call_count)
fixture.cleanUp()
fixture.cleanup_fixture.assert_has_calls(
[mock.call()] * call_count)
fixture.cleanUp()
fixture.cleanup_fixture.assert_has_calls(
[mock.call()] * call_count)
```
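
Taken together, the tests above describe the contract of `tobiko.SharedFixture`: repeated `setUp()` calls run `setup_fixture` only once until a `cleanUp()` closes the cycle, and fixtures can be addressed by instance, class or dotted name. A short usage sketch of that contract; the fixture class below is illustrative:

```python
import tobiko


class ExampleSharedFixture(tobiko.SharedFixture):

    def setup_fixture(self):
        print('expensive setup runs once per cycle')

    def cleanup_fixture(self):
        print('cleanup runs once per cycle')


tobiko.setup_fixture(ExampleSharedFixture)    # setup_fixture() executes
tobiko.setup_fixture(ExampleSharedFixture)    # shared: no second setup
tobiko.cleanup_fixture(ExampleSharedFixture)  # cleanup_fixture() executes
```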
#### File: tests/unit/test_testcase.py
```python
from __future__ import absolute_import
import os
import tobiko
from tobiko.tests import unit
class TestCasesManagerTest(unit.TobikoUnitTest):
test_path = os.path.dirname(__file__)
def setUp(self):
super(TestCasesManagerTest, self).setUp()
top_dir = os.path.abspath(self.test_path)
while os.path.isdir(top_dir) and top_dir != os.path.sep:
if os.path.isdir(os.path.join(top_dir, '.stestr')):
break
top_dir = os.path.dirname(top_dir)
else:
raise self.fail("Unable to find '.stestr' directory")
self.top_dir = top_dir
self.repo_url = top_dir
# Move to top directory
original_work_dir = os.getcwd()
os.chdir(self.top_dir)
self.addCleanup(os.chdir, original_work_dir)
def test_discover_testcases(self):
testcases = tobiko.discover_testcases(test_path=self.test_path,
top_dir=self.top_dir,
repo_url=self.repo_url,
filters=[self.id()])
self.assertIn(self.id(), testcases)
```
#### File: tobiko/tripleo/undercloud.py
```python
from __future__ import absolute_import
import tobiko
from tobiko import config
from tobiko.openstack import keystone
from tobiko.shell import ssh
from tobiko.shell import sh
CONF = config.CONF
def undercloud_ssh_client():
host_config = undercloud_host_config()
return ssh.ssh_client(host='undercloud-0', host_config=host_config)
def undercloud_host_config():
return tobiko.setup_fixture(UndecloudHostConfig)
def fetch_os_env(rcfile):
command = ". {rcfile}; env | grep '^OS_'".format(rcfile=rcfile)
result = sh.execute(command, ssh_client=undercloud_ssh_client())
env = {}
for line in result.stdout.splitlines():
name, value = line.split('=')
env[name] = value
return env
def load_undercloud_rcfile():
return fetch_os_env(rcfile=CONF.tobiko.tripleo.undercloud_rcfile)
class UndercloudKeystoneCredentialsFixture(
keystone.EnvironKeystoneCredentialsFixture):
def get_environ(self):
return load_undercloud_rcfile()
def has_undercloud():
host_config = undercloud_host_config()
return bool(host_config.hostname)
skip_if_missing_undercloud = tobiko.skip_unless(
'TripleO undercloud hostname not configured', has_undercloud)
class UndecloudHostConfig(tobiko.SharedFixture):
host = 'undercloud-0'
hostname = None
port = None
username = None
key_filename = None
def __init__(self, **kwargs):
super(UndecloudHostConfig, self).__init__()
self._connect_parameters = ssh.gather_ssh_connect_parameters(**kwargs)
def setup_fixture(self):
self.hostname = CONF.tobiko.tripleo.undercloud_ssh_hostname
self.port = CONF.tobiko.tripleo.undercloud_ssh_port
self.username = CONF.tobiko.tripleo.undercloud_ssh_username
self.key_filename = CONF.tobiko.tripleo.undercloud_ssh_key_filename
@property
def connect_parameters(self):
parameters = ssh.gather_ssh_connect_parameters(self)
parameters.update(self._connect_parameters)
return parameters
def undercloud_keystone_client():
session = undercloud_keystone_session()
return keystone.get_keystone_client(session=session)
def undercloud_keystone_session():
return keystone.get_keystone_session(
credentials=UndercloudKeystoneCredentialsFixture)
def undercloud_keystone_credentials():
return tobiko.setup_fixture(
UndercloudKeystoneCredentialsFixture).credentials
``` |
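
One detail worth noting in `fetch_os_env` above: it splits each `NAME=value` line with `line.split('=')`, which unpacks cleanly only when the value itself contains no `=` sign. An illustrative, slightly more defensive variant of the same parsing (not the tobiko implementation):

```python
def parse_os_env(output):
    """Parse `env | grep '^OS_'` style output into a dict."""
    env = {}
    for line in output.splitlines():
        name, _, value = line.partition('=')  # tolerate '=' inside values
        env[name] = value
    return env


# parse_os_env("OS_AUTH_URL=http://10.0.0.1:5000/v3\nOS_PASSWORD=a=b")
# -> {'OS_AUTH_URL': 'http://10.0.0.1:5000/v3', 'OS_PASSWORD': 'a=b'}
```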
{
"source": "4383/tournesol-twitter-bot",
"score": 3
} |
#### File: tournesol-twitter-bot/tournesolbot/tournesol_api_functions.py
```python
import requests
import pandas as pd
from data.utils_dict import YT_2_TWITTER, already_shared_filepath
def remove_already_tweeted_videos_and_channels(df, language='en'):
    # Remove already tweeted videos, and channels tweeted in the last n days, from the top df
# Get already tweeted video
with open(already_shared_filepath[language], "r") as file:
already_tweeted = [x.strip('\n') for x in file.readlines()]
# Get already tweeted channels in the last n days
n_days = 7
last_days_channels = []
for video_id in already_tweeted[-n_days:]:
if video_id in df['video_id'].values:
channel = df.loc[df['video_id']==video_id,'uploader'].values[0]
        else: # Use the API if the video is not in the top anymore
channel = get_video_info(video_id)['uploader']
last_days_channels.append(channel)
    # Remove already tweeted videos
df = df[~df['video_id'].isin(already_tweeted)]
    # Remove channels already tweeted in the last n days
df = df[~df['uploader'].isin(last_days_channels)]
return df
def get_good_video(from_top,days_ago,language='en'):
# Get a good video for the daily tweet
# Get the top ranked video from Tournesol (through the API)
response = requests.get(f"https://tournesol.app/api/v2/videos/search_tournesol/?backfire_risk=100&better_habits=100&diversity_inclusion=100&engaging=100&entertaining_relaxing=100&importance=100&layman_friendly=100&pedagogy=100&reliability=100&days_ago_lte={days_ago}&language={language}&limit={from_top}").json()
df = pd.DataFrame.from_dict(response['results'], orient='columns')
# Keep videos rated by more than n contributors
n_contributor = 2
df['n_experts'] = df['n_public_experts'] + df['n_private_experts']
print(df[['video_id','name','uploader','tournesol_score','n_experts','reliability']])
df = df[df['n_experts']>n_contributor]
df = remove_already_tweeted_videos_and_channels(df, language)
# Remove video with a reliability lower than average
df = df[df['reliability']>1.0]
    print('\nList of remaining videos:')
print(df[['video_id','name','uploader','tournesol_score','n_experts','reliability']])
    # Choose a video randomly (weighted by Tournesol score) from the remaining list
df_rand = df.sample(weights=df['score'])
video_id = df_rand['video_id'].item()
return video_id
def get_video_info(video_id=''):
    # Get the dictionary of info (from the Tournesol API) for a video from its ID
# Get video info dictionary
print('Call API with video_id: ',video_id)
response = requests.get(f'https://tournesol.app/api/v2/videos/?video_id={video_id}').json()
if response['count']:
video_dict = response['results'][0]
print('The video has been found on Tournesol.')
return video_dict
else:
print('The video has not been found on Tournesol!')
return 0
def get_missing_channel_list(from_top,days_ago,language='en'):
    # Get a list of YouTube channels with no associated Twitter account in utils_dict.py
print(' Get channels with no Twitter account associated.')
    # Get top videos from the Tournesol API
response = requests.get(f"https://tournesol.app/api/v2/videos/search_tournesol/?backfire_risk=100&better_habits=100&diversity_inclusion=100&engaging=100&entertaining_relaxing=100&importance=100&layman_friendly=100&pedagogy=100&reliability=100&days_ago_lte={days_ago}&language={language}&limit={from_top}").json()
df = pd.DataFrame.from_dict(response['results'], orient='columns')
# Remove channel which are already in the dictionnary
df = df[~df['uploader'].isin(YT_2_TWITTER.keys())]
df['n_experts'] = df['n_public_experts'] + df['n_private_experts']
df = df[df['n_experts']>1]
# Print the list
    print('\nYouTube channels with no associated Twitter account yet:')
for channel in df['uploader'].tolist():
print(f'"{channel}":"None",')
``` |
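
`get_good_video` above picks the daily video with `df.sample(weights=df['score'])`, i.e. a single random row chosen with probability proportional to its Tournesol score. A toy, self-contained illustration of that call with made-up data:

```python
import pandas as pd

df = pd.DataFrame({
    'video_id': ['a', 'b', 'c'],
    'score': [1.0, 5.0, 10.0],  # higher score -> proportionally more likely
})

pick = df.sample(weights=df['score'])  # returns a one-row DataFrame
print(pick['video_id'].item())
```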
{
"source": "4383/warehouse",
"score": 2
} |
#### File: admin/views/test_banners.py
```python
import uuid
import pretend
import pytest
from pyramid.httpexceptions import HTTPNotFound
from sqlalchemy.orm.exc import NoResultFound
from webob.multidict import MultiDict
from warehouse.admin.views import banners as views
from warehouse.banners.models import Banner
from ....common.db.banners import BannerFactory
@pytest.fixture
def banner_data():
"""Fixture with minimal required data to create a banner"""
return {
"name": "<NAME>",
"text": "This should be the correct text",
"link_url": "https://samplebanner.com",
"end": "2021-07-30",
}
class TestBannerList:
def test_list_all_banners(self, db_request):
BannerFactory.create_batch(5)
banners = db_request.db.query(Banner).all()
result = views.banner_list(db_request)
assert result == {"banners": banners}
class TestCreateBanner:
def test_serialize_form_to_create_banner(self, db_request):
result = views.create_banner(db_request)
assert len(result) == 1
assert isinstance(result["form"], views.BannerForm)
def test_serialize_form_errors_if_invalid_post(self, db_request):
db_request.method = "POST"
db_request.POST["name"] = ""
db_request.POST["link_url"] = ""
db_request.POST = MultiDict(db_request.POST)
result = views.create_banner(db_request)
assert len(result) == 1
assert isinstance(result["form"], views.BannerForm)
assert result["form"].errors
def test_create_banner(self, db_request, banner_data):
db_request.method = "POST"
db_request.POST = MultiDict(banner_data)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_url = pretend.call_recorder(lambda r: "/admin/banners/")
assert db_request.db.query(Banner).count() == 0
resp = views.create_banner(db_request)
assert db_request.db.query(Banner).count() == 1
assert resp.status_code == 303
assert resp.location == "/admin/banners/"
assert db_request.session.flash.calls == [
pretend.call("Added new banner 'Sample Banner'", queue="success")
]
assert db_request.route_url.calls == [pretend.call("admin.banner.list")]
class TestEditBanner:
def test_serialize_form_and_banner(self, db_request):
banner = BannerFactory.create()
db_request.matchdict["banner_id"] = banner.id
result = views.edit_banner(db_request)
assert len(result) == 2
assert isinstance(result["form"], views.BannerForm)
assert result["form"].data["name"] == banner.name
assert result["banner"] == banner
def test_404_if_banner_does_not_exist(self, db_request):
db_request.matchdict["banner_id"] = str(uuid.uuid4())
with pytest.raises(HTTPNotFound):
views.edit_banner(db_request)
def test_update_banner(self, db_request, banner_data):
banner = BannerFactory.create(fa_icon="custom", **banner_data)
assert banner.is_live
form = views.BannerForm(MultiDict({}), banner)
data = form.data.copy()
data["name"] = "<NAME>"
data["end"] = str(data["end"])
data.pop("fa_icon") # do not send fa icon within post data
db_request.matchdict["banner_id"] = banner.id
db_request.method = "POST"
db_request.POST = MultiDict(data)
db_request.current_route_path = pretend.call_recorder(
lambda: f"/admin/banners/{banner.id}/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
resp = views.edit_banner(db_request)
db_banner = db_request.db.query(Banner).filter(Banner.id == banner.id).one()
assert resp.status_code == 303
assert resp.location == f"/admin/banners/{banner.id}/"
assert db_banner.name == "New Name"
assert db_banner.fa_icon == "custom" # keep previous value
assert db_request.session.flash.calls == [
pretend.call("Banner updated", queue="success")
]
def test_form_errors_if_invalid_post_data(self, db_request):
banner = BannerFactory.create()
form = views.BannerForm(MultiDict({}), banner)
data = form.data.copy()
data["name"] = "New name"
data["end"] = "" # date is required
db_request.matchdict["banner_id"] = banner.id
db_request.method = "POST"
db_request.POST = MultiDict(data)
result = views.edit_banner(db_request)
assert "end" in result["form"].errors
assert "New name" == result["form"].data["name"]
class TestDeleteBanner:
def test_404_if_banner_does_not_exist(self, db_request):
db_request.matchdict["banner_id"] = str(uuid.uuid4())
with pytest.raises(HTTPNotFound):
views.delete_banner(db_request)
def test_delete_banner(self, db_request):
banner = BannerFactory.create()
db_request.matchdict["banner_id"] = banner.id
db_request.params = {"banner": banner.name}
db_request.method = "POST"
db_request.route_url = pretend.call_recorder(lambda s: "/admin/banners/")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
resp = views.delete_banner(db_request)
with pytest.raises(NoResultFound):
db_request.db.query(Banner).filter(Banner.id == banner.id).one()
assert resp.status_code == 303
assert resp.location == "/admin/banners/"
assert db_request.session.flash.calls == [
pretend.call(f"Deleted banner {banner.name}", queue="success")
]
assert db_request.route_url.calls == [pretend.call("admin.banner.list")]
def test_do_not_delete_banner_if_invalid_confirmation_param(self, db_request):
banner = BannerFactory.create()
db_request.matchdict["banner_id"] = banner.id
db_request.params = {"banner": "not the banner name"}
db_request.method = "POST"
db_request.route_url = pretend.call_recorder(
lambda s, banner_id: f"/admin/banners/{banner_id}"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
resp = views.delete_banner(db_request)
banner = db_request.db.query(Banner).filter(Banner.id == banner.id).one()
assert resp.status_code == 303
assert resp.location == f"/admin/banners/{banner.id}"
assert db_request.session.flash.calls == [
pretend.call("Wrong confirmation input", queue="error")
]
assert db_request.route_url.calls == [
pretend.call("admin.banner.edit", banner_id=banner.id)
]
class TestPreviewBanner:
def test_404_if_banner_does_not_exist(self, db_request):
db_request.matchdict["banner_id"] = str(uuid.uuid4())
with pytest.raises(HTTPNotFound):
views.preview_banner(db_request)
def test_preview_banner(self, db_request):
banner = BannerFactory.create()
db_request.matchdict["banner_id"] = str(banner.id)
resp = views.preview_banner(db_request)
assert {"banner": banner} == resp
class TestBannerForm:
def test_required_fields(self, banner_data):
form = views.BannerForm(data={})
assert form.validate() is False
assert set(form.errors) == set(banner_data)
def test_valid_data(self, banner_data):
form = views.BannerForm(data=banner_data)
assert form.validate() is True
data = form.data
defaults = {
"fa_icon": Banner.DEFAULT_FA_ICON,
"active": False,
"link_label": Banner.DEFAULT_BTN_LABEL,
}
assert data == {**banner_data, **defaults}
``` |
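
The request stubs throughout these tests rely on the `pretend` library: `pretend.stub` builds an object exposing only the attributes a view touches, and `pretend.call_recorder` wraps a callable so its invocations can be asserted afterwards. A minimal sketch of that mechanism, independent of the banner views:

```python
import pretend

flash = pretend.call_recorder(lambda *a, **kw: None)
session = pretend.stub(flash=flash)

session.flash("Added new banner 'Sample Banner'", queue="success")

assert session.flash.calls == [
    pretend.call("Added new banner 'Sample Banner'", queue="success")
]
```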
{
"source": "4398-mg/REST_api",
"score": 2
} |
#### File: REST_api/app/__init__.py
```python
from flask import Flask
from config import config
import os
def create_app(config_name):
app = Flask(__name__)
    # NOTE: the passed-in config_name is overridden by the environment here
    config_name = os.getenv('FLASK_CONFIGURATION', 'default')
app.config.from_object(config[config_name])
config[config_name].init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
```
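
A typical way such an application factory is consumed — for instance from a small entry-point script — is sketched below. This is a hypothetical example, not the repository's actual `manage.py`; the configuration name passed in is illustrative, and the factory re-reads `FLASK_CONFIGURATION` anyway:

```python
# Hypothetical entry point using the factory above.
from app import create_app

app = create_app('default')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
```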
#### File: app/main/views.py
```python
import json
from flask import (abort, jsonify, g, session, render_template, redirect,
request, url_for)
from manage import app, client, bucket, bcolors
from . import main
from datetime import datetime
import sys
from boto.s3.key import Key
from .helper import names, authentication
from .neural_net import sample
import uuid
import time
import random
from subprocess import Popen, PIPE
from random import randint
from google.oauth2 import id_token
from google.auth.transport import requests
# import self written modules from modules dir
# from ..modules import ...
@main.route('/', methods=['GET', 'POST'])
def home():
response_obj = {
'endpoints': {
'/help': '[GET, POST] help endpoint (general usage info)',
'/generate_song': '[POST] generate song based on parameters',
'/generate_song/help': '[GET, POST] help endpoint for song gen',
'/get_song': '[POST] returns a link to download a song',
'/get_song/help': '[GET, POST] help endpoint for song gen'
}
}
resp = jsonify(response_obj)
resp.status_code = 200
return resp
@main.route('/test_db', methods=['GET', 'POST'])
def test_db():
db = client.music_gen
if(not(app.config['DEBUG'])):
resp = jsonify({'error': 'the API is not in debug mode, hence it will not test the database'})
resp.status_code = 404
return resp
resp = {}
try:
db.test_coll.insert({'test': 'test'})
db.test_coll.remove({}, multi=True)
resp = {'status': 'connected and accessed database successfully'}
except Exception as e:
print(app.config)
resp = {
'config': app.config['DB_USER'],
'status': 'unable to connect to and access db',
'error_text': str(e)
}
return jsonify(resp)
@main.route('/echo', methods=['POST'])
def echo():
print(request)
print(dir(request))
print(request.data)
try:
print(request.json)
print(request.values)
except Exception as e:
print(e)
try:
data = json.loads(request.data.decode('utf-8'))
except:
data = {}
return jsonify(data)
@main.route('/help', methods=['GET', 'POST'])
def help():
response_obj = {
'response': 'all things help related (wip)'
}
resp = jsonify(response_obj)
resp.status_code = 200
return resp
@main.route('/generate_song', methods=['POST'])
def generate_song():
try:
data = json.loads(request.data.decode('utf-8'))
data['genre'] = data['genre'].lower()
data['tempo'] = data['tempo'].lower()
except:
resp = jsonify({'error': 'unable to parse the sent data OR you are not passing values for the keys "genre" and "tempo"'})
resp.status_code = 400
return resp
duration_dict = {
'short': 90,
'medium': 180,
'long': 300
}
try:
duration = duration_dict[data['duration'].lower()] + (randint(0,20)-10)
except KeyError:
resp = jsonify({'error': 'you are not passing values for the key "duration"'})
resp.status_code = 400
return resp
valid_genres = ['game', 'jazz', 'classical', 'folk']
if(not(data['genre'] in valid_genres)):
resp = jsonify({'error': 'Invalid genre passed, valid genres are "game", "jazz", "classical", and "folk"'})
resp.status_code = 400
return resp
instrument_dict = {
'game': [76, 72, 75],
'classical': [48, 42, 46],
'folk': [24, 25, 27],
'jazz': [26, 34, 36]
}
tempo_dict = {
'slow': random.randint(0,2),
'medium': random.randint(3, 5),
'normal': random.randint(3,5),
'fast': random.randint(6,8)
}
genre_dict = {
'game': 'game',
'classical': 'classical',
'folk': 'folk',
'jazz': 'jazz'
}
pitch = 0
if(data['genre'] == 'jazz'):
pitch = -30
pitch_dict = {
'game':(-10, 14),
'classical':(-10,20),
'folk':(-10, 20),
'jazz':(-30, -15)
}
drums = False
gen_params = {
'data_dir': './app/main/neural_net/data/' + data['genre'],
'experiment_dir': './app/main/neural_net/experiments/' + genre_dict[data['genre']],
'file_length': duration,
'midi_instrument': random.choice(instrument_dict[data['genre']]),
'num_files': 1,
'prime_file': None,
'save_dir': None,
'tempo': tempo_dict[data['tempo']],
'pitch': random.randint(pitch_dict[data['genre']][0],pitch_dict[data['genre']][1]),
'drum': drums
}
begin = time.time()
generated_file = sample.main(gen_params)
print('done generating\ngenerated: ' + str(generated_file) + bcolors.ENDC)
print('time elapsed: ' + str(time.time() - begin))
if(generated_file):
db = client.music_gen
file_id = str(uuid.uuid4())
file_prefix = './app/main/music/' + file_id
outfile = sample.midi_to_mp3(generated_file, file_prefix)
print(outfile)
# check to make sure file has been converted
file_name = 'music/' + file_id + '.mp3'
key = bucket.new_key(file_name)
key.set_contents_from_filename(outfile)
key.set_canned_acl('public-read')
file_url = key.generate_url(0, query_auth=False, force_http=True)
new_file = './app/main/g_midis/{0}.mid'.format(file_id)
# remove file
cp_string = 'cp {0} {1}'.format(generated_file, new_file)
rm_string = 'rm {0}; rm {1}; rm {2}'.format(file_prefix + '.mp3', file_prefix + '.wav', generated_file)
Popen(cp_string, stdout=PIPE, stderr=PIPE, shell=True).wait()
Popen(rm_string, stdout=PIPE, stderr=PIPE, shell=True).wait()
print('MIDI PATH: ' + new_file)
response_obj = {
'timestamp': datetime.utcnow(),
'location': file_url,
'sheet_location': None,
'song_id': file_id,
'genre': data['genre'],
'tempo': data['tempo'],
'duration': data['duration'],
'song_name': names.generate_name(data['genre'], data['tempo']),
'midi_path': new_file
}
resp = jsonify(response_obj)
resp.status_code = 200
if('profileID' in data.keys() and 'profileEmail' in data.keys()):
verified_id = authentication.verify(data['profileID'])
profile_email = str(data['profileEmail']).lower()
if(verified_id):
db.users.update({'$and': [{'profileID': verified_id}, {'profileEmail': profile_email}]},
{'$set': {'profileID': verified_id, 'profileEmail': profile_email}}, upsert=True)
try:
user_obj = db.users.find_one({'$and': [{'profileID': verified_id}, {'profileEmail': profile_email}]})
current_songs = user_obj['songs']
current_songs.append(response_obj)
except Exception as e:
current_songs = [response_obj]
db.users.update({'$and': [{'profileID': verified_id}, {'profileEmail': profile_email}]},
{'$set': {'songs': current_songs}}, upsert=True)
db.songs.insert(response_obj)
else:
response_obj = {
'timestamp': datetime.utcnow(),
'location': None,
'song_id': None, # generation failed, so no file id exists yet
'sheet_location': None,
'genre': data['genre'],
'tempo': data['tempo'],
'duration': data['duration'],
'midi_path': None, # no MIDI file was produced
'song_name': names.generate_name(data['genre'], data['tempo'])
}
resp = jsonify(response_obj)
resp.status_code = 200
return resp
@main.route('/sheet_music', methods=['POST'])
def sheet_music():
db = client.music_gen
try:
data = json.loads(request.data.decode('utf-8'))
song_id = data['songID']
except Exception as e:
resp = jsonify({'error': 'unable to parse data sent OR the key "songID" was not included in the request'})
resp.status_code = 400
return resp
song_obj = db.songs.find_one({'song_id': song_id})
if('sheet_location' in song_obj.keys() and song_obj['sheet_location']):
print('sheet music cached')
return jsonify({'sheet_location': song_obj['sheet_location']})
gen_str = 'mkdir ./app/main/sheets/{0}; mono sheet.exe {1} ./app/main/sheets/{0}/{0}'.format(song_obj['song_id'],
song_obj['midi_path'])
Popen(gen_str, stdout=PIPE, stderr=PIPE, shell=True).wait()
zip_str = 'mv ./app/main/sheets/{0} ./; zip -r ./app/main/sheets/{0} ./{0}'.format(song_obj['song_id'])
Popen(zip_str, stdout=PIPE, stderr=PIPE, shell=True).wait()
sheet_path = './app/main/sheets/{0}.zip'.format(song_id)
key = bucket.new_key('sheet_music/' + song_id + '.zip')
key.set_contents_from_filename(sheet_path)
key.set_canned_acl('public-read')
file_url = key.generate_url(0, query_auth=False, force_http=True)
db.songs.update({'song_id': song_id}, {'$set': {'sheet_location': file_url}})
rm_str = 'rm -rf ./app/main/g_midis/{0}.mid ./app/main/sheets/{0}.zip ./{0}'.format(song_obj['song_id'])
Popen(rm_str, stdout=PIPE, stderr=PIPE, shell=True)
print(song_obj)
return jsonify({'sheet_location': file_url})
@main.route('/history', methods=['POST'])
def history():
db = client.music_gen
try:
data = json.loads(request.data.decode('utf-8'))
profile_id = data['profileID']
profile_email= data['profileEmail'].lower()
except:
resp = jsonify({'error': 'unable to parse the request body OR the keys "profileID" and "profileEmail" weren\'t passed in the request body'})
resp.status_code = 400
return resp
profile_id = authentication.verify(profile_id)
if(not(profile_id)):
resp = jsonify({'error': 'invalid profileID token and profileEmail pair. Perhaps you\'re not passing the profileID token and just the profileID?'})
resp.status_code = 404
return resp
found_user = db.users.find_one({'$and': [{'profileEmail': profile_email}, {'profileID': profile_id}]})
if(not(found_user)):
songs = []
else:
songs = found_user['songs']
return jsonify({'history': songs})
@main.route('/edit_song', methods=['POST'])
def edit_song():
db = client.music_gen
try:
data = json.loads(request.data.decode('utf-8'))
profile_id = data['profileID']
profile_email= data['profileEmail'].lower()
except:
resp = jsonify({'error': 'unable to parse the request body OR the keys "profileID" and "profileEmail" weren\'t passed in the request body'})
resp.status_code = 400
return resp
profile_id = authentication.verify(profile_id)
if(not(profile_id)):
resp = jsonify({'error': 'invalid profileID token and profileEmail pair. Perhaps you\'re not passing the profileID token and just the profileID?'})
resp.status_code = 404
return resp
found_user = db.users.find_one({'$and': [{'profileEmail': profile_email}, {'profileID': profile_id}]})
if(not(found_user)):
songs = []
else:
songs = found_user['songs']
try:
song_id = data['songID']
new_name = str(data['newName'])
except:
resp = jsonify({'error': 'the key songID OR newName was not passed in the request body'})
resp.status_code = 400
return resp
for i in range(len(songs)):
if(song_id == songs[i]['song_id']):
songs[i]['song_name'] = new_name
break
db.users.update({'$and': [{'profileID': profile_id}, {'profileEmail': profile_email}]},
{'$set': {'songs': songs}}, upsert=True)
return jsonify({'status': 'song name updated'})
@main.route('/remove_song', methods=['POST'])
def remove_song():
db = client.music_gen
try:
data = json.loads(request.data.decode('utf-8'))
profile_id = data['profileID']
profile_email= data['profileEmail'].lower()
except:
resp = jsonify({'error': 'unable to parse the request body OR the keys "profileID" and "profileEmail" weren\'t passed in the request body'})
resp.status_code = 400
return resp
profile_id = authentication.verify(profile_id)
if(not(profile_id)):
resp = jsonify({'error': 'invalid profileID token and profileEmail pair. Perhaps you\'re not passing the profileID token and just the profileID?'})
resp.status_code = 404
return resp
found_user = db.users.find_one({'$and': [{'profileEmail': profile_email}, {'profileID': profile_id}]})
if(not(found_user)):
songs = []
else:
songs = found_user['songs']
try:
song_id = data['songID']
except:
resp = jsonify({'error': 'the key "songID" was not passed in the request body'})
resp.status_code = 400
return resp
for i in range(len(songs)):
if(song_id == songs[i]['song_id']):
del(songs[i])
break
db.users.update({'$and': [{'profileID': profile_id}, {'profileEmail': profile_email}]},
{'$set': {'songs': songs}}, upsert=True)
return jsonify({'status': 'song removed'})
``` |
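A hedged client-side sketch for the `/generate_song` endpoint above; the host, port, and payload values are assumptions for illustration, not part of the repository.
```python
# Hedged client sketch; localhost:5000 and the payload values are invented.
import json
import requests

payload = {"genre": "jazz", "tempo": "medium", "duration": "short"}
r = requests.post("http://localhost:5000/generate_song", data=json.dumps(payload))
print(r.status_code, r.json().get("location"))  # S3 URL of the generated mp3, if any
```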
{
"source": "4398-mg/web_app",
"score": 2
} |
#### File: app/main/views.py
```python
from flask import (abort, jsonify, g, session, render_template, redirect,
request, url_for)
from manage import app, client
from . import main
# import self written modules from modules dir
# from ..modules import ...
@main.route('/')
def index():
db = client
return render_template('index.html', app=app)
``` |
{
"source": "4398TempleSpring2020/cscapstoneproject-infinitetrivia",
"score": 3
} |
#### File: cscapstoneproject-infinitetrivia/database_connection/dbconn.py
```python
from dataclasses import dataclass
from math import cos, sin, asin, radians, sqrt
from random import random
from sqlite3 import connect
from os import path
from typing import Optional
from configparser import ConfigParser
from flask_login import UserMixin
from trivia_generator.TUnit import TUnit
from trivia_generator.web_scraper import Article
@dataclass
class DBUser(UserMixin):
user_id: int = None
username: str = None
email: str = None
wins: int = 0
losses: int = 0
num_answered: int = 0
num_answered_correct: int = 0
def __eq__(self, other):
return self.__dict__ == other.__dict__
@dataclass
class DBConn:
"""
Class representing a database connection.
"""
DB_CONFIG_FILE: str = "db.ini"
db_filename: str = None
max_importance: float = None
search_radius: float = None
def __init__(self, filename=None, search_radius=None):
local_path = path.dirname(path.abspath(__file__))
config_filepath = path.join(local_path, DBConn.DB_CONFIG_FILE)
config = ConfigParser()
config.read(config_filepath)
self.db_filename = local_path + '/' + (config['DATABASE']['DatabaseFile'] if filename is None else filename)
self.search_radius = float(config['DATABASE']['SearchRadius']) if search_radius is None else search_radius
@staticmethod
def _distance(lat: float, long: float, query_lat: float, query_long: float):
if lat is None or long is None or query_lat is None or query_long is None:
return -1
lat = radians(lat)
long = radians(long)
query_lat = radians(query_lat)
query_long = radians(query_long)
d_lon = query_long - long
d_lat = query_lat - lat
a = sin(d_lat / 2) ** 2 + cos(lat) * cos(query_lat) * sin(d_lon / 2) ** 2
c = 2 * asin(sqrt(a))
r = 3956
return c * r
def _select_lat_long(self, zip_code: str) -> tuple:
db = connect(self.db_filename)
cursor = db.cursor()
query = """
SELECT lat, long
FROM location
WHERE zip = ?
"""
cursor.execute(query, (zip_code,))
lat_long = cursor.fetchone()
db.close()
return lat_long if lat_long is not None else (None, None)
def select_max_importance(self) -> float:
"""Gets the max importance score of the category with the maximum importance score, if not yet recorded.
"""
if self.max_importance is None:
db = connect(self.db_filename)
cursor = db.cursor()
cursor.execute('SELECT MAX(importance) FROM category;')
row = cursor.fetchone()
self.max_importance = row[0]
db.close()
return self.max_importance
def select_random_article(self) -> tuple:
"""Selects a random article from the database.
returns: the article id and title of the random article.
rtype: (int, str)
"""
db = connect(self.db_filename)
cursor = db.cursor()
cursor.execute('SELECT article_id, title FROM article ORDER BY random() LIMIT 1;')
article_id, title = cursor.fetchone()
db.close()
return article_id, title
def select_weighted_random_article(self) -> tuple:
"""Selects a random article from the database weighted by its importance score.
returns: the article id and title of the random article.
rtype: (int, str)
"""
db = connect(self.db_filename)
cursor = db.cursor()
min_select_importance = random() * self.select_max_importance()
query = """
SELECT article.article_id, article.title, SUM(importance) AS article_importance
FROM article_category
JOIN article ON article.article_id = article_category.article_id
JOIN category ON category.category_id = article_category.category_id
GROUP BY article.article_id
HAVING SUM(importance) > ?
ORDER BY RANDOM()
LIMIT 1;
"""
cursor.execute(query, [min_select_importance])
article_id, title, importance = cursor.fetchone()
db.close()
return article_id, title, importance
def select_random_category(self) -> tuple:
"""Selects a random category from the database weighted by its importance score.
returns: the category id, name, and importance of the category.
"""
db = connect(self.db_filename)
cursor = db.cursor()
min_select_importance = random() * self.select_max_importance()
# Get a random category whose importace score is above min_select_importance
query = """
SELECT category_id, name, importance
FROM category
WHERE importance >= ?
ORDER BY RANDOM()
LIMIT 1;"""
cursor.execute(query, [min_select_importance])
row = cursor.fetchone()
db.close()
return row
def select_article_categories(self, article_id: int) -> list:
"""Selects the categories associated with the article with the given article id.
:param article_id: The ID of the article.
:type article_id: int
:raises: DatabaseError
:returns: the list of strings representing the names of the categories.
:rtype: [str]
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
SELECT name
FROM article_category
JOIN category ON article_category.category_id = category.category_id
WHERE article_id = ?;
"""
cursor.execute(query, (article_id,))
rows = cursor.fetchall()
db.close()
return [row[0] for row in rows]
def select_category_articles(self, category: str) -> list:
"""Selects the categories associated with the article with the given article id.
:param category: category name.
:type category: int
:raises: DatabaseError
:returns: the list of article_ids associated with that category.
:rtype: [(int, str)]
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
SELECT DISTINCT a.article_id, a.title
FROM article_category ac
JOIN category c ON ac.category_id = c.category_id
JOIN article a ON ac.article_id = a.article_id
WHERE c.name LIKE ?;
"""
cursor.execute(query, ('%' + category + '%',))
rows = cursor.fetchall()
db.close()
return rows
def insert_user(self, user: DBUser, password: str) -> int:
"""
Inserts a user into the database
:param user: the DBUser object to be added to the database
:type user: DBUser
:param password: <PASSWORD>
:type password: str
:raises: DatabaseError
:return: database user_id
:rtype: int
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
INSERT INTO user (username, email, password, wins, losses, num_answered, num_answered_correct)
VALUES (?,?,?,?,?,?,?)
"""
cursor.execute(query, (
user.username,
user.email,
password,
user.wins,
user.losses,
user.num_answered,
user.num_answered_correct))
db.commit()
user_id = cursor.lastrowid
db.close()
return user_id
def update_user(self, user: DBUser) -> int:
""" Updates a user in the database.
:param user: the DBUser object to be added to the database
:type user: DBUser
:raises: DatabaseError
:return: database user_id or -1 if user not found
:rtype: int
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
UPDATE user
SET username = ?, email = ?, wins = ?, losses = ?, num_answered = ?, num_answered_correct = ?
WHERE username = ?
"""
cursor.execute(query, (
user.username,
user.email,
user.wins,
user.losses,
user.num_answered,
user.num_answered_correct,
user.username
))
db.commit()
query = """
SELECT user_id
FROM user
WHERE username = ?
"""
user_id = cursor.execute(query, (user.username,)).fetchone()
db.close()
if user_id is None:
return -1
else:
return user_id[0]
def select_password(self, username: str) -> str:
"""
Retrieves a password entry from the database for the specified user
:param username: user's username
:type username: str
:raises: sqlite3.DatabaseError
:return: password entry
"""
db = connect(self.db_filename)
query = '''
SELECT password
FROM user
WHERE username = ?
'''
password = db.cursor().execute(query, (username,)).fetchone()[0]
db.close()
return password
def update_password(self, username: str, password: str) -> int:
""" Updates a user in the database.
:param username: the DBUser object to be added to the database
:type username: str
:param password: <PASSWORD>
:type password: str
:raises: DatabaseError
:return: database user_id or -1 if user not found
:rtype: int
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
UPDATE user
SET password = ?
WHERE username = ?
"""
cursor.execute(query, (password, username))
db.commit()
user_id = cursor.lastrowid
db.close()
if user_id == 0:
return -1
else:
return user_id
def select_user(self, username: str) -> Optional[DBUser]:
"""Gets a user from the database by username.
:param username: username to be retrieved
:type username: str
:raises DatabaseError:
:returns: an object representing a player or None
:rtype: DBUser or None
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
SELECT user_id, username, email, wins, losses, num_answered, num_answered_correct
FROM user
WHERE username = ?;
"""
cursor.execute(query, (username,))
user = cursor.fetchone()
db.close()
if user is not None:
return DBUser(*user)
else:
return None
def delete_user(self, user: DBUser) -> None:
"""Deletes a user from the database.
:param user: a users's object to be deleted from database
:type user: DBUser
:raises DatabaseError:
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
DELETE FROM user
WHERE username = ?
"""
cursor.execute(query, (user.username,))
db.commit()
db.close()
def update_tunit(self, t_unit: TUnit) -> int:
"""Updates a TUnit in the database.
:param t_unit: a TUnit object to be deleted from database
:type t_unit: TUnit
:raises DatabaseError:
:returns: t_unit_Id or -1 of not found
:rtype: int
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
REPLACE INTO t_unit (t_unit_Id, sentence, article_id, url, access_timestamp, lat, long, num_likes,
num_mehs, num_dislikes)
VALUES (?,?,?,?,?,?,?,?,?,?);
"""
cursor.execute(query,
(t_unit.t_unit_id, t_unit.sentence, t_unit.article_id, t_unit.url, t_unit.access_timestamp,
t_unit.latitude, t_unit.longitude, t_unit.num_likes, t_unit.num_mehs, t_unit.num_dislikes))
db.commit()
t_unit.t_unit_id = cursor.lastrowid
db.close()
return t_unit.t_unit_id
def select_tunit_random(self) -> TUnit:
"""Gets a TUnit from the database by random.
:raises DatabaseError:
:returns: an object representing a TUnit
:rtype: TUnit
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
SELECT sentence, article_id, url, access_timestamp, t_unit_Id, lat, long, num_likes, num_mehs,
num_dislikes
FROM t_unit
ORDER BY RANDOM() LIMIT 1;
"""
cursor.execute(query)
tunit = TUnit(*cursor.fetchone())
db.close()
return tunit
def select_tunit_category(self, category: str) -> list:
"""Gets a list of TUnits from the database by category.
:param category: the category used to to find TUnits
:type category: str
:raises DatabaseError:
:returns: a list of TUnit objects
:rtype: [TUnit] or empty list if category not found
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
SELECT DISTINCT sentence, tu.article_id, url, access_timestamp, t_unit_Id, lat, long, num_likes, num_mehs,
num_dislikes
FROM t_unit tu
JOIN article_category ac on tu.article_id = ac.article_id
JOIN category c on ac.category_id = c.category_id
WHERE c.name LIKE ?
"""
cursor.execute(query, ('%' + category + '%',))
t_unit_list = [TUnit(*t_unit_tuple) for t_unit_tuple in cursor.fetchall()]
db.close()
return t_unit_list
def select_tunit_location(self, zip_code: str) -> list:
"""Gets a list of TUnits from the database by location.
:raises DatabaseError:
:returns: a list of TUNit objects
:rtype: [TUnit] or empty list if not found
"""
lat, long = self._select_lat_long(zip_code)
db = connect(self.db_filename)
db.create_function('DISTANCE', 4, DBConn._distance)
cursor = db.cursor()
query = '''
SELECT sentence, article_id, url, access_timestamp, tu.t_unit_Id, lat, long, num_likes, num_mehs,
num_dislikes
FROM t_unit tu
JOIN (
SELECT t_unit_Id, DISTANCE(lat, long, ?, ?) d
FROM t_unit
WHERE d < ? AND d >= 0
) l ON tu.t_unit_Id = l.t_unit_Id
'''
cursor.execute(query, (lat, long, self.search_radius))
t_unit_list = [TUnit(*t_unit_tuple) for t_unit_tuple in cursor.fetchall()]
db.close()
return t_unit_list
def delete_tunit(self, t_unit: TUnit):
"""Deletes a TUnit from the database.
:param t_unit: a TUnit object to be deleted from database
:type t_unit: TUnit
:raises DatabaseError:
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
DELETE FROM t_unit
WHERE t_unit_Id = ?
"""
cursor.execute(query, (t_unit.t_unit_id,))
db.commit()
db.close()
def insert_category(self, category: str, importance: float) -> int:
"""Adds a category to the database.
:param category: the category to be added to the database
:type category: str
:param importance: relevance of category
:type importance: float
:raises DatabaseError:
:returns: the category id
:rtype: int
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
INSERT INTO category (name, importance)
VALUES (?,?)
"""
cursor.execute(query, (category, importance))
db.commit()
category_id = cursor.lastrowid
db.close()
return category_id
def delete_category(self, category: str):
"""Deletes a category from the database.
:param category: the category to be deleted from the database
:type category: str
:raises DatabaseError:
"""
db = connect(self.db_filename)
cursor = db.cursor()
query = """
DELETE FROM category
WHERE category.name = ?
"""
cursor.execute(query, (category,))
db.commit()
db.close()
def select_articles_location(self, zip_code: str) -> list:
""" Retrieves Articles from the database based on a location
:raises DatabaseError:
:returns: a list of tuples representing an article id and title
:rtype: [(int, str)]
"""
lat, long = self._select_lat_long(zip_code)
db = connect(self.db_filename)
db.create_function('DISTANCE', 4, DBConn._distance)
cursor = db.cursor()
query = """
SELECT a.article_id, a.title
FROM article a
JOIN (
SELECT article_id, DISTANCE(lat, long, ?, ?) d
FROM article
WHERE d < ? AND d >= 0
) l ON a.article_id = l.article_id
"""
cursor.execute(query, (lat, long, self.search_radius))
article_list = cursor.fetchall()
db.close()
return article_list
```
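`DBConn` registers a haversine distance function with SQLite as `DISTANCE()` and routes location queries through it. A minimal, hedged usage sketch follows; the `db.ini` contents, zip code, and user values are assumptions for illustration.
```python
# Hedged usage sketch for DBConn; paths, zip code and credentials are invented.
from database_connection.dbconn import DBConn, DBUser

db = DBConn()  # reads db.ini next to dbconn.py for the SQLite file and search radius

# random article weighted by summed category importance
article_id, title, importance = db.select_weighted_random_article()
print(article_id, title, importance)

# user round-trip: insert, look up, update
db.insert_user(DBUser(username="alice", email="alice@example.com"), "hashed-pw")
alice = db.select_user("alice")
alice.wins += 1
db.update_user(alice)

# location-based TUnit lookup goes through the haversine DISTANCE() UDF
for tunit in db.select_tunit_location("19122"):  # hypothetical zip code
    print(tunit.sentence)
```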
#### File: cscapstoneproject-infinitetrivia/dev_ranker/trainer.py
```python
import sys
import os
import random
from flask import Flask, render_template, request
from flask_socketio import SocketIO
top_level_dir = os.path.abspath('../')
sys.path.append(top_level_dir)
from trivia_generator.web_scraper.WebScraper import get_page_by_random
from trivia_generator.NLPPreProcessor import create_TUnits
from database_connection.dbconn import DBConn
app = Flask(__name__)
socketio = SocketIO(app)
tunit_dictionary = dict()
dbconn = DBConn()
@app.route('/')
@app.route('/index')
def index():
return render_template("index.html")
@socketio.on('update_rank')
def update_rank(rank):
try:
tunit = tunit_dictionary[request.sid]
if rank == 'like':
tunit.num_likes += 1
elif rank == 'dislike':
tunit.num_dislikes += 1
elif rank == 'meh':
tunit.num_mehs += 1
else:
print("invalid rank submitted")
# update tunit in database
dbconn.update_tunit(tunit)
except KeyError:
print("could not find SID")
@socketio.on('request_trivia')
def request_trivia(info):
trivia_article = get_page_by_random()
tunit_list = create_TUnits(trivia_article)
while not tunit_list:
print("bad article!")
trivia_article = get_page_by_random()
tunit_list = create_TUnits(trivia_article)
trivia = random.choice(tunit_list)
tunit_dictionary[request.sid] = trivia
return trivia.sentence
if __name__ == '__main__':
socketio.run(app, host='0.0.0.0')
```
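The dev ranker above exposes two Socket.IO events, `request_trivia` and `update_rank`. A hedged client-side sketch follows; `python-socketio` is an assumed extra dependency for this snippet and is not part of the repository.
```python
# Hedged sketch of a python-socketio client exercising the two events above.
import socketio

sio = socketio.Client()
sio.connect("http://localhost:5000")   # assumed host/port of the trainer app

trivia = sio.call("request_trivia", {})  # server acks with the trivia sentence
print(trivia)
sio.emit("update_rank", "like")          # feeds the rank back into the database
sio.disconnect()
```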
#### File: cscapstoneproject-infinitetrivia/nlp_helpers/NLPConn.py
```python
import spacy
import neuralcoref
nlp = None
def get_nlp_conn():
global nlp
if nlp is None:
nlp = spacy.load('en_core_web_lg')
neuralcoref.add_to_pipe(nlp)
return nlp
```
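`get_nlp_conn` lazily loads the large spaCy model once, adds neuralcoref to the pipe, and hands the same object back on every call. A small hedged sketch of that behaviour, assuming `en_core_web_lg` and neuralcoref are installed:
```python
# Sketch of the lazy-singleton behaviour; requires en_core_web_lg + neuralcoref.
from nlp_helpers import NLPConn

nlp_a = NLPConn.get_nlp_conn()
nlp_b = NLPConn.get_nlp_conn()
assert nlp_a is nlp_b  # the model is loaded only once and then reused

doc = nlp_a("Ethereum was proposed in 2013. It went live in 2015.")
print(doc._.coref_clusters)  # available because neuralcoref is in the pipe
```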
#### File: cscapstoneproject-infinitetrivia/question_generator/MLQuestionGenerator.py
```python
from trivia_generator import TUnit
def ml_generate_trivia_question(tunit: TUnit) -> (str, str):
"""Takes a trivia sentence and turns it into a question and answer using machine learning.
:param tunit: a TUnit representing the trivia question.
:type tunit: TUnit
:returns: a tuple containing the question and answer as strings.
"""
pass
```
#### File: cscapstoneproject-infinitetrivia/question_generator/NLPQuestionGeneratorSpacy.py
```python
import numpy as np
import spacy
from nlp_helpers import NLPConn
nlp = NLPConn.get_nlp_conn()
def fill_in_the_blank_question_generation(sentence):
doc = nlp(sentence)
questions = []
for ent in doc.ents:
question = sentence[:ent.start_char] + "______" + sentence[ent.end_char:]
answer = ent.text
questions.append((question, answer))
return questions
def nlp_question_generation(sentence):
"""
Creates a mix of multiple choice and wh-questions
"""
doc = nlp(sentence)
questions = []
for ent in doc.ents:
question = sentence[:ent.start_char] + "______" + sentence[ent.end_char:]
answer = ent.text
questions.append((question, answer))
return questions
"""
doc = nlp(sentence)
hasQuestionMark = False # boolean to check if it's a wh question or fib questions
questions = []
for ent in doc.ents:
questionTag = "______"
if(ent.label_ == "PERSON" and ent.start_char == 0 ):
questionTag = "Who"
hasQuestionMark = True
elif(ent.label_ == "ORG"):
if(not ent.start_char == 0):
questionTag = "what"
else:
questionTag = "What"
hasQuestionMark = True
elif(ent.label_ == "DATE"):
if(not ent.start_char == 0):
if("century" in ent.text):
questionTag = "which century"
else:
questionTag = "which year"
else:
questionTag = "When"
hasQuestionMark = True
elif(ent.label_ == "TIME"):
if(not ent.start_char == 0):
questionTag = "at what time"
else:
questionTag = "When"
hasQuestionMark = True
elif(ent.label_ == "MONEY"):
if(not ent.start_char == 0):
questionTag = "how much"
hasQuestionMark = True
if(hasQuestionMark == True):
question = sentence[:ent.start_char] + questionTag + sentence[ent.end_char:-1] + "?"
hasQuestionMark = False
else:
question = sentence[:ent.start_char] + questionTag + sentence[ent.end_char:]
answer = ent.text
questions.append((question, answer))
return questions"""
def main():
corpus = ["Stable nuclides are nuclides that are not radioactive and so (unlike radionuclides) do not spontaneously undergo radioactive decay.",
"<NAME> was a prominent singer of the 20th century.",
"<NAME>, the Jamaican runner is an 11-time world champion and holds the record in the 100 and 200 meter race.",
"The first Wimbledon Championship was held in 1877.",
"<NAME> reportedly studied <NAME>’s mannerisms to prepare for his role as a serial killer <NAME> in American Psycho.",
"<NAME> is the only person to be awarded stars in all five categories on the Hollywood Walk of Fame.",
"The hashtag symbol is technically called an octothorpe.",
"It’s been said that nearly 3% of the ice in Antarctic glaciers is penguin urine.",
"<NAME> is a student at Temple University.",
"Ahmad buys Apple stocks for 100 million dollars."]
fib_questionBank = []
nlp_questionBank = []
# Generating just Fill in Blanks questions
for sentence in corpus:
questions = fill_in_the_blank_question_generation(sentence)
for items in questions:
fib_questionBank.append((items[0], items[1]))
#Generating wh-questions and fill in the blank questions
for sentence in corpus:
questions = nlp_question_generation(sentence)
for items in questions:
nlp_questionBank.append((items[0], items[1]))
for items in fib_questionBank:
print("\b Question: " + str(items[0]) + "\n")
print("Correct Answer: " + str(items[1]) + "\n")
for items in nlp_questionBank:
print("\b Question: " + str(items[0]) + "\n")
print("Correct Answer: " + str(items[1]) + "\n")
if __name__ == '__main__':
main()
```
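Both generators above mask named entities to build (question, answer) pairs. A quick hedged illustration; the exact pairs depend on which entities the installed spaCy model detects, so the printed output is only indicative.
```python
# Hedged illustration of the entity-masking generator; output depends on the spaCy model.
from question_generator.NLPQuestionGeneratorSpacy import fill_in_the_blank_question_generation

pairs = fill_in_the_blank_question_generation(
    "The first Wimbledon Championship was held in 1877.")
for question, answer in pairs:
    print(question, "->", answer)
# e.g. "The first Wimbledon Championship was held in ______." -> "1877"
```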
#### File: cscapstoneproject-infinitetrivia/trivia_generator/NLPPreProcessor.py
```python
import os
import spacy
from spacy.tokens import Span, Token
from trivia_generator.web_scraper import Article
from trivia_generator.web_scraper.WebScraper import get_page_by_random
from trivia_generator.TUnit import TUnit
from nlp_helpers import features
from nlp_helpers import NLPConn
from nlp_helpers import ContradictatoryMatcher
nlp = NLPConn.get_nlp_conn()
contradictatory_matcher = ContradictatoryMatcher.get_contradicatory_matcher()
def create_TUnits(article: Article) -> list:
"""Creates a list of TUnits from a Wikipedia article object.
:param article: A Wikipedia article object.
:type article: Article
:returns: a list of TUnits created from article.
"""
paragraphs = ' '.join([para for para in article.content.splitlines() if para != ''])
tunits = []
doc = nlp(paragraphs)
for sent in list(doc.sents):
if features.is_complete_sentence(sent) and features.sentence_has_context(sent):
tunits.append(
TUnit(
sent.string,
article.article_id,
article.url,
article.access_timestamp,
None,
article.latitude,
article.longitude,
0,
0,
0
)
)
return tunits
if __name__ == '__main__':
article = get_page_by_random()
tunits = create_TUnits(article)
for tunit in tunits:
print(tunit)
```
#### File: cscapstoneproject-infinitetrivia/trivia_ranker/label_generator.py
```python
import sys
import os
# Handle import junk
top_level_dir = os.path.abspath('../')
# include trivia generator modules
sys.path.append(top_level_dir)
from nlp_helpers import features
from trivia_ranker import feature_engineering
import pickle
def generate_labels(sentence_list):
"""
This function gets the list of trivia sentences and generates their interestingness ranking
using the pre-trained model
"""
#get ner_ratios/counts
ner_ratio = feature_engineering.get_ner_counts(sentence_list)
#get Unigram features
uni_features = feature_engineering.get_unigram_features(sentence_list)
#get linguistic features
has_super = []
has_contra = []
fog = []
for sentence in sentence_list:
has_super.append(features.get_has_superlatives(sentence))
has_contra.append(features.get_has_superlatives(sentence))
fog.append(features.get_fog_score(sentence))
X_test =[]
for index in range(len(sentence_list)):
X_test.append([ner_ratio[index], uni_features[index], has_contra[index], has_super[index], fog[index]])
loaded_model = pickle.load(open("svm.sav", 'rb'))
y_test = loaded_model.predict(X_test)
return y_test
```
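`generate_labels` stacks one row per sentence (NER ratio, unigram feature, contradiction flag, superlative flag, FOG score) and feeds the matrix to a pickled SVM. A hedged call sketch, assuming a previously trained `svm.sav` exists in the working directory:
```python
# Hedged sketch; svm.sav must already exist from a prior training run.
from trivia_ranker.label_generator import generate_labels

sentences = [
    "The first Wimbledon Championship was held in 1877.",
    "The hashtag symbol is technically called an octothorpe.",
]
labels = generate_labels(sentences)  # one interestingness label per sentence
for sentence, label in zip(sentences, labels):
    print(label, sentence)
```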
#### File: app/game_models/Game.py
```python
import random
from .Player import Player
from .GameSettings import GameSettings
from trivia_generator.web_scraper.WebScraper import get_page_by_random
from trivia_generator.web_scraper.WebScraper import get_page_by_category
from trivia_generator.web_scraper.WebScraper import get_page_by_location_zip
from trivia_generator.NLPPreProcessor import create_TUnits
from question_generator.NLPQuestionGeneratorSpacy import nlp_question_generation
class Game:
"""Class for a running instance of a game session. Contains All Game Logic.
:param players: A list of the currently active players in the game.
:param game_settings: A *GameSettings* object which contains the settings of the game.
:param round_number: the current round number the game is on.
:param game_code: the game code used to connect to the game.
:param current_state: the current state of the game.
:param game_states: a list of all possible game states.
:param game_room: the ID of the game room used by connecting sockets.
:param trivia_database: the database containing trivia questions.
"""
# game_states: list
def __init__(self, game_code: str,
game_settings: GameSettings,
host_id: str):
self.players = []
self.num_players = 0
self.game_code = game_code
self.game_settings = game_settings
self.host_id = host_id
self.round_number = 0
self.current_state = "LOBBY"
self.game_started = False
self.current_trivia = ""
self.number_of_responses = 0
self.number_of_lies = 0
self.current_answer = ""
def add_player_to_lobby(self, player: Player) -> bool:
"""Adds a player to the current game lobby.
:param player: the player to be added to the game lobby
:type player: Player
:returns: True if player was successfully added to lobby, False otherwise
"""
if not self.game_started:
self.players.append(player)
self.num_players += 1
return True
else:
return False
def remove_player_from_lobby(self, player: Player) -> bool:
"""Removes a player from the current game lobby.
:param player: the player to be removed from the game lobby
:type player: Player
:returns: True if player was successfully removed from lobby, False otherwise
"""
self.players.remove(player)
self.num_players -= 1
return True
def start_game(self) -> bool:
"""Finalizes the lobby and begins a game session.
:returns: True if the game session was successfully started, false otherwise
"""
self.game_started = True
self.round_number = 1
return True
def get_round_number(self) -> int:
"""Returns the current game round.
:returns: the current game round number as an integer
"""
return self.round_number
def get_score(self) -> dict:
"""creates and returns dictionary with the name and score of each player in game
:returns: a dictionary containinging the score of each player
"""
data = dict()
data['players'] = []
self.players.sort(key=lambda p: p.current_score, reverse=True)
for player in self.players:
player_entry = dict()
player_entry['name'] = player.name
player_entry['score'] = player.current_score
data['players'].append(player_entry)
return data
def get_next_trivia(self) -> str:
"""Fetches a trivia question for the upcoming round from the trivia database, based on the current GameSettings.
:returns: a trivia question
"""
quest_ans_pairs = []
while not quest_ans_pairs:
if self.game_settings.game_mode == 'category':
print("getting article by category")
trivia_article = get_page_by_category(self.game_settings.category)
elif self.game_settings.game_mode == 'location':
print("getting article by location")
trivia_article = get_page_by_location_zip(self.game_settings.zip_code)
else:
print("getting article by random")
trivia_article = get_page_by_random()
tunit_list = create_TUnits(trivia_article)
if len(tunit_list) > 0:
tunit = random.choice(tunit_list)
quest_ans_pairs = nlp_question_generation(tunit.sentence)
trivia_question, trivia_answer = random.choice(quest_ans_pairs)
print('found trivia!')
self.current_trivia = trivia_question
self.current_answer = trivia_answer
return trivia_question
def submit_answer(self, data: dict) -> list:
"""Retrives an answer the current trivia question from a given player.
:returns: A list, the first values corresponding the the success of submitting
the answer, true if successful, false otherwise,
the second value is true if there are no players left to answer, false if there are
"""
print("Game submission:", data)
player = self.get_player_by_sid(data['sid'])
if player is None:
return [False, False]
else:
player.current_answer = data['answer']
self.number_of_responses += 1
print('number of responses:', self.number_of_responses)
print('number of players:', self.num_players)
if self.number_of_responses == self.num_players:
return [True, True]
return [True, False]
def submit_lie(self, data: dict) -> list:
"""Retrives a lie submitted by a player in a fibbage game.
:returns: A list, the first value corresponding to the success
of submitting lie, the second corresponding to the if there are more players left to submit lies
"""
player = self.get_player_by_sid(data['sid'])
if player is None:
return [False, False]
player.current_lie = data['lie']
print("submitted lie:", data['lie'])
self.number_of_lies += 1
print("number of lies:", self.number_of_lies)
print('number of players:', self.num_players)
if self.number_of_lies == self.num_players:
return [True, True]
return [True, False]
def get_trivia_answer_and_responses(self) -> dict:
"""Returns the answer to the current trivia, and the responses of each player
:returns: a dictionary containing the trivia answer, and player answers
"""
data = dict()
data['answer'] = self.current_answer
self.players.sort(key=lambda p: p.name)
data['player_answers'] = dict()
for player in self.players:
data['player_answers'][player.name] = dict()
data['player_answers'][player.name]['answer'] = player.current_answer
is_correct = (player.current_answer == self.current_answer)
data['player_answers'][player.name]['correct'] = is_correct
player.current_answer = ""
self.round_number += 1
self.update_scores(data)
self.number_of_responses = 0
return data
def get_fibbage_answer_and_responses(self) -> dict:
"""Returns the answer to the current trivia, and the lies+answers of each player
:returns: a dictionary containing the trivia answer, and the lie and answer of each player
"""
data = dict()
data['answer'] = self.current_answer
data['players'] = []
for player in self.players:
player_info = dict()
player_info['name'] = player.name
player_info['answer'] = player.current_answer
is_correct = (player.current_answer == self.current_answer)
player_info['correct'] = is_correct
player_info['lie'] = player.current_lie
num_fooled = len([p.current_answer
for p in self.players
if p.current_answer == player.current_lie])
player_info['fooled'] = num_fooled
player.number_fooled = num_fooled
data['players'].append(player_info)
self.round_number += 1
# self.update_fibbage_scores(data) TODO
self.number_of_responses = 0
self.update_fibbage_scores(data)
return data
def get_fibbage_lies_and_answer(self) -> dict:
"""Returns all user-submitted lies to current fibbage trivia, and real answer
:returns: a dictionary containing the trivia answer, and player's lies
"""
data = dict()
data['answer'] = self.current_answer
data['lies'] = []
for player in self.players:
lie = player.current_lie
if lie != "":
data['lies'].append(lie)
# player.current_lie = ""
# self.numer_of_lies = 0
return data
def update_fibbage_scores(self, data):
"""Updates the scores of each player based on the answer and lies of each player"""
for player in self.players:
if data['answer'] == player.current_answer:
player.update_score(1)
player.update_score(player.number_fooled)
player.number_fooled = 0
player.current_lie = ""
player.current_answer = ""
self.number_of_lies = 0
def update_scores(self, data):
"""Updates the scores of each player based on the data of each player."""
for player in self.players:
if data['player_answers'][player.name]['correct']:
# TODO determine how many points they should get
player.update_score(1)
def submit_trivia_rank(self, rank):
# TODO
# 1. find current trivia TUnit
# 2. update TUnit in DB based on rank
print("trivia recieved rank", rank)
def display_category_options(self) -> bool:
"""If applicable (depending on game mode), send a list of possible categories that a player can choose from to the front end, which will be displayed to the selected user.
:returns: True if categories were properly fetched from database and sent to frontend, False otherwise
"""
pass
def determine_winners_of_round(self):
"""Based of off the current trivia and the received answers from each player, determine who won the round.
"""
pass
def prompt_for_lie(self) -> bool:
"""If applicable (depending on game mode), tell front-end to prompt all player(s) for a fake-answer to a trivia question.
:returns: True if info was successfully sent to front-end, False otherwise
"""
pass
def finish_game(self) -> bool:
"""After all rounds have been completed, sents "credits" information to front-end and updates statistics for all registered users.
:returns: True if info was successfully sent to front-end and user statistics were updated, false otherwise
"""
pass
def get_player_by_sid(self, sid: str) -> Player:
"""Returns the given player in game based off of their SID, or None if not found.
:returns: The player corresponding to the given SID, or None if not found
"""
for player in self.players:
if sid == player.ID:
return player
return None
```
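The `Game` class drives one round as: collect players, `start_game`, `get_next_trivia`, one `submit_answer` per player, then `get_trivia_answer_and_responses` to score. The sketch below is hedged: the `GameSettings` and `Player` constructor arguments are assumptions, since those classes are not shown here.
```python
# Hedged round-flow sketch; GameSettings/Player constructor arguments are assumed.
from app.game_models.Game import Game
from app.game_models.GameSettings import GameSettings
from app.game_models.Player import Player

settings = GameSettings(game_mode="random")              # hypothetical arguments
game = Game(game_code="ABCD", game_settings=settings, host_id="host-sid")
game.add_player_to_lobby(Player(ID="p1-sid", name="Alice"))  # hypothetical arguments
game.add_player_to_lobby(Player(ID="p2-sid", name="Bob"))
game.start_game()

question = game.get_next_trivia()                        # scrapes an article and builds a question
game.submit_answer({'sid': 'p1-sid', 'answer': '1877'})
game.submit_answer({'sid': 'p2-sid', 'answer': '1900'})
print(game.get_trivia_answer_and_responses())
print(game.get_score())
```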
#### File: web_app/app/routes.py
```python
from flask import render_template
from flask_login import login_required, current_user
from app import app
@app.route('/')
@app.route('/index')
def index():
return render_template("index.html")
@app.route('/create_room')
def create_room_page():
return render_template("create_room.html")
@app.route('/login')
def login_page():
return render_template("login.html")
@app.route('/statistics')
@login_required
def statistics_page():
return render_template("statistics.html", name=current_user.name)
```
#### File: web_app/app/validations.py
```python
from app import games
def is_game_code_valid(game_code: str) -> bool:
return game_code in games
def is_game_name_valid(game_code: str, name: str) -> bool:
if name == "":
return False
game = games[game_code]
return name not in [player.name for player in game.players]
``` |
{
"source": "4399123/faster_rcnn_learning",
"score": 2
} |
#### File: faster_rcnn_learning/tools/demo.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import _init_paths
from model.config import cfg
from model.test import im_detect
from torchvision.ops import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import os, cv2
import argparse
import random
from nets.vgg16 import vgg16
from nets.resnet_v1 import resnetv1
from nets.mobilenet_v1 import mobilenetv1
import torch
CLASSES = ('__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
def demo(net):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im = cv2.imread('004545.jpg')
# im=cv2.resize(im,None,None,fx=2,fy=2)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print('Detection took {:.3f}s for {:d} object proposals'.format(
timer.total_time(), boxes.shape[0]))
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
Colors = [[random.randint(0, 255) for _ in range(3)] for _ in CLASSES]
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(
torch.from_numpy(cls_boxes), torch.from_numpy(cls_scores),
NMS_THRESH)
dets = dets[keep.numpy(), :]
inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
cv2.rectangle(im,(int(bbox[0]),int(bbox[1])),(int(bbox[2]),int(bbox[3])),Colors[CLASSES.index(cls)],2)
cv2.putText(im,'{}:{:.2f}'.format(str(cls),score),(int(bbox[0]),int(bbox[1])+12),1,1.2,Colors[CLASSES.index(cls)],2)
cv2.imshow('11',im)
cv2.waitKey(0)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(
description='Tensorflow Faster R-CNN demo')
parser.add_argument('--net',
dest='demo_net',
help='Network to use [vgg16 res101 mobile]',
default='res101')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
# model path
demonet = args.demo_net
# load network
if demonet == 'vgg16':
net = vgg16()
elif demonet == 'res101':
net = resnetv1(num_layers=101)
elif demonet == 'mobile':
net = mobilenetv1()
else:
raise NotImplementedError
net.create_architecture(21, tag='default', anchor_scales=[8, 16, 32])
net.load_state_dict(
torch.load('model/res101_faster_rcnn_iter_110000.pth',map_location=torch.device('cpu')))
net.eval()
if not torch.cuda.is_available():
net._device = 'cpu'
net.to(net._device)
demo(net)
``` |
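The per-class loop in `demo()` relies on `torchvision.ops.nms` to drop overlapping detections. A self-contained toy example of that call, independent of the Faster R-CNN weights the demo expects on disk:
```python
# Toy illustration of the torchvision NMS call used in demo(); values are made up.
import torch
from torchvision.ops import nms

boxes = torch.tensor([[10., 10., 50., 50.],
                      [12., 12., 52., 52.],      # heavy overlap with the first box
                      [100., 100., 140., 140.]])
scores = torch.tensor([0.9, 0.75, 0.6])

keep = nms(boxes, scores, iou_threshold=0.3)
print(keep)  # tensor([0, 2]) -- the lower-scored overlapping box is suppressed
```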
{
"source": "43ndr1k/Mappinng-Cryptocurrencies-with-News",
"score": 3
} |
#### File: crawler/helper/date_helper.py
```python
from datetime import datetime as dt, timedelta
from my_constants import *
def utc_to_str(utc_date):
"""
:param utc_date:
:return:
"""
return dt.fromtimestamp(int(utc_date)/1000).strftime(DATE_FORMAT)
def datespan(start_date, end_date, delta=timedelta(days=1)):
"""
:param start_date:
:param end_date:
:param delta:
:return:
"""
current_date = start_date
while current_date < end_date:
yield current_date
current_date += delta
def get_date_time_tz(date_string=""):
"""
:param date_string:
:return:
"""
parts = date_string.split(" ")
try:
res = parts[0]+"T"+parts[1]+"Z"
except IndexError:
res = date_string
return res
def timestamp_to_date(my_timestamp):
"""
:param my_timestamp:
:return:
"""
return dt.strptime(my_timestamp.strip().split("T")[0], DATE_FORMAT)
def get_formated_date(date):
"""
:param date:
:return:
"""
return date.strftime(DATE_FORMAT)
def reverse_format(date):
"""
:param date:
:return:
"""
return dt.strptime(date, DATE_FORMAT)
```
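A brief hedged usage sketch of the helpers above, assuming `DATE_FORMAT` in `my_constants` is `%Y-%m-%d` (as it is in the spider modules) and that `crawler/helper` is on `sys.path`:
```python
# Hedged usage sketch; assumes DATE_FORMAT == "%Y-%m-%d".
from datetime import datetime as dt, timedelta
from date_helper import datespan, get_formated_date, reverse_format

for day in datespan(dt(2018, 1, 1), dt(2018, 1, 4), delta=timedelta(days=1)):
    print(get_formated_date(day))    # 2018-01-01, 2018-01-02, 2018-01-03
print(reverse_format("2018-01-21"))  # back to a datetime object
```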
#### File: crawler/helper/json_builder.py
```python
import pickle
import re
from datetime import datetime as dt
from script_structure import my_main
from my_constants import *
from json_helper import *
from date_helper import get_date_time_tz
from date_helper import utc_to_str
def correct_date(date_string):
"""
:param date_string:
:return:
"""
r = re.compile('[a-zA-Z]{3} \d{2}, \d{4}')
if r.match(date_string):
return dt.strptime(date_string.strip(), '%b %d, %Y').strftime(DATE_FORMAT)
if "/" in date_string:
new_date_string = dt.strptime(date_string.strip(), '%d/%m/%Y').strftime(DATE_FORMAT)
if new_date_string:
return new_date_string
else:
return date_string
"""----------------------------------------------------
timeSeries
----------------------------------------------------"""
def bitcoinde(filename="", output_file = ""):
"""
timeseries aren't included in bitfinex or crypto.wat.ch,
have to be downloaded and adjusted to fit.
https://www.bitcoin.de/json/chart/stats_hourly_btcusd_statistics_60month.json
:return:
"""
output_dicct = {}
output_dicct['exchange'] = "bitcoin.de"
output_dicct['shortName'] = "btc"
output_dicct['unit'] = "usd"
output_dicct['timeSeriesValues'] =[]
with open(filename, "r") as fin:
lines = fin.read().split("],")
last_date = "'2017-01-01"
last_course = ""
for line in lines[1:]:
line= line[1:].split(",")
date = utc_to_str(line[0])
if date.startswith("2017") or date.startswith("2018"):
if date != last_date:
output_dicct['timeSeriesValues'].append({
'date': last_date+"T23:59:59.00000Z",
'value': last_course
})
last_date = date
last_course = line[4]
new_save_as_json(output=output_dicct,filename=output_file,as_array=False )
def space_to_tz(foldername=""):
"""
:param foldername:
:return:
"""
json_files = glob.glob(foldername + "*.json")
for file in json_files:
with open(file, 'r') as fin:
data = json.load(fin)
for i in range(len(data['timeSeriesValues'])):
entry = data['timeSeriesValues'][i]
if len(entry['date'].split(" ")) > 1:
entry['date'] = get_date_time_tz(entry['date'])
data['timeSeriesValues'][i] = entry
#print(entry)
new_save_as_json(data, file, as_array=False)
"""----------------------------------------------------
news
----------------------------------------------------"""
def remove_duplicates(file_name=""):
"""
:param file_name:
:return:
"""
with open(file_name, 'r') as fin:
mydicct_array= json.load(fin)
sources = []
result_ar = []
for i, article in enumerate(mydicct_array, 0):
link = mydicct_array[i]['source']
if link not in sources:
sources.append(link)
result_ar.append(article)
output_name = "/home/tobias/Dokumente/Crypto-News-Projekt/output/news/news_data_v7.json"
new_save_as_json(output=result_ar, filename=output_name, as_array=True)
"""----------------------------------------------------
more
----------------------------------------------------"""
def wiki_script():
"""
mini-script to map wiki entries onto exchange objects.
:return:
"""
# with open(
# file="/home/tobias/mygits/crypto-news-docs/crawler/input/currency_info/desired_currencies_wiki.json") as fin:
# wiki_data = json.load(fin)
output_folder = "/home/tobias/Schreibtisch/exchanges/"
json_files = glob.glob("/home/tobias/Schreibtisch/exchanges/" + "*.json")
print(len(json_files))
for file in json_files:
with open(file, 'r') as fin:
exchange_data = json.load(fin)
try:
ioc_codes = {
'USA': 'USA',
'CHN': 'CHN',
'RUS': 'RUS',
'HKG': 'HKG',
'GBR': 'GBR',
'MEX': 'MEX',
'CAN': 'CAN',
'LUX': 'LUX',
'GER': 'DEU',
'JPN': 'JPN'
}
# exchange_data['position']= ioc_codes[exchange_data['position']]
# if exchange_data['position'] == 'LUY':
# exchange_data['position'] = 'LUX'
for i, currency in enumerate(exchange_data['cryptoCurrencies'], 0):
exchange_data['cryptoCurrencies'][i]['description']['wikiEnglish'] = 'dummy'#wiki_data[currency['shortName']]['wikiEnglish']
exchange_data['cryptoCurrencies'][i]['description']['wikiGerman'] = 'dummy'#wiki_data[currency['shortName']]['wikiGerman']
out_file = output_folder + file.split("/")[-1]
new_save_as_json(exchange_data, filename=out_file, as_array=False)
except KeyError as e:
print(e)
def convert_jl_json_to_json(filename):
"""
:param filename:
:return:
"""
data = []
with open(filename, 'r') as fin:
for line in fin:
try:
json_string = line.encode('utf-8').decode().strip()
json_string = json.loads(json_string.strip().rstrip(","))
data.append(json_string)
except json.decoder.JSONDecodeError as e:
pass
new_save_as_json(output=data, as_array=True,
filename="/home/tobias/Dokumente/Crypto-News-Projekt/output/news/btc_cointelegraph.json")
def stat_articles_per_file(foldername = "/home/tobias/Dokumente/Crypto-News-Projekt/output/news/v_2/"):
"""
:param foldername:
:return:
"""
json_files = glob.glob(foldername + "*.json")
tag_currency_counter = {'ETH': 0,
'BTC': 0,
'LTC': 0,
'DSH': 0}
authors = {}
sources = {}
for file in json_files:
data = get_json_as_dict(file_name=file)
currency = data[0]['cryptoCurrency']['currency']
tag_currency_counter[currency] += len(data)
"""
counting articles per author
"""
# for article in data:
# author = article['metaInfo']['author']
# author_array = []
# if ',' in author:
# author_array = author.split(',')
# else:
# author_array.append(author)
#
# for person in author_array:
# if author in authors:
# authors[person] += 1
# else:
# authors[person] = 1
"""
counting articles per platform
"""
for article in data:
source = article['source']
if 'https' in source:
source = source.split('https://')[1]
if 'http' in source:
source = source.split('http://')[1]
source = source.split("www.")[-1].split('/')[0]
combi = source+" "+currency
if combi in sources:
sources[combi] += 1
else:
sources[combi] = 1
s = [(k, sources[k]) for k in sorted(sources, key=sources.get, reverse=True)]
print(s)
print(tag_currency_counter)
with open('../output/stats/platform_currency_counter', 'wb') as fout:
pickle.dump(s, fout)
if __name__ == '__main__':
bitcoinde()
with my_main(__file__):
#stat_articles_per_file()
#convert_jl_json_to_json("/home/tobias/Dokumente/Crypto-News-Projekt/output/news/v_2/btc_cointelegraph2.json")
pass
# pass
# data = get_json_as_dict(file_name="/home/tobias/Dokumente/Crypto-News-Projekt/output/news/merged.json")
# #print(len(data))
# for i, el in enumerate(data,0):
# body = el['body']
# if body == 'None':
# if 'bitcoinmagazine' in el['source']:
# print(el['source'])
# if '/r/' in keywords:
# keywords = keywords.replace('/r/', '')
# print(keywords)
# if keywords and keywords != 'none':
# keywords = keywords.replace('\r', '').replace('\n','').replace('\t','')
# else:
# keywords = ''
# data[i]['metaInfo']['keywords']= keywords
# new_save_as_json(output=data, filename="/home/tobias/Dokumente/Crypto-News-Projekt/output/news/merged.json",
# as_array=True)
```
#### File: crawler/helper/language_processor.py
```python
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
def get_text_of_n_words(text="",n=200):
"""
:param text:
:param n:
:return:
"""
sentences = sent_tokenize(text=text)
res = ""
words_already = 0
for sent in sentences:
res += str(sent) + " "
words_already += len(word_tokenize(sent))
if words_already > n:
break
return res
def clean_string(text=""):
"""
:param text:
:return:
"""
text = text.strip().replace('\n', '')
return text
if __name__ == '__main__':
print(get_text_of_n_words("""368
down vote.
If it's just a substring search you can use string.find("substring").
You do have to be a little careful with find, index, and in though, as they are substring searches. In other words, this:"""))
```
#### File: crawler/helper/my_logging.py
```python
import logging
import logging.config
import logging.handlers
import os
import json
LOG_FILE = "/home/tobias/mygits/crypto-news-docs/crawler/scrapy_crawler/scrapy_crawler/spider.log"
def setup_logging(
default_path='',
default_level = logging.INFO,
env_key='LOG_CFG'
):
"""Setup logging configuration
"""
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
def get_my_logger(fout_name=LOG_FILE):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.handlers.RotatingFileHandler(fout_name)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logging.basicConfig(level=logging.INFO)
return logger
```
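A short hedged example of wiring the two helpers above together; `logging.json` is an assumed dictConfig file and is not present in the repository as shown.
```python
# Hedged example; logging.json is an assumed dictConfig file.
from my_logging import setup_logging, get_my_logger

setup_logging(default_path="logging.json")  # falls back to basicConfig if the file is missing
logger = get_my_logger("spider.log")
logger.info("crawler started")
```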
#### File: crawler/request_crawler/ripplecoinnews_spider.py
```python
import requests
import time
import numpy as np
from lxml import etree
import sys
sys.path.append("../helper")
from helper.json_helper import save_as_json
DATE_FORMAT = "%Y-%m-%d"
class RippleCoinNewsSpider():
page = 1
href_bag = []
REQUEST_URL = "https://www.ripplecoinnews.com/wp-admin/admin-ajax.php?_wpnonce=2ed8421096"
allowed_domains = ['ripplecoinnews.com']
article_dicct_array = []
def __init__(self, year=2017):
"""
:param year:
:param crawl_recent:
:param currency:
"""
self.random_pause = [0.2, 2.5]
self.crawl_recent = True
self.year = year
self.currency = 'XRP' #currency
self.output_file = self.currency + "ripplecoinnews.json"
def request_crawl(self):
"""
:return:
"""
try:
response = requests.post(self.REQUEST_URL, {'action': 'load_post',
'cat':'ripple-news',
'col':'col-xs-6+col-sm-6+col-md-4',
'layout':'grid',
'page':'1'})
except requests.exceptions.ConnectionError:
return
if response is not None:
tree = etree.HTML(response.text)
for post in tree.xpath("""//div[contains(@class,"article-content")]"""):
"""
unlike scrapy, I can't simply pass the result on here for further processing
-> maybe I'll still find another solution for that
"""
article_href = post.xpath("""div[starts-with(@class,"entry-header")]/h3/a/@href""")[0]
if article_href not in self.href_bag:
self.article_dicct_array.append({
# 'href': article_href,
'title': post.xpath("""div[starts-with(@class,"entry-header")]
/h3/a/text()""")[0],
'currency': self.currency,
'metaInfo': {
'author': post.xpath("""div[starts-with(@class,"entry-meta")]
/span/span/a/text()""")[0],
'pagetopic': 'None',
'keywords': 'None',
'language': 'english'
},
# 'timestamp': {
# 'timestamp': post.xpath("""div[starts-with(@class,"entry-meta")]
# /span[contains(@class,"posted-on")]
# /a/time[contains(@class,"entry-date published")]/@datetime""")[0]
# },
'date' : post.xpath("""div[starts-with(@class,"entry-meta")]
/span[contains(@class,"posted-on")]
/a/time[contains(@class,"entry-date published")]/@datetime""")[0], # >v.6
'source': article_href,
'body': 'None'
})
else:
return
print(self.page)
t = np.random.uniform(self.random_pause[0], self.random_pause[1])
time.sleep(t)
self.page += 1
#self.request_crawl()
def main(argv):
# try:
# opts, args = getopt.getopt(argv, "ht:c:", ["time=", "currency="])
# except getopt.GetoptError:
# print('cointelegraph.py -t <year/recent> -c <currency>')
# sys.exit(2)
#
# time_span = ''
# currency = ''
# for opt, arg in opts:
# if opt == '-h':
# print('ripplecoinnews_spider.py -t <year/recent> -c <currency>')
# sys.exit()
# elif opt in ("-t", "--time"):
# time_span = arg
# elif opt in ("-c", "--currency"):
# currency = arg
# else:
# time_span = 'recent'
# currency = 'Ripple'
#
# if time_span == 'recent':
# year = 2017
# crawl_recent = True
# else:
# year = time_span
# crawl_recent = False
print("ripplecoinnews-scraper")
# print("Start to crawl for news about {} in timespan {}".format(currency,time_span))
ripplecoinnews_spider = RippleCoinNewsSpider(year='2017')
start = time.time()
ripplecoinnews_spider.request_crawl()
save_as_json(filename="../output/" + ripplecoinnews_spider.output_file, as_array=True, output=ripplecoinnews_spider.article_dict_array)
# with open("../output/" + ripplecoinnews_spider.output_file, 'w') as f:
# json.dump(ripplecoinnews_spider.article_dict_array, f)
# end = time.time() - start
#print('finished crawl after %f sec' % end)
if __name__ == '__main__':
main(sys.argv[1:])
```
#### File: scrapy_crawler/spiders/ethnews_spider.py
```python
from datetime import datetime as dt
import sys
"""------------------------------------------------------
import third party modules
------------------------------------------------------"""
import scrapy
"""------------------------------------------------------
import own functions/modules
------------------------------------------------------"""
sys.path.append("/home/tobias/mygits/crypto-news-docs/crawler/helper")
from my_constants import *
from date_helper import timestamp_to_date, get_formated_date
from my_logging import *
from my_imports import *
class EthnewsSpider(scrapy.Spider):
name = "ethnewsspider"
index = 1
crawl_until = 0
start_urls = ['https://www.ethnews.com/news']
base_url = "https://www.ethnews.com"
href_bag = []
allowed_domains = ["ethnews.com"]
"""
https://www.ethnews.com/news?page=1
"""
def __init__(self, start_date=dt(2018, 1, 1),
stop_date=dt(2018, 1, 21), *args, **kwargs):
super(EthnewsSpider, self).__init__(*args, **kwargs)
self.currency = ETHEREUM
self.start_date = start_date
self.stop_date = stop_date
self.my_logger = get_my_logger()
self.my_logger.info("start %s"% __class__)
self.my_logger.info("crawl {} from {} until {}".format(self.currency, self.stop_date,self.start_date))
def start_requests(self):
"""
:return:
"""
#
for url in self.start_urls:
yield scrapy.Request(url=url, callback=self.collect_page_links)
def parse_article_page(self, response):
"""
:param response:
:return:
"""
description = response.xpath('//p[@class="article__summary"]/text()').extract_first()
date_string = response.xpath('//div[@class="article__published"]/@data-created').extract_first()
date = get_formated_date(timestamp_to_date(date_string))
title = response.xpath('//div[@class="container"]/h1/text()').extract_first()
first_name = response.xpath('//div[@class="article-gutter__author"]/a/h4/text()').extract_first()
last_name = response.xpath('//div[@class="article-gutter__author"]/a/h3/text()').extract_first()
author = first_name + " " + last_name
keyword_string = ""
keywords = response.xpath('//div[@class="article__tags"]/div[@class="article__tags__item"]/text()')
for i, keyword in enumerate(keywords, 0):
keyword_string += keyword.extract()
if i != len(keywords)-1:
keyword_string += ", "
elif i >= 3:
break
topic_string = ""
topics = response.xpath('//div[@class="article__category"]/a/text()')
for i, topic in enumerate(topics, 0):
topic_string += topic.extract()
if i != len(topics)-1:
topic_string += ", "
elif i >= 3:
break
for sign in APOSTROPHES:
# only on this side.
description = description.replace(sign, "'")
title = title.replace(sign, "'")
author = author.replace(sign, "'")
item = ScrapyCrawlerItem()
item['title'] = title
item['metaInfo'] = {
'author': author,
'language': 'english',
'pageTopic': topic_string,
'keywords': keyword_string,
}
item['date'] = date
item['source'] = response.url
item['body'] = description
item['cryptoCurrency'] = {'currency': self.currency}
yield item
def collect_page_links(self, response):
"""
:param response:
:return:
"""
if self.index == 1:
last_page = response.xpath('//div[contains(@class, "pagination")]/div[contains(@class, "pagination__text")]/text()')\
.extract_first().split('of')[-1].strip()
self.crawl_until = int(last_page)
self.index += 1
link = response.xpath('//div[contains(@class, "news__top")]/a/@href').extract_first()
article_link = self.base_url + link
"""nicht dringend, aber hier fehlt eine abfrage nach dem datum (erste seite wird meistens mitgescrollt)"""
# date_string = response.xpath('//div[contains(@class, "news__top__primary__info")]/i/text()').extract_first()
# date_string = re.sub(r'(\d)(st|nd|rd|th)', r'\1', date_string)
# date = dt.strptime(date_string.strip(), '%b %d, %Y').strftime(DATE_FORMAT)
if article_link not in self.href_bag:
self.href_bag.append(article_link)
if self.index < self.crawl_until:
stop = False
for post in response.xpath('//div[starts-with(@class,"article-thumbnail__info")]'):
link = post.xpath('h2[contains(@class,"article-thumbnail__info__title")]/a/@href').extract_first()
if link is None:
continue
date_string = post.xpath('''div[contains(@class,"article-thumbnail__info__etc")]
/div[contains(@class, "article-thumbnail__info__etc__date")]/h6/@data-created-short''')\
.extract_first()
article_link = self.base_url + link
date = dt.strptime(date_string.strip().split("T")[0], '%Y-%m-%d')
if date >= self.start_date:
if date <= self.stop_date or self.stop_date == self.start_date:
if article_link not in self.href_bag:
self.href_bag.append(article_link)
else:
stop = True
if not stop:
self.index += 1
next_page = self.start_urls[0]+'?page='+str(self.index)
yield response.follow(next_page, callback=self.collect_page_links)
else:
for site in self.href_bag:
yield scrapy.Request(url=site, callback=self.parse_article_page)
```
#### File: scrapy_crawler/spiders/newsbitcoin_spider.py
```python
import sys
import datetime
from datetime import datetime as dt
"""------------------------------------------------------
import third party modules
------------------------------------------------------"""
import scrapy
"""------------------------------------------------------
import own functions/modules
------------------------------------------------------"""
sys.path.append("/home/tobias/mygits/crypto-news-docs/crawler/helper")
from my_constants import *
from language_processor import get_text_of_n_words
from date_helper import get_formated_date
from date_helper import timestamp_to_date
from my_imports import *
from my_logging import *
class NewsBitcoinSpider(scrapy.Spider):
name = "newsbitcoinspider"
index = 0
crawl_until = 0
allowed_domains = ["news.bitcoin.com"]
start_urls = ['https://news.bitcoin.com/']
complete_cycle = False
def __init__(self, start_date=dt(2018, 1, 1),
stop_date=dt(2018, 1, 21), *args, **kwargs):
super(NewsBitcoinSpider, self).__init__(*args, **kwargs)
self.currency = BITCOIN
self.start_date = start_date
self.stop_date = stop_date
self.href_bag = []
self.my_logger = get_my_logger()
self.my_logger.info("start %s"% __class__)
self.my_logger.info("crawl {} today until {}".format(self.currency, self.start_date))
def start_requests(self):
"""
:return:
"""
for url in self.start_urls:
yield scrapy.Request(url=url, callback=self.collect_page_links)
def parse_article_page(self, response):
"""
also parse author
is called after links have been collected
:param response:
:return:
"""
description = " ".join(response.xpath('//div[@class="td-post-content"]/p/strong/text()').extract())
i = 3  # while the description is not long enough, add more text.
while len(description) < CHARACTER_NUMBER and i < 13:
description = " ".join(response.xpath('//div[@class="td-post-content"]/p/text()').extract()[:i]).strip()
i += 1
if description:
description = get_text_of_n_words(description, n=WORD_NUMBER)
title = response.xpath('//h1[@class= "entry-title"]/text()').extract_first()
author = response.xpath('//div[@class="td-post-author-name"]/a/text()').extract_first()
try:
date_string = response.xpath('//div[@class="btc-post-meta td-post-title td-post-author-name"]/text()').extract_first().strip()
except AttributeError:
date_string = ''
if date_string:
if "days ago" in date_string:
days_ago = int(date_string.strip().split("days")[0].rstrip())
date_string = (datetime.date.today() - datetime.timedelta(days=days_ago)).strftime(DATE_FORMAT)
elif "day ago" in date_string:
date_string = (datetime.date.today() - datetime.timedelta(days=1)).strftime(DATE_FORMAT)
elif "hours ago" in date_string:
hours_ago = int(date_string.strip().split("hours")[0].rstrip())
date_string = (datetime.date.today() - datetime.timedelta(hours=hours_ago)).strftime(DATE_FORMAT)
elif "hour ago" in date_string:
date_string = (datetime.date.today() - datetime.timedelta(hours=1)).strftime(DATE_FORMAT)
else:
date = response.xpath('//span[@class="td-post-date"]/time[@class="entry-date updated td-module-date"]/@datetime').extract_first()
date_string = get_formated_date(timestamp_to_date(date))
topic = response.xpath('//div[@class="td-post-category top-left-label btc-post-meta td-post-title"]/text()')\
.extract_first().strip()
keywords = response.xpath('//ul[@class="td-tags td-post-small-box clearfix"]') # first is 'TAG'
keywords = keywords.xpath('li/a/text()')
keyword_string = ""
for i, keyword in enumerate(keywords, 1):
keyword_string += keyword.extract()
if i != len(keywords):
keyword_string += ", "
if len(keywords) > 3:
keyword_string = ",".join(keyword_string.split(",")[:3])
item = ScrapyCrawlerItem()
item['title'] = title
item['metaInfo'] = {
'author': author,
'language': 'english',
'pageTopic': topic,
'keywords': keyword_string,
}
item['date'] = date_string
item['source'] = response.url
item['body'] = description
item['cryptoCurrency'] = {'currency': self.currency}
yield item
def collect_page_links(self, response):
"""
:param response:
:return:
"""
if self.index == 0:
last_page = response.xpath('//div[starts-with(@class, "page-nav")]/a[contains(@class, "last")]/@title')\
.extract_first()
self.crawl_until = int(last_page)
for post in response.xpath('//div[starts-with(@class,"td_module_mx16")]'):
link = post.xpath('div/div/h3/a/@href').extract_first()
if link not in self.href_bag:
self.href_bag.append(link)
if self.index < self.crawl_until:
stop = False
for post in response.xpath('//div[contains(@class, "item-details")]'):
link = post.xpath('h3/a/@href').extract_first()
date_string = post.xpath('div/span/time/@datetime').extract_first()
date = dt.strptime(date_string.strip().split("T")[0], '%Y-%m-%d')
if self.stop_date > date > self.start_date:
if link not in self.href_bag:
self.href_bag.append(link)
else:
stop = True
if not stop:
self.index += 1
next_page = self.start_urls[0]+"page/"+str(self.index)
yield response.follow(next_page, callback=self.collect_page_links)
else:
for url in self.href_bag:
yield scrapy.Request(url=url, callback=self.parse_article_page)
``` |
{
"source": "43ravens/ECget",
"score": 2
} |
#### File: ECget/ecget/weather_amqp.py
```python
import logging
import os
import time
import uuid
import kombu
import kombu.exceptions
import kombu.mixins
__all__ = [
'DatamartConsumer', 'get_queue_name',
]
class DatamartConsumer(kombu.mixins.ConsumerMixin):
"""Consumer for EC CMC Datamart AMQP topic exchange service.
:arg queue_name: Name of message queue to consume from.
:type queue_name: str
:arg routing_key: Routing key that the queue will receive.
Also known as exchange key or binding key.
:type routing_key: str
:arg msg_handler: Callable that will process body of messages received
on the queue.
:arg lifetime: Number of seconds that the consumer should operate for.
Note that if the connection to the AMQP broker is lost
the consumer will attempt to re-establish for a new
lifetime period.
:type lifetime: int
:arg queue_expiry: Number of seconds to send to broker as value of
:kbd:`x-expires` queue declaration argument.
:type queue_expiry: int
"""
CONNECTION = {
'transport': 'amqp',
'userid': 'anonymous',
'password': '<PASSWORD>',
'hostname': 'dd.weather.gc.ca',
'port': 5672,
'virtual_host': '/',
}
EXCHANGE = {
'name': 'xpublic',
'type': 'topic',
}
log = logging.getLogger(__name__)
def __init__(
self,
queue_name,
routing_key,
msg_handler,
lifetime=900,
queue_expiry=None,
):
self.queue_name = queue_name
self.routing_key = routing_key
self.msg_handler = msg_handler
self.lifetime = lifetime
self.queue_expiry = queue_expiry
def on_consume_ready(self, connection, channel, consumers, **kwargs):
"""Calculate when the consumer should shut itself down.
"""
self.end_time = time.time() + self.lifetime
self.log.debug(
'consumer starting for {.lifetime} sec lifetime'.format(self))
def on_iteration(self):
"""Check for consumer shut-down time.
"""
if time.time() > self.end_time:
self.log.debug('consumer lifetime limit reached')
self.should_stop = True
def on_consume_end(self, connection, channel):
"""Close the connection to the server.
"""
connection.close()
def get_consumers(self, Consumer, channel):
"""Bind exchange and queue to AMQP channel.
If the queue does not exist on the server,
declare it to the server and bind it to the exchange and routing key.
:returns: List containing a configured Consumer instance.
"""
exchg = self.exchange(channel)
self.log.debug('exchange bound to channel: {}'.format(exchg))
queue = self.queue(channel)
self.log.debug('queue bound to channel: {}'.format(queue))
try:
queue.queue_declare(passive=True)
self.log.debug('queue exists on server')
except kombu.exceptions.ChannelError:
queue.queue_declare()
self.log.debug('queue declared on server')
queue.queue_bind()
self.log.debug('queue binding created on server')
return [
Consumer(
queues=[queue],
callbacks=[self.handle_msg],
auto_declare=False,
)
]
def handle_msg(self, body, message):
"""Pass the body of a received message to the message handler
and acknowledge receipt of the message to the server.
"""
self.msg_handler(body)
message.ack()
def run(self):
"""Run the consumer.
"""
self.connection = kombu.Connection(**self.CONNECTION)
self.exchange = kombu.Exchange(**self.EXCHANGE)
self.queue = kombu.Queue(
name=self.queue_name,
exchange=self.exchange,
routing_key=self.routing_key,
)
if self.queue_expiry is not None:
self.queue.queue_arguments = {'x-expires': self.queue_expiry}
super(DatamartConsumer, self).run()
def get_queue_name(prefix):
"""Return a queue name based on the prefix.
The queue name is the prefix with the string representation of a
random UUID dot-appended to it;
i.e. the queue name for the prefix :kbd:`foo.bar` might be
:kbd:`foo.bar.4749cb1b-b33d-46ac-b89c-b4d469ddabe9`.
Queues persist on the AMQP server but the name can only be provided
by the client/consumer.
To allow the client/consumer to re-connect to a queue that it has
already created on the server,
queue names are stored in the :file:`./queues/` directory in files
named with their prefixes.
If a queue file with the name prefix exists in the :file:`./queues/`
directory its contents are returned as the queue name.
Otherwise,
a random UUID is dot-appended to prefix,
stored in a file called prefix in the :file:`./queues/` directory,
and the newly created queue name is returned.
This function creates the :file:`./queues/` directory if it does not
already exist.
:arg prefix: Queue name prefix.
:type prefix: str
:returns: Queue name
:rtype: str
"""
queues_dir = os.path.join('.', 'queues')
if not os.path.exists(queues_dir):
os.mkdir(queues_dir)
queue_file = os.path.join(queues_dir, prefix)
if not os.path.exists(queue_file):
queue_name = '.'.join((prefix, str(uuid.uuid4())))
with open(queue_file, 'wt') as f:
f.write(queue_name)
else:
with open(queue_file, 'rt') as f:
queue_name = f.read()
return queue_name
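# Illustrative usage sketch (not part of the original module; the prefix and
# routing key below are hypothetical placeholders):
#
#   queue_name = get_queue_name('cmc.swob.sandheads')
#   consumer = DatamartConsumer(
#       queue_name=queue_name,
#       routing_key='observations.swob-ml.#',
#       msg_handler=print,       # any callable that accepts the message body
#       lifetime=900,
#       queue_expiry=1800000,    # passed through as the x-expires queue argument
#   )
#   consumer.run()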
```
#### File: ECget/tests/test_river.py
```python
try:
import unittest.mock as mock
except ImportError: # pragma: no cover; happens for Python < 3.3
import mock
import arrow
import bs4
import cliff.app
import pytest
import stevedore.driver
@pytest.fixture
def river_flow():
import ecget.river
return ecget.river.RiverFlow(mock.Mock(spec=cliff.app.App), [])
@pytest.fixture
def daily_value_mgr():
import ecget.SOG_formatters
driver = mock.Mock(
name='daily_value',
obj=ecget.SOG_formatters.DailyValue(),
)
return stevedore.driver.DriverManager.make_test_instance(driver)
def test_get_parser(river_flow):
parser = river_flow.get_parser('ecget river flow')
assert parser.prog == 'ecget river flow'
def test_take_action_end_date_None(river_flow):
start_date = arrow.get(2014, 1, 22)
parsed_args = mock.Mock(
station_id='foo',
start_date=start_date,
end_date=None,
)
river_flow._get_data = mock.Mock()
river_flow._calc_daily_avgs = mock.Mock(return_value=[])
river_flow._output_results = mock.Mock()
river_flow.take_action(parsed_args)
assert parsed_args.end_date == start_date
def test_take_action_interpolate_missing_if_necessary(river_flow):
parsed_args = mock.Mock(
station_id='foo',
start_date=arrow.get(2014, 1, 22),
end_date=arrow.get(2014, 1, 23),
)
river_flow._get_data = mock.Mock()
mock_avgs = range(2)
river_flow._calc_daily_avgs = mock.Mock(return_value=mock_avgs)
river_flow._interpolate_missing = mock.Mock()
river_flow._output_results = mock.Mock()
river_flow.take_action(parsed_args)
river_flow._interpolate_missing.assert_called_once_with(mock_avgs)
def test_calc_daily_avgs_1_row(river_flow):
html = '''
<table>
<tr>
<td>2014-01-21 19:02:00</td>
<td data-order="4200.00734274105">4,200</td>
<td data-order="3.89">3.89</td>
</tr>
</table>
'''
raw_data = bs4.BeautifulSoup(html, 'html.parser')
daily_avgs = river_flow._calc_daily_avgs(raw_data, arrow.get(2014, 1, 22))
assert daily_avgs == [(arrow.get(2014, 1, 21), 4200.0)]
def test_calc_daily_avgs_2_rows_1_day(river_flow):
html = '''
<table>
<tr>
<td>2014-01-21 19:02:00</td>
<td data-order="4200.00734274105">4,200</td>
<td data-order="3.89">3.89</td>
</tr>
<tr>
<td>2014-01-21 19:07:00</td>
<td data-order="4399.77221395191">4,400</td>
<td data-order="0"></td>
</tr>
</table>
'''
raw_data = bs4.BeautifulSoup(html, 'html.parser')
daily_avgs = river_flow._calc_daily_avgs(raw_data, arrow.get(2014, 1, 22))
assert daily_avgs == [(arrow.get(2014, 1, 21), 4300.0)]
def test_calc_daily_avgs_2_rows_2_days(river_flow):
html = '''
<table>
<tr>
<td>2014-01-21 19:02:00</td>
<td data-order="4200.00734274105">4,200</td>
<td data-order="3.89">3.89</td>
</tr>
<tr>
<td>2014-01-22 19:07:00</td>
<td data-order="4399.77221395191">4,400</td>
<td data-order="0"></td>
</tr>
</table>
'''
raw_data = bs4.BeautifulSoup(html, 'html.parser')
daily_avgs = river_flow._calc_daily_avgs(raw_data, arrow.get(2014, 1, 23))
expected = [
(arrow.get(2014, 1, 21), 4200.0),
(arrow.get(2014, 1, 22), 4400.0),
]
assert daily_avgs == expected
def test_calc_daily_avgs_end_date(river_flow):
html = '''
<table>
<tr>
<td>2014-01-21 19:02:00</td>
<td data-order="4200.00734274105">4,200</td>
<td data-order="3.89">3.89</td>
</tr>
<tr>
<td>2014-01-22 19:07:00</td>
<td data-order="4399.77221395191">4,400</td>
<td data-order="0"></td>
</tr>
</table>
'''
raw_data = bs4.BeautifulSoup(html, 'html.parser')
daily_avgs = river_flow._calc_daily_avgs(raw_data, arrow.get(2014, 1, 21))
assert daily_avgs == [(arrow.get(2014, 1, 21), 4200.0)]
def test_read_datestamp(river_flow):
datestamp = river_flow._read_datestamp('2014-01-22 18:16:42')
assert datestamp == arrow.get(2014, 1, 22)
@pytest.mark.parametrize(
'input, expected', [
('4200.0', 4200.0),
('4200.0*', 4200.0),
]
)
def test_convert_flow(river_flow, input, expected):
flow = river_flow._convert_flow(input)
assert flow == expected
def test_interpolate_missing_no_gap(river_flow):
daily_avgs = [
(arrow.get(2014, 1, 22), 4300.0),
(arrow.get(2014, 1, 23), 4500.0),
]
river_flow.log = mock.Mock()
river_flow._interpolate_values = mock.Mock()
river_flow._interpolate_missing(daily_avgs)
assert len(daily_avgs) == 2
assert not river_flow.log.debug.called
assert not river_flow._interpolate_values.called
def test_interpolate_missing_1_day_gap(river_flow):
daily_avgs = [
(arrow.get(2014, 1, 22), 4300.0),
(arrow.get(2014, 1, 24), 4500.0),
]
river_flow.log = mock.Mock()
river_flow._interpolate_values = mock.Mock()
river_flow._interpolate_missing(daily_avgs)
expected = (arrow.get(2014, 1, 23), None)
assert daily_avgs[1] == expected
river_flow.log.debug.assert_called_once_with(
'interpolated average flow for 2014-01-23')
river_flow._interpolate_values.assert_called_once_with(daily_avgs, 1, 1)
def test_interpolate_missing_2_day_gap(river_flow):
daily_avgs = [
(arrow.get(2014, 1, 22), 4300.0),
(arrow.get(2014, 1, 25), 4600.0),
]
river_flow.log = mock.Mock()
river_flow._interpolate_values = mock.Mock()
river_flow._interpolate_missing(daily_avgs)
expected = [
(arrow.get(2014, 1, 23), None),
(arrow.get(2014, 1, 24), None),
]
assert daily_avgs[1:3] == expected
expected = [
mock.call('interpolated average flow for 2014-01-23'),
mock.call('interpolated average flow for 2014-01-24'),
]
assert river_flow.log.debug.call_args_list == expected
river_flow._interpolate_values.assert_called_once_with(daily_avgs, 1, 2)
def test_interpolate_missing_2_gaps(river_flow):
daily_avgs = [
(arrow.get(2014, 1, 22), 4300.0),
(arrow.get(2014, 1, 24), 4500.0),
(arrow.get(2014, 1, 25), 4500.0),
(arrow.get(2014, 1, 28), 4200.0),
]
river_flow.log = mock.Mock()
river_flow._interpolate_values = mock.Mock()
river_flow._interpolate_missing(daily_avgs)
expected = (arrow.get(2014, 1, 23), None)
assert daily_avgs[1] == expected
expected = [
(arrow.get(2014, 1, 26), None),
(arrow.get(2014, 1, 27), None),
]
assert daily_avgs[4:6] == expected
expected = [
mock.call('interpolated average flow for 2014-01-23'),
mock.call('interpolated average flow for 2014-01-26'),
mock.call('interpolated average flow for 2014-01-27'),
]
assert river_flow.log.debug.call_args_list == expected
expected = [
mock.call(daily_avgs, 1, 1),
mock.call(daily_avgs, 4, 5),
]
assert river_flow._interpolate_values.call_args_list == expected
def test_interpolate_values_1_day_gap(river_flow):
daily_avgs = [
(arrow.get(2014, 1, 22), 4300.0),
(arrow.get(2014, 1, 23), None),
(arrow.get(2014, 1, 24), 4500.0),
]
river_flow._interpolate_values(daily_avgs, 1, 1)
assert daily_avgs[1] == (arrow.get(2014, 1, 23), 4400.0)
def test_interpolate_values_2_day_gap(river_flow):
daily_avgs = [
(arrow.get(2014, 1, 22), 4300.0),
(arrow.get(2014, 1, 23), None),
(arrow.get(2014, 1, 24), None),
(arrow.get(2014, 1, 25), 4600.0),
]
river_flow._interpolate_values(daily_avgs, 1, 2)
expected = [
(arrow.get(2014, 1, 23), 4400.0),
(arrow.get(2014, 1, 24), 4500.0),
]
assert daily_avgs[1:3] == expected
def test_output_results(daily_value_mgr, river_flow, capsys):
river_flow._output_results([(arrow.get(2014, 1, 23), 4200.0)])
out, err = capsys.readouterr()
assert out == '2014 01 23 4.200000e+03\n'
``` |
{
"source": "443ki/ProRob_Code",
"score": 3
} |
#### File: ProRob_Code/cahpter2/lidar_200.py
```python
import pandas as pd
data = pd.read_csv("sensor_data_200.txt", delimiter=" ", header=None, names=("data","time","ir","lidar"))
data
# In[]:
print(data["lidar"][0:5])
# In[]:
import matplotlib.pyplot as plt
data["lidar"].hist(bins = max(data["lidar"]) - min(data["lidar"]), align = 'left')
plt.show()
# In[]:
mean1 = sum(data["lidar"].values)/len(data["lidar"].values)
mean2 = data["lidar"].mean()
print(mean1, mean2)
# In[]:
data["lidar"].hist(bins = max(data["lidar"]) - min(data["lidar"]), color = "orange", align='left')
plt.vlines(mean1, ymin=0, ymax=5000, color="red")
plt.show()
# In[]:
### Computed from the definition ###
zs = data["lidar"].values
mean = sum(zs)/len(zs)
diff_square = [ (z - mean)**2 for z in zs]
# sample variance
sampling_var = sum(diff_square)/(len(zs))
# unbiased variance
unbiased_var = sum(diff_square)/(len(zs)-1)
print(sampling_var)
print(unbiased_var)
### Using pandas ###
# sample variance
pandas_sampling_var = data["lidar"].var(ddof=0) # ddof=False raises an error for some reason
# default (unbiased variance)
pandas_default_var = data["lidar"].var()
print(pandas_sampling_var)
print(pandas_default_var)
### Using NumPy ###
import numpy as np
numpy_default_var = np.var(data["lidar"])
numpy_unbiased_var = np.var(data["lidar"], ddof=1)
print(numpy_default_var)
print(numpy_unbiased_var)
# In[]:
import math
### Computed from the definition ###
stddev1 = math.sqrt(sampling_var)
stddev2 = math.sqrt(unbiased_var)
### Using pandas ###
pandas_stddev = data["lidar"].std()
print(stddev1)
print(stddev2)
print(pandas_stddev)
# In[]:
freqs = pd.DataFrame(data["lidar"].value_counts())
freqs.transpose()
# In[]:
freqs["probs"] = freqs["lidar"]/len(data["lidar"])
freqs.transpose()
# In[]:
sum(freqs["probs"])
# In[]:
freqs["probs"].sort_index().plot.bar(color="blue")
plt.show()
# In[]:
def drawing():
return freqs.sample(n=1, weights="probs").index[0]
drawing()
# In[]:
samples = [ drawing() for i in range(100)]
# samples = [ drawing() for i in range(len(data))]
simulated = pd.DataFrame(samples, columns=["lidar"])
p = simulated["lidar"]
p.hist(bins = max(p) - min(p), color="orange", align='left')
plt.show()
# In[]:
def p(z, mu=209.7, dev=23.4):
return math.exp(-(z - mu)**2/(2*dev))/math.sqrt(2*math.pi*dev)
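# Note (added comment): in this density dev is used as the variance (sigma^2),
# not the standard deviation; the defaults are presumably the sample mean and
# variance computed above.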
# In[]:
zs = range(190,230)
ys = [p(z) for z in zs]
plt.plot(zs, ys)
plt.show()
# In[]:
def prob(z, width=0.5):
return width*( p(z-width) + p(z+width) )
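# (added comment) Trapezoidal approximation of the density integrated over the
# bin [z-width, z+width], i.e. P(z-0.5 < Z < z+0.5) for width=0.5.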
zs = range(190,230)
ys = [prob(z) for z in zs]
plt.bar(zs, ys, color="red", alpha=0.3)
f = freqs["probs"].sort_index()
plt.bar(f.index, f.values, color="blue", alpha=0.3)
plt.show()
# In[]:
from scipy.stats import norm
zs = range(190,230)
ys = [norm.pdf(z, mean1, stddev1) for z in zs]
plt.plot(zs,ys)
plt.show()
# In[]:
zs = range(190, 230)
ys = [norm.cdf(z, mean1, stddev1) for z in zs]
plt.plot(zs, ys, color="red")
plt.show()
# In[]:
zs = range(190, 230)
ys = [norm.cdf(z+0.5, mean1, stddev1) - norm.cdf(z-0.5, mean1, stddev1) for z in zs]
plt.bar(zs, ys)
plt.show()
``` |
{
"source": "444thLiao/VarappX-flask",
"score": 3
} |
#### File: varappx/common/email.py
```python
from flask_mail import Mail
from varappx.handle_init import app
from flask_mail import Message
from varappx.handle_config import settings
mail = Mail(app)
email_TAGS = '[ VarX ] '
def send_email(email_to, subject='No subject', text='', html='', tofile=None):
"""Call _send_email using the app's settings"""
_send_email(settings.MAIL_USERNAME,email_to, subject=subject, text=text, html=html, tofile=tofile)
def _send_email(email_from,email_to, subject='No subject', text='', html='', tofile=None):
"""
:param email_to: email of the receiver
:param subject: head of the message
:param text: plain text to send if HTML cannot be used
:param html: message contend, in HTML format (has priority over *text*)
:param tofile: file object, for testing purposes
"""
msg = Message(email_TAGS+subject,sender=email_from,recipients=[email_to])
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred. If it is supported, only the HTML
# message will be received.
msg.body = text
msg.html = html
if tofile:
tofile.write(msg.as_string())
else:
try:
mail.send(msg)
except ConnectionRefusedError:
# MAIL_SERVER and MAIL_PORT are assumed to be defined in the Flask-Mail settings
raise ConnectionRefusedError("No SMTP server was found at {}:{}".format(settings.MAIL_SERVER, settings.MAIL_PORT))
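# Illustrative usage (recipient address and wording are placeholders):
#   send_email('<EMAIL>', subject='Account activation',
#              text='Please activate your account.',
#              html='<p>Please activate your account.</p>')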
```
#### File: varappx/data_models/variants.py
```python
import collections
from varappx.common.genotypes import decode_int
from varappx.constants.filters import ALL_VARIANT_FILTER_NAMES
from varappx.main.filters.sort import Sort
from varappx.models.gemini import Variants, GeneDetailed
# For export to frontend
_variant_genotype_expose = {0: [0,0], 1: [0,1], 2: [None,None], 3: [1,1]}
# Actually gt_types=2 means that it is unknown,
# cf. https://github.com/arq5x/gemini/blob/master/gemini/gemini_constants.py and google groups.
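# Illustration (assumed decoding, not from the original source): a decoded gts vector
# [0, 1, 3, 2] is exposed as [[0, 0], [0, 1], [1, 1], [None, None]].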
VARIANT_FIELDS = [f for f in Variants.__table__.columns.keys()] + ['source']
# A simple, lighter model of Variant - an object with same fields but without special methods
VariantTuple = collections.namedtuple('VariantTuple', VARIANT_FIELDS)
VariantTriplet = collections.namedtuple('VariantTriplet', ['variant_id','gene_symbol','source']) # for compound het
VariantMono = collections.namedtuple('VariantMono', 'variant_id') # for other gen filters
VariantTupleStats = collections.namedtuple('VariantTupleStats', ALL_VARIANT_FILTER_NAMES) # for stats
# Proxy model for variants
# Making all the changes to the data that are necessary to filter correctly
class Variant(Variants):
source = ''
class Meta:
proxy = True
class VariantsCollection:
"""A list of variants - such as the result of evaluating a QuerySet,
the result of a query (filtering) of the database.
"""
def __init__(self, variants, cache_key=None, db=None):
"""Construct a VariantsCollection based on either a QuerySet
(which we evaluate with `list()`) or a list of Variant objects.
:param db: the name of the db these variants come from.
"""
self.list = list(variants)
self.cache_key = cache_key
self.db = db
def __getitem__(self, item):
return self.list[item]
def __len__(self):
return len(self.list)
#return self.variants.count() if self._n is None else self._n
def __next__(self):
return next(self.list)
def __add__(self, other):
return VariantsCollection(self.list + other.list, db=self.db)
@property
def ids(self):
return [v.variant_id for v in self.list]
def pop(self, i):
self.list.pop(i)
def remove(self, elt):
self.list.remove(elt)
def append(self, sample):
self.list.append(sample)
def extend(self, other):
self.list.extend(other.list)
def sub(self, a, b=None):
"""Return a new collection with only the first N variants."""
if b is None:
return VariantsCollection(self.list[:a], db=self.db)
else:
return VariantsCollection(self.list[a:b], db=self.db)
def get_field_values(self, field_name):
""" Return a list of all values for the given field_name."""
return [getattr(v, field_name) for v in self.list]
def order_by(self, key, reverse=False):
"""Return a new ordered collection of the same elements.
:param key: either a string with the attribute or a list of keys. The special
'location' parameter can be passed, to sort them by chrom + start (chromosome as a string)
:param reverse: if True, sort in the reverse order.
"""
keyl = Sort(key, reverse).key_condition
return VariantsCollection(sorted(self.list, key=keyl, reverse=reverse), db=self.db)
def sort_inplace(self, key, reverse=False):
"""Order the collection in-place"""
keyl = Sort(key, reverse).key_condition
self.list.sort(key=keyl, reverse=reverse)
def __str__(self):
return "<Collection of {} variants>".format(len(self.list))
def expand(self):
return '\n'.join([str(v) for v in self.list])
def expose(self):
return [v.expose() for v in self.list]
def expose_variant(v):
"""The JSON to return to the frontend"""
return {
"variant_id": v.variant_id,
"chrom": v.chrom,
"start": v.start + 1,
"end": v.end,
"ref": v.ref,
"alt": v.alt,
"quality": v.qual,
"genotypes_index": [_variant_genotype_expose[i] for i in decode_int(v.gts)] if v.gts else [],
"pass_filter": v.filter or 'PASS',
"dbsnp": v.rs_ids.split(',') if v.rs_ids is not None else [],
"is_exonic": v.is_exonic,
"is_coding": v.is_coding,
"aaf_1kg_all": v.aaf_1kg_all,
"aaf_esp_all": v.aaf_esp_all,
"aaf_exac_all": v.aaf_exac_all,
"aaf_max_all": v.max_aaf_all,
"gene_symbol": v.gene,
"ensembl_transcript_id": v.transcript,
"impact": v.impact,
"impact_severity": v.impact_severity,
"aa_change": v.aa_change,
"polyphen_pred": v.polyphen_pred,
"polyphen_score": v.polyphen_score,
"sift_pred": v.sift_pred,
"sift_score": v.sift_score,
"cadd_raw": v.cadd_raw,
"cadd_scaled": v.cadd_scaled,
"clinvar_sig": v.clinvar_sig,
"clinvar_disease_acc": v.clinvar_disease_acc.split("|") if v.clinvar_disease_acc is not None else [],
"gerp_bp_score": v.gerp_bp_score,
"gerp_element_pval": v.gerp_element_pval,
"source": v.source,
"qual_depth": v.qual_depth,
"fisher_strand_bias": v.fisher_strand_bias,
"rms_map_qual": v.rms_map_qual,
"hgvsp": v.vep_hgvsp,
"hgvsc": v.vep_hgvsc,
"read_depth": v.read_depth,
"allele_count": v.allele_count,
"allele_freq": v.allele_freq,
"base_qual_rank_sum": v.base_qual_rank_sum,
"map_qual_rank_sum": v.map_qual_rank_sum,
"read_pos_rank_sum": v.read_pos_rank_sum,
"strand_bias_odds_ratio": v.strand_bias_odds_ratio,
"type": v.type,
"allele_depths":v.allele_depths,
"allele_freq_raws":v.allele_freq_raws,
"allele_depths_raws": v.allele_depths_raws,
}
def add_genotypes_selection(v_exposed, samples_selection):
v_exposed["genotypes_index"] = samples_selection.select_x_active(v_exposed["genotypes_index"])
return v_exposed
def expose_variant_full(v, samples_selection):
exp = expose_variant(v)
exp = add_genotypes_selection(exp, samples_selection)
return exp
def annotate_variants(variants, db):
from varappx.handle_init import db as DB
transcripts = [v['ensembl_transcript_id'] for v in variants]
DB.create_all(bind=db)
gds = GeneDetailed.query.filter(GeneDetailed.transcript.in_(transcripts)).all()
gd=[]
for _gd in gds:
gd.append([_gd.transcript,_gd.ensembl_gene_id,_gd.entrez_id])
annot = {}
for t,ensg,entrez in gd:
annot[t] = (ensg, entrez)
for v in variants:
enst = v['ensembl_transcript_id']
ann = annot.get(enst)
if ann:
v['ensembl_gene_id'] = ann[0]
v['entrez_gene_id'] = ann[1]
return variants
```
#### File: varappx/filters/genotype_filters.py
```python
from django.conf import settings
from varapp.filters.apply_bitwise import c_apply_bitwise # from cython extension
from varapp.constants.filters import FILTER_CLASS_GENOTYPE
from varapp.constants.genotype import *
from varapp.data_models.samples import SamplesSelection
from varapp.data_models.variants import *
from varapp.filters.filters import Filter, FilterResult, FiltersCollection
from varapp.variants.genotypes_service import genotypes_service
from varapp.variants.variants_factory import set_source
import abc, itertools, multiprocessing as mp
import numpy as np
from functools import reduce
from operator import attrgetter, itemgetter, __and__
from time import time
AND = 'AND'
OR = 'OR'
DEBUG = True and settings.DEBUG
def merge_conditions_array(conds):
"""If there are multiple affected samples sharing the same parents,
the conditions can be redundant. Simplify the conditions array so that
there is at most one for each genotype/sample. If there are several constraints
for the same genotype, check that they are compatible and take the strongest
(lowest bit value).
:param conds: an array of couples [sample_index, genotype_bit]
:rtype: same as input
"""
merged = []
if not conds:
return merged
# Group by sample index, and get a single common bit for all conds on that sample
conds.sort(key=itemgetter(0))
for idx,group in itertools.groupby(conds, itemgetter(0)):
genbits = [x[1] for x in group] # only the genotype bit
common_bits = reduce(__and__, genbits)
merged.append((idx, common_bits))
return merged
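# Worked example (illustrative, not from the original source): constraints on the
# same sample index are AND-ed together, one entry per sample, e.g.
#   merge_conditions_array([(0, 0b0110), (0, 0b0010), (3, 0b0001)])
#   -> [(0, 0b0010), (3, 0b0001)]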
class GenotypesFilter(Filter):
"""Defines a way to *apply* a filter on variants genotypes."""
__metaclass__ = abc.ABCMeta
filter_class = FILTER_CLASS_GENOTYPE
need_groups = [] # The required group names in the samples selection for the filter to work.
need_parents = 0 # Whether 0/1/2 parents are required for the filter to work
def __init__(self, ss:SamplesSelection, val, name='genotype', op='=', db=None):
super().__init__(name=name, op=op, val=val, ss=ss, db=db)
self.nsamples = len(ss.active_idx)
self.merge_op = AND
self.shortcut = False # Flag: if True, don't filter anything
# Need at least one active sample
if len(self.ss.active_idx) == 0:
self.shortcut = True
# If parents are required, check that both are present for at least one of the affected samples
mothers_aff = [ss.mother_idx_of(s) for s in ss.affected]
fathers_aff = [ss.father_idx_of(s) for s in ss.affected]
if self.need_parents == 2 and all(None in x for x in zip(mothers_aff, fathers_aff)):
self.shortcut = True
elif self.need_parents == 1 and all((x,y)==(None,None) for x,y in zip(mothers_aff, fathers_aff)):
self.shortcut = True
# If certain groups are required, check that they are present in the selection
if any((x not in ss.groups.keys() or len(ss.groups[x]) == 0) for x in self.need_groups):
self.shortcut = True
# The compound case implements its own stuff, but otherwise do that:
if self.val != GENOTYPE_COMPOUND:
conditions_array = self.build_conditions_array()
self.conditions_array = merge_conditions_array(conditions_array)
if len(self.conditions_array) == 0:
self.shortcut = True
self.conditions_vector = self.build_conditions_vector(self.conditions_array)
def build_conditions_array(self):
"""Construct a list of lists [sample_idx, BITCODE], one for each sample.
Then a variant passes if in its decoded gts, there is BITCODE at position idx.
Once only: it is proper to the filter (with the list of all possible samples,
but no samples selection)."""
raise NotImplementedError("No `build_conditions_array` method implemented.")
def build_conditions_vector(self, conditions_array):
"""From a *conditions_array*, of elements [sample_idx, BITCODE],
build a vector of size len(active_samples) with BITCODE at indices
where a condition is given, and GENOTYPE_BIT_ANY elsewhere.
:rtype: np.ndarray[uint8]
"""
active_idx = self.ss.active_idx
conds = GENOTYPE_BIT_ANY * np.ones(len(active_idx), dtype=np.uint8)
shift = {idx:i for i,idx in enumerate(active_idx)}
for idx,bit in conditions_array:
conds[shift[idx]] = bit
return conds
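# Sketch (symbolic illustration): with active_idx == [2, 5, 7] and a conditions
# array [(5, GENOTYPE_BIT_CARRIER)], the resulting vector is
# [GENOTYPE_BIT_ANY, GENOTYPE_BIT_CARRIER, GENOTYPE_BIT_ANY].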
def scan_genotypes(self, genotypes, sub_ids=None, db=None):
"""Pass through all genotypes and return only the indices of those that pass the filter.
:param genotypes: np.ndarray[uint64, dim=2]
:rtype: np.ndarray[uint64]"""
if self.shortcut:
return np.zeros(0)
N = len(genotypes)
if sub_ids is not None:
variant_ids = sub_ids
elif self.val == 'x_linked' and db:
variant_ids = genotypes_service(db).chrX
else:
variant_ids = np.asarray(range(1,N+1), dtype=np.uint64)
active_idx = np.asarray(self.ss.active_idx, dtype=np.uint16)
conditions = self.conditions_vector
is_and = self.merge_op == AND
if len(conditions) == 0:
passing = variant_ids
else:
passing = self.parallel_apply_bitwise(genotypes, variant_ids, conditions, active_idx, is_and)
return passing
@staticmethod
def parallel_apply_bitwise(genotypes, variant_ids, conditions, active_idx, is_and):
"""Run c_apply_bitwise in parallel. Takes the same arguments."""
N = len(genotypes)
nprocs = mp.cpu_count()
pool = mp.Pool(processes=nprocs)
B = round(N/nprocs + 0.5) # batch size
# Split variant_ids in batches (genotype batches are equally-sized, but not
# variant ids, in case a subset was given)
split_at = variant_ids.searchsorted([(k+1)*B+1 for k in range(nprocs-1)])
variant_ids_batches = np.split(variant_ids, split_at)
assert len(variant_ids_batches) == nprocs
# Run one job for each batch
passing = [pool.apply(c_apply_bitwise,
args=(genotypes[k*B:(k+1)*B,:],
variant_ids_batches[k],
conditions, active_idx, is_and, B))
for k in range(nprocs)]
passing = np.concatenate(passing)
pool.close()
return passing
#@timer
def apply(self, variants=None, genotypes=None, db=None, limit=None, offset=0):
"""Apply this collection of filters on a collection of variants.
:param variants: a VariantsCollection or a QuerySet of variants.
If None, makes a QuerySet of the whole *db*.
:param db: database name. If no set, it tries to be inferred from *variants*.
:param genotypes: a list of genotypes arrays.
if None, a GenotypesService is created from the variants' db.
In principle, set it for testing purposes only.
:rtype: FilterResult
"""
sub_ids = None
if variants is None and db is not None:
variants = Variant.objects.using(db)
elif db is None:
db = variants.db
if self.shortcut:
return FilterResult(variants=VariantsCollection([]), ids=[], n_filtered=0)
if genotypes is None:
assert db is not None, "Either a db name or a genotypes array is required"
genotypes = genotypes_service(db).genotypes
else:
assert len(genotypes) == len(variants)
if self.val == 'x_linked':
if isinstance(variants, VariantsCollection):
sub_ids = np.asarray([v.variant_id for v in variants if v.chrom=='chrX'], dtype=np.uint64)
else:
sub_ids = genotypes_service(db).chrX
passing = self.scan_genotypes(genotypes, sub_ids=sub_ids, db=db)
return FilterResult(
variants=self.variants_from_mask(variants, passing, db, limit, offset),
ids=passing,
n_filtered=len(passing),
)
@staticmethod
def variants_from_mask(variants, passing, db=None, limit=None, offset=0):
"""Get the collection of variants which id is in *passing*."""
if limit is not None:
passing = passing[offset:offset+limit]
passing = set(passing)
return VariantsCollection([v for v in variants if v.variant_id in passing], db=db)
def __str__(self):
return "<Filter {}>".format(self.short_str()) + ('-'+str(self.ss) if self.ss else '')
def __repr__(self):
return "<Filter {}>".format(self.short_str()) + ('-'+str(self.ss) if self.ss else '')
class GenotypesFilterDoNothing(GenotypesFilter):
"""A filter that every variant passes anyway."""
def __init__(self, ss:SamplesSelection, db=None):
super().__init__(ss, 'nothing', db=db)
def build_conditions_array(self):
assert self
return [[i, GENOTYPE_BIT_ANY] for i in self.ss.active_idx]
class GenotypesFilterActive(GenotypesFilter):
"""Return a variant only if it is mutant in at least one of the active samples.
"""
def __init__(self, ss:SamplesSelection, db=None):
super().__init__(ss, GENOTYPE_ACTIVE, db=db)
self.merge_op = OR
def build_conditions_array(self):
return [[i, GENOTYPE_BIT_CARRIER] for i in self.ss.active_idx]
class GenotypesFilterDominant(GenotypesFilter):
"""Simplest scenario: autosomal dominant.
Suppose the effect is dominant, i.e. one allele
mutated is enough to observe a phenotype.
Filter variants that are mutated in all samples but the controls.
"""
need_groups = ["affected"]
def __init__(self, ss:SamplesSelection, db=None):
super().__init__(ss, GENOTYPE_DOMINANT, db=db)
def build_conditions_array(self):
return [[i, GENOTYPE_BIT_CARRIER] for i in self.ss.affected_idx] + \
[[i, GENOTYPE_BIT_NON_CARRIER] for i in self.ss.not_affected_idx]
class GenotypesFilterRecessive(GenotypesFilter):
"""Suppose the effect is recessive, i.e. a child must inherit a mutated
allele from both carrier parents to have an observable phenotype.
Filter mutations that are present in both the parents and homozygous
in the "affected" children.
Controls ("not_affected") are samples known to be non-carriers.
"""
need_groups = ["affected"]
def __init__(self, ss:SamplesSelection, db=None):
super().__init__(ss, GENOTYPE_RECESSIVE, db=db)
def build_conditions_array(self):
conds = [] # 1 per sample, because of its particular parents
for s in self.ss.affected:
idx = self.ss.idx_of(s.name, active=True)
conds.append([idx, GENOTYPE_BIT_CARRIER_HOM])
for i in self.ss.parents_idx_of(s):
conds.append([i, GENOTYPE_BIT_CARRIER])
for i in self.ss.not_affected_idx:
conds.append([i, GENOTYPE_BIT_NOT_CARRIER_HOM])
return conds
class GenotypesFilterDeNovo(GenotypesFilter):
"""Case where a mutation is present in a child but not in the parents.
So the controls should be the parents, but can include other non-carriers.
Otherwise it is the same as the Dominant case.
"""
need_groups = ["affected"]
need_parents = 2
def __init__(self, ss:SamplesSelection, db=None):
super().__init__(ss, GENOTYPE_DENOVO, db=db)
def build_conditions_array(self):
conds = [] # 1 per sample, because of its particular parents
for s in self.ss.affected:
idx = self.ss.idx_of(s.name, active=True)
parents_idx = self.ss.parents_idx_of(s)
if len(parents_idx) == 2: # pointless if not both parents present
if len(set(parents_idx) & set(self.ss.affected_idx)) > 0:
continue # pointless if one of the parents is affected
conds.append([idx, GENOTYPE_BIT_CARRIER_HET])
for i in parents_idx:
conds.append([i, GENOTYPE_BIT_NON_CARRIER])
if conds:
for i in self.ss.not_affected_idx:
conds.append([i, GENOTYPE_BIT_NON_CARRIER])
return conds
class GenotypesFilterXLinked(GenotypesFilter):
"""A deleterious mutation os present on chromosome X. Possible cases:
a) Dominant case: Apart from the proportion of affected children
of each sex, it behaves exactly like a usual dominant mutation,
so we don't cover that case here:
- Affected <=> carrier;
- In principle one of the parents should carry it, but it could be de novo.
b) Recessive case:
- Affected <=> woman carrier hom, or man carrier het;
- For a woman, both parents must be carriers (and the father is affected);
- For a man, only the mother must be carrier.
"""
need_groups = ["affected"]
need_parents = 0
def __init__(self, ss:SamplesSelection, db=None):
super().__init__(ss, GENOTYPE_XLINKED, db=db)
def build_conditions_array(self):
conds = [] # 1 per sample, because of its particular parents
for s in self.ss.affected:
idx = self.ss.idx_of(s.name, active=True)
# Male: carrier het, and the mother is carrier
if s.sex == 'M':
conds.append([idx, GENOTYPE_BIT_CARRIER_HET])
i = self.ss.mother_idx_of(s)
if i is not None:
conds.append([i, GENOTYPE_BIT_CARRIER])
# Female: carrier hom, and both parents are carriers
elif s.sex == 'F':
conds.append([idx, GENOTYPE_BIT_CARRIER_HOM])
for i in self.ss.parents_idx_of(s):
conds.append([i, GENOTYPE_BIT_CARRIER])
for s in self.ss.not_affected:
idx = self.ss.idx_of(s.name, active=True)
# Male unaffected cannot be carriers
if s.sex == 'M':
conds.append([idx, GENOTYPE_BIT_NON_CARRIER])
# Female unaffected could be carrier het
elif s.sex == 'F':
conds.append([idx, GENOTYPE_BIT_NOT_CARRIER_HOM])
return conds
class GenotypesFilterCompoundHeterozygous(GenotypesFilter):
"""Case where two mutations, inherited one from each parent,
occur in the same gene and thus code for two defective proteins.
Compose two results:
- father is carrier in that gene and child has it;
- mother is carrier in that same gene and child has it.
Notes:
- We cannot group conditions for many samples as we did before, because
they can be touched by different compounds pairs in the same gene (rare ?).
- Neither of the parents can be homozygous, or he would be affected (both proteins are touched).
- A child cannot be homozygous at any position of the compounds pair, because
that would suffice to invalidate both proteins and is indistinguishable from the
recessive case.
- Both parents could be affected at one position of the compounds pair (rare ?).
"""
need_groups = ["affected"]
need_parents = 2
def __init__(self, ss:SamplesSelection, db=None):
super().__init__(ss, val=GENOTYPE_COMPOUND, db=db)
self.conditions_array = self.build_conditions_array()
if not self.conditions_array:
self.shortcut = True
else:
self.conditions_vector = self.build_compound_conditions_vector()
def build_conditions_array(self):
"""Returns pairs of condition (paternal, maternal), one for each sample,
in a dict {sample_name: [cond1, cond2]}.
Make it also for non affected, because we want to find false positives searching
as if they were affected. An unaffected sample could well carry one of the two variants.
"""
conds = {}
# Common condition: all affected are carriers het, and no unaffected can be homozygous
base_cond = [(i, GENOTYPE_BIT_NOT_CARRIER_HOM) for i in self.ss.not_affected_idx] \
+ [(i, GENOTYPE_BIT_CARRIER_HET) for i in self.ss.affected_idx]
for s in self.ss.active:
idx = self.ss.idx_of(s.name, active=True)
father_idx = self.ss.father_idx_of(s)
mother_idx = self.ss.mother_idx_of(s)
if father_idx is None or mother_idx is None:
continue
if father_idx in self.ss.affected_idx or mother_idx in self.ss.affected_idx:
continue # pointless if one of the parents is affected
# Father carrier
c1 = base_cond + [
(idx, GENOTYPE_BIT_CARRIER_HET), # in case it is not affected, but we simulate for false positives
(father_idx, GENOTYPE_BIT_CARRIER),
(mother_idx, GENOTYPE_BIT_NON_CARRIER),
]
# Mother carrier
c2 = base_cond + [
(idx, GENOTYPE_BIT_CARRIER_HET),
(father_idx, GENOTYPE_BIT_NON_CARRIER),
(mother_idx, GENOTYPE_BIT_CARRIER),
]
# Note: c1 and c2 cannot both be true at the same genomic position
c1 = tuple(merge_conditions_array(c1))
c2 = tuple(merge_conditions_array(c2))
conds[s.name] = (c1, c2)
# Remove duplicate conditions to speed it up
seen = set()
dups = set()
for k,v in conds.items():
if v in seen:
dups.add(k)
else:
seen.add(v)
for name in dups:
conds.pop(name)
return conds
def build_compound_conditions_vector(self):
"""Extend *self.build_conditions_vector()* to apply it to all sub-elements
*c1*,*c2* of the more complicated {sample: [c1, c2]} of the compound case."""
conditions = {}
for sample, conds in self.conditions_array.items():
conditions[sample] = [None,None]
conditions[sample][0] = self.build_conditions_vector(conds[0])
conditions[sample][1] = self.build_conditions_vector(conds[1])
return conditions
def apply(self, variants=None, genotypes=None, db=None, limit=None, offset=0, sub_ids=None, parallel=True):
""":param sub_ids: does nothing, just inheritance"""
if self.shortcut:
return FilterResult(variants=VariantsCollection([]), ids=[], n_filtered=0)
if variants is None and db is not None:
variants = Variant.objects.using(db)
elif db is None:
db = variants.db
if db is None:
batches = {gene: np.array([v.variant_id for v in var], dtype=np.uint64)
for gene,var in itertools.groupby(variants, key=attrgetter('gene_symbol'))}
else:
gs = genotypes_service(db)
batches = gs.variant_ids_batches_by_gene
if genotypes is None:
assert db is not None, "Either a db name or a genotypes array is required"
genotypes = genotypes_service(db).genotypes
else:
assert len(genotypes) == len(variants)
passing, sources, pairs = self.scan_genotypes_compound(genotypes, batches, parallel)
variants = self.variants_from_mask(variants, passing, db, limit, offset)
for v in variants:
set_source(v, sources[v.variant_id])
return FilterResult(
variants=variants,
ids=passing,
n_filtered=len(passing),
)
def scan_genotypes_compound(self, genotypes, batches, parallel=True):
"""Scan the *genotypes* array for compounds. Variant ids are treated in batches,
- one list of variant_ids per gene."""
if self.shortcut:
passing, sources, pairs = np.zeros(0), {}, []
else:
N = len(genotypes)
active_idx = np.asarray(self.ss.active_idx, dtype=np.uint16)
batches = list(batches.items())
if parallel:
passing, sources, pairs = self.parallel_batches(genotypes, batches, active_idx, N)
else:
passing, sources, pairs = self.process_batches(genotypes, batches, active_idx, N)
passing = np.array(list(passing), dtype=np.uint64)
passing.sort()
return passing, sources, pairs
def parallel_batches(self, genotypes, batches, active_idx, N):
"""Parallelize the scanning of genotypes for compounds over groups of genes."""
passing = set()
sources = {}
pairs = []
nprocs = mp.cpu_count()
NB = len(batches)
B = round(NB/nprocs + 0.5) # batch size
split_batches = [batches[k*B:(k+1)*B] for k in range(nprocs)]
if DEBUG and 0:
print(" @parallel_batches {} CPUs: {}".format(nprocs, [len(x) for x in split_batches]))
pool = mp.Pool(processes=nprocs)
res = [pool.apply_async(self.process_batches,
args=(np.copy(genotypes), list(split_batches[k]), np.copy(active_idx), N))
for k in range(nprocs)]
output = [x.get() for x in res]
for x in output:
passing |= x[0]
sources.update(x[1])
pairs += x[2]
pool.close()
return passing, sources, pairs
def process_batches(self, genotypes, batches, active_idx, N):
"""Search a batch of genes for compounds."""
passing = set()
sources = {}
pairs = []
tbatch = 0
for gene,variant_ids in batches:
t1 = time()
local_passing, local_sources, local_pairs = self.process_1_batch(variant_ids, genotypes, active_idx, N)
t2 = time()
tbatch += t2-t1
passing |= local_passing
pairs += local_pairs
sources.update(local_sources)
if DEBUG and 0:
print(" Processed batches in {:.3f}s ({} passing)".format(tbatch,len(passing)))
return passing, sources, pairs
def process_1_batch(self, variant_ids, genotypes, active_idx, N):
"""Search 1 gene for compounds. Return:
local_passing: set of variant_ids passing the filter
local_sources: dict `{variant_id: 'paternal'/'maternal'}`
local_pairs: list of compound pairs `(variant_id1, variant_id2)`
"""
# Check that all affected samples have the compound
local_passing_mother = set()
local_passing_father = set()
local_sources = {}
for affected in self.ss.affected:
if affected.name not in self.conditions_vector:
continue
conds = self.conditions_vector[affected.name]
passing_father = set(c_apply_bitwise(genotypes, variant_ids, conds[0], active_idx, True, N))
passing_mother = set(c_apply_bitwise(genotypes, variant_ids, conds[1], active_idx, True, N))
# Exclude compounds that healthy samples carry as well
if len(passing_father) > 0 and len(passing_mother) > 0:
fp1 = set()
fp2 = set()
local_ids = np.array(list(passing_father | passing_mother), dtype=np.uint64)
for healthy in self.ss.not_affected:
if healthy.name not in self.conditions_vector:
continue
conds = np.asarray(self.conditions_vector[healthy.name], dtype=np.uint8)
false_father = c_apply_bitwise(genotypes, local_ids, conds[0], active_idx, True, N)
false_mother = c_apply_bitwise(genotypes, local_ids, conds[1], active_idx, True, N)
false_pairs = list(itertools.product(false_father, false_mother))
for p1, p2 in false_pairs:
if p1 in passing_father and p2 in passing_mother:
fp1.add(p1)
fp2.add(p2)
passing_father = passing_father - fp1
passing_mother = passing_mother - fp2
# If there are any left in both lists, add them to the result set
if len(passing_father) > 0 and len(passing_mother) > 0:
for k in passing_father:
local_sources[k] = 'paternal'
for k in passing_mother:
local_sources[k] = 'maternal'
if len(local_passing_father) == 0:
local_passing_father = passing_father
else:
local_passing_father &= passing_father
if len(local_passing_mother) == 0:
local_passing_mother = passing_mother
else:
local_passing_mother &= passing_mother
# All affected samples must have at least one of the combinations
else:
local_passing_father = set()
local_passing_mother = set()
local_sources = {}
break # go to next gene
local_passing = local_passing_father | local_passing_mother
local_pairs = list(itertools.product(
map(int,local_passing_father), # map to int because of new numpy warning when used as index
map(int,local_passing_mother)
))
return local_passing, local_sources, local_pairs
```
#### File: main/samples/samples_factoyr.py
```python
from varappx.models.gemini import Samples
#from varappx.data_models.samples import SamplesSelection, Sample
from varappx.models.users import Bam, VariantsDb
import itertools
from operator import attrgetter
#
#
# def sample_factory(s:Samples):
# """Create a more useful Sample instance from a Django Samples instance *s*."""
# return Sample(s.name, s.sample_id, s.family_id, s.maternal_id, s.paternal_id, s.sex, s.phenotype)
#
# def add_bam_keys(db, samples):
# """Fill the 'bam' field of each Samples in *samples* with the key to access
# the BAM file in bam-server, if present in the Bam table.
# :param db: db name
# :param samples: list of Sample
# """
# vdb = VariantsDb.objects.get(name=db, is_active=1)
# q = Bam.objects.filter(variants_db=vdb, key__isnull=False, sample__isnull=False).values_list('sample', 'key')
# bam_keys = dict(q)
# for s in samples:
# s.bam = bam_keys.get(s.name)
#
# def samples_list_from_db(db, query_set=None):
# """Return a list of `Sample`s from database content."""
# if query_set is None:
# query_set = Samples.objects.using(db).all().order_by('sample_id')
# return [sample_factory(s) for s in query_set]
#
# def samples_selection_factory(db, groups=None, query_set=None):
# """Create a more useful SamplesCollection instance from a Django Samples QuerySet *query_set*,
# or from the whole database content (with *db*).
# :param groups: a dict {group_name: list of sample names}. If set to 'ped', use the samples'
# 'phenotype' attribute to build the groups.
# """
# samples_list = samples_list_from_db(db, query_set)
# add_bam_keys(db, samples_list)
# if groups == 'ped':
# groups = fetch_ped_info_groups(samples_list)
# return SamplesSelection(samples_list, groups, db=db)
#
# def fetch_ped_info_groups(samples):
# """Read phenotype info in the Samples table of Gemini, which is built on the PED.
# :param samples: a SamplesCollection
# """
# names = {'1':'not_affected', '2':'affected'}
# groups = {}
# for phenotype, group in itertools.groupby(sorted(samples, key=attrgetter('phenotype')), attrgetter('phenotype')):
# if names.get(phenotype):
# group_name = names[phenotype]
# groups[group_name] = [s.name for s in group]
# return groups
#
```
#### File: varappx/main/view.py
```python
from flask import render_template
from . import main
from time import time
@main.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@main.route('/varappx', methods=['GET', 'POST'])
def varapp():
# print('a')
return render_template('testing.html')
``` |
{
"source": "447327642/swigibpy",
"score": 3
} |
#### File: swigibpy/examples/contractdetails.py
```python
from datetime import datetime
from threading import Event
from swigibpy import EWrapper, EPosixClientSocket, Contract
WAIT_TIME = 10.0
###
class ContractDetailsExample(EWrapper):
'''Callback object passed to TWS, these functions will be called directly
by TWS.
'''
def __init__(self):
super(ContractDetailsExample, self).__init__()
self.got_contract = Event()
def orderStatus(self, id, status, filled, remaining, avgFillPrice, permId,
parentId, lastFilledPrice, clientId, whyHeld):
pass
def openOrder(self, orderID, contract, order, orderState):
pass
def nextValidId(self, orderId):
'''Always called by TWS but not relevant for our example'''
pass
def openOrderEnd(self):
'''Always called by TWS but not relevant for our example'''
pass
def managedAccounts(self, openOrderEnd):
'''Called by TWS but not relevant for our example'''
pass
def contractDetailsEnd(self, reqId):
print("Contract details request complete, (request id %i)" % reqId)
def contractDetails(self, reqId, contractDetails):
print("Contract details received (request id %i):" % reqId)
print("callable: %s" % contractDetails.callable)
print("category: %s" % contractDetails.category)
print("contractMonth: %s" % contractDetails.contractMonth)
print("convertible: %s" % contractDetails.convertible)
print("coupon: %s" % contractDetails.coupon)
print("industry: %s" % contractDetails.industry)
print("liquidHours: %s" % contractDetails.liquidHours)
print("longName: %s" % contractDetails.longName)
print("marketName: %s" % contractDetails.marketName)
print("minTick: %s" % contractDetails.minTick)
print("nextOptionPartial: %s" % contractDetails.nextOptionPartial)
print("orderTypes: %s" % contractDetails.orderTypes)
print("priceMagnifier: %s" % contractDetails.priceMagnifier)
print("putable: %s" % contractDetails.putable)
if contractDetails.secIdList is not None:
for secId in contractDetails.secIdList:
print("secIdList: %s" % secId)
else:
print("secIdList: None")
print("subcategory: %s" % contractDetails.subcategory)
print("tradingHours: %s" % contractDetails.tradingHours)
print("timeZoneId: %s" % contractDetails.timeZoneId)
print("underConId: %s" % contractDetails.underConId)
print("evRule: %s" % contractDetails.evRule)
print("evMultiplier: %s" % contractDetails.evMultiplier)
contract = contractDetails.summary
print("\nContract Summary:")
print("exchange: %s" % contract.exchange)
print("symbol: %s" % contract.symbol)
print("secType: %s" % contract.secType)
print("currency: %s" % contract.currency)
print("tradingClass: %s" % contract.tradingClass)
if contract.comboLegs is not None:
for comboLeg in contract.comboLegs:
print("comboLegs: %s - %s" %
(comboLeg.action, comboLeg.exchange))
else:
print("comboLegs: None")
print("\nBond Values:")
print("bondType: %s" % contractDetails.bondType)
print("couponType: %s" % contractDetails.couponType)
print("cusip: %s" % contractDetails.cusip)
print("descAppend: %s" % contractDetails.descAppend)
print("issueDate: %s" % contractDetails.issueDate)
print("maturity: %s" % contractDetails.maturity)
print("nextOptionDate: %s" % contractDetails.nextOptionDate)
print("nextOptionType: %s" % contractDetails.nextOptionType)
print("notes: %s" % contractDetails.notes)
print("ratings: %s" % contractDetails.ratings)
print("validExchanges: %s" % contractDetails.validExchanges)
self.got_contract.set()
# Instantiate our callback object
callback = ContractDetailsExample()
# Instantiate a socket object, allowing us to call TWS directly. Pass our
# callback object so TWS can respond.
tws = EPosixClientSocket(callback)
# Connect to tws running on localhost
if not tws.eConnect("", 7496, 42):
raise RuntimeError('Failed to connect to TWS')
# Simple contract for GOOG
contract = Contract()
contract.exchange = "SMART"
contract.symbol = "GOOG"
contract.secType = "STK"
contract.currency = "USD"
today = datetime.today()
print("Requesting contract details...")
# Perform the request
tws.reqContractDetails(
42, # reqId,
contract, # contract,
)
print("\n====================================================================")
print(" Contract details requested, waiting %ds for TWS responses" % WAIT_TIME)
print("====================================================================\n")
try:
callback.got_contract.wait(timeout=WAIT_TIME)
except KeyboardInterrupt:
pass
finally:
if not callback.got_contract.is_set():
print('Failed to get contract within %d seconds' % WAIT_TIME)
print("\nDisconnecting...")
tws.eDisconnect()
``` |
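The request/response handshake above reduces to a plain `threading.Event` wait: the callback thread sets the event and the main thread blocks on it with a timeout. A minimal sketch of just that pattern (independent of TWS; names are illustrative):
```python
from threading import Event
done = Event()
def on_response():
    # called from the API's callback thread when the reply arrives
    done.set()
# ... fire the request here, then block until the callback signals completion ...
if not done.wait(timeout=10.0):
    print("no response within 10 seconds")
```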
{
"source": "447983454/taichi",
"score": 3
} |
#### File: taichi/examples/ad_gravity.py
```python
import taichi as ti
ti.init()
N = 8
dt = 5e-5
pos = ti.Vector.var(2, ti.f32, N, needs_grad=True)
vel = ti.Vector.var(2, ti.f32, N)
potential = ti.var(ti.f32, (), needs_grad=True)
@ti.kernel
def calc_potential():
for i, j in ti.ndrange(N, N):
disp = pos[i] - pos[j]
potential[None] += 1 / disp.norm(1e-3)
@ti.kernel
def init():
for i in pos:
pos[i] = [ti.random(), ti.random()]
@ti.kernel
def advance():
for i in pos:
vel[i] += dt * pos.grad[i]
for i in pos:
pos[i] += dt * vel[i]
def substep():
with ti.Tape(potential):
calc_potential()
advance()
init()
gui = ti.GUI('Autodiff gravity')
while gui.running and not gui.get_event(gui.ESCAPE):
for i in range(16):
substep()
gui.circles(pos.to_numpy(), radius=3)
gui.show()
```
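The key mechanism above is Taichi's reverse-mode autodiff: wrapping `calc_potential()` in `ti.Tape(potential)` fills `pos.grad` with the derivative of the potential, which `advance()` then uses as the force. A minimal sketch of the same `ti.Tape` pattern on a toy scalar function, using the legacy `ti.var` API seen in these files (field names are illustrative):
```python
import taichi as ti
ti.init()
# y = sum(x_i^2), so dy/dx_i = 2 * x_i
x = ti.var(ti.f32, 4, needs_grad=True)
y = ti.var(ti.f32, (), needs_grad=True)
@ti.kernel
def compute_y():
    for i in x:
        y[None] += x[i] ** 2
for i in range(4):
    x[i] = i
with ti.Tape(y):  # records compute_y and backpropagates into x.grad on exit
    compute_y()
print([x.grad[i] for i in range(4)])  # expected: [0.0, 2.0, 4.0, 6.0]
```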
#### File: taichi/examples/mgpcg_advanced.py
```python
import numpy as np
import time
import taichi as ti
real = ti.f32
ti.init(default_fp=real, arch=ti.x64, kernel_profiler=True)
@ti.data_oriented
class MGPCG:
def __init__(self):
# grid parameters
self.use_multigrid = True
self.N = 128
self.N_gui = 512 # gui resolution
self.n_mg_levels = 4
self.pre_and_post_smoothing = 2
self.bottom_smoothing = 50
self.dim = 3
        self.N_ext = self.N // 2  # number of ext cells, set so that the total grid size is still a power of 2
self.N_tot = 2 * self.N
# setup sparse simulation data arrays
self.r = [ti.var(dt=real) for _ in range(self.n_mg_levels)] # residual
self.z = [ti.var(dt=real)
for _ in range(self.n_mg_levels)] # M^-1 self.r
self.x = ti.var(dt=real) # solution
self.p = ti.var(dt=real) # conjugate gradient
self.Ap = ti.var(dt=real) # matrix-vector product
self.alpha = ti.var(dt=real) # step size
self.beta = ti.var(dt=real) # step size
self.sum = ti.var(dt=real) # storage for reductions
self.pixels = ti.var(dt=real,
shape=(self.N_gui, self.N_gui)) # image buffer
indices = ti.ijk if self.dim == 3 else ti.ij
self.grid = ti.root.pointer(indices, [self.N_tot // 4]).dense(
indices, 4).place(self.x, self.p, self.Ap)
for l in range(self.n_mg_levels):
self.grid = ti.root.pointer(indices,
[self.N_tot // (4 * 2**l)]).dense(
indices,
4).place(self.r[l], self.z[l])
ti.root.place(self.alpha, self.beta, self.sum)
@ti.kernel
def init(self):
for I in ti.grouped(
ti.ndrange(*(
(self.N_ext, self.N_tot - self.N_ext), ) * self.dim)):
self.r[0][I] = 1.0
for k in ti.static(range(self.dim)):
self.r[0][I] *= ti.sin(2.0 * np.pi * (I[k] - self.N_ext) *
2.0 / self.N_tot)
self.z[0][I] = 0.0
self.Ap[I] = 0.0
self.p[I] = 0.0
self.x[I] = 0.0
@ti.func
def neighbor_sum(self, x, I):
ret = 0.0
for i in ti.static(range(self.dim)):
offset = ti.Vector.unit(self.dim, i)
ret += x[I + offset] + x[I - offset]
return ret
@ti.kernel
def compute_Ap(self):
for I in ti.grouped(self.Ap):
self.Ap[I] = (2 * self.dim) * self.p[I] - self.neighbor_sum(
self.p, I)
@ti.kernel
def reduce(self, p: ti.template(), q: ti.template()):
self.sum[None] = 0
for I in ti.grouped(p):
self.sum[None] += p[I] * q[I]
@ti.kernel
def update_x(self):
for I in ti.grouped(self.p):
self.x[I] += self.alpha[None] * self.p[I]
@ti.kernel
def update_r(self):
for I in ti.grouped(self.p):
self.r[0][I] -= self.alpha[None] * self.Ap[I]
@ti.kernel
def update_p(self):
for I in ti.grouped(self.p):
self.p[I] = self.z[0][I] + self.beta[None] * self.p[I]
@ti.kernel
def restrict(self, l: ti.template()):
for I in ti.grouped(self.r[l]):
res = self.r[l][I] - (2.0 * self.dim * self.z[l][I] -
self.neighbor_sum(self.z[l], I))
self.r[l + 1][I // 2] += res * 0.5
@ti.kernel
def prolongate(self, l: ti.template()):
for I in ti.grouped(self.z[l]):
self.z[l][I] = self.z[l + 1][I // 2]
@ti.kernel
def smooth(self, l: ti.template(), phase: ti.template()):
# phase = red/black Gauss-Seidel phase
for I in ti.grouped(self.r[l]):
if (I.sum()) & 1 == phase:
self.z[l][I] = (self.r[l][I] + self.neighbor_sum(
self.z[l], I)) / (2.0 * self.dim)
def apply_preconditioner(self):
self.z[0].fill(0)
for l in range(self.n_mg_levels - 1):
for i in range(self.pre_and_post_smoothing << l):
self.smooth(l, 0)
self.smooth(l, 1)
self.z[l + 1].fill(0)
self.r[l + 1].fill(0)
self.restrict(l)
for i in range(self.bottom_smoothing):
self.smooth(self.n_mg_levels - 1, 0)
self.smooth(self.n_mg_levels - 1, 1)
for l in reversed(range(self.n_mg_levels - 1)):
self.prolongate(l)
for i in range(self.pre_and_post_smoothing << l):
self.smooth(l, 1)
self.smooth(l, 0)
@ti.kernel
def paint(self):
if ti.static(self.dim == 3):
kk = self.N_tot * 3 // 8
for i, j in self.pixels:
ii = int(i * self.N / self.N_gui) + self.N_ext
jj = int(j * self.N / self.N_gui) + self.N_ext
self.pixels[i, j] = self.x[ii, jj, kk] / self.N_tot
def run(self):
gui = ti.GUI("Multigrid Preconditioned Conjugate Gradients",
res=(self.N_gui, self.N_gui))
self.init()
self.reduce(self.r[0], self.r[0])
initial_rTr = self.sum[None]
# self.r = b - Ax = b since self.x = 0
# self.p = self.r = self.r + 0 self.p
if self.use_multigrid:
self.apply_preconditioner()
else:
self.z[0].copy_from(self.r[0])
self.update_p()
self.reduce(self.z[0], self.r[0])
old_zTr = self.sum[None]
# CG
for i in range(400):
# self.alpha = rTr / pTAp
self.compute_Ap()
self.reduce(self.p, self.Ap)
pAp = self.sum[None]
self.alpha[None] = old_zTr / pAp
# self.x = self.x + self.alpha self.p
self.update_x()
# self.r = self.r - self.alpha self.Ap
self.update_r()
# check for convergence
self.reduce(self.r[0], self.r[0])
rTr = self.sum[None]
if rTr < initial_rTr * 1.0e-12:
break
# self.z = M^-1 self.r
if self.use_multigrid:
self.apply_preconditioner()
else:
self.z[0].copy_from(self.r[0])
# self.beta = new_rTr / old_rTr
self.reduce(self.z[0], self.r[0])
new_zTr = self.sum[None]
self.beta[None] = new_zTr / old_zTr
# self.p = self.z + self.beta self.p
self.update_p()
old_zTr = new_zTr
print(f'iter {i}, residual={rTr}')
self.paint()
gui.set_image(self.pixels)
gui.show()
ti.kernel_profiler_print()
solver = MGPCG()
t = time.time()
solver.run()
print(f'Solver time: {time.time() - t:.3f} s')
ti.core.print_profile_info()
ti.core.print_stat()
```
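The `run()` loop above is a standard preconditioned conjugate gradient, with the multigrid V-cycle (`apply_preconditioner`) playing the role of the preconditioner. A plain NumPy sketch of the same recurrence in the same update order (illustrative only; the Taichi kernels distribute exactly these steps over the sparse grid):
```python
import numpy as np
def pcg(A, b, M_inv, iters=400, tol=1e-12):
    """Preconditioned CG; mirrors the update order used in MGPCG.run()."""
    x = np.zeros_like(b)
    r = b - A @ x            # residual (equals b since x starts at 0)
    z = M_inv(r)             # preconditioner application (the V-cycle above)
    p = z.copy()
    zTr = z @ r
    r0 = r @ r
    for _ in range(iters):
        Ap = A @ p
        alpha = zTr / (p @ Ap)
        x += alpha * p
        r -= alpha * Ap
        if r @ r < r0 * tol:  # relative convergence check
            break
        z = M_inv(r)
        new_zTr = z @ r
        beta = new_zTr / zTr
        p = z + beta * p
        zTr = new_zTr
    return x
```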
#### File: taichi/examples/odop_solar.py
```python
import taichi as ti
import math
@ti.data_oriented
class SolarSystem:
def __init__(self, n, dt):
# initializer of the solar system simulator
self.n = n
self.dt = dt
self.x = ti.Vector(2, dt=ti.f32, shape=n)
self.v = ti.Vector(2, dt=ti.f32, shape=n)
self.center = ti.Vector(2, dt=ti.f32, shape=())
@staticmethod
@ti.func
def random_vector_in(rmax):
# create a random vector
a = ti.random() * math.tau
r = ti.random() * rmax
return r * ti.Vector([ti.cos(a), ti.sin(a)])
@ti.kernel
def initialize(self):
# initialization or reset
for i in range(self.n):
offset = self.random_vector_in(0.5)
self.x[i] = self.center[None] + offset # Offset from center
self.v[i] = [-offset.y, offset.x] # Perpendicular to offset
self.v[i] += self.random_vector_in(0.02) # Shaking
self.v[i] *= 1 / offset.norm()**1.5 # Kepler's 3rd law
@ti.func
def gravity(self, pos):
# compute gravitational acceleration at pos
offset = -(pos - self.center[None])
return offset / offset.norm()**3
@ti.kernel
def integrate(self):
# semi-implicit time integration
for i in range(self.n):
self.v[i] += self.dt * self.gravity(self.x[i])
self.x[i] += self.dt * self.v[i]
def render(self, gui):
# render the simulation scene on the GUI
gui.circle([0.5, 0.5], radius=10, color=0xffaa88)
gui.circles(solar.x.to_numpy(), radius=3, color=0xffffff)
solar = SolarSystem(8, 0.0001)
solar.center[None] = [0.5, 0.5]
solar.initialize()
gui = ti.GUI("Solar System", background_color=0x0071a)
while gui.running:
# GUI event processing
if gui.get_event(gui.PRESS):
if gui.event.key == gui.SPACE:
solar.initialize()
elif gui.event.key == gui.ESCAPE:
gui.running = False
for i in range(10):
solar.integrate()
solar.render(gui)
gui.show()
```
#### File: taichi/examples/particle_renderer.py
```python
import taichi as ti
import numpy as np
import math
import time
from renderer_utils import out_dir, ray_aabb_intersection, inf, eps, \
intersect_sphere, sphere_aabb_intersect_motion, inside_taichi
ti.init(arch=ti.cuda, device_memory_GB=4)
res = 1280, 720
num_spheres = 1024
color_buffer = ti.Vector(3, dt=ti.f32)
bbox = ti.Vector(3, dt=ti.f32, shape=2)
grid_density = ti.var(dt=ti.i32)
voxel_has_particle = ti.var(dt=ti.i32)
max_ray_depth = 4
use_directional_light = True
particle_x = ti.Vector(3, dt=ti.f32)
particle_v = ti.Vector(3, dt=ti.f32)
particle_color = ti.Vector(3, dt=ti.f32)
pid = ti.var(ti.i32)
num_particles = ti.var(ti.i32, shape=())
fov = 0.23
dist_limit = 100
exposure = 1.5
camera_pos = ti.Vector([0.5, 0.32, 2.7])
vignette_strength = 0.9
vignette_radius = 0.0
vignette_center = [0.5, 0.5]
light_direction = [1.2, 0.3, 0.7]
light_direction_noise = 0.03
light_color = [1.0, 1.0, 1.0]
grid_visualization_block_size = 16
grid_resolution = 256 // grid_visualization_block_size
frame_id = 0
render_voxel = False # see dda()
inv_dx = 256.0
dx = 1.0 / inv_dx
camera_pos = ti.Vector([0.5, 0.27, 2.7])
supporter = 2
shutter_time = 0.5e-3 # half the frame time (1e-3)
sphere_radius = 0.0015
particle_grid_res = 256
max_num_particles_per_cell = 8192 * 1024
max_num_particles = 1024 * 1024 * 4
assert sphere_radius * 2 * particle_grid_res < 1
ti.root.dense(ti.ij, (res[0] // 8, res[1] // 8)).dense(ti.ij,
8).place(color_buffer)
ti.root.dense(ti.ijk, 2).dense(ti.ijk, particle_grid_res // 8).dense(
ti.ijk, 8).place(voxel_has_particle)
ti.root.dense(ti.ijk, 4).pointer(ti.ijk, particle_grid_res // 8).dense(
ti.ijk, 8).dynamic(ti.l, max_num_particles_per_cell, 512).place(pid)
ti.root.dense(ti.l, max_num_particles).place(particle_x, particle_v,
particle_color)
ti.root.dense(ti.ijk, grid_resolution // 8).dense(ti.ijk,
8).place(grid_density)
@ti.func
def inside_grid(ipos):
return ipos.min() >= 0 and ipos.max() < grid_resolution
# The dda algorithm requires the voxel grid to have one surrounding layer of void region
# to correctly render the outmost voxel faces
@ti.func
def inside_grid_loose(ipos):
return ipos.min() >= -1 and ipos.max() <= grid_resolution
@ti.func
def query_density_int(ipos):
inside = inside_grid(ipos)
ret = 0
if inside:
ret = grid_density[ipos]
else:
ret = 0
return ret
@ti.func
def voxel_color(pos):
p = pos * grid_resolution
p -= ti.floor(p)
boundary = 0.1
count = 0
for i in ti.static(range(3)):
if p[i] < boundary or p[i] > 1 - boundary:
count += 1
f = 0.0
if count >= 2:
f = 1.0
return ti.Vector([0.2, 0.3, 0.2]) * (2.3 - 2 * f)
@ti.func
def sdf(o):
dist = 0.0
if ti.static(supporter == 0):
o -= ti.Vector([0.5, 0.002, 0.5])
p = o
h = 0.02
ra = 0.29
rb = 0.005
d = (ti.Vector([p[0], p[2]]).norm() - 2.0 * ra + rb, abs(p[1]) - h)
dist = min(max(d[0], d[1]), 0.0) + ti.Vector(
[max(d[0], 0.0), max(d[1], 0)]).norm() - rb
elif ti.static(supporter == 1):
o -= ti.Vector([0.5, 0.002, 0.5])
dist = (o.abs() - ti.Vector([0.5, 0.02, 0.5])).max()
else:
dist = o[1] - 0.027
return dist
@ti.func
def ray_march(p, d):
j = 0
dist = 0.0
limit = 200
while j < limit and sdf(p + dist * d) > 1e-8 and dist < dist_limit:
dist += sdf(p + dist * d)
j += 1
if dist > dist_limit:
dist = inf
return dist
@ti.func
def sdf_normal(p):
d = 1e-3
n = ti.Vector([0.0, 0.0, 0.0])
for i in ti.static(range(3)):
inc = p
dec = p
inc[i] += d
dec[i] -= d
n[i] = (0.5 / d) * (sdf(inc) - sdf(dec))
return n.normalized()
@ti.func
def sdf_color(p):
scale = 0.4
if inside_taichi(ti.Vector([p[0], p[2]])):
scale = 1
return ti.Vector([0.3, 0.5, 0.7]) * scale
# Digital differential analyzer for the grid visualization (render_voxels=True)
@ti.func
def dda(eye_pos, d):
for i in ti.static(range(3)):
if abs(d[i]) < 1e-6:
d[i] = 1e-6
rinv = 1.0 / d
rsign = ti.Vector([0, 0, 0])
for i in ti.static(range(3)):
if d[i] > 0:
rsign[i] = 1
else:
rsign[i] = -1
bbox_min = ti.Vector([0.0, 0.0, 0.0]) - 10 * eps
bbox_max = ti.Vector([1.0, 1.0, 1.0]) + 10 * eps
inter, near, far = ray_aabb_intersection(bbox_min, bbox_max, eye_pos, d)
hit_distance = inf
normal = ti.Vector([0.0, 0.0, 0.0])
c = ti.Vector([0.0, 0.0, 0.0])
if inter:
near = max(0, near)
pos = eye_pos + d * (near + 5 * eps)
o = grid_resolution * pos
ipos = ti.floor(o).cast(int)
dis = (ipos - o + 0.5 + rsign * 0.5) * rinv
running = 1
i = 0
hit_pos = ti.Vector([0.0, 0.0, 0.0])
while running:
last_sample = query_density_int(ipos)
if not inside_grid_loose(ipos):
running = 0
# normal = [0, 0, 0]
if last_sample:
mini = (ipos - o + ti.Vector([0.5, 0.5, 0.5]) -
rsign * 0.5) * rinv
hit_distance = mini.max() * (1 / grid_resolution) + near
hit_pos = eye_pos + hit_distance * d
c = voxel_color(hit_pos)
running = 0
else:
mm = ti.Vector([0, 0, 0])
if dis[0] <= dis[1] and dis[0] < dis[2]:
mm[0] = 1
elif dis[1] <= dis[0] and dis[1] <= dis[2]:
mm[1] = 1
else:
mm[2] = 1
dis += mm * rsign * rinv
ipos += mm * rsign
normal = -mm * rsign
i += 1
return hit_distance, normal, c
@ti.func
def inside_particle_grid(ipos):
pos = ipos * dx
return bbox[0][0] <= pos[0] and pos[0] < bbox[1][0] and bbox[0][1] <= pos[
1] and pos[1] < bbox[1][1] and bbox[0][2] <= pos[2] and pos[2] < bbox[
1][2]
# DDA for the particle visualization (render_voxels=False)
@ti.func
def dda_particle(eye_pos, d, t):
grid_res = particle_grid_res
# bounding box
bbox_min = bbox[0]
bbox_max = bbox[1]
hit_pos = ti.Vector([0.0, 0.0, 0.0])
normal = ti.Vector([0.0, 0.0, 0.0])
c = ti.Vector([0.0, 0.0, 0.0])
for i in ti.static(range(3)):
if abs(d[i]) < 1e-6:
d[i] = 1e-6
inter, near, far = ray_aabb_intersection(bbox_min, bbox_max, eye_pos, d)
near = max(0, near)
closest_intersection = inf
if inter:
pos = eye_pos + d * (near + eps)
rinv = 1.0 / d
rsign = ti.Vector([0, 0, 0])
for i in ti.static(range(3)):
if d[i] > 0:
rsign[i] = 1
else:
rsign[i] = -1
o = grid_res * pos
ipos = ti.floor(o).cast(int)
dis = (ipos - o + 0.5 + rsign * 0.5) * rinv
running = 1
# DDA for voxels with at least one particle
while running:
inside = inside_particle_grid(ipos)
if inside:
# once we actually intersect with a voxel that contains at least one particle, loop over the particle list
num_particles = voxel_has_particle[ipos]
if num_particles != 0:
num_particles = ti.length(pid.parent(), ipos)
for k in range(num_particles):
p = pid[ipos[0], ipos[1], ipos[2], k]
v = particle_v[p]
x = particle_x[p] + t * v
color = particle_color[p]
# ray-sphere intersection
dist, poss = intersect_sphere(eye_pos, d, x, sphere_radius)
hit_pos = poss
if dist < closest_intersection and dist > 0:
hit_pos = eye_pos + dist * d
closest_intersection = dist
normal = (hit_pos - x).normalized()
c = color
else:
running = 0
normal = [0, 0, 0]
if closest_intersection < inf:
running = 0
else:
# hits nothing. Continue ray marching
mm = ti.Vector([0, 0, 0])
if dis[0] <= dis[1] and dis[0] <= dis[2]:
mm[0] = 1
elif dis[1] <= dis[0] and dis[1] <= dis[2]:
mm[1] = 1
else:
mm[2] = 1
dis += mm * rsign * rinv
ipos += mm * rsign
return closest_intersection, normal, c
@ti.func
def next_hit(pos, d, t):
closest = inf
normal = ti.Vector([0.0, 0.0, 0.0])
c = ti.Vector([0.0, 0.0, 0.0])
if ti.static(render_voxel):
closest, normal, c = dda(pos, d)
else:
closest, normal, c = dda_particle(pos, d, t)
if d[2] != 0:
ray_closest = -(pos[2] + 5.5) / d[2]
if ray_closest > 0 and ray_closest < closest:
closest = ray_closest
normal = ti.Vector([0.0, 0.0, 1.0])
c = ti.Vector([0.6, 0.7, 0.7])
ray_march_dist = ray_march(pos, d)
if ray_march_dist < dist_limit and ray_march_dist < closest:
closest = ray_march_dist
normal = sdf_normal(pos + d * closest)
c = sdf_color(pos + d * closest)
return closest, normal, c
aspect_ratio = res[0] / res[1]
@ti.kernel
def render():
for u, v in color_buffer:
pos = camera_pos
d = ti.Vector([(2 * fov * (u + ti.random(ti.f32)) / res[1] -
fov * aspect_ratio - 1e-5),
2 * fov * (v + ti.random(ti.f32)) / res[1] - fov - 1e-5,
-1.0])
d = d.normalized()
t = (ti.random() - 0.5) * shutter_time
contrib = ti.Vector([0.0, 0.0, 0.0])
throughput = ti.Vector([1.0, 1.0, 1.0])
depth = 0
hit_sky = 1
ray_depth = 0
while depth < max_ray_depth:
closest, normal, c = next_hit(pos, d, t)
hit_pos = pos + closest * d
depth += 1
ray_depth = depth
if normal.norm() != 0:
d = out_dir(normal)
pos = hit_pos + 1e-4 * d
throughput *= c
if ti.static(use_directional_light):
dir_noise = ti.Vector([
ti.random() - 0.5,
ti.random() - 0.5,
ti.random() - 0.5
]) * light_direction_noise
direct = (ti.Vector(light_direction) +
dir_noise).normalized()
dot = direct.dot(normal)
if dot > 0:
dist, _, _ = next_hit(pos, direct, t)
if dist > dist_limit:
contrib += throughput * ti.Vector(
light_color) * dot
else: # hit sky
hit_sky = 1
depth = max_ray_depth
max_c = throughput.max()
if ti.random() > max_c:
depth = max_ray_depth
throughput = [0, 0, 0]
else:
throughput /= max_c
if hit_sky:
if ray_depth != 1:
# contrib *= max(d[1], 0.05)
pass
else:
# directly hit sky
pass
else:
throughput *= 0
# contrib += throughput
color_buffer[u, v] += contrib
support = 2
@ti.kernel
def initialize_particle_grid():
for p in range(num_particles[None]):
x = particle_x[p]
v = particle_v[p]
ipos = ti.floor(x * particle_grid_res).cast(ti.i32)
for i in range(-support, support + 1):
for j in range(-support, support + 1):
for k in range(-support, support + 1):
offset = ti.Vector([i, j, k])
box_ipos = ipos + offset
if inside_particle_grid(box_ipos):
box_min = box_ipos * (1 / particle_grid_res)
box_max = (box_ipos + ti.Vector([1, 1, 1])) * (
1 / particle_grid_res)
if sphere_aabb_intersect_motion(
box_min, box_max, x - 0.5 * shutter_time * v,
x + 0.5 * shutter_time * v, sphere_radius):
ti.append(pid.parent(), box_ipos, p)
voxel_has_particle[box_ipos] = 1
@ti.kernel
def copy(img: ti.ext_arr(), samples: ti.i32):
for i, j in color_buffer:
u = 1.0 * i / res[0]
v = 1.0 * j / res[1]
darken = 1.0 - vignette_strength * max(
(ti.sqrt((u - vignette_center[0])**2 +
(v - vignette_center[1])**2) - vignette_radius), 0)
for c in ti.static(range(3)):
img[i, j, c] = ti.sqrt(color_buffer[i, j][c] * darken * exposure /
samples)
def main():
num_part = 100000
np_x = np.random.rand(num_part, 3).astype(np.float32) * 0.4 + 0.2
np_v = np.random.rand(num_part, 3).astype(np.float32) * 0
np_c = np.zeros((num_part, 3)).astype(np.float32)
np_c[:, 0] = 0.85
np_c[:, 1] = 0.9
np_c[:, 2] = 1
for i in range(3):
# bbox values must be multiples of dx
# bbox values are the min and max particle coordinates, with 3 dx margin
bbox[0][i] = (math.floor(np_x[:, i].min() * particle_grid_res) -
3.0) / particle_grid_res
bbox[1][i] = (math.floor(np_x[:, i].max() * particle_grid_res) +
3.0) / particle_grid_res
num_particles[None] = num_part
print('num_input_particles =', num_part)
@ti.kernel
def initialize_particle_x(x: ti.ext_arr(), v: ti.ext_arr(),
color: ti.ext_arr()):
for i in range(num_particles[None]):
for c in ti.static(range(3)):
particle_x[i][c] = x[i, c]
particle_v[i][c] = v[i, c]
particle_color[i][c] = color[i, c]
for k in ti.static(range(27)):
base_coord = (inv_dx * particle_x[i] - 0.5).cast(
ti.i32) + ti.Vector([k // 9, k // 3 % 3, k % 3])
grid_density[base_coord // grid_visualization_block_size] = 1
initialize_particle_x(np_x, np_v, np_c)
initialize_particle_grid()
gui = ti.GUI('Particle Renderer', res)
last_t = 0
for i in range(500):
render()
interval = 10
if i % interval == 0:
img = np.zeros((res[0], res[1], 3), dtype=np.float32)
copy(img, i + 1)
if last_t != 0:
print("time per spp = {:.2f} ms".format(
(time.time() - last_t) * 1000 / interval))
last_t = time.time()
gui.set_image(img)
gui.show()
if __name__ == '__main__':
main()
```
#### File: taichi/examples/taichi_bitmasked.py
```python
import taichi as ti
import math
ti.init(arch=ti.gpu)
n = 256
x = ti.var(ti.f32)
# `bitmasked` is a tensor that supports sparsity, in that each element can be
# activated individually. (It can be viewed as `dense`, with an extra bit for each
# element to mark its activation). Assigning to an element will activate it
# automatically. Use struct-for syntax to loop over the active elements only.
ti.root.bitmasked(ti.ij, (n, n)).place(x)
@ti.kernel
def activate():
    # All elements in bitmasked are initially deactivated
# Let's activate elements in the rectangle now!
for i, j in ti.ndrange((100, 125), (100, 125)):
x[i, j] = 233 # assign any value to activate the element at (i, j)
@ti.kernel
def paint_active_pixels(color: ti.f32):
# struct-for syntax: loop over active pixels, inactive pixels are skipped
for i, j in x:
x[i, j] = color
@ti.kernel
def paint_all_pixels(color: ti.f32):
# range-for syntax: loop over all pixels, no matter active or not
for i, j in ti.ndrange(n, n):
x[i, j] = color
ti.root.deactivate_all()
activate()
gui = ti.GUI('bitmasked', (n, n))
for frame in range(10000):
color = math.sin(frame * 0.05) * 0.5 + 0.5
paint_active_pixels(color)
#paint_all_pixels(color) # try this and compare the difference!
gui.set_image(x)
gui.show()
```
#### File: taichi/misc/cc_action_record.py
```python
import taichi as ti
ti.core.start_recording('record.yml')
ti.init(arch=ti.cc)
n = 512
x = ti.Vector(3, ti.f32, (n, n))
@ti.kernel
def render():
for i, j in x:
x[i, j] = [i / x.shape[0], j / x.shape[1], 0]
@ti.kernel
def dump_ppm(tensor: ti.template()):
if ti.static(isinstance(tensor, ti.Matrix)):
print('P3')
else:
print('P2')
print(tensor.shape[0], tensor.shape[1], 255)
for _ in range(1):
for i in range(x.shape[0]):
for j in range(x.shape[1]):
c = min(255,
max(0, int(tensor[j, x.shape[1] - 1 - i] * 255 + 0.5)))
if ti.static(isinstance(tensor, ti.Matrix)):
r, g, b = c
print(r, g, b)
else:
print(c)
render()
dump_ppm(x)
ti.imshow(x)
```
#### File: taichi/misc/demo_listgen.py
```python
import taichi as ti
ti.init(print_ir=True)
x = ti.var(ti.i32)
ti.root.dense(ti.i, 4).bitmasked(ti.i, 4).place(x)
@ti.kernel
def func():
for i in x:
print(i)
func()
```
#### File: taichi/misc/test_async_weaken_access.py
```python
import taichi as ti
ti.init()
x = ti.var(ti.i32)
y = ti.var(ti.i32)
ti.root.pointer(ti.ij, 4).dense(ti.ij, 8).place(x, y)
@ti.kernel
def copy():
for i, j in y:
x[i, j] = y[i, j]
copy()
```
#### File: taichi/lang/shell.py
```python
import sys, os, atexit
class ShellType:
NATIVE = 'Python shell'
IPYTHON = 'IPython TerminalInteractiveShell'
JUPYTER = 'IPython ZMQInteractiveShell'
IPYBASED = 'IPython Based Shell'
SCRIPT = None
class ShellInspectorWrapper:
"""
    Wrapper of the `inspect` module. When an interactive shell is detected,
    getsource() calls are redirected to the corresponding inspector
    provided by / suitable for that type of shell.
"""
@staticmethod
def get_shell_name(exclude_script=False):
"""
        Detect which type of shell is in use.
Can be IPython, IDLE, Python native, or none.
"""
shell = os.environ.get('TI_SHELL_TYPE')
if shell is not None:
return getattr(ShellType, shell.upper())
if not exclude_script:
try:
import __main__ as main
if hasattr(main, '__file__'): # Called from a script?
return ShellType.SCRIPT
except:
pass
# Let's detect which type of interactive shell is being used.
# As you can see, huge engineering efforts are done here just to
# make IDLE and IPython happy. Hope our users really love them :)
try: # IPython / Jupyter?
return 'IPython ' + get_ipython().__class__.__name__
except:
# Note that we can't simply do `'IPython' in sys.modules`,
            # since it seems `torch` will import IPython on its own too.
if hasattr(__builtins__, '__IPYTHON__'):
return ShellType.IPYBASED
try:
if getattr(sys, 'ps1', sys.flags.interactive):
return ShellType.NATIVE
except:
pass
return ShellType.SCRIPT
@staticmethod
def create_inspector(name):
if name is None:
# `inspect` for "Python script"
import inspect
return inspect
elif name == ShellType.NATIVE:
# `dill.source` for "Python native shell"
try:
import dill
except ImportError as e:
raise ImportError(
'In order to run Taichi in Python interactive shell, '
'Please execute `python3 -m pip install --user dill`')
return dill.source
elif name.startswith('IPython'):
# `IPython.core.oinspect` for "IPython advanced shell"
return IPythonInspectorWrapper()
else:
raise RuntimeError(f'Shell type "{name}" not supported')
def __init__(self):
self.name = self.get_shell_name()
if self.name is not None:
print(f'[Taichi] Interactive shell detected: {self.name}')
self.inspector = self.create_inspector(self.name)
def getsource(self, o):
return self.inspector.getsource(o)
def getsourcelines(self, o):
return self.inspector.getsourcelines(o)
def getsourcefile(self, o):
return self.inspector.getsourcefile(o)
class IPythonInspectorWrapper:
"""`inspect` module wrapper for IPython / Jupyter notebook"""
def __init__(self):
pass
def getsource(self, o):
import IPython
return IPython.core.oinspect.getsource(o)
def getsourcelines(self, o):
import IPython
lineno = IPython.core.oinspect.find_source_lines(o)
lines = IPython.core.oinspect.getsource(o).split('\n')
return lines, lineno
def getsourcefile(self, o):
import IPython
lineno = IPython.core.oinspect.find_source_lines(o)
return f'<IPython:{lineno}>'
oinspect = ShellInspectorWrapper()
# TODO: also detect print according to shell type
```
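The wrapper above exists so that Taichi can recover a kernel's Python source no matter where it was defined. A minimal usage sketch (the import path follows the file location above; `my_func` is illustrative):
```python
from taichi.lang.shell import oinspect
def my_func(x):
    return x + 1
# Works in plain scripts (inspect), native shells (dill) and IPython/Jupyter alike.
src = oinspect.getsource(my_func)
lines, lineno = oinspect.getsourcelines(my_func)
print(src)
print(lineno)
```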
#### File: tests/python/test_listgen.py
```python
import taichi as ti
from random import randrange
@ti.all_archs
def test_listgen():
x = ti.var(ti.i32)
n = 1024
ti.root.dense(ti.ij, 4).dense(ti.ij, 4).dense(ti.ij,
4).dense(ti.ij,
4).dense(ti.ij,
4).place(x)
@ti.kernel
def fill(c: ti.i32):
for i, j in x:
x[i, j] = i * 10 + j + c
for c in range(2):
print('Testing c=%d' % c)
fill(c)
# read it out once to avoid launching too many operator[] kernels
xnp = x.to_numpy()
for i in range(n):
for j in range(n):
assert xnp[i, j] == i * 10 + j + c
# Randomly check 1000 items to ensure [] work as well
for _ in range(1000):
i, j = randrange(n), randrange(n)
assert x[i, j] == i * 10 + j + c
@ti.all_archs
def test_nested_3d():
x = ti.var(ti.i32)
n = 128
ti.root.dense(ti.ijk, 4).dense(ti.ijk, 4).dense(ti.ijk,
4).dense(ti.ijk,
2).place(x)
@ti.kernel
def fill():
for i, j, k in x:
x[i, j, k] = (i * n + j) * n + k
fill()
# read it out once to avoid launching too many operator[] kernels
xnp = x.to_numpy()
for i in range(n):
for j in range(n):
for k in range(n):
assert xnp[i, j, k] == (i * n + j) * n + k
# Randomly check 1000 items to ensure [] work as well
for _ in range(1000):
i, j, k = randrange(n), randrange(n), randrange(n)
assert x[i, j, k] == (i * n + j) * n + k
``` |
{
"source": "44aayush/CTF_Bot_Discord",
"score": 2
} |
#### File: CTF_Bot_Discord/Python-CTF_BOT/commands.py
```python
from config import credential
from discord.ext import commands
from handlers import team_controller, task_controller, audit_controller, view_controller
def start():
help_command = commands.DefaultHelpCommand(no_category = 'dCTF')
bot = commands.Bot(command_prefix='>>', help_command = help_command)
@bot.command(name='register', help='Register your team. Format: >>register <team_name>')
async def register(ctx, team: str):
response = team_controller.register(team)
await ctx.send(response)
@bot.command(name='login', help='Login with team token. Format: >>login <team_token>')
async def login(ctx, token: str):
response = team_controller.login(str(ctx.author.id), token)
await ctx.send(response)
@bot.command(name='create-challenge', help='Format: >>create-challenge <name> <category> <description> <files> <flag>')
@commands.has_role(credential.role)
async def create_task(ctx, name: str, category: str, description: str, files: str, flag: str):
response = task_controller.create_task(name, category, description, files, flag)
await ctx.send(response)
@bot.command(name='release-challenge', help='Format: >>release-challenge <challenge_id>')
@commands.has_role(credential.role)
async def release_task(ctx, task_id: int):
response = task_controller.release_task(task_id)
await ctx.send(response)
@bot.command(name='hide-challenge', help='Hide a challenge. Format: >>hide-challenge <challenge_id>')
@commands.has_role(credential.role)
async def hide_task(ctx, task_id: int):
response = task_controller.hide_task(task_id)
await ctx.send(response)
@bot.command(name='delete-challenge', help='Format: >>delete-challenge <challenge_id>')
@commands.has_role(credential.role)
async def delete_task(ctx, task_id: int):
response = task_controller.delete_task(task_id)
await ctx.send(response)
@bot.command(name='submit', help='Submit flag. Format: >>submit <flag>')
async def submit(ctx, flag: str):
response = audit_controller.submit(str(ctx.author.id), flag)
await ctx.send(response)
@bot.command(name='challenges', help='List all challenges. Format: >>challenges')
async def challenges(ctx):
response = view_controller.challenges()
await ctx.send(embed=response)
    @bot.command(name='challenges-info', help='Get challenges info. Format: >>challenges-info <name>')
async def challenges_info(ctx, name: str):
response = view_controller.challenges_info(name)
await ctx.send(embed=response)
@bot.command(name='scoreboard', help='Update scoreboard. Format: >>scoreboard')
async def scoreboard(ctx):
response=view_controller.scoreboard_before_freeze()
await ctx.send(response)
bot.run(credential.token)
```
#### File: Python-CTF_BOT/databases/task_database.py
```python
from config import dburl
from sqlalchemy import Column, Integer, String, Date, Boolean
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
engine = create_engine(dburl)
Base = declarative_base()
Session = sessionmaker(bind=engine)
s = Session()
class Task(Base):
__tablename__ = 'tasks'
id = Column(Integer, primary_key=True)
name = Column(String)
category = Column(String)
description = Column(String)
files = Column(String)
flag = Column(String)
visible = Column(Boolean)
def find_all_task():
data = s.query(Task).all()
return data
def find_task(task_id):
data = s.query(Task).get(task_id)
return data
def create_task(name, category, description, files, flag):
task = Task(
name=name,
category=category,
description=description,
files=files,
flag=flag,
visible=False
)
s.add(task)
s.flush()
s.refresh(task)
s.commit()
return task
def task_exist(name):
data = s.query(Task).filter_by(name=name).first()
return False if (data == None) else True
def release_task(task_id):
data = s.query(Task).get(task_id)
data.visible = True
s.commit()
def hide_task(task_id):
data = s.query(Task).get(task_id)
data.visible = False
s.commit()
def delete_task(task_id):
data = s.query(Task).get(task_id)
s.delete(data)
s.commit()
def correct_flag(flag):
data = s.query(Task).filter_by(flag=flag).first()
return data
def not_unique_flag(flag):
data = s.query(Task).filter_by(flag=flag).first()
return False if (data == None) else True
def find_all_visible_task():
data = s.query(Task).filter_by(visible=True).all()
return data
def find_visible_task(name):
data = s.query(Task).filter_by(visible=True, name=name).first()
return data
Base.metadata.create_all(engine)
``` |
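The helpers above form a small CRUD layer over the `tasks` table. A minimal sketch of the challenge lifecycle (illustrative values; the import path is an assumption, and the calls hit whatever database `dburl` points at):
```python
from databases.task_database import create_task, release_task, hide_task, delete_task, correct_flag
task = create_task("warmup", "web", "View the page source.", "none", "dCTF{example}")
release_task(task.id)                      # make the challenge visible to players
print(correct_flag("dCTF{example}").name)  # -> "warmup"
hide_task(task.id)
delete_task(task.id)
```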
{
"source": "44O/gaota",
"score": 3
} |
#### File: 44O/gaota/gaota.py
```python
import pyxel
#from collections import namedtuple
#Point = namedtuple("Point", ["x", "y"], "Tile_Point", ["x", "y"])
WINDOW_W = 128
#WINDOW_H = 128
WINDOW_H = 140
ROCK_IMG = 0
ROCK_U = 16
ROCK_V = 0
ROCK_H = 16
ROCK_W = 16
ROCK_COLKEY = 0
IMG_ROCK_UPPER_LEFT = 2
IMG_ROCK_UPPER_RIGHT = 3
IMG_ROCK_LOWER_LEFT = 34
IMG_ROCK_LOWER_RIGHT = 35
IMG_WALL = 1
IMG_ONIGIRI_UPPER_LEFT = 128
IMG_ONIGIRI_UPPER_RIGHT = 129
IMG_ONIGIRI_LOWER_LEFT = 160
IMG_ONIGIRI_LOWER_RIGHT = 161
IMG_TRASURE_UPPER_LEFT = 6
IMG_TRASURE_UPPER_RIGHT = 7
IMG_TRASURE_LOWER_LEFT = 38
IMG_TRASURE_LOWER_RIGHT = 39
IMG_LADDER_LEFT = 4
IMG_LADDER_RIGHT = 5
# TODO:Classing, OVERALL!!!!!!
class App:
def __init__(self):
# TODO:wanna gorgeous title!
pyxel.init(WINDOW_W, WINDOW_H, caption="gaota", fps=25)
pyxel.load('assets/gaota.pyxel')
self.title()
self.deep_reset()
self.reset()
pyxel.run(self.update, self.draw)
def reset(self):
self.x = 0
self.y = 0
self.tile_x = self.stage * 16
self.tile_y = 0
self.move_x = 0
self.move_y = 0
self.walk = [0, 16]
self.laddering = [80, 96]
self.direction = [16, -16]
self.vector = 0
self.rock_x = 0
self.rock_y = 0
self.rock_move_y = 0
self.is_rock_fall = False
self.is_spewing = False
self.spew_count = 0
self.spew = [0, 16, 32, 48]
self.rock_count = 0
self.is_laddering = False
def deep_reset(self):
self.stage = 0
def update(self):
if pyxel.btnp(pyxel.KEY_Q):
pyxel.quit()
elif pyxel.btnp(pyxel.KEY_R):
self.reset()
self.deep_reset()
self.update_gaota()
if self.is_rock_fall:
self.update_rock()
def draw(self):
pyxel.cls(0)
self.tilemap_draw()
self.draw_gaota()
if self.is_rock_fall:
self.draw_rock()
def update_gaota(self):
# Any judgments are made every 8 dots.
if self.x % 8 == 0 and self.y % 8 == 0:
self.move_x = 0
self.move_y = 0
self.is_laddering = False
# fall if no wall under gaota
if self.is_nothing_at_my_feet():
self.move_y = 1
# for spewing animations
elif self.is_spewing:
if self.spew_count == 3:
self.spew_count = 0
self.is_spewing = False
self.is_rock_fall = True
else:
self.spew_count += 1
# push h to move to left
elif pyxel.btn(pyxel.KEY_H) and self.x>0:
self.move_x = -1
self.vector = 1
if (
self.target(self.tile_x-1, self.tile_y) in [
IMG_WALL,
IMG_ROCK_UPPER_RIGHT,
IMG_ROCK_UPPER_LEFT
] or
self.target(self.tile_x-1, self.tile_y+1) in [
IMG_WALL,
IMG_ROCK_UPPER_RIGHT,
IMG_ROCK_UPPER_LEFT
]):
self.move_x = 0
# push l to move to right
elif pyxel.btn(pyxel.KEY_L) and self.x<112:
self.move_x = 1
self.vector = 0
if (
self.target(self.tile_x+2, self.tile_y) in [
IMG_WALL,
IMG_ROCK_UPPER_LEFT,
IMG_ROCK_LOWER_LEFT
] or
self.target(self.tile_x+2, self.tile_y+1) in [
IMG_WALL,
IMG_ROCK_UPPER_LEFT,
IMG_ROCK_LOWER_LEFT
]):
self.move_x = 0
# push k to move up
elif pyxel.btn(pyxel.KEY_K):
self.move_y = 0
if (
self.target(self.tile_x+0, self.tile_y+1) in [
IMG_LADDER_RIGHT,
IMG_LADDER_LEFT
] and
self.target(self.tile_x+1, self.tile_y+1) in [
IMG_LADDER_RIGHT,
IMG_LADDER_LEFT
]):
self.move_y = -1
self.is_laddering = True
# push j to move down
elif pyxel.btn(pyxel.KEY_J):
self.move_y = 0
if (
self.target(self.tile_x+0, self.tile_y+2) in [
IMG_LADDER_RIGHT,
IMG_LADDER_LEFT
] and
self.target(self.tile_x+1, self.tile_y+2) in [
IMG_LADDER_RIGHT,
IMG_LADDER_LEFT
]):
self.move_y = 1
self.is_laddering = True
# push z to spew a rock
elif pyxel.btn(pyxel.KEY_Z):
if self.is_rock_fall:
pass
elif self.rock_count == 0:
pass
else:
if self.is_puttable():
self.rock_tile_x = self.tile_x + 2 if self.vector == 0 else self.tile_x - 2
self.rock_tile_y = self.tile_y
self.rock_x = (self.rock_tile_x - (self.stage * 16)) * 8
self.rock_y = self.rock_tile_y * 8
self.rock_count -= 1
self.update_rock_count()
self.is_spewing = True
# move gaota
self.x += self.move_x
self.y += self.move_y
# set gaota location on the tile.
self.tile_x = 0 if self.x == 0 else int(self.x / 8)
self.tile_x += self.stage * 16
self.tile_y = 0 if self.y == 0 else int(self.y / 8)
        # got something? (note: gaota cannot pick anything up while in the air)
got = self.target(self.tile_x+0, self.tile_y+0)
if got == IMG_ONIGIRI_UPPER_LEFT and not(self.is_nothing_at_my_feet()):
self.rock_count += 1
self.update_rock_count()
self.delete_behind()
elif got == IMG_TRASURE_UPPER_LEFT and not(self.is_nothing_at_my_feet()):
# TODO: need much more rich stage clear process
self.stage += 1
self.reset()
self.tilemap_draw()
def update_rock_count(self):
# TODO: Ahhhhhhgggggg....
i = self.rock_count
j = 1
while i > 0:
#pyxel.tilemap(0).copy(
# j,
# 16,
# 7, 2, 0, 1, 1)
#pyxel.tilemap(0).copy(
# j,
# 16,
# 7, 2, 1, 1, 1)
pyxel.bltm(
j * 8,
132,
0,
2, 0, 1, 1)
pyxel.bltm(
j * 8,
10,
0,
2, 1, 1, 1)
j += 1
i -= 1
def target(self, x, y):
return pyxel.tilemap(0).get(x, y)
def is_puttable(self):
if self.vector == 0:
obj_upper_left = self.target(self.tile_x+2, self.tile_y)
obj_upper_right = self.target(self.tile_x+3, self.tile_y)
obj_lower_left = self.target(self.tile_x+2, self.tile_y+1)
obj_lower_right = self.target(self.tile_x+3, self.tile_y+1)
else:
obj_upper_left = self.target(self.tile_x-2, self.tile_y)
obj_upper_right = self.target(self.tile_x-1, self.tile_y)
obj_lower_left = self.target(self.tile_x-2, self.tile_y+1)
obj_lower_right = self.target(self.tile_x-1, self.tile_y+1)
return set([
IMG_WALL,
IMG_ROCK_UPPER_LEFT,
IMG_ROCK_UPPER_RIGHT,
IMG_ROCK_LOWER_LEFT, IMG_ROCK_LOWER_RIGHT
]).isdisjoint([
obj_upper_left,
obj_upper_right,
obj_lower_right,
obj_lower_left
])
def update_rock(self):
if self.rock_y % 8 == 0:
self.rock_move_y = 0
# fall if no wall under rock
if self.is_nothing_at_my_bottom():
self.rock_move_y = 1
else:
# put static rock after landing.
            # TODO: I think I should not use tilemap().copy.
            # Actually I'm using tilemap(7) to copy the tile, and that is not good, I think.
pyxel.tilemap(0).copy(
self.rock_tile_x,
self.rock_tile_y,
7, 0, 0, 2, 2)
self.is_rock_fall = False
self.rock_y += self.rock_move_y
# set rock location on the tile.
self.rock_tile_y = 0 if self.rock_y == 0 else int(self.rock_y / 8)
def is_nothing_at_my_feet(self):
if self.target(self.tile_x, self.tile_y+2) in [
IMG_WALL,
IMG_ROCK_UPPER_LEFT,
IMG_ROCK_UPPER_RIGHT,
IMG_LADDER_LEFT,
IMG_LADDER_RIGHT]:
return False
if self.target(self.tile_x+1, self.tile_y+2) in [
IMG_WALL,
IMG_ROCK_UPPER_LEFT,
IMG_ROCK_UPPER_RIGHT,
IMG_LADDER_LEFT,
IMG_LADDER_RIGHT]:
return False
return True
def is_nothing_at_my_bottom(self):
if self.target(self.rock_tile_x, self.rock_tile_y+2) in [
IMG_WALL,
IMG_ROCK_UPPER_LEFT,
IMG_ROCK_UPPER_RIGHT]:
return False
if self.target(self.rock_tile_x+1, self.rock_tile_y+2) in [
IMG_WALL,
IMG_ROCK_UPPER_LEFT,
IMG_ROCK_UPPER_RIGHT]:
return False
return True
def delete_behind(self):
        # TODO: I think I should not use tilemap().copy.
pyxel.tilemap(0).copy(
self.tile_x,
self.tile_y,
1, 2, 0, 2, 2)
def draw_gaota(self):
x = self.x
y = self.y
img = 0
if self.is_spewing:
u = self.spew[self.spew_count]
elif self.is_laddering:
u = self.laddering[(self.y // 4) % 2]
else:
u = self.walk[(self.x // 4) % 2]
v = 16
w = self.direction[self.vector]
h = 16
colkey = 0
pyxel.blt(x, y, img, u, v, w, h, colkey)
def draw_rock(self):
x = self.rock_x
y = self.rock_y
pyxel.blt(x, y, ROCK_IMG, ROCK_U, ROCK_V, ROCK_W, ROCK_H, ROCK_COLKEY)
def tilemap_draw(self):
x = 0
y = 0
tm = 0
u = self.stage * 16
v = 0
w = 16
h = 16
pyxel.bltm(x, y, tm, u, v, w, h)
def title(self):
        # TODO: title screen planned here.
pass
#pyxel.image(0).load(0, 0, "assets/title.png")
App()
``` |
{
"source": "44REAM/ECG-holter",
"score": 2
} |
#### File: ECG-holter/ecgholter/classifier.py
```python
from typing import List
import numpy as np
from .peakobject import ECGLabel
from . import metric
from .config import cfg
from .peakobject import PeakArray, QRS, Peak
from .utils import *
class PeakClassifier:
def preprocess(self, signals):
pass
def tachy_brady(self, peakarray: PeakArray, segment_idx: List[int], algo = "interval"):
if algo == 'interval':
self.tachy_brady_time(peakarray, segment_idx)
elif algo == "peak":
self.tachy_brachy_peak(peakarray)
@staticmethod
def tachy_brady_time(peakarray: PeakArray, segment_idx: List[int]):
brady_threshold= cfg.DIAG.BRADY
tachy_threshold = cfg.DIAG.TACHY
start_idx = 0
for end_idx in segment_idx:
peakarray_landmark = peakarray[start_idx: end_idx]
interval_array, _ = peakarray_landmark.get_interval(rm_outlier=True, upper_percentile=95, lower_percentile=10, to_time=True)
hr = metric.hr(interval_array)
if hr == None:
continue
for peak in peakarray_landmark:
if isinstance(peak, QRS):
peak.add_hr(hr, tachy_threshold, brady_threshold)
start_idx = end_idx
@staticmethod
def tachy_brachy_peak(peakarray: PeakArray, wait_buffer = False):
"""Assigned HR to peak
use number of peak
This method will automatic update diagnosis for
tachycardia, bradycardia
Args:
wait_buffer (bool, optional): Used for defined if we want to wait for previous data.
"""
if not peakarray.ASSIGN_INTERVAL:
raise ValueError("Please assign interval before calculate tachy brady. Call assign_interval_to_peak")
interval_buffer = []
fs = cfg.FS
min_peak_buffer = cfg.DIAG.HR_PEAK_BUFFER
brady_threshold= cfg.DIAG.BRADY
tachy_threshold = cfg.DIAG.TACHY
for peak in peakarray:
if not isinstance(peak, QRS):
if wait_buffer:
interval_buffer = []
continue
if peak.interval == None:
if wait_buffer:
interval_buffer = []
continue
            interval_buffer.append(sample_to_msec(peak.interval, fs))
if len(interval_buffer) == min_peak_buffer:
hr = metric.hr(interval_buffer)
if hr == None:
continue
# add hr automatically classify tachycardia and bradycardia
peak.add_hr(hr, tachy_threshold, brady_threshold)
del(interval_buffer[0])
def ectopic(self, peakarray, segment_idx, algo = "interval"):
if algo == "interval":
self.ectopic_interval(peakarray, segment_idx)
def ectopic_interval(self, peakarray, segment_idx):
if not peakarray.ASSIGN_INTERVAL:
raise ValueError("Please assign interval before calculate tachy brady. Call assign_interval_to_peak")
if not peakarray.QRS_ONSET:
raise ValueError("Please assign QRS onset")
if not peakarray.QRS_OFFSET:
raise ValueError("Please assign QRS offset")
start_idx = 0
ectopic_ratio = cfg.DIAG.ECTOPIC_RATIO
qrs_width_sample = msec_to_sample(cfg.DIAG.QRS_WIDTH, cfg.FS, to_int=False)
median = cfg.DIAG.ECTOPIC_MEDIAN
for end_idx in segment_idx:
interval_array, _ = peakarray[start_idx: end_idx].get_interval(to_time=False)
if len(interval_array) ==0:
continue
if median:
mean_rr = np.median(interval_array)
else:
mean_rr = np.mean(interval_array)
for peak in peakarray[start_idx: end_idx]:
if not isinstance(peak, QRS):
continue
if not peak.interval:
continue
if peak.interval < mean_rr * ectopic_ratio:
self.pvc_pac(peak, qrs_width_sample)
start_idx = end_idx
def pvc_pac(self, peak: QRS, qrs_width_sample):
if not peak.mu:
peak.add_diagnosis(ECGLabel.PAC)
return
if not peak.j:
peak.add_diagnosis(ECGLabel.PAC)
return
qrs_width = peak.j - peak.mu
if qrs_width > qrs_width_sample:
peak.add_diagnosis(ECGLabel.PVC)
return
        peak.add_diagnosis(ECGLabel.PAC)  # narrow QRS complex -> atrial ectopic
def ventricular_arrhythmia(self, signals, peakarray: PeakArray, algo = ["tcsc", "vf_filter"]):
"""ventricular arrhythmia
"""
segment_sample = sec_to_sample(cfg.DIAG.VTFT_TCSC_SEGMENT_SEC, cfg.FS)
decision_sample = sec_to_sample(cfg.DIAG.VTFT_TCSC_SMOOTH_SEC, cfg.FS)
tcsc_section = []
vf_filter_section = []
n_signals = len(signals)
if "tcsc" in algo:
mask = get_section_mask(peakarray.Q_SECTION, len(signals))
vtvf_signals = metric.tcsc(signals, segment_sample, decision_sample,
threshold=cfg.DIAG.VTFT_TCSC_BINARY_THRESHOLD, mask = mask)
tcsc_section = mask_segment(vtvf_signals, 0, n_signals, threshold = cfg.DIAG.VTFT_TCSC_THRESHOLD)
if "vf_filter" in algo:
vtvf_signals = metric.vf_filter(signals, segment_sample)
vtvf_signals = vtvf_signals*(-1) +1
if len(vtvf_signals) !=0:
tmp_vf_filter_section = mask_segment(vtvf_signals,0,n_signals, threshold = cfg.DIAG.VTFT_VFF_THRESHOLD)
constant = len(signals)/len(vtvf_signals)
for section in tmp_vf_filter_section:
vf_filter_section.append( (int(section[0]*constant), int(section[1]*constant)))
vtvf_section = union_list_of_section([tcsc_section, vf_filter_section])
peakarray.add_vtvf_section(vtvf_section)
def af(self, peakarray: PeakArray):
#TODO
intervals, _ = peakarray.get_interval(to_time=True)
n_section = int(np.floor(len(intervals)/10))
for i in range(n_section):
pass
```
#### File: ECG-holter/ecgholter/metric.py
```python
import math
import numpy as np
from .utils import *
def entropy(histogram_bin):
histogram_bin = np.array(histogram_bin)
if histogram_bin.size == 0:
raise ValueError("Array cannot be empty")
histogram_bin =histogram_bin[histogram_bin!=0]
n_classes = histogram_bin.size
if n_classes <= 1:
return 0
probs = histogram_bin / np.sum(histogram_bin)
value = 0.
for i in probs:
value -= i * np.log(i)
return value
def sdann_asdnn_old(nn_interval: np.ndarray, index_array: np.ndarray, min_number_of_interval = 3, duration = 5):
"""[summary]
Args:
nn_interval (np.ndarray): interval array in millisecond
index_array (np.ndarray): index array in millisecond
min_number_of_interval: minimum number of interval for calculate
duration (int, optional): Duration in minute
Returns:
[type]: [description]
"""
section_msec = minute_to_msec(duration)
# number of 5 minute sections
    n_section = int(np.ceil(index_array[-1] / section_msec))
start_idx = 0
average_array = []
std_array = []
for i in range(n_section):
end_section = (i+1)*section_msec
end_idx = np.searchsorted(index_array, end_section,side='right')
# calculate only if have peak greater than specified
if len(nn_interval[start_idx:end_idx]) >= min_number_of_interval:
average_array.append(np.mean(nn_interval[start_idx:end_idx]))
std_array.append(np.std(nn_interval[start_idx:end_idx]))
start_idx = end_idx
if len(average_array) >= min_number_of_interval:
sdann = np.std(average_array)
asdnn = np.mean(std_array)
return sdann, asdnn
return None, None
# faster than sdann_asdnn_old because it does not have to search for indices
def sdann_asdnn(nn_interval: np.ndarray, index_array: np.ndarray, min_number_of_interval = 3, duration = 5):
"""[summary]
Args:
nn_interval (np.ndarray): interval array in millisecond
index_array (np.ndarray): index array in millisecond
min_number_of_interval: minimum number of interval for calculate
duration (int, optional): Duration in minute
Returns:
[type]: [description]
"""
if len(nn_interval) < min_number_of_interval:
return None, None
section_msec = minute_to_msec(duration)
segment_index = get_segment_index(index_array, section_msec)
average_array = []
std_array = []
start_idx = 0
for end_idx in segment_index:
if len(nn_interval[start_idx:end_idx]) >= min_number_of_interval:
average_array.append(np.mean(nn_interval[start_idx:end_idx]))
std_array.append(np.std(nn_interval[start_idx:end_idx]))
start_idx = end_idx
if len(average_array) >= min_number_of_interval:
sdann = np.std(average_array)
asdnn = np.mean(std_array)
return sdann, asdnn
return None, None
def sdnn(nn_interval: np.ndarray, min_number_of_interval = 3):
"""[summary]
Args:
nn_interval (np.ndarray): interval array in millisecond
min_number_of_interval: minimum number of interval for calculate
"""
if len(nn_interval) < min_number_of_interval:
return None
std = np.std(nn_interval)
return std
def rmssd(rr_interval, min_number_of_interval = 3):
"""[summary]
Args:
nn_interval (np.ndarray): interval array in millisecond
min_number_of_interval: minimum number of interval for calculate
"""
if len(rr_interval) >= min_number_of_interval+1:
rr_dif = np.diff(rr_interval)
# return np.sqrt(np.sum(np.array( rr_dif )**2)/len(rr_dif))
return np.std(rr_dif)
return None
def hr(rr_interval):
if len(rr_interval) == 0:
return None
mean_rr = np.mean(rr_interval)
return msec_to_hr(mean_rr)
# ***********************************************************************
# ******************** Ventricular Arrhythmia ***************************
# ***********************************************************************
def tcsc(signals: np.ndarray, segment_sample: int, decision_sample: int, threshold = 0.2, mask = None):
"""
<NAME>, <NAME>, <NAME>.
A simple time domain algorithm for the detection of
ventricular fibrillation in electrocardiogram.
SIViP. 2011. (With some modification)
    Time-domain feature.
    ** For detecting ventricular arrhythmia.
    Does not require QRS detection first.
    The signal should be noise-free.
"""
    signals = segment_normalize(signals, segment_sample, absolute=True)
binary_array = np.zeros_like(signals)
binary_array[signals >threshold] = 1
if mask is None:
return ma(binary_array, decision_sample)
binary_array[mask] = 0
return ma(binary_array, decision_sample)
def mav(signals: np.ndarray, segment_sample: int, decision_sample: int):
"""
<NAME>, <NAME>, Hasan MK.
Sequential algorithm for life threatening
cardiac pathologies detection based on mean
signal strength and EMD functions.
    BioMed Eng OnLine. 2010.
    Time-domain feature.
    ** For detecting ventricular arrhythmia.
    Does not require QRS detection first.
    The signal should be noise-free.
"""
    signals = segment_normalize(signals, segment_sample, absolute=True)
return ma(signals, decision_sample)
def vf_filter(signals: np.ndarray, segment_sample: int):
"""
<NAME>, <NAME>, <NAME>.
Reliability of old and new ventricular fibrillation
detection algorithms for automated external defibrillators.
BioMed Eng OnLine. 2005
    Can be viewed as a frequency-domain feature; uses the filter-leakage measure.
    ** For detecting ventricular fibrillation only.
    Does not require QRS detection first.
    The signal should be noise-free.
"""
n_signal = len(signals)
section = int(np.floor(n_signal/ segment_sample))
l_array = []
for i in range(1, section):
signal_section = signals[i*segment_sample: (i+1)*segment_sample]
numerator = np.abs(signal_section).sum()
denominator = np.abs(np.diff(signal_section)).sum()
try:
if denominator == 0:
N = 0
else:
N = np.pi * numerator / denominator + 1/2
N = int(np.floor(N))
except Exception as e:
l_array.append(1)
continue
if N > segment_sample:
# raise ValueError("N cannot greater than section size")
# TODO
# Something error
# the signal frequency are too low so the N larger than segment_sample
# this because set signal to constant when preprocessing to remove signal threshold greater than ...
# append 1 for now
l_array.append(1)
continue
signal_section_shift = signals[i*segment_sample - N: (i+1)*segment_sample - N]
numerator = np.abs(signal_section + signal_section_shift).sum()
denominator = np.sum(np.abs(signal_section) + np.abs(signal_section_shift))
if denominator == 0:
# TODO
            # Something went wrong; same reason as above.
l_array.append(1)
continue
l = numerator / denominator
l_array.append(l)
return np.array(l_array)
```
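A minimal sketch of how the time-domain HRV metrics above are meant to be called on synthetic data (illustrative values; intervals and timestamps are in milliseconds as the docstrings require, and the import path is an assumption):
```python
import numpy as np
from ecgholter import metric
rr = np.array([800., 810., 790., 805., 795., 820., 780.])  # NN intervals (ms)
idx = np.cumsum(rr)                                         # peak timestamps (ms)
print(metric.hr(rr))     # mean heart rate in bpm
print(metric.sdnn(rr))   # overall variability
print(metric.rmssd(rr))  # short-term variability from successive differences
print(metric.sdann_asdnn(rr, idx, duration=5))  # (None, None) when there is too little data
```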
#### File: ecgholter/peakobject/peak.py
```python
from enum import IntEnum
from math import floor
from ..utils import sample_to_msec, sample_to_sec, timestamp_msec_to_datetime
class ECGLabel(IntEnum):
UNKNOWN = 0
NORMAL = 1
BRADYCARDIA = 2
TACHYCARDIA = 3
PAC = 4
PVC = 5
AF = 6
VENTRICULAR = 7
PAUSE = 8
QUESTION = 9
EVENT = 10
ANNOTATION = {
ECGLabel.UNKNOWN: '*',
ECGLabel.NORMAL: 'N',
ECGLabel.BRADYCARDIA: 'B',
ECGLabel.TACHYCARDIA: 'T',
ECGLabel.PAC: 'A',
ECGLabel.PVC: 'V',
ECGLabel.AF: 'a',
ECGLabel.VENTRICULAR: 'v',
ECGLabel.PAUSE: 'P',
ECGLabel.QUESTION: '?'
}
class Peak():
def __init__(self, idx):
self.idx = idx
self.diagnosis = ECGLabel.UNKNOWN
def __gt__(self, other):
if self.idx > other.idx:
return True
return False
def __ge__(self, other):
if self.idx >= other.idx:
return True
return False
def __lt__(self, other):
if self.idx < other.idx:
return True
return False
def __le__(self, other):
if self.idx <= other.idx:
return True
return False
def __eq__(self, other):
if self.idx == other.idx:
return True
return False
def __hash__(self):
return hash(self.__repr__())
def __ne__(self, other):
if self.idx != other.idx:
return True
return False
def __sub__(self, other):
return self.idx - other.idx
def __add__(self, other):
return self.idx + other.idx
def __repr__(self) -> str:
return f"{self.__class__.__name__}{self.idx}"
def get_timestamp_sec(self, start_ts_sec, fs):
return start_ts_sec + sample_to_sec(self.idx, fs)
def get_timestamp_msec(self, start_ts_msec, fs):
return start_ts_msec + sample_to_msec(self.idx, fs)
def get_gmt_time(self, start_ts_msec, fs, return_object = True):
timestamp_msec = self.get_timestamp_msec(start_ts_msec, fs)
return timestamp_msec_to_datetime(timestamp_msec, mode = 'utc', return_object =return_object)
def get_bkk_time(self, start_ts_msec, fs, return_object=True):
timestamp_msec = self.get_timestamp_msec(start_ts_msec, fs)
return timestamp_msec_to_datetime(timestamp_msec, mode = 'bkk', return_object =return_object)
def shift(self, shift):
self.idx += shift
def rescale(self, ratio):
self.idx = int(floor(self.idx*ratio))
def get_annotation(self):
return ANNOTATION[self.diagnosis]
@property
def timestamp(self, start_ts):
return sample_to_sec(self.idx) + start_ts
class Event(Peak):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.diagnosis = ECGLabel.EVENT
class QuestionMark(Peak):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.diagnosis = ECGLabel.QUESTION
class QRS(Peak):
def __init__(self, idx, normal=True, diagnosis=ECGLabel.UNKNOWN,
        mu=None, j=None, t=None, u=None, hr=None, interval=None,
        t_end=None, next_interval=None, interval_around_peak=None, *args, **kwargs):
"""
        Rules
        1. Mu and J cannot be None; if either is None the peak is set to QuestionMark.
        2. T can be None; if T is None the QT interval cannot be computed.
"""
super().__init__(idx, *args, **kwargs)
self.mu = mu
self.j = j
self.t = t
self.t_end = t_end
self.u = u
self.hr = hr
self.interval = interval
self.next_interval = next_interval
self.interval_around_peak = interval_around_peak
self.normal = normal
self.diagnosis = diagnosis
def shift(self, shift):
super().shift(shift)
if self.mu:
self.mu +=shift
if self.j:
self.j +=shift
if self.t:
self.t +=shift
if self.t_end:
self.t_end +=shift
if self.u:
self.u +=shift
def rescale(self, ratio):
#TODO
super().rescale(ratio)
def add_interval_around_peak(self, interval: float):
#TODO
self.interval_around_peak = interval
def add_previous_interval(self, interval: float, pause_sample):
self.interval = interval
self.diag_pause(pause_sample)
# if self.interval > pause_sample:
# self.add_diagnosis(ECGLabel.PAUSE)
def diag_pause(self, pause_sample):
if self.interval > pause_sample:
self.add_diagnosis(ECGLabel.PAUSE)
def add_hr(self, hr: int, tachy_threshold, brady_threshold):
self.hr = hr
self.diag_tachy_brady(tachy_threshold, brady_threshold)
# if self.hr > tachy_threshold:
# self.add_diagnosis(ECGLabel.TACHYCARDIA)
# elif self.hr < brady_threshold:
# self.add_diagnosis(ECGLabel.BRADYCARDIA)
# else:
# self.add_diagnosis(ECGLabel.NORMAL)
def conditional_diagnosis(self, property, threshold, greater = True):
pass
#TODO
def diag_tachy_brady(self, tachy_threshold, brady_threshold):
if self.hr > tachy_threshold:
self.add_diagnosis(ECGLabel.TACHYCARDIA)
elif self.hr < brady_threshold:
self.add_diagnosis(ECGLabel.BRADYCARDIA)
else:
self.add_diagnosis(ECGLabel.NORMAL)
def set_unknown(self):
if self.diagnosis == ECGLabel.UNKNOWN:
self.normal = False
return
if not self.interval:
self.diagnosis = ECGLabel.UNKNOWN
self.normal = False
return
if not self.next_interval:
self.diagnosis = ECGLabel.UNKNOWN
self.normal = False
return
def add_diagnosis(self, diag: ECGLabel):
if self.diagnosis < diag:
self.diagnosis = diag
if diag in [ECGLabel.PAC, ECGLabel.PVC, ECGLabel.VENTRICULAR]:
self.normal = False
```
#### File: ecgholter/utils/report.py
```python
import os
import shutil
import pytz
from docx.shared import Pt
import matplotlib.pyplot as plt
from matplotlib.dates import date2num, AutoDateLocator
import matplotlib.dates as mdates
from docx.shared import Inches, Cm
from docx import Document
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
from scipy.ndimage import zoom
from .utils import *
from .. import peakobject as po
from docx.shared import Pt
def make_rows_bold(*rows):
for row in rows:
for cell in row.cells:
for paragraph in cell.paragraphs:
for run in paragraph.runs:
run.font.bold = True
def make_rows_center(*rows):
for row in rows:
for cell in row.cells:
for paragraph in cell.paragraphs:
paragraph.alignment=WD_PARAGRAPH_ALIGNMENT.CENTER
def plot_datetime(savepath, y_signals, x_time, title, ylabel, tz, ymin, ymax, color = 'b'):
if len(x_time)<=1:
return
plt.close()
fig, ax = plt.subplots(figsize=(30, 8))
ax.set_xlim(x_time[0], x_time[-1])
ax.plot(date2num(x_time), y_signals, color)
plt.title(title, fontsize = 24)
ax.set_xlim(x_time[0],x_time[-1])
ax.set_ylim(ymin,ymax)
ax.set_ylabel(ylabel, fontsize=22)
# auto format date
# ax.xaxis.set_major_formatter( AutoDateFormatter(locator) )
locator = AutoDateLocator()
ax.xaxis.set_major_locator(locator)
# manual format date
date_format = mdates.DateFormatter('%d %H:%M:%S', tz = tz)
ax.xaxis.set_major_formatter(date_format)
ax.tick_params(axis='both', which='major', labelsize=16)
ax.grid(True)
plt.savefig(savepath, bbox_inches='tight')
min_max = 3
y_tick_major = np.arange(-4, 4,0.5)
y_tick_minor = np.arange(-4, 4,0.1)
def plot_ecg_10_sec(savepath, y_signals_ch2, y_signals_ch1, x_time, interval_array, interval_time_array, annotation, tz):
if len(x_time)<=1:
return
plt.close()
date_format = mdates.DateFormatter('%d %H:%M:%S', tz = tz)
fig, ax = plt.subplots(2, figsize=(30, 15))
# ax.set_ylim([-min_max, min_max])
for a in ax:
a.set_xticks(x_time[:: 100])
a.set_xticks(x_time[:: 20], minor=True)
a.set_yticks(y_tick_major)
a.set_yticks(y_tick_minor, minor=True)
a.grid(which='major')
a.grid(which='minor', alpha=0.3)
a.xaxis.set_major_formatter(date_format)
a.tick_params(axis='both', which='major', labelsize=16)
a.set_ylim(-min_max, min_max)
a.set_xlim(x_time[0], x_time[-1])
a.set_ylabel("mV", fontsize=22)
for idx,t in enumerate(a.xaxis.get_major_ticks()):
if idx%5==0:
continue
t.label.set_visible(False)
t.tick1On = False # <----
for interval, time_interval, anno in zip(interval_array[1:], interval_time_array[1:], annotation[1:]):
xmax = timestamp_msec_to_datetime(time_interval)
xcenter = timestamp_msec_to_datetime(time_interval-interval/2)
a.vlines(x = xmax, ymin = 2, ymax = 2.5 ,color = "#f70000")
a.text(xcenter , 2, interval, fontsize = 16, ha='center')
a.text(xmax , 2.5, anno, fontsize = 16, ha='center')
ax[0].set_title("CHANNEL I", fontsize = 24)
ax[1].set_title("CHANNEL II", fontsize = 24)
ax[0].plot(x_time, y_signals_ch1)
ax[1].plot(date2num(x_time), y_signals_ch2)
plt.savefig(savepath, bbox_inches='tight')
def plot_ecg_1_hour(savepath, y_signals, x_time, annotation_time, title, tz):
plt.close()
y_scatter = np.ones(len(annotation_time))*(-4)
fig, ax = plt.subplots(figsize=(30, 8))
ax.set_xlim(x_time[0], x_time[-1])
ax.plot(date2num(x_time), y_signals)
ax.scatter(annotation_time, y_scatter, c = "#f70000", zorder = 1)
plt.title(title, fontsize = 24)
ax.set_xlim(x_time[0],x_time[-1])
ax.set_ylim(-5,5)
ax.set_ylabel("mV", fontsize=22)
# auto format date
# ax.xaxis.set_major_formatter( AutoDateFormatter(locator) )
locator = AutoDateLocator()
ax.xaxis.set_major_locator(locator)
# manual format date
date_format = mdates.DateFormatter('%d %H:%M:%S', tz = tz)
ax.xaxis.set_major_formatter(date_format)
ax.tick_params(axis='both', which='major', labelsize=16)
ax.grid(True)
plt.savefig(savepath, bbox_inches='tight')
def report_diagnosis1hour(savefolder,signals, peakarray: po.PeakArray, diag ,segment, tz):
# Plot HR 24 hour
fs = peakarray.fs
n_segment = len(segment)
start = 0
for i in range(25):
if i>=n_segment:
break
landmark_peak = peakarray[start:segment[i]]
diag_location = landmark_peak.get_diagnosis_time(diag)
if len(diag_location) == 0:
continue
landmark_signal = signals[landmark_peak[0].idx: landmark_peak[-1].idx]
time_array = sample_to_sec(np.arange(landmark_peak[0].idx, landmark_peak[-1].idx), fs) + peakarray.START_TS/1000
time_array = [timestamp_sec_to_datetime(ts) for ts in time_array]
savepath = os.path.join(savefolder, f'{po.ANNOTATION[diag]}_hour_{(i+1):02d}.jpg')
plot_ecg_1_hour(savepath, landmark_signal, time_array,diag_location ,f"{po.ANNOTATION[diag]} Hour {(i+1):02d}", tz)
start = segment[i]
def report_diagnosis(savefolder, peakarray: po.PeakArray, signals, signals_ch1, diag: po.ECGLabel, tz, limit = 10000):
segment = peakarray.get_segment_index_every_nsec(nsec = 10)
start = 0
fs=peakarray.fs
picture_count = 0
for end in segment:
if picture_count >= limit:
break
landmark_peak = peakarray[start:end]
start = end
out = False
have_diag = False
annotation = []
for peak in landmark_peak:
if not isinstance(peak, po.QRS):
out = True
break
if peak.diagnosis == po.ECGLabel.UNKNOWN:
out = True
break
if peak.diagnosis == diag:
have_diag = True
annotation.append(peak.get_annotation())
if out or (not have_diag):
continue
r_peak = landmark_peak.get_r_index()
if len(r_peak)<4:
continue
start_idx = landmark_peak[0].idx
end_idx = landmark_peak[-1].idx
length = end_idx - start_idx
add = sec_to_sample(10, fs) -length
end_idx +=add+1
landmark_signals = signals[start_idx: end_idx]
landmark_signals_ch1 = signals_ch1[start_idx: end_idx]
time_array = sample_to_sec(np.arange(start_idx, end_idx), fs) + peakarray.START_TS/1000
time_array = [timestamp_sec_to_datetime(ts) for ts in time_array]
interval, time_interval = landmark_peak.get_interval(to_time=True, report=True)
savepath = os.path.join(savefolder, f"{time_array[0].strftime('%H.%M.%S (%d_%m_%Y)')}.jpg")
plot_ecg_10_sec(savepath, landmark_signals, landmark_signals_ch1, time_array, interval, time_interval, annotation, tz)
picture_count+=1
def report_hr(savefolder, peakarray: po.PeakArray, segment, tz):
# Plot HR 24 hour
peak_hr, peak_time = peakarray.get_hr_with_time()
savepath = os.path.join(savefolder, f'0_hr_total.jpg')
plot_datetime(savepath, peak_hr, peak_time, f"Total HR plot", "HR", tz, 0, 200)
n_segment = len(segment)
start = 0
for i in range(25):
if i>=n_segment:
break
peak_hr, peak_time = peakarray.get_hr_with_time(start_idx=start, end_idx=segment[i])
savepath = os.path.join(savefolder, f'hr{(i+1):02d}.jpg')
plot_datetime(savepath, peak_hr, peak_time, f"HR plot one hour {(i+1):02d}", "HR", tz, 0,200)
start = segment[i]
def report_question(savefolder, peakarray: po.PeakArray, n_signal, tz):
mask = get_section_mask(peakarray.Q_SECTION, n_signal, invert =True)
ts_array = sample_to_sec(np.arange(n_signal), peakarray.fs) + peakarray.START_TS/1000
ts_array = [timestamp_sec_to_datetime(ts) for ts in ts_array]
savepath = os.path.join(savefolder, f"0_Total")
plot_datetime(savepath, mask,ts_array, "Total signal lost",'one = lost', tz, 0, 1.2, color ='r' )
def clean_folder(folder_path):
print(folder_path)
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
os.mkdir(folder_path)
else:
os.mkdir(folder_path)
def format_diagnosis(document, savefolder,folder_name,
peakarray,signals, signals_ch1,
signals_resample, peakarray_resample, diag,
segment, tz, document_header, save_pic_limit = 5, document_n_pic = 3,
document_pic_width = 7):
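    # Builds one diagnosis chapter of the docx report: a heading, the hourly overview plots from
    # report_diagnosis1hour(), and up to `document_n_pic` example 10-second strips from report_diagnosis().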
document.add_page_break()
document.add_heading(document_header, level = 1)
document.add_paragraph()
folder_path = os.path.join(savefolder, folder_name)
clean_folder(folder_path)
hour_folder_path = os.path.join(folder_path, "hour_report")
clean_folder(hour_folder_path)
report_diagnosis1hour(hour_folder_path, signals_resample, peakarray_resample, diag, segment, tz)
report_diagnosis(folder_path, peakarray, signals, signals_ch1, diag, tz, limit = save_pic_limit)
for picture in os.listdir(hour_folder_path):
pic_path = os.path.join(hour_folder_path, picture)
document.add_picture(pic_path, width=Inches(document_pic_width))
document.add_heading("Example", level = 5)
for picture in os.listdir(folder_path)[:document_n_pic]:
pic_path = os.path.join(folder_path, picture)
if os.path.isdir(pic_path):
continue
document.add_picture(pic_path, width=Inches(document_pic_width))
if len(os.listdir(folder_path)) == 0:
document.add_paragraph("There are no this labelpeak or consider a noise")
def report_document(savefolder, peakarray: po.PeakArray, signals, signals_ch1, report_lost_signal = False):
n_signals = len(signals)
n_peak = len(peakarray.get_r_index())
signals_resample = zoom(signals, 10/peakarray.fs)
peakarray_resample = po.resample(peakarray, 10)
if peakarray.START_TS is None:
raise ValueError("Start time not yet assigned")
if peakarray.Q_SECTION is None:
raise ValueError("Question not yet assigned")
# get timezone
tz = pytz.timezone('Asia/Bangkok')
# get segment every 1 hour
segment = peakarray.get_segment_index_every_nsec(nsec = 3600)
# setting folder
# -------- Report HR folder
hr_folder = "hr_report"
hr_folder_path = os.path.join(savefolder, hr_folder)
clean_folder(hr_folder_path)
report_hr(hr_folder_path, peakarray, segment, tz)
# --------Report Diag
# Create Document
document = Document()
document.add_heading("ECG report", level=0)
document.add_heading("Information", level = 2)
document.add_paragraph()
start_time = timestamp_msec_to_datetime(peakarray.START_TS, return_object=False)
document.add_paragraph(f"Start time: {start_time}")
time_signal = sec_to_time_format(sample_to_sec(n_signals, peakarray.fs))
document.add_paragraph(f"Duration: {time_signal}")
document.add_paragraph(f"Total signal: {n_signals} (Sampling rate {peakarray.fs}Hz)")
n_lost_signal = count_section_sample(peakarray.Q_SECTION)
document.add_paragraph(f"Lost signal: {n_lost_signal} ({round(n_lost_signal/n_signals*100, 2)}%)")
document.add_paragraph()
document.add_heading("General summary", level = 2)
document.add_paragraph()
document.add_paragraph(f"Total peak: {n_peak}")
table = document.add_table(rows=11, cols=2, style = "Table Grid")
# Heading
table.cell(0, 0).text = "Label"
table.cell(0, 1).text = "Count"
make_rows_bold(table.rows[0])
make_rows_center(table.rows[0])
# Diagnosis
all_labels = peakarray.get_all_labels()
table.cell(1, 0).text = "Normal"
table.cell(2, 0).text = "Tachycardia"
table.cell(3, 0).text = "Bradycardia"
table.cell(4, 0).text = "Pause"
table.cell(5, 0).text = "PVC"
table.cell(6, 0).text = "PAC"
table.cell(7, 0).text = "AF"
table.cell(8, 0).text = "VTVF"
table.cell(9, 0).text = "Unknown"
table.cell(10, 0).text = "Question"
table.cell(1, 1).text = str(len(all_labels['normal']))
table.cell(2, 1).text = str(len(all_labels['tachy']))
table.cell(3, 1).text = str(len(all_labels['brady']))
table.cell(4, 1).text = str(len(all_labels['pause']))
table.cell(5, 1).text = str(len(all_labels['pvc']))
table.cell(6, 1).text = str(len(all_labels['pac']))
table.cell(7, 1).text = str(len(all_labels['af']))
table.cell(8, 1).text = str(len(all_labels['ventricular']))
table.cell(9, 1).text = str(len(all_labels['unknown']))
table.cell(10, 1).text =str(len(all_labels['question']))
# Metric
document.add_paragraph()
table = document.add_table(rows=9, cols=2, style = "Table Grid")
all_metrics = peakarray.get_all_metrics( rm_outlier=True, upper_percentile=95, lower_percentile = 5)
hrmax_time = timestamp_msec_to_datetime(all_metrics['hrmax_ts'], mode = 'bkk', return_object=False)
hrmin_time = timestamp_msec_to_datetime(all_metrics['hrmin_ts'], mode = 'bkk', return_object=False)
# Diagnosis
table.cell(0, 0).text = "Mean HR"
table.cell(0, 1).text = str(all_metrics['hr'])
table.cell(1, 0).text = "Max HR"
table.cell(1, 1).text = f"{all_metrics['hrmax']} at {hrmax_time}"
table.cell(2, 0).text = "Min HR"
table.cell(2, 1).text = f"{all_metrics['hrmin']} at {hrmin_time}"
table.cell(3, 0).text = "SDNN"
table.cell(3, 1).text = str(all_metrics['sdnn'])
table.cell(4, 0).text = "ASDNN"
table.cell(4, 1).text = str(all_metrics['asdnn'])
table.cell(5, 0).text = "SDANN"
table.cell(5, 1).text = str(all_metrics['sdann'])
table.cell(6, 0).text = "RMSSD"
table.cell(6, 1).text = str(all_metrics['rmssd'])
table.cell(7, 0).text = "QT"
table.cell(7, 1).text = str(all_metrics['qt'])
table.cell(8, 0).text = "QTc"
table.cell(8, 1).text = str(all_metrics['qtc'])
make_rows_bold(table.columns[0])
pic_width = 7
# HR picture
document.add_page_break()
document.add_heading("HR report", level = 1)
document.add_paragraph()
for picture in os.listdir(hr_folder_path):
pic_path = os.path.join(hr_folder_path, picture)
document.add_picture(pic_path, width=Inches(pic_width))
n_pic = 4
save_pic_limit = 100000
save_pic_limit_normal = 20
segment = peakarray_resample.get_segment_index_every_nsec(nsec = 3600)
document.add_page_break()
document.add_heading("Normal report", level = 1)
document.add_paragraph()
folder_path = os.path.join(savefolder, "normal_report")
clean_folder(folder_path)
report_diagnosis(folder_path, peakarray, signals, signals_ch1, po.ECGLabel.NORMAL, tz, limit = save_pic_limit_normal)
for picture in os.listdir(folder_path)[:n_pic]:
pic_path = os.path.join(folder_path, picture)
document.add_picture(pic_path, width=Inches(pic_width))
if len(os.listdir(folder_path)) == 0:
document.add_paragraph("There are no this labelpeak or consider a noise")
format_diagnosis(document, savefolder, "brady_report",
peakarray,signals,signals_ch1, signals_resample,
peakarray_resample,po.ECGLabel.BRADYCARDIA, segment, tz, "Bradycardia report",
save_pic_limit = save_pic_limit, document_n_pic=n_pic, document_pic_width=pic_width)
format_diagnosis(document, savefolder, "tachy_report",
peakarray,signals,signals_ch1, signals_resample,
peakarray_resample,po.ECGLabel.TACHYCARDIA, segment, tz, "Tachycardia report",
save_pic_limit = save_pic_limit, document_n_pic=n_pic, document_pic_width=pic_width)
format_diagnosis(document, savefolder, "pause_report",
peakarray,signals,signals_ch1, signals_resample,
peakarray_resample,po.ECGLabel.PAUSE, segment, tz, "Pause report",
save_pic_limit = save_pic_limit, document_n_pic=n_pic, document_pic_width=pic_width)
# ------------------------------------------------------------------------------
format_diagnosis(document, savefolder, "pvc_report",
peakarray,signals,signals_ch1, signals_resample,
peakarray_resample,po.ECGLabel.PVC, segment, tz, "PVC report",
save_pic_limit = save_pic_limit, document_n_pic=n_pic, document_pic_width=pic_width)
# ------------------------------------------------------------------------------
format_diagnosis(document, savefolder, "pac_report",
peakarray,signals,signals_ch1, signals_resample,
peakarray_resample,po.ECGLabel.PAC, segment, tz, "PAC report",
save_pic_limit = save_pic_limit, document_n_pic=n_pic, document_pic_width=pic_width)
format_diagnosis(document, savefolder, "af_report",
peakarray,signals,signals_ch1, signals_resample,
peakarray_resample,po.ECGLabel.AF, segment, tz, "AF report",
save_pic_limit = save_pic_limit, document_n_pic=n_pic, document_pic_width=pic_width)
format_diagnosis(document, savefolder, "vtvf_report",
peakarray,signals,signals_ch1, signals_resample,
peakarray_resample,po.ECGLabel.VENTRICULAR, segment, tz, "VTVF report",
save_pic_limit = save_pic_limit, document_n_pic=n_pic, document_pic_width=pic_width)
# Question picture
if report_lost_signal:
lost_folder = "question_report"
lost_folder_path = os.path.join(savefolder, lost_folder)
if os.path.exists(lost_folder_path):
shutil.rmtree(lost_folder_path)
os.mkdir(lost_folder_path)
else:
os.mkdir(lost_folder_path)
report_question(lost_folder_path, peakarray, n_signals, tz)
document.add_page_break()
document.add_heading("Lost signal report", level = 1)
document.add_paragraph()
for picture in os.listdir(lost_folder_path):
pic_path = os.path.join(lost_folder_path, picture)
document.add_picture(pic_path, width=Inches(7))
for section in document.sections:
section.top_margin = Cm(1.5)
section.bottom_margin = Cm(1.5)
section.left_margin = Cm(1.5)
section.right_margin = Cm(1.5)
document.save(os.path.join(savefolder, "report.docx"))
``` |
{
"source": "44Shu/Kaggle-Solutions",
"score": 2
} |
#### File: Kaggle-Solutions/Halite by Two Sigma/mineBot.py
```python
weights='''1.065318617455976 542.1433864410643 0.7511632555608448 0.6945893010559424 0.1341607259959342 -256.54011220873883
0 2.3837319660395457 0.4770079274532575 14.871982834273645 10
0.04043743652542793 219.09952521708655 9.561641308515489 1.1406984927798645 0.4806089913651024 11.485903586701356
0.32917669267944993 0.12670831197102922
1 -3.1819320805078153 -3
112.69692418951784
3 0.1
5'''
# Contains all dependencies used in bot
# First file loaded
from kaggle_environments import make
from kaggle_environments.envs.halite.helpers import *
import math, random
import numpy as np
import scipy.optimize
import scipy.ndimage
from queue import PriorityQueue
# Global constants
# Infinity value thats actually not infinity
INF = 999999999999
# All game state goes here - everything, even mundane
state = {}
# Bot training weights
# 0 - shipyard reward
# 1 - mine reward
# 2 - attack weights
# 3 - return weights
# 4 - spawn weights
# 5 - guard weights
# 6 - navigation weights
# 7 - target attack weights
temp = []
weights = weights.split('\n')
for line in weights:
temp.append(np.array(list(map(float,line.split()))))
weights = temp
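# After parsing, `weights` is a list of numpy arrays (one per line of the string above),
# indexed according to the numbering in the comment block, e.g. weights[1] = mine reward
# weights and weights[6] = navigation weights.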
# Init function - called at the start of each game
def init(board):
global state
np.set_printoptions(precision=3)
state['configuration'] = board.configuration
state['me'] = board.current_player_id
state['playerNum'] = len(board.players)
state['memory'] = {}
pass
# Run start of every turn
def update(board):
global action
action = {}
state['currentHalite'] = board.current_player.halite
state['next'] = np.zeros((board.configuration.size,board.configuration.size))
state['board'] = board
state['memory'][board.step] = {}
state['memory'][board.step]['board'] = board
state['cells'] = board.cells.values()
state['ships'] = board.ships.values()
state['myShips'] = board.current_player.ships
state['shipyards'] = board.shipyards.values()
state['myShipyards'] = board.current_player.shipyards
# Calc processes
encode()
state['spawn'] = spawn()
# General random helper functions that are not strictly "process" or in "nav"
# Map from 0 to 1
def normalize(v):
norm = np.linalg.norm(v,np.inf)
if norm == 0:
return v
return v / norm
def closest_ship(t):
return closest_thing(t,state['myShips'])
def closest_thing(t,arr):
res = None
for thing in arr:
if res == None:
res = thing
elif dist(t,res.position) > dist(t,thing.position):
res = thing
return res
def closest_thing_position(t,arr):
res = None
for thing in arr:
if res == None:
res = thing
elif dist(t,res) > dist(t,thing):
res = thing
return res
def halite_per_turn(deposit, shipTime, returnTime):
travelTime = shipTime + returnTime
actualDeposit = min(500,deposit * 1.02 ** shipTime)
maximum = 0
for turns in range(1,10):
mined = (1 - .75**turns) * actualDeposit
perTurn = mined / (turns+travelTime)
maximum = perTurn if perTurn > maximum else maximum
return maximum
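# Worked example (illustrative): deposit=100, shipTime=4, returnTime=4 ->
#   actualDeposit = min(500, 100 * 1.02**4) ~= 108.2 (the cell regrows while the ship travels);
#   mining for 3 turns yields (1 - 0.75**3) * 108.2 ~= 62.6 halite, i.e. ~5.7 halite per turn
#   over the 3 + 8 turns spent, and the loop returns the best such rate over 1..9 mining turns.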
def miner_num():
if state['board'].step < 280:
if len(state['myShips']) > 25:
return min(len(state['myShips']),int(state['haliteMean'] / 4 + len(state['myShipyards'])))
else:
return min(len(state['myShips']),int(state['haliteMean'] / 2 + len(state['myShipyards'])))
elif state['board'].step > 370:
return len(state['myShips'])
else:
return len(state['myShips']) * 0.8
def get_targets():
targets = []
for ship in state['enemyShips']:
if ship.halite != 0:
targets.append(ship)
return targets
def attack(ships):
global action
# Select potential targets
targets = get_targets()
# Greedy selection
target_list = []
for ship in ships:
# Force return
if ship.halite > 0:
action[ship] = (INF, ship, state['closestShipyard'][ship.position.x][ship.position.y])
continue
# Attack
finalTarget = targets[0]
v = rule_attack_reward(ship,finalTarget,target_list)
for target in targets:
tv = rule_attack_reward(ship,target,target_list)
if tv > v:
v = tv
finalTarget = target
target_list.append(finalTarget)
action[ship] = (1/dist(finalTarget.position,ship.position), ship, finalTarget.position)
# Greedy selection
# TODO: Improve this!
def rule_attack_reward(s,t,target_list):
tPos = t.position
sPos = s.position
d = dist(tPos,sPos)
res = 1/d
if t.player == state['killTarget']:
res = res * 4
control = state['positiveControlMap'][tPos.x][tPos.y]
if control > 1 and d < 8:
# Check if local maxima
yes = True
for x in range(-3,4):
if not yes:
break
for y in range(-3,4):
xx = (tPos.x+x) % 21
yy = (tPos.y+y) % 21
if not yes:
break
if state['positiveControlMap'][xx][yy] > control and state['enemyShipHalite'][xx][yy] < 99999 and state['enemyShipHalite'][xx][yy] > 0:
yes = False
if yes:
res = res * 6
if state['trapped'][t.player_id][tPos.x][tPos.y] and d <= 6:
res = res * 10
'''
for pos in get_adjacent(tPos):
if state['enemyShipHalite'][pos.x][pos.y] <= s.halite:
return 0
'''
return res
###################
# target based attack system
###################
'''
def target_based_attack():
# actions[ship] = (priority: int, ship: Ship, target: Point)
params = weights[7] # <- np.array
# target selection
targets = "all enemy ships with cargo > 0"
sorted(targets, key="cargo")
# assignment
for target in targets:
actions["all ally ships with cargo < target.cargo" in area5x5(target)] = ("priority", "ship", "target.pos")
'''
# Core strategy
action = {} # ship -> (value,ship,target)
farms = [] # list of cells to farm
def farm_tasks():
build_farm()
control_farm()
# Create patrols
def ship_tasks(): # update action
global action
cfg = state['configuration']
board = state['board']
me = board.current_player
tasks = {}
shipsToAssign = []
# Split attack ships and mine ships
temp = get_targets()
state['attackers'] = []
if len(temp) > 0:
minerNum = miner_num()
attackerNum = len(state['myShips']) - minerNum
for ship in me.ships:
if ship in action:
continue
if attackerNum > 0:
attackerNum -= 1
#Uncomment to activate attack
state['attackers'].append(ship)
#target_based_attack()
for ship in state['ships']:
if ship.player_id != state['me']:
if state['trapped'][ship.player_id][ship.position.x][ship.position.y] and ship.halite > 0:
print(ship.position)
# All ships rule based
for ship in me.ships:
'''
# Flee
if state['trapped'][state['me']][ship.position.x][ship.position.y] and ship.halite > 0:
action[ship] = (INF*2+state[ship]['danger'][ship.position.x][ship.position.y], ship, state['closestShipyard'][ship.position.x][ship.position.y])
'''
if ship in action:
continue
for target in get_adjacent(ship.position):
if board.cells[target].ship != None:
targetShip = board.cells[target].ship
if targetShip.player.id != state['me'] and targetShip.halite < ship.halite:
action[ship] = (INF*2+state[ship]['danger'][ship.position.x][ship.position.y], ship, state['closestShipyard'][ship.position.x][ship.position.y])
if ship in action:
continue # continue its current action
# End-game return
if board.step > state['configuration']['episodeSteps'] - cfg.size * 1.5 and ship.halite > 0:
action[ship] = (ship.halite, ship, state['closestShipyard'][ship.position.x][ship.position.y])
# End game attack
if len(state['board'].opponents) > 0 and board.step > state['configuration']['episodeSteps'] - cfg.size * 1.5 and ship.halite == 0:
#print(ship.position)
if len(state['myShipyards']) > 0 and ship == closest_thing(state['myShipyards'][0].position,state['myShips']):
action[ship] = (0,ship,state['myShipyards'][0].position)
continue
killTarget = state['killTarget']
if len(killTarget.shipyards) > 0:
target = closest_thing(ship.position,killTarget.shipyards)
action[ship] = (ship.halite, ship, target.position)
elif len(killTarget.ships) > 0:
target = closest_thing(ship.position,killTarget.ships)
action[ship] = (ship.halite, ship, target.position)
if ship in action or ship in state['attackers']:
continue
shipsToAssign.append(ship)
# Rule based: Attackers
#print(len(state['myShips']))
#print(len(state['attackers']))
attack(state['attackers'])
# Reward based: Mining + Guarding + Control
targets = [] # (cell, type)
for i in board.cells.values(): # Filter targets
if i.shipyard != None and i.shipyard.player_id == state['me']:
targets.append((i,'guard'))
for j in range(min(6,len(state['myShips']))):
targets.append((i,'cell'))
continue
'''if i.halite < 15 and i.ship == None and i.shipyard == None:
# Spots not very interesting
continue'''
if i.ship != None and i.ship.player_id != state['me']:
if i.ship.halite == 0 and state['controlMap'][i.position.x][i.position.y] < 0:
continue
targets.append((i,'cell'))
rewards = np.zeros((len(shipsToAssign), len(targets)))
for i, ship in enumerate(shipsToAssign):
for j, target in enumerate(targets):
rewards[i, j] = get_reward(ship, target)
rows, cols = scipy.optimize.linear_sum_assignment(rewards, maximize=True) # rows[i] -> cols[i]
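    # Hungarian-style assignment: rows are ships and columns are candidate targets, so maximizing
    # the reward matrix gives every ship at most one target and every target at most one ship.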
for r, c in zip(rows, cols):
task = targets[c]
if task[1] == 'cell':
            cell = targets[c][0]
if cell.halite == 0 and cell.shipyard == None and (cell.ship == None or cell.ship.player_id == state['me']):
action[shipsToAssign[r]] = (0, shipsToAssign[r], targets[c][0].position)
else:
action[shipsToAssign[r]] = (rewards[r][c], shipsToAssign[r], targets[c][0].position)
elif task[1] == 'guard':
action[shipsToAssign[r]] = (0, shipsToAssign[r], targets[c][0].position)
# Process actions
actions = list(action.values())
actions.sort(reverse=True, key=lambda x: x[0])
for act in actions:
process_action(act)
def process_action(act):
global action
if action[act[1]] == True:
return act[1].next_action
action[act[1]] = True
# Processing
act[1].next_action = d_move(act[1], act[2], state[act[1]]['blocked'])
# Ship convertion
sPos = act[1].position
if state['closestShipyard'][sPos.x][sPos.y] == sPos and state['board'].cells[sPos].shipyard == None:
act[1].next_action = ShipAction.CONVERT
state['next'][sPos.x][sPos.y] = 1
return act[1].next_action
def convert_tasks():
global action
# Add convertion tasks
currentShipyards = state['myShipyards'] # Shipyards "existing"
targetShipyards = currentShipyards[:]
# Maximum cell
v = shipyard_value(state['board'].cells[Point(0,0)])
t = state['board'].cells[Point(0,0)]
for cell in state['board'].cells.values():
a = shipyard_value(cell)
if v < a:
v = a
t = cell
tx, ty = t.position.x,t.position.y
# Calculate the reward for each cell
if state['board'].step == 0:
# Build immediately
targetShipyards.append(state['board'].cells[state['myShips'][0].position])
action[state['myShips'][0]] = (math.inf, state['myShips'][0], state['myShips'][0].position)
state['currentHalite'] -= 500
elif len(currentShipyards) == 0:
# Grab the closest possible ship to the target and build.
possibleShips = []
for ship in state['myShips']:
if ship.halite + state['currentHalite'] >= 500:
possibleShips.append(ship)
closest = closest_thing(Point(tx, ty),possibleShips)
if closest != None:
action[closest] = (math.inf, closest, Point(tx, ty))
targetShipyards.append(state['board'].cells[Point(tx, ty)])
state['currentHalite'] -= 500
elif v > 500 and v > state['shipValue']:
targetShipyards.append(state['board'].cells[Point(tx, ty)])
state['currentHalite'] -= 500
state['closestShipyard'] = closest_shipyard(targetShipyards)
def build_farm():
global farms
for cell in state['board'].cells.values():
if dist(cell.position,state['closestShipyard'][cell.position.x][cell.position.y]) == 1:
if cell.position in farms:
continue
farms.append(cell.position)
def control_farm():
global farms
for i,farm in enumerate(farms[:]):
if dist(farm,state['closestShipyard'][farm.x][farm.y]) > 1:
# Not worth it
farms.remove(farm)
def spawn():
# Ship value:
'''
if state['shipValue'] >= 500:
return True
else:
return False
'''
    # Copied from elsewhere
bank = state['currentHalite']
haliteMean = state['haliteMean']
step = state['board'].step
shipCnt = len(state['myShips'])
totalShipCnt = len(state['ships'])
#isBlocked = state['next'][shipyard.cell.position.x][shipyard.cell.position.y]
isBlocked = 0 #In theory never blocked, as already checked
if shipCnt >= 60 or step > 330:
return False
inArr = (np.array([bank, totalShipCnt, shipCnt, step, haliteMean, isBlocked]) - spawnMean) / spawnStd
res = W1 @ inArr + b1
res = np.maximum(res, 0)
res = W2 @ res + b2
res = np.maximum(res, 0)
res = W3 @ res + b3
#print(res)
if res > 0:
return True
else:
return False
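# spawn() above evaluates a small hard-coded MLP: the six features are z-scored with
# spawnMean/spawnStd (defined below), passed through two ReLU layers (W1,b1 and W2,b2)
# and a linear output layer (W3,b3); a positive output means "build a ship".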
def spawn_tasks():
shipyards = state['board'].current_player.shipyards
shipyards.sort(reverse=True, key=lambda shipyard: state['haliteSpread'][shipyard.position.x][shipyard.position.y])
shouldSpawn = spawn()
for shipyard in shipyards:
if state['currentHalite'] >= 500 and not state['next'][shipyard.cell.position.x][shipyard.cell.position.y]:
if shouldSpawn:
shipyard.next_action = ShipyardAction.SPAWN
state['currentHalite'] -= 500
elif len(state['myShips']) < 1 and shipyard == shipyards[0]:
shipyard.next_action = ShipyardAction.SPAWN
state['currentHalite'] -= 500
elif len(state['myShipyards']) == 1:
for pos in get_adjacent(shipyard.position):
cell = state['board'].cells[pos]
if cell.ship != None and cell.ship.player_id != state['me']:
shipyard.next_action = ShipyardAction.SPAWN
state['currentHalite'] -= 500
return
spawnMean = np.array([4.9859e+03, 6.0502e+01, 2.5001e+01, 1.9415e+02, 2.8910e+01, 6.1503e-01])
spawnStd = np.array([8.5868e+03, 1.5326e+01, 1.0737e+01, 1.1549e+02, 1.1789e+01, 4.8660e-01])
W1 = np.array([[-1.5224804e+00,2.4725301E-03,-8.7220293e-01,-1.0598649e+00,
9.9166840e-01,1.8315561e+00],
[-4.8011017e-01,-6.7499268e-01 ,3.5633636e-01,-1.7301080e+00,
2.0809724e+00,-8.9656311e-01],
[-1.1370039e+00,-2.0581658e-01,-2.6484251e+00,-1.5524467e+00,
3.5835698e+00,-1.7890360e+00],
[-1.7479208e-01 ,1.9892944e-01, 1.4682317e-01 , 1.1079860e+00,
1.4466201e-01 , 1.9152831e+00]])
b1 = np.array([1.177493, 0.5530099, 0.1025302, 2.165062 ])
W2 = np.array([[ 0.22407304 ,-0.32596582 ,-0.31062314 ,-0.17025752],
[-3.6107817 , 1.9571906 , -0.04028177, -4.0320687 ],
[ 4.130036 , -1.2309656, -0.52751654, 1.5594524 ],
[-0.33959138, -0.0332855 , -0.26249635, -0.35909724]])
b2 = np.array([-0.40560475 ,-0.00167005 , 0.7714385 , -0.19049597])
W3 = np.array([[ 0.4247551 , 5.073255 ,-4.3405128 , 0.00574893]])
b3 = np.array([-0.2889765])
# General calculations whose values are expected to be used in multiple instances
# Basically calc in botv1.0.
# Run in update() - see dependency.py
def encode():
global state
N = state['configuration'].size
# Halite
state['haliteMap'] = np.zeros((N, N))
for cell in state['cells']:
state['haliteMap'][cell.position.x][cell.position.y] = cell.halite
# Halite Spread
state['haliteSpread'] = np.copy(state['haliteMap'])
for i in range(1,5):
state['haliteSpread'] += np.roll(state['haliteMap'],i,axis=0) * 0.5**i
state['haliteSpread'] += np.roll(state['haliteMap'],-i,axis=0) * 0.5**i
temp = state['haliteSpread'].copy()
for i in range(1,5):
state['haliteSpread'] += np.roll(temp,i,axis=1) * 0.5**i
state['haliteSpread'] += np.roll(temp,-i,axis=1) * 0.5**i
# Ships
state['shipMap'] = np.zeros((state['playerNum'], N, N))
state['enemyShips'] = []
for ship in state['ships']:
state['shipMap'][ship.player_id][ship.position.x][ship.position.y] = 1
if ship.player_id != state['me']:
state['enemyShips'].append(ship)
# Shipyards
state['shipyardMap'] = np.zeros((state['playerNum'], N, N))
state['enemyShipyards'] = []
for shipyard in state['shipyards']:
state['shipyardMap'][shipyard.player_id][shipyard.position.x][shipyard.position.y] = 1
if shipyard.player_id != state['me']:
state['enemyShipyards'].append(shipyard)
# Total Halite
state['haliteTotal'] = np.sum(state['haliteMap'])
# Mean Halite
state['haliteMean'] = state['haliteTotal'] / (N**2)
# Estimated "value" of a ship
#totalShips = len(state['ships'])
#state['shipValue'] = state['haliteTotal'] / state
state['shipValue'] = ship_value()
# Friendly units
state['ally'] = state['shipMap'][state['me']]
# Friendly shipyards
state['allyShipyard'] = state['shipyardMap'][state['me']]
# Enemy units
state['enemy'] = np.sum(state['shipMap'], axis=0) - state['ally']
# Enemy shipyards
state['enemyShipyard'] = np.sum(state['shipyardMap'], axis=0) - state['allyShipyard']
# Closest shipyard
state['closestShipyard'] = closest_shipyard(state['myShipyards'])
# Control map
state['controlMap'] = control_map(state['ally']-state['enemy'],state['allyShipyard']-state['enemyShipyard'])
state['negativeControlMap'] = control_map(-state['enemy'],-state['enemyShipyard'])
state['positiveControlMap'] = control_map(state['ally'],state['allyShipyard'])
# Enemy ship labeled by halite. If none, infinity
state['enemyShipHalite'] = np.zeros((N, N))
state['shipHalite'] = np.zeros((state['playerNum'], N, N))
state['shipHalite'] += np.Infinity
state['enemyShipHalite'] += np.Infinity
for ship in state['ships']:
state['shipHalite'][ship.player.id][ship.position.x][ship.position.y] = ship.halite
if ship.player.id != state['me']:
state['enemyShipHalite'][ship.position.x][ship.position.y] = ship.halite
# Immediate danger map
state['trapped'] = np.zeros((state['playerNum'], N, N))
for player in range(state['playerNum']):
state['trapped'][player] = get_immediate_danger(player)
# Avoidance map (Places not to go for each ship)
for ship in state['myShips']:
state[ship] = {}
state[ship]['blocked'] = get_avoidance(ship)
state[ship]['danger'] = get_danger(ship.halite)
state['generalDangerMap'] = get_danger(1)
# Who we should attack
if len(state['board'].opponents) > 0:
state['killTarget'] = get_target()
def get_avoidance(s):
threshold = s.halite
#Enemy units
temp = np.where(state['enemyShipHalite'] < threshold, 1, 0)
enemyBlock = np.copy(temp)
enemyBlock = enemyBlock + np.roll(temp,1,axis=0)
enemyBlock = enemyBlock + np.roll(temp,-1,axis=0)
enemyBlock = enemyBlock + np.roll(temp,1,axis=1)
enemyBlock = enemyBlock + np.roll(temp,-1,axis=1)
enemyBlock = enemyBlock + state['enemyShipyard']
blocked = enemyBlock
blocked = np.where(blocked>0,1,0)
return blocked
def get_danger(s):
threshold = s
dangerMap = np.where(state['enemyShipHalite'] < threshold, 1, 0)
temp = dangerMap.copy()
for i in range(1,4):
dangerMap = np.add(dangerMap,np.roll(temp,i,axis=0) * 0.7**i,casting="unsafe")
dangerMap += np.roll(temp,-i,axis=0) * 0.7**i
temp = dangerMap.copy()
for i in range(1,4):
dangerMap += np.roll(temp,i,axis=1) * 0.7**i
dangerMap += np.roll(temp,-i,axis=1) * 0.7**i
return dangerMap
def closest_shipyard(shipyards):
N = state['configuration'].size
res = [[None for y in range(N)]for x in range(N)]
for x in range(N):
for y in range(N):
minimum = math.inf
for shipyard in shipyards:
if dist(Point(x,y),shipyard.position) < minimum:
minimum = dist(Point(x,y),shipyard.position)
res[x][y] = shipyard.position
return res
def control_map(ships,shipyards):
ITERATIONS = 3
res = np.copy(ships)
for i in range(1,ITERATIONS+1):
res += np.roll(ships,i,axis=0) * 0.5**i
res += np.roll(ships,-i,axis=0) * 0.5**i
temp = res.copy()
for i in range(1,ITERATIONS+1):
res += np.roll(temp,i,axis=1) * 0.5**i
res += np.roll(temp,-i,axis=1) * 0.5**i
return res + shipyards
def get_target():
board = state['board']
me = board.current_player
idx,v = 0, -math.inf
for i,opponent in enumerate(board.opponents):
value = 0
if opponent.halite-me.halite > 0:
value = -(opponent.halite-me.halite)
else:
value = (opponent.halite-me.halite) * 5
if value > v:
v = value
idx = i
return board.opponents[idx]
def get_immediate_danger(team):
res = np.zeros((state['configuration'].size,state['configuration'].size))
enemy = np.zeros((state['configuration'].size,state['configuration'].size))
for i in range(state['playerNum']):
if i == team:
continue
enemy += np.where(state['shipHalite'][i]==0,1,0)
for axis in range(2):
secondAxis = 0 if axis == 1 else 1
for direction in [-1,1]:
N = enemy.copy()
N += np.roll(enemy,direction,axis=axis)
N += np.roll(np.roll(enemy,direction,axis=axis),1,axis=secondAxis)
N += np.roll(np.roll(enemy,direction,axis=axis),-1,axis=secondAxis)
N += np.roll(N,direction,axis=axis)
N += np.roll(N,direction,axis=axis)
'''N += np.roll(np.roll(enemy,direction*3,axis=axis),2,axis=secondAxis)
N += np.roll(np.roll(enemy,direction*3,axis=axis),-2,axis=secondAxis)'''
res += np.where(N>0,1,0)
danger = np.where(res>=4,1,0)
return danger
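# In get_immediate_danger(), each of the four (axis, direction) passes marks cells that a
# zero-cargo enemy ship could threaten from that side within a few moves; a cell counts as
# "trapped" only when all four passes flag it, i.e. the square is boxed in from every direction.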
# Direction from point s to point t
def direction_to(s: Point, t: Point) -> ShipAction:
    # Note: this version is overridden by the simpler direction_to defined further below.
    candidate = directions_to(s, t)
    if len(candidate) == 2:
        if dist(Point(s.x, 0), Point(t.x, 0)) > dist(Point(0, s.y), Point(0, t.y)):
            return candidate[1]
        else:
            return candidate[0]
    elif len(candidate) == 1:
        return candidate[0]
    else:
        return None
# Distance from point a to b
def dist(a: Point, b: Point) -> int:
N = state['configuration'].size
return min(abs(a.x - b.x), N - abs(a.x - b.x)) + min(abs(a.y - b.y), N - abs(a.y - b.y))
# Returns list of possible directions
def directions_to(s: Point, t: Point) -> ShipAction:
N = state['configuration'].size
candidates = [] # [N/S, E/W]
if s.x-t.x != 0:
candidates.append(ShipAction.WEST if (s.x-t.x) % N < (t.x-s.x) % N else ShipAction.EAST)
if s.y-t.y != 0:
candidates.append(ShipAction.SOUTH if (s.y-t.y) % N < (t.y-s.y) % N else ShipAction.NORTH)
return candidates
# Deserialize an integer which represents a point
def unpack(n) -> Point:
N = state['configuration'].size
return Point(n // N, n % N)
# A default direction to target
def direction_to(s: Point, t: Point) -> ShipAction:
candidate = directions_to(s, t)
return random.choice(candidate) if len(candidate) > 0 else None
# Returns the "next" point of a ship at point s with shipAction d
def dry_move(s: Point, d: ShipAction) -> Point:
N = state['configuration'].size
if d == ShipAction.NORTH:
return s.translate(Point(0, 1),N)
elif d == ShipAction.SOUTH:
return s.translate(Point(0, -1),N)
elif d == ShipAction.EAST:
return s.translate(Point(1, 0),N)
elif d == ShipAction.WEST:
return s.translate(Point(-1, 0),N)
else:
return s
# Returns opposite direction
def opp_direction(d: ShipAction):
if d == ShipAction.NORTH:
return ShipAction.SOUTH
if d == ShipAction.SOUTH:
return ShipAction.NORTH
if d == ShipAction.WEST:
return ShipAction.EAST
if d == ShipAction.EAST:
return ShipAction.WEST
return None
# Returns list of len 4 of adjacent points to a point
def get_adjacent(point):
N = state['configuration'].size
res = []
for offX, offY in ((0,1),(1,0),(0,-1),(-1,0)):
res.append(point.translate(Point(offX,offY),N))
return res
def safe_naive(s,t,blocked):
for direction in directions_to(s.position,t):
target = dry_move(s.position,direction)
if not blocked[target.x][target.y]:
return direction
return None
def move_cost(s : Ship, t : Point, p : Point):
navigationWeights = weights[6]
cost = state[s]['danger'][p.x][p.y] * navigationWeights[1]
c = state['board'].cells[p]
if c.ship != None and c.ship.player_id != state['me']:
if direction_to(t,s.position) != direction_to(t,p):
cost += 1
if s.halite > 0 and state['trapped'][state['me']][s.position.x][s.position.y]:
cost += 5
return cost
# Dijkstra's movement
def d_move(s : Ship, t : Point, inBlocked):
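    # Overview: build a blocked-cell map (allowing a ram only on the intended target), then run
    # Dijkstra from the ship to the target with move_cost() as an extra edge weight. If no path
    # exists, fall back to a safe naive step, a random free neighbour, or micro_run(); finally,
    # if the chosen cell holds an ally that has not moved yet, resolve that ally's move first.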
nextMap = state['next']
sPos = s.position
blocked = inBlocked + nextMap
# Check if we are trying to attack
if state['board'].cells[t].ship != None:
target = state['board'].cells[t].ship
if target.player_id != state['me'] and target.halite == s.halite:
blocked[t.x][t.y] -= 1
elif state['board'].cells[t].shipyard != None and state['board'].cells[t].shipyard.player_id != state['me']:
blocked[t.x][t.y] -= 1
# Don't ram stuff thats not the target.
if state['board'].step < state['configuration']['episodeSteps'] - state['configuration'].size * 1.5:
blocked += np.where(state['enemyShipHalite'] <= s.halite,1,0)
temp = np.zeros(blocked.shape)
tot = 0
for pos in get_adjacent(sPos):
if state['allyShipyard'][pos.x][pos.y]:
continue
if blocked[pos.x][pos.y] > 0:
tot += 1
else:
for tPos in get_adjacent(pos):
if state['enemyShipHalite'][tPos.x][tPos.y] <= s.halite:
if tPos == t:
continue
tot += 1
temp[pos.x][pos.y] = 1
break
if not(tot == 4 and (state['board'].cells[sPos].halite > 0 or nextMap[sPos.x][sPos.y])):
blocked += temp
blocked = np.where(blocked>0,1,0)
desired = None
#Stay still
if sPos == t or nextMap[t.x][t.y]:
#Someone with higher priority needs position, must move. Or being attacked.
if blocked[t.x][t.y]:
for processPoint in get_adjacent(sPos):
if not blocked[processPoint.x][processPoint.y]:
#nextMap[processPoint.x][processPoint.y] = 1
desired = direction_to(sPos,processPoint)
t = processPoint
if desired == None:
target = micro_run(s)
t = dry_move(sPos,target)
desired = target
else:
t = sPos
desired = None
else:
#Dijkstra
pred = {}
calcDist = {}
pq = PriorityQueue()
pqMap = {}
pqMap[dist(sPos,t)] = [sPos]
pq.put(dist(sPos,t))
pred[sPos] = sPos
calcDist[sPos] = dist(sPos,t)
# Main
while not pq.empty():
if t in calcDist:
break
currentPoint = pqMap.get(pq.get()).pop()
for processPoint in get_adjacent(currentPoint):
if blocked[processPoint.x][processPoint.y] or processPoint in calcDist:
continue
calcDist[processPoint] = calcDist[currentPoint] + 1 + move_cost(s,t,processPoint)
priority = calcDist[processPoint]
pqMap[priority] = pqMap.get(priority,[])
pqMap[priority].append(processPoint)
pq.put(priority)
pred[processPoint] = currentPoint
if not t in pred:
# Can go in general direction
res = safe_naive(s,t,blocked)
if res != None:
t = dry_move(s.position,res)
desired = res
else:
#Random move
for processPoint in get_adjacent(sPos):
if not blocked[processPoint.x][processPoint.y]:
#nextMap[processPoint.x][processPoint.y] = 1
t = processPoint
desired = direction_to(sPos,processPoint)
# Run
if desired == None and blocked[sPos.x][sPos.y]:
target = micro_run(s)
t = dry_move(sPos,target)
desired = target
elif not blocked[sPos.x][sPos.y]:
t = sPos
desired = None
else:
# Path reconstruction
while pred[t] != sPos:
t = pred[t]
desired = direction_to(sPos,t)
# Reduce collisions
if desired != None and state['board'].cells[t].ship != None and state['board'].cells[t].ship.player_id == state['me']:
target = state['board'].cells[t].ship
s.next_action = desired
if action[target] != True:
nextMap[t.x][t.y] = 1
result = process_action(action[target])
# Going there will kill it
if result == None or result == ShipAction.CONVERT:
desired = d_move(s,t,inBlocked)
t = dry_move(sPos,desired)
nextMap[t.x][t.y] = 1
return desired
# Ship might die, RUN!
def micro_run(s):
sPos = s.position
nextMap = state['next']
if state[s]['blocked'][sPos.x][sPos.y]:
if s.halite > 400:
return ShipAction.CONVERT
score = [0,0,0,0]
# Preprocess
directAttackers = 0
for i,pos in enumerate(get_adjacent(sPos)):
if state['enemyShipHalite'][pos.x][pos.y] < s.halite:
directAttackers += 1
# Calculate score
for i,pos in enumerate(get_adjacent(sPos)):
score[i] = 0
for j,tPos in enumerate(get_adjacent(sPos)):
if state['enemyShipHalite'][tPos.x][tPos.y] < s.halite:
score[i] -= 0.5
if state['enemyShipHalite'][pos.x][pos.y] < s.halite:
score[i] -= 0.5 + 1/directAttackers
score[i] += state['negativeControlMap'][pos.x][pos.y] * 0.01
# Select best position
        i, maximum = 0, -math.inf  # scores can all be negative, so start from -inf
for j, thing in enumerate(score):
if thing > maximum:
i = j
maximum = thing
return direction_to(sPos,get_adjacent(sPos)[i])
else:
return None
# Key function
# For a ship, return the inherent "value" of the ship to get to a target cell
def get_reward(ship,target):
cell = target[0]
res = 0
# Don't be stupid
if state[ship]['blocked'][cell.position.x][cell.position.y] and cell.shipyard == None:
res = 0
elif target[1] == 'cell':
# Mining reward
if (cell.ship is None or cell.ship.player_id == state['me']) and cell.halite > 0:
res = mine_reward(ship,cell)
elif cell.shipyard is None and cell.halite == 0 and (cell.ship is None or cell.ship.player_id == state['me']):
res = control_reward(ship,cell)
elif cell.ship is not None and cell.ship.player_id != state['me']:
res = attack_reward(ship,cell)
elif cell.shipyard is not None and cell.shipyard.player_id == state['me']:
res = return_reward(ship,cell)
elif cell.shipyard is not None and cell.shipyard.player_id != state['me']:
res = attack_reward(ship,cell)
elif target[1] == 'guard':
res = guard_reward(ship,cell)
return res
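# get_reward() dispatches on what occupies the target cell: a halite cell -> mine_reward, an
# empty zero-halite cell -> control_reward, an enemy ship or shipyard -> attack_reward, an own
# shipyard -> return_reward, and 'guard' targets -> guard_reward.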
def control_reward(ship,cell):
return 0
sPos = ship.position
cPos = cell.position
if ship.halite > 0 or dist(cPos,state['closestShipyard'][cPos.x][cPos.y]) <= 2:
return 0
res = 0
for pos in get_adjacent(cPos):
tCell = state['board'].cells[pos]
if tCell.halite > 0:
res += 3.5
res -= dist(sPos,cPos) + dist(cPos,state['closestShipyard'][cPos.x][cPos.y])
return res
def guard_reward(ship,cell):
cPos = cell.position
sPos = ship.position
guardWeights = weights[5]
if len(state['enemyShips']) == 0:
return 0
closestEnemy = closest_thing(ship.position,state['enemyShips'])
if dist(sPos,cPos) > dist(closestEnemy.position,cPos):
return 0
elif ship.halite != 0 and dist(sPos,cPos) >= dist(closestEnemy.position,cPos):
return 0
# Check if we want to build
if cell.shipyard == max(state['myShipyards'],key=lambda shipyard: state['haliteSpread'][shipyard.position.x][shipyard.position.y]):
if state['currentHalite'] >= 500 and state['spawn']:
return 0
return guardWeights[0] / (dist(closestEnemy.position,cPos) * max(dist(sPos,cPos),1))
def mine_reward(ship,cell):
mineWeights = weights[1]
sPos = ship.position
cPos = cell.position
cHalite = cell.halite
shipyardDist = dist(cPos,state['closestShipyard'][cPos.x][cPos.y])
if state['generalDangerMap'][cPos.x][cPos.y] > 1.5 and state['trapped'][state['me']][cPos.x][cPos.y]:
return 0
# Halite per turn
halitePerTurn = 0
# Occupied cell
if cell.ship != None and cell.ship.player_id == state['me'] and cell.ship.halite <= ship.halite:
# Current cell multiplier
if sPos == cPos:
if cHalite > state['haliteMean'] * mineWeights[2] and cHalite > 10 and ship.halite > 0:
cHalite = cHalite * mineWeights[1]
# Farming!
if cPos in farms and cell.halite < min(500,(state['board'].step + 10*15)) and state['board'].step < state['configuration']['episodeSteps'] - 50:
return 0
if shipyardDist >= 3:
# Don't mine if enemy near
for pos in get_adjacent(cPos):
if state['enemyShipHalite'][pos.x][pos.y] <= ship.halite:
return 0
if state['trapped'][state['me']][cPos.x][cPos.y]:
return 0
# Dangerous area
cHalite += state['negativeControlMap'][cPos.x][cPos.y] * mineWeights[4]
if state['enemyShipHalite'][cPos.x][cPos.y] <= ship.halite:
return 0
for pos in get_adjacent(cPos):
if state['enemyShipHalite'][pos.x][pos.y] <= ship.halite:
return 0
'''
if state['currentHalite'] > 1000: # Do we need some funds to do stuff?
# No
halitePerTurn = halite_per_turn(cHalite,dist(sPos,cPos),0)
else:
# Yes
halitePerTurn = halite_per_turn(cHalite,dist(sPos,cPos),dist(cPos,state['closestShipyard'][cPos.x][cPos.y]))
'''
halitePerTurn = halite_per_turn(cHalite,dist(sPos,cPos),shipyardDist)
# Surrounding halite
spreadGain = state['haliteSpread'][cPos.x][cPos.y] * mineWeights[0]
res = halitePerTurn + spreadGain
if state[ship]['danger'][cPos.x][cPos.y] > 1.3:
res -= mineWeights[3] ** state[ship]['danger'][cPos.x][cPos.y]
return res
def attack_reward(ship,cell):
attackWeights = weights[2]
cPos = cell.position
sPos = ship.position
d = dist(ship.position,cell.position)
# Don't even bother
if dist(sPos,cPos) > 6:
return 0
res = 0
# It's a ship!
if cell.ship != None:
# Nearby
if cPos in get_adjacent(sPos) and state['controlMap'][cPos.x][cPos.y] < 0.5:
# Try to reduce collision num
for pos in get_adjacent(cPos):
if state['enemyShipHalite'][pos.x][pos.y] <= ship.halite:
return 0
if cell.ship.halite > ship.halite:
# Defend the farm!
if cPos in farms:
return cell.halite - d
res = max([cell.halite**(attackWeights[4]),state['controlMap'][cPos.x][cPos.y]*attackWeights[2]]) - d*attackWeights[3]
elif len(state['myShips']) > 15:
res = state['controlMap'][cPos.x][cPos.y] * 100 / d**2
if ship.halite != 0:
res = res / 3
# It's a shipyard!
elif len(state['myShips']) > 10 and ship.halite == 0:
if len(state['myShips']) > 15 and cell.shipyard.player == state['killTarget']:
# Is it viable to attack
viable = True
for pos in get_adjacent(cPos):
target = state['board'].cells[pos].ship
if target != None and target.player_id != state['me'] and target.halite <= ship.halite:
viable = False
break
if viable:
res = attackWeights[1] / d**2
res = max(res,state['controlMap'][cPos.x][cPos.y] * 100 / d**2)
return res * attackWeights[0]
def return_reward(ship,cell):
returnWeights = weights[3]
sPos = ship.position
cPos = cell.position
if sPos == cPos :
return 0
res = 0
if state['currentHalite'] > 1000:
res = ship.halite / (dist(sPos,cPos)) * returnWeights[0]
else:
res = ship.halite / (dist(sPos,cPos))
res = res * returnWeights[1]
return res
def shipyard_value(cell):
# Features
shipyardWeights = weights[0]
cPos = cell.position
if state['board'].step > 310:
return 0
nearestShipyard = closest_thing(cPos,state['shipyards'])
nearestShipyardDistance = 1
if nearestShipyard != None:
nearestShipyardDistance = dist(nearestShipyard.position,cPos)
negativeControl = min(0,state['controlMap'][cPos.x][cPos.y])
if len(state['myShips']) > 0:
negativeControl = max(negativeControl-0.5 ** dist(closest_thing(cPos,state['myShips']).position,cPos),state['negativeControlMap'][cPos.x][cPos.y])
haliteSpread = state['haliteSpread'][cPos.x][cPos.y] - state['haliteMap'][cPos.x][cPos.y]
shipShipyardRatio = len(state['myShips']) / max(1,len(state['myShipyards']))
# Hard limit on range and halite spread
if nearestShipyardDistance <= 5 or haliteSpread <= 200:
return 0
# Base halite multiplier
res = haliteSpread * shipyardWeights[0]
# Negative control
res += negativeControl * shipyardWeights[1]
# Nearest shipyard
res = res * nearestShipyardDistance ** shipyardWeights[2]
# Ship shipyard ratio multiplier
res = res * shipShipyardRatio ** shipyardWeights[3]
# Final multiplier and bias
res = res * shipyardWeights[4] + shipyardWeights[5]
return res
def ship_value():
if len(state['myShips']) >= 60:
return 0
res = state['haliteMean'] * 0.25 * (state['configuration']['episodeSteps']- 30 - state['board'].step) * weights[4][0]
res += (len(state['ships']) - len(state['myShips'])) ** 1.5 * weights[4][1]
res += len(state['myShips']) ** 1.5 * weights[4][2]
return res
# The final function
@board_agent
def agent(board):
print("Turn =",board.step+1)
# Init
if board.step == 0:
init(board)
# Update
update(board)
# Convert
convert_tasks()
# Farm
#farm_tasks()
# Ship
ship_tasks()
# Spawn
spawn_tasks()
``` |
{
"source": "44t4nk1/IP-App-Price-Tracker",
"score": 3
} |
#### File: src/invoice_recognition/ipapp_bill_ocr.py
```python
import easyocr
import PIL
from PIL import ImageDraw
import keras
from keras.preprocessing.image import save_img
from keras.preprocessing.image import img_to_array
reader = easyocr.Reader(['en'], gpu = False)
im = PIL.Image.open("1184-receipt.jpg")
bounds = reader.readtext('1184-receipt.jpg')
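# Each entry of `bounds` is (box, text, confidence): `box` holds the four corner points of a
# detected text region, which draw_boxes() below outlines on the image.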
def draw_boxes(image, bounds, color='yellow', width=2):
draw = ImageDraw.Draw(image)
for bound in bounds:
p0, p1, p2, p3 = bound[0]
draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
return image
print(draw_boxes(im, bounds))
for i in bounds:
print(i[1])
img_array = img_to_array(im)
save_img('1184-receipt-boxed.jpg', img_array)
``` |
{
"source": "44xtc44/Flask_SQLAlchemy_Project_Template",
"score": 3
} |
#### File: Flask_SQLAlchemy_Project_Template/Flask_SQLAlchemy_Project_Template/__init__.py
```python
from os import path
from flask import Flask
from .database import db
# import for creating test items, can be removed
from Flask_SQLAlchemy_Project_Template.models import Users, InternalUse
this_dir = path.abspath(path.dirname(__file__))
db_path = path.join(this_dir, 'database.db')
def create_app():
app = Flask(__name__)
app.config['DEBUG'] = False
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + db_path
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = "False" # supress FSADeprecationWarning
# flask initializes Alchemy with the application
db. init_app(app)
# Register Blueprints and routes, one route is also sufficient
from Flask_SQLAlchemy_Project_Template.routes_internal import routes as internal_routes
from Flask_SQLAlchemy_Project_Template.routes_user import routes as user_routes
app.register_blueprint(internal_routes.internal_bp)
app.register_blueprint(user_routes.user_bp)
return app
def setup_database(flask_app):
with flask_app.app_context():
# alchemy creates the db from SQLALCHEMY_DATABASE_URI and models.py classes
db.create_all()
# only for test items, can be removed
db.session.add(InternalUse(browser_open=1, statistics='98.6', commercials='sold'))
db.session.add(InternalUse(browser_open=0, statistics=70.2, commercials='bid'))
db.session.add(Users(username='pi', email='<EMAIL>', profile='ceo'))
db.session.add(Users(username='pa', email='<EMAIL>', profile='chief'))
db.session.add(Users(username='po', email='<EMAIL>', profile='leader'))
db.session.commit()
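# Hypothetical usage (not part of this template): a run.py would typically call
#   app = create_app(); setup_database(app); app.run()
# so the tables and the test rows above exist before the first request is served.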
``` |
{
"source": "450703035/Solve-a-Sudoku-with-AI",
"score": 4
} |
#### File: 450703035/Solve-a-Sudoku-with-AI/sudoku.py
```python
import pdb
rows = 'ABCDEFGHI'
cols = '123456789'
def cross(a, b):
return [s+t for s in a for t in b]
boxes = cross(rows, cols)
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
unitlist = row_units + column_units + square_units
units = dict((s, [u for u in unitlist if s in u]) for s in boxes)
peers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)
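# Example: units['A1'] is the list of the three units containing 'A1' (its row, its column and
# its 3x3 square), and peers['A1'] is the set of the 20 other boxes that share a unit with 'A1'.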
def display(values):
"""
Display the values as a 2-D grid.
Input: The sudoku in dictionary form
Output: None
"""
width = 1+max(len(values[s]) for s in boxes)
line = '+'.join(['-'*(width*3)]*3)
for r in rows:
print(''.join(values[r+c].center(width)+('|' if c in '36' else '')
for c in cols))
if r in 'CF': print(line)
return
# WARNING! We've modified this function to return '123456789' instead of '.' for boxes with no value.
# Look at the explanation above in the text.
def grid_values(grid):
"""Convert grid string into {<box>: <value>} dict with '123456789' value for empties.
Args:
grid: Sudoku grid in string form, 81 characters long
Returns:
Sudoku grid in dictionary form:
- keys: Box labels, e.g. 'A1'
- values: Value in corresponding box, e.g. '8', or '123456789' if it is empty.
"""
values = []
all_digits = '123456789'
for c in grid:
if c == '.':
values.append(all_digits)
elif c in all_digits:
values.append(c)
assert len(values) == 81
return dict(zip(boxes, values))
def eliminate(values):
"""Eliminate values from peers of each box with a single value.
Go through all the boxes, and whenever there is a box with a single value,
eliminate this value from the set of values of all its peers.
Args:
values: Sudoku in dictionary form.
Returns:
Resulting Sudoku in dictionary form after eliminating values.
"""
solved_values = [box for box in values.keys() if len(values[box]) == 1]
for box in solved_values:
digit = values[box]
for peer in peers[box]:
values[peer] = values[peer].replace(digit,'')
return values
def only_choice(values):
"""Finalize all values that are the only choice for a unit.
Go through all the units, and whenever there is a unit with a value
that only fits in one box, assign the value to this box.
Input: Sudoku in dictionary form.
Output: Resulting Sudoku in dictionary form after filling in only choices.
"""
# TODO: Implement only choice strategy here
for unit in unitlist:
for digit in '123456789':
dplaces = [box for box in unit if digit in values[box]]
if len(dplaces) == 1:
values[dplaces[0]] = digit
return values
def reduce_puzzle(values):
"""
Iterate eliminate() and only_choice(). If at some point, there is a box with no available values, return False.
If the sudoku is solved, return the sudoku.
If after an iteration of both functions, the sudoku remains the same, return the sudoku.
Input: A sudoku in dictionary form.
Output: The resulting sudoku in dictionary form.
"""
stalled = False
while not stalled:
# Check how many boxes have a determined value
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
# Use the Eliminate Strategy
values = eliminate(values)
# Use the Only Choice Strategy
values = only_choice(values)
# Check how many boxes have a determined value, to compare
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
# If no new values were added, stop the loop.
stalled = solved_values_before == solved_values_after
# Sanity check, return False if there is a box with zero available values:
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
def search(values):
"Using depth-first search and propagation, create a search tree and solve the sudoku."
# First, reduce the puzzle using the previous function
values = reduce_puzzle(values)
if values is False:
return False ## Failed earlier
if all(len(values[s]) == 1 for s in boxes):
return values ## Solved!
# Choose one of the unfilled squares with the fewest possibilities
n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
# Now use recursion to solve each one of the resulting sudokus, and if one returns a value (not False), return that answer!
for value in values[s]:
new_sudoku = values.copy()
new_sudoku[s] = value
attempt = search(new_sudoku)
if attempt:
return attempt
# If you're stuck, see the solution.py tab!
test = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..'
grid2 = '4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......'
test1 = search(grid_values(grid2))
display(test1)
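# Quick usage note (not part of the original exercise): display(grid_values(grid2))
# shows the unsolved grid with '123456789' in every empty box, and search() returns a
# falsy value when it cannot solve a puzzle, so guard the final display with `if test1:`
# when experimenting with other grids.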
``` |
{
"source": "452366we/python_web",
"score": 3
} |
#### File: mfresh_api/mfresh_api/urls.py
```python
from django.urls import path
from django.http import HttpResponse
from user.views import userLogin,userRegister,userCheckUname,userCheckPhone
from news.views import newsList,newsDetail
from product.views import productList,productDetail
from cart.views import cartDetailAdd,cartDetailList,cartDetailDelete,cartDetailUpdate
from user.models import MfUser
def handleHome(req):
res=HttpResponse('<h1>welcome to mfresh!</h1><hr>')
return res
urlpatterns = [
path('',handleHome),
path('user/login',userLogin),
path('user/register',userRegister),
path('user/check/uname',userCheckUname),
path('user/check/phone',userCheckPhone),
path('news/list',newsList),
path('news/detail',newsDetail),
path('product/list',productList),
path('product/detail',productDetail)
]
``` |
{
"source": "45258E9F/IntPTI",
"score": 2
} |
#### File: IntPTI/scripts/generate-report-with-graphs.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.dont_write_bytecode = True # prevent creation of .pyc files
import glob
import os
import argparse
import subprocess
import signal
for egg in glob.glob(os.path.join(os.path.dirname(__file__), os.pardir, 'lib', 'python-benchmark', '*.egg')):
sys.path.insert(0, egg)
def call_dot(infile, outpath):
(basefilename, ext) = os.path.splitext(os.path.basename(infile))
outfile = os.path.join(outpath, basefilename + '.svg')
#print (' generating ' + infile)
code = 0
try:
p = subprocess.Popen(['dot', '-Nfontsize=10', '-Efontsize=10',
'-Efontname=Courier New', '-Tsvg', '-o',
outfile, infile])
code = p.wait()
except KeyboardInterrupt: # ctrl + c
print (' skipping ' + infile)
p.terminate()
try:
os.remove(outfile) # sometimes outfile is written half, so cleanup
except OSError:
pass # if outfile is not written, removing is impossible
return False
except OSError as e:
if e.errno == 2:
sys.exit('Error: Could not call "dot" from GraphViz to create graph, please install it\n({}).'.format(e))
else:
sys.exit('Error: Could not call "dot" from GraphViz to create graph\n({}).'.format(e))
if code != 0:
sys.exit('Error: Could not call "dot" from GraphViz to create graph {0} (return code {1}).'.format(outfile, code))
return True
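# Usage sketch (paths are illustrative): call_dot('output/cfa__main.dot', 'output')
# writes 'output/cfa__main.svg' and returns True; it returns False only when the user
# interrupts dot with Ctrl+C, and exits the script on any other GraphViz failure.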
def generateReport(cpaoutdir, functions, argfilepath, outfilepath, tplfilepath):
fin = open(tplfilepath, 'r')
fout = open(outfilepath, 'w')
for line in fin:
if 'CFAFUNCTIONGRAPHS' in line:
writeCFA(cpaoutdir, functions, fout)
elif 'ARGGRAPHS' in line:
writeARG(argfilepath, fout)
else:
fout.write(line)
print ('Report generated in {0}'.format(outfilepath))
try:
with open(os.devnull, 'w') as devnull:
subprocess.Popen(['xdg-open', outfilepath],
stdout=devnull, stderr=devnull)
except OSError:
pass
fin.close()
fout.close()
def writeCFA(cpaoutdir, functions, outf):
i = 0
for func in functions:
start = False
cfafile = open(os.path.join(cpaoutdir, 'cfa__' + func + '.svg'))
for line in cfafile:
if start:
line = line.replace('class="node"','class="node" ng-dblclick="clickedCFAElement($event)"')
line = line.replace('class="edge"','class="edge" ng-dblclick="clickedCFAElement($event)"')
outf.write(line)
if '<svg' in line:
outf.write(line[:5] + " ng-show = \"cfaFunctionIsSet(" + str(i) + ")\" " + line[5:])
start = True
i = i+1
cfafile.close()
def writeARG(argfilepath, outf):
start = False
argfile = open(argfilepath[:-4] + '.svg')
for line in argfile:
if '<svg' in line:
start = True
if start:
line = line.replace('class="node"','class="node" ng-dblclick="clickedARGElement($event)"')
line = line.replace('class="edge"','class="edge" ng-dblclick="clickedARGElement($event)"')
outf.write(line)
argfile.close()
def signal_handler(signal, frame):
print("Received a keyboard interrupt. Exiting.")
sys.exit(0)
def main():
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser(
description="Generate a HTML report with graphs from the CPAchecker output."
)
parser.add_argument("-c", "--config",
dest="configfile",
default="output/UsedConfiguration.properties",
help="""File with all the used CPAchecker configuration files
(default: output/UsedConfiguration.properties)"""
)
options = parser.parse_args()
print ('Generating report')
# read config file
config = {}
try:
with open(options.configfile) as configfile:
for line in configfile:
(key, val) = line.split("=", 1)
config[key.strip()] = val.strip()
except IOError as e:
if e.errno:
sys.exit('Could not find output of CPAchecker in {}. Please specify the correct path with option --config\n({}).'.format(options.configfile, e))
else:
sys.exit('Could not read output of CPAchecker in {}\n({}).'.format(options.configfile, e))
if not config.get('analysis.programNames'):
sys.exit('CPAchecker output does not specify path to analyzed program. Cannot generate report.')
# extract paths to all necessary files from config
cpaoutdir = config.get('output.path', 'output/')
argfilepath = os.path.join(cpaoutdir, config.get('cpa.arg.file', 'ARG.dot'))
errorpath = os.path.join(cpaoutdir, config.get('cpa.arg.errorPath.json', 'ErrorPath.%d.json'))
countexdir = "output/report"
#if there is an ARG.dot create an SVG in the report dir
if os.path.isfile(argfilepath):
print ('Generating SVG for ARG (press Ctrl+C if this takes too long)')
call_dot(argfilepath, cpaoutdir)
print ('Generating SVGs for CFA')
functions = [x[5:-4] for x in os.listdir(cpaoutdir) if x.startswith('cfa__') and x.endswith('.dot')]
errorpathcount = len(glob.glob(errorpath.replace('%d', '*')))
functions = sorted(functions)
for func in functions:
call_dot(os.path.join(cpaoutdir, 'cfa__' + func + '.dot'), cpaoutdir)
if errorpathcount != 0:
for index in range(errorpathcount):
outfilepath = os.path.join(countexdir, 'report_' + str(index) + '.html')
tplfilepath = os.path.join(countexdir, 'report_withoutGraphs_' + str(index) + '.html')
generateReport(cpaoutdir, functions, argfilepath, outfilepath, tplfilepath)
else:
outfilepath = os.path.join(countexdir, 'report.html')
tplfilepath = os.path.join(countexdir, 'report_withoutGraphs.html')
generateReport(cpaoutdir, functions, argfilepath, outfilepath, tplfilepath)
if __name__ == '__main__':
main()
```
#### File: IntPTI/scripts/report-generator.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.dont_write_bytecode = True # prevent creation of .pyc files
import glob
import os
import time
import argparse
import subprocess
import json
for egg in glob.glob(os.path.join(os.path.dirname(__file__), os.pardir, 'lib', 'python-benchmark', '*.whl')):
sys.path.insert(0, egg)
import tempita
def call_dot(infile, outpath):
(basefilename, ext) = os.path.splitext(os.path.basename(infile))
outfile = os.path.join(outpath, basefilename + '.svg')
#print (' generating ' + infile)
code = 0
try:
p = subprocess.Popen(['dot', '-Nfontsize=10', '-Efontsize=10',
'-Efontname=Courier New', '-Tsvg', '-o',
outfile, infile])
code = p.wait()
except KeyboardInterrupt: # ctrl + c
print (' skipping ' + infile)
p.terminate()
try:
os.remove(outfile) # sometimes outfile is written half, so cleanup
except OSError:
pass # if outfile is not written, removing is impossible
return False
except OSError as e:
if e.errno == 2:
sys.exit('Error: Could not call "dot" from GraphViz to create graph, please install it\n({}).'.format(e))
else:
sys.exit('Error: Could not call "dot" from GraphViz to create graph\n({}).'.format(e))
if code != 0:
sys.exit('Error: Could not call "dot" from GraphViz to create graph {0} (return code {1}).'.format(outfile, code))
return True
def readfile(filepath, optional=False):
if not os.path.isfile(filepath):
if optional:
return None
raise Exception('File not found: ' + filepath)
#print ('Reading: ' + filepath)
with open(filepath, 'r') as fp:
return fp.read()
def generateReport(outfilepath, template, templatevalues):
with open(outfilepath, 'w') as outf:
outf.write(template.substitute(templatevalues))
print ('Report generated in {0}'.format(outfilepath))
try:
with open(os.devnull, 'w') as devnull:
subprocess.Popen(['xdg-open', outfilepath],
stdout=devnull, stderr=devnull)
except OSError:
pass
def main():
parser = argparse.ArgumentParser(
description="Generate a HTML report with graphs from the CPAchecker output."
)
parser.add_argument("-r", "--reportpath",
dest="reportdir",
help="Directory for report (default: CPAchecker output path)"
)
parser.add_argument("-c", "--config",
dest="configfile",
default="output/UsedConfiguration.properties",
help="""File with all the used CPAchecker configuration files
(default: output/UsedConfiguration.properties)"""
)
options = parser.parse_args()
print ('Generating report')
# read config file
config = {}
try:
with open(options.configfile) as configfile:
for line in configfile:
(key, val) = line.split("=", 1)
config[key.strip()] = val.strip()
except IOError as e:
if e.errno:
sys.exit('Could not find output of CPAchecker in {}. Please specify the correct path with option --config\n({}).'.format(options.configfile, e))
else:
sys.exit('Could not read output of CPAchecker in {}\n({}).'.format(options.configfile, e))
if not config.get('analysis.programNames'):
sys.exit('CPAchecker output does not specify path to analyzed program. Cannot generate report.')
# extract paths to all necessary files from config
cpaoutdir = config.get('output.path', 'output/')
sourcefiles = [sourcefile.strip() for sourcefile in config.get('analysis.programNames').split(',')]
assert sourcefiles, "sourcefile not available"
logfile = os.path.join(cpaoutdir, config.get('log.file', 'CPALog.txt'))
statsfile = os.path.join(cpaoutdir, config.get('statistics.file', 'Statistics.txt'))
argfilepath = os.path.join(cpaoutdir, config.get('cpa.arg.file', 'ARG.dot'))
errorpathgraph = os.path.join(cpaoutdir, config.get('cpa.arg.errorPath.graph', 'ErrorPath.%d.dot'))
errorpath = os.path.join(cpaoutdir, config.get('cpa.arg.errorPath.json', 'ErrorPath.%d.json'))
combinednodes = os.path.join(cpaoutdir, 'combinednodes.json')
cfainfo = os.path.join(cpaoutdir, 'cfainfo.json')
fcalledges = os.path.join(cpaoutdir, 'fcalledges.json')
scriptdir = os.path.dirname(__file__)
reportdir = options.reportdir or cpaoutdir
tplfilepath = os.path.join(scriptdir, 'report-template.html')
if not os.path.isdir(reportdir):
os.makedirs(reportdir)
#if there is an ARG.dot create an SVG in the report dir
if os.path.isfile(argfilepath):
print ('Generating SVG for ARG (press Ctrl+C if this takes too long)')
call_dot(argfilepath, reportdir)
#if not call_dot(argfilepath, reportdir) and os.path.isfile(errorpathgraph):
# if call_dot(errorpathgraph, reportdir):
# os.rename(os.path.join(reportdir, 'ErrorPath.svg'),
# os.path.join(reportdir, 'ARG.svg'))
print ('Generating SVGs for CFA')
functions = [x[5:-4] for x in os.listdir(cpaoutdir) if x.startswith('cfa__') and x.endswith('.dot')]
for func in functions:
call_dot(os.path.join(cpaoutdir, 'cfa__' + func + '.dot'), reportdir)
template = tempita.HTMLTemplate.from_filename(tplfilepath, encoding='UTF-8')
# prepare values that may be used in template
templatevalues = {}
templatevalues['time_generated'] = time.strftime("%a, %d %b %Y %H:%M", time.localtime())
templatevalues['sourcefilenames'] = sourcefiles
templatevalues['sourcefilecontents']= [readfile(sourcefile, optional=True) for sourcefile in sourcefiles]
templatevalues['logfile'] = readfile(logfile, optional=True)
templatevalues['statistics'] = readfile(statsfile, optional=True)
templatevalues['conffile'] = readfile(options.configfile, optional=True)
# JSON data for script
templatevalues['errorpath'] = '[]'
templatevalues['functionlist'] = json.dumps(functions)
templatevalues['combinednodes'] = readfile(combinednodes)
templatevalues['cfainfo'] = readfile(cfainfo)
templatevalues['fcalledges'] = readfile(fcalledges)
errorpathcount = len(glob.glob(errorpath.replace('%d', '*')))
print("Found %d error paths" % errorpathcount)
if errorpathcount > 0:
for i in range(errorpathcount):
outfilepath = os.path.join(reportdir, 'ErrorPath.%d.html' % i)
templatevalues['title'] = '%s (error path %d)' % (os.path.basename(sourcefiles[0]), i) # use the first sourcefile as name
try:
templatevalues['errorpath'] = readfile(errorpath % i)
except Exception as e:
print('Could not read error path number %d: %s' % (i, e))
else:
generateReport(outfilepath, template, templatevalues)
else:
outfilepath = os.path.join(reportdir, 'report.html')
templatevalues['title'] = os.path.basename(sourcefiles[0]) # use the first sourcefile as name
generateReport(outfilepath, template, templatevalues)
if __name__ == '__main__':
main()
```
#### File: IntPTI/scripts/witness_validation_web_cloud.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.dont_write_bytecode = True # prevent creation of .pyc files
import argparse
import glob
import logging
import os
import urllib.request as request
if os.path.basename(__file__) == 'witness_validation_web_cloud.py':
# try looking up additional libraries if not packaged
for egg in glob.glob(os.path.join(os.path.dirname(__file__), os.pardir, 'lib', 'python-benchmark', '*.whl')):
sys.path.insert(0, egg)
from benchmark.webclient import * # @UnusedWildImport
__version__ = '1.0'
DEFAULT_OUTPUT_PATH = "./"
def _create_argument_parser():
"""
Create a parser for the command-line options.
@return: an argparse.ArgumentParser instance
"""
parser = argparse.ArgumentParser(
description="Validate witness using CPAchecker in the cloud (without local installation).",
fromfile_prefix_chars='@')
parser.add_argument("--cloudMaster",
dest="cloud_master",
default="http://vcloud.sosy-lab.org/webclient/",
metavar="HOST",
help=argparse.SUPPRESS)
parser.add_argument("--cloudUser",
dest="cloud_user",
metavar="USER:PWD",
help=argparse.SUPPRESS)
parser.add_argument("--program",
dest="program_file",
metavar="FILE",
help="The path to the program file.",
required=True)
parser.add_argument("--witness",
dest="witness_file",
metavar="FILE",
help="The path to the witness file.",
required=True)
parser.add_argument("--configuration",
dest="configuration",
metavar="CONFIG",
help="The configuration used for the validation.")
parser.add_argument("-d", "--debug",
action="store_true",
help=argparse.SUPPRESS)
parser.add_argument("-o", "--outputpath",
dest="output_path", type=str,
default=DEFAULT_OUTPUT_PATH,
help="Output prefix for the generated results. "
+ "If the path is a folder files are put into it,"
+ "otherwise it is used as a prefix for the resulting files.")
parser.add_argument("--version",
action="version",
version="%(prog)s " + __version__)
return parser
def _setup_logging(config):
"""
Configure the logging framework.
"""
if config.debug:
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
level=logging.DEBUG)
else:
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
level=logging.INFO)
def _init(config):
"""
Sets _webclient if it is defined in the given config.
"""
if not config.cloud_master:
sys.exit("No URL of a VerifierCloud instance is given.")
webclient = WebInterface(config.cloud_master, config.cloud_user,
user_agent='witness_validation_web_cloud.py', version=__version__)
logging.info('Using %s version %s.', webclient.tool_name(), webclient.tool_revision())
return webclient
def _submit_run(webclient, config):
"""
Submits a single run using the web interface of the VerifierCloud.
@return: the run's result
"""
run_result_future = webclient.submit_witness_validation(\
config.witness_file, config.program_file, config.configuration, config.cloud_user)
webclient.flush_runs()
return run_result_future.result()
def _execute():
"""
Executes a single CPAchecker run in the VerifierCloud via the web front end.
All information is given by the command line arguments.
@return: the return value of CPAchecker
"""
arg_parser = _create_argument_parser()
config = arg_parser.parse_args()
_setup_logging(config)
webclient = _init(config)
try:
run_result = _submit_run(webclient, config)
return handle_result(run_result, config.output_path, config.witness_file,
handle_host_info=lambda x : None)
except request.HTTPError as e:
logging.warning(e.reason)
except WebClientError as e:
logging.warning(str(e))
finally:
webclient.shutdown()
if __name__ == "__main__":
try:
sys.exit(_execute())
except KeyboardInterrupt:
sys.exit(1)
``` |
{
"source": "452/micropython-examples",
"score": 3
} |
#### File: micropython-examples/scripts/web.py
```python
def s():
# minimal Ajax in Control Webserver
CONTENT = """\
HTTP/1.0 200 OK
Hello #{} from MicroPython!
"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 80))
s.listen(0) # just queue up some requests
counter=0
while True:
conn, addr = s.accept()
print("Got a connection from %s" % str(addr))
request = conn.recv(1024)
conn.sendall('HTTP/1.1 200 OK\nConnection: close\nServer: nanoWiPy\nContent-Type: text/html\n\n')
## print("Content = %s" % str(request))
request = str(request)
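# Requests like "GET /?Val=42 HTTP/1.1" are echoed back: the block below extracts
# everything between 'Val=' and the next space and sends it to the client.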
ib = request.find('Val=')
if ib > 0 :
ie = request.find(' ', ib)
Val = request[ib+4:ie]
print("Val =", Val)
conn.send(Val)
else:
# conn.send(bytes(CONTENT.format(counter), "ascii"))
with open('/lib/AiCWebpage.htm', 'r') as html:
conn.send(html.read())
conn.sendall('\n')
conn.close()
print("Connection with %s closed" % str(addr))
counter += 1
``` |
{
"source": "452sunny/CommoditySearch",
"score": 3
} |
#### File: TaoBaoSearchMachine-master/pyqt example/PyQtRes.py
```python
import sys
from PyQt4 import QtCore, QtGui, uic
class MyDialog(QtGui.QDialog):
def __init__(self):
QtGui.QWidget.__init__(self)
uic.loadUi("res.ui",self)
class MyWindow(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.setWindowTitle('PyQt')
self.resize(300,200)
gridlayout = QtGui.QGridLayout()
self.button = QtGui.QPushButton('CreateDialog')
gridlayout.addWidget(self.button,1 ,1)
self.setLayout(gridlayout)
self.connect(self.button, QtCore.SIGNAL('clicked()'),
self.OnButton)
def OnButton(self):
dialog = MyDialog()
r = dialog.exec_()
if r:
self.button.setText(dialog.lineEdit.text())
app = QtGui.QApplication(sys.argv)
mywindow = MyWindow()
mywindow.show()
app.exec_()
```
#### File: TaoBaoSearchMachine-master/pyqt example/table.py
```python
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import sys
def main():
app = QApplication(sys.argv)
table = QTableWidget()
tableItem = QTableWidgetItem()
# initiate table
table.setWindowTitle("QTableWidget Example @<EMAIL>")
table.resize(400, 250)
table.setRowCount(4)
table.setColumnCount(2)
title = QStringList()
a = QString('20')
b = QString('1.2')
title.append(a)
title.append(b)
title.append(a)
title.append(b)
title.sort()
print title[0],title[1],title[2],title[3]
#print title.count
#title.removeDuplicates()
#print title[0],title[1]
table.setHorizontalHeaderLabels(title)
# set data
table.setItem(0,0, QTableWidgetItem(title[0]))
table.setItem(0,1, QTableWidgetItem("Item (1,2)"))
table.setItem(1,0, QTableWidgetItem("Item (2,1)"))
table.setItem(1,1, QTableWidgetItem("Item (2,2)"))
table.setItem(2,0, QTableWidgetItem("Item (3,1)"))
table.setItem(2,1, QTableWidgetItem("Item (3,2)"))
table.setItem(3,0, QTableWidgetItem("Item (4,1)"))
table.setItem(3,1, QTableWidgetItem("Item (4,2)"))
# show table
table.show()
return app.exec_()
if __name__ == '__main__':
main()
```
#### File: TaoBao/taobao/taobaoSpyder.py
```python
import random
from time import sleep
from loguru import logger
from pyquery import PyQuery as pq
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# configure logging
logger.debug('this is a debug message')
logger.add('runtime.log')
class TaobaoSpyder:
def __init__(self):
self.url = 'https://login.taobao.com/member/login.jhtml'
options = webdriver.ChromeOptions()
# enable developer mode so sites are less likely to detect Selenium automation
options.add_experimental_option(
'excludeSwitches', ['enable-automation'])
self.browser = webdriver.Chrome(options=options)
self.wait = WebDriverWait(self.browser, 10) # 10 second timeout
def scan_Login(self):
"""Log in to Taobao by scanning the QR code."""
print("-- Please finish scanning the QR code within 30 seconds to log in!")
self.browser.get(self.url)
# switch to QR-code login
self.browser.implicitly_wait(30)
self.browser.find_element_by_class_name('login-switch').click()
sleep(20)
# login is only confirmed once the Taobao member nickname can be read
taobao_name = self.wait.until(EC.presence_of_element_located(
(By.CSS_SELECTOR, '.site-nav-bd > ul.site-nav-bd-l > li#J_SiteNavLogin > div.site-nav-menu-hd > div.site-nav-user > a.site-nav-login-info-nick ')))
print("{} is logged in".format(taobao_name))
def swipe_down(self, second):
"""Simulate scrolling down to browse the page."""
for i in range(int(second/0.1)):
# depending on the value of i, scroll down or back up
if(i%2==0):
js = "var q=document.documentElement.scrollTop=" + str(300+400*i)
else:
js = "var q=document.documentElement.scrollTop=" + str(200 * i)
self.browser.execute_script(js)
sleep(2)
js = "var q=document.documentElement.scrollTop=100000"
self.browser.execute_script(js)
sleep(3)
# crawl the "items I have bought" data from Taobao
def crawl_good_buy_data(self):
# scrape the data of items that have already been bought
self.browser.get("https://buyertrade.taobao.com/trade/itemlist/list_bought_items.htm")
# iterate over all pages
for page in range(1,100):
# wait until all purchased-item data on this page has finished loading
good_total = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#tp-bought-root > div.js-order-container')))
# get the source code of this page
html = self.browser.page_source
# parse the page source with pyquery
doc = pq(html)
# store the purchased-item data of this page
good_items = doc('#tp-bought-root .js-order-container').items()
# iterate over all items on this page
for item in good_items:
good_time_and_id = item.find('.bought-wrapper-mod__head-info-cell___29cDO').text().replace('\n',"").replace('\r',"")
good_merchant = item.find('.seller-mod__container___1w0Cx').text().replace('\n',"").replace('\r',"")
good_name = item.find('.sol-mod__no-br___1PwLO').text().replace('\n', "").replace('\r', "")
# only the purchase time, order id, merchant name and item name are listed here
# extract the remaining fields yourself as an exercise
print(good_time_and_id, good_merchant, good_name)
print('\n\n')
# most scrapers get flagged as bots because they do not simulate human behaviour
# simulate a human scrolling down through the items to avoid being detected as a bot
# random delay for the scrolling
swipe_time = random.randint(1, 3)
self.swipe_down(swipe_time)
# wait for the "next page" button to appear
good_total = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.pagination-next')))
# click the "next page" button
good_total.click()
sleep(2)
if __name__ == "__main__":
a = TaobaoSpyder()
a.scan_Login()
a.crawl_good_buy_data()
```
#### File: demos/hello/app.py
```python
import click
from flask import Flask
app = Flask(__name__)
# the minimal Flask application
@app.route('/')
def index():
return '<h1>Hello, World!</h1>'
# bind multiple URL for one view function
@app.route('/hi')
@app.route('/hello')
def say_hello():
return '<h1>Hello, Flask!</h1>'
# dynamic route, URL variable default
@app.route('/greet', defaults={'name': 'Programmer'})
@app.route('/greet/<name>')
def greet(name):
return '<h1>Hello, %s!</h1>' % name
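# e.g. GET /greet uses the default and returns "Hello, Programmer!",
# while GET /greet/Flask returns "Hello, Flask!"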
# custom flask cli command
@app.cli.command()
def hello():
"""Just say hello."""
click.echo('Hello, Human!')
``` |
{
"source": "4564/QuestionRetrieval",
"score": 3
} |
#### File: QR/data_process/data_merge.py
```python
import json
import random
import time
def merge(file_list, json_path='F:/Data/Chinese/chinese.json'):
"""
Merge the data sets and assign a unified id to every entry
:return:
"""
start = time.time()
print 'Processing:'
res = []
temp = []
for file_path in file_list:
print file_path
with open(file_path, 'r') as f:
for line in f:
j = json.loads(line)
if 'question_id' in j:
res.append(j)
else:
temp.append(j)
print time.time() - start
start = time.time()
# collect the set of question ids (qid)
qids = set([x['question_id'] for x in res])
print time.time() - start
start = time.time()
# collect the set of answer ids (aid)
aids = set()
for x in res:
for y in x['answers']:
aids.add(y['answer_id'])
print time.time() - start
start = time.time()
# assign ids to entries that do not have one
for x in temp:
new_qid = random.randint(1000000, 9999999)
while new_qid in qids:
new_qid = random.randint(1000000, 9999999)
x['question_id'] = new_qid
qids.add(new_qid)
x['questioner_id'] = -1
for y in x['answers']:
new_aid = random.randint(10000000, 99999999)
while new_aid in aids:
new_aid = random.randint(1000000, 9999999)
y['answer_id'] = new_aid
aids.add(new_aid)
y['answerer_id'] = -1
print time.time() - start
res[len(res):len(res)] = temp
# write the result to a file
f = open(json_path, 'w')
for x in res:
f.write(json.dumps(x))
f.write('\n')
f.close()
print 'Merged result saved to:'
print json_path
if __name__ == '__main__':
"""
Merge the extracted data and assign ids to the non-Sogou entries
"""
# data_paths = ['F:/Data/Chinese/Sogou/QA/sogou.json',
# 'F:/Data/Chinese/NLPCC/2016QA/train/nlpcc2016.json',
# 'F:/Data/Chinese/Baidu/WebQA.v1.0/data/baidu_webqa.json',
# 'F:/Data/Chinese/Baidu/Reading Comprehension/raw/baidu_reading_comprehension.json']
# merge(data_paths)
``` |
{
"source": "4577/Jawa",
"score": 2
} |
#### File: jawa/attributes/stack_map_table.py
```python
from itertools import repeat
from jawa.attribute import Attribute
from jawa.util.verifier import VerificationTypes
# These types are followed by an additional u2.
TYPES_WITH_EXTRA = (
VerificationTypes.ITEM_Object,
VerificationTypes.ITEM_Uninitialized
)
class StackMapFrame(object):
__slots__ = (
'frame_type',
'frame_offset',
'frame_locals',
'frame_stack'
)
def __init__(self, frame_type):
self.frame_type = frame_type
self.frame_offset = 0
self.frame_locals = []
self.frame_stack = []
def __repr__(self):
return (
u'<StackMapFrame(type={s.frame_type!r},'
u'offset={s.frame_offset!r},'
u'locals={s.frame_locals!r},'
u'stack={s.frame_stack!r})>'
).format(s=self)
class StackMapTableAttribute(Attribute):
"""
.. note::
Consider this experimental. This is an unnecessary 'feature' added
in Java6 that even the official JDK has multiple bugs with. Proper
generation of a StackMapTableAttribute requires a complete class
hierarchy among other things.
"""
ADDED_IN = '6.0.0'
MINIMUM_CLASS_VERSION = (50, 0)
def __init__(self, table, name_index=None):
super(StackMapTableAttribute, self).__init__(
table,
name_index or table.cf.constants.create_utf8(
'StackMapTable'
).index
)
self.frames = []
def unpack(self, info):
# Described in "4.7.4. The StackMapTable Attribute"
length = info.u2()
# Start with a null-state FULL_FRAME.
previous_frame = StackMapFrame(255)
for i in range(length):
frame_type = info.u1()
frame = StackMapFrame(frame_type)
if frame_type < 64:
# 0 to 63 are SAME_FRAME
if i == 0:
frame.frame_offset = frame_type
else:
frame.frame_offset = previous_frame.frame_offset + \
frame_type + 1
frame.frame_locals = previous_frame.frame_locals
self.frames.append(frame)
previous_frame = frame
continue
elif frame_type < 128:
# 64 to 127 are SAME_LOCALS_1_STACK_ITEM
if i == 0:
frame.frame_offset = frame_type - 64
else:
frame.frame_offset = previous_frame.frame_offset + \
frame_type - 63
frame.frame_locals = previous_frame.frame_locals
frame.frame_stack = list(
self._unpack_verification_type_info(info, 1)
)
self.frames.append(frame)
previous_frame = frame
continue
elif frame_type < 247:
# Reserved types, we may be trying to parse a ClassFile that's
# newer than we can handle.
raise NotImplementedError()
# All other types have an additional offset
frame_offset = info.u2()
if frame_type == 247:
# SAME_LOCALS_1_STACK_ITEM_EXTENDED
if i == 0:
frame.frame_offset = frame_offset
else:
frame.frame_offset = previous_frame.frame_offset + \
frame_offset + 1
frame.frame_locals = previous_frame.frame_locals
frame.frame_stack = list(
self._unpack_verification_type_info(
info,
1
)
)
elif frame_type < 251:
# CHOP
if i == 0:
frame.frame_offset = frame_offset
else:
frame.frame_offset = previous_frame.frame_offset + \
frame_offset + 1
frame.frame_locals = previous_frame.frame_locals[
0:251 - frame_type
]
elif frame_type == 251:
# SAME_FRAME_EXTENDED
if i == 0:
frame.frame_offset = frame_offset
else:
frame.frame_offset = previous_frame.frame_offset + \
frame_offset + 1
frame.frame_locals = previous_frame.frame_locals
elif frame_type < 255:
# APPEND
if i == 0:
frame.frame_offset = frame_offset
else:
frame.frame_offset = previous_frame.frame_offset + \
frame_offset + 1
frame.frame_locals = previous_frame.frame_locals + list(
self._unpack_verification_type_info(
info,
frame_type - 251
)
)
elif frame_type == 255:
# FULL_FRAME
if i == 0:
frame.frame_offset = frame_offset
else:
frame.frame_offset = previous_frame.frame_offset + \
frame_offset + 1
frame.frame_locals = list(self._unpack_verification_type_info(
info,
info.u2()
))
frame.frame_stack = list(self._unpack_verification_type_info(
info,
info.u2()
))
self.frames.append(frame)
previous_frame = frame
@staticmethod
def _unpack_verification_type_info(info, count):
# Unpacks the verification_type_info structure, used for both locals
# and the stack.
for _ in repeat(None, count):
tag = info.u1()
if tag in TYPES_WITH_EXTRA:
yield (tag, info.u2())
else:
yield (tag,)
def pack(self):
raise NotImplementedError()
```
#### File: jawa/attributes/synthetic.py
```python
from jawa.attribute import Attribute
class SyntheticAttribute(Attribute):
ADDED_IN = '5.0.0'
MINIMUM_CLASS_VERSION = (49, 0)
def __init__(self, table, name_index=None):
super().__init__(
table,
name_index or table.cf.constants.create_utf8(
'Synthetic'
).index
)
def pack(self):
pass
def unpack(self, info):
pass
```
#### File: jawa/util/flags.py
```python
__all__ = ('Flags',)
import struct
class Flags(object):
"""
Convenience class for handling bit flags.
"""
def __init__(self, binary_format, flags):
object.__setattr__(self, 'binary_format', binary_format)
object.__setattr__(self, 'flags', flags)
object.__setattr__(self, '_value', 0)
object.__setattr__(self, '_cache', struct.Struct(binary_format))
def pack(self):
"""
A shortcut for `struct.pack(flag.binary_format, flag.value)`.
"""
return self._cache.pack(self.value)
@property
def value(self):
"""
The numeric value of the bitfield.
"""
return self._value
def unpack(self, source):
"""
A shortcut for `struct.unpack(flag.binary_format, <bytes>)`.
"""
self._value = self._cache.unpack(source)[0]
def get(self, name):
"""
Returns the value of the field `name`.
"""
return bool(self.flags[name] & self.value)
def set(self, name, value):
"""
Sets the value of the field `name` to `value`, which is `True` or
`False`.
"""
flag = self.flags[name]
self._value = (self.value | flag) if value else (self.value & ~flag)
def __getattr__(self, attr):
if attr not in self.flags:
return object.__getattr__(self, attr)
return self.get(attr)
def __setattr__(self, attr, value):
if attr not in self.flags:
return object.__setattr__(self, attr, value)
self.set(attr, value)
def to_dict(self):
"""
Returns this `Flags` object's fields as a dictionary.
"""
return dict((k, self.get(k)) for k in self.flags.keys())
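# Minimal usage sketch (the flag names and masks below are illustrative, not defined here):
# f = Flags('>H', {'acc_public': 0x0001, 'acc_final': 0x0010})
# f.acc_public = True # same as f.set('acc_public', True)
# f.pack() # -> b'\x00\x01'
# f.to_dict() # -> {'acc_public': True, 'acc_final': False}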
```
#### File: tests/attributes/test_enclosing_method.py
```python
def test_enclosing_method_read(loader):
cf = loader['EnclosingMethod$1EnclosedClass']
a = cf.attributes.find_one(name='EnclosingMethod')
assert cf.constants[a.method_index].name.value == 'main'
assert cf.constants[a.class_index].name.value == 'EnclosingMethod'
def test_exceptions_write(loader):
cf = loader['EnclosingMethod$1EnclosedClass']
a = cf.attributes.find_one(name='EnclosingMethod')
assert a.pack() == b'\x00\x0b\x00\x0c'
```
#### File: tests/attributes/test_line_number_attribute.py
```python
def test_exceptions_read(loader):
cf = loader['HelloWorldDebug']
m = cf.methods.find_one(name='main')
a = m.code.attributes.find_one(name='LineNumberTable')
assert len(a.line_no) == 2
assert a.line_no[0] == (0, 3)
assert a.line_no[1] == (8, 4)
def test_exceptions_write(loader):
cf = loader['HelloWorldDebug']
m = cf.methods.find_one(name='main')
a = m.code.attributes.find_one(name='LineNumberTable')
assert a.pack() == b'\x00\x02\x00\x00\x00\x03\x00\x08\x00\x04'
```
#### File: Jawa/tests/test_printable.py
```python
from jawa.cf import ClassFile
from jawa.constants import ConstantPool
def test_printable_constants():
# Ensure we can successfully repr valid constants without crashing.
pool = ConstantPool()
repr(pool.create_utf8('HelloWorld'))
repr(pool.create_class('HelloWorld'))
repr(pool.create_double(1))
repr(pool.create_float(1))
repr(pool.create_integer(1))
repr(pool.create_long(1))
repr(pool.create_name_and_type('HelloWorld', 'I'))
repr(pool.create_field_ref('HelloWorld', 'test', 'I'))
repr(pool.create_method_ref('HelloWorld', 'test', 'I)V'))
repr(pool.create_interface_method_ref(
'HelloWorld',
'test',
'I)V'
))
repr(pool.create_string('HelloWorld'))
def test_printable_classes():
cf = ClassFile.create('HelloWorld')
assert repr(cf) == '<ClassFile(this=\'HelloWorld\')>'
assert repr(cf.version) == 'ClassVersion(major=50, minor=0)'
``` |
{
"source": "459217974/myvc",
"score": 3
} |
#### File: src/myvc_app/dbs.py
```python
import json
import os
from typing import List
from myvc_app.db_info import DBInfo
from myvc_app.config import DATA_PATH
class DBs:
def __init__(self):
self.dbs = [] # type: List[DBInfo]
def get_db_info_by_id(self, db_id):
_ = list(
filter(
lambda db: db.id == db_id,
self.dbs
)
)
return _[0] if _ else None
def delete_db_info_by_id(self, db_id):
for i, db in enumerate(self.dbs):
if db.id == db_id:
self.dbs.pop(i)
break
def save(self):
_ = []
for db_info in self.dbs:
_.append(db_info.to_json())
with open(DATA_PATH, 'w') as f:
json.dump(_, f)
@classmethod
def load(cls):
# type: () -> DBs
dbs = DBs()
if os.path.exists(DATA_PATH):
with open(DATA_PATH, 'rb') as f:
_dict = json.load(f)
for db_info in _dict:
dbs.dbs.append(DBInfo.load_from_json(db_info))
return dbs
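# Typical round trip (sketch; DATA_PATH comes from myvc_app.config):
# dbs = DBs.load()
# info = dbs.get_db_info_by_id(db_id) # None when the id is unknown
# dbs.delete_db_info_by_id(db_id)
# dbs.save()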
``` |
{
"source": "45gfg9/RemoteSignal",
"score": 3
} |
#### File: 45gfg9/RemoteSignal/param_calc.py
```python
from math import *
# script that calculates idle days, TX presses and OCR1 settings
# all data are ideal, from corresponding datasheets
battery_capacity_mAh = 200
rf24_pd_current_mA = 900e-6
rf24_rx_current_mA = {.25: 12.6, 1: 13.1, 2: 13.5}
rf24_tx_current_mA = {0: 11.3, -6: 9.0, -12: 7.5, -18: 7.0}
m48_wdt_current_mA = 3.75e-3
m48_1MHz_active_current_mA = .6
m48_ps_current_mA = 7e-3
def get_idle_days(rx_time_s, rf24_air_speed_Mbps=2):
return battery_capacity_mAh / ((1 - rx_time_s) * (m48_wdt_current_mA + m48_ps_current_mA + rf24_pd_current_mA)
+ rx_time_s * (m48_wdt_current_mA + m48_1MHz_active_current_mA + rf24_rx_current_mA[rf24_air_speed_Mbps])) / 24
def get_tx_info(tx_time_s, rf24_tx_power_dBm=-6):
prescaler = 1024
return {
'min_press': battery_capacity_mAh * 3600 / (tx_time_s * (m48_wdt_current_mA + m48_1MHz_active_current_mA + rf24_tx_current_mA[rf24_tx_power_dBm])),
'timer_1_comp_val': hex(ceil(tx_time_s / (prescaler / 1e6)))
}
rx_time_s = 2e-3
tx_time_s = 2.003
print(f'idle {get_idle_days(rx_time_s)} days when rx {rx_time_s * 1e3}ms')
print(f'tx {tx_time_s}s:', get_tx_info(tx_time_s))
``` |
{
"source": "45i/Denzven-Graphing-Api",
"score": 3
} |
#### File: Denzven-Graphing-Api/api/beta_flat_graph.py
```python
from flask import *
import matplotlib
from matplotlib import *
from config import *
matplotlib.use("agg")
import numpy as np
import matplotlib.pyplot as plt
import os
import traceback
# Adding a blueprint to start the graph function
beta_flat_graph_runner = Blueprint("beta_flat_graph_runner", __name__)
# Using the Blueprint made with a path
@beta_flat_graph_runner.route(BETA_FLAT_GRAPH_ROUTE, methods=["GET"])
def beta_flat_graph(): # The Function
# Getting all the parameters from the url
formula_og_input = request.args.get("formula")
grid_value = request.args.get("grid")
plot_style = request.args.get("plot_style")
x_coord = request.args.get("x_coord")
y_coord = request.args.get("y_coord")
spine_top = request.args.get("spine_top")
spine_bottom = request.args.get("spine_bottom")
spine_left = request.args.get("spine_left")
spine_right = request.args.get("spine_right")
line_style = request.args.get("line_style")
grid_lines_major = request.args.get("grid_lines_major")
grid_lines_minor = request.args.get("grid_lines_minor")
tick_colors = request.args.get("tick_colors")
axfacecolor = request.args.get("axfacecolor")
figfacecolor = request.args.get("figfacecolor")
title_text = request.args.get("title_text")
plot_style_list = [
"Solarize_Light2",
"_classic_test_patch",
"bmh",
"classic",
"dark_background",
"fast",
"fivethirtyeight",
"ggplot",
"grayscale",
"seaborn",
"seaborn-bright",
"seaborn-colorblind",
"seaborn-dark",
"seaborn-dark-palette",
"seaborn-darkgrid",
"seaborn-deep",
"seaborn-muted",
"seaborn-notebook",
"seaborn-paper",
"seaborn-pastel",
"seaborn-poster",
"seaborn-talk",
"seaborn-ticks",
"seaborn-white",
"seaborn-whitegrid",
"tableau-colorblind10",
]
# Printing the values for debugging
print("\n\n\n")
print(f"+========================================+")
print(f"| ")
print(f"| Graph_Type : FlatGraph ")
print(f"| formula_og_input : {formula_og_input} ")
print(f"| grid_value : {grid_value} ")
print(f"| plot_style : {plot_style} ")
print(f"| x_coord : {x_coord} ")
print(f"| y_coord : {y_coord} ")
print(f"| spine_top : {spine_top} ")
print(f"| spine_bottom : {spine_bottom} ")
print(f"| spine_left : {spine_left} ")
print(f"| spine_right : {spine_right} ")
print(f"| line_style : {line_style} ")
print(f"| grid_lines_major : {grid_lines_major} ")
print(f"| grid_lines_minor : {grid_lines_minor} ")
print(f"| tick_colors : {tick_colors} ")
print(f"| axfacecolor : {axfacecolor} ")
print(f"| figfacecolor : {figfacecolor} ")
print(f"| ")
print(f"+========================================+")
print("\n\n\n")
# Running the funtion in try-execpt blocks to avoid 500 type error
try: # Main Try-Except block
try: # Checking for Formula
if formula_og_input is None:
return jsonify(
error="formula input is not provided",
error_id="ERROR_NO_FORMULA_INPUT_TRY_BLOCK",
fix="Do not leave the Formula parameter empty",
)
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_FORMULA_INPUT_TRY_BLOCK",
fix="check your formula input again",
)
# ---
try: # Replacing only some with small letters to work in the eval
# formula_og_input = str(formula_og_input.upper()) # My sole Defence against every single thing
from sympy.parsing.sympy_parser import (
parse_expr, # converts string to sympy expression
standard_transformations, # eg. 5! = 5*4*3*2*1
implicit_multiplication_application, # e.g. 2x = 2*x
convert_xor, # e.g. 2^x = 2**x
)
# <--------------[PARSE EXPRESSION]---------------> #
def parseExpression(expression):
expression = expression.replace("y=", "")
expression = expression.replace("^", "**")
expression = expression.replace("e", "E")
transformations = (
standard_transformations
+ (implicit_multiplication_application,)
+ (convert_xor,)
)
equation = parse_expr(expression, transformations=transformations)
return equation
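# e.g. parseExpression("y=2x^2+3") -> 2*x**2 + 3 as a sympy expression
# (implicit multiplication and '^' are handled by the transformations above)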
formula = parseExpression(formula_og_input)
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_FORMULA_REPLACE_TRY_BLOCK",
fix="Please check your formula again, it contains unsupported characters",
)
# ---
try: # Setting plot style
if plot_style is None:
plt.style.use("dark_background")
pass
if plot_style is not None:
plot_style_choice = int(plot_style)
try:
plot_style = plot_style_list[plot_style_choice]
except:
return f"couldnt use this style {plot_style}"
plt.style.use(str(plot_style))
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_PLOT_STYLE_TRY_BLOCK",
fix="change your plot_style to a valid number (between 0-25)",
)
# ---
try: # Setting x_coord
if x_coord is None:
xlist = np.linspace(-10, 10, num=1000)
pass
if x_coord is not None:
x_coord = int(x_coord)
neg_x_coord = int(np.negative(x_coord))
xlist = np.linspace(neg_x_coord, x_coord, num=1000)
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_X_COORD_TRY_BLOCK",
fix="x_coord must be a number",
)
# ---
try: # Setting y_coord
if y_coord is None:
ylist = np.linspace(-10, 10, num=1000)
pass
if y_coord is not None:
y_coord = int(y_coord)
neg_y_coord = int(np.negative(y_coord))
ylist = np.linspace(neg_y_coord, y_coord, num=1000)
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_Y_COORD_TRY_BLOCK",
fix="y_coord must be a number",
)
# ---
try: # Core function of actually evaluating the formula over the x range
import numexpr as ne
fig, ax = plt.subplots()
X = xlist
Y = ne.evaluate(str(formula), local_dict={"x": X})
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_MAIN_EVAL_TRY_BLOCK",
fix="Check the formula input again,\n (PS: 2x has to be written as 2*x, please read the docs for further info: \n https://denzven.pythonanywhere.com/docs)",
)
# ---
try: # setting up line_style
if line_style is None:
ax.plot(X, Y, color="#4c82ca")
pass
if line_style is not None:
ax.plot(X, Y, color=f"#{line_style}")
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_LINE_STYLE_TRY_BLOCK",
fix="check the line_style input, it has to be a valid hex color without #",
)
# ---
try: # Setting up Grids
if grid_value is None:
plt.minorticks_off()
plt.grid(b=False)
plt.grid(b=False)
pass
if grid_value == "1":
plt.minorticks_on()
plt.grid(b=True, which="major", color="#666666", linestyle="-")
plt.grid(
b=True, which="minor", color="#999999", linestyle="-", alpha=0.2
)
pass
if grid_value == "3":
plt.minorticks_on()
plt.grid(
b=True, which="major", color=f"#{grid_lines_major}", linestyle="-"
)
plt.grid(
b=True,
which="minor",
color=f"#{grid_lines_minor}",
linestyle="-",
alpha=0.2,
)
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_GRID_VALUE_TRY_BLOCK",
fix="check the grid input it has to be 1,2 or 3",
)
# ---
try: # Setting up each axis spine
try: # Top-Spine
if spine_top is None:
ax.spines["top"].set_color(f"#ffffff")
pass
if spine_top is not None:
ax.spines["top"].set_color(f"#{spine_top}")
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_TOP_SPINE_TRY_BLOCK",
fix="check the spine_top input, it has to be a valid hex color without #",
)
# ---
try: # Bottom-Spine
if spine_bottom is None:
ax.spines["bottom"].set_color(f"#ffffff")
pass
if spine_bottom is not None:
ax.spines["bottom"].set_color(f"#{spine_bottom}")
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_BOTTOM_SPINE_TRY_BLOCK",
fix="check the spine_bottom input, it has to be a valid hex color without #",
)
# ---
try: # Left-Spine
if spine_left is None:
ax.spines["left"].set_color(f"#ffffff")
pass
if spine_left is not None:
ax.spines["left"].set_color(f"#{spine_left}")
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_LEFT_SPINE_TRY_BLOCK",
fix="check the spine_left input, it has to be a valid hex color without #",
)
# ---
try: # Right-Spine
if spine_right is None:
ax.spines["right"].set_color(f"#ffffff")
pass
if spine_right is not None:
ax.spines["right"].set_color(f"#{spine_right}")
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_RIGHT_SPINE_TRY_BLOCK",
fix="check the spine_right input, it has to be a valid hex color without #",
)
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_MAIN_SPINE_TRY_BLOCK",
fix="please check values of spine again",
)
# ---
try: # setting up tick_colors
if tick_colors is None:
ax.tick_params(colors="#ffffff", which="both")
pass
if tick_colors is not None:
ax.tick_params(colors=f"#{tick_colors}", which="both")
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_TICK_COLORS_TRY_BLOCK",
fix="check the tick_colors input, it has to be a valid hex color without #",
)
# ---
try: # setting up axfacecolors
if axfacecolor is None:
pass
if axfacecolor is not None:
ax.set_facecolor(f"#{axfacecolor}")
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_AX_FACECOLOR_TRY_BLOCK",
fix="check the axfacecolor input, it has to be a valid hex color without #",
)
# ---
try: # setting up figfacecolors
if figfacecolor is None:
pass
if figfacecolor is not None:
fig.set_facecolor(f"#{figfacecolor}")
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_FIG_FACECOLOR_TRY_BLOCK",
fix="check the figfacecolor input, it has to be a valid hex color without #",
)
# ---
try: # setting up title
if title_text is None:
plt.title(
f"graphical representation of {formula_og_input} = 0",
color="#ffffff",
pad=20,
fontsize="small",
)
pass
if title_text is not None:
plt.title(f"{title_text}", color="#ffffff", pad=20, fontsize="small")
pass
except Exception as e:
return jsonify(
error=str(e),
error_id="ERROR_TITLE_TEXT_TRY_BLOCK",
fix="the title contains invalid characters, please recheck the title",
)
# ---
try: # adding title and saving and sending the file
ax.set_aspect("equal")
fig.savefig("../flat_plot_test.png", bbox_inches="tight", dpi=150)
filename = "../flat_plot_test.png"
plt.close(fig)
return send_file(filename)
except Exception as e:
return jsonify(error=str(e), error_id="ERROR_SAVE_FIG_TRY_BLOCK")
except Exception as e:
return jsonify(error=str(e), error_id="ERROR_MAIN_TRY_BLOCK")
# Hope you loved this. feel free to try out and explore this Api at:
# https://denzven.pythonanywhere.com/
# Join my chill server at:
# https://dsc.gg/chilly_place
# pls star this on github it will be a great honour
# https://github.com/denzven/Denzven-Graphing-Api
# Hope yall have a great day! happy Graphing!
# Oh Boy it was a Pain to comment this code, But im sure its not a pain for you to understand it :) .
``` |
{
"source": "4620511/cnn-example",
"score": 3
} |
#### File: cnn_example/trainer/trainer.py
```python
from cnn_example.config import Config
from .resnet18 import ResNet18Module # noqa: F401
from .resnet50 import ResNet50Module # noqa: F401
def get_trainer(config: Config):
if config.train.model == "resnet18":
return ResNet18Module
if config.train.model == "resnet50":
return ResNet50Module
raise ValueError("invalid model name: {}".format(config.train.model))
``` |
{
"source": "4620511/MangaLineExtraction_PyTorch",
"score": 3
} |
#### File: MangaLineExtraction_PyTorch/mangalineextraction/model.py
```python
import torch.nn as nn
class _BnReluConv(nn.Module):
def __init__(self, in_filters, nb_filters, fw, fh, subsample=1):
super(_BnReluConv, self).__init__()
self.model = nn.Sequential(
nn.BatchNorm2d(in_filters, eps=1e-3),
nn.LeakyReLU(0.2),
nn.Conv2d(
in_filters, nb_filters, (fw, fh), stride=subsample, padding=(fw // 2, fh // 2), padding_mode="zeros"
),
)
def forward(self, x):
return self.model(x)
class _UBnReluConv(nn.Module):
def __init__(self, in_filters, nb_filters, fw, fh, subsample=1):
super(_UBnReluConv, self).__init__()
self.model = nn.Sequential(
nn.BatchNorm2d(in_filters, eps=1e-3),
nn.LeakyReLU(0.2),
nn.Conv2d(in_filters, nb_filters, (fw, fh), stride=subsample, padding=(fw // 2, fh // 2)),
nn.Upsample(scale_factor=2, mode="nearest"),
)
def forward(self, x):
return self.model(x)
class _Shortcut(nn.Module):
def __init__(self, in_filters, nb_filters, subsample=1):
super(_Shortcut, self).__init__()
self.process = False
self.model = None
if in_filters != nb_filters or subsample != 1:
self.process = True
self.model = nn.Sequential(nn.Conv2d(in_filters, nb_filters, (1, 1), stride=subsample))
def forward(self, x, y):
if self.process:
y0 = self.model(x)
return y0 + y
else:
return x + y
class _UShortcut(nn.Module):
def __init__(self, in_filters, nb_filters, subsample):
super(_UShortcut, self).__init__()
self.process = False
self.model = None
if in_filters != nb_filters:
self.process = True
self.model = nn.Sequential(
nn.Conv2d(in_filters, nb_filters, (1, 1), stride=subsample, padding_mode="zeros"),
nn.Upsample(scale_factor=2, mode="nearest"),
)
def forward(self, x, y):
if self.process:
return self.model(x) + y
else:
return x + y
class BasicBlock(nn.Module):
def __init__(self, in_filters, nb_filters, init_subsample=1):
super(BasicBlock, self).__init__()
self.conv1 = _BnReluConv(in_filters, nb_filters, 3, 3, subsample=init_subsample)
self.residual = _BnReluConv(nb_filters, nb_filters, 3, 3)
self.shortcut = _Shortcut(in_filters, nb_filters, subsample=init_subsample)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.residual(x1)
return self.shortcut(x, x2)
class _UBasicBlock(nn.Module):
def __init__(self, in_filters, nb_filters, init_subsample=1):
super(_UBasicBlock, self).__init__()
self.conv1 = _UBnReluConv(in_filters, nb_filters, 3, 3, subsample=init_subsample)
self.residual = _BnReluConv(nb_filters, nb_filters, 3, 3)
self.shortcut = _UShortcut(in_filters, nb_filters, subsample=init_subsample)
def forward(self, x):
y = self.residual(self.conv1(x))
return self.shortcut(x, y)
class _ResidualBlock(nn.Module):
def __init__(self, in_filters, nb_filters, repetitions, is_first_layer=False):
super(_ResidualBlock, self).__init__()
layers = []
for i in range(repetitions):
init_subsample = 1
if i == repetitions - 1 and not is_first_layer:
init_subsample = 2
if i == 0:
l = BasicBlock(in_filters=in_filters, nb_filters=nb_filters, init_subsample=init_subsample)
else:
l = BasicBlock(in_filters=nb_filters, nb_filters=nb_filters, init_subsample=init_subsample)
layers.append(l)
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
class _UpsamplingResidualBlock(nn.Module):
def __init__(self, in_filters, nb_filters, repetitions):
super(_UpsamplingResidualBlock, self).__init__()
layers = []
for i in range(repetitions):
l = None
if i == 0:
l = _UBasicBlock(in_filters=in_filters, nb_filters=nb_filters) # (input)
else:
l = BasicBlock(in_filters=nb_filters, nb_filters=nb_filters) # (input)
layers.append(l)
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
class MangaLineExtractor(nn.Module):
def __init__(self):
super(MangaLineExtractor, self).__init__()
self.block0 = _ResidualBlock(in_filters=1, nb_filters=24, repetitions=2, is_first_layer=True) # (input)
self.block1 = _ResidualBlock(in_filters=24, nb_filters=48, repetitions=3) # (block0)
self.block2 = _ResidualBlock(in_filters=48, nb_filters=96, repetitions=5) # (block1)
self.block3 = _ResidualBlock(in_filters=96, nb_filters=192, repetitions=7) # (block2)
self.block4 = _ResidualBlock(in_filters=192, nb_filters=384, repetitions=12) # (block3)
self.block5 = _UpsamplingResidualBlock(in_filters=384, nb_filters=192, repetitions=7) # (block4)
self.res1 = _Shortcut(in_filters=192, nb_filters=192) # (block3, block5, subsample=(1,1))
self.block6 = _UpsamplingResidualBlock(in_filters=192, nb_filters=96, repetitions=5) # (res1)
self.res2 = _Shortcut(in_filters=96, nb_filters=96) # (block2, block6, subsample=(1,1))
self.block7 = _UpsamplingResidualBlock(in_filters=96, nb_filters=48, repetitions=3) # (res2)
self.res3 = _Shortcut(in_filters=48, nb_filters=48) # (block1, block7, subsample=(1,1))
self.block8 = _UpsamplingResidualBlock(in_filters=48, nb_filters=24, repetitions=2) # (res3)
self.res4 = _Shortcut(in_filters=24, nb_filters=24) # (block0,block8, subsample=(1,1))
self.block9 = _ResidualBlock(in_filters=24, nb_filters=16, repetitions=2, is_first_layer=True) # (res4)
self.conv15 = _BnReluConv(in_filters=16, nb_filters=1, fh=1, fw=1, subsample=1) # (block7)
def forward(self, x):
x0 = self.block0(x)
x1 = self.block1(x0)
x2 = self.block2(x1)
x3 = self.block3(x2)
x4 = self.block4(x3)
x5 = self.block5(x4)
res1 = self.res1(x3, x5)
x6 = self.block6(res1)
res2 = self.res2(x2, x6)
x7 = self.block7(res2)
res3 = self.res3(x1, x7)
x8 = self.block8(res3)
res4 = self.res4(x0, x8)
x9 = self.block9(res4)
y = self.conv15(x9)
return y
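# Usage sketch (sizes are illustrative; pretrained weights are not loaded here):
# model = MangaLineExtractor().eval()
# x = torch.randn(1, 1, 256, 256) # one single-channel grayscale page
# with torch.no_grad():
# out = model(x) # -> (1, 1, 256, 256) line-art prediction
# Height and width should be divisible by 16 because the encoder downsamples four times.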
``` |
{
"source": "462548187/atms-api",
"score": 2
} |
#### File: atms-api/utils/security.py
```python
from datetime import timedelta, datetime
from typing import Optional
# token route
from fastapi import HTTPException, Depends, status
from fastapi.security import OAuth2PasswordBearer
# JWT library
from jose import jwt, JWTError
# password hashing library
from passlib.context import CryptContext
from config import settings
# password hashing algorithm
from models import user
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# route that issues the tokens
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="v1/login")
def get_password_hash(password: str) -> str:
"""Hash the password with the configured hashing algorithm"""
return pwd_context.hash(password)
def verify_password(plain_password: str, hashed_password: str) -> bool:
"""Verify a plain password against its hashed value"""
return pwd_context.verify(plain_password, hashed_password)
def create_access_token(data: dict, expire_time: Optional[timedelta] = None):
"""
Generate a JWT token
"""
# set the token expiry time
if expire_time:
expire = datetime.utcnow() + expire_time
else:
expire = datetime.utcnow() + timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
data.update({"exp": expire})
token = jwt.encode(data, settings.SECRET_KEY, algorithm=settings.ALGORITHM)
return token
async def get_current_user(token: str = Depends(oauth2_scheme)):
"""
Get the currently logged-in user
"""
token_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid or expired token",
headers={"WWW-Authenticate": "Bearer"},
)
try:
jwt_data = jwt.decode(token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM])
username: str = jwt_data.get("sub")
if username is None or username == "":
raise token_exception
except JWTError:
raise token_exception
user_name = await user.User.get(username=username)
if user_name is None:
raise token_exception
return user_name
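# Usage sketch (values are illustrative):
# hashed = get_password_hash("secret")
# verify_password("secret", hashed) # -> True
# token = create_access_token({"sub": "alice"}, expire_time=timedelta(minutes=30))
# get_current_user is wired into protected routes as a dependency: Depends(get_current_user)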
``` |
{
"source": "462548187/project_web",
"score": 2
} |
#### File: api/v1/push.py
```python
from tortoise.transactions import in_transaction
import core
from fastapi import APIRouter
from db import models
push_router = APIRouter(tags=["push related"])
@push_router.post("/push", name="create push")
async def create(push: models.PushInName):
"""
The database configuration for a new environment currently only supports MySQL and must follow the dict layout below
Args:
push:
Returns:
"""
try:
push_name_obj = [await models.Staff.get(id=staff) for staff in push.push_name_list]
del push.push_name_list
async with in_transaction():
push_obj = await models.Push.create(**push.dict(exclude_unset=True))
# await push_obj.at_name.add(*push_name_obj)
return core.Success(data=await models.Push_Pydantic.from_tortoise_orm(push_obj))
except Exception as e:
return core.Fail(message=f"Creation failed.{e}")
@push_router.delete("/push/{push_id}", name="delete push")
async def delete(push_id: int):
push_obj = await models.Push.filter(id=push_id).delete()
if push_obj:
return core.Success()
return core.Fail(message="Push does not exist.")
@push_router.get("/push", name="list all pushes")
async def select_all(limit: int = 10, page: int = 1):
skip = (page - 1) * limit
# from_queryset serializes a queryset object
data = await models.Push_Pydantic.from_queryset(models.Push.all().order_by('-created_at').offset(skip).limit(limit))
return core.Success(data={"total": await models.Push.all().count(), "items": data})
@push_router.get("/search/{push_name}", name="fuzzy search pushes by name")
async def select_push(push_name: str, limit: int = 10, page: int = 1):
skip = (page - 1) * limit
try:
data = await models.Push_Pydantic.from_queryset(models.Push.filter(name__contains=push_name).all().order_by('-created_at').offset(skip).limit(limit))
return core.Success(data=data)
except Exception as e:
return core.Fail(message=f"Query failed.{e}")
@push_router.put("/push/{push_id}", name="edit push")
async def update(push_id: int, push: models.PushInName):
try:
push_obj = await models.Push.get(id=push_id)
push_name_obj = [await models.Staff.get(id=staff) for staff in push.push_name_list]
del push.push_name_list
async with in_transaction():
await models.Push.filter(id=push_id).update(**push.dict(exclude_unset=True))
# clear this object's existing at_name relations
await push_obj.at_name.clear()
# add the new relations
await push_obj.at_name.add(*push_name_obj)
return core.Success(data=await models.Push_Pydantic.from_queryset_single(models.Push.get(id=push_id)))
except Exception as e:
return core.Fail(message=f"Update failed.{e}")
```
#### File: api/v1/util.py
```python
from tortoise.transactions import in_transaction
import core
from core import MysqlSettings
from db import models
from util import read_file
from fastapi import APIRouter
from util.wr_file import write_file
util_router = APIRouter(tags=["misc"])
@util_router.get("/help", name="get the usage document")
async def help_doc():
return core.Success(data=await read_file('apiAutoTestWeb使用说明.md'))
@util_router.get("/code", name="get the extension script")
async def get_code():
return core.Success(data=await read_file('util/extend.py'))
@util_router.put("/code", name="update the extension script")
async def update_code(script: core.Code):
# verify that the submitted code can be executed
try:
exec(script.code)
await write_file('util/extend.py', script.code)
return core.Success()
except Exception as e:
return core.Fail(message=f"Update failed.{e}")
@util_router.get("/list", name="平台数据获取")
async def get_plant():
    # today's totals, e.g. SELECT count(*) FROM project
    # WHERE strftime('%Y-%m-%d', created_at) = date('now')
tables = ['project']
today = []
async with in_transaction("default") as conn:
for table in tables:
data = await conn.execute_query_dict(f"SELECT count(*) as total FROM {table} WHERE strftime('%Y-%m-%d', created_at) = date('now')")
today.append(data[0]["total"])
return core.Success(data={
"project": len(await models.Project.all()),
"today": today
})
``` |
{
"source": "462630221/optimizer",
"score": 2
} |
#### File: onnxoptimizer/test/optimizer_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from typing import Sequence, Text, Any, Tuple, List, Callable, Optional, Dict, Union
import io
import unittest
import os
import numpy as np # type: ignore
try:
import torch
import torchvision as tv
has_tv = True
except ImportError:
has_tv = False
import onnx
from onnx import checker, helper, ModelProto, TensorProto, GraphProto, NodeProto, shape_inference
from onnx import numpy_helper
from onnx.numpy_helper import to_array
try:
import onnxruntime as rt
has_ort = True
except ImportError:
has_ort = False
import onnxoptimizer
TensorShape = List[int]
TensorShapes = Dict[Optional[str], TensorShape]
LATEST_STABLE_OPSET_VERSION = 13
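# A standalone sketch of the API these tests exercise (illustrative only; the file
# names are placeholders and the pass list is just a sample of get_available_passes()):
#   model = onnx.load("model.onnx")
#   passes = ["eliminate_identity", "eliminate_nop_transpose", "fuse_transpose_into_gemm"]
#   optimized = onnxoptimizer.optimize(model, passes)
#   onnx.save(optimized, "model.optimized.onnx")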
class TestOptimizer(unittest.TestCase):
def _compare(self, model_opt: onnx.ModelProto, model_ori: onnx.ModelProto, n_times: int = 5,
input_shapes: Optional[TensorShapes] = None, verbose=True) -> bool:
"""
:param input_shapes: Shapes of generated random inputs
:param model_opt: The simplified ONNX model
:param model_ori: The original ONNX model
:param n_times: Generate n random inputs
"""
def get_shape_from_value_info_proto(v: onnx.ValueInfoProto) -> List[int]:
return [dim.dim_value for dim in v.type.tensor_type.shape.dim]
def get_value_info_all(m: onnx.ModelProto, name: str) -> Optional[onnx.ValueInfoProto]:
for v in m.graph.value_info:
if v.name == name:
return v
for v in m.graph.input:
if v.name == name:
return v
for v in m.graph.output:
if v.name == name:
return v
return None
def get_shape(m: onnx.ModelProto, name: str) -> TensorShape:
"""
Note: This method relies on onnx shape inference, which is not reliable. So only use it on input or output tensors
"""
v = get_value_info_all(m, name)
if v is not None:
return get_shape_from_value_info_proto(v)
raise RuntimeError('Cannot get shape of "{}"'.format(name))
def get_elem_type(m: onnx.ModelProto, name: str) -> Optional[int]:
v = get_value_info_all(m, name)
if v is not None:
return v.type.tensor_type.elem_type
return None
        def get_np_type_from_elem_type(elem_type: int) -> Any:
            sizes = (None, np.float32, np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, str, np.bool_,
                     np.float16, np.double, np.uint32, np.uint64, np.complex64, np.complex128, np.float16)
assert len(sizes) == 17
size = sizes[elem_type]
assert size is not None
return size
def get_input_names(model: onnx.ModelProto) -> List[str]:
input_names = list(set([ipt.name for ipt in model.graph.input])
- set([x.name for x in model.graph.initializer]))
return input_names
def generate_rand_input(model, input_shapes: Optional[TensorShapes] = None):
if input_shapes is None:
input_shapes = {}
input_names = get_input_names(model)
full_input_shapes = {ipt: get_shape(
model, ipt) for ipt in input_names}
assert None not in input_shapes
full_input_shapes.update(input_shapes) # type: ignore
for key in full_input_shapes:
if np.prod(full_input_shapes[key]) <= 0:
raise RuntimeError(
'The shape of input "{}" has dynamic size, '
'please set an input shape manually'.format(key))
inputs = {ipt: np.array(np.random.rand(*full_input_shapes[ipt]),
dtype=get_np_type_from_elem_type(get_elem_type(model, ipt))) for ipt in
input_names}
return inputs
def forward(model, inputs=None, input_shapes: Optional[TensorShapes] = None) -> Dict[str, np.ndarray]:
if input_shapes is None:
input_shapes = {}
sess_options = rt.SessionOptions()
sess_options.graph_optimization_level = rt.GraphOptimizationLevel(0)
sess_options.log_severity_level = 3
sess = rt.InferenceSession(model.SerializeToString(
), sess_options=sess_options, providers=['CPUExecutionProvider'])
if inputs is None:
inputs = generate_rand_input(model, input_shapes=input_shapes)
outputs = [x.name for x in sess.get_outputs()]
run_options = rt.RunOptions()
run_options.log_severity_level = 3
res = OrderedDict(zip(outputs, sess.run(
outputs, inputs, run_options=run_options)))
return res
if input_shapes is None:
input_shapes = {}
onnx.checker.check_model(model_opt)
for i in range(n_times):
rand_input = generate_rand_input(
model_opt, input_shapes=input_shapes)
res_ori = forward(model_ori, inputs=rand_input)
res_opt = forward(model_opt, inputs=rand_input)
for name in res_opt.keys():
if not np.allclose(res_opt[name], res_ori[name], rtol=1e-4, atol=1e-5):
if verbose:
print("Tensor {} changes after optimization. The max diff is {}.".format(
name, np.max(np.abs(res_opt[name] - res_ori[name]))))
print("After optimization:")
print(res_opt[name])
print("Before optimization:")
print(res_ori[name])
print("----------------")
return False
return True
# type: (Union[GraphProto, ModelProto], Sequence[Text], bool, **Any) -> ModelProto
def _optimized(self, graph_or_model, opts, fixed_point=False, compare_result=True, **kwargs):
if isinstance(graph_or_model, ModelProto):
orig_model = graph_or_model
else:
opset_imports = kwargs.pop('opset_imports', None)
if opset_imports is None:
opset_imports = [helper.make_opsetid("", LATEST_STABLE_OPSET_VERSION)]
orig_model = helper.make_model(
graph_or_model, producer_name='onnx-test', opset_imports=opset_imports, **kwargs)
checker.check_model(orig_model)
optimized_model = onnxoptimizer.optimize(orig_model, opts, fixed_point)
checker.check_model(optimized_model)
if compare_result and len(optimized_model.graph.node) > 0:
if has_ort:
assert self._compare(optimized_model, orig_model)
else:
print("Skip onnxruntime test because it is not installed.")
return optimized_model
    # input_types and output_types are lists of triples of (type, shape, name)
# NOTE(daquexian): only values that change across loop iterations should be in `input_types` and `output_types`. The pseudocode showing how loop op works is:
# loop_value_inputs = graph_value_inputs
# while cond:
# loop_value_outputs = body(loop_value_inputs)
# loop_value_inputs = loop_value_outputs
# graph_value_outputs = loop_value_outputs
def _make_fake_loop_op(self,
body_nodes, # type: Sequence[NodeProto]
# type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
input_types,
# type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
output_types,
check_legality=True,
): # type: (...) -> List[NodeProto]
if check_legality:
assert len(input_types) == len(output_types)
zero = helper.make_tensor(
"trip_count_value", TensorProto.INT64, (), [1])
true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
# lcd is a dummy loop-carried dependency that only exists because
# right now the schema checker is broken and assumes a variadic
# input needs at least one value.
graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT64, ()),
helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
for type, shape, name in input_types:
graph_inputs.append(
helper.make_tensor_value_info("_" + name, type, shape))
graph_outputs = [helper.make_tensor_value_info(
"cond", TensorProto.BOOL, ())]
for type, shape, name in output_types:
graph_outputs.append(
helper.make_tensor_value_info("_" + name, type, shape))
body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
graph_outputs)
loop_inputs = ["trip_count", "condition"]
loop_inputs.extend([name for _, _, name in input_types])
# TODO: fix checker to accept 0-input variadic inputs
if len(loop_inputs) == 2:
loop_inputs.append("")
loop_outputs = [name for _, _, name in output_types]
retval_nodes = [
helper.make_node("Constant", [], ["trip_count"], value=zero),
helper.make_node("Constant", [], ["condition"], value=true),
helper.make_node("Loop", loop_inputs, loop_outputs, body=body_graph)
]
return retval_nodes
def _make_fake_if_op(self,
true_nodes, # type: Sequence[NodeProto]
false_nodes, # type: Sequence[NodeProto]
# type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
output_types
): # type: (...) -> List[NodeProto]
true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
if_inputs = ["condition"]
if_outputs = [name for _, _, name in output_types]
retval_nodes = [
helper.make_node("Constant", [], ["condition"], value=true),
helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
else_branch=false_graph)
]
return retval_nodes
# fn is a function that takes a single node as argument
# type: (GraphProto, Callable[[NodeProto], None]) -> None
def _visit_all_nodes_recursive(self, graph, fn):
for node in graph.node:
fn(node)
for attr in node.attribute:
if attr.g is not None:
self._visit_all_nodes_recursive(attr.g, fn)
if len(attr.graphs):
for gr in attr.graphs:
self._visit_all_nodes_recursive(gr, fn)
def test_get_available_passes(self): # type: () -> None
        # FIXME: does not guarantee that all available passes are listed
graph = helper.make_graph([], "dummy_graph", [], [])
list_of_passes = onnxoptimizer.get_available_passes()
assert isinstance(list_of_passes, (list)) and len(list_of_passes) > 0
for pass_name in list_of_passes:
# If pass_name is invalid it throws a RuntimeError
self._optimized(graph, [pass_name])
def test_eliminate_identity_single_use(self): # type: () -> None
nodes = [helper.make_node("Add", ["X", "Y"], ["A"]),
helper.make_node("Identity", ["A"], ["B"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Identity", ["_B"], ["_B2"])],
[(TensorProto.FLOAT, (5,), "B")],
[(TensorProto.FLOAT, (5,), "B2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("B2", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
# All identity nodes should have been eliminated
def check_identity(node): # type: (NodeProto) -> None
assert node.op_type != "Identity"
self._visit_all_nodes_recursive(optimized_model.graph, check_identity)
# Use of the output from the Identity node in the main graph should
# have been replaced with the input to the identity node
assert len(optimized_model.graph.output) == 2
assert optimized_model.graph.output[0].name == "B"
# Use of the output from the Identity node in the loop graph should
# have been replaced with the input to that identity node
assert len(optimized_model.graph.node[3].attribute[0].g.output) == 2
assert optimized_model.graph.node[3].attribute[0].g.output[1].name == "_B2"
def test_eliminate_identity_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
identity = helper.make_node("Identity", ["A"], ["B"])
graph = helper.make_graph(
[add, identity],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
for node in optimized_model.graph.node:
assert node.op_type != "Identity"
assert len(
optimized_model.graph.output) == 1 and optimized_model.graph.output[0].name == 'B'
assert len(optimized_model.graph.node) == 1
def test_eliminate_identity_multiple_uses(self): # type: () -> None
identity = helper.make_node("Identity", ["X"], ["Y"])
add = helper.make_node("Add", ["Z", "Y"], ["A"])
mul = helper.make_node("Mul", ["A", "Y"], ["B"])
graph = helper.make_graph(
[identity, add, mul],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))])
optimized_model = self._optimized(graph, ["eliminate_identity"])
for node in optimized_model.graph.node:
assert node.op_type != "Identity"
assert len(optimized_model.graph.node) == 2
def test_not_fuse_non_nop_flatten(self):
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=2)
graph = helper.make_graph(
[flatten],
"test",
[helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (1, 10, 3, 1, 1))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (10, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == 'Flatten'
def test_nop_flatten_axis0_graph_output(self):
add = helper.make_node("Add", ["X", "Y"], ["A"])
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=0)
graph = helper.make_graph(
[add, flatten],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 10)),
],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 10))],
# the tensor_value_info of "A" is necessary to this optimizer
value_info=[helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (1, 10))]
)
        # The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == 'Add'
def test_nop_flatten_axis0(self):
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=0)
graph = helper.make_graph(
[flatten],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 10))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 10))])
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 0
def test_nop_flatten_axis1(self):
flatten = helper.make_node("Flatten", ["A"], ["B"], axis=1)
graph = helper.make_graph(
[flatten],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_flatten"])
assert len(optimized_model.graph.node) == 0
def test_eliminate_duplicate_initializer(self): # type: () -> None
add_1 = helper.make_node("Add", ["A", "I_0"], ["B"])
add_2 = helper.make_node("Add", ["B", "I_1"], ["C"])
i = np.random.rand(5).astype(np.float32)
graph = helper.make_graph(
[add_1, add_2],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("I_0", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("I_1", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("C", TensorProto.FLOAT, (5,))],
[helper.make_tensor("I_0", TensorProto.FLOAT,
dims=(5,),
vals=i.tobytes(),
raw=True),
helper.make_tensor("I_1", TensorProto.FLOAT,
dims=(5,),
vals=i.tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_duplicate_initializer"])
assert len(optimized_model.graph.node) == 2
assert len(optimized_model.graph.initializer) == 1
assert len(optimized_model.graph.input) == 2
assert optimized_model.graph.node[0].input[1] == "I_0"
def test_nop_cast(self): # type: () -> None
cast = helper.make_node("Cast", ["A"], ["B"], to=TensorProto.FLOAT)
graph = helper.make_graph(
[cast],
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_cast"])
assert len(optimized_model.graph.node) == 0
def test_nop_transpose_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
trans = helper.make_node("Transpose", ["A"], ["B"], perm=[0, 1])
graph = helper.make_graph(
[add, trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (2, 3))])
        # The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
assert len(optimized_model.graph.node) == 1
def test_nop_transpose(self): # type: () -> None
nodes = [helper.make_node("Identity", ["A"], ["X"]),
helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 1])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_Y"], ["_Y2"], perm=[0, 1])],
[(TensorProto.FLOAT, (2, 3), "Y")],
[(TensorProto.FLOAT, (2, 3), "Y2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (2, 3))])
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
# Use of the output from the Transpose node in the main graph should
        # have been replaced with the input to that Transpose node
assert len(optimized_model.graph.output) == 2
assert optimized_model.graph.output[0].name == "Y"
# Use of the output from the Transpose node in the loop graph should
        # have been replaced with the input to that Transpose node
assert len(optimized_model.graph.node[3].attribute[0].g.output) == 2
assert optimized_model.graph.node[3].attribute[0].g.output[1].name == "_Y2"
def test_nop_transpose_default(self): # type: () -> None
trans = helper.make_node("Transpose", ["X"], ["Y"])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2))])
optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Transpose"
def test_nop_pad_opset10(self): # type: () -> None
nodes = [helper.make_node("Pad", ["X"], ["Y"], pads=[0, 0, 0, 0])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))])
assert len(graph.node) == 1
optimized_model = self._optimized(
graph, ["eliminate_nop_pad"], False, opset_imports=[helper.make_opsetid("", 10)])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.output) == 1
assert optimized_model.graph.output[0].name == "Y"
assert len(optimized_model.graph.node) == 0
def test_nop_pad_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
pad = helper.make_node("Pad", ["A", "Pads"], ["B"])
graph = helper.make_graph(
[add, pad],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (2,))],
[helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(2,),
vals=np.array([0, 0]).astype(
np.int64).tobytes(),
raw=True)])
        # The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.node) == 1
def test_nop_pad(self): # type: () -> None
nodes = [helper.make_node("Pad", ["X", "Pads"], ["Y"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (4,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(4,),
vals=np.array([0, 0, 0, 0]).astype(
np.int64).tobytes(),
raw=True)])
assert len(graph.node) == 1
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
def check_pad(node): # type: (NodeProto) -> None
assert node.op_type != "Pad"
self._visit_all_nodes_recursive(optimized_model.graph, check_pad)
assert len(optimized_model.graph.output) == 1
assert optimized_model.graph.output[0].name == "Y"
assert len(optimized_model.graph.node) == 0
def test_nop_pad_default_opset10(self): # type: () -> None
trans = helper.make_node("Pad", ["X"], ["Y"], pads=[0, 0, 1, 1])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4))])
optimized_model = self._optimized(
graph, ["eliminate_nop_pad"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Pad"
def test_nop_pad_default(self): # type: () -> None
trans = helper.make_node("Pad", ["X", "Pads"], ["Y"])
graph = helper.make_graph(
[trans],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (4,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(4,),
vals=np.array([0, 1, 0, 0]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["eliminate_nop_pad"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Pad"
def test_eliminate_unused_initializer(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 0
def test_eliminate_unused_initializer_input(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 0
assert len(optimized_model.graph.input) == 2
# type: () -> None
def test_eliminate_unused_initializer_no_eliminate_used_default(self):
add = helper.make_node("Add", ["X", "A"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(1, 2),
vals=np.random.randn(1, 2).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 1
# type: () -> None
def test_eliminate_unused_initializer_no_eliminate_used(self):
nodes = [helper.make_node("Add", ["X", "A"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Add", ["_X", "A"], ["_Z2"])],
[(TensorProto.FLOAT, (1, 2), "X")],
[(TensorProto.FLOAT, (1, 2), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(1, 2),
vals=np.random.randn(1, 2).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
# Add, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
assert optimized_model.graph.node[0].op_type == "Add"
assert optimized_model.graph.output[0].name == "Z"
# Add
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == 'Add'
assert optimized_model.graph.node[3].attribute[0].g.output[1].name == '_Z2'
assert len(list(optimized_model.graph.initializer)) == 1
# type: () -> None
def test_eliminate_unused_initializer_no_eliminate_output(self):
add = helper.make_node("Add", ["X", "Y"], ["Z"])
graph = helper.make_graph(
[add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor("A", TensorProto.FLOAT,
dims=(2, 3),
vals=np.random.randn(2, 3).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(
graph, ["eliminate_unused_initializer"])
assert len(list(optimized_model.graph.initializer)) == 1
assert "Z" in [o.name for o in optimized_model.graph.output]
def test_extract_constant_to_initializer(self): # type: () -> None
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
constant = helper.make_node("Constant", [], ["A"],
value=helper.make_tensor(
name="bias",
data_type=TensorProto.FLOAT,
dims=(16, 1, 1),
vals=np.random.randn(16).astype(np.float32).tolist()))
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, constant, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(
graph, ["extract_constant_to_initializer"])
self.assertEqual(len(optimized_model.graph.initializer), 1)
init = optimized_model.graph.initializer[0]
self.assertEqual(init.name, 'A')
self.assertEqual(init.dims, [16, 1, 1])
self.assertEqual(init.data_type, TensorProto.FLOAT)
self.assertEqual(
[n.op_type for n in optimized_model.graph.node], ['Conv', 'Add'])
def test_fuse_concats(self): # type: () -> None
nodes = [helper.make_node("Concat", ["A", "B", "C"], ["X"], axis=0),
helper.make_node("Concat", ["D", "E", "F"], ["Y"], axis=0),
helper.make_node("Concat", ["X", "G", "Y"], ["Z"], axis=0)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("D", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("E", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("F", TensorProto.FLOAT, (4, 3, 4)),
helper.make_tensor_value_info("G", TensorProto.FLOAT, (4, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (22, 3, 4))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_concats"], True) # two passes are needed to simplify the graph to its simplest state.
assert len(optimized_model.graph.node) == 1
assert len(optimized_model.graph.node[0].input) == 7
assert optimized_model.graph.node[0].input == [
"A", "B", "C", "G", "D", "E", "F"]
assert optimized_model.graph.node[0].op_type == "Concat"
def test_fuse_concats_different_axis(self): # type: () -> None
nodes = [helper.make_node("Concat", ["A", "B", "C"], ["X"], axis=0),
helper.make_node("Concat", ["D", "E", "F"], ["Y"], axis=1),
helper.make_node("Concat", ["X", "Y"], ["Z"], axis=2)]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 9, 4)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 9, 4)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 9, 4)),
helper.make_tensor_value_info("D", TensorProto.FLOAT, (8, 3, 4)),
helper.make_tensor_value_info("E", TensorProto.FLOAT, (8, 3, 4)),
helper.make_tensor_value_info("F", TensorProto.FLOAT, (8, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (8, 9, 8))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_concats"])
assert optimized_model.graph == graph
def test_fuse_transpose(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2]),
helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1]),
helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_X"], ["_Y2"], perm=[1, 0, 2]),
helper.make_node("Transpose", ["_Y2"], ["_Y3"], perm=[2, 0, 1]),
helper.make_node("Transpose", ["_Y3"], ["_Y4"], perm=[2, 0, 1])],
[(TensorProto.FLOAT, (2, 3, 4), "X")],
[(TensorProto.FLOAT, (2, 4, 3), "Y4")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 4, 3)),
helper.make_tensor_value_info("Y4", TensorProto.FLOAT, (4, 3, 2))])
original_model = helper.make_model(graph)
shape_inference.infer_shapes(original_model)
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
shape_inference.infer_shapes(optimized_model)
# Transpose, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
# Transpose
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
def test_fuse_transpose_default_graph_output(self): # type: () -> None
add = helper.make_node("Add", ["X", "Y"], ["A"])
trans1 = helper.make_node("Transpose", ["A"], ["B"])
trans2 = helper.make_node("Transpose", ["B"], ["C"])
graph = helper.make_graph(
[add, trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3))],
[helper.make_tensor_value_info("C", TensorProto.FLOAT, (2, 3))])
        # The existence of shape infos of graph outputs is checked in _optimized
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
def check_transpose(node): # type: (NodeProto) -> None
assert node.op_type != "Transpose"
self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
assert len(optimized_model.graph.node) == 1
def test_fuse_transpose_default(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"])
graph = helper.make_graph(
[trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 4))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
assert len(list(optimized_model.graph.node)) == 0
def test_fuse_transpose_default_no_fuse(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[0, 1, 2])
graph = helper.make_graph(
[trans1, trans2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (4, 3, 2))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_transposes"])
assert len(list(optimized_model.graph.node)) == 2
for node in optimized_model.graph.node:
assert node.op_type == "Transpose"
def test_fuse_transpose_into_gemm(self): # type: () -> None
nodes = [helper.make_node("Transpose", ["X"], ["A"], perm=[1, 0]),
helper.make_node("Transpose", ["Y"], ["B"], perm=[1, 0]),
helper.make_node("Gemm", ["A", "B", "C"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Transpose", ["_X"], ["_A"], perm=[1, 0]),
helper.make_node("Transpose", ["Y"], ["_B"], perm=[1, 0]),
helper.make_node("Gemm", ["_A", "_B", "C"], ["_Z2"])],
[(TensorProto.FLOAT, (2, 3), "X")],
[(TensorProto.FLOAT, (3, 5), "Z2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 2)),
helper.make_tensor_value_info("C", TensorProto.FLOAT, (3, 5))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (3, 5))])
optimized_model = self._optimized(graph, ["fuse_transpose_into_gemm"])
# Gemm, Constant (trip count), Constant (cond), Loop
assert len(list(optimized_model.graph.node)) == 4
assert optimized_model.graph.node[0].op_type == "Gemm"
# Gemm
assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == "Gemm"
def test_fuse_add_bias_into_conv_with_scalar_bias(self): # type: () -> None
nodes = [helper.make_node("Conv", ["X", "Y"], ["Z"]),
helper.make_node("Add", ["Z", "A"], ["B"])]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, ())],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
        # Unsqueeze, Constant, Tile, Conv
assert len(optimized_model.graph.node) == 4
assert optimized_model.graph.node[0].op_type == 'Unsqueeze'
assert optimized_model.graph.node[1].op_type == 'Constant'
assert optimized_model.graph.node[2].op_type == 'Tile'
assert optimized_model.graph.node[3].op_type == 'Conv'
def test_fuse_add_bias_into_conv_use_weight_shape(self): # type: () -> None
nodes = [helper.make_node("Conv", ["X", "Y"], ["Z"]),
helper.make_node("Add", ["Z", "A"], ["B"])]
# FIXME(daquexian): It looks like subgraph cannot get value info from parent subgraph
# nodes.extend(self._make_fake_loop_op(
# [helper.make_node("Conv", ["_X", "Y"], ["_Z"]),
# helper.make_node("Add", ["_Z", "A"], ["_B2"])],
# [(TensorProto.FLOAT, (1, 5, 3, 3), "X")],
# [(TensorProto.FLOAT, (1, 16, 1, 1), "B2")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 1, 1))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
# # Squeeze, Conv, Constant (trip count), Constant (condition), Loop
# assert len(list(optimized_model.graph.node)) == 5
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Squeeze'
assert optimized_model.graph.node[1].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
# # Squeeze, Conv
# assert len(optimized_model.graph.node[4].attribute[0].g.node) == 2
# assert optimized_model.graph.node[4].attribute[0].g.node[0].op_type == 'Squeeze'
# assert optimized_model.graph.node[4].attribute[0].g.node[1].op_type == 'Conv'
# # Output 1 since 0 is 'cond'
# assert optimized_model.graph.node[4].attribute[0].g.output[1].name == 'B2'
# type: () -> None
def test_fuse_add_bias_into_conv_use_weight_shape_with_tile(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 3
assert len(optimized_model.graph.value_info) == 1
assert optimized_model.graph.value_info[0].type.tensor_type.elem_type == TensorProto.INT64
assert len(
optimized_model.graph.value_info[0].type.tensor_type.shape.dim) == 1
assert optimized_model.graph.node[0].op_type == 'Constant'
assert optimized_model.graph.node[1].op_type == 'Tile'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
def test_fuse_add_bias_into_conv_use_conv_shape(self): # type: () -> None
sub = helper.make_node("Sub", ["M", "N"], ["Y"])
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[sub, conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"M", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info(
"N", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))
],
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(optimized_model.graph.node) == 3
assert optimized_model.graph.node[0].op_type == 'Sub'
assert optimized_model.graph.node[1].op_type == 'Squeeze'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(
optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
# type: () -> None
def test_fuse_add_bias_into_conv_use_move_constant(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
constant = helper.make_node("Constant", [], ["A"],
value=helper.make_tensor(
name="bias",
data_type=TensorProto.FLOAT,
dims=(16, 1, 1),
vals=np.random.randn(16).astype(np.float32).tolist()))
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, constant, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 1))],
value_info=[
helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(optimized_model.graph.node) == 3
assert optimized_model.graph.node[0].op_type == 'Constant'
assert optimized_model.graph.node[1].op_type == 'Squeeze'
assert optimized_model.graph.node[2].op_type == 'Conv'
assert optimized_model.graph.output[0].name == 'B'
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(
optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
# type: () -> None
def test_fuse_add_bias_into_conv_squeeze_1d_bias_no_fuse(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (3,))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 1, 3))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
# type: () -> None
def test_fuse_add_bias_into_conv_squeeze_3d_bias_no_fuse(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 3, 3))],
value_info=[
helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1)),
]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
# type: () -> None
def test_fuse_add_bias_into_conv_squeeze_4d_bias_no_fuse(self):
conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "A"], ["B"])
graph = helper.make_graph(
[conv, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT, (16, 5, 3, 3)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 3, 3))],
[helper.make_tensor_value_info(
"B", TensorProto.FLOAT, (1, 16, 3, 3))]
)
optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
assert len(list(optimized_model.graph.node)) == 2
assert optimized_model.graph.node[0].op_type == 'Conv'
assert optimized_model.graph.node[1].op_type == 'Add'
def test_fuse_matmul_add_bias_into_gemm(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (16,))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
def test_fuse_matmul_add_bias_into_gemm_2d_bias(self): # type: () -> None
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
# type: () -> None
def test_fuse_matmul_add_bias_into_gemm_2d_bias_same_shape(self):
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (32, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Gemm"
# type: () -> None
def test_fuse_matmul_add_bias_into_gemm_2d_bias_bcast_no_fuse(self):
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (16, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_matmul_add_bias_into_gemm_3d_matmul_no_fuse(self):
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4, 3)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (3, 3))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 3, 3))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_matmul_add_bias_into_gemm_3d_bias_no_fuse(self):
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
add = helper.make_node("Add", ["Z", "B"], ["A"])
graph = helper.make_graph(
[matmul, add],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (4, 1, 16))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (32, 16))]
)
# 3d bias for 2d matmul is not legal. So disable onnxruntime checking
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"], compare_result=False)
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_matmul_add_bias_into_gemm_multiple_use_no_fuse(self):
matmul = helper.make_node("MatMul", ["X", "Y"], ["Z"])
identity = helper.make_node("Identity", ["Z"], ["A1"])
add = helper.make_node("Add", ["Z", "B"], ["A2"])
graph = helper.make_graph(
[matmul, add, identity],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (32, 10)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (10, 16)),
helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16))],
[helper.make_tensor_value_info("A1", TensorProto.FLOAT, (32, 16)),
helper.make_tensor_value_info("A2", TensorProto.FLOAT, (32, 16))]
)
optimized_model = self._optimized(
graph, ["fuse_matmul_add_bias_into_gemm"])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_pad_into_conv_no_optional_value_opset10(self):
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 0, 1, 1]
def test_fuse_pad_into_conv_no_optional_value(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 0, 1, 1]
def test_fuse_pad_into_conv_with_optional_value(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads", "Constant_value"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info(
"Constant_value", TensorProto.FLOAT, ()),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True),
helper.make_tensor("Constant_value", TensorProto.FLOAT,
dims=(),
vals=np.array([0]).astype(np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
0, 0, 1, 1]
# type: () -> None
def test_fuse_pad_into_conv_with_nonzero_optional_value(self):
pad = helper.make_node(
"Pad",
["X", "Pads", "Constant_value"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info(
"Constant_value", TensorProto.FLOAT, ()),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True),
helper.make_tensor("Constant_value", TensorProto.FLOAT,
dims=(),
# non-zero Constant_value -> so no pad
vals=np.array([25]).astype(
np.float32).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_1d_opset10(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 1, 0, 0, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 30)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 32))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [1, 1]
def test_fuse_pad_into_conv_1d(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 30)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (6,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 32))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(6,),
vals=np.array([0, 0, 1, 0, 0, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [1, 1]
# type: () -> None
def test_fuse_pad_into_conv_existing_conv_pad_opset10(self):
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node(
"Conv",
["P", "Y"],
["Z"],
pads=[1, 1, 0, 0]
)
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 4, 4))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
1, 1, 1, 1]
def test_fuse_pad_into_conv_existing_conv_pad(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node(
"Conv",
["P", "Y"],
["Z"],
pads=[1, 1, 0, 0]
)
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 4, 4))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert len(list(optimized_model.graph.node)) == 1
assert optimized_model.graph.node[0].op_type == "Conv"
assert optimized_model.graph.node[0].attribute[0].name == "pads"
assert list(optimized_model.graph.node[0].attribute[0].ints) == [
1, 1, 1, 1]
# type: () -> None
def test_fuse_pad_into_conv_pad_feature_no_fuse_opset10(self):
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 1, 0, 0, 0, 0, 0, 0]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 4, 3, 3)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_pad_feature_no_fuse(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 4, 3, 3)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 1, 0, 0, 0, 0, 0, 0]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_pad_into_conv_negative_pad_no_fuse_opset10(self):
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="constant",
pads=[0, 0, 0, 0, 0, 0, -1, -1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 4, 4)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
def test_fuse_pad_into_conv_negative_pad_no_fuse(self): # type: () -> None
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="constant"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 4, 4)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array(
[0, 0, 0, 0, 0, 0, -1, -1]).astype(np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_pad_into_conv_reflection_pad_no_fuse_opset10(self):
pad = helper.make_node(
"Pad",
["X"],
["P"],
mode="reflect",
pads=[0, 0, 0, 0, 0, 0, 1, 1]
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))]
)
optimized_model = self._optimized(
graph, ["fuse_pad_into_conv"], False, opset_imports=[helper.make_opsetid("", 10)])
assert optimized_model.graph == graph
# type: () -> None
def test_fuse_pad_into_conv_reflection_pad_no_fuse(self):
pad = helper.make_node(
"Pad",
["X", "Pads"],
["P"],
mode="reflect"
)
conv = helper.make_node("Conv", ["P", "Y"], ["Z"])
graph = helper.make_graph(
[pad, conv],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 2, 2)),
helper.make_tensor_value_info("Pads", TensorProto.INT64, (8,)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (1, 16, 1, 1))],
[helper.make_tensor("Pads", TensorProto.INT64,
dims=(8,),
vals=np.array([0, 0, 0, 0, 0, 0, 1, 1]).astype(
np.int64).tobytes(),
raw=True)])
optimized_model = self._optimized(graph, ["fuse_pad_into_conv"])
assert optimized_model.graph == graph
def test_fuse_consecutive_squeezes(self): # type: () -> None
nodes = [helper.make_node("Squeeze", ["X", "X_axes"], ["Y"]),
helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])]
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Squeeze", ["_X", "X_axes"], ["_Y"]),
helper.make_node("Squeeze", ["_Y", "Y_axes"], ["_Z2"])],
[(TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9), "X")],
[(TensorProto.FLOAT, (2, 3, 1, 8, 9), "Z2")]))
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', np.array([0, 4, 5], dtype=np.int64)),
('Y_axes', np.array([0, 3], dtype=np.int64))]
]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9)),
helper.make_tensor_value_info("X_axes", TensorProto.INT64, [3]),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, [2])],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (2, 3, 1, 8, 9))],
initializer=initializers)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
# Squeeze, Constant (trip count), Constant (cond), Loop
assert optimized_model.graph.node[0].op_type == "Squeeze"
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [0, 1, 4, 5, 6]
assert len(list(optimized_model.graph.node)) == 4
def test_fuse_consecutive_squeezes_default(self): # type: () -> None
squeeze1 = helper.make_node("Squeeze", ["X", "X_axes"], ["Y"])
squeeze2 = helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])
squeeze3 = helper.make_node("Squeeze", ["Z", "Z_axes"], ["A"])
nodes = [squeeze1, squeeze2, squeeze3]
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', np.array([0, 4, 5], dtype=np.int64)),
('Y_axes', np.array([0, 3], dtype=np.int64)),
('Z_axes', np.array([2], dtype=np.int64))]
]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9)),
helper.make_tensor_value_info("X_axes", TensorProto.INT64, [3]),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, [2]),
helper.make_tensor_value_info("Z_axes", TensorProto.INT64, [1])],
[helper.make_tensor_value_info(
"A", TensorProto.FLOAT, (2, 3, 8, 9))],
initializer=initializers)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [0, 1, 4, 5, 6, 7]
assert len(list(optimized_model.graph.node)) == 1
def test_fuse_consecutive_squeezes_random(self): # type: () -> None
x_shape = [1, 1, 1, 3, 4, 1, 6, 1, 1, 9]
s1_one_indices = [i for i, a in enumerate(x_shape) if a == 1]
s1_axes = np.random.choice(s1_one_indices,
size=np.random.randint(
low=1, high=len(s1_one_indices) - 1),
replace=False).astype(np.int64)
s2_x_shape = [a for i, a in enumerate(x_shape) if i not in s1_axes]
s2_one_indices = [i for i, a in enumerate(s2_x_shape) if a == 1]
s2_axes = np.array(s2_one_indices).astype(np.int64)
squeeze1 = helper.make_node("Squeeze", ["X", "X_axes"], ["Y"])
squeeze2 = helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', s1_axes),
('Y_axes', s2_axes)]
]
nodes = [squeeze1, squeeze2]
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, x_shape),
helper.make_tensor_value_info(
"X_axes", TensorProto.INT64, s1_axes.shape),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, s2_axes.shape)],
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, (3, 4, 6, 9))],
initializer=initializers
)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [0, 1, 2, 5, 7, 8]
assert len(list(optimized_model.graph.node)) == 1
def test_fuse_consecutive_squeezes_multi_uses(self): # type: () -> None
squeeze1 = helper.make_node("Squeeze", ["X", "X_axes"], ["Y"])
add = helper.make_node("Add", ["Y", "A"], ["Z2"])
squeeze2 = helper.make_node("Squeeze", ["Y", "Y_axes"], ["Z"])
initializers = [
helper.make_tensor(name, TensorProto.INT64,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('X_axes', np.array([0, 4, 5], dtype=np.int64)),
('Y_axes', np.array([0, 3], dtype=np.int64)), ]
]
graph = helper.make_graph(
[squeeze1, add, squeeze2],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 1, 2, 3, 1, 1, 1, 1, 8, 9)),
helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,)),
helper.make_tensor_value_info("X_axes", TensorProto.INT64, [3]),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, [2]),
],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 1, 8, 9)),
helper.make_tensor_value_info("Z2", TensorProto.FLOAT, (1, 2, 3, 1, 1, 8, 9))],
initializer=initializers
)
optimized_model = self._optimized(graph, ["fuse_consecutive_squeezes"])
assert optimized_model.graph.node[0].op_type == "Squeeze"
assert optimized_model.graph.node[2].op_type == "Squeeze"
assert optimized_model.graph.node[2].input[0] == "X"
assert len(list(optimized_model.graph.node)) == 3
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[0].input[1]:
assert list(to_array(init)) == [
0, 4, 5]
if init.name == optimized_model.graph.node[2].input[1]:
assert list(to_array(init)) == [
0, 1, 4, 5, 6]
def test_fuse_consecutive_softmax_log_axis(self): # type: () -> None
for axis in range(3):
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=axis)
log = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[softmax, log],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "LogSoftmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis
def test_fuse_consecutive_softmax_log_side_effect(self): # type: () -> None
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[softmax, log],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11)),
helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert graph == optimized_model.graph
# type: () -> None
def test_fuse_consecutive_softmax_log_multiple_out(self):
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
exp = helper.make_node("Exp", ["Z"], ["Z1"])
graph = helper.make_graph(
[softmax, log, exp],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11)),
helper.make_tensor_value_info("Z1", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["fuse_consecutive_log_softmax"])
assert len(optimized_model.graph.output) == 2
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.output[1].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[0].op_type == "LogSoftmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == 2
assert optimized_model.graph.node[1].op_type == "Exp"
def test_preserve_value_info(self): # type: () -> None
trans1 = helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])
trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1])
trans3 = helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])
graph = helper.make_graph(
[trans1, trans2, trans3],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
[helper.make_tensor_value_info("A", TensorProto.FLOAT, (2, 4, 3))])
vi = helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))
graph.value_info.extend([vi])
optimized_model = self._optimized(graph, ["nop"])
assert list(optimized_model.graph.value_info) == [vi]
assert len(list(optimized_model.graph.node)) == 3
def test_split(self): # type: () -> None
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['X'],
value=onnx.helper.make_tensor(
name='X',
data_type=TensorProto.FLOAT,
dims=[1],
vals=[5],
),
)
graph = helper.make_graph(
[node],
'test-optimize-split',
[],
[helper.make_tensor_value_info('X', TensorProto.FLOAT, (1,))])
init_model = self._optimized(graph, ['split_init'])
self.assertEqual(len(init_model.graph.node), 1)
self.assertEqual(len(init_model.graph.output), 1)
self.assertEqual(init_model.graph.node[0].op_type, 'Constant')
predict_model = self._optimized(graph, ['split_predict'])
self.assertEqual(len(predict_model.graph.node), 0)
self.assertEqual(len(predict_model.graph.input), 1)
self.assertEqual(predict_model.graph.input[0].name, 'X')
def test_lift_lex_loop(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
# 'lift_lexical_references' is legacy code and I don't know how it works.
# More errors occur if I make this loop op legal,
# so don't check legality here
nodes.extend(self._make_fake_loop_op(
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["Y"], ["_Y3"])],
[],
[(TensorProto.FLOAT, (5,), "Y2"),
(TensorProto.FLOAT, (5,), "Y3")],
check_legality=False))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
# "lift_lexical_references" pass produces a graph that does not conform to
# the ONNX spec. Disable checking.
optimized_model = self._optimized(
graph, ["lift_lexical_references"], compare_result=False)
assert len(optimized_model.graph.node) == 4
# body_graph, __control_inputs
assert len(optimized_model.graph.node[3].attribute) == 2
assert optimized_model.graph.node[3].attribute[1].name == "__control_inputs"
assert optimized_model.graph.node[3].attribute[1].strings[0] == b"X"
assert optimized_model.graph.node[3].attribute[1].strings[1] == b"Y"
def test_lift_lex_if(self): # type: () -> None
nodes = [helper.make_node("Identity", ["X"], ["Y"])]
nodes.extend(self._make_fake_if_op(
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["Y"], ["_Y3"])],
[helper.make_node("Identity", ["X"], ["_Y2"]),
helper.make_node("Identity", ["X"], ["_Y3"])],
[(TensorProto.FLOAT, (5,), "Y2"),
(TensorProto.FLOAT, (5,), "Y3")]))
graph = helper.make_graph(
nodes,
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
# "If" node now diverges from ONNX schema. Disable checking.
optimized_model = self._optimized(
graph, ["lift_lexical_references"], compare_result=False)
# Identity, Constant (condition), If
assert len(optimized_model.graph.node) == 3
# else_branch, then_branch, __control_inputs
assert len(optimized_model.graph.node[2].attribute) == 3
assert optimized_model.graph.node[2].attribute[2].name == "__control_inputs"
assert optimized_model.graph.node[2].attribute[2].strings[0] == b"X"
assert optimized_model.graph.node[2].attribute[2].strings[1] == b"Y"
def test_fuse_bn_into_conv_simple(self): # type: () -> None
for (tensor_type, np_type) in [(TensorProto.FLOAT, np.float32)]:
conv = helper.make_node("Conv", ["X", "W", "B"], ["Y"])
bn = helper.make_node("BatchNormalization", [
"Y", "scale", "b", "mean", "var"], ["Z"])
W = np.random.randn(3, 2, 5, 5).astype(np_type) + 2
B = np.random.randn(3,).astype(np_type) + 2
scale = np.random.randn(3,).astype(np_type) + 2
b = np.random.randn(3,).astype(np_type) + 2
mean = np.random.randn(3,).astype(np_type) + 2
var = np.abs(np.random.randn(3,).astype(np_type)) + 2
initializers = [
helper.make_tensor(name, tensor_type,
npa.shape, npa.tobytes(), raw=True)
for name, npa in [('W', W), ('B', B), ('scale', scale), ('b', b), ('mean', mean), ('var', var)]
]
graph = helper.make_graph(
[conv, bn],
"test",
[helper.make_tensor_value_info("X", tensor_type, (5, 2, 28, 28))],
[helper.make_tensor_value_info(
"Z", tensor_type, (5, 3, 24, 24))],
initializer=initializers,
value_info=[
helper.make_tensor_value_info(
"Y", tensor_type, (5, 3, 24, 24))
]
)
optimized_model = self._optimized(graph, ["fuse_bn_into_conv"])
self.assertEqual(len(optimized_model.graph.node), 1)
self.assertEqual(optimized_model.graph.node[0].op_type, 'Conv')
self.assertEqual(len(optimized_model.graph.initializer), 2)
new_W = numpy_helper.to_array(optimized_model.graph.initializer[0])
new_b = numpy_helper.to_array(optimized_model.graph.initializer[1])
f = scale / np.sqrt(var + 1e-5)
np.testing.assert_almost_equal((B - mean) * f + b, new_b)
np.testing.assert_almost_equal(
W * f[:, np.newaxis, np.newaxis, np.newaxis], new_W)
def _internal_test_deadend_elimination(self, fixed): # type: (bool) -> None
softmax = helper.make_node("Softmax", ["X"], ["Y"], axis=2)
log = helper.make_node("Log", ["Y"], ["Z"])
exp = helper.make_node("Exp", ["Z"], ["Z1"])
exp1 = helper.make_node("Log", ["Z"], ["Z2"])
exp2 = helper.make_node("Sqrt", ["Z1"], ["Z3"])
graph = helper.make_graph(
[softmax, log, exp, exp1, exp2],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7, 11))])
optimized_model = self._optimized(
graph, ["eliminate_deadend"], fixed)
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[0].op_type == "Softmax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == 2
assert optimized_model.graph.node[1].op_type == "Log"
def test_deadend_elimination_simple(self): # type: () -> None
self._internal_test_deadend_elimination(False)
def test_deadend_elimination_simple_fixed(self): # type: () -> None
self._internal_test_deadend_elimination(True)
def _get_argmax_output_shape(self, input_shape, axis, keepdims):
assert keepdims
output_shape = list(input_shape[:])
output_shape[axis] = 1
output_shape = tuple(output_shape)
return output_shape
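# Illustrative example (not part of the original tests): with input_shape=(5, 7, 11),
# axis=1 and keepdims=True, the helper above returns (5, 1, 11).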
# type: () -> None
def test_eliminate_nop_monotone_argmax_basic_no_node_axis(self):
input_shape = (5, 7, 11)
for node_name in ["Exp"]:
for axis in range(3):
node = helper.make_node(node_name, ["X"], ["Y"])
argmax = helper.make_node("ArgMax", ["Y"], ["Z"], axis=axis)
output_shape = self._get_argmax_output_shape(
input_shape, axis, True)
graph = helper.make_graph(
[node, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis
# type: () -> None
def test_eliminate_nop_monotone_argmax_basic_with_node_axis(self):
input_shape = (5, 7, 11)
for node_name in ["Softmax", "LogSoftmax"]:
for axis_n in range(3):
for axis_max in range(3):
node = helper.make_node(
node_name, ["X"], ["Y"], axis=axis_n)
argmax = helper.make_node(
"ArgMax", ["Y"], ["Z"], axis=axis_max)
output_shape = self._get_argmax_output_shape(
input_shape, axis_max, True)
graph = helper.make_graph(
[node, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
if axis_max == axis_n:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == axis_max
else:
assert optimized_model.graph == graph
# type: () -> None
def test_eliminate_nop_monotone_argmax_multiple_out(self):
input_shape = (5, 7, 11)
for node_name in ["Exp"]:
for axis in range(3):
node = helper.make_node(node_name, ["X"], ["Y"])
node2 = helper.make_node(node_name, ["Y"], ["Z1"])
argmax = helper.make_node("ArgMax", ["Y"], ["Z"], axis=axis)
argmax_output_shape = self._get_argmax_output_shape(
input_shape, axis, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, argmax_output_shape),
helper.make_tensor_value_info("Z1", TensorProto.FLOAT, input_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"])
assert optimized_model.graph == graph
# type: () -> None
def test_eliminate_nop_monotone_argmax_consecutive(self):
input_shape = (5, 7, 11)
# type: (GraphProto, ModelProto, bool, int) -> None
def _assertion(graph, optimized_model, axis_aligned, true_axis):
if axis_aligned:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[0].op_type == "ArgMax"
assert optimized_model.graph.node[0].attribute[0].name == "axis"
assert optimized_model.graph.node[0].attribute[0].i == true_axis
else:
assert optimized_model.graph == graph
# no axis X no axis test
for node_name_0 in ["Exp"]:
for node_name_1 in ["Exp"]:
for axis in range(3):
node = helper.make_node(node_name_0, ["X"], ["Y"])
node2 = helper.make_node(node_name_1, ["Y"], ["Y1"])
argmax = helper.make_node(
"ArgMax", ["Y1"], ["Z"], axis=axis)
output_shape = self._get_argmax_output_shape(
input_shape, axis, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
_assertion(graph, optimized_model, True, axis)
# no axis X axis test
for node_name_0 in ["Exp"]:
for node_name_1 in ["Softmax", "LogSoftmax"]:
for axis_0 in range(3):
for axis_1 in range(3):
node = helper.make_node(node_name_0, ["X"], ["Y"])
node2 = helper.make_node(
node_name_1, ["Y"], ["Y1"], axis=axis_0)
argmax = helper.make_node(
"ArgMax", ["Y1"], ["Z"], axis=axis_1)
output_shape = self._get_argmax_output_shape(
input_shape, axis_1, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7, 11))],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
_assertion(graph, optimized_model,
axis_0 == axis_1, axis_1)
# axis X axis test
for node_name_0 in ["Softmax", "LogSoftmax"]:
for node_name_1 in ["Softmax", "LogSoftmax"]:
for axis_0 in range(3):
for axis_1 in range(3):
for axis_2 in range(3):
node = helper.make_node(
node_name_0, ["X"], ["Y"], axis=axis_0)
node2 = helper.make_node(
node_name_1, ["Y"], ["Y1"], axis=axis_1)
argmax = helper.make_node(
"ArgMax", ["Y1"], ["Z"], axis=axis_2)
output_shape = self._get_argmax_output_shape(
input_shape, axis_2, True)
graph = helper.make_graph(
[node, node2, argmax],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, output_shape)])
optimized_model = self._optimized(
graph, ["eliminate_nop_monotone_argmax"], True)
if axis_0 == axis_1: # we can reduce both of the monotonic ops
_assertion(graph, optimized_model,
axis_1 == axis_2, axis_2)
elif axis_1 == axis_2: # we can reduce one of the monotonic ops
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 2
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.INT64
assert optimized_model.graph.node[-1].op_type == "ArgMax"
assert optimized_model.graph.node[-1].attribute[0].name == "axis"
assert optimized_model.graph.node[-1].attribute[0].i == axis_2
else: # we can't reduce anything
assert optimized_model.graph == graph
def test_eliminate_nop_dropout(self): # type: () -> None
node = helper.make_node("Dropout", ["X"], ["Y"])
node1 = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False)
# we don't want to eliminate the dropout in opset 12,
# even when it's an optional parameter (defaults to 0)
assert optimized_model.graph == graph
# type: () -> None
def test_eliminate_nop_dropout_opset11_graph_output(self):
node = helper.make_node("Log", ["X"], ["Y"])
node1 = helper.make_node("Dropout", ["Y"], ["Z"], ratio=0.0)
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False, opset_imports=[helper.make_opsetid("", 11)])
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "Log"
assert optimized_model.graph.output[0].name == 'Z'
def test_eliminate_nop_dropout_opset11(self): # type: () -> None
for ratio in [0.0, 0.5]:
node = helper.make_node("Dropout", ["X"], ["Y"], ratio=ratio)
node1 = helper.make_node("Log", ["Y"], ["Z"])
graph = helper.make_graph(
[node, node1],
"test",
[helper.make_tensor_value_info(
"X", TensorProto.FLOAT, (5, 7))],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5, 7))])
optimized_model = self._optimized(
graph, ["eliminate_nop_dropout"], False, opset_imports=[helper.make_opsetid("", 11)])
if ratio > 0.0:
assert optimized_model.graph == graph
else:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.node[0].op_type == "Log"
def test_fuse_reduction_unsqueeze(self): # type: () -> None
# type: (Tuple[int, ...], List[int], List[int], bool) -> Tuple[int, ...]
def _calculate_post_transform_shape(input_shape, reduction_axes, unsqueeze_axes, keepdim):
post_reduce_shape = None
if keepdim:
post_reduce_shape = tuple(
[(x if i not in reduction_axes else 1) for i, x in enumerate(input_shape)])
else:
post_reduce_shape = tuple(
[x for i, x in enumerate(input_shape) if i not in reduction_axes])
post_unsqueeze_shape = list(post_reduce_shape)
for ax in unsqueeze_axes:
post_unsqueeze_shape.insert(ax, 1)
return tuple(post_unsqueeze_shape)
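# Illustrative example (not part of the original tests): with input_shape=(5, 7, 9),
# reduction_axes=[1], unsqueeze_axes=[0] and keepdim=False, the reduce step yields (5, 9)
# and the unsqueeze step re-inserts a 1 at axis 0, giving (1, 5, 9).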
for reduction in ["ReduceL1", "ReduceL2", "ReduceLogSum",
"ReduceLogSumExp", "ReduceMax", "ReduceMean",
"ReduceMin", "ReduceProd", "ReduceSum", "ReduceSumSquare"]:
for axes1 in [[1], [1, 2], [2]]:
for axes2 in [[0], [0, 1], [1]]:
for keepdim in [False, True]:
input_shape = (5, 7, 9)
output_shape = _calculate_post_transform_shape(
input_shape, axes1, axes2, keepdim) # type: Tuple[int, ...]
axes2_arr = np.array(axes2, dtype=np.int64)
graph_input = [helper.make_tensor_value_info(
"X", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("Y_axes", TensorProto.INT64, axes2_arr.shape)]
graph_initializer = [
helper.make_tensor("Y_axes", TensorProto.INT64,
axes2_arr.shape, axes2_arr.tobytes(), raw=True)
]
if reduction in ("ReduceSum"):
axes1_arr = np.array(axes1, dtype=np.int64)
node = helper.make_node(
reduction, ["X", "X_axes"], ["Y"], keepdims=keepdim)
graph_input.append(
helper.make_tensor_value_info("X_axes", TensorProto.INT64, axes1_arr.shape))
graph_initializer.append(helper.make_tensor("X_axes", TensorProto.INT64,
axes1_arr.shape, axes1_arr.tobytes(), raw=True))
else:
node = helper.make_node(
reduction, ["X"], ["Y"], axes=axes1, keepdims=keepdim)
node1 = helper.make_node(
"Unsqueeze", ["Y", "Y_axes"], ["Z"])
graph = helper.make_graph(
[node, node1],
"test",
graph_input,
[helper.make_tensor_value_info(
"Z", TensorProto.FLOAT, output_shape)],
initializer=graph_initializer
)
optimized_model = self._optimized(
graph, ["fuse_consecutive_reduce_unsqueeze"], False)
if keepdim or axes1 != axes2:
assert optimized_model.graph == graph
else:
assert len(optimized_model.graph.output) == 1
assert len(optimized_model.graph.node) == 1
assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
assert optimized_model.graph.node[-1].op_type == reduction
if reduction in ("ReduceSum"):
for init in optimized_model.graph.initializer:
if init.name == optimized_model.graph.node[-1].input[1]:
assert list(to_array(init)) == axes1
else:
assert optimized_model.graph.node[-1].attribute[0].name == "axes"
assert optimized_model.graph.node[-1].attribute[0].ints == axes1
optimized_output_shape = tuple(
x.dim_value for x in optimized_model.graph.output[0].type.tensor_type.shape.dim)
assert optimized_output_shape == output_shape
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_fasterrcnn_fpn(self): # type: () -> None
model = tv.models.detection.fasterrcnn_resnet50_fpn(pretrained=False)
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
with io.BytesIO() as f:
torch.onnx.export(model, x, f, opset_version=11)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
# maskrcnn is only supported in opset 11 and higher
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_maskrcnn_fpn_opset11(self): # type: () -> None
model = tv.models.detection.maskrcnn_resnet50_fpn(pretrained=False)
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
with io.BytesIO() as f:
torch.onnx.export(model, x, f, opset_version=11)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
# keypointrcnn is only supported in opset 11 and higher
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_keypointrcnn_fpn(self): # type: () -> None
model = tv.models.detection.keypointrcnn_resnet50_fpn(pretrained=False)
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
with io.BytesIO() as f:
torch.onnx.export(model, x, f, opset_version=11)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_shufflenet_v2(self): # type: () -> None
model = tv.models.shufflenet_v2_x1_0(pretrained=False)
x = torch.rand(1, 3, 224, 224)
with io.BytesIO() as f:
torch.onnx.export(model, x, f)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_mnasnet(self): # type: () -> None
model = tv.models.mnasnet1_0(pretrained=False)
x = torch.rand(1, 3, 224, 224)
with io.BytesIO() as f:
torch.onnx.export(model, x, f)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
@unittest.skipUnless(has_tv, "This test needs torchvision")
def test_torchvision_deeplabv3(self): # type: () -> None
model = tv.models.segmentation.deeplabv3_resnet50(pretrained=False)
x = torch.rand(1, 3, 224, 224)
with io.BytesIO() as f:
torch.onnx.export(model, x, f)
model = onnx.load_model_from_string(f.getvalue())
self._optimized(model, onnxoptimizer.get_fuse_and_elimination_passes(), fixed_point=True)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "46319943/arcpy_demo",
"score": 2
} |
#### File: 46319943/arcpy_demo/csv_poi_to_clipped_shp.py
```python
import os
from os import listdir, makedirs
from os.path import join, basename, splitext, isfile, exists
import glob
import arcpy
import xlrd
# ArcPy workspace path; all later paths are relative to this path
WORKSPACE = r'D:\Document\ArcMapDemo\data00_416after'
# arcpy.env.workspace = WORKSPACE
# Folder with the administrative-division (boundary) shapefiles
DISTRICT_FOLDER = 'China'
# CSV data folders
CSV_FOLDER = ['RentPrice_Jan', 'ResoldPrice_Jan']
# POI data folder
POI_FOLDER = 'POI'
# Temporary file folder
TEMP = 'temp'
# Create the temporary folders for the CSV data
for temp in [join(WORKSPACE, TEMP, folder) for folder in CSV_FOLDER]:
if not exists(temp):
makedirs(temp)
# Create the temporary folders for the POI data
for temp in [join(WORKSPACE, TEMP, POI_FOLDER, folder) for folder in listdir(join(WORKSPACE, POI_FOLDER))]:
if not exists(temp):
makedirs(temp)
# Field names of the corresponding X/Y coordinate columns
X_FIELD = 'Lon84'
Y_FIELD = 'Lat84'
X_FIELD_POI = '经度_wgs84'
Y_FIELD_POI = '纬度_wgs84'
# Collect every SHP file name and its path, e.g. {Beijing: '...path...'}
feature_paths = {splitext(basename(filepath))[0].strip(): filepath
for filepath in glob.glob(join(WORKSPACE, DISTRICT_FOLDER, '*.shp'))}
# Create the WGS84 spatial reference object
spatial_ref = arcpy.SpatialReference(4326)
def clip_csv(restart=False):
for folder in CSV_FOLDER:
temp_path = join(WORKSPACE, TEMP, folder)
for filepath in glob.glob(join(WORKSPACE, folder, '*.csv')):
filename = splitext(basename(filepath))[0]
output_filepath = join(temp_path, filename + '.shp')
print output_filepath
if exists(output_filepath):
if not restart:
print 'exist'
continue
arcpy.MakeXYEventLayer_management(
filepath, X_FIELD, Y_FIELD, filename + 'Event', spatial_ref)
arcpy.Delete_management(join(temp_path, filename + '.shp'))
arcpy.Clip_analysis(
filename + 'Event', feature_paths[filename], join(temp_path, filename + '.shp'))
arcpy.Delete_management(filename + 'Event')
# Three ways to save the XY event layer directly; the latter two are buggy in ArcGIS 10.3 and below
# arcpy.FeatureToPoint_management(
# filename + 'Event', join(temp_path, filename + '.shp'))
# arcpy.DeleteField_management(
# join(temp_path, filename + '.shp'), 'ORIG_FID')
# arcpy.FeatureClassToFeatureClass_conversion(filename + 'Event', join(WORKSPACE, TEMP), filename)
# arcpy.CopyFeatures_management(filename + 'Event', join(WORKSPACE, TEMP, filename))
def clip_poi(restart=False):
for city in listdir(join(WORKSPACE, POI_FOLDER)):
temp_path = join(WORKSPACE, TEMP, POI_FOLDER, city)
for filepath in glob.glob(join(WORKSPACE, POI_FOLDER, city, '*.xlsx')):
filename = splitext(basename(filepath))[0]
output_filepath = join(temp_path, filename + '.shp')
print output_filepath
if exists(output_filepath):
if not restart:
print 'exist'
continue
sheet_name = ExcelHasRow(filepath)
if not sheet_name:
print 'null row, skip this file'
continue
arcpy.MakeXYEventLayer_management(
filepath + '/' + sheet_name + '$', X_FIELD_POI, Y_FIELD_POI, filename + 'Event', spatial_ref)
arcpy.Delete_management(join(temp_path, filename + '.shp'))
# TODO: after clipping, some POI layers lose every feature and the resulting SHP is empty; keep such files or not?
arcpy.Clip_analysis(
filename + 'Event', feature_paths[city], join(temp_path, filename + '.shp'))
arcpy.Delete_management(filename + 'Event')
def ExcelHasRow(filepath):
workxls = xlrd.open_workbook(filepath)
sheet_name = workxls.sheet_names()[0]
worksheet = workxls.sheet_by_name(sheet_name)
if worksheet.nrows > 1:
return sheet_name
else:
return False
if __name__ == "__main__":
# clip_csv()
clip_poi()
``` |
{
"source": "46319943/baidu_poi",
"score": 3
} |
#### File: 46319943/baidu_poi/region_search.py
```python
import geopandas as gpd
import pandas as pd
import difflib
from shapely.geometry.base import BaseGeometry
df_all = pd.read_json('https://geo.datav.aliyun.com/areas_v2/bound/all.json')
def get_region_gdf(region_name) -> gpd.GeoDataFrame:
match_list = difflib.get_close_matches(region_name, df_all['name'], n=1)
if len(match_list) == 0:
raise Exception('无法根据名称寻找到匹配的区域')
region_name = match_list[0]
region_adcode = df_all[df_all['name'] == region_name]['adcode'].values[0]
return gpd.read_file(f'https://geo.datav.aliyun.com/areas_v2/bound/{region_adcode}.json')
def get_reigon_geometry(region_name) -> BaseGeometry:
gdf = get_region_gdf(region_name)
return gdf.geometry.values[0]
if __name__ == '__main__':
get_reigon_geometry('武汉')
``` |
{
"source": "46319943/BusinessDistrict",
"score": 3
} |
#### File: 46319943/BusinessDistrict/geocode.py
```python
import pandas as pd
import json
from slab.geocode.geocode_async import geocode_dataframe_async
import asyncio
def main():
with open('business_district.json', encoding='UTF-8') as f:
bd_json = json.load(f)
result_list = list()
for province_object in bd_json:
province_name = province_object['name']
province_city_list = province_object['cities']
for city_object in province_city_list:
city_name = city_object['name']
city_county_list = city_object['counties']
for county_object in city_county_list:
county_name = county_object['name']
county_circle_list = county_object['circles']
for circle_object in county_circle_list:
circle_name = circle_object['name']
if '其他' in circle_name:
continue
result_list.append({
'province': province_name,
'city': city_name,
'county': county_name,
'circle': circle_name
})
df = pd.DataFrame(result_list)
df.to_csv('business_district.csv', index=False, encoding='UTF-8')
df = asyncio.run(geocode_dataframe_async(
df, '', ['province', 'city', 'county', 'circle']))
df.to_csv('geocode_result.csv', index=False, encoding='UTF-8')
main()
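# Structure of business_district.json as inferred from the loops above (illustrative only):
# [{"name": province, "cities": [{"name": city, "counties": [{"name": county,
#   "circles": [{"name": circle}, ...]}, ...]}, ...]}, ...]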
``` |
{
"source": "46319943/SLan-NLP",
"score": 3
} |
#### File: SLan-NLP/Archive/dynasty_stat.py
```python
import pandas as pd
from dianping_nlp import *
from slab.pickle_util import pickle_to_file, unpickle_from_file
from dynasty_extract import *
dynasty_ordered_list = ['现代', '清朝', '明朝', '元朝', '宋朝', '唐朝', '隋朝',
'魏晋南北朝', '三国', '汉代', '秦代', '春秋战国', '西周', '商代', '夏代', '黄帝', ]
def df_to_dummies(df: pd.DataFrame) -> pd.DataFrame:
dynasty_str_list = []
for loc_list in df['line_time_result'].values:
dynasty_str_list.append(
','.join(dynasty_extract(loc_list))
)
df['dynasty_str'] = dynasty_str_list
df_dummies = pd.concat([df, df['dynasty_str'].str.get_dummies(sep=',')], axis='columns')
return df_dummies
def dummies_word_count(df_dummies: pd.DataFrame, dynasty_name: str = '现代'):
return pd.Series(
[word
for line_result in df_dummies[df_dummies[dynasty_name] == 1]['line_result'].values
for word, tag in line_result
if len(word) > 1
]
).value_counts()
def dynasty_lda(df_dummies: pd.DataFrame, dynasty_name: str = '现代', topic_num=None):
words_ls = [
[
word for word, tag in line_result if len(word) > 1
] for line_result in df_dummies[df_dummies[dynasty_name] == 1]['line_result'].values
]
dictionary, corpus = dict_corpus_comment(
words_ls
)
if topic_num is None:
topic_num_list = range(3, 33, 2)
lda_coherence_list, coherence, lda, topic_num = lda_topic_sensitivity(corpus, dictionary, words_ls,
topic_num_list=topic_num_list)
# topic_num = topic_num_list[np.argmax(lda_coherence_list)]
pickle_to_file(list(zip(topic_num_list, lda_coherence_list)), f'lda_coherence_{dynasty_name}.pkl')
else:
lda, coherence = lda_coherence(corpus, dictionary, topic_num, words_ls)
lda.save(f'lda_{dynasty_name}_{topic_num}.model')
return lda, corpus, dictionary
def dynasty_dtm(df_dummies: pd.DataFrame, topic_num: int = None):
# Split the documents according to the time slices
word_slice_num = []
word_piece_total = []
for dynasty_name in dynasty_ordered_list[::-1]:
word_piece = [
[
word for word, tag in line_result if len(word) > 1
] for line_result in df_dummies[df_dummies[dynasty_name] == 1]['line_result'].values
]
word_slice_num.append(len(word_piece))
word_piece_total.extend(word_piece)
dictionary, corpus = dict_corpus_comment(word_piece_total)
# Determine the best number of topics
if topic_num is None:
topic_num_list = range(2, 123, 5)
lda_coherence_list, max_coherence, max_lda, max_num = lda_topic_sensitivity(corpus, dictionary,
word_piece_total,
topic_num_list=topic_num_list)
topic_num = topic_num_list[np.argmax(lda_coherence_list)]
pickle_to_file(list(zip(topic_num_list, lda_coherence_list)), f'coherence_{"全部朝代时间序列"}.pkl')
# Train the model
dtm_model = DtmModel('dtm-win64.exe', corpus, word_slice_num, num_topics=topic_num,
id2word=dictionary, initialize_lda=True,
lda_sequence_min_iter=30, lda_sequence_max_iter=100,
lda_max_em_iter=50
)
dtm_model.save(f'dtm_{"全部朝代时间序列"}_{topic_num}.model')
# Get the dominant topic of each document
topic_index_list = np.argmax(dtm_model.gamma_, axis=1)
for index, dynasty_name in enumerate(dynasty_ordered_list[::-1]):
slice_num = word_slice_num[index]
df_dummies.loc[df_dummies[dynasty_name] == 1, dynasty_name + 'topic_index'] = topic_index_list[0:slice_num]
topic_index_list = topic_index_list[slice_num:]
pickle_to_file(df_dummies, f'df_{"全部朝代时间序列"}_{topic_num}.pkl')
if __name__ == '__main__':
df = pd.DataFrame(unpickle_from_file('df.pkl'))
df_dummies = df_to_dummies(df)
# for dynasty_name in dynasty_ordered_list:
# dynasty_lda(df_dummies, dynasty_name)
# dynasty_lda(df_dummies, '秦代', 4)
dynasty_dtm(df_dummies, )
print()
```
#### File: lib/slab_nlp/base.py
```python
from matplotlib import pyplot as plt
from pathlib import Path
import numpy as np
import pandas as pd
from slab_utils.pickle_util import pickle_to_file, unpickle_from_file
# Configure matplotlib fonts so that Chinese characters render correctly
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams.update({'font.size': 22})
from gensim import corpora
def word_segment_list_to_dictionary_corpus(word_segment_list):
'''
Build a gensim dictionary and a bag-of-words corpus from a list of word segments
:param word_segment_list:
:return:
'''
# Build the dictionary
dictionary = corpora.Dictionary(word_segment_list)
# Convert each document to a bag-of-words vector via the dictionary
corpus = [dictionary.doc2bow(word_segment) for word_segment in word_segment_list]
return dictionary, corpus
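# Illustrative usage (a sketch, not part of the original module): each document becomes a
# list of (token_id, count) pairs, e.g.
#   dictionary, corpus = word_segment_list_to_dictionary_corpus([['topic', 'model'], ['model']])
# yields a two-token dictionary and corpus[1] == [(token_id_of_'model', 1)].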
```
#### File: lib/slab_nlp/topic_bert.py
```python
from bertopic import BERTopic
from sentence_transformers import SentenceTransformer
from umap import UMAP
from hdbscan import HDBSCAN
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram
import pkuseg
import networkx as nx
from .base import *
from typing import List
def plot_dendrogram(model, **kwargs):
# Create linkage matrix and then plot the dendrogram
# create the counts of samples under each node
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
for child_idx in merge:
if child_idx < n_samples:
current_count += 1 # leaf node
else:
current_count += counts[child_idx - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack([model.children_, model.distances_,
counts]).astype(float)
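# Columns of linkage_matrix are [child_a, child_b, merge_distance, n_samples_under_node],
# which is the input format expected by scipy.cluster.hierarchy.dendrogram.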
# Plot the corresponding dendrogram
dendrogram(linkage_matrix, **kwargs)
return linkage_matrix
class BertTopicSLab(BERTopic):
def __init__(self, docs: List[str]):
# Initialize the parameters of every sub-model
self.docs = docs
self.segment_model = pkuseg.pkuseg(postag=True)
self.sentence_model = SentenceTransformer("stsb-xlm-r-multilingual", device="cpu")
self.umap_model = UMAP(n_neighbors=15, n_components=10, min_dist=0.0, metric='cosine')
self.hdbscan_model = HDBSCAN(min_cluster_size=5, metric='euclidean', cluster_selection_method='eom',
prediction_data=True)
self.vectorizer_model = CountVectorizer(tokenizer=lambda text: [
word for word, tag in self.segment_model.cut(text) if len(word) > 1
], token_pattern=None)
# Call the parent class constructor
super(BertTopicSLab, self).__init__(
embedding_model=self.sentence_model,
# umap_model=umap_model,
# hdbscan_model=hdbscan_model,
vectorizer_model=self.vectorizer_model,
)
# sentence to vector and reduce dimension
self.sentence_embeddings = self.sentence_model.encode(self.docs)
self.umap_embeddings = UMAP(n_neighbors=15,
n_components=5,
min_dist=0.0,
metric='cosine').fit(self.sentence_embeddings).transform(self.sentence_embeddings)
def hierarchical_model(self):
model = AgglomerativeClustering(distance_threshold=0, n_clusters=None).fit(self.umap_embeddings)
self.hierarchical_linkage_matrix = plot_dendrogram(model, truncate_mode='level', p=3)
plt.savefig("hierarchical.png", format="PNG")
self.hierarchical_distance = model.distances_
return
def hierarchical_compare(self, distance1, distance2, sample_size=5):
distance_min = min(distance1, distance2)
distance_max = max(distance1, distance2)
# a smaller distance threshold gives more clusters
model_large = AgglomerativeClustering(distance_threshold=distance_min, n_clusters=None)
model_large.fit(self.umap_embeddings)
# a larger distance threshold gives fewer clusters
model_small = AgglomerativeClustering(distance_threshold=distance_max, n_clusters=None)
model_small.fit(self.umap_embeddings)
df = pd.DataFrame({'Document': self.docs, 'Topic': model_large.labels_})
self._update_topic_size(df)
self._extract_topics(df)
self.get_topic_info()
for cluster_index in range(model_small.n_clusters_):
mapping_from_index_list = np.unique(
model_large.labels_[model_small.labels_ == cluster_index]
)
if len(mapping_from_index_list) > 1:
for mapping_from_index in mapping_from_index_list:
mapping_from_count = np.count_nonzero(model_large.labels_ == mapping_from_index)
mapping_from_docs = np.array(self.docs)[model_large.labels_ == mapping_from_index]
mapping_from_docs_chioce = np.random.choice(mapping_from_docs, sample_size)
print(f'from cluster {mapping_from_index}({mapping_from_count}):\n')
print(''.join(mapping_from_docs_chioce))
print(self.get_topic(mapping_from_index))
print()
mapping_to_count = np.count_nonzero(model_small.labels_ == cluster_index)
print(f'to cluster {cluster_index}({mapping_to_count})')
df = pd.DataFrame({'Document': self.docs, 'Topic': model_small.labels_})
self._update_topic_size(df)
self._extract_topics(df)
print(self.get_topic(cluster_index))
# print(
# f'{", ".join([str(mapping_from_index) + "(" + str(np.count_nonzero(model_large.labels_ == mapping_from_index)) + ")" for mapping_from_index in mapping_from_index_list])} -> {cluster_index}'
# )
def model(self):
umap_embeddings = self.umap_embeddings
topic_model = self
docs = self.docs
docs = [doc for doc in docs if len(doc) > 10]
topics, _ = topic_model.fit_transform(docs, self.sentence_embeddings)
topic_model.visualize_topics().write_html("bert_topic_vis.html")
df = pd.DataFrame(
{'text': docs, 'topic': topics}
)
df = df.reset_index()
G = nx.Graph()
G.add_nodes_from(df.index.tolist())
distance_matrix = pairwise_distances(umap_embeddings, metric='minkowski', p=2)
for row in range(distance_matrix.shape[0]):
for column in range(distance_matrix.shape[1]):
if row >= column:
continue
distance = distance_matrix[row, column]
if topics[row] == -1 or topics[column] == -1:
continue
if topics[row] == topics[column]:
continue
if distance < 0.1:
G.add_edge(row, column, weight=distance)
print(f'add edge {row} {column}')
from pyvis.network import Network
net = Network(notebook=True)
net.from_nx(G)
net.show('net.html')
print()
```
#### File: lib/slab_nlp/topic_dtm.py
```python
from gensim.models.wrappers.dtmmodel import DtmModel
from .segmentation import *
from .base import *
from .topic_lda import LdaModelSLab
import math
class DtmlModelSLab():
def __init__(self,
namespace: str,
docs: List[str],
time_slice: List[int]):
self.namespace = namespace
Path(namespace).mkdir(exist_ok=True, parents=True)
self.docs = docs
self.time_slice = time_slice
self.dictionary = None
self.corpus = None
self.topic_num = None
self.topic_index_list = None
self.dtm_model = None
def model(self,
topic_num_best: int = None,
topic_num_list: List[int] = range(2, 22, 2)):
docs = self.docs
time_slice = self.time_slice
pkuseg = PKUSegment()
docs_segmented = list()
word_segment_list = list()
tag_segment_list = list()
time_slice_segmented = list()
time_doc_count_accumulate = 0
for time_doc_count in time_slice:
doc_list_part, word_segment_list_part, tag_segment_list_part = pkuseg.segment_docs(
docs[time_doc_count_accumulate: time_doc_count_accumulate + time_doc_count],
include_tag_list=['a', 'ad', 'j', 'l', 'n', 'ns', 'nt', 'nz', 'v', 'vd', 'vn'],
min_length=2
)
docs_segmented.extend(doc_list_part)
word_segment_list.extend(word_segment_list_part)
tag_segment_list.extend(tag_segment_list_part)
time_slice_segmented.append(len(word_segment_list_part))
time_doc_count_accumulate += time_doc_count
dictionary, corpus = word_segment_list_to_dictionary_corpus(word_segment_list)
self.dictionary = dictionary
self.corpus = corpus
self.word_segment_list = word_segment_list
self.tag_segment_list = tag_segment_list
self.docs = docs_segmented
lda_model = LdaModelSLab('中共', docs_segmented)
lda_model.word_segment_list = word_segment_list
lda_model.corpus = corpus
lda_model.dictionary = dictionary
# Determine the best number of topics
if topic_num_best is None:
coherence_list, coherence_best, model_best, topic_num_best = lda_model.select_best_topic_num(topic_num_list)
# Train the model
self.dtm_model = DtmModel('dtm-win64.exe', corpus, time_slice_segmented, num_topics=topic_num_best,
id2word=dictionary, initialize_lda=True,
lda_sequence_min_iter=30, lda_sequence_max_iter=100,
lda_max_em_iter=50
)
# Get the dominant topic of each document
self.topic_index_list = np.argmax(self.dtm_model.gamma_, axis=1)
self.topic_num = topic_num_best
df = pd.DataFrame({'doc': docs_segmented, 'topic': self.topic_index_list})
self.df = df
return df
def save(self):
pickle_to_file(self, f'{self.namespace}/dtm_slab.pkl')
# self.dtm_model.save(f'{self.namespace}/dtm_{self.topic_num}.model')
# pickle_to_file(self.docs, f'{self.namespace}/docs.pkl')
# pickle_to_file(self.df, f'{self.namespace}/dtm_df.pkl')
@classmethod
def load(cls, namespace: str):
# docs = unpickle_from_file(f'{namespace}/docs.pkl')
# instance = cls(namespace, docs)
# instance.df = unpickle_from_file(f'{namespace}/dtm_df.pkl')
instance = unpickle_from_file(f'{namespace}/dtm_slab.pkl')
return instance
def dtm_draw_topic(self, topic_index: int, time_num: int = None, topn=10):
# Automatically detect the number of time slices
if time_num is None:
time_num = 0
while True:
try:
self.dtm_model.show_topic(topic_index, time_num, topn)
time_num += 1
except:
break
x = range(time_num)
# Collect the keywords over all time slices
word_set = set()
for time_index in range(time_num):
for prob, word in self.dtm_model.show_topic(topic_index, time_index, topn):
word_set.add(word)
word_stat = {word: [] for word in word_set}
# 在各个时间下,根据关键词获取频率
max_prob = 0
for time_index in range(time_num):
word_dict = {word: prob for prob, word in self.dtm_model.show_topic(topic_index, time_index, topn)}
for word in word_set:
if word in word_dict:
word_stat[word].append(word_dict[word])
if word_dict[word] > max_prob:
max_prob = word_dict[word]
else:
word_stat[word].append(0)
# Count the documents assigned to the current topic
current_topic_doc_num = pd.Series(np.argmax(self.dtm_model.gamma_, axis=1)).value_counts().sort_index()[
topic_index]
total_doc_num = len(np.argmax(self.dtm_model.gamma_, axis=1))
# Plot
subplot_num = len(word_stat)
subplot_col = 4
subplot_row = math.ceil(float(subplot_num) / subplot_col)
plt.figure(figsize=(4 * subplot_col, 4 * subplot_row))
plt.suptitle(
f'主题ID:{topic_index},共{self.dtm_model.num_topics}个主题,当前主题文本数量:{current_topic_doc_num}/{total_doc_num}')
for word_index, (word, prob_list) in enumerate(word_stat.items()):
plt.subplot(subplot_row, subplot_col, word_index + 1)
plt.plot(x, prob_list, label=word)
plt.xticks([*range(0, x[-1], 2), x[-1]])
plt.ylim(0, max_prob)
plt.legend()
plt.show()
plt.savefig(f'{self.namespace}/dtm_topic{topic_index}.png')
def print_topic_all_time_slice(self, topic_index, topn=10):
time_index = 0
while True:
try:
msg = self.dtm_model.print_topic(topic_index, time_index, topn)
print(msg)
except:
return
time_index += 1
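# Minimal usage sketch (illustrative; assumes `docs` is a list of raw Chinese documents and
# `time_slice` lists the number of documents per period, matching the constructor above):
#   dtm = DtmlModelSLab('namespace', docs, time_slice)
#   df = dtm.model(topic_num_best=10)   # or leave None to search topic_num_list
#   dtm.dtm_draw_topic(0)               # plot keyword trajectories of topic 0
#   dtm.save()                          # pickled to '<namespace>/dtm_slab.pkl'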
``` |
{
"source": "463758947/github",
"score": 2
} |
#### File: unit/check/go.py
```python
import os
import subprocess
def check_go(current_dir, temp_dir, test_dir):
if not os.path.exists(temp_dir + '/go'):
os.mkdir(temp_dir + '/go')
env = os.environ.copy()
env['GOPATH'] = current_dir + '/build/go'
env['GO111MODULE'] = 'auto'
try:
process = subprocess.run(
[
'go',
'build',
'-o',
temp_dir + '/go/app',
test_dir + '/go/empty/app.go',
],
env=env,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
)
if process.returncode == 0:
return True
except KeyboardInterrupt:
raise
except subprocess.CalledProcessError:
return None
``` |
{
"source": "465583030/tf_yolo_v3",
"score": 2
} |
#### File: 465583030/tf_yolo_v3/yolow_ncs.py
```python
import os, sys, logging
from argparse import ArgumentParser
from openvino.inference_engine import IEPlugin, IENetwork
from utils import *
from predict import *
logging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.INFO, stream=sys.stdout)
log=logging.getLogger()
class YolowNCS(object):
_ANCHORS = anchors_for_yolov3()
def __init__(self, model_name=None, num_requests=2):
if model_name is None:
model_name = 'ir/frozen_yolow'
self.model=model_name + '.xml'
self.weights=model_name + '.bin'
self.plugin=IEPlugin(device='MYRIAD')
log.info('Loading network files:\n\t{}\n\t{}'.format(self.model, self.weights))
self.net=IENetwork(model=self.model, weights=self.weights)
log.info('Preparing inputs')
self.input_blob=next(iter(self.net.inputs))
self.net.batch_size=1
log.info('Loading model to the plugin')
self.current_request_id = 0
self.next_request_id = 1
self.num_requests = num_requests
self.exec_net=self.plugin.load(network=self.net, num_requests=self.num_requests)
def predict(self, input_list, confidence_theshold=.6, iou_theshould=.5, async_mode=False):
batch_predictions = []
get_from = 0
input_size = input_list.shape[2]
input_dict = {self.input_blob: input_list}
request_handle = self.exec_net.requests[self.current_request_id]
if async_mode:
next_request_id = self.current_request_id + 1
if next_request_id == self.num_requests:
next_request_id = 0
else:
request_handle.wait()
next_request_id = self.current_request_id
self.exec_net.start_async(request_id=next_request_id,
inputs=input_dict)
if async_mode:
self.current_request_id = next_request_id
request_handle.wait()
pred_dict = request_handle.outputs
for preds in pred_dict.values():
preds = np.transpose(preds, [0, 2, 3, 1])
get_to = get_from + 3
batch_predictions.append(region_np(preds, self._ANCHORS[get_from:get_to], input_size))
get_from = get_to
batch_predictions = np.concatenate(batch_predictions, axis=1)
return predict(batch_predictions, confidence_theshold, iou_theshould)
# def predict(self, input_list, confidence_theshold=.6, iou_theshould=.5, async_mode=False):
# batch_predictions = []
# get_from = 0
# input_size = input_list.shape[2]
# input_dict = {self.input_blob: input_list}
# request_handle = self.exec_net.requests[self.current_request_id]
# if async_mode:
# request_id = self.next_request_id
# else:
# request_handle.wait()
# request_id = self.current_request_id
# self.exec_net.start_async(request_id=request_id,
# inputs=input_dict)
# if async_mode:
# self.current_request_id, self.next_request_id = self.next_request_id, self.current_request_id
# request_handle.wait()
# pred_dict = request_handle.outputs
# for preds in pred_dict.values():
# preds = np.transpose(preds, [0, 2, 3, 1])
# get_to = get_from + 3
# batch_predictions.append(region_np(preds, self._ANCHORS[get_from:get_to], input_size))
# get_from = get_to
# batch_predictions = np.concatenate(batch_predictions, axis=1)
# return predict(batch_predictions, confidence_theshold, iou_theshould)
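# Minimal usage sketch (illustrative; assumes a Myriad/NCS device is present and the IR files
# 'ir/frozen_yolow.xml' / '.bin' exist; `frame_to_blob` is a hypothetical helper that resizes
# and transposes a frame to the NCHW float32 layout expected by the network):
#   net = YolowNCS(num_requests=2)
#   detections = net.predict(frame_to_blob(frame), confidence_theshold=.6, async_mode=True)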
``` |
{
"source": "465b/General-Ecosystem-Modeling-Framework",
"score": 2
} |
#### File: General-Ecosystem-Modeling-Framework/tests/test_import.py
```python
import nemf
import pickle
def test_import(model_minimal_yml,model_minimal_pkl):
assert model_minimal_yml.compartment == model_minimal_pkl.compartment
assert model_minimal_yml.interactions == model_minimal_pkl.interactions
assert model_minimal_yml.configuration == model_minimal_pkl.configuration
``` |
{
"source": "46elks/elks-cli",
"score": 2
} |
#### File: elks-cli/elks/helpers.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import elkme.config
import elkme.elks
import json
import sys
import os
import requests
try: # Py3
from urllib.parse import urlencode
except ImportError: # Py2
from urllib import urlencode
from argparse import ArgumentParser
from datetime import datetime, timedelta
months = ['now', 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep',
'oct', 'nov', 'dec']
years = range(2011, datetime.now().year+1)
_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
_TIME_FORMAT_SEC = '%Y-%m-%dT%H:%M:%S'
_TIME_FORMAT_MIN = '%Y-%m-%dT%H:%M:%S'
timeformat = lambda t: t.strftime(_TIME_FORMAT)
def pretty_time(s):
try:
return datetime.strptime(s, _TIME_FORMAT).strftime(_TIME_FORMAT_MIN)
except ValueError:
return datetime.strptime(s, _TIME_FORMAT_SEC).strftime(_TIME_FORMAT_MIN)
def get_auth(args):
""" Get the elkme 46elks authentication details in a requests
friendly format """
conf = read_conf(args)
auth = (conf.get('username'), conf.get('password'))
if args.subaccount:
elksconn = elkme.elks.Elks(auth, api_url = get_api_url(args))
url = '/Subaccounts/%s' % args.subaccount
subaccount = elksconn.query_api(endpoint=url)
subaccount = json.loads(subaccount)
auth = (args.subaccount, subaccount['secret'])
return auth
def get_api_url(args):
""" Read the config and look for custom api_url in the config file """
return read_conf(args).get('api_url')
def read_conf(args):
""" Read the config file specified in the arguments or the default
config file for the `elkme` application into a dictionary
"""
if args.configfile:
conffile = os.path.expanduser(args.configfile)
else:
conffile = elkme.config.default_config_location()
conf = elkme.config.read_config(conffile)
return conf
def open_elksconn(args):
""" Create a connection class to 46elks and return it """
return elkme.elks.Elks(get_auth(args), api_url = get_api_url(args))
def elks_download_media(args, endpoint):
elksconn = open_elksconn(args)
url = elksconn.api_url % endpoint
res = requests.get(
url,
auth = get_auth(args)
)
return res.content
def elks_store_media(args, endpoint, destination):
print('[Downloading...]')
image = elks_download_media(args, endpoint)
with open(destination, 'wb') as f:
f.write(image)
print('[Downloaded]')
def elksapi(args, endpoint, query = {}, data = None):
""" Access a specific endpoint for the 46elks API in a
object format. Supports fetching everything between
two dates and any number 1-10 000 elements"""
elksconn = open_elksconn(args)
try:
if args.limit:
query['limit'] = args.limit
elif args.month or args.year:
query['end'], query['start'] = format_date(args)
except AttributeError:
pass
if query.keys():
url = '%s?%s' % (endpoint, urlencode(query))
else:
url = endpoint
if data and args.donotpost:
raise Exception('Attempted POST request with donotpost flag. Aborting')
rv = elksconn.query_api(endpoint=url, data = data)
rv = json.loads(rv)
response = rv
if 'data' in rv:
rv['data'] = reversed(rv['data'])
if not 'limit' in query and 'end' in query:
rv['data'] = list(rv['data'])
while ('next' in response and
response['next'] < query.get('start',
timeformat(datetime.now()))):
query['start'] = response['next']
url = '%s?%s' % (endpoint, urlencode(query))
response = json.loads(elksconn.query_api(endpoint=url))
for item in response['data']:
rv['data'].insert(0, item)
return rv
def format_date(args):
""" Read the list of arguments and creates the date range for the
specified month/year
"""
date = datetime.now()
date = date.replace(hour=0, minute=0, second=0, microsecond=0)
if args.year:
date = date.replace(year=args.year)
if not args.month:
end = date.replace(month = 1, day = 1)
start = date.replace(
month = 12,
day = 31,
hour = 23,
minute = 59,
second = 59,
microsecond = 999999)
if args.month:
if args.month == months[0]: # 'now'
date = date.replace(day = 1)
else:
new_month = months.index(args.month)
# If month hasn't started yet this year, assume last year
if new_month > date.month and not args.year:
date = date.replace(year = date.year - 1)
date = date.replace(month=new_month, day = 1)
end = date
# Fetch last second of month
start = date.replace(month=date.month % 12 + 1) + timedelta(seconds=-1)
        # For December, the previous line wraps around into the previous year; fix it up
if date.month == 12:
start = start.replace(year=start.year + 1)
return (timeformat(end), timeformat(start))
def parser_inject_generics(parser):
parser.add_argument('--limit', type=int,
help='Set maximum number of items to fetch')
parser.add_argument('--month', choices=months,
help='Examine objects for a specific month')
parser.add_argument('--year', choices=years, type=int,
help='Examine objects for a specific year')
def input_yes_no(question, default=False):
hint = '[Y/n]' if default else '[y/N]'
answer = input('{} {} '.format(question, hint))
answer = answer.strip()
if answer == '':
return default
else:
if answer.lower().startswith('y'):
return True
else:
return False
```
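
As a hedged illustration (not part of the repository), the month-range logic above can be exercised with an `argparse.Namespace` standing in for the parsed CLI arguments; note that `format_date` returns the boundaries in the `(end, start)` order that `elksapi` feeds into the API query:

```python
# Hypothetical usage sketch for elks.helpers.format_date.
from argparse import Namespace
from elks.helpers import format_date

args = Namespace(month='mar', year=2017)
end, start = format_date(args)
# end   -> '2017-03-01T00:00:00.000000'  (first instant of the month)
# start -> '2017-03-31T23:59:59.000000'  (last second of the month)
```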
#### File: elks-cli/elks/__main__.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import sys
import argparse
import importlib
import elks.mods
import elks.__init__
import signal
VERSION = elks.__init__.__version__
modules = sorted([
'billing',
'images',
'calls',
'numbers',
'recordings',
'setup',
'sms',
'status',
'subaccounts',
'transactions'
])
modules_help = """\
Communication
numbers Manage your 46elks numbers
sms List and compose SMS
calls List and make voice calls
Media
recordings List and listen to recordings
images List and display image attachments
Account management
billing See the billing history of your 46elks account
subaccounts Manage your 46elks subaccounts
status Information about your 46elks account (including balance)
transactions See your transaction (payment) history
"""
def main(argv):
global modules
parser = argparse.ArgumentParser(prog='elks',
description=modules_help,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--version', action='store_true',
help='Display elks version')
parser.add_argument('-p', '--pretty', action='store_true',
help='Print human friendly numbers')
parser.add_argument('-v', '--verbose', action='store_true',
help='Print detailed information')
parser.add_argument('-c', '--config', dest='configfile',
help='Location of elks/elkme conffile'),
parser.add_argument('-a', '--subaccount',
help='Subaccount to use for the request')
parser.add_argument('--donotpost', action='store_true',
help='Will try to not do anything costly with your account')
subparsers = parser.add_subparsers(help='Commands',
dest='subparser_name')
for module in modules:
mod = importlib.import_module('.%s' % module, 'elks.mods')
try:
mod.parse_arguments(subparsers.add_parser(module))
except NotImplementedError as e:
print(e)
print('\nThat must be why we\'re not shipping elks yet')
print('You\'ve reached a feature which isn\'t implemented yet!')
args = parser.parse_args(argv)
if args.version:
version()
exit(0)
if args.subparser_name in modules:
mod = importlib.import_module('.%s' % args.subparser_name, 'elks.mods')
try:
mod.main(args)
except NotImplementedError as e:
print(e)
print('\nThat must be why we\'re not shipping elks yet')
print('You\'ve reached a feature which isn\'t implemented yet!')
except Exception as e:
exctype, value = sys.exc_info()[:2]
arguments = ''
if len(sys.argv) > 1:
arguments = map(lambda x: '"%s"' % x if ' ' in x else x,
sys.argv[1:])
arguments = ' '.join(arguments)
print('--8>-------------------------------------------------8<--',
file=sys.stderr)
print('Called %s with arguments `elks %s`' % (mod.__name__,
arguments),
file=sys.stderr)
print('%s:' % exctype.__name__, value, file=sys.stderr)
print(('==========\nThis is a bug in elks. Please report at '
'https://github.com/46elks/elks/issues\n'
'or directly to <EMAIL>'), file=sys.stderr)
print('--8>-------------------------------------------------8<--',
file=sys.stderr)
else:
parser.print_help()
sys.exit(2)
def version():
print('elks command line intermooface v%s' % VERSION)
print('2016-2017 46elks AB <hello<EMAIL>>')
def run():
argv = sys.argv[1:]
main(argv)
ctrlc = False
def ctrlc_handler(signal, frame):
global ctrlc
if ctrlc:
print('')
sys.exit(0)
else:
ctrlc = True
print('\nctrl+c detected, press again to force quit', file=sys.stderr)
if __name__ == '__main__':
signal.signal(signal.SIGINT, ctrlc_handler)
run()
```
#### File: elks/mods/numbers.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import argparse
from elks.helpers import elksapi, input_yes_no
import json
import readline
from time import sleep
import elkme.elks
def main(args):
if args.new:
allocate_numbers(args)
# raise NotImplementedError('Cannot allocate number at this point')
return
numbers = filter_numbers(args)
if not numbers:
return
if args.sms_url or args.mms_url or args.voice:
if not args.number:
print('Must select a specific number to update')
return
update_number(args, numbers[0])
elif args.deactivate and args.number:
for number in numbers:
deactivate(args, number)
elif args.deactivate:
print('Select a specific number to deactivate')
return
else:
numberinfo(args, numbers)
def allocate_numbers(args):
request = {}
capabilities = []
if args.number:
request['number'] = args.number
if args.country:
request['country'] = args.country
else:
print('Please enter the desired 2 letter country-code for the country')
print('where you wish to allocate your number.')
request['country'] = input('Country (2 letter CC) > ')
opt_sms = input_yes_no('Do you need SMS support?', True)
opt_mms = input_yes_no('Do you need MMS support?', False)
opt_voice = input_yes_no('Do you need voice support?', True)
if opt_sms:
capabilities.append('sms')
if opt_mms:
capabilities.append('mms')
if opt_voice:
capabilities.append('voice')
request['capabilities'] = ','.join(capabilities)
json_response = elksapi(args, 'numbers', data = request)
numberinfo(args, [json_response])
def filter_numbers(args):
numbers = elksapi(args, 'numbers')
numbers = numbers['data']
number_is_active = lambda n: not n.get('active', 'no') == 'no'
if args.number:
num = args.number
if ',' in args.number:
num = args.number.split(',')
num_filter = lambda n: (n['number'] in num or n['id'] in num)
numbers = list(filter(num_filter, numbers))
if len(numbers) > 0 and numbers[0]['id'] == args.number:
args.all = True # If matched by id, show even when deactivated
if args.country:
numbers = list(filter(lambda n: (n['country'].lower() ==
args.country.lower()), numbers))
if not args.all and not args.inactive:
numbers = list(filter(number_is_active, numbers))
if args.inactive:
numbers = list(filter(lambda n: not number_is_active(n), numbers))
if not numbers:
print('No numbers found.')
if not args.all:
print('Try again with `--inactive` to show inactive numbers')
return
return numbers
def update_number(args, number):
update = {}
if args.sms_url:
update['sms_url'] = args.sms_url
if args.mms_url:
update['mms_url'] = args.mms_url
if args.voice:
update['voice_start'] = args.voice
try:
response = elksapi(args,
endpoint='numbers/%s' % number['id'],
data=update)
except:
print('Something went wrong')
return
print('Updated %s' % number['number'])
def deactivate(args, number):
try:
response = elksapi(args,
endpoint='numbers/%s' % number['id'],
data={'active': 'no'})
except:
print('Something went wrong')
return
print('Deactivated number %s' % number['number'])
def numberinfo(args, numbers):
for number in numbers:
print(number['number'])
if not args.summary:
print('\tIdentifier: %s' % number.get('id'))
print('\tAllocated: %s' % number.get('allocated'))
if 'deallocated' in number:
print('\tDeallocated: %s' % number['deallocated'])
print('\tCapabilities: %s' % ", ".join(
number.get('capabilities', ['None'])
))
print('\tCountry: %s' % number.get('country', 'None').upper())
print('\tActive: %s' % number.get('active', 'Unknown'))
if 'capabilities' in number:
if 'sms' in number['capabilities']:
print('\tSMS URL: %s' % number.get('sms_url'))
if 'mms' in number['capabilities']:
print('\tMMS URL: %s' % number.get('mms_url'))
if 'voice' in number['capabilities']:
print('\tVoice start: %s' % number.get('voice_start'))
def parse_arguments(parser):
parser.add_argument('-a', '--all', action='store_true',
help='Show all numbers, even deactivated')
parser.add_argument('--inactive', action='store_true',
help='Show deactivated numbers only')
parser.add_argument('-s', '--summary', action='store_true',
help='Show only number')
parser.add_argument('number', nargs='?',
help='Select a specific number or number id')
parser.add_argument('--sms_url', '--sms',
help='Try to set a new SMS URL for the number')
parser.add_argument('--mms_url', '--mms',
help='Try to set a new MMS URL for the number')
parser.add_argument('--voice', '--voice_start',
help='Try to set an action to perform on voice start')
parser.add_argument('--deactivate', action='store_true',
        help='Deactivate the chosen number [WARNING. DESTRUCTIVE]')
parser.add_argument('--country', metavar='CC',
help='Limit selection to country with country code CC')
parser.add_argument('--new', action='store_true',
help='Try to activate a new number')
```
#### File: mods/sms/__init__.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import importlib
submodules = ['list', 'new']
def parse_arguments(parser):
parser.description = (
'Send and receive SMS'
)
subparsers = parser.add_subparsers(help='Sub-commands',
dest='sms_subparser')
for module in submodules:
mod = importlib.import_module('.%s' % module, 'elks.mods.sms')
mod.parse_arguments(subparsers.add_parser(module))
def main(args):
if args.sms_subparser in submodules:
mod = importlib.import_module('.%s' % args.sms_subparser,
'elks.mods.sms')
mod.main(args)
else:
print (
'The SMS Module\n\n',
'Sub-commands:\n',
'- new\n',
'- list\n\n',
'Handle SMS on your 46elks account'
)
```
#### File: elks/mods/status.py
```python
from __future__ import print_function
from elks.helpers import elksapi
from elks.formatting import kv_print, credits_to_currency
def main(args):
response = elksapi(args, 'me')
if not args.subaccount:
print_user(response)
else:
from elks.__main__ import main as entrypoint
command = ['-a', args.subaccount, 'subaccounts', args.subaccount]
if args.pretty:
command.insert(0, '-p')
entrypoint(command)
def parse_arguments(parser):
pass
def print_user(user):
print(user['displayname'], '|', user['email'], '|', user['mobilenumber'])
kv_print('Id:', user['id'], indentlevel=0)
if 'currency' in user:
kv_print('Credits:',
credits_to_currency(user.get('balance', 0), user['currency']),
indentlevel=0
)
kv_print('Cost Type:', user.get('costtype'))
if 'creditlimit' in user:
kv_print('Creditlimit:',
credits_to_currency(user.get('creditlimit', 0),
user['currency']),
)
if 'creditalert' in user:
kv_print('Credit Alert level:',
credits_to_currency(user.get('creditalert', 0),
user['currency']),
)
kv_print('Invoicing:', user.get('invoicing'))
else:
print('Currency not set, please contact <EMAIL> to get started')
kv_print('Restricted:', user.get('restricted'))
``` |
{
"source": "46graus/pagarme-python",
"score": 2
} |
#### File: 46graus/pagarme-python/setup.py
```python
import os
import re
from setuptools import setup, find_packages
__description__ = 'Pagar.me Python'
__long_description__ = 'Python library for Pagar.me API'
__author__ = '<NAME>, <NAME>'
__author_email__ = '<EMAIL>'
__special_things__ = '<NAME>, <NAME>'
testing_extras = [
'pytest',
'pytest-cov',
]
def _find_version():
filename = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'pagarme/sdk.py'
)
with open(filename) as f:
data = f.read()
match = re.search(r"VERSION = '(.+)'", data)
return match.groups()[0]
__version__ = _find_version()
install_requires = open('requirements.txt').read().strip().split('\n')
setup(
name='pagarme-python',
version=__version__,
author=__author__,
author_email=__author_email__,
packages=find_packages(),
license='MIT',
description=__description__,
long_description=__long_description__,
special_things=__special_things__,
url='https://github.com/pagarme/pagarme-python',
keywords='Payment, pagarme',
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development',
'Environment :: Web Environment',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
],
tests_require=['pytest'],
extras_require={
'testing': testing_extras,
},
)
``` |
{
"source": "46zhang/SqlBuilder",
"score": 3
} |
#### File: SqlBuilder/sqlbuilder/builder.py
```python
import typing
class OPERATION:
OP_EQ = "="
OP_NE = "!="
OP_GE = ">="
OP_GT = ">"
OP_LE = "<="
OP_LT = "<"
OP_IN = "in"
OP_NOT_IN = "not in"
OP_LIKE = "like"
OP_NOT_LIKE = "not like"
OP_PREFIX = "prefix"
OP_NOT_PREFIX = "not prefix"
OP_SUFFIX = "suffix"
OP_NOT_SUFFIX = "not suffix"
OP_AND = "and"
OP_OR = "or"
OP_JOIN = "join"
OP_INNER_JOIN = "inner join"
OP_LEFT_JOIN = "left join"
OP_RIGHT_JOIN = "right join"
class Condition:
"""
Condition类,是条件的基本类,主要有三种类别
Condition(key,value):表示 key=value
Condition(key,value,ops):表示 key ops value ops可以为 OPERATION的前10种操作
Condition(key,value,ops,join_table):表示 ops(['join','left join','right join']) join_table on key=value
"""
def __init__(self, key: str, value: str or int, ops: str = None, join_table=None):
self.key = key
self.value = value
self.ops = '='
self.join_table = None
if ops:
self.ops = ops
if join_table:
self.join_table = join_table
def to_string(self) -> str:
if not self.join_table:
if isinstance(self.value, str):
return "{} {} '{}'".format(self.key, self.ops, self.value)
else:
return "{} {} {}".format(self.key, self.ops, self.value)
        # If join_table is set, this is a join condition and ops is one of ['join', 'left join', ...]
else:
return "{} {} on {} = {}".format(self.ops, self.join_table, self.key, self.value)
class SqlBuilder(object):
def __init__(self, table):
self.table = table
self.sql = ""
self.join_parameter = []
self.condition_and_parameter = []
self.condition_or_parameter = []
self.group_by_parameter = []
self.asc_parameter = []
self.desc_parameter = []
def join(self, table_list: typing.List[typing.Tuple[str, str, str]] = None) -> object:
if table_list:
for t in table_list:
c = Condition(join_table=t[0], key=t[1], value=t[2], ops=OPERATION.OP_JOIN)
self.join_parameter.append(c.to_string())
return self
def where(self,
keys: typing.List = None,
values: typing.List = None,
conditions: typing.List[typing.Tuple[str, str, str or int]] = None,
ops: str = None
) -> object:
if conditions:
            # Default to AND conditions
if not ops or ops == OPERATION.OP_AND:
                # Iterate and append the rendered condition strings to the list
for c in conditions:
self.condition_and_parameter.append(Condition(c[0], c[2], c[1]).to_string())
            # Add to the OR conditions
elif ops == OPERATION.OP_OR:
for c in conditions:
self.condition_or_parameter.append(Condition(c[0], c[2], c[1]).to_string())
        # If the parameters are passed directly as two lists of keys and values
        if keys and values:
            if not ops or ops == OPERATION.OP_AND:
                for k, v in zip(keys, values):
                    self.condition_and_parameter.append(Condition(k, v).to_string())
            if ops == OPERATION.OP_OR:
                for k, v in zip(keys, values):
                    self.condition_or_parameter.append(Condition(k, v).to_string())
return self
def group_by(self, columns: typing.List[str]):
self.group_by_parameter.append(",".join(columns))
def asc(self, column: typing.List[str] = None) -> object:
if column:
self.asc_parameter.append(",".join(column))
return self
def desc(self, column: typing.List[str] = None) -> object:
if column:
self.desc_parameter.append(",".join(column))
return self
def build(self) -> str:
return self.sql
class Select(SqlBuilder):
def __init__(self, table, column: str = None):
super().__init__(table)
self.colunm = "*"
if column:
self.colunm = column
def build(self) -> str:
self.sql = "SELECT {} FROM {} ".format(self.colunm, self.table)
# 判断是否存在表连接操作
if self.join_parameter:
self.sql += " ".join(self.join_parameter)
"""
添加条件,存在4种情况需要处理
1.同时存在or与 and条件 ,那么在使用"."拼接的时候会漏掉最开始的or,要再拼接第一个or
2.只存在and
3.只存在or ,2,3都只需要直接使用join函数将列表转为字符串即可
4.不存在and与or条件,不做处理
"""
if self.condition_and_parameter and self.condition_or_parameter:
self.sql += " WHERE {} ".format(
" and ".join(self.condition_and_parameter) + " or " + " or ".join(self.condition_or_parameter))
elif self.condition_and_parameter:
self.sql += " WHERE {} ".format(" and ".join(self.condition_and_parameter))
elif self.condition_or_parameter:
self.sql += " WHERE {} ".format(" or ".join(self.condition_or_parameter))
if self.group_by_parameter:
self.sql += " group by {}".format(" ".join(self.group_by_parameter))
if self.asc_parameter:
self.sql += " order by {} ".format("".join(self.asc_parameter))
if self.desc_parameter:
self.sql += " order by {} desc ".format("".join(self.desc_parameter))
self.sql += ";"
return self.sql
class Update(SqlBuilder):
def __init__(self, table, key_value: typing.List[typing.Tuple[str, str]]):
super().__init__(table)
self.key_value = [Condition(kv[0], kv[1]) for kv in key_value]
def build(self) -> str:
self.sql = "UPDATE {} SET {}".format(self.table, ",".join([i.to_string() for i in self.key_value]))
"""
添加条件,存在4种情况需要处理
1.同时存在or与 and条件 ,那么在使用"."拼接的时候会漏掉最开始的or,要再拼接第一个or
2.只存在and
3.只存在or ,2,3都只需要直接使用join函数将列表转为字符串即可
4.不存在and与or条件,不做处理
"""
if self.condition_and_parameter and self.condition_or_parameter:
self.sql += " WHERE {} ".format(
" and ".join(self.condition_and_parameter) + " or " + " or ".join(self.condition_or_parameter))
elif self.condition_and_parameter:
self.sql += " WHERE {} ".format(" and ".join(self.condition_and_parameter))
elif self.condition_or_parameter:
self.sql += " WHERE {} ".format(" or ".join(self.condition_or_parameter))
self.sql += ";"
return self.sql
class Delete(SqlBuilder):
def __init__(self, table):
super().__init__(table)
def build(self) -> str:
self.sql = "DELETE FROM {} ".format(self.table)
"""
添加条件,存在4种情况需要处理
1.同时存在or与 and条件 ,那么在使用"."拼接的时候会漏掉最开始的or,要再拼接第一个or
2.只存在and
3.只存在or ,2,3都只需要直接使用join函数将列表转为字符串即可
4.不存在and与or条件,不做处理
"""
if self.condition_and_parameter and self.condition_or_parameter:
self.sql += " WHERE {} ".format(
" and ".join(self.condition_and_parameter) + " or " + " or ".join(self.condition_or_parameter))
elif self.condition_and_parameter:
self.sql += " WHERE {} ".format(" and ".join(self.condition_and_parameter))
elif self.condition_or_parameter:
self.sql += " WHERE {} ".format(" or ".join(self.condition_or_parameter))
self.sql += ";"
return self.sql
class Insert(SqlBuilder):
def __init__(self, table, columns: typing.List[str], values: typing.List):
super().__init__(table)
self.columns = columns
self.values = []
for v in values:
            # Strings must be wrapped in single quotes
if isinstance(v, str):
self.values.append("'" + v + "'")
            # Numbers are converted to strings for concatenation, without quotes
else:
self.values.append(str(v))
def build(self) -> str:
self.sql = "INSERT INTO {} ({}) value ({})".format(self.table, " , ".join(self.columns),
" , ".join(self.values))
self.sql += ";"
return self.sql
```
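
A brief, hedged usage sketch (not part of the original repository) of the fluent builder API defined above; the table and column names are invented for illustration:

```python
# Hypothetical example of building SQL strings with the classes above.
from sqlbuilder.builder import Select, Insert, OPERATION

query = (
    Select("user", "id,name")
    .where(conditions=[("age", OPERATION.OP_GE, 18), ("city", OPERATION.OP_EQ, "Oslo")])
    .build()
)
# -> "SELECT id,name FROM user  WHERE age >= 18 and city = 'Oslo' ;"

insert = Insert("user", ["name", "age"], ["Alice", 30]).build()
# -> "INSERT INTO user (name , age) value ('Alice' , 30);"
```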
#### File: SqlBuilder/sqlorm/field.py
```python
class Field(object):
def __init__(self, name, column_type, primary_key, default):
self.name = name
self.column_type = column_type
self.primary_key = primary_key
self.default = default
def __str__(self):
return '<%s, %s:%s>' % (self.__class__.__name__, self.column_type, self.name)
class StringField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='varchar(256)'):
super().__init__(name, ddl, primary_key, default)
class BooleanField(Field):
def __init__(self, name=None, default=False):
super().__init__(name, 'boolean', False, default)
class IntegerField(Field):
def __init__(self, name=None, primary_key=False, default=0):
super().__init__(name, 'bigint', primary_key, default)
class FloatField(Field):
def __init__(self, name=None, primary_key=False, default=0.0):
super().__init__(name, 'real', primary_key, default)
class TextField(Field):
def __init__(self, name=None, default=None):
super().__init__(name, 'text', False, default)
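

# Hedged usage sketch (not part of the original module): field descriptors simply
# carry column metadata and render it through __str__.
if __name__ == '__main__':
    username = StringField(name='username', primary_key=True)
    age = IntegerField(name='age')
    print(username)  # <StringField, varchar(256):username>
    print(age)       # <IntegerField, bigint:age>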
``` |
{
"source": "47045039/deval",
"score": 2
} |
#### File: ios/utils/instruct_helper.py
```python
import subprocess
import traceback
import time
import sys
import random
from deval.device.std.error import DevalError
from deval.utils.snippet import reg_cleanup, on_method_ready, get_std_encoding
from deval.utils.logger import get_logger
from deval.utils.retry import retries
LOGGING = get_logger(__name__)
class InstructHelper(object):
"""
ForwardHelper class
or help run other Instruction
"""
proxy_process = 'iproxy'
def __init__(self):
self.subprocessHandle = []
reg_cleanup(self.teardown)
@on_method_ready('start')
def get_ready(self):
pass
def teardown(self):
# stop all process started by self
for sub_proc in self.subprocessHandle:
sub_proc.kill()
    # This function automatically generates a random local port
@retries(3)
def setup_proxy(self, remote_port):
local_port = random.randint(11111, 20000)
self.do_proxy(local_port, remote_port)
return local_port, remote_port
def do_proxy(self, local_port, remote_port):
"""
        Start port forwarding between the iOS device and the local machine
Returns:
None
"""
cmds = [self.proxy_process, str(local_port), str(remote_port)]
proc = subprocess.Popen(
cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
# something like port binding fail
time.sleep(0.5)
if proc.poll() is not None:
stdout, stderr = proc.communicate()
stdout = stdout.decode(get_std_encoding(sys.stdout))
stderr = stderr.decode(get_std_encoding(sys.stderr))
raise DevalError((stdout, stderr))
self.subprocessHandle.append(proc)
if __name__ == '__main__':
ins = InstructHelper()
ins.do_proxy(8100, 8100)
```
#### File: component/linux/screen.py
```python
from Xlib import display, X
from PIL import Image
from deval.component.std.screen import ScreenComponent
from deval.utils.cv import imwrite
from deval.utils.cv import pil_2_cv2
class LinuxScreenComponent(ScreenComponent):
def __init__(self, name):
self._name = name
def snapshot(self, filename="tmp.png"):
w, h = self.get_current_resolution()
dsp = display.Display()
root = dsp.screen().root
raw = root.get_image(0, 0, w, h, X.ZPixmap, 0xffffffff)
image = Image.frombytes("RGB", (w, h), raw.data, "raw", "BGRX")
image = pil_2_cv2(image)
if filename:
imwrite(filename, image)
return image
def get_current_resolution(self):
d = display.Display()
screen = d.screen()
w, h = (screen["width_in_pixels"], screen["height_in_pixels"])
return w, h
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
```
#### File: component/mac/input.py
```python
import Quartz
import time
from mss import mss
from deval.component.std.input import InputComponent
class MacInputComponent(InputComponent):
def __init__(self, name):
self._name = name
        self.screen = mss()  # needed for multi-monitor setups
self.monitor = self.screen.monitors[0]
self.singlemonitor = self.screen.monitors[1]
def click(self, pos, duration=0.05, button='left'):
pressID = [None, Quartz.kCGEventLeftMouseDown,
Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown]
releaseID = [None, Quartz.kCGEventLeftMouseUp,
Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp]
if button not in ("left", "right"):
raise ValueError("Unknow button: " + button)
if button == 'left':
button = 1
elif button == 'right':
button = 2
pos = list(pos)
pos[0] = pos[0] + self.monitor["left"]
pos[1] = pos[1] + self.monitor["top"]
theEvent = Quartz.CGEventCreateMouseEvent(
            None, pressID[button], (pos[0], pos[1]), button - 1)  # press event
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)  # post the event
        Quartz.CGEventSetType(theEvent, releaseID[button])  # switch to release event
        time.sleep(duration)
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)  # post the event
def swipe(self, p1, p2, duration=0.5, steps=5, fingers=1, button='left'):
pressID = [None, Quartz.kCGEventLeftMouseDown,
Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown]
releaseID = [None, Quartz.kCGEventLeftMouseUp,
Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp]
if button is "middle":
button = 3
elif button is "right":
button = 2
elif button is "left":
button = 1
else:
raise ValueError("Unknow button: " + button)
x1, y1 = p1
x2, y2 = p2
x1 = x1 + self.monitor["left"]
x2 = x2 + self.monitor["left"]
y1 = y1 + self.monitor["top"]
y2 = y2 + self.monitor["top"]
ratio_x = self.monitor["width"] / self.singlemonitor["width"]
ratio_y = self.monitor["height"] / self.singlemonitor["height"]
x2 = x1 + (x2 - x1) / ratio_x
y2 = y1 + (y2 - y1) / ratio_y
        sx = x2 - x1
        sy = y2 - y1
        stepx = sx / (duration * 10.0)  # split the swipe distance into steps for a smooth drag
        stepy = sy / (duration * 10.0)
moveevent = Quartz.CGEventCreateMouseEvent(
None, Quartz.kCGEventMouseMoved, (x1, y1), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, moveevent)
pressevent = Quartz.CGEventCreateMouseEvent(
None, pressID[button], (x1, y1), button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, pressevent)
duration = int(duration * 10.0)
for i in range(duration + 1):
drag = Quartz.CGEventCreateMouseEvent(
None, Quartz.kCGEventLeftMouseDragged, (x1 + stepx * i, y1 + stepy * i), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, drag)
time.sleep(0.1)
event = Quartz.CGEventCreateMouseEvent(
None, releaseID[button], (x2, y2), button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def double_tap(self, pos, button='left'):
pressID = [None, Quartz.kCGEventLeftMouseDown,
Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown]
releaseID = [None, Quartz.kCGEventLeftMouseUp,
Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp]
if button not in ("left", "right"):
raise ValueError("Unknow button: " + button)
pos = list(pos)
pos[0] = pos[0] + self.monitor["left"]
pos[1] = pos[1] + self.monitor["top"]
if button == 'left':
button = 1
else:
button = 2
theEvent = Quartz.CGEventCreateMouseEvent(
None, pressID[button], (pos[0], pos[1]), button - 1)
Quartz.CGEventSetIntegerValueField(
theEvent, Quartz.kCGMouseEventClickState, 2)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)
Quartz.CGEventSetType(theEvent, releaseID[button])
Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)
Quartz.CGEventSetType(theEvent, pressID[button])
Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)
Quartz.CGEventSetType(theEvent, releaseID[button])
Quartz.CGEventPost(Quartz.kCGHIDEventTap, theEvent)
def scroll(self, pos, direction="vertical", duration=0.5, steps=5):
if direction not in ('vertical', 'horizontal'):
raise ValueError(
'Argument `direction` should be one of "vertical" or "horizontal". Got {}'.format(repr(direction)))
pos = list(pos)
pos[0] = pos[0] + self.monitor["left"]
pos[1] = pos[1] + self.monitor["top"]
moveevent = Quartz.CGEventCreateMouseEvent(
None, Quartz.kCGEventMouseMoved, (pos[0], pos[1]), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, moveevent)
if direction == 'horizontal':
interval = float(duration) / (abs(steps) + 1)
if steps < 0:
for i in range(0, abs(steps)):
time.sleep(interval)
self._scroll(None, 1)
else:
for i in range(0, abs(steps)):
time.sleep(interval)
self._scroll(None, -1)
else:
interval = float(duration) / (abs(steps) + 1)
if steps < 0:
for i in range(0, abs(steps)):
time.sleep(interval)
self._scroll(1)
else:
for i in range(0, abs(steps)):
time.sleep(interval)
self._scroll(-1)
def _scroll(self, vertical=None, horizontal=None, depth=None):
# Local submethod for generating Mac scroll events in one axis at a time
def scroll_event(y_move=0, x_move=0, z_move=0, n=1):
for _ in range(abs(n)):
scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent(
None, # No source
Quartz.kCGScrollEventUnitLine, # Unit of measurement is lines
3, # Number of wheels(dimensions)
y_move,
x_move,
z_move)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
# Execute vertical then horizontal then depth scrolling events
if vertical is not None:
vertical = int(vertical)
if vertical == 0: # Do nothing with 0 distance
pass
elif vertical > 0: # Scroll up if positive
scroll_event(y_move=1, n=vertical)
else: # Scroll down if negative
scroll_event(y_move=-1, n=abs(vertical))
if horizontal is not None:
horizontal = int(horizontal)
if horizontal == 0: # Do nothing with 0 distance
pass
elif horizontal > 0: # Scroll right if positive
scroll_event(x_move=1, n=horizontal)
else: # Scroll left if negative
scroll_event(x_move=-1, n=abs(horizontal))
if depth is not None:
depth = int(depth)
if depth == 0: # Do nothing with 0 distance
pass
elif vertical > 0: # Scroll "out" if positive
scroll_event(z_move=1, n=depth)
else: # Scroll "in" if negative
scroll_event(z_move=-1, n=abs(depth))
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
```
#### File: component/mac/keyevent.py
```python
import time
from pynput.keyboard import Controller
from deval.component.std.keyevent import KeyEventComponent
class MacKeyEventComponent(KeyEventComponent):
def __init__(self, name):
self._name = name
self.keyboard = Controller()
def keyevent(self, keyname):
"""
Use pynput to simulate keyboard input
Parameters:
keyname - the keys
"""
waittime = 0.05
for c in keyname:
self.keyboard.press(key=c)
self.keyboard.release(key=c)
time.sleep(waittime)
def text(self, text, enter=False):
"""
Use pynput to simulate keyboard input
Parameters:
            text - the text to type
            enter - unused in this implementation
"""
waittime = 0.05
for c in text:
self.keyboard.press(key=c)
self.keyboard.release(key=c)
time.sleep(waittime)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
```
#### File: component/std/keyevent.py
```python
from deval.component.std.component import Component
class KeyEventComponent(Component):
def keyevent(self, keyname):
"""
If the target device is a PC, perform keyboard operations.
Otherwise, perform the action you specified, such as 'Home'.
Parameters:
keyname - a string refer to the keys.
"""
raise NotImplementedError
def text(self, text, enter=True):
"""
If the target device is a PC, perform keyboard operations.
Otherwise, type some text.
Parameters:
text - a string refer to the text.
enter - Whether to enter the Enter key.
"""
raise NotImplementedError
def __call__(self, keyname):
return self.keyevent(keyname)
```
#### File: device/ios/ios.py
```python
from deval.device.std.device import DeviceBase
from deval.component.ios.app import IOSAppComponent
from deval.component.ios.network import IOSNetworkComponent
from deval.component.ios.input import IOSInputComponent
from deval.component.ios.keyevent import IOSKeyEventComponent
from deval.component.ios.screen import IOSScreenComponent
from deval.component.ios.statue import IOSStatueComponent
from deval.component.ios.utils.iosfuncs import IOSProxy, check_platform_ios
class IOSDevice(DeviceBase):
def __init__(self, uri):
super(IOSDevice, self).__init__(uri)
kw = check_platform_ios(uri)
self.iosproxy = IOSProxy(**kw)
self.add_component(IOSAppComponent("app", self, uri))
self.add_component(IOSNetworkComponent("network", self, uri))
self.add_component(IOSInputComponent("input", self, uri))
self.add_component(IOSKeyEventComponent("keyevent", self, uri))
self.add_component(IOSScreenComponent("screen", self, uri))
self.add_component(IOSStatueComponent("statue", self, uri))
@property
def uuid(self):
try:
return self.iosproxy.addr
except AttributeError:
self.iosproxy = IOSProxy(
**check_platform_ios(self.uri))
return self.iosproxy.addr
```
#### File: device/std/error.py
```python
class BaseError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class DevalError(BaseError):
"""
This is Deval BaseError
"""
pass
class TargetNotFoundError(DevalError):
"""
This is TargetNotFoundError BaseError
When something is not found
"""
pass
class ScriptParamError(DevalError):
"""
This is ScriptParamError BaseError
When something goes wrong
"""
pass
class DeviceConnectionError(BaseError):
"""
device connection error
"""
DEVICE_CONNECTION_ERROR = r"error:\s*((device \'\w+\' not found)|(cannot connect to daemon at [\w\:\s\.]+ Connection timed out))"
pass
class PerformanceError(BaseError):
pass
```
#### File: deval/utils/compat.py
```python
import sys
from six import PY3
if PY3:
def decode_path(path):
return path
else:
def decode_path(path):
return path.decode(sys.getfilesystemencoding()) if path else path
```
#### File: deval/utils/parse.py
```python
from six.moves.urllib.parse import parse_qsl, urlparse
def parse_uri(uri):
"""
Parse the uri and return a dictionary containing the various parameters contained in the uri.
Parameters:
uri - an URI where to connect to device, e.g. `android://adbhost:adbport/serialno?param=value¶m2=value2`
Example:
* ``android:///`` # local adb device using default params
* ``android://adbhost:adbport/1234566?cap_method=javacap&touch_method=adb`` # remote device using custom params
* ``windows:///`` # local Windows application
* ``ios:///`` # iOS device
* ``linux:///`` # Linux device
* ``mac:///`` # Mac device
Returns:
A dictionary containing the various parameters contained in the uri.
"""
d = urlparse(uri)
platform = d.scheme
host = d.netloc
uuid = d.path.lstrip("/")
params = dict(parse_qsl(d.query))
if host:
params["host"] = host.split(":")
params["platform"] = platform.lower()
params["uuid"] = uuid
return params
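

if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): parse a remote
    # Android URI into its parameter dictionary.
    params = parse_uri("android://adbhost:5037/1234566?cap_method=javacap&touch_method=adb")
    # -> {'cap_method': 'javacap', 'touch_method': 'adb',
    #     'host': ['adbhost', '5037'], 'platform': 'android', 'uuid': '1234566'}
    print(params)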
``` |
{
"source": "471VE/Pre-trained-Faster-Grad-CAM-demo",
"score": 3
} |
#### File: src/PreTrainedFasterGradCAMDemo/demo.py
```python
import glob
import os
import os.path as osp
import cv2
import joblib
import numpy as np
from tensorflow.lite.python.interpreter import Interpreter
model_path = osp.abspath(osp.dirname(__file__)) + "/model/"
if os.path.exists(model_path):
# load csv
print("csv loading...")
channel_weight = np.loadtxt(model_path + "channel_weight.csv", delimiter=",")
channel_adress = np.loadtxt(model_path + "channel_adress.csv", delimiter=",", dtype="float")
channel_adress = channel_adress.astype(int)
vector_pa = np.loadtxt(model_path + "vector_pa.csv", delimiter=",")
kmeans = joblib.load(model_path + "k-means.pkl.cmp")
else:
raise Exception("The path to the model weights does not exist.")
path_to_images = osp.abspath(osp.dirname(__file__)) + "/hand_images/"
if os.path.exists(path_to_images):
image_names = glob.glob(f"{path_to_images}*.jpg") + glob.glob(f"{path_to_images}*.png")
else:
print("The path to the directory with the images of hands does not exist.")
def get_score_arc(pa_vector, test):
# cosine similarity
cos_similarity = cosine_similarity(test, pa_vector)
return np.max(cos_similarity)
def cosine_similarity(x1, x2):
if x1.ndim == 1:
x1 = x1[np.newaxis]
if x2.ndim == 1:
x2 = x2[np.newaxis]
x1_norm = np.linalg.norm(x1, axis=1)
x2_norm = np.linalg.norm(x2, axis=1)
cosine_sim = np.dot(x1, x2.T) / (x1_norm * x2_norm + 1e-10)
return cosine_sim
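

# Note (added commentary, not in the original script): cosine_similarity returns a
# (1, N) array of similarities -- identical vectors give ~1.0 and orthogonal vectors
# give ~0.0 -- which get_score_arc then reduces to a single score with np.max().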
def predict_faster_gradcam(channel, vector, img, kmeans, channel_weight, channel_adress):
channel_out = channel[0]
# k-means and heat_map
cluster_no = kmeans.predict(vector)
cam = np.dot(channel_out[:, :, channel_adress[cluster_no[0]]], channel_weight[cluster_no[0]])
    # normalize
    cam = cv2.resize(cam, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_LINEAR)
cam = np.maximum(cam, 0)
cam = cam / cam.max()
return cam
def get_x_y_limit(heatmap, thresh):
map_ = np.where(heatmap > thresh)
x_max = np.max(map_[1])
x_min = np.min(map_[1])
y_max = np.max(map_[0])
y_min = np.min(map_[0])
x_max = int(x_max)
x_min = int(x_min)
y_max = int(y_max)
y_min = int(y_min)
return x_min, y_min, x_max, y_max
def bounding_box(img, x_min, y_min, x_max, y_max):
cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (0, 255, 0), 5)
def load_images(image_names):
return [cv2.imread(image_name) for image_name in image_names]
def main(images=load_images(image_names), show_image=True):
input_size = 96
hand_thresh = 0.25
OD_thresh = 0.8
message1 = "Push [q] to go to the next image or to quit."
message2 = "Push [s] to change mode."
like_OD = False # like object detection
interpreter = Interpreter(model_path=model_path + "weights_weight_quant.tflite")
try:
interpreter.set_num_threads(4)
except Exception:
pass
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
hand_positions = []
processed_images = []
for image in images:
img = cv2.resize(image, (input_size, input_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img / 255
img = np.expand_dims(img, axis=0)
img = img.astype(np.float32)
interpreter.set_tensor(input_details[0]["index"], img)
interpreter.invoke()
channel_out = interpreter.get_tensor(output_details[0]["index"])
test_vector = interpreter.get_tensor(output_details[1]["index"])
score = get_score_arc(vector_pa, test_vector)
def show_info(show_image):
new_image = image.copy()
if score < hand_thresh: # hand is closed
hand = "Closed"
color = (255, 0, 0)
heatmap = predict_faster_gradcam(
channel_out, test_vector, new_image, kmeans, channel_weight, channel_adress
)
if like_OD:
x_min, y_min, x_max, y_max = get_x_y_limit(heatmap, OD_thresh)
bounding_box(new_image, x_min, y_min, x_max, y_max)
else:
heatmap = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
new_image = np.copy(cv2.addWeighted(heatmap, 0.5, new_image, 0.5, 2.2))
else: # hand is open
hand = "Open"
color = (0, 0, 255)
# message
if show_image:
cv2.putText(
new_image,
f"{hand}, score: {score:.1f}",
(15, 80),
cv2.FONT_HERSHEY_SIMPLEX,
1,
color,
1,
cv2.LINE_AA,
)
cv2.putText(
new_image,
message1,
(15, 25),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 0, 0),
1,
cv2.LINE_AA,
)
cv2.putText(
new_image,
message2,
(15, 45),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 0, 0),
1,
cv2.LINE_AA,
)
else:
cv2.putText(
new_image,
f"{hand}, score: {score:.1f}",
(15, 35),
cv2.FONT_HERSHEY_SIMPLEX,
1,
color,
1,
cv2.LINE_AA,
)
# display the image
return hand, new_image
if show_image:
# quit or change mode
while True:
                hand, image_to_show = show_info(show_image)
cv2.imshow("Result", image_to_show)
key = cv2.waitKey(10) & 0xFF
if key == ord("q"):
hand_positions.append(hand)
processed_images.append(image_to_show)
break
elif key == ord("s"):
if like_OD:
like_OD = False
else:
like_OD = True
cv2.destroyAllWindows()
else:
hand, first_image_to_show = show_info(show_image)
like_OD = not like_OD
_, second_image_to_show = show_info(show_image)
hand_positions.append(hand)
processed_images.append((first_image_to_show, second_image_to_show))
return hand_positions, processed_images
if __name__ == "__main__":
main()
```
#### File: Pre-trained-Faster-Grad-CAM-demo/tests/test_demo.py
```python
import os.path as osp
import numpy as np
import pytest
from PreTrainedFasterGradCAMDemo import demo
def prepare_images(list_of_short_names):
path_to_images = osp.abspath(osp.dirname(__file__)) + "/hand_images/"
images_for_testing = [path_to_images + image for image in list_of_short_names]
return demo.load_images(images_for_testing)
# Test for whether the processing of images returns the same result
def test_regression():
for image in prepare_images(
["fist.jpg", "open_palm.jpg", "large_image.jpg", "small_image.jpg"]
):
first_result, first_image = demo.main([image], show_image=False)
second_result, second_image = demo.main([image], show_image=False)
# To reverse the channels of the second image, uncomment the line below:
# second_image = second_image[:, :, ::-1]
assert (
np.array_equal(first_image[0][0], second_image[0][0])
& np.array_equal(first_image[0][1], second_image[0][1])
& (first_result == second_result)
)
# Test for whether the processing of hand-picked images returns the true result
def test_correctness_of_results():
true_positions = ["Closed", "Open"]
predicted_positions, _ = demo.main(
prepare_images(["fist.jpg", "open_palm.jpg"]), show_image=False
)
assert true_positions == predicted_positions
# Tests for whether program runs without errors on large and on a small image
@pytest.mark.parametrize("image_name", ["large_image.jpg", "small_image.jpg"])
def test_for_errors_on_images_of_different_sizes(image_name):
demo.main(prepare_images([image_name]), show_image=False)
``` |
{
"source": "471VE/VideoFaceId",
"score": 3
} |
#### File: VideoFaceId/utilities/trainingsetextractor.py
```python
import cv2
from glob import glob
from os.path import isdir
from os import makedirs
from sys import argv
class IncorrectNumberOfArguments(Exception):
pass
class NoDirectory(Exception):
pass
class UnacceptableFeatureExtractorType(Exception):
pass
def square_params(x_initial, x, y_initial, y):
"""
Calculates square parameters acquired from the mouse movements for rendering the square on the image.
"""
side = abs(y_initial - y)
x_top = round(x - side/2)
x_bottom = round(x + side/2)
y_top = min(y_initial, y)
y_bottom = max(y_initial, y)
return (x_top, y_top), (x_bottom, y_bottom)
def descriptor_filename(image_filename):
"""
    Returns the path where the descriptor image will be saved.
"""
path = image_filename.split("\\")
if not isdir(f"{directory}\\descriptors\\SIFT"):
makedirs(f"{directory}\\descriptors\\SIFT")
path[-1] = f"descriptors\\SIFT\\{path[-1][:-4]}_descriptor_SIFT.png"
return "\\".join(path)
def save_descriptor(image_name, face):
"""
Saves the descriptor to ".png" image for easy loading in the main program.
"""
sift = cv2.SIFT_create()
_, descriptors = sift.detectAndCompute(face, None)
cv2.imwrite(descriptor_filename(image_name), descriptors)
if __name__ == "__main__":
if len(argv) != 2:
raise IncorrectNumberOfArguments('Only the path to images must be specified.')
directory = argv[1]
if not isdir(directory):
raise NoDirectory('No such directory exists.')
is_drawing = False
x_initial = -1
y_initial = -1
# Get images present in the directory:
image_names = glob(f'{directory}\\*.jpg') + glob(f'{directory}\\*.png')
for image_name in image_names:
image = cv2.imread(image_name)
# Downscale the image in case it cannot fit the screen:
scale = 1
if image.shape[1] > 1500 or image.shape[0] > 800:
scale_x = 1500 / image.shape[1]
scale_y = 800 / image.shape[0]
scale = min(scale_x, scale_y)
width = int(image.shape[1] * scale)
height = int(image.shape[0] * scale)
image = cv2.resize(image, (width, height))
cache = image.copy()
top_corner = -1
bottom_corner = -1
already_drawn = False
def draw_square(event, x, y, flags, param):
"""
Function that actually draws square.
"""
global is_drawing, x_initial, y_initial
global image, cache, square_parameters
global top_corner, bottom_corner
global already_drawn
if event == cv2.EVENT_LBUTTONDOWN:
if already_drawn:
cv2.putText(image, 'There must be only one face', (50, 50),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
else:
is_drawing = True
x_initial = x
y_initial = y
elif event == cv2.EVENT_LBUTTONUP:
if already_drawn:
cv2.putText(image, 'There must be only one face', (50, 50),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
else:
is_drawing = False
top_corner, bottom_corner = square_params(x_initial, x, y_initial, y)
cv2.rectangle(image, top_corner, bottom_corner, color=(0, 0, 255), thickness=2)
already_drawn = True
elif event == cv2.EVENT_MOUSEMOVE:
if is_drawing:
image = cache.copy()
top_corner, bottom_corner = square_params(x_initial, x, y_initial, y)
cv2.rectangle(image, top_corner, bottom_corner, color=(0, 0, 255), thickness=2)
cv2.namedWindow(image_name)
cv2.setMouseCallback(image_name, draw_square)
while True:
cv2.imshow(image_name, image)
if cv2.waitKey(5) & 0xFF in (13, 32, 27):
break
if already_drawn:
face = cache[top_corner[1]:bottom_corner[1], top_corner[0]:bottom_corner[0], :]
save_descriptor(image_name, face)
cv2.destroyAllWindows()
``` |
{
"source": "473867143/Prometheus",
"score": 2
} |
#### File: multi_map_server/msg/_VerticalOccupancyGridList.py
```python
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class VerticalOccupancyGridList(genpy.Message):
_md5sum = "7ef85cc95b82747f51eb01a16bd7c795"
_type = "multi_map_server/VerticalOccupancyGridList"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float32 x
float32 y
int32[] upper
int32[] lower
int32[] mass
"""
__slots__ = ['x','y','upper','lower','mass']
_slot_types = ['float32','float32','int32[]','int32[]','int32[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
x,y,upper,lower,mass
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(VerticalOccupancyGridList, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.upper is None:
self.upper = []
if self.lower is None:
self.lower = []
if self.mass is None:
self.mass = []
else:
self.x = 0.
self.y = 0.
self.upper = []
self.lower = []
self.mass = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_2f.pack(_x.x, _x.y))
length = len(self.upper)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.upper))
length = len(self.lower)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.lower))
length = len(self.mass)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.mass))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.x, _x.y,) = _struct_2f.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.upper = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.lower = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.mass = struct.unpack(pattern, str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_2f.pack(_x.x, _x.y))
length = len(self.upper)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.upper.tostring())
length = len(self.lower)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.lower.tostring())
length = len(self.mass)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.mass.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.x, _x.y,) = _struct_2f.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.upper = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.lower = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.mass = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_2f = struct.Struct("<2f")
```
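
A hedged round-trip sketch (not in the original package) of how this generated message class is typically serialized and deserialized with an in-memory buffer:

```python
# Hypothetical example; assumes the multi_map_server message package is on the path.
from io import BytesIO
from multi_map_server.msg import VerticalOccupancyGridList

msg = VerticalOccupancyGridList(x=1.0, y=2.0, upper=[30], lower=[10], mass=[5])
buf = BytesIO()
msg.serialize(buf)

copy = VerticalOccupancyGridList()
copy.deserialize(buf.getvalue())
# copy.upper, copy.lower and copy.mass come back as tuples of int32 values.
```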
#### File: quadrotor_msgs/msg/_PPROutputData.py
```python
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class PPROutputData(genpy.Message):
_md5sum = "732c0e3ca36f241464f8c445e78a0d0a"
_type = "quadrotor_msgs/PPROutputData"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
uint16 quad_time
float64 des_thrust
float64 des_roll
float64 des_pitch
float64 des_yaw
float64 est_roll
float64 est_pitch
float64 est_yaw
float64 est_angvel_x
float64 est_angvel_y
float64 est_angvel_z
float64 est_acc_x
float64 est_acc_y
float64 est_acc_z
uint16[4] pwm
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','quad_time','des_thrust','des_roll','des_pitch','des_yaw','est_roll','est_pitch','est_yaw','est_angvel_x','est_angvel_y','est_angvel_z','est_acc_x','est_acc_y','est_acc_z','pwm']
_slot_types = ['std_msgs/Header','uint16','float64','float64','float64','float64','float64','float64','float64','float64','float64','float64','float64','float64','float64','uint16[4]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,quad_time,des_thrust,des_roll,des_pitch,des_yaw,est_roll,est_pitch,est_yaw,est_angvel_x,est_angvel_y,est_angvel_z,est_acc_x,est_acc_y,est_acc_z,pwm
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PPROutputData, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.quad_time is None:
self.quad_time = 0
if self.des_thrust is None:
self.des_thrust = 0.
if self.des_roll is None:
self.des_roll = 0.
if self.des_pitch is None:
self.des_pitch = 0.
if self.des_yaw is None:
self.des_yaw = 0.
if self.est_roll is None:
self.est_roll = 0.
if self.est_pitch is None:
self.est_pitch = 0.
if self.est_yaw is None:
self.est_yaw = 0.
if self.est_angvel_x is None:
self.est_angvel_x = 0.
if self.est_angvel_y is None:
self.est_angvel_y = 0.
if self.est_angvel_z is None:
self.est_angvel_z = 0.
if self.est_acc_x is None:
self.est_acc_x = 0.
if self.est_acc_y is None:
self.est_acc_y = 0.
if self.est_acc_z is None:
self.est_acc_z = 0.
if self.pwm is None:
self.pwm = [0,0,0,0]
else:
self.header = std_msgs.msg.Header()
self.quad_time = 0
self.des_thrust = 0.
self.des_roll = 0.
self.des_pitch = 0.
self.des_yaw = 0.
self.est_roll = 0.
self.est_pitch = 0.
self.est_yaw = 0.
self.est_angvel_x = 0.
self.est_angvel_y = 0.
self.est_angvel_z = 0.
self.est_acc_x = 0.
self.est_acc_y = 0.
self.est_acc_z = 0.
self.pwm = [0,0,0,0]
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_H13d.pack(_x.quad_time, _x.des_thrust, _x.des_roll, _x.des_pitch, _x.des_yaw, _x.est_roll, _x.est_pitch, _x.est_yaw, _x.est_angvel_x, _x.est_angvel_y, _x.est_angvel_z, _x.est_acc_x, _x.est_acc_y, _x.est_acc_z))
buff.write(_struct_4H.pack(*self.pwm))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 106
(_x.quad_time, _x.des_thrust, _x.des_roll, _x.des_pitch, _x.des_yaw, _x.est_roll, _x.est_pitch, _x.est_yaw, _x.est_angvel_x, _x.est_angvel_y, _x.est_angvel_z, _x.est_acc_x, _x.est_acc_y, _x.est_acc_z,) = _struct_H13d.unpack(str[start:end])
start = end
end += 8
self.pwm = _struct_4H.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_H13d.pack(_x.quad_time, _x.des_thrust, _x.des_roll, _x.des_pitch, _x.des_yaw, _x.est_roll, _x.est_pitch, _x.est_yaw, _x.est_angvel_x, _x.est_angvel_y, _x.est_angvel_z, _x.est_acc_x, _x.est_acc_y, _x.est_acc_z))
buff.write(self.pwm.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 106
(_x.quad_time, _x.des_thrust, _x.des_roll, _x.des_pitch, _x.des_yaw, _x.est_roll, _x.est_pitch, _x.est_yaw, _x.est_angvel_x, _x.est_angvel_y, _x.est_angvel_z, _x.est_acc_x, _x.est_acc_y, _x.est_acc_z,) = _struct_H13d.unpack(str[start:end])
start = end
end += 8
self.pwm = numpy.frombuffer(str[start:end], dtype=numpy.uint16, count=4)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_H13d = struct.Struct("<H13d")
_struct_3I = struct.Struct("<3I")
_struct_4H = struct.Struct("<4H")
```
#### File: uav_utils/scripts/tf_assist.py
```python
import rospy
import numpy as np
import tf
from tf import transformations as tfs
from math import pi
from nav_msgs.msg import Odometry
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import Imu
from sensor_msgs.msg import Joy
imu_pub = None
odom_pub = None
br = None
class OdometryConverter(object):
def __init__(self, frame_id_in_, frame_id_out_, broadcast_tf_, body_frame_id_, intermediate_frame_id_, world_frame_id_):
self.frame_id_in = frame_id_in_
self.frame_id_out = frame_id_out_
self.broadcast_tf = broadcast_tf_
self.body_frame_id = body_frame_id_
self.intermediate_frame_id = intermediate_frame_id_
self.world_frame_id = world_frame_id_
self.in_odom_sub = None
self.out_odom_pub = None
self.out_path_pub = None
self.path_pub_timer = None
self.tf_pub_flag = True
if self.broadcast_tf:
rospy.loginfo('ROSTopic: [%s]->[%s] TF: [%s]-[%s]-[%s]',
self.frame_id_in, self.frame_id_out, self.body_frame_id, self.intermediate_frame_id, self.world_frame_id)
else:
rospy.loginfo('ROSTopic: [%s]->[%s] No TF',
self.frame_id_in, self.frame_id_out)
self.path = []
def in_odom_callback(self, in_odom_msg):
q = np.array([in_odom_msg.pose.pose.orientation.x,
in_odom_msg.pose.pose.orientation.y,
in_odom_msg.pose.pose.orientation.z,
in_odom_msg.pose.pose.orientation.w])
p = np.array([in_odom_msg.pose.pose.position.x,
in_odom_msg.pose.pose.position.y,
in_odom_msg.pose.pose.position.z])
e = tfs.euler_from_quaternion(q, 'rzyx')
wqb = tfs.quaternion_from_euler(e[0], e[1], e[2], 'rzyx')
wqc = tfs.quaternion_from_euler(e[0], 0.0, 0.0, 'rzyx')
#### odom ####
odom_msg = in_odom_msg
assert(in_odom_msg.header.frame_id == self.frame_id_in)
odom_msg.header.frame_id = self.frame_id_out
odom_msg.child_frame_id = ""
self.out_odom_pub.publish(odom_msg)
#### tf ####
if self.broadcast_tf and self.tf_pub_flag:
self.tf_pub_flag = False
if not self.frame_id_in == self.frame_id_out:
br.sendTransform((0.0, 0.0, 0.0),
tfs.quaternion_from_euler(0.0, 0.0, 0.0, 'rzyx'),
odom_msg.header.stamp,
self.frame_id_in,
self.frame_id_out)
if not self.world_frame_id == self.frame_id_out:
br.sendTransform((0.0, 0.0, 0.0),
tfs.quaternion_from_euler(0.0, 0.0, 0.0, 'rzyx'),
odom_msg.header.stamp,
self.world_frame_id,
self.frame_id_out)
br.sendTransform((p[0], p[1], p[2]),
wqb,
odom_msg.header.stamp,
self.body_frame_id,
self.world_frame_id)
br.sendTransform(((p[0], p[1], p[2])),
wqc,
odom_msg.header.stamp,
self.intermediate_frame_id,
self.world_frame_id)
#### path ####
pose = PoseStamped()
pose.header = odom_msg.header
pose.pose.position.x = p[0]
pose.pose.position.y = p[1]
pose.pose.position.z = p[2]
pose.pose.orientation.x = q[0]
pose.pose.orientation.y = q[1]
pose.pose.orientation.z = q[2]
pose.pose.orientation.w = q[3]
self.path.append(pose)
def path_pub_callback(self, event):
if self.path:
path = Path()
path.header = self.path[-1].header
path.poses = self.path[-30000::1]
self.out_path_pub.publish(path)
def tf_pub_callback(self, event):
self.tf_pub_flag = True
if __name__ == "__main__":
rospy.init_node('tf_assist')
converters = []
index = 0
while True:
prefix = "~converter%d/" % index
try:
frame_id_in = rospy.get_param('%sframe_id_in' % prefix)
frame_id_out = rospy.get_param('%sframe_id_out' % prefix)
broadcast_tf = rospy.get_param('%sbroadcast_tf' % prefix, False)
body_frame_id = rospy.get_param('%sbody_frame_id' % prefix, 'body')
intermediate_frame_id = rospy.get_param(
'%sintermediate_frame_id' % prefix, 'intermediate')
world_frame_id = rospy.get_param(
'%sworld_frame_id' % prefix, 'world')
converter = OdometryConverter(
frame_id_in, frame_id_out, broadcast_tf, body_frame_id, intermediate_frame_id, world_frame_id)
converter.in_odom_sub = rospy.Subscriber(
'%sin_odom' % prefix, Odometry, converter.in_odom_callback, tcp_nodelay=True)
converter.out_odom_pub = rospy.Publisher(
'%sout_odom' % prefix, Odometry, queue_size=10, tcp_nodelay=True)
converter.out_path_pub = rospy.Publisher(
'%sout_path' % prefix, Path, queue_size=10)
converter.tf_pub_timer = rospy.Timer(
rospy.Duration(0.1), converter.tf_pub_callback)
converter.path_pub_timer = rospy.Timer(
rospy.Duration(0.5), converter.path_pub_callback)
index += 1
except KeyError as e:
if index == 0:
raise KeyError(e)
else:
if index == 1:
rospy.loginfo(
'prefix:"%s" not found. Generate %d converter.' % (prefix, index))
else:
rospy.loginfo(
'prefix:"%s" not found. Generate %d converters' % (prefix, index))
break
br = tf.TransformBroadcaster()
rospy.spin()
```
#### File: py_nodes/rtsp_stream/gstreamer_rtsp_stream.py
```python
import rospy
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import yaml
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import GObject, Gst, GstRtspServer
def main(width,height,port,factory_name):
global out_send
out_send = cv2.VideoWriter('appsrc is-live=true ! videoconvert ! \
omxh264enc bitrate=12000000 ! video/x-h264, \
stream-format=byte-stream ! rtph264pay pt=96 ! \
udpsink host=127.0.0.1 port=5400 async=false',
cv2.CAP_GSTREAMER, 0, 30, (width,height), True)
if not out_send.isOpened():
print('VideoWriter not opened')
exit(0)
rtsp_port_num = port
server = GstRtspServer.RTSPServer.new()
server.props.service = "%d" % rtsp_port_num
server.attach(None)
factory = GstRtspServer.RTSPMediaFactory.new()
factory.set_launch("(udpsrc name=pay0 port=5400 buffer-size=524288 \
caps=\"application/x-rtp, media=video, clock-rate=90000, \
encoding-name=(string)H264, payload=96 \")")
factory.set_shared(True)
server.get_mount_points().add_factory(factory_name, factory)
print("\n *** Launched RTSP Streaming at rtsp://localhost:%d/demo \n" % rtsp_port_num)
def callback(data):
scaling_factor = 0.5
global count,bridge
global out_send
count = count + 1
if count == 1:
count = 0
cv_img = bridge.imgmsg_to_cv2(data, "bgr8")
out_send.write(cv_img)
cv2.imshow("frame" , cv_img)
cv2.waitKey(3)
else:
pass
if __name__ == '__main__':
rospy.init_node('gstreamer_rtsp_stream', anonymous=True)
input_topic = rospy.get_param('~camera_topic', '/prometheus/camera/rgb/image_raw')
config = rospy.get_param('~config_info', 'encode_config.yaml')
yaml_config_fn = config
print('Input config file: {}'.format(config))
yaml_config = yaml.safe_load(open(yaml_config_fn))
image_width=yaml_config['image_width']
image_height=yaml_config['image_height']
rtsp_port=yaml_config['rtsp_port']
factory_name=yaml_config['factory_name']
global count,bridge
global out_send
count = 0
bridge = CvBridge()
main(image_width,image_height,rtsp_port,factory_name)
rospy.Subscriber(input_topic, Image, callback)
rospy.spin()
```
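The node above expects the YAML file passed via `~config_info` to define `image_width`, `image_height`, `rtsp_port` and `factory_name`. A minimal sketch for generating such a config file (all values are placeholders, not taken from the original repo):
```python
import yaml

# Placeholder values; adjust to the camera topic's actual resolution and desired mount point.
config = {
    "image_width": 640,
    "image_height": 480,
    "rtsp_port": 8554,
    "factory_name": "/demo",
}
with open("encode_config.yaml", "w") as f:
    yaml.safe_dump(config, f)
```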
#### File: py_nodes/video_replay/video_replay.py
```python
import sys
import cv2
import os.path
import rospy
from sensor_msgs.msg import Image
import numpy as np
from cv_bridge import CvBridge, CvBridgeError
import signal
rospy.init_node('video_replay', anonymous=True)
def exit(signum, frame):
print('You chose to stop me.')
sys.exit(0)
def video_replay(input_video_dir, t, pub_topic):
pub = rospy.Publisher(pub_topic, Image, queue_size=10)
rate = rospy.Rate(t)
pathDir = os.listdir(input_video_dir)
signal.signal(signal.SIGINT, exit)
signal.signal(signal.SIGTERM, exit)
for allDir in pathDir:
videopath = os.path.join(input_video_dir, allDir)
cap = cv2.VideoCapture(videopath)
while cap.isOpened():
ret, frame = cap.read()
if ret:
bridge = CvBridge()
msg = bridge.cv2_to_imgmsg(frame, encoding="bgr8")
pub.publish(msg)
else:
rospy.loginfo("video replay failed.")
cv2.destroyAllWindows()
break
rate.sleep()
if __name__ == '__main__':
input_video_dir = rospy.get_param('~input_video_dir', '/home/nvidia/Prometheus/video')
t = rospy.get_param('~video_rate', 20)
pub_topic = rospy.get_param('~publish_topic', '/prometheus/object_detection/video_replay')
try:
video_replay(input_video_dir, t, pub_topic)
except rospy.ROSInterruptException:
pass
``` |
{
"source": "474416133/pydantic-manager",
"score": 3
} |
#### File: pydantic-manager/pydantic_manager/errors.py
```python
__author__ = "sven"
__email__ = "<EMAIL>"
import enum
from enum import Enum
class BizError(RuntimeError):
"""
Business exception
"""
__slots__ = "error_code", "error_value", "error_remark"
def __init__(self, error_code, error_value, error_remark=None, *args):
self.error_code = error_code
self.error_value, self.error_remark = error_value, error_value
if error_remark:
self.error_remark = error_remark
if args:
self.error_remark = self.error_remark.format(*args)
def __str__(self):
"""
@override
:return:
"""
return f"error_code={self.error_code}," \
f" error_value={self.error_value}," \
f" error_remark={self.error_remark}"
def as_dict(self):
"""
:return:
"""
return {
"error_code" : self.error_code,
"error_value" :self.error_value,
"error_remark" : self.error_remark
}
class IError(Enum):
"""
Exception enum
"""
def exception(self, error_remark=None, *args):
"""
Create an exception
:param error_remark:
:param args:
:return: BizError instance
"""
return BizError(self.value, self.name, error_remark, *args)
def reraise(self, error_remark=None, *args):
"""
Raise the exception
:param error_remark:
:param args:
:return:
"""
raise self.exception(error_remark, *args)
@enum.unique
class Errors(IError):
OK = 0
UNKNOWN_ERROR = 9999
# model building error
MODEL_BUILDING_ERROR = 10000
# field naming error
FIELD_NAMMING_ERROR = 10001
# field is not allowed to be modified
NOT_ALLOWED_MODIFY_FIELD = 10002
# field value setting error
FIELD_SETTING_ERROR = 10003
# field does not exist
FIELD_NOT_EXIST = 10004
# copy error
COPY_ERROR = 10005
# descriptor error
DESCRIPTOR_ERROR = 10006
# comparison error
CMP_ERROR = 10007
# assertion error
ASSERT_ERROR = 10999
# invocation error
INVOKE_ERROR = 10998
# parameter error
PARAMS_ERROR = 11000
# validation error
VALIDATE_ERROR = 110001
# entity already exists
ENTITY_HAD_EXIST = 20000
# entity does not exist
ENTITY_NOT_EXIST = 20001
# write operation error
WRITE_ERROR = 21000
# web
HTTP_ERROR = 30000
# programming error
PROGRAMMING_ERROR = 90000
class ValidateError(BizError):
def __init__(self, msg):
"""
:param msg:
"""
super().__init__(Errors.VALIDATE_ERROR.value, Errors.VALIDATE_ERROR.name, msg)
def exception(error_remark=None, *args):
"""
Create an exception
:param error_remark:
:param args:
:return: BizError instance
"""
return Errors.UNKNOWN_ERROR.exception(error_remark)
def reraise(error_remark=None, *args):
"""
Raise the exception
:param error_remark:
:param args:
:return:
"""
Errors.UNKNOWN_ERROR.reraise(error_remark)
```
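The `Errors` enum doubles as an exception factory: `exception()` wraps the member's value and name into a `BizError`, and `reraise()` raises it, formatting any extra positional args into the remark. A small usage sketch (the lookup scenario is made up for illustration):
```python
from pydantic_manager import errors

def load_entity(entity_id, store):
    # Raise a structured business error when the record is missing.
    if entity_id not in store:
        errors.Errors.ENTITY_NOT_EXIST.reraise("entity {} not found", entity_id)
    return store[entity_id]

try:
    load_entity(42, {})
except errors.BizError as e:
    # e.as_dict() -> {'error_code': 20001, 'error_value': 'ENTITY_NOT_EXIST',
    #                 'error_remark': 'entity 42 not found'}
    print(e.as_dict())
```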
#### File: pydantic-manager/pydantic_manager/managers.py
```python
__author__ = "sven"
__email__ = "<EMAIL>"
from collections import defaultdict
from collections.abc import Iterable
from . import logs
from . import errors
from . import utils
from . import signals
from .sql.vendors import pg
from .sql import builder
logger = logs.logger
ERRORS = errors.Errors
IncreasePolicyEnum = builder.IncreasePolicyEnum
class ManagerBase(object):
"""
Persistence operations for pydantic models, including:
- insert into database (single, bulk)
- delete (single, bulk)
- update (single, bulk)
- query (single, list)
- update or create
- get or create
- counting
- uniqueness checks
"""
get_model_fields = utils.get_model_fields
get_model_pk = utils.get_pk_and_validate
def __init__(self,
model,
table_name,
sharing_policy=None):
"""
:param model:
:param table_name:
"""
cls_ = self.__class__
self._model = model
self._table_name = table_name
self._sharing_policy = sharing_policy
if self._sharing_policy and not callable(sharing_policy):
raise ERRORS.MODEL_BUILDING_ERROR.exception("sharing_policy MUST BE callable")
self._model_primary_key = cls_.get_model_pk(self._model)
self._model_fields = list(cls_.get_model_fields(self.model))
@property
def table_name(self):
return self._table_name
@property
def model(self):
return self._model
@property
def sharing_policy(self):
return self._sharing_policy
@property
def model_pk(self):
return self._model_primary_key
@property
def model_fields(self):
return self._model_fields
def get_model_field(self, field_name):
"""
Get a model field
:param field_name:
:return:
"""
return utils.get_model_field(self._model, field_name)
def create_instance_by_record(self, record):
"""
Instantiate the model from a database record
:param record:
:return:
"""
return self._model(**record)
def convert_instance_to_dict(self, model_instance):
"""
Convert a model instance to a dict
:param model_instance:
:return:
"""
return model_instance.dict()
async def create(self,
model_instance,
db=None,
signal=None):
"""
Create
:param model_instance:
:param db:
:return:
"""
raise NotImplementedError
async def bulk_create(self,
model_instances,
db=None,
signal=None):
"""
Bulk create
:param model_instances:
:param db:
:return:
"""
raise NotImplementedError
async def get(self,
pk,
fields=None,
db=None):
"""
Get by primary key
:param pk:
:param fields:
:param db:
:return:
"""
raise NotImplementedError
async def get_or_create(self,
model_instance,
db=None,
signal=None):
"""
Get a record, creating it if it does not exist
:param signal:
:param model_instance:
:param db:
:return:
"""
raise NotImplementedError
async def update(self,
model_instance,
update_fields=None,
db=None,
inc_fields=None,
inc_policy=None,
signal=None):
"""
Update
:param model_instance:
:param update_fields: fields that need to be updated
:param db:
:param inc_fields: fields to increment/decrement
:param inc_policy: increment / decrement policy ...
:param signal: signal
:return:
"""
raise NotImplementedError
async def update_or_create(self,
model_instance,
update_fileds=None,
db=None,
signal=None):
"""
Update or create
:param model_instance:
:param update_fileds:
:param insert_field:
:param db:
:return:
"""
raise NotImplementedError
async def update_by_PKs(self,
model_instance,
pks=None,
update_fields=None,
db=None,
signal=None):
"""
Update records matching the given pks
:param model_instance:
:param pks:
:param update_fields:
:param db:
:return:
"""
raise NotImplementedError
async def delete(self, pk, db=None, signal=None):
"""
Delete
:param pk:
:param db:
:return: number of deleted rows
"""
raise NotImplementedError
async def delete_by_PKs(self, pks, db=None, signal=None):
"""
Delete by a list of ids
:param pks:
:param db:
:return:
"""
raise NotImplementedError
async def find(self, sql, params=None, db=None, **kwargs):
"""
Query
:param sql:
:param params:
:param db:
:return: list
"""
raise NotImplementedError
async def find_by_PKs(self, pks, db=None):
"""
Query by a list of pks
:param pks:
:param db:
:return:
"""
raise NotImplementedError
async def is_existed(self, instance, db=None):
"""
Whether the object exists
:param instance:
:param db:
:return:
"""
raise NotImplementedError
async def get_by_field(self,
field_name,
field_value,
selected_fields=None,
db=None):
"""
:param field_name:
:param db:
:return:
"""
raise NotImplementedError
async def create_table(self, ignore_existed=False):
"""
:param ignore_existed:
:return:
"""
raise NotImplementedError
async def is_unique(self, model_instance, field_name, *, db=None):
"""
Whether the field value is unique
:param model_instance:
:param field_name:
:param db:
:return:
"""
raise NotImplementedError
async def validate_unique(self, model_instance, field_name, *, db=None):
"""
:param model_instance:
:param field_name:
:param db:
:return:
"""
if not await self.is_unique(model_instance, field_name, db=db):
raise errors.ValidateError(
"{field_name} value {field_value} is not unique".format(field_name=field_name,
field_value=model_instance.get(field_name))
)
async def count(self, *args, db=None):
"""
Counter
:param args:
:param db:
:return:
"""
raise NotImplementedError
def parse_insert_ret(ret):
_, _, count = ret.split()
return count
def parse_update_ret(ret):
"""
Parse the update result
:param ret:
:return:
"""
_, count = ret.split()
return int(count)
parse_delete_ret = parse_update_ret
class PSQLManager(ManagerBase):
"""
Basic PostgreSQL operations
"""
default_sql_builder_cls = pg.PsqlBuilder
def __init__(self,
model,
table_name,
sharing_policy=None,
sql_builder_cls=None):
"""
:param table_name: (str), table name
"""
super().__init__(model, table_name, sharing_policy)
sql_builder_cls = sql_builder_cls or self.default_sql_builder_cls
self.sql_builder = sql_builder_cls(self)
async def create(self, model_instance, db=None, signal=None):
"""
Insert
:param model_instance:
:param db:
:return:
"""
pk = model_instance[self.model_pk.name]
sql = self.sql_builder.build_insert_sql(pk)
ret = await db.execute(sql, *[model_instance.get(field.name) for field in self.model_fields])
_, _, count = ret.split()
if count != "1":
raise ERRORS.ENTITY_HAD_EXIST.exception(
"table {}: id={} had exist".format(self.table_name, model_instance[self.model_pk.name]))
logger.debug("[INSERT] SQL:%s ; data:%s " % (sql, model_instance))
return pk
async def bulk_create(self, model_instances, db=None, signal=None):
"""
Bulk insert
:param model_instances:
:param db:
:return:
"""
pk0 = model_instances[0][self.model_pk.name]
sql = self.sql_builder.build_insert_sql(pk0)
ret = await db.executemany(sql, [[data_instance.get(field.name) for field in self.model_fields] for
data_instance in model_instances])
logger.debug("[INSERT] SQL:{}, PARAMS:{}, RET:{}".format(sql, model_instances, ret))
return len(model_instances)
async def get_raw(self, pk, fields=None, db=None):
"""
Fetch a raw record by pk
:param pk:
:param db:
:return:
"""
_sql = self.sql_builder.build_get_sql(fields, pk)
logger.debug("[SELECT] SQL:{}, PARAMS:{}".format(_sql, pk))
return await db.fetchrow(_sql, pk)
async def get(self, pk, fields=None, db=None):
"""
Fetch a record by pk
:param pk:
:param db:
:return:
"""
record = await self.get_raw(pk, fields, db)
return self.create_instance_by_record(record) if record else None
async def is_existed(self, model_instance, *, db=None):
"""
Check whether the object exists
:param model_instance:
:param db:
:return:
"""
pk = model_instance.get(self.model_pk.name)
if not pk:
return False
sql = self.sql_builder.build_get_sql(sharing_pk=pk)
count = await self.count(sql, [pk], db=db)
return count > 0
async def get_or_create(self, model_instance, db=None, signal=None):
"""
Get or create
:param model_instance:
:param db:
:return:
"""
conn = await db.acquire() if hasattr(db, "acquire") else db
try:
async with conn.transaction() as trans:
instance = await self.get_raw(model_instance.get(self.model_pk.name), db=conn)
if instance:
return False, model_instance
else:
await self.create(model_instance, db=conn, signal=signal)
return True, model_instance
finally:
if hasattr(db, "release"):
await db.release(conn)
async def update(self,
model_instance,
update_fields=None,
db=None,
inc_fields=None,
inc_policy=IncreasePolicyEnum.NOTHING,
signal=None):
"""
Update a single record
:param model_instance:
:param update_fields:
:param db:
:return:
"""
return await self.update_by_field(self.model_pk.name,
model_instance,
update_fields,
db,
inc_fields,
inc_policy,
signal)
async def update_by_PKs(self,
model_instance,
pks=None,
update_fields=None,
db=None,
inc_fields=None,
inc_policy=IncreasePolicyEnum.NOTHING,
signal=None):
"""
:param model_instance:
:param pks:
:param update_fields:
:param db:
:return:
"""
_sql, _update_fields = self.sql_builder.build_bulk_update_sql(update_fields, inc_fields, inc_policy)
_values = [model_instance.get(item[0]) for item in _update_fields]
_values.insert(0, pks)
ret = await db.execute(_sql, *_values)
logger.debug("[UPDATE] SQL:%s, PARAMS:%s, RET:%s" % (_sql, _values, ret))
del _update_fields
del _values
return parse_update_ret(ret)
async def update_or_create(self,
model_instance,
update_fileds=None,
db=None,
signal=None):
"""
Update or create
:param model_instance:
:param update_fileds:
:param insert_fields:
:param db:
:return: 2 if updated, 1 if created
"""
conn = await db.acquire() if hasattr(db, "acquire") else db
try:
async with conn.transaction() as trans:
ret = await self.update(model_instance, update_fileds, db=conn, signal=signal)
if ret:
return 2
else:
await self.create(model_instance, db=conn, signal=signal)
return 1
finally:
if hasattr(db, "release"):
await db.release(conn)
async def delete(self, pk, db=None, signal=None):
"""
Delete
:param pk:
:param db:
:return:
"""
_sql = self.sql_builder.build_delete_sql(pk)
ret = await db.execute(_sql, pk)
logger.debug("[DELETE] SQL:{}, PARAMS:{}, RET:{}".format(_sql, pk, ret))
_, count = ret.split()
return int(count)
async def delete_by_PKs(self, pks, db=None, signal=None):
"""
Bulk delete
:param pks:
:param db:
:return:
"""
_sql = self.sql_builder.bulk_delete_sql
ret = await db.execute(_sql, pks)
signals.after_delete.send(sender=self.model,
db=db,
manager=self,
signal=signal,
pks=pks,
multi=True)
logger.debug("[DELETE] SQL:{}, PARAMS:{}, RET:{}".format(_sql, pks, ret))
_, count = ret.split()
return int(count)
@classmethod
async def find(cls,
sql,
params=None,
order_by=None,
limit=-1,
offset=0,
db=None):
"""
Query
:param sql: sql or callable
:param params:
:param db:
:param kwargs:
:return:
"""
_sql = sql
if order_by:
_sql += " ORDER BY %s" % order_by
if limit > 0:
_sql += f' LIMIT {limit} OFFSET {offset} '
params = params or []
logger.debug('[SELECT] sql:{}, params:{}'.format(_sql, params))
return await db.fetch(_sql, *params)
@classmethod
async def count(cls, sql, params=None, *, db=None):
"""
Counter
:param sql:
:param params:
:param db:
:return:
"""
_count_sql = cls.default_sql_builder_cls.build_count_sql(sql)
logger.debug("[COUNT]count_sql: {}, params: {}".format(_count_sql, params))
return await db.fetchval(_count_sql, *params)
async def all(self, db=None):
"""
Fetch all records
:return:
"""
sql = self.sql_builder.build_select_sql("*")
logger.info("[select] {}".format(sql))
return await self.find(sql, db=db)
async def find_by_field(self,
field_name,
field_value,
selected_fields=None,
order_by=None,
limit=-1,
offset=0,
db=None):
"""
:param field_name:
:param db:
:return:
"""
sql = self.sql_builder.build_select_sql(selected_fields)
sql += " WHERE {} = $1 ".format(field_name)
ret = await self.find(sql, (field_value,), order_by, limit, offset, db=db)
logger.debug('[SELECT] ')
return ret
async def get_by_field(self,
field_name,
field_value,
selected_fields=None,
order_by=None,
db=None):
"""
:param field_name:
:param db:
:return:
"""
sql = self.sql_builder.build_select_sql(selected_fields)
if order_by:
sql += " WHERE {} = $1 ORDER BY {}".format(self.get_model_field(field_name).name, order_by)
else:
sql += " WHERE {} = $1 ".format(self.get_model_field(field_name).name)
ret = await db.fetchrow(sql, field_value)
return self.create_instance_by_record(ret) if ret else None
async def create_table(self,
ignore_existed=False,
db=None):
"""
Create the table
:param ignore_existed:
:return:
"""
sql = self.sql_builder.build_create_table_sql(ignore_existed)
logger.debug("[create table] sql: {}".format(sql))
await db.execute(sql)
async def is_unique(self,
model_instance,
field_name,
*,
db=None,
sharing_pk=None):
"""
Uniqueness check
:param model_instance:
:param db:
:return: True if no other record holds this field value
"""
sql = self.sql_builder.build_select_sql(selected_fields=[self.model_pk.name],
sharing_pk=sharing_pk)
sql += " WHERE {} = $1".format(self.get_model_field(field_name).name)
count = await self.count(sql, [model_instance.get(field_name)], db=db)
return count == 0
async def find_by_PKs(self,
pks,
selected_fields=None,
db=None):
"""
:param pks:
:param selected_fields:
:param db:
:return:
"""
sql = self.sql_builder.build_select_sql(selected_fields=selected_fields)
sql += " WHERE %s = ANY($1)" % self.model_pk.name
return await self.find(sql, pks, db=db)
async def update_by_field(self,
field_name,
model_instance,
update_fields=None,
db=None,
inc_fields = None,
inc_policy=IncreasePolicyEnum.NOTHING,
signal=None):
"""
Update by field_name
:param inc_policy:
:param field_name:
:param field_value:
:param db:
:param signal:
:return:
"""
_update_sql, _update_fields = self.sql_builder.build_update_sql(update_fields,
inc_fields,
inc_policy,
where_field=field_name)
_values = [model_instance.get(item[0]) for item in _update_fields]
_values.insert(0, model_instance[field_name])
ret = await db.execute(_update_sql, *_values)
logger.debug('[UPDATE] sql:{}, '
'params: {}, '
'update_fields:{}, '
'inc_fields: {}, '
'compiled_update: {},'
'ret: {}'.format(_update_sql,
_values,
update_fields,
inc_fields,
_update_fields,
ret))
del _update_fields
del _values
del _update_sql
return parse_update_ret(ret)
class MongoDBManager(ManagerBase):
"""
Wrapper around basic MongoDB operations
"""
__slots__ = ["model", "name", "collection_name", "collection_pk_name"]
def __init__(self, collection_name=None,
duplicate_error_cls=None,
write_error_cls=None):
"""
:param collection_name:
"""
self.model = None
self.name = None
self.collection_name = collection_name
self.collection_pk_name = None
self.duplicate_error_cls = duplicate_error_cls or Exception
self.write_error_cls = write_error_cls or Exception
async def create(self, model_instance, db=None):
"""
Insert one record
:param model_instance:
:param db:
:return:
"""
_doc = self.convert_instance_to_dict(model_instance) if isinstance(model_instance, self.model) else model_instance
try:
await db[self.collection_name].insert_one(_doc)
except self.duplicate_error_cls as de:
logger.error("[INSERT ONE]{}.{}({}) error:{}".format(db.name, self.collection_name, _doc, de))
ERRORS.ENTITY_HAD_EXIST.reraise("{}".format(de))
return model_instance[self.collection_pk_name]
async def bulk_create(self, model_instances, db=None):
"""
Insert multiple records
:param model_instances:
:param db:
:return:
"""
await db[self.collection_name].insert_many(model_instances)
async def get(self, pk, fields=None, db=None):
"""
:param pk:
:param db:
:return:
"""
projection = dict([(field, 1) for field in fields]) if isinstance(fields, Iterable) else None
_doc = await db[self.collection_name].find_one({self.collection_pk_name: pk},
projection=projection)
return self.model(data=_doc) if _doc is not None else None
async def get_or_create(self, model_instance, db=None):
"""
:param model_instance:
:param db:
:return:
"""
# lock
pk = model_instance.get(self.model_pk.name)
if not pk:
raise RuntimeError("field '{}' value had not present".format(self.model_pk.name))
_doc = await self.get(pk, db=db)
logger.debug("_doc: {}".format(_doc))
if _doc is None:
await self.create(model_instance, db)
return True, model_instance
return False, _doc
async def update(self, model_instance, update_fields=None, db=None):
"""
Update
:param model_instance:
:param update_fields:
:param db:
:return:
"""
_doc = self.convert_instance_to_dict(model_instance) if isinstance(model_instance, self.model) else model_instance
filter_doc = {self.collection_pk_name: _doc[self.collection_pk_name]}
update_doc = self._create_new_doc(_doc, update_fields)
try:
ret = await db[self.collection_name].update_one(filter_doc, {"$set": update_doc})
return ret.modified_count
except self.write_error_cls as we:
logger.error("[UPDATE] {}.{}, doc:{}".format(db.name, self.collection_name, _doc))
raise ERRORS.WRITE_ERROR.exception(str(we))
finally:
del update_doc
del filter_doc
async def update_by_PKs(self, model_instance, pks=None, update_fields=None, db=None):
"""
Update by a list of pks
:param model_instance:
:param pks:
:param update_fields:
:param db:
:return: -1 if nothing was done
"""
if not isinstance(pks, (tuple, list)):
logger.warning("pks must be a tuple or list. return and nothing to do ")
return -1
_doc = self.convert_instance_to_dict(model_instance) \
if isinstance(model_instance, self.model) else \
model_instance
filter_doc = {self.collection_pk_name: {"$in": pks}}
update_doc = self._create_new_doc(_doc)
try:
ret = await db[self.collection_name].update_many(filter_doc, {"$set": update_doc})
return ret.modified_count
except self.write_error_cls as we:
logger.error("[UPSERT] {}.{}, doc:{}".format(db.name, self.collection_name, _doc))
raise ERRORS.WRITE_ERROR.exception(we.message)
finally:
del filter_doc
del update_doc
async def update_or_create(self, model_instance, update_fileds=None, db=None):
"""
Update or create
:param model_instance:
:param update_fileds:
:param insert_fields:
:param db:
:return:
"""
try:
await self.create(model_instance, db=db)
return 1
except errors.BizError as be:
if be.error_code == ERRORS.ENTITY_HAD_EXIST.value:
if isinstance(model_instance, self.model):
self.convert_instance_to_dict(model_instance).pop("_id")
elif isinstance(model_instance, dict):
model_instance.pop("_id", None)
ret = await self.update(model_instance, update_fileds, db)
return ret if ret == -1 else 2
except Exception as e:
logger.error("[UPSERT] error:{}".format(e))
return -1
async def delete(self, pk, db=None):
"""
:param pk:
:param db:
:return:
"""
logger.debug("_doc: {}".format({self.collection_pk_name: pk}))
ret = await db[self.collection_name].delete_one({self.collection_pk_name: pk})
logger.debug("[DELETE] pk:{}, deleted_count:{}".format(pk, ret.deleted_count))
return ret.deleted_count
async def delete_by_PKs(self, pks, db=None):
"""
:param pks:
:param db:
:return:
"""
ret = await db[self.collection_name].delete_many({self.collection_pk_name: {"$in": pks}})
logger.debug("[DELETE] pks:{}, deleted_count:{}".format(pks, ret.deleted_count))
return ret.deleted_count
def _create_new_doc(self, doc, update_fields=None):
"""
Return a new update document
:param doc:
:param update_fields:
:return:
"""
if update_fields:
_new_doc = dict([(item, doc.get(item)) for item in update_fields if item != self.collection_pk_name])
else:
_new_doc = dict(doc)
_new_doc.pop(self.collection_pk_name, None)
return _new_doc
async def find(self, filter_doc=None, projection=None, sorted=None, limit=None, skip=None, db=None, callback=None):
"""
Query
:param filter_doc:
:param projection:
:param db:
:param callback: optional callable applied to the result list
:return:
"""
filter_doc = filter_doc or {}
_c = db[self.collection_name].find(filter=filter_doc,
projection=projection,
sort=sorted)
if limit is not None and limit > -1:
_c.limit(limit)
if skip:
_c.skip(skip)
ret = await _c.to_list(None)
return callback(ret) if callable(callback) else ret
async def find_by_PKs(self, pks, selected_fields=None, db=None, callback=None):
"""
:param pks:
:param db:
:return:
"""
filter_doc = {
self.model_pk.name: {"$in": pks}
}
return await self.find(filter_doc, selected_fields, db=db, callback=callback)
async def get_by_field(self, field_name, field_value, selected_fields=None, order_by=None, db=None):
"""
:param field_name:
:param db:
:return:
"""
filter_doc = {
field_name: field_value,
}
return await self.find(filter_doc, selected_fields, order_by, limit=1, skip=0, db=db)
async def is_existed(self, instance, db=None):
"""
:param instance:
:param db:
:return:
"""
filter_doc = {
self.model_pk.name: instance.get(self.model_pk.name)
}
_count = await self.count(filter_doc, db=db)
return _count > 0
async def is_unique(self, model_instance, field_name, *, db=None):
"""
:param model_instance:
:param field_name:
:param db:
:return:
"""
filter_doc = {
field_name: model_instance.get(field_name)
}
_count = await self.count(filter_doc, db=db)
return _count == 0
async def count(self, filter_doc=None, db=None):
"""
Counter
:param filter_doc:
:param db:
:return:
"""
return await db[self.collection_name].count_documents(filter=filter_doc or {})
class PSQLExtendManager(PSQLManager):
"""
Extended PSQL manager that supports table sharding by id.
"""
default_sql_builder_cls = pg.SharingPsqlBuilder
def __init__(self, model, sharing_policy=None, sql_builder_cls=None):
"""
:param model: pydantic model
:param sharing_policy: sharding policy
:param sql_builder_cls: sql builder
"""
super().__init__(model,
table_name="{table_name}",
sharing_policy=sharing_policy,
sql_builder_cls=sql_builder_cls)
self._sharing_policy = sharing_policy
def _group_by_pks(self, pks):
"""
Group pks by target table
:param pks: list of ids
:return: dict
"""
groups = defaultdict(list)
for pk in pks:
table_name = self.sharing_policy(pk)
groups[table_name].append(pk)
return groups
async def bulk_create(self, model_instances, db=None):
"""
Bulk insert
:param model_instances:
:param db:
:return:
"""
groups = defaultdict(list)
for model_instance in model_instances:
table_name = self.sharing_policy(model_instance[self.model_pk.name])
groups[table_name].append(model_instance)
sql_template = self.sql_builder.bulk_insert_sql
conn = await db.acquire() if hasattr(db, "acquire") else db
try:
async with conn.transaction() as trans:
for table_name in groups:
sql = sql_template.format(table_name=table_name)
ret = await conn.executemany(sql,
[[data_instance.get(field.name) for field in self.model_fields]
for data_instance in groups[table_name]])
finally:
if hasattr(db, "release"):
await db.release(conn)
return len(model_instances)
async def update_by_PKs(self, model_instance, pks=None, update_fields=None, db=None, inc_fields=None,
inc_policy=IncreasePolicyEnum.NOTHING, signal=None):
"""
@override
:param model_instance:
:param pks:
:param update_fields:
:param db:
:return:
"""
_sql_template = self.sql_builder.build_bulk_update_sql(update_fields, inc_fields, inc_policy)
groups = self._group_by_pks(pks)
_update_fields = update_fields or [field.name for field in self.model_fields]
_values = [model_instance.get(item) for item in _update_fields]
task = signals.before_update.send(sender=self.model,
instance=model_instance,
db=db,
manager=self,
update_fields=_update_fields,
signal=signal,
pks=pks,
multi=True
)
if task:
await task
conn = await db.acquire() if hasattr(db, "acquire") else db
rets = []
try:
async with conn.transaction() as trans:
for table_name in groups:
_values.insert(0, groups[table_name])
ret = await db.execute(_sql_template.format(table_name=table_name), *_values)
logger.debug("[UPDATE] SQL:%s, PARAMS:%s, RET:%s" % (_sql_template, _values, ret))
_values.pop(0)
rets.append(parse_update_ret(ret))
finally:
if hasattr(db, "release"):
await db.release(conn)
signals.after_update.send(sender=self.model,
instance=model_instance,
db=db,
manager=self,
update_fields=_update_fields,
signal=signal,
pks=pks,
multi=True
)
del _update_fields
del _values
return sum(rets)
async def delete_by_PKs(self, pks, db=None, signal=None):
"""
@override
Bulk delete
:param pks: list of ids
:param db:
:return:
"""
_sql_template = self.sql_builder.bulk_delete_sql
groups = self._group_by_pks(pks)
task = signals.before_delete.send(sender=self.model,
db=db,
manager=self,
signal=signal,
pks=pks,
multi=True
)
if task:
await task
conn = await db.acquire() if hasattr(db, "acquire") else db
rets = []
try:
async with conn.transaction() as trans:
for table_name in groups:
ret = await db.execute(_sql_template.format(table_name=table_name), groups[table_name])
logger.debug("[DELETE] SQL:%s, PARAMS:%s, RET:%s" % (_sql_template, groups[table_name], ret))
_, count = ret.split()
rets.append(int(count))
finally:
if hasattr(db, "release"):
await db.release(conn)
signals.after_delete.send(sender=self.model,
db=db,
manager=self,
signal=signal,
pks=pks,
multi=True
)
return sum(rets)
``` |
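All managers are stateless with respect to connections: every call takes a `db` argument that must expose an asyncpg-style interface (`execute`, `executemany`, `fetch`, `fetchrow`, `fetchval`, and optionally `acquire`/`release` when a pool is passed). `PSQLManager.find` and `count` are classmethods operating on raw SQL, so they can be exercised without wiring up a model-bound manager. A rough sketch against asyncpg (the DSN, table and query are illustrative, not from the repo):
```python
import asyncio
import asyncpg
from pydantic_manager.managers import PSQLManager

async def main():
    conn = await asyncpg.connect(dsn="postgresql://localhost/demo")  # placeholder DSN
    try:
        # find() appends ORDER BY / LIMIT / OFFSET and delegates to conn.fetch()
        rows = await PSQLManager.find(
            "SELECT id, name FROM users WHERE age > $1",
            params=[18],
            order_by="id",
            limit=10,
            db=conn,
        )
        for row in rows:
            print(dict(row))
    finally:
        await conn.close()

asyncio.run(main())
```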
{
"source": "474416133/wx",
"score": 2
} |
#### File: wx/wx_pay/__init__.py
```python
__author__ = "sven"
import logging
import ssl
import aiohttp
from wx_pay.utils import (uuid32,
dict_to_xml,
xml_to_dict,
generate_sign_MD5)
logger = logging.getLogger("wx_pay")
class AsyncClient(object):
"""
Payment client
"""
SIGN_KEY = "key"
CODE_SUCCESS = "SUCCESS"
def __init__(self, mch_id, mch_key, appid, app_secret, ip=None, notify_url=None,
nonce_str_func= uuid32, dict2xml=dict_to_xml, xml2dict=xml_to_dict,
sign_genertor=generate_sign_MD5, json_dumps=None, json_loads=None,
cert_path=None, key_path=None):
"""
"""
self._mch_id = mch_id
self._mch_key = mch_key
self._appid = appid
self._app_secret = app_secret
self._ip = ip
self._notify_url = notify_url
self._nonce_str_func = nonce_str_func
self._xml2dict = xml2dict
self._dict2xml = dict2xml
self._sign_genertor = sign_genertor
self._json_dumps = json_dumps
self._json_loads = json_loads
self._ssl_context = None
if cert_path and key_path:
self._ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self._ssl_context.load_cert_chain(cert_path, key_path)
async def _execute(self, url, method="GET", params=None, data=None, headers=None, ssl=None):
"""
HTTP request
:param url:
:param data:
:param method:
:param schema:
:param ssl:
:return:
"""
async with aiohttp.ClientSession() as session:
async with session.request(method=method.lower(), url=url, params=params, data=data, headers=headers, ssl=ssl) as resp:
#await resp.text()
return resp
async def execute(self, url, method="GET", params=None, data=None, headers=None, ssl=None, resp_body_handler=None):
"""
HTTP request; return the response body processed by resp_body_handler
:param url:
:param method:
:param params:
:param data:
:param headers:
:param ssl:
:param resp_body_handler:
:return:
"""
resp = await self._execute(url, method=method, params=params, data=data, headers=headers, ssl=ssl)
if not callable(resp_body_handler):
return resp
_body = await resp.text()
return resp_body_handler(_body)
def validate_sign(self, mustbe_dict):
"""
Verify the signature
:param mustbe_dict: must be a dict
:return:
"""
sign = mustbe_dict.pop("sign", None)
if not sign:
return False
_sign = self._sign_genertor(mustbe_dict, self._mch_key)
logger.debug("sign0: {}, sign1: {}".format(sign, _sign))
return _sign == sign
def parse_xml(self, xml_doc):
"""
Parse XML and return a dict
:param xml_doc: XML text
:return:
"""
_dict = self._xml2dict(xml_doc)
if self.validate_sign(_dict):
return True, _dict
return False, None
def generate_xml(self, mustbe_dict):
"""
Generate XML from a dict
:param mustbe_dict:
:return:
"""
sign = self._sign_genertor(mustbe_dict, self._mch_key)
mustbe_dict["sign"] = sign
return self._dict2xml(mustbe_dict)
async def unified_order(self, product_dict, openid=None, trade_type="JSAPI"):
"""
Unified order (create a prepay order)
:param product_dict:
See detailed rules: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_1
:type product_dict : dict
:key body: product description
:key total_fee: total amount, in fen (cents)
:key client_ip: optional, client IP for APP and web payments; for Native payments use the IP of the machine calling the WeChat pay API
:key user_id: optional, unique user id under the merchant appid; required when trade_type=JSAPI and appid is set
:key sub_user_id: optional, unique user id under the mini-program appid; required when trade_type=JSAPI and sub_appid is set
:key out_trade_no: optional, merchant order number, auto-generated by default
:key detail: optional, product details
:key attach: optional, extra data returned unchanged by the query API and payment notification; mainly used to carry custom order data
:key fee_type: optional, three-letter ISO 4217 currency code, defaults to CNY
:key time_start: optional, order creation time, defaults to now
:key time_expire: optional, order expiry time, defaults to two hours after creation
:key goods_tag: optional, goods tag, parameter for coupon / instant-discount features
:key product_id: optional, required when trade_type=NATIVE; the product id embedded in the QR code, defined by the merchant
:key device_info: optional, terminal device id (store or POS id); pass "WEB" for PC web or official-account payments
:key limit_pay: optional, restrict payment method, no_credit -- disallow credit card payment
:key scene_info: optional, scene information reported for the payment
:type scene_info: dict
:param openid:
:param trade_type:
:return:
"""
if not isinstance(product_dict, dict):
raise RuntimeError("arg product_dict must be a dict")
if "out_trade_no" not in product_dict:
raise RuntimeError("miss out_trade_no")
if "body" not in product_dict:
raise RuntimeError("miss body")
if "total_fee" not in product_dict:
raise RuntimeError("miss total_fee")
if trade_type not in ("JSAPI", "NATIVE"):
raise RuntimeError("trade_type either JSAPI or NATIVE")
if trade_type == "JSAPI" and not openid:
raise RuntimeError("openid must be presented when trade_type=JSAPI")
product_dict.update(appid=self._appid,
mch_id=self._mch_id,
nonce_str=self._nonce_str_func(),
notify_url=self._notify_url,
spbill_create_ip=self._ip,
trade_type=trade_type)
_body = self.generate_xml(product_dict)
resp_json = await self.execute('https://api.mch.weixin.qq.com/pay/unifiedorder',
method="post",
data=_body,
resp_body_handler=self._handle_resp_result)
return resp_json
def _handle_resp_result(self, resp_body, raise_exception=None, error_msg="resp error"):
"""
Handle the API response: verify the signature and check result codes
:param resp_body:
:return:
"""
ok, resp_json = self.parse_xml(resp_body)
if ok and resp_json['return_code'] == AsyncClient.CODE_SUCCESS and resp_json["result_code"] == AsyncClient.CODE_SUCCESS:
return resp_json
if raise_exception is None:
raise RuntimeError(error_msg)
else:
raise raise_exception(error_msg)
async def query_order(self, transaction_id, appid):
"""
Query an order
See detailed rules: https://pay.weixin.qq.com/wiki/doc/api/app/app.php?chapter=9_2&index=4
:param transaction_id:
:param appid:
:return:
"""
body = {"transaction_id" : transaction_id,
"appid": appid,
"mch_id" : self._mch_id,
"nonce_tr" : self._nonce_str_func()}
xml_body = self.generate_xml(body)
resp_json = await self.execute("https://api.mch.weixin.qq.com/pay/orderquery",
method="post",
data=xml_body,
resp_body_handler=self._handle_resp_result)
return resp_json
async def close_order(self, out_trade_no):
"""
Close an order
See detailed rules: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_3
:param out_trade_no:
:return:
"""
body = {"out_trade_no" : out_trade_no,
"appid": self._appid,
"mch_id": self._mch_id,
"nonce_str": self._nonce_str_func()}
xml_body = self.generate_xml(body)
resp_json = await self.execute("https://api.mch.weixin.qq.com/pay/closeorder",
method="post",
data=xml_body,
resp_body_handler=self._handle_resp_result)
return resp_json
async def refund_order(self, transaction_id, op_user_id, out_refund_no=None):
"""
Apply for a refund
See detailed rules: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_4
out_trade_no: merchant order number
transaction_id: WeChat order number
out_refund_no: merchant refund number (auto-generated if not passed)
total_fee: order amount
refund_fee: refund amount
:param transaction_id:
:param op_user_id:
:param out_refund_no
:return:
"""
if not self._ssl_context:
raise RuntimeError("need ssl")
body = {"transaction_id": transaction_id,
"mch_id": self._mch_id,
"op_user_id": op_user_id,
"nonce_str": self._nonce_str_func(),
"appid" : self._appid,
"out_refund_no": out_refund_no}
xml_body = self.generate_xml(body)
resp_json = await self.execute("https://api.mch.weixin.qq.com/secapi/pay/refund",
method="post",
data=xml_body,
ssl=self._ssl_context,
resp_body_handler=self._handle_resp_result)
return resp_json
async def query_refund(self, transaction_id):
"""
Query a refund
After submitting a refund request, call this interface to check the refund status. Refunds are delayed:
refunds to balance arrive within 20 minutes; for bank-card refunds, query again after 3 working days.
See detailed rules: https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_5
data: one of out_refund_no, out_trade_no, transaction_id, refund_id is required
out_refund_no: merchant refund number
out_trade_no: merchant order number
transaction_id: WeChat order number
refund_id: WeChat refund number
:param transaction_id: WeChat order number
:return:
"""
body = {
"transaction_id" : transaction_id,
"appid" : self._appid,
"mch_id" : self._mch_id,
"nonce_str" : self._nonce_str_func()
}
xml_body = self.generate_xml(body)
resp_json = await self.execute("https://api.mch.weixin.qq.com/secapi/pay/refundquery",
method="post",
data=xml_body,
resp_body_handler=self._handle_resp_result)
return resp_json
async def enterprise_pay(self, partner_trade_no, amount, openid, desc, re_user_name=None, check_name="FORCE_CHECK"):
"""
Enterprise payment to an individual
See detailed rules: https://pay.weixin.qq.com/wiki/doc/api/tools/mch_pay.php?chapter=14_2
:param partner_trade_no:
:param amount:
:param openid:
:param desc:
:param re_user_name:
:param check_name:
:return:
"""
if not self._ssl_context:
raise RuntimeError("need ssl")
if check_name not in ("FORCE_CHECK", "NO_CHECK"):
check_name = "FORCE_CHECK"
body = {
"mch_appid" : self._appid,
"mchid": self._mch_id,
"nonce_str" : self._nonce_str_func(),
"amount" : amount,
"partner_trade_no" : partner_trade_no,
"openid" : openid,
"desc" : desc,
"check_name": check_name
}
if check_name == "FORCE_CHECK":
body["re_user_name"] = re_user_name
xml_body = self.generate_xml(body)
resp_json = await self.execute("https://api.mch.weixin.qq.com/mmpaymkttransfers/promotion/transfers",
method="post",
data=xml_body,
ssl=self._ssl_context,
resp_body_handler=self._handle_resp_result)
return resp_json
``` |
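`AsyncClient` only needs the merchant credentials and a notify URL up front; each API call builds a signed XML body and posts it to the WeChat pay endpoint. A hedged sketch of creating a JSAPI prepay order (all ids and keys below are placeholders):
```python
import asyncio
from wx_pay import AsyncClient

async def main():
    client = AsyncClient(
        mch_id="1230000109",             # placeholder merchant id
        mch_key="your-32-char-api-key",  # placeholder API key
        appid="wx1234567890abcdef",      # placeholder appid
        app_secret="app-secret",         # placeholder secret
        ip="203.0.113.10",
        notify_url="https://example.com/wxpay/notify",
    )
    order = {
        "out_trade_no": "ORDER-0001",
        "body": "demo product",
        "total_fee": 1,  # amount in fen
    }
    result = await client.unified_order(order, openid="user-openid", trade_type="JSAPI")
    print(result)

asyncio.run(main())
```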
{
"source": "475Cumulus/TBone-admin",
"score": 3
} |
#### File: TBone-admin/tbone_admin/admins.py
```python
class ModelAdminManager(object):
def __init__(self, *args, **kwargs):
self._registry = {}
def register(self, name, model):
self._registry[name] = model
def get_model(self, name):
if name in self._registry:
return self._registry[name]
raise KeyError('Model not registered')
model_manager = ModelAdminManager()
```
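`model_manager` is a module-level singleton, so admin models are registered once at import time and looked up by name later. A minimal sketch (the `User` class is an illustrative stand-in for a real model):
```python
from tbone_admin.admins import model_manager

class User(object):
    pass  # stand-in for a real TBone model

model_manager.register("user", User)
assert model_manager.get_model("user") is User
# model_manager.get_model("missing") would raise KeyError('Model not registered')
```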
#### File: TBone-admin/tbone_admin/views.py
```python
import os
from jinja2 import Environment, PackageLoader, TemplateNotFound
from sanic import Blueprint
from sanic.response import html
current_directory = os.path.dirname(os.path.realpath(__file__))
static_directory = os.path.join(current_directory, 'static')
env = Environment(loader=PackageLoader('tbone_admin', 'templates'))
bp = Blueprint('admin', url_prefix='/admin')
bp.static('/static', static_directory, name='static')
@bp.route('/', methods=['GET'])
async def index(request):
try:
template = env.get_template('index.html')
except TemplateNotFound:
raise TemplateNotFound('index.html')
content = template.render()
return html(content)
``` |
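The blueprint serves the admin index under `/admin` and its assets under `/admin/static`; it only needs to be attached to a Sanic application. A sketch, assuming Sanic is installed:
```python
from sanic import Sanic
from tbone_admin.views import bp

app = Sanic("tbone-admin-demo")
app.blueprint(bp)  # index at /admin/, assets at /admin/static/...

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)
```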
{
"source": "477-vrms/vrms-pi",
"score": 3
} |
#### File: vrms-pi/vrms/background.py
```python
import threading
from threading import Thread
from vrms.hardware.arm import ArmHandler
from vrms.network.mqtt import Mqtt
from vrms.network.udp import Udp
def arm(lock) -> None:
a = ArmHandler.load_arm()
a.client(lock)
def mqtt(lock) -> None:
m = Mqtt.load_mqtt()
m.client(lock)
def udp(lock) -> None:
u = Udp.load_udp()
u.client(lock)
pass
class Background:
def __init__(self):
lock = threading.Lock()
self.p1 = Thread(target=arm, args=(lock,))
self.p2 = Thread(target=mqtt, args=(lock,))
self.p3 = Thread(target=udp, args=(lock,))
def listen(self):
self.p1.start()
self.p2.start()
self.p3.start()
def close(self):
self.p1.join()
self.p2.join()
self.p3.join()
``` |
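`Background` fans the three clients out onto threads that share a single lock; the intended usage is a start/join pair, roughly as below (assuming the vrms arm, MQTT and UDP clients can actually connect on the device):
```python
from vrms.background import Background

bg = Background()
bg.listen()     # start arm, MQTT and UDP client threads
try:
    bg.close()  # block until all three threads exit
except KeyboardInterrupt:
    pass
```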
{
"source": "478959472/tensorflow_detection",
"score": 2
} |
#### File: tensorflow_detection/test/restest.py
```python
import os
import sys
import cv2
import numpy as np
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from matplotlib import pyplot as plt
class TOD(object):
def __init__(self):
# Path to frozen detection graph. This is the actual model that is used for the object detection.
self.PATH_TO_CKPT = 'data/frozen_inference_graph.pb/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
self.PATH_TO_LABELS = 'data/label_map.pbtxt'
# number of classes
self.NUM_CLASSES = 1
self.detection_graph = self._load_model()
self.category_index = self._load_label_map()
def _load_model(self):
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return detection_graph
def _load_label_map(self):
label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=self.NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
return category_index
def detect(self, image,path):
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
tf.device('/gpu:3')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
with self.detection_graph.as_default():
with tf.Session(graph=self.detection_graph) as sess:
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image, axis=0)
image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
self.category_index,
use_normalized_coordinates=True,
line_thickness=8)
# path = os.path.join(path, i)
cv2.imwrite(os.path.join(path, 'your_image.jpg'), image)
# plt.imshow(image)
# plt.show()
if __name__ == '__main__':
detecotr = TOD()
img_path = 'images/test'
for i in os.listdir(img_path):
if i.endswith('.jpg'):
path = os.path.join(img_path, i)
image = cv2.imread(path)
detecotr.detect(image,img_path)
``` |
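As written, `detect()` builds a new `tf.Session` (and re-reads the GPU options) for every image, which dominates the runtime when looping over a directory. A common variation, sketched here rather than taken from the repo, is to open the session once and reuse it across calls:
```python
# Sketch only: open the TF1 session once in __init__ and reuse it for each image.
class TODReusingSession(TOD):
    def __init__(self):
        super(TODReusingSession, self).__init__()
        self.sess = tf.Session(graph=self.detection_graph)

    def detect_fast(self, image):
        image_np_expanded = np.expand_dims(image, axis=0)
        image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        num = self.detection_graph.get_tensor_by_name('num_detections:0')
        return self.sess.run([boxes, scores, classes, num],
                             feed_dict={image_tensor: image_np_expanded})
```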
{
"source": "47attribute/DL-MF-FL",
"score": 3
} |
#### File: DL-MF-FL/IRBFL/getXML.py
```python
import requests
import json
from bs4 import BeautifulSoup
from xml.dom import minidom
def generateXML(pid, version, summary, description, modified):
dom = minidom.Document()
root_node = dom.createElement('bugrepository')
root_node.setAttribute('name', pid)
dom.appendChild(root_node)
bug_node = dom.createElement('bug')
root_node.appendChild(bug_node)
bug_node.setAttribute('id', version)
bug_node.setAttribute('opendate', '')
bug_node.setAttribute('fixdate', '')
info_node = dom.createElement('buginformation')
fix_node = dom.createElement('fixedFiles')
bug_node.appendChild(info_node)
bug_node.appendChild(fix_node)
summary_node = dom.createElement("summary")
info_node.appendChild(summary_node)
summary_text = dom.createTextNode(summary)
summary_node.appendChild(summary_text)
desc_node = dom.createElement("description")
info_node.appendChild(desc_node)
desc_text = dom.createTextNode(description)
desc_node.appendChild(desc_text)
for i in modified:
file_node = dom.createElement("file")
fix_node.appendChild(file_node)
file_text = dom.createTextNode(i + ".java")
file_node.appendChild(file_text)
try:
with open("XMLfile/" + pid + "/" + version + '.xml', 'w', encoding='UTF-8') as fh:
dom.writexml(fh, indent='', addindent='\t', newl='\n', encoding='UTF-8')
print(pid + '-' + version + ' XML written successfully!')
except Exception as err:
print('Error: {0}'.format(err))
for pid in ['Chart', 'Cli', 'Closure', 'Codec', 'Collections', 'Compress', 'Csv', 'Gson', 'JacksonCore',
'JacksonDatabind', 'JacksonXML', 'Jsoup', 'JxPath', 'Lang', 'Math', 'Mockito', 'Time']:
# Jsoup 45: invalid url
with open('Data/' + pid, 'r') as lines:
for line in lines:
version = line.split(',')[0]
bid = line.split(',')[1]
if bid == 'UNKNOWN': continue
url = line.split(',')[2]
modified = line.split(',')[3].replace('\"', '').replace('\n', '').split(';')
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
# D4J 2.0.0: fetch summary and description from the four kinds of issue-tracker URLs
if 'sourceforge.net' in url:
summary = soup.h2.text.replace('\n', '').strip()
description = soup.find('div', class_='markdown_content').text.replace('\n', ' ').replace('\r', '')
elif 'issues.apache.org' in url:
summary = soup.find('h1', id='summary-val').text
# description may be missing
try:
description = soup.find('div', class_='user-content-block').text.replace('\n', ' ').replace('\r', '')
except:
description = ''
elif 'github.com' in url:
summary = soup.find('h1', class_='gh-header-title mb-2 lh-condensed f1 mr-0 flex-auto break-word') \
.text.replace('\n', '').strip()
description = soup.find('div', class_='edit-comment-hide').text.replace('\n', ' ').strip()
elif 'storage.googleapis.com' in url:
d = json.loads(r.text)
summary = d['summary']
description = d['comments'][0]['content'].replace('\n', '').replace('\r', '')
generateXML(pid, version, summary, description, modified)
``` |
{
"source": "47bwy/wsyn",
"score": 3
} |
#### File: 47bwy/wsyn/wsyn.py
```python
import os
from wsyn import Scan, wsyn_cmd_parser
ENV = 'PluginDirs'
BASEDIR = os.path.abspath(os.path.dirname(__file__))
DEFAULT_PATH = [os.path.join(BASEDIR, path) for path in ["plugins"]]
def main():
args = wsyn_cmd_parser().parse_args()
PLUGINS_DIRS = args.plugin_dirs or os.environ.get(ENV, DEFAULT_PATH)
scan = Scan(dirs=PLUGINS_DIRS, target=args.url)
scan.run()
if __name__ == "__main__":
main()
``` |
{
"source": "47lining/image-pipeline-demo",
"score": 2
} |
#### File: image-pipeline-demo/commands/bucketandq.py
```python
from nucleator.cli.command import Command
from nucleator.cli.utils import ValidateCustomerAction
class BucketAndQ(Command):
"""
A no-op command - provides access to ansible playbooks through utility functions,
but does not interact with the nucleator cli parser.
"""
name = "bucketandq"
def provision(self, **kwargs):
"""
Provisions an S3 bucket and an SQS queue within specified Account for specified Customer.
"""
cli = Command.get_cli(kwargs)
cage = kwargs.get("cage", None)
customer = kwargs.get("customer", None)
if cage is None or customer is None:
raise ValueError("cage and customer must be specified")
app_name = kwargs.get("app_name", "pipeline")
extra_vars={
"cage_name": cage,
"customer_name": customer,
"app_name": app_name,
"verbosity": kwargs.get("verbosity", None),
"debug_credentials": kwargs.get("debug_credentials", None),
"rs_url": kwargs.get("redshift_url", None),
}
command_list = []
command_list.append("account")
command_list.append("cage") # because stackset/ansible/roles/instan... depends on cage_provision
command_list.append("bucketandq")
cli.obtain_credentials(commands = command_list, cage=cage, customer=customer, verbosity=kwargs.get("verbosity", None), debug_credentials=kwargs.get("debug_credentials", None))
return cli.safe_playbook(self.get_command_playbook("bucketandq-provision.yml"),
is_static=True, # do not use dynamic inventory script, credentials may not be available
**extra_vars
)
def orchestrate(self, **kwargs):
"""
Provisions an S3 bucket and an SQS queue within specified Account for specified Customer.
"""
cli = Command.get_cli(kwargs)
queue_name = kwargs.get("queue_name", None)
cage = kwargs.get("cage", None)
customer = kwargs.get("customer", None)
extra_vars={
"cage_name": cage,
"customer_name": customer,
"queue_name": queue_name,
"verbosity": kwargs.get("verbosity", None),
"debug_credentials": kwargs.get("debug_credentials", None),
}
playbook = "orchestrate.yml"
command_list = []
command_list.append("bucketandq")
cli.obtain_credentials(commands = command_list, cage=cage, customer=customer, verbosity=kwargs.get("verbosity", None), debug_credentials=kwargs.get("debug_credentials", None))
return cli.safe_playbook(self.get_command_playbook(playbook),
is_static=True, # do not use dynamic inventory script, credentials may not be available
**extra_vars
)
def parser_init(self, subparsers):
"""
Initialize parsers for this command.
"""
# add parser for cage command
setup_parser = subparsers.add_parser(self.name)
setup_subparsers=setup_parser.add_subparsers(dest="subcommand")
beanstalk_provision=setup_subparsers.add_parser('provision', help="provision a new nucleator bucketandq stackset")
beanstalk_provision.add_argument("--customer", required=True, action=ValidateCustomerAction, help="Name of customer from nucleator config")
beanstalk_provision.add_argument("--cage", required=True, help="Name of cage from nucleator config")
beanstalk_provision.add_argument("--app_name", required=True, help="Name of Application (will be used in bucket and queue names")
beanstalk_provision.add_argument("--redshift_url", required=True, help="The stackset url of the redshift cluster")
orchestrator=setup_subparsers.add_parser('orchestrate', help="Runs the orchestrator to start producing qrcode images")
orchestrator.add_argument("--customer", required=True, action=ValidateCustomerAction, help="Name of customer from nucleator config")
orchestrator.add_argument("--cage", required=True, help="Name of cage from nucleator config")
orchestrator.add_argument("--queue_name", required=True, help="Name of the sqs queue to push messages to")
# Create the singleton for auto-discovery
command = BucketAndQ()
```
#### File: image-pipeline-demo/distributor/application.py
```python
import logging, logging.handlers, json, boto, flask, os, hashlib
from flask import request, Response
from storyspecification import StorySpecification
import qrcode_generator as QR
import random
import uuid
import psycopg2
import subprocess
import time
import boto
from boto import sts
import boto.dynamodb
import boto.s3
# Create and configure the Flask app
application = flask.Flask(__name__)
application.debug = True
# Create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Handler
LOG_FILE = '/tmp/sample-app.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1048576, backupCount=5)
handler.setLevel(logging.INFO)
# Formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
tmpimagefolder = "tmpimagefolder"
if not os.path.isdir(tmpimagefolder):
os.mkdir(tmpimagefolder)
# Add Formatter to Handler
handler.setFormatter(formatter)
# add Handler to Logger
logger.addHandler(handler)
bucket_name = os.environ["S3_BUCKET_NAME"]
storySpec = StorySpecification()
def makeImageName(record):
# Output a string based on stuff
hash = hashlib.sha1("mymessage".encode("UTF-8")).hexdigest()
return "QRImage-"+str(random.uniform(0,10))+".jpg"
def makeBucketKey(record):
# Output a string based on stuff
key = record["date_time"]
# put a random string at the beginning
# remove ':' and ' '
key = key.replace(':', '')
key = key.replace(' ', '')
hash = uuid.uuid4().hex[:4]
key = hash + '-' + key
return key
def writeAnImage(record):
global bucket_name
print "Writing image for "+json.dumps(record)
image_file_name = makeImageName(record)
QR.create_local_image_file(image_file_name, record["product_url"], record["latitude"],
record["longitude"], record["date_time"], tmpimagefolder+"/")
key = makeBucketKey(record)
s3_location = "q/" + key
# print "aws s3 cp "+tmpimagefolder+"/"+image_file_name+" s3://"+bucket_name+"/"+key+"/"+image_file_name
QR.upload_file_to_s3(bucket_name, s3_location, image_file_name, tmpimagefolder+"/")
def copytoredshift(record):
dynamo_table_name = os.environ["DYN_TABLENAME"]
redshift_username = os.environ["RSDB_USERNAME"]
redshift_password = os.environ["<PASSWORD>"]
redshift_database = os.environ["RSDB_DATABASE"]
redshift_port = os.environ["RSDB_PORT"]
customer = os.environ["CUSTOMER"]
cage = os.environ["CAGE"]
role_name = "NucleatorBucketandqDistributorServiceRunner"
iam_conn = boto.connect_iam()
role = iam_conn.get_role(role_name)
role_arn = role["get_role_response"]["get_role_result"]["role"]["arn"]
stsconn = sts.STSConnection()
response = stsconn.assume_role(role_arn, "redshift_copy_session")
access_key = response.credentials.access_key
secret_key = response.credentials.secret_key
session_token = response.credentials.session_token
if customer == "47Lining":
endpoint = "redshift.%s.%s.com" % (cage, customer)
else:
endpoint = "redshift.%s.%s.47lining.com" % (cage, customer)
print "Connecting to redshift cluster: %s" % endpoint
conn = psycopg2.connect(dbname=redshift_database, host=endpoint, port=redshift_port, user=redshift_username, password=<PASSWORD>)
cur = conn.cursor()
print "Connected. Creating table"
cur.execute("CREATE TABLE IF NOT EXISTS imageproccessingtable(key varchar(50) NOT NULL, url varchar(200) NOT NULL, dateoriginal timestamp NOT NULL, gpslatitude float8 NOT NULL, gpslongitude float8 NOT NULL, image varchar(100));")
conn.commit()
print "Table recreated. Running copy command..."
cur.execute("copy imageproccessingtable from 'dynamodb://%s' credentials 'aws_access_key_id=%s;aws_secret_access_key=%s;token=%s' readratio 100;" % (dynamo_table_name, access_key, secret_key, session_token))
conn.commit()
print "Copy command completed"
def copyseconddata(record):
region = os.environ["REGION"]
dest_bucket_name = os.environ["S3_BUCKET_NAME"]
source_bucket_name = os.environ["S3_SOURCE_SECOND_BUCKET_NAME"]
dynamo_table_name = os.environ["DYN_TABLENAME"]
print "Deleting and recreating dynamo table so only new records are inserted into redshift"
dynamo_conn = boto.dynamodb.connect_to_region(region)
table = dynamo_conn.get_table(dynamo_table_name)
dynamo_conn.delete_table(table)
dynamo_schema = dynamo_conn.create_schema(hash_key_name='key',hash_key_proto_value=str)
time.sleep(5)
print "Sleeping for 5 seconds to let table delete"
table = dynamo_conn.create_table(name=dynamo_table_name,schema=dynamo_schema,read_units=500, write_units=150)
role_name = "NucleatorBucketandqDistributorServiceRunner"
iam_conn = boto.connect_iam()
role = iam_conn.get_role(role_name)
role_arn = role["get_role_response"]["get_role_result"]["role"]["arn"]
stsconn = sts.STSConnection()
response = stsconn.assume_role(role_arn, "redshift_copy_session")
access_key = response.credentials.access_key
secret_key = response.credentials.secret_key
session_token = response.credentials.session_token
print "Running S3 Copy Command"
command = "export AWS_ACCESS_KEY_ID=%s; export AWS_SECRET_ACCESS_KEY=%s; export AWS_SESSION_TOKEN=%s; aws s3 cp s3://%s/ s3://%s/ --recursive --include '*' > /dev/null" % (access_key, secret_key, session_token, source_bucket_name, dest_bucket_name)
subprocess.call(command, shell=True)
copytoredshift(record)
def copyinitialdata(record):
region = os.environ["REGION"]
dest_bucket_name = os.environ["S3_BUCKET_NAME"]
source_bucket_name = os.environ["S3_SOURCE_FIRST_BUCKET_NAME"]
dynamo_table_name = os.environ["DYN_TABLENAME"]
role_name = "NucleatorBucketandqDistributorServiceRunner"
iam_conn = boto.connect_iam()
role = iam_conn.get_role(role_name)
role_arn = role["get_role_response"]["get_role_result"]["role"]["arn"]
stsconn = sts.STSConnection()
response = stsconn.assume_role(role_arn, "redshift_copy_session")
access_key = response.credentials.access_key
secret_key = response.credentials.secret_key
session_token = response.credentials.session_token
print "Running S3 Copy Command"
command = "export AWS_ACCESS_KEY_ID=%s; export AWS_SECRET_ACCESS_KEY=%s; export AWS_SESSION_TOKEN=%s; aws s3 cp s3://%s/ s3://%s/ --recursive --include '*' > /dev/null" % (access_key, secret_key, session_token, source_bucket_name, dest_bucket_name)
subprocess.call(command, shell=True)
copytoredshift(record)
def handleMessage(message):
print "Message = ", message
if "date" in message:
msg_date = message["date"]
if storySpec.generate_record(msg_date):
record = storySpec.create_record(msg_date)
# write the image
writeAnImage(record)
else:
print "Choosing not to write image for "+msg_date
elif "product_url" in message:
# write the image
writeAnImage(message)
elif "redshift_initial_copy" in message:
# write the image
copyinitialdata(message)
elif "redshift_second_copy" in message:
# write the image
copyseconddata(message)
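# Message shapes this dispatcher understands (illustrative examples, not real payloads):
#   {"date": "2015-06-01 12:00:00"}                                              -> image written only if the story spec says so
#   {"product_url": "...", "latitude": ..., "longitude": ..., "date_time": "..."} -> image always written
#   {"redshift_initial_copy": true}                                              -> seed the bucket and load DynamoDB into Redshift
#   {"redshift_second_copy": true}                                               -> second bucket copy and Redshift reload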
@application.route('/', methods=['POST'])
def proc_message():
response = None
if request.json is None:
# Expect application/json request
response = Response("", status=415)
else:
try:
# If the message has an SNS envelope, extract the inner message
if 'TopicArn' in request.json and 'Message' in request.json:
message = json.loads(request.json['Message'])
else:
message = request.json
handleMessage(message)
response = Response("", status=200)
except Exception as ex:
logging.exception('Error processing message: %s' % request.json)
response = Response(ex.message, status=500)
return response
# here we are going to use boto to up the message visibility timeout
#region = os.environ["REGION"]
#connection = boto.sqs.connect_to_region(region)
#queue = get_queue(queue_name)
#connection.set_queue_attribute(queue, 'VisibilityTimeout', 900) # 15 min
if __name__ == '__main__':
application.run(host='0.0.0.0')
``` |
{
"source": "47lining/quickstart-osisoft-pisystem2aws-connector",
"score": 2
} |
#### File: assets/lambdas/copy_licenced_binary_lambda.py
```python
import os
from concurrent.futures import ThreadPoolExecutor
import boto3
import functools
from lambdas.utils import send_cfnresponse
def copy_data(event, source_bucket, source_key, destination_key):
submissions_bucket = boto3.resource('s3').Bucket(event['ResourceProperties']['DestinationBucketName'])
copy_source = {
'Bucket': source_bucket,
'Key': source_key
}
return functools.partial(submissions_bucket.copy, copy_source, destination_key)
def recursive_copy_data(event, source_bucket, source_prefix, destination_prefix):
data_bucket = boto3.resource('s3').Bucket(source_bucket)
source_path = source_prefix
for obj in data_bucket.objects.filter(Prefix=source_path):
source_key = obj.key
destination_key = os.path.join(destination_prefix, os.path.basename(obj.key))
yield copy_data(event, source_bucket, source_key, destination_key)
def generate_copy_jobs(event):
yield from recursive_copy_data(event,
source_bucket=event['ResourceProperties']['LicensedSoftwareS3BucketName'],
source_prefix=event['ResourceProperties']['LicensedSoftwareS3KeyPrefix'],
destination_prefix=event['ResourceProperties']['DestinationKeyPrefix'])
yield from recursive_copy_data(event,
source_bucket=event['ResourceProperties']['ConnectorAgentAssetsS3BucketName'],
source_prefix=event['ResourceProperties']['ConnectorAgentAssetsS3KeyPrefix'],
destination_prefix=event['ResourceProperties']['DestinationKeyPrefix'])
@send_cfnresponse
def handler(event, context):
if event['RequestType'] == 'Create':
with ThreadPoolExecutor(max_workers=7) as executor:
futures = [executor.submit(job) for job in generate_copy_jobs(event)]
for future in futures:
exception = future.exception()
if exception is not None:
print(exception)
raise exception
elif event['RequestType'] == 'Delete':
regional_lambda_bucket = boto3.resource('s3').Bucket(event['ResourceProperties']['DestinationBucketName'])
for key in regional_lambda_bucket.objects.filter(Prefix=event['ResourceProperties']['DestinationKeyPrefix']):
key.delete()
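# Sketch of the CloudFormation custom-resource event this handler expects (field names taken
# from the lookups above; the values shown are placeholders, not real resources):
# {
#   "RequestType": "Create",
#   "ResourceProperties": {
#     "DestinationBucketName": "my-dest-bucket", "DestinationKeyPrefix": "assets/",
#     "LicensedSoftwareS3BucketName": "licensed-bucket", "LicensedSoftwareS3KeyPrefix": "software/",
#     "ConnectorAgentAssetsS3BucketName": "agent-bucket", "ConnectorAgentAssetsS3KeyPrefix": "agent/"
#   }
# }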
```
#### File: workers/managed_feeds/test_managed_feeds_dynamodb_dao.py
```python
from io import BytesIO
from operator import itemgetter
from time import sleep
from freezegun import freeze_time
from tests.fixtures import *
@freeze_time('2016-01-02 11:12:13')
def test_update_pi_points_status(managed_feeds_dynamo_dao, pi_points_dynamo_table):
pi_points_dynamo_table.put_item(Item={'pi_point': 'point1'})
pi_points_dynamo_table.put_item(Item={'pi_point': 'point2'})
pi_points = ['point1', 'point2']
managed_feeds_dynamo_dao.update_pi_points_status(pi_points, 'pending')
points = pi_points_dynamo_table.scan()['Items']
sorted_points = sorted(points, key=itemgetter('pi_point'))
assert sorted_points == [
{'update_timestamp': '2016-01-02T11:12:13', 'subscription_status': 'pending', 'pi_point': 'point1'},
{'update_timestamp': '2016-01-02T11:12:13', 'subscription_status': 'pending', 'pi_point': 'point2'}
]
def test_get_latest_af_structure(managed_feeds_dynamo_dao, events_status_table, s3_resource):
events_status_table.put_item(
Item={
'id': '1',
'update_timestamp': '2000-11-11T22:22:22',
'event_type': 'sync_af',
'database': 'database',
's3_bucket': 'bucket1',
's3_key': 'af_structure.json',
'status': 'success'
}
)
events_status_table.put_item(
Item={
'id': '2',
'update_timestamp': '2001-01-01T22:22:22',
'event_type': 'sync_af',
'database': 'database',
's3_bucket': 'bucket2',
's3_key': 'af_structure.json',
'status': 'success'
}
)
events_status_table.put_item(
Item={
'id': '3',
'update_timestamp': '2002-01-01T22:22:22',
'event_type': 'sync_af',
'database': 'database',
's3_bucket': 'bucket3',
's3_key': 'af_structure.json',
'status': 'failure'
}
)
events_status_table.put_item(
Item={
'id': '4',
'update_timestamp': '2017-01-01T22:22:22',
'event_type': 'sync_af',
'database': 'otherdatabase',
's3_bucket': 'bucket4',
's3_key': 'af_structure.json',
'status': 'success'
}
)
s3_resource.Bucket('bucket2').upload_fileobj(
BytesIO(b'{"af_structure": "test"}'),
'af_structure.json'
)
af_structure = managed_feeds_dynamo_dao.get_latest_af_structure('database')
assert af_structure == {'af_structure': 'test'}
def test_get_latest_af_structure_without_data(managed_feeds_dynamo_dao):
af_structure = managed_feeds_dynamo_dao.get_latest_af_structure('database')
assert af_structure is None
def test_get_event_by_id(managed_feeds_dynamo_dao, events_status_table):
events_status_table.put_item(Item={'id': '1', 'key': 'test'})
event = managed_feeds_dynamo_dao.get_event_by_id('1')
assert event == {'id': '1', 'key': 'test'}
```
#### File: utils/piaf/af_structure_browser.py
```python
import re
class AfStructureBrowser(object):
def __init__(self, assets_query, assets_field="name", attributes_query=".*", attributes_field="name"):
self.assets_query = assets_query.replace("\\", "\\\\") if assets_field == 'path' else assets_query
self.assets_field = assets_field
self.attributes_query = attributes_query
self.attributes_field = attributes_field
def search_assets(self, structure):
results = {}
self._search_assets_tree(structure, results)
return results
def _search_assets_tree(self, structure, results):
for asset in structure:
if self._match_asset_field_with_query(asset, self.assets_query, self.assets_field):
copy = self._copy_node_and_remove_children_assets(asset)
filtered_attributes = self._filter_attributes(copy['attributes'])
if len(filtered_attributes) > 0:
copy['attributes'] = filtered_attributes
results[copy['path']] = copy
if 'assets' in asset:
self._search_assets_tree(asset['assets'], results)
def _copy_node_and_remove_children_assets(self, asset):
copy = asset.copy()
if 'assets' in asset:
copy.pop('assets')
return copy
def _filter_attributes(self, attributes_list):
result = []
for attribute in attributes_list:
if self._match_attribute_field_with_query(attribute, self.attributes_query, self.attributes_field):
result.append(attribute)
return result
@staticmethod
def _match_asset_field_with_query(asset, query, field):
field_not_present = field not in asset \
or asset[field] is None \
or (isinstance(asset[field], list) and len(asset[field]) == 0)
if field_not_present:
return query == ".*"
if field != 'categories':
string_to_match = asset[field]
return re.match("^" + query + "$", string_to_match)
else:
for category in asset['categories']:
if re.match("^" + query + "$", category):
return True
return False
@staticmethod
def _match_attribute_field_with_query(attribute, query, field):
field_not_present = field not in attribute \
or attribute[field] is None \
or (isinstance(attribute[field], list) and len(attribute[field]) == 0)
if field_not_present:
return query == ".*"
if field != 'categories':
string_to_match = attribute[field]
return re.match("^" + query + "$", string_to_match)
else:
for category in attribute['categories']:
for k, v in category.items():
if re.match("^" + query + "$", v):
return True
return False
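# Illustrative usage -- the asset structure shape is inferred from the code above,
# not taken from real PI AF data:
# structure = [{'name': 'Plant', 'path': 'Server\\DB\\Plant', 'attributes': [{'name': 'Status'}],
#               'assets': [{'name': 'Pump1', 'path': 'Server\\DB\\Plant\\Pump1',
#                           'attributes': [{'name': 'FlowRate'}, {'name': 'Temperature'}]}]}]
# browser = AfStructureBrowser(assets_query='Pump.*', attributes_query='Flow.*')
# browser.search_assets(structure)
# # -> {'Server\\DB\\Plant\\Pump1': {'name': 'Pump1', 'path': 'Server\\DB\\Plant\\Pump1',
# #                                  'attributes': [{'name': 'FlowRate'}]}}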
```
#### File: assets/webapp_management_console/app_exceptions.py
```python
import traceback
from functools import wraps
import logging
logger = logging.getLogger(__name__)
class BackendException(Exception):
"""Base class for exceptions raised by API methods
Makes easier to handle exceptions in webapp
"""
def __init__(self, message, status_code=500, payload=None):
super().__init__()
self.message = message
self.status_code = status_code
self.payload = payload
def to_dict(self):
result = dict(self.payload or ())
result['message'] = self.message
return result
def compose_error_payload(exception):
tb = traceback.format_exc()
error_payload = {
'exception': exception.__class__.__name__,
'description': str(exception),
'traceback': tb
}
return error_payload
def raise_backend_exception(error_message):
def outer(fun):
@wraps(fun)
def inner(*args, **kwargs):
try:
response = fun(*args, **kwargs)
except Exception as e:
payload = compose_error_payload(e)
logger.exception("%s:\n%s", error_message, payload['traceback'])
raise BackendException(error_message, payload=payload)
return response
return inner
return outer
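# Typical usage (illustrative): any exception raised inside the wrapped function is logged and
# re-raised as a BackendException whose payload carries the exception name, description and traceback.
#
# @raise_backend_exception('Failed to list feeds')
# def list_feeds():
#     ...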
``` |
{
"source": "47rooks/bible-utilities",
"score": 2
} |
#### File: bibleutils/test/test_versification.py
```python
import unittest
from bibleutils.versification import VersificationID, BookID, Identifier, \
ReferenceFormID, parse_refs, ETCBCHVersification, Ref, convert_refs, \
expand_refs, VersificationException
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testVersificationIDs(self):
'''Verify that ids can be referred to by the property methods
'''
assert VersificationID.ETCBCH == 1
assert VersificationID.ETCBCG == 2
assert VersificationID.IGNTPSinaiticus == 3
assert VersificationID.Accordance == 4
def testVersificationIDsImmutable(self):
with self.assertRaises(AttributeError):
VersificationID.ETCBCH = 12
def testVersificationIDsCannotBeAdded(self):
# FIXME I cannot prevent an attribute being added.
with self.assertRaises(AttributeError):
VersificationID.FOO = 15
def testVersificationIter(self):
for k in VersificationID:
print('key={:s}'.format(k))
def testBookNameFromBookId(self):
self.assertEqual(ETCBCHVersification.book_name(BookID._NUMBERS), 'Numeri',
f'Incorrect name from book_id {ETCBCHVersification.book_id(BookID._NUMBERS)}')
def testBookIdFromBookName(self):
self.assertEqual(ETCBCHVersification.book_id('Numeri'),
BookID._NUMBERS,
f"Incorrect ID from book_name {ETCBCHVersification.book_name('Numeri')}")
def testIDValuesUnique(self):
'''Verify that duplicates cannot be created in the Identifier class
'''
chk = {'_GENESIS':1, '_EXODUS':2, '_LEVITICUS':3,
'_NUMBERS':4, '_DEUTERONOMY':5, '_DEUTERONOMYA':5}
with self.assertRaises(VersificationException) as expected_ex:
Identifier(chk)
ex = expected_ex.exception
self.assertEqual(ex.message[:51],
'duplicate value in supplied map at key _DEUTERONOMY',
'Unexpected mesg in exception : {:s}'.format(str(ex)))
def testBookIDSmoker(self):
'''Just a quick smoker
'''
self.assertEqual(BookID._1CHRONICLES, 38, 'Unexpected value {:d}')
def testParseBookOnly(self):
r = parse_refs("Exodus", ReferenceFormID.BIBLEUTILS)
self.assertEqual(len(r), 1)
self.assertEqual(r[0].versification, ReferenceFormID.BIBLEUTILS,
'wrong versification system {}'.format(r[0].versification))
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertIsNone(r[0].end_book,
'ending book is wrong {}'.format(r[0].end_book))
self.assertIsNone(r[0].st_ch, 'st_ch not None {}'.format(r[0].st_ch))
self.assertIsNone(r[0].end_ch, 'end_ch not None {}'.format(r[0].end_ch))
self.assertIsNone(r[0].st_vs, 'st_vs not None {}'.format(r[0].st_vs))
self.assertIsNone(r[0].end_vs, 'end_vs not None {}'.format(r[0].end_vs))
self.assertIsNone(r[0].st_sub_vs, 'st_sub_vs not None {}'.format(r[0].st_sub_vs))
self.assertIsNone(r[0].end_sub_vs, 'end_sub_vs not None {}'.format(r[0].end_sub_vs))
def testParseNumBookOnly(self):
r = parse_refs("1Kings", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._1KINGS,
'wrong book id {}'.format(r[0].st_book))
def testParseBookRangeOnly(self):
r = parse_refs("Exodus-Numbers", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].end_book, BookID._NUMBERS,
'wrong book id {}'.format(r[0].end_book))
def testParseBookRangeTwoDelims(self):
with self.assertRaises(VersificationException) as expected_ex:
parse_refs("Exodus--Numbers", ReferenceFormID.BIBLEUTILS)
ex = expected_ex.exception
self.assertEqual(ex.message,
'invalid book name at pos 7 in Exodus--Numbers',
'Unexpected mesg in exception : {:s}'.format(str(ex)))
def testParseChVsRangeTwoDelims(self):
with self.assertRaises(VersificationException) as expected_ex:
parse_refs("Exodus fc00:db20:35b:7399::5", ReferenceFormID.BIBLEUTILS)
ex = expected_ex.exception
self.assertEqual(ex.message,
'invalid verse reference at pos 10 in Exodus fc00:db20:35b:7399::5',
'Unexpected mesg in exception : {:s}'.format(str(ex)))
def testParseTwoCommas(self):
with self.assertRaises(VersificationException) as expected_ex:
parse_refs("Exodus 12-13,,15", ReferenceFormID.BIBLEUTILS)
ex = expected_ex.exception
self.assertEqual(ex.message,
'invalid chapter at pos 13 in Exodus 12-13,,15',
'Unexpected mesg in exception : {:s}'.format(str(ex)))
def testParseMixedDelims(self):
with self.assertRaises(VersificationException) as expected_ex:
parse_refs("Exodus 12-13,:-15", ReferenceFormID.BIBLEUTILS)
ex = expected_ex.exception
self.assertEqual(ex.message,
'invalid chapter at pos 13 in Exodus 12-13,:-15',
'Unexpected mesg in exception : {:s}'.format(str(ex)))
def testParseBookRangeTooManyBooks(self):
with self.assertRaises(VersificationException) as expected_ex:
parse_refs("Exodus-Numbers-Deuteronomy", ReferenceFormID.BIBLEUTILS)
ex = expected_ex.exception
self.assertEqual(ex.message,
'invalid "-" delimiter at 15 in Exodus-Numbers-Deuteronomy')
def testParseMultiBookRangeOnly(self):
r = parse_refs("Exodus-Numbers,Matt-Mark", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].end_book, BookID._NUMBERS,
'wrong book id {}'.format(r[0].end_book))
self.assertEqual(r[1].st_book, BookID._MATTHEW,
'wrong book id {}'.format(r[1].st_book))
self.assertEqual(r[1].end_book, BookID._MARK,
'wrong book id {}'.format(r[1].end_book))
def testParseNumBookRangeOnly(self):
r = parse_refs("1Kings-2Kings", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._1KINGS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].end_book, BookID._2KINGS,
'wrong book id {}'.format(r[0].end_book))
def testParseBookChapter(self):
r = parse_refs("Exodus 12", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertIsNone(r[0].end_book,
'book id is not None {}'.format(r[0].end_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect chapter {}'.format(r[0].st_ch))
self.assertIsNone(r[0].end_ch, 'chapter is not None')
def testParseBookChapterRange(self):
r = parse_refs("Exodus 12-15", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
self.assertEqual(r[0].end_ch, 15,
'incorrect ending chapter {}'.format(r[0].end_ch))
def testParseBookMultiChapterRange(self):
r = parse_refs("Exodus 12-15, 17-25", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
self.assertEqual(r[0].end_ch, 15,
'incorrect ending chapter {}'.format(r[0].end_ch))
self.assertEqual(r[1].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[1].st_book))
self.assertEqual(r[1].st_ch, 17,
'incorrect starting chapter {}'.format(r[1].st_ch))
self.assertEqual(r[1].end_ch, 25,
'incorrect ending chapter {}'.format(r[1].end_ch))
def testParseBookAbbrevCh(self):
r = parse_refs("Ex 12", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
def testParseBookAbbrevWithDot(self):
r = parse_refs("Ex. 12", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
def testParseBookChVs(self):
r = parse_refs("Gen 12:1", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
self.assertEqual(r[0].st_vs, 1,
'incorrect starting chapter {}'.format(r[0].st_vs))
def testParseBookChVsRange(self):
r = parse_refs("Gen 12:1-12", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
self.assertEqual(r[0].st_vs, 1,
'incorrect starting chapter {}'.format(r[0].st_vs))
self.assertEqual(r[0].end_vs, 12,
'incorrect starting chapter {}'.format(r[0].end_vs))
def testParseBookChVsRangeSeq(self):
r = parse_refs("Gen 12:1-12,13", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
self.assertEqual(r[0].st_vs, 1,
'incorrect starting chapter {}'.format(r[0].st_vs))
self.assertEqual(r[0].end_vs, 12,
'incorrect starting chapter {}'.format(r[0].end_vs))
self.assertEqual(r[1].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[1].st_ch, 12,
'incorrect starting chapter {}'.format(r[1].st_ch))
self.assertEqual(r[1].st_vs, 13,
'incorrect starting chapter {}'.format(r[1].st_vs))
def testParseGen1_3(self):
r = parse_refs('Gen 1:1-2,6-23', ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 1,
'incorrect starting chapter {}'.format(r[0].st_ch))
self.assertEqual(r[0].st_vs, 1,
'incorrect starting chapter {}'.format(r[0].st_vs))
self.assertEqual(r[0].end_vs, 2,
'incorrect starting chapter {}'.format(r[0].end_vs))
self.assertEqual(r[1].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[1].st_ch, 1,
'incorrect starting chapter {}'.format(r[1].st_ch))
self.assertEqual(r[1].st_vs, 6,
'incorrect starting chapter {}'.format(r[1].st_vs))
self.assertEqual(r[1].end_vs, 23,
'incorrect starting chapter {}'.format(r[1].st_vs))
def testParseBookChVsChVs(self):
r = parse_refs('Gen 1:1-2,6-23,2:23', ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 1,
'incorrect starting chapter {}'.format(r[0].st_ch))
self.assertEqual(r[0].st_vs, 1,
'incorrect starting chapter {}'.format(r[0].st_vs))
self.assertEqual(r[0].end_vs, 2,
'incorrect starting chapter {}'.format(r[0].end_vs))
self.assertEqual(r[1].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[1].st_ch, 1,
'incorrect starting chapter {}'.format(r[1].st_ch))
self.assertEqual(r[1].st_vs, 6,
'incorrect starting chapter {}'.format(r[1].st_vs))
self.assertEqual(r[1].end_vs, 23,
'incorrect starting chapter {}'.format(r[1].st_vs))
self.assertEqual(r[2].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[2].st_book))
self.assertEqual(r[2].st_ch, 2,
'incorrect starting chapter {}'.format(r[2].st_ch))
self.assertEqual(r[2].st_vs, 23,
'incorrect starting chapter {}'.format(r[2].st_vs))
def testParseComplexRefString(self):
r = parse_refs('Gen 1:1-2,6, Ex 17:3, Deut 12,13', ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 1,
'incorrect starting chapter {}'.format(r[0].st_ch))
self.assertEqual(r[0].st_vs, 1,
'incorrect starting chapter {}'.format(r[0].st_vs))
self.assertEqual(r[0].end_vs, 2,
'incorrect starting chapter {}'.format(r[0].end_vs))
self.assertEqual(r[1].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[1].st_ch, 1,
'incorrect starting chapter {}'.format(r[1].st_ch))
self.assertEqual(r[1].st_vs, 6,
'incorrect starting chapter {}'.format(r[1].st_vs))
self.assertEqual(r[2].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[2].st_book))
self.assertEqual(r[2].st_ch, 17,
'incorrect starting chapter {}'.format(r[2].st_ch))
self.assertEqual(r[2].st_vs, 3,
'incorrect starting chapter {}'.format(r[2].st_vs))
self.assertEqual(r[3].st_book, BookID._DEUTERONOMY,
'wrong book id {}'.format(r[3].st_book))
self.assertEqual(r[3].st_ch, 12,
'incorrect starting chapter {}'.format(r[3].st_ch))
self.assertEqual(r[4].st_book, BookID._DEUTERONOMY,
'wrong book id {}'.format(r[4].st_book))
self.assertEqual(r[4].st_ch, 13,
'incorrect starting chapter {}'.format(r[4].st_vs))
def testConvertInternalToETCBCH(self):
refs = [Ref(ReferenceFormID.BIBLEUTILS,
BookID._DEUTERONOMY, sc=3, sv=4),
Ref(ReferenceFormID.BIBLEUTILS,
BookID._EXODUS, BookID._EXODUS, 1, sv=12, ev=15)]
c_refs = convert_refs(refs, ReferenceFormID.ETCBCH)
self.assertEqual(c_refs[0].versification, ReferenceFormID.ETCBCH,
f'Incorrect reference form {c_refs[0].versification}')
self.assertEqual(c_refs[0].st_book, 'Deuteronomium',
f'Conversion returned wrong name {c_refs[0].st_book}')
self.assertEqual(c_refs[0].st_ch, 3,
f'Conversion returned wrong ch {c_refs[0].st_ch}')
self.assertEqual(c_refs[0].st_vs, 4,
f'Conversion returned wrong vs {c_refs[0].st_vs}')
self.assertEqual(c_refs[1].versification, ReferenceFormID.ETCBCH,
f'Incorrect reference form {c_refs[0].versification}')
self.assertEqual(c_refs[1].st_book, 'Exodus',
f'Conversion returned wrong name {c_refs[1].st_book}')
self.assertEqual(c_refs[1].st_ch, 1,
f'Conversion returned wrong ch {c_refs[1].st_ch}')
self.assertEqual(c_refs[1].st_vs, 12,
f'Conversion returned wrong vs {c_refs[1].st_vs}')
self.assertEqual(c_refs[1].end_vs, 15,
f'Conversion returned wrong vs {c_refs[1].end_vs}')
def testConvertETCBCHToInternal(self):
refs = [Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', sc=3, sv=4),
Ref(ReferenceFormID.ETCBCH,
'Exodus', 'Exodus', 1, sv=12, ev=15)]
c_refs = convert_refs(refs, ReferenceFormID.BIBLEUTILS)
self.assertEqual(c_refs[0].versification, ReferenceFormID.BIBLEUTILS,
f'Incorrect reference form {c_refs[0].versification}')
self.assertEqual(c_refs[0].st_book, BookID._DEUTERONOMY,
f'Conversion returned wrong name {c_refs[0].st_book}')
self.assertEqual(c_refs[0].st_ch, 3,
f'Conversion returned wrong ch {c_refs[0].st_ch}')
self.assertEqual(c_refs[0].st_vs, 4,
f'Conversion returned wrong vs {c_refs[0].st_vs}')
self.assertEqual(c_refs[1].versification, ReferenceFormID.BIBLEUTILS,
f'Incorrect reference form {c_refs[1].versification}')
self.assertEqual(c_refs[1].st_book, BookID._EXODUS,
f'Conversion returned wrong name {c_refs[1].st_book}')
self.assertEqual(c_refs[1].st_ch, 1,
f'Conversion returned wrong ch {c_refs[1].st_ch}')
self.assertEqual(c_refs[1].st_vs, 12,
f'Conversion returned wrong vs {c_refs[1].st_vs}')
self.assertEqual(c_refs[1].end_vs, 15,
f'Conversion returned wrong vs {c_refs[1].end_vs}')
def testConvertInternalToETCBCG(self):
refs = [Ref(ReferenceFormID.BIBLEUTILS,
BookID._LUKE, sc=3, sv=4),
Ref(ReferenceFormID.BIBLEUTILS,
BookID._MARK, BookID._MARK, 1, sv=12, ev=15)]
c_refs = convert_refs(refs, ReferenceFormID.ETCBCG)
self.assertEqual(c_refs[0].versification, ReferenceFormID.ETCBCG,
f'Incorrect reference form {c_refs[0].versification}')
self.assertEqual(c_refs[0].st_book, 'Luke',
f'Conversion returned wrong name {c_refs[0].st_book}')
self.assertEqual(c_refs[0].st_ch, 3,
f'Conversion returned wrong ch {c_refs[0].st_ch}')
self.assertEqual(c_refs[0].st_vs, 4,
f'Conversion returned wrong vs {c_refs[0].st_vs}')
self.assertEqual(c_refs[1].versification, ReferenceFormID.ETCBCG,
f'Incorrect reference form {c_refs[0].versification}')
self.assertEqual(c_refs[1].st_book, 'Mark',
f'Conversion returned wrong name {c_refs[1].st_book}')
self.assertEqual(c_refs[1].st_ch, 1,
f'Conversion returned wrong ch {c_refs[1].st_ch}')
self.assertEqual(c_refs[1].st_vs, 12,
f'Conversion returned wrong vs {c_refs[1].st_vs}')
self.assertEqual(c_refs[1].end_vs, 15,
f'Conversion returned wrong vs {c_refs[1].end_vs}')
def testConvertETCBCGToInternal(self):
refs = [Ref(ReferenceFormID.ETCBCG,
'Luke', sc=3, sv=4),
Ref(ReferenceFormID.ETCBCG,
'Mark', 'Mark', 1, sv=12, ev=15)]
c_refs = convert_refs(refs, ReferenceFormID.BIBLEUTILS)
self.assertEqual(c_refs[0].versification, ReferenceFormID.BIBLEUTILS,
f'Incorrect reference form {c_refs[0].versification}')
self.assertEqual(c_refs[0].st_book, BookID._LUKE,
f'Conversion returned wrong name {c_refs[0].st_book}')
self.assertEqual(c_refs[0].st_ch, 3,
f'Conversion returned wrong ch {c_refs[0].st_ch}')
self.assertEqual(c_refs[0].st_vs, 4,
f'Conversion returned wrong vs {c_refs[0].st_vs}')
self.assertEqual(c_refs[1].versification, ReferenceFormID.BIBLEUTILS,
f'Incorrect reference form {c_refs[1].versification}')
self.assertEqual(c_refs[1].st_book, BookID._MARK,
f'Conversion returned wrong name {c_refs[1].st_book}')
self.assertEqual(c_refs[1].st_ch, 1,
f'Conversion returned wrong ch {c_refs[1].st_ch}')
self.assertEqual(c_refs[1].st_vs, 12,
f'Conversion returned wrong vs {c_refs[1].st_vs}')
self.assertEqual(c_refs[1].end_vs, 15,
f'Conversion returned wrong vs {c_refs[1].end_vs}')
def testExpandVerse(self):
refs = [Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', sc=3, sv=4, ev=6)]
e_refs = expand_refs(refs)
self.assertEqual(len(e_refs), 3, 'incorrect number of expanded refs')
self.assertEqual(e_refs[0].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[0].end_book, 'end_book is not None')
self.assertEqual(e_refs[0].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[0].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[0].st_vs, 4, 'wrong verse')
self.assertIsNone(e_refs[0].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[1].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[1].end_book, 'end_book is not None')
self.assertEqual(e_refs[1].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[1].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[1].st_vs, 5, 'wrong verse')
self.assertIsNone(e_refs[1].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[2].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[2].end_book, 'end_book is not None')
self.assertEqual(e_refs[2].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[2].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[2].st_vs, 6, 'wrong verse')
self.assertIsNone(e_refs[2].end_vs, 'end_vs is not None')
def testExpandList(self):
refs = [Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', sc=3, sv=4, ev=6),
Ref(ReferenceFormID.ETCBCH,
'Exodus', sc=6, sv=1, ev=7)]
e_refs = expand_refs(refs)
self.assertEqual(len(e_refs), 10, 'incorrect number of expanded refs')
self.assertEqual(e_refs[0].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[0].end_book, 'end_book is not None')
self.assertEqual(e_refs[0].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[0].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[0].st_vs, 4, 'wrong verse')
self.assertIsNone(e_refs[0].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[1].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[1].end_book, 'end_book is not None')
self.assertEqual(e_refs[1].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[1].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[1].st_vs, 5, 'wrong verse')
self.assertIsNone(e_refs[1].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[2].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[2].end_book, 'end_book is not None')
self.assertEqual(e_refs[2].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[2].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[2].st_vs, 6, 'wrong verse')
self.assertIsNone(e_refs[2].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[3].st_book, 'Exodus', 'st_book is not Exodus')
self.assertIsNone(e_refs[3].end_book, 'end_book is not None')
self.assertEqual(e_refs[3].st_ch, 6, 'wrong chapter')
self.assertIsNone(e_refs[3].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[3].st_vs, 1, 'wrong verse')
self.assertIsNone(e_refs[3].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[4].st_book, 'Exodus', 'st_book is not Exodus')
self.assertIsNone(e_refs[4].end_book, 'end_book is not None')
self.assertEqual(e_refs[4].st_ch, 6, 'wrong chapter')
self.assertIsNone(e_refs[4].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[4].st_vs, 2, 'wrong verse')
self.assertIsNone(e_refs[4].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[9].st_book, 'Exodus', 'st_book is not Exodus')
self.assertIsNone(e_refs[9].end_book, 'end_book is not None')
self.assertEqual(e_refs[9].st_ch, 6, 'wrong chapter')
self.assertIsNone(e_refs[9].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[9].st_vs, 7, 'wrong verse')
self.assertIsNone(e_refs[9].end_vs, 'end_vs is not None')
def testExpandChapter(self):
with self.assertRaises(VersificationException) as expected_ex:
refs = [Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', sc=3, ec=4, sv=4, ev=6)]
expand_refs(refs)
ex = expected_ex.exception
print(f'ex is {ex}')
self.assertEqual(ex.message,
'reference extends over more than one chapter')
def testExpandEndBook(self):
with self.assertRaises(VersificationException) as expected_ex:
refs = [Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', 'Exodus', sc=3, sv=4)]
expand_refs(refs)
ex = expected_ex.exception
self.assertEqual(ex.message,
'reference extends over more than one book')
def testRefBadCh(self):
with self.assertRaises(VersificationException) as expected_ex:
Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', 'Exodus', sc=3, ec=2)
ex = expected_ex.exception
self.assertEqual(ex.message,
'ending chapter 2 is before the starting chapter 3')
def testRefBadVs(self):
with self.assertRaises(VersificationException) as expected_ex:
Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', 'Exodus', sv=3, ev=2)
ex = expected_ex.exception
self.assertEqual(ex.message,
'ending verse 2 is before the starting verse 3')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
``` |
{
"source": "47rooks/puzzles",
"score": 3
} |
#### File: core/test/test_etcbc.py
```python
import unittest
from puzzles.core.etcbc import get_words
from puzzles.core.etcbc import Corpus
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testGetHebrewWords(self):
l = get_words(('Genesis', 1, 1),('Genesis', 1, 2),('Genesis', 1, 3))
self.assertEqual(len(l), 39, 'incorrect number of words')
self.assertEqual(l,
['בְּ', 'רֵאשִׁ֖ית', 'בָּרָ֣א', 'אֱלֹהִ֑ים', 'אֵ֥ת', 'הַ', 'שָּׁמַ֖יִם'
, 'וְ', 'אֵ֥ת', 'הָ', 'אָֽרֶץ', 'וְ', 'הָ', 'אָ֗רֶץ', 'הָיְתָ֥ה'
, 'תֹ֨הוּ֙', 'וָ', 'בֹ֔הוּ', 'וְ', 'חֹ֖שֶׁךְ', 'עַל', 'פְּנֵ֣י', 'תְהֹ֑ום'
, 'וְ', 'ר֣וּחַ', 'אֱלֹהִ֔ים', 'מְרַחֶ֖פֶת', 'עַל', 'פְּנֵ֥י', 'הַ', 'מָּֽיִם'
, 'וַ', 'יֹּ֥אמֶר', 'אֱלֹהִ֖ים', 'יְהִ֣י', 'אֹ֑ור', 'וַֽ', 'יְהִי', 'אֹֽור'],
'incorrect words retrieved')
def testGetGreekWords(self):
l = get_words(('Matthew', 1, 1),('Matthew', 1, 2),('Matthew', 1, 3),
work=Corpus.GREEK)
self.assertEqual(len(l), 47, 'incorrect number of words')
self.assertEqual(l,
['Βίβλος', 'γενέσεως', 'Ἰησοῦ', 'χριστοῦ', 'υἱοῦ',
'Δαυὶδ', 'υἱοῦ', 'Ἀβραάμ', 'Ἀβραὰμ', 'ἐγέννησεν',
'τὸν', 'Ἰσαάκ', 'δὲ', 'Ἰσαὰκ', 'ἐγέννησεν', 'τὸν',
'Ἰακώβ', 'δὲ', 'Ἰακὼβ', 'ἐγέννησεν', 'τὸν', 'Ἰούδαν',
'καὶ', 'τοὺς', 'ἀδελφοὺς', 'αὐτοῦ', 'δὲ', 'Ἰούδας',
'ἐγέννησεν', 'τὸν', 'Φαρὲς', 'καὶ', 'τὸν', 'Ζάρα',
'ἐκ', 'τῆς', 'Θαμάρ', 'δὲ', 'Φαρὲς', 'ἐγέννησεν',
'τὸν', 'Ἑσρώμ', 'δὲ', 'Ἑσρὼμ', 'ἐγέννησεν', 'τὸν',
'Ἀράμ'],
'incorrect words retrieved')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
``` |
{
"source": "47shubh/blog",
"score": 4
} |
#### File: DataSciencePractice/DataScience/numpy.py
```python
import numpy as np
#load the library and check its version
import numpy as np
print(np.__version__)
## The data manipulation capabilities of pandas are built on top of the numpy library. In a way, numpy is a dependency of the pandas library.
import pandas as pd
#1) Creating Arrays in numpy
get_zero = np.zeros(10, dtype='int')
print(get_zero)
#creating a 2 row x 4 column matrix
data = np.ones((2,4),dtype=float)
print(data)
#creating a matrix with a predefined value
data = np.full((2,4),4.2)
print(data)
##create an array with a set sequence
data = np.arange(0,20,2)
print(data)
#create an array of even space between the given range of values
data = np.linspace(0,10,4)
print(data)
#create a 3x3 array with mean 0 and standard deviation 1 in a given dimension
data = np.random.normal(0,1,(3,3))
print(data)
#create an identity matrix
data = np.eye(4)
print(data)
#set a random seed
#With the seed reset (every time), the same set of numbers will appear every time
np.random.seed(0)
# 1 d
x1 =np.random.randint(10,size=6)
# 2 D
x2 =np.random.randint(10,size=(3,5))
# 3 D
print("3d")
x3 = np.random.randint(10,size=(3,4,5))
print(x3)
print(x3.size)
print(x3.shape)
print(x3.ndim)
## 2 Array Indexing
x1 = np.array([4, 3, 4, 4, 8, 4])
print(x1[2])
x1[0]
#get the last value
x1[-1]
#in a multidimensional array, we need to specify row and column index
x2= np.array([[3, 7, 5, 5],
[0, 1, 5, 9],
[3, 0, 5, 0]])
#value at the 3rd row and 3rd column (index [2,2])
print(x2[2,2])
#replace value at 0,0 index
x2[0,0] = 12
x2
## 3) Array Slicing
x = np.arange(10)
#from start to 4th position
print(x[:5])
#from 4th position to end
x[4:]
#from 4th to 6th position
x[4:7]
#return elements at even place
print(x[ : : 2])
#return elements from first position step by two
x[1::2]
#reverse the array
x[::-1]
## 4 Array Concatenation
x = np.array([1, 2, 3])
y = np.array([3, 2, 1])
z= np.concatenate([x, y])
print(z)
# to combine a 2D array with a 1D array, use np.vstack or np.hstack
x = np.array([3,4,5])
grid = np.array([[1,2,3],[17,18,19]])
np.vstack([x,grid])
z = np.array([[9],[9]])
np.hstack([grid,z])
### arrange , reshape it
reshap= np.arange(16).reshape((4,4))
print(reshap)
## function
def greater(a,b):
print("inside function")
print(a)
print(b)
if a>b :
return a
else:
return b
t = greater(np.array(3),np.array(4))
print("d")
print(t)
```
#### File: DataSciencePractice/MachineLearning/Linear_regression_with_gradient_descent.py
```python
# The code in this notebook will not use any libraries except numpy for mathematical calculations, matplotlib for graph plotting, and pandas for handling data. This will give a better understanding of how machine learning algorithms work under the hood.
# ## Dataset
# The dataset consists of two columns
# * Number of study hours
# * Test Scores
# In[3]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import pyplot as plt, rcParams as rc
rc["figure.figsize"] = 10,6
# ## Error function
# Error function is used to calculate the cost or the error on each iteration. Here we use the Mean Squared Error (halved, which is conventional for gradient descent) to calculate the error.
# $MSE = \frac{1}{2n}\sum_{i=1}^{n}(h_i - Y_i)^2$
#
# In[4]:
def error_function(features,labels,m,c):
m_derived = np.sum((m*features + c - labels)*features)/len(labels)
c_derived = np.sum((m*features + c)-labels)/len(labels)
error = np.sum(np.square(m*features+c - labels))/(2*len(labels))
return [m_derived,c_derived,error]
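# Quick sanity check with made-up numbers: for features=[1, 2], labels=[3, 5], m=2, c=1
# the predictions m*x + c are exactly [3, 5], so both gradients and the error come out to 0.
# error_function(np.array([1., 2.]), np.array([3., 5.]), 2, 1)  # -> [0.0, 0.0, 0.0]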
# ## Gradient Descent
# Gradient descent is a very important parameter learning algorithm which is used to decide the value of parameters for which the cost function is minimum. The below function will perform a single step of gradient descent.
# 
#
#
# In[5]:
def step_gradient_descent(features,labels,learning_rate,c,m):
[derived_m,derived_c,error] = error_function(features.values.reshape(100,1),labels.values.reshape(100,1),m,c)
m = m - learning_rate*derived_m
c = c - learning_rate*derived_c
return [c,m,error]
# This function repeats the gradient descent step multiple times to minimize the cost function J(Q).
# In[6]:
def gradient_descent_runner(features,labels,learning_rate,initial_c,initial_m,num_iterations):
m= initial_m
c=initial_c
error_list=[]
for a in range(num_iterations):
[c,m,error] = step_gradient_descent(features,labels,learning_rate,c,m)
error_list.append(error)
return [c,m,error,error_list]
# ## Start
# Here we decide the following parameters for our linear regression Model.
# * Learning rate
# * initial value for out parameters
# * number of iterations for the gradient descent
#
# In[7]:
def run():
points = pd.read_csv('data.csv',delimiter=',',header = None,names=['study hours','test scores']).sort_values(by=['study hours'])
learning_rate=0.000001
initial_c = 4
initial_m = 5
num_iterations = 1000
[c,m,error,error_list]=gradient_descent_runner(points['study hours'],points['test scores'],learning_rate,initial_c,initial_m,num_iterations)
plt.title("study hours v/s test scores")
plt.scatter(points['study hours'],points['test scores'])
plt.plot(points['study hours'],m*points['study hours']+c)
plt.xlabel('study hours')
plt.ylabel('test scores')
plt.show()
print("cost function is {}".format(error))
plt.title("Cost Function Convergence graph")
plt.plot(list(range(num_iterations)),error_list)
plt.xlabel("No. of iterations")
plt.ylabel("Cost Function")
run()
# # Graphs
# ## Study hours and test Scores
# The graph shows how the test scores of students are affected by the number of study hours. The best fit line shows how well the learning algorithm performed.
#
# ## No. of Iterations V/S Cost Function
# A good way, in my view, to check whether the algorithm is working fine is by plotting a graph of the cost function against the number of iterations. If the cost function is decreasing, it means that the learning algorithm is working fine. You can even check the effect of the learning rate on gradient descent by fine tuning it yourself.
# In[ ]:
```
#### File: DataSciencePractice/MachineLearning/Logistic_Regression.py
```python
# # Dataset
# Data were extracted from images taken from genuine and forged banknote-like specimens. For digitization, an industrial camera usually used for print inspection was used. The final images have 400x400 pixels. Due to the object lens and the distance to the investigated object, gray-scale pictures with a resolution of about 660 dpi were obtained. A Wavelet Transform tool was used to extract features from the images.
#
# Attribute Information:
#
# 1. variance of Wavelet Transformed image (continuous)
# 2. skewness of Wavelet Transformed image (continuous)
# 3. curtosis of Wavelet Transformed image (continuous)
# 4. entropy of image (continuous)
# 5. class (integer)
#
# The dataset is provided by the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/banknote+authentication)
# # Libraries
# We are going to use the following libraries
# 1. Pandas for data preparation
# 2. numpy for mathematical calculations.
# 3. sklearn for shuffling the data during data preparation.
# 4. Matplotlib for data plotting
# In[14]:
from sklearn.utils import shuffle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import pyplot as plt, rcParams as rc
# get_ipython().run_line_magic('matplotlib', 'inline')
rc["figure.figsize"] = 10,6
# # Data Preparation
# Data preparation (data preprocessing) is a very important part of machine learning models. In this method we are going to perform the following steps.
# 1. Shuffling the dataset to reduce variance and avoid overfitting.
# 2. Adding a ones column so that we can have a hypothesis equation like: h(Q) = Q1*x1 + Q2*x2 + Q3*x3... where x1=1. This is just to make our hypothesis equation complete by adding a bias term.
#
# 3. Generation of a row vector of parameters (theta)
# In[15]:
def dataPreparation():
df = shuffle(pd.read_csv('data_banknote_authentication.txt',names=['image variance','skewness','kurtosis','entropy','y']))
X = df[['image variance','skewness', 'kurtosis', 'entropy']].values
Y = df[['y']].values
one_column = np.ones((1372,1),dtype = 'float64' )
X= np.append(one_column,X, axis=1).T
theta = np.random.normal(size=5).reshape(1,5)
return [X, Y, theta]
# ## Hypothesis
# The hypothesis function in logistic regression is a little bit different from linear regression. Here the predicted value needs to be between 0 and 1, and to achieve that we pass our hypothesis equation through a sigmoid/logistic function.
#
# 
# In[16]:
def hypothesis(theta,X):
return np.exp((np.dot(theta,X)).T)/(1+np.exp((np.dot(theta,X))).T)
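# Note: exp(z)/(1 + exp(z)) is algebraically the same as the usual sigmoid 1/(1 + exp(-z)),
# so the hypothesis returns 0.5 whenever theta.dot(x) == 0 (illustrative check):
# hypothesis(np.zeros((1, 5)), np.zeros((5, 1)))  # -> array([[0.5]])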
# # Cost function
# The cost function can be defined as the difference between the predicted value and the actual value. It tells you how far you are from predicting the actual value.
# For logistic regression we use the log (cross-entropy) function to calculate the cost.
# 
# In[17]:
def cost_function(theta, X, Y):
predictions = hypothesis(theta,X)
cost = Y*(np.log(predictions.T)).T + (1-Y)*(np.log(1-predictions.T)).T
average_cost = -np.sum(cost)/len(Y)
return [average_cost,predictions]
# # Gradient Descent
# Gradient Descent is the optimization algorithm for learning parameters. It works best with convex functions because they have a single global minimum.
# The method below represents a single step of gradient descent, which needs to be repeated multiple times to get a set of parameters for which the cost function is minimum.
# 
# In[18]:
def single_step_descent(X, Y, theta, learning_rate):
gradient = learning_rate*(np.dot(X,hypothesis(theta,X)-Y)/len(Y)).T
theta = theta - gradient
return theta
# In[22]:
def gradient_descent(X,Y, theta, iterations,learning_rate):
cost_history=[]
for a in range(iterations):
theta = single_step_descent(X,Y,theta,learning_rate)
[cost,predictions]= cost_function(theta,X,Y)
cost_history.append(cost)
return [cost_history,theta,predictions]
# # Getting Started
# In the method below we can fine tune our hyperparameters, such as learning_rate and the number of iterations to perform.
# This method also saves a result.csv file which you can use to compare the difference between your predicted and actual values
# In[23]:
def start():
learning_rate = 0.001
iterations = 1500
[X, Y, theta] = dataPreparation()
[cost_history, theta, predictions] = gradient_descent(X,Y, theta,iterations, learning_rate)
predictions[predictions>=0.5] = 1
predictions[predictions<0.5] =0
data = {'Predicted Value':predictions.reshape(1372,).astype('int32'),
'Actual Value': Y.reshape(1372,).astype('int32'),
'Difference': abs(predictions.reshape(1372,).astype('int32') - Y.reshape(1372,).astype('int32'))}
result = pd.DataFrame(data,columns=['Predicted Value','Actual Value', 'Difference'],index=range(len(Y)))
result.to_csv('result.csv',sep=',')
plt.plot(range(iterations),cost_history)
plt.xlabel("# of iterations")
plt.ylabel("J(Q)")
plt.title("No. of iterations V/S Cost Function")
# ## Number of iterations V/S Cost Function
# In my view, the best way to check whether your learning algorithm is working fine is by plotting a graph between the number of iterations and the cost function. The graph should be decreasing, because a good algorithm should always reduce the cost function with each iteration.
# In[24]:
start()
```
#### File: DataSciencePractice/MachineLearning/RegressionBasic.py
```python
import pandas as pd
from pandas import Series as s , DataFrame as df
import numpy as np
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import seaborn as sb
from matplotlib import pyplot as plt, rcParams as rc
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
# get_ipython().run_line_magic('matplotlib', 'inline')
rc["figure.figsize"] = 10,6
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from sklearn.pipeline import Pipeline
# # Step : 1) Load Data
# In[49]:
#1) Load Data
data = pd.read_csv("hp_data.csv")
# # Step : 2) Read and Analysis Data
# In[7]:
def getDataAllDetails(data):
print("Return a tuple representing the dimensionality of the DF. : ",data.shape)
print("Count of Each Data : ",data.count())
print("Start,End,Column,Step : ",data.axes)
print("Column : ",data.columns)
print("First 5 items : ",data.head(2))
print("Missing Vlaue : ",data.isnull().sum())
print("information : ",data.info())
print("Description : ",data.describe())
print("Size(Row*Column) represent the no of elements in this object: ",data.size)
print("Return unbiased skew over requested axis : ",data.skew())
print("std err of mean : ",data.sem())
print("Return sample standard deviation over requested axis : ",data.std())
print("sum of every data : ",data.sum())
print("Copy Data to other : ",data.copy())
print("Correlation of Data : ",data.corr())
print("Covariance of Columns : ",data.cov())
print("cumulative sum over data : ",data.cumsum())
print("cumulative min or max : ",data.cummin())
print("Remove duplicate row : ",data.drop_duplicates())
print("Romove missing Value : ",data.dropna())
print("Drop Specify Label : ",data.drop(labels=[2,49,78,88]))
print("Drop Specify Label(Plz check inplace true means) : ",data.drop(labels=[2,49,78,88],inplace=True))
print("Tell about Data Types : ",data.dtypes)
print("Find the Duplicate Row : ",data.duplicated())
print("DataFrame is Empty(True) or not(False) : ",data.empty)
print("Expanding : ",data.expanding())
print("Fill Na/Nan Value using Specify metho : ",data.fillna)
#print("rows/columns of DF A/c to labels in specified index : ",data.filter)
#print(" check it(fill backward) : ",data.bfill())
#print(" check it(fill forward) : ",data.ffill())
#print(" check it ? : ",data.from_csv)
#print(" check it ? : ",data.from_dict,data.from_items,data.from_records)
print("Return(indication of sparse/dense and dtype) in Df : ",data.from_records)
print("Tell about Data Types(check abobe ftypes) : ",data.dtypes)
print("Return counts of unique dtypes in this object : ",data.get_dtype_counts())
print("Return counts of unique dtypes in this object(dense) : ",data.get_ftype_counts())
print("Return ndarray after convert sparse values to dense : ",data.get_values())
#print(" check it : ",data.groupby)
print("RangeIndex(start=0,stop=data size, step=1) : ",data.index)
print("concise summary of DF,Dtypes,Memory,Shape,Many Info : ",data.info)
#print(" check it : ",data.insert)
#print(" check it : ",data.interpolate)
#print(" check it : ",data.is_copy)
print("Detect the Missing Value(Both Same isna & isnull ) : ",data.isna().sum(),data.isnull().sum())
#print(" check it : ",data.join)
print("Get the 'info axis(same as columns : data.columns) : ",data.keys,data.columns)
print("unbiased kurt over requested axis using Fisher's def : ",data.kurt,data.kurtosis)
print("both are same data.kurt & data.kurtosis : ",data.kurt,data.kurtosis)
print("Return index for last non-NA/null value : ",data.last_valid_index())
print("mean absolute deviation value for the requested axis : ",data.mad())
print("Returns the maximum of the values in the object : ",data.max())
print("Returns the minimum of the values in the object : ",data.min())
print("Return the mean of the values for the requested axis : ",data.mean())
print("Return the median of the values for the request axis : ",data.median())
#print(" check it : ",data.melt())
print("Return the memory usage of each column in bytes. : ",data.memory_usage())
#print(" check it : ",data.merge)
#print(" check it (mod, mul,multiply) : ",data,mod,data.mul,data.multiply)
print("Return an int representing the no of axes/array dims : ",data.ndim)
print("row's DF sorted by the n smallest values of `columns : ",data.nsmallest(n=10,columns="price"))
print("row's DF sorted by the n largest values of `columns : ",data.nlargest(n=10,columns="price"))
print("Find existing(non-missing) values(Same:notna,notnull) : ",data.notna(),data.notnull())
print("Series with no of distinct observations over requested axis : ",data.nunique(axis=0))
#print(" check it : ",data.pct_change)
#print(" check it(pivot,pivot_table) : ",data.pivot,data.pivot_table)
print("Return item & drop/delete from frame.Raise Error,if not found: ",data.pop("price"))
#print(" check it : ",data.pow)
print("product/prod same of the value for the request(default axis=0): ",data.prod(axis=0),data.product(axis=0))
print("values quantile over requested axis, a la numpy.percentile. : ",data.quantile())
#print(" check it : ",data.query)
#print(" check it : ",data.radd)
print("Compute numerical data rank(1 through n)along axis.Equal values: ",data.rank(numeric_only=True,axis=0))
print("Conform DF to new index with optional filling logic,placing : ",data.reindex().sum())
#print("check it : ",data.rename,data.rename_axis,data.reorder_levels,data.replace,data.resample,data.resample)
#print(" check it : ",data.reset_index(),data.rmod,data.rmul,data.rolling,data.rpow,data.rsub,data.rtruediv)
print("Round a DataFrame to a variable number of decimal places. : ",data.round())
print("Return a random sample of items from an axis of object. : ",data.sample())
print("check it : ",data.select,data.set_index,data.set_value)
print("Return unbiased standard error of the mean over requested axis.: ",data.sem())
print("Shift index by desired no of periods with an optional time freq: ",data.shift(axis=0,periods=3))
print("Equivalent to `shift` without copying data : ",data.slice_shift(axis=0,periods=5))
print("Sort object by labels (along an axis=0, default) : ",data.sort_index(axis=1,ascending=False))
print("Sort by the values along either axis : ",data.sort_values(by=["price","yearsOld"],axis=0,ascending=False))
print("Sort multilevel index by chosen axis level(sort based on words): ",data.sortlevel(level=0, axis=1, ascending=True))
#print("Check it : ",data.stack(),data.sub,data.subtract())
print("Display All Items(same as head(total no of items)) : ",data.style)
print("Interchange axes & swap values axes(swap rows to columns) : ",data.swapaxes(axis1=0,axis2=1,copy=False))
print("Interchange axes & swap values axes appropriately : ",data.swaplevel(i=0,j=0,axis=0))
#print(" check it : ",data.unstack,data.update())
print("Return unbiased variance over requested axis. : ",data.var(axis=0))
#print(" check it : ",data.where,data.xs())
# # Rename Column Name
# In[8]:
# dic = { "KEY" : "VALUE"}
def renameColumn(dic, data ) :
# DICTIONARY : KEY AND VALUE , KEY IS OLD COLUMN NAME , VALUE IS NEW COLUMN NAME
data = data.rename(columns=dic)
return data
# # Step : 3) Visualization the Data
# There are two common libraries for visualization:
# ##1) Matplotlib 2) Seaborn (more attractive plots and more features)
# #Draw the Graph (Histogram, Straight Line, Pie, Bar, Scatter, ETC)
# In[9]:
def graphDetail():
print("Prints the values to a stream, or to sys.stdout by default boxplot : ",data.boxplot())
print("Prints the values to a stream, or to sys.stdout by default hist : ",data.hist())
print("Prints the values to a stream, or to sys.stdout by default plot : ",data.plot())
#print("Prints the values to a stream, or to sys.stdout by default : ",data.boxplot())
#print("Prints the values to a stream, or to sys.stdout by default : ",data.boxplot())
# # Step : 4) Data Preprocessing
# used to convert the raw data into a clean data set
# # A) Data Cleaning
# In[10]:
#getDataAllDetails(data=data)
# In[11]:
#data.head(3) # before label encoding
# # B) Label Encoding : Convert Categorical Value to Integer Value
# In[12]:
#1st Method
# char_cols = data.dtypes.pipe(lambda x: x[x == 'object']).index
# for c in char_cols:
# data[c] = pd.factorize(data[c])[0]
#OR 2nd method
# enc = LabelEncoder()
# data["place"] = enc.fit_transform(data["place"])
# data["sale"] = enc.fit_transform(data["sale"])
# data["built"] = enc.fit_transform(data["built"])
#Do not try to change integer value
#data["price"] = enc.fit_transform(data["price"])
# # Label Encoding and Inverse: Convert Categorical Value to Integer Value and Vice-versa
# In[13]:
from sklearn.preprocessing import LabelEncoder
def labelEncodingCategoryToIntergerViceVersa(dataset):
'''
@author Rakesh
@see The function label encodes the object type columns and gives the label encoded and inverse transform of the label encoded data
@param dataset dataframe on whose columns the label encoding has to be done
@return label encoded and inverse transform of the label encoded data.
'''
data_original = dataset[:]
data_tranformed = dataset[:]
for y in dataset.columns:
#check the dtype of the column; object dtype means it contains strings or chars
if (dataset[y].dtype == object):
print("The string type features are : " + y)
le = LabelEncoder()
le.fit(dataset[y].unique())
#label encoded data
data_tranformed[y] = le.transform(dataset[y])
#inverse label transform data
data_original[y] = le.inverse_transform(data_tranformed[y])
return data_tranformed,data_original
# # Rename Column name
# In[35]:
# dic = { "place" : "Places", "built" : "builts"}
dic = { "Unnamed: 0" : "ID"}
data = renameColumn(dic,data)
# In[36]:
data_tranformed,data_original = labelEncodingCategoryToIntergerViceVersa(data)
# In[37]:
data_tranformed.head(3)# after label encoding
# In[38]:
data_original.head(3)# original data
# # 5) Feature Engineering
# In[1]:
# feature engineering
x = data_tranformed.loc[:,["ID","place","built","sqft","sale","yearsOld","floor","totalFloor","bhk"]]
y = data_tranformed.price
# # 6) Split Dataset (x_train, x_test , y_train , y_test)
# In[57]:
x_train, x_test , y_train , y_test = train_test_split(x ,y , test_size = 0.25 , random_state = 10)
# In[58]:
names = ["Linear Regression", "Decision Tree Regression", "Random Forest Regression"]
algorithms = [ LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
columns_name = ["Model_name", "Random_state",'r2_score']
# In[59]:
rows=[]
def addRandomStateForAlgorithm(x,y,names,algorithms,columns_name,random_state_list):
for j in range(len(algorithms)):
model = algorithms[j]
for i in random_state_list:
x_train, x_test , y_train , y_test = train_test_split(x ,y , test_size = 0.25 , random_state = i)
model.fit(x_train,y_train)
pred_test = model.predict(x_test)
row = [names[j],i,r2_score (y_test,pred_test)]
rows.append(row)
models_df = pd.DataFrame(rows)
models_df.columns = columns_name
print(models_df)
# In[60]:
random_state_list_up_to_10 = [0,1,2,3,4,5,6,7,8,9,10]
random_state_list_10_up_to_20 = [10,11,12,13,14,15,16,17,18,19,20]
# In[61]:
addRandomStateForAlgorithm(x, y,names,algorithms,columns_name,random_state_list_up_to_10)
# In[43]:
#data[2:]
#data[:3]
#data.iloc[:,2:]
``` |
{
"source": "47-studio-org/watchman",
"score": 3
} |
#### File: fbcode_builder/getdeps/subcmd.py
```python
class SubCmd(object):
NAME = None
HELP = None
def run(self, args):
"""perform the command"""
return 0
def setup_parser(self, parser):
# Subclasses should override setup_parser() if they have any
# command line options or arguments.
pass
CmdTable = []
def add_subcommands(parser, common_args, cmd_table=CmdTable):
"""Register parsers for the defined commands with the provided parser"""
for cls in cmd_table:
command = cls()
command_parser = parser.add_parser(
command.NAME, help=command.HELP, parents=[common_args]
)
command.setup_parser(command_parser)
command_parser.set_defaults(func=command.run)
def cmd(name, help=None, cmd_table=CmdTable):
"""
@cmd() is a decorator that can be used to help define SubCmd instances
Example usage:
@cmd('list', 'Show the result list')
class ListCmd(SubCmd):
def run(self, args):
# Perform the command actions here...
pass
"""
def wrapper(cls):
class SubclassedCmd(cls):
NAME = name
HELP = help
cmd_table.append(SubclassedCmd)
return SubclassedCmd
return wrapper
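# Illustrative usage sketch: one way CmdTable and add_subcommands() could be
# wired into argparse. The 'hello' command and the --verbose option are
# assumptions for demonstration only, not part of the original module.
import argparse

@cmd("hello", "Print a greeting")
class HelloCmd(SubCmd):
    def run(self, args):
        print("hello")
        return 0

def build_parser():
    # shared options every subcommand inherits via parents=[...]
    common = argparse.ArgumentParser(add_help=False)
    common.add_argument("--verbose", action="store_true")
    top = argparse.ArgumentParser(prog="example")
    add_subcommands(top.add_subparsers(), common, cmd_table=CmdTable)
    return top

# e.g. args = build_parser().parse_args(["hello"]); args.func(args)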
```
#### File: getdeps/test/scratch_test.py
```python
import unittest
from ..buildopts import find_existing_win32_subst_for_path
class Win32SubstTest(unittest.TestCase):
def test_no_existing_subst(self):
self.assertIsNone(
find_existing_win32_subst_for_path(
r"C:\users\alice\appdata\local\temp\fbcode_builder_getdeps",
subst_mapping={},
)
)
self.assertIsNone(
find_existing_win32_subst_for_path(
r"C:\users\alice\appdata\local\temp\fbcode_builder_getdeps",
subst_mapping={"X:\\": r"C:\users\alice\appdata\local\temp\other"},
)
)
def test_exact_match_returns_drive_path(self):
self.assertEqual(
find_existing_win32_subst_for_path(
r"C:\temp\fbcode_builder_getdeps",
subst_mapping={"X:\\": r"C:\temp\fbcode_builder_getdeps"},
),
"X:\\",
)
self.assertEqual(
find_existing_win32_subst_for_path(
r"C:/temp/fbcode_builder_getdeps",
subst_mapping={"X:\\": r"C:/temp/fbcode_builder_getdeps"},
),
"X:\\",
)
def test_multiple_exact_matches_returns_arbitrary_drive_path(self):
self.assertIn(
find_existing_win32_subst_for_path(
r"C:\temp\fbcode_builder_getdeps",
subst_mapping={
"X:\\": r"C:\temp\fbcode_builder_getdeps",
"Y:\\": r"C:\temp\fbcode_builder_getdeps",
"Z:\\": r"C:\temp\fbcode_builder_getdeps",
},
),
("X:\\", "Y:\\", "Z:\\"),
)
def test_drive_letter_is_case_insensitive(self):
self.assertEqual(
find_existing_win32_subst_for_path(
r"C:\temp\fbcode_builder_getdeps",
subst_mapping={"X:\\": r"c:\temp\fbcode_builder_getdeps"},
),
"X:\\",
)
def test_path_components_are_case_insensitive(self):
self.assertEqual(
find_existing_win32_subst_for_path(
r"C:\TEMP\FBCODE_builder_getdeps",
subst_mapping={"X:\\": r"C:\temp\fbcode_builder_getdeps"},
),
"X:\\",
)
self.assertEqual(
find_existing_win32_subst_for_path(
r"C:\temp\fbcode_builder_getdeps",
subst_mapping={"X:\\": r"C:\TEMP\FBCODE_builder_getdeps"},
),
"X:\\",
)
```
#### File: watchman/integration/test_log.py
```python
from __future__ import absolute_import, division, print_function
import pywatchman
import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestLog(WatchmanTestCase.WatchmanTestCase):
def test_invalidNumArgsLogLevel(self):
for params in [["log-level"], ["log-level", "debug", "extra"]]:
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand(*params)
self.assertIn("wrong number of arguments", str(ctx.exception))
def test_invalidLevelLogLevel(self):
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand("log-level", "invalid")
self.assertIn("invalid log level", str(ctx.exception))
def test_invalidNumArgsLog(self):
for params in [["log"], ["log", "debug"], ["log", "debug", "test", "extra"]]:
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand(*params)
self.assertIn("wrong number of arguments", str(ctx.exception))
def test_invalidLevelLog(self):
with self.assertRaises(pywatchman.WatchmanError) as ctx:
self.watchmanCommand("log", "invalid", "test")
self.assertIn("invalid log level", str(ctx.exception))
``` |
{
"source": "482798/combination-lock",
"score": 3
} |
#### File: 482798/combination-lock/main.py
```python
def on_button_pressed_a():
global User_Input
User_Input = "" + User_Input + "A"
input.on_button_pressed(Button.A, on_button_pressed_a)
def on_button_pressed_ab():
global User_Input, _1
if Password == User_Input:
if _1 > 2:
pins.servo_write_pin(AnalogPin.P0, 0)
basic.show_icon(IconNames.NO)
basic.pause(300)
User_Input = ""
basic.clear_screen()
else:
basic.show_icon(IconNames.YES)
pins.servo_write_pin(AnalogPin.P0, 180)
basic.pause(5000)
pins.servo_write_pin(AnalogPin.P0, 0)
User_Input = ""
basic.clear_screen()
else:
pins.servo_write_pin(AnalogPin.P0, 0)
basic.show_icon(IconNames.NO)
_1 += 1
basic.pause(300)
User_Input = ""
basic.clear_screen()
input.on_button_pressed(Button.AB, on_button_pressed_ab)
def on_button_pressed_b():
global User_Input
User_Input = "" + User_Input + "B"
input.on_button_pressed(Button.B, on_button_pressed_b)
_1 = 0
User_Input = ""
Password = ""
Password = "<PASSWORD>"
User_Input = ""
_1 = 0
def on_forever():
global _1
_1 = 0
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.pause(5000)
basic.forever(on_forever)
``` |
{
"source": "48-41-50/queue-sample",
"score": 3
} |
#### File: queue-sample/http/queue_publisher.py
```python
from http import HTTPStatus
import json
import logging
import os
import requests
import sys
import time
PUBLISHER_URL = os.environ.get('PUBLISHER_URL', 'http://queues-server:8888')
class ContentTypeError(Exception):
pass
class UnsubscribedError(Exception):
pass
class QPublisher:
def __init__(self, log_to_file=True):
self.id = time.time()
logging_params = {'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
'level': logging.INFO}
if log_to_file:
logging_params['filename'] = f'publisher_{self.id}.log'
logging_params['filemode'] = 'w'
else:
logging_params['stream'] = sys.stderr
logging.basicConfig(**logging_params)
self._log = logging.getLogger(__file__)
def _handle_request(self, url: str, data: dict, request_type: str='post'):
rmethod = getattr(requests, request_type)
if request_type == 'post':
rargs = {'json': data}
else:
rargs = {'params': data}
res = rmethod(url, **rargs)
res.raise_for_status()
if res.status_code == HTTPStatus.OK:
if 'Content-type' in res.headers:
if res.headers['Content-type'] == 'application/json':
return res.json()
else:
raise ContentTypeError(f'Expected "application/json" but got {res.headers["Content-type"]}')
def create_topic(self, topic: str, description: str):
data = self._handle_request(f'{PUBLISHER_URL}/topic', {'topic': topic, 'description': description})
self._log.info(f"Topic {data['topic']} created")
def delete_topic(self, topic: str):
data = self._handle_request(f'{PUBLISHER_URL}/topic_delete', {'topic': topic})
def reset_topic(self, topic: str, offset: int):
data = self._handle_request(f'{PUBLISHER_URL}/topic_reset', {'topic': topic, 'offset': offset})
def list_topics(self, topic: str=''):
if topic:
topic_param = {'topic': topic}
else:
topic_param = {}
data = self._handle_request(f'{PUBLISHER_URL}/topics', topic_param, 'get')
self._log.info(data)
def publish_message(self, topic: str, message: str):
data = self._handle_request(f'{PUBLISHER_URL}/publish', {'topic': topic, 'message': message})
def list_messages(self, topic: str=''):
if topic:
topic_param = {'topic': topic}
else:
topic_param = {}
data = self._handle_request(f'{PUBLISHER_URL}/topic_messages', topic_param, 'get')
self._log.info(data)
def list_subscribers(self, topic: str=''):
if topic:
topic_param = {'topic': topic}
else:
topic_param = {}
data = self._handle_request(f'{PUBLISHER_URL}/topic_subscribers', topic_param, 'get')
self._log.info(data)
if __name__ == '__main__':
qp = QPublisher()
qp.create_topic('search-web', 'Web search requests')  # create_topic requires a description; this text is illustrative
``` |
{
"source": "485294-muni/streamlit-cnn-vis",
"score": 3
} |
#### File: 485294-muni/streamlit-cnn-vis/app.py
```python
import streamlit as st
import pandas as pd
import numpy as np
import torch
from PIL import Image, ImageChops
import os
from torch.nn.functional import cross_entropy
from streamlit_image_comparison import image_comparison
st.set_page_config(layout="wide")
@st.cache(allow_output_mutation=True)
def load_model():
efficientnet = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_efficientnet_b0', pretrained=True)
return efficientnet.eval()
@st.cache(allow_output_mutation=True)
def load_classnames():
with open("classes.txt") as file:
return eval(file.read())
@st.cache(allow_output_mutation=True)
def load_images():
files = os.listdir("./images")
img_suffixes = ("jpg", "jpeg", "png")
img_files = (f for f in files if f.endswith(img_suffixes))
return [Image.open("./images/"+file) for file in img_files]
@st.cache(allow_output_mutation=True)
def load_styles():
with open("style.css") as f:
return '<style>{}</style>'.format(f.read())
st.markdown(load_styles(), unsafe_allow_html=True)
def img2tensor(img: Image) -> torch.Tensor:
arr = np.array(img).transpose(2, 0, 1)[np.newaxis, ...]
return torch.tensor(arr).float() / 255
def tensor2img(tensor: torch.Tensor) -> Image:
tensor = tensor.squeeze(0) * 255
arr = np.uint8(tensor.numpy()).transpose(1, 2, 0)
return Image.fromarray(arr)
classnames = load_classnames()
images = load_images()
model = load_model()
if "selected_img" not in st.session_state:
st.session_state["selected_img"] = images[0]
uploaded_file = st.sidebar.file_uploader("", type=['png', 'jpg', "jpeg"])
if uploaded_file is not None:
uploaded_img = Image.open(uploaded_file)
clicked = st.sidebar.button("analyze uploaded", key=100)
if clicked:
st.session_state.selected_img = uploaded_img
st.sidebar.markdown("<hr />", unsafe_allow_html=True)
st.sidebar.markdown("or select from a few examples")
for i, img in enumerate(images):
st.sidebar.markdown("<hr />", unsafe_allow_html=True)
st.sidebar.image(img)
clicked = st.sidebar.button("analyze", key=i)
if clicked:
st.session_state.selected_img = img
st.sidebar.markdown("<hr />", unsafe_allow_html=True)
st.sidebar.markdown("Photos source: "
"<a href='https://unsplash.com/photos/pk_1RdcAfbE'>street sign</a>, "
"<a href='https://unsplash.com/photos/X63FTIZFbZo'>clock on nightstand</a>, "
"<a href='https://unsplash.com/photos/fAz5Cf1ajPM'>wine</a>, "
"<a href='https://unsplash.com/photos/eWqOgJ-lfiI'>red cabin</a>, ",
unsafe_allow_html=True)
top_k = 3
st.slider(min_value=0,
max_value=40,
label="sensitivity:",
value=20,
step=4,
key="slider")
@st.cache(allow_output_mutation=True)
def process(img):
img_small = img.resize((300, 300), resample=Image.BILINEAR)
input_tensor = img2tensor(img_small).repeat(top_k, 1, 1, 1)
input_tensor.requires_grad = True
prediction = model(input_tensor)
confidences = torch.softmax(prediction.detach()[0], dim=-1)
tops = torch.topk(confidences.flatten(), top_k)
indeces = tops.indices.tolist()
values = tops.values.tolist()
target = torch.tensor(indeces)
cross_entropy(prediction, target).backward()
expl_tensors = [torch.mean(input_tensor.grad[option], axis=0, keepdim=True) for option in range(top_k)]
return indeces, values, expl_tensors
img = st.session_state.selected_img
indeces, values, expl_tensors = process(img)
def label_formatter(i):
index = indeces[i]
confidence = values[i]
return f"{classnames[index]} ({confidence*100:>.0f}%)"
option = st.radio("most likely objects in image:", options=range(top_k), format_func=label_formatter)
st.checkbox("blend explanation with image", key="blend")
expl_tensor = torch.abs(expl_tensors[option] * st.session_state.slider).clamp(0, 1).repeat(3, 1, 1)
expl_img = tensor2img(expl_tensor).resize(img.size)
if st.session_state.blend:
expl_img = ImageChops.multiply(img, expl_img)
image_comparison(img, expl_img, in_memory=True)
``` |
{
"source": "48cfu/CarND-Advanced-Lane-Lines",
"score": 3
} |
#### File: CarND-Advanced-Lane-Lines/source/lane_detection.py
```python
import numpy as np
import cv2
import glob
import matplotlib
import matplotlib.pyplot as plt
from camera import Camera
from line import Line
class LaneDetection():
def __init__(self):
'''
Configuration parameters for each frame: tuned in main_images.py
'''
self.sobel_kernel = 3
self.thresh_color_s_channel = (80, 255) # from Gradients and Color Spaces: HLS Quiz
self.thresh_sobel_x = (20, 100)
self.thresh_dir_gradient = (0.7, 1.3)
self.thresh_magnitude = (30, 100)
self.left_low = (100, 720)
self.left_high = (600, 450)
self.right_low = (1240, 720)
self.right_high = (805, 450)
self.vertices = np.array([[self.left_low, self.left_high, self.right_high, self.right_low]], dtype=np.int32)
#set camera
self.camera = Camera()
self.camera.calibrate()
#lines
self.line_left = Line()
self.line_right = Line()
def region_of_interest(self, img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def measure_curvature_real(self, ploty, left_fit_cr, right_fit_cr):
'''
Calculates the curvature of polynomial functions in meters.
'''
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Start by generating our fake example data
# Make sure to feed in your real data instead in your project!
# ploty, left_fit_cr, right_fit_cr = generate_data(ym_per_pix, xm_per_pix)
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
##### TO-DO: Implement the calculation of R_curve (radius of curvature) #####
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
return left_curverad, right_curverad
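# Worked example of the formula used above: for a fit x = A*y^2 + B*y + C, the
# radius of curvature at y is R = (1 + (2*A*y + B)**2)**1.5 / abs(2*A), computed
# after scaling pixel coordinates to meters. The numbers below are illustrative:
# with A = 1e-4, B = 0.01 and y_eval = 720 px * (30/720 m/px) = 30 m,
# R = (1 + (2*1e-4*30 + 0.01)**2)**1.5 / (2*1e-4) ~= 5002 m (nearly straight).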
def abs_sobel_thresh(self, img, orient='x', thresh_min=20, thresh_max=100):
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
return binary_output
def thresholding_pipeline(self, img_undistorted):
combined_binary, color_binary = self.camera.binary_from_combined_thresholds(
img_undistorted, self.sobel_kernel, self.thresh_color_s_channel, self.thresh_sobel_x,
self.thresh_dir_gradient, self.thresh_magnitude)
return combined_binary
def process_image(self, img_original):
'''
first undistort
'''
img_undistorted = self.camera.undistort(img_original)
'''
Get combined binary image
'''
combined_binary, color_binary = self.camera.binary_from_combined_thresholds(
img_undistorted, self.sobel_kernel, self.thresh_color_s_channel, self.thresh_sobel_x,
self.thresh_dir_gradient, self.thresh_magnitude)
'''
# Unwarp and identify lane pixels, fit with polynomial and calculate curvature
'''
offset1 = 350
offset2 = 520
big_x = 1230
small_x = 250
corners_source = np.float32([[small_x, 720], [small_x + offset1, 450], [big_x - offset2, 450], [big_x, 720]])
corners_destination = np.float32([[small_x, 720], [small_x, 0], [big_x, 0], [big_x, 720]])
print(corners_source)
print(corners_destination)
cp_img = np.copy(img_undistorted)
img_size = (img_undistorted.shape[1], img_undistorted.shape[0])
#warped_img, M = self.camera.corners_unwarp(cp_img, corners_source, corners_destination)
'''
Binary warped image
'''
binary_warped, M = self.camera.corners_unwarp(combined_binary, corners_source, corners_destination)
#check if left and right were detected
if self.line_left.detected == False or self.line_right.detected == False:
left_detected, right_detected, leftx, lefty, rightx, righty, ploty, left_fit, right_fit, left_fitx, right_fitx, out_img = self.camera.fit_polynomial(
binary_warped
)
else:
#TODO: change to search around previous one
#left_detected, right_detected, leftx, lefty, rightx, righty, ploty, left_fit, right_fit, left_fitx, right_fitx, out_img = self.camera.search_around_poly(
# binary_warped, self.line_left.best_fit, self.line_right.best_fit
#)
left_detected, right_detected, leftx, lefty, rightx, righty, ploty, left_fit, right_fit, left_fitx, right_fitx, out_img = self.camera.fit_polynomial(
binary_warped
)
#color_binary = np.dstack((combined_binary, combined_binary, combined_binary)) * 255
#left line
self.line_left.detected = left_detected
self.line_left.allx = leftx #noisy lane pixels x
self.line_left.ally = lefty #noisy lane pixels y
self.line_left.current_fit.append(left_fit) #polinomial coefficients of current fit
self.line_left.best_fit = self.line_left.low_pass_filter()
#right line
self.line_right.detected = right_detected
self.line_right.allx = rightx #noisy lane pixels x
self.line_right.ally = righty #noisy lane pixels y
self.line_right.current_fit.append(right_fit) #polinomial coefficients of current fit
self.line_right.best_fit = self.line_right.low_pass_filter()
try:
left_fitx = self.line_left.best_fit[0]*ploty**2 + self.line_left.best_fit[1]*ploty + self.line_left.best_fit[2]
except TypeError:
# Avoids an error if `left` and `right_fit` are still none or incorrect
#print('The function failed to fit a line!')
left_fitx = 1*ploty**2 + 1*ploty
try:
right_fitx = self.line_right.best_fit[0]*ploty**2 + self.line_right.best_fit[1]*ploty + self.line_right.best_fit[2]
except TypeError:
# Avoids an error if `left` and `right_fit` are still none or incorrect
#print('The function failed to fit a line!')
right_fitx = 1*ploty**2 + 1*ploty
'''
Project back to original pic
'''
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
#print(warp_zero.shape)
text_warp = np.dstack((warp_zero.T, warp_zero.T, warp_zero.T))
position = ((int) (text_warp.shape[1]/2 - 268/2), (int) (text_warp.shape[0]/2 - 36/2))
position = ((int) (text_warp.shape[1]/2 - 268/2), 0)
position = (30, 700)
#print(position)
text_warp = cv2.putText(
text_warp, #numpy array on which text is written
"48cfu", #text
position, #position at which writing has to start
cv2.FONT_HERSHEY_COMPLEX_SMALL, #font family
4, #font scale
(255, 255, 255), #font color
10 #thickness
)
text_warp = text_warp.get()
text_warp = np.rot90(text_warp)
color_warp = cv2.addWeighted(color_warp, 1, text_warp, 0.999, 0)
# Warp the blank back to original image space using inverse perspective matrix (Minv)
Minv = cv2.getPerspectiveTransform(corners_destination, corners_source)
newwarp = cv2.warpPerspective(color_warp, Minv, (img_undistorted.shape[1], img_undistorted.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(img_undistorted, 1, newwarp, 0.3, 0)
'''
Calculates the curvature of polynomial functions in meters.
'''
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Fit a second order polynomial to pixel positions in each fake lane line
##### TO-DO: Fit new polynomials to x,y in world space #####
##### Utilize `ym_per_pix` & `xm_per_pix` here #####
left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
left_curverad, right_curverad = self.measure_curvature_real(ploty, left_fit_cr, right_fit_cr)
# remove outliers
if left_curverad < 10000:
self.line_left.radius_of_curvature.append(left_curverad)
if right_curverad < 10000:
self.line_right.radius_of_curvature.append(right_curverad)
text_radius = str(int(0.5 * self.line_left.get_curvature_LPF() + 0.5 * self.line_right.get_curvature_LPF()))
if int(0.5 * self.line_left.get_curvature_LPF() + 0.5 * self.line_right.get_curvature_LPF()) > 3000:
text_radius = 'Straight line'
result = cv2.putText(
result, #numpy array on which text is written
'Lane curvature [m]: ' + text_radius, #text
(10, 30), #position at which writing has to start
cv2.FONT_HERSHEY_COMPLEX_SMALL, #font family
1, #font scale
(255, 255, 255), #font color
2#thickness
)
'''
POsition vehicle with respect to center
'''
xm_per_pix = 3.7/700 # meters per pixel in x dimension
mid_lane = 0.5 * (self.line_left.best_fit[0]*img_undistorted.shape[0]**2 + self.line_left.best_fit[1]*img_undistorted.shape[0] + self.line_left.best_fit[2]) + 0.5*(self.line_right.best_fit[0]*img_undistorted.shape[0]**2 + self.line_right.best_fit[1]*img_undistorted.shape[0] + self.line_right.best_fit[2])
offset = mid_lane - 0.5 * img_undistorted.shape[1]
relative_position = offset * xm_per_pix
self.line_left.line_base_pos.append(relative_position)
result = cv2.putText(
result, #numpy array on which text is written
'Vehicle relative position [m]: ' + str(round(self.line_left.get_relative_position_LPF(), 2)), #text
(10, 60), #position at which writing has to start
cv2.FONT_HERSHEY_COMPLEX_SMALL, #font family
1, #font scale
(255, 255, 255), #font color
2#thickness
)
return result
#return color_binary
```
#### File: CarND-Advanced-Lane-Lines/source/line.py
```python
import numpy as np
# Define a class to receive the characteristics of each line detection
class Line():
def __init__(self):
# was the line detected in the last iteration?
self.detected = False
# x values of the last n fits of the line
self.recent_xfitted = []
#average x values of the fitted line over the last n iterations
self.bestx = None
#polynomial coefficients averaged over the last n iterations
self.best_fit = None
#polynomial coefficients for the most recent fit
self.current_fit = [] #[np.array([False])]
#radius of curvature of the line in some units
self.radius_of_curvature = []
#distance in meters of vehicle center from the line
self.line_base_pos = []
#difference in fit coefficients between last and new fits
self.diffs = np.array([0,0,0], dtype='float')
#x values for detected line pixels
self.allx = None
#y values for detected line pixels
self.ally = None
#make sure to append new values to current_fit before calling
def low_pass_filter(self, window_size = 11):
#shape = self.current_fit.shape
snapshot = self.current_fit[-window_size:]
if snapshot[-1:] == [10, 20, 0]:
snapshot = np.delete(snapshot, -1)
best_fit = np.mean(snapshot, axis = 0)
self.best_fit = best_fit
return best_fit
#make sure to append new values to radius_of_curvature before calling
def get_curvature_LPF(self, window_size = 15):
snapshot = self.radius_of_curvature[-window_size:]
curvature = np.mean(snapshot, axis = 0)
return curvature
#make sure to append new values to line_base before calling
def get_relative_position_LPF(self, window_size = 30):
snapshot = self.line_base_pos[-window_size:]
relative_position = np.mean(snapshot, axis = 0)
return relative_position
``` |
{
"source": "48cfu/near-api-py",
"score": 2
} |
#### File: near-api-py/near_api/account.py
```python
import base58
import json
import itertools
from near_api import transactions
# Amount of gas attached by default 1e14.
DEFAULT_ATTACHED_GAS = 100000000000000
class TransactionError(Exception):
pass
class ViewFunctionError(Exception):
pass
class Account(object):
def __init__(self, provider, signer, account_id):
self._provider = provider
self._signer = signer
self._account_id = account_id
self._account = provider.get_account(account_id)
self._access_key = provider.get_access_key(account_id, signer._key_pair.encoded_public_key())
print(account_id, self._account, self._access_key)
def _sign_and_submit_tx(self, receiver_id, actions):
self._access_key["nonce"] += 1
block_hash = self._provider.get_status()['sync_info']['latest_block_hash']
block_hash = base58.b58decode(block_hash.encode('utf8'))
serialized_tx = transactions.sign_and_serialize_transaction(
receiver_id, self._access_key["nonce"], actions, block_hash, self._signer)
result = self._provider.send_tx_and_wait(serialized_tx, 10)
for outcome in itertools.chain([result['transaction_outcome']], result['receipts_outcome']):
for log in outcome['outcome']['logs']:
print("Log:", log)
if 'Failure' in result['status']:
raise TransactionError(result['status']['Failure'])
return result
@property
def account_id(self):
return self._account_id
@property
def signer(self):
return self._signer
@property
def provider(self):
return self._provider
@property
def access_key(self):
return self._access_key
@property
def state(self):
return self._account
def fetch_state(self):
"""Fetch state for given account."""
self._account = self.provider.get_account(self.account_id)
def send_money(self, account_id, amount):
"""Sends funds to given account_id given amount."""
return self._sign_and_submit_tx(account_id, [transactions.create_transfer_action(amount)])
def function_call(self, contract_id, method_name, args, gas=DEFAULT_ATTACHED_GAS, amount=0):
args = json.dumps(args).encode('utf8')
return self._sign_and_submit_tx(contract_id, [transactions.create_function_call_action(method_name, args, gas, amount)])
def create_account(self, account_id, public_key, initial_balance):
actions = [
transactions.create_create_account_action(),
transactions.create_full_access_key_action(public_key),
transactions.create_transfer_action(initial_balance)]
return self._sign_and_submit_tx(account_id, actions)
def deploy_contract(self, contract_code):
return self._sign_and_submit_tx(self._account_id, [transactions.create_deploy_contract_action(contract_code)])
def stake(self, public_key, amount):
return self._sign_and_submit_tx(self._account_id, [transactions.create_stake_action(public_key, amount)])
def create_and_deploy_contract(self, contract_id, public_key, contract_code, initial_balance):
actions = [
transactions.create_create_account_action(),
transactions.create_transfer_action(initial_balance),
transactions.create_deploy_contract_action(contract_code)] + \
([transactions.create_full_access_key_action(public_key)] if public_key is not None else [])
return self._sign_and_submit_tx(contract_id, actions)
def create_deploy_and_init_contract(self, contract_id, public_key, contract_code, initial_balance, args,
gas=DEFAULT_ATTACHED_GAS, init_method_name="new"):
args = json.dumps(args).encode('utf8')
actions = [
transactions.create_create_account_action(),
transactions.create_transfer_action(initial_balance),
transactions.create_deploy_contract_action(contract_code),
transactions.create_function_call_action(init_method_name, args, gas, 0)] + \
([transactions.create_full_access_key_action(public_key)] if public_key is not None else [])
return self._sign_and_submit_tx(contract_id, actions)
def view_function(self, contract_id, method_name, args):
result = self._provider.view_call(contract_id, method_name, json.dumps(args).encode('utf8'))
if "error" in result:
raise ViewFunctionError(result["error"])
result["result"] = json.loads(''.join([chr(x) for x in result["result"]]))
return result
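# Illustrative usage sketch (kept as comments): how this Account class is
# typically driven. The provider and signer are placeholders here - any objects
# exposing the methods used above (get_account, get_access_key, get_status,
# send_tx_and_wait, view_call, and a _key_pair with encoded_public_key) would
# work. Account IDs, amounts, contract and method names are assumptions.
#
# account = Account(provider, signer, "alice.testnet")
# account.send_money("bob.testnet", 10 ** 24)                 # 1 NEAR in yoctoNEAR
# account.function_call("counter.testnet", "increment", {})   # contract call with default gas
# result = account.view_function("counter.testnet", "get_num", {})
# print(result["result"])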
``` |
{
"source": "48ix/routingpolicy",
"score": 2
} |
#### File: routingpolicy/routingpolicy/generate.py
```python
import asyncio
from pathlib import Path
from datetime import datetime
# Project
from routingpolicy.irr import render_prefixes
from routingpolicy.log import log
from routingpolicy.config import params
from routingpolicy.peeringdb import max_prefixes
from routingpolicy.rendering import POLICIES_DIR, template_env
from routingpolicy.models.participant import Participant
def verify_complete(file: Path) -> bool:
"""Verify a template exists and isn't empty."""
complete = False
if file.exists() and file.stat().st_size != 0:
complete = True
if not complete:
log.error("{} does not exist or is empty.", str(file))
return complete
def create_file_structure() -> bool:
"""Gracefully create output policy file structure."""
if not POLICIES_DIR.exists():
log.debug("Creating {}", str(POLICIES_DIR))
POLICIES_DIR.mkdir()
for rs in params.route_servers:
rs_dir = POLICIES_DIR / rs.name
if not rs_dir.exists():
log.debug("Creating {}", str(rs_dir))
rs_dir.mkdir()
for participant in params.participants:
participant_dir = rs_dir / str(participant.asn)
if not participant_dir.exists():
log.debug("Creating {}", str(participant_dir))
participant_dir.mkdir()
return True
async def communities(
participant: Participant,
) -> None:
"""Generate Participant-specific BGP Community Lists."""
log.info("Generating Communities for {}", participant.pretty)
create_file_structure()
participant_comms = template_env.get_template("participant-communities.j2")
for rs in params.route_servers:
result = await participant_comms.render_async(
p=participant, now=datetime.utcnow().isoformat()
)
output_file = POLICIES_DIR / rs.name / str(participant.asn) / "communities.ios"
if not output_file.exists():
output_file.touch()
log.debug("Communities for {}\n{}", participant.pretty, result)
with output_file.open("w+") as of:
of.write(result)
if verify_complete(output_file):
log.success(
"Generated Communities for {} at {}",
participant.pretty,
str(output_file),
)
async def route_map(participant: Participant) -> None:
"""Generate Participant-specific Route Maps."""
log.info("Generating Route Maps for {}", participant.pretty)
create_file_structure()
participant_route_map = template_env.get_template("participant-route-map.j2")
for rs in params.route_servers:
result = await participant_route_map.render_async(
p=participant,
rs=rs.id,
loc=rs.loc_id,
metro=rs.metro_id,
now=datetime.utcnow().isoformat(),
)
output_file = POLICIES_DIR / rs.name / str(participant.asn) / "route-map.ios"
if not output_file.exists():
output_file.touch()
log.debug("Route Maps for {}\n{}", participant.pretty, result)
with output_file.open("w+") as of:
of.write(result)
if verify_complete(output_file):
log.success(
"Generated Route Maps for {} at {}",
participant.pretty,
str(output_file),
)
async def prefixes(participant: Participant) -> None:
"""Generate Participant-specific Prefix Lists."""
log.info("Generating Prefix Lists for {}", participant.pretty)
create_file_structure()
for rs in params.route_servers:
async for family, render in render_prefixes(
participant,
max_ipv4=params.max_length.ipv4,
max_ipv6=params.max_length.ipv6,
template_env=template_env,
):
output_file = (
POLICIES_DIR
/ rs.name
/ str(participant.asn)
/ f"prefix-list-ipv{family}.ios"
)
if not output_file.exists():
output_file.touch()
rendered = await render
log.debug(
"IPv{} Prefix List for {}\n{}",
family,
participant.pretty,
rendered,
)
with output_file.open("w") as of:
of.write(rendered)
if verify_complete(output_file):
log.success(
"Generated IPv{} Prefix Lists for {} at {}",
family,
participant.pretty,
str(output_file),
)
async def bgp(participant: Participant) -> None:
"""Generate Participant-specific BGP Configs."""
log.info("Generating BGP Config for {}", participant.pretty)
create_file_structure()
max4, max6 = await max_prefixes(participant.asn)
for rs in params.route_servers:
output_file = POLICIES_DIR / rs.name / str(participant.asn) / "bgp.ios"
if not output_file.exists():
output_file.touch()
template = template_env.get_template("participant-bgp.j2")
result = await template.render_async(
p=participant, max4=max4, max6=max6, now=datetime.utcnow().isoformat()
)
log.debug("BGP Config for {}\n{}", participant.pretty, result)
with output_file.open("w+") as of:
of.write(result)
if verify_complete(output_file):
log.success(
"Generated BGP Config for {} at {}",
participant.pretty,
str(output_file),
)
async def generate_all() -> None:
"""Generate all templates for all route route servers and participants."""
coros = (communities, route_map, prefixes, bgp)
tasks = (c(p) for c in coros for p in params.participants)
await asyncio.gather(*tasks)
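# Illustrative entry point: generate_all() fans out one coroutine per template
# type per participant, so it can also be driven directly with asyncio
# (the production scheduler in routingpolicy.main does this on an interval):
#
# import asyncio
# asyncio.run(generate_all())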
```
#### File: routingpolicy/routingpolicy/main.py
```python
import sys
import asyncio
import logging
from datetime import datetime
# Third Party
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.schedulers.asyncio import AsyncIOScheduler
# Project
from routingpolicy import APP_DIR
from routingpolicy.log import log
from routingpolicy.run import policy, acls
from routingpolicy.config import params
from routingpolicy.api.main import start_api
logger = logging.getLogger("routingpolicy")
interval = IntervalTrigger(
minutes=params.interval, start_date=datetime(2020, 9, 18, 6, 0, 0, 0)
)
class PrintJobs:
"""File-Like Object for APScheduler to Print Jobs towards."""
@staticmethod
def write(message: str) -> None:
"""Log job details."""
msg = message.strip().rstrip()
if msg and "Jobstore" not in msg:
log.info("Job: {}", msg)
if __name__ == "__main__":
# Ensure main app directory exists.
if not APP_DIR.exists():
APP_DIR.mkdir()
loop = asyncio.new_event_loop()
# Initialize scheduler.
scheduler = AsyncIOScheduler(logger=logger, timezone="Etc/UTC", event_loop=loop)
# Run RPC API
scheduler.add_job(start_api, id="api")
# Run Route Policy Updater
scheduler.add_job(policy, id="policies", trigger=interval)
# Run Switch ACL Updater
scheduler.add_job(acls, id="switch_acls", trigger=interval)
scheduler.start()
scheduler.print_jobs(out=PrintJobs)
try:
log.success("Starting 48 IX Routing Policy Server...")
loop.run_forever()
except (KeyboardInterrupt, SystemExit):
log.critical("Stopping 48 IX Routing Policy Server...")
sys.exit(1)
```
#### File: routingpolicy/tests/test_irr.py
```python
import sys
import asyncio
# Project
from routingpolicy.irr import get_prefixes
from routingpolicy.log import log
async def _run_test(asn: str) -> None:
for family in (4, 6):
prefixes = get_prefixes(asn, family)
log.info("IPv{} prefixes for {}:", str(family), f"AS{asn}")
async for prefix in prefixes:
log.info(prefix)
if __name__ == "__main__":
if len(sys.argv) > 1:
asn = sys.argv[1].replace("AS", "")
else:
asn = "14525"
task = _run_test(asn)
try:
asyncio.run(task)
except KeyboardInterrupt:
task.close()
log.critical("Stopped")
``` |
{
"source": "48ix/rsagent",
"score": 2
} |
#### File: rsagent/services/main.py
```python
import hashlib
import logging
from typing import Any
from pathlib import Path
# Third Party
from rpyc import Service
from cryptography.fernet import Fernet, InvalidToken
# Project
from rsagent.config import params
from rsagent.frrouting import merge_config, validate_config
log = logging.getLogger(f"{__package__}.{__name__}")
OUTPUT_DIR = Path.home() / "rs-policies"
class Agent(Service):
"""Route Server Agent Service."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Set custom attributes."""
super().__init__(*args, **kwargs)
self.digest_encrypted: str = ""
self.digest_decrypted: str = ""
def verify_payload(self, encrypted: bytes) -> str:
"""Verify that input data digest matches pre-sent digest."""
decrypted = b""
if not all((self.digest_encrypted, self.digest_decrypted)):
raise UnboundLocalError("No digests have been set.")
# Validate encrypted data
encrypted_input_digest = hashlib.sha256(encrypted).hexdigest()
if encrypted_input_digest != self.digest_encrypted:
log.error("Invalid digest for encrypted data: %s", encrypted_input_digest)
raise ValueError("Digest doesn't match encrypted data.")
# Decompress data & validate digest
encryption = Fernet(params.key.get_secret_value())
try:
decrypted = encryption.decrypt(encrypted)
decrypted_input_digest = hashlib.sha256(decrypted).hexdigest()
if decrypted_input_digest != self.digest_decrypted:
log.error(
"Invalid digest for decrypted data: %s", decrypted_input_digest
)
raise ValueError("Digest doesn't match decrypted data.")
except InvalidToken:
log.critical("Invalid token for data %s", encrypted_input_digest)
raise ValueError("Invalid secret.")
return decrypted.decode()
def exposed_set_digest(self, encrypted: str, decrypted: str) -> None:
"""Set the digest of incoming data."""
self.digest_encrypted = encrypted
self.digest_decrypted = decrypted
log.info("Set encrypted digest %s", encrypted)
log.info("Set decrypted digest %s", decrypted)
def exposed_push_policy(self, policy: bytes) -> str:
"""Ingest new FRR policy & apply."""
result = "An unknown error occurred."
try:
payload = self.verify_payload(policy)
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
policy_file = OUTPUT_DIR / f"{self.digest_decrypted}.ios"
with policy_file.open("w") as f:
f.write(payload)
valid = validate_config(policy_file)
if not valid:
raise RuntimeError("Config failed validation.")
merged = merge_config(policy_file)
if not merged:
raise RuntimeError("Config validated, but config merge failed.")
result = "Successfully merged configuration."
log.info(f"{self.digest_decrypted}: " + result)
except Exception as err:
result = str(err)
log.error(f"{self.digest_decrypted}: " + result)
return result
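# Illustrative client-side sketch of the protocol this Agent expects: encrypt
# the policy with the shared Fernet key, pre-send both SHA-256 digests, then
# push the ciphertext. The host, port, and function name are assumptions for
# demonstration only; exposed_* methods are reachable without the prefix on
# conn.root in rpyc.
import rpyc

def push_policy_to_agent(host: str, key: bytes, policy_text: str, port: int = 18861) -> str:
    plaintext = policy_text.encode()
    ciphertext = Fernet(key).encrypt(plaintext)
    conn = rpyc.connect(host, port)
    conn.root.set_digest(
        hashlib.sha256(ciphertext).hexdigest(),
        hashlib.sha256(plaintext).hexdigest(),
    )
    return conn.root.push_policy(ciphertext)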
``` |
{
"source": "48ix/stats",
"score": 2
} |
#### File: stats/api/policy.py
```python
from typing import Optional
# Third Party
from fastapi import Header, BackgroundTasks
# Project
from stats.log import log
from stats.auth.main import get_job, create_job, authorize_route, authenticate_user
from stats.exceptions import AuthError
from stats.actions.policy import _update_policy, _update_switch_acl
from stats.models.update_policy import UpdatePolicyResponse
async def _verify_auth(username: str, password: str, route: str) -> bool:
"""Authenticate & authorize a user.
Verifies the proper headers are provided, authenticates the username
& password, and authorizes the route.
"""
has_headers = all((username, password))
authenticated = await authenticate_user(username=username, password=password)
authorized = await authorize_route(username, route)
full_auth = all((has_headers, authenticated, authorized))
if not full_auth:
raise AuthError(
"Authentication or authorization failed for user '{user}'",
user=username,
status_code=401,
)
return full_auth
async def update_policy(
background_tasks: BackgroundTasks,
x_48ix_api_user: Optional[str] = Header(None),
x_48ix_api_key: Optional[str] = Header(None),
):
"""Initiate a manual policy update."""
await _verify_auth(x_48ix_api_user, x_48ix_api_key, "/policy/update/")
job = await create_job(requestor=x_48ix_api_user)
await job.fetch_related("requestor")
background_tasks.add_task(_update_policy, wait=1, job=job.id)
job_response = UpdatePolicyResponse(
id=job.id,
request_time=job.request_time,
complete_time=job.complete_time,
requestor=job.requestor.username,
detail=job.detail,
in_progress=job.in_progress,
)
return job_response.dict()
async def update_acls(
background_tasks: BackgroundTasks,
x_48ix_api_user: Optional[str] = Header(None),
x_48ix_api_key: Optional[str] = Header(None),
):
"""Initiate a manual policy update."""
await _verify_auth(x_48ix_api_user, x_48ix_api_key, "/acls/update/")
job = await create_job(requestor=x_48ix_api_user)
await job.fetch_related("requestor")
background_tasks.add_task(_update_switch_acl, job=job.id)
job_response = UpdatePolicyResponse(
id=job.id,
request_time=job.request_time,
complete_time=job.complete_time,
requestor=job.requestor.username,
detail=job.detail,
in_progress=job.in_progress,
)
return job_response.dict()
async def job_status(
job_id: int,
x_48ix_api_user: Optional[str] = Header(None),
x_48ix_api_key: Optional[str] = Header(None),
):
"""Get the status of a job by ID."""
await _verify_auth(x_48ix_api_user, x_48ix_api_key, "/job/*")
job = await get_job(job_id)
await job.fetch_related("requestor")
response = UpdatePolicyResponse(
id=job.id,
request_time=job.request_time,
complete_time=job.complete_time,
requestor=job.requestor.username,
detail=job.detail,
in_progress=job.in_progress,
)
log.debug("Job {} status: {}", job.id, response)
return response.dict()
```
#### File: stats/auth/main.py
```python
from typing import List, Union
from datetime import datetime
# Third Party
from tortoise import Tortoise
from passlib.hash import argon2
from tortoise.exceptions import DoesNotExist, IntegrityError
# Project
from stats.log import log
from stats.config import params
from stats.exceptions import AuthError, StatsError
from stats.auth.models import ApiJob, ApiUser, ApiRoute
async def authdb_start() -> None:
"""Initialize database connection."""
log.debug("Opening database connection")
await Tortoise.init(
db_url=f"sqlite:///{str(params.api.dbmain_path)}",
modules={"models": ["stats.auth.models"]},
)
await Tortoise.generate_schemas()
async def authdb_stop() -> None:
"""Close database connection."""
log.debug("Closing database connection")
await Tortoise.close_connections()
async def get_user(username: str) -> ApiUser:
"""Get a user object by username."""
try:
user = await ApiUser.get(username=username)
except DoesNotExist as err:
raise AuthError(
"User '{u}' does not exist.", u=username, status_code=404
) from err
return user
async def get_job(job_id: int) -> ApiJob:
"""Get a job object by id."""
try:
job = await ApiJob.get(id=job_id)
except DoesNotExist as err:
raise StatsError(f"Job {job_id} does not exist.") from err
return job
async def get_route(route: str) -> ApiRoute:
"""Get a user object by username."""
try:
_route = await ApiRoute.get(name=route)
except DoesNotExist as err:
raise AuthError("Route '{r}' does not exist.", r=route) from err
return _route
async def create_user(username: str, password: str) -> None:
"""Create an API user."""
hashed_password = argon2.hash(password)
try:
await ApiUser.create(username=username, password=hashed_password)
except IntegrityError:
raise AuthError(
"User '{u}' already exists.", u=username, status_code=409
) from None
log.success("Added user {}", username)
async def delete_user(username) -> None:
"""Delete an API user."""
user = await get_user(username)
await user.delete()
log.success("Deleted user {}", username)
async def create_route(name: str) -> None:
"""Create an API route entry for authorization."""
try:
await ApiRoute.create(name=name)
except IntegrityError:
raise StatsError(f"Route '{name}' already exists") from None
log.success("Added route {}", name)
async def delete_route(route) -> None:
"""Delete an API route."""
_route = await get_route(route)
await _route.delete()
log.success("Deleted route {}", route)
async def create_job(requestor: str) -> ApiJob:
"""Create a new API job record."""
user = await get_user(requestor)
job = ApiJob(requestor=user, in_progress=True)
await job.save()
return job
async def update_job(job_id: int, **kwargs) -> None:
"""Update a job's attributes."""
await ApiJob.filter(id=job_id).update(**kwargs)
async def complete_job(job_id: int) -> None:
"""Mark a job as complete."""
await ApiJob.filter(id=job_id).update(
in_progress=False, complete_time=datetime.utcnow()
)
async def _change_route(
username: str, routes: Union[str, List[str]], action: str
) -> None:
"""Associate or disassociate a route from a user."""
user = await get_user(username)
if isinstance(routes, str):
routes = [routes]
if action == "add":
coro = user.routes.add
msg = "Added route {} to user {}"
elif action == "remove":
coro = user.routes.remove
msg = "Removed route {} from user {}"
else:
raise StatsError(f"Action {action} is not supported")
for route in routes:
matched = await get_route(route)
await coro(matched)
log.success(msg, route, user.username)
async def associate_route(username: str, routes: Union[str, List[str]]) -> None:
"""Add routes to a user."""
await _change_route(username, routes, "add")
async def disassociate_route(username: str, routes: Union[str, List[str]]) -> None:
"""Remove routes from a user."""
await _change_route(username, routes, "remove")
async def authorize_route(username: str, route: str) -> bool:
"""Verify if a user has access to an API route."""
is_authorized = False
try:
user = await get_user(username)
await user.fetch_related("routes")
async for user_route in user.routes:
if route == user_route.name:
is_authorized = True
break
except DoesNotExist:
raise AuthError(
"User '{u}' does not exist.", u=username, status_code=401
) from None
if is_authorized:
log.debug("{} is authorized to access {}", username, route)
if not is_authorized:
log.error("{} is not authorized to access {}", username, route)
return is_authorized
async def authenticate_user(username: str, password: str) -> bool:
"""Authenticate a user."""
user = await get_user(username)
valid = argon2.verify(password, user.password)
if valid:
log.debug("Authentication succeeded for user {}", username)
if not valid:
log.error("Authentication failed for user {}", username)
return valid
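# Illustrative end-to-end sketch using the helpers defined above: open the DB,
# create a user and a route, link them, then check credentials and access. The
# username, password and route name are assumptions for demonstration only.
import asyncio

async def _demo() -> None:
    await authdb_start()
    try:
        await create_user("alice", "correct-horse")
        await create_route("/policy/update/")
        await associate_route("alice", "/policy/update/")
        assert await authenticate_user("alice", "correct-horse")
        assert await authorize_route("alice", "/policy/update/")
    finally:
        await authdb_stop()

# asyncio.run(_demo())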
```
#### File: stats/models/overall_utilization.py
```python
import math
from typing import List
# Third Party
from pydantic import Field, BaseModel, StrictInt, validator
class OverallUtilization(BaseModel):
"""IX-Wide utilization response model."""
ingress: List[List] = Field(
..., title="Ingress Utilization", description="Actual port utilization data."
)
egress: List[List] = Field(
..., title="Egress Utilization", description="Actual port utilization data."
)
ingress_average: StrictInt = Field(..., title="Ingress Average")
egress_average: StrictInt = Field(..., title="Egress Average")
ingress_peak: StrictInt = Field(..., title="Peak Ingress Utilization")
@validator("ingress_average", "egress_average", "ingress_peak", pre=True)
def round_avg_bits(cls, value):
"""Round up bit floats to whole integers."""
return math.ceil(value)
@validator("ingress", "egress")
def round_utilization_bits(cls, value):
"""Round up bit floats to whole integers."""
if len(value) != 1:
for pair in value:
pair[1] = math.ceil(pair[1])
return value
``` |
{
"source": "48kRAM/recipes",
"score": 3
} |
#### File: recipes/yWorks/YWorksURLProvider.py
```python
import os
import urllib2
from autopkglib import Processor, ProcessorError
__all__ = ["YWorksURLProvider"]
class YWorksURLProvider(Processor):
"""This processor obtains a download URL for the latest version of yWorks"""
description = __doc__
input_variables = {
"product_name": {
"required": True,
"description":
"Product to fetch URL for. Right now, only 'yEd'.",
},
}
output_variables = {
"url": {
"description": "URL to latest version of the given product.",
},
}
def main(self):
"""Provide a yWorks product download URL"""
product_name = self.env["product_name"]
# http://www.yworks.com/products/yed/demo/yEd-CurrentVersion.txt
base_url = "http://www.yworks.com/products"
check_url = "%s/%s/demo/%s-CurrentVersion.txt" % ( base_url, product_name.lower(), product_name)
# Get the text file
try:
fref = urllib2.urlopen(check_url)
txt = fref.read()
fref.close()
except BaseException as err:
raise ProcessorError("Can't download %s: %s" % (check_url, err))
# Create download link
latest=txt.rstrip()
base_prod_url="http://live.yworks.com/yed-downloads"
download_url = "%s/%s-%s_with-JRE7.dmg" % (base_prod_url, product_name, latest)
self.env["url"] = download_url
self.output("Found URL as %s" % self.env["url"])
if __name__ == "__main__":
PROCESSOR = YWorksURLProvider()
PROCESSOR.execute_shell()
``` |
{
"source": "48productions/Mr-Genetics",
"score": 2
} |
#### File: 48productions/Mr-Genetics/config_man.py
```python
from pyexpat import ExpatError
from xml.dom import minidom
from xml.dom.minidom import parse
import xml.dom.minidom
import codecs
import sys
# The config is organized like this:
# <config>
# <admin_role>admin role id</admin_role>
# <scoreboard>Default scoreboard to load</scoreboard>
# <category name="Category name", listeningChannel="channel ID", listeningMessage="message ID", altChannel="channel ID", altMessage="message ID", altRole="Role id", description="optional description">
# <role name="Role id" dispName="Role display name" emoji="Unicode emoji/Custom emoji name" usesCustomEmoji="True/False", assignable="True/False">Role Description</role>
# <role>...
# </category>
# <category>...
# </config>
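# A minimal example of the structure described above (names, IDs, and emoji are
# placeholders only, shown here for illustration):
#
# <config>
#     <admin_role>123456789012345678</admin_role>
#     <scoreboard>default</scoreboard>
#     <category name="Pronouns" listeningChannel="111" listeningMessage="222" description="Pick your roles">
#         <role name="333" dispName="They/Them" emoji="🟢" usesCustomEmoji="False" assignable="True">Optional description</role>
#     </category>
# </config>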
print("Initializing config_man...")
def save_config():
file = codecs.open("config.xml", "w", "utf_8_sig")
file.write(config.toxml(encoding="utf-8").decode("utf-8"))
#config.writexml(file, addindent="\t", newl="\n", encoding="utf-8")
file.close()
get_categories()
# Sets the categories variable to a dict containing all categories as keys, and the channel/message IDs to listen on as values
def get_categories():
global categories, categoriesAlt
ret = {} # Why?
retAlt = {}
configCategories = config.getElementsByTagName("category") # First iterate through all the categories in the file...
for category in configCategories:
if category.hasAttribute("name"): # Check if this category has a name
if category.hasAttribute("listeningChannel") and category.hasAttribute("listeningMessage"): # If it also has a channel/message to listen for, add them too
ret[category.getAttribute("name")] = category.getAttribute("listeningChannel") + ";" + category.getAttribute("listeningMessage")
if category.hasAttribute("altChannel") and category.hasAttribute("altMessage"): # If this category also has an alt message specified, add it to the alt categories as well
retAlt[category.getAttribute("name")] = category.getAttribute("altChannel") + ";" + category.getAttribute("altMessage")
else: # Otherwise, set the channel/message fields to -1
print("Category", category.getAttribute("name"), "doesn't have listeningMessage/Channel attributes (and thus can't track a message for reactions to assign these roles!)\nRun the role list command to generate a role list message to fix this!")
ret[category.getAttribute("name")] = "-1;-1"
else:
print("Config warning: Category in config is missing a name and won't be loaded!")
categories = ret
categoriesAlt = retAlt
# Returns a dict with all of the categories in the config and their descriptions (or some placeholder text, if they don't have one)
def get_category_descriptions():
global categories
ret = {}
configCategories = config.getElementsByTagName("category") # First iterate through all the categories in the file...
for category in configCategories:
if category.hasAttribute("name"): # Check if this category has a name
if category.hasAttribute("description"): # Now check if it has a description
desc = category.getAttribute("description") # Categories with blank descriptions should use placeholder text, otherwise just return the description
ret[category.getAttribute("name")] = desc if desc is not "" else "React with these emotes to get roles!"
else: # No description attribute in the config? Also use some placeholder text
ret[category.getAttribute("name")] = "React with these emotes to get roles!"
return ret
# Returns a dict of all roles in a category and their descriptions
def get_roles(category):
global categories
ret = {}
if category in categories: # If the category we're requesting is in our valid categories list, find that category in the config
for configCategory in config.getElementsByTagName("category"):
if configCategory.hasAttribute("name") and configCategory.getAttribute("name") == category: # Found the category element, now grab the roles from it
for configRole in configCategory.getElementsByTagName("role"):
ret[configRole.getAttribute("name")] = configRole.firstChild.nodeValue if configRole.hasChildNodes() else "" # Grab this role from the config and its description (or "" if no description is saved), then add it to the dict to return
return ret
else:
print("Error: Attempt to get roles from non-existent category \"", category, "\"")
return False
# Alternate version of get_roles to grab the emojis used for each role
def get_roles_emoji(category):
global categories
ret = {}
if category in categories: # If the category we're requesting is in our valid categories list, find that category in the config
for configCategory in config.getElementsByTagName("category"):
if configCategory.hasAttribute("name") and configCategory.getAttribute("name") == category: # Found the category element, now grab the roles from it
for configRole in configCategory.getElementsByTagName("role"):
if configRole.hasAttribute("name") and configRole.hasAttribute("emoji") and configRole.hasAttribute("usesCustomEmoji") and configRole.hasAttribute("assignable") and configRole.getAttribute("assignable") == "True": # Only add roles with a name, emoji and assignable="True" set
ret[configRole.getAttribute("name")] = [configRole.getAttribute("emoji"), configRole.getAttribute("usesCustomEmoji")] # Grab this role from the config and its emoji (if it has both attributes), then add it to the dict to return
return ret
else:
print("Error: Attempt to get roles from non-existent category \"", category, "\"")
return False
# Alternate version of get_roles_emoji to grab the emoji used the role for a category's alt message
def get_alt_role_emoji(category):
global categoriesAlt
ret = {}
if category in categories: # If the category we're requesting is in our valid categories list, find that category in the config
for configCategory in config.getElementsByTagName("category"):
if configCategory.hasAttribute("name") and configCategory.getAttribute("name") == category and configCategory.hasAttribute("altRole"): # Found the category element, now grab the roles from it
for configRole in configCategory.getElementsByTagName("role"):
if configRole.hasAttribute("name") and configRole.getAttribute("name") == configCategory.getAttribute("altRole") and configRole.hasAttribute("emoji") and configRole.hasAttribute("usesCustomEmoji") and configRole.hasAttribute("assignable") and configRole.getAttribute("assignable") == "True": # Only add roles with a name, emoji and assignable="True" set
ret[configRole.getAttribute("name")] = [configRole.getAttribute("emoji"), configRole.getAttribute("usesCustomEmoji")] # Grab this role from the config and its emoji (if it has both attributes), then add it to the dict to return
return ret
else:
print("Error: Attempt to get alt role from non-existent alt category \"", category, "\" - does this category have an alt message/role set?")
return False
# Returns whether a role can be assigned via reactions
def is_role_assignable(category, role):
global categories
if category in categories: # If the category we're requesting is in our valid categories list, find that category in the config
for configCategory in config.getElementsByTagName("category"):
if configCategory.hasAttribute("name") and configCategory.getAttribute("name") == category: # Found the category element, now grab the roles from it
for configRole in configCategory.getElementsByTagName("role"):
if configRole.hasAttribute("name") and configRole.getAttribute("name") == str(role): # Found le role
return configRole.hasAttribute("assignable") and configRole.getAttribute("assignable") == "True" # Return whether this role both has the "assignable" attribute and if that attribute is true
else:
print("Error: Attempt to get role from non-existent category \"", category, "\"")
return False
# Adds a role (and category, if the specified one doesn't exist) to the config
def add_role(category, role, dispName, emoji, description, isCustomEmoji, assignable):
if category not in categories: # Check if the specified category doesn't exist
if add_category(category) is False: # Try adding it! If that fails, the program is mega derped and we should return
print("Failed to add role", role, ": Category \"", category, "\"doesn't exist and was unable to be added.\n\nThis probably shouldn't happen.")
return "Failed to add role: Category \"", category, "\"doesn't exist and was unable to be added.\n\nThis probably shouldn't happen."
for emote in get_roles_emoji(category).values():
if emote[0] == emoji and assignable:
print("Failed to add role: Emote " + emoji + " is already used for another role in this category!")
return "Failed to add role: Emoji " + emoji + " is already used for another role in this category!"
for category_element in config.getElementsByTagName("category"): # Now go and add this role to the category
category_name = category_element.getAttribute("name")
if category_name is not None and category_name == category:
config_role = dom.createElement("role") # Create the role element (<role>)
config_role.setAttribute("name", role)
config_role.setAttribute("dispName", dispName)
config_role.setAttribute("emoji", emoji if assignable else "")
config_role.setAttribute("usesCustomEmoji", str(isCustomEmoji))
config_role.setAttribute("assignable", str(assignable))
config_role.appendChild(dom.createTextNode(description)) # Finally, set the text for it (the description)
category_element.appendChild(config_role) # Then add it to the category (should at this point be <role name="role name", emoji="emoji name">description</role>
save_config()
print("Added role", str(role), "to category", category, "(emoji:", emoji, ", desc: \"", description, "\")")
return True
print("Failed to add role", str(role), "to category", category, ": Unable to find category to add role to. This should never happen.")
return "Unable to find category to add role to.\n\nThe category should've been automagically added, so this should never happen."
# Removes a role from the config
def remove_role(category, role):
if category not in categories: # The specified category doesn't exist
print("Failed to remove role \"", role, "\": Category \"", category, "\"doesn't exist!")
return "Failed to remove role: Category \"", category, "\"doesn't exist!"
for category_element in config.getElementsByTagName("category"): # Now go and add this role to the category
category_name = category_element.getAttribute("name")
if category_name is not None and category_name == category:
for role_element in category_element.getElementsByTagName("role"):
if role_element.hasAttribute("name") and role_element.getAttribute("name") == role: # Found the role to delete, delete it!
category_element.removeChild(role_element)
print("Removed role", str(role), "from category", category)
if len(category_element.getElementsByTagName("role")) < 1: # No other roles in this category, also remove the now-empty category
ret = [-1, -1]
if category_element.hasAttribute("listeningChannel") and category_element.hasAttribute("listeningMessage"): # Did this category get assigned a rolelist message?
ret[0] = int(category_element.getAttribute("listeningChannel"))
ret[1] = int(category_element.getAttribute("listeningMessage"))
else: # No message, set the message id to return to -1
ret[0] = -1
ret[1] = -1
config.removeChild(category_element)
print("Removed now empty category " + category)
save_config()
return ret
save_config()
return True
print("Failed to remove role", str(role), "from category", category, ": Unable to find category or role.")
return "Failed to remove role: Unable to find role or category in config."
# Adds a category to the config if it doesn't already exist
def add_category(category):
for category_element in config.getElementsByTagName("category"): # Check for duplicate categories
category_name = category_element.getAttribute("name")
if category_name is not None and category == category_name:
print("Duplicate category", category, "could not be added")
return False
config_category = dom.createElement("category")
config_category.setAttribute("name", category)
config.appendChild(config_category)
get_categories() # Don't forget to refresh the category/message dict!
print("Created category", category)
return True
# Sets the message and channel ids for a category in the config
def set_category_message(category, channel_id, message_id):
global categories
categories[category] = channel_id + ';' + message_id
for configCategory in config.getElementsByTagName("category"):
if configCategory.hasAttribute("name") and configCategory.getAttribute("name") == category:
configCategory.setAttribute("listeningChannel", channel_id)
configCategory.setAttribute("listeningMessage", message_id)
# Note: Config saving isn't done here, but instead in the rolelist command handler (since we're probably gonna be updating multiple config entries at once)
# Sets the description of a category
def set_category_description(category, description):
global categories
if category in categories:
for configCategory in config.getElementsByTagName("category"):
if configCategory.hasAttribute("name") and configCategory.getAttribute("name") == category:
configCategory.setAttribute("description", description)
print("Set description of category \"" + category + "\" to \"" + description + "\"")
return "Set description of category \"" + category + "\" to \"" + description + "\""
else:
print("Failed to set category description: Category \"", category, "\"doesn't exist!")
return "Failed to set category description: Category \"", category, "\"doesn't exist!"
save_config()
# Sets the message and channel ids for a category in the config
def set_category_alt_message(category, channel_id, message_id, role):
global categoriesAlt
categoriesAlt[category] = channel_id + ';' + message_id
for configCategory in config.getElementsByTagName("category"):
if configCategory.hasAttribute("name") and configCategory.getAttribute("name") == category:
configCategory.setAttribute("altChannel", str(channel_id))
configCategory.setAttribute("altMessage", str(message_id))
configCategory.setAttribute("altRole", str(role.id))
save_config()
print("Set category \"" + category + "\"'s alt rolelist message for role\"" + role.name + "\"")
return "Set category \"" + category + "\"'s alt rolelist message for role\"" + role.name + "\""
print("Failed to set category alt rolelist message: Category \"", category, "\"doesn't exist!")
return "Failed to set category alt rolelist message: Category \"", category, "\"doesn't exist!"
# Sorts roles in a category by alphabetical order
def sort_category(category):
if category in categories: # Find this category in the config
for configCategory in config.getElementsByTagName("category"):
if configCategory.hasAttribute("name") and configCategory.getAttribute("name") == category: # Now sort the roles from the category: Janky-style because documentation wasn't easy to find!
configRoles = configCategory.getElementsByTagName("role") # First: Grab the roles and sort them alphabetically based on the role's display name
configRoles.sort(key=lambda x: str(x.attributes["dispName"].value))
for node in list(configCategory.childNodes): # Then, remove this category's child nodes in the config (copy the list first so removal doesn't skip nodes)...
configCategory.removeChild(node)
for role in configRoles: # Finally, re-add the sorted roles and save the config
configCategory.appendChild(role)
save_config()
return "Sorted category \"" + category + "\" by alphabetical order"
else:
print("Failed to sort category: Category \"", category, "\"doesn't exist!")
return "Failed to sort category: Category \"" + category + "\"doesn't exist!"
# Returns text inside the first "admin_role" element
def get_admin_role():
elements = config.getElementsByTagName("admin_role")
if len(elements) >= 1:
role = elements[0].firstChild.nodeValue
if role is not None:
return int(role)
print("\nWarning: Call to get_admin_role() returned None - has a role been specified in the config?\n\nRun 'setAdminRole' within discord to set this!\n")
return None
# Sets the admin_role element in the config
def set_admin_role(role_id):
admin_roles = config.getElementsByTagName("admin_role")
admin_role = None
if len(admin_roles) == 0: # First: Create the admin_role tag if it doesn't exist
admin_role = dom.createElement("admin_role")
config.appendChild(admin_role)
else:
admin_role = admin_roles[0]
if admin_role.hasChildNodes(): # If there's old children nodes, remove them all
for child in list(admin_role.childNodes):
admin_role.removeChild(child)
admin_role.appendChild(dom.createTextNode(role_id))
save_config()
return True
# Returns text inside the first "scoreboard" element
def get_default_scoreboard():
elements = config.getElementsByTagName("scoreboard")
if len(elements) >= 1:
scoreboard = elements[0].firstChild
if scoreboard is not None and scoreboard.nodeValue is not None:
return scoreboard.nodeValue
return None
# Sets the scoreboard element in the config
def set_default_scoreboard(scrbrd):
scoreboard_elements = config.getElementsByTagName("scoreboard")
scoreboard = None
if len(scoreboard_elements) == 0: # First: Create the scoreboard tag if it doesn't exist
scoreboard = dom.createElement("scoreboard")
config.appendChild(scoreboard)
else:
scoreboard = scoreboard_elements[0]
if scoreboard.hasChildNodes(): # If there's old children nodes, remove them all
for child in list(scoreboard.childNodes):
scoreboard.removeChild(child)
scoreboard.appendChild(dom.createTextNode(scrbrd))
save_config()
return True
# Let's open our config:
try:
dom = xml.dom.minidom.parse("config.xml")
except FileNotFoundError: # No config file? Create one and set its root element
dom = minidom.Document()
root = dom.createElement("config")
dom.appendChild(root)
except ExpatError as e: # Our formatting is screwed
print("Error parsing config.xml, your config formatting is corrupted.\n\nExpatError: " + str(e))
sys.exit(2)
config = dom.documentElement
categories = None # Don't forget to initialize the category list, too!
categoriesAlt = None
get_categories()
print("Loaded categories:")
for category in categories.keys():
print(category, ":", categories[category])
save_config()
```
#### File: 48productions/Mr-Genetics/scoreboard.py
```python
import config_man
import scoreboard_config_man
import asyncio
from random import randrange
import discord
from discord.utils import get
from discord.ext import commands
import utils
class Scoreboard(commands.Cog):
score_messages = ( # Random messages to display on score submission
"Good luck on the win!",
"Go for the gold!",
"Ganbare!",
"***Do it!!!***",
"Yo! Tear it up!",
"Your scores are on FIRE!",
"Can I call you a dancin' MASTER?",
"Wow, your steps are AMAZIN'!",
"Yeah, I KNEW you could do it!",
"Wow, you're awesome!",
"Way to step it up!",
"Your skills are UN-be-lieveable!",
"Perfection is your first, last, AND middle name!",
"Total ability UP!",
"Showers are sexy! Take one today!",
"\*Does a little jig\*",
"Hell yeah, pizza time.",
"I'm screenshotting this, thank you very much",
)
def __init__(self, bot):
self.bot = bot
self.sc_config = scoreboard_config_man.ScoreboardConfig()
self.scfield_emote = None
self.scdiv_emote = None
self.scdiv_division = {}
self.scsubmit_field = {}
self.scverify_field = {}
self.scrm_field = {}
default_scoreboard = config_man.get_default_scoreboard()
if default_scoreboard is not None:
print("Default scoreboard " + default_scoreboard + " is saved, attempting load...")
self.sc_config.load_sc_config(default_scoreboard, False)
# Tries loading a scoreboard config file
@commands.command()
async def scload(self, ctx, name):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
self.sc_config.save_sc_config() #Don't nuke ur data kids, triple-check that you've saved to disk before loading a new config
loaded = self.sc_config.load_sc_config(name, False)
if loaded == 0:
await ctx.send(embed=utils.format_embed("Loaded scoreboard " + name, False))
config_man.set_default_scoreboard(name)
else:
await ctx.send(embed=utils.format_embed("Error: Scoreboard config not found: " + name, True))
# Tries unloading the current scoreboard config (so no config is active)
@commands.command()
async def scunload(self, ctx):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("No scoreboard is currently loaded, nothing to unload!", False))
return
self.sc_config.save_sc_config() #Don't nuke ur data kids, triple-check that you've saved to disk before loading a new config
self.sc_config.unload_sc_config()
config_man.set_default_scoreboard("")
await ctx.send(embed=utils.format_embed("Unloaded scoreboard - no scoreboard is active now", False))
# Tries making a new scoreboard config file
@commands.command()
async def scnew(self, ctx, name, *, description=""):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
self.sc_config.save_sc_config() # Don't nuke ur data kids, triple-check that you've saved to disk before loading a new config
#sc_config.sc_config_exists(name) # Not yet
loaded = self.sc_config.load_sc_config(name, True, description)
if loaded == 0:
await ctx.send(embed=utils.format_embed("Loaded scoreboard " + name + " - this really shouldn't happen :sweat_smile:", False))
config_man.set_default_scoreboard(name)
elif loaded == 1:
await ctx.send(embed=utils.format_embed("Created new scoreboard " + name, False))
config_man.set_default_scoreboard(name)
else:
await ctx.send(embed=utils.format_embed("Error: Scoreboard config not found: " + name, True))
# Sets the scoreboard's display name
@commands.command()
async def scdisplayname(self, ctx, *, name):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
self.sc_config.set_disp_name(name)
await self.generate_scoreboard_message(ctx, False) # Update the scoreboard, BOI
await ctx.send(embed=utils.format_embed("Set scoreboard display name to " + name, False))
# Sets the scoreboard's description
@commands.command()
async def scdescription(self, ctx, *, desc):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
self.sc_config.set_desc(desc)
await self.generate_scoreboard_message(ctx, False) # Update the scoreboard, BOI
await ctx.send(embed=utils.format_embed("Set scoreboard description to " + desc, False))
# Creates a new scoreboard division
@commands.command()
async def scnewdiv(self, ctx, name, *, desc=""):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
embed = discord.Embed(title="Creating a new scoreboard division:", color=0x4EDB23)
msg_text = '\n**Name:**' + name
msg_text += '\n**Desc:**' + desc
msg_text += '\n\nTo confirm: React with an emote to associate with this division!'
embed.description = msg_text
msg = await ctx.send(embed=embed)
def reaction_check(reaction, user): # Checks if the emoji reaction to sc_field is valid or not
if user != ctx.author or reaction.message.id != msg.id: # First: Only accept reactions from the command sender on the message we just sent
return False
if str(reaction.emoji) in self.sc_config.get_division_emotes().keys(): # Make sure the emoji isn't in use in another division
print("Reaction check failed: Duplicate emoji")
return False
if reaction.custom_emoji: # Finally: If this is a custom emote, make sure the bot can actually use it
emoji = get(ctx.guild.emojis, id=reaction.emoji.id)
if emoji is None or emoji.available is False:
return False
self.scdiv_emote = str(reaction.emoji)
return True
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=reaction_check)
except asyncio.TimeoutError:
msg_text += '\n**Waiting for an emote timed out - run this command again**'
embed.description = msg_text
embed.color = 0xDB2323
await msg.edit(embed=embed)
else:
if self.sc_config.div_new(name, desc, self.scdiv_emote):
await msg.add_reaction('👍')
return
else:
await ctx.send(embed=utils.format_embed("Error creating division \"" + name + "\".", True))
return
# Sets a division's description
@commands.command()
async def scdivdescription(self, ctx, name, *, description):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
if self.sc_config.div_desc(name, description):
await ctx.send(embed=utils.format_embed("Set division \"" + name + "\"'s description", False))
return
else:
await ctx.send(embed=utils.format_embed("Error removing division - division \"" + name + "\" doesn't exist!", True))
return
# Removes a scoreboard division
@commands.command()
async def scremovediv(self, ctx, name):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
if self.sc_config.div_remove(name):
await ctx.send(embed=utils.format_embed("Removed division \"" + name + "\".", False))
return
else:
await ctx.send(embed=utils.format_embed("Error removing division - division \"" + name + "\" doesn't exist!", True))
return
# Creates or updates a scoreboard field
@commands.command()
async def scfield(self, ctx, division, name, type=0):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
div = self.sc_config.get_division(division)
if div is None:
await ctx.send(embed=utils.format_embed("Error: Invalid divsion name \"" + division + "\"", False))
return
type = 0 # Todo: We only support one field type for now, so enforce it here
ftype = self.sc_config.parse_field_type(type)
if ftype is None:
await ctx.send(embed=utils.format_embed("Error: Invalid field type \"" + type + "\"!", True))
return
fields = self.sc_config.get_fields(div)
if name in fields.keys():
print("Field " + name + " exists")
embed = discord.Embed(title="Editing existing scoreboard field:", color=0x4EDB23)
else:
print("Field " + name + " doesn't exist")
embed = discord.Embed(title="Creating new scoreboard field:", color=0x4EDB23)
msg_text = '\n**Name:** ' + name
msg_text += '\n**Type:** ' + str(type)
msg_text += '\n\nTo confirm: React with an emote to associate with this field!'
embed.description = msg_text
msg = await ctx.send(embed=embed)
def reaction_check(reaction, user): # Checks if the emoji reaction to sc_field is valid or not
if user != ctx.author or reaction.message.id != msg.id: # First: Only accept reactions from the command sender on the message we just sent
return False
if str(reaction.emoji) in self.sc_config.get_fields_emoji(div).keys(): # Make sure the emoji isn't in use in another field
print("Reaction check failed: Duplicate emoji")
return False
if reaction.custom_emoji: # Finally: If this is a custom emote, make sure the bot can actually use it
emoji = get(ctx.guild.emojis, id=reaction.emoji.id)
if emoji is None or emoji.available is False:
return False
self.scfield_emote = str(reaction.emoji)
return True
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=reaction_check)
except asyncio.TimeoutError:
msg_text += '\n**Waiting for an emote timed out - run this command again**'
embed.description = msg_text
embed.color = 0xDB2323
await msg.edit(embed=embed)
else:
print(self.scfield_emote)
self.sc_config.update_field(div, name, str(type), self.scfield_emote)
await msg.add_reaction("👍")
#await msg.channel.send(embed=utils.format_embed("Updated field " + name + "!", False))
await self.generate_scoreboard_message(ctx, False) # Update the scoreboard, BOI
# Creates or updates a scoreboard field
@commands.command()
async def scremovefield(self, ctx, division, name):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
div = self.sc_config.get_division(division)
if div is None:
await ctx.send(embed=utils.format_embed("Error: Invalid divsion name \"" + division + "\""))
return
fields = self.sc_config.get_fields(div)
if name in fields.keys():
embed = discord.Embed(title="Removing scoreboard field:", color=0x4EDB23)
else:
ctx.send(embed=utils.format_embed("Error: Field " + name + " doesn't exist!", True))
return
msg_text = '\n**Name:** ' + name
msg_text += '\n**Warning: This will permanently delete this field and its scores!**'
msg_text += '\n\nTo confirm deletion: React with "❌"'
embed.description = msg_text
msg = await ctx.send(embed=embed)
await msg.add_reaction("❌")
def reaction_check(reaction, user): # Checks if the emoji reaction to scremovefield is valid or not
return user == ctx.author and reaction.message.id == msg.id and str(reaction.emoji) == '❌' # Only accept 'X' reactions from the command sender on the message we sent
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=reaction_check)
except asyncio.TimeoutError:
msg_text += '\n**Waiting for a reaction timed out - run this command again**'
embed.description = msg_text
embed.color = 0xDB2323
await msg.edit(embed=embed)
else:
self.sc_config.remove_field(div, name)
#await msg.channel.send(embed=utils.format_embed("Deleted field " + name, False))
await msg.add_reaction("👍")
await self.generate_scoreboard_message(ctx, False) # Update the scoreboard, BOI
# Submits a score for entry
@commands.command()
async def submit(self, ctx, score=-1):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member): # Prevent this from running outside of a guild:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded!", True))
return
#division = "EZ" # DEBUG: Hard-code a division name, here, until we can grab a player's real division
division = self.sc_config.get_player_division(ctx.author.id)
print("Player " + ctx.author.display_name + "(id: " + str(ctx.author.id) + ") submitting unverified score " + str(score))
if division is None: # Player hasn't joined a division yet, prompt them to join
print("Division check returned None, prompting player for division")
res = await self.division(ctx)
if res is False: # Didn't choose a division lol, bail here
return
division = self.sc_config.get_player_division(ctx.author.id)
else:
print("Division check passed, player division \"" + division + "\"")
div = self.sc_config.get_division(division)
if div is None:
print("Division invalid!")
await ctx.send(embed=utils.format_embed("Error: The division you joined (\"" + division + "\") is invalid... somehow. Please contact an admin :("))
return
# Our logic here changes if there's only one field on the scoreboard
# If there's one field, select it by default and just submit scores to that
# For multiple fields, we need to prompt the user to select a field
fields = self.sc_config.get_fields_emoji(div)
if len(fields) == 1:
sub_field = list(fields.values())[0]
else:
sub_field = None
# Our reaction-based field prompt - Validate input if the submitting user reacted to it, and their reaction is a field emoji
def sub_reaction_check(reaction, user):
if user == ctx.author and reaction.message.id == msg.id and str(reaction.emoji) in fields.keys():
self.scsubmit_field[user.id] = fields[str(reaction.emoji)]
return True
return False
try:
if sub_field is None: # If we haven't set a default field to submit to (above), send the reaction prompt and wait for a reaction
embed = discord.Embed(title="Submitting a score:", color=0x4EDB23)
msg_text = "\n" + ctx.author.display_name + ": " + str(score)
msg_text += "\n\n**React with the field to submit this score to!**\n"
for emote, field in fields.items(): # Display the emotes for all the fields we can submit to
msg_text += emote + " - " + field + "\n"
embed.description = msg_text
msg = await ctx.send(embed=embed)
for emote in fields.keys(): # And react to the message with all the fields we can submit to (two for loops yeehaw)
await msg.add_reaction(emote)
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=sub_reaction_check) # Now wait for the user to react
except asyncio.TimeoutError:
msg_text += "\n\n**Waiting for a reaction timed out - run this command again**"
embed.description = msg_text
embed.color = 0xDB2323
await msg.edit(embed=embed)
else: # On reaction (or if we have a field to submit to and never prompted for a reaction), submit the score!
if sub_field is None:
sub_field = self.scsubmit_field[ctx.author.id]
print("Reaction-based field set: " + sub_field)
print("Attempting submission to field " + sub_field)
self.sc_config.update_entry(div, sub_field, ctx.author.id, score, False)
if randrange(0, 100) == 0:
scmsg = "You showed us... your ULTIMATE dance... Thank you very much... I can't stop CRYING, BUCKETS of ***TEARS.....***"
else:
scmsg = Scoreboard.score_messages[randrange(len(Scoreboard.score_messages))]
await self.generate_scoreboard_message(ctx, False) # Update the scoreboard
embedd = discord.Embed(title="Score submitted for verification - " + scmsg, description=sub_field + ": " + str(score), colour=0x16E200)
await ctx.send(embed=embedd)
# Joins a player to a division, returns if the player is currently in a division or not
@commands.command()
async def division(self, ctx):
divisions = self.sc_config.get_division_names() # Get a list of divisions and divisions emotes/descriptions
divisions_emotes = self.sc_config.get_division_emotes()
cur_div = self.sc_config.get_player_division(ctx.author.id)
if cur_div is not None:
await ctx.send(embed=utils.format_embed("You're in division \"" + cur_div + "\"!\n\n (At this time, you cannot switch divisions)", False))
return True
# If there's only one divisions, we want to auto-join that one!
if len(divisions) == 1:
selected_div = list(divisions.keys())[0]
else:
selected_div = None
# Helper function to validate reaction-based input for the below prompt
def div_reaction_check(reaction, user):
if user == ctx.author and reaction.message.id == msg.id and str(reaction.emoji) in divisions_emotes.keys():
self.scdiv_division[user.id] = divisions_emotes[str(reaction.emoji)]
return True
return False
try:
if selected_div is None: # Didn't auto-pick a division? Prompt for one
emb = discord.Embed(title="Pick a division to join:", color=0x4EDB23)
desc = ""
for div_emote, div_name in divisions_emotes.items(): # List the divisions to join, their emotes, and descriptions
desc += div_emote + " **" + div_name + ":** " + divisions[div_name] + "\n"
emb.description = desc
msg = await ctx.send(embed=emb)
for emote in divisions_emotes.keys(): # React to the message with the division emotes we can join
await msg.add_reaction(emote)
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=div_reaction_check) # And wait for the user to react to pick a division
except asyncio.TimeoutError: # User didn't react in time
desc += "\n\n**Waiting for a reaction timed out - run this command again**"
emb.description = desc
emb.color = 0xDB2323
await msg.edit(embed=emb)
return False
else: # On reaction (or auto-selected division), set the player's division
if selected_div is None: # Didn't auto-select
selected_div = self.scdiv_division[ctx.author.id]
print("Reaction-based division set: " + selected_div)
print("Attempting to set user division: " + ctx.author.display_name + ", " + selected_div)
res = self.sc_config.set_player_division(ctx.author.id, selected_div)
if res:
await ctx.send(embed=utils.format_embed("You've joined division " + selected_div + "!", False))
return True
else:
await ctx.send(embed=utils.format_embed("Unable to join division " + selected_div + " - Please contact an admin", True))
return False
# Sets a user's verified score entry (to a specified score if specified, else to their unverified score if they have one)
@commands.command()
async def verify(self, ctx, division, member: discord.Member, score=-1):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
div = self.sc_config.get_division(division)
if div is None:
await ctx.send(embed=utils.format_embed("Error: Invalid divsion name \"" + division + "\"", True))
return
fields = self.sc_config.get_fields_emoji(div)
if len(fields) == 1: # Only one field, just submit to that one by default
ver_field = list(fields.values())[0]
else: # Multiple fields - figure out which later:tm:
ver_field = None
# Validation method when prompting the user to pick a field to verify scores from
def ver_reaction_check(reaction, user):
if user == ctx.author and reaction.message.id == msg.id and str(reaction.emoji) in fields.keys():
self.scverify_field[user.id] = fields[str(reaction.emoji)]
return True
return False
try:
if ver_field is None: # Still need to prompt the user to choose a field to submit to, do it
embed = discord.Embed(title="Verifying score:", color=0x4EDB23)
msg_text = "\n" + member.display_name
if int(score) == -1:
msg_text += "'s unverified score"
else:
msg_text += ": " + str(score)
msg_text += "\n\n**React with the field to verify this score from!**"
embed.description = msg_text
msg = await ctx.send(embed=embed)
for emote in fields.keys(): # React to this message with the emotes for all the fields we can submit to
await msg.add_reaction(emote)
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=ver_reaction_check)
except asyncio.TimeoutError:
msg_text += "\n\n**Waiting for a reaction timed out - run this command again**"
embed.description = msg_text
embed.color = 0xDB2323
await msg.edit(embed=embed)
else: # On valid reaction/we cheated and already know the field to verify:
if ver_field is None:
ver_field = self.scverify_field[ctx.author.id]
print("Reaction-based field set: " + ver_field)
print("Attempting verification of score from field " + ver_field)
#try:
if int(score) == -1: # Score = -1 means score wasn't specified as an argument, so verify the user's unverified score
existing_scores = self.sc_config.get_entry(div, ver_field, member.id)
print("Attempting verify of user " + member.display_name + "'s unverified score")
print(existing_scores)
if existing_scores is False or existing_scores[0] == -1: # Plot twist: The user doesn't have an unverified score to verify
await ctx.send(embed=utils.format_embed("Error: This user doesn't have an unverified score in field " + ver_field + "! Specify their score after their username!", True))
return
else: # They have an unverified score, set their new verified score to it
score = existing_scores[0]
#except TypeError as e:
# print("TypeError in score verification: " + str(e) + "\nWas the specified score an int?")
# await ctx.send(embed=utils.format_embed("Error: Specified score \"" + str(score) + "\" is not an int!", True))
# return
self.sc_config.update_entry(div, ver_field, member.id, score, True)
await self.generate_scoreboard_message(ctx, False) # Update the scoreboard
embedd = discord.Embed(title="Set user " + member.display_name + "'s verified score", description="Division: " + division + "\nField " + ver_field + ": " + str(score), colour=0x16E200)
await ctx.send(embed=embedd)
# Removes a score entry from the scoreboard
@commands.command()
async def scremoveentry(self, ctx, division, member: discord.Member):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
div = self.sc_config.get_division(division)
if div is None:
await ctx.send(embed=utils.format_embed("Error: Invalid divsion name \"" + division + "\""))
return
fields = self.sc_config.get_fields_emoji(div)
if len(fields) == 1:
rm_field = list(fields.values())[0]
else:
rm_field = None
# Field prompt part 3rd
def rm_reaction_check(reaction, user):
if user == ctx.author and reaction.message.id == msg.id and str(reaction.emoji) in fields.keys():
self.scrm_field[user.id] = fields[str(reaction.emoji)]
return True
return False
try:
if rm_field is None:
embed = discord.Embed(title="Removing a score:", color=0x4EDB23)
msg_text = "\n" + ctx.author.display_name
msg_text += "\n\n**React with the field to remove this user's score from!**\n"
for emote, field in fields.items():
msg_text += emote + " - " + field + "\n"
embed.description = msg_text
msg = await ctx.send(embed=embed)
for emote in fields.keys():
await msg.add_reaction(emote)
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=rm_reaction_check)
except asyncio.TimeoutError:
msg_text += "\n\n**Waiting for a reaction timed out - run this command again**"
embed.description = msg_text
embed.color = 0xDB2323
await msg.edit(embed=embed)
else:
if rm_field is None:
rm_field = self.scrm_field[ctx.author.id]
print("Reaction-based field set: " + rm_field)
print("Attempting entry removal from field " + rm_field)
result = self.sc_config.remove_entry(div, rm_field, member.id)
if result:
await self.generate_scoreboard_message(ctx, False) # Update the scoreboard
await ctx.send(embed=utils.format_embed("Removed " + member.display_name + "'s entry from " + rm_field, False))
else:
await ctx.send(embed=utils.format_embed("Unable to remove " + member.display_name + "'s entry from " + rm_field, True))
# Sends a new leaderboard message
@commands.command()
async def scoreboard(self, ctx):
if not isinstance(ctx.channel, discord.DMChannel) and isinstance(ctx.author, discord.Member) and utils.authorize_admin(ctx.guild, ctx.author): # Prevent this from running outside of a guild or by non-admins:
if not self.sc_config.is_scoreboard_loaded():
await ctx.send(embed=utils.format_embed("Error: No scoreboard is currently loaded! Load one with !scload", True))
return
await self.generate_scoreboard_message(ctx, True)
# Generates a scoreboard message, either updating the existing one or sending a new one
async def generate_scoreboard_message(self, ctx, generate_new):
# Make sure a scoreboard config is loaded first
if not self.sc_config.is_scoreboard_loaded():
print("Error: Attempted to generate a scoreboard message while no scoreboard config was loaded")
return
# First, if we're sending a new message we should delete the old one (if it exists)
old_msg_id = self.sc_config.get_scoreboard_msg()
if old_msg_id is None: # Don't even have an old message id, we gotta make a new one
generate_new = True
if generate_new:
if old_msg_id is not None:
try:
old_msg = await self.bot.get_channel(old_msg_id[0]).fetch_message(old_msg_id[1])
await old_msg.delete()
except discord.errors.NotFound:
print("Received 404 trying to delete scoreboard message with ID " + str(old_msg_id[1]) + ", was it already deleted?")
# Next, generate the message embed
embed = discord.Embed(title="👑 Event Leaderboard 👑", # Title: Leaderboard
color=0xFF7D00)
embed.set_author(name=self.sc_config.get_disp_name(),
url="https://www.youtube.com/watch?v=ZyhrYis509A") # Author field: Event name, link
embed.set_footer(text="Type !submit to submit a score - \"⚠\" scores are unverified") # Footer: Brief instructions
desc_text = self.sc_config.get_desc() + "\n\n"
for div_emoji, div in self.sc_config.get_divisions().items(): # First get a list of divisions to display...
desc_text += div_emoji + " - Division " + div.getAttribute("name") + "\n"
# print("Div " + div_emoji)
for emoji, field in self.sc_config.get_fields_emoji(div).items(): # First (but not first) get a list of fields to display...
# print("Field" + emoji)
fieldtext = ""
entries = self.sc_config.get_entries(div, field, ctx.guild) # ...then a list of entries for that field
entry_members = sorted(entries.items(), key=lambda i: i[1][0], reverse=True) # Get a list of users, sorted by their entry's highest score
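# Assumed shape (inferred from the indexing below; not documented here): entries maps a member's display name
# to a (score, verified) pair, so entry[1][0] is the score and entry[1][1] is the verified flag.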
# print(entry_members)
for i, entry in enumerate(entry_members): # And place em on the leaderboard!
# print(entry[0] + str(entry[1][0]) + str(entry[1][1]))
fieldtext += str(i + 1) + ") " + entry[0] + " *" + str(entry[1][0]) + "*"
if entry[1][1] is False: # This entry is unverified, mark it as such
fieldtext += " ⚠"
else:
fieldtext += " ✔"
fieldtext += "\n"
if fieldtext == "":
fieldtext = "No scores yet!"
embed.add_field(name=div_emoji + ": " + emoji + " " + field, value=fieldtext)
desc_text += "\n."
embed.description = desc_text
# Updating an old message
if not generate_new:
try:
msg = await self.bot.get_channel(old_msg_id[0]).fetch_message(old_msg_id[1])
await msg.edit(embed=embed)
print("Updated scoreboard message")
except (TypeError, discord.errors.NotFound) as e: # 404 i dunno where the old message went
print("Error updating scoreboard message: Message with ID " + str(old_msg_id[1]) + " not found, generating new message instead")
generate_new = True
# Generating a new message (or updating the old one failed above)
if generate_new:
msg = await ctx.send(embed=embed)
self.sc_config.set_scoreboard_msg(msg.channel.id, msg.id)
print("New scoreboard message sent (ID=" + str(msg.id) + ")")
``` |
{
"source": "490720818/jx3_bot",
"score": 2
} |
#### File: managers/bot_manager/__init__.py
```python
from datetime import datetime
from nonebot import get_bots, get_driver, on_regex
from nonebot.adapters.cqhttp import Bot, MessageSegment, PrivateMessageEvent
from nonebot.adapters.cqhttp.permission import PRIVATE_FRIEND
from nonebot.permission import SUPERUSER
from nonebot.plugin import export
from src.managers.group_manager import data_source as group_source
from src.managers.plugins_manager import data_source as plugins_source
from src.utils.browser import get_html_screenshots
from src.utils.config import config
from src.utils.log import logger
from src.utils.scheduler import scheduler
from . import data_source as source
export = export()
export.plugin_name = 'bot管理插件'
export.plugin_command = ""
export.plugin_usage = '用于bot的管理'
export.default_status = True # Plugin default on/off state
export.ignore = True # Whether the plugin manager ignores this plugin
driver = get_driver()
outtime: int = config.get('default').get('bot-outtime')
@driver.on_bot_connect
async def _(bot: Bot):
'''
Handle a bot connecting
'''
bot_id = int(bot.self_id)
log = f'连接到bot({bot.self_id}),正在注册bot_info信息'
logger.debug(log)
await source.bot_connect(bot_id)
nickname = await source.get_bot_nickname(bot_id)
bot.config.nickname = [nickname]
log = f'bot({bot.self_id})bot_info信息注册完毕'
logger.debug(log)
log = f'bot({bot.self_id})正在注册group_info和plugin_info信息'
logger.debug(log)
group_list = await bot.get_group_list()
for group in group_list:
group_id = group['group_id']
group_name = group['group_name']
await plugins_source.plugin_init(bot_id, group_id)
await group_source.group_init(bot_id=bot_id, group_id=group_id, group_name=group_name)
# Register users
user_list = await bot.get_group_member_list(group_id=group_id)
for user in user_list:
user_id = user['user_id']
user_name = user['nickname'] if user['card'] == "" else user['card']
await group_source.user_init(bot_id, user_id, group_id, user_name)
log = f'bot({bot.self_id})group_info和plugin_info信息注册完毕'
logger.debug(log)
@driver.on_bot_disconnect
async def _(bot: Bot):
'''
Handle a bot disconnecting
'''
bot_id = int(bot.self_id)
log = f'检测到bot({bot.self_id})断开链接.'
logger.info(log)
await source.bot_disconnect(bot_id)
# Scheduled job: clean up bots that have been offline longer than the configured timeout
@scheduler.scheduled_job("cron", hour=23, minute=59)
async def _():
log = '正在清理离线bot'
logger.info(log)
outtime = config.get('default').get('bot-outtime')
count = await source.clean_bot(outtime)
log = f'清理完毕,本次共清理 {count} 个机器人数据'
logger.info(log)
# Set the bot owner
set_owner = on_regex(pattern=r"^设置管理员$", permission=PRIVATE_FRIEND, priority=2, block=True)
# Clear the bot owner
clean_owner = on_regex(pattern=r"^清除管理员$", permission=PRIVATE_FRIEND, priority=2, block=True)
# List all connected bots
server_list = on_regex(pattern=r"^服务器列表$", permission=SUPERUSER, priority=2, block=True)
# Manually clean up offline bots
clean_outline_bot = on_regex(pattern=r"(^清理所有离线$)|(^清理离线 [0-9]+$)", permission=SUPERUSER, priority=2, block=True)
# Superuser database cleanup
update_database = on_regex(pattern=r"^清理数据$", permission=SUPERUSER, priority=2, block=True)
# Help message
super_help = on_regex(pattern=r"^超级帮助$", permission=SUPERUSER, priority=2, block=True)
@set_owner.handle()
async def _(bot: Bot, event: PrivateMessageEvent):
'''Set the bot owner via private message'''
bot_id = int(bot.self_id)
owner_id = event.user_id
nickname = event.sender.nickname
flag = await source.set_bot_owner(bot_id, owner_id)
if flag is None:
msg = "设置失败,机器人记录不存在。"
elif flag:
msg = f'设置成功,当前管理员为:{nickname}({owner_id})'
else:
msg = "设置失败,该机器人已有管理员。\n如需要更换管理员,请管理员账号输入:清除管理员"
await set_owner.finish(msg)
@clean_owner.handle()
async def _(bot: Bot, event: PrivateMessageEvent):
'''Clear the bot owner'''
bot_id = int(bot.self_id)
owner_id = event.user_id
flag = await source.clean_bot_owner(bot_id, owner_id)
if flag is None:
msg = "没有什么好清除的了。"
elif flag:
msg = "清除成功,可以重新设置管理员了。"
else:
msg = "清除失败惹,你不是管理员。"
await clean_owner.finish(msg)
@server_list.handle()
async def _(bot: Bot, event: PrivateMessageEvent):
'''
List all connected bots
'''
data = await source.get_all_bot()
for one_data in data:
if one_data['owner_id'] is None:
one_data['owner_id'] = "无"
last_sign: datetime = one_data['last_sign']
one_data['last_sign'] = last_sign.strftime("%Y-%m-%d %H:%M:%S")
if one_data['last_left'] is None:
one_data['last_left'] = "无记录"
else:
last_left: datetime = one_data['last_left']
one_data['last_left'] = last_left.strftime("%Y-%m-%d %H:%M:%S")
alldata = {}
alldata['data'] = data
alldata['robot_nums'] = len(data)
now_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
alldata['time'] = now_time
pagename = "robot.html"
img = await get_html_screenshots(pagename=pagename, data=alldata)
msg = MessageSegment.image(img)
await server_list.finish(msg)
@clean_outline_bot.handle()
async def _(bot: Bot, event: PrivateMessageEvent):
'''
Clean up offline bots via private message
'''
text_list = event.get_plaintext().split(" ")
if len(text_list) == 1:
outtime = 0
else:
outtime = int(text_list[-1])
log = f"管理员清理机器人,参数:{outtime}"
logger.info(log)
count = await source.clean_bot(outtime)
msg = f"清理完毕,共清理 {str(count)} 个机器人。"
await clean_outline_bot.finish(msg)
@update_database.handle()
async def _(bot: Bot, event: PrivateMessageEvent):
'''
Superuser database cleanup
'''
msg = "开始清理数据库"
await update_database.send(msg)
msg_dict = {}
botdict = get_bots()
for id, one_bot in botdict.items():
bot_id = int(id)
count = 0
group_list = [x['group_id'] for x in await one_bot.get_group_list()]
data_group_list = await source.get_bot_group_list(bot_id)
for one_group in data_group_list:
if one_group not in group_list:
# This group is in the database but not in the bot's current group list
await source.clean_one_group(bot_id, one_group)
# Count it
count += 1
msg_dict[id] = count
msg = "数据库清理完毕……\n"
for id, count in msg_dict.items():
msg += f"bot[{id}] 共清理群数据 {count} 个.\n"
await update_database.finish(msg)
@super_help.handle()
async def _(bot: Bot, event: PrivateMessageEvent):
'''Superuser help'''
pagename = "superuser_help.html"
img = await get_html_screenshots(pagename)
msg = MessageSegment.image(img)
log = "超级用户私聊帮助"
logger.info(log)
await super_help.finish(msg)
```
#### File: managers/plugins_manager/__init__.py
```python
import os
from nonebot import get_driver, on_regex
from nonebot.adapters.cqhttp import (GROUP_ADMIN, GROUP_OWNER, Bot,
GroupMessageEvent, MessageSegment)
from nonebot.adapters.cqhttp.permission import GROUP
from nonebot.exception import IgnoredException
from nonebot.message import run_preprocessor
from nonebot.plugin import Matcher
from nonebot.typing import T_State
from src.utils.browser import get_html_screenshots
from src.utils.log import logger
from src.utils.utils import OWNER, get_nickname
from . import data_source as source
from .model import manager_init
# Get this module's name
_, self_module = os.path.split(os.path.split(__file__)[0])
# ============== Plugin manager registration ==============
driver = get_driver()
# On startup: initialize the plugin manager module
driver.on_startup(manager_init)
@run_preprocessor
async def _(matcher: Matcher, bot: Bot, event: GroupMessageEvent, state: T_State):
'''
Plugin-manager preprocessor; only handles group messages
'''
# Skip events that are not normal group messages
event_type = event.get_event_name()
if event_type != "message.group.normal":
return
# Get the group id
group_id = event.group_id
# Get the plugin module name
module_name = matcher.plugin_name
bot_id = int(bot.self_id)
# Check whether this plugin is registered for the group
is_init = await source.check_group_init(bot_id, group_id, module_name)
if is_init is False or module_name == self_module:
log = f'Bot({bot.self_id}) | 此插件不归管理器管理,跳过。'
logger.debug(log)
return
# Plugin manager check: is the plugin enabled for this group?
status = await source.check_plugin_status(bot_id, module_name, group_id)
if status is False:
reason = f'[{module_name}]插件未开启'
log = f'Bot({bot.self_id}) | 事件被阻断:{reason}'
logger.debug(log)
raise IgnoredException(reason)
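# Explanatory note (added; not in the original): raising IgnoredException from a run_preprocessor hook tells
# NoneBot to skip running the current matcher for this event, which is how per-group plugin switches block
# disabled plugins.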
# ================================= Manual update by the owner ==========================
update = on_regex(r"^更新信息$", permission=OWNER, priority=2, block=True)
@update.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
'''
Owner manually re-registers the group's plugin info
'''
bot_id = int(bot.self_id)
# Group id
group_id = event.group_id
# Register
log = f'Bot({bot.self_id}) | 管理员手动注册群插件:{group_id}'
logger.info(log)
await source.plugin_init(bot_id, group_id)
changeregex = r'^(打开|关闭) [\u4E00-\u9FA5A-Za-z0-9_]+$'
change = on_regex(changeregex, permission=OWNER | GROUP_OWNER | GROUP_ADMIN, priority=3, block=True)
# ================================= Feature toggles ===============================
@change.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
bot_id = int(bot.self_id)
group_id = event.group_id
text = event.get_plaintext()
try:
plugin_name, status = _get_change_params(text)
log = f'Bot({bot.self_id}) | ({group_id})群尝试设置插件[{plugin_name}]的状态:[{status}]。'
logger.info(log)
msg = await source.change_plugin_status(bot_id, plugin_name, group_id, status)
log = f'插件[{plugin_name}]状态设置成功。'
logger.info(log)
except Exception:
log = f'插件[{plugin_name}]状态设置失败。'
logger.info(log)
msg = MessageSegment.text('参数正确吗?检查一下。')
await change.finish(msg)
def _get_change_params(text: str) -> tuple[str, bool]:
'''
:Description
Parse the plugin name and toggle state from the raw message
:Parameters
The raw message text
:Returns
* plugin_name: the plugin name
* status: the toggle state
'''
text_list = text.split(' ')
plugin_name = text_list[1]
_status = text_list[0]
status = (_status == "打开")
return plugin_name, status
# =============== Plugin menu ===============
meauregex = r'(^菜单$)|(^功能$)|(^状态$)'
meau = on_regex(meauregex, permission=GROUP, priority=2, block=True)
@meau.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
'''
Show the feature toggle status
'''
self_id = int(bot.self_id)
nickname = await get_nickname(self_id)
group_id = event.group_id
log = f'Bot({bot.self_id}) | {event.sender.nickname}({event.user_id},{event.group_id})请求功能菜单。'
logger.info(log)
data = await source.get_meau_data(self_id, group_id, nickname)
pagename = "meau.html"
img = await get_html_screenshots(pagename, data)
msg = MessageSegment.image(img)
await meau.finish(msg)
help_info = on_regex(pattern=r"^帮助$", permission=GROUP, priority=2, block=True)
@help_info.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
'''Help info'''
pagename = "search_help.html"
img = await get_html_screenshots(pagename)
msg = MessageSegment.image(img)
log = f"Bot({bot.self_id}) | 群[{event.group_id}]请求帮助"
logger.info(log)
await help_info.finish(msg)
```
#### File: src/modules/bot_info.py
```python
from datetime import datetime
from typing import Optional
from src.utils.config import config
from tortoise import fields
from tortoise.models import Model
default_nickname: str = config.get('default').get('nickname')
class BotInfo(Model):
'''QQ机器人表'''
bot_id = fields.IntField(pk=True)
'''机器人QQ号'''
owner_id = fields.IntField(null=True)
'''管理员账号'''
nickname = fields.CharField(max_length=255, default=default_nickname)
'''机器人昵称'''
last_sign = fields.DatetimeField(null=True)
'''上次登录时间'''
last_left = fields.DatetimeField(null=True)
'''上次离线时间'''
online = fields.BooleanField(default=True)
'''当前在线情况'''
class Meta:
table = "bot_info"
table_description = "管理QQ机器人账号信息"
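# Usage sketch (illustrative; these classmethods are called through the manager modules' data_source wrappers):
# await BotInfo.bot_connect(bot_id)         # on connect: create/update the record, stamp last_sign, mark online
# await BotInfo.bot_disconnect(bot_id)      # on disconnect: stamp last_left, mark offline
# await BotInfo.set_owner(bot_id, user_id)  # bind an owner account to this bot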
@classmethod
async def bot_connect(cls, bot_id):
'''
:说明
机器人链接
:参数
* bot_id:机器人QQ号
'''
record, _ = await cls.get_or_create(bot_id=bot_id)
now_time = datetime.now()
record.last_sign = now_time
record.online = True
await record.save(update_fields=["last_sign", "online"])
@classmethod
async def bot_disconnect(cls, bot_id):
'''
:说明
机器人断开链接
:参数
* bot_id:机器人QQ号
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is not None:
now_time = datetime.now()
record.last_left = now_time
record.online = False
await record.save(update_fields=["last_left", "online"])
@classmethod
async def set_owner(cls, bot_id, owner_id) -> bool:
'''
:说明
设置机器人管理员
:参数
* bot_id:机器人QQ号
* owner_id:管理员QQ号
:返回
* bool:是否成功
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.owner_id = owner_id
await record.save(update_fields=["owner_id"])
return True
@classmethod
async def get_owner(cls, bot_id) -> Optional[int]:
'''
:说明
获取机器人管理员
:参数
* bot_id:机器人QQ
:返回
* int:管理员QQ
* None
'''
record = await cls.get_or_none(bot_id=bot_id)
owner_id = None
if record is not None:
owner_id = record.owner_id
return owner_id
@classmethod
async def clean_owner(cls, bot_id) -> bool:
'''
:说明
清除管理员
:参数
* bot_id:机器人QQ
:返回
* bool:是否清除成功
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.owner_id = None
await record.save(update_fields=["owner_id"])
return True
@classmethod
async def get_online(cls, bot_id) -> Optional[bool]:
'''
:说明
获取机器人在线状态
:参数
* bot_id:机器人QQ
:返回
* bool:是否在线
* None:不存在
'''
record = await cls.get_or_none(bot_id=bot_id)
return None if record is None else record.online
@classmethod
async def set_nickname(cls, bot_id: int, nickname: str) -> bool:
'''
:说明
设置昵称
:参数
* bot_id:机器人QQ
* nickname:昵称
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is None:
return False
record.nickname = nickname
await record.save(update_fields=["nickname"])
return True
@classmethod
async def get_nickname(cls, bot_id: int) -> Optional[str]:
'''
:说明
获取昵称
:参数
* bot_id:机器人QQ
:返回
* str:昵称
'''
record = await cls.get_or_none(bot_id=bot_id)
return None if record is None else record.nickname
@classmethod
async def detele_bot(cls, bot_id) -> bool:
'''
:说明
删除机器人
:参数
* bot_id:机器人QQ
:返回
* bool:删除是否成功,失败则数据不存在
'''
record = await cls.get_or_none(bot_id=bot_id)
if record is not None:
await record.delete()
return True
return False
@classmethod
async def get_disconnect_bot(cls) -> list[dict]:
'''
获取离线bot列表,dict["bot_id", "last_left"]
'''
record_list = await cls.filter(online=False).values("bot_id", "last_left")
return record_list
@classmethod
async def get_all_bot(cls) -> list[dict]:
'''
获取所有数据
'''
record_list = await cls.all().values("bot_id", "owner_id", "nickname", "last_sign", "last_left", "online")
return record_list
```
#### File: plugins/say/__init__.py
```python
from nonebot import on_regex
from nonebot.adapters.cqhttp import Bot, GroupMessageEvent, MessageSegment
from nonebot.adapters.cqhttp.permission import GROUP
from nonebot.plugin import export
from nonebot.rule import to_me
from src.utils.log import logger
from . import data_source as source
export = export()
export.plugin_name = '语音说'
export.plugin_command = "@机器人+说XXX"
export.plugin_usage = '让机器人说话。'
export.default_status = True # Plugin default on/off state
export.ignore = False # Whether the plugin manager ignores this plugin
say = on_regex(pattern=r"^说", rule=to_me(), permission=GROUP, priority=5, block=True)
@say.handle()
async def _(bot: Bot, event: GroupMessageEvent):
group_id = event.group_id
_text = event.get_plaintext()
loc = _text.find("说")
text = _text[loc+1:]
log = f"Bot({bot.self_id}) | 群[{str(group_id)}]请求说话:{text}"
logger.info(log)
voice_str = await source.get_voice(text)
if voice_str is not None:
msg = MessageSegment.record(voice_str)
await say.finish(msg)
else:
await say.finish()
```
#### File: plugins/weather/get_weather.py
```python
from typing import Optional, Tuple
from httpx import AsyncClient
from src.utils.config import config as baseconfig
from src.utils.log import logger
config = baseconfig.get('weather')
apikey = config.get('api-key')
url_weather_api = config.get('url-weather')
url_geoapi = config.get('url-geoapi')
# Look up the city id
async def get_Location(city_kw: str, api_type: str = "lookup") -> dict:
async with AsyncClient() as client:
res = await client.get(
url_geoapi + api_type, params={"location": city_kw, "key": apikey}
)
return res.json()
# Fetch weather info
async def get_WeatherInfo(api_type: str, city_id: str) -> dict:
async with AsyncClient() as client:
res = await client.get(
url_weather_api + api_type, params={"location": city_id, "key": apikey}
)
return res.json()
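# Assumed response shapes, inferred from the field accesses below (this looks like the QWeather/HeFeng API,
# but the exact schema is an assumption):
#   geo "lookup"  -> {"code": "200", "location": [{"id": "...", "name": "...", ...}, ...]}
#   weather "3d"  -> {"daily": [day1, day2, day3, ...]}
#   weather "now" -> {"now": {...current conditions...}}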
async def get_City_Weather(city: str) -> Tuple[str, Optional[dict[str, str]]]:
# global city_id
city_info = await get_Location(city)
code = city_info['code']
if code != "200":
log = f"获取城市id失败,参数:{city}"
logger.debug(log)
return code, None
city_id = city_info["location"][0]["id"]
city_name = city_info["location"][0]["name"]
log = f"获取城市id成功,name:{city_name} id:{city_id}"
logger.debug(log)
# 3-day forecast
daily_info = await get_WeatherInfo("3d", city_id)
daily = daily_info["daily"]
day1 = daily[0]
day2 = daily[1]
day3 = daily[2]
# Current weather
now_info = await get_WeatherInfo("now", city_id)
now = now_info["now"]
req_data = {"city": city_name, "now": now, "day1": day1, "day2": day2, "day3": day3}
return code, req_data
```
#### File: src/utils/scheduler.py
```python
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from nonebot.log import logger
from pydantic import BaseSettings, Field
class Config(BaseSettings):
apscheduler_config: dict = Field(
default_factory=lambda: {"apscheduler.timezone": "Asia/Shanghai"})
class Config:
extra = "ignore"
plugin_config = Config()
scheduler = AsyncIOScheduler(timezone="Asia/Shanghai")
'''Global scheduler object'''
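# Usage sketch (illustrative; mirrors how src/managers/bot_manager registers its nightly cleanup job):
# from src.utils.scheduler import scheduler
#
# @scheduler.scheduled_job("cron", hour=23, minute=59)
# async def nightly_job():
#     ...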
async def start_scheduler():
if not scheduler.running:
scheduler.configure(plugin_config.apscheduler_config)
scheduler.start()
logger.opt(colors=True).info("<y>定时器模块已开启。</y>")
```
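A sketch of how a plugin could register work on this shared scheduler; `add_job` with a cron trigger is standard APScheduler API, while the job body and timing here are made up:
```python
from src.utils.scheduler import scheduler, start_scheduler

async def nightly_cleanup():
    ...  # whatever the plugin needs to do once a day

# fire every day at 03:00 (the scheduler timezone is Asia/Shanghai, see above)
scheduler.add_job(nightly_cleanup, "cron", hour=3, minute=0, id="nightly_cleanup")

# start_scheduler() is awaited once at startup, e.g. from a nonebot on_startup hook
```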
{
"source": "491-password-security/storage",
"score": 3
}
#### File: storage/models/user_models.py
```python
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy import *
from collections import UserDict
from enum import unique
import logging
from sqlalchemy import exists
from sqlalchemy.orm import backref
from sqlalchemy.sql.expression import and_, null
from app import sql
from passlib.hash import pbkdf2_sha256 as sha256
class UserModel(sql.Model):
"""
User Model Class
"""
__tablename__ = 'user'
id = sql.Column(sql.Integer, primary_key=True)
confirmedAt = sql.Column(sql.DateTime(), nullable=False)
email = sql.Column(sql.String(320), nullable=True) #max length possible
active = sql.Column(sql.SmallInteger(), nullable=False)
password = sql.Column(sql.String(100), nullable=True)
# User fields
fullName = sql.Column(sql.String(100), nullable=True)
profilePictureUrl = sql.Column(sql.String(50), nullable=True)
username = sql.Column(sql.String(50), nullable=True)
about = sql.Column(sql.String(320), nullable=True)
followers = sql.Column(ARRAY(String), nullable=True)
following = sql.Column(ARRAY(String), nullable=True)
#social
network = sql.Column(sql.String(50), nullable=True)
token = sql.Column(sql.String(320), nullable=True)
timezone = sql.Column(sql.String(50), nullable=True)
def get_user_details_as_json(self):
return {
"email": self.email,
"fullName": self.fullName,
"profilePictureUrl": self.profilePictureUrl,
"username": self.username,
"about": self.about,
"followers": self.followers,
"following": self.following,
"network": self.network,
"token": self.token,
"timezone": self.timezone
}
def update_user(self, data):
for key, value in data.items():
if(value is not None):
setattr(self, key, value)
self.save_to_db()
"""
Save user details in Database
"""
def save_to_db(self):
sql.session.add(self)
sql.session.commit()
"""
Commit changes to Database
"""
def commit_to_db(self):
sql.session.commit()
"""
    Find user by email
"""
@classmethod
def find_by_email(cls, email):
return cls.query.filter_by(email=email).first()
@classmethod
def find_by_token(cls, token):
return cls.query.filter_by(token=token).first()
"""
return all the user data in json form available in sql
"""
@classmethod
def return_all(cls):
def to_json(x):
return {
                'email': x.email,
}
return {'users': [to_json(user) for user in UserModel.query.all()]}
"""
Delete user data
"""
@classmethod
def delete_all(cls):
try:
num_rows_deleted = sql.session.query(cls).delete()
sql.session.commit()
return {'message': f'{num_rows_deleted} row(s) deleted'}
except:
return {'message': 'Something went wrong'}
class RevokedTokenModel(sql.Model):
"""
Revoked Token Model Class
"""
__tablename__ = 'revoked_token'
id = sql.Column(sql.Integer, primary_key=True)
jti = sql.Column(sql.String(120))
"""
Save Token in sql
"""
def add(self):
sql.session.add(self)
sql.session.commit()
"""
Checking that token is blacklisted
"""
@classmethod
def is_jti_blacklisted(cls, jti):
query = cls.query.filter_by(jti=jti).first()
return bool(query)
```
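A hypothetical lookup-and-update flow using the helpers above; the email and field values are placeholders, and an initialised Flask-SQLAlchemy `sql` session is assumed:
```python
user = UserModel.find_by_email("someone@example.com")
if user is not None:
    # update_user() skips None values and commits via save_to_db()
    user.update_user({"about": "Hello there", "timezone": "UTC"})
    details = user.get_user_details_as_json()
```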
#### File: storage/utility/email_ops.py
```python
import smtplib, ssl
import logging
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from mailjet_rest import Client
class Mail:
@staticmethod
def sendEmail(to, new_pass):
api_key = '<KEY>'
api_secret = '70fc15e4740bba0d771593dde77b622f'
mailjet = Client(auth=(api_key, api_secret), version='v3.1')
logging.error(new_pass)
data = {
'Messages': [
{
"From": {
"Email": "<EMAIL>",
"Name": "Veviski"
},
"To": [
{
"Email": to,
"Name": ""
}
],
"Subject": "Cryopass",
"TextPart": str(new_pass),
"HTMLPart": "<h3>Your one time pass code is: </h3>" + str(new_pass),
"CustomID": "AppGettingStartedTest"
}
]
}
result = mailjet.send.create(data=data)
logging.error(result.status_code)
logging.error(result.json())
```
#### File: storage/views/event_request.py
```python
class AddParticipantRequest:
def __init__(self, parser):
self.eventId = checkExistence(parser.eventId)
self.phoneNumber = checkExistence(parser.phoneNumber)
self.children = checkExistence(parser.children)
self.adultInformation = checkExistence(parser.adultInformation)
class RemoveParticipantRequest:
def __init__(self, parser):
self.eventId = checkExistence(parser.eventId)
class AddFavouritesRequest:
def __init__(self, parser):
self.eventId = checkExistence(parser.eventId)
class RemoveFavouritesRequest:
def __init__(self, parser):
self.eventId = checkExistence(parser.eventId)
class GradeEventRequest:
def __init__(self, parser):
self.eventId = checkExistence(parser.eventId)
self.generalRating = checkExistence(parser.generalRating)
self.locationRating = checkExistence(parser.locationRating)
self.eventRating = checkExistence(parser.eventRating)
self.comment = parser.comment
def checkExistence(obj):
if obj is None: raise TypeError
return obj
```
{
"source": "494283111/TextRecognitionDataGenerator",
"score": 3
}
#### File: TextRecognitionDataGenerator/trdg/data_generator.py
```python
import os
import random as rnd
from PIL import Image, ImageFilter
from trdg import computer_text_generator, background_generator, distorsion_generator
try:
from trdg import handwritten_text_generator
except ImportError as e:
print("Missing modules for handwritten text generation.")
class FakeTextDataGenerator(object):
@classmethod
def generate_from_tuple(cls, t):
"""
Same as generate, but takes all parameters as one tuple
"""
cls.generate(*t)
@classmethod
def generate(
cls,
index,
text,
font,
out_dir,
size,
extension,
skewing_angle,
random_skew,
blur,
random_blur,
background_type,
distorsion_type,
distorsion_orientation,
is_handwritten,
name_format,
width,
alignment,
text_color,
orientation,
space_width,
character_spacing,
margins,
fit,
output_mask,
word_split,
image_dir,
):
image = None
margin_top, margin_left, margin_bottom, margin_right = margins
horizontal_margin = margin_left + margin_right
vertical_margin = margin_top + margin_bottom
##########################
# Create picture of text #
##########################
if is_handwritten:
if orientation == 1:
raise ValueError("Vertical handwritten text is unavailable")
image, mask = handwritten_text_generator.generate(text, text_color)
else:
image, mask = computer_text_generator.generate(
text,
font,
text_color,
size,
orientation,
space_width,
character_spacing,
fit,
word_split,
)
random_angle = rnd.randint(0 - skewing_angle, skewing_angle)
rotated_img = image.rotate(
skewing_angle if not random_skew else random_angle, expand=1
)
rotated_mask = mask.rotate(
skewing_angle if not random_skew else random_angle, expand=1
)
#############################
# Apply distorsion to image #
#############################
if distorsion_type == 0:
distorted_img = rotated_img # Mind = blown
distorted_mask = rotated_mask
elif distorsion_type == 1:
distorted_img, distorted_mask = distorsion_generator.sin(
rotated_img,
rotated_mask,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2),
)
elif distorsion_type == 2:
distorted_img, distorted_mask = distorsion_generator.cos(
rotated_img,
rotated_mask,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2),
)
else:
distorted_img, distorted_mask = distorsion_generator.random(
rotated_img,
rotated_mask,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2),
)
##################################
# Resize image to desired format #
##################################
# Horizontal text
if orientation == 0:
new_width = int(
distorted_img.size[0]
* (float(size - vertical_margin) / float(distorted_img.size[1]))
)
resized_img = distorted_img.resize(
(new_width, size - vertical_margin), Image.ANTIALIAS
)
resized_mask = distorted_mask.resize((new_width, size - vertical_margin))
background_width = width if width > 0 else new_width + horizontal_margin
background_height = size
# Vertical text
elif orientation == 1:
new_height = int(
float(distorted_img.size[1])
* (float(size - horizontal_margin) / float(distorted_img.size[0]))
)
resized_img = distorted_img.resize(
(size - horizontal_margin, new_height), Image.ANTIALIAS
)
resized_mask = distorted_mask.resize(
(size - horizontal_margin, new_height), Image.ANTIALIAS
)
background_width = size
background_height = new_height + vertical_margin
else:
raise ValueError("Invalid orientation")
#############################
# Generate background image #
#############################
if background_type == 0:
background_img = background_generator.gaussian_noise(
background_height, background_width
)
elif background_type == 1:
background_img = background_generator.plain_white(
background_height, background_width
)
elif background_type == 2:
background_img = background_generator.quasicrystal(
background_height, background_width
)
else:
background_img = background_generator.image(
background_height, background_width, image_dir
)
background_mask = Image.new(
"RGB", (background_width, background_height), (0, 0, 0)
)
#############################
# Place text with alignment #
#############################
new_text_width, _ = resized_img.size
if alignment == 0 or width == -1:
background_img.paste(resized_img, (margin_left, margin_top), resized_img)
background_mask.paste(resized_mask, (margin_left, margin_top))
elif alignment == 1:
background_img.paste(
resized_img,
(int(background_width / 2 - new_text_width / 2), margin_top),
resized_img,
)
background_mask.paste(
resized_mask,
(int(background_width / 2 - new_text_width / 2), margin_top),
)
else:
background_img.paste(
resized_img,
(background_width - new_text_width - margin_right, margin_top),
resized_img,
)
background_mask.paste(
resized_mask,
(background_width - new_text_width - margin_right, margin_top),
)
##################################
# Apply gaussian blur #
##################################
gaussian_filter = ImageFilter.GaussianBlur(
radius=blur if not random_blur else rnd.randint(0, blur)
)
final_image = background_img.filter(gaussian_filter)
final_mask = background_mask.filter(gaussian_filter)
#####################################
# Generate name for resulting image #
#####################################
if name_format == 0:
image_name = "{}_{}.{}".format(text, str(index), extension)
mask_name = "{}_{}_mask.png".format(text, str(index))
elif name_format == 1:
image_name = "{}_{}.{}".format(str(index), text, extension)
mask_name = "{}_{}_mask.png".format(str(index), text)
elif name_format == 2:
image_name = "{}.{}".format(str(index), extension)
mask_name = "{}_mask.png".format(str(index))
else:
print("{} is not a valid name format. Using default.".format(name_format))
image_name = "{}_{}.{}".format(text, str(index), extension)
mask_name = "{}_{}_mask.png".format(text, str(index))
# Save the image
if out_dir is not None:
final_image.convert("RGB").save(os.path.join(out_dir, image_name))
if output_mask == 1:
final_mask.convert("RGB").save(os.path.join(out_dir, mask_name))
else:
if output_mask == 1:
return final_image.convert("RGB"), final_mask.convert("RGB")
return final_image.convert("RGB")
```
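For horizontal text, the resize step above scales the width by the same factor as the height so the aspect ratio is preserved; a small numeric check of that formula (the numbers are illustrative):
```python
# a distorted image of 400x64 px, target height 32 px, no vertical margins
old_w, old_h = 400, 64
size, vertical_margin = 32, 0

new_width = int(old_w * (float(size - vertical_margin) / float(old_h)))
assert new_width == 200  # width halves because the height halves
```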
{
"source": "494589939/csy.github.io",
"score": 2
}
#### File: csy.github.io/python/wechat.py
```python
import requests
import json
import os
import sys
# send the message via POST
def send_message(access_token, title, message):
# 定义POST请求字典
data = {
"touser": "这里填自己的用户名",
"toparty": "这里填要接收部门",
"msgtype": "text",
"agentid": 这里填部门ID,
"text": {
"content": '故障标题:' + title + '\n' + '故障详情:' + message
},
"safe": 0
}
    # serialize the dict to JSON before passing it to the WeChat server
payload = json.dumps(data)
post_message = "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={}".format(
access_token)
p = requests.post(post_message, payload)
print(p.json())
if __name__ == '__main__':
    # fetch the access_token
get_https = "https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=ww7aa25663557cd46b&corpsecret=<KEY>"
g = requests.get(get_https)
access_token = g.json()['access_token']
    title = sys.argv[1]  # first command-line argument: alert title
    message = sys.argv[2]  # second command-line argument: alert details
send_message(access_token, title, message)
```
{
"source": "496080199/dccron",
"score": 2
}
#### File: dccron/app/trade.py
```python
from .models import *
import ccxt, re
from decimal import Decimal
import traceback,shelve
def getproxy(ex):
try:
myproxy=None
proxy = shelve.open('proxy.conf', flag='r', protocol=2, writeback=False)
if 'proxy' in proxy.keys() and proxy['proxy'] != '':
proxyvalue='http://'+proxy['proxy']
myproxy={'http': proxyvalue,'https': proxyvalue}
if myproxy is not None:
ex.proxies=myproxy
finally:
proxy.close()
return ex
def exlogin(exid):
exchange = Exchange.objects.get(pk=exid)
ex = eval("ccxt." + exchange.code + "()")
ex=getproxy(ex)
ex.apiKey = exchange.apikey
ex.secret = exchange.secretkey
return ex
def writecastlog(cid, content):
castlog = Castlog.objects.create(cast_id=cid)
castlog.content = content
castlog.save()
def casttoorder(cast, exchange):
try:
symbol = re.sub('_', '/', cast.symbol)
ex = eval("ccxt." + exchange.code + "()")
if exchange.status == 0:
raise RuntimeError('交易所已被禁用,无法执行')
ex = getproxy(ex)
ex.apiKey = exchange.apikey
ex.secret = exchange.secretkey
ex.options['createMarketBuyOrderRequiresPrice'] = False
ex.options['marketBuyPrice'] = False
try:
cast = Cast.objects.get(pk=cast.id)
cost = Decimal(cast.cost)
firstsymbol = symbol.split('/')[0]
quatity = Decimal(ex.fetch_balance()[firstsymbol]['free'])
orderbook = ex.fetch_order_book(symbol=symbol)
bid = orderbook['bids'][0][0] if len(orderbook['bids']) > 0 else None
ask = orderbook['asks'][0][0] if len(orderbook['asks']) > 0 else None
averageprice = Decimal((ask + bid) / 2)
currentvalue = averageprice * quatity
if cost == 0.0 and quatity != 0.0:
cast.cost = currentvalue
cast.save()
if currentvalue > cost * Decimal(1 + (cast.sellpercent / 100)):
sellorderdata = ex.create_market_sell_order(symbol=symbol, amount=str(quatity))
if sellorderdata['info']['status'] == 'ok':
content = '定投收益已达到' + str(cast.sellpercent) + '%,成功卖出'
writecastlog(cast.id, content)
else:
content = '卖出单异常:' + sellorderdata['info']['status']
writecastlog(cast.id, content)
except:
content = '卖出单异常:' + traceback.format_exc()
writecastlog(cast.id, content)
try:
amount = Decimal(cast.amount)
buyorderdata = ex.create_market_buy_order(symbol=symbol, amount=str(amount), params={'cost': str(amount)})
if buyorderdata['info']['status'] == 'ok':
cast.cost += amount
cast.save()
content = '定投成功买入' + str(amount) + '金额的' + str(symbol.split('/')[1])
writecastlog(cast.id, content)
else:
content = '买入单异常:' + buyorderdata['info']['status']
writecastlog(cast.id, content)
except:
content = '买入单异常:' + traceback.format_exc()
writecastlog(cast.id, content)
except:
content = '初始化异常:' + traceback.format_exc()
writecastlog(cast.id, content)
def writeconditionlog(cid, content):
conditionlog = Conditionlog.objects.create(condition_id=cid)
conditionlog.content = content
conditionlog.save()
def scconditionstop(cid):
jobid = 'condition' + str(cid)
jobs = DjangoJob.objects.filter(name=jobid)
if jobs.exists():
jobs[0].delete()
def conditiontoorder(condition, exchange):
try:
symbol = re.sub('_', '/', condition.symbol)
price = condition.price
number = condition.number
ex = eval("ccxt." + exchange.code + "()")
ex = getproxy(ex)
ex.apiKey = exchange.apikey
ex.secret = exchange.secretkey
orderbook = ex.fetch_order_book(symbol=symbol)
bid = orderbook['bids'][0][0] if len(orderbook['bids']) > 0 else None
ask = orderbook['asks'][0][0] if len(orderbook['asks']) > 0 else None
averageprice = Decimal((ask + bid) / 2)
if condition.direction == 'sell' and averageprice > price:
sellorderdata = ex.create_market_sell_order(symbol=symbol, amount=str(number))
if sellorderdata['info']['status'] == 'ok':
content = '已按价格为' + str(price) + '条件下单卖出'
writeconditionlog(condition.id, content)
scconditionstop(condition.id)
else:
content = '卖出单异常'
writeconditionlog(condition.id, content)
elif condition.direction == 'buy' and averageprice < price:
buyorderdata = ex.create_market_buy_order(symbol=symbol, amount=str(number), params={'price':str(averageprice)})
if buyorderdata['info']['status'] == 'ok':
content = '已按价格为' + str(averageprice) + '条件下单买入'
writeconditionlog(condition.id, content)
scconditionstop(condition.id)
else:
content = '买入单异常'
writeconditionlog(condition.id, content)
else:
content = '未满足下单条件'
writeconditionlog(condition.id, content)
except:
content = '条件投异常' + traceback.format_exc()
writeconditionlog(condition.id, content)
```
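The take-profit check in `casttoorder` compares the current position value against the accumulated cost plus the configured percentage; a quick numeric illustration of that threshold (the values are made up):
```python
from decimal import Decimal

cost = Decimal("100")            # total spent on this cast so far
sellpercent = 10                 # configured profit target, in percent
current_value = Decimal("112")   # average price * held quantity

# same expression as in casttoorder(): sell once value exceeds cost * 1.10
should_sell = current_value > cost * Decimal(1 + (sellpercent / 100))
assert should_sell
```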
{
"source": "497022407/Shifts-manager",
"score": 3
}
#### File: Shifts-manager/scheduleSynchronizer/consistency.py
```python
from datetime import datetime, timedelta
from requests.api import get, request
from scheduleSynchronizer.debug import *
from scheduleSynchronizer.web_scraper1111.search import look
import re
# from scheduleSynchronizer.web_scraper.search import *
def add_key(key, value, dict):
if key in dict:
return 'exist'
else:
dict[key] = value
return 'added'
courses_cache = {}
def get_courses_cache():
return courses_cache
# month = {
# "Jan": 1,
# "Feb": 2,
# "Mar": 3,
# "Apr": 4,
# "May": 5,
# "Jun": 6,
# "Jul": 7,
# "Aug": 8,
# "Sep": 9,
# "Oct": 10,
# "Nov": 11,
# "Dec": 12
# }
group_dict = {
"PBL": "Puzzle Based Learning",
"CSNS": "Introduction to Computer Systems, Networks and Security",
"ITP": "Information Technology Project",
"PITS": "Introduction to Programming for Information Technology Specialists",
"OOP": "Object Oriented Programming",
"GSCS": "Grand Challenges in Computer Science",
"AI": "Artificial Intelligence",
"FCS": "Foundations of Computer Science",
"ADDS": "Algorithm Design & Data Structures",
"ADSA": "Algorithm & Data Structure Analysis",
"CS": "Computer Systems",
"CNA": "Computer Networks & Applications"
}
def parse_abbreviation(abbreviation):
if abbreviation in group_dict.keys():
return group_dict[abbreviation]
else:
return abbreviation
# input a shift and return whether it's mapped
def match(shift):
isMatched = 1
shift['groupName'] = parse_abbreviation(shift['groupName'])
# print("comparing course name: ", shift['groupName'])
if shift['groupName'] in courses_cache:
courses = courses_cache[shift['groupName']]
else:
print("looking up from course planner. course name: ",
shift['groupName'])
courses = look(shift['groupName'])
print('searching successful! number of classes found:',len(courses))
courses_cache[shift['groupName']] = courses
print(shift['groupName'], 'cached')
# courses = courses_cache[shift['groupName']]
for course in courses:
if(course.title == shift['groupName']
and course.section == shift['displayName']
and course.location == shift['notes']
and course.sdate == shift['startDateTime']
and course.edate == shift['endDateTime']):
# matched!
course.isMatched = 0
isMatched = 0
break
elif (course.sdate == shift['startDateTime']
and course.edate == shift['endDateTime']):
# incorrect location!
course.isMatched = 3
isMatched = 3
elif(course.location == shift['notes'] and isMatched != 3):
# incorrect time!
course.isMatched = 2
isMatched = 2
# print('matching result:',isMatched)
# courses_cache[shift['groupName']] = courses
return isMatched
```
#### File: Shifts-manager/scheduleSynchronizer/search.py
```python
import scraper
import requests
import re
import string
import datetime
import time
from bs4 import BeautifulSoup
from dateutil.parser import parse
class Session:
def __init__(self, sdate, edate, days, time, location):
self.sdate = sdate
self.edate = edate
self.days = days
self.time = time
self.location = location
def display(self):
print(self.sdate)
print(self.edate)
print(self.days)
print(self.time)
print(self.location)
# def change(self):
# self.dates.split(" - ")
# print(self.dates,type(self.dates))
# 4110 semester 1 4120 semester 2
# https://access.adelaide.edu.au/courses/search.asp?year=2021&m=r&title=computer+science&subject=&catalogue=&action=Search&term=&career=&campus=&class=&sort=
# we search use course title (or part of title)and catalogue number (e.g. 1001, 7095) to
# get to the returning list page, if there is only one returned course in the list, redirct to the course page
def look(title):
#need to change space in course title to +
name = title.replace(" ","+")
year = time.localtime().tm_year
#trimester 1 2 3 are 4133 4136 4139
# if semester == 2:
# term = 4120
# elif semester == 1:
# term = 4110
# elif semester == 3:#tri 1
# term = 4133
# elif semester == 4:#tri 2
# term = 4136
# elif semester == 5:#tri 3
# term = 4139
# elif semester == 6:#term 1
# term = 4142
# elif semester == 7:#term 2
# term = 4144
# elif semester == 8:#term 3
# term = 4146
# elif semester == 9:#term 4
# term = 4148
# elif semester == 10:#summer
# term = 4105
# elif semester == 11:#winter
# term = 4115
# this is the middle step page
value = "https://access.adelaide.edu.au/courses/search.asp?year="+str(year)+"&m=r&title="+name+"&subject=&catalogue=&action=Search&term=&career=&campus=&class=&sort="
try:
requests.adapters.DEFAULT_RETRIES = 10
s = requests.session()
s.keep_alive = False
html = requests.get(value).text
key = re.findall('"odd"><a href="details.asp?(.*?)</a>',html,re.S)
keys = re.findall('"even"><a href="details.asp?(.*?)</a>',html,re.S)
urlset = []
for x in key:
us = x.split("\">")
if us[1] == title:
urlset.append(us[0])
for ea in keys:
us = ea.split("\">")
if us[1] == title:
urlset.append(us[0])
ses = []
        # read each course url collected from the returned class table
for x in urlset:
ur = x.replace("amp;","")
url = "https://access.adelaide.edu.au/courses/details.asp"+ur
data = scraper.download(url)# returned resultset
# print(url)
for y in range(2,len(data)):
st = data[y].text.encode("ascii")
ses.append(st)#append each line in the course details table in ses list
#adjust ses list
session = []
for each in ses:
#change it to list of lists of each line in session
session.append(each.split("\n"))
#delete unnecessary elements
ls = []
for each in session:
if len(each)==6 or len(each) == 10:
if each[1] != 'Class Nbr':
ls.append(each)
result = []
for each in ls:
if len(each) == 10:
ll = each[5].split(" - ")
tt = []
for x in ll:
x = x+" "+str(year)
d = parse(x)
tt.append(d)
se = Session(tt[0],tt[1],each[6],each[7],each[8])
else:
ll = each[1].split(" - ")
tt = []
for x in ll:
x = x+" "+str(year)
d = parse(x)
tt.append(d)
se = Session(tt[0],tt[1],each[2],each[3],each[4])
result.append(se)
return result#list of session objects
except Exception as e:
raise
```
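A hypothetical call to `look` (it scrapes Course Planner live, so it needs network access); the course title is just an example:
```python
sessions = look("Artificial Intelligence")
for s in sessions:
    s.display()  # prints start/end dates, days, time and location for each class
```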
{
"source": "4979/courses",
"score": 3
}
#### File: clustering-data/python/evaluation.py
```python
from math import log, sqrt
def purity(groundtruthAssignment, algorithmAssignment):
purity = 0
# TODO
# Compute the purity
return purity
def NMI(groundtruthAssignment, algorithmAssignment):
NMI = 0
# TODO
# Compute the NMI
return NMI
```
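One way the two TODO stubs could be filled in, using the textbook definitions (cluster purity by majority label; NMI normalised by the geometric mean of the two entropies). This is a sketch, not the course's reference solution:
```python
from collections import Counter
from math import log, sqrt

def purity(groundtruthAssignment, algorithmAssignment):
    N = len(groundtruthAssignment)
    clusters = {}
    for truth, assigned in zip(groundtruthAssignment, algorithmAssignment):
        clusters.setdefault(assigned, Counter())[truth] += 1
    # each cluster contributes the count of its majority ground-truth label
    return sum(max(c.values()) for c in clusters.values()) / float(N)

def NMI(groundtruthAssignment, algorithmAssignment):
    N = len(groundtruthAssignment)
    truth_counts = Counter(groundtruthAssignment)
    cluster_counts = Counter(algorithmAssignment)
    joint = Counter(zip(groundtruthAssignment, algorithmAssignment))
    # mutual information I(C; K)
    mi = 0.0
    for (t, c), n in joint.items():
        mi += (n / float(N)) * log(n * N / float(truth_counts[t] * cluster_counts[c]))
    # entropies H(C) and H(K)
    h_truth = -sum((n / float(N)) * log(n / float(N)) for n in truth_counts.values())
    h_cluster = -sum((n / float(N)) * log(n / float(N)) for n in cluster_counts.values())
    return mi / sqrt(h_truth * h_cluster) if h_truth > 0 and h_cluster > 0 else 0.0
```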
#### File: clustering-data/python/k_means.py
```python
from utils import *
def computeSSE(data, centers, clusterID):
sse = 0
nData = len(data)
for i in range(nData):
c = clusterID[i]
sse += squaredDistance(data[i], centers[c])
return sse
def updateClusterID(data, centers):
nData = len(data)
clusterID = [0] * nData
# TODO
    # assign the closest center to each data point
return clusterID
# K: number of clusters
def updateCenters(data, clusterID, K):
nDim = len(data[0])
centers = [[0] * nDim for i in range(K)]
# TODO recompute the centers based on current clustering assignment
# If a cluster doesn't have any data points, in this homework, leave it to ALL 0s
return centers
def kmeans(data, centers, maxIter = 100, tol = 1e-6):
nData = len(data)
if nData == 0:
return [];
K = len(centers)
clusterID = [0] * nData
if K >= nData:
for i in range(nData):
clusterID[i] = i
return clusterID
nDim = len(data[0])
lastDistance = 1e100
for iter in range(maxIter):
clusterID = updateClusterID(data, centers)
centers = updateCenters(data, clusterID, K)
curDistance = computeSSE(data, centers, clusterID)
if lastDistance - curDistance < tol or (lastDistance - curDistance)/lastDistance < tol:
            print("# of iterations:", iter)
            print("SSE = ", curDistance)
return clusterID
lastDistance = curDistance
    print("# of iterations:", iter)
    print("SSE = ", curDistance)
return clusterID
```
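A possible completion of the two stubs above, kept consistent with the helpers already in scope (`squaredDistance` comes in via `from utils import *`); empty clusters keep their all-zero centre, as the comment requests:
```python
def updateClusterID(data, centers):
    clusterID = []
    for point in data:
        # pick the index of the nearest center under squared Euclidean distance
        distances = [squaredDistance(point, c) for c in centers]
        clusterID.append(distances.index(min(distances)))
    return clusterID

def updateCenters(data, clusterID, K):
    nDim = len(data[0])
    centers = [[0] * nDim for _ in range(K)]
    counts = [0] * K
    for point, c in zip(data, clusterID):
        counts[c] += 1
        for d in range(nDim):
            centers[c][d] += point[d]
    for c in range(K):
        if counts[c] > 0:
            centers[c] = [v / float(counts[c]) for v in centers[c]]
    return centers
```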
{
"source": "498888197/simpleubjson",
"score": 2
}
#### File: simpleubjson/simpleubjson/__init__.py
```python
NOOP = type('NoOp', (object,), {'__slots__': ()})()
_EOS = type('EndOfStream', (object,), {'__slots__': ()})
#: EOS (end of stream) sentinel value. Draft-8 only.
EOS = _EOS()
EOS_A = type('EndOfArrayStream', (_EOS,), {'__slots__': ()})()
EOS_O = type('EndOfObjectStream', (_EOS,), {'__slots__': ()})()
del _EOS
import warnings
from .version import __version__
from .draft8 import Draft8Decoder, Draft8Encoder
from .draft9 import Draft9Decoder, Draft9Encoder
from .tools.inspect import pprint
from .exceptions import DecodeError, EncodeError
__all__ = ['decode', 'encode', 'pprint', 'NOOP', 'DecodeError', 'EncodeError',
'__version__']
_draft8_decoder = Draft8Decoder
_draft8_encoder = Draft8Encoder
_draft9_decoder = Draft9Decoder
_draft9_encoder = Draft9Encoder
warnings.simplefilter('once')
_DRAFT8_DEPRECATED = ('Draft-8 specification is too old and deprecated.'
' Please upgrade your data to fit Draft-9 spec.')
def decode(data, allow_noop=False, spec='draft9'):
"""Decodes input stream of UBJSON data to Python object.
:param data: `.read([size])`-able object or source string.
:param allow_noop: Allow to emit :const:`~simpleubjson.NOOP` values for
unsized arrays and objects.
:type allow_noop: bool
:param spec: UBJSON specification. Supported Draft-8 and Draft-9
specifications by ``draft-8`` or ``draft-9`` keys.
:type spec: str
:return: Decoded Python object. See mapping table below.
"""
if spec.lower() in ['draft8', 'draft-8']:
warnings.warn(_DRAFT8_DEPRECATED, DeprecationWarning)
return _draft8_decoder(data, allow_noop).decode_next()
elif spec.lower() in ['draft9', 'draft-9']:
return _draft9_decoder(data, allow_noop).decode_next()
else:
raise ValueError('Unknown or unsupported specification %s' % spec)
def encode(data, output=None, default=None, spec='draft-9'):
"""Encodes Python object to Universal Binary JSON data.
:param data: Python object.
:param output: `.write([data])`-able object. If omitted result would be
returned instead of written into.
:param default: Callable object that would be used if there is no handlers
matched for Python data type.
Takes encodable value as single argument and must return
valid UBJSON encodable value.
:param spec: UBJSON specification. Supported Draft-8 and Draft-9
specifications by ``draft-8`` or ``draft-9`` keys.
:type spec: str
:return: Encoded Python object. See mapping table below.
If `output` param is specified, all data would be written into it
by chunks and None will be returned.
"""
if spec.lower() in ['draft8', 'draft-8']:
warnings.warn(_DRAFT8_DEPRECATED, DeprecationWarning)
res = _draft8_encoder(default).encode_next(data)
elif spec.lower() in ['draft9', 'draft-9']:
res = _draft9_encoder(default).encode_next(data)
else:
raise ValueError('Unknown or unsupported specification %s' % spec)
if output:
output.write(res)
else:
return res
```
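A minimal round trip through the Draft-9 path exposed above; the sample payload is arbitrary, and depending on the decoder containers may come back as lazy generators rather than plain lists/dicts:
```python
import simpleubjson

payload = simpleubjson.encode({'name': 'spam', 'count': 3})  # UBJSON Draft-9 bytes
restored = simpleubjson.decode(payload)                      # back to a Python object
simpleubjson.pprint(payload)                                 # human-readable dump of the stream
```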
{
"source": "499602D2/tg-launchbot",
"score": 3
}
#### File: tg-launchbot/tg-launchbot/utils.py
```python
import datetime
import time
import logging
import flag
import iso3166
from hashlib import sha1
def retry_after(retry_after_secs):
"""
Sleep at most 5 seconds during Telegram rate-limits
"""
if retry_after_secs > 30:
time.sleep(30)
else:
time.sleep(retry_after_secs + 0.15)
def anonymize_id(chat: str) -> str:
'''
For pseudo-anonymizing chat IDs, a truncated, unsalted SHA-1 hash
is returned for use in logging.
Keyword arguments:
chat (str): chat ID to anonymize
Returns:
chat (str): the anonymized chat ID
'''
return chat
#return sha1(str(chat).encode('utf-8')).hexdigest()[0:6]
def reconstruct_link_for_markdown(link: str) -> str:
'''
Telegram's MarkdownV2 requires some special handling, so
parse the link here into a compatible format.
Keyword arguments:
link (str): link to reconstruct for Markdown
Returns:
link_reconstruct (str): the reconstructed link
'''
link_reconstruct, char_set = '', (')', '\\')
for char in link:
if char in char_set:
link_reconstruct += f'\\{char}'
else:
link_reconstruct += char
return link_reconstruct
def reconstruct_message_for_markdown(message: str) -> str:
'''
Performs effectively the same functions as reconstruct_link, but
is intended to be used for message body text.
Keyword arguments:
message (str): message to escape for Markdown
Returns:
message_reconstruct (str): the escaped message
'''
message_reconstruct = ''
char_set = ('[', ']', '(', ')', '~', '>', '#', '+', '-', '=', '|', '{', '}', '.', '!')
for char in message:
if char in char_set:
message_reconstruct += f'\\{char}'
else:
message_reconstruct += char
return message_reconstruct
def short_monospaced_text(text: str) -> str:
'''
Telegram has extremely wide spaces for the monospaced font. This function
helps eliminate those wide spaces by replacing them with "regular" spaces.
Keyword arguments:
text (str): text to monospace in a shortened format
Returns:
text (str): monospaced text
'''
return ' '.join("`{}`".format(word) for word in text.split(' '))
def map_country_code_to_flag(country_code: str) -> str:
'''
Maps a country code to a corresponding emoji flag: truly modern.
The function returns a blank, white flag if the country code
doesn't exist in the iso3166 database.
Keyword arguments:
country_code (str): country code to return the flag for
Returns:
emoji_flag (str): the flag for the country code
'''
if len(country_code) == 3:
try:
# try converting alpha-3 to alpha-2
alpha2 = iso3166.countries_by_alpha3[country_code].alpha2
except KeyError:
return "🏳"
else:
# some codes may already be in alpha2
alpha2 = country_code
# emoji flag corresponding to alpha-2 code
emoji_flag = flag.flag(alpha2)
# avoid sending some non-existent dynamically constructed garbage
if len(emoji_flag) == 2:
return emoji_flag
logging.warning(f"Got non-existent country flag: alpha3={country_code} alpha2={alpha2}")
return "🏳"
def suffixed_readable_int(number: int) -> str:
'''
    Generates a readable ordinal string from an integer (e.g. 'first', '22nd').
'''
if number < 0:
return str(number)
if number < 10:
suffixed_number = {
0: 'zeroth',
1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth',
6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth'}[number]
return suffixed_number
try:
if number in (11, 12, 13):
suffix = 'th'
else:
suffix = {1: 'st', 2: 'nd', 3: 'rd'}[int(str(number)[-1])]
return f'{number}{suffix}'
except KeyError:
return f'{number}th'
def timestamp_to_unix(timestamp: str) -> int:
'''
Parses a LL2 timestamp from its format into a unix timestamp,
i.e. seconds since the unix epoch.
Keyword arguments:
timestamp (str): timestamp in the format used by the LL2 API
Returns:
unix_timestamp (int): unix timestamp corresponding to the above timestamp
'''
# convert to a datetime object from the custom format, ex. 2020-10-18T12:25:00Z
utc_dt = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S%fZ')
# convert UTC datetime to integer seconds since the unix epoch, return
return int((utc_dt - datetime.datetime(1970, 1, 1)).total_seconds())
def timestamp_to_legible_date_string(timestamp: int, use_utc: bool) -> str:
'''
Converts a unix timestamp to a pretty date string, in the form
of MM dd+suffix, ex. "February 13th".
Keyword arguments:
timestamp (int): timestamp to convert to a date string
Returns:
date_str (str): a pretty date string
'''
# convert unix timestamp to a datetime object
if use_utc:
date_object = datetime.datetime.utcfromtimestamp(timestamp)
else:
date_object = datetime.datetime.fromtimestamp(timestamp)
# map months to month names
month_map = {
1: 'January', 2: 'February', 3: 'March', 4: 'April',
5: 'May', 6: 'June', 7: 'July', 8: 'August',
9: 'September', 10: 'October', 11: 'November', 12: 'December'}
try:
if int(date_object.day) in (11, 12, 13):
suffix = 'th'
else:
suffix = {1: 'st', 2: 'nd', 3: 'rd'}[int(str(date_object.day)[-1])]
except KeyError:
suffix = 'th'
return f'{month_map[date_object.month]} {date_object.day}{suffix}'
def time_delta_to_legible_eta(time_delta: int, full_accuracy: bool) -> str:
'''
This is a tiny helper function, used to convert integer time deltas
(i.e. second deltas) to a legible ETA, where the largest unit of time
is measured in days.
Keyword arguments:
time_delta (int): time delta in seconds to convert
full_accuracy (bool): whether to use triple precision or not
(in this context, e.g. dd:mm:ss vs. dd:mm)
Returns:
pretty_eta (str): the prettily formatted, readable ETA string
'''
    # convert time delta to a semi-readable format: {days, hh:mm:ss}
eta_str = "{}".format(str(datetime.timedelta(seconds=time_delta)))
# parse into a "pretty" string. If ',' in string, it's more than 24 hours.
if ',' in eta_str:
day_str = eta_str.split(',')[0]
hours = int(eta_str.split(',')[1].split(':')[0])
mins = int(eta_str.split(',')[1].split(':')[1])
if hours > 0 or full_accuracy:
pretty_eta = f'{day_str}{f", {hours} hour"}'
if hours != 1:
pretty_eta += 's'
if full_accuracy:
pretty_eta += f', {mins} minute{"s" if mins != 1 else ""}'
else:
if mins != 0 or full_accuracy:
pretty_eta = f'{day_str}{f", {mins} minute"}'
if mins != 1:
pretty_eta += 's'
else:
pretty_eta = f'{day_str}'
else:
# split eta_string into hours, minutes, and seconds -> convert to integers
hhmmss_split = eta_str.split(':')
hours, mins, secs = (
int(hhmmss_split[0]),
int(hhmmss_split[1]),
int(float(hhmmss_split[2]))
)
if hours > 0:
pretty_eta = f'{hours} hour{"s" if hours > 1 else ""}'
pretty_eta += f', {mins} minute{"s" if mins != 1 else ""}'
if full_accuracy:
pretty_eta += f', {secs} second{"s" if secs != 1 else ""}'
else:
if mins > 0:
pretty_eta = f'{mins} minute{"s" if mins != 1 else ""}'
pretty_eta += f', {secs} second{"s" if secs != 1 else ""}'
else:
if secs > 0:
pretty_eta = f'{secs} second{"s" if secs != 1 else ""}'
else:
pretty_eta = 'just now'
return pretty_eta
```
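A few illustrative calls to the helpers above, assuming they are imported from this module and using the same timestamp the `timestamp_to_unix` docstring mentions:
```python
unix_t = timestamp_to_unix('2020-10-18T12:25:00Z')                      # seconds since the epoch
date_s = timestamp_to_legible_date_string(unix_t, use_utc=True)         # 'October 18th'
eta = time_delta_to_legible_eta(3 * 86400 + 3900, full_accuracy=False)  # '3 days, 1 hour'
nth = suffixed_readable_int(22)                                         # '22nd'
```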