{
"source": "JDongian/discordbw",
"score": 3
}
|
#### File: JDongian/discordbw/tl.py
```python
from IPython import embed
import requests
import re
from bs4 import BeautifulSoup
from urllib.parse import quote
BASE_URL = "http://www.teamliquid.net"
SEARCH_URL = BASE_URL + "/tlpd/maps/index.php?section=korean&tabulator_page=1&tabulator_order_col=default&tabulator_search={query}"
HEADERS = {
'Host': 'www.teamliquid.net',
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8',
'Cookie': '__cfduid=df74cf2ecffe2b487ad5d80d768f6212a1509307992; SID=6hjurn007m6pere60e7a6tt0v3',
}
def search(query):
print(SEARCH_URL.format(query=query))
return requests.get(SEARCH_URL.format(query=query))
def _parse_winrates(html):
"""
<td><strong>TvZ</strong>:</td>
<td>148-139</td>
<td>(51.6%)</td>
<td>[ <a href="/tlpd/korean/maps/237_Fighting_Spirit/games/TvZ">Games</a> ]</td>
<td><strong>ZvP</strong>:</td>
<td>134-117</td>
<td>(53.4%)</td>
<td>[ <a href="/tlpd/korean/maps/237_Fighting_Spirit/games/ZvP">Games</a> ]</td>
<td><strong>PvT</strong>:</td>
<td>143-137</td>
<td>(51.1%)</td>
<td>[ <a href="/tlpd/korean/maps/237_Fighting_Spirit/games/TvP">Games</a> ]</td>
"""
soup = BeautifulSoup(html, 'html.parser')
rows = [row.contents[0] for row in soup.select(".roundcont table td")]
tvz_games, tvz_wr = rows[1], rows[2]
zvp_games, zvp_wr = rows[5], rows[6]
pvt_games, pvt_wr = rows[9], rows[10]
return {'TvZ': "{} {}".format(tvz_games, tvz_wr),
'ZvP': "{} {}".format(zvp_games, zvp_wr),
'PvT': "{} {}".format(pvt_games, pvt_wr),
'summary': "TvZ: {} {}\n"
"ZvP: {} {}\n"
"PvT: {} {}".format(tvz_games, tvz_wr,
zvp_games, zvp_wr,
pvt_games, pvt_wr)}
def _parse_image(html):
soup = BeautifulSoup(html, 'html.parser')
img_path = soup.select(".roundcont img")[0].attrs['src'].strip()
return BASE_URL + quote(img_path)
def parse_map_link(link):
"""Return a dictionary containing extracted map information.
"""
html = requests.get(link, headers=HEADERS).content
return {'link': link,
'image_link': _parse_image(html),
'win_rates': _parse_winrates(html)}
def get_map_links(html):
    maps = re.findall(r"/tlpd/korean/maps/\d+_\w+", str(html))
return [BASE_URL + m for m in maps]
def get_map_stats(query):
html = search(query).content
print(html[:100])
first_map = get_map_links(html)[0]
result = parse_map_link(first_map)
print(result)
return result
if __name__ == "__main__":
result = get_map_stats("fighting spirit")
```
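For reference, a hedged sketch of the dictionary shape that `get_map_stats` returns, based on `parse_map_link` and `_parse_winrates` above; the URLs and numbers are illustrative only (the figures come from the docstring's sample HTML).

```python
# Hypothetical result shape, not captured output: values are filled in from the
# sample HTML in the _parse_winrates docstring purely for illustration.
example_result = {
    'link': 'http://www.teamliquid.net/tlpd/korean/maps/237_Fighting_Spirit',
    'image_link': 'http://www.teamliquid.net/images/fighting_spirit.jpg',  # whatever _parse_image finds
    'win_rates': {
        'TvZ': '148-139 (51.6%)',
        'ZvP': '134-117 (53.4%)',
        'PvT': '143-137 (51.1%)',
        'summary': 'TvZ: 148-139 (51.6%)\n'
                   'ZvP: 134-117 (53.4%)\n'
                   'PvT: 143-137 (51.1%)',
    },
}
```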
|
{
"source": "JDongian/revai-python-sdk",
"score": 3
}
|
#### File: models/asynchronous/transcript.py
```python
class Transcript:
def __init__(self, monologues):
"""
:param monologues: list of monologues included in output
"""
self.monologues = monologues
def __eq__(self, other):
"""Override default equality operator"""
if isinstance(other, self.__class__):
return all(a == b for a, b in zip(self.monologues, other.monologues))
return False
@classmethod
def from_json(cls, json):
"""Alternate constructor used for parsing json"""
return cls([Monologue.from_json(monologue) for monologue in json.get('monologues', [])])
class Monologue:
def __init__(self, speaker, elements):
"""
:param speaker: speaker identified for this monologue
:param elements: list of elements spoken in this monologue
"""
self.speaker = speaker
self.elements = elements
def __eq__(self, other):
"""Override default equality operator"""
if isinstance(other, self.__class__):
return all(a == b for a, b in zip(self.elements, other.elements)) \
and self.speaker == other.speaker
return False
@classmethod
def from_json(cls, json):
"""Alternate constructor used for parsing json"""
return cls(
json['speaker'],
[Element.from_json(element) for element in json.get('elements', [])])
class Element:
def __init__(self, type_, value, timestamp, end_timestamp, confidence):
"""
:param type_: type of element: text, punct, or unknown
:param value: value of the element
:param timestamp: time at which this element starts in the audio
:param end_timestamp: time at which this element ends in the audio
:param confidence: confidence in this output
"""
self.type_ = type_
self.value = value
self.timestamp = timestamp
self.end_timestamp = end_timestamp
self.confidence = confidence
def __eq__(self, other):
"""Override default equality operator"""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
@classmethod
def from_json(cls, json):
"""Alternate constructor used for parsing json"""
return cls(
json['type'],
json['value'],
json.get('ts'),
json.get('end_ts'),
json.get('confidence'))
```
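A minimal usage sketch for the classes above, assuming a dict shaped like the JSON that `from_json` expects (keys `monologues`, `speaker`, `elements`, `type`, `value`, `ts`, `end_ts`, `confidence`); it is not part of the SDK itself.

```python
# Hypothetical usage of Transcript.from_json with a hand-written sample payload.
sample_json = {
    "monologues": [
        {
            "speaker": 0,
            "elements": [
                {"type": "text", "value": "Hello", "ts": 0.5, "end_ts": 0.9, "confidence": 0.97},
                {"type": "punct", "value": "."},  # missing ts/end_ts/confidence become None
            ],
        }
    ]
}

transcript = Transcript.from_json(sample_json)
assert transcript.monologues[0].speaker == 0
assert transcript.monologues[0].elements[0].value == "Hello"
```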
#### File: rev_ai/models/customvocabulary.py
```python
class CustomVocabulary:
"""CustomVocabulary object to provide a clean way
to create custom vocabularies. CustomVocabulary objects
can be used in the various api clients."""
def __init__(self, phrases):
"""Constructor
:param phrases: list of strings of custom phrases to be recognized in
submitted audio
"""
self.phrases = [phrase for phrase in phrases]
def to_dict(self):
"""Returns the raw form of the custom vocabularies as the api
expects them"""
return {'phrases': self.phrases}
```
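A short, hedged usage example of `CustomVocabulary`; the phrases are placeholders.

```python
# Hypothetical usage: wrap a list of phrases and convert to the raw API form.
vocab = CustomVocabulary(["word error rate", "beam search"])
print(vocab.to_dict())  # {'phrases': ['word error rate', 'beam search']}
```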
#### File: revai-python-sdk/tests/test_utils.py
```python
from src.rev_ai.utils import _process_vocabularies
from src.rev_ai.models import CustomVocabulary
phrases = ["<NAME>", "<NAME>"]
other_phrases = ['<NAME>']
class TestUtils:
def test_process_vocabularies_with_custom_vocab_dict(self):
customvocabs = [{'phrases': phrases}]
processed_vocabs = _process_vocabularies(customvocabs)
assert processed_vocabs == [{'phrases': phrases}]
def test_process_vocabularies_with_CustomVocabulary_instance(self):
customvocabs = [CustomVocabulary(phrases)]
processed_vocabs = _process_vocabularies(customvocabs)
assert processed_vocabs == [{'phrases': phrases}]
def test_process_vocabularies_with_mixed_input(self):
customvocabs = [CustomVocabulary(phrases), {'phrases': other_phrases}]
processed_vocabs = _process_vocabularies(customvocabs)
assert processed_vocabs == [{'phrases': phrases}, {'phrases': other_phrases}]
def test_process_vocabularies_with_empty_list(self):
processed_vocabs = _process_vocabularies([])
assert processed_vocabs == []
```
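For context, a minimal implementation of `_process_vocabularies` consistent with the tests above; this is an assumption inferred from the expected behavior, not the SDK's actual source: plain dicts pass through unchanged and `CustomVocabulary` instances are converted with `to_dict()`.

```python
# Hypothetical sketch of src/rev_ai/utils._process_vocabularies, inferred from
# the tests above; the real SDK implementation may differ.
from src.rev_ai.models import CustomVocabulary

def _process_vocabularies(custom_vocabularies):
    return [
        vocab.to_dict() if isinstance(vocab, CustomVocabulary) else vocab
        for vocab in custom_vocabularies
    ]
```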
|
{
"source": "JDongian/rpi-setup",
"score": 3
}
|
#### File: rpi-setup/rpi/bt.py
```python
import bluetooth, os, time, sys, threading
# The in directory for new pcap files
PCAP_DIR = "/tmp/pcaps"
GPSPATH = '/tmp/gpsfifo'
SERVICE_NAME = "EyeOfTechnology"
LOGFILE = "/var/log/iot.log"
is_running = True
def _curr_time():
return time.strftime("%Y-%m-%d %H:%M:%S")
def _format_log(logstring):
return _curr_time() + ": " + logstring + "\n"
"""
def bt_loop(ld):
'''
Connects to a device and then transmits pcaps.
'''
ld.write(_format_log("Staring service"))
sock=bluetooth.BluetoothSocket(bluetooth.RFCOMM)
ld.write(_format_log("Got bluetooth socket"))
# All the services with this name should be fine
service_desc = get_connection(ld)
# Getting service information
port = service_desc['port']
target_address = service_desc['host']
# Connecting to the device
sock.connect((target_address, port))
ld.write(_format_log("Connected to android device"))
while True:
# Loop through the in directory and send over files
time.sleep(2)
files = os.listdir(PCAP_DIR)
for f in files:
fd = open(PCAP_DIR + '/' + f, 'rb')
temp = fd.read()
sock.send(temp)
ld.write(_format_log("Sending " + f))
fd.close()
os.remove(PCAP_DIR + "/" + f)
"""
"""
def receive_loop(ld):
ld.write(_format_log("Staring service"))
sock=bluetooth.BluetoothSocket(bluetooth.RFCOMM)
ld.write(_format_log("Got bluetooth socket"))
# All the services with this name should be fine
service_desc = get_connection(ld)
# Getting service information
port = service_desc['port']
target_address = service_desc['host']
# Connecting to the device
sock.connect((target_address, port))
ld.write(_format_log("Connected to android device"))
while True:
time.sleep(2)
print "Getting data"
data = sock.recv(1024)
print "Data: " + data
"""
def send_data(ld, sock):
global is_running
while is_running:
try:
# Loop through the in directory and over files
time.sleep(2)
files = os.listdir(PCAP_DIR)
for f in files:
fn, fe = os.path.splitext(f)
if fe == ".pcap":
fd = open(PCAP_DIR + '/' + f, 'rb')
temp = fd.read()
sock.send(str(len(temp)).zfill(8))
sock.sendall(temp)
#ld.write(_format_log("Sending " + f))
fd.close()
os.remove(PCAP_DIR + "/" + f)
except Exception as e:
is_running = False
#ld.write(_format_log(str(e)))
#ld.write(_format_log("Send thread stopped"))
def receive_data(ld, sock):
global is_running
while is_running:
try:
time.sleep(7)
data = sock.recv(200)
            with open(GPSPATH, 'w') as fd:
fd.write(data + ";\n")
except Exception as e:
is_running = False
#ld.write(_format_log(str(e)))
#ld.write(_format_log("Receive thread stopped"))
def connect_bluetooth(ld):
socket = get_bluetooth_socket(ld)
# any service with the name should be fine
service = get_bluetooth_services(ld, SERVICE_NAME)[0]
socket.connect((service['host'], service['port']))
#ld.write(_format_log("Connected to android device"))
return socket
def get_bluetooth_services(ld, name):
services = []
while len(services) < 1:
try:
# Search for the service
services = bluetooth.find_service(name=name)
except bluetooth.btcommon.BluetoothError as e:
error_msg = str(e)
#if not error_msg == "error accessing bluetooth device":
#ld.write(_format_log(str(e)))
return services
def get_bluetooth_socket(ld):
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
#ld.write(_format_log("Got bluetooth socket"))
return sock
def setup_logs(path):
if os.path.isfile(path):
return open(path, 'a', 0)
else:
return open(path, 'w', 0)
def start_threads(ld, sock):
sock.setblocking(True)
s = threading.Thread(target=send_data, args=(ld, sock))
r = threading.Thread(target=receive_data, args=(ld, sock))
s.start()
r.start()
return s, r
def handle_exception(ld, e, sock):
    global is_running  # without this, the assignments below create a local and never stop the worker threads
    is_running = False
    if sock is not None:
        sock.close()
    #ld.write(_format_log(str(e)))
    #ld.write(_format_log("Out of send and receive threads"))
    is_running = True
    #ld.write(_format_log("Restarting service"))
if __name__=="__main__":
#ld = setup_logs(LOGFILE)
#ld.write(_format_log("Starting service"))
ld = None
while True:
socket = None
is_running = True
try:
socket = connect_bluetooth(ld)
s, r = start_threads(ld, socket)
s.join()
r.join()
except Exception as e:
handle_exception(ld, e, socket)
```
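`send_data` frames each pcap as an 8-character, zero-padded ASCII length followed by the raw file bytes. The matching receiver lives on the Android side and is not part of this repo; below is a hedged Python sketch of how a receive loop could decode that framing, offered purely as an assumption about the protocol.

```python
# Hypothetical receiver for the length-prefixed framing used by send_data();
# this is not code from the repository.
def recv_exact(sock, n):
    """Read exactly n bytes from a blocking socket."""
    chunks, remaining = [], n
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:
            raise ConnectionError("socket closed mid-frame")
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)

def receive_pcap(sock):
    length = int(recv_exact(sock, 8).decode())  # e.g. b"00001024" -> 1024
    return recv_exact(sock, length)             # the raw pcap payload
```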
|
{
"source": "JDongian/speech2doc",
"score": 3
}
|
#### File: JDongian/speech2doc/test.py
```python
from argparse import ArgumentParser
import re
import os
import json
import logging
from time import sleep
from rev_ai.apiclient import RevAiAPIClient
from rev_ai.models.job_status import JobStatus
import jinja2
import pypandoc
API_KEY = os.environ.get('REVAI_API_KEY')
def _init_args():
parser = ArgumentParser(description="Convert audio to docx transcript.")
parser.add_argument(
'input',
type=str,
help="Provide a URI to transcribe."
)
parser.add_argument(
'-o', metavar="output",
type=str,
default="output.doc",
        help="Provide a file to write the output to."
)
parser.add_argument(
'--api_key',
type=str,
default=API_KEY,
help="Provide Rev.ai API key."
)
args = parser.parse_args()
return args
def wait_for(client, job):
status = JobStatus.IN_PROGRESS
while status == JobStatus.IN_PROGRESS:
sleep(7)
status = client.get_job_details(job.id).status
def _main(args):
# create your client
client = RevAiAPIClient(args.api_key)
# or send a link to the file you want transcribed
job = client.submit_job_url(args.input)
wait_for(client, job)
transcript = client.get_transcript_json(job.id)
with open('transcript.json', 'w') as fp:
json.dump(transcript, fp)
render_md('transcript.json', 'transcript.md')
produce_docx('transcript.md', args.o)
def _main2(key, job_id):
# create your client
client = RevAiAPIClient(key)
transcript = client.get_transcript_json(job_id)
with open('transcript.json', 'w') as fp:
json.dump(transcript, fp)
def _escape_latex(text):
    return re.sub(r"(?=_|&|{|}|%|#|\$)", r"\\", text)
def render_md(f_in, f_out):
with open(f_in, 'r') as fp:
transcript = json.load(fp)
monologues = transcript['monologues']
texts = [
(f"<b>Speaker {m['speaker']}:</b>\t"
f"{''.join(e['value'] for e in m['elements'])}")
for m in monologues
]
md_jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.abspath('.'))
)
template = md_jinja_env.get_template('template.md')
result = template.render(
texts=texts
)
with open(f_out, 'w') as f:
f.write(result)
def render_latex(f_in, f_out):
"""Unused because latex->docx is difficult.
"""
with open(f_in, 'r') as fp:
transcript = json.load(fp)
monologues = transcript['monologues']
speakers = [m['speaker'] for m in monologues]
texts = [
_escape_latex(''.join(e['value'] for e in m['elements']))
for m in monologues
]
latex_jinja_env = jinja2.Environment(
        block_start_string=r'\BLOCK{',
        block_end_string='}',
        variable_start_string=r'\VAR{',
        variable_end_string='}',
        comment_start_string=r'\#{',
comment_end_string='}',
line_statement_prefix='%%',
line_comment_prefix='%#',
trim_blocks=True,
autoescape=False,
loader=jinja2.FileSystemLoader(os.path.abspath('.'))
)
template = latex_jinja_env.get_template('template.tex')
result = template.render(
speaker_set=sorted(set(speakers)),
data=zip(speakers, texts)
)
with open(f_out, 'w') as f:
f.write(result)
def produce_docx(md_in, docx_out):
"""Use pandoc to convert markdown to docx."""
output = pypandoc.convert_file(md_in, 'docx', outputfile=docx_out)
if __name__ == "__main__":
ARGS = _init_args()
logging.basicConfig(level=logging.INFO,
format="%(asctime)s - %(filename)s - %(levelname)s - %(message)s")
_main(ARGS)
# render('transcript.json', 'transcript.tex')
```
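`render_md` renders the pre-formatted speaker lines through a Jinja2 template named `template.md`, which is not included in this excerpt. A minimal template that would satisfy it might look like the following sketch (an assumption, written here as a Python string so the snippet stays self-contained).

```python
# Hypothetical minimal template.md: loop over the pre-formatted
# "<b>Speaker N:</b>\t..." strings and separate them with blank lines.
TEMPLATE_MD = (
    "{% for text in texts %}\n"
    "{{ text }}\n"
    "\n"
    "{% endfor %}\n"
)

with open("template.md", "w") as f:
    f.write(TEMPLATE_MD)
```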
|
{
"source": "JDonini/Cats-and-Dogs-Classification",
"score": 2
}
|
#### File: scripts/cat_breeds/data_preprocessing.py
```python
import random
import time
import sys
import os
from PIL import Image
import numpy as np
from scipy.misc import imsave, imread
sys.path.append('utils')
from config import *
from data_augmentation import *
print("\nPreprocessing Cat Breeds...")
train_samples, test_samples = [], []
breeds = {
'abyssinian': [],
'american_bulldog': [],
'american_pit_bull_terrier': [],
'basset_hound': [],
'beagle': [],
'bengal': [],
'birman': [],
'bombay': [],
'boxer': [],
'british_shorthair': [],
'chihuahua': [],
'egyptian_mau': [],
'english_cocker_spaniel': [],
'english_setter': [],
'german_shorthaired': [],
'great_pyrenees': [],
'havanese': [],
'japanese_chin': [],
'keeshond': [],
'leonberger': [],
'maine_coon': [],
'miniature_pinscher': [],
'newfoundland': [],
'persian': [],
'pomeranian': [],
'pug': [],
'ragdoll': [],
'russian_blue': [],
'saint_bernard': [],
'samoyed': [],
'scottish_terrier': [],
'shiba_inu': [],
'siamese': [],
'sphynx': [],
'staffordshire_bull_terrier': [],
'wheaten_terrier': [],
'yorkshire_terrier': []
}
category_to_int = {
'abyssinian': 1,
'american_bulldog': 2,
'american_pit_bull_terrier': 3,
'basset_hound': 4,
'beagle': 5,
'bengal': 6,
'birman': 7,
'bombay': 8,
'boxer': 9,
'british_shorthair': 10,
'chihuahua': 11,
'egyptian_mau': 12,
'english_cocker_spaniel': 13,
'english_setter': 14,
'german_shorthaired': 15,
'great_pyrenees': 16,
'havanese': 17,
'japanese_chin': 18,
'keeshond': 19,
'leonberger': 20,
'maine_coon': 21,
'miniature_pinscher': 22,
'newfoundland': 23,
'persian': 24,
'pomeranian': 25,
'pug': 26,
'ragdoll': 27,
'russian_blue': 28,
'saint_bernard': 29,
'samoyed': 30,
'scottish_terrier': 31,
'shiba_inu': 32,
'siamese': 33,
'sphynx': 34,
'staffordshire_bull_terrier': 35,
'wheaten_terrier': 36,
'yorkshire_terrier': 37
}
int_to_category = {
1: 'abyssinian',
2: 'american_bulldog',
3: 'american_pit_bull_terrier',
4: 'basset_hound',
5: 'beagle',
6: 'bengal',
7: 'birman',
8: 'bombay',
9: 'boxer',
10: 'british_shorthair',
11: 'chihuahua',
12: 'egyptian_mau',
13: 'english_cocker_spaniel',
14: 'english_setter',
15: 'german_shorthaired',
16: 'great_pyrenees',
17: 'havanese',
18: 'japanese_chin',
19: 'keeshond',
20: 'leonberger',
21: 'maine_coon',
22: 'miniature_pinscher',
23: 'newfoundland',
24: 'persian',
25: 'pomeranian',
26: 'pug',
27: 'ragdoll',
28: 'russian_blue',
29: 'saint_bernard',
30: 'samoyed',
31: 'scottish_terrier',
32: 'shiba_inu',
33: 'siamese',
34: 'sphynx',
35: 'staffordshire_bull_terrier',
36: 'wheaten_terrier',
37: 'yorkshire_terrier'
}
cat_breeds = {
1: 'abyssinian',
2: 'bengal',
3: 'birman',
4: 'bombay',
5: 'british_shorthair',
6: 'egyptian_mau',
7: 'maine_coon',
8: 'persian',
9: 'ragdoll',
10: 'russian_blue',
11: 'siamese',
12: 'sphynx'
}
with open(DATA_ANNOTATION, 'rt') as lines:
for line in lines:
if line[0] == '#':
pass
else:
(file_path, class_id, category, *tail) = line.split(' ')
complete_file_path = DATA_PATH_IMAGES+'{}.jpg'.format(file_path)
breeds[int_to_category[int(class_id)]].append(file_path)
samples_count = min([len(file_paths) for file_paths in breeds.values()])
train_count = int(samples_count * 0.7)
test_count = int(samples_count * 0.3)
for (class_id, file_paths) in breeds.items():
random.shuffle(file_paths)
for file_path in file_paths[:train_count]:
train_samples.append((class_id, file_path))
for file_path in file_paths[train_count:train_count + test_count]:
test_samples.append((class_id, file_path))
random.shuffle(train_samples)
random.shuffle(test_samples)
def all_data_augmentation():
augment_images(image, f)
remove_noise()
print('\nProcessing train samples...')
time_start_train = time.time()
a = []
for (class_id, file_path) in train_samples:
for item in dirs:
if (item.split('.')[0] == file_path) and (class_id in cat_breeds[1]):
f, e = os.path.splitext(SAVE_CAT_ABYSSIANIAN_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[2]):
f, e = os.path.splitext(SAVE_CAT_BENGAL_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[3]):
f, e = os.path.splitext(SAVE_CAT_BIRMAN_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[4]):
f, e = os.path.splitext(SAVE_CAT_BOMBAY_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[5]):
f, e = os.path.splitext(SAVE_CAT_BRITISH_SHORTHAIR_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[6]):
f, e = os.path.splitext(SAVE_CAT_EGYPTIAN_MAU_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[7]):
f, e = os.path.splitext(SAVE_CAT_MAINE_COON_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[8]):
f, e = os.path.splitext(SAVE_CAT_PERSIAN_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[9]):
f, e = os.path.splitext(SAVE_CAT_RAGDOOL_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[10]):
f, e = os.path.splitext(SAVE_CAT_RUSSIAN_BLUE_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[11]):
f, e = os.path.splitext(SAVE_CAT_SIAMESE_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[12]):
f, e = os.path.splitext(SAVE_CAT_SPHYNX_TRAIN + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
all_data_augmentation()
time_train = time.time() - time_start_train
print('Time to process train samples: {:.2f} [sec].'.format(time_train))
print('\nProcessing test samples...')
time_start_test = time.time()
for (class_id, file_path) in test_samples:
for item in dirs:
if (item.split('.')[0] == file_path) and (class_id in cat_breeds[1]):
f, e = os.path.splitext(SAVE_CAT_ABYSSIANIAN_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[2]):
f, e = os.path.splitext(SAVE_CAT_BENGAL_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[3]):
f, e = os.path.splitext(SAVE_CAT_BIRMAN_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[4]):
f, e = os.path.splitext(SAVE_CAT_BOMBAY_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[5]):
f, e = os.path.splitext(SAVE_CAT_BRITISH_SHORTHAIR_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[6]):
f, e = os.path.splitext(SAVE_CAT_EGYPTIAN_MAU_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[7]):
f, e = os.path.splitext(SAVE_CAT_MAINE_COON_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[8]):
f, e = os.path.splitext(SAVE_CAT_PERSIAN_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[9]):
f, e = os.path.splitext(SAVE_CAT_RAGDOOL_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[10]):
f, e = os.path.splitext(SAVE_CAT_RUSSIAN_BLUE_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[11]):
f, e = os.path.splitext(SAVE_CAT_SIAMESE_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
elif (item.split('.')[0] == file_path) and (class_id in cat_breeds[12]):
f, e = os.path.splitext(SAVE_CAT_SPHYNX_TEST + item)
img = Image.open(DATA_PATH_IMAGES + item).convert("RGB")
image = np.array(img)
imsave(f + '.jpg', image)
time_test = time.time() - time_start_test
print('Time to process test samples: {:.2f} [sec].'.format(time_test))
print('\nTime to process all stages: {:.2f} [sec].'.format(time_test + time_train))
```
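The per-breed `if`/`elif` chains above repeat the same open, convert, save, and augment steps and differ only in the destination directory. The following hedged refactoring sketch shows a table-driven equivalent; it reuses the script's own imports and the `SAVE_CAT_*` constants from its config module, and is offered as an alternative shape rather than code from the repository.

```python
# Hypothetical table-driven equivalent of the training loop above; relies on
# the same imports and config constants as the script itself.
TRAIN_DIRS = {
    'abyssinian': SAVE_CAT_ABYSSIANIAN_TRAIN,
    'bengal': SAVE_CAT_BENGAL_TRAIN,
    'birman': SAVE_CAT_BIRMAN_TRAIN,
    'bombay': SAVE_CAT_BOMBAY_TRAIN,
    'british_shorthair': SAVE_CAT_BRITISH_SHORTHAIR_TRAIN,
    'egyptian_mau': SAVE_CAT_EGYPTIAN_MAU_TRAIN,
    'maine_coon': SAVE_CAT_MAINE_COON_TRAIN,
    'persian': SAVE_CAT_PERSIAN_TRAIN,
    'ragdoll': SAVE_CAT_RAGDOOL_TRAIN,
    'russian_blue': SAVE_CAT_RUSSIAN_BLUE_TRAIN,
    'siamese': SAVE_CAT_SIAMESE_TRAIN,
    'sphynx': SAVE_CAT_SPHYNX_TRAIN,
}

def save_sample(breed, item, dest_dirs, augment=False):
    """Convert one image to RGB, save it under its breed directory,
    and optionally run the augmentation pipeline."""
    base, _ = os.path.splitext(dest_dirs[breed] + item)
    image = np.array(Image.open(DATA_PATH_IMAGES + item).convert("RGB"))
    imsave(base + '.jpg', image)
    if augment:
        augment_images(image, base)
        remove_noise()

# Equivalent of the training loop above:
# for breed, file_path in train_samples:
#     for item in dirs:
#         if item.split('.')[0] == file_path and breed in TRAIN_DIRS:
#             save_sample(breed, item, TRAIN_DIRS, augment=True)
```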
|
{
"source": "JDonini/Stacking-Audio-Tagging",
"score": 3
}
|
#### File: Stacking-Audio-Tagging/src/check_data.py
```python
import os
import sys
import numpy as np
import pandas as pd
sys.path.append('src/')
from generate_structure import BINARY_ANNOTATIONS, AUDIO
sys.path.append('config/')
from config_project import EXT_IMG, EXT_AUDIO, SEED
np.random.seed(SEED)
annotation_list, song_list = [], []
def remove_missing_data():
fp = open(BINARY_ANNOTATIONS, 'r+')
content = fp.read().split('\n')
content = content[1:]
for i in range(len(content)):
content[i] = content[i].split(EXT_IMG)
annotation_list.append(content[i][0])
for file in os.listdir(AUDIO):
file = file.split(EXT_AUDIO)[0]
song_list.append(file)
annotation_remove = [item for item in annotation_list if item not in song_list]
song_remove = [item for item in song_list if item not in annotation_list]
print('Remove Annotation - ', annotation_remove)
print(len(annotation_remove))
print('Remove Song - ', song_remove)
print(len(song_remove))
df = pd.read_csv(BINARY_ANNOTATIONS)
for file in annotation_remove:
print('Remove Annotation : {}'.format(file + EXT_IMG))
df = df[df.song_name != file + EXT_IMG]
df.to_csv(BINARY_ANNOTATIONS, sep=',', index=False)
if __name__ == '__main__':
remove_missing_data()
```
|
{
"source": "jdonkervliet/dedalov2",
"score": 3
}
|
#### File: dedalov2/dedalov2/explanation.py
```python
from typing import Optional, Set
from .example import Example, Examples
from .knowledge_graph import Vertex
from .path import Path
class Explanation:
def __init__(self, p: Path, value: Vertex):
self.path: Path = p
self.value: Vertex = value
self.record: Optional[Record] = None
def explains(self, examples: Examples) -> Set[Example]:
return self.path.get_starting_points_connected_to_endpoint(self.value)
def __lt__(self, other):
return self.path < other.path
def __eq__(self, other):
return type(other) == Explanation and self.path == other.path and self.value == other.value
def __hash__(self):
return hash(self.path)*31+hash(self.value)
def __str__(self):
return "{} -| {}".format(self.path, self.value)
class Record:
def __init__(self, explanation: Explanation, score: float, num_examples: int = None, num_positives: int = None,
num_connected_positives: int = None, num_connected_negatives: int = None):
self.explanation: Explanation = explanation
self.score: float = score
self.num_examples: Optional[int] = num_examples
self.num_positives: Optional[int] = num_positives
self.num_connected_positives: Optional[int] = num_connected_positives
self.num_connected_negatives: Optional[int] = num_connected_negatives
def __str__(self):
if self.num_examples is None or self.num_positives is None or self.num_connected_positives is None or self.num_connected_negatives is None:
return "SCORE: {} EXPL: {}".format(self.score, self.explanation)
else:
return "SCORE: {} P:{}:{} N:{}:{} EXPL: {}".format(self.score,
self.num_connected_positives,
self.num_positives,
self.num_connected_negatives,
self.num_examples-self.num_positives,
self.explanation)
def __lt__(self, other):
return self.score < other.score
def __eq__(self, other):
same_exp = self.explanation == other.explanation
if same_exp:
assert self.score == other.score
return same_exp
```
#### File: dedalov2/dedalov2/knowledge_graph.py
```python
import hdt
from . import local_hdt
from . import urishortener
class Predicate:
@staticmethod
def fromString(id: str):
int_value = local_hdt.document().convert_term(id, hdt.IdentifierPosition.Predicate)
if int_value <= 0:
raise ValueError("{} does not exist as Predicate.".format(id))
return Predicate(int_value)
def __init__(self, id: int):
self.id: int = id
def __str__(self):
return urishortener.shorten(local_hdt.document().convert_id(self.id, hdt.IdentifierPosition.Predicate))
def __eq__(self, other):
return type(other) == Predicate and self.id == other.id
def __hash__(self):
return self.id
class Vertex:
@staticmethod
def fromString(id: str) -> 'Vertex':
s_id = local_hdt.document().convert_term(id, hdt.IdentifierPosition.Subject)
o_id = local_hdt.document().convert_term(id, hdt.IdentifierPosition.Object)
if s_id == 0 and o_id == 0:
raise ValueError("{} does not exist in this HDT file.".format(id))
return Vertex(s_id=s_id, o_id=o_id)
@staticmethod
def fromSubjectId(id: int) -> 'Vertex':
if id == 0:
raise ValueError("0 is not a valid Subject ID.")
uri = local_hdt.document().convert_id(id, hdt.IdentifierPosition.Subject)
o_id = local_hdt.document().convert_term(uri, hdt.IdentifierPosition.Object)
return Vertex(s_id=id, o_id=o_id)
@staticmethod
def fromObjectId(id: int) -> 'Vertex':
if id == 0:
raise ValueError("0 is not a valid Object ID.")
uri = local_hdt.document().convert_id(id, hdt.IdentifierPosition.Object)
s_id = local_hdt.document().convert_term(uri, hdt.IdentifierPosition.Subject)
return Vertex(s_id=s_id, o_id=id)
def __init__(self, s_id: int = 0, o_id: int = 0):
if s_id == 0 and o_id == 0:
raise ValueError("Vertex does not exist in HDT file.")
self.s_id = s_id
self.o_id = o_id
def is_subject(self):
return self.s_id > 0
def is_object(self):
return self.o_id > 0
def __str__(self):
id: int
pos: hdt.IdentifierPosition
if self.is_subject():
id = self.s_id
pos = hdt.IdentifierPosition.Subject
else:
id = self.o_id
pos = hdt.IdentifierPosition.Object
return urishortener.shorten(local_hdt.document().convert_id(id, pos))
def __eq__(self, other):
return self.s_id == other.s_id and self.o_id == other.o_id
def __hash__(self):
return self.s_id * 31 + self.o_id
```
|
{
"source": "jdonkervliet/pecosa",
"score": 2
}
|
#### File: jdonkervliet/pecosa/pecosa.py
```python
import os
import sys
import time
import psutil
def key_or_val(li, key, value, header):
if header:
li.append(key)
else:
li.append(value)
if __name__ == "__main__":
logfile = sys.argv[1]
pid = int(sys.argv[2])
p = psutil.Process(pid)
first = True
with open(logfile, "w+") as fout:
while True:
counters = []
key_or_val(counters, "timestamp", f"{time.time() * 1000}", first)
sys_counters = p.as_dict()
for k in sorted(sys_counters):
v = sys_counters[k]
if k in ["environ", "cmdline", "connections", "open_files", "memory_maps", "threads", "cpu_affinity"]:
continue
elif k in ["gids", "memory_info", "uids", "num_ctx_switches", "cpu_times", "io_counters", "ionice",
"memory_full_info"]:
vdict = v._asdict()
for sk in sorted(vdict):
sv = vdict[sk]
key_or_val(counters, f"proc.{k}.{sk}", f"{sv}", first)
else:
key_or_val(counters, f"proc.{k}", f"{v}", first)
net = psutil.net_io_counters(pernic=True)
for device in sorted(net):
net_device = net[device]
key_or_val(counters, f"net.bytes_sent.{device}", f"{net_device.bytes_sent}", first)
key_or_val(counters, f"net.bytes_recv.{device}", f"{net_device.bytes_recv}", first)
key_or_val(counters, f"net.packets_sent.{device}", f"{net_device.packets_sent}", first)
key_or_val(counters, f"net.packets_recv.{device}", f"{net_device.packets_recv}", first)
key_or_val(counters, f"net.errin.{device}", f"{net_device.errin}", first)
key_or_val(counters, f"net.errout.{device}", f"{net_device.errout}", first)
key_or_val(counters, f"net.dropin.{device}", f"{net_device.dropin}", first)
key_or_val(counters, f"net.dropout.{device}", f"{net_device.dropout}", first)
disks = psutil.disk_io_counters(perdisk=True)
for disk in sorted(disks):
disks_disk = disks[disk]
key_or_val(counters, f"disk.read_count.{disk}", f"{disks_disk.read_count}", first)
key_or_val(counters, f"disk.read_bytes.{disk}", f"{disks_disk.read_bytes}", first)
key_or_val(counters, f"disk.write_count.{disk}", f"{disks_disk.write_count}", first)
key_or_val(counters, f"disk.write_bytes.{disk}", f"{disks_disk.write_bytes}", first)
cputimes = psutil.cpu_times(percpu=False)
cpudict = cputimes._asdict()
for sk in sorted(cpudict):
sv = cpudict[sk]
key_or_val(counters, f"cpu.{sk}", f"{sv}", first)
cpupercent = psutil.cpu_percent()
key_or_val(counters, f"cpu.percent", f"{cpupercent}", first)
cpufreq = psutil.cpu_freq()
key_or_val(counters, f"cpu.freq.current", f"{cpufreq.current}", first)
key_or_val(counters, f"cpu.freq.min", f"{cpufreq.min}", first)
key_or_val(counters, f"cpu.freq.max", f"{cpufreq.max}", first)
cpus = psutil.cpu_stats()
key_or_val(counters, f"cpu.ctx_switches", f"{cpus.ctx_switches}", first)
key_or_val(counters, f"cpu.interrupts", f"{cpus.interrupts}", first)
key_or_val(counters, f"cpu.soft_interrupts", f"{cpus.soft_interrupts}", first)
key_or_val(counters, f"cpu.syscalls", f"{cpus.syscalls}", first)
fout.write("\t".join(counters))
fout.write(os.linesep)
fout.flush()
first = False
time.sleep(1)
```
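Each loop iteration writes one tab-separated row, and the first pass (while `first` is `True`) emits the column names, so the log is a TSV file with a header line. Below is a hedged post-processing snippet for loading it; the filename and the use of pandas are assumptions, not part of pecosa.

```python
# Hypothetical analysis snippet (not part of pecosa): load the TSV log
# with pandas and inspect a couple of columns.
import pandas as pd

df = pd.read_csv("pecosa.log", sep="\t")  # path passed as sys.argv[1] when pecosa ran
df["timestamp"] = pd.to_datetime(df["timestamp"], unit="ms")
print(df[["timestamp", "cpu.percent"]].head())
```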
|
{
"source": "jdormuth1/panotti",
"score": 3
}
|
#### File: jdormuth1/panotti/train_network.py
```python
from __future__ import print_function
import sys
print(sys.path)
print(sys.version)
import numpy as np
from panotti.models import *
from panotti.datautils import *
#from keras.callbacks import ModelCheckpoint #,EarlyStopping
import os
from os.path import isfile
from timeit import default_timer as timer
from panotti.multi_gpu import MultiGPUModelCheckpoint
from panotti.mixup_generator import MixupGenerator
import math
def train_network(weights_file="weights.hdf5", classpath="Preproc/Train/",
epochs=50, batch_size=20, val_split=0.2, tile=False, max_per_class=0,
k_fold=1):
np.random.seed(1) # fix a number to get reproducibility; comment out for random behavior
# Get the data
X_train, Y_train, paths_train, class_names = build_dataset(path=classpath,
batch_size=batch_size, tile=tile, max_per_class=max_per_class)
save_best_only = (val_split > 1e-6)
assert k_fold <= 1/val_split # make sure we don't repeat folds
for k in range(k_fold):
# Instantiate the model
model, serial_model = setup_model(X_train, class_names,
weights_file=weights_file, quiet=(k!=0))
# Split between Training and Validation Set, val_split = percentage to use for val
split_index = int(X_train.shape[0]*(1-val_split)) # Train first, Val second
X_val, Y_val = X_train[split_index:], Y_train[split_index:]
X_train, Y_train = X_train[:split_index], Y_train[:split_index]
# if we're doing k-folding cross-val, don't overwrite the weights file until the last time
callbacks = None if (k < k_fold-1) else [MultiGPUModelCheckpoint(filepath=weights_file, verbose=1, save_best_only=save_best_only,
serial_model=serial_model, period=1, class_names=class_names)]
steps_per_epoch = X_train.shape[0] // batch_size
if False and ((len(class_names) > 2) or (steps_per_epoch > 1)):
training_generator = MixupGenerator(X_train, Y_train, batch_size=batch_size, alpha=0.25)()
model.fit_generator(generator=training_generator, steps_per_epoch=steps_per_epoch,
epochs=epochs, shuffle=True,
verbose=1, callbacks=callbacks, validation_data=(X_val, Y_val))
else:
model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs, shuffle=True,
verbose=1, callbacks=callbacks, #validation_split=val_split)
validation_data=(X_val, Y_val))
if k < k_fold-1: # reconstitute and re-split the data for the next loop
print("\n\n------ Starting another round of cross-validation, for k =",k+2,"/",k_fold,"------")
X_train = np.concatenate((X_val, X_train)) # stick the val at the front this time
Y_train = np.concatenate((Y_val, Y_train))
# overwrite text file class_names.txt - does not put a newline after last class name
with open('class_names.txt', 'w') as outfile:
outfile.write("\n".join(class_names))
# Score the model against Test dataset
X_test, Y_test, paths_test, class_names_test = build_dataset(path=classpath+"../Test/", tile=tile)
assert( class_names == class_names_test )
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="trains network using training dataset")
parser.add_argument('-w', '--weights', #nargs=1, type=argparse.FileType('r'),
help='weights file (in .hdf5)', default="weights.hdf5")
parser.add_argument('-c', '--classpath', #type=argparse.string,
help='Train dataset directory with list of classes', default="Preproc/Train/")
parser.add_argument('--epochs', default=20, type=int, help="Number of iterations to train for")
parser.add_argument('--batch_size', default=40, type=int, help="Number of clips to send to GPU at once")
parser.add_argument('--val', default=0.2, type=float, help="Fraction of train to split off for validation")
parser.add_argument("--tile", help="tile mono spectrograms 3 times for use with imagenet models",action="store_true")
parser.add_argument('-m', '--maxper', type=int, default=0, help="Max examples per class")
parser.add_argument('-k', '--kfold', type=int, default=1, help="Enable k-fold cross-validation (max = 1/val)")
args = parser.parse_args()
train_network(weights_file=args.weights, classpath=args.classpath, epochs=args.epochs,
batch_size=args.batch_size, val_split=args.val, tile=args.tile, max_per_class=args.maxper,
k_fold=args.kfold)
```
#### File: panotti/utils/pullTags.py
```python
import os
import subprocess
import errno
import shutil
from sys import version_info
import glob
from multiprocessing import Pool
from functools import partial
if version_info >= (3, 4):
from plistlib import loads as readPlistFromString
else:
from plistlib import readPlistFromString
import re
keys_to_use = ['kMDItemMusicalInstrumentName',
'kMDItemMusicalInstrumentCategory',
'kMDItemMusicalGenre',
'kMDItemAppleLoopDescriptors',
'kMDItemTimeSignature']
def make_dir(directory): # makes a directory if it doesn't exist
try:
os.stat(directory)
except:
os.mkdir(directory)
def make_link(source, link_name): # makes a symbolic link (unix) or shortcut (Windows):
# TODO: how to do on windows?
try:
os.stat(link_name)
except:
os.symlink(source, link_name)
def pullTags_one_file(file_list, new_main_folder, file_index):
# We actually pull the tags and create the directories & symbolic links as we go,
# and let the OS/filesystem handle 'collisions' if more than one process is trying to create the same thing
infile = file_list[file_index]
#print('infile = ',infile)
output = subprocess.check_output(['mdls','-plist','-', infile]) # mdls is an Apple command-line utility
plist = readPlistFromString(output)
#print(plist)
print_every = 100
if (0 == file_index % print_every):
print("pullTags: File ",file_index,"/",len(file_list),": ",infile,sep="")
for key in keys_to_use:
#print(" Checking key = ",key)
try:
tags = plist[key]
if tags: # guard against blank tags
if isinstance(tags, str):
tags = [tags] # just make it a length-1 list
#print(" key = ",key,", tags (list):")
for tag in tags:
                    tag = re.sub(r'[^a-zA-Z\d ]|( ){2,}', '_', tag)  # whitelist only certain characters, e.g. "4/4"->"4_4"
#tag = tag.replace("/", "-").replace(";", "-").replace("\\", "-").replace(" ", "_").replace
#print(" [",tag,']',sep="")
if tag: # guard against blank tags
new_folder = new_main_folder+'/'+tag
make_dir(new_folder)
link_name = new_folder+'/'+os.path.basename(infile)
make_link(infile, link_name)
except:
#print("Key error: File",infile,"doesn't contain key",key)
pass
return
def hawleys_way(args):
search_dirs = args.dir
new_main_folder='Samples/'
make_dir(new_main_folder)
print("Searching for .caf files in starting directories ",search_dirs)
    # Recursively search for .caf files under the given starting directories
file_list = []
for search_dir in search_dirs:
for filename in glob.iglob(search_dir+'/**/*.caf', recursive=True):
file_list.append(filename)
# farm out the pulling of tags to many files simultaneously across all processors
file_indices = tuple( range(len(file_list)) )
cpu_count = os.cpu_count()
if (False): # simple single-processor execution for testing
for file_index in file_indices:
pullTags_one_file(file_list,new_main_folder,file_index)
else:
pool = Pool(cpu_count)
print("Mapping to",cpu_count,"processes")
pool.map(partial(pullTags_one_file, file_list, new_main_folder), file_indices)
return
def bradens_way():
# Current working directory
CWD = os.getcwd()
UNKNOWN_FOLDER = "New Folder"
# Folders
folders = {
UNKNOWN_FOLDER: []
}
# get a list of all the names of new folders
for relative_path in os.listdir(CWD):
full_path = os.path.join(CWD, relative_path)
# If running from a script, skip self.
#if os.path.samefile(full_path, __file__):
# continue
try:
output = subprocess.check_output(['mdls','-plist','-', full_path])
plist = readPlistFromString(output)
if (('kMDItemMusicalInstrumentName' in plist) or ('kMDItemMusicalInstrumentCategory' in plist)
or ('kMDItemMusicalGenre' in plist) or ('kMDItemAppleLoopDescriptors' in plist)):
tag1 = plist['kMDItemMusicalInstrumentName']
tag2 = plist['kMDItemMusicalInstrumentCategory']
tag3 = plist['kMDItemMusicalGenre']
tag4 = plist['kMDItemAppleLoopDescriptors']
if tag1 not in folders:
folders[tag1] = []
if tag2 not in folders:
folders[tag2] = []
if tag3 not in folders:
folders[tag3] = []
if tag4 not in folders:
folders[tag4] = []
new_path = os.path.join(CWD, tag1, relative_path)
folders[tag1].append([full_path, new_path])
new_path = os.path.join(CWD, tag2, relative_path)
folders[tag2].append([full_path, new_path])
new_path = os.path.join(CWD, tag3, relative_path)
folders[tag3].append([full_path, new_path])
new_path = os.path.join(CWD, tag4, relative_path)
folders[tag4].append([full_path, new_path])
else:
# Move file to the catch-all folder
new_path = os.path.join(CWD, UNKNOWN_FOLDER, relative_path)
folders[UNKNOWN_FOLDER].append([full_path, new_path])
except:
print("Could not process: %s" % full_path)
#Create folders and move files
for (folder, tuples) in folders.items():
folder_path = os.path.join(CWD, folder)
#print(folder_path)
# Create folder if it does not exist
try:
os.makedirs(folder_path)
print("Created folder: %s" % folder_path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# Move files
for t in tuples:
try:
                # each entry is a [full_path, new_path] pair, so a single copy suffices
                shutil.copy(t[0], t[1])
except:
print("Could not move file: %s" % t[0])
return
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Pull metadata tags from Apple Loops .caf files')
parser.add_argument('dir', help="directory/ies to search in", nargs='*', default=['/Library/Audio'])
args = parser.parse_args()
hawleys_way(args)
```
|
{
"source": "Jdorri/rl-medical",
"score": 2
}
|
#### File: LandmarkDetection/DQN/GUI_tests.py
```python
import sys
import unittest
from PyQt5.QtTest import QTest
from PyQt5.QtCore import Qt
from GUI.thread import WorkerThread
from controller import Controller, Tab
from GUI.right_widget_automatic import RightWidgetSettings
from GUI.right_widget_browse import RightWidgetSettingsBrowseMode, XMove, YMove, ZMove
from GUI.left_widget import LeftWidgetSettings
import numpy as np
import glob
import os
import pickle
class RightWidgetTester(unittest.TestCase):
    ''' Class to perform unit tests on the buttons and functionality within
the right widget of the GUI Launcher.
'''
def setUp(self):
'''Method run before every test. Use this to prepare the test fixture.'''
self.controller = Controller()
self.m = self.controller.right_widget.automatic_mode
self.w = self.m.window
Controller.allWidgets_setCheckable(self.controller.app)
def tearDown(self):
''' Method run after each test is run. Use this to reset the testing
environment.
Raises ValueError to progress onto next test as unittest doesn't
work correctly with threading & PyQt.
'''
try:
test_results[self.id()] = self._outcome.errors[1][1][1]
except TypeError:
test_results[self.id()] = 'success'
raise ValueError('Stop test here')
def test_taskRadioButton(self):
''' Check the task button changes between play and eval modes
'''
# Check default
self.assertEqual(self.m.which_task(), 'Play')
# Change to eval mode and check
QTest.mouseClick(self.m.eval_button, Qt.LeftButton)
self.assertEqual(self.m.which_task(), 'Evaluation')
# Change back to play mode and check
QTest.mouseClick(self.m.play_button, Qt.LeftButton)
self.assertEqual(self.m.which_task(), 'Play')
def test_agentSpeedSlider(self):
'''Checks if the slider works and if it adjusts the thread speed'''
# Check initial position is correct
self.slider_checker()
# Change to min value
self.m.speed_slider.setValue(self.m.speed_slider.minimum())
self.assertEqual(self.m.speed_slider.value(), self.m.speed_slider.minimum())
self.slider_checker()
# Change to medium value
self.m.speed_slider.setValue(round((self.m.speed_slider.maximum() - \
self.m.speed_slider.minimum()) / 2, 1) )
self.slider_checker()
def slider_checker(self):
''' Helper function for checking slider position corresponds to correct
thread speed
'''
if self.m.speed_slider.value() == self.m.speed_slider.maximum():
self.assertEqual(self.m.thread.speed, WorkerThread.FAST)
elif self.m.speed_slider.value() == self.m.speed_slider.minimum():
self.assertEqual(self.m.thread.speed, WorkerThread.SLOW)
else:
self.assertEqual(self.m.thread.speed, WorkerThread.MEDIUM)
class RightWidgetBrowseModeTester(unittest.TestCase):
    ''' Class to perform unit tests on the buttons and functionality within
the right widget of the GUI Launcher while browse mode is activated.
'''
def setUp(self):
'''Method run before every test. Use this to prepare the test fixture.'''
self.controller = Controller()
self.m = self.controller.right_widget.browse_mode
self.w = self.m.window
self.v = self.controller.window.widget.plot_3d
self.controller.right_widget.tab_widget.setCurrentIndex(1) # Change to browse mode
Controller.allWidgets_setCheckable(self.controller.app)
def tearDown(self):
''' Method run after each test is run. Use this to reset the testing
environment.
Raises ValueError to progress onto next test as unittest doesn't
work correctly with threading & PyQt.
'''
try:
test_results[self.id()] = self._outcome.errors[1][1][1]
except TypeError:
test_results[self.id()] = 'success'
raise ValueError('Stop test here')
def test_trajPlot(self):
''' Test the trajectory plot works as expected and clears
on loading a new image
'''
# Check buffers begin empty
self.assertTrue([] == self.v.x_traj == self.v.y_traj == self.v.z_traj)
# Move agent for plotting
QTest.mouseClick(self.m.y_action.up_button, Qt.LeftButton)
QTest.mouseClick(self.m.x_action.left_button, Qt.LeftButton)
QTest.mouseClick(self.m.z_action.in_button, Qt.LeftButton)
self.assertTrue(
3 == len(self.v.x_traj) == len(self.v.y_traj) == len(self.v.z_traj))
# Click next image and check plot clears
QTest.mouseClick(self.m.next_img, Qt.LeftButton)
self.assertTrue(
1 == len(self.v.x_traj) == len(self.v.y_traj) == len(self.v.z_traj))
def test_upButton(self):
''' Check clicking the up button moves the agent '''
init_loc = self.m.env._location
QTest.mouseClick(self.m.y_action.up_button, Qt.LeftButton)
self.assertTrue(self.m.y_action.up_button.isChecked())
self.assertNotEqual(init_loc, self.m.env._location)
def test_downButton(self):
''' Check clicking the down button moves the agent '''
init_loc = self.m.env._location
QTest.mouseClick(self.m.y_action.down_button, Qt.LeftButton)
self.assertTrue(self.m.y_action.down_button.isChecked())
self.assertNotEqual(init_loc, self.m.env._location)
def test_leftButton(self):
''' Check clicking the left button moves the agent '''
init_loc = self.m.env._location
QTest.mouseClick(self.m.x_action.left_button, Qt.LeftButton)
self.assertTrue(self.m.x_action.left_button.isChecked())
self.assertNotEqual(init_loc, self.m.env._location)
def test_rightButton(self):
''' Check clicking the left button moves the agent '''
init_loc = self.m.env._location
QTest.mouseClick(self.m.x_action.right_button, Qt.LeftButton)
self.assertTrue(self.m.x_action.right_button.isChecked())
self.assertNotEqual(init_loc, self.m.env._location)
def test_zInButton(self):
''' Check clicking the z in button moves the agent '''
init_loc = self.m.env._location
QTest.mouseClick(self.m.z_action.in_button, Qt.LeftButton)
self.assertTrue(self.m.z_action.in_button.isChecked())
self.assertNotEqual(init_loc, self.m.env._location)
def test_zOutButton(self):
''' Check clicking the z out button moves the agent '''
init_loc = self.m.env._location
QTest.mouseClick(self.m.z_action.out_button, Qt.LeftButton)
self.assertTrue(self.m.z_action.out_button.isChecked())
self.assertNotEqual(init_loc, self.m.env._location)
def test_zoomInButton(self):
''' Check clicking the zoom in button registers '''
QTest.mouseClick(self.m.zoomInButton, Qt.LeftButton)
self.assertTrue(self.m.zoomInButton.isChecked())
def test_zoomOutButton(self):
''' Check clicking the zoom in button registers '''
QTest.mouseClick(self.m.zoomOutButton, Qt.LeftButton)
self.assertTrue(self.m.zoomOutButton.isChecked())
def test_nextImgButton(self):
''' Check clicking next image loads a new image by comparing
the intensity of the initial img to new img
'''
init_intensity = np.sum(self.m.env.viewer.widget.arr)
QTest.mouseClick(self.m.next_img, Qt.LeftButton)
self.assertTrue(self.m.next_img.isChecked())
self.assertTrue(init_intensity - np.sum(self.m.env.viewer.widget.arr) > 1e-5)
def test_delHITLButton_notClickable(self):
''' Check the HITL delete button is not clickable if
HITL mode is not enabled.
'''
QTest.mouseClick(self.m.HITL_delete, Qt.LeftButton)
self.assertTrue(not self.m.HITL_delete.isChecked())
class RightWidgetHITLTester(unittest.TestCase):
    ''' Tester for the functionality of HITL mode within browse mode on the
    right widget.
'''
def setUp(self):
'''Method run before every test. Use this to prepare the test fixture.'''
self.controller = Controller()
self.m = self.controller.right_widget.browse_mode
self.w = self.m.window
self.controller.right_widget.on_change(1)
Controller.allWidgets_setCheckable(self.controller.app)
self.m.testing = True
self.m.HITL = True
def tearDown(self):
''' Method run after each test is run. Use this to reset the testing
environment.
Raises ValueError to progress onto next test as unittest doesn't
work correctly with threading & PyQt.
'''
try:
test_results[self.id()] = self._outcome.errors[1][1][1]
except (TypeError, IndexError):
test_results[self.id()] = 'success'
raise ValueError('Stop test here')
def test_enableHITLCheckBox(self):
''' Test the HITL checkbox works '''
self.m.HITL = False
QTest.mouseClick(self.m.HITL_mode, Qt.LeftButton)
self.assertTrue(self.m.HITL_mode.isChecked())
def test_delHITLButton(self):
''' Test the HITL delete episode deletes the latest episode as expected.
'''
# Move to fill location history
QTest.mouseClick(self.m.y_action.down_button, Qt.LeftButton)
QTest.mouseClick(self.m.y_action.up_button, Qt.LeftButton)
# Delete episode
QTest.mouseClick(self.m.HITL_delete, Qt.LeftButton)
self.assertEqual(self.m.HITL_logger, [])
def test_saveHITL(self):
''' Test the HITL session is saved when HITL mode is disabled
'''
# Move to fill location history
QTest.mouseClick(self.m.y_action.down_button, Qt.LeftButton)
QTest.mouseClick(self.m.y_action.up_button, Qt.LeftButton)
# End HITL mode (as this calls save_HITL())
QTest.mouseClick(self.m.HITL_mode, Qt.LeftButton)
self.assertTrue(not self.m.HITL_mode.isChecked())
# Load the created file
list_of_files = glob.glob('./data/HITL/*.pickle')
latest_file = max(list_of_files, key=os.path.getctime)
with open(latest_file, 'rb') as f:
log = pickle.load(f)
# Check contents of the log are correct
self.assertEqual(len(log), 1)
self.assertEqual(len(log[0]['states']), 3)
self.assertEqual(len(log[0]['rewards']), 3)
self.assertEqual(len(log[0]['actions']), 3)
self.assertEqual(log[0]['is_over'][-1], 1)
self.assertEqual(np.unique(log[0]['is_over'][:-1])[0], 0)
self.assertTrue(([i in [1,2,3] for i in log[0]['resolution']].count(True)
== len(log[0]['resolution'])))
self.assertTrue((log[0]['img_name'].startswith('ADNI') or
log[0]['img_name'].startswith('iFIND') or
log[0]['img_name'].startswith('14')))
# Delete the log file
os.remove(latest_file)
def test_checkHITLZoom(self):
''' Check that changing resolution doesn't make an action '''
buttons = [self.m.zoomInButton, self.m.zoomOutButton]
for button in buttons:
QTest.mouseClick(button, Qt.LeftButton)
self.assertEqual(self.m.HITL_logger, [])
def test_bufferFillsCorrectly(self):
''' Check that the buffer fills correctly with the agent's movement.
'''
# Move to fill location history
QTest.mouseClick(self.m.y_action.up_button, Qt.LeftButton)
QTest.mouseClick(self.m.x_action.left_button, Qt.LeftButton)
QTest.mouseClick(self.m.z_action.out_button, Qt.LeftButton)
# End HITL mode (as this calls save_HITL())
QTest.mouseClick(self.m.HITL_mode, Qt.LeftButton)
# Load the created file
list_of_files = glob.glob('./data/HITL/*.pickle')
latest_file = max(list_of_files, key=os.path.getctime)
with open(latest_file, 'rb') as f:
log = pickle.load(f)
# Check contents of the log are correct
self.assertEqual(len(log), 1)
self.assertEqual(len(log[0]['actions']), 4)
self.assertEqual(log[0]['actions'][1:], [1, 3, 5])
# Delete the log file
os.remove(latest_file)
class LeftWidgetTester(unittest.TestCase):
    ''' Class to perform unit tests on the buttons and functionality within
    the left widget of the GUI Launcher.
'''
def setUp(self):
'''Method run before every test. Use this to prepare the test fixture.'''
self.controller = Controller()
self.w = self.controller.window.left_widget
self.m = self.controller.right_widget.automatic_mode
self.w.testing = True
Controller.allWidgets_setCheckable(self.controller.app)
def tearDown(self):
''' Method run after each test is run. Use this to reset the testing
environment.
Raises ValueError to progress onto next test as unittest doesn't
work correctly with threading & PyQt.
'''
try:
test_results[self.id()] = self._outcome.errors[1][1][1]
except TypeError:
test_results[self.id()] = 'success'
raise ValueError('Stop test here')
def test_browseImagesButton(self):
''' Test to check clicking browse image loads a correct (default
in this instance) file of image paths.
'''
QTest.mouseClick(self.w.img_file_edit, Qt.LeftButton)
self.assertTrue(self.w.img_file_edit.isChecked())
self.assertEqual(self.w.fname_images,
'./data/filenames/brain_test_files_new_paths.txt')
def test_browseLandmarksButton(self):
''' Test to check clicking browse image loads a correct (default
in this instance) file of landmark paths.
'''
QTest.mouseClick(self.w.landmark_file_edit, Qt.LeftButton)
self.assertTrue(self.w.landmark_file_edit.isChecked())
self.assertEqual(self.w.fname_landmarks,
'./data/filenames/brain_test_landmarks_new_paths.txt')
def test_browseModelButton(self):
''' Test to check clicking browse image loads a correct (default
in this instance) file of model paths.
'''
QTest.mouseClick(self.w.model_file_edit, Qt.LeftButton)
self.assertTrue(self.w.model_file_edit.isChecked())
self.assertEqual(self.w.fname_model, './data/models/DQN_multiscale' +
'_brain_mri_point_pc_ROI_45_45_45/model-600000.data-00000-of-00001')
def test_changeDataToggle(self):
''' Test to toggling through the data type options changes settings as
desired.
'''
# Change to cardiac and test
QTest.mouseClick(self.w.cardiac_button, Qt.LeftButton)
self.assertTrue(self.w.cardiac_button.isChecked())
        self.assertIn('cardiac', self.m.fname_images.name)
# Change to fetal and test
QTest.mouseClick(self.w.ultrasound_button, Qt.LeftButton)
self.assertTrue(self.w.ultrasound_button.isChecked())
        self.assertIn('fetal', self.m.fname_images.name)
# Change to brain and test
QTest.mouseClick(self.w.brain_button, Qt.LeftButton)
self.assertTrue(self.w.brain_button.isChecked())
        self.assertIn('brain', self.m.fname_images.name)
class ControllerTester(unittest.TestCase):
    ''' Tester for the functionality of the controller class, which is the
doorway into launching the GUI.
'''
def setUp(self):
'''Method run before every test. Use this to prepare the test fixture.'''
self.controller = Controller()
self.w = self.controller.right_widget.automatic_mode.window
Controller.allWidgets_setCheckable(self.controller.app)
def tearDown(self):
''' Method run after each test is run. Use this to reset the testing
environment.
Raises ValueError to progress onto next test as unittest doesn't
work correctly with threading & PyQt.
'''
try:
test_results[self.id()] = self._outcome.errors[1][1][1]
except TypeError:
test_results[self.id()] = 'success'
raise ValueError('Stop test here')
def test_switchModes(self):
''' Test to ensure moving between Default Mode and Browse Mode tabs work
correctly
'''
# Test default mode is showing
self.assertEqual(self.controller.right_widget.tab_widget.currentIndex(), 0)
# Change to browse mode and test again
self.controller.right_widget.tab_widget.setCurrentIndex(1)
self.assertEqual(self.controller.right_widget.tab_widget.currentIndex(), 1)
# Change back to default mode and test again
self.controller.right_widget.tab_widget.setCurrentIndex(0)
self.assertEqual(self.controller.right_widget.tab_widget.currentIndex(), 0)
def test_load_defaults(self):
''' Test to check browse mode loads an image as expected
'''
# Change to browse mode
self.controller.right_widget.tab_widget.setCurrentIndex(1)
self.assertTrue(abs(np.sum(self.w.widget.arr)) > 1e-5)
if __name__ == '__main__':
test_results = {}
classes_to_test = [
RightWidgetTester,
RightWidgetBrowseModeTester,
RightWidgetHITLTester,
LeftWidgetTester,
ControllerTester,
]
loader = unittest.TestLoader()
suites_list = []
for test_class in classes_to_test:
suite = loader.loadTestsFromTestCase(test_class)
suites_list.append(suite)
big_suite = unittest.TestSuite(suites_list)
runner = unittest.TextTestRunner()
results = runner.run(big_suite)
print(test_results)
print(f'\nTests passed: {list(test_results.values()).count("success")} / {len(test_results)}\n')
```
#### File: DQN/utils/inspect_HITL_buffer.py
```python
import pickle
import glob
import os
def show_latest():
# Load the created file
list_of_files = glob.glob('./data/HITL/*.pickle')
latest_file = max(list_of_files, key=os.path.getctime)
with open(latest_file, 'rb') as f:
log = pickle.load(f)
episodes = 0
states = -1
for l in log:
episodes += 1
for state in l['states']:
states += 1
print('For the most recent pickle file: ')
print('Episodes: ', episodes)
print('States: ', states)
print('')
def show_all(data_type):
list_of_files = glob.glob(f'./data/HITL/*{data_type}*.pickle')
tot_episodes = tot_states = 0
for file in list_of_files:
with open(file, 'rb') as f:
log = pickle.load(f)
episodes = 0
states = -1
for l in log:
episodes += 1
for state in l['states']:
states += 1
tot_episodes += episodes
tot_states += states
print(f'Total for {data_type}: ')
print('Episodes: ', tot_episodes)
print('States: ', tot_states)
print('')
print('')
show_latest()
for d in ['BrainMRI','CardiacMRI','FetalUS']:
show_all(d)
```
|
{
"source": "jdorville1/algorithms",
"score": 3
}
|
#### File: algorithms/tests/test_map.py
```python
from algorithms.map import (
HashTable, ResizableHashTable,
Node, SeparateChainingHashTable,
word_pattern,
is_isomorphic,
is_anagram
)
import unittest
class TestHashTable(unittest.TestCase):
def test_one_entry(self):
m = HashTable(10)
m.put(1, '1')
self.assertEqual('1', m.get(1))
def test_add_entry_bigger_than_table_size(self):
m = HashTable(10)
m.put(11, '1')
self.assertEqual('1', m.get(11))
def test_get_none_if_key_missing_and_hash_collision(self):
m = HashTable(10)
m.put(1, '1')
self.assertEqual(None, m.get(11))
def test_two_entries_with_same_hash(self):
m = HashTable(10)
m.put(1, '1')
m.put(11, '11')
self.assertEqual('1', m.get(1))
self.assertEqual('11', m.get(11))
    def test_get_on_full_table_does_halt(self):
# and does not search forever
m = HashTable(10)
for i in range(10, 20):
m.put(i, i)
self.assertEqual(None, m.get(1))
def test_delete_key(self):
m = HashTable(10)
for i in range(5):
m.put(i, i**2)
m.del_(1)
self.assertEqual(None, m.get(1))
self.assertEqual(4,m.get(2))
def test_delete_key_and_reassign(self):
m = HashTable(10)
m.put(1, 1)
del m[1]
m.put(1, 2)
self.assertEqual(2, m.get(1))
def test_assigning_to_full_table_throws_error(self):
m = HashTable(3)
m.put(1, 1)
m.put(2, 2)
m.put(3, 3)
with self.assertRaises(ValueError):
m.put(4, 4)
def test_len_trivial(self):
m = HashTable(10)
self.assertEqual(0, len(m))
for i in range(10):
m.put(i, i)
self.assertEqual(i + 1, len(m))
def test_len_after_deletions(self):
m = HashTable(10)
m.put(1, 1)
self.assertEqual(1, len(m))
m.del_(1)
self.assertEqual(0, len(m))
m.put(11, 42)
self.assertEqual(1, len(m))
def test_resizable_hash_table(self):
m = ResizableHashTable()
self.assertEqual(ResizableHashTable.MIN_SIZE, m._size)
for i in range(ResizableHashTable.MIN_SIZE):
m.put(i, 'foo')
self.assertEqual(ResizableHashTable.MIN_SIZE * 2, m._size)
self.assertEqual('foo', m.get(1))
self.assertEqual('foo', m.get(3))
self.assertEqual('foo', m.get(ResizableHashTable.MIN_SIZE - 1))
def test_fill_up_the_limit(self):
m = HashTable(10)
for i in range(10):
m.put(i,i**2)
for i in range(10):
self.assertEqual(i**2,m.get(i))
class TestSeparateChainingHashTable(unittest.TestCase):
def test_one_entry(self):
m = SeparateChainingHashTable(10)
m.put(1, '1')
self.assertEqual('1', m.get(1))
def test_two_entries_with_same_hash(self):
m = SeparateChainingHashTable(10)
m.put(1, '1')
m.put(11, '11')
self.assertEqual('1', m.get(1))
self.assertEqual('11', m.get(11))
def test_len_trivial(self):
m = SeparateChainingHashTable(10)
self.assertEqual(0, len(m))
for i in range(10):
m.put(i, i)
self.assertEqual(i + 1, len(m))
def test_len_after_deletions(self):
m = SeparateChainingHashTable(10)
m.put(1, 1)
self.assertEqual(1, len(m))
m.del_(1)
self.assertEqual(0, len(m))
m.put(11, 42)
self.assertEqual(1, len(m))
def test_delete_key(self):
m = SeparateChainingHashTable(10)
for i in range(5):
m.put(i, i**2)
m.del_(1)
self.assertEqual(None, m.get(1))
self.assertEqual(4, m.get(2))
def test_delete_key_and_reassign(self):
m = SeparateChainingHashTable(10)
m.put(1, 1)
del m[1]
m.put(1, 2)
self.assertEqual(2, m.get(1))
def test_add_entry_bigger_than_table_size(self):
m = SeparateChainingHashTable(10)
m.put(11, '1')
self.assertEqual('1', m.get(11))
def test_get_none_if_key_missing_and_hash_collision(self):
m = SeparateChainingHashTable(10)
m.put(1, '1')
self.assertEqual(None, m.get(11))
class TestWordPattern(unittest.TestCase):
def test_word_pattern(self):
self.assertTrue(word_pattern("abba", "dog cat cat dog"))
self.assertFalse(word_pattern("abba", "dog cat cat fish"))
self.assertFalse(word_pattern("abba", "dog dog dog dog"))
self.assertFalse(word_pattern("aaaa", "dog cat cat dog"))
class TestIsIsomorphic(unittest.TestCase):
def test_is_isomorphic(self):
self.assertTrue(is_isomorphic("egg", "add"))
self.assertFalse(is_isomorphic("foo", "bar"))
self.assertTrue(is_isomorphic("paper", "title"))
class TestIsAnagram(unittest.TestCase):
def test_is_anagram(self):
self.assertTrue(is_anagram("anagram", "nagaram"))
self.assertFalse(is_anagram("rat", "car"))
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jdorvi/MonteCarlos_SLC",
"score": 3
}
|
#### File: jdorvi/MonteCarlos_SLC/kriebel_dean.py
```python
import numpy as np
def kriebel_dean(w_cm, B, D, W, m, S, T_d, H_b, gamma=0.78):
'''Calculates storm erosion based on the method presented in,
<NAME>., and <NAME>., 'Convolution method for time-dependent
beach-profile response' J. Waterway, Port, Coastal, Ocean Eng., 1993,
119(2): 204-226
Inputs:
REQUIRED \n
w_cm = sediment fall velocity (cm/s) \n
B = Berm height above mean sea-level (meters) \n
D = Dune height (meters) \n
W = Width of the back-shore (meters) \n
m = Linear beach face slope (m/m) \n
S = Water-level rise ('storm-surge') (meters) \n
T_d = Storm duration (hours) \n
H_b = Breaking wave height (meters) \n
OPTIONAL \n
gamma = Breaker index, usually taken to be 0.78-1.0 \n
Returns:
V_max = Maximum shoreline erosion volume (m**3) \n
R_max = Maximum shoreline erosion distance (m)
'''
# Constants
g = 9.8066 # gravitational acceleration (m/s/s)
# Sediment data
#d_50 = 0.3 # mass-mean sediment grain-size diameter (mm)
w_cm = w_cm # sediment fall velocity (cm/s)
w = w_cm/100 # m/sec
# Profile data
# Based on equilibrium profile of the form 'x=(h/A)**(3/2)', where h = the
# water depth at a distance x offshore from the still-water level
A = 2.25*((w**2)/g)**(1/3) # Eq. 15 'parameter governs profile steepness'
# valid for sand where 0.1mm < d_50 < 0.4mm
B = B # Berm height above mean sea-level (meters)
D = D # Dune height (meters)
W = W # Width of the back-shore (meters)
m = m # Linear beach face slope (m/m)
# Storm data
S = S # given water-level rise ('storm-surge') (meters)
T_d = T_d # Storm duration (hours)
gamma = gamma # Breaker index, usually taken to be 0.78-1.0.
H_b = H_b # Breaking wave height (meters)
h_b = H_b/gamma # Breaking depth, assumed to remain constant (meters)
# Active profile width 'x_b', x_0 = the distance from the still-water
# shoreline to the virtual origin of the concave equilibrium profile form,
# given by x_0 = h_T/3m, where h_T is the depth at which the linear slope
# is tangent to the concave profile, which may be shown to equal
# 4A**3/9m**2.
h_T = (4/9)*(A**3/m**2) # Eq. 16b_1
x_0 = h_T/(3*m) # Eq. 16b_2
x_b = x_0+(h_b/A)**(3/2) # Eq. 23
# Calculate erosion potential
# Maximum erosion potential, 'R_inf', and maximum potential volume eroded,
# 'V_inf', based on an equilibrium profile with a linear beach slope.
#R_inf = S*(x_b-(h_b/m)) / (B+h_b-(S/2)) # Eq. 22
#V_inf = R_inf*B + (S**2)/(2*m) - (2/5)*(S**(5/2))/(A**(3/2)) # Eq. 24
# Calculate maximum erosion potential 'R_inf' and maximum potential volume
# eroded 'V_inf' based on an equilibrium profile with a dune.
# Dune with no back-shore.
# R_inf = S*(x_b-(h_b/m))/(B+D+h_b-(S/2)) # Eq. 25
# Dune with a wide back-shore.
R_inf = (S*(x_b-(h_b/m)) - (W*(B+h_b-(S/2)))) / (B+D+h_b-(S/2)) # Eq. 26
# Volume eroded
V_inf = R_inf*D + (R_inf+W)*(B-S) # Eq. 27 --> used in K&D examples
# Volume eroded above original sea level #Eq. 28
# V_minf = R_inf*D +(R_inf+W)*B+(S**2)/(2*m)-(2/5)*(S**(5/2))/(A**(3/2))
# Calculate erosion timescale
# Time scale of profile response
C_1 = 320 # Empirical coefficient from Kriebel and Dean 1993
# Time scale parameter # Eq.31 (sec)
T_sec = ((H_b**(3/2))/(g**(1/2) * A**3)) / (1+(h_b/B)+(m*x_b)/h_b)
T_s = C_1*T_sec/3600 # convert seconds to hours
# Combine erosion potential and timescale
# Beach response to idealized storm surge
alpha = 1/T_s
sigma = np.pi/T_d
beta = 2*sigma/alpha # 2*np.pi*(T_s/T_d)
# Eq. 10
# R_t/R_inf=0.5*(1 - \
# (beta**2/(1+beta**2))*np.exp(-(2*sigma*t)/beta) - \
# (1/(1+beta**2))*(np.cos(2*sigma*t)+beta*np.sin(2*sigma*t)))
# Setting time derivative of Eq. 10 to zero leads to Eq. 12, where t_max is
# the time at which maximum erosion will take place.
    def find_t_max(t_max):
        """Residual of Eq. 12; its root is the time at which maximum erosion occurs."""
zero = np.cos(2*sigma*t_max) - \
(1/beta)*np.sin(2*sigma*t_max) - \
np.exp(-(2*sigma*t_max)/beta) # Eq. 12
return zero
# This can then be solved iteratively to find the time at which maximum
# erosion occurs, 't_max' (hrs)
import scipy.optimize as opt
t_max = opt.brentq(find_t_max,
a=T_d/2,
b=T_d)
# Finally calculate maximum shoreline recession and volumetric erosion for
# the given storm parameters.
R_max = R_inf*0.5*(1-np.cos(2*sigma*t_max)) # Eq. 13
V_max = V_inf*(R_max/R_inf)
# Turn this block on if need to debug
'''
print("R_max: {:.1f} (m)".format(R_max))
print("R_inf: {:.1f} (m)".format(R_inf))
print("R_max/R_inf: {:.2f}".format(R_max/R_inf))
print("V_max: {:.1f} (m**3/m)".format(V_max))
print("V_inf: {:.1f} (m**#/m)".format(V_inf))
    print("V_inf: {:.1f} (m**3/m)".format(V_inf))
print("t_max: {:.1f} (h)".format(t_max))
print("A: {:.3f}".format(A))
print("alpha: {:.3f} (1/h)".format(alpha))
print("beta: {:.3f}".format(beta))
print("sigma: {:.3f}".format(sigma))
'''
return (V_max, R_max, V_inf, R_inf)
def recovery(V_max, interim, T_a=400):
'''Calculate eroded sand-volume post recovery during storm interim.
Inputs:
        V_max = Initially eroded volume (m**3)
interim = Period of calm between storms (h)
T_a = Characteristic accretive timescale (h)
Outputs:
        V_recovered = Eroded volume remaining after recovery (m**3)
'''
from numpy import exp
V_recovered = V_max*exp(-1*interim/T_a) # Eq. 28 Callaghan et al. 2008
return V_recovered
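# --- Hedged usage sketch (not part of the original module) ---
# The storm and profile values below are illustrative assumptions, not calibrated
# inputs; the block only shows how kriebel_dean() and recovery() defined above
# are meant to be chained together.
if __name__ == "__main__":
    # Hypothetical storm: 2 cm/s fall velocity, 1.5 m berm, 3 m dune,
    # 20 m back-shore, 1:10 beach slope, 1 m surge, 24 h duration, 2 m breakers
    V_max, R_max, V_inf, R_inf = kriebel_dean(
        w_cm=2.0, B=1.5, D=3.0, W=20.0, m=0.1, S=1.0, T_d=24.0, H_b=2.0)
    print("Eroded volume: {:.1f} m**3/m, recession: {:.1f} m".format(V_max, R_max))
    # Erosion deficit remaining after a 200 h calm period between storms
    print("Remaining deficit: {:.1f} m**3/m".format(recovery(V_max, interim=200)))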
```
#### File: jdorvi/MonteCarlos_SLC/sediment_fall_velocity.py
```python
def fall_velocity(d_50, rho_s=2650, rho_w=1028, a=1, b=1, c=0.9):
    ''' Calculate sediment fall velocity based on the method presented in,
<NAME>., and <NAME>., 'Formulas for sediment porosity and settling
velocity' J. Hydraul. Eng., 2006, 132(8): 858-862
Inputs:
REQUIRED
d_50 = Sediment mass mean diameter grainsize (mm)
OPTIONAL
rho_s = 2650 (kg/m**3), sediment density (quartz value as default)
rho_w = 1028 (kg/m**3), sea water density (Gill's (1982) reference as
default. Where salinity = 35 PPT, temp = 5 degC, and pressure
= 0 dbar)
a,b,c = Lengths of the longest, intermediate, and shortest axes of the
sediment particle. (dimensions not important)
Returns:
w = sediment fall velocity (cm/s) '''
from numpy import exp
d = d_50 / 1000 # sediment mass mean diameter grainsize (m)
g = 9.8066 # gravitational acceleration (m/s/s)
s = rho_s/rho_w # relative density
dv = 0.0016193 # dynamic viscosity (kg/(m*s)), S=35PPT, temp=5degC, P=1atm
kv = dv/rho_w # m**2/s
S_f = c/((a*b)**0.5) # Corey shape factor
M = 53.5*exp(-0.65*S_f)
N = 5.65*exp(-2.5*S_f)
n = 0.7+0.9*S_f
    D_s = d*((s - 1)*(g/kv**2))**(1/3)  # dimensionless grain diameter, D* = d*((s - 1)*g/nu**2)**(1/3)
# Calculate fall-velocity
# Cut-off of 0.2557 mm was chosen based on approximate equivalence of
# methods at this grain-sized diameter
if d_50 > 0.2557:
'''Wu et al. 2006'''
w = (M*kv)/(N*d) * \
(((0.25+(4*N*D_s**3)/(3*M**2))**(1/n))**0.5 - 0.5)**n
elif d_50 <= 0.2557:
'''Stokes equation'''
w = 1/18 * (s-1)*(g*d**2)/kv
w *= 100 # convert m/s to cm/s
return w
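# --- Hedged usage sketch (not part of the original module) ---
# Illustrative grain sizes bracketing the 0.2557 mm cut-off, using the default
# quartz/sea-water densities; the values are assumptions for demonstration only.
if __name__ == "__main__":
    for d_example in (0.1, 0.2557, 0.3, 0.5):
        print("d_50 = {:.4f} mm -> w = {:.3f} cm/s".format(d_example, fall_velocity(d_example)))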
```
#### File: jdorvi/MonteCarlos_SLC/sinusoidal_regression.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm
# <codecell>
file_name = 'montauk_combined_data.csv'
df = pd.read_csv(file_name)
df.rename(columns=lambda x: x.strip().rstrip(), inplace=True)
df.end = pd.to_datetime(df.end)
df.set_index('end', inplace=True)
x = df['hsig'].groupby([df.index.month]).agg('count')/13
data = np.matrix((x.index.values,x.values))
data = data.T
dataf = pd.DataFrame(data=data)
#dataf.to_csv('count_month.csv', index=False, header=['month','frequency'])
# <codecell>
# The following code is directly modelled after a blog post by <NAME>
# on using the mcpy3 Python package.
# Source: http://www.databozo.com/2014/01/17/Exploring_PyMC3.html
trace = None
x_data = dataf[0]
x_data[12] = 13
x_data = (x_data-1)/12
y_data = dataf[1]
y_data[12] = y_data[0]
# <codecell>
def graph(formula, x_range, color='black', alpha=1):
x = np.array(x_range)
y = eval(formula)
plt.plot(x, y, color=color, alpha=alpha)
# <codecell>
with pm.Model() as model:
alpha = pm.Normal('alpha', mu=0, sd=20)
beta = pm.Normal('beta', mu=0, sd=20)
gamma = pm.Normal('gamma', mu=0, sd=20)
sigma = pm.Uniform('sigma', lower=0, upper=20)
y_est = alpha + beta*np.sin(2*np.pi*x_data) + gamma*np.cos(2*np.pi*x_data)
likelihood = pm.Normal('y', mu=y_est, sd=sigma, observed=y_data)
start = pm.find_MAP()
step = pm.NUTS(state=start)
trace = pm.sample(2000, step, start=start, progressbar=True) #very slow without theano
pm.traceplot(trace)
# <codecell>
plt.scatter(x_data,12*y_data)
for i in np.arange(0,1000):
#point = trace.point(i)
formula = '{0} + {1}*np.sin(2*np.pi*{3}) + {2}*np.cos(2*np.pi*{3})'
#graph(formula.format(point['alpha'],
# point['beta'],
# point['gamma']),
# np.arange(0,13/12,1/12),
# color='black',
# alpha=0.01)
# Model data fit: alpha=1.498, beta=-0.348, gamma=1.275
x10_data = np.arange(0,1.01,0.01)
y_calc = [eval(formula.format(12*1.498, 12*-0.348, 12*1.275, i)) for i in x10_data]
plt.plot(x10_data, y_calc, color='red')
#1.273,1.106,0.81,7.43)), color='red')
```
|
{
"source": "jdoshi8/RaWorkflowOrchestrator",
"score": 2
}
|
#### File: common/experimental/run_dag.py
```python
import json
from airflow.exceptions import DagRunAlreadyExists, DagNotFound
from airflow.models import DagRun, DagBag
from airflow.utils import timezone
from airflow.utils.state import State
def _run_dag(
dag_id,
dag_bag,
dag_run,
run_id,
conf,
replace_microseconds,
execution_date,
):
if dag_id not in dag_bag.dags:
raise DagNotFound("Dag id {} not found".format(dag_id))
dag = dag_bag.get_dag(dag_id)
if execution_date is None:
execution_date = timezone.utcnow()
assert timezone.is_localized(execution_date)
if replace_microseconds:
execution_date = execution_date.replace(microsecond=0)
if not run_id:
run_id = "manual__{0}".format(execution_date.isoformat())
dr = dag_run.find(dag_id=dag_id, run_id=run_id)
if dr:
raise DagRunAlreadyExists("Run id {} already exists for dag id {}".format(
run_id,
dag_id
))
run_conf = None
if conf:
        if isinstance(conf, dict):
run_conf = conf
else:
run_conf = json.loads(conf)
runs = list()
dags_to_trigger = list()
dags_to_trigger.append(dag)
from airflow.executors import get_default_executor
executor = get_default_executor()
while dags_to_trigger:
dag = dags_to_trigger.pop()
trigger = dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.NONE,
conf=run_conf,
external_trigger=True,
)
runs.append(trigger)
if dag.subdags:
dags_to_trigger.extend(dag.subdags)
dag.run(
start_date=execution_date,
end_date=execution_date,
mark_success=False,
executor=executor,
donot_pickle=True,
ignore_first_depends_on_past=True,
verbose=True,
rerun_failed_tasks=True)
return runs
def run_dag(
dag_id,
run_id=None,
conf=None,
replace_microseconds=True,
execution_date=None,
):
"""Runs DAG specified by dag_id
:param dag_id: DAG ID
:param run_id: ID of the dag_run
:param conf: configuration
:param replace_microseconds: whether microseconds should be zeroed
:return: first dag run - even if more than one Dag Runs were present or None
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise DagNotFound("Dag id {} not found in DagModel".format(dag_id))
dagbag = DagBag(dag_folder=dag_model.fileloc)
"""
dagbag = DagBag()
dag_run = DagRun()
runs = _run_dag(
dag_id=dag_id,
dag_run=dag_run,
dag_bag=dagbag,
run_id=run_id,
conf=conf,
replace_microseconds=replace_microseconds,
execution_date=execution_date,
)
return runs[0] if runs else None
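# Hedged usage sketch (comment only): assumes a configured Airflow environment
# in which the DAG below exists; "example_dag" and the conf payload are made up.
#
#   first_run = run_dag("example_dag", conf={"param": "value"})
#   if first_run is not None:
#       print(first_run.run_id)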
```
|
{
"source": "jdosoriopu/rasa",
"score": 2
}
|
#### File: core/channels/evachannel.py
```python
import logging
from typing import Text, Dict, Optional, Callable, Awaitable, Any
from sanic import Blueprint, response
from sanic.request import Request
from rasa.core.channels.channel import (
OutputChannel,
UserMessage,
RestInput,
InputChannel,
)
from rasa.utils.endpoints import EndpointConfig, ClientResponseError
from sanic.response import HTTPResponse
logger = logging.getLogger(__name__)
class EvaOutput(OutputChannel):
@classmethod
def name(cls) -> Text:
return "evachannel"
def __init__(self, endpoint: EndpointConfig, device: Text) -> None:
self.eva_endpoint = endpoint
self.device = device
super().__init__()
async def send_text_message(
self, recipient_id: Text, text: Text, **kwargs: Any
) -> None:
"""Send a message through this channel."""
try:
await self.eva_endpoint.request(
"post", content_type="application/json", json={ "from": self.device, "to": recipient_id, "text": text }
)
except ClientResponseError as e:
            logger.error(
                "Failed to send output message to the WhatsApp API. "
"Status: {} Response: {}"
"".format(e.status, e.text)
)
class EvaInput(RestInput):
@classmethod
def name(cls) -> Text:
return "evachannel"
@classmethod
def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel:
return cls(EndpointConfig.from_dict(credentials))
def __init__(self, endpoint: EndpointConfig) -> None:
self.eva_endpoint = endpoint
def blueprint(
self, on_new_message: Callable[[UserMessage], Awaitable[Any]]
) -> Blueprint:
evachannel_webhook = Blueprint("evachannel_webhook", __name__)
@evachannel_webhook.route("/", methods=["GET"])
async def health(_: Request):
return response.json({"status": "ok"})
@evachannel_webhook.route("/webhook", methods=["POST"])
async def webhook(request: Request) -> HTTPResponse:
sender = request.json.get("contact", None)
text = request.json.get("message", None)
device = request.json.get("device", None)
output_channel = self.get_output_channel(device)
if "#eva" in text.lower():
await on_new_message(
UserMessage("/restart", output_channel, sender, input_channel=self.name())
)
await on_new_message(
UserMessage("/start_bot", output_channel, sender, input_channel=self.name())
)
else:
await on_new_message(
UserMessage(text, output_channel, sender, input_channel=self.name())
)
return response.text("success")
return evachannel_webhook
def get_output_channel(self, device: Text) -> OutputChannel:
return EvaOutput(self.eva_endpoint, device)
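# Hedged configuration sketch (comment only): the credentials key below is an
# assumption based on EndpointConfig.from_dict, and the webhook payload shape
# mirrors the keys read in webhook() above ("contact", "message", "device").
#
#   credentials.yml
#     evachannel:
#       url: "https://example.invalid/send"   # hypothetical outbound endpoint
#
#   POST /webhooks/evachannel/webhook
#     {"contact": "<sender id>", "message": "hello", "device": "<device id>"}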
```
|
{
"source": "jdospina95/Thesis",
"score": 2
}
|
#### File: jdospina95/Thesis/.~c9_invoke_IfBWlH.py
```python
import os,sys
import subprocess
from functools import wraps  # wraps is used by the login-required decorator
from flask import Flask, render_template, request, redirect, jsonify, session
from bson.objectid import ObjectId  # added so documents can be queried by _id
from bson.json_util import dumps, loads  # serialization of Mongo ObjectId
import json
from pymongo import MongoClient  # PyMongo framework -> MongoDB
app = Flask(__name__)
app.secret_key = os.urandom(24)  # key used to sign the session
client = MongoClient('mongodb://usuario:<EMAIL>:31835/tesis')  # connection to MongoDB hosted on MongoLab
db = client['tesis']
usuarios = db.usuarios  # reference to the "Usuarios" collection of the DB
configuraciones = db.configuraciones  # reference to the "Configuraciones" collection of the DB
personas = db.personas  # reference to the "Personas" collection of the DB
comidas = db.comidas  # reference to the "Comidas" collection of the DB
cosas = db.cosas  # reference to the "Cosas" collection of the DB
def login_required(f):
@wraps(f)
def wrap():
if 'usuario' not in session:
return redirect('/IniciarSesion')
else:
return f()
    return wrap  # decorator that enforces an active session
@app.route('/', methods=['GET'])
def index():
global session
return render_template('index.html', sesion = session)
@app.route('/IniciarSesion', methods=['GET', 'POST'])
def iniciarsesion():
if request.method == 'POST':
        # changed to fetch the user from the DB and store the data in the session
        query = {"_id": ObjectId(request.form['nom_usuario'])}
        resultado = usuarios.find(query)[0]
        resultado = dumps(resultado)
        session['usuario'] = resultado
        # changed to fetch the user from the DB and store the data in the session
return redirect('/MenuInicio')
else:
reg = []
        for usuario in usuarios.find():  # .find() query = SELECT *
            data = usuario
            reg.append({'_id':data['_id'], 'nombre':data['nombre'], 'apellido':data['apellido']})  # _id field added so the logged-in user can be looked up
return render_template('iniciarSesion.html',usuarios=reg)
@app.route('/RegistrarUsuario', methods=['GET', 'POST'])
def registrarusuario():
    #usuarios.delete_many({})  # deletes every record in the usuarios collection
if request.method == 'POST':
fdata = request.form
data = {}
for (k,v) in fdata.items():
data[k]=v
        data['configuracion'] = configuraciones.find({"nombre":"Default"})[0]['_id']  #'default'  # default configuration is assigned when registering a user
        usuarios.insert_one(data)  # inserts a single record into the "Usuarios" collection
return redirect('/')
else:
return render_template('registrarUsuario.html')
@app.route('/MenuInicio')
@login_required  # requires an active session
def menuInicio():
global session
return render_template('menuInicio.html', sesion = session)
#TODO: the selected configuration still needs to be persisted with an update
@app.route('/Configuracion', methods=['GET', 'POST'])
@login_required  # requires an active session
def configuracion():
if request.method == 'POST':
deserializedSession = loads(session['usuario'])
query = { "_id": ObjectId(deserializedSession['_id'])}
deserializedSession['configuracion'] = request.form['id_configuracion']
nuevaConfiguracion = { "$set": { "configuracion": request.form['id_configuracion'] } }
usuarios.update_one(query, nuevaConfiguracion)
session['usuario'] = dumps(deserializedSession)
return redirect('/MenuInicio')
else:
        # Seed data for the Configuracion collection. Kept here until it is decided how this data gets loaded.
        #configuraciones.delete_many({})  # deletes every record in the configuraciones collection
        #insert = [
        #    {"nombre": "Default", "tamano_fuente": "12", "color_fuente": "Negro", "color_fondo": "Verde"},
        #    {"nombre": "Ciegos", "tamano_fuente": "12", "color_fuente": "Negro", "color_fondo": "Rojo"},
        #    {"nombre": "<NAME>", "tamano_fuente": "9", "color_fuente": "Blanco", "color_fondo": "Amarillo"}
        #]
        #configuraciones.insert_many(insert)  # inserts a list of configurations into the collection
        # Seed data for the Configuracion collection. Kept here until it is decided how this data gets loaded.
        reg = []
        for configuracion in configuraciones.find():  # .find() query = SELECT *
data = configuracion
reg.append({'_id':data['_id'],'nombre':data['nombre'], 'tamano_fuente':data['tamano_fuente'], 'color_fuente':data['color_fuente'], 'color_fondo':data['color_fondo']})
return render_template('configuracion.html',configuraciones=reg)
if __name__ == '__main__':
app.run(host=os.getenv('IP', '0.0.0.0'),port=int(os.getenv('PORT', 8080)))
```
|
{
"source": "jdossgollin/2018-paraguay-floods",
"score": 3
}
|
#### File: 2018-paraguay-floods/notebooks/visualize.py
```python
import cartopy.feature
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import matplotlib.pyplot as plt
import numpy as np
def _format_axis(plt_ax, **kwargs):
"""Function to format a single axis. Should not be called directly.
"""
coast = kwargs.pop('coast', True)
grid = kwargs.pop('grid', False)
river = kwargs.pop('river', False)
border = kwargs.pop('border', False)
states = kwargs.pop('states', False)
extent = kwargs.pop('extent', None)
feature_list = kwargs.pop('feature_list', None)
xticks = kwargs.pop('xticks', None)
yticks = kwargs.pop('yticks', None)
crs = kwargs.pop('crs', ccrs.PlateCarree())
plt_ax.axes.get_xaxis().set_ticklabels([])
plt_ax.axes.get_yaxis().set_ticklabels([])
plt_ax.set_xlabel('')
plt_ax.set_ylabel('')
if coast:
plt_ax.coastlines()
if border:
plt_ax.add_feature(cartopy.feature.BORDERS)
if river:
plt_ax.add_feature(cartopy.feature.RIVERS)
if states:
states = cartopy.feature.NaturalEarthFeature(
category='cultural', name='admin_1_states_provinces_lines',
scale='50m', facecolor='none'
)
plt_ax.add_feature(states, edgecolor='gray')
if xticks is not None:
plt_ax.set_xticks(xticks, crs=crs)
lon_formatter = LongitudeFormatter()
plt_ax.xaxis.set_major_formatter(lon_formatter)
if yticks is not None:
plt_ax.set_yticks(yticks, crs=crs)
lat_formatter = LatitudeFormatter()
plt_ax.yaxis.set_major_formatter(lat_formatter)
if grid:
if xticks is not None and yticks is None:
plt_ax.gridlines(xlocs=xticks)
elif xticks is None and yticks is not None:
plt_ax.gridlines(ylocs=yticks)
elif xticks is None and yticks is None:
plt_ax.gridlines()
elif xticks is not None and yticks is not None:
plt_ax.gridlines(xlocs=xticks, ylocs=yticks)
if feature_list is not None:
for f in feature_list:
plt_ax.add_feature(f)
if extent is not None:
plt_ax.set_extent(extent, crs=crs)
def format_axes(axes, **kwargs):
"""Format one or more axes.
Passes all arguments to _format_axis
"""
if isinstance(axes, np.ndarray):
# There are multiple axes, format each of them
for ax in axes.flat:
_format_axis(ax, **kwargs)
else:
# There is just one
_format_axis(axes, **kwargs)
def get_row_col(i, axes):
"""Get the ith element of axes
"""
if isinstance(axes, np.ndarray):
# it contains subplots
if len(axes.shape) == 1:
return axes[i]
elif len(axes.shape) == 2:
nrow = axes.shape[0]
ncol = axes.shape[1]
row_i = i // ncol
col_i = i - (row_i * ncol)
return axes[row_i, col_i]
else:
            raise ValueError('This is a 3-dimensional subplot, this function can only handle 2D')
else:
return axes
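# --- Hedged usage sketch (not part of the original module) ---
# The extent and tick values are arbitrary examples roughly covering Paraguay;
# cartopy will download Natural Earth shapefiles the first time this runs.
if __name__ == "__main__":
    fig, axes = plt.subplots(
        1, 2, subplot_kw={"projection": ccrs.PlateCarree()}, figsize=(8, 4))
    format_axes(
        axes, border=True, river=True,
        extent=[-62, -54, -28, -19],
        xticks=np.arange(-62, -53, 4), yticks=np.arange(-28, -18, 4))
    for i in range(2):
        get_row_col(i, axes).set_title("panel {}".format(i))
    fig.savefig("example_axes.pdf")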
```
#### File: src/get/download_elevation.py
```python
import argparse
import os
import xarray as xr
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser() # pylint: disable=C0103
parser.add_argument("--outfile", help="the filename of the data to save")
def download_data(outfile):
"""Download the elevation data
"""
# read in the data
url = "http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NGDC/.GLOBE/.topo/"
url += "X/-180/0.025/180/GRID/Y/-90/0.025/90/GRID/dods"
data = xr.open_dataarray(url) # doesn't follow time conventions
# save to file
if os.path.isfile(outfile):
os.remove(outfile)
data.to_netcdf(outfile, format="NETCDF4", mode="w")
def main():
"""Parse the command line arguments and run download_data().
"""
args = parser.parse_args()
outfile = os.path.abspath(args.outfile)
download_data(outfile=outfile)
if __name__ == "__main__":
main()
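# Example invocation (hypothetical output path):
#   python download_elevation.py --outfile data/elevation.nc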
```
#### File: src/get/download_nino34.py
```python
import argparse
import os
import pandas as pd
import numpy as np
from datetime import datetime
parser = argparse.ArgumentParser() # pylint: disable=C0103
parser.add_argument("--syear", help="the first year to retain")
parser.add_argument("--eyear", help="the last year to retain")
parser.add_argument("--outfile", help="the filename of the data to save")
def download_data(sdate, edate, outfile):
"""Load in the NINO 3.4 Data
"""
url = "http://iridl.ldeo.columbia.edu/SOURCES/.Indices/.nino/.EXTENDED/.NINO34/gridtable.tsv"
col_name = "nino_34"
nino_34 = pd.read_table(
url,
delim_whitespace=True,
index_col=None,
skiprows=2,
names=["time", "{}".format(col_name)],
)
# the times don't make sense, parse them in
nino_34["time"] = np.int_(np.floor(nino_34["time"]))
nino_34["year"] = 1960 + nino_34["time"] // 12
nino_34["month"] = 1 + nino_34["time"] % 12
nino_34["day"] = 1
nino_34["time"] = pd.to_datetime(nino_34[["year", "month", "day"]])
nino_34.set_index("time", inplace=True)
nino_34 = nino_34[["{}".format(col_name)]]
nino_34 = nino_34.loc[sdate:edate]
nino_34 = nino_34.to_xarray()
# save to file
if os.path.isfile(outfile):
os.remove(outfile)
nino_34.to_netcdf(outfile, format="NETCDF4", mode="w")
def main():
"""Parse the command line arguments and run download_data().
"""
args = parser.parse_args()
outfile = os.path.abspath(args.outfile)
sdate = datetime(int(args.syear), 1, 1)
edate = datetime(int(args.eyear), 12, 31)
download_data(sdate=sdate, edate=edate, outfile=outfile)
if __name__ == "__main__":
main()
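# Example invocation (hypothetical years and output path):
#   python download_nino34.py --syear 1979 --eyear 2016 --outfile data/nino34.nc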
```
#### File: src/get/download_reanalysis_year.py
```python
import argparse
import os
import xarray as xr
import numpy as np
parser = argparse.ArgumentParser() # pylint: disable=C0103
parser.add_argument("--outfile", help="the filename of the data to save")
parser.add_argument("--year", help="the year of data to download")
parser.add_argument("--coord_system", help="the coordinate system containing the data")
parser.add_argument("--var", help="the name of the variable")
parser.add_argument("--level", help="the pressure level")
def download_data(coord_system, var, year, level, outfile):
"""Download a single year of reanalysis V2 data
"""
# Open a connection with the DODs URL
base_url = "https://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis2"
full_url = "{}/{}/{}.{}.nc".format(base_url, coord_system, var, year)
data = xr.open_dataset(full_url, decode_cf=False).sel(level=level)
# Need to have the variable name since we created a data set not array
varname = list(data.data_vars.keys())[0]
# Have to mess around a bit with the cf conventions for this data set
data[varname].attrs.pop("missing_value")
data = xr.decode_cf(data, mask_and_scale=True, decode_times=True)[varname]
# Save to file
if os.path.isfile(outfile):
os.remove(outfile)
data.to_netcdf(outfile, format="NETCDF4", mode="w")
def main():
"""Parse the command line arguments and run download_data().
"""
args = parser.parse_args()
outfile = os.path.abspath(args.outfile)
year = int(args.year)
coord_system = args.coord_system
var = args.var
level = int(args.level)
download_data(
year=year, outfile=outfile, coord_system=coord_system, var=var, level=level
)
if __name__ == "__main__":
main()
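# Example invocation (hypothetical values; the coordinate system, variable and
# level names are assumed to follow the NCEP Reanalysis 2 THREDDS catalogue):
#   python download_reanalysis_year.py --year 2015 --coord_system pressure \
#       --var uwnd --level 850 --outfile data/uwnd_850_2015.nc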
```
#### File: src/process/calculate_streamfunction.py
```python
import argparse
import os
import xarray as xr
import numpy as np
from windspharm.xarray import VectorWind
parser = argparse.ArgumentParser() # pylint: disable=C0103
parser.add_argument("--outfile", help="the filename of the data to save")
parser.add_argument("--uwnd", help="path to zonal wind file")
parser.add_argument("--vwnd", help="path to meridional wind file")
def calculate_streamfunction(uwnd, vwnd):
"""Calculate the Streamfunction
"""
uwnd_ds = xr.open_dataarray(uwnd)
vwnd_ds = xr.open_dataarray(vwnd)
wind = VectorWind(uwnd_ds, vwnd_ds)
psi = wind.streamfunction()
return psi
def main():
"""Run everything
"""
args = parser.parse_args()
uwnd = os.path.abspath(args.uwnd)
vwnd = os.path.abspath(args.vwnd)
outfile = os.path.abspath(args.outfile)
psi = calculate_streamfunction(uwnd, vwnd)
if os.path.isfile(outfile):
os.remove(outfile)
psi.to_netcdf(outfile, format="NETCDF4", mode="w")
if __name__ == "__main__":
main()
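# Example invocation (hypothetical file names; the inputs are NetCDF wind files
# such as those produced by the download scripts in this repository):
#   python calculate_streamfunction.py --uwnd data/uwnd_850.nc \
#       --vwnd data/vwnd_850.nc --outfile data/streamfunction_850.nc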
```
|
{
"source": "jdossgollin/2018-robust-adaptation-cyclical-risk",
"score": 3
}
|
#### File: codebase/statfit/nonstationary.py
```python
import os
import numpy as np
from . import StatisticalModel
from ..path import data_path
from ..util import compile_model
class LN2LinearTrend(StatisticalModel):
"""Lognormal Model with linear trend and constant CV
"""
def __init__(self, **kwargs) -> None:
self.model_file = os.path.abspath(os.path.join(data_path, "ln2-trend.stan"))
model_param: dict = {
"mu0_mean": kwargs.pop("mu0_mean", 10),
"mu0_sd": kwargs.pop("mu0_sd", 1),
"beta_mu_mean": kwargs.pop("beta_mu_mean", 0),
"beta_mu_sd": kwargs.pop("beta_mu_sd", 0.5),
"cv_logmean": kwargs.pop("cv_logmean", np.log(0.1)),
"cv_logsd": kwargs.pop("cv_logsd", 0.5),
"n_warmup": kwargs.pop("n_warmup", 1000),
"n_chain": kwargs.pop("n_chain", 1),
}
super().__init__(**kwargs)
self.param.update(model_param)
self.model_name = "LN2 Linear Trend"
def _calculate_one(self, data) -> np.ndarray:
stan_data = {"y": data, "N": self.N, "M": self.M}
for param in [
"mu0_mean",
"mu0_sd",
"beta_mu_mean",
"beta_mu_sd",
"cv_logmean",
"cv_logsd",
]:
stan_data.update({"{}".format(param): self.param.get(param)})
stan_mod = compile_model(
filename=self.model_file, model_name="LN2-Linear-Trend"
)
n_iter: int = self.param.get("n_mcsim") + self.param.get("n_warmup")
fit = stan_mod.sampling(
data=stan_data,
iter=n_iter,
chains=self.param.get("n_chain"),
warmup=self.param.get("n_warmup"),
)
fit_dict = fit.extract(permuted=True)
return fit_dict["yhat"]
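# Hedged usage sketch (comment only): the keyword arguments mirror the priors
# popped in __init__ above; N, M and n_mcsim are assumed to be supplied by the
# StatisticalModel base class, which is defined elsewhere in the package.
#
#   model = LN2LinearTrend(mu0_mean=10, mu0_sd=1, beta_mu_mean=0, beta_mu_sd=0.5)
#   draws = model._calculate_one(observed_log_flows)  # posterior draws of yhat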
```
#### File: codebase/synthetic/nino3.py
```python
import os
import numpy as np
import pandas as pd
import xarray as xr
from datetime import datetime
from .synthetic import SyntheticFloodSequence
from ..path import data_path
class NINO3Linear(SyntheticFloodSequence):
"""Draw streamflow sequences based on a linear relationship with a NINO3 index/
NINO3 data from Ramesh et al (2017)
"""
def __init__(self, **kwargs) -> None:
model_param = {
"mu0": kwargs.pop("mu0"),
"gamma": kwargs.pop("gamma", 0),
"beta": kwargs.pop("beta", 0.5),
"coeff_var": kwargs.pop("coeff_var", 0.1),
"sigma_min": kwargs.pop("sigma_min", 0.01),
}
super().__init__(**kwargs)
self.param.update(model_param)
self.model_name = "NINO3"
def _calculate_one(self) -> np.ndarray:
"""Run the calculation
"""
np.random.seed(datetime.now().microsecond)
filename = os.path.join(data_path, "ramesh2017.csv")
nino3 = pd.read_csv(filename, index_col="year")
valid_start_years = np.arange(nino3.index.max() - (self.M + self.N))
syear = np.random.choice(valid_start_years)
eyear = syear + self.N + self.M - 1
nino3_sub = nino3.loc[syear:eyear]["nino3"].values
mu = (
self.param.get("mu0")
+ self.param.get("gamma") * self._get_time(period="all")
+ self.param.get("beta") * nino3_sub
)
sigma = self.param.get("coeff_var") * mu
sigma[np.where(sigma < self.param.get("sigma_min"))] = self.param.get(
"sigma_min"
)
sflow = np.exp(np.random.normal(loc=mu, scale=sigma))
return sflow
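# Hedged usage sketch (comment only): mu0/beta/gamma/coeff_var are the keywords
# popped above; N, M and _get_time() are assumed to come from the
# SyntheticFloodSequence base class, which is defined elsewhere in the package.
#
#   generator = NINO3Linear(mu0=7.0, beta=0.5, gamma=0.0, coeff_var=0.1, ...)
#   flows = generator._calculate_one()  # one synthetic N+M year streamflow draw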
```
|
{
"source": "jdossgollin/leveesim",
"score": 3
}
|
#### File: leveesim/leveesim/flood.py
```python
from collections import OrderedDict
import multiprocessing
import os
from typing import Tuple
from numba import jitclass, float64
import pandas as pd
from .util import compile_model, hash_string, DATA_DIR, CACHE_DIR, gev_cdf, random_gev
from .param import FloodPriorParameters
@jitclass(
OrderedDict(
loc_base=float64,
loc_trend=float64,
coeff_var=float64,
shape=float64,
zero_time=float64,
scale_min=float64,
)
)
class FloodModel:
"""
    This is the state of the world, i.e. the simulation-specific set of
    parameters governing the storm-surge (flood) distribution
"""
def __init__(
self,
loc_base: float,
loc_trend: float,
coeff_var: float,
shape: float,
zero_time: float,
scale_min: float,
) -> None:
"""
Build the model
"""
self.loc_base = loc_base
self.loc_trend = loc_trend
self.coeff_var = coeff_var
self.shape = shape
self.zero_time = zero_time
self.scale_min = scale_min
def get_gev_param(self, year: float) -> Tuple[float, float, float]:
"""
Get the location, scale, and shape parameter for a particular year
"""
year_adj = year - self.zero_time
loc = self.loc_base + self.loc_trend * year_adj
scale = max(loc * self.coeff_var, self.scale_min)
shape = self.shape
return loc, scale, shape
def simulate_flood(self, year: float) -> float:
"""
Simulate a storm surge for a particular year
"""
loc, scale, shape = self.get_gev_param(year=year)
return random_gev(loc=loc, scale=scale, shape=shape)
def calc_exceedance_prob(self, year: float, height: float) -> float:
"""
Calculate the probability that the flood in `year` is greater than `height`
"""
loc, scale, shape = self.get_gev_param(year=year)
return 1.0 - gev_cdf(x=height, loc=loc, scale=scale, shape=shape)
class FloodSimulationLibrary:
"""
Stores a large number of simulations of the flood parameters
"""
def __init__(self, param: FloodPriorParameters, **kwargs) -> None:
"""
Build a library of simulation parameters
"""
self.param = param
simulation_fname = self.get_filename()
try:
self.simulations = pd.read_feather(simulation_fname)
except IOError:
self.simulations = self.create_simulation_library(**kwargs)
self.simulations.to_feather(simulation_fname)
def get_filename(self) -> str:
"""
Get a unique filename for the library where the resulting library
can be stored
"""
simulation_hash = hash_string(OrderedDict(self.param._asdict()).__str__())
return os.path.join(CACHE_DIR, f"{simulation_hash}.feather")
def create_simulation_library(
self,
n_sim: int = 100_000,
n_chains: int = multiprocessing.cpu_count(),
n_warmup: int = 2500,
) -> pd.DataFrame:
"""
        Run the GEV trend model to create many simulations of
        the FloodModel parameters
"""
sea_level = pd.read_csv(
os.path.join(DATA_DIR, "sea-level-annual.csv"), index_col="year"
)
stan_fname = os.path.join(DATA_DIR, "gev-trend.stan")
model = compile_model(filename=stan_fname, model_name="stormsurge")
stan_data = dict(
N=sea_level.shape[0],
y=sea_level["storm_surge"].values,
time=sea_level.index.values,
# PRIORS
time_offset=self.param.zero_time,
loc_base_expected=self.param.loc_base_expected,
loc_base_std=self.param.loc_base_std,
loc_trend_expected=self.param.loc_trend_expected,
loc_trend_std=self.param.loc_trend_std,
coeff_var_expected=self.param.coeff_var_expected,
coeff_var_std=self.param.coeff_var_std,
sigma_min=self.param.sigma_min,
)
# calculate the number of iterations needed
n_iter = n_sim / n_chains + n_warmup
fitted = model.sampling(
data=stan_data, chains=int(n_chains), iter=int(n_iter), warmup=int(n_warmup)
)
return pd.DataFrame(
fitted.extract(["loc_base", "loc_trend", "coeff_var", "shape"])
)
def build_model(self) -> FloodModel:
"""
Get a model with randomly sampled state of the world
"""
row = self.simulations.sample(1, axis=0)
return FloodModel(
zero_time=self.param.zero_time,
loc_base=row["loc_base"].values[0],
loc_trend=row["loc_trend"].values[0],
coeff_var=row["coeff_var"].values[0],
shape=row["shape"].values[0],
scale_min=self.param.sigma_min,
)
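# Hedged usage sketch (comment only): the FloodPriorParameters fields are
# assumed from param.py (not shown), and the year is an arbitrary example.
#
#   priors = FloodPriorParameters(zero_time=2000, ...)          # hypothetical priors
#   library = FloodSimulationLibrary(priors)                    # fits or loads cached Stan draws
#   surge_model = library.build_model()                         # one sampled state of the world
#   surge_cm = surge_model.simulate_flood(year=2050)
#   p_exceed = surge_model.calc_exceedance_prob(year=2050, height=500.0)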
```
#### File: leveesim/leveesim/numeric.py
```python
from collections import OrderedDict
from numba import jitclass, float64, int64
import numpy as np
from .costs import calc_insurance_premium, calc_construction_cost
from .exposure import ExposureModel
from .flood import FloodModel
from .sealevel import SeaLevelModel
@jitclass(
OrderedDict(
# PARAMETERS
initial_height_cm=float64,
construction_quadratic=float64,
construction_linear=float64,
construction_fixed=float64,
# VALUES
construction_cost_euro=float64[:],
exposed_value_euro=float64[:],
insurance_premium_euro=float64[:],
levee_height_cm=float64[:],
mean_sea_level_cm=float64[:],
storm_surge_cm=float64[:],
year=float64[:],
index=int64,
# MODULES
exposure_model=ExposureModel.class_type.instance_type, # type: ignore
flood_model=FloodModel.class_type.instance_type, # type: ignore
sea_level_model=SeaLevelModel.class_type.instance_type, # type: ignore
# HISTORICAL DATA
historical_length=int64,
historical_exposure=float64[:],
historical_sea_level=float64[:],
historical_surge=float64[:],
)
)
class SimulationModel:
"""
All the number crunching of the model goes here
You won't want to build this by itself, as it takes in as arguments
    a bunch of models that are messy to build. Instead, let the LeveeEnv wrapper construct it for you.
"""
def __init__(
self,
start_year: int,
end_year: int,
construction_quadratic: float,
construction_linear: float,
construction_fixed: float,
exposure_model: ExposureModel,
flood_model: FloodModel,
sea_level_model: SeaLevelModel,
historical_sea_level: np.ndarray,
historical_surge: np.ndarray,
historical_exposure: np.ndarray,
initial_height_cm: float,
) -> None:
"""
Build the model
"""
# get the historical data
self.historical_length = historical_exposure.size
assert historical_sea_level.size == self.historical_length
assert historical_surge.size == self.historical_length
self.historical_surge = historical_surge.astype(np.float64)
self.historical_sea_level = historical_sea_level.astype(np.float64)
self.historical_exposure = historical_exposure.astype(np.float64)
# initialize values
self.index = -1
self.year = np.arange(start_year, end_year + 1).astype(np.float64)
self.construction_cost_euro = (np.nan * np.ones_like(self.year)).astype(
np.float64
)
self.exposed_value_euro = (np.nan * np.ones_like(self.year)).astype(np.float64)
self.insurance_premium_euro = (np.nan * np.ones_like(self.year)).astype(
np.float64
)
self.levee_height_cm = (np.nan * np.ones_like(self.year)).astype(np.float64)
self.mean_sea_level_cm = (np.nan * np.ones_like(self.year)).astype(np.float64)
self.storm_surge_cm = (np.nan * np.ones_like(self.year)).astype(np.float64)
# set up the three models of exogenous variables
self.exposure_model = exposure_model
self.flood_model = flood_model
self.sea_level_model = sea_level_model
# update the exposure model to use historical exposure
self.exposure_model.exposure_euro = self.historical_exposure[-1]
# store parameters
self.construction_quadratic = construction_quadratic
self.construction_linear = construction_linear
self.construction_fixed = construction_fixed
self.initial_height_cm = initial_height_cm
def step(self, height_increase_cm: float) -> float:
"""
Take the action, update the environment, and get the reward
IMPORTANT:
This *will* throw an error unless you manually update some of the values,
as done in the LeveeEnv
"""
self.index += 1
i = self.index # for shorthand
year = self.year[i] # shorthand
# if historical, we don't need to step the exogenous models forward
if i < self.historical_length:
self.levee_height_cm[i] = self.initial_height_cm
self.mean_sea_level_cm[i] = self.historical_sea_level[i]
self.storm_surge_cm[i] = self.historical_surge[i]
self.exposed_value_euro[i] = self.historical_exposure[i]
# if it's not historical, we need to step the exogenous models forward
else:
self.levee_height_cm[i] = self.levee_height_cm[i - 1] + height_increase_cm
self.mean_sea_level_cm[i] = self.sea_level_model.simulate_msl(year=year)
self.storm_surge_cm[i] = self.flood_model.simulate_flood(year=year)
self.exposed_value_euro[i] = self.exposure_model.simulate_exposure()
# calculate the dependent variables
flood_prob = self.flood_model.calc_exceedance_prob(
year=year, height=self.levee_height_cm[i] - self.mean_sea_level_cm[i]
)
self.insurance_premium_euro[i] = calc_insurance_premium(
exposed_value_euro=self.exposed_value_euro[i], flood_prob=flood_prob
)
self.construction_cost_euro[i] = calc_construction_cost(
levee_height_cm=self.levee_height_cm[i],
height_increase_cm=height_increase_cm,
construction_quadratic=self.construction_quadratic,
construction_linear=self.construction_linear,
construction_fixed=self.construction_fixed,
)
# done, calculate reward
total_cost = self.insurance_premium_euro[i] + self.construction_cost_euro[i]
reward = -total_cost
return reward
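# Hedged sketch of the intended stepping loop (comment only): the class
# docstring above says this is normally driven by a LeveeEnv wrapper, so the
# construction of the exposure/flood/sea-level sub-models is elided here.
#
#   sim = SimulationModel(start_year=1970, end_year=2100, ...,
#                         initial_height_cm=425.0)              # hypothetical inputs
#   for _ in range(sim.year.size):
#       reward = sim.step(height_increase_cm=0.0)               # "do nothing" policy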
```
|
{
"source": "jdost321/factools",
"score": 2
}
|
#### File: factools/libexec/auto_downtimes.py
```python
import urllib2
import xml.parsers.expat
import time
import os
import ssl
from glideinwms.creation.lib import factoryXmlConfig
def get_dt_format(t_struct):
return time.strftime("%Y-%m-%dT%H:%M:%S+00:00", t_struct)
downtimes = {}
entry_downtimes = {}
str_buf = []
###### OSG downtimes xml parsing variables and callbacks
# only collect downtimes if services are relevant
# 1 = CE
relevant_osg_services = set(['1'])
in_services = False
def osg_start_element(name, attrs):
global cur_el
global services
global in_services
global str_buf
cur_el = name
if name == 'Services':
services = []
in_services = True
elif name == 'ResourceFQDN' or name == 'StartTime' or name == 'EndTime' or name == 'ID' or name == 'Description':
str_buf = []
def osg_char_data(data):
if cur_el == 'ResourceFQDN' or cur_el == 'StartTime' or cur_el == 'EndTime' or cur_el == 'ID' or cur_el == 'Description':
str_buf.append(data)
def osg_end_element(name):
global in_services
global hostname
global start_time
global end_time
global descript
if name == 'Downtime':
relevant = False
for s in services:
if s in relevant_osg_services:
relevant = True
break
if relevant:
if hostname not in downtimes:
downtimes[hostname] = []
downtimes[hostname].append({'start': start_time, 'end': end_time, 'desc': descript})
elif name == 'ResourceFQDN':
hostname = "".join(str_buf)
elif name == 'StartTime':
start_time = time.strptime("".join(str_buf), "%b %d, %Y %H:%M %p UTC")
elif name == 'EndTime':
end_time = time.strptime("".join(str_buf), "%b %d, %Y %H:%M %p UTC")
elif name == 'ID' and in_services:
services.append("".join(str_buf))
elif name == 'Description' and not in_services:
descript = "".join(str_buf)
elif name == 'Services':
in_services = False
###### END OSG downtimes xml parsing variables and callbacks
###### EGI downtimes xml parsing variables and callbacks
relevant_egi_services = set(['CREAM-CE','ARC-CE', 'org.opensciencegrid.htcondorce'])
def egi_start_element(name, attrs):
global cur_el
global str_buf
cur_el = name
if name == 'HOSTNAME' or name == 'START_DATE' or name == 'END_DATE' or name == 'DESCRIPTION' or name == 'SERVICE_TYPE' or name == 'SEVERITY':
str_buf = []
def egi_char_data(data):
if cur_el == 'HOSTNAME' or cur_el == 'START_DATE' or cur_el == 'END_DATE' or cur_el == 'DESCRIPTION' or cur_el == 'SERVICE_TYPE' or cur_el == 'SEVERITY':
str_buf.append(data)
def egi_end_element(name):
global hostname
global start_time
global end_time
global descript
global service
global severity
if name == 'DOWNTIME' and service in relevant_egi_services and severity == 'OUTAGE':
if hostname not in downtimes:
downtimes[hostname] = []
downtimes[hostname].append({'start': start_time, 'end': end_time, 'desc': descript})
elif name == 'HOSTNAME':
hostname = "".join(str_buf)
elif name == 'START_DATE':
start_time = time.gmtime(float("".join(str_buf)))
elif name == 'END_DATE':
end_time = time.gmtime(float("".join(str_buf)))
elif name == 'DESCRIPTION':
descript = "".join(str_buf)
elif name == 'SERVICE_TYPE':
service = "".join(str_buf)
elif name == 'SEVERITY':
severity = "".join(str_buf)
###### END EGI downtimes xml parsing variables and callbacks
# Try GLIDEIN_FACTORY_DIR env var first
if 'GLIDEIN_FACTORY_DIR' in os.environ:
gfactory_dir=os.environ['GLIDEIN_FACTORY_DIR']
# is it an rpm install?
elif os.path.isdir("/var/lib/gwms-factory/work-dir"):
gfactory_dir="/var/lib/gwms-factory/work-dir"
else:
gfactory_dir="."
url = 'https://topology.opensciencegrid.org/rgdowntime/xml'
dt_xml = urllib2.urlopen(url)
#dt_xml = open("down.xml")
#for line in dt_xml:
# print line,
#fout = open('osg_debug.xml','w')
#for line in dt_xml:
# fout.write(line)
#fout.close()
#dt_xml.seek(0)
xmlparser = xml.parsers.expat.ParserCreate()
xmlparser.StartElementHandler = osg_start_element
xmlparser.EndElementHandler = osg_end_element
xmlparser.CharacterDataHandler = osg_char_data
xmlparser.ParseFile(dt_xml)
dt_xml.close()
egi_url = 'https://goc.egi.eu/gocdbpi/public/?method=get_downtime&ongoing_only=yes'
#dt_xml = open("egi_down.xml")
dt_xml = urllib2.urlopen(egi_url, context=ssl._create_unverified_context())
xmlparser = xml.parsers.expat.ParserCreate()
xmlparser.StartElementHandler = egi_start_element
xmlparser.EndElementHandler = egi_end_element
xmlparser.CharacterDataHandler = egi_char_data
xmlparser.ParseFile(dt_xml)
dt_xml.close()
conf_path = "/etc/gwms-factory/glideinWMS.xml"
conf = factoryXmlConfig.parse(conf_path)
for entry in conf.get_child_list('entries'):
if entry['enabled'] == 'True':
if entry['gridtype'] == 'gt2' or entry['gridtype'] == 'gt5' or entry['gridtype'] == 'cream':
hostname = entry['gatekeeper'].split(':')[0]
# works for nordugrid and condor-ce
else:
hostname = entry['gatekeeper'].split()[0]
if hostname in downtimes:
entry_downtimes[entry['name']] = downtimes[hostname]
#for dt in downtimes[hostname]:
# print "%s %s %s All All # _ad_ %s" % (get_dt_format(dt['start']), get_dt_format(dt['end']), attrs['name'], ";".join(dt['desc'].split('\n')))
dt_file = open(os.path.join(gfactory_dir, "glideinWMS.downtimes"))
manual_dts = []
for line in dt_file:
lines = line.split("#")
# _force_ means don't consider for auto downtime at all
# include in list of manual downtimes, and remove from aggregated list
if '_force_' in lines[1]:
manual_dts.append(line)
entry = lines[0].split()[2]
if entry in entry_downtimes:
del entry_downtimes[entry]
elif '_ad_' not in lines[1]:
manual_dts.append(line)
dt_file.close()
new_dt_file = open(os.path.join(gfactory_dir, "glideinWMS.downtimes.tmp"), 'w')
for entry in sorted(entry_downtimes):
for dt in entry_downtimes[entry]:
new_dt_file.write("%s %s %s All All # _ad_ " % (get_dt_format(dt['start']), get_dt_format(dt['end']), entry))
desc_str = ";".join(dt['desc'].split('\n'))
try:
new_dt_file.write("%s\n" % desc_str)
except UnicodeEncodeError as ue:
print "Unicode not allowed; skipping description for %s: %s" % (entry, ue)
new_dt_file.write("\n")
for dt in manual_dts:
new_dt_file.write(dt)
new_dt_file.close()
os.rename(os.path.join(gfactory_dir, "glideinWMS.downtimes.tmp"), os.path.join(gfactory_dir, "glideinWMS.downtimes"))
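# Hedged example of a resulting glideinWMS.downtimes line (entry name, dates and
# description are made up): '_ad_' marks rows generated by this script, while
# '_force_' protects manually pinned rows from being considered for auto downtime.
#   2024-01-10T00:00:00+00:00 2024-01-12T00:00:00+00:00 EXAMPLE_Entry All All # _ad_ scheduled maintenance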
```
|
{
"source": "jdost/dd-trace-py",
"score": 2
}
|
#### File: dd-trace-py/ddtrace/api.py
```python
import time
import ddtrace
from json import loads
import socket
# project
from .encoding import get_encoder, JSONEncoder
from .compat import httplib, PYTHON_VERSION, PYTHON_INTERPRETER, get_connection_response
from .internal.logger import get_logger
from .internal.runtime import container
from .payload import Payload, PayloadFull
from .utils.deprecation import deprecated
log = get_logger(__name__)
_VERSIONS = {'v0.4': {'traces': '/v0.4/traces',
'services': '/v0.4/services',
'compatibility_mode': False,
'fallback': 'v0.3'},
'v0.3': {'traces': '/v0.3/traces',
'services': '/v0.3/services',
'compatibility_mode': False,
'fallback': 'v0.2'},
'v0.2': {'traces': '/v0.2/traces',
'services': '/v0.2/services',
'compatibility_mode': True,
'fallback': None}}
class Response(object):
"""
Custom API Response object to represent a response from calling the API.
We do this to ensure we know expected properties will exist, and so we
can call `resp.read()` and load the body once into an instance before we
close the HTTPConnection used for the request.
"""
__slots__ = ['status', 'body', 'reason', 'msg']
def __init__(self, status=None, body=None, reason=None, msg=None):
self.status = status
self.body = body
self.reason = reason
self.msg = msg
@classmethod
def from_http_response(cls, resp):
"""
Build a ``Response`` from the provided ``HTTPResponse`` object.
This function will call `.read()` to consume the body of the ``HTTPResponse`` object.
:param resp: ``HTTPResponse`` object to build the ``Response`` from
:type resp: ``HTTPResponse``
:rtype: ``Response``
:returns: A new ``Response``
"""
return cls(
status=resp.status,
body=resp.read(),
reason=getattr(resp, 'reason', None),
msg=getattr(resp, 'msg', None),
)
def get_json(self):
"""Helper to parse the body of this request as JSON"""
try:
body = self.body
if not body:
log.debug('Empty reply from Datadog Agent, %r', self)
return
if not isinstance(body, str) and hasattr(body, 'decode'):
body = body.decode('utf-8')
if hasattr(body, 'startswith') and body.startswith('OK'):
# This typically happens when using a priority-sampling enabled
# library with an outdated agent. It still works, but priority sampling
# will probably send too many traces, so the next step is to upgrade agent.
log.debug('Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date')
return
return loads(body)
except (ValueError, TypeError) as err:
log.debug('Unable to parse Datadog Agent JSON response: %s %r', err, body)
def __repr__(self):
return '{0}(status={1!r}, body={2!r}, reason={3!r}, msg={4!r})'.format(
self.__class__.__name__,
self.status,
self.body,
self.reason,
self.msg,
)
class UDSHTTPConnection(httplib.HTTPConnection):
"""An HTTP connection established over a Unix Domain Socket."""
    # It's "important" to keep the hostname and port arguments here; while they are not used by the connection
# mechanism, they are actually used as HTTP headers such as `Host`.
def __init__(self, path, *args, **kwargs):
httplib.HTTPConnection.__init__(self, *args, **kwargs)
self.path = path
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
class API(object):
"""
Send data to the trace agent using the HTTP protocol and JSON format
"""
TRACE_COUNT_HEADER = 'X-Datadog-Trace-Count'
# Default timeout when establishing HTTP connection and sending/receiving from socket.
# This ought to be enough as the agent is local
TIMEOUT = 2
def __init__(self, hostname, port, uds_path=None, headers=None, encoder=None, priority_sampling=False):
"""Create a new connection to the Tracer API.
:param hostname: The hostname.
:param port: The TCP port to use.
:param uds_path: The path to use if the connection is to be established with a Unix Domain Socket.
:param headers: The headers to pass along the request.
:param encoder: The encoder to use to serialize data.
:param priority_sampling: Whether to use priority sampling.
"""
self.hostname = hostname
self.port = int(port)
self.uds_path = uds_path
self._headers = headers or {}
self._version = None
if priority_sampling:
self._set_version('v0.4', encoder=encoder)
else:
self._set_version('v0.3', encoder=encoder)
self._headers.update({
'Datadog-Meta-Lang': 'python',
'Datadog-Meta-Lang-Version': PYTHON_VERSION,
'Datadog-Meta-Lang-Interpreter': PYTHON_INTERPRETER,
'Datadog-Meta-Tracer-Version': ddtrace.__version__,
})
# Add container information if we have it
self._container_info = container.get_container_info()
if self._container_info and self._container_info.container_id:
self._headers.update({
'Datadog-Container-Id': self._container_info.container_id,
})
def __str__(self):
if self.uds_path:
return self.uds_path
return '%s:%s' % (self.hostname, self.port)
def _set_version(self, version, encoder=None):
if version not in _VERSIONS:
version = 'v0.2'
if version == self._version:
return
self._version = version
self._traces = _VERSIONS[version]['traces']
self._services = _VERSIONS[version]['services']
self._fallback = _VERSIONS[version]['fallback']
self._compatibility_mode = _VERSIONS[version]['compatibility_mode']
if self._compatibility_mode:
self._encoder = JSONEncoder()
else:
self._encoder = encoder or get_encoder()
# overwrite the Content-type with the one chosen in the Encoder
self._headers.update({'Content-Type': self._encoder.content_type})
def _downgrade(self):
"""
        Downgrades the used encoder and API level. This method must fall back to a safe
        encoder and API, so that it will succeed despite users' configurations. This action
ensures that the compatibility mode is activated so that the downgrade will be
executed only once.
"""
self._set_version(self._fallback)
def send_traces(self, traces):
"""Send traces to the API.
:param traces: A list of traces.
:return: The list of API HTTP responses.
"""
start = time.time()
responses = []
payload = Payload(encoder=self._encoder)
for trace in traces:
try:
payload.add_trace(trace)
except PayloadFull:
# Is payload full or is the trace too big?
# If payload is not empty, then using a new Payload might allow us to fit the trace.
# Let's flush the Payload and try to put the trace in a new empty Payload.
if not payload.empty:
responses.append(self._flush(payload))
# Create a new payload
payload = Payload(encoder=self._encoder)
try:
# Add the trace that we were unable to add in that iteration
payload.add_trace(trace)
except PayloadFull:
# If the trace does not fit in a payload on its own, that's bad. Drop it.
log.warning('Trace %r is too big to fit in a payload, dropping it', trace)
# Check that the Payload is not empty:
# it could be empty if the last trace was too big to fit.
if not payload.empty:
responses.append(self._flush(payload))
log.debug('reported %d traces in %.5fs', len(traces), time.time() - start)
return responses
def _flush(self, payload):
try:
response = self._put(self._traces, payload.get_payload(), payload.length)
except (httplib.HTTPException, OSError, IOError) as e:
return e
# the API endpoint is not available so we should downgrade the connection and re-try the call
if response.status in [404, 415] and self._fallback:
log.debug("calling endpoint '%s' but received %s; downgrading API", self._traces, response.status)
self._downgrade()
return self._flush(payload)
return response
@deprecated(message='Sending services to the API is no longer necessary', version='1.0.0')
def send_services(self, *args, **kwargs):
return
def _put(self, endpoint, data, count):
headers = self._headers.copy()
headers[self.TRACE_COUNT_HEADER] = str(count)
if self.uds_path is None:
conn = httplib.HTTPConnection(self.hostname, self.port, timeout=self.TIMEOUT)
else:
conn = UDSHTTPConnection(self.uds_path, self.hostname, self.port, timeout=self.TIMEOUT)
try:
conn.request('PUT', endpoint, data, headers)
# Parse the HTTPResponse into an API.Response
# DEV: This will call `resp.read()` which must happen before the `conn.close()` below,
# if we call `.close()` then all future `.read()` calls will return `b''`
resp = get_connection_response(conn)
return Response.from_http_response(resp)
finally:
conn.close()
```
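The retry-on-full loop in `send_traces` above is a reusable pattern: flush the current payload when an item does not fit, retry the item in a fresh payload, and drop it only if it cannot fit on its own. Below is a minimal self-contained sketch of the same idea with a toy payload class; every name in it is made up for illustration and none of it is ddtrace API.

```python
class Full(Exception):
    """Raised when an item does not fit in the current payload."""

class ToyPayload:
    # Accumulates items up to a small byte budget, mimicking Payload above.
    def __init__(self, limit=10):
        self.items, self.size, self.limit = [], 0, limit

    @property
    def empty(self):
        return not self.items

    def add(self, item):
        if self.size + len(item) > self.limit:
            raise Full()
        self.items.append(item)
        self.size += len(item)

def send(items, flush):
    payload = ToyPayload()
    for item in items:
        try:
            payload.add(item)
        except Full:
            if not payload.empty:
                flush(payload)          # ship what we have so far
            payload = ToyPayload()      # start over with an empty payload
            try:
                payload.add(item)
            except Full:
                pass                    # too big even on its own: drop it
    if not payload.empty:
        flush(payload)

send(["aaaa", "bbbb", "cccc", "x" * 50], flush=lambda p: print(p.items))
```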
|
{
"source": "jdost/restler",
"score": 3
}
|
#### File: src/restler/errors.py
```python
ERRORS = {
'InvalidURL': 1,
'RequestError': 4, # 4xx errors
'ServerError': 5 # 5xx errors
}
class InvalidURLError(Exception):
""" Error raised when a URL is malformed or unparseable
"""
pass
class RequestError(Exception):
""" Error for when the request failed for a handled reason (4xx HTTP error
codes)
"""
def __init__(self, code, body):
self.code = code
self.body = body
def __cmp__(self, other):
if self.code == other:
return 0
return 1 if self.code > other else -1
def __str__(self):
return self.body
class ServerError(Exception):
""" Error for when the request failed for an unhandled error on the server
side (5xx HTTP error codes)
"""
def __init__(self, code, body):
self.code = code
self.body = body
def __cmp__(self, other):
if self.code == other:
return 0
return 1 if self.code > other else -1
def __str__(self):
return self.body
```
#### File: restler/url/auth.py
```python
try:
import urllib2
except ImportError:
import urllib.request as urllib2
class AuthManager(object):
""" Simple wrapper around HTTP Auth registration and lookup. Wraps the
``urllib2`` manager and provides a simple interface to add in new
credentials.
"""
def __init__(self, auths, url):
self.url = url
if isinstance(auths, urllib2.HTTPPasswordMgr):
self._manager = auths
else:
self._manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
self + auths
@property
def handler(self):
""" Valid ``urllib2`` request handler
"""
return urllib2.HTTPBasicAuthHandler(self._manager)
def __add__(self, auth):
if isinstance(auth, tuple):
self._manager.add_password(None, self.url, auth[0], auth[1])
elif isinstance(auth, dict):
self._manager.add_password(None, self.url, auth["username"],
auth["password"])
elif isinstance(auth, list):
for a in auth:
self + a
```
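A minimal usage sketch for `AuthManager` follows. The import path, URL, and credentials are assumptions for illustration and are not taken from the repository.

```python
try:
    import urllib2
except ImportError:
    import urllib.request as urllib2

from restler.url.auth import AuthManager  # assumed import path

# Register credentials for a base URL and build an opener that will send them.
auth = AuthManager(("alice", "secret"), "http://api.example.com/")
auth + {"username": "bob", "password": "hunter2"}  # dict credentials work too
opener = urllib2.build_opener(auth.handler)
```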
#### File: tests/integration/test_restler.py
```python
import unittest
from restler import Restler, Route
import http_server
http_server.QUIET = True
class TestRestler(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.server = http_server.TestServer(port=9001, threaded=True)
cls.server.start()
def setUp(self):
self.local = Restler("http://127.0.0.1:9001")
@classmethod
def tearDownClass(cls):
cls.server.stop()
def test_simple_request(self):
''' Test a basic request to the test server
Just sends a basic request and makes sure the data is set to the
defaults
'''
response = self.local()
self.assertEquals(response.url, "http://127.0.0.1:9001/")
self.assertEquals(response.data['method'], "GET")
self.assertEquals(response.data['params'], {})
def test_different_methods(self):
''' Test that various methods are used if dictated
Makes requests with the common RESTful methods and makes sure the
server sees them used.
'''
for method in ['GET', 'POST', 'PATCH', 'PUT', 'DELETE']:
response = self.local(method=method)
self.assertEquals(response.data['method'], method)
def test_long_routes(self):
''' Test that making a long path successfully sends to the server
Makes a request using a long path and ensures that this is the path
that the end server sees.
'''
response = self.local.a.long.route.making.a.request()
self.assertEquals(
str(response.data['path']),
"http://127.0.0.1:9001/a/long/route/making/a/request")
self.assertIsInstance(response.data['path'], Route)
def test_params(self):
''' Test that params go through
Sends a variety of types to the test server and sees if they are
received correctly.
'''
response = self.local(foo="bar", bar=1, baz=True)
try:
self.assertItemsEqual(response.data['params'],
{"foo": "bar", "bar": 1, "baz": True})
except AttributeError:
self.assertDictEqual(response.data['params'],
{"foo": "bar", "bar": "1", "baz": "True"})
```
#### File: restler/tests/test_request.py
```python
import unittest
import json
from restler import Restler
def normalize(s):
return s.decode("utf8") if isinstance(s, bytearray) else s
class TestRequest(unittest.TestCase):
def setUp(self):
self.app = Restler("http://127.0.0.1/")
self.app.__test__ = True
def assertSameUrlStrings(self, a, b):
a_set = a.split("&")
b_set = b.split("&")
if hasattr(self, "assertItemsEqual"):
return self.assertItemsEqual(a_set, b_set)
else:
return self.assertCountEqual(a_set, b_set)
def test_basic(self):
''' Tests the basic Request formation from a `Route`
Gets the created `urllib2.Request` object and confirms basic setup
'''
request = self.app.users()
self.assertEqual(request.get_selector(), "/users")
def test_methods(self):
''' Tests the method and data setup for a `Request`
Checks the created Request object for method setup and data creation
'''
request = self.app.users("POST", user="test")
self.assertEqual(request.get_method(), "POST")
self.assertEqual(normalize(request.get_data()), "user=test")
def test_method_casing(self):
''' Tests the casing of the method
Makes sure the method gets cast to all uppercase
'''
request = self.app.users("post")
self.assertEqual(request.get_method(), "POST")
def test_headers(self):
''' Tests setting a header for a request
Checks that you can set headers via the request call
'''
request = self.app.users(headers={"Accepts": "application/json"})
self.assertTrue(request.has_header("Accepts"))
def test_json(self):
''' Tests the data body respects the content-type
Sets the `Content-type` header for json, the data body should use this
mimetype
'''
request = self.app.users(headers={"Content-type": "application/json"},
users="test", foo="bar")
self.assertEqual(json.dumps({"users": "test", "foo": "bar"}),
normalize(request.get_data()))
def test_long_data(self):
''' Tests the data body for lots of data
Creates a request with a large set of data, checks that it all gets
added to the data body
'''
request = self.app.users(users="test", foo="bar", bar="baz")
self.assertSameUrlStrings("foo=bar&bar=baz&users=test",
normalize(request.get_data()))
def test_qs_path(self):
''' Tests that a path with a query string sets the params
If a path with a query string is fed into a `Route`, the query string
should be parsed and set as default params
'''
route = self.app['users?name=test']
request = route("POST")
self.assertEqual("name=test", normalize(request.get_data()))
self.assertEqual(request.get_selector(), "/users")
def test_multi_params(self):
''' Tests a data body with an array value
Creates a request with multiple values for a key set, checks that the
key is used for each value individually.
'''
request = self.app.users(users=["foo", "bar", "baz"])
self.assertSameUrlStrings("users=foo&users=bar&users=baz",
normalize(request.get_data()))
def test_get_data(self):
''' Tests that data for a GET is in the query string
The data will also be in the header, but it is common for the data to
live in the query string of the URL.
'''
route = self.app.test
request = route(foo="bar")
self.assertEqual("foo=bar", normalize(request.get_data()))
self.assertEqual("/test?foo=bar", normalize(request.get_selector()))
def test_reusable_routes(self):
''' Tests that the route builder re-uses routes of the same path
The route builder should not need to build a new route for every hop in
the path and should be able to re-use routes that have been created for
a path.
'''
route = self.app.reusable
route.params.add_params(foo="bar")
self.assertDictEqual(self.app.reusable.params._default_params,
{"foo": "bar"})
```
|
{
"source": "jdost/wingcommander",
"score": 3
}
|
#### File: src/wingcommander/commander.py
```python
import cmd
import logging
from wingcommander import util
from wingcommander.command import Command
import sys
class WingCommander(cmd.Cmd):
"""Master class for command line applications.
Use as the base class for the definition of a command line application.
:param name: name of the application
:param parent: (optional) parent <WingCommander> object
Usage: ::
from wingcommander import WingCommander
class ShellApp(WingCommander):
pass
app = ShellApp(name='example')
"""
END_CMDS = ['back', 'exit', 'EOF']
def __init__(self, name="", parent=None, *args, **kwargs):
cmd.Cmd.__init__(self, *args, **kwargs)
self.parent = parent
self.name = name
self.prompt = util.gen_prompt(self)
self.handler = logging.StreamHandler(sys.stdout)
@classmethod
def command(cls, cmd=None, completions=None):
"""decorator method to convert a function and properties into a command
for the new command line application.
returns a :class:`Command <Command>` object.
:param completions: (optional) A completion definition (see:
:doc:`user/completions`)
:param cmd: function to be converted into a command in the application
(it is the one being decorated)
Usage::
@ShellApp.command
def count(app):
app.count = (app.count or 0) + 1
return app.count
@ShellApp.command(completions=["start", "stop"])
def app(app, action="start"):
if action == "start":
app.state = "started"
elif action == "stop":
app.state = "stopped"
return True
"""
if not cmd:
return lambda f: cls.command(cmd=f, completions=completions)
if not isinstance(cmd, Command):
cmd = Command(cmd)
cmd.update_completion(completions)
cmd.__attach__(cls)
return cmd
def default(self, command):
if command in self.END_CMDS:
return True
return cmd.Cmd.default(self, command)
def preloop(self):
self.old_completer_fix = None
try:
import readline
self.old_completer_fix = readline.get_completer()
readline.set_completer(self.complete)
if 'libedit' in readline.__doc__:
completekey = "^I" if self.completekey == "tab" \
else self.completekey
readline.parse_and_bind("bind " + completekey
+ " rl_complete")
except ImportError:
pass
def postloop(self):
if self.old_completer_fix:
self.old_completer = self.old_completer_fix
```
#### File: src/wingcommander/command.py
```python
from wingcommander.util import gen_completion
import types
class Command(object):
'''Wrapper class for commands/actions used in a
:class:`WingCommander <WingCommander>`
Base wrapper for the commands used in the ``WingCommander`` controller.
It is meant to provide an accessible and extensible API around the
functionality added via the CLI behavior of the controller.
Can be created manually and imported::
def echo_func(cmd, *args):
return " ".join(args)
echo = Command(echo_func)
echo.update_help("echoes whatever you give it")
ShellApp.command(cmd=echo)
'''
def __init__(self, command):
self.__name__ = command.__name__
self.parent = None
self.help_text = command.__doc__
self.complete = None
self.__command__ = command
self.__doc__ = lambda x: self.help_text
def update_help(self, txt):
'''Updates the help text associated with the ``Command`` in the master
application.
:param txt: string containing the new help text to be displayed by the
help command for this command.
'''
self.help_text = txt if isinstance(txt, str) \
and len(txt) > 0 else self.help_text
def update_completion(self, completions):
''' Updates the completion set for the ``Command`` in the command line
interface. This takes any of the :doc:`user/completions` accepted types
and generates a new tab completion function. This can be a ``list`` of
any possible arguments that can be completed at any position, a
``dict`` of arguments based on inherited position, or a function that
will be used as the entire completion function.
:param completions: completion set as described above and in the
:doc:`user/completions` section.
'''
self.complete = gen_completion(completions)
def __help__(self):
print self.help_text
def __complete__(self, *args, **kwargs):
''' __complete__
Generates the completion set based on the current state of the command
being authored.
'''
return self.complete(*args, **kwargs) if self.complete else None
def __call__(self, cmdr, *args, **kwargs):
pass_generator = kwargs.get("pass_generator", False)
if "pass_generator" in kwargs:
del kwargs["pass_generator"]
output = self.__command__(cmdr, *args, **kwargs)
if isinstance(output, types.GeneratorType) and not pass_generator:
output = map(lambda x: x, output)
if isinstance(output, tuple):
output = list(output)
elif isinstance(output, str):
output = output.split('\n')
return output
def __call_split__(self, cmdr, args):
''' Wrapper to handle translating the input from the command line
interface and returning the command's output in conjunction with the
``WingCommander`` master class.
:param cmdr: The ``self`` reference to the ``WingCommander`` parent
instance calling the command.
:param args: string that represents the arguments following the
execution of the command.
'''
output = self.__call__(cmdr, *args.split(' '),
pass_generator=True)
if isinstance(output, list):
print '\n'.join(output)
return len(output) == 0
elif isinstance(output, types.GeneratorType):
for line in output:
print line
return True
return output
def __attach__(self, cmdr):
''' Associates/attaches this ``Command`` object with the parent class
instance that it is passed.
:param cmdr: The ``WingCommander`` class definition to register this
command with.
'''
setattr(cmdr, "do_" + self.__name__, self.__call_split__)
setattr(cmdr, "help_" + self.__name__, self.__help__)
if self.complete:
setattr(cmdr, "complete_" + self.__name__, self.__complete__)
self.parent = cmdr
```
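The docstrings above show building a `Command` by hand and registering it on a `WingCommander` subclass. Here is a short sketch of that flow using only the calls documented above; the command itself and its completion values are made up.

```python
from wingcommander import WingCommander
from wingcommander.command import Command

class ShellApp(WingCommander):
    pass

def deploy(app, target="staging"):
    return "deploying to " + target

cmd = Command(deploy)
cmd.update_help("deploy the application to a target environment")
# List-form completions are offered at any argument position.
ShellApp.command(cmd=cmd, completions=["staging", "production"])
```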
#### File: src/wingcommander/test.py
```python
import unittest
from wingcommander import WingCommander
from StringIO import StringIO
class TestShell(WingCommander):
pass
class CmdTestCase(unittest.TestCase):
def setUp(self):
self.stdin = StringIO("")
self.stdout = StringIO("")
self.cmd = TestShell(
name=__name__, stdin=self.stdin, stdout=self.stdout)
def tearDown(self):
self.stdin.seek(0)
self.stdin.truncate()
self.stdout.seek(0)
self.stdout.truncate()
def seedInput(self, lines=[]):
''' seedInput:
Generate simulated input for stdin using the lines argument.
'''
self.stdin.write("\n".join(lines))
self.stdin.seek(0)
def collectOutput(self):
''' collectOutput:
Collect the stored stdout into a list of lines.
'''
self.stdout.seek(0)
return [line for line in self.stdout.readline()]
def cmd_name(self, cmd):
return "cmd_%s" % cmd
def cmp_name(self, cmd):
return "complete_%s" % cmd
def help_name(self, cmd):
return "help_%s" % cmd
def assertCompletes(self, cmd, src_completions, line=""):
''' assertCompletes:
Generates completions for the `cmd` function and asserts the output of
the completion function against the expected completions.
'''
line = cmd + ' ' + line
tmp = line.split(' ')
current = tmp[-1]
completions = getattr(self.cmd, self.cmp_name(cmd))(
current, line, 0, len(tmp))
self.assertListEqual(src_completions, completions)
def runWithInput(self, cmd, lines=[]):
''' runWithInput:
Takes a command and input lines that will be used as simulated input
via stdin. It will return either the return value from the executed
command or the lines from stdout (if the return value was a boolean or
None).
'''
cmd_ = cmd.split(' ')
args = ' '.join(cmd_[1:]) if len(cmd_) > 1 else ''
cmd_ = cmd_[0]
self.seedInput(lines)
r = getattr(self.cmd, self.cmd_name(cmd_))(args)
return self.collectOutput() if (isinstance(r, bool) or r is None) \
else r
```
#### File: wingcommander/util/tablize.py
```python
DIVIDERS = ["|", "", ""]
BOX_DIVIDERS = ["│", "─", "┼"]
CENTER = 'c'
LEFT = 'l'
RIGHT = 'r'
COLUMN_TEMPLATE = " {{:{0}{1}.{1}}} "
class Table(list):
def __str__(self, *args, **kwargs):
return '\n'.join(self)
def tablize(data, max_length=-1, keys=None, dividers=DIVIDERS, labels=None,
alignment=None):
'''Converts a tabular set of data into an aligned table for easier display
in the terminal.
There are a number of options for customizing the output. The ``keys``
parameter will limit the keys to pull from each ``dict`` of data in the
dataset. The ``labels`` parameter enables the header row and fills in
the labels for each column.
The ``dividers`` parameter is used to configure the characters used to
divide the various data cells. It is a `list` of length 3 that is defined:
0. The character used to divide between columns
1. The character used to divide between rows
2. The character used when the division between columns and rows occurs
The default is to display a pipe between columns and nothing between rows.
A more ornamental set of dividers using box drawing characters can be
found by importing ``BOX_DIVIDERS`` from ``wingcommander.util.tablize``.
The ``alignment`` parameter is used to determine the alignment of the
text in each column. The list should mirror the number of columns
available and should probably be used with the ``keys`` parameter. The
possible values are ``l``, ``c``, and ``r`` which correspond to
left, center, and right aligned text respectively. You can also use the
values ``LEFT``, ``CENTER``, and ``RIGHT`` from the
``wingcommander.util.tablize`` module.
:param data: a ``list`` of ``dict`` sets of data to be displayed
:param max_length: a limit on the maximum width of a column/length of a
data point, a value of -1 means there is no limit
:param keys: a ``list`` of keys to be used from the ``dict`` of data
provided, if this is set to ``None`` all of the keys will be used
:param dividers: a ``list`` defining the dividers to use when displaying
the tabular data.
:param labels: a ``list`` of the labels to be displayed above the table,
if ``None`` no labels will be displayed
:param alignment: a ``list`` of the alignment setup associated with each
column
Usage: ::
>>> from wingcommander.util import tablize
>>> from wingcommander.util.tablize import BOX_DIVIDERS, LEFT, RIGHT
>>> tablize([{"name": "Joe", "occupation": "Teacher", "age": 45},
... {"name": "Jane", "occupation": "Engineer", "age": 27},
... {"name": "Mark", "occupation": "Astronomer", "age": 33}],
... keys=["name", "occupation", "age"], max_length=7,
... dividers=BOX_DIVIDERS, labels=["Name", "Job", "Age"],
... alignment=[LEFT, LEFT, RIGHT])
...
Name │ Job │ Age
─────┼─────────┼─────
Joe │ Teacher │ 45
─────┼─────────┼─────
Jane │ Enginee │ 27
─────┼─────────┼─────
Mark │ Astrono │ 33
'''
if not len(data):
return None
if len(data) and isinstance(data[0], dict):
data = extract_dicts(data, keys)
data = clean_up(data)
sizes = calc_columns(data, max_length)
frmt = []
row_divider = ""
for i in range(len(sizes)):
frmt.append(COLUMN_TEMPLATE.format(
translate_alignment(alignment[i] if alignment else None),
str(sizes[i])))
frmt = (dividers[0] if dividers[0] else ' ').join(frmt).strip()
if dividers[1]:
row_divider = (dividers[2] if dividers[2] else dividers[1]).join(
[(s + 2) * dividers[1] for s in sizes])
output = Table([frmt.format(*labels)] if labels else [])
for d in data:
if output and row_divider:
output.append(row_divider)
output.append(frmt.format(*d))
return output
def extract_dicts(data, keys):
if keys is None:
keys = data[0].keys()
return [[d.get(k, '') for k in keys] for d in data]
def calc_columns(data, max_length):
if len(data) > 1:
sizes = map(
lambda x: max(*map(lambda s: len(str(s)), [d[x] for d in data])),
range(len(data[0])))
else:
sizes = map(lambda s: len(str(s)), data[0])
if isinstance(max_length, list):
return [min(max_length[i], sizes[i]) for i in range(len(sizes))]
elif max_length > 0:
return map(lambda x: min(x, max_length), sizes)
return sizes
def clean_up(d):
return map(lambda x: map(str, x), d)
def translate_alignment(alignment="l"):
return "<" if alignment == "l" else "^" if alignment == "c" else ">"
```
|
{
"source": "jdotpy/dispatch",
"score": 3
}
|
#### File: dispatch/dispatch/core.py
```python
from datetime import datetime, timedelta
import threading
import logging
import time
from .schedules import one_time_schedule, interval_schedule, CalendarSchedule
logger = logging.getLogger(__name__)
class Reactor():
""" The reactor holds the main loop and constructs the plans """
def __init__(self):
self.running = False
self.plans = []
def run(self):
logger.info('Starting reactor')
self.running = True
while self.running and self.plans:
now = self.now()
logger.debug('[Reactor] awoke at {}; Checking {} plans'.format(
now,
len(self.plans),
))
to_remove = []
earliest_action = None
for plan in self.plans:
# Mark for removal if no future actions need to take place
if plan.next_run is None:
to_remove.append(plan)
continue
# Execute if we've passed the next run time
elif now >= plan.next_run:
logger.debug('[Reactor] starting plan {} as it was scheduled to run at {}'.format(
plan,
plan.next_run
))
plan.run(now)
# Keep running tab of when the next time i'll have to do something is
if earliest_action is None or plan.next_run < earliest_action:
earliest_action = plan.next_run
# Remove plans that are complete
for plan in to_remove:
logger.info('[Reactor] removing plan {} as it is complete'.format(plan))
self.plans.remove(plan)
# Sleep until next action
if earliest_action:
logger.debug('[Reactor] next scheduled plan is {}'.format(earliest_action))
sleep_seconds = (earliest_action - self.now()).total_seconds()
if sleep_seconds > 0:
logger.debug('[Reactor] sleeping for {} seconds till next job'.format(sleep_seconds))
time.sleep(sleep_seconds)
self.running = False
def _debug(self, runs=5):
for plan in self.plans:
print('------------- Plan {} ---------------'.format(str(plan)))
for i in range(runs):
print('Next Run @ {}'.format(str(plan.next_run)))
plan.get_next_run()
def stop(self):
self.running = False
def now(self):
return datetime.now()
def run_in_background(self):
thread = threading.Thread(target=self.run)
thread.start()
def dispatch(self, schedule, action, *args, **kwargs):
self.plans.append(Plan(schedule, action))
# Shortcuts for actions
def action(self, func, *args, **kwargs):
return FunctionCallAction(func, args, kwargs, threaded=False)
def background_action(self, func, *args, **kwargs):
return FunctionCallAction(func, args, kwargs, threaded=True)
# Schedule helpers
schedule_one_time = one_time_schedule
schedule_calendar = CalendarSchedule
def schedule_interval(self, **kwargs):
return interval_schedule(**kwargs)
def schedule_calendar(self, **kwargs):
if 'start' not in kwargs:
kwargs['start'] = self.now()
return CalendarSchedule(**kwargs)
def schedule_daily(self, hour=0, minute=0, second=0):
now = self.now()
start = now.replace(hour=hour, minute=minute, second=second)
if start < now:
start = start + timedelta(days=1)
return interval_schedule(start=start, days=1)
def schedule_hourly(self, minute=0, second=0):
now = self.now()
start = now.replace(minute=minute, second=second)
if start < now:
start = start + timedelta(hours=1)
return interval_schedule(start=start, hours=1)
class Plan():
""" A Plan encapsulates the schedule and what is being scheduled. """
def __init__(self, schedule, action, name=None, allow_multiple=False):
self.name = name
self.schedule = schedule
self.action = action
self.last_run = None
self.next_run = None
self.get_next_run()
self.allow_multiple = allow_multiple
def __str__(self):
if self.name:
return self.name
else:
return str(self.action)
def get_next_run(self):
try:
self.next_run = next(self.schedule)
except StopIteration:
self.next_run = None
def get_run_id(self):
if hasattr(self.action, 'run_id'):
return self.action.run_id()
return ''
def run(self, cycle_time):
logger.debug('[Plan={}] attempting to run at {}'.format(self, cycle_time))
if hasattr(self.action, 'is_running'):
is_running = self.action.is_running()
else:
is_running = False
if is_running and not self.allow_multiple:
self.get_next_run()
run_id = self.get_run_id()
logger.warn('[Plan={}] Skipping as another instance ({}) is still running. Rescheduling for {}.'.format(
self,
run_id,
self.next_run,
))
return False
# Actually run
logger.debug('[Plan={}] starting'.format(self))
self.last_run = cycle_time
self.action.run()
self.get_next_run()
class FunctionCallAction():
def __init__(self, func, args, kwargs, threaded=False):
self.func = func
self.args = args
self.kwargs = kwargs
self.threaded = threaded
self.last_thread = None
def __str__(self):
return 'function:' + self.func.__name__
def run_id(self):
return self.last_thread.ident
def _func_wrapper(self):
logger.debug('[Action={}] started'.format(self))
try:
self.func(*self.args, **self.kwargs)
except Exception as e:
logger.error('[Action={}] failed with exception'.format(self), exc_info=True)
logger.debug('[Action={}] complete'.format(self))
def run(self):
if self.threaded:
thread = threading.Thread(target=self._func_wrapper)
thread.start()
self.last_thread = thread
else:
self._func_wrapper()
def is_running(self):
if not self.last_thread:
return False
return self.last_thread.is_alive()
```
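The calendar-style helpers (`schedule_daily`, `schedule_hourly`) are not exercised by the test or example below. A small sketch of what using them might look like; the task and the chosen time are made up.

```python
from dispatch.core import Reactor

def nightly_backup():
    print("running nightly backup")

r = Reactor()
# Fire once a day at 02:30, running the task in a background thread per run.
r.dispatch(r.schedule_daily(hour=2, minute=30), r.background_action(nightly_backup))
r.run_in_background()
```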
#### File: dispatch/tests/test_dispatch.py
```python
import time
import pytest
from unittest import mock
from dispatch.core import Reactor
@pytest.fixture
def reactor():
return Reactor()
def test_basic(reactor):
m = mock.MagicMock()
reactor.dispatch(
reactor.schedule_interval(seconds=1),
reactor.background_action(m)
)
reactor.run_in_background()
time.sleep(1.1)
reactor.stop()
assert m.call_count == 2
```
#### File: dispatch/example/example.py
```python
from dispatch.core import Reactor
import random
import time
def my_task():
i = random.randint(1,10000)
time.sleep(10)
def echo(text=None):
print('Echo:', text)
def main():
r = Reactor()
schedule = r.schedule_interval(seconds=2)
action = r.background_action(my_task)
r.dispatch(schedule, action)
r.run()
if __name__ == '__main__':
main()
```
|
{
"source": "jdotpy/quickconfig",
"score": 3
}
|
#### File: jdotpy/quickconfig/quickconfig.py
```python
from io import open
import argparse
import json
import sys
import os
import re
from pprint import pprint
try:
# Python 3
from configparser import ConfigParser
from io import StringIO
base_string = str
except ImportError:
# Python 2
from ConfigParser import ConfigParser
from StringIO import StringIO
base_string = basestring
try:
import yaml
except ImportError:
yaml = None
class EnvironmentVariable():
def __init__(self, key):
self.path = os.getenv(key, None)
class CommandArgument():
source = sys.argv[1:]
def __init__(self, key):
parser = argparse.ArgumentParser()
parser.add_argument('--' + key, help='QuickConfig Configuration File', default=None)
args, _remaining = parser.parse_known_args(self.source)
self.path = getattr(args, key, None)
class MissingConfigFileError(IOError):
pass
class InvalidConfigError(ValueError):
pass
class RequiredConfigurationError(ValueError):
pass
class ExtractionFailed(KeyError):
pass
class Extractor():
def __init__(self, *sources, **kwargs):
self.sources = sources
self.delimiter = kwargs.pop('delimiter', '.')
def extract(self, path, default=None):
if isinstance(path, (list, tuple)):
attrs = path
else:
attrs = path.split(self.delimiter)
for source in reversed(self.sources):
value = source
try:
for attr in attrs:
if isinstance(value, (list, tuple)):
try:
attr = int(attr)
except:
raise ExtractionFailed()
try:
value = value.__getitem__(attr)
except (KeyError, IndexError, ValueError, AttributeError):
raise ExtractionFailed()
return value
except ExtractionFailed:
continue
if isinstance(default, BaseException):
raise default
elif type(default) == type and issubclass(default, BaseException):
raise default('path not found: ' + '.'.join(attrs))
else:
return default
def extract(sources, path, default=None, **options):
return Extractor(sources, **options).extract(path, default=default)
class Configuration():
Env = EnvironmentVariable
Arg = CommandArgument
def __init__(self, *sources, **options):
self.sources = []
self.loaded_sources = []
self.replace = options.get('replace', False)
self.require = options.get('require', 0)
self.silent_on_missing = options.get('silent_on_missing', True)
self.silent_on_invalid = options.get('silent_on_invalid', False)
for source in sources:
self.load_source(source)
# Support boolean require values that imply 1
if not isinstance(self.require, int):
if self.require:
self.require = 1
else:
self.require = 0
if self.require > len(self.loaded_sources):
if self.require == 1:
message = 'At least one configuration source is required.'
else:
message = 'At least %d configuration sources are required but only %d are found.' % (self.require, len(self.loaded_sources))
print('\nConfiguration sources:')
for source in self.sources:
print('\t' + source['origin'])
raise RequiredConfigurationError(message)
def load_source(self, path, destination='', encoding='utf-8', replace=False):
if isinstance(path, dict):
source_info = {
'origin': path,
'location': 'Dynamic Data Dictionary',
'type': None,
'contents': None,
'loaded': True,
'message': 'Success',
'data': path,
'destination': destination
}
else:
origin = path
if isinstance(path, (self.Env, self.Arg)):
path = path.path
ext = self._get_file_type(path)
contents = self._get_file_contents(path, encoding)
if contents is None and not self.silent_on_missing:
raise MissingConfigFileError('Missing configuration file: ' + origin)
if contents is None:
data = None
message = 'No file contents to parse'
else:
data, message = self._parse_contents(contents, ext)
if data is None and not self.silent_on_invalid:
raise InvalidConfigError(origin + ' has invalid configuration: ' + message)
loaded = data is not None
source_info = {
'origin': origin,
'location': path,
'type': ext,
'contents': contents,
'loaded': loaded,
'message': message,
'data': data,
'destination': destination
}
if '--configdebug' in sys.argv:
pprint('ConfigTest. Added the following config source:')
pprint(source_info)
self.sources.append(source_info)
self._create_extractor()
self.loaded_sources = [source for source in self.sources if source['loaded']]
self.any_loaded = len(self.loaded_sources) > 0
self.loaded = len(self.loaded_sources)
def _create_extractor(self):
all_source_structs = []
for source in self.sources:
destination = source['destination']
if destination:
source_data = {destination: source['data']}
else:
source_data = source['data']
all_source_structs.append(source_data)
self.extractor = Extractor(*all_source_structs)
def _parse_contents(self, contents, file_type):
if contents is None:
return None, 'No content to parse'
if file_type == 'json':
try:
return json.loads(contents), 'Success'
except ValueError as e:
return None, str(e)
elif file_type == 'yaml':
if yaml is None:
raise ImportError('A yaml config file was specified but yaml is not available!')
try:
return yaml.load(contents), 'Success'
except ValueError as e:
return None, str(e)
elif file_type == 'ini':
try:
buf = StringIO(contents)
config = ConfigParser()
if hasattr(config, 'read_file'):
config.read_file(buf)
else:
config.readfp(buf)
data = {'defaults': dict(config.defaults())}
for section in config.sections():
data[section] = dict(config.items(section))
return data, 'Success'
except Exception as e:
return None, str(e)
else:
raise ValueError('Invalid config extension: ' + file_type)
def _get_file_type(self, path):
if path is None or not isinstance(path, base_string):
return None
path, ext = os.path.splitext(path)
ext = ext[1:] # Remove leading dot
return ext
def _get_file_contents(self, path, encoding='utf-8'):
if not path:
return None
path = os.path.expanduser(path)
try:
f = open(path, encoding=encoding)
contents = f.read()
f.close()
return contents
except IOError:
return None
def get(self, *args, **kwargs):
return self.extractor.extract(*args, **kwargs)
```
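A short usage sketch for `Configuration`. The file path, environment variable, command-line flag, and keys are illustrative only; because `Extractor` walks the sources in reverse load order, sources loaded later take precedence when a key exists in several.

```python
from quickconfig import Configuration

config = Configuration(
    {'debug': False},                   # inline defaults as a plain dict
    '/etc/myapp/settings.json',         # optional file, skipped if missing
    Configuration.Env('MYAPP_CONFIG'),  # path taken from an environment variable
    Configuration.Arg('config'),        # path taken from --config on the command line
)
db_host = config.get('database.host', default='localhost')
debug = config.get('debug', default=False)
```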
|
{
"source": "jdotpy/splunk-connect-for-kubernetes",
"score": 2
}
|
#### File: test/k8s_logging_tests/test_config_logging.py
```python
import pytest
import time
import os
import logging
import json
import sys
from urllib.parse import urlparse
from ..common import check_events_from_splunk, create_index_in_splunk, delete_index_in_splunk
from kubernetes import client, config
from kubernetes.client.rest import ApiException
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
@pytest.mark.parametrize("test_input,expected", [
("test_data", 1)
])
def test_splunk_index(setup, test_input, expected):
'''
Test that user specified index can successfully index the
log stream from k8s. If no index is specified, default
index "ci_events" will be used.
'''
logger.info("testing test_splunk_index input={0} expected={1} event(s)".format(
test_input, expected))
index_logging = os.environ["CI_INDEX_EVENTS"] if os.environ["CI_INDEX_EVENTS"] else "ci_events"
search_query = "index=" + index_logging
events = check_events_from_splunk(start_time="-1h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["splunk_password"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) >= expected
@pytest.mark.parametrize("test_input,expected", [
("ci-k8s-cluster", 1)
])
def test_cluster_name(setup, test_input, expected):
'''
Test that user specified cluster-name is attached as a metadata to all the logs
'''
logger.info("testing test_clusterName input={0} expected={1} event(s)".format(
test_input, expected))
index_logging = os.environ["CI_INDEX_EVENTS"] if os.environ["CI_INDEX_EVENTS"] else "ci_events"
search_query = "index=" + index_logging + " cluster_name::" + test_input
events = check_events_from_splunk(start_time="-1h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["splunk_password"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) >= expected
@pytest.mark.parametrize("label,index,expected", [
("pod-w-index-wo-ns-index", "pod-anno", 1),
("pod-wo-index-w-ns-index", "ns-anno", 1),
("pod-w-index-w-ns-index", "pod-anno", 1)
])
def test_label_collection(setup, label, index, expected):
'''
Test that user specified labels is attached as a metadata to all the logs
'''
logger.info("testing label_app label={0} index={1} expected={2} event(s)".format(
label, index, expected))
search_query = "index=" + index + " label_app::" + label
events = check_events_from_splunk(start_time="-1h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["<PASSWORD>password"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) >= expected
@pytest.mark.parametrize("container_name,index,expected", [
("pod-w-index-w-ns-index", "pod-anno", 1),
("pod-wo-index-w-ns-index", "ns-anno", 1),
("pod-w-index-wo-ns-index", "pod-anno", 1),
("pod-wo-index-wo-ns-index", os.environ["CI_INDEX_EVENTS"]
if os.environ["CI_INDEX_EVENTS"] else "ci_events", 1),
])
def test_annotation_routing(setup, container_name, index, expected):
'''
Test annotation routing feature. it tests different combinations of
namespace annotations and pod annotations.
'''
logger.info("testing test_annotation_routing pod={0} index={1} expected={2} event(s)".format(
container_name, index, expected))
search_query = "index=" + index + " container_name::" + container_name
events = check_events_from_splunk(start_time="-1h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["splunk_password"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) >= expected
@pytest.mark.parametrize("container_name,expected", [
("pod-w-index-w-ns-exclude", 0),
("pod-w-exclude-wo-ns-exclude", 0)
])
def test_annotation_excluding(setup, container_name, expected):
'''
Test annotation excluding feature.
'''
logger.info("testing test_annotation_excluding pod={0} expected={1} event(s)".format(
container_name, expected))
search_query = "index=*" + " container_name::" + container_name
events = check_events_from_splunk(start_time="-1h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["<PASSWORD>"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) == expected
@pytest.mark.parametrize("test_input,expected", [
("kube:container:kube-apiserver", 1),
("kube:container:etcd", 1),
("kube:container:kube-controller-manager", 1),
("kube:container:splunk-fluentd-k8s-metrics-agg", 1),
("kube:container:splunk-fluentd-k8s-metrics", 1),
("kube:container:splunk-fluentd-k8s-logs", 1),
("kube:container:splunk-fluentd-k8s-objects", 1),
("empty_sourcetype", 0)
])
def test_sourcetype(setup, test_input, expected):
'''
Test that known sourcetypes are present in target index
'''
logger.info("testing for presence of sourcetype={0} expected={1} event(s)".format(
test_input, expected))
index_logging = os.environ["CI_INDEX_EVENTS"] if os.environ["CI_INDEX_EVENTS"] else "ci_events"
source_type = ' sourcetype=""' if test_input == "empty_sourcetype" else ' sourcetype=' + test_input
search_query = "index=" + index_logging + source_type
events = check_events_from_splunk(start_time="-24h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["splunk_password"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) >= expected if test_input != "empty_sourcetype" else len(
events) == expected
@pytest.mark.parametrize("sourcetype,index,expected", [
("kube:container:pod-wo-index-w-ns-index", "ns-anno", 1),
("sourcetype-anno", "pod-anno", 1)
])
def test_annotation_sourcetype(setup, sourcetype, index, expected):
'''
Test that the sourcetype annotation properly overwrites the sourcetype when set
'''
logger.info("testing for annotation sourcetype of {0} index={1} expected={2} event(s)".format(
sourcetype, index, expected))
search_query = "index=" + index + ' sourcetype=' + sourcetype
events = check_events_from_splunk(start_time="-1h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["splunk_password"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) >= expected
@pytest.mark.parametrize("test_input,expected", [
("/var/log/containers/kube-apiserver-*", 1),
("/var/log/containers/ci*", 1),
("/var/log/containers/coredns*", 1),
("/var/log/containers/etcd-*", 1),
("empty_source", 0)
])
def test_source(setup, test_input, expected):
'''
Test that known sources are present in target index
'''
logger.info("testing for presence of source={0} expected={1} event(s)".format(
test_input, expected))
index_logging = os.environ["CI_INDEX_EVENTS"] if os.environ["CI_INDEX_EVENTS"] else "ci_events"
source = ' source=""' if test_input == "empty_source" else ' source=' + test_input
search_query = "index=" + index_logging + ' OR index="kube-system"' + source
events = check_events_from_splunk(start_time="-24h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["splunk_password"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) >= expected if test_input != "empty_source" else len(
events) == expected
@pytest.mark.parametrize("test_input,expected", [
("dummy_host", 1),
("empty_host", 0)
])
def test_host(setup, test_input, expected):
'''
Test that known hosts are present in target index
'''
logger.info("testing for presence of host={0} expected={1} event(s)".format(
test_input, expected))
index_logging = os.environ["CI_INDEX_EVENTS"] if os.environ["CI_INDEX_EVENTS"] else "ci_events"
host = ' host!=""' if test_input == "dummy_host" else ' host=""'
search_query = "index=" + index_logging + host
events = check_events_from_splunk(start_time="-24h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["splunk_password"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) >= expected
@pytest.mark.parametrize("test_input,expected", [
("pod", 1),
("namespace", 1),
("container_name", 1),
("container_id", 1)
])
def test_default_fields(setup, test_input, expected):
'''
Test that default fields are attached as metadata to all the logs
'''
logger.info("testing test_clusterName input={0} expected={1} event(s)".format(
test_input, expected))
index_logging = os.environ["CI_INDEX_EVENTS"] if os.environ["CI_INDEX_EVENTS"] else "ci_events"
search_query = "index=" + index_logging + " " + test_input + "::*"
events = check_events_from_splunk(start_time="-1h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["splunk_password"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) >= expected
@pytest.mark.parametrize("field,value,expected", [
("customfield1", "customvalue1", 1),
("customfield2", "customvalue2", 1)
])
def test_custom_metadata_fields(setup, field,value, expected):
'''
Test that user-provided custom metadata fields are ingested with the log
'''
logger.info("testing custom metadata field={0} value={1} expected={2} event(s)".format(
field,value, expected))
index_logging = os.environ["CI_INDEX_EVENTS"] if os.environ["CI_INDEX_EVENTS"] else "ci_events"
search_query = "index=" + index_logging + " " + field + "::" + value
events = check_events_from_splunk(start_time="-1h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["splunk_password"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) >= expected
@pytest.mark.parametrize("label,index,value,expected", [
("pod-w-index-wo-ns-index", "pod-anno", "pod-value-2", 1),
("pod-wo-index-w-ns-index", "ns-anno", "ns-value", 1),
("pod-w-index-w-ns-index", "pod-anno", "pod-value-1", 1)
])
def test_custom_metadata_fields_annotations(setup, label, index, value, expected):
'''
Test that user-specified labels are resolved from the user-specified annotations and attached as metadata
to all the logs
'''
logger.info("testing custom metadata annotation label={0} value={1} expected={2} event(s)".format(
label, value, expected))
search_query = "index=" + index + " label_app::" + label + " custom_field::" + value
events = check_events_from_splunk(start_time="-1h@h",
url=setup["splunkd_url"],
user=setup["splunk_user"],
query=["search {0}".format(
search_query)],
password=setup["splunk_password"])
logger.info("Splunk received %s events in the last minute",
len(events))
assert len(events) >= expected
```
|
{
"source": "jdotpy/taskthreader",
"score": 3
}
|
#### File: jdotpy/taskthreader/taskthreader.py
```python
import threading
import time
class Worker(threading.Thread):
def __init__(self, *args, **kwargs):
self.target = kwargs.pop('target', None)
self.args = kwargs.pop('args', [])
if self.args is None:
self.args = []
self.kwargs = kwargs.pop('kwargs', {})
if self.kwargs is None:
self.kwargs = {}
super(Worker, self).__init__(*args, **kwargs)
def run(self):
if not self.target:
self.result = None
return None
try:
self.result = self.target(*self.args, **self.kwargs)
except Exception as e:
self.result = e
class WorkGroup():
def __init__(self, max_threads=5, tasks=None):
self.max_threads = max_threads
self.tasks = self._parse_tasks(tasks)
def _parse_tasks(self, mapping=None):
tasks = {}
mapping = mapping or {}
for name, task in mapping.items():
args = (name,) + task
self._add_task(tasks, *args)
return tasks
def add_task(self, name, func, *args, **kwargs):
""" This method just wraps the internal one collapsing the args and kwargs """
self._add_task(self.tasks, name, func, args, kwargs)
def _add_task(self, tasks, name, func, args=None, kwargs=None):
tasks[name] = (func, args, kwargs)
def run(self, tasks=None):
start_time = time.time()
results = {}
if tasks:
tasks = self._parse_tasks(tasks)
else:
tasks = self.tasks
task_queue = [(task_name, ) + task for task_name, task in tasks.items()]
# Start threads for them
active_workers = set()
while True:
# Clean up complete workers
for worker in list(active_workers):
if not worker.is_alive():
results[worker.name] = worker.result
active_workers.remove(worker)
# Finish if work is done
if len(task_queue) == 0 and len(active_workers) == 0:
break
# If we have open worker slots available use them
to_make = self.max_threads - len(active_workers)
to_make = min(to_make, len(task_queue))
if to_make:
for i in range(to_make):
name, func, args, kwargs = task_queue.pop()
new_worker = Worker(
name=name,
target=func,
args=args,
kwargs=kwargs
)
new_worker.daemon = True
active_workers.add(new_worker)
new_worker.start()
# Wait on next thread to finish
watched = active_workers.pop() # Instead of iterating over to grab an item, i'm popping and re-adding
active_workers.add(watched)
watched.join()
end_time = time.time()
self.last_run_time = end_time - start_time
success = all([not isinstance(v, BaseException) for v in results.values()])
return success, results
```
#### File: jdotpy/taskthreader/tests.py
```python
import unittest
import time
from taskthreader import WorkGroup
def example_task(result=None, wait_time=.1, exc=None):
if exc:
raise exc()
time.sleep(wait_time)
return result
class WorkGroupTest(unittest.TestCase):
work_time = .1
def test_basic_work_group(self):
# This function represents some work we want to accomplish
# Run 3 tasks in parallel making sure their results are
# accurate and timing is less than if they ran in sequence
work_group = WorkGroup()
work_group.add_task('foo', example_task, 1, wait_time=self.work_time)
work_group.add_task('bar', example_task, 2, wait_time=self.work_time)
work_group.add_task('zip', example_task, 3, wait_time=self.work_time)
success, results = work_group.run()
assert success
self.assertEqual(results['foo'], 1)
self.assertEqual(results['bar'], 2)
self.assertEqual(results['zip'], 3)
self.assertTrue(work_group.last_run_time < self.work_time * 3)
# Run again with threads set to one to run them in sequence
work_group.max_threads = 1
success2, results2 = work_group.run()
assert success2
self.assertTrue(work_group.last_run_time >= self.work_time * 3)
def test_params(self):
# Test the altnerate parameter style
tasks = {
'foo': (example_task, [1], {'wait_time': self.work_time}),
'bar': (example_task, [2]),
'zip': (example_task,)
}
work_group = WorkGroup(tasks=tasks)
success, results = work_group.run()
assert success
self.assertEqual(results['foo'], 1)
self.assertEqual(results['bar'], 2)
self.assertEqual(results['zip'], None)
work_group = WorkGroup()
success, results = work_group.run(tasks=tasks)
assert success
self.assertEqual(results['foo'], 1)
self.assertEqual(results['bar'], 2)
def test_error(self):
work_group = WorkGroup()
work_group.add_task('foo', example_task, 1, wait_time=self.work_time)
work_group.add_task('err', example_task, exc=KeyError)
success, results = work_group.run()
assert not success
self.assertEqual(results['foo'], 1)
assert isinstance(results['err'], KeyError)
```
|
{
"source": "JDougherty/doughuware",
"score": 2
}
|
#### File: doughuware/core/models.py
```python
from django.db import models
class Tag(models.Model):
name = models.CharField(max_length=50)
description = models.CharField(max_length=100, blank=True)
parent = models.ForeignKey('self', blank=True, null=True, related_name="children")
def children(self):
return Tag.objects.filter(parent=self.pk)
def serializable_object(self):
obj = {'name': self.name, 'children': []}
for child in self.children.all():
obj['children'].append(child.serializable_object())
return obj
class Document(models.Model):
name = models.CharField(max_length=50)
description = models.TextField(blank=True)
docfile = models.FileField(upload_to="documents/%Y/%m/%d")
thumbnail = models.ImageField(upload_to="documents/thumbnails/%Y/%m/%d")
preview = models.ImageField(upload_to="documents/previews/%Y/%m/%d")
```
|
{
"source": "jdowlingmedley/biorbd",
"score": 2
}
|
#### File: binding/Python3/test_binder_python_rigidbody.py
```python
from pathlib import Path
import numpy as np
import pytest
import biorbd
def test_load_model():
biorbd.Model('../../models/pyomecaman.bioMod')
def test_dof_ranges():
m = biorbd.Model('../../models/pyomecaman.bioMod')
pi = 3.14159265358979323846
# Pelvis
ranges = m.segment(0).ranges()
assert(ranges[0].min() == -10)
assert(ranges[0].max() == 10)
assert(ranges[1].min() == -10)
assert(ranges[1].max() == 10)
assert(ranges[2].min() == -pi)
assert(ranges[2].max() == pi)
# BrasD
ranges = m.segment(3).ranges()
assert(ranges[0].min() == -pi)
assert(ranges[0].max() == pi)
assert(ranges[1].min() == 0)
assert(ranges[1].max() == pi)
# BrasG
ranges = m.segment(4).ranges()
assert(ranges[0].min() == -pi)
assert(ranges[0].max() == pi)
assert(ranges[1].min() == 0)
assert(ranges[1].max() == pi)
# CuisseD
ranges = m.segment(5).ranges()
assert(ranges[0].min() == -pi / 12)
assert(ranges[0].max() == pi / 2 + pi / 3)
# JambeD
ranges = m.segment(6).ranges()
assert(ranges[0].min() == -pi / 2 - pi / 6)
assert(ranges[0].max() == 0)
# PiedD
ranges = m.segment(7).ranges()
assert(ranges[0].min() == -pi / 2)
assert(ranges[0].max() == pi / 2)
# CuisseG
ranges = m.segment(8).ranges()
assert(ranges[0].min() == -pi / 12)
assert(ranges[0].max() == pi / 2 + pi / 3)
# JambeG
ranges = m.segment(9).ranges()
assert(ranges[0].min() == -pi / 2 - pi / 6)
assert(ranges[0].max() == 0)
# PiedG
ranges = m.segment(10).ranges()
assert(ranges[0].min() == -pi / 2)
assert(ranges[0].max() == pi / 2)
```
|
{
"source": "jdowlingmedley/pyomeca",
"score": 3
}
|
#### File: pyomeca/io/utils.py
```python
import csv
from typing import Optional
def col_spliter(x, p, s):
if p and s:
return x.split(p)[-1].split(s)[0]
if p:
return x.split(p)[-1]
if s:
return x.split(s)[0]
return x
def find_end_header_in_opensim_file(filename: str, end_header: Optional[int] = None):
with open(filename, "rt") as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
if row[0] == "endheader":
end_header = idx
break
if end_header is None:
raise IndexError(
"endheader not detected in your file. Try to specify the `end_header` parameter"
)
return end_header
```
#### File: pyomeca/io/write.py
```python
from pathlib import Path
from typing import Union, Optional
import pandas as pd
import xarray as xr
from scipy.io import savemat
def to_wide_dataframe(array: xr.DataArray) -> pd.DataFrame:
if array.ndim > 2:
df = array.to_series().unstack().T
df.columns = ["_".join(col).strip() for col in df.columns]
return df
return array.to_series().unstack().T
def write_csv(
array: xr.DataArray, filename: Union[str, Path], wide: Optional[bool] = True
):
if wide:
array.meca.to_wide_dataframe().to_csv(filename)
else:
array.to_dataframe().to_csv(filename)
def write_matlab(array: xr.DataArray, filename: Union[str, Path]):
savemat(filename, array.to_dict())
```
#### File: pyomeca/processing/markers.py
```python
import numpy as np
import xarray as xr
from pyomeca.processing import misc
def rotate_markers(markers: xr.DataArray, rt: xr.DataArray) -> xr.DataArray:
misc.has_correct_name(markers, "markers")
rotated_markers = markers.copy()
if rt.ndim == 3 and markers.ndim == 3:
rotated_markers.data = np.einsum("ijk,jlk->ilk", rt, markers)
elif rt.ndim == 2 and markers.ndim == 2:
rotated_markers.data = np.dot(rt, markers)
elif rt.ndim == 2 and markers.ndim == 3:
rotated_markers.data = np.einsum("ij,jkl->ikl", rt, markers)
else:
raise ValueError("`rt` and `markers` dimensions do not match.")
return rotated_markers
```
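A minimal sketch of calling `rotate_markers` directly with hand-built `xarray` arrays. It assumes the import path above and that `misc.has_correct_name` only checks the DataArray's `name` attribute; real pyomeca workflows would build the markers array through the library's own constructors.

```python
import numpy as np
import xarray as xr

from pyomeca.processing.markers import rotate_markers  # import path as shown above

# 4xN homogeneous coordinates for two markers in a single frame (toy data).
markers = xr.DataArray(
    np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0], [1.0, 1.0]]),
    dims=("axis", "channel"),
    name="markers",
)
rt = xr.DataArray(np.eye(4), dims=("row", "col"))  # identity rototranslation
rotated = rotate_markers(markers, rt)  # identical to `markers` for the identity
```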
#### File: pyomeca/tests/test_documentation_code_blocks.py
```python
from pathlib import Path
import black
import pytest
from tests.utils import extract_code_blocks_from_md
docs_path = Path("./docs")
doc_files = [f"{file}" for file in docs_path.glob("*.md")]
doc_files_string = []
for file in doc_files:
with open(f"{file}", "r", encoding="utf8") as f:
doc_files_string.append(f.read().replace("../tests/data", "tests/data"))
@pytest.mark.parametrize("doc_file_string", doc_files_string, ids=doc_files)
def test_code_blocks(doc_file_string):
exec(extract_code_blocks_from_md(doc_file_string), {}, {})
@pytest.mark.parametrize("doc_file_string", doc_files_string, ids=doc_files)
def test_lint_code_blocks(doc_file_string):
code_blocks = extract_code_blocks_from_md(doc_file_string)
if code_blocks:
code_blocks = f"{code_blocks}\n"
assert code_blocks == black.format_str(code_blocks, mode=black.FileMode())
```
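The real `tests.utils.extract_code_blocks_from_md` helper is not shown in this file. A hypothetical minimal version consistent with how the tests use it (joining the body of every fenced python block into one source string) might look like:

```python
import re

FENCE = "`" * 3  # spelled this way to avoid a literal fence inside this example

def extract_code_blocks_from_md(text: str) -> str:
    # Grab the body of every fenced python block and join them so the result
    # can be exec'd or formatted with black as a single source string.
    pattern = re.escape(FENCE + "python") + r"\n(.*?)" + re.escape(FENCE)
    return "\n".join(re.findall(pattern, text, flags=re.DOTALL))
```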
|
{
"source": "jdowner/gist",
"score": 3
}
|
#### File: gist/tests/test_cli.py
```python
import base64
import json
import responses
def b64encode(s):
"""Return the base64 encoding of a string
To support string encodings other than ascii, the content of a gist needs
to be uploaded in base64. Because python2.x and python3.x handle strings
differently, it is necessary to be explicit about passing a string into
b64encode as bytes. This function handles the encoding of the string into
bytes, and then decodes the resulting bytes into a UTF-8 string, which is
returned.
"""
return base64.b64encode(s.encode("utf-8")).decode("utf-8")
@responses.activate
def test_list(editor, gist_command):
message = list()
expected_gists = list()
for id in range(300):
desc = "test-{}".format(id)
public = id % 2 == 0
message.append(
{
"id": id,
"description": desc,
"public": public,
}
)
expected_gists.append("{} {} test-{}".format(id, "+" if public else "-", id))
responses.add(
responses.GET,
"https://api.github.com/gists",
body=json.dumps(message),
status=200,
)
gists = gist_command("list")
assert gists == expected_gists
@responses.activate
def test_content(editor, gist_command):
responses.add(
responses.GET,
"https://api.github.com/gists/1",
body=json.dumps(
{
"files": {
"file-A.txt": {
"filename": "file-A.txt",
"content": b64encode("test-content-A"),
},
"file-B.txt": {
"filename": "file-B.txt",
"content": b64encode("test-content-\u212C"),
},
},
"description": "test-gist",
"public": True,
"id": 1,
}
),
status=200,
)
lines = gist_command("content 1")
assert "file-A.txt:" in lines
assert "test-content-A" in lines
assert "file-B.txt:" in lines
assert "test-content-\u212c" in lines
```
#### File: gist/tests/test_config.py
```python
import configparser
import gist
import pytest
@pytest.fixture
def config():
cfg = configparser.ConfigParser()
cfg.add_section("gist")
return cfg
def test_get_value_from_command():
"""
Ensure that values which start with ``!`` are treated as commands and
return the string printed to stdout by the command, otherwise ensure
that the value passed to the function is returned.
"""
assert "magic token" == gist.client.get_value_from_command('!echo "\nmagic token"')
assert "magic token" == gist.client.get_value_from_command(' !echo "magic token\n"')
assert "magic token" == gist.client.get_value_from_command("magic token")
def test_get_personal_access_token_missing(config):
with pytest.raises(gist.client.GistMissingTokenError):
gist.client.get_personal_access_token(config)
@pytest.mark.parametrize("token", ["", " "])
def test_get_personal_access_token_empty(config, token):
config.set("gist", "token", token)
with pytest.raises(gist.client.GistEmptyTokenError):
gist.client.get_personal_access_token(config)
@pytest.mark.parametrize("token", [" <PASSWORD> ", "<KEY>"])
def test_get_personal_access_token_valid(config, token):
config.set("gist", "token", token)
gist.client.get_personal_access_token(config)
```
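The `!` convention tested above lets a config value be produced by a shell command. A small sketch follows; the password-manager entry in the comment is made up, and the import mirrors how the tests access the module.

```python
from gist import client

# In the gist config file this might look like (hypothetical entry name):
#
#   [gist]
#   token = !pass show github/gist-token
#
# Programmatically, the helper runs the command and returns its stripped output.
token = client.get_value_from_command("!echo demo-token")
assert token == "demo-token"
```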
|
{
"source": "jdowner/katipo",
"score": 3
}
|
#### File: katipo/katipo/traverse.py
```python
import hashlib
import logging
import urlparse
import string
from bs4 import BeautifulSoup
import nltk
import requests
import datastore
log = logging.getLogger(__name__)
class Traverse(object):
"""
A Traverse object takes a corpus and a set of seed URLs and performs a
traversal of the links to URLs contained within. Because the URLs that are
discovered are placed in a set, the order that they are discovered in is not
preserved. Thus the traversal is neither a depth-first nor a breadth-first
traversal.
"""
def __init__(self, corpus):
"""
Creates a Traverse object.
"""
self._corpus = corpus
@property
def corpus(self):
return self._corpus
def should_search(self, url):
headers = requests.head(url).headers
return headers.get('content-type', '').startswith('text/html')
def __call__(self):
try:
db = datastore.Datastore()
# remove the url from the queue and add it to the 'searched' set so
# that it is never searched again.
url = db.pop_pending()
if url is None:
return
# Tag the URL as being processed. If it cannot be tagged as
# processed that means that it is being processed by another process
# or thread, which means we should not process it here.
if not db.mark_as_processing(url):
log.debug('already processing %s' % url)
db.push_pending(url)
return
db.add_to_searched(url)
if not self.should_search(url):
return
text = requests.get(url).text
soup = BeautifulSoup(text)
# calculate a hash for the text
uhash = hashlib.md5(text.encode('utf-8')).hexdigest()
# determine the score for the text
score = self.score(text)
db.add_result(url, score)
log.info('score %d %s' % (score, url))
for a in soup.find_all('a', href=True):
link = a['href']
# convert relative URLs into absolute URLs
if not link.startswith('http'):
link = urlparse.urljoin(url, link)
# if the join does not work, ignore this URL
if not link.startswith('http'):
continue
# if link is already searched, skip it
if db.is_searched(link):
continue
# add link to pending searches
db.push_pending(link)
except Exception as e:
log.exception(e)
def score(self, text):
tokens = nltk.word_tokenize(text.lower())
tokens = {t for t in tokens if t not in string.punctuation}
return len(tokens.intersection(self.corpus))
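# Illustrative usage sketch (added for clarity; not part of the original
# module). The datastore API is inferred from the calls above
# (push_pending/pop_pending, is_searched, add_result) and the seed URL is a
# made-up example:
#
#   corpus = {"python", "asyncio", "generators"}
#   db = datastore.Datastore()
#   db.push_pending("http://example.com")
#   crawl = Traverse(corpus)
#   for _ in range(100):
#       crawl()  # pops one pending URL, scores it, and queues its links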
```
|
{
"source": "jdowner/loren-ipsum",
"score": 3
}
|
#### File: jdowner/loren-ipsum/loren_ipsum.py
```python
import logging
import random
import re
import sys
import docopt
logger = logging.getLogger('loren-ipsum')
words = [
'ad',
'adipisicing',
'aliqua',
'aliquip',
'anim',
'aute',
'cillum',
'commodo',
'consectetur',
'consequat',
'culpa',
'cupidatat',
'deserunt',
'do',
'dolor',
'dolore',
'duis',
'ea',
'eiusmod',
'elit',
'enim',
'esse',
'est',
'et',
'eu',
'ex',
'excepteur',
'exercitation',
'fugiat',
'id',
'in',
'incididunt',
'ipsum',
'irure',
'labore',
'laboris',
    'laborum',
'lorem',
'magna',
'minim',
'mollit',
'nisi',
'non',
'nostrud',
'nulla',
'occaecat',
'officia',
'pariatur',
'proident',
'qui',
'quis',
'reprehenderit',
'sed',
'sint',
'sunt',
'tempor',
'ullamco',
'ut',
'velit',
'veniam',
'voluptate',
]
class Transform(object):
def __init__(self):
self.rules = {}
self.rules['F'] = ['A', 'B', 'FB', 'FA', 'FGF']
self.rules['S'] = ['FT']
self.rules['A'] = ['WW']
self.rules['B'] = ['WWW']
self.rules['G'] = [',']
self.rules['T'] = ['.']
self.rules['P'] = ['S', 'PS']
def transform(self, s):
"""Transform a string according to the rules"""
previous = str(s)
while True:
logger.debug(previous)
# Iterate through each character in the string and transform it if there is
# a rule associated with it. Otherwise, just copy the character.
current = []
for c in previous:
if c in self.rules:
current.append(random.choice(self.rules[c]))
else:
current.append(c)
current = ''.join(current)
# If the transformed string is identical to the original string, the
# transformation is at a fixed point and successive transformations will
# have no effect.
if previous == current:
return current
previous = current
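# Worked example (added for illustration; the rule choices shown are just one
# possible random outcome). transform('F') rewrites every character that has a
# rule on each pass until the string stops changing, e.g.
#   'F' -> 'FA' -> 'BWW' -> 'WWWWW'
# format_sentence() below then replaces each 'W' with a random word,
# title-cases the first one, and appends a period, e.g. "Culpa dolor enim sint ut."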
def format_sentence(tokens):
w = []
for c in tokens:
if c == 'W':
w.append(random.choice(words))
else:
w.append(c)
w[0] = w[0].title()
return re.sub(r' ,', r',', '{}.'.format(' '.join(w)))
def format_paragraph(paragraph):
sentences = [s for s in paragraph.split('.') if s]
return ' '.join(format_sentence(s) for s in sentences)
def main(argv=sys.argv[1:]):
args = docopt.docopt(__doc__)
if args['word']:
print(random.choice(words))
return
if args['sentence']:
t = Transform()
print(format_sentence(t.transform('F')))
return
if args['paragraph']:
t = Transform()
print(format_paragraph(t.transform('P')))
if __name__ == "__main__":
logging.basicConfig()
main()
```
|
{
"source": "jdowner/lrc",
"score": 3
}
|
#### File: lrc/lrc/device.py
```python
import asyncio
import logging
import sys
class TerminalStdout(object):
def __init__(self, memory):
self.memory = memory
self.log = logging.getLogger('lrc.terminal')
self.loop = asyncio.get_event_loop()
self.buf = list()
@asyncio.coroutine
def listen(self):
try:
val = self.memory.read(0)
if val != 0:
self.log.debug('read {}'.format(val))
self.buf.append(val)
self.memory.write(0, 0)
self.loop.call_soon(self.flush)
self.loop.create_task(self.listen())
finally:
self.flush()
def flush(self):
self.log.debug("write {}".format(self.buf))
sys.stdout.write(''.join(map(chr, self.buf)))
self.buf = list()
```
|
{
"source": "jdowner/python-svg",
"score": 3
}
|
#### File: python-svg/svg/svg.py
```python
import copy
class Attributes(dict):
def __init__(self, *args, **kwargs):
super(Attributes, self).__init__(*args, **kwargs)
def __repr__(self):
return " ".join('{}="{}"'.format(k, v) for k, v in self.items())
class Leaf(object):
def __init__(self, name, **attrs):
self.attrs = Attributes(attrs)
self.name = name
def open_repr(self):
return '<{name} {attrs}>{{}}</{name}>'.format(
name=self.name,
attrs=self.attrs,
)
def closed_repr(self):
return '<{name} {attrs} />'.format(name=self.name, attrs=self.attrs)
@property
def id(self):
return self.attrs.get("id", None)
class Element(Leaf):
def __init__(self, name, **attrs):
super(Element, self).__init__(name, **attrs)
self.children = list()
def __repr__(self):
if not self.children:
return self.closed_repr()
return self.open_repr().format("".join(repr(c) for c in self.children))
class Definitions(Element):
def __init__(self):
super(Definitions, self).__init__("defs")
def __contains__(self, id):
return any(c.id == id for c in self.children)
class Svg(Element):
def __init__(self):
super(Svg, self).__init__("svg")
self.definitions = Definitions()
self.children.insert(0, self.definitions)
self.attrs["xmlns"] = "http://www.w3.org/2000/svg"
self.attrs["xmlns:xlink"] = "http://www.w3.org/1999/xlink"
def add_definition(self, id, element):
if id in self.definitions:
raise KeyError("A definition with that ID already exists")
definition = copy.copy(element)
definition.attrs["id"] = id
self.definitions.children.append(definition)
class Group(Element):
def __init__(self, **attrs):
super(Group, self).__init__("g", **attrs)
class Use(Element):
def __init__(self, name, **attrs):
attrs["xlink:href"] = "#{}".format(name)
super(Use, self).__init__("use", **attrs)
class Rect(Element):
def __init__(self, x=0, y=0, height=0, width=0, **attrs):
attrs = Attributes(attrs)
attrs.update(dict(x=x, y=y, height=height, width=width))
super(Rect, self).__init__("rect", **attrs)
class Text(Leaf):
def __init__(self, text, **attrs):
super(Text, self).__init__("text", **attrs)
self.text = text
def __repr__(self):
return self.open_repr().format(self.text)
```
|
{
"source": "jdowner/rigidbody",
"score": 3
}
|
#### File: rigidbody/tests/test_matrix.py
```python
from rigidbody import (Matrix, svd, approx)
def test_matrix_equality():
A = Matrix(3, 3)
B = Matrix(3, 3)
assert(A == B)
A = Matrix.identity(3, 3)
B = Matrix.identity(3, 3)
assert(A == B)
A = Matrix.zero(3, 3)
B = Matrix.zero(3, 3)
assert(A == B)
def test_matrix_identities():
A = Matrix.identity(3, 3)
A[0, 1] = 1
assert(A + Matrix.zero(3, 3) == A)
assert(Matrix.zero(3, 3) + A == A)
assert(A * Matrix.identity(3, 3) == A)
assert(Matrix.identity(3, 3) * A == A)
def test_matrix_addition():
A = Matrix(3, 3)
B = Matrix(3, 3)
C = Matrix(3, 3)
A[0, 1] = 1
B[1, 0] = 2
C[0, 1] = 1
C[1, 0] = 2
assert(A + B == C)
assert(B + A == C)
def test_matrix_subtraction():
A = Matrix(3, 3)
B = Matrix(3, 3)
C = Matrix(3, 3)
A[0, 1] = 1
B[1, 0] = 2
C[0, 1] = 1
C[1, 0] = 2
assert(C - A == B)
assert(C - B == A)
def test_matrix_multiplication():
A = Matrix(3, 3)
B = Matrix(3, 3)
AB = Matrix(3, 3)
BA = Matrix(3, 3)
A[0, :] = [1, 0, 0]
A[1, :] = [0, 0, 1]
A[2, :] = [0, 1, 0]
B[0, :] = [1, 2, 3]
B[1, :] = [4, 5, 6]
B[2, :] = [7, 8 ,9]
AB[0, :] = [1, 2, 3]
AB[1, :] = [7, 8 ,9]
AB[2, :] = [4, 5, 6]
assert(A * B == AB)
BA[0, :] = [1, 3, 2]
BA[1, :] = [4, 6, 5]
BA[2, :] = [7 ,9, 8]
assert(B * A == BA)
def test_matrix_approx():
A = Matrix(3, 3)
B = Matrix(3, 3)
A[0, :] = B[0, :] = [1, 2, 3]
A[1, :] = B[1, :] = [4, 5, 6]
A[2, :] = B[2, :] = [7, 8 ,9]
assert(approx(A, B, tol=0.001))
A[0, 0] += 0.002
assert(not approx(A, B, tol=0.001))
def test_matrix_svd():
A = Matrix(3, 3)
A[0, :] = [1, 2, 3]
A[1, :] = [4, 5, 6]
A[2, :] = [7, 8 ,9]
U, S, V = svd(A)
assert(approx(U * S * V.transposed(), A))
```
|
{
"source": "jdowner/tree",
"score": 4
}
|
#### File: jdowner/tree/tree.py
```python
import networkx as nx
graph = nx.DiGraph(["ac", "cd", "ce", "ab", 'ef','dg', 'dh'])
def tree(graph):
def recurse(node, padding, last=False):
if last:
print(u"{}└── {}".format(padding[:-4], node))
else:
print(u"{}├── {}".format(padding[:-4], node))
children = graph.successors(node)
if children:
for child in children[:-1]:
recurse(child, padding + u"│ ", last=False)
recurse(children[-1], padding + u" ", last=True)
recurse(graph.nodes()[0], u" ", last=True)
tree(graph)
```
|
{
"source": "JDownloader/GEL-3014_Design3",
"score": 3
}
|
#### File: GEL-3014_Design3/baseStation/baseStation.py
```python
from vision.kinect import Kinect, NoKinectDetectedException
from tests.test_vision_kinect import FakeKinect
from cubeFinder import DemoCubeFinder, CubeFinder
from vision.robotLocator import RobotPosition
from flag import Flag
class BaseStation():
def __init__(self):
try:
self.kinect = Kinect('1')
except NoKinectDetectedException:
self.kinect = FakeKinect()
self.cube_finder = CubeFinder(self.kinect)
self.flag = None
self.question = ''
self.answer = ''
self.robot_position = None
def set_question(self, question, answer):
self.question = question
self.answer = answer
self.flag = Flag(answer)
def set_robot_position(self, x, y, angle):
self.robot_position = RobotPosition(x, y, angle)
```
#### File: GEL-3014_Design3/baseStation/flagProcessor.py
```python
import json
from flag import Flag
class FlagProcessor():
def __init__(self, country):
self.flag = Flag(country)
def get_flag(self):
return self.flag.get_matrix()
```
#### File: GEL-3014_Design3/baseStation/__main__.py
```python
import os.path
from flask import Flask, abort, redirect, url_for, jsonify, request
from contextProvider import ContextProvider
from robotIPFinder import RobotFinder
from vision.robotLocator import RobotLocator
import requests
import json
import constants as cte
from questionanswering.question_processor import QuestionProcessor
from baseStation import BaseStation
import flagProcessor
SERVER_PORT = 8000
class BaseStationServer(Flask):
robot_ip_address = RobotFinder.IP_NOT_FOUND
def __init__(self, *args, **kwargs):
super(BaseStationServer, self).__init__(*args, **kwargs)
self.base_station = BaseStation()
self.context_provider = ContextProvider(self.base_station)
self.robot_locator = RobotLocator()
self.refresh_since_last_kinect_update = 999
def set_robot_ip_address(self, ip):
print ip
self.robot_ip_address = ip
app = BaseStationServer(__name__)
app.config.from_object(__name__)
# since it will only be used by software engineers, debug on is ok
app.debug = True
thread_robot_finder = RobotFinder(app.set_robot_ip_address)
thread_robot_finder.start()
app.set_robot_ip_address('192.168.0.36')
def root_dir():
return os.path.abspath(os.path.dirname(__file__))
@app.route('/')
def hello():
return redirect(url_for('static', filename='index.html'))
@app.route('/start')
def start():
if app.robot_ip_address == RobotFinder.IP_NOT_FOUND:
abort(500)
else:
data = {'ip': '192.168.0.32'}
response = requests.post('http://' + app.robot_ip_address + ':8001' + '/basestationip', data=data)
# if response.status_code == 200:
# response2 = requests.get('http://' + app.robot_ip_address + ':8001' + '/')
return 'ok'
@app.route('/robotposition')
def fetch_robot_position():
position = refresh_kinect()
return jsonify(angle=position.get_angle_in_deg(),
position=(position.position[0], position.position[1]))
@app.route('/cubeposition', methods=['POST'])
def fetch_cube_position():
cube_position = (-500, -500)
if request.method == 'POST':
color = request.form.get('color', None)
cube_position = app.base_station.cube_finder.get_cube_position_with_color(color)
return jsonify(position_x=cube_position[0] , position_y=cube_position[1])
@app.route('/path', methods=['POST'])
def receive_path():
if request.method == 'POST':
position = refresh_kinect()
path = eval(request.data)
app.context_provider.set_path(path)
return "ok"
@app.route('/flag')
def fetch_flag():
# flag = Flag('Canada').get_matrix()
flag = ''
strikes = 0
for cycle in xrange(cte.NUMBER_OF_WRONG_ANSWER_ALLOWED):
question = fetch_question()
if question is None:
break
print question
answer = fetch_answer(question)
if is_right_answer(answer):
app.base_station.set_question(question, answer)
flag_processor = flagProcessor.FlagProcessor(answer)
flag = flag_processor.get_flag()
break
else:
strikes += 1
if strikes >= 2:
answer = 'Burkina Faso'
app.base_station.set_question(question, answer)
flag_processor = flagProcessor.FlagProcessor(answer)
flag = flag_processor.get_flag()
break
# flag = Flag('Russia').get_matrix()
return jsonify(flag=flag)
# A JavaScript function calls this method every 250 ms
@app.route('/context')
def get_context():
app.refresh_since_last_kinect_update += 1
if app.refresh_since_last_kinect_update >= 4:
refresh_kinect()
context = app.context_provider.get_context(app.robot_ip_address)
return jsonify(context)
@app.route('/changerobotposition', methods=['POST'])
def change():
if request.method == 'POST':
position_x = request.form.get('position_x', None)
position_y = request.form.get('position_y', None)
angle = request.form.get('angle', None)
app.base_station.set_robot_position(position_x, position_y, angle)
return 'ok'
def fetch_question():
json_question = ''
question = ''
for url in cte.ATLAS_WEB_SERVER_URLS:
try:
response = requests.get(url, verify=False, timeout=0.5)
if response.status_code == 200:
json_question = response.text
break
except Exception:
pass
try :
question = json.loads(json_question)['question']
except Exception:
print 'No question from Atlas'
fetch_question()
return question
def fetch_answer(question):
print "question : " + question
processor = QuestionProcessor()
processor.answer_question(question)
return processor.answer
def is_right_answer(answer):
print answer
answer_is_good = raw_input('Is this the right answer ? (y/n) : ')
    if answer_is_good[0] == 'y':
return True
else:
print 'Will retry...'
return False
def refresh_kinect():
position = app.robot_locator.get_position(app.base_station.kinect)
app.base_station.robot_position = position
app.refresh_since_last_kinect_update = 0
app.context_provider.add_known_position(position.position)
return position
if __name__ == '__main__': # pragma: no cover
app.run(host='0.0.0.0', port=SERVER_PORT, use_reloader=False, threaded=False)
thread_robot_finder.stop()
```
#### File: GEL-3014_Design3/controller/serialCom.py
```python
import serial
from serial.tools.list_ports import comports
import controller.constants
from maestro import Controller
import time
class BadMovementDirection(Exception):
def __init__(self):
pass
class PololuSerialConnectionFailed(Exception):
pass
class LedController:
COLORS_FOR_CONTROLLER = {'red': 'R',
'blue': 'B',
'green': 'G',
'yellow': 'Y',
'white': 'W',
'black': 'K',
'off': 'F'}
def __init__(self):
communication_port = ''
for port in comports():
if port[2].find('USB VID:PID=2341:003b') > -1:
communication_port = port[0]
if len(communication_port) > 0:
self.serial_communication = serial.Serial(communication_port, 9600)
def change_color(self, led_color='F', led_position=9):
self.serial_communication.write(str(led_position) + self.COLORS_FOR_CONTROLLER.get(led_color))
class PololuConnectionCreator:
def __init__(self):
self.pololu_serial_communication = ''
pololu_communication_port = ''
for port in comports():
if port[2].find('VID:PID=1ffb:0089') > -1:
pololu_communication_port = port[0]
try:
self.pololu_serial_communication = Controller(pololu_communication_port)
except Exception:
raise PololuSerialConnectionFailed()
class CameraController:
def __init__(self, pololu_serial_communication):
self.position_vertical = int(4 * 1240)
self.position_horizontal = int(4 * 1550)
self.channel_vertical = controller.constants.POLOLU_CHANNELS_PWM.get('camera_vertical')
self.channel_horizontal = controller.constants.POLOLU_CHANNELS_PWM.get('camera_horizontal')
self.reset_position(pololu_serial_communication)
def reset_position(self, pololu_serial_communication):
pololu_serial_communication.setTarget(self.channel_horizontal, self.position_horizontal)
pololu_serial_communication.setTarget(self.channel_vertical, self.position_vertical)
class GripperController:
def __init__(self, pololu_serial_communication):
self.gripper_serial_communication = pololu_serial_communication
self.channel_vertical = controller.constants.POLOLU_CHANNELS_PWM.get('gripper_vertical')
self.channel_pliers = controller.constants.POLOLU_CHANNELS_PWM.get('gripper_pliers')
self.min_vertical = int(4*952)
self.max_vertical = int(4*1929)
self.pos_vertical_raised_high = int(4*1921)
self.pos_vertical_raised_low = int(4*1115)
self.pos_vertical_table = int(4*1000)
self.vertical_speed = 20
self.min_pliers = int(4*890)
self.max_pliers = int(4*2344)
self.pos_pliers_open_big = int(4*900)
self.pos_pliers_open_small = int(4*1643)
self.pos_pliers_closed = int(4*2075)
self.pliers_speed = 20
# self.min_vertical = int(4*704.00)
# self.max_vertical = int(4*2096.00)
# self.pos_vertical_raised_high = int(4*0)
# self.pos_vertical_raised_low = int(4*1785.00)
# self.pos_vertical_table = int(4*1264.75)
# self.vertical_speed = 20
# self.min_pliers = int(4*454.25)
# self.max_pliers = int(4*2374.50)
# self.pos_pliers_open_big = int(4*2031.00)
# self.pos_pliers_open_small = int(4*1156.75)
# self.pos_pliers_closed = int(4*850.00)
# self.pliers_speed = 30
self.GRIPPER_RAISE_LEVEL_DICTIONARY = {0: self.pos_vertical_table,
1: self.pos_vertical_raised_low,
2: self.pos_vertical_raised_high}
self.PLIERS_OPENING_LEVEL_DICTIONARY = {0: self.pos_pliers_closed,
1: self.pos_pliers_open_small,
2: self.pos_pliers_open_big}
self._set_parameters()
def _set_parameters(self):
if self.gripper_serial_communication:
self.gripper_serial_communication.setRange(self.channel_vertical, self.min_vertical, self.max_vertical)
self.gripper_serial_communication.setSpeed(self.channel_vertical, self.vertical_speed)
self.gripper_serial_communication.setTarget(self.channel_vertical, self.pos_vertical_raised_high)
self.gripper_serial_communication.setRange(self.channel_pliers, self.min_pliers, self.max_pliers)
self.gripper_serial_communication.setSpeed(self.channel_pliers, self.pliers_speed)
self.gripper_serial_communication.setTarget(self.channel_pliers, self.pos_pliers_open_big)
def change_vertical_position(self, raise_level):
self.gripper_serial_communication.setTarget(self.channel_vertical,
self.GRIPPER_RAISE_LEVEL_DICTIONARY[raise_level])
while self.gripper_serial_communication.isMoving(self.channel_vertical):
pass
def pliers_control(self, opening_level):
self.gripper_serial_communication.setTarget(self.channel_pliers,
self.PLIERS_OPENING_LEVEL_DICTIONARY[opening_level])
while self.gripper_serial_communication.isMoving(self.channel_pliers):
pass
class RobotMovementController:
ARDUINO_SERIAL_DIRECTION_STRING = {'forward': 1,
'right': 2,
'reverse': 3,
'left': 4}
def __init__(self):
for port in comports():
if port[2].find('USB VID:PID=2341:003d') > -1 or port[2].find('USB VID:PID=2a03:003d') > -1:
communication_port = port[0]
self.serial_communication = serial.Serial(communication_port, baudrate=9600, timeout=7)
break
if self.serial_communication is None:
print 'Arduino not detected!'
def move_robot(self, direction, distance_in_mm):
distance_in_cm = int(round(distance_in_mm / 10))
if distance_in_cm >= 40:
speed_percentage = 75
elif 25 < distance_in_cm <= 40:
speed_percentage = 50
elif 15 < distance_in_cm <= 25:
speed_percentage = 30
else:
speed_percentage = 10
self.serial_communication.flushInput()
try:
self.serial_communication.write(str(chr(self.ARDUINO_SERIAL_DIRECTION_STRING.get(direction))))
except Exception:
raise BadMovementDirection()
self.serial_communication.write(str(chr(speed_percentage)))
self.serial_communication.write(str(chr(int(distance_in_cm))))
self.serial_communication.readline()
def rotate_robot(self, rotation_direction_is_left, rotation_angle_in_degrees, rotation_speed_is_slow):
if rotation_speed_is_slow:
speed_percentage = 5
else:
speed_percentage = 25
self.serial_communication.flushInput()
if rotation_direction_is_left:
self.serial_communication.write(str(chr(101)))
else:
self.serial_communication.write(str(chr(102)))
self.serial_communication.write(str(chr(speed_percentage)))
self.serial_communication.write(str(chr(rotation_angle_in_degrees)))
self.serial_communication.readline()
def stop_all_movement(self):
self.serial_communication.write(str(chr(99)))
self.serial_communication.write(str(chr(0)))
self.serial_communication.write(str(chr(0)))
class Robot:
def __init__(self):
self._initialize_connections()
try:
self.pololu_serial_connection = PololuConnectionCreator()
except PololuSerialConnectionFailed:
print 'Polulu connection failed'
else:
self._initialize_pololu_connections(self.pololu_serial_connection)
time.sleep(controller.constants.LED_COMMUNICATION_INITIALIZATION_WAIT_TIME)
def _initialize_connections(self):
self.movement_controller = RobotMovementController()
self.led_controller = LedController()
def _initialize_pololu_connections(self, pololu_connection):
self.camera_controller = CameraController(pololu_connection.pololu_serial_communication)
self.gripper_controller = GripperController(pololu_connection.pololu_serial_communication)
def move(self, direction, distance_in_mm):
self.movement_controller.move_robot(direction, distance_in_mm)
def rotate(self, rotation_direction_is_left, rotation_angle_in_degrees, movement_speed_is_slow=False):
self.movement_controller.rotate_robot(rotation_direction_is_left, int(round(rotation_angle_in_degrees)),
movement_speed_is_slow)
def move_gripper_vertically(self, raise_level):
self.gripper_controller.change_vertical_position(raise_level)
def change_pliers_opening(self, opening_level):
self.gripper_controller.pliers_control(opening_level)
def change_led_color(self, led_color, led_position):
self.led_controller.change_color(str(led_color), int(led_position))
def stop_movement(self):
self.movement_controller.stop_all_movement()
```
#### File: questionanswering/filters/population_filter.py
```python
from collections import OrderedDict
from questionanswering import bidictionnary
import nltk
def process(question, query_builder):
mapped_question = next(question)
    if ('population' in mapped_question) and ('growth' not in mapped_question) and ('death' not in mapped_question) and ('birth' not in mapped_question):
        if ('greater' in mapped_question) and ('than' in mapped_question):
population_regex = extract_greater_than_population_number(mapped_question)
query_builder.with_category_only('population')
query_builder.with_regex_query(population_regex)
else :
population = extract_population(mapped_question)
query_builder.with_category_data('population', population)
yield mapped_question
def extract_population(mapped_question):
reverse_dict = bidictionnary.Bidict(mapped_question)
number = reverse_dict.keys_with_value('CD')
if len(number) > 1:
return extract_population_from_spaced_number(mapped_question)
elif "," in number[0]:
return ' '.join(number)
else:
return format(int(' '.join(number)), ",d")
def extract_population_from_spaced_number(mapped_question):
sentence = mapped_question.items()
grammar = "NP: {<CD>*<CD>}"
parser = nltk.RegexpParser(grammar)
result = parser.parse(sentence)
numbers = []
for subtree in result.subtrees():
if subtree.label() == 'NP':
numbers = [key for key in OrderedDict(subtree.leaves())]
return ','.join(numbers)
def extract_greater_than_population_number(mapped_question):
final_regex = ''
minimum_population_number = extract_population_from_spaced_number(mapped_question)
for index, number in enumerate(minimum_population_number):
        if number == ',':
final_regex += ','
else:
            if (index == 0) or (index == 2):
final_regex += '[' + number + '-9' + ']'
else:
final_regex += '[0-9]'
return final_regex
```
#### File: questionanswering/filters/population_growth_rate_filter.py
```python
from questionanswering import bidictionnary
from numpy import arange
def process(question, query_builder):
mapped_question = next(question)
if ('population' in mapped_question) & ('growth' in mapped_question) & ('rate' in mapped_question):
unemployment_rate = extract_growth_rate(mapped_question)
if ('between' in mapped_question):
numbers_in_range = process_between_growth_rate(unemployment_rate)
query_builder.with_category_data('population growth rate', ' '.join(numbers_in_range))
else :
query_builder.with_category_data('population growth rate', ' '.join(unemployment_rate))
yield mapped_question
def extract_growth_rate(mapped_question):
reverse_dict = bidictionnary.Bidict(mapped_question)
return reverse_dict.keys_with_values(['CD'])
def process_between_growth_rate(unemployment_rates):
rates = arange(float(unemployment_rates[0]), float(unemployment_rates[1]),0.01)
converted_rates = [str(item) for item in rates]
return set(converted_rates).difference(unemployment_rates)
```
#### File: questionanswering/filters/total_area_filter.py
```python
from questionanswering import bidictionnary
def process(question, query_builder):
mapped_question = next(question)
if ('total' in mapped_question) & ('area' in mapped_question):
total_area = extract_total_area(mapped_question)
query_builder.with_category_data('area', 'total ' + total_area)
yield mapped_question
def extract_total_area(mapped_question):
reverse_dict = bidictionnary.Bidict(mapped_question)
total_area = reverse_dict.key_with_value('CD')
return format(int(total_area), ",d")
```
#### File: questionanswering/filters/urban_areas_filter.py
```python
from questionanswering import bidictionnary
def process(question, query_builder):
mappedQuestion = next(question)
if ('urban' in mappedQuestion) & (('area' in mappedQuestion) | ('areas' in mappedQuestion)):
urban_areas = extract_urban_areas(mappedQuestion)
query_builder.with_category_data('major urban areas', ' '.join(urban_areas))
yield mappedQuestion
def extract_urban_areas(mappedQuestion):
reverse_dict = bidictionnary.Bidict(mappedQuestion)
urban_areas = reverse_dict.keys_with_value('NNP')
if not urban_areas:
return reverse_dict.keys_with_value('CD')
else:
return urban_areas
```
#### File: GEL-3014_Design3/questionanswering/__main__.py
```python
import json
import time
from question_processor import QuestionProcessor
import requests
def fetchQuestion():
# return ["My telephone lines in use are 1.217 million.","My unemployment rate is 40.6%.","My population is 3,060,631.", "One national symbol of this country is the edelweiss.", "The lotus blossom is the national symbol of this country.",
# "The major urban areas of this country are Santiago, Valparaiso and Concepcion.", "The title of my national anthem is Advance Australia Fair.",
# "What country has .dz as its internet country code?", "What country has a latitude of 41.00 S?", "What country has a population growth rate of 1.46%?",
# "What country has a total area of 390757 sq km?", "What country has declared its independence on 22 May 1990?", "What country has religions including hindu, muslim, Christian, and sikh?",
# "What country has Yaounde as its capital?", "In 1923, we proclaimed our independence.", "My latitude is 16 00 S and my longitude is 167 00 E.", "My population growth rate is between 1.44% and 1.47%.",
# "My national symbol is the elephant.", "My latitude is 16 00 S and my longitude is 167 00 E.", "My internet country code is .br.", "My independence was declared in August 1971.",
# "My death rate is greater than 13 death/1000 and my capital starts with Mos.", "My capital name starts with Moga.", "My capital name starts with Ath and ends with ens.","22 September 1960 is the date of independence of this country." ]
return "What country has a birth rate of 46.12 births/ 1000 population?"
ATLAS_WEB_SERVER_URLS = ['https://192.168.0.2', 'https://1172.16.58.3', 'https://172.16.17.32']
def main():
# flag = ''
# for cycle in xrange(2):
# question = fetch_question()
# print question
# answer = fetch_answer(question)
# if is_right_answer(answer):
# # app.base_station.set_question(question, answer)
# print 'Nicely done!'
# break
# else:
# print 'Oh Oh'
#
# def fetch_answer(question):
# print "question : " + question
# processor = QuestionProcessor()
# processor.answer_question(question)
# return processor.answer
#
# def is_right_answer(answer):
# print answer
# answer_is_good = raw_input('Is this the right answer ? (y/n) : ')
# strikes = 0
# if answer_is_good[0] is 'y':
# return True
# else:
# strikes += 1
# if strikes < 2:
# return False
# else :
# return True
#
# def fetch_question():
# question = ''
# for url in ATLAS_WEB_SERVER_URLS:
# try:
# response = requests.get(url, verify=False, timeout=0.1)
# if response.status_code == 200:
# question = response.text
# break
# except Exception:
# pass
# return json.loads(question)['question']
question = fetchQuestion()
start_time = time.time()
print "question : " + question
processor = QuestionProcessor()
processor.answer_question(question)
print "answer : " + processor.answer
print("--- %s seconds ---" % (time.time() - start_time))
if __name__ == "__main__":
main()
```
#### File: GEL-3014_Design3/questionanswering/question_processor.py
```python
import json
from collections import OrderedDict
import nltk.corpus # 'maxent_treebank_pos_tagger', 'punkt' are actually used
import elastic_search_client
from query_builder import QueryBuilder
from filters import capital_filter, independence_date_filter, country_code_filter, national_symbol_filter, \
urban_areas_filter, religion_filter, geographic_coordinates_filter, national_anthem_filter, \
unemployment_rate_filter, population_growth_rate_filter, total_area_filter, population_filter,\
telephone_lines_filter, language_filter, public_debt_filter, illicit_drugs_filter, industries_filter, \
importation_filter, inflation_rate_filter, electricity_production_filter, climate_filter, death_rate_filter, \
birth_rate_filter, internet_users_filter, exportation_filter
class QuestionProcessor:
def __init__(self):
self.answer = ""
def answer_question(self, question):
query_builder = QueryBuilder()
pipeline_steps = [dissect_sentence, capital_filter.process, independence_date_filter.process,
country_code_filter.process, national_symbol_filter.process, urban_areas_filter.process,
religion_filter.process, geographic_coordinates_filter.process, national_anthem_filter.process,
unemployment_rate_filter.process, population_growth_rate_filter.process, total_area_filter.process,
population_filter.process, telephone_lines_filter.process, language_filter.process,
public_debt_filter.process, illicit_drugs_filter.process, industries_filter.process,climate_filter.process,
importation_filter.process, inflation_rate_filter.process, electricity_production_filter.process,
death_rate_filter.process, birth_rate_filter.process, internet_users_filter.process, exportation_filter.process,
self.fetch_answer]
pipeline = combine_pipeline(question, query_builder, pipeline_steps)
consume(pipeline)
def fetch_answer(self, question, query_builder):
next(question)
query = query_builder.build()
client = elastic_search_client.ElasticSearchClient()
response = client.post_request(query)
try:
self.answer = dict(json.loads(response)['hits']['hits'][0])['_source']['country']
except IndexError:
print "Woops, no answer to your question :("
yield
def combine_pipeline(source, query_builder, pipeline):
return reduce(lambda x, y: y(x, query_builder) , pipeline, source)
def consume(iter):
for _ in iter:
pass
def dissect_sentence(question, query_builder):
    # for first use only: uncomment to download the nltk dictionaries
# nltk.download('punkt')
# nltk.download('maxent_treebank_pos_tagger')
tokenized_question = nltk.word_tokenize(question)
tokenized_and_tagged_question = nltk.pos_tag(tokenized_question)
yield OrderedDict(tokenized_and_tagged_question)
```
#### File: GEL-3014_Design3/robot/flagConstructionCycle.py
```python
import time
from pathfinding.pathfinding import Pathfinding
import pathfinding.constants
import math
class FlagConstructionCycle:
def flag_construction_sequence(self, flag_matrix, robot):
for cube_index, cube in enumerate(self.flag_matrix):
if cube is not None:
self.fetch_cube(str(cube), cube_index)
def fetch_cube(self, cube_color, cube_position_in_flag):
movement_processor = MovementProcessor(self.robot_connection)
self.robot_connection.send_led_color_change_command(cube_color, cube_position_in_flag)
pathfind_to_wait_position_tuple = self.pathfinder.find_path_to_point(self.robot_status.position,
pathfinding.constants.SAFE_POINT)
movement_processor.move(pathfind_to_wait_position_tuple, self.robot_status.position,
movement_speed=75)
target_cube = Cube(cube_color)
self.cube_finder.add_cube(target_cube)
self.cube_finder.refresh_position()
self.cube_finder.refresh_position()
self.cube_finder.refresh_position()
pathfind_tuple_to_cube = self.pathfinder.find_path_to_point(self.robot_status.position,
target_cube.position)
movement_processor.move(pathfind_tuple_to_cube, self.robot_status.position, movement_speed=75)
self.robot_connection.send_change_gripper_height_command(False)
time.sleep(2)
self.robot_connection.send_change_gripper_height_command(True)
time.sleep(1)
pathfind_tuple_pre_drop_point = self.pathfinder.find_path_to_point(self.robot_status.position,
pathfinding.constants.DOCK_POINT)
movement_processor.move(pathfind_tuple_pre_drop_point, self.robot_status.position,
movement_speed=75)
pathfind_tuple_to_drop_angle = (self.pathfinder.determine_rotation_angle(math.degrees(self.robot_status.position.angle), 180),
0)
movement_processor.move(pathfind_tuple_to_drop_angle, self.robot_status.position)
movement_processor.move((0, 200), self.robot_status.position, movement_speed=75)
self.robot_connection.send_change_gripper_height_command(False)
```
#### File: GEL-3014_Design3/tests/test_cubeFinder.py
```python
from unittest import TestCase
from baseStation.cubeFinder import CubeFinder
from vision.cube import Cube
from vision.kinect import Kinect
from mock import Mock
class TestCubeFinder(TestCase):
CUBE_COLOR = 'red'
CUBE_X = 30
CUBE_Y = 31
def setUp(self):
fake_kinect = Mock(Kinect)
fake_kinect.grab_new_image = Mock(return_value=None)
self.empty_cubefinder = CubeFinder(fake_kinect)
self.single_cube_cubefinder = CubeFinder(fake_kinect)
cube = Cube(self.CUBE_COLOR)
cube.find_position = Mock(return_value=(self.CUBE_X, self.CUBE_Y))
cube.position = (self.CUBE_X, self.CUBE_Y)
self.single_cube_cubefinder.add_cube(cube)
def test_get_all_cubes_is_empty_at_first(self):
cubes = self.empty_cubefinder.get_all_cubes()
self.assertEqual(len(cubes), 0)
def test_get_all_cubes_contains_one_for_single_cube(self):
cubes = self.single_cube_cubefinder.get_all_cubes()
self.assertEqual(len(cubes), 1)
def test_get_all_cubes_return_new_added_cube(self):
is_inside_cubes = False
cube = Cube('blue')
self.empty_cubefinder.add_cube(cube)
cubes = self.empty_cubefinder.get_all_cubes()
if cube in cubes:
is_inside_cubes = True
assert is_inside_cubes
def test_for_a_cubefinder_a_cube_cant_be_add_two_times(self):
cube = Cube('blue')
self.empty_cubefinder.add_cube(cube)
self.empty_cubefinder.add_cube(cube)
cubes = self.empty_cubefinder.get_all_cubes()
self.assertEqual(len(cubes), 1)
# def test_get_position_list(self):
# array = [[self.CUBE_X, self.CUBE_Y, self.CUBE_COLOR,]]
# self.assertEqual(self.single_cube_cubefinder.get_cubes_positions(), array)
```
#### File: GEL-3014_Design3/vision/robotLocator.py
```python
import cv2
from cube import Cube, FormStencil, TABLE_STENCIL, FormFilter
# import cube
from kinect import Kinect
from kinectCaptureHelper import KinectCaptureHelper
from visiontools import VisionTools
import numpy as np
import math
import time
class RobotLocator():
PIXEL_SHIFT = 3
def __init__(self):
self.position = RobotPosition()
def get_position(self, kinect):
self.position = RobotPosition()
for x in range(0, 7):
position = self.attempt_get_position(kinect, x)
if position is not None:
if position.is_valid():
for y in range(0, 5):
second_position = self.attempt_get_position(kinect, y)
if second_position is not None:
if second_position.is_valid() and second_position.is_like(position):
self.position = self.merge_position(position, second_position)
return self.position
self.position = RobotPosition()
KinectCaptureHelper().save_kinect_capture(kinect, str(time.time()))
return self.position
def merge_position(self, position_1, position_2):
pos_x = int((position_1.position[0]+position_2.position[0])/2)
pos_y = int((position_1.position[1]+position_2.position[1])/2)
angle = float((position_1.angle+position_2.angle)/float(2))
return RobotPosition(pos_x, pos_y, angle)
def attempt_get_position(self, kinect, attempt_no):
new_position = None
img_hsv = self.get_masked_hsv(kinect, attempt_no)
purple_corner = Cube('purple')
green_corner = Cube('forest_green')
purple_position = purple_corner.find_position(img_hsv, kinect)
green_position = green_corner.find_position(img_hsv, kinect)
if purple_corner.is_valid_position(purple_position):
new_position = self.test_other_corners(img_hsv, kinect, purple_corner, 0)
elif green_corner.is_valid_position(green_position):
new_position = self.test_other_corners(img_hsv, kinect, green_corner, math.pi / 2)
return new_position
def get_masked_hsv(self, kinect, attempt_no):
if attempt_no == 0:
img = kinect.grab_new_image(median_filter_activated=True)
else:
img = kinect.grab_new_image(bilateral_filter_activated=True)
img_hsv = VisionTools().get_hsv_image(img)
polyline = np.array([[0, 270], [640, 270], [640, 480], [0, 480]], np.int32)
stencil = FormStencil([polyline])
return stencil.apply(img_hsv)
def test_other_corners(self, img_hsv, kinect, found_corner, angle_modificator=0):
new_position = RobotPosition()
found_corner_x_position = kinect._get_centre_object(found_corner.apply_filters(img_hsv))[0]
if angle_modificator == 0:
maybe_first_corner_position = self.find_left_orange_corner(img_hsv, kinect, found_corner_x_position)
maybe_second_corner_position = self.find_right_orange_corner(img_hsv, kinect, found_corner_x_position)
else:
maybe_first_corner_position = self.find_right_orange_corner(img_hsv, kinect, found_corner_x_position)
maybe_second_corner_position = self.find_left_orange_corner(img_hsv, kinect, found_corner_x_position)
if self.position.is_valid_position(maybe_first_corner_position):# TODO
if angle_modificator == 0:
found_corner.find_position(img_hsv, kinect, self.PIXEL_SHIFT)
new_position.set_from_points(maybe_first_corner_position, found_corner.position, 0)
# print 'purple first'
else:
new_position.set_from_points(maybe_first_corner_position, found_corner.position, math.pi/2)
found_corner.find_position(img_hsv, kinect, -self.PIXEL_SHIFT)
# print 'green first'
elif self.position.is_valid_position(maybe_second_corner_position):# TODO
if angle_modificator == 0:
found_corner.find_position(img_hsv, kinect, -self.PIXEL_SHIFT)
new_position.set_from_points(maybe_second_corner_position, found_corner.position, math.pi*3/2)
# print 'purple second'
else:
found_corner.find_position(img_hsv, kinect, self.PIXEL_SHIFT)
new_position.set_from_points(maybe_second_corner_position, found_corner.position, math.pi)
# print 'green second'
return new_position
def find_left_orange_corner(self,img_hsv, kinect, x_limit):
polyline = np.array([[0, 0], [x_limit, 0], [x_limit, 480], [0, 480]], np.int32)
return self.find_orange_corner(img_hsv, kinect, polyline, True)
def find_right_orange_corner(self, img_hsv, kinect, x_limit):
polyline = np.array([[x_limit, 0], [640, 0], [640, 480], [x_limit, 480]], np.int32)
return self.find_orange_corner(img_hsv, kinect, polyline, False)
def find_orange_corner(self, img_hsv, kinect, polyline, is_left):
stencil = FormStencil([polyline])
img_hsv_mask = stencil.apply(img_hsv)
orange_corner = Cube('orange')
if is_left:
return orange_corner.find_position(img_hsv_mask, kinect, self.PIXEL_SHIFT)
return orange_corner.find_position(img_hsv_mask, kinect, -self.PIXEL_SHIFT)
def get_rgb_calibration(self, img_hsv, kinect, form_filter=True):
rgb_result = np.zeros((img_hsv.shape[0], img_hsv.shape[1], 3), np.uint8)
orange_cube = Cube('orange')
green_cube = Cube('forest_green')
purple_cube = Cube('purple')
if form_filter == False:
orange_cube.form_filter = FormFilter([0, 0, 1, 1])
green_cube.form_filter = FormFilter([0, 0, 1, 1])
purple_cube.form_filter = FormFilter([0, 0, 1, 1])
orange_filter = orange_cube.apply_filters(img_hsv)
green_filter = green_cube.apply_filters(img_hsv)
purple_filter = purple_cube.apply_filters(img_hsv)
for i in range(180, rgb_result.shape[0]-205):
for j in range(0, rgb_result.shape[1]):
rgb_result[i, j][1] += int(orange_filter[i, j] * 0.5)
rgb_result[i, j][2] += orange_filter[i, j]
rgb_result[i, j][0] += int(purple_filter[i, j] * 0.5)
rgb_result[i, j][2] += int(purple_filter[i, j] * 0.5)
rgb_result[i, j][1] += int(green_filter[i, j] * 0.25)
if kinect is not None:
rgb_result = FormStencil(TABLE_STENCIL.get(kinect.table)).apply(rgb_result)
return rgb_result
class Position():
NEGATIVE_POSITION_TOLERANCE_IN_MM = -100
NO_POSITION_TOLERANCE = 4
def __init__(self, x=None, y=None, angle=None):
self.position = (x, y)
self.angle = angle
def set_angle_from_points(self, point_1, point_2):
delta_x = point_1[1] - point_2[1]
delta_y = point_1[0] - point_2[0]
self.angle = math.atan2(delta_y, delta_x)
def set_position(self, x, y):
self.position = (x, y)
def get_angle_in_deg(self):
if self.angle is None:
return None
return self.angle * 180 / math.pi
def is_valid(self):
return self.is_valid_position(self.position)
def is_valid_position(self, position):
if position is None:
return False
if position[0] is None or position[1] is None:
return False
if position[0] > self.NEGATIVE_POSITION_TOLERANCE_IN_MM \
and position[1] > self.NEGATIVE_POSITION_TOLERANCE_IN_MM \
and position[0] < 1220 \
and position[1] < 2400:
return True
return False
def normalize_angle(self):
if self.angle < -math.pi:
self.angle += 2 * math.pi
elif self.angle > math.pi:
self.angle -= 2 * math.pi
def normalize_angle_degree(self):
if self.angle < -180:
self.angle += 360
elif self.angle > 180:
self.angle -= 360
class RobotPosition(Position):
ROBOT_DIMENSION = 220
ANGLE_TOLERANCE = math.radians(5)
DISTANCE_TOLERANCE = 50
def __init__(self, x=None, y=None, angle=None):
Position.__init__(self, x, y, angle)
def set_from_points(self, point_1, point_2, angle_modificator):
self.set_angle_from_points(point_1, point_2)
self.angle += angle_modificator
diagonal = math.sqrt(2) * self.ROBOT_DIMENSION / 2
if angle_modificator < math.pi:
x_value = point_1[0] + diagonal * math.cos(self.angle + math.pi/float(4))
y_value = point_1[1] - diagonal * math.sin(self.angle + math.pi/float(4))
else:
x_value = point_1[0] - diagonal * math.cos(self.angle + math.pi/float(4))
y_value = point_1[1] + diagonal * math.sin(self.angle + math.pi/float(4))
self.position = (x_value, y_value)
self.normalize_angle()
def update_with_pathfinding_tuple(self, pathfinding_tuple):
self.angle += pathfinding_tuple[0]
self.position = (self.position[0] + math.sin(math.radians(self.angle)) * pathfinding_tuple[1],
self.position[1] + math.cos(math.radians(self.angle)) * pathfinding_tuple[1])
self.normalize_angle_degree()
def update_with_movement_direction_and_distance(self, movement_direction, distance):
new_pos = (0, 0)
if movement_direction == 'forward':
new_pos = (self.position[0] + distance * math.sin(math.radians(self.angle)),
self.position[1] + distance * math.cos(math.radians(self.angle)))
elif movement_direction == 'reverse':
new_pos = (self.position[0] + distance * math.sin(math.radians(self.angle + 180)),
self.position[1] + distance * math.cos(math.radians(self.angle + 180)))
elif movement_direction == 'left':
new_pos = (self.position[0] + distance * math.sin(math.radians(self.angle + 90)),
self.position[1] + distance * math.cos(math.radians(self.angle + 90)))
elif movement_direction == 'right':
new_pos = (self.position[0] + distance * math.sin(math.radians(self.angle - 90)),
self.position[1] + distance * math.cos(math.radians(self.angle - 90)))
self.position = new_pos
def is_like(self, other_position):
if abs(self.angle-other_position.angle) < self.ANGLE_TOLERANCE and \
abs(self.position[0]-other_position.position[0]) < self.DISTANCE_TOLERANCE and \
abs(self.position[1]-other_position.position[1]) < self.DISTANCE_TOLERANCE:
return True
return False
```
|
{
"source": "JDowns412/IPv6-Tracer",
"score": 3
}
|
#### File: IPv6-Tracer/Scripts/associator.py
```python
import os, json, pprint, requests, re, urllib.request, argparse
from bs4 import BeautifulSoup
def reader(length, experiment):
with open("../Logs/Experiment %s.log" % str(experiment), 'a') as log:
goalLength = length
file = "top%sEven" % str(goalLength)
location = "../Data/" + file + ".json"
data = {}
# read in and organize the list of sites we want to experiment on
with open(location, "r") as f:
data = json.load(f)
sites = data["sites"]
print("\n______________________associator.py______________________\n")
print("read in %d sites from %s" %(len(sites), location))
log.write("\n______________________associator.py______________________\n")
log.write("read in %d sites from %s" %(len(sites), location))
return(sites)
# due to the volatile nature of website content, this should be run
# every time we want to run an experiment
def obj_associator(sites, experiment):
with open("../Logs/Experiment %s.log" % str(experiment), 'a') as log:
data = {"exceptions" : {}, "zeros" : [], "valid": {}, "progress" : ["A"]}
for dom in range(len(sites)):
# added this line because the script was hanging on these sites
if (sites[dom] != "jabong.com" and sites[dom] != "bestbuy.com"):
try:
# print (dom)
# response = requests.get()
domain = "http://www." + sites[dom]
print("\nGetting %d/%d: %s" % (dom+1, len(sites), domain))
log.write("\nGetting %d/%d: %s" % (dom+1, len(sites), domain))
req = urllib.request.Request(domain, headers={'User-Agent': 'Mozilla/5.0'})
html_page = urllib.request.urlopen(req)
soup = BeautifulSoup(html_page, "lxml")
# pprint.pprint(soup.prettify())
objects = []
# iterate through all the types of website objects that
# we're willing to use for our future requests
types = ["ico", "jpg", "jpeg", "png", "gif", "avi", "doc", "mp4", "mp3", "mpg", "mpeg", "txt", "wav", "pdf", "tiff", "mov"]
tags = ["img", "meta", "link"]
for tag in tags:
for obj in soup.findAll(tag):
# print("FOUND")
if (tag == "img"):
o = obj.get('src')
if (o is not None and (o[-3:] in types or o[-4:] in types)):
# TODO: check if there is a domain name at the start of the link
objects.append(o)
elif (tag == "meta"):
o = obj.get('content')
if (o is not None and (o[-3:] in types or o[-4:] in types)):
# TODO: check if there is a domain name at the start of the link
objects.append(o)
elif (tag == "link"):
o = obj.get('href')
if (o is not None and (o[-3:] in types or o[-4:] in types)):
# TODO: check if there is a domain name at the start of the link
objects.append(o)
# print(o[-3:])
if (len(objects) == 0):
print("Couldn't find any objects for ", domain)
log.write("Couldn't find any objects for %s" % domain)
data["zeros"].append(sites[dom])
else:
print("Found %d objects for %s" % (len(objects), domain))
log.write("Found %d objects for %s" % (len(objects), domain))
data["valid"][sites[dom]] = {"objects" : objects}
except Exception as exception:
name = repr(exception).split('(')[0]
print("%s exception encountered while requesting %s" % (name, domain))
log.write("%s exception encountered while requesting %s" % (name, domain))
data["exceptions"][sites[dom]] = name
# with open("temp.txt", "wb") as w:
# w.write(soup.prettify().encode('utf8'))
return (data)
def dumper(data, goalLength, experiment):
with open("../Logs/Experiment %s.log" % str(experiment), 'a') as log:
fileName = ("results")
os.chdir('../Results/Associated')
for elem in data["progress"]:
fileName += "_" + elem
fileName += "[" + str(goalLength) + "]"
# if we're actually setting the experiment number, probably through runner.py
if (experiment != -1):
fileName += str(experiment)
fileName += ".json"
print("\ndumping results to ../Results/Associated/", fileName)
log.write("\ndumping results to ../Results/Associated/%s" % fileName)
with open(fileName, 'w') as fp:
json.dump(data, fp, indent=4)
def run():
goalLength = 100
experiment = -1
parser = argparse.ArgumentParser()
parser.add_argument(action="store", dest="goalLength", nargs="?")
parser.add_argument(action="store", dest="experiment", nargs="?")
args = parser.parse_args()
# apply the inputted arguments
if (args.goalLength):
goalLength = args.goalLength
if (args.experiment):
experiment = args.experiment
# read in the list of hostnames we want to analyze
sites = reader(goalLength, experiment)
# go to each of the hostnames and find an object that we can use
# for collecting results on
data = obj_associator(sites, experiment)
# dump our results out to a JSON to be analyzed later
dumper(data, goalLength, experiment)
if __name__ == "__main__":
run()
```
|
{
"source": "jdp7689/SimpleStego",
"score": 4
}
|
#### File: jdp7689/SimpleStego/simplePicStego.py
```python
import argparse
import simplePicStegoEmbed
import simplePicStegoError
import simplePicStegoReveal
class UnknownFunctionError(simplePicStegoError.Error):
"""
Raise error when unknown commands are given
"""
def __init__(self, message):
        self.message = message
version = "1.0"
def init_program():
parser = argparse.ArgumentParser(description="An app that embeds strings into images")
# parser.add_argument("--version", action="version", version="%(prog)s %s" % version)
parser.add_argument("-e", action="store", dest="encode_file", help="The file name to store the string",
default=False)
parser.add_argument("-m", action="store", dest="message", help="The message to store. Combine with -e",
default=None)
parser.add_argument("-d", action="store", dest="decode_file", help="The file to extract the message")
results = parser.parse_args()
    if (results.encode_file and results.decode_file) or (not results.encode_file and not results.decode_file):
raise UnknownFunctionError("Must either encode or decode a file")
elif results.encode_file: # create object to encode message into file and perform operation
if results.encode_file.split(".")[1] != "png":
raise simplePicStegoError.Error("Can only support png file right now")
simplePicStegoEmbed.PicEmbed(results.encode_file, results.message).embed_message()
elif results.decode_file: # create object to attempt to find a message within an image file
if results.decode_file.split(".")[1] != "png":
raise simplePicStegoError.Error("Can only support png file right now")
message = simplePicStegoReveal.SimplePicStegoReveal(results.decode_file).reveal()
print(message)
def main():
init_program()
if __name__ == '__main__':
main()
```
|
{
"source": "JD-P/accordius",
"score": 4
}
|
#### File: accordius/lw2/search.py
```python
import json
import re
from django.db.models import Q
"""Search Syntax Quick Guide
A search string is made up of one or more EXPRESSIONS which are joined by
OPERATORS.
- An expression is a string of non-whitespace unicode characters which may or
may not be enclosed by quotation marks.
- An expression enclosed in DOUBLE QUOTES is searched for exactly, otherwise
it's searched for as a substring and may have enhancement features such as
synonym matching applied.
- There are three operators: AND, OR, and NOT
- The AND operator is applied by separating two or more expressions with SPACES.
- The OR operator is applied by typing capital OR surrounded by whitespace
between two or more expressions.
- The NOT operator is applied by putting a dash (-) in front of an expression,
this will cause the search backend to use an exclude() instead of a filter()
for that exrpression.
The syntax also supports PARAMETERS, which are special search string keywords
that can be passed to specify certain behavior such as date restrictions. These
are structured as keyword:argument pairs that may appear as expressions in a
search string. All parameters are currently undefined, but in a later edition
will provide extended search functionality."""
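# Illustrative example (added for clarity, not part of the original module),
# assuming the helpers below behave as written:
#
#   parse_search_string('author:bob cats OR dogs -spiders rationality')
#   => {"parameters": {"author": "bob"},
#       "or_ops": [("cats", "dogs")],
#       "and_ops": ["-spiders", "rationality"]}
#
# mk_search_filters() then produces an excluding icontains filter for
# "spiders", an AND icontains filter for "rationality", and one OR filter
# equivalent to Q(body__icontains="cats") | Q(body__icontains="dogs").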
def parse_search_string(search_s):
# Extract parameters
parameters = {}
param_re = re.compile("(\S+):(\S+)")
parameters_raw = param_re.findall(search_s)
for parameter in parameters_raw:
parameters[parameter[0]] = parameter[1]
search_s = param_re.sub('', search_s)
# Extract OR operations
or_op_re = re.compile("(\S+) OR (\S+)")
or_ops = or_op_re.findall(search_s)
search_s = or_op_re.sub('', search_s)
# Extract rest
and_ops = search_s.split()
return {"parameters":parameters,
"or_ops":or_ops,
"and_ops":and_ops}
def mk_search_filters(parsed_operations):
"""Create the search filters that implement the operations from the parsed
query string."""
filters = []
for and_op in parsed_operations["and_ops"]:
operation = mk_operation(and_op)
operation["type"] = "and"
filters.append(operation)
for or_op in parsed_operations["or_ops"]:
operation = {"type":"or",
"exclude":None,
"Q":None}
left_or = mk_operation(or_op[0])
right_or = mk_operation(or_op[1])
operation["Q"] = (left_or["Q"] | right_or["Q"])
filters.append(operation)
return filters
def mk_operation(op):
operation = {"exclude":False,
"Q":None}
if op[0] == "-":
operation["exclude"] = True
op = op[1:]
if op[0] == op[-1] == "\"":
operation["Q"] = Q(body__contains=op)
else:
operation["Q"] = Q(body__icontains=op)
return operation
```
|
{
"source": "jdpatt/bga_color_map",
"score": 4
}
|
#### File: part_map/pins/widget.py
```python
from PySide2 import QtGui, QtWidgets
class PinWidget(QtWidgets.QWidget):
"""The widget for all pins."""
def __init__(self, pin_item, parent=None):
super().__init__(parent)
self.pin_item = pin_item
self.color = QtGui.QColor(self.pin_item.pin["color"])
self.color_button = QtWidgets.QPushButton(self.pin_item.pin["color"])
self.name_edit = QtWidgets.QLineEdit()
self.name_edit.setText(self.pin_item.pin["name"])
layout = QtWidgets.QFormLayout()
layout.addRow("Name", self.name_edit)
layout.addRow("Color", self.color_button)
self.setLayout(layout)
# Connect Signal/Slots
self.color_button.clicked.connect(self.change_color)
self.name_edit.editingFinished.connect(self.change_name)
def change_color(self):
"""Update the color."""
color_picker = QtWidgets.QColorDialog(self.color)
self.color = color_picker.getColor()
self.color_button.setText(self.color.name())
self.pin_item.pin["color"] = self.color.name()
self.pin_item.update()
def change_name(self):
"""Update the Name"""
self.pin_item.pin["name"] = self.name_edit.text()
self.pin_item.update()
```
#### File: jdpatt/bga_color_map/setup.py
```python
from setuptools import find_packages, setup
def readme():
with open("README.md") as f:
return f.read()
setup(
name="part_map",
version="2.1.0",
description="Part Visualizer",
long_description=readme(),
url="https://github.com/jdpatt/part_map",
author="<NAME>",
license="MIT",
python_requires=">=3.6",
packages=find_packages(include=["part_map", "part_map.*"]),
include_package_data=True,
install_requires=["openpyxl", "Click", "natsort", "PySide2"],
entry_points={"console_scripts": ["part-map = part_map.cli:map"]},
zip_safe=False,
)
```
#### File: bga_color_map/tests/test_part_map.py
```python
from pathlib import Path
from part_map.part_map import PartObject
def test_load_from_excel():
obj = PartObject.from_excel(
Path(__file__).parent.parent.joinpath("examples", "artix7_example.xlsx")
)
assert obj.get_number_of_pins() == 238
def test_load_from_json():
obj = PartObject.from_json(
Path(__file__).parent.parent.joinpath("examples", "connector_example.json")
)
assert obj.get_number_of_pins() == 58
def test_columns():
obj = PartObject.from_json(
Path(__file__).parent.parent.joinpath("examples", "connector_example.json")
)
assert obj.columns == [str(x) for x in range(1, 17)]
def test_rows():
obj = PartObject.from_json(
Path(__file__).parent.parent.joinpath("examples", "connector_example.json")
)
assert obj.rows == ["A", "B", "C", "D"]
def test_add_pin():
obj = PartObject({}, "TestObject")
obj.add_pin("A1", "TEST", "0xFFFFFF")
assert list(obj.pins) == ["A1"]
```
|
{
"source": "jdpatt/cookiecutter-template",
"score": 2
}
|
#### File: {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}.py
```python
{%- if cookiecutter.type_of_project|lower != 'library' %}
from {{ cookiecutter.project_slug }} import logger
def main():
"""Main Entry point for {{cookiecutter.project_name}}"""
log = logger.setup_logger("{{ cookiecutter.project_slug }}", "{{ cookiecutter.project_slug }}.log")
{%- elif cookiecutter.type_of_project|lower == 'application w/ gui' %}
"""{{ cookiecutter.project_slug }}"""
import logging
from PySide6 import QtCore, QtWidgets
from {{ cookiecutter.project_slug }}.logger import QTLogHandler
from {{ cookiecutter.project_slug }}.view import console_colors
from {{ cookiecutter.project_slug }}.view.main_window import Ui_MainWindow
LOG = logging.getLogger("{{ cookiecutter.project_slug }}")
class Application(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
self.setupUi(self)
# Override the qt designer outputs
self.setWindowTitle("{{ cookiecutter.project_slug }}")
self.statusbar.setVisible(False)
self.stacked_widget.setCurrentIndex(0)
self.connect_title_bar_buttons()
self.connect_page_buttons()
# Setup the logging infrastructure
qt_log_handler = QTLogHandler()
LOG.addHandler(qt_log_handler)
qt_log_handler.new_record.connect(self.log_message)
def connect_page_buttons(self) -> None:
"""Connect the buttons in the side bar to update which widget is shown."""
self.log_btn.clicked.connect(
lambda x: self.stacked_widget.setCurrentWidget(self.log_widget)
)
self.main_btn.clicked.connect(
lambda x: self.stacked_widget.setCurrentWidget(self.main_widget)
)
self.settings_btn.clicked.connect(
lambda x: self.stacked_widget.setCurrentWidget(self.settings_widget)
)
def connect_title_bar_buttons(self):
"""Connect the buttons in the title bar to actions."""
self.minimize_btn.clicked.connect(lambda x: self.showMinimized())
self.maximize_btn.clicked.connect(self.toggle_window_size)
self.exit_btn.clicked.connect(lambda x: QtWidgets.QApplication.quit())
def toggle_window_size(self) -> None:
"""Toggle between normal and maximized window size."""
if self.isMaximized():
self.showNormal()
else:
self.showMaximized()
def log_message(self, level, msg) -> None:
"""Log any builtin logger messages to the log widget."""
if level in console_colors.COLORS:
self.log_textedit.appendHtml(
f'<p style="color:{console_colors.COLORS[level]};">{msg}</p>'
)
else:
self.log_textedit.appendPlainText(msg)
self.log_textedit.ensureCursorVisible()
{%- endif %}
```
#### File: {{cookiecutter.project_slug}}/tests/test_smoketests.py
```python
import pytest
@pytest.fixture
def equipment():
# Open DUT connection
return True
@pytest.mark.smoketest
def test_equipment_init(equipment):
assert equipment
@pytest.mark.regression
def test_more_complicated_thing(equipment):
assert equipment
```
|
{
"source": "jdpatt/pindelays",
"score": 3
}
|
#### File: pindelays/pindelays/pindelays.py
```python
from pathlib import Path
from typing import Any, Dict
import click
from openpyxl import Workbook, load_workbook
def get_column(name: str, worksheet) -> int:
"""Search a row of cells for the string.
Args:
name: The text to search for
columns: The list or generator of columns in the excel sheet
Returns:
Either returns the column number or returns 0 if no column matched the name
"""
for rows in worksheet.iter_rows(min_row=1, max_row=1, min_col=1):
for column in rows:
if column.value == name:
return column.col_idx
return 0
def parse_excel_file(workbook: Workbook, pin_col=None, delay_col=None) -> Dict[str, Any]:
"""Read in excel and get the pin number and internal length
The excel file must have a header row with the cells "Pin Name" and "Delay". It does not
matter which column they are in.
Args:
excel_file: The excel file to open and read from
"""
sheet = workbook.active
delay_dict = dict()
try:
pin_col = pin_col or get_column("Pin Name", sheet)
delay_col = delay_col or get_column("Delay", sheet)
for excel_row in range(2, sheet.max_row + 1):
pin = str(sheet.cell(row=excel_row, column=pin_col).value)
delay = str(sheet.cell(row=excel_row, column=delay_col).value)
if not all([pin, delay]):
raise ValueError
delay_dict.update({pin: delay})
except (ValueError, KeyError, UnboundLocalError) as error:
print(error)
raise
return delay_dict
def generate_mentor(partnumber: str, unit: str, delays: Dict) -> None:
"""This function generates a text file that can be imported in the Constraint Manager tool.
Example:
UNITS <value> th
PART_NUMBER <part_number>
<pin_number> <value>
Args:
partnumber: The part number to apply these delays
delays: The data read in from the excel file
"""
if unit == "mil":
filename = "PinPkgLengths.txt"
unit = "th"
else:
filename = "PinPkgDelays.txt"
with open(filename, "w") as output:
output.write(f"UNITS {unit}\n")
output.write(f"PART_NUMBER {partnumber}\n")
for key, value in delays.items():
output.write(f"{key} {value}\n")
def generate_cadence(ref: str, package: str, unit: str, delays: Dict) -> None:
"""This function generates a text file that can be imported into Allergo if you are using the
high speed license. Allergo applies delays individual vs against all part numbers that match
like mentor. UNITS MIL can be a header row that applies to everything or you can list unit for
every row. This does the later.
Example:
[PIN DELAY]
[RefDes <refdes>]
[DEVICE <package name>]
[UNITS <mks units>]
<Pin number> <delay value> <...>
Args:
ref: The reference designator to apply the delays
package: The cadence source package
unit: The unit of the delays in either MIL or NS
delays: The data read in from the excel file
"""
with open(f"{package}.csv", "w") as output:
output.write("PIN DELAY\n")
output.write(f"REFDES\t{ref}\n")
output.write(f"DEVICE\t{package}\n")
output.write("\n")
for key, value in delays.items():
output.write(f"{key}\t{value}\t{unit.upper()}\n")
@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@click.argument("excel_file", nargs=-1)
@click.argument("output_type", type=click.Choice(["cadence", "mentor"]), default="cadence")
@click.option(
"--partnumber",
"-p",
type=str,
default="dummy_part",
help="Part number [Only used in mentor]",
)
@click.option(
"--package",
"-d",
type=str,
default="dummy_package",
help="Device Package [Only used in cadence]",
)
@click.option("--refdes", "-r", default="U1", type=str, help="RefDes [Only used in cadence]")
@click.option(
"--units",
"-u",
type=click.Choice(["ns", "ps", "mil"]),
default="ns",
help="Units",
)
@click.version_option()
def pindelay(excel_file, output_type, partnumber, package, refdes, units):
"""For today's high speed designs, one must take into account the internal length or delay of a pin.
This python program takes an excel file and produces a pin delay file that is correctly formatted
for your EDA tool set."""
for file_to_parse in excel_file:
print(f"Reading in Excel File: {Path(file_to_parse)}")
part_delays = parse_excel_file(load_workbook(file_to_parse, data_only=True))
if output_type == "cadence":
generate_cadence(refdes, package, units, part_delays)
print("Cadence File Generated")
if output_type == "mentor":
generate_mentor(partnumber, units, part_delays)
print("Mentor File Generated")
if __name__ == "__main__":
pindelay()
```
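The Click command above is the packaged entry point, but the functions can also be driven directly. Below is a minimal programmatic sketch under the assumption of a workbook named `delays.xlsx` containing the required "Pin Name" and "Delay" header cells; the refdes, package, and part-number values are placeholders.

```python
# Hypothetical usage sketch (not part of the package); "delays.xlsx", "U1",
# "bga_484", and "MY-PART-001" are placeholder values.
from openpyxl import load_workbook

from pindelays.pindelays import generate_cadence, generate_mentor, parse_excel_file

delays = parse_excel_file(load_workbook("delays.xlsx", data_only=True))
generate_cadence("U1", "bga_484", "ns", delays)  # Writes bga_484.csv for Allegro.
generate_mentor("MY-PART-001", "ns", delays)     # Writes PinPkgDelays.txt for Constraint Manager.
```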
|
{
"source": "jdpatt/project-diff-view",
"score": 2
}
|
#### File: project-diff-view/projectdiffview/cli.py
```python
import distutils.dir_util as dir_util
import sys
from pathlib import Path
import click
from PySide2.QtWidgets import QApplication
from projectdiffview import projectdiffview, prompts
@click.group(
invoke_without_command=True, context_settings=dict(help_option_names=["-h", "--help"])
)
@click.pass_context
@click.option("--verbose", "-vv", is_flag=True, help="Enable debug prints.")
@click.option("--debug", is_flag=True, help="Use the local debug directory.")
@click.version_option()
def cli(ctx, verbose, debug):
"""CLI for projectdiffview."""
click.echo("Opening projectdiffview GUI.")
ctx.obj = {"verbose": verbose, "debug": debug}
if ctx.invoked_subcommand is None:
app = QApplication([])
gui = projectdiffview.ProjectDiffView(verbose=ctx.obj["verbose"], debug=ctx.obj["debug"])
gui.show()
prompts.warn_user()
sys.exit(app.exec_())
@cli.command()
@click.pass_context
def clone(ctx):
"""Clone the full template folder into the current working directory."""
app = QApplication([])
gui = projectdiffview.ProjectDiffView(verbose=ctx.obj["verbose"], debug=ctx.obj["debug"])
cwd = str(Path().cwd())
click.echo(f"Cloning Template into: {cwd}")
dir_util.copy_tree(
str(gui.template_directory), str(cwd), preserve_symlinks=True, update=True,
)
gui.quit()
sys.exit(app.exec_())
@cli.command()
@click.pass_context
def cleanup(ctx):
"""Clone the full template folder into the current working directory."""
app = QApplication([])
gui = projectdiffview.ProjectDiffView(verbose=ctx.obj["verbose"], debug=ctx.obj["debug"])
gui.working_directory = Path().cwd()
click.echo(f"Cleaning {gui.working_directory}...")
gui.cleanup_working_folder()
gui.quit()
sys.exit(app.exec_())
```
#### File: project-diff-view/projectdiffview/colorable_model.py
```python
from pathlib import Path
from PySide2 import QtCore, QtGui, QtWidgets
class ColorableFileSystemModel(QtWidgets.QFileSystemModel):
"""Sub-class QFileSystemModel to allow colored items."""
def __init__(self, view):
super().__init__()
self.view = view
def data(self, index, role=QtCore.Qt.DisplayRole):
"""Return the data stored under the given role for the item referred to by the index."""
if role == QtCore.Qt.BackgroundRole:
file_path = Path(self.filePath(index))
if file_path in self.view.working_only:
return QtGui.QColor(self.view.config.color_new)
return super().data(index, role)
```
#### File: project-diff-view/projectdiffview/logger.py
```python
import logging
from PySide2.QtCore import QObject, Signal
def setup_logger(root_name, log_file_path="", is_verbose: bool = False):
"""Create the Handlers and set the default level to DEBUG."""
log = logging.getLogger(root_name)
# Setup a Console Logger
console_handler = logging.StreamHandler()
ch_format = logging.Formatter("%(message)s")
console_handler.setFormatter(ch_format)
console_handler.setLevel(logging.ERROR)
log.addHandler(console_handler)
# Setup a File Logger
file_handler = logging.FileHandler(log_file_path, mode="w", delay=True)
fh_format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
file_handler.setFormatter(fh_format)
file_handler.setLevel(logging.DEBUG)
log.addHandler(file_handler)
if is_verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
log.info(f"Log file created at: {log_file_path}")
return log
class LogQObject(QObject):
"""Create a dummy object to get around the PySide multiple inheritance problem."""
new_record = Signal(str, str)
class ThreadLogHandler(logging.Handler):
"""Create a custom logging handler that appends each record to the TextEdit Widget."""
def __init__(self):
super().__init__()
self.log = LogQObject()
self.new_record = self.log.new_record
self.setFormatter(logging.Formatter("%(asctime)s - %(message)s"))
self.setLevel(logging.INFO)
def emit(self, record):
"""Append the record to the Widget. Color according to 'TEXT_COLOR'."""
msg = self.format(record)
level = record.levelname
self.new_record.emit(level, msg)
```
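A short sketch of how `ThreadLogHandler` is intended to be wired up; the logger name matches the package, but the print-based slot is only a stand-in for the GUI slot that appends records to a text widget.

```python
# Illustrative wiring for ThreadLogHandler; a real GUI connects the signal to a widget slot.
import logging

from projectdiffview.logger import ThreadLogHandler

log = logging.getLogger("projectdiffview")
log.setLevel(logging.INFO)

handler = ThreadLogHandler()
log.addHandler(handler)

# Stand-in slot; the application would append records to a log text widget instead.
handler.new_record.connect(lambda level, msg: print(f"[{level}] {msg}"))
log.info("Routed through the handler's Qt signal.")
```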
#### File: project-diff-view/projectdiffview/settings_widget.py
```python
from PySide2 import QtCore, QtWidgets
class SettingsDialog(QtWidgets.QDialog):
"""Settings Menu for Project Diff View."""
def __init__(self, configuration):
super(SettingsDialog, self).__init__()
self.setWindowTitle("Settings")
self.config = configuration
q_button_box = QtWidgets.QDialogButtonBox.Save | QtWidgets.QDialogButtonBox.Cancel
self.buttonBox = QtWidgets.QDialogButtonBox(q_button_box)
self.buttonBox.accepted.connect(self.save_config)
self.buttonBox.rejected.connect(self.reject)
self.deleted = self.config.color_deleted
self.new = self.config.color_new
self.added = self.config.color_added
self.deleted_button = QtWidgets.QPushButton(self.deleted.name())
self.deleted_button.setStyleSheet("background-color: %s;" % self.deleted.name())
self.new_button = QtWidgets.QPushButton(self.new.name())
self.new_button.setStyleSheet("background-color: %s;" % self.new.name())
self.added_button = QtWidgets.QPushButton(self.added.name())
self.added_button.setStyleSheet("background-color: %s;" % self.added.name())
layout = QtWidgets.QFormLayout()
# layout.addRow("Template Directory", self.update_directory)
layout.addRow("Color Added", self.added_button)
layout.addRow("Color Deleted", self.deleted_button)
layout.addRow("Color New", self.new_button)
layout.addWidget(self.buttonBox)
self.setLayout(layout)
# Connect Signal/Slots
self.deleted_button.clicked.connect(self.change_deleted_color)
self.new_button.clicked.connect(self.change_new_color)
self.added_button.clicked.connect(self.change_added_color)
def __str__(self):
return f"Widget for {self.graphic.__class__.__name__}"
# @QtCore.Slot()
# def update_directory(self):
# pass
@QtCore.Slot()
def change_deleted_color(self):
"""Update the btn label after selection and tell the gui to the update the background."""
color_picker = QtWidgets.QColorDialog(self.deleted)
new_color = color_picker.getColor()
if new_color.isValid():
self.deleted = new_color
self.deleted_button.setText(self.deleted.name())
self.deleted_button.setStyleSheet("background-color: %s;" % self.deleted.name())
@QtCore.Slot()
def change_new_color(self):
"""Update the btn label after selection and tell the gui to the update the background."""
color_picker = QtWidgets.QColorDialog(self.new)
new_color = color_picker.getColor()
if new_color.isValid():
self.new = new_color
self.new_button.setText(self.new.name())
self.new_button.setStyleSheet("background-color: %s;" % self.new.name())
@QtCore.Slot()
def change_added_color(self):
"""Update the btn label after selection and tell the gui to the update the background."""
color_picker = QtWidgets.QColorDialog(self.added)
new_color = color_picker.getColor()
if new_color.isValid():
self.added = new_color
self.added_button.setText(self.added.name())
self.added_button.setStyleSheet("background-color: %s;" % self.added.name())
@QtCore.Slot()
def save_config(self):
"""Update the configuration values and save it."""
self.config.color_deleted = self.deleted
self.config.color_new = self.new
self.config.color_added = self.added
self.config.save_config()
self.accept()
```
#### File: jdpatt/project-diff-view/setup.py
```python
from setuptools import setup
def readme():
"""Open up the readme and use the text for the long description."""
with open("README.md") as f:
return f.read()
setup(
name="projectdiffview",
version="1.0.0",
description="Utility to make managing project directories easier.",
long_description=readme(),
entry_points={
"console_scripts": [
"project-diff-cli=projectdiffview.cli:cli",
"project-diff=projectdiffview.__main__:main",
],
},
packages=["projectdiffview",],
package_dir={"projectdiffview": "projectdiffview"},
include_package_data=True,
install_requires=["Click", "PySide2"],
license="MIT",
zip_safe=False,
keywords="projectdiffview",
classifiers=["Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7",],
)
```
|
{
"source": "jdpatt/PyAMI",
"score": 2
}
|
#### File: pyibisami/ami/configurator.py
```python
import logging
from pathlib import Path
from traits.api import Bool, Enum, HasTraits, Range, Trait
from traitsui.api import Group, Item, View
from traitsui.menu import ModalButtons
from pyibisami.ami.parameter import AMIParameter
from pyibisami.ami.parser import parse_ami_file
class AMIParamConfigurator(HasTraits):
"""
Customizable IBIS-AMI model parameter configurator.
This class can be configured to present a customized GUI to the user
for configuring a particular IBIS-AMI model.
The intended use model is as follows:
1. Instantiate this class only once per IBIS-AMI model invocation.
When instantiating, provide the unprocessed contents of the AMI
file, as a single string. This class will take care of getting
that string parsed properly, and report any errors or warnings
it encounters, in its ``ami_parsing_errors`` property.
2. When you want to let the user change the AMI parameter
configuration, call the ``open_gui`` member function.
(Or, just call the instance as if it were executable.)
The instance will then present a GUI to the user,
allowing him to modify the values of any *In* or *InOut* parameters.
The resultant AMI parameter dictionary, suitable for passing
into the ``ami_params`` parameter of the ``AMIModelInitializer``
constructor, can be accessed, via the instance's
``input_ami_params`` property. The latest user selections will be
remembered, as long as the instance remains in scope.
The entire AMI parameter definition dictionary, which should *not* be
passed to the ``AMIModelInitializer`` constructor, is available in the
instance's ``ami_param_defs`` property.
Any errors or warnings encountered while parsing are available, in
the ``ami_parsing_errors`` property.
"""
def __init__(self, ami_filepath: Path):
"""
Args:
ami_filepath: The filepath to the .ami file.
"""
# Super-class initialization is ABSOLUTELY NECESSARY, in order
# to get all the Traits/UI machinery setup correctly.
super().__init__()
self._log = logging.getLogger("pyibisami")
# Parse the AMI file contents, storing any errors or warnings,
# and customize the view accordingly.
err_str, param_dict = parse_ami_file(ami_filepath)
if not param_dict:
self._log.error("Empty dictionary returned by parse_ami_file()!")
self._log.error("Error message:\n%s", err_str)
raise KeyError("Failed to parse AMI file; see console for more detail.")
top_branch = list(param_dict.items())[0]
param_dict = top_branch[1]
if "Reserved_Parameters" not in param_dict:
self._log.error("Error: %s\nParameters: %s", err_str, param_dict)
raise KeyError("Unable to get 'Reserved_Parameters' from the parameter set.")
if "Model_Specific" not in param_dict:
self._log.error("Error: %s\nParameters: %s", err_str, param_dict)
raise KeyError("Unable to get 'Model_Specific' from the parameter set.")
pdict = param_dict["Reserved_Parameters"]
pdict.update(param_dict["Model_Specific"])
gui_items, new_traits = make_gui_items(
# "Model Specific In/InOut Parameters", param_dict["Model_Specific"], first_call=True
"Model In/InOut Parameters",
pdict,
first_call=True,
)
trait_names = []
for trait in new_traits:
self.add_trait(trait[0], trait[1])
trait_names.append(trait[0])
self._content = gui_items
self._param_trait_names = trait_names
self._root_name = top_branch[0]
self._ami_parsing_errors = err_str
self._param_dict = param_dict
def __call__(self):
self.open_gui()
def open_gui(self):
"""Present a customized GUI to the user, for parameter customization."""
self.edit_traits()
def default_traits_view(self):
view = View(
resizable=False,
buttons=ModalButtons,
title="AMI Parameter Configurator",
id="pyibisami.ami.param_config",
)
view.set_content(self._content)
return view
def fetch_param_val(self, branch_names):
"""Returns the value of the parameter found by traversing 'branch_names'
or None if not found.
Note: 'branch_names' should *not* begin with 'root_name'.
"""
param_dict = self.ami_param_defs
while branch_names:
branch_name = branch_names.pop(0)
if branch_name in param_dict:
param_dict = param_dict[branch_name]
else:
return None
if isinstance(param_dict, AMIParameter):
return param_dict.pvalue
return None
def set_param_val(self, branch_names, new_val):
"""Sets the value of the parameter found by traversing 'branch_names'
or raises an exception if not found.
Note: 'branch_names' should *not* begin with 'root_name'.
Note: Be careful! There is no checking done here!
"""
param_dict = self.ami_param_defs
while branch_names:
branch_name = branch_names.pop(0)
if branch_name in param_dict:
param_dict = param_dict[branch_name]
else:
raise ValueError(
f"Failed parameter tree search looking for: {branch_name}; available keys: {param_dict.keys()}"
)
if isinstance(param_dict, AMIParameter):
param_dict.pvalue = new_val
try:
eval(f"self.set({branch_name}_={new_val})") # mapped trait; see below
except Exception:
eval(f"self.set({branch_name}={new_val})") # pylint: disable=eval-used
else:
raise TypeError(f"{param_dict} is not of type: AMIParameter!")
@property
def ami_parsing_errors(self):
"""Any errors or warnings encountered, while parsing the AMI parameter definition file contents."""
return self._ami_parsing_errors
@property
def ami_param_defs(self):
"""The entire AMI parameter definition dictionary.
Should *not* be passed to ``AMIModelInitializer`` constructor!
"""
return self._param_dict
@property
def input_ami_params(self):
"""The dictionary of *Model Specific* AMI parameters of type
'In' or 'InOut', along with their user selected values.
Should be passed to ``AMIModelInitializer`` constructor.
"""
res = {}
res["root_name"] = self._root_name
params = self.ami_param_defs["Model_Specific"]
for pname in params:
res.update(self.input_ami_param(params, pname))
return res
def input_ami_param(self, params, pname):
"""Retrieve one AMI parameter, or dictionary of subparameters."""
res = {}
param = params[pname]
if isinstance(param, AMIParameter):
if pname in self._param_trait_names: # If model specific and In or InOut...
# See the docs on the *HasTraits* class, if this is confusing.
                try:  # Query for a mapped trait, first, by trying to get '<trait_name>_'. (Note the underscore.)
res[pname] = self.get(pname + "_")[pname + "_"]
except Exception: # We have an ordinary (i.e. - not mapped) trait.
res[pname] = self.get(pname)[pname]
elif isinstance(param, dict): # We received a dictionary of subparameters, in 'param'.
subs = {}
for sname in param.keys():
subs.update(self.input_ami_param(param, sname))
res[pname] = subs
return res
def make_gui_items(pname, param, first_call=False):
"""Builds list of GUI items from AMI parameter dictionary."""
gui_items = []
new_traits = []
if isinstance(param, AMIParameter):
pusage = param.pusage
if pusage in ("In", "InOut"):
if param.ptype == "Boolean":
new_traits.append((pname, Bool(param.pvalue)))
gui_items.append(Item(pname, tooltip=param.pdescription))
else:
pformat = param.pformat
if pformat == "Range":
new_traits.append((pname, Range(param.pmin, param.pmax, param.pvalue)))
gui_items.append(Item(pname, tooltip=param.pdescription))
elif pformat == "List":
list_tips = param.plist_tip
default = param.pdefault
if list_tips:
tmp_dict = {}
tmp_dict.update(list(zip(list_tips, param.pvalue)))
val = list(tmp_dict.keys())[0]
if default:
for tip, pvalue in tmp_dict.items():
if pvalue == default:
val = tip
break
new_traits.append((pname, Trait(val, tmp_dict)))
else:
val = param.pvalue[0]
if default:
val = default
new_traits.append((pname, Enum([val] + param.pvalue)))
gui_items.append(Item(pname, tooltip=param.pdescription))
else: # Value
new_traits.append((pname, param.pvalue))
gui_items.append(Item(pname, style="readonly", tooltip=param.pdescription))
else: # subparameter branch
subparam_names = list(param.keys())
subparam_names.sort()
sub_items = []
group_desc = ""
# Build GUI items for this branch.
for subparam_name in subparam_names:
if subparam_name == "description":
group_desc = param[subparam_name]
else:
tmp_items, tmp_traits = make_gui_items(subparam_name, param[subparam_name])
sub_items.extend(tmp_items)
new_traits.extend(tmp_traits)
# Put all top-level non-grouped parameters in a single VGroup.
top_lvl_params = []
sub_params = []
for item in sub_items:
if isinstance(item, Item):
top_lvl_params.append(item)
else:
sub_params.append(item)
sub_items = [Group(top_lvl_params)] + sub_params
# Make the top-level group an HGroup; all others VGroups (default).
if first_call:
gui_items.append(
Group([Item(label=group_desc)] + sub_items, label=pname, show_border=True, orientation="horizontal")
)
else:
gui_items.append(Group([Item(label=group_desc)] + sub_items, label=pname, show_border=True))
return gui_items, new_traits
```
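A hedged sketch of the use model described in the class docstring; `example_tx.ami` is a placeholder path to an existing IBIS-AMI parameter file, and opening the GUI assumes a working TraitsUI backend.

```python
# Usage sketch for AMIParamConfigurator; "example_tx.ami" is a placeholder path.
from pathlib import Path

from pyibisami.ami.configurator import AMIParamConfigurator

cfg = AMIParamConfigurator(Path("example_tx.ami"))
print(cfg.ami_parsing_errors)      # Any errors/warnings hit while parsing the AMI file.
cfg()                              # Same as cfg.open_gui(); lets the user edit In/InOut values.
ami_params = cfg.input_ami_params  # Suitable for the AMIModelInitializer 'ami_params' argument.
```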
#### File: pyibisami/ami/parser.py
```python
import logging
import re
from pathlib import Path
from parsec import ParseError, generate, many, many1, regex, string
from pyibisami.ami.parameter import AMIParamError, AMIParameter
# Tokens to ignore while parsing: whitespace and comments.
whitespace = regex(r"\s+", re.MULTILINE)
comment = regex(r"\|.*")
ignore = many((whitespace | comment))
def lexeme(p):
"""Lexer for words."""
return p << ignore # skip all ignored characters.
def int2tap(x):
"""Convert integer to tap position."""
if x[0] == "-":
res = "pre" + x[1:]
else:
res = "post" + x
return res
lparen = lexeme(string("("))
rparen = lexeme(string(")"))
number = lexeme(regex(r"[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?"))
integ = lexeme(regex(r"[-+]?[0-9]+"))
nat = lexeme(regex(r"[0-9]+"))
tap_ix = integ.parsecmap(int2tap)
symbol = lexeme(regex(r"[a-zA-Z_][^\s()]*"))
true = lexeme(string("True")).result(True)
false = lexeme(string("False")).result(False)
ami_string = lexeme(regex(r'"[^"]*"'))
atom = number | symbol | ami_string | (true | false)
node_name = symbol | tap_ix # `tap_ix` is new and gives the tap position; negative positions are allowed.
@generate("AMI node")
def node():
"Parse AMI node."
yield lparen
label = yield node_name
values = yield many1(expr)
yield rparen
return (label, values)
expr = atom | node
ami_defs = ignore >> node
def proc_branch(branch):
"""
    Process a branch in an AMI parameter definition tree.
That is, build a dictionary from a pair containing:
- a parameter name, and
- a list of either:
- parameter definition tags, or
- subparameters.
We distinguish between the two possible kinds of payloads, by
peaking at the names of the first two items in the list and noting
whether they are keys of 'AMIParameter._param_def_tag_procs'.
We have to do this twice, due to the dual use of the 'Description'
tag and the fact that we have no guarantee of any particular
ordering of subparameter branch items.
Args:
        branch (str, list): A pair, as described above.
Returns:
(str, dict): A pair containing:
err_str:
String containing any errors or warnings encountered,
while building the parameter dictionary.
param_dict:
Resultant parameter dictionary.
"""
results = ("", {}) # Empty Results
if len(branch) != 2:
if not branch:
err_str = "ERROR: Empty branch provided to proc_branch()!\n"
else:
err_str = f"ERROR: Malformed item: {branch[0]}\n"
results = (err_str, {})
param_name = branch[0]
param_tags = branch[1]
if not param_tags:
err_str = f"ERROR: No tags/subparameters provided for parameter, '{param_name}'\n"
results = (err_str, {})
try:
if (
(len(param_tags) > 1)
and (param_tags[0][0] in AMIParameter._param_def_tag_procs)
and (param_tags[1][0] in AMIParameter._param_def_tag_procs)
):
try:
results = ("", {param_name: AMIParameter(param_name, param_tags)})
except AMIParamError as err:
results = (str(err), {})
elif param_name == "Description":
results = ("", {"description": param_tags[0].strip('"')})
else:
err_str = ""
param_dict = {}
param_dict[param_name] = {}
for param_tag in param_tags:
temp_str, temp_dict = proc_branch(param_tag)
param_dict[param_name].update(temp_dict)
if temp_str:
err_str = (
f"Error returned by recursive call, while processing parameter, '{param_name}':\n{temp_str}"
)
results = (err_str, param_dict)
results = (err_str, param_dict)
except Exception:
log = logging.getLogger("pyibisami")
log.error("Error processing branch:\n%s", param_tags)
return results
def parse_ami_file(ami_parameter_file: Path):
"""
    Parse the contents of an IBIS-AMI parameter definition file.
Args:
ami_parameter_file: The filepath to an ami parameter file.
Example:
::
(err_str, param_dict) = parse_ami_file(ami_parameter_file)
Returns:
(str, dict): A pair containing:
err_str:
- None, if parser succeeds.
- Helpful message, if it fails.
param_dict:
Dictionary containing parameter definitions.
(Empty, on failure.)
It has a single key, at the top level, which is the
model root name. This key indexes the actual
parameter dictionary, which has the following
structure::
{
'description' : <optional model description string>
                        'Reserved_Parameters' : <dictionary of reserved parameter definitions>
'Model_Specific' : <dictionary of model specific parameter definitions>
}
The keys of the 'Reserved_Parameters' dictionary are
limited to those called out in the IBIS-AMI
specification.
The keys of the 'Model_Specific' dictionary can be
anything.
The values of both are either:
- instances of class *AMIParameter*, or
- sub-dictionaries following the same pattern.
"""
try:
with open(ami_parameter_file, encoding="UTF-8") as in_file:
res = ami_defs.parse(in_file.read())
except ParseError as pe:
err_str = f"Expected {pe.expected} at {pe.loc()} in:\n{pe.text[pe.index:]}"
return err_str, {}
err_str, param_dict = proc_branch(res)
if err_str:
return (err_str, {"res": res, "dict": param_dict})
reserved_found = False
init_returns_impulse_found = False
getwave_exists_found = False
model_spec_found = False
params = list(param_dict.items())[0][1]
for label in list(params.keys()):
if label == "Reserved_Parameters":
reserved_found = True
tmp_params = params[label]
for param_name in list(tmp_params.keys()):
if param_name not in AMIParameter.RESERVED_PARAM_NAMES:
err_str += f"WARNING: Unrecognized reserved parameter name, '{param_name}', found in parameter definition string!\n"
continue
param = tmp_params[param_name]
if param.pname == "AMI_Version":
if param.pusage != "Info" or param.ptype != "String":
err_str += "WARNING: Malformed 'AMI_Version' parameter.\n"
elif param.pname == "Init_Returns_Impulse":
init_returns_impulse_found = True
elif param.pname == "GetWave_Exists":
getwave_exists_found = True
elif label == "Model_Specific":
model_spec_found = True
elif label == "description":
pass
else:
err_str += f"WARNING: Unrecognized group with label, '{label}', found in parameter definition string!\n"
if not reserved_found:
err_str += "ERROR: Reserved parameters section not found! It is required."
if not init_returns_impulse_found:
err_str += "ERROR: Reserved parameter, 'Init_Returns_Impulse', not found! It is required."
if not getwave_exists_found:
err_str += "ERROR: Reserved parameter, 'GetWave_Exists', not found! It is required."
if not model_spec_found:
err_str += "WARNING: Model specific parameters section not found!"
return (err_str, param_dict)
```
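A minimal sketch of consuming the parser's output, following the dictionary layout documented in `parse_ami_file()`; the `.ami` filepath is a placeholder.

```python
# Sketch of walking the parse_ami_file() result; "example_tx.ami" is a placeholder path.
from pathlib import Path

from pyibisami.ami.parser import parse_ami_file

err_str, param_dict = parse_ami_file(Path("example_tx.ami"))
if err_str:
    print(err_str)  # May hold warnings even when parsing succeeded.
for root_name, params in param_dict.items():
    if isinstance(params, dict) and "Reserved_Parameters" in params:
        print(root_name)
        print(sorted(params["Reserved_Parameters"]))
        print(sorted(params.get("Model_Specific", {})))
```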
#### File: pyibisami/ibis/parser.py
```python
import logging
import re
from pathlib import Path
from parsec import (
ParseError,
count,
eof,
generate,
letter,
many,
many1,
none_of,
one_of,
optional,
regex,
separated,
sepBy1,
string,
times,
)
from pyibisami.ibis.model import Component, Model
DEBUG = False
log = logging.getLogger("pyibisami")
# Parser Definitions
# TODO: Consider shifting to an exclusively line-oriented parsing strategy.
whitespace = regex(r"\s+", re.MULTILINE)
# whitespace = regex(r"\s+")
comment = regex(r"\|.*")
ignore = many(whitespace | comment)
def lexeme(p):
"""Lexer for words."""
return p << ignore # Skip all ignored characters after word, including newlines.
def word(p):
"""Line limited word lexer."""
return p << regex(r"\s*") # Only skip space after words; don't skip comments or newlines.
@generate("remainder of line")
def rest_line():
"Parse remainder of line."
chars = yield many(none_of("\n\r")) << ignore # So that we still function as a lexeme.
return "".join(chars)
skip_line = lexeme(rest_line).result("(Skipped.)")
name_only = regex(r"[_a-zA-Z0-9/\.()#-]+")
name = word(name_only)
symbol = lexeme(regex(r"[a-zA-Z_][^\s()\[\]]*"))
true = lexeme(string("True")).result(True)
false = lexeme(string("False")).result(False)
quoted_string = lexeme(regex(r'"[^"]*"'))
fail = one_of("")
skip_keyword = (skip_line >> many(none_of("[") >> skip_line)).result(
"(Skipped.)"
) # Skip over everything until the next keyword begins.
IBIS_num_suf = {
"T": "e12",
"k": "e3",
"n": "e-9",
"G": "e9",
"m": "e-3",
"p": "e-12",
"M": "e6",
"u": "e-6",
"f": "e-15",
}
@generate("number")
def number():
"Parse an IBIS numerical value."
s = yield word(regex(r"[-+]?[0-9]*\.?[0-9]+(([eE][-+]?[0-9]+)|([TknGmpMuf][a-zA-Z]*))?") << many(letter()))
m = re.search(r"[^\d]+$", s)
if m:
ix = m.start()
c = s[ix]
if c in IBIS_num_suf:
res = float(s[:ix] + IBIS_num_suf[c])
else:
raise ParseError("IBIS numerical suffix", s[ix:], ix)
else:
res = float(s)
return res
na = word(string("NA") | string("na")).result(None)
@generate("typminmax")
def typminmax():
"Parse Typ/Min/Max values."
typ = yield number
log.debug("Typ.: %d", typ)
minmax = yield optional(count(number, 2) | count(na, 2).result([]), [])
log.debug("Min./Max.: %s", minmax)
yield ignore # So that ``typminmax`` behaves as a lexeme.
res = [typ]
res.extend(minmax)
return res
vi_line = (number + typminmax) << ignore
@generate("ratio")
def ratio():
[num, den] = yield (separated(number, string("/"), 2, maxt=2, end=False) | na.result([0, 0]))
if den:
return num / den
return None
ramp_line = string("dV/dt_") >> ((string("r").result("rising") | string("f").result("falling")) << ignore) + times(
ratio, 1, 3
)
ex_line = (
word(string("Executable"))
>> (
(
((string("L") | string("l")) >> string("inux")).result("linux")
| ((string("W") | string("w")) >> string("indows")).result("windows")
)
<< string("_")
<< many(none_of("_"))
<< string("_")
)
+ lexeme(string("32") | string("64"))
+ count(name, 2)
<< ignore
)
def manyTrue(p):
"Run a parser multiple times, filtering ``False`` results."
@generate("manyTrue")
def fn():
"many(p) >> filter(True)"
nodes = yield many(p)
res = list(filter(None, nodes))
return res
return fn
def many1True(p):
"Run a parser at least once, filtering ``False`` results."
@generate("many1True")
def fn():
"many1(p) >> filter(True)"
nodes = yield many1(p)
res = list(filter(None, nodes))
return res
return fn
# IBIS file parser:
def keyword(kywrd=""):
"""Parse an IBIS keyword.
Keyword Args:
kywrd (str): The particular keyword to match; null for any keyword.
If provided, *must* be in canonicalized form (i.e. - underscores,
no spaces)!
Returns:
Parser: A keyword parser.
"""
@generate("IBIS keyword")
def fn():
"Parse IBIS keyword."
yield regex(r"^\[", re.MULTILINE)
wordlets = yield sepBy1(name_only, one_of(" _")) # ``name`` gobbles up trailing space, which we don't want.
yield string("]")
yield ignore # So that ``keyword`` functions as a lexeme.
res = "_".join(wordlets) # Canonicalize to: "<wordlet1>_<wordlet2>_...".
if kywrd:
# assert res.lower() == kywrd.lower(), f"Expecting: {kywrd}; got: {res}." # Does not work!
if res.lower() == kywrd.lower():
return res
return fail.desc(f"Expecting: {kywrd}; got: {res}.")
return res
return fn
@generate("IBIS parameter")
def param():
"Parse IBIS parameter."
pname = yield regex(r"^[a-zA-Z]\w*", re.MULTILINE) # Parameters must begin with a letter in column 1.
log.debug(pname)
res = yield (regex(r"\s*") >> ((word(string("=")) >> number) | typminmax | name | rest_line))
yield ignore # So that ``param`` functions as a lexeme.
return (pname.lower(), res)
def node(valid_keywords, stop_keywords):
"""Build a node-specific parser.
Args:
valid_keywords (dict): A dictionary with keys matching those
keywords we want parsed. The values are the parsers for
those keywords.
stop_keywords: Any iterable with primary values (i.e. - those
tested by the ``in`` function) matching those keywords we want
to stop the parsing of this node and pop us back up the
parsing stack.
Returns:
Parser: A parser for this node.
Notes:
1: Any keywords encountered that are _not_ found (via ``in``) in
either ``valid_keywords`` or ``stop_keywords`` are ignored.
"""
@generate("kywrd")
def kywrd():
"Parse keyword syntax."
nm = yield keyword()
nmL = nm.lower()
log.debug(nmL)
if nmL in valid_keywords:
if nmL == "end": # Because ``ibis_file_parser`` expects this to be the last thing it sees,
return fail # we can't consume it here.
res = yield valid_keywords[nmL] # Parse the sub-keyword.
elif nmL in stop_keywords:
return fail # Stop parsing.
else:
res = yield skip_keyword
yield ignore # So that ``kywrd`` behaves as a lexeme.
log.debug("%s:%s", nmL, res)
return (nmL, res)
return kywrd | param
# Individual IBIS keyword (i.e. - "node") parsers:
# [End]
@generate("[End]")
def end():
"Parse [End]."
yield keyword("End")
return eof
# [Model]
@generate("[Ramp]")
def ramp():
"Parse [Ramp]."
lines = yield count(ramp_line, 2)
return dict(lines)
Model_keywords = {
"pulldown": many1(vi_line),
"pullup": many1(vi_line),
"ramp": ramp,
"algorithmic_model": many1(ex_line) << keyword("end_algorithmic_model"),
"voltage_range": typminmax,
"temperature_range": typminmax,
"gnd_clamp": many1(vi_line),
"power_clamp": many1(vi_line),
}
@generate("[Model]")
def model():
"Parse [Model]."
nm = yield name
log.debug(" %s", nm)
res = yield many1(node(Model_keywords, IBIS_KEYWORDS))
return {nm: Model(dict(res))}
# [Component]
rlc = lexeme(string("R_pin") | string("L_pin") | string("C_pin"))
@generate("[Package]")
def package():
"Parse package RLC values."
rlcs = yield many1(param)
log.debug("rlcs: %s", rlcs)
return dict(rlcs)
def pin(rlcs):
"Parse individual component pin."
@generate("Component Pin")
def fn():
"Parse an individual component pin."
[nm, sig] = yield count(name, 2)
mod = yield name_only
rem_line = yield rest_line
rlc_vals = optional(count(number, 3), []).parse(rem_line)
rlc_dict = {}
if rlcs:
rlc_dict.update(dict(zip(rlcs, rlc_vals)))
return ((nm + "(" + sig + ")"), (mod, rlc_dict))
return fn
@generate("[Component].[Pin]")
def pins():
"Parse [Component].[Pin]."
def filt(x):
(_, (mod, _)) = x
m = mod.upper()
        return m not in ("POWER", "GND", "NC")
yield (lexeme(string("signal_name")) << lexeme(string("model_name")))
rlcs = yield optional(count(rlc, 3), [])
prs = yield many1(pin(rlcs))
prs_filt = list(filter(filt, prs))
return dict(prs_filt)
Component_keywords = {
"manufacturer": rest_line,
"package": package,
"pin": pins,
"diff_pin": skip_keyword,
}
@generate("[Component]")
def comp():
"Parse [Component]."
nm = yield lexeme(name)
res = yield many1(node(Component_keywords, IBIS_KEYWORDS))
return {nm: Component(dict(res))}
# [Model Selector]
@generate("[Model Selector]")
def modsel():
"Parse [Model Selector]."
nm = yield name
res = yield many1(name + rest_line)
return {nm: res}
# Note: The following list MUST have a complete set of keys,
# in order for the parsing logic to work correctly!
IBIS_KEYWORDS = [
"model",
"end",
"ibis_ver",
"comment_char",
"file_name",
"file_rev",
"date",
"source",
"notes",
"disclaimer",
"copyright",
"component",
"model_selector",
"submodel",
"external_circuit",
"test_data",
"test_load",
"define_package_model",
"interconnect_model_set",
]
IBIS_kywrd_parsers = dict(zip(IBIS_KEYWORDS, [skip_keyword] * len(IBIS_KEYWORDS)))
IBIS_kywrd_parsers.update(
{
"model": model,
"end": end,
"ibis_ver": lexeme(number),
"file_name": lexeme(name),
"file_rev": lexeme(name),
"date": rest_line,
"component": comp,
"model_selector": modsel,
}
)
@generate("IBIS File")
def ibis_file_parser():
res = yield ignore >> many1True(node(IBIS_kywrd_parsers, {})) << end
return res
def parse_ibis_file(ibis_file: Path):
"""
Parse the contents of an IBIS file.
Args:
ibis_file: The filepath to the .ibs file.
Example:
::
(err_str, model_dict) = parse_ibis_file(ibis_file)
Returns:
(str, dict): A pair containing:
err_str:
A message describing the nature of any parse failure that occurred.
model_dict:
Dictionary containing keyword definitions (empty upon failure).
"""
try:
with open(ibis_file, encoding="UTF-8") as in_file:
nodes = ibis_file_parser.parse(in_file.read())
log.debug("Parsed nodes:\n%s", nodes)
except ParseError as pe:
err_str = f"Expected {pe.expected} at {pe.loc()} in {pe.text[pe.index]}"
return err_str, {}
kw_dict = {}
components = {}
models = {}
model_selectors = {}
for (kw, val) in nodes:
if kw == "model":
models.update(val)
elif kw == "component":
components.update(val)
elif kw == "model_selector":
model_selectors.update(val)
else:
kw_dict.update({kw: val})
kw_dict.update(
{
"components": components,
"models": models,
"model_selectors": model_selectors,
}
)
return "Success!", kw_dict
```
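A minimal sketch for the IBIS entry point, mirroring the example in its docstring; `example_tx.ibs` is a placeholder path.

```python
# Sketch of calling parse_ibis_file(); "example_tx.ibs" is a placeholder path.
from pathlib import Path

from pyibisami.ibis.parser import parse_ibis_file

status, ibis = parse_ibis_file(Path("example_tx.ibs"))
print(status)                              # "Success!" or a parse-error description.
print(sorted(ibis.get("models", {})))      # Names gathered from [Model] keywords.
print(sorted(ibis.get("components", {})))  # Names gathered from [Component] keywords.
```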
#### File: tests/ami/test_config.py
```python
import shutil
from pathlib import Path
from unittest.mock import patch
from pyibisami.tools import ami_generator
@patch.object(ami_generator, "date", autospec=True)
def test_ami_generator(mock_date, tmp_path):
"""Use example_tx.py and supporting cpp.em found under test/examples to generate a model."""
mock_date.today.return_value = "2019-02-10" # Mock the object data so that we can test it.
    # Copy the examples to a temporary path.
FILES = ["example_tx.py", "example_tx.cpp.em"]
examples_folder = Path(__file__).parents[1].joinpath("examples")
for file in FILES:
shutil.copy(examples_folder.joinpath(file), tmp_path.joinpath(file))
ami_generator.ami_generator(py_file=tmp_path.joinpath("example_tx.py"))
with open(tmp_path.joinpath("example_tx.ami"), encoding="UTF-8") as ami_file:
ami = ami_file.read()
assert (
ami
== r"""(example_tx
(Description "Example Tx model from ibisami package.")
(Reserved_Parameters
(AMI_Version
(Usage Info )
(Type String )
(Value "5.1" )
(Description "Version of IBIS standard we comply with." )
)
(Init_Returns_Impulse
(Usage Info )
(Type Boolean )
(Value True )
(Description "In fact, this model is, currently, Init-only." )
)
(GetWave_Exists
(Usage Info )
(Type Boolean )
(Value True )
(Description "This model is dual-mode, with GetWave() mimicking Init()." )
)
)
(Model_Specific
(tx_tap_units
(Usage In )
(Type Integer )
(Range 27 6 27 )
(Description "Total current available to FIR filter." )
)
(tx_tap_np1
(Usage In )
(Type Integer )
(Range 0 0 10 )
(Description "First (and only) pre-tap." )
)
(tx_tap_nm1
(Usage In )
(Type Integer )
(Range 0 0 10 )
(Description "First post-tap." )
)
(tx_tap_nm2
(Usage In )
(Type Integer )
(Range 0 0 10 )
(Description "Second post-tap." )
)
)
)
"""
)
with open(tmp_path.joinpath("example_tx.ibs"), encoding="UTF-8") as ibis_file:
ibis = ibis_file.read()
assert (
r"""[IBIS Ver] 5.1
[File Name] example_tx.ibs
[File Rev] v0.1
[Date] 2019-02-10
[Source] ibisami, a public domain IBIS-AMI model creation infrastructure
[Disclaimer]
THIS MODEL IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS MODEL, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
[Notes]
This IBIS file was generated, using the template file,
"example_tx.ibs.em", from ibisami, a public domain IBIS-AMI model
creation infrastructure. To learn more, visit:
https://github.com/capn-freako/ibisami/wiki
[Copyright] Copyright (c) 2016 <NAME>; all rights reserved World wide.
[Component] Example_Tx
[Manufacturer] (n/a)
[Package]
R_pkg 0.10 0.00 0.50
L_pkg 10.00n 0.10n 50.00n
C_pkg 1.00p 0.01p 5.00p
[Pin] signal_name model_name R_pin L_pin C_pin
1p Tx_1_P example_tx
1n Tx_1_N example_tx
2p Tx_2_P example_tx
2n Tx_2_N example_tx
3p Tx_3_P example_tx
3n Tx_3_N example_tx
[Diff_Pin] inv_pin vdiff tdelay_typ tdelay_min tdelay_max
1p 1n 0.1V NA NA NA
2p 2n 0.1V NA NA NA
3p 3n 0.1V NA NA NA
[Model] example_tx
Model_type Output
C_comp 1.00p 0.01p 5.00p
Cref = 0
Vref = 0.5
Vmeas = 0.5
Rref = 50
[Algorithmic Model]
Executable linux_gcc4.1.2_32 example_tx_x86.so example_tx.ami
Executable linux_gcc4.1.2_64 example_tx_x86_amd64.so example_tx.ami
Executable Windows_VisualStudio_32 example_tx_x86.dll example_tx.ami
Executable Windows_VisualStudio_64 example_tx_x86_amd64.dll example_tx.ami
[End Algorithmic Model]
[Temperature_Range] 25.0 0.0 100.0
[Voltage_Range] 1.80 1.62 1.98
"""
in ibis
)
with open(tmp_path.joinpath("example_tx.cpp"), encoding="UTF-8") as cpp_file:
cpp = cpp_file.read()
assert (
cpp
== r"""/** \file example_tx.cpp
* \brief Example of using ibisami to build a Tx model.
*
* Original author: <NAME> <br>
* Original date: May 8, 2015
* Initial conversion to EmPy template format: Feb 25, 2016
*
* Copyright (c) 2015 <NAME>; all rights reserved World wide.
*/
#define TAP_SCALE 0.0407
#include <string>
#include <vector>
#include "include/ami_tx.h"
/// An example device specific Tx model implementation.
class MyTx : public AmiTx {
typedef AmiTx inherited;
public:
MyTx() {}
~MyTx() {}
void init(double *impulse_matrix, const long number_of_rows,
const long aggressors, const double sample_interval,
const double bit_time, const std::string& AMI_parameters_in) override {
// Let our base class do its thing.
inherited::init(impulse_matrix, number_of_rows, aggressors,
sample_interval, bit_time, AMI_parameters_in);
// Grab our parameters and configure things accordingly.
std::vector<std::string> node_names; node_names.clear();
std::ostringstream msg;
msg << "Initializing Tx...\n";
int taps[4];
int tx_tap_units ;
node_names.push_back("tx_tap_units");
tx_tap_units = get_param_int(node_names, 27 );
node_names.pop_back();
int tx_tap_np1 ;
node_names.push_back("tx_tap_np1");
tx_tap_np1 = get_param_int(node_names, 0 );
taps[0] = tx_tap_np1;
node_names.pop_back();
int tx_tap_nm1 ;
node_names.push_back("tx_tap_nm1");
tx_tap_nm1 = get_param_int(node_names, 0 );
taps[2] = tx_tap_nm1;
node_names.pop_back();
int tx_tap_nm2 ;
node_names.push_back("tx_tap_nm2");
tx_tap_nm2 = get_param_int(node_names, 0 );
taps[3] = tx_tap_nm2;
node_names.pop_back();
taps[1] = tx_tap_units - (taps[0] + taps[2] + taps[3]);
if ( (tx_tap_units - 2 * (taps[0] + taps[2] + taps[3])) < 6 )
msg << "WARNING: Illegal Tx pre-emphasis tap configuration!\n";
// Fill in params_.
std::ostringstream params;
params << "(example_tx";
params << " (tx_tap_units " << tx_tap_units << ")";
params << " (taps[0] " << taps[0] << ")";
params << " (taps[1] " << taps[1] << ")";
params << " (taps[2] " << taps[2] << ")";
params << " (taps[3] " << taps[3] << ")";
tap_weights_.clear();
int samples_per_bit = int(bit_time / sample_interval);
int tap_signs[] = {-1, 1, -1, -1};
have_preemph_ = true;
for (auto i = 0; i <= 3; i++) {
tap_weights_.push_back(taps[i] * TAP_SCALE * tap_signs[i]);
params << " (tap_weights_[" << i << "] " << tap_weights_.back() << ")";
for (auto j = 1; j < samples_per_bit; j++)
tap_weights_.push_back(0.0);
}
param_str_ = params.str() + "\n";
msg_ = msg.str() + "\n";
}
} my_tx;
AMIModel *ami_model = &my_tx; ///< The pointer required by the API implementation.
"""
)
```
#### File: PyAMI/tests/test_models.py
```python
import logging
import os
from pathlib import Path
import pytest
from pyibisami.ami.parser import parse_ami_file
from pyibisami.ibis.parser import parse_ibis_file
# pylint: disable=redefined-outer-name
@pytest.fixture(scope="session")
def model_folder():
"""Return a path that is set by the user's local environment variables."""
folder = Path(os.getenv("PYIBISAMI_TEST_MODELS", default=""))
print(f"Searching {folder.absolute()}")
return folder
def test_parse_random_ibis_files(caplog, model_folder):
"""Test that `pyibisami` can parse any ibis model in the `PYIBISAMI_TEST_MODELS` test folder."""
caplog.set_level(logging.DEBUG)
ibis_files = list(model_folder.rglob("*.ibs"))
if not ibis_files:
pytest.skip(f"No IBIS files found in {model_folder}.")
for filepath in ibis_files:
parse_ibis_file(filepath)
def test_parse_random_ami_files(caplog, model_folder):
"""Test that `pyibisami` can parse any ami model in the `PYIBISAMI_TEST_MODELS` test folder."""
caplog.set_level(logging.DEBUG)
ami_files = list(model_folder.rglob("*.ami"))
if not ami_files:
pytest.skip(f"No AMI files found in {model_folder}.")
for filepath in ami_files:
parse_ami_file(filepath)
```
|
{
"source": "jdpatt/PyBERT",
"score": 3
}
|
#### File: PyBERT/pybert/results.py
```python
import pickle
from pathlib import Path
from chaco.api import ArrayPlotData
class PyBertData:
"""
PyBERT simulation results data encapsulation class.
This class is used to encapsulate that subset of the results
data for a PyBERT instance, which is to be saved when the user
clicks the "Save Results" button.
"""
_item_names = [
"chnl_h",
"tx_out_h",
"ctle_out_h",
"dfe_out_h",
"chnl_s",
"tx_s",
"ctle_s",
"dfe_s",
"tx_out_s",
"ctle_out_s",
"dfe_out_s",
"chnl_p",
"tx_out_p",
"ctle_out_p",
"dfe_out_p",
"chnl_H",
"tx_H",
"ctle_H",
"dfe_H",
"tx_out_H",
"ctle_out_H",
"dfe_out_H",
"tx_out",
# "rx_in", # FIXME: There is no plot for this which crashes loading results.
]
def __init__(self, the_PyBERT):
"""
Copy just that subset of the supplied PyBERT instance's
'plotdata' attribute, which should be saved during pickling.
"""
plotdata = the_PyBERT.plotdata
the_data = ArrayPlotData()
for item_name in self._item_names:
the_data.set_data(item_name, plotdata.get_data(item_name))
self.the_data = the_data
def save(self, filepath: Path):
"""Save all of the plot data out to a file."""
with open(filepath, "wb") as the_file:
pickle.dump(self, the_file)
@staticmethod
def load_from_file(filepath: Path, pybert):
"""Recall all the results from a file and load them as reference plots."""
with open(filepath, "rb") as the_file:
user_results = pickle.load(the_file)
if not isinstance(user_results, PyBertData):
raise Exception("The data structure read in is NOT of type: ArrayPlotData!")
for prop, value in user_results.the_data.arrays.items():
pybert.plotdata.set_data(prop + "_ref", value)
# Add reference plots, if necessary.
# - time domain
for (container, suffix, has_both) in [
(pybert.plots_h.component_grid.flat, "h", False),
(pybert.plots_s.component_grid.flat, "s", True),
(pybert.plots_p.component_grid.flat, "p", False),
]:
if "Reference" not in container[0].plots:
(ix, prefix) = (0, "chnl")
item_name = prefix + "_" + suffix + "_ref"
container[ix].plot(("t_ns_chnl", item_name), type="line", color="darkcyan", name="Inc_ref")
for (ix, prefix) in [(1, "tx"), (2, "ctle"), (3, "dfe")]:
item_name = prefix + "_out_" + suffix + "_ref"
container[ix].plot(("t_ns_chnl", item_name), type="line", color="darkmagenta", name="Cum_ref")
if has_both:
for (ix, prefix) in [(1, "tx"), (2, "ctle"), (3, "dfe")]:
item_name = prefix + "_" + suffix + "_ref"
container[ix].plot(("t_ns_chnl", item_name), type="line", color="darkcyan", name="Inc_ref")
# - frequency domain
for (container, suffix, has_both) in [(pybert.plots_H.component_grid.flat, "H", True)]:
if "Reference" not in container[0].plots:
(ix, prefix) = (0, "chnl")
item_name = prefix + "_" + suffix + "_ref"
container[ix].plot(
("f_GHz", item_name), type="line", color="darkcyan", name="Inc_ref", index_scale="log"
)
for (ix, prefix) in [(1, "tx"), (2, "ctle"), (3, "dfe")]:
item_name = prefix + "_out_" + suffix + "_ref"
container[ix].plot(
("f_GHz", item_name),
type="line",
color="darkmagenta",
name="Cum_ref",
index_scale="log",
)
if has_both:
for (ix, prefix) in [(1, "tx"), (2, "ctle"), (3, "dfe")]:
item_name = prefix + "_" + suffix + "_ref"
container[ix].plot(
("f_GHz", item_name),
type="line",
color="darkcyan",
name="Inc_ref",
index_scale="log",
)
```
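A hedged sketch of the save/recall round trip this class supports, assuming a completed headless simulation like the one the tests below create; the results filename is arbitrary.

```python
# Sketch of saving results and re-loading them as reference plots.
from pathlib import Path

from pybert.pybert import PyBERT
from pybert.results import PyBertData

app = PyBERT(run_simulation=True, gui=False)        # Headless run, as in the test suite.
PyBertData(app).save(Path("results.pybert_data"))   # Snapshot and pickle the plot data.
# Later, overlay the saved waveforms as "<name>_ref" plots alongside new results.
PyBertData.load_from_file(Path("results.pybert_data"), app)
```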
#### File: PyBERT/tests/test_loading_and_saving.py
```python
import logging
import pickle
import numpy as np
import yaml
from pybert import __version__
from pybert.pybert import PyBERT
def test_save_config_as_yaml(tmp_path):
"""Make sure that pybert can correctly generate a yaml file that can get reloaded."""
app = PyBERT(run_simulation=False, gui=False)
save_file = tmp_path.joinpath("config.yaml")
app.save_configuration(save_file)
assert save_file.exists() # File was created.
with open(save_file, "r", encoding="UTF-8") as saved_config_file:
user_config = yaml.load(saved_config_file, Loader=yaml.Loader)
assert user_config.version == __version__
def test_save_config_as_pickle(tmp_path):
"""Make sure that pybert can correctly generate a pickle file that can get reloaded."""
app = PyBERT(run_simulation=False, gui=False)
save_file = tmp_path.joinpath("config.pybert_cfg")
app.save_configuration(save_file)
assert save_file.exists() # File was created.
with open(save_file, "rb") as saved_config_file:
user_config = pickle.load(saved_config_file)
assert user_config.version == __version__
def test_save_config_as_invalid(tmp_path, caplog):
"""When given an unsupported file suffix, no file should be generated and an message logged."""
caplog.set_level(logging.DEBUG)
app = PyBERT(run_simulation=False, gui=False)
save_file = tmp_path.joinpath("config.json")
app.save_configuration(save_file)
assert not save_file.exists() # File should not have been created.
assert "Pybert does not support this file type." in caplog.text
def test_save_results_as_pickle(tmp_path):
"""Make sure that pybert can correctly generate a waveform pickle file that can get reloaded."""
app = PyBERT(run_simulation=False, gui=False)
save_file = tmp_path.joinpath("results.pybert_data")
app.save_results(save_file)
assert save_file.exists() # File was created.
with open(save_file, "rb") as saved_results_file:
results = pickle.load(saved_results_file)
assert results.the_data.arrays
def test_load_config_from_yaml(tmp_path):
"""Make sure that pybert can correctly load a yaml file."""
app = PyBERT(run_simulation=False, gui=False)
save_file = tmp_path.joinpath("config.yaml")
app.save_configuration(save_file)
TEST_NUMBER_OF_BITS = 1234
# Modify the saved yaml file.
with open(save_file, "r", encoding="UTF-8") as saved_config_file:
user_config = yaml.load(saved_config_file, Loader=yaml.Loader)
user_config.nbits = TEST_NUMBER_OF_BITS # Normally, 8000
with open(save_file, "w", encoding="UTF-8") as saved_config_file:
yaml.dump(user_config, saved_config_file)
app.load_configuration(save_file)
assert app.nbits == TEST_NUMBER_OF_BITS
def test_load_config_from_pickle(tmp_path):
"""Make sure that pybert can correctly load a pickle file."""
app = PyBERT(run_simulation=False, gui=False)
save_file = tmp_path.joinpath("config.pybert_cfg")
app.save_configuration(save_file)
TEST_PATTERN_LENGTH = 31
# Modify the saved pickle file.
with open(save_file, "rb") as saved_config_file:
user_config = pickle.load(saved_config_file)
user_config.pattern_len = TEST_PATTERN_LENGTH # Normally, 127
with open(save_file, "wb") as saved_config_file:
pickle.dump(user_config, saved_config_file)
app.load_configuration(save_file)
assert app.pattern_len == TEST_PATTERN_LENGTH
def test_load_config_from_invalid(tmp_path, caplog):
"""When given an unsupported file suffix, no file should be read and an message logged."""
caplog.set_level(logging.DEBUG)
app = PyBERT(run_simulation=False, gui=False)
save_file = tmp_path.joinpath("config.json")
app.load_configuration(save_file)
assert "Pybert does not support this file type." in caplog.text
def test_load_results_from_pickle(tmp_path, caplog):
"""Make sure that pybert can correctly load a pickle file."""
caplog.set_level(logging.DEBUG)
app = PyBERT(run_simulation=True, gui=False)
save_file = tmp_path.joinpath("config.pybert_data")
app.save_results(save_file)
# Modify the saved pickle file.
with open(save_file, "rb") as saved_results_file:
user_results = pickle.load(saved_results_file)
user_results.the_data.update_data({"chnl_h": np.array([1, 2, 3, 4])})
with open(save_file, "wb") as saved_results_file:
pickle.dump(user_results, saved_results_file)
caplog.clear()
app.load_results(save_file)
# pybert doesn't directly reload the waveform back into the same plot.
    # instead it creates a reference plot to compare old vs. new.
assert app.plotdata.get_data("chnl_h_ref").size == 4
```
|
{
"source": "jdpaul123/Binary_Search_Tree",
"score": 4
}
|
#### File: jdpaul123/Binary_Search_Tree/test_binary_search_tree.py
```python
import binary_search_tree as lab3  # Aliased because the tests below refer to the module as "lab3".
import unittest
class T0_tree__insert(unittest.TestCase):
def test_balanced_binary_search_tree(self):
print("\n")
print("tree_insert_with_individual_check")
t = lab3.Tree()
t.insert(4)
t.insert(2)
t.insert(6)
t.insert(1)
t.insert(3)
t.insert(5)
t.insert(7)
# The following check is without using tree as an iterator (which uses inorder traversal)
# So this function also does not check the implementation of the traversal function
self.assertEqual(t.root.data, 4)
self.assertEqual(t.root.left.data, 2)
self.assertEqual(t.root.left.left.data, 1)
self.assertEqual(t.root.left.right.data, 3)
self.assertEqual(t.root.right.data, 6)
self.assertEqual(t.root.right.left.data, 5)
self.assertEqual(t.root.right.right.data, 7)
print("\n")
def test_balanced_binary_search_tree_two(self):
print("\n")
print("tree_insert_with_individual_check_two")
t = lab3.Tree()
t.insert(20)
t.insert(10)
t.insert(40)
t.insert(5)
t.insert(15)
t.insert(25)
t.insert(75)
t.insert(3)
t.insert(7)
t.insert(12)
t.insert(16)
t.insert(24)
t.insert(30)
t.insert(65)
t.insert(90)
# The following check is without using tree as an iterator (which uses inorder traversal)
# So this function also does not check the implementation of the traversal function
self.assertEqual(t.root.data, 20)
self.assertEqual(t.root.left.data, 10)
self.assertEqual(t.root.left.left.data, 5)
self.assertEqual(t.root.left.left.left.data, 3)
self.assertEqual(t.root.left.left.right.data, 7)
self.assertEqual(t.root.left.right.data, 15)
self.assertEqual(t.root.left.right.right.data, 16)
self.assertEqual(t.root.left.right.left.data, 12)
self.assertEqual(t.root.right.data, 40)
self.assertEqual(t.root.right.left.data, 25)
self.assertEqual(t.root.right.left.left.data, 24)
self.assertEqual(t.root.right.left.right.data, 30)
self.assertEqual(t.root.right.right.data, 75)
self.assertEqual(t.root.right.right.right.data, 90)
self.assertEqual(t.root.right.right.left.data, 65)
print("\n")
def test_unbalanced_binary_search_tree(self):
print("\n")
print("test_unbalanced_bst")
t = lab3.Tree()
t.insert(50)
t.insert(40)
t.insert(80)
t.insert(30)
t.insert(47)
t.insert(22)
t.insert(35)
t.insert(25)
# The following check is without using tree as an iterator (which uses inorder traversal)
# So this function also does not check the implementation of the traversal function
self.assertEqual(t.root.data, 50)
self.assertEqual(t.root.left.data, 40)
self.assertEqual(t.root.left.left.data, 30)
self.assertEqual(t.root.left.right.data, 47)
self.assertEqual(t.root.right.data, 80)
self.assertEqual(t.root.left.left.left.data, 22)
self.assertEqual(t.root.left.left.left.right.data, 25)
self.assertEqual(t.root.left.left.right.data, 35)
print("\n")
def test_skewed_left_binary_search_tree(self):
print("\n")
print("test_skewed_left_bst")
t = lab3.Tree()
t.insert(100)
t.insert(50)
t.insert(35)
t.insert(2)
# The following check is without using tree as an iterator (which uses inorder traversal)
# So this function also does not check the implementation of the traversal function
self.assertEqual(t.root.data, 100)
self.assertEqual(t.root.left.data, 50)
self.assertEqual(t.root.left.left.data, 35)
self.assertEqual(t.root.left.left.left.data, 2)
print("\n")
def test_skewed_right_binary_search_tree(self):
print("\n")
print("test_skewed_right_bst")
t = lab3.Tree()
t.insert(5)
t.insert(100)
t.insert(1000)
t.insert(1234)
# The following check is without using tree as an iterator (which uses inorder traversal)
# So this function also does not check the implementation of the traversal function
self.assertEqual(t.root.data, 5)
self.assertEqual(t.root.right.data, 100)
self.assertEqual(t.root.right.right.data, 1000)
self.assertEqual(t.root.right.right.right.data, 1234)
print("\n")
class T1_min_and_max(unittest.TestCase):
def test_min_and_max(self):
print("\n")
print("Checkin the min and the max functions")
t = lab3.Tree()
t.insert(4)
t.insert(2)
t.insert(6)
t.insert(1)
t.insert(3)
t.insert(5)
t.insert(7)
minimum = t.min()
self.assertEqual(minimum, 1)
maximum = t.max()
self.assertEqual(maximum, 7)
print("\n")
def test_if_min_is_left_most_node(self):
print("\n")
print("Checkin if the min is the left most node")
t = lab3.Tree()
t.insert(20)
t.insert(10)
t.insert(30)
t.insert(5)
t.insert(15)
t.insert(25)
t.insert(35)
minimum = t.min()
self.assertEqual(t.root.left.left.data, minimum)
print("\n")
def test_if_max_is_right_most_node(self):
print("\n")
print("Checkin if the max is the right most node")
t = lab3.Tree()
t.insert(20)
t.insert(10)
t.insert(30)
t.insert(5)
t.insert(15)
t.insert(25)
t.insert(35)
maximum = t.max()
self.assertEqual(t.root.right.right.data, maximum)
print("\n")
def test_max_on_empty_tree(self):
print("\n")
print("Checkin if the None returns on an empty tree calling max")
t = lab3.Tree()
maxim = t.max()
self.assertEqual(maxim, None)
def test_min_on_empty_tree(self):
print("\n")
print("Checkin if the None returns on an empty tree calling min")
t = lab3.Tree()
minim = t.min()
self.assertEqual(minim, None)
class T2_Traversal(unittest.TestCase):
def test_traversal(self):
print("\n")
print("Checking all the three traversals")
t = lab3.Tree()
t.insert(4)
t.insert(2)
t.insert(6)
t.insert(1)
t.insert(3)
t.insert(5)
t.insert(7)
tree_iterator = [node for node in t]
inorder = [node for node in t.inorder()]
preorder = [node for node in t.preorder()]
print("__iter__(): inorder traversal")
self.assertEqual(tree_iterator, [1, 2, 3, 4, 5, 6, 7])
print("inorder traversal")
self.assertEqual(inorder, [1, 2, 3, 4, 5, 6, 7])
print("preorder traversal")
self.assertEqual(preorder, [4, 2, 1, 3, 6, 5, 7])
print("\n")
def test_postorder_of_unbalanced_tree(self):
print("\n")
print("test_postorder_of_unbalanced_tree")
t = lab3.Tree()
t.insert(50)
t.insert(40)
t.insert(80)
t.insert(30)
t.insert(47)
t.insert(22)
t.insert(35)
t.insert(25)
postorder = [node for node in t.postorder()]
self.assertEqual(postorder, [25, 22, 35, 30, 47, 40, 80, 50])
# The following check is without using tree as an iterator (which uses inorder traversal)
# So this function also does not check the implementation of the traversal function
print("\n")
class T3_successor(unittest.TestCase):
def test_contains_and__find_node(self):
print("\n")
print("Contains method which uses the __find_node private method")
tree_success = lab3.Tree()
tree_success.insert(8)
tree_success.insert(3)
tree_success.insert(10)
tree_success.insert(1)
tree_success.insert(6)
tree_success.insert(4)
tree_success.insert(7)
tree_success.insert(14)
tree_success.insert(13)
checkT = tree_success.contains(6)
checkF1 = tree_success.contains(50)
checkF2 = tree_success.contains(2)
self.assertEqual(checkT, True)
self.assertEqual(checkF1, False)
self.assertEqual(checkF2, False)
def test_successor(self):
print("\n")
print("successor function")
tree_success = lab3.Tree()
tree_success.insert(8)
tree_success.insert(3)
tree_success.insert(10)
tree_success.insert(1)
tree_success.insert(6)
tree_success.insert(4)
tree_success.insert(7)
tree_success.insert(14)
tree_success.insert(13)
easy_success = tree_success.find_successor(8).data
medium_success = tree_success.find_successor(10).data
tough_success = tree_success.find_successor(7).data
self.assertEqual(easy_success, 10)
self.assertEqual(medium_success, 13)
self.assertEqual(tough_success, 8)
print("\n")
def test_required_successor(self):
print("\n")
print("Successor function after putting in 5, 4, 1, 9, 6, 2, 0 and finding Node(2)")
tree_success = lab3.Tree()
tree_success.insert(5)
tree_success.insert(4)
tree_success.insert(1)
tree_success.insert(9)
tree_success.insert(6)
tree_success.insert(2)
tree_success.insert(0)
this_test = tree_success.find_successor(2).data
self.assertEqual(this_test, 4)
class T4_delete(unittest.TestCase):
def test_delete(self):
print("\n")
print("delete function")
t = lab3.Tree()
t.insert(8)
t.insert(3)
t.insert(10)
t.insert(1)
t.insert(6)
t.insert(4)
t.insert(7)
t.insert(14)
t.insert(13)
l1 = [node for node in t]
t.delete(7)
l2 = [node for node in t]
t.delete(6)
l3 = [node for node in t]
t.delete(8)
l4 = [node for node in t]
t.delete(10)
l5 = [node for node in t]
self.assertEqual(l1, [1, 3, 4, 6, 7, 8, 10, 13, 14])
self.assertEqual(l2, [1, 3, 4, 6, 8, 10, 13, 14])
self.assertEqual(l3, [1, 3, 4, 8, 10, 13, 14])
self.assertEqual(l4, [1, 3, 4, 10, 13, 14])
self.assertEqual(l5, [1, 3, 4, 13, 14])
print("\n")
def test_delete_with_no_nodes(self):
print("\n")
print("test delete with no nodes in the tree")
t = lab3.Tree()
with self.assertRaises(KeyError):
t.delete(7)
def test_delete_on_tree_w_only_root(self):
print("\n")
print("test delete with only the root node in the tree")
t = lab3.Tree()
t.insert(25)
self.assertEqual(t.delete(25), None)
class T5_contains(unittest.TestCase):
def test_contains(self):
print("\n")
print("contains function")
t = lab3.Tree()
t.insert(8)
t.insert(3)
t.insert(10)
t.insert(1)
t.insert(6)
t.insert(4)
t.insert(7)
t.insert(14)
t.insert(13)
self.assertEqual(t.contains(13), True)
self.assertEqual(t.contains(15), False)
print("\n")
def test_contains_with_no_node(self):
print("\n")
print("contains when it doesnt exist")
t = lab3.Tree()
t.insert(8)
t.insert(3)
t.insert(10)
t.insert(1)
t.insert(6)
t.insert(4)
self.assertEqual(t.contains(13), False)
print("\n")
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jdpdev/birbcam",
"score": 2
}
|
#### File: birbcam/birbcam/birbcam.py
```python
from picamerax.array import PiRGBArray
from picamerax import PiCamera
from time import sleep
#import lensshading
import sys
import logging
from setproctitle import setproctitle
from .birbconfig import BirbConfig
from .focusassist import FocusAssist
from .imagemask import ImageMask
from .birbwatcher import BirbWatcher
previewResolution = (640, 480)
def run_birbcam():
setproctitle("birbcam")
config = BirbConfig()
if not config.logFile is None:
logging.basicConfig(level=logging.INFO, filename=config.logFile, format='(%(asctime)s) %(levelname)s: %(message)s', datefmt="%H:%M:%S")
else:
logging.basicConfig(level=logging.INFO, format='(%(asctime)s) %(levelname)s: %(message)s', datefmt="%H:%M:%S")
logging.info(f"Saving output to: {config.saveTo}")
if config.noCaptureMode: logging.info("Using No Capture Mode")
if config.debugMode: logging.info("Using Debug Mode")
camera = PiCamera()
camera.resolution = previewResolution
camera.framerate = 30
camera.iso = 200
rawCapture = PiRGBArray(camera, size=previewResolution)
"""
shading = lensshading.get_lens_shading(args.get("lensshading"))
if shading != None:
shading = shading.astype("uint8")
print(np.shape(shading))
camera.lens_shading_table = shading
"""
camera.exposure_mode = 'auto'
camera.awb_mode = 'auto'
camera.meter_mode = 'spot'
sleep(2)
camera.shutter_speed = camera.exposure_speed
# **************************************
# Focus assist
# **************************************
focusAssist = FocusAssist()
if focusAssist.run(camera) == False: sys.exit()
# **************************************
# Set mask
# **************************************
imageMask = ImageMask()
if imageMask.run(camera) == False: sys.exit()
mask = imageMask.mask
# **************************************
# Capture loop
# **************************************
watcher = BirbWatcher(config)
watcher.run(camera, mask)
```
#### File: birbcam/exposureadjust/adjustup.py
```python
from .adjust import Adjust
import numpy as np
import logging
class AdjustUp(Adjust):
def setup(self):
logging.info(f"[AdjustUp] take_over")
def do_adjust(self, camera):
if self._shutterFlipper.is_at_end:
self.finish()
return
camera.shutter_speed = self._shutterFlipper.next()
def check_exposure(self, exposure):
delta = exposure - self._targetLevel
logging.info(f"[AdjustUp] {exposure}, {delta} < {self._levelMargin}, {self._lastExposure}")
if self._lastExposure != None:
lastDelta = self._lastExposure - self._targetLevel
# stop if crossed line
if np.sign(delta) != np.sign(lastDelta):
return True
# stop if close enough
if abs(delta) < self._levelMargin:
return True
return False
```
#### File: birbcam/exposureadjust/exposureadjust.py
```python
from ..optionflipper import OptionFlipper
from .exposurestate import ExposureState
from .watch import Watch
from cv2 import normalize, calcHist, cvtColor, COLOR_BGR2GRAY
from time import time
import numpy as np
import logging
class ExposureAdjust:
def __init__(self, shutterFlipper: OptionFlipper, isoFlipper: OptionFlipper, interval: int = 300, targetLevel: int = 110, margin: int = 10):
"""
Parameters
----------
shutterFlipper : OptionFlipper
isoFlipper : OptionFlipper
"""
self.shutterFlipper = shutterFlipper
self.isoFlipper = isoFlipper
self.targetLevel = targetLevel
self._interval = interval
self._actualMargin = margin
self._currentState = None
self.change_state(Watch(self._interval))
@property
def isAdjustingExposure(self):
return self._currentState.isAdjustingExposure if self._currentState != None else False
@property
def targetExposure(self):
return self.targetLevel
@targetExposure.setter
def targetExposure(self, value):
self.targetLevel = value
@property
def sleepInterval(self):
return self._interval
@property
def levelError(self):
return self._actualMargin
def change_state(self, nextState: ExposureState):
if self._currentState != None:
self._currentState.release()
self._currentState = nextState
if self._currentState == None:
self._currentState = Watch(self._interval)
if self._currentState != None:
self._currentState.take_over(self,
self.shutterFlipper,
self.isoFlipper,
self.change_state,
self.targetExposure,
self._actualMargin
)
def check_exposure(self, camera, image):
self._currentState.update(camera, image)
return self._currentState.isAdjustingExposure
```
#### File: birbcam/birbcam/histocompare.py
```python
from common import draw_mask
from time import time, sleep
import cv2
import numpy as np
import imutils
import argparse
def reduce_img(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
gray = imutils.resize(gray, width=800)
return gray
def build_histogram(a):
hist = cv2.calcHist([a], [0], None, [256], [0,256])
cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
data = np.int32(np.around(hist))
return (hist, data)
def draw_histogram(data):
blank = np.zeros((600,800,1), np.uint8)
for x, y in enumerate(data):
cv2.line(blank, (x * 3,600),(x * 3,600-y*2),(255,255,255))
return blank
def compare_histograms(a, b):
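# Chi-square comparison: 0 means identical histograms, larger values mean a bigger difference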
compare = cv2.compareHist(a, b, cv2.HISTCMP_CHISQR)
return compare
aimg = cv2.imread("/home/pi/Public/birbs/2021-01-22-11:02:44.jpg")
bimg = cv2.imread("/home/pi/Public/birbs/2021-01-22-11:02:33.jpg")
cimg = cv2.imread("/home/pi/Public/birbs/2021-01-22-11:02:21.jpg")
aimg = reduce_img(aimg)
bimg = reduce_img(bimg)
cimg = reduce_img(cimg)
delta = cv2.absdiff(aimg, cimg)
(ahist, adata) = build_histogram(aimg)
(bhist, bdata) = build_histogram(cimg)
ahistimg = draw_histogram(adata)
bhistimg = draw_histogram(bdata)
comparison = compare_histograms(ahist, bhist)
blank = np.zeros((600,800,1), np.uint8)
left = cv2.hconcat([ahistimg, bhistimg])
left = cv2.resize(left, (800, 300))
delta = cv2.resize(delta, (400, 300))
#quad = cv2.hconcat([left, right])
#quad = cv2.resize(quad, (800, 600))
#cv2.imshow('blank', delta)
#cv2.imshow('breakdown', quad)
#cv2.imshow('ahist', ahistimg)
#cv2.imshow('bhist', bhistimg)
cv2.imshow('histograms', left)
cv2.imshow('delta', delta)
print("Comparison: ", comparison)
while True:
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
cv2.destroyAllWindows()
```
#### File: birbcam/birbcam/imagemask.py
```python
from picamerax.array import PiRGBArray
from picamerax import PiCamera
from birbcam.common import draw_aim_grid, draw_mask
import cv2
from .rectanglegrabber import RectangleGrabber
class ImageMask:
maskWindowName = "Set Detection Region"
maskWindowResolution = (800, 600)
def __init__(self):
self._mask = (0.25, 0.25, 0.5, 0.5)
@property
def mask(self):
"""The region to mask"""
return self._mask
def run(self, camera):
cv2.namedWindow(self.maskWindowName)
self.maskRect = RectangleGrabber(
self.maskWindowName,
self.maskWindowResolution,
onDrag = lambda bounds: self.__set_mask_rect(bounds),
onEnd = lambda bounds: self.__set_mask_rect(bounds)
)
camera.resolution = self.maskWindowResolution
rawCapture = PiRGBArray(camera, size=self.maskWindowResolution)
keepGoing = self.__loop(camera, rawCapture)
cv2.destroyAllWindows()
return keepGoing
def __loop(self, camera, rawCapture):
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
image = frame.array
draw_mask(image, self._mask, self.maskWindowResolution)
draw_aim_grid(image, self.maskWindowResolution)
rawCapture.truncate(0)
cv2.imshow(self.maskWindowName, image)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
return True
if key == ord("x"):
return False
def __set_mask_rect(self, bounds):
(tl, br) = bounds
rx = self.maskWindowResolution[0]
ry = self.maskWindowResolution[1]
x = tl[0] / rx
y = tl[1] / ry
w = (br[0] - tl[0]) / rx
h = (br[1] - tl[1]) / ry
self._mask = (x, y, w, h)
```
#### File: birbcam/birbcam/optionflipper.py
```python
class OptionFlipper:
def __init__(self, options, start = 0, labels = None):
self.options = options
self.index = start
self.labels = labels
@property
def value(self):
return self.options[self.index]
@property
def label(self):
if self.labels == None:
return f"{self.value}"
else:
return self.labels[self.index]
@property
def is_at_start(self):
return self.index == 0
@property
def is_at_end(self):
return self.index == len(self.options) - 1
def next(self):
return self.__flip(1)
def previous(self):
return self.__flip(-1)
def __flip(self, delta):
try:
self.index += delta
if self.index >= len(self.options):
self.index = 0
if self.index < 0:
self.index = len(self.options) - 1
except ValueError:
self.index = 0
return self.options[self.index]
```
#### File: birbcam/tests/picturetaker_test.py
```python
from birbcam.picturetaker import PictureTaker, filename_filestamp, filename_live_picture
from time import time
from datetime import datetime
from collections import deque
class MockCamera:
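# Minimal stand-in for PiCamera: pops the next expected resolution on every assignment
# and asserts that capture() is called with the expected save path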
def __init__(self, startResolution, expectResolution, expectSaveTo):
self.startResolution = startResolution
self.expectResolution = deque(expectResolution)
self.expectSaveTo = expectSaveTo
self._resolution = startResolution
@property
def resolution(self):
return self._resolution
@resolution.setter
def resolution(self, r):
expect = self.expectResolution.popleft()
assert expect == r
def capture(self, path):
assert path == self.expectSaveTo
def test_filename_live_picture():
assert filename_live_picture() == "live.jpg"
def test_filename_filestamp():
date = datetime.now()
expect = date.strftime("%Y-%m-%d-%H-%M-%S") + ".jpg"
assert expect == filename_filestamp()
def test_should_immediately_be_ready_to_take_picture():
taker = PictureTaker("800x600", 5, ".", filename_live_picture)
assert taker.readyForPicture == True
def test_should_take_picture():
taker = PictureTaker("1600x1200", 5, ".", filename_live_picture)
mockCamera = MockCamera("800x600", ["1600x1200", "800x600"], "./" + filename_live_picture())
assert taker.take_picture(mockCamera)
```
#### File: birbcam/tests/rectanglegrabber_test.py
```python
from birbcam.rectanglegrabber import RectangleGrabber
from cv2 import EVENT_LBUTTONDOWN, EVENT_LBUTTONUP, EVENT_MOUSEMOVE
def test_tl_to_br_drag(mocker):
mock_handler = None
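# Patch cv2.setMouseCallback so the test can grab the handler RectangleGrabber registers
# and drive it with synthetic mouse events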
def mockSetClickHandler(window, handler):
nonlocal mock_handler
mock_handler = handler
def mock_drag_done(bounds):
assert bounds == ((300, 300), (400, 400))
mocker.patch(
'cv2.setMouseCallback',
mockSetClickHandler
)
rect = RectangleGrabber("test", (800,600), onEnd=mock_drag_done)
assert not rect.isDragging
mock_handler(EVENT_LBUTTONDOWN, 300, 300, "", "")
assert rect.isDragging
assert rect.bounds == ((300, 300), (300, 300))
mock_handler(EVENT_MOUSEMOVE, 400, 400, "", "")
assert rect.bounds == ((300, 300), (400, 400))
mock_handler(EVENT_LBUTTONUP, 500, 500, "", "")
def test_tl_to_br_drag_with_aspect_ratio(mocker):
mock_handler = None
def mockSetClickHandler(window, handler):
nonlocal mock_handler
mock_handler = handler
def mock_drag_done(bounds):
assert bounds == ((0, 0), (800, 800))
mocker.patch(
'cv2.setMouseCallback',
mockSetClickHandler
)
rect = RectangleGrabber("test", (800,800), preserveAspectRatio=True, onEnd=mock_drag_done)
assert not rect.isDragging
mock_handler(EVENT_LBUTTONDOWN, 0, 0, "", "")
assert rect.isDragging
assert rect.bounds == ((0, 0), (0, 0))
mock_handler(EVENT_MOUSEMOVE, 800, 600, "", "")
assert rect.bounds == ((0, 0), (800, 800))
mock_handler(EVENT_LBUTTONUP, 500, 500, "", "")
def test_tr_to_bl_drag(mocker):
mock_handler = None
def mockSetClickHandler(window, handler):
nonlocal mock_handler
mock_handler = handler
def mock_drag_done(bounds):
assert bounds == ((400, 200), (600, 400))
mocker.patch(
'cv2.setMouseCallback',
mockSetClickHandler
)
rect = RectangleGrabber("test", (800,600), onEnd=mock_drag_done)
assert not rect.isDragging
mock_handler(EVENT_LBUTTONDOWN, 600, 200, "", "")
assert rect.isDragging
assert rect.bounds == ((600, 200), (600, 200))
mock_handler(EVENT_MOUSEMOVE, 400, 400, "", "")
assert rect.bounds == ((400, 200), (600, 400))
mock_handler(EVENT_LBUTTONUP, 400, 400, "", "")
def test_tr_to_bl_drag_with_aspect_ratio(mocker):
mock_handler = None
def mockSetClickHandler(window, handler):
nonlocal mock_handler
mock_handler = handler
def mock_drag_done(bounds):
assert bounds == ((400, 0), (800, 400))
mocker.patch(
'cv2.setMouseCallback',
mockSetClickHandler
)
rect = RectangleGrabber("test", (800,800), preserveAspectRatio=True, onEnd=mock_drag_done)
assert not rect.isDragging
mock_handler(EVENT_LBUTTONDOWN, 800, 0, "", "")
assert rect.isDragging
assert rect.bounds == ((800, 0), (800, 0))
mock_handler(EVENT_MOUSEMOVE, 400, 200, "", "")
assert rect.bounds == ((400, 0), (800, 400))
mock_handler(EVENT_LBUTTONUP, 400, 400, "", "")
def test_br_to_tl_drag(mocker):
mock_handler = None
def mockSetClickHandler(window, handler):
nonlocal mock_handler
mock_handler = handler
def mock_drag_done(bounds):
assert bounds == ((400, 400), (600, 600))
mocker.patch(
'cv2.setMouseCallback',
mockSetClickHandler
)
rect = RectangleGrabber("test", (800,600), onEnd=mock_drag_done)
assert not rect.isDragging
mock_handler(EVENT_LBUTTONDOWN, 600, 600, "", "")
assert rect.isDragging
assert rect.bounds == ((600, 600), (600, 600))
mock_handler(EVENT_MOUSEMOVE, 400, 400, "", "")
assert rect.bounds == ((400, 400), (600, 600))
mock_handler(EVENT_LBUTTONUP, 400, 400, "", "")
def test_br_to_tl_drag_with_aspect_ratio(mocker):
mock_handler = None
def mockSetClickHandler(window, handler):
nonlocal mock_handler
mock_handler = handler
def mock_drag_done(bounds):
assert bounds == ((400, 400), (800, 800))
mocker.patch(
'cv2.setMouseCallback',
mockSetClickHandler
)
rect = RectangleGrabber("test", (800,800), preserveAspectRatio=True, onEnd=mock_drag_done)
assert not rect.isDragging
mock_handler(EVENT_LBUTTONDOWN, 800, 800, "", "")
assert rect.isDragging
assert rect.bounds == ((800, 800), (800, 800))
mock_handler(EVENT_MOUSEMOVE, 400, 200, "", "")
assert rect.bounds == ((400, 400), (800, 800))
mock_handler(EVENT_LBUTTONUP, 400, 400, "", "")
def test_bl_to_tr_drag(mocker):
mock_handler = None
def mockSetClickHandler(window, handler):
nonlocal mock_handler
mock_handler = handler
def mock_drag_done(bounds):
assert bounds == ((200, 400), (400, 600))
mocker.patch(
'cv2.setMouseCallback',
mockSetClickHandler
)
rect = RectangleGrabber("test", (800,600), onEnd=mock_drag_done)
assert not rect.isDragging
mock_handler(EVENT_LBUTTONDOWN, 200, 600, "", "")
assert rect.isDragging
assert rect.bounds == ((200, 600), (200, 600))
mock_handler(EVENT_MOUSEMOVE, 400, 400, "", "")
assert rect.bounds == ((200, 400), (400, 600))
mock_handler(EVENT_LBUTTONUP, 400, 400, "", "")
def test_bl_to_tr_drag_with_aspect_ratio(mocker):
mock_handler = None
def mockSetClickHandler(window, handler):
nonlocal mock_handler
mock_handler = handler
def mock_drag_done(bounds):
assert bounds == ((0, 400), (400, 800))
mocker.patch(
'cv2.setMouseCallback',
mockSetClickHandler
)
rect = RectangleGrabber("test", (800,800), preserveAspectRatio=True, onEnd=mock_drag_done)
assert not rect.isDragging
mock_handler(EVENT_LBUTTONDOWN, 0, 800, "", "")
assert rect.isDragging
assert rect.bounds == ((0, 800), (0, 800))
mock_handler(EVENT_MOUSEMOVE, 400, 200, "", "")
assert rect.bounds == ((0, 400), (400, 800))
mock_handler(EVENT_LBUTTONUP, 400, 400, "", "")
```
#### File: birbcam/tests/watch_test.py
```python
import unittest
from unittest.mock import patch, MagicMock, Mock
from birbcam.exposureadjust.watch import Watch
from birbcam.exposureadjust.adjustup import AdjustUp
from birbcam.exposureadjust.adjustdown import AdjustDown
@patch('time.time', MagicMock(return_value=25))
def test_not_time_to_update():
with patch('birbcam.exposureadjust.utils.calculate_exposure', MagicMock(return_value=100)) as mock_calculate_exposure:
mockChangeState = Mock()
watch = Watch(50)
watch.take_over(None, None, None, mockChangeState, 100, 10)
watch.update(None, None)
assert not mock_calculate_exposure.called
@patch('time.time', MagicMock(return_value=25))
@patch('birbcam.exposureadjust.utils.calculate_exposure', MagicMock(return_value=100))
def test_no_exposure_adjustment():
mockChangeState = Mock()
watch = Watch(50)
watch.take_over(None, None, None, mockChangeState, 100, 10)
watch.update(None, None)
assert not mockChangeState.called
@patch('time.time', MagicMock(return_value=25))
@patch('birbcam.exposureadjust.utils.calculate_exposure', MagicMock(return_value=80))
def test_step_up():
def mockChangeState(state):
assert state.__class__.__name__ == AdjustUp.__class__.__name__
watch = Watch(50)
watch.take_over(None, None, None, mockChangeState, 100, 10)
watch.update(None, None)
@patch('time.time', MagicMock(return_value=25))
@patch('birbcam.exposureadjust.utils.calculate_exposure', MagicMock(return_value=120))
def test_step_down():
def mockChangeState(state):
assert state.__class__.__name__ == AdjustDown.__class__.__name__
watch = Watch(50)
watch.take_over(None, None, None, mockChangeState, 100, 10)
watch.update(None, None)
```
|
{
"source": "JD-P/discord-hypothesis",
"score": 2
}
|
#### File: JD-P/discord-hypothesis/hypothesis_tracker.py
```python
import time
import json
import h_annot
class HypothesisTracker:
"""Run the Hypothesis search in a separate thread to prevent timing issues
with async Discord library."""
def __init__(self, results_list, list_lock, api_key, group_id):
self.results_list = results_list
self.list_lock = list_lock
self.api_key = api_key
self.group_id = group_id
# Track processed annotation ID's so we don't double report
try:
with open("processed.json") as infile:
self.processed = json.load(infile)
except IOError:
print("Failed to open processed entries!")
self.processed = []
def get_rows(self, new_results):
# Remove previous results
self.results_list.clear()
for row in new_results["rows"]:
if row["id"] in self.processed:
continue
else:
self.results_list.append(row)
#TODO: Figure out why this had double-report issues when recording 10 id's
self.processed = [row["id"] for row in new_results["rows"]]
print(self.processed)
with open("processed.json", "w") as outfile:
json.dump(self.processed, outfile)
def get_loop(self):
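# Poll the Hypothesis API every 240 seconds, holding the lock while the shared results list is rebuilt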
while 1:
self.list_lock.acquire()
new_results = json.loads(
h_annot.api.search(self.api_key,
group=self.group_id,
limit=10)
)
self.get_rows(new_results)
self.list_lock.release()
time.sleep(240)
```
|
{
"source": "JDPDO/nwg-panel",
"score": 2
}
|
#### File: nwg_panel/modules/dwl_tags.py
```python
from nwg_panel.tools import check_key
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk
class DwlTags(Gtk.EventBox):
def __init__(self, output, settings):
Gtk.EventBox.__init__(self)
check_key(settings, "tag-names", "1 2 3 4 5 6 7 8 9")
check_key(settings, "title-limit", 55)
self.output = output
self.settings = settings
names = self.settings["tag-names"].split()
self.tags = names if len(names) == 9 else ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
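# dwl reports tag state as bitmasks, so map each 1-based tag index to its bit value
# (1 << (index - 1)) for the bitwise checks in refresh()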
self.byte_dict = {1: 1, 2: 2, 3: 4, 4: 8, 5: 16, 6: 32, 7: 64, 8: 128, 9: 256}
self.box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
self.add(self.box)
self.label = Gtk.Label()
self.tag_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
self.box.pack_end(self.label, False, False, 4)
self.show_all()
def refresh(self, dwl_data):
if dwl_data:
try:
data = dwl_data[self.output]
tags_string = data["tags"]
tags = tags_string.split()
non_empty_output_tags = int(tags[0])
selected_output_tag = int(tags[1])
current_win_on_output_tags = int(tags[2])
urgent_tags = int(tags[3])
if self.tag_box:
self.tag_box.destroy()
self.tag_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
self.tag_box.set_property('name', 'dwl-tag-box')
self.box.pack_start(self.tag_box, False, False, 4)
cnt = 1
win_on_tags = []
for item in self.tags:
tag_wrapper = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
label = Gtk.Label()
tag_wrapper.pack_start(label, False, False, 0)
if self.byte_dict[cnt] == selected_output_tag:
tag_wrapper.set_property('name', "dwl-tag-selected")
self.tag_box.pack_start(tag_wrapper, False, False, 1)
label.set_text(item)
if self.byte_dict[cnt] & non_empty_output_tags != 0:
label.set_property('name', "dwl-tag-occupied")
else:
label.set_property('name', "dwl-tag-free")
if self.byte_dict[cnt] & urgent_tags != 0:
label.set_property('name', "dwl-tag-urgent")
if self.byte_dict[cnt] & current_win_on_output_tags != 0:
win_on_tags.append(str(cnt))
cnt += 1
self.tag_box.show_all()
layout = data["layout"]
title = data["title"]
if len(title) > self.settings["title-limit"]:
title = title[:self.settings["title-limit"] - 1]
# title suffix to add if win present on more than 1 tag
s = ", ".join(win_on_tags) if len(win_on_tags) > 1 else ""
if s:
title = "{} ({})".format(title, s)
# selmon = data["selmon"] == "1"
# print("{} {} {}".format(tags_string, layout, title))
self.label.set_text("{} {}".format(layout, title))
except KeyError:
print("No data found for output {}".format(self.output))
```
#### File: nwg_panel/modules/menu_start.py
```python
from gi.repository import Gtk
import subprocess
from nwg_panel.tools import check_key, update_image
class MenuStart(Gtk.Button):
def __init__(self, panel, icons_path=""):
Gtk.Button.__init__(self)
self.set_always_show_image(True)
self.panel = panel
check_key(panel, "menu-start-settings", {})
self.settings = panel["menu-start-settings"]
self.set_property("name", "button-start")
check_key(self.settings, "icon-size-button", 16)
image = Gtk.Image()
update_image(image, "nwg-shell", self.settings["icon-size-button"], icons_path)
self.set_image(image)
self.connect("clicked", self.on_click)
self.show()
def on_click(self, button):
cmd = "nwg-menu"
if self.settings["cmd-lock"] != "swaylock -f -c 000000":
cmd += " -cmd-lock '{}'".format(self.settings["cmd-lock"])
if self.settings["cmd-logout"] != "swaymsg exit":
cmd += " -cmd-logout '{}'".format(self.settings["cmd-logout"])
if self.settings["cmd-restart"] != "systemctl reboot":
cmd += " -cmd-restart '{}'".format(self.settings["cmd-restart"])
if self.settings["cmd-shutdown"] != "systemctl -i poweroff":
cmd += " -cmd-shutdown '{}'".format(self.settings["cmd-shutdown"])
if self.settings["autohide"]:
cmd += " -d"
if self.settings["file-manager"] != "thunar":
cmd += " -fm {}".format(self.settings["file-manager"])
if self.panel["menu-start"] == "right":
cmd += " -ha {}".format(self.panel["menu-start"])
if self.settings["height"] > 0:
cmd += " -height {}".format(self.settings["height"])
if self.settings["icon-size-large"] != 32:
cmd += " -isl {}".format(self.settings["icon-size-large"])
if self.settings["icon-size-small"] != 16:
cmd += " -iss {}".format(self.settings["icon-size-small"])
if self.settings["margin-bottom"] > 0:
cmd += " -mb {}".format(self.settings["margin-bottom"])
if self.settings["margin-left"] > 0:
cmd += " -ml {}".format(self.settings["margin-left"])
if self.settings["margin-right"] > 0:
cmd += " -mr {}".format(self.settings["margin-right"])
if self.settings["margin-top"] > 0:
cmd += " -mt {}".format(self.settings["margin-top"])
if self.panel["output"]:
cmd += " -o {}".format(self.panel["output"])
if self.settings["padding"] != 2:
cmd += " -padding {}".format(self.settings["padding"])
if self.settings["terminal"] != "foot":
cmd += " -term {}".format(self.settings["terminal"])
if self.panel["position"] != "bottom":
cmd += " -va {}".format(self.panel["position"])
if self.settings["width"] > 0:
cmd += " -width {}".format(self.settings["width"])
print("Executing '{}'".format(cmd))
subprocess.Popen('exec {}'.format(cmd), shell=True)
```
#### File: modules/sni_system_tray/__init__.py
```python
import typing
from threading import Thread
from . import host, watcher
from .tray import Tray
def init_tray(trays: typing.List[Tray]):
host_thread = Thread(target=host.init, args=[0, trays])
host_thread.daemon = True
host_thread.start()
watcher_thread = Thread(target=watcher.init)
watcher_thread.daemon = True
watcher_thread.start()
def deinit_tray():
host.deinit()
watcher.deinit()
```
#### File: modules/sni_system_tray/item.py
```python
from gi.repository import Gdk
from dasbus.connection import SessionMessageBus
from dasbus.client.observer import DBusObserver
from dasbus.client.proxy import disconnect_proxy
from dasbus.error import DBusError
PROPERTIES = [
"Id",
"Category",
"Title",
"Status",
"WindowId",
"IconName",
"IconPixmap",
"OverlayIconName",
"OverlayIconPixmap",
"AttentionIconName",
"AttentionIconPixmap",
"AttentionMovieName",
"ToolTip",
"IconThemePath",
"ItemIsMenu",
"Menu"
]
class StatusNotifierItem(object):
def __init__(self, service_name, object_path):
self.service_name = service_name
self.object_path = object_path
self.on_loaded_callback = None
self.on_updated_callback = None
self.session_bus = SessionMessageBus()
self.properties = {
"ItemIsMenu": True
}
self.item_proxy = None
self.item_observer = DBusObserver(
message_bus=self.session_bus,
service_name=self.service_name
)
self.item_observer.service_available.connect(
self.item_available_handler
)
self.item_observer.service_unavailable.connect(
self.item_unavailable_handler
)
self.item_observer.connect_once_available()
def __del__(self):
if self.item_proxy is not None:
disconnect_proxy(self.item_proxy)
self.item_observer.disconnect()
self.session_bus.disconnect()
def item_available_handler(self, _observer):
self.item_proxy = self.session_bus.get_proxy(self.service_name, self.object_path)
self.item_proxy.PropertiesChanged.connect(
lambda _if, changed, invalid: self.change_handler(list(changed), invalid)
)
self.item_proxy.NewTitle.connect(
lambda: self.change_handler(["Title"])
)
self.item_proxy.NewIcon.connect(
lambda: self.change_handler(["IconName", "IconPixmap"])
)
self.item_proxy.NewAttentionIcon.connect(
lambda: self.change_handler(["AttentionIconName", "AttentionIconPixmap"])
)
if hasattr(self.item_proxy, "NewIconThemePath"):
self.item_proxy.NewIconThemePath.connect(
lambda _icon_theme_path: self.change_handler(["IconThemePath"])
)
self.item_proxy.NewStatus.connect(
lambda _status: self.change_handler(["Status"])
)
for name in PROPERTIES:
try:
self.properties[name] = getattr(self.item_proxy, name)
except (AttributeError, DBusError):
# remote StatusNotifierItem object does not support all SNI properties
pass
if self.on_loaded_callback is not None:
self.on_loaded_callback(self)
def item_unavailable_handler(self, _observer):
disconnect_proxy(self.item_proxy)
self.item_proxy = None
def change_handler(self, changed_properties: list[str], invalid_properties: list[str] = None):
if invalid_properties is None:
invalid_properties = []
actual_changed_properties = []
if len(changed_properties) > 0:
for name in changed_properties:
try:
self.properties[name] = getattr(self.item_proxy, name)
actual_changed_properties.append(name)
except (AttributeError, DBusError):
pass
if len(invalid_properties) > 0:
for name in invalid_properties:
if name in self.properties:
self.properties.pop(name)
if len(actual_changed_properties) > 0:
if self.on_updated_callback is not None:
self.on_updated_callback(self, actual_changed_properties)
def set_on_loaded_callback(self, callback):
self.on_loaded_callback = callback
def set_on_updated_callback(self, callback):
self.on_updated_callback = callback
@property
def item_is_menu(self):
if "ItemIsMenu" in self.properties:
return self.properties["ItemIsMenu"]
else:
return False
def context_menu(self, event: Gdk.EventButton):
self.item_proxy.ContextMenu(event.x, event.y)
def activate(self, event: Gdk.EventButton):
self.item_proxy.Activate(event.x, event.y)
def secondary_action(self, event: Gdk.EventButton):
self.item_proxy.SecondaryAction(event.x, event.y)
def scroll(self, distance, direction):
self.item_proxy.Scroll(distance, direction)
```
#### File: modules/sni_system_tray/menu.py
```python
import typing
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("DbusmenuGtk3", "0.4")
from gi.repository import Gdk, Gtk, DbusmenuGtk3
from dasbus.connection import SessionMessageBus
from dasbus.client.observer import DBusObserver
class Menu(object):
def __init__(self, service_name, object_path, settings, event_box: Gtk.EventBox, item):
self.service_name = service_name
self.object_path = object_path
self.settings = settings
self.event_box = event_box
self.item = item
self.session_bus = SessionMessageBus()
self.menu_widget: typing.Union[None, DbusmenuGtk3.Menu] = None
self.distance_scrolled_x = 0
self.distance_scrolled_y = 0
self.event_box.connect("button-press-event", self.button_press_event_handler)
self.event_box.add_events(Gdk.EventMask.SCROLL_MASK | Gdk.EventMask.SMOOTH_SCROLL_MASK)
self.event_box.connect("scroll-event", self.scroll_event_handler)
self.menu_observer = DBusObserver(
message_bus=self.session_bus,
service_name=self.service_name
)
self.menu_observer.service_available.connect(
self.menu_available_handler
)
self.menu_observer.service_unavailable.connect(
self.menu_unavailable_handler
)
self.menu_observer.connect_once_available()
def __del__(self):
self.menu_observer.disconnect()
self.session_bus.disconnect()
def menu_available_handler(self, _observer):
print(
"Menu -> menu_available_handler: Connecting to menu over dbus:\n"
" service_name: {}\n"
" object_path: {}".format(
self.service_name,
self.object_path
)
)
self.menu_widget = DbusmenuGtk3.Menu().new(
dbus_name=self.service_name,
dbus_object=self.object_path
)
self.menu_widget.show()
def menu_unavailable_handler(self, _observer):
self.event_box.disconnect_by_func(self.button_press_event_handler)
def button_press_event_handler(self, _w, event: Gdk.EventButton):
if (event.button == 1 and self.item.item_is_menu) or event.button == 3:
if self.menu_widget is not None:
self.menu_widget.popup_at_widget(
self.event_box,
Gdk.Gravity.SOUTH,
Gdk.Gravity.NORTH,
event
)
else:
self.item.context_menu(event)
elif event.button == 1:
self.item.activate(event)
elif event.button == 2:
self.item.secondary_action(event)
def scroll_event_handler(self, _w, event: Gdk.EventScroll):
dx = 0
dy = 0
if event.direction == Gdk.ScrollDirection.UP:
dy = -1
elif event.direction == Gdk.ScrollDirection.DOWN:
dy = 1
elif event.direction == Gdk.ScrollDirection.LEFT:
dx = -1
elif event.direction == Gdk.ScrollDirection.RIGHT:
dx = 1
elif event.direction == Gdk.ScrollDirection.SMOOTH:
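# Smooth (touchpad) scrolling arrives as fractional deltas; accumulate them and only
# scroll once the configured threshold is crossed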
self.distance_scrolled_x += event.delta_x
self.distance_scrolled_y += event.delta_y
if self.distance_scrolled_x > self.settings["smooth-scrolling-threshold"]:
dx = max((self.distance_scrolled_x, 1.0))
elif self.distance_scrolled_x < self.settings["smooth-scrolling-threshold"]:
dx = min((self.distance_scrolled_x, -1.0))
if self.distance_scrolled_y > self.settings["smooth-scrolling-threshold"]:
dy = max((self.distance_scrolled_y, 1.0))
elif self.distance_scrolled_y < self.settings["smooth-scrolling-threshold"]:
dy = min((self.distance_scrolled_y, -1.0))
if dx != 0:
self.item.scroll(dx, "horizontal")
if dy != 0:
self.item.scroll(dy, "vertical")
```
|
{
"source": "jdpearce4/minipipe",
"score": 3
}
|
#### File: minipipe/minipipe/pipelines.py
```python
from minipipe.pipes import Source
from minipipe.base import Logger, Stream
from multiprocessing import Process, Event
from collections import Counter
from graphviz import Digraph
from copy import copy
class PipeSystem(object):
"""
PipeSystem connects Pipes and creates process pool. Pipes are run and closed with a built PipeSystem.
Toy example:
.. code-block:: python
# Define functors
def genRand(n=10):
for _ in range(n):
yield np.random.rand(10)
def batch(batch_size=2):
x = (yield)
for i in range(len(x)//batch_size):
yield x[i*batch_size:(i+1)*batch_size]
def sumBatch(x):
return x.sum()
def split(x):
return [x, None] if x > 1 else [None, x]
def output_gt_1(x):
print('1 <', x)
def output_lt_1(x):
print('1 >', x)
# Define streams
s1, s2, s3, s4, s5 = Stream(), Stream(), Stream(), Stream(), Stream()
# Create Pipe segments with up/downstreams
# Order is not important
pipes = [
Source(genRand, 'source1', downstreams=[s1]),
Source(genRand, 'source2', downstreams=[s1]),
Regulator(batch, 'batcher', upstreams=[s1], downstreams=[s2]),
Transform(sumBatch, 'sum', upstreams=[s2], downstreams=[s3]),
Transform(sumBatch, 'sum', upstreams=[s2], downstreams=[s3]),
Transform(sumBatch, 'sum', upstreams=[s2], downstreams=[s3]),
Transform(split, 'split', upstreams=[s2], downstreams=[s4, s5]),
Sink(output_gt_1, 'print_gt_1', upstreams=[s4]),
Sink(output_lt_1, 'print_lt_1', upstreams=[s5]),
]
# Build pipesystem
psys = PipeSystem(pipes)
psys.build()
# Run pipesystem
psys.run()
psys.close()
:param pipes: List[Pipes], List of Pipes with their upstreams/downstreams
:return: None
"""
def __init__(self, pipes):
self.pipes = pipes
self.streams = None
self.processes = None
self.built = False
def build(self, log_lvl='INFO', monitor=False, ignore_exceptions=None):
"""Connects pipe segments together and builds graph.
:param log_lvl: String, log level, one of: info, debug, warning, error or critical
:param monitor: Bool, log stream I/O times
:param ignore_exceptions: List of exceptions to ignore while pipeline is running
:return: None
"""
self.log_lvl = log_lvl
self.monitor = monitor
self.ignore_exceptions = ignore_exceptions
self.logger = Logger(log_lvl)
self.global_term = Event()
# Handle name collisions
pnames = Counter([p.name for p in self.pipes])
for pname, cnt in pnames.items():
if cnt > 1:
p_with_collisions = filter(lambda x: x.name==pname, self.pipes)
for i, p in enumerate(p_with_collisions):
p.name += '_{}'.format(i)
# Connect graph and set global term flag
for p in self.pipes:
if self.ignore_exceptions is not None:
p.ignore_exceptions = self.ignore_exceptions
p.set_global_term_flag(self.global_term)
self._connect_streams(p)
# For each pipe count all upstreams and downstreams.
# Counts are used to determine the number of sentinels a pipe should receive before terminating.
for p in self.pipes:
self._count_relatives(p)
# Create process pool
self.processes = [Process(target=p.run_pipe, args=[p.name], name=p.name)
for p in self.pipes]
# Add logger to each pipe
for p in self.pipes:
p.set_logger(self.logger)
for s in p.downstreams + p.upstreams:
if (s.monitor or self.monitor) and s.logger is None:
s.set_logger(self.logger)
self.built = True
def run(self):
"""Runs pipeline."""
if self.processes is None:
self.build()
for proc in self.processes:
proc.start()
def close(self):
"""Joins pipeline."""
for proc in self.processes:
proc.join()
def reset(self):
"""Resets pipeline."""
self.global_term.clear()
for p in self.pipes:
p.reset()
p.set_logger(self.logger)
# Create new processes since they can only be started once
self.processes = [Process(target=p.run_pipe, args=[p.name], name=p.name)
for p in self.pipes]
def _connect_streams(self, p):
for stream in p.get_downstreams():
stream.add_pipe_out(p)
for stream in p.get_upstreams():
stream.add_pipe_in(p)
def _count_relatives(self, p):
for stream in p.get_downstreams():
for p_in in stream.pipes_in:
p_in._n_ances += 1
for stream in p.get_upstreams():
for p_out in stream.pipes_out:
p_out._n_desc += 1
def diagram(self, draw_streams=False):
"""Draws a graph diagram of pipeline.
:params draw_streams: Bool, if True Streams will be included in graph diagram
:return: graphviz Digraph object
"""
assert(self.built==True), "ERROR: PipeSystem must be built first"
g = Digraph()
edges = set()
# Assumes graph is a DAG thus iterate over downstreams only
# There can only be one edge between nodes
for p in self.pipes:
g.node(p._id(), p.name)
for s in p.get_downstreams():
if draw_streams:
g.node(s._id(), s.name, shape='rectangle')
edge = (p._id(), s._id())
if edge not in edges:
edges.add(edge)
g.edge(*edge)
for p_in in s.pipes_in:
g.node(p_in._id(), p_in.name)
if draw_streams:
edge = (s._id(), p_in._id())
else:
edge = (p._id(), p_in._id())
if edge not in edges:
edges.add(edge)
g.edge(*edge)
return g
class PipeLine(PipeSystem):
"""
A simplified API for linear PipeSytems.
Toy example:
.. code-block:: python
# Define functors
def genRand(n=10):
for _ in range(n):
yield np.random.rand(10)
def batch(batch_size=2):
x = (yield)
for i in range(len(x)//batch_size):
yield x[i*batch_size:(i+1)*batch_size]
def sumBatch(x):
return x.sum()
def print_out(x):
print (x)
# Define pipeline
pline = PipeLine()
pline.add(Source(genRand, 'source'))
pline.add(Regulator(batch, 'batcher'), buffer_size = 10)
pline.add(Transform(sumBatch, 'sum'), n_processes = 3)
pline.add(Sink(print_out, 'print'))
# Build pipeline
pline.build()
# Run pipeline
pline.run()
pline.close()
:param monitor: Bool, log stream I/O times
:param queue_type: String, multiprocessing queue type to be used. Valid types: 'multiprocessing.Queue', 'multiprocessing.SimpleQueue'
:return: None
"""
def __init__(self, monitor=False, queue_type='multiprocessing.Queue'):
self.monitor = monitor
self.queue_type = queue_type
self.segments = []
self.pipes = []
self.sid = 0
super(PipeLine, self).__init__(self.pipes)
def add(self, pipe, n_processes=1, buffer_size=3):
"""Adds a pipe segment to the pipeline.
:param pipe: Pipe segment to add to PipeLine
:param n_processes: Number of processes (workers) to assign to pipe segment
:param buffer_size: Size of Stream buffer
:return: None
"""
# Python generators cannot be split
assert not (isinstance(pipe, Source) and n_processes > 1), 'Use PipeSystem API for multiple Sources'
# After the Source connect each segment with a Stream
if len(self.segments) > 0:
s = Stream(buffer_size=buffer_size,
name = 'stream_{}'.format(self.sid),
monitor=self.monitor,
queue_type=self.queue_type)
self.sid += 1
for seg in self.segments[-1]:
seg.set_downstreams([s])
pipe.set_upstreams([s])
# Create a copy of the pipe segment for each process
seg = [copy(pipe) for _ in range(n_processes)]
self.pipes += seg
self.segments.append(seg)
```
#### File: minipipe/minipipe/pipes.py
```python
from minipipe.base import Pipe, Sentinel
class Source(Pipe):
"""
Source pipes are used to load and/or generate data. Sources have no upstreams, but will have one or more
downstreams. Functor must be a valid Python generator. The generator should be initialized before passing
as argument.
:param functor: Python generator
:param name: String associated with pipe segment
:param downstreams: List of Streams that are outputs of functor
:param ignore_exceptions: List of exceptions to ignore while pipeline is running
"""
def __init__(self, functor_obj, name='Source', downstreams=None, ignore_exceptions=None):
# Source has no upstreams
super(Source, self).__init__(functor_obj, name, None, downstreams, ignore_exceptions)
def run_functor(self):
# Generator functors are used for sources
# Will terminate once end of generator is reached
x = None
while self._continue():
try:
# Get next item from generator
try:
x = next(self.functor)
except TypeError:
# If generator is not initialized
self.functor = self.functor()
x = next(self.functor)
# Check for Sentinel signaling termination
if x is Sentinel:
self._terminate_local()
break
# Do nothing on python None
if x is None:
continue
self._out(x)
# Terminate once end of generator is reached
except StopIteration:
self._logger.log('End of stream', self.name)
self._terminate_local()
# These exceptions are ignored raising WARNING only
except BaseException as e:
if e.__class__ in self.ignore_exceptions:
self._logger.log(str(e), self.name, 'warning')
continue
else:
self._logger.log(str(e), self.name, 'error')
self._terminate_global()
raise e
class Sink(Pipe):
"""
Sink pipes are typically used to save/output data. Sinks have no downstreams, but will have one or more
upstreams. Functor should be either a Python function or class with a "run" method and optionally
"local_init" and "local_term" methods. Local_init, if supplied will be called once on the local process
before run, while local_term will be called once afterwards.
:param functor: Python function or class
:param name: String associated with pipe segment
:param upstreams: List of Streams that are inputs to functor
:param init_kwargs: Kwargs to initiate class object on process (not used when func_type = 'function')
:param ignore_exceptions: List of exceptions to ignore while pipeline is running
"""
def __init__(self, functor_obj, name='Sink', upstreams=None,
ignore_exceptions=None, init_kwargs=None):
# Sink has no downstreams
super(Sink, self).__init__(functor_obj, name, upstreams, None,
ignore_exceptions, init_kwargs)
def run_functor(self):
x = None
while self._continue():
try:
x = self._in()
# Check for Sentinel signaling termination
if self._contains_sentinel(x):
if self._terminate_local():
break
else:
continue
# Do nothing on python None
if self._contains_none(x):
continue
x = self.functor(*x)
self._out(x)
# These exceptions are ignored raising WARNING only
except BaseException as e:
if e.__class__ in self.ignore_exceptions:
self._logger.log(str(e), self.name, 'warning')
continue
else:
self._logger.log(str(e), self.name, 'error')
self._terminate_global()
raise e
class Transform(Pipe):
"""
Transform pipes are used to perform arbitrary transformations on data. Transforms will have one or more
upstreams and downstreams. Functor should be either a Python function or class with a "run" method and optionally
"local_init" and "local_term" methods. Local_init, if supplied will be called once on the local process
before run, while local_term will be called once afterwards.
:param functor: Python function or class
:param name: String associated with pipe segment
:param upstreams: List of Streams that are inputs to functor
:param downstreams: List of Streams that are outputs of functor
:param init_kwargs: Kwargs to initiate class object on process (not used when func_type = 'function')
:param ignore_exceptions: List of exceptions to ignore while pipeline is running
"""
def __init__(self, functor_obj, name='Transform', upstreams=None, downstreams=None,
ignore_exceptions=None, init_kwargs=None):
super(Transform, self).__init__(functor_obj, name, upstreams, downstreams,
ignore_exceptions, init_kwargs)
def run_functor(self):
x = None
while self._continue():
try:
x = self._in()
# Check for Sentinel signaling termination
if self._contains_sentinel(x):
if self._terminate_local():
break
else:
continue
# Do nothing on python None
if self._contains_none(x):
continue
x = self.functor(*x)
self._out(x)
# These exceptions are ignored raising WARNING only
except BaseException as e:
if e.__class__ in self.ignore_exceptions:
self._logger.log(str(e), self.name, 'warning')
continue
else:
self._logger.log(str(e), self.name, 'error')
self._terminate_global()
raise e
class Regulator(Pipe):
"""
Regulator pipes are a special type of transformation that changes the data chunk throughput, typically used
for batching or accumulating data. Regulators can have both upstreams and downstreams. Functor should be a
Python coroutine. The coroutine should not be initialized, instead use init_kwargs to initialize on the local
process.
:param functor: Python coroutines
:param name: String associated with pipe segment
:param upstreams: List of Streams that are inputs to functor
:param downstreams: List of Streams that are outputs of functor
:param init_kwargs: Kwargs to initiate class object on process (not used when func_type = 'function')
:param ignore_exceptions: List of exceptions to ignore while pipeline is running
"""
def __init__(self, functor_obj, name='Regulator', upstreams=None, downstreams=None,
ignore_exceptions=None, init_kwargs=None):
super(Regulator, self).__init__(functor_obj, name, upstreams, downstreams,
ignore_exceptions, init_kwargs)
def run_functor(self):
# Coroutine functors act as a transformation and source
# Useful when the data needs to be broken up or accumulated
# On StopIteration coroutine is reset
coroutine = self.functor(**self.init_kwargs)
next(coroutine)
x = None
while self._continue():
try:
x = self._in()
# Check for Sentinel signaling termination
if self._contains_sentinel(x):
if self._terminate_local():
break
else:
continue
# Do nothing on python None
if self._contains_none(x):
continue
# Send data to coroutine
x_i = coroutine.send(*x)
# Iterate over coroutine output
while x_i is not None:
self._out(x_i)
try:
x_i = next(coroutine)
except StopIteration:
# Reset coroutine for next data
coroutine = self.functor(**self.init_kwargs)
next(coroutine)
break
# These exceptions are ignored raising WARNING only
except BaseException as e:
if e.__class__ in self.ignore_exceptions:
self._logger.log(str(e), self.name, 'warning')
continue
else:
self._logger.log(str(e), self.name, 'error')
self._terminate_global()
raise e
```
|
{
"source": "jdpeinado/BodyMeasureControlWeb",
"score": 3
}
|
#### File: BodyMeasureControlWeb/entrymeasures/forms.py
```python
from django import forms
# Models
from entrymeasures.models import EntryMeasure
from django_measurement.models import MeasurementField
# Utils
from measurement.measures import Weight,Distance
class CompareEntryMeasureForm(forms.Form):
"""Entrymeasures to compare"""
date_measure1 = forms.DateField()
date_measure2 = forms.DateField()
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(CompareEntryMeasureForm, self).__init__(*args, **kwargs)
def clean_date_measure1(self):
"""Verify date measures 1 exists"""
date_measure1 = self.cleaned_data['date_measure1']
if EntryMeasure.objects.filter(active=True, user=self.request.user,date_measure=date_measure1).count()==1:
return date_measure1
raise forms.ValidationError('There is no measure in that date.')
def clean_date_measure2(self):
"""Verify date measures 2 exists"""
date_measure2 = self.cleaned_data['date_measure2']
if EntryMeasure.objects.filter(active=True, user=self.request.user,date_measure=date_measure2).count()==1:
return date_measure2
raise forms.ValidationError('There is no measure in that date.')
class UpdateEntryMeasureForm(forms.ModelForm):
"""EntryMeasure update model form."""
clear_front_image = forms.BooleanField(required=False)
clear_side_image = forms.BooleanField(required=False)
clear_back_image = forms.BooleanField(required=False)
chest = forms.DecimalField(max_digits=4, decimal_places=1)
waist = forms.DecimalField(max_digits=4, decimal_places=1)
hip = forms.DecimalField(max_digits=4, decimal_places=1)
leg = forms.DecimalField(max_digits=4, decimal_places=1)
bicep = forms.DecimalField(max_digits=4, decimal_places=1)
bodyweight = forms.DecimalField(max_digits=4, decimal_places=1)
class Meta:
"""Update form field"""
model = EntryMeasure
fields = (
'user',
'profile',
'front_image_url',
'side_image_url',
'back_image_url',
)
def save(self):
"""Update a entrymeasure"""
entrymeasure = self.instance
if 'delete' in self.data:
entrymeasure.delete()
return EntryMeasure()
else:
data = self.cleaned_data
user = data['user']
profile = data['profile']
if profile.measurement_system == 'METRIC':
entrymeasure.bodyweight = Weight(kg=data['bodyweight'])
entrymeasure.chest = Distance(cm=data['chest'])
entrymeasure.waist = Distance(cm=data['waist'])
entrymeasure.hip = Distance(cm=data['hip'])
entrymeasure.leg = Distance(cm=data['leg'])
entrymeasure.bicep = Distance(cm=data['bicep'])
else:
entrymeasure.bodyweight = Weight(lb=data['bodyweight'])
entrymeasure.chest = Distance(inch=data['chest'])
entrymeasure.waist = Distance(inch=data['waist'])
entrymeasure.hip = Distance(inch=data['hip'])
entrymeasure.leg = Distance(inch=data['leg'])
entrymeasure.bicep = Distance(inch=data['bicep'])
entrymeasure.user = user
entrymeasure.profile = profile
if 'front_image_url' in self.changed_data:
entrymeasure.front_image_url = data['front_image_url']
else:
if data['clear_front_image']:
entrymeasure.front_image_url = None
if 'side_image_url' in self.changed_data:
entrymeasure.side_image_url = data['side_image_url']
else:
if data['clear_side_image']:
entrymeasure.side_image_url = None
if 'back_image_url' in self.changed_data:
entrymeasure.back_image_url = data['back_image_url']
else:
if data['clear_back_image']:
entrymeasure.back_image_url = None
entrymeasure.save()
return entrymeasure
class EntryMeasureForm(forms.ModelForm):
"""EntryMeasure model form."""
chest = forms.DecimalField(max_digits=4, decimal_places=1)
waist = forms.DecimalField(max_digits=4, decimal_places=1)
hip = forms.DecimalField(max_digits=4, decimal_places=1)
leg = forms.DecimalField(max_digits=4, decimal_places=1)
bicep = forms.DecimalField(max_digits=4, decimal_places=1)
bodyweight = forms.DecimalField(max_digits=4, decimal_places=1)
class Meta:
"""Form settings."""
model = EntryMeasure
fields = (
'user',
'profile',
'date_measure',
'front_image_url',
'side_image_url',
'back_image_url',
)
def save(self):
"""Save a entrymeasure"""
data = self.cleaned_data
user = data['user']
profile = data['profile']
entrymeasure = EntryMeasure()
if profile.measurement_system == 'METRIC':
entrymeasure.bodyweight = Weight(kg=data['bodyweight'])
entrymeasure.chest = Distance(cm=data['chest'])
entrymeasure.waist = Distance(cm=data['waist'])
entrymeasure.hip = Distance(cm=data['hip'])
entrymeasure.leg = Distance(cm=data['leg'])
entrymeasure.bicep = Distance(cm=data['bicep'])
else:
entrymeasure.bodyweight = Weight(lb=data['bodyweight'])
entrymeasure.chest = Distance(inch=data['chest'])
entrymeasure.waist = Distance(inch=data['waist'])
entrymeasure.hip = Distance(inch=data['hip'])
entrymeasure.leg = Distance(inch=data['leg'])
entrymeasure.bicep = Distance(inch=data['bicep'])
entrymeasure.user = user
entrymeasure.profile = profile
entrymeasure.date_measure = data['date_measure']
if data['front_image_url']:
entrymeasure.front_image_url = data['front_image_url']
if data['side_image_url']:
entrymeasure.side_image_url = data['side_image_url']
if data['back_image_url']:
entrymeasure.back_image_url = data['back_image_url']
entrymeasure.save()
return entrymeasure
```
|
{
"source": "jdperea/sistemasoperativos2",
"score": 4
}
|
#### File: jdperea/sistemasoperativos2/problemaMark.py
```python
cantidad_instrucciones = int(input())
# Define the dictionary of folders
diccionario_carpetas = {}
# Define Mark's dictionary
diccionario_carpetas["Mark"] = {}
# Define the current location at startup
ubicacion_actual = "/Mark"
# Define the print procedure, receiving the parent directory, the dictionary of directories and the nesting level
def imprimir(directorio_padre, directorios, nivel):
# Print the parent directory
print(directorio_padre+":")
# Loop over the entries in directorios, storing each one in directorio_hijo
for directorio_hijo in directorios:
# Print the double dash once per nesting level before the child
print("--"*(nivel+1),end="")
# Recurse into the child directory
imprimir(directorio_hijo, directorios[directorio_hijo],nivel+1)
# Define the insert method, which receives the location and the name of the new folder
def insertar(ubicacion, nueva_carpeta):
# navegacion starts at the root of the folder dictionary
navegacion = diccionario_carpetas
# Split the location (e.g. parent/child/grandchild) into a list of directories
directorios = ubicacion.split("/")[1:]
# Walk down the directories one by one
for directorio in directorios:
navegacion = navegacion[directorio]
# Create the new folder as an empty dictionary
navegacion[nueva_carpeta] = {}
for _ in range(cantidad_instrucciones):
# Loop over the number of instructions
instruccion, argumento = input().split(" ")
# Check whether the instruction is cd
if instruccion == "cd":
# process the cd instruction
if argumento == "..":
# ".." argument: go back up one level
# drop the last directory from the current path
ubicacion_actual = "/".join(ubicacion_actual.split("/")[:-1])
else:
# enter a folder
ubicacion_actual = ubicacion_actual + "/" + argumento
else:
# process mkdir
insertar(ubicacion_actual, argumento)
# Call the print function with the folder dictionary
imprimir("Mark", diccionario_carpetas["Mark"], 0)
```
|
{
"source": "jdpgrailsdev/advent_of_code_2021",
"score": 3
}
|
#### File: advent_of_code_2021/advent_of_code/cli.py
```python
import re
from os import listdir, path
import click
@click.group()
def cli() -> None:
"""Entry point for the CLI to bind all commands together."""
click.echo("Advent of Code 2021")
@cli.command()
def execute():
"""Executes each puzzle"""
modules = sorted(listdir(path.dirname(__file__) + "/puzzles"))
for m in modules:
if re.search("Puzzle\\d+\\.py", m):
class_name = m.replace(".py", "")
module = __import__(f"advent_of_code.puzzles.{class_name}")
puzzles = getattr(module, "puzzles")
puzzle_module = getattr(puzzles, class_name)
puzzle_class = getattr(puzzle_module, class_name)
puzzle_instance = puzzle_class()
puzzle_instance.execute()
```
#### File: advent_of_code/puzzles/Puzzle05.py
```python
import math
import os
from advent_of_code.puzzles.PuzzleInterface import PuzzleInterface
INPUT_FILE_NAME = os.path.join(os.path.dirname(__file__), "../input/05.txt")
class Puzzle05(PuzzleInterface):
"""Defines the day 5 puzzle"""
def execute(self) -> None:
"""Executes the day 5 puzzle"""
input_file = open(INPUT_FILE_NAME, "r")
data = input_file.read().splitlines()
self.__part1(data)
self.__part2(data)
def __part1(self, data):
"""Executes the day 5 puzzle part 1"""
coordinates = self.__build_coordinates(data)
diagram = self.__build_diagram(coordinates)
hostile_point_count = self.__count_hostile_points(diagram)
print(f"#05 (part 1) - The number of hostile points is {hostile_point_count}")
def __part2(self, data):
"""Executes the day 5 puzzle part 2"""
coordinates = self.__build_coordinates(data, True)
diagram = self.__build_diagram(coordinates)
hostile_point_count = self.__count_hostile_points(diagram)
print(f"#05 (part 2) - The number of hostile points is {hostile_point_count}")
def __build_coordinates(self, data, include_diagonal=False):
coordinates = []
for d in data:
entry = d.split(" -> ")
starting = tuple(map(int, entry[0].split(",")))
ending = tuple(map(int, entry[1].split(",")))
x_diff = abs(starting[0] - ending[0])
y_diff = abs(starting[1] - ending[1])
if x_diff == 0 or y_diff == 0:
if x_diff > 0:
starting_x = min(starting[0], ending[0])
for x in range(0, x_diff + 1):
coordinates.append(tuple([starting_x + x, starting[1]]))
else:
starting_y = min(starting[1], ending[1])
for y in range(0, y_diff + 1):
coordinates.append(tuple([starting[0], starting_y + y]))
if include_diagonal:
degrees = math.atan2(x_diff, y_diff) * (180 / math.pi)
if degrees == 45.0:
x_increment = 1 if starting[0] < ending[0] else -1
y_increment = 1 if starting[1] < ending[1] else -1
x = starting[0]
y = starting[1]
for i in range(0, x_diff + 1):
coordinates.append(tuple([x, y]))
x += x_increment
y += y_increment
return coordinates
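# For illustration (a made-up input line, not the real puzzle input): the entry
# "0,9 -> 5,9" is horizontal (y_diff == 0), so it expands to the coordinates
# (0,9), (1,9), (2,9), (3,9), (4,9), (5,9).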
def __build_diagram(self, coordinates):
diagram = {}
for c in coordinates:
x = c[0]
y = c[1]
if y in diagram:
counts = diagram[y]
if x in counts:
counts[x] += 1
else:
counts[x] = 1
else:
diagram[y] = {x: 1}
return diagram
def __count_hostile_points(self, diagram):
hostile_point_count = 0
for d in diagram:
for p in diagram[d]:
if diagram[d][p] >= 2:
hostile_point_count += 1
return hostile_point_count
```
#### File: advent_of_code/puzzles/Puzzle11.py
```python
import os
from advent_of_code.puzzles.PuzzleInterface import PuzzleInterface
INPUT_FILE_NAME = os.path.join(os.path.dirname(__file__), "../input/11.txt")
class Puzzle11(PuzzleInterface):
"""Defines the day 11 puzzle"""
def execute(self) -> None:
"""Executes the day 11 puzzle"""
input_file = open(INPUT_FILE_NAME, "r")
data = input_file.read().splitlines()
data = self.__build_grid(data)
self.__part1(data)
self.__part2(data)
def __part1(self, data):
"""Executes the day 11 puzzle part 1"""
total_flashes = 0
for i in range(0, 100):
for r, d in enumerate(data):
for c, o in enumerate(d):
data[r][c] = tuple([o[0] + 1, o[1]])
total_flashes += self.__flash(data)
data = self.__reset(data)
print(
"#11 (part 1) - The total number of flashes after 100 turns is"
f" {total_flashes}"
)
def __part2(self, data):
"""Executes the day 11 puzzle part 2"""
flash_count = 0
step = 0
octopi_count = len(data) * len(data[0])
while flash_count != octopi_count:
step += 1
for r, d in enumerate(data):
for c, o in enumerate(d):
data[r][c] = tuple([o[0] + 1, o[1]])
flash_count = self.__flash(data)
data = self.__reset(data)
print(f"#11 (part 2) - All octopi flash during step {step}")
def __build_grid(self, data):
grid = []
for d in data:
row = []
for o in d:
row.append(tuple([int(o), False]))
grid.append(row)
return grid
def __flash(self, data):
flash_count = 0
for ir, r in enumerate(data):
for ic, c in enumerate(r):
if c[0] > 9 and c[1] is False:
flash_count += 1
data[ir][ic] = tuple([c[0], True])
data = self.__update_adjacent(data, ir, ic)
if flash_count > 0:
flash_count += self.__flash(data)
return flash_count
def __reset(self, data):
for r, d in enumerate(data):
for c, o in enumerate(d):
value = 0 if o[1] is True else o[0]
data[r][c] = tuple([value, False])
return data
def __update_adjacent(self, data, row, col):
for r in range(row - 1, row + 2):
for c in range(col - 1, col + 2):
if 0 <= r < len(data):
if 0 <= c < len(data[0]):
if r != row or c != col:
d = data[r][c]
data[r][c] = tuple([d[0] + 1, d[1]])
return data
```
#### File: advent_of_code/puzzles/Puzzle12.py
```python
import os
from advent_of_code.puzzles.PuzzleInterface import PuzzleInterface
INPUT_FILE_NAME = os.path.join(os.path.dirname(__file__), "../input/12.txt")
class Puzzle12(PuzzleInterface):
"""Defines the day 12 puzzle"""
def execute(self) -> None:
"""Executes the day 12 puzzle"""
input_file = open(INPUT_FILE_NAME, "r")
data = input_file.read().splitlines()
self.__part1(data)
self.__part2(data)
def __part1(self, data):
"""Executes the day 12 puzzle part 1"""
paths = self.__build_graph(data)
available_paths = self.__find_paths("start", paths)
print(
"#12 (part 1) - The number of paths through the cave system is"
f" {len(available_paths)}"
)
def __part2(self, data):
"""Executes the day 12 puzzle part 2"""
paths = self.__build_graph(data)
available_paths = self.__find_paths_2("start", paths)
print(
"#12 (part 2) - The number of paths through the cave system is"
f" {len(available_paths)}"
)
def __build_graph(self, data):
paths = {}
for d in data:
a, b = d.split("-")
if a not in paths:
paths[a] = set()
if b not in paths:
paths[b] = set()
paths[a].add(b)
paths[b].add(a)
return paths
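# Sketch of the adjacency structure built above (hypothetical input lines):
#   ["start-A", "A-end"] -> {"start": {"A"}, "A": {"start", "end"}, "end": {"A"}}
# Every edge is stored in both directions so paths can be walked either way.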
def __find_paths(self, path, paths, visited=set(), accumulated_path=[]):
if path in visited:
return []
elif path.islower():
visited.add(path)
accumulated_path.append(path)
if path == "end":
return [path]
sub_paths = []
for p in paths[path]:
sub_path = self.__find_paths(
p, paths, visited.copy(), accumulated_path.copy()
)
if sub_path:
sub_paths.extend(sub_path)
return sub_paths
def __find_paths_2(self, path, paths, visited={}, accumulated_path=[]):
if path in visited:
if path == "start" or path == "end":
return []
elif list(visited.values()).__contains__(2):
return []
else:
visited[path] = visited[path] + 1
elif path.islower():
visited[path] = 1
accumulated_path.append(path)
if path == "end":
return [path]
sub_paths = []
for p in paths[path]:
sub_path = self.__find_paths_2(
p, paths, visited.copy(), accumulated_path.copy()
)
if sub_path:
sub_paths.extend(sub_path)
return sub_paths
```
#### File: advent_of_code/puzzles/Puzzle19.py
```python
import os
import re
from advent_of_code.puzzles.PuzzleInterface import PuzzleInterface
INPUT_FILE_NAME = os.path.join(os.path.dirname(__file__), "../input/19.txt")
MIN_OVERLAP = 12
TOTAL_BEACONS = 79
MAX_DISTANCE = 1000
class Puzzle19(PuzzleInterface):
"""Defines the day 19 puzzle"""
def execute(self) -> None:
"""Executes the day 19 puzzle"""
input_file = open(INPUT_FILE_NAME, "r")
data = input_file.read().splitlines()
self.__part1(data)
self.__part2(data)
def __part1(self, data):
"""Executes the day 19 puzzle part 1"""
scanner_reports = self.__build_scanner_reports(data)
known_scanners = [(0, 0, 0)]
known_beacons = scanner_reports[0]
unknown_scanners = {k: scanner_reports[k] for k in scanner_reports.keys() if k != 0}
#self.align(known_beacons, known_scanners, unknown_scanners)
print(f"#19 (part 1) - The number of beacons is {len(known_beacons)}")
def __part2(self, data):
"""Executes the day 19 puzzle part 2"""
def __build_scanner_reports(self, data):
reports = {}
scanner_key = None
for d in data:
group = re.search("--- scanner (\\d+) ---", d)
if group:
scanner_key = int(group[1])
elif d.strip():
if scanner_key not in reports:
reports[scanner_key] = []
reports[scanner_key].append(tuple(map(int, d.split(','))))
return reports
def align(self, known_beacons, known_scanners, unknown_scanners):
not_done = True
while unknown_scanners and not_done:
not_done = True
# Iterate over a snapshot of the keys so entries can be removed from the dict
for scanner in list(unknown_scanners):
report = unknown_scanners[scanner]
beacons, position = self.orient(known_beacons, report)
if beacons:
# dicts have no remove(); delete the aligned scanner by key instead
del unknown_scanners[scanner]
known_beacons |= beacons
known_scanners.append(position)
not_done = False
def orient(self, beacons, report):
for x_adjustment in {-1, 0, 1}:
for y_adjustment in {-1, 0, 1}:
for z_adjustment in {-1, 0, 1}:
adjusted = [self.adjust(x_adjustment, y_adjustment, z_adjustment, r) for r in
report]
common_beacons, position = self.compare(beacons, adjusted)
if common_beacons:
return common_beacons, position
return None, None
def adjust(self, x_adjustment, y_adjustment, z_adjustment, report):
return (report[0] * x_adjustment if x_adjustment != 0 else report[0],
report[1] * y_adjustment if y_adjustment != 0 else report[1],
report[2] * z_adjustment if z_adjustment != 0 else report[2])
def compare(self, beacons, adjusted):
for axis in range(3):
sorted_beacons = sorted(beacons, key=lambda position: position[axis])
adjusted.sort(key=lambda position: position[axis])
beacon_diffs = self.relative(beacons)
adjusted_diffs = self.relative(adjusted)
print(f"beacon = {beacon_diffs}, adjusted={adjusted_diffs}")
intersection = set(beacon_diffs) & set(adjusted_diffs)
if intersection:
difference = intersection.pop()
bx, by, bz = sorted_beacons[beacons.index(difference)]
ax, ay, az = adjusted[adjusted_diffs.index(difference)]
cx, cy, cz = (ax - bx, ay - by, az - bz)
relocated = {(x - cx, y - cy, z - cz) for (x, y, z) in adjusted}
matched_beacons = beacons & relocated
if len(matched_beacons) >= MIN_OVERLAP:
return relocated, (cx, cy, cz)
return None, None
def relative(self, readings):
return [(pos1[0] - pos0[0], pos1[1] - pos0[1], pos1[2] - pos0[2])
for pos0, pos1 in zip(readings, readings[1:])]
```
|
{
"source": "JD-P/HNStoryRecommendations",
"score": 3
}
|
#### File: JD-P/HNStoryRecommendations/train_model.py
```python
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
import numpy as np
from search_hn import Hit
import json
import pickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("saved_stories", help="The saved stories from the HN export.")
parser.add_argument("unsaved_stories", help="The stories grabbed with the training data grabber.")
arguments = parser.parse_args()
# Preprocessing stage, get saved stories and HN-search-API data in the same format
with open(arguments.saved_stories) as infile:
saved_stories = json.load(infile)["saved_stories"]
with open(arguments.unsaved_stories, "rb") as infile:
stories = pickle.load(infile)
# Now we have to get them into similar formats
for story in stories:
story.upvoted = False
ss_objects = []
stories_length = len(stories)
for saved_story in saved_stories:
ss_object = Hit.make(saved_story)
ss_object.upvoted = True
ss_objects.append(ss_object)
# Don't forget to filter any duplicates out of the negative set
ss_id_set = set()
[ss_id_set.add(saved_story.id) for saved_story in ss_objects]
# Rebuild the list instead of deleting while iterating, which would skip items
stories = [story for story in stories if story.story_id not in ss_id_set]
# Calculate word frequencies for both datasets
def calculate_word_frequencies(stories):
stemmer = PorterStemmer()
word_frequencies = {}
for story in stories:
for word in stemmer.stem(story.title).split():
try:
word_frequencies[word] += 1
except KeyError:
word_frequencies[word] = 1
return word_frequencies
story_word_frequencies = calculate_word_frequencies(stories)
ss_word_frequencies = calculate_word_frequencies(ss_objects)
word_probabilities = {}
num_titles = len(ss_objects) + len(stories)
for word in ss_word_frequencies:
try:
word_probabilities[word] = (story_word_frequencies[word]
+ ss_word_frequencies[word]) / num_titles
except KeyError:
word_probabilities[word] = ss_word_frequencies[word] / num_titles
upvote_word_probabilities = {}
for word in ss_word_frequencies:
upvote_word_probabilities[word] = ss_word_frequencies[word] / len(ss_objects)
def p_of_upvote_given_word(word):
try:
p_of_word = word_probabilities[word]
except KeyError:
return 0
p_of_upvote = len(ss_objects) / len(stories)
p_of_word_given_upvote = upvote_word_probabilities[word]
return (p_of_word_given_upvote * p_of_upvote) / p_of_word
def p_of_upvote_given_title(title):
"""I'm pretty sure this isn't how you do Bayes so this will probably get updated later"""
from functools import reduce
from operator import mul
stemmer = PorterStemmer()
p_updates = [1 - p_of_upvote_given_word(word) for word in stemmer.stem(title).split()]
try:
return 1 - reduce(mul, p_updates)
except:
return 0
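# Rough sanity check of the combination rule above (made-up numbers, not from the
# training data): if a title has two words with P(upvote|word) = 0.5 each, the
# per-word complements are [0.5, 0.5], their product is 0.25, and the title-level
# estimate becomes 1 - 0.25 = 0.75.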
# Okay now lets figure out precision and recall
hits = 0
precision_total = 0
recall_total = len(ss_objects)
for story in stories + ss_objects:
p_of_upvote = p_of_upvote_given_title(story.title)
if p_of_upvote >= .15:
if story.upvoted:
hits += 1
precision_total += 1
else:
precision_total += 1
print("Precision: {}% of documents retrieved are relevant.".format((hits /
precision_total) *
100))
print("Recall: {}% of relevant documents retrieved.".format((hits/recall_total)*100))
#print("{}: {}%\n{}\n".format(story.title,
# p_of_upvote * 100,
# story.url))
```
|
{
"source": "jdpinedaj/CFLDelays",
"score": 3
}
|
#### File: CFLDelays/scripts/etl.py
```python
from pathlib import Path
import click
import pandas as pd
import numpy as np
import calendar
import datetime as dt
import ppscore as pps
import scipy.stats as stats
from utility import parse_config, set_logger, time_train
@click.command()
@click.argument("config_file", type=str, default="scripts/config.yml")
def etl(config_file):
"""
ETL function that load raw data and convert to train and test set
Args:
config_file [str]: path to config file
Returns:
None
"""
##################
# configure logger
##################
logger = set_logger("./script_logs/etl.log")
##################
# Load config from config file
##################
logger.info(f"Load config from {config_file}")
config = parse_config(config_file)
raw_data_file = config["etl"]["raw_data_file"]
processed_path = Path(config["etl"]["processed_path"])
#test_size = config["etl"]["test_size"]
#random_state = config["etl"]["random_state"]
logger.info(f"config: {config['etl']}")
##################
# Reading data
##################
df = pd.read_csv(raw_data_file, low_memory=False)
##################
# Data transformation and Feature engineering
##################
logger.info(
"-------------------Start data transformation and feature engineering-------------------"
)
# Conversion of some features
df = df.replace(['False', False], np.nan)
df['Train_Weight'] = df['Train_Weight'] / 1000 # Converting units to t
df['weight_length'] = df['Train_Weight'] / df['Train_Length'] # units= t/m
df['weight_wagon'] = df['Train_Weight'] / df[
'wagon_count'] # units= t/wagon
df['IDTRAIN'] = df['IDTRAIN'].astype(str)
df = df[df['Origin_Station'] != 'Mertert-Port'].reset_index(drop=True)
df = df[df['Destination_Station'] != 'Mertert-Port'].reset_index(drop=True)
logger.info(f"shape: {df.shape}")
logger.info("Filtering canceled trains.")
# Filtering canceled trains
df = df[df['Cancelled_Train_Bin'] == 0].reset_index(drop=True)
# Creating data based on stations' data.
logger.info(
"Creating a dictionary of countries and coordinates based on stations' data."
)
dict_country = {
'Antwerp': ['Belgium', '51.371889', '4.276158'],
'Antwerpen-Noord N': ['Belgium', '51.257856', '4.366801'],
'Apach-Frontiere': ['France', '49.468881', '6.369765'],
'Athus': ['Luxembourg', '49.551688', '5.824975'],
'BERTRIX': ['Belgium', '49.85070000', '5.26894000'],
'Berchem': ['Luxembourg', '49.542689', '6.133661'],
'Bettembourg': ['Luxembourg', '49.493889', '6.108889'],
'Bettembourg-frontière': ['Luxembourg', '49.46785900', '6.10915300'],
'Bietigheim-Bissingen': ['Germany', '48.947986', '9.137338'],
'Bischofshofen': ['Austria', '47.417438', '13.219925'],
'Boulou-Perthus': ['France', '42.525299', '2.816646'],
'Brennero (Brenner)': ['Italy', '47.002378', '11.505089'],
'Brennero/Brenner': ['Austria', '47.007338', '11.507839'],
'Brennero/Brenner-Übergang': ['Austria', '47.00194000', '11.50529300'],
'Brugge': ['Belgium', '51.197517', '3.217157'],
'Champigneulles': ['France', '48.736397', '6.167817'],
'Dieulouard': ['France', '48.84307800', '6.07213000'],
'Duisburg-Wedau': ['Germany', '51.42775200', '6.77576600'],
'Ehrang': ['Germany', '49.80200000', '6.68600000'],
'Erfurt-Vieselbach Ubf': ['Germany', '50.990367', '11.125075'],
'Esch-sur-Alzette': ['Luxembourg', '49.49466900', '5.98666200'],
'Forbach': ['France', '49.189505', '6.900831'],
'Forbach-Frontiere': ['France', '49.19390900', '6.91057000'],
'Frankfurt (Oder) Oderbrücke': ['Germany', '52.336830', '14.546645'],
'Frouard': ['France', '48.75520900', '6.14507600'],
'Fulda': ['Germany', '50.554845', '9.684087'],
'Gembloux': ['Belgium', '50.570490', '4.691746'],
'Gent': ['Belgium', '51.06585800', '3.74228800'],
'Gorizia Centrale': ['Italy', '45.927263', '13.610594'],
'Gremberg': ['Germany', '50.931872', '7.004873'],
'Grignano': ['Italy', '45.712456', '13.712081'],
'Gummern': ['Austria', '46.654821', '13.768172'],
'Hagondange': ['France', '49.24571', '6.16293300'],
'Hamburg Harburg': ['Germany', '53.456259', '9.991911'],
'Hamburg-Barmbek': ['Germany', '53.587298', '10.044225'],
'Hamburg-Billwerder Ubf': ['Germany', '53.515913', '10.095863'],
'Hamburg-Eidelstedt': ['Germany', '53.606569', '9.881261'],
'Hamm (Westf) Rbf': ['Germany', '51.678478', '7.808450'],
'Hanau Nord': ['Germany', '50.141900', '8.925691'],
'Harburg (Schwab)': ['Germany', '48.778772', '10.698509'],
'<NAME>': ['Germany', '49.70827000', '6.54751000'],
'Innsbruck-Hbf': ['Austria', '47.263270', '11.401065'],
'Kiel': ['Germany', '54.313586', '10.131083'],
'Kiel-Meimersdorf': ['Germany', '54.28598400', '10.11431200'],
'Kirchweyhe': ['Germany', '52.983717', '8.847205'],
'<NAME> Gbf': ['Germany', '50.358060', '7.590114'],
'Kornwestheim Ubf': ['Germany', '48.862236', '9.179783'],
'Kufstein': ['Austria', '47.58351300', '12.16554500'],
'Kufstein-Transit': ['Austria', '47.601123', '12.177298'],
'<NAME>': ['Germany', '50.906783', '6.931220'],
'Launsdorf-Hochosterwitz': ['Austria', '46.769031', '14.455553'],
'Leuven': ['Belgium', '50.881412', '4.716230'],
'Linz (Rhein)': ['Germany', '50.569312', '7.275979'],
'Lyon': ['France', '45.738545', '4.847001'],
'Löhne (Westf)': ['Germany', '52.196869', '8.713322'],
'Mainz Hbf': ['Germany', '50.001397', '8.258400'],
'Marl-Sinsen': ['Germany', '51.667738', '7.173067'],
'Maschen Rbf': ['Germany', '53.40597900', '10.05616000'],
'Mertert-Port': ['Luxembourg', '49.69852900', '6.47328700'],
'Metz-Chambiere': ['France', '49.12633600', '6.15989800'],
'Metz-Sablon': ['France', '49.09640700', '6.16632000'],
'Monceau': ['Belgium', '50.412302', '4.394469'],
'Monfalcone': ['Italy', '45.807600', '13.543315'],
'Mont-Saint-Martin-(Fre-Bel)': ['France', '49.539485', '5.795765'],
'Muizen': ['Belgium', '51.008260', '4.513946'],
'München-Laim': ['Germany', '48.15053500', '11.46107900'],
'Münster (Westf) Ost': ['Germany', '51.956909', '7.635720'],
'Neumünster': ['Germany', '54.076277', '9.980326'],
'Neuoffingen': ['Germany', '48.482938', '10.345244'],
'Neustadt (Schwarzw)': ['Germany', '47.910334', '8.210948'],
'Neuwied': ['Germany', '50.431505', '7.473153'],
'Ochsenfurt': ['Germany', '49.663138', '10.071667'],
'Osnabrück': ['Germany', '52.27287500', '8.06157700'],
'Perrigny': ['France', '47.29046800', '5.02924200'],
'Plessis-Belleville-(Le)': ['France', '49.09817600', '2.74655600'],
'Pont-A-Mousson': ['France', '48.900174', '6.050552'],
'Port-La-Nouvelle': ['France', '43.019999', '3.038663'],
'Poznan': ['Poland', '52.402759', '17.095353'],
'RZEPIN': ['Poland', '52.350125', '14.815257'],
'Raubling': ['Germany', '47.788421', '12.110035'],
'Rodange': ['Luxembourg', '49.551095', '5.843570'],
'Rodange-Athus-frontière': ['Luxembourg', '49.551665', '5.824990'],
'Rodange-Aubange-frontière':
['Luxembourg', '49.56265800', '5.80172000'],
'Ronet': ['Belgium', '50.457953', '4.829284'],
'Rostock': ['Germany', '54.150763977112', '12.1002551362189'],
'Saarbrücken Rbf': ['Germany', '49.24208900', '6.97109700'],
'Salzburg Hbf': ['Germany', '47.80555200', '13.03289500'],
'Salzburg-Hbf': ['Austria', '47.813024', '13.045337'],
'Schwarzenbach': ['Germany', '49.724680', '11.992859'],
'Schwerin-Goerris': ['Germany', '53.608673', '11.384560'],
'Sibelin': ['France', '46.650792', '4.838708'],
"St-Germain-Au-Mont-D'Or": ['France', '45.888651', '4.804246'],
'<NAME>': ['Italy', '46.506276', '13.607397'],
'Tarvisio-Boscoverde': ['Austria', '46.540147', '13.648156'],
'Thionville': ['France', '49.35566700', '6.17307200'],
'Toul': ['France', '48.679211', '5.880462'],
'Treuchtlingen': ['Germany', '48.961132', '10.908102'],
'Trieste': ['Italy', '45.639233', '13.755926'],
'Udine': ['Italy', '46.055684', '13.242007'],
'Ulm Ubf': ['Germany', '48.399482', '9.982538'],
'Verona Porta Nuova Scalo': ['Italy', '45.428674', '10.982622'],
'Villach-Westbahnhof': ['Austria', '46.608000', '13.839648'],
'Wasserbillig': ['Luxembourg', '49.71361100', '6.50638900'],
'Wattenbek': ['Germany', '54.174414', '10.043755'],
'Woippy': ['France', '49.14951200', '6.15606100'],
'Zeebrugge': ['Belgium', '51.34017', '3.221882'],
'Zoufftgen-Frontiere': ['France', '49.46785900', '6.10915300'],
}
stations_countries = pd.DataFrame({
'station': [key for key in dict_country.keys()],
'country': [dict_country[x][0] for x in dict_country],
'lat': [dict_country[x][1] for x in dict_country],
'long': [dict_country[x][2] for x in dict_country]
})
stations_countries['lat'] = stations_countries['lat'].astype(float)
stations_countries['long'] = stations_countries['long'].astype(float)
## Adding departure and arrival data
# Country_dep
df = pd.merge(df,
stations_countries,
left_on=['Station_Name_dep'],
right_on=['station'],
how='left')
# Country_arriv
df = pd.merge(df,
stations_countries,
left_on=['Station_Name_arriv'],
right_on=['station'],
how='left')
df = df.rename(
columns={
'station_x': 'station_dep',
'station_y': 'station_arriv',
'country_x': 'Country_dep',
'country_y': 'Country_arriv',
'lat_x': 'lat_dep',
'lat_y': 'lat_arriv',
'long_x': 'long_dep',
'long_y': 'long_arriv',
})
df['route'] = df['Station_Name_dep'] + '--' + df['Station_Name_arriv']
df = df.drop(columns=['station_dep', 'station_arriv'])
logger.info("End data transformation and feature engineering")
##################
# Feature selection
##################
logger.info(
"-------------------Start feature selection-------------------")
data = df.copy()
data = data[[ #'IDTRAIN',
'Incoterm',
'Max_TEU',
'TEU_Count',
'Max_Length',
'Train_Length',
'Train_Weight',
'Planned_Departure_DOW',
'Planned_Arrival_DOW',
'Planned_Arrival',
'Depart_Week_Num',
'wagon_count',
'Train_Distance_KM', # Be careful, this distance is a virtual distance calculated using the haversine method; it is not the actual distance of the track...
'train_tare_weight', # Please review if this value can be duplicated to all rows using the IDTRAIN
#'precipIntensity_final_station', # Only in the destination, not the final station of the arc
#'precipProbability_final_station', # Only in the destination, not the final station of the arc
#'temperature_final_station', # Only in the destination, not the final station of the arc
#'apparentTemperature_final_station', # Only in the destination, not the final station of the arc
#'dewPoint_final_station', # Only in the destination, not the final station of the arc
#'humidity_final_station', # Only in the destination, not the final station of the arc
#'windSpeed_final_station', # Only in the destination, not the final station of the arc
#'windBearing_final_station', # Only in the destination, not the final station of the arc
#'cloudCover_final_station', # Only in the destination, not the final station of the arc
#'uvIndex_final_station', # Only in the destination, not the final station of the arc
#'visibility_final_station', # Only in the destination, not the final station of the arc
#'Station_Name_dep',
#'Station_Name_arriv',
'Country_dep',
'Country_arriv',
#'route',
#'Time_From_Prior_dep',
#'Time_From_Prior_arriv',
#'Depart_Variance',
'Depart_Variance_Mins_dep',
#'Depart_Variance_Mins_Pos',
#'Arrive_Variance',
'Arrive_Variance_Mins_arriv', # Feature to predict? -> arrival delay in minutes at the arrival station
#'Arrive_Variance_Mins_Pos',
#'Travel_Time_Mins_arriv', ### COMPLETELY UNRELIABLE FEATURE
#'Idle_Time_Mins_dep',
'KM_Distance_Event_arriv', # Be careful, this distance is a virtual distance calculated using the haversine method; it is not the actual distance of the track...
'weight_length',
'weight_wagon',
#'type_incident',
#'dateh_incident',
#'lieu',
#'statut',
#'statut_commercial',
#'statut_financier',
#'gravite',
#'motif_client',
#'commentaire',
]]
data.rename(
columns={
'Incoterm': 'incoterm',
'Max_TEU': 'max_teu',
'TEU_Count': 'teu_count',
'Max_Length': 'max_length',
'Train_Length': 'train_length',
'Train_Weight': 'train_weight',
'Planned_Departure_DOW': 'planned_departure_day',
'Planned_Arrival_DOW': 'planned_arrival_day',
'Planned_Arrival': 'planned_arrival',
'Depart_Week_Num': 'departure_week_number',
'wagon_count': 'wagon_count',
'Train_Distance_KM': 'total_distance_trip',
'train_tare_weight': 'sum_tares_wagons',
'Station_Name_dep': 'departure_station',
'Station_Name_arriv': 'arrival_station',
'Country_dep': 'departure_country',
'Country_arriv': 'arrival_country',
'route': 'departure_arrival_route',
'Depart_Variance_Mins_dep': 'departure_delay',
'Arrive_Variance_Mins_arriv': 'arrival_delay',
'KM_Distance_Event_arriv': 'distance_between_control_stations',
'weight_length': 'weight_per_length_of_train',
'weight_wagon': 'weight_per_wagon_of_train',
#'type_incident': 'incident_type',
#'dateh_incident': 'incident_date',
#'lieu': 'incident_location',
#'statut': 'incident_status',
#'statut_commercial': 'incident_status_commercial',
#'statut_financier': 'incident_status_financial',
#'gravite': 'incident_gravity',
#'motif_client': 'incident_customer_reason',
#'commentaire': 'incident_comment',
},
inplace=True)
## Adding time data
data['planned_arrival'] = pd.to_datetime(data['planned_arrival'])
data['month_arrival'] = data['planned_arrival'].dt.month
data['month_arrival'] = data['month_arrival'].apply(
lambda x: calendar.month_abbr[x])
data['hour_arrival'] = data['planned_arrival'].dt.hour
data['arrival_night'] = [
'no' if x <= 19 and x >= 6 else 'yes' for x in data['hour_arrival']
]
data['peak_morning'] = [
'yes' if x <= 9 and x >= 6 else 'no' for x in data['hour_arrival']
]
data['peak_evening'] = [
'yes' if x <= 19 and x >= 16 else 'no' for x in data['hour_arrival']
]
data['peak_time'] = [
'yes' if i == 'yes' or j == 'yes' else 'no'
for i, j in zip(data['peak_morning'], data['peak_evening'])
]
data = data.drop(columns=[
'planned_arrival', 'hour_arrival', 'peak_morning', 'peak_evening'
],
axis=1)
logger.info(f"data: {data.shape}")
logger.info("End feature selection")
##################
# Data cleaning
##################
logger.info("-------------------Start data cleaning-------------------")
df = data.copy()
df.dropna(subset=['arrival_delay'], inplace=True)
#df = df.fillna({
# 'incident_type': 'no_incident',
# 'incident_gravity': 'no_incident',
# 'incident_customer_reason': 'no_incident'
#})
df.reset_index(drop=True, inplace=True)
## Dividing dataset in numeric and categorical features
# Categorical features
cat_features = [
'incoterm',
'planned_departure_day',
'planned_arrival_day',
'departure_country',
'arrival_country',
'month_arrival',
'arrival_night',
'peak_time',
#'incident_type', 'incident_gravity', 'incident_customer_reason'
]
cat_feat_train_data = df[cat_features]
cat_feat_train_data = cat_feat_train_data.dropna().reset_index(drop=True)
# Transforming categorical features
cat_feat_train_data_cat = cat_feat_train_data.copy()
for i in cat_feat_train_data_cat:
cat_feat_train_data_cat[i] = cat_feat_train_data_cat[i].astype(
'category')
#cat_feat_train_data_cat[i] = cat_feat_train_data_cat[i].cat.codes
# Numerical features
num_feat_train_data = df.drop(columns=cat_features)
num_feat_train_data = num_feat_train_data.dropna().reset_index(
drop=True).astype(float)
# Removing outliers of numerical features
# find Q1, Q3, and interquartile range for each column
Q1 = num_feat_train_data.quantile(q=0.03)
Q3 = num_feat_train_data.quantile(q=0.97)
IQR = num_feat_train_data.apply(stats.iqr)
# only keep rows in dataframe that have values within 1.5*IQR of Q1 and Q3
num_feat_train_data = num_feat_train_data[~(
(num_feat_train_data < (Q1 - 1.5 * IQR)) |
(num_feat_train_data >
(Q3 + 1.5 * IQR))).any(axis=1)].reset_index(drop=True)
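# Numeric illustration (made-up values): if a column has Q1 = 10 (3rd percentile),
# Q3 = 90 (97th percentile) and stats.iqr gives 40, rows with values outside
# [10 - 1.5*40, 90 + 1.5*40] = [-50, 150] for that column are dropped. Note that
# Q1/Q3 here are the 3rd/97th percentiles, while stats.iqr uses the standard
# 25th-75th percentile range.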
# Merging categorical and numerical features
merged_data = pd.concat([cat_feat_train_data_cat, num_feat_train_data],
axis=1)
merged_data = merged_data.dropna()
merged_data['arrived'] = merged_data['arrival_delay'].apply(
lambda x: time_train(x))
# Calculating PPS matrix to remove some features
predictors = pps.predictors(merged_data,
'arrived',
output="df",
sorted=True)
predictors_to_remove = predictors[
predictors['ppscore'] <
0.003] # TODO: Constant for this hard-code (0.003)
df = merged_data.drop(predictors_to_remove['x'].values, axis=1)
df.drop(columns=['train_weight', 'sum_tares_wagons'], axis=1, inplace=True)
# Removing this column for training regression models
df.drop(columns=['arrived'], axis=1, inplace=True)
logger.info("End data cleaning")
##################
# Export
##################
logger.info("-------------------Export-------------------")
logger.info(f"write data to {processed_path}")
df.to_csv(processed_path / 'df_processed_ml.csv', index=False)
logger.info(f"df_processed_ml: {df.shape}")
logger.info("\n")
if __name__ == "__main__":
etl()
```
#### File: CFLDelays/scripts/predict.py
```python
from pathlib import Path
import click
import pandas as pd
from pycaret.classification import load_model, predict_model
from utility import parse_config, set_logger
@click.command()
@click.argument("config_file", type=str, default="scripts/config.yml")
def predict(config_file):
"""
Main function that runs predictions
Args:
config_file [str]: path to config file
Returns:
None
"""
##################
# configure logger
##################
logger = set_logger("./script_logs/predict.log")
##################
# Load config from config file
##################
logger.info(f"Load config from {config_file}")
config = parse_config(config_file)
model_path = Path(config["predict"]["model_path"])
processed_test = config["predict"]["processed_test"]
predicted_file = config["predict"]["predicted_file"]
export_result = config["predict"]["export_result"]
logger.info(f"config: {config['predict']}")
##################
# Load model & test set
##################
# Load model
logger.info(
f"-------------------Load the trained model-------------------")
model = load_model(model_path)
# Load and prepare data
logger.info(f"Load the data to predict {processed_test}")
data = pd.read_csv(processed_test)
data['weight_per_length_of_train'] = round(
data['train_weight'] / data['train_length'], 1)
data['weight_per_wagon_of_train'] = round(
data['train_weight'] / data['wagon_count'], 1)
data.drop(columns=['train_weight', 'wagon_count'], axis=1, inplace=True)
##################
# Make prediction and export file
##################
logger.info(f"-------------------Predict and evaluate-------------------")
predictions = predict_model(model, data) # Using pycaret
predictions.rename(columns={'Label': 'arrival_delay'}, inplace=True)
predictions.to_csv(predicted_file, index=False)
if __name__ == "__main__":
predict()
```
|
{
"source": "jdpinedaj/darksky",
"score": 3
}
|
#### File: darksky/darksky/request_manager.py
```python
import requests
from aiohttp import ClientSession
from .exceptions import DarkSkyException
class BaseRequestManger:
def __init__(self, gzip: bool):
self.headers = {} if not gzip else {"Accept-Encoding": "gzip"}
def make_request(self, url: str, **params):
raise NotImplementedError
class RequestManger(BaseRequestManger):
def __init__(self, gzip: bool):
super().__init__(gzip)
self.session = requests.Session()
self.session.headers = self.headers
def make_request(self, url: str, **params):
response = self.session.get(url, params=params).json()
if "error" in response:
raise DarkSkyException(response["code"], response["error"])
response["timezone"] = params.get("timezone") or response["timezone"]
return response
class RequestMangerAsync(BaseRequestManger):
async def make_request(
self,
url: str,
session: ClientSession,
**params
):
assert isinstance(session, ClientSession)
for key in list(params.keys()):
if params[key] is None:
del params[key]
elif isinstance(params[key], list):
params[key] = ",".join(params[key])
async with session.get(
url, params=params, headers=self.headers
) as resp:
response = await resp.json()
if "error" in response:
raise DarkSkyException(response["code"], response["error"])
response["timezone"] = params.get("timezone") or response["timezone"]
return response
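# Minimal usage sketch (assumed call site; the URL and parameters below are
# placeholders, not part of this module):
#   manager = RequestManger(gzip=True)
#   payload = manager.make_request(
#       "https://api.darksky.net/forecast/KEY/42.0,-71.0", lang="en", units="si")
# The async variant additionally takes an aiohttp.ClientSession and is awaited.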
```
#### File: darksky/tests/test_forecast.py
```python
import asyncio
import copy
import os
import re
import sys
from datetime import datetime
import aiohttp
import aioresponses
import mock
import pytest
from darksky.api import DarkSky, DarkSkyAsync
from darksky.forecast import Forecast
from darksky.utils import get_datetime_from_unix
from . import mokcs, utils
from .data import DATA
sys.path.insert(
0,
os.path.realpath(
os.path.join(
os.path.dirname(__file__),
".."
)
)
)
@mock.patch("requests.Session", mokcs.MockSession)
def get_forecast_sync() -> Forecast:
darksky = DarkSky("api_key")
return darksky.get_forecast(DATA["latitude"], DATA["longitude"])
def get_forecast_async():
async def get_async_data():
darksky = DarkSkyAsync("api_key")
with aioresponses.aioresponses() as resp:
resp.get(re.compile(".+"), status=200, payload=copy.deepcopy(DATA))
result = await darksky.get_forecast(
DATA["latitude"],
DATA["longitude"],
client_session=aiohttp.ClientSession()
)
return result
loop = asyncio.get_event_loop()
return loop.run_until_complete(get_async_data())
@pytest.mark.parametrize(
"forecast",
[get_forecast_sync(), get_forecast_async()]
)
def test_forecast_base_fields(forecast):
assert isinstance(forecast, Forecast)
assert forecast.latitude == DATA["latitude"]
assert forecast.longitude == DATA["longitude"]
assert forecast.timezone == "America/New_York"
@pytest.mark.parametrize(
"forecast",
[get_forecast_sync(), get_forecast_async()]
)
def test_forecast_currently(forecast):
f_item, d_item = forecast.currently, copy.deepcopy(DATA["currently"])
for key in d_item:
forecast_key = utils.snake_case_key(key)
if isinstance(getattr(f_item, forecast_key), datetime):
d_item[key] = get_datetime_from_unix(d_item[key])
assert hasattr(f_item, forecast_key)
assert getattr(f_item, forecast_key) == d_item[key]
@pytest.mark.parametrize(
"forecast",
[get_forecast_sync(), get_forecast_async()]
)
def test_forecast_minutely(forecast):
assert forecast.minutely.summary == DATA["minutely"]["summary"]
assert forecast.minutely.icon == DATA["minutely"]["icon"]
for f_item, d_item in zip(
forecast.minutely.data, copy.deepcopy(DATA["minutely"]["data"])
):
for key in d_item:
forecast_key = utils.snake_case_key(key)
if isinstance(getattr(f_item, forecast_key), datetime):
d_item[key] = get_datetime_from_unix(d_item[key])
assert hasattr(f_item, forecast_key)
assert getattr(f_item, forecast_key) == d_item[key]
@pytest.mark.parametrize(
"forecast",
[get_forecast_sync(), get_forecast_async()]
)
def test_forecast_hourly(forecast):
assert forecast.hourly.summary == DATA["hourly"]["summary"]
assert forecast.hourly.icon == DATA["hourly"]["icon"]
for f_item, d_item in zip(
forecast.hourly.data, copy.deepcopy(DATA["hourly"]["data"])
):
for key in d_item:
forecast_key = utils.snake_case_key(key)
if isinstance(getattr(f_item, forecast_key), datetime):
d_item[key] = get_datetime_from_unix(d_item[key])
assert hasattr(f_item, forecast_key)
assert getattr(f_item, forecast_key) == d_item[key]
@pytest.mark.parametrize(
"forecast",
[get_forecast_sync(), get_forecast_async()]
)
def test_forecast_daily(forecast):
assert forecast.daily.summary == DATA["daily"]["summary"]
assert forecast.daily.icon == DATA["daily"]["icon"]
for f_item, d_item in zip(
forecast.daily.data, copy.deepcopy(DATA["daily"]["data"])
):
for key in d_item:
forecast_key = utils.snake_case_key(key)
if isinstance(getattr(f_item, forecast_key), datetime):
d_item[key] = get_datetime_from_unix(d_item[key])
assert hasattr(f_item, forecast_key)
assert getattr(f_item, forecast_key) == d_item[key]
@pytest.mark.parametrize(
"forecast",
[get_forecast_sync(), get_forecast_async()]
)
def test_forecast_alerts(forecast):
for f_item, d_item in zip(forecast.alerts, copy.deepcopy(DATA["alerts"])):
for key in d_item:
forecast_key = utils.snake_case_key(key)
if isinstance(getattr(f_item, forecast_key), datetime):
d_item[key] = get_datetime_from_unix(d_item[key])
assert hasattr(f_item, forecast_key)
assert getattr(f_item, forecast_key) == d_item[key]
@pytest.mark.parametrize(
"forecast",
[get_forecast_sync(), get_forecast_async()]
)
def test_forecast_flags(forecast):
d_item = copy.deepcopy(DATA["flags"])
f_item = forecast.flags
for key in d_item:
forecast_key = utils.snake_case_key(key)
if isinstance(getattr(f_item, forecast_key), datetime):
d_item[key] = get_datetime_from_unix(d_item[key])
assert hasattr(f_item, forecast_key)
assert getattr(f_item, forecast_key) == d_item[key]
```
#### File: darksky/tests/utils.py
```python
def snake_case_key(key: str) -> str:
assert isinstance(key, str)
new_key = key[0]
for char in key[1:]:
if char.isupper():
new_key += "_{char}".format(char=char.lower())
elif char == "-":
new_key += "__"
else:
new_key += char
return new_key
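# Examples of the conversion above: "temperatureHigh" becomes "temperature_high",
# and "precip-intensity" becomes "precip__intensity" (a dash maps to a double
# underscore).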
```
|
{
"source": "jdpinedaj/poc_airflow",
"score": 3
}
|
#### File: cloud_functions/raw-schema-processed/main.py
```python
import pandas as pd
import flask
import json
#from typing import List
class DataRetrieverInterface(object):
def retrieve(self):
raise NotImplementedError()
def process(self):
raise NotImplementedError()
def save(self):
raise NotImplementedError()
class ParquetRetriever(DataRetrieverInterface):
def __init__(self, bucket_name: str, origin_path: str, destiny_path: str,
schema_name: str):
self._bucket_name = bucket_name
self._origin_path = origin_path
self._destiny_path = destiny_path
self._schema_name = schema_name
self._data = None
def retrieve(self):
"""
It retrieves the data from the source url and stores it in the data attribute
"""
origin_path = f"gcs://{self._bucket_name}/{self._origin_path}"
self._data = pd.read_parquet(origin_path, engine='fastparquet')
def process(self):
"""
Casting schema
"""
if self._data is None:
raise Exception("There is no data to process")
self._data.columns = self._data.columns.str.lower()
schema = self._get_schema()
#TODO: Daniel, I don't understand how to do this without using pandas...
# int_columns = []
# str_columns = []
# float_columns = []
# for column, data_type in schema.items():
# if data_type == 'int':
# int_columns.append(column)
# elif data_type == 'float':
# float_columns.append(column)
# elif data_type == 'str':
# str_columns.append(column)
# else:
# raise Exception("Unknown data type")
# self._parse_int_columns(int_columns)
# self._parse_float_columns(float_columns)
# self._parse_str_columns(str_columns)
self._data = self._data.astype(schema)
def save(self):
"""
It saves the data in the destiny path with new schema
"""
if self._data is None:
raise Exception("There is no data to write")
destiny_path = f"gcs://{self._bucket_name}/{self._destiny_path}"
self._data.to_parquet(destiny_path, engine='fastparquet')
def _get_schema(self):
with open(self._schema_name, 'r') as schema_file:
schema = json.load(schema_file)
return schema
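# The schema file is assumed to map lower-cased column names to dtype strings
# accepted by DataFrame.astype, e.g. (hypothetical):
#   {"trip_id": "int64", "fare_amount": "float64", "vendor_name": "str"}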
# def _parse_int_columns(self, int_columns: List[str]):
# pass
# def _parse_str_columns(self, str_columns: List[str]):
# pass
# def _parse_float_columns(self, float_columns: List[str]):
# pass
def main(request):
request_json = request.get_json()
bucket_name = request_json['bucket_name']
origin_path = request_json['origin_path']
destiny_path = request_json['destiny_path']
schema_name = request_json['schema_name']
try:
parquet_retriever = ParquetRetriever(bucket_name, origin_path,
destiny_path, schema_name)
parquet_retriever.retrieve()
parquet_retriever.process()
parquet_retriever.save()
response = flask.Response("DONE", status=200)
except Exception as ex:
response = flask.Response(str(ex), status=404)
return response
```
|
{
"source": "jdpinedaj/SentimentMountEtna",
"score": 3
}
|
#### File: SentimentMountEtna/scripts/classification.py
```python
import logging
from pathlib import Path
from utility import parse_config, set_logger
import click
import pandas as pd
import numpy as np
from pycaret.classification import *
import imblearn
#ignore log(0) and divide by 0 warning
np.seterr(divide='ignore')
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
@click.command()
@click.argument("config_file", type=str, default="scripts/config.yml")
def classification(config_file):
"""
Main function that trains & persists model based on training set
Args:
config_file [str]: path to config file
Returns:
None
"""
##################
# configure logger
##################
logger = set_logger("./script_logs/classification.log")
##################
# Load config from config file
##################
logger.info(f"Load config from {config_file}")
config = parse_config(config_file)
review_data = config["classification"]["lda_review_data_tuned"]
title_data = config["classification"]["lda_title_data_tuned"]
predictions_path = Path(config["classification"]["predictions_path"])
model_classification_path = config["classification"][
"model_classification_path"]
logger.info(f"config: {config['classification']}")
##################
# Load and merge data
##################
logger.info(
f"-------------------Load the processed data-------------------")
review_data = pd.read_csv(review_data, low_memory=False)
title_data = pd.read_csv(title_data, low_memory=False)
data = pd.concat([
title_data.drop(columns=['Title_sentiment_rating'], axis=1),
review_data
],
axis=1)
data = data.rename(columns={'Review_sentiment_rating': 'sentiment_rating'})
data = data.dropna(subset=['Title_title', 'Review_content']).reset_index(
drop=True)
logger.info(f"shape: {data.shape}")
logger.info("End Load and merge data")
##################
# Setup
##################
# Setup Pycaret
logger.info(f"-------------------Setup pycaret-------------------")
without_pca = setup(
data=data,
target='sentiment_rating',
session_id=42,
normalize=True,
transformation=True,
ignore_features=[
'Title_title', 'Review_content', 'Title_Dominant_Topic',
'Title_Perc_Dominant_Topic', 'Review_Dominant_Topic',
'Review_Perc_Dominant_Topic'
],
use_gpu=True,
fix_imbalance=True,
fix_imbalance_method=imblearn.over_sampling.SVMSMOTE(),
data_split_stratify=True,
fold_strategy='stratifiedkfold',
silent=True,
)
logger.info(f"End setup pycaret")
##################
# train model
##################
# Train model
logger.info(f"-------------------Training NB model-------------------")
model = create_model(estimator='nb')
predictions = predict_model(model)
logger.info(f"Metrics:\n {pull(model)}")
# Finalizing model
model = finalize_model(model)
logger.info(f"End training NB model")
##################
# Saving model and predictions
##################
logger.info(f"-------------------Saving model-------------------")
save_model(model, model_classification_path)
predictions.to_csv(predictions_path / "predictions.csv", index=False)
logger.info(f"End saving model")
if __name__ == "__main__":
classification()
```
#### File: SentimentMountEtna/scripts/get_data.py
```python
import click
from scrapy.item import Field
from scrapy.item import Item
from scrapy.spiders import Spider
from scrapy.selector import Selector
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose, TakeFirst
import re
from typing import Union
import utility
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
##################
# Constants
##################
# Links to read
START_URLS = utility.links
# Absolute xpath
COMMENTS_BLOCK_XPATH = '//*[@id="REVIEWS"]/section/div[1]/div/div[5]/div'
# Relative xpaths
AUTHOR_XPATH = ".//span/div/div[1]/div[1]/div[2]/span/a/text()"
PROFILE_URL_XPATH = ".//div[1]/a/@href"
PLACE_XPATH = ".//span/div/div[1]/div[1]/div[2]/div/div/span[1]/text()"
CONTRIBUTIONS_XPATH = ".//span/div/div[1]/div[1]/div[2]/div/div/span[2]/text()"
TITLE_XPATH = ".//span/div/div[4]/a/span/text()"
CONTENT_XPATH = ".//div[contains(@style,'-webkit-line-clamp')]/div/span/text()"
DATE_XPATH = ".//span/div/div[8]/div[1]/text()"
RATING_XPATH = './/span/div/div[3]/svg/@title'
##################
# Functions to parse
##################
def parse_profile_url(url: str) -> str:
return "https://www.tripadvisor.com" + url
def parse_rating(rating_title: str) -> int:
return int(rating_title[0])
def parse_place(place_content: str) -> Union[str, None]:
return None if "contrib" in place_content.lower() else place_content
def parse_date(date_text: str) -> str:
date_text = date_text.replace('Written ', '')
return date_text
def parse_contributions(contributions_text: str) -> int:
return int(''.join(re.findall(r"\d", contributions_text)))
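# For example, a contributions string such as "1,234 contributions" (illustrative;
# the exact TripAdvisor wording may differ) yields 1234: every digit is kept,
# concatenated and cast to int.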
##################
# Getting data and saving it
##################
class Comment(Item):
author = Field()
title = Field()
content = Field()
place = Field(input_processor=MapCompose(parse_place))
date = Field(input_processor=MapCompose(parse_date))
rating = Field(input_processor=MapCompose(parse_rating),
output_processor=TakeFirst())
profile_url = Field(input_processor=MapCompose(parse_profile_url),
output_processor=TakeFirst())
url_path = Field()
contributions = Field(input_processor=MapCompose(parse_contributions),
output_processor=TakeFirst())
class TripAdvisorSpider(Spider):
name = "Trip Spider"
start_urls = START_URLS
def parse(self, response):
sel = Selector(response)
comments = sel.xpath(COMMENTS_BLOCK_XPATH)
comments = comments[1:-1]
for elem in comments:
item = ItemLoader(Comment(), elem)
# add_xpath(<field>, <xpath>)
item.add_xpath('author', AUTHOR_XPATH)
item.add_xpath('title', TITLE_XPATH)
item.add_xpath('content', CONTENT_XPATH)
item.add_xpath('place', PLACE_XPATH)
item.add_xpath('date', DATE_XPATH)
item.add_xpath('rating', RATING_XPATH)
item.add_xpath('profile_url', PROFILE_URL_XPATH)
item.add_value('url_path', response.url)
item.add_xpath('contributions', CONTRIBUTIONS_XPATH)
yield item.load_item()
next_pages = response.css('.cCnaz > div:nth-child(1) > a:nth-child(1)')
if len(next_pages) > 0:
for next_page in next_pages:
yield response.follow(next_page, self.parse)
```
#### File: SentimentMountEtna/scripts/nlp.py
```python
import logging
from pathlib import Path
from utility import parse_config, set_logger
import utility
import click
from pycaret.nlp import *
import pandas as pd
import numpy as np
import spacy
from collections import Counter
spacy.load("en_core_web_sm")
# NLTK
import nltk
from nltk import word_tokenize
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')
nltk.download('words')
nltk.download('averaged_perceptron_tagger')
from nltk.corpus import wordnet, stopwords
#ignore log(0) and divide by 0 warning
np.seterr(divide='ignore')
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
@click.command()
@click.argument("config_file", type=str, default="scripts/config.yml")
def nlp(config_file):
"""
NLP function that extracts the dimensions in the titles and the reviews of the data.
Args:
config_file [str]: path to config file
Returns:
None
"""
##################
# configure logger
##################
logger = set_logger("./script_logs/nlp.log")
##################
# Load config from config file
##################
logger.info(f"Load config from {config_file}")
config = parse_config(config_file)
raw_data_file = config["nlp"]["raw_data_file"]
model_title_path = config["nlp"]["model_title_path"]
model_review_path = config["nlp"]["model_review_path"]
dimensions_path = Path(config["nlp"]["dimensions_path"])
logger.info(f"config: {config['nlp']}")
##################
# Data transformation and Feature engineering
##################
logger.info(
"-------------------Start data transformation and feature engineering-------------------"
)
df = pd.read_csv(raw_data_file, low_memory=False)
data = df[['title', 'content', 'rating']]
# Dropping rows without data in content
data = data.dropna(subset=['content']).reset_index(drop=True)
# Filtering comments according to keywords
logger.info("Filtering comments according to keywords.")
data['is_transport_related'] = data['content'].str.contains(
utility.TRANSPORT_KEYWORDS, case=False, na=False)
data = data[data['is_transport_related'] == True].reset_index(drop=True)
data = data.drop(columns=['is_transport_related'])
logger.info(f"shape: {data.shape}")
# Creating sentiment_rating feature based on rating
logger.info("Creating sentiment_rating feature based on rating.")
data['sentiment_rating'] = np.where(data['rating'] > 3, 1, 0)
# changing bad and good just for visualization in the recall metric
data['sentiment_rating'] = data['sentiment_rating'].replace(
[0, 1], ['negative', 'positive'])
data['sentiment_rating'] = data['sentiment_rating'].replace(
['negative', 'positive'], [1, 0]) # NEGATIVE IS 1!!!!
# Defining noise words
logger.info("Defining noise words.")
stopwords_corpus = nltk.corpus.stopwords
eng_stop_words = stopwords_corpus.words('english')
## For title
noise_words_title = []
token_lists_title = [word_tokenize(each) for each in data['title']]
tokens_title = [item for sublist in token_lists_title for item in sublist]
one_percentile_title = int(len(set(tokens_title)) * 0.01)
top_1_percentile_title = Counter(tokens_title).most_common(
one_percentile_title)
bottom_1_percentile_title = Counter(
tokens_title).most_common()[-one_percentile_title:]
noise_words_title.extend(eng_stop_words)
noise_words_title.extend([word for word, val in top_1_percentile_title])
noise_words_title.extend([word for word, val in bottom_1_percentile_title])
## For review
noise_words_review = []
token_lists_review = [word_tokenize(each) for each in data['content']]
tokens_review = [
item for sublist in token_lists_review for item in sublist
]
one_percentile_review = int(len(set(tokens_review)) * 0.01)
top_1_percentile_review = Counter(tokens_review).most_common(
one_percentile_review)
bottom_1_percentile_review = Counter(
tokens_review).most_common()[-one_percentile_review:]
noise_words_review.extend(eng_stop_words)
noise_words_review.extend([word for word, val in top_1_percentile_review])
noise_words_review.extend(
[word for word, val in bottom_1_percentile_review])
logger.info("End data transformation and feature engineering")
##################
# NLP for title
##################
logger.info("-------------------NLP for title-------------------")
# Setup Pycaret
logger.info(f"Setup Pycaret")
exp_name = setup(data=data[['title', 'sentiment_rating']],
target='title',
session_id=42,
custom_stopwords=noise_words_title)
# Training model
logger.info(f"Training model")
logger.info(f"Tuning model")
tuned_lda_title = tune_model(model='lda',
multi_core=True,
supervised_target='sentiment_rating',
custom_grid=[2, 3, 4, 5, 6],
optimize='AUC',
verbose=False)
lda_title_tuned = create_model(
'lda', multi_core=True,
num_topics=tuned_lda_title.num_topics) # Latent Dirichlet Allocation
lda_title_data_tuned = assign_model(lda_title_tuned)
lda_title_data_tuned = lda_title_data_tuned.add_prefix('Title_')
lda_title_data_tuned['Title_Dominant_Topic'] = lda_title_data_tuned[
'Title_Dominant_Topic'].replace(' ', '_', regex=True)
logger.info("End NLP for title")
##################
# NLP for review
##################
logger.info("-------------------NLP for review-------------------")
# Setup Pycaret
logger.info(f"Setup Pycaret")
exp_name = setup(data=data[['content', 'sentiment_rating']],
target='content',
session_id=42,
custom_stopwords=noise_words_review)
# Training model
logger.info(f"Training model")
logger.info(f"Tuning model")
tuned_lda_review = tune_model(model='lda',
multi_core=True,
supervised_target='sentiment_rating',
custom_grid=[2, 3, 4, 5, 6],
optimize='AUC',
verbose=False)
lda_review_tuned = create_model(
'lda', multi_core=True,
num_topics=tuned_lda_review.num_topics) # Latent Dirichlet Allocation
lda_review_data_tuned = assign_model(lda_review_tuned)
lda_review_data_tuned = lda_review_data_tuned.add_prefix('Review_')
lda_review_data_tuned['Review_Dominant_Topic'] = lda_review_data_tuned[
'Review_Dominant_Topic'].replace(' ', '_', regex=True)
logger.info("End NLP for review")
##################
# Export
##################
logger.info("-------------------Export-------------------")
logger.info(f"write data to {dimensions_path}")
lda_title_data_tuned.to_csv(dimensions_path / "lda_title_data_tuned.csv",
index=False)
lda_review_data_tuned.to_csv(dimensions_path / "lda_review_data_tuned.csv",
index=False)
logger.info(f"lda_title_data_tuned shape: {lda_title_data_tuned.shape}")
logger.info(f"lda_review_data_tuned shape: {lda_review_data_tuned.shape}")
logger.info("\n")
logger.info("End Export")
##################
# Saving models
##################
logger.info(f"-------------------Saving models-------------------")
save_model(lda_title_tuned, model_title_path)
save_model(lda_review_tuned, model_review_path)
logger.info(f"End saving models")
if __name__ == "__main__":
nlp()
```
|
{
"source": "jdp/jarg",
"score": 3
}
|
#### File: jarg/jarg/jsonform.py
```python
import json
undefined = object()
class JSONFormEncoder(json.JSONEncoder):
def default(self, obj):
if obj == undefined:
return None
else:
return super(JSONFormEncoder, self).default(obj)
def parse_path(path):
"""
http://www.w3.org/TR/2014/WD-html-json-forms-20140529/#dfn-steps-to-parse-a-json-encoding-path
"""
original = path
failure = [(original, {'last': True, 'type': object})]
steps = []
try:
first_key = path[:path.index("[")]
if not first_key:
return original
steps.append((first_key, {'type': 'object'}))
path = path[path.index("["):]
except ValueError:
return failure
while path:
if path.startswith("[]"):
steps[-1][1]['append'] = True
path = path[2:]
if path:
return failure
elif path[0] == "[":
path = path[1:]
try:
key = path[:path.index("]")]
path = path[path.index("]")+1:]
except ValueError:
return failure
try:
steps.append((int(key), {'type': 'array'}))
except ValueError:
steps.append((key, {'type': 'object'}))
else:
return failure
for i in range(len(steps)-1):
steps[i][1]['type'] = steps[i+1][1]['type']
steps[-1][1]['last'] = True
return steps
def set_value(context, step, current_value, entry_value):
"""
http://www.w3.org/TR/2014/WD-html-json-forms-20140529/#dfn-steps-to-set-a-json-encoding-value
"""
key, flags = step
if flags.get('last', False):
if current_value == undefined:
if flags.get('append', False):
context[key] = [entry_value]
else:
if isinstance(context, list) and len(context) <= key:
context.extend([undefined] * (key - len(context) + 1))
context[key] = entry_value
elif isinstance(current_value, list):
context[key].append(entry_value)
elif isinstance(current_value, dict):
set_value(
current_value, ("", {'last': True}),
current_value.get("", undefined), entry_value)
else:
context[key] = [current_value, entry_value]
return context
else:
if current_value == undefined:
if flags.get('type') == 'array':
context[key] = []
else:
if isinstance(context, list) and len(context) <= key:
context.extend([undefined] * (key - len(context) + 1))
context[key] = {}
return context[key]
elif isinstance(current_value, dict):
return context[key]
elif isinstance(current_value, list):
if flags.get('type') == 'array':
return current_value
else:
obj = {}
for i, item in enumerate(current_value):
if item != undefined:
obj[i] = item
else:
context[key] = obj
return obj
else:
obj = {"": current_value}
context[key] = obj
return obj
def encode(pairs):
"""
The application/json form encoding algorithm.
http://www.w3.org/TR/2014/WD-html-json-forms-20140529/#the-application-json-encoding-algorithm
"""
result = {}
for key, value in pairs:
steps = parse_path(key)
context = result
for step in steps:
try:
current_value = context.get(step[0], undefined)
except AttributeError:
try:
current_value = context[step[0]]
except IndexError:
current_value = undefined
context = set_value(context, step, current_value, value)
return result
```
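As a quick illustration of the algorithm above, the sketch below feeds a few form-style pairs through `encode` and dumps the result with `JSONFormEncoder`; the field names are invented for the example.
```python
# Illustrative only: bracketed field names become nested objects/arrays,
# following the W3C application/json form encoding steps implemented above.
import json

pairs = [
    ("pet[species]", "Dahut"),
    ("pet[name]", "Hypatia"),
    ("kids[1]", "Thelma"),
    ("kids[0]", "Ashley"),
    ("highlights[]", "whistling"),
]
print(json.dumps(encode(pairs), cls=JSONFormEncoder, indent=2))
# -> {"pet": {"species": "Dahut", "name": "Hypatia"},
#     "kids": ["Ashley", "Thelma"],
#     "highlights": ["whistling"]}
```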
|
{
"source": "JD-P/library-project",
"score": 3
}
|
#### File: library-project/cgi/book-query.py
```python
import sys
import os
import sqlite3
import codecs
import cgi
def main():
rows = run_query()
if rows is False:
query_failure()
return False
else:
query_response(rows)
return True
def run_query():
"""Run the queries against the sqlite backend and return the resulting rows."""
query = cgi.FieldStorage()
search_by = query.getvalue("searchby")
search_type = query.getvalue("searchtype")
# Convert search type to sql statement chunk
if search_type == 'exact':
search_type = '=?'
else:
search_type = " like ?"
# Prepend and append bits to search statement based on search type
if search_type == " like ?":
try:
search = "%" + query.getvalue("search") + "%"
except TypeError:
return False
else:
search = query.getvalue("search")
books_db_path = os.path.join(
os.path.split(
os.path.abspath(__file__))[0],
"books.sqlite")
books_db = sqlite3.connect(books_db_path)
query_cursor = books_db.cursor()
search_columns = ["title", "author", "summary",
"publisher", "dewey", "locc"]
partial_sql = ("select key, title, author, pubtext, edition,"
" availability from books where ")
# Handle special cases
if search_by == 'subject':
query_cursor.execute("select * from books;")
results = query_cursor.fetchall()
matching_rows = []
for row in results:
subjects = row[7].split(",")
for subject in subjects:
if search_type == " like ?":
if search in subject:
matching_rows.append(row)
else:
pass
else:
if search == subject:
matching_rows.append(row)
else:
pass
return matching_rows
elif search_by == 'isbn':
query_cursor.execute(partial_sql + "isbn10" + search_type + ";", (search,))
intermediate_results = query_cursor.fetchall()
query_cursor.execute(partial_sql + "isbn13" + search_type + ";", (search,))
return intermediate_results + query_cursor.fetchall()
elif search_by == 'availability':
if search[0] == "%":
            search = search[1:-1]
else:
pass
if search == '0' or search == 'false' or search == 'False':
query_cursor.execute(partial_sql + "availability=0;")
return query_cursor.fetchall()
elif search == '1' or search == 'true' or search == 'True':
query_cursor.execute(partial_sql + "availability=1;")
return query_cursor.fetchall()
else:
return False
# Handle all other 'generic' cases.
elif search_by in search_columns:
query_cursor.execute(partial_sql + search_by + search_type + ";", (search,))
return query_cursor.fetchall()
else:
return False
def query_response(rows):
writer = codecs.getwriter('utf-8')(sys.stdout.buffer)
html_start_block = (
"""<html>
<head>
<link href="/css/query.css" rel="stylesheet">
<title> List of Books in A&T Library Found By Your Query </title>
<meta charset="utf-8">
</head>
<body>
<table>
<tr> <td>id</td> <td>Title</td> <td>Author</td>
<td>Publish Info</td> <td> Edition </td> <td> Availability </td>
</tr>
""")
html_rows= [["<tr>"] +
["<td>" + str(column_data) + "</td>" for column_data in row] +
["</tr>"] for row in rows]
for html_row in html_rows:
item_id = html_row[1][4:-5]
html_row[1] = ('<td>' '<a href="/cgi/book_lookup.py?id=' + item_id + '">' +
item_id + '</a>' + '</td>')
html_start_block += ''.join(html_row)
html_end_block = (
""" </table>
</body>
</html>
"""
)
html_start_block += html_end_block
#HTTP Headers
writer.write("Content-Type: text/html; charset=utf-8" + '\r\n')
writer.write("Content-Length: " + str(len(html_start_block)) + '\r\n')
    # Separator between header and HTML
writer.write('\r\n')
#HTML
writer.write(html_start_block)
def query_failure():
"""Give an HTTP response indicating the query failed."""
writer = codecs.getwriter('utf-8')(sys.stdout.buffer)
response_html_dir = os.path.join(
os.path.split(
os.path.split(
os.path.abspath(__file__))[0])[0],
"query_failure.html")
response_html = open(response_html_dir).read()
#HTTP Headers
writer.write("Content-Type: text/html; charset=utf-8" + '\r\n')
writer.write("Content-Length: " + str(len(response_html)) + '\r\n')
    # Separator between header and HTML
writer.write('\r\n')
#HTML
writer.write(response_html)
main()
```
|
{
"source": "jdp/pyrebase",
"score": 2
}
|
#### File: jdp/pyrebase/test_pyrebase.py
```python
import os.path
import random
import string
import pytest
import pyrebase
def test_wrap_mapping():
data = 1
wrapped = pyrebase.wrap_mapping(data)
assert wrapped['.value'] == 1
data = {'foo': 'bar'}
wrapped = pyrebase.wrap_mapping(data)
assert '.value' not in wrapped
assert data == wrapped
def test_ref():
f = pyrebase.Firebase('https://pyrebase.firebaseIO.com')
assert f.ref == 'https://pyrebase.firebaseIO.com/'
f = pyrebase.Firebase('https://pyrebase.firebaseIO.com/')
assert f.ref == 'https://pyrebase.firebaseIO.com/'
class MockTransport(object):
def __init__(self):
self.mapping = {}
def normalize(self, ref):
if not ref.endswith('/'):
return ref + '/'
return ref
def get(self, ref, params):
try:
data = self.mapping[self.normalize(ref)]
except KeyError:
return
return data
def set(self, ref, params, data):
if pyrebase.is_mapping(data):
if '.priority' in data:
priority_ref = os.path.join(os.path.dirname(self.normalize(ref)), '.priority')
priority = data.pop('.priority')
self.set(priority_ref, params, priority)
if '.value' in data:
data = data['.value']
self.mapping[self.normalize(ref)] = data
return data
def push(self, ref, params, data):
name = ''.join(random.choice(string.ascii_lowercase) for _ in range(16))
pushed_ref = os.path.join(os.path.dirname(ref), name)
self.set(pushed_ref, params, data)
return {'name': name}
def update(self, ref, params, data):
self.mapping[self.normalize(ref)].update(data)
return data
def remove(self, ref, params):
del self.mapping[self.normalize(ref)]
@pytest.fixture(params=[MockTransport()])
def firebase(request):
import pyrebase
return pyrebase.Firebase('https://pyrebase.firebaseIO.com/', transport=request.param)
def test_child(firebase):
c = firebase.child('-Izjg-FkP7eXLa1EXVAi')
assert c.ref == 'https://pyrebase.firebaseIO.com/-Izjg-FkP7eXLa1EXVAi/'
c = firebase.child('foo/bar')
assert c.ref == 'https://pyrebase.firebaseIO.com/foo/bar/'
c = firebase.child('.')
assert c.ref == 'https://pyrebase.firebaseIO.com/'
c = firebase.child('..')
assert c.ref == 'https://pyrebase.firebaseIO.com/'
c = firebase.child('foo/bar/..')
assert c.ref == 'https://pyrebase.firebaseIO.com/foo/'
c = firebase.child('foo/../bar')
assert c.ref == 'https://pyrebase.firebaseIO.com/bar/'
def test_child_priority(firebase):
c = firebase.child('-Izjg-FkP7eXLa1EXVAi').child('.priority')
assert c.ref == 'https://pyrebase.firebaseIO.com/-Izjg-FkP7eXLa1EXVAi/.priority/'
def test_nested_child(firebase):
c = firebase.child('-Izjg-FkP7eXLa1EXVAi').child('-Izjh72mPJj7xJm0e4kQ')
assert c.ref == 'https://pyrebase.firebaseIO.com/-Izjg-FkP7eXLa1EXVAi/-Izjh72mPJj7xJm0e4kQ/'
c = firebase.child('-Izjg-FkP7eXLa1EXVAi/-Izjh72mPJj7xJm0e4kQ')
assert c.ref == 'https://pyrebase.firebaseIO.com/-Izjg-FkP7eXLa1EXVAi/-Izjh72mPJj7xJm0e4kQ/'
def test_parent(firebase):
assert firebase.ref == firebase.parent.ref
child = firebase.child('-Izjg-FkP7eXLa1EXVAi/-Izjh72mPJj7xJm0e4kQ')
parent = child.parent
assert parent.ref == 'https://pyrebase.firebaseIO.com/-Izjg-FkP7eXLa1EXVAi/'
root = parent.parent
assert root.ref == 'https://pyrebase.firebaseIO.com/'
def test_root(firebase):
c = firebase.child('-Izjg-FkP7eXLa1EXVAi/-Izjh72mPJj7xJm0e4kQ')
assert c.root.ref == firebase.ref
def test_prepare_data(firebase):
simple_payload = 'foo'
prepared = firebase.prepare_data(simple_payload, None)
assert prepared == simple_payload
prepared = firebase.prepare_data(simple_payload, 1)
assert prepared['.value'] == 'foo'
assert prepared['.priority'] == 1.0
complex_payload = {'foo': 'bar'}
prepared = firebase.prepare_data(complex_payload, None)
assert prepared == complex_payload
assert '.value' not in prepared
prepared = firebase.prepare_data(complex_payload, 2)
assert '.value' not in prepared
assert prepared['foo'] == 'bar'
assert prepared['.priority'] == 2.0
def test_set(firebase):
assert firebase.set(True) == True
assert firebase.child('bar').set('foo') == 'foo'
assert firebase.set([1, 2, 3]) == [1, 2, 3]
assert firebase.set({'foo': 'bar'}) == {'foo': 'bar'}
assert firebase.set('foo', priority=5) == 'foo'
def test_get(firebase):
firebase.set('foo')
assert firebase.get() == 'foo'
def test_set_priority(firebase):
firebase.set('foo')
assert firebase.set_priority(5) == 5
def test_get_priority(firebase):
firebase.set('foo', priority=5)
assert firebase.get_priority() == 5
def test_push(firebase):
c = firebase.push('foo')
assert c.ref != firebase.ref
assert c.get() == 'foo'
c = firebase.push('bar', priority=3)
assert c.get() == 'bar'
assert c.get_priority() == 3
def test_update(firebase):
firebase.set({'foo': 'bar'})
assert firebase.get() == {'foo': 'bar'}
assert firebase.update({'baz': 'quux'}) == {'baz': 'quux'}
assert firebase.get() == {'foo': 'bar', 'baz': 'quux'}
def test_remove(firebase):
c = firebase.push('foo')
c.remove()
assert c.get() is None
```
|
{
"source": "JD-P/sample-log-converter",
"score": 3
}
|
#### File: sample-log-converter/converters/energymech.py
```python
import os
import time
import datetime
import string
from utilities.time2seconds import time2seconds
def convert(directory, output_handler=None, utc_offset=None):
"""Run the energymech log converters conversion function."""
return energymech_converter.energymech_conv(directory, output_handler)
class energymech_converter():
"""Convert an energymech log file to python datastructures."""
def energymech_conv(directory, output_handler):
"""Convert a set of log files in energymech format to JSON or SQLite."""
if not os.path.isdir(directory):
raise ValueError(directory + " is not a directory.")
files = os.listdir(directory)
files.sort()
if output_handler:
output_handler.begin()
else:
logs = {}
day = []
total_lines = 0
for filename in files:
datestring = filename.split("_")[2].split(".")[0] #See "Misc 0" in project file.
logdate = time.strptime(datestring, "%Y%m%d")
date_timestamp = int(time.mktime(logdate))
logpath = os.path.join(directory, filename)
log_lines = energymech_converter.energymech_parse(logpath, total_lines)
if output_handler:
output_handler.write_day({date_timestamp:log_lines[1]})
else:
logs[date_timestamp] = log_lines[1]
total_lines += log_lines[0]
if output_handler:
output_handler.close()
else:
return (logs)
def energymech_parse(filepath, total_lines):
"""Take a single log file in energymech format and parse it into a python datastructure."""
log = open(filepath, 'r', encoding="latin-1")
lines = log.readlines()
tokenlist = []
lines_in_file = 0
for line in lines:
type_parse = line.split(" ", 2) # Temporary three token space parse to determine type of line.
            space_parse = line.split(" ") # Turns every space separation into a token, doesn't handle closures such as closed parentheses intelligently.
timestamp = time2seconds(type_parse[0][1:-1])
line_id = total_lines + lines_in_file
if type_parse[1] != "***" and type_parse[1][0] == "<":
(nickname, message) = (type_parse[1][1:-1], type_parse[2][:-1])
tokenlist.append([line_id, "PRIVMSG", timestamp, nickname, message])
elif type_parse[1] == "*":
me_elements = line.split(" ", 3)
(nickname, message) = (me_elements[2], "/me " + me_elements[3][:-1])
tokenlist.append([line_id, "PRIVMSG", timestamp, nickname, message])
elif ''.join((type_parse[1][0], type_parse[1][-1])) == "--":
(nickname, message) = (type_parse[1][1:-1], type_parse[2][:-1])
tokenlist.append([line_id, "NOTICE", timestamp, nickname, message])
elif space_parse[2] == "Joins:":
(nickname, hostname) = (space_parse[3], space_parse[4][1:-2])
tokenlist.append([line_id, "JOIN", timestamp, nickname, hostname])
elif space_parse[2] == "Parts:":
part_elements = line.split(" ", 5)[3:]
(nickname, hostname, part_message) = (part_elements[0], part_elements[1][1:-1], part_elements[2][1:-2])
tokenlist.append([line_id, "PART", timestamp, nickname, hostname, part_message])
elif space_parse[2] == "Quits:":
quit_elements = line.split(" ", 5)[3:]
(nickname, hostname, quit_message) = (quit_elements[0], quit_elements[1][1:-1], quit_elements[2][1:-2])
tokenlist.append([line_id, "QUIT", timestamp, nickname, hostname, quit_message])
elif ''.join(space_parse[3:5]) == "waskicked":
(nick_kicked, kicked_by, kick_message) = (space_parse[2], space_parse[6], space_parse[7][1:-2])
tokenlist.append([line_id, "KICK", timestamp, nick_kicked, kicked_by, kick_message])
elif ''.join(space_parse[4:7]) == "nowknownas":
(nick_before, nick_after) = (space_parse[2], space_parse[-1][:-1])
tokenlist.append([line_id, "NICK", timestamp, nick_before, nick_after])
elif ''.join(space_parse[3:5]) == "setsmode:":
setmode_elements = line.split(" ", 5)
(set_by, mode_string) = (setmode_elements[2], setmode_elements[5][:-1])
tokenlist.append([line_id, "SETMODE", timestamp, set_by, mode_string])
elif ''.join(space_parse[3:5]) == "changestopic":
topic_element = line.split(" ", 6)[6]
(changed_by, topic) = (space_parse[2], topic_element[:-1])
tokenlist.append([line_id, "TOPIC", timestamp, changed_by, topic])
lines_in_file += 1
return (lines_in_file, tokenlist)
```
#### File: sample-log-converter/converters/toykeeper.py
```python
import json
import time
from datetime import timedelta
from calendar import timegm
from utilities.time2seconds import time2seconds
def convert(filepath, output_handler=None, utc_offset=None):
"""Run ToyKeeperConverter's conversion function and return the result."""
return ToyKeeperConverter.toykeeper_conv(ToyKeeperConverter,filepath, output_handler, utc_offset)
class ToyKeeperConverter():
"""Converts a custom log format of the form iso standard date, nick and message to json or sqlite."""
def toykeeper_conv(cls, filepath, output_handler, utc_offset):
if output_handler:
output_handler.begin()
else:
logs = {}
def process_line(line_id, line, offset):
components = (lambda space_split : (space_split[0], space_split[1],
space_split[2].split("\t")[0],
space_split[2].split("\t")[1])) \
(line.split(" ", 2))
(date, timestamp, hostmask, contents) = (components[0], components[1],
components[2], components[3])
line_type = cls.toykeeper_linetype(hostmask, contents)
(offset_timestamp, offset_datestamp) = cls.calculate_offset(date, timestamp, offset)
            converted_line = cls.construct(cls, line_id, line_type, offset_timestamp, hostmask, contents)
return {"converted_line":converted_line, "offset_datestamp":offset_datestamp,
"date":date, "timestamp":timestamp, "hostmask":hostmask, "contents":contents}
def shift_day(day,current_date) :
if output_handler:
output_handler.write_day({current_date:day})
else:
logs[current_date] = list(day)
day = []
logfile = open(filepath, 'r', encoding='latin-1')
current_date = int(time.mktime(time.strptime(logfile.readline().split(" ")[0], "%Y-%m-%d")))
        day = []
        line_id = 0
for line in logfile:
line_elements = process_line(line_id, line, utc_offset)
day.append(line_elements["converted_line"])
if int(line_elements["offset_datestamp"]) > current_date:
shift_day(day,current_date)
current_date = int(line_elements["offset_datestamp"])
day = []
line_id += 1
#Final shift_day to account for no date change at the end.
shift_day(day,current_date)
if output_handler:
output_handler.close()
else:
return logs
def toykeeper_linetype(hostmask, contents):
"""Classify lines according to their contents and return a dictionary of the
form {line_id:line_type...}
Keyword arguments:
hostmask | The hostmask given as the source for the line.
contents | The actual message of the line.
"""
line_types = {}
if hostmask[0] + hostmask[-1] == "<>":
line_type = "PRIVMSG"
elif hostmask[0] + hostmask[-1] == "[]" and contents[0:6] == "ACTION":
line_type = "ACTION"
elif hostmask[0] + hostmask[-1] == "--":
content_split = contents.split(" ")
if hostmask == "-dircproxy-":
if contents == "You connected\n":
line_type = "CONNECTED"
elif contents == "You disconnected\n":
line_type = "DISCONNECTED"
elif contents == "You joined the channel\n":
line_type = "JOINED"
"""Notices use '--' to denote themselves and have no distinguishing marks besides,
we start by filtering out those with lengths too short for the
other tests.
"""
if len(content_split) < 3:
line_type = "NOTICE"
elif content_split[2] == "joined":
line_type = "JOIN"
elif content_split[2] == "left":
line_type = "PART"
elif content_split[1] + content_split[2] == "kickedoff":
line_type = "KICK"
elif content_split[0][0] + content_split[0][-1] == "[]":
line_type = "NOTICE"
try:
if (content_split[1] + content_split[2] == "changedmode:" or
content_split[2] + content_split[3] == "changedmode:"):
line_type = "SETMODE"
elif (content_split[1] + content_split[2] == "changedtopic:" or
content_split[2] + content_split[3] == "changedtopic:"):
line_type = "TOPIC"
except IndexError:
line_type = "NOTICE"
if contents[0:4] == "CTCP":
line_type = "CTCP"
else:
line_type = "NOTICE"
return line_type
    def construct(cls, line_id, line_type, time_, hostmask, contents):
"""Construct a line suitable for output in line with the generic python format energymech log converter uses."""
def type_is(linetype): return (line_type == linetype)
universal = (line_id, line_type, time_)
contents = contents[:-1] # Strip trailing newlines
hostmask = hostmask[1:-1] # Strip buffer characters '<>', '[]', '--'
if type_is("PRIVMSG") or type_is("ACTION") or type_is("NOTICE") or type_is("CTCP"):
return universal + (hostmask, contents)
content_split = contents.split(" ")
if type_is("JOIN") or type_is("PART"):
userhost = cls.strip_hostmask(content_split[1])
(nick, user, hostname) = (content_split[0], userhost[0], userhost[1])
hostmask = cls.construct_hostmask(nick, user, hostname)
return universal + (hostmask,)
elif type_is("KICK"):
kick_split = contents.split(" ", 6)
userhost = cls.strip_hostmask(kick_split[5])
(nick, user, hostname) = (kick_split[4], userhost[0], userhost[1])
hostmask = cls.construct_hostmask(nick, user, hostname)
(nick_kicked, kick_message) = (kick_split[0], kick_split[6])
return universal + (nick_kicked, hostmask, kick_message)
elif type_is("SETMODE"):
setmode_split = contents.split(" ", 3) # The size of setmode varies so we assume it's the shorter version to avoid a ValueError
if setmode_split[2] == "mode:":
(set_by, mode_string) = (setmode_split[0], setmode_split[3])
return universal + (set_by, mode_string)
elif setmode_split[3] == "mode:":
setmode_split = contents.split(" ", 4)
userhost = cls.strip_hostmask(setmode_split[1])
nick = setmode_split[0]
                user = userhost[0]
                hostname = userhost[1]
set_by = cls.construct_hostmask(nick, user, hostname)
return universal + (set_by, setmode_split[4])
elif type_is("TOPIC"):
topic_split = contents.split(" ", 3) # The size of topicsplit varies so we assume it's the shorter version to avoid a ValueError
if topic_split[2] == "topic:":
(changed_by, topic) = (topic_split[0], topic_split[3])
return universal + (changed_by, topic)
elif topic_split[3] == "topic:":
topic_split = contents.split(" ", 4)
userhost = cls.strip_hostmask(topic_split[1])
(nick, user, hostname, topic) = (topic_split[0], userhost[0],
userhost[1], topic_split[4])
changed_by = cls.construct_hostmask(nick, user, hostname)
return universal + (changed_by, topic)
elif type_is("JOINED") or type_is("CONNECTED") or type_is("DISCONNECTED"):
return universal
else:
raise ValueError("Given type was not in the types of message handled by the toykeeper converter.")
    def strip_hostmask(userhost):
"""Strip characters from a set of hostmask components to prepare them for processing by construct_hostmask. (USER@HOSTNAME) OR (~USER@HOSTNAME)"""
userhost_components = userhost[1:-1].split("@")
if userhost_components[0][0] == "~":
return (userhost_components[0][1:], userhost_components[1])
else:
return (userhost_components[0], userhost_components[1])
def construct_hostmask(nick, user, hostname):
"""Takes a nick,user,hostname combo and constructs a string representing it like such: [email protected]"""
return nick + "!~" + user + "@" + "hostname"
def calculate_offset(date, time_, offset):
"""Take date, time and offset and calculate the final time from UTC.
Keyword arguments:
date: A string representing the date in %Y-%m-%d format.
time: A string representing the time that can be accepted by time2seconds
offset: A timedelta representing the UTC offset.
"""
timestamp = int(time.mktime(time.strptime(date, "%Y-%m-%d"))) + time2seconds(time_)
if offset == None:
return (time2seconds(time_), int(time.mktime(time.strptime(date, "%Y-%m-%d"))))
else:
try:
offset_timestamp = timestamp - ((int(offset)) * (60 ** 2))
except ValueError:
raise ValueError("Offset" + str(offset) + " was not a properly formatted UTC offset.")
offset_gmtime = time.gmtime(offset_timestamp)
time_ = time2seconds(time.strftime("%H:%M:%S", offset_gmtime))
date = offset_gmtime[:3] + (0,0,0) + offset_gmtime[6:9]
datestamp = timegm(date)
return (time_, datestamp)
```
#### File: JD-P/sample-log-converter/energymech-log-converter.py
```python
import sys
import argparse
import json
import sqlite3
import importlib
class BufferedOutputHandler():
""" Template output buffer.
Implementations should all handle 'day' updates. """
def __init__(self,filepath):
        raise NotImplementedError("Don't initialise the BufferedOutputHandler directly, but one of its implementations.")
def begin(self):
"""Performs a routine that starts the output process."""
pass
def write_day(self,day):
"""Writes a single days worth of logs to the output."""
pass
def close(self):
"""Properly exits and handles finishing touches."""
pass
class BufferedSqliteOutputHandler(BufferedOutputHandler):
""" To be implemented.. """
def __init__(self, filepath):
self.conn = sqlite3.connect(filepath)
    def _create_table(self, table_def):
        self.cur.execute("CREATE TABLE IF NOT EXISTS {}".format(table_def))
def begin(self):
self.cur = self.conn.cursor()
self.cur.execute("PRAGMA foreign_keys = ON;")
# If two things are equivalent then they determine the same things.
self._create_table("nicks(id INTEGER PRIMARY KEY, nickname TEXT UNIQUE)")
self._create_table("users(id INTEGER PRIMARY KEY, username TEXT UNIQUE)")
self._create_table("client_hosts(id INTEGER PRIMARY KEY, hostname TEXT UNIQUE)")
self._create_table("servers(id INTEGER PRIMARY KEY, hostname TEXT UNIQUE)")
self._create_table("channels(id INTEGER PRIMARY KEY, nwid INTEGER, channel"
" TEXT, FOREIGN KEY(nwid) REFERENCES networks(nwid),"
" UNIQUE (nwid, channel))")
self._create_table("msg_types(id INTEGER PRIMARY KEY, type TEXT UNIQUE)")
self._create_table("networks(server INTEGER PRIMARY KEY, nwid INTEGER"
" UNIQUE, nw_name TEXT UNIQUE, FOREIGN KEY(server)"
" REFERENCES servers(id))")
self._create_table("hostmasks(id INTEGER PRIMARY KEY, nwid INTEGER, nid INTEGER, uid INTEGER,"
" hid INTEGER, FOREIGN KEY(nwid) REFERENCES networks(nwid),"
" FOREIGN KEY(nid) REFERENCES nicks(id), FOREIGN KEY(uid)"
" REFERENCES users(id), FOREIGN KEY(hid) REFERENCES"
" client_hosts(id), UNIQUE (nwid, nid, uid, hid))")
self._create_table("registered(nwid INTEGER, nid INTEGER, time_of INTEGER,"
" account INTEGER, FOREIGN KEY(nwid) REFERENCES networks(nwid),"
" FOREIGN KEY(nid) REFERENCES nicks(id), FOREIGN"
" KEY(account) REFERENCES nicks(id), PRIMARY KEY"
" (nwid, nid, time_of))")
self._create_table("messages(id INTEGER PRIMARY KEY, timestamp TEXT, channel"
" INTEGER, type TEXT, FOREIGN KEY(channel) REFERENCES"
" channels(id), FOREIGN KEY(type) REFERENCES msg_types(id))")
self._create_table("privmsgs(mid INTEGER PRIMARY KEY, nid INTEGER, message TEXT,"
" FOREIGN KEY(mid) REFERENCES messages(id), FOREIGN"
" KEY(nid) REFERENCES nicks(id))")
        self._create_table("actions(mid INTEGER PRIMARY KEY, nid INTEGER, message TEXT,"
" FOREIGN KEY(mid) REFERENCES messages(id), FOREIGN"
" KEY(nid) REFERENCES nicks(id))")
self._create_table("notices(mid INTEGER PRIMARY KEY, nid INTEGER, message TEXT,"
" FOREIGN KEY(mid) REFERENCES messages(id), FOREIGN"
" KEY(nid) REFERENCES nicks(id))")
self._create_table("joins(mid INTEGER PRIMARY KEY, hostmask INTEGER,"
" FOREIGN KEY(mid) REFERENCES messages(id), FOREIGN"
" KEY(hostmask) REFERENCES hostmasks(id))")
self._create_table("parts(mid INTEGER PRIMARY KEY, hostmask INTEGER,"
" part_message TEXT, FOREIGN KEY(mid) REFERENCES"
" messages(id), FOREIGN KEY(hostmask) REFERENCES"
" hostmasks(id))")
self._create_table("quits(mid INTEGER PRIMARY KEY, hostmask INTEGER,"
" quit_message TEXT, FOREIGN KEY(mid) REFERENCES"
" messages(id), FOREIGN KEY(hostmask) REFERENCES"
" hostmasks(id))")
self._create_table("kicks(mid INTEGER PRIMARY KEY, nid_kicked INTEGER, nid_kicked_by"
" INTEGER, kick_message TEXT, FOREIGN KEY(mid) REFERENCES"
" messages(id), FOREIGN KEY(nid_kicked) REFERENCES"
" nicks(id), FOREIGN KEY(nid_kicked_by) REFERENCES"
" nicks(id))")
self._create_table("nick_changes(mid INTEGER PRIMARY KEY, nid_before INTEGER,"
" nid_after INTEGER, FOREIGN KEY(mid) REFERENCES messages(id),"
" FOREIGN KEY(nid_before) REFERENCES nicks(id), FOREIGN"
" KEY(nid_after) REFERENCES nicks(id))")
self._create_table("set_modes(mid INTEGER PRIMARY KEY, nid_set_by INTEGER,"
" mode_string TEXT, FOREIGN KEY(mid) REFERENCES"
" messages(id), FOREIGN KEY(nid_set_by) REFERENCES"
" nicks(id))")
self._create_table("set_topic(mid INTEGER PRIMARY KEY, nid_set_by INTEGER,"
" topic TEXT, FOREIGN KEY(mid) REFERENCES messages(id),"
" FOREIGN KEY(nid_set_by) REFERENCES nicks(id))")
def write_day(self, day):
"""
Accepts a day dictionary with predefined attributes.
Keyword Arguments:
day:
A list with dictionaries of different contents depending on its
"type" attribute.
Types:
PRIVMSG:
[type]: The type of message.
[id]: unique integer identifying the message
                    [timestamp]: A Unix timestamp for the message.
[channel]: The name of the channel the message originated from.
[nickname]: Nickname of the client that sent the message.
[message]: The text of the message sent.
ACTION:
[type]: The type of message.
[id]: unique integer identifying the message
                    [timestamp]: A Unix timestamp for the message.
[channel]: The name of the channel the message originated from.
[nickname]: Nickname of the client that sent the message.
[message]: The text of the message sent.
NOTICE:
[type]: The type of message.
[id]: unique integer identifying the message
                    [timestamp]: A Unix timestamp for the message.
[channel]: The name of the channel the message originated from.
[nickname]: Nickname of the client that sent the message.
[message]: The text of the message sent.
JOIN:
[type]: The type of message.
[id]: unique integer identifying the message
                    [timestamp]: A Unix timestamp for the message.
[channel]: The name of the channel the message originated from.
[hostmask]: The hostmask of the client that joined.
PART:
[type]: The type of message.
[id]: unique integer identifying the message
                    [timestamp]: A Unix timestamp for the message.
[channel]: The name of the channel the message originated from.
[hostmask]: The hostmask of the client that parted.
[part_message]: The part message given by the client.
QUIT:
[type]: The type of message.
[id]: unique integer identifying the message
[timpstamp]: A Unix timestamp for the message.
[channel]: The name of the channel the message originated from.
[hostmask]: The hostmask of the client that quit.
[quit_message]: The quit message given by the client.
KICK:
[type]: The type of message.
[id]: unique integer identifying the message
                    [timestamp]: A Unix timestamp for the message.
[channel]: The name of the channel the message originated from.
[kicked]: The hostmask of the client that was kicked.
[kicked_by]: The hostmask of the client that kicked user.
[kick_message]: The kick message given by the kicking client.
NICK:
[type]: The type of message.
[id]: unique integer identifying the message
                    [timestamp]: A Unix timestamp for the message.
[channel]: The name of the channel the message originated from.
[before]: The clients nick before the change.
[after]: The clients nick after the change.
MODE:
[type]: The type of message.
[id]: unique integer identifying the message
                    [timestamp]: A Unix timestamp for the message.
[channel]: The name of the channel the message originated from.
[set_by]: The nick of the client that set the mode.
[mode]: The string of the mode that was set by the client.
TOPIC:
[type]: The type of message.
[id]: unique integer identifying the message
                    [timestamp]: A Unix timestamp for the message.
[channel]: The name of the channel the message originated from.
[changed_by]: The nick of the client that changed the topic.
[topic]: The text of the new topic.
The following should be normalized to lowercase before insertion
into the database:
- Nicknames
- Channel Names
- Hosts in hostmasks
"""
cur = self.conn.cursor()
        date = list(day.keys())[0]
messages = day[date]
for message in messages:
line_id = message[0]
line_type = message[1]
time = message[2]
def close(self):
self.conn.close()
    def insert(self, table, params):
        pass
class BufferedJSONOutputHandler(BufferedOutputHandler):
""" Implements buffered output to JSON of logs being read. """
def __init__(self, filepath):
""" Configure the class with output format, path, etc.
Should probably refactor so that format is handled by subclasses
implementing this interface rather than internal logic. """
if isinstance(filepath,str):
self.outfile = open(filepath,'w',encoding='utf-8')
else:
self.outfile = filepath
self.prevday = False
def begin(self):
""" Handle any initial writes. """
self.outfile.write("{\n")
def write_day(self,day):
""" Writes a day's entry.
Arguments:
day: a dict (usually of one entry) which has a 'datestring':array_of_lines format. """
#strip the object braces.
outstr = json.dumps(day,indent=2)[2:-2]
if self.prevday:
self.outfile.write(',\n')
else:
self.prevday = True
self.outfile.write(outstr)
def close(self):
""" Close the log. """
self.outfile.write("\n}\n")
self.outfile.close()
class conversion_wrapper():
"""Wraps the various converters and imports them all into one unified interface."""
def main():
"""Implement command line interface and call subroutines to handle conversions."""
supported_formats = {'json':BufferedJSONOutputHandler,
'sqlite': BufferedSqliteOutputHandler}
parser = argparse.ArgumentParser()
parser.add_argument("logs", help="The logfile or the directory in which the log files reside.")
parser.add_argument("--output", "-o", help="Filepath to store output at, default is standard output.")
formats_help = """\n\nValid formats are:\n\n\tjson\n\tsqlite"""
parser.add_argument("--oformat", help="The format to output." + formats_help)
parser.add_argument("--timezone", "-t", help="Specify the timezone as a UTC offset. (Not implemented.)")
parser.add_argument("--format", "-f", help="Convert log as <FORMAT>.")
arguments = parser.parse_args()
try:
converter = importlib.import_module("converters." + arguments.format)
except ImportError:
raise ValueError("Given format " + arguments.format.upper() + " has no valid converter.")
if not arguments.oformat:
raise ValueError("Did not specify an output format.")
elif arguments.oformat not in supported_formats:
raise ValueError("Do not recognise output format '{}'".format(arguments.ofile))
elif arguments.oformat == 'sqlite':
raise ValueError("SQL formatting not implemented at this time. Check back for further updates to this software.")
ofile = arguments.output if arguments.output else sys.stdout
output_handler = BufferedJSONOutputHandler(ofile)
try:
converter.convert(arguments.logs, output_handler)
except KeyboardInterrupt as ki:
#Should mean we mostly get valid data out of truncated converts.
output_handler.close()
conversion_wrapper.main()
```
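To make the expected hand-off between converters and output handlers concrete, here is a small sketch that drives `BufferedJSONOutputHandler` directly with a hand-built day in the generic format described in `write_day`'s docstring; the timestamps and nicknames are made up.
```python
# Hypothetical usage sketch: stream one day of converted lines to a JSON file.
# The {unix_date: [line, ...]} layout and the per-line tuples mirror what the
# converters in converters/ produce; all values below are invented.
handler = BufferedJSONOutputHandler("sample_logs.json")
handler.begin()
handler.write_day({
    1400025600: [
        [0, "PRIVMSG", 3723, "alice", "hello world"],
        [1, "JOIN", 3800, "bob", "bob@example.host"],
    ]
})
handler.close()
```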
|
{
"source": "jdputsch/WPE",
"score": 3
}
|
#### File: WPE/J18_12/j18_12.py
```python
import hashlib
import os
class DirFileHash(object):
def __init__(self, path):
if not os.path.isdir(path):
raise ValueError("Path is not a directory: '%'" % path)
else:
self.path = path
def __getitem__(self, key):
try:
with open(os.path.join(self.path, key), 'rb') as file:
return hashlib.md5(file.read()).hexdigest()
except (FileNotFoundError, TypeError):
return None
def main():
d = DirFileHash('/etc/')
print(d['hosts'])
print(d['no_such_file'])
print(d[2])
if __name__ == '__main__':
main()
```
|
{
"source": "jdpy19/HACScorecard",
"score": 3
}
|
#### File: jdpy19/HACScorecard/test_HACData.py
```python
import pandas as pd
import os
def compareExcelFiles(file1, file2):
df1 = pd.read_excel(file1)
df2 = pd.read_excel(file2)
print(df1[df1 != df2])
def main():
path = os.getcwd()
f1 = os.path.join(path,"HACScorecardData\\tableauNHSNData_test.xlsx")
f2 = os.path.join(path,"HACScorecardData\\tableauNHSNData.xlsx")
compareExcelFiles(f1,f2)
if __name__ == "__main__":
main()
#%%
```
|
{
"source": "jdr45/huckel-solver",
"score": 3
}
|
#### File: jdr45/huckel-solver/huckel.py
```python
import numpy as np
import networkx as nx
import sys
from typing import Iterable, Tuple, List
def lin_polyene_n(n: int) -> np.ndarray:
"""
Returns the hamiltonian matrix for a linear poly-ene with n sites. The matrix entries are as per the Huckel
approximation, with alpha = 0 and beta = -1.
"""
if n < 1:
raise ValueError("A linear poly-ene must have at least 1 site.")
m = np.array([[-1 if i == j - 1 or i == j + 1 else 0 for i in range(n)] for j in range(n)])
return m
def cyc_polyene_n(n: int) -> np.ndarray:
"""
Returns the hamiltonian matrix for a cyclic poly-ene with n sites. This is just the same as for a linear poly-ene,
except now the first and last atom are connected.
"""
if n < 3:
raise ValueError("A cyclic poly-ene must have at least 3 sites.")
m = lin_polyene_n(n)
m[0][n-1] = -1
m[n-1][0] = -1
return m
def platonic(n: int) -> np.ndarray:
"""
Returns the hamiltonian matrix corresponding to the (sp2-hybridised) platonic solid with n vertices.
The possible values are n = 4, 6, 8, 12, 20. Use the pre-defined graphs from networkx, as there is no nice way of
generating them algorithmically (they are just definitions, after all).
"""
if n == 4:
g = nx.tetrahedral_graph()
elif n == 6:
g = nx.octahedral_graph()
elif n == 8:
g = nx.cubical_graph()
elif n == 12:
g = nx.icosahedral_graph()
elif n == 20:
g = nx.dodecahedral_graph()
else:
raise ValueError("Unknown platonic solid")
mat = nx.adjacency_matrix(g)
return - mat.todense()
def buckyball() -> np.ndarray:
"""
Return the hamiltonian matrix (in the Huckel approximation) for buckminsterfullerene, C60. Like for the platonic
solids, there's no straightforward way to generate this algorithmically, so just return it hard-coded.
"""
edges = [(0, 2), (0, 48), (0, 59), (1, 3), (1, 9), (1, 58),
(2, 3), (2, 36), (3, 17), (4, 6), (4, 8), (4, 12),
(5, 7), (5, 9), (5, 16), (6, 7), (6, 20), (7, 21),
(8, 9), (8, 56), (10, 11), (10, 12), (10, 20), (11, 27),
(11, 47), (12, 13), (13, 46), (13, 54), (14, 15), (14, 16),
(14, 21), (15, 25), (15, 41), (16, 17), (17, 40), (18, 19),
(18, 20), (18, 26), (19, 21), (19, 24), (22, 23), (22, 31),
(22, 34), (23, 25), (23, 38), (24, 25), (24, 30), (26, 27),
(26, 30), (27, 29), (28, 29), (28, 31), (28, 35), (29, 44),
(30, 31), (32, 34), (32, 39), (32, 50), (33, 35), (33, 45),
(33, 51), (34, 35), (36, 37), (36, 40), (37, 39), (37, 52),
(38, 39), (38, 41), (40, 41), (42, 43), (42, 46), (42, 55),
(43, 45), (43, 53), (44, 45), (44, 47), (46, 47), (48, 49),
(48, 52), (49, 53), (49, 57), (50, 51), (50, 52), (51, 53),
(54, 55), (54, 56), (55, 57), (56, 58), (57, 59), (58, 59)
]
g = nx.Graph()
g.add_edges_from(edges)
return -nx.adjacency_matrix(g).todense()
def get_eigenvalues_with_degeneracies(evals: Iterable[float]) -> List[Tuple[float, int]]:
"""
Given a set of sorted eigenvalues (possibly including degenerate eigenvalues), return a list of
(eigenvalue, degeneracy) pairs, with eigenvalues represented as floats rounded to 3dp.
"""
cur = None
count = 0
result = []
for e in evals:
e = round(e, 3)
if e == cur:
count += 1
else:
if cur is not None:
result.append((cur, count))
cur = e
count = 1
if count > 0:
result.append((cur, count))
return result
def print_eigenvalues(evals: List[Tuple[float, int]]) -> None:
"""
Format and print a sorted (in ascending order of eigenvalue) list of (eigenvalue, degeneracy) pairs as an
energy-level diagram.
"""
max_degen = np.amax([e[1] for e in evals])
line_length = 4 * max_degen - 2
count = 0
for val, degen in reversed(evals):
count += degen
line = ''
spacing = (line_length - (4 * degen - 2)) // 2
line += ' ' * spacing
line += '―― ' * degen
line += ' ' * spacing
if val < 0:
line += '-'
else:
line += ' '
line += str(abs(val))
print(line)
print()
if count == 1:
print("1 orbital.")
else:
print("%d orbitals." % count)
def print_usage() -> None:
print("usage: huckel.py [-l | --linear-polyene] num-sites\n"
" huckel.py [-c | --cyclic-polyene] num-sites\n"
" huckel.py [-p | --platonic] [4 | 6 | 8 | 12 | 20]\n"
" huckel.py [-b | --buckyball]")
def main() -> None:
if len(sys.argv) < 2:
print_usage()
return
matrix = None
arguments = {
'-l': lin_polyene_n, '--linear-polyene': lin_polyene_n,
'-c': cyc_polyene_n, '--cyclic-polyene': cyc_polyene_n,
'-p': platonic, '--platonic': platonic
}
if sys.argv[1] in arguments:
try:
matrix = arguments.get(sys.argv[1])(int(sys.argv[2]))
except (IndexError, ValueError):
print_usage()
return
elif sys.argv[1] == '-b' or sys.argv[1] == '--buckyball':
matrix = buckyball()
if matrix is not None:
print_eigenvalues(get_eigenvalues_with_degeneracies(np.linalg.eigvalsh(matrix)))
else:
print_usage()
if __name__ == '__main__':
main()
```
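As a quick sanity check of the solver, benzene (the cyclic poly-ene with n = 6) should give Huckel energies of ±2 and doubly degenerate ±1 in units of |beta| with alpha = 0; the sketch below reproduces that with the functions defined above.
```python
# Benzene check: expected levels are -2, -1 (x2), +1 (x2), +2 in units of |beta|.
import numpy as np

h = cyc_polyene_n(6)
levels = get_eigenvalues_with_degeneracies(np.linalg.eigvalsh(h))
print(levels)            # [(-2.0, 1), (-1.0, 2), (1.0, 2), (2.0, 1)]
print_eigenvalues(levels)
```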
|
{
"source": "jdrager/mnl-api",
"score": 3
}
|
#### File: venues/tests/test_models.py
```python
from unittest.mock import patch
from django.test import TestCase
from .mocks import mock_geocode
from venues.models import Arena, Venue
class VenuesModelsTestCase(TestCase):
@patch('venues.models.geocode', side_effect=mock_geocode)
def test_venue_creation(self, patch):
arena_1 = Arena.objects.create(name='Viking Ice Arena',
address_line_1='1555 Woodward Heights',
city='Hazel Park', state='MI')
self.assertEqual(str(arena_1), 'Viking Ice Arena')
self.assertEqual(arena_1.full_address,
'1555 E Woodward Heights Blvd Hazel Park Michigan 48030 US')
self.assertEqual(arena_1.coordinates, (42.4694405, -83.0897482))
self.assertEqual(arena_1.formatted_address,
'1555 E Woodward Heights Blvd, Hazel Park, MI 48030, USA')
arena_2 = Arena.objects.create(name='<NAME> Center',
address_line_1='1819 Big Beaver',
city='Troy', state='MI')
self.assertEqual(str(arena_2), 'Troy Sports Center')
self.assertEqual(arena_2.full_address,
'1819 E Big Beaver Rd Troy Michigan 48083 US')
self.assertEqual(arena_2.coordinates, (42.5653534, -83.111801))
self.assertEqual(arena_2.formatted_address,
'1819 E Big Beaver Rd, Troy, MI 48083, USA')
venue = Venue.objects.create(name='House of Shamrocks',
address_line_1='23420 John R',
city='Hazel Park', state='MI')
self.assertEqual(str(venue), 'House of Shamrocks')
self.assertEqual(venue.full_address,
'23420 John R Rd Hazel Park Michigan 48030 US')
self.assertEqual(venue.coordinates, (42.464645, -83.1037792))
self.assertEqual(venue.formatted_address,
'23420 John R Rd, Hazel Park, MI 48030, USA')
@patch('venues.models.geocode', side_effect=mock_geocode)
def test_change_to_address_reruns_geocode(self, patch):
arena = Arena.objects.create(name='Little Caesars Arena')
self.assertIsNone(arena.coordinates)
self.assertIsNone(arena.formatted_address)
arena.address_line_1 = '2645 Woodward'
arena.city = 'Detroit'
arena.state = 'MI'
arena.save()
self.assertEqual(arena.coordinates, (42.3410478, -83.0551629))
self.assertEqual(arena.formatted_address, '2645 Woodward Ave, Detroit, MI 48201, USA')
@patch('venues.models.geocode', side_effect=mock_geocode)
def test_missing_coordinates_or_formatted_address_reruns_geocode(self, patch):
arena = Arena.objects.create(name='Little Caesars Arena',
address_line_1='2645 Woodward',
city='Detroit', state='MI')
self.assertEqual(arena.coordinates, (42.3410478, -83.0551629))
self.assertEqual(arena.formatted_address, '2645 Woodward Ave, Detroit, MI 48201, USA')
arena.lat = None
arena.lng = None
arena.save()
self.assertEqual(arena.coordinates, (42.3410478, -83.0551629))
arena.formatted_address = None
arena.save()
self.assertEqual(arena.formatted_address, '2645 Woodward Ave, Detroit, MI 48201, USA')
```
|
{
"source": "JDragons/APK_digger",
"score": 2
}
|
#### File: APK_digger/tools/AnalyzerContext.py
```python
import imp,base64
from . import *
from django.utils import timezone
class AnalyzerContext(object):
def __init__(self,writer, args):
self.args = args
self.efficientStringSearchEngine = EfficientStringSearchEngine()
self.filteringEngine = FilteringEngine(ENABLE_EXCLUDE_CLASSES, STR_REGEXP_TYPE_EXCLUDE_CLASSES)
self.isUsingSQLCipher = False
self.isMasterKeyVulnerability = False
self.writer = writer
if args.line_max_output_characters is None:
if platform.system().lower() == "windows":
args.line_max_output_characters = LINE_MAX_OUTPUT_CHARACTERS_WINDOWS - LINE_MAX_OUTPUT_INDENT
else:
args.line_max_output_characters = LINE_MAX_OUTPUT_CHARACTERS_LINUX - LINE_MAX_OUTPUT_INDENT
if not os.path.isdir(args.report_output_dir):
os.mkdir(args.report_output_dir)
self.writer.writeInf_ForceNoPrint("analyze_mode", args.analyze_mode)
self.writer.writeInf_ForceNoPrint("analyze_engine_build", args.analyze_engine_build)
self.writer.writeInf_ForceNoPrint("analyze_tag", args.analyze_tag)
APK_FILE_NAME_STRING = DIRECTORY_APK_FILES + args.apk_file
apk_Path = APK_FILE_NAME_STRING # + ".apk"
if (".." in args.apk_file):
raise ExpectedException("apk_file_name_slash_twodots_error",
"APK file name should not contain slash(/) or two dots(..) (File: " + apk_Path + ").")
if not os.path.isfile(apk_Path):
raise ExpectedException("apk_file_not_exist", "APK file not exist (File: " + apk_Path + ").")
if args.store_analysis_result_in_db:
try:
imp.find_module('pymongo')
found_pymongo_lib = True
except ImportError:
found_pymongo_lib = False
if not found_pymongo_lib:
pass
apk_filepath_absolute = os.path.abspath(apk_Path)
#writer.writeInf_ForceNoPrint("apk_filepath_relative", apk_filepath_relative)
self.writer.writeInf_ForceNoPrint("apk_filepath_absolute", apk_filepath_absolute)
apk_file_size = float(os.path.getsize(apk_filepath_absolute)) / (1024 * 1024)
self.writer.writeInf_ForceNoPrint("apk_file_size", apk_file_size)
self.writer.update_analyze_status("loading_apk")
self.writer.writeInf_ForceNoPrint("time_starting_analyze", datetime.utcnow())
self.a = apk.APK(apk_Path)
self.writer.update_analyze_status("starting_apk")
package_name = self.a.get_package()
if isNullOrEmptyString(package_name, True):
raise ExpectedException("package_name_empty", "Package name is empty (File: " + apk_Path + ").")
self.writer.writeInf("platform", "Android", "Platform")
self.writer.writeInf("package_name", str(package_name), "Package Name")
# Check: http://developer.android.com/guide/topics/manifest/manifest-element.html
if not isNullOrEmptyString(self.a.get_androidversion_name()):
try:
self.writer.writeInf("package_version_name", str(self.a.get_androidversion_name()), "Package Version Name")
except:
self.writer.writeInf("package_version_name", self.a.get_androidversion_name().encode('ascii', 'ignore'),
"Package Version Name")
if not isNullOrEmptyString(self.a.get_androidversion_code()):
# The version number shown to users. This attribute can be set as a raw string or as a reference to a string resource.
# The string has no other purpose than to be displayed to users.
try:
self.writer.writeInf("package_version_code", int(self.a.get_androidversion_code()), "Package Version Code")
except ValueError:
self.writer.writeInf("package_version_code", self.a.get_androidversion_code(), "Package Version Code")
if len(self.a.get_dex()) == 0:
raise ExpectedException("classes_dex_not_in_apk",
"Broken APK file. \"classes.dex\" file not found (File: " + apk_Path + ").")
try:
str_min_sdk_version = self.a.get_min_sdk_version()
if (str_min_sdk_version is None) or (str_min_sdk_version == ""):
raise ValueError
else:
self.int_min_sdk = int(str_min_sdk_version)
self.writer.writeInf("minSdk", self.int_min_sdk, "Min Sdk")
except ValueError:
# Check: http://developer.android.com/guide/topics/manifest/uses-sdk-element.html
# If "minSdk" is not set, the default value is "1"
self.writer.writeInf("minSdk", 1, "Min Sdk")
self.int_min_sdk = 1
try:
str_target_sdk_version = self.a.get_target_sdk_version()
if (str_target_sdk_version is None) or (str_target_sdk_version == ""):
raise ValueError
else:
self.int_target_sdk = int(str_target_sdk_version)
self.writer.writeInf("targetSdk", self.int_target_sdk, "Target Sdk")
except ValueError:
# Check: http://developer.android.com/guide/topics/manifest/uses-sdk-element.html
# If not set, the default value equals that given to minSdkVersion.
self.int_target_sdk = self.int_min_sdk
md5, sha1, sha256, sha512 = get_hashes_by_filename(APK_FILE_NAME_STRING)
self.writer.writeInf("file_md5", md5, "MD5 ")
self.writer.writeInf("file_sha1", sha1, "SHA1 ")
self.writer.writeInf("file_sha256", sha256, "SHA256")
self.writer.writeInf("file_sha512", sha512, "SHA512")
self.writer.update_analyze_status("starting_dvm")
self.d = dvm.DalvikVMFormat(self.a.get_dex())
self.writer.update_analyze_status("starting_analyze")
self.vmx = analysis.VMAnalysis(self.d)
self.writer.update_analyze_status("starting_androbugs")
self.all_permissions = self.a.get_permissions()
self.allstrings = self.d.get_strings()
self.allurls_strip_duplicated = []
# ------------------------------------------------------------------------
#[Important: String Efficient Searching Engine]
# >>>>STRING_SEARCH<<<<
#addSearchItem params: (1)match_id (2)regex or string(url or string you want to find), (3)is using regex for parameter 2
self.efficientStringSearchEngine.addSearchItem("$__possibly_check_root__", re.compile("/system/bin"),
True) # "root" checking
self.efficientStringSearchEngine.addSearchItem("$__possibly_check_su__", "su", False) # "root" checking2
self.efficientStringSearchEngine.addSearchItem("$__sqlite_encryption__", re.compile("PRAGMA\s*key\s*=", re.I),
True) #SQLite encryption checking
self.list_base64_success_decoded_string_to_original_mapping = {}
self.list_base64_excluded_original_string = ["endsWith", "allCells", "fillList", "endNanos", "cityList", "cloudid=",
"Liouciou"] #exclusion list
for line in self.allstrings:
if (isBase64(line)) and (len(line) >= 3):
try:
decoded_string = base64.b64decode(line)
if isSuccessBase64DecodedString(decoded_string):
if len(decoded_string) > 3:
if (decoded_string not in self.list_base64_success_decoded_string_to_original_mapping) and (
line not in self.list_base64_excluded_original_string):
self.list_base64_success_decoded_string_to_original_mapping[decoded_string] = line
# >>>>STRING_SEARCH<<<<
self.efficientStringSearchEngine.addSearchItem(line, line, False)
except Exception,e:
#print e
pass
self.efficientStringSearchEngine.search(self.d, self.allstrings)
self.PermissionName_to_ProtectionLevel = self.a.get_PermissionName_to_ProtectionLevel_mapping()
t = package_name + '-' + sha512 + '-' + str(time.time()) + '-' + str(random.randrange(10000000, 99999999))
self.sig = hashlib.sha256(t).hexdigest()
self.md5 = md5
```
#### File: tools/vectors/base64check.py
```python
import base64
from .. import *
from VulnerabilityVector import VulnerabilityVector
class Base64check(VulnerabilityVector):
def __init__(self,context):
self.context = context
def analyze(self):
#Base64 String decoding:
organized_list_base64_success_decoded_string_to_original_mapping = []
for decoded_string, original_string in self.context.list_base64_success_decoded_string_to_original_mapping.items():
dict_class_to_method_mapping = self.context.efficientStringSearchEngine.get_search_result_dict_key_classname_value_methodlist_by_match_id(
original_string)
if self.context.filteringEngine.is_all_of_key_class_in_dict_not_in_exclusion(dict_class_to_method_mapping):
"""
All of same string found are inside the excluded packages.
Only the strings found the original class will be added.
"""
organized_list_base64_success_decoded_string_to_original_mapping.append(
(decoded_string, original_string, dict_class_to_method_mapping))
if organized_list_base64_success_decoded_string_to_original_mapping: #The result is from the upper code section
list_base64_decoded_urls = {}
self.context.writer.startWriter("HACKER_BASE64_STRING_DECODE", LEVEL_CRITICAL, "Base64 String Encryption",
"Found Base64 encoding \"String(s)\" (Total: " + str(len(
organized_list_base64_success_decoded_string_to_original_mapping)) + "). We cannot guarantee all of the Strings are Base64 encoding and also we will not show you the decoded binary file:",
["Hacker"])
for decoded_string, original_string, dict_class_to_method_mapping in organized_list_base64_success_decoded_string_to_original_mapping:
self.context.writer.write(decoded_string)
self.context.writer.write(" ->Original Encoding String: " + original_string)
if dict_class_to_method_mapping:
for class_name, result_method_list in dict_class_to_method_mapping.items():
for result_method in result_method_list:
source_classes_and_functions = (
result_method.get_class_name() + "->" + result_method.get_name() + result_method.get_descriptor())
self.context.writer.write(" ->From class: " + source_classes_and_functions)
if "http://" in decoded_string:
list_base64_decoded_urls[decoded_string] = original_string
if list_base64_decoded_urls:
self.context.writer.startWriter("HACKER_BASE64_URL_DECODE", LEVEL_CRITICAL, "Base64 String Encryption",
"Base64 encoding \"HTTP URLs without SSL\" from all the Strings (Total: " + str(
len(list_base64_decoded_urls)) + ")", ["SSL_Security", "Hacker"])
for decoded_string, original_string in list_base64_decoded_urls.items():
dict_class_to_method_mapping = self.context.efficientStringSearchEngine.get_search_result_dict_key_classname_value_methodlist_by_match_id(
original_string)
if not self.context.filteringEngine.is_all_of_key_class_in_dict_not_in_exclusion(
dict_class_to_method_mapping): #All of the same string found are inside the excluded packages
continue
self.context.writer.write(decoded_string)
self.context.writer.write(" ->Original Encoding String: " + original_string)
if dict_class_to_method_mapping:
for class_name, result_method_list in dict_class_to_method_mapping.items():
for result_method in result_method_list:
source_classes_and_functions = (
result_method.get_class_name() + "->" + result_method.get_name() + result_method.get_descriptor())
self.context.writer.write(" ->From class: " + source_classes_and_functions)
else:
self.context.writer.startWriter("HACKER_BASE64_STRING_DECODE", LEVEL_INFO, "Base64 String Encryption",
"No encoded Base64 String or Urls found.", ["Hacker"])
```
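The vector above only reports strings that both look like Base64 and decode to something printable. The project's `isBase64` and `isSuccessBase64DecodedString` helpers are defined elsewhere, so the Python 3 sketch below uses stand-in checks purely to illustrate the idea.
```python
# Rough, self-contained illustration of the Base64 heuristic (not the project's
# actual helpers): accept a candidate only if it matches the Base64 alphabet,
# decodes cleanly, and the decoded bytes are printable text.
import base64
import re
import string

_B64_RE = re.compile(r'^[A-Za-z0-9+/]+={0,2}$')

def looks_like_base64(s):
    return len(s) >= 4 and len(s) % 4 == 0 and bool(_B64_RE.match(s))

def decode_if_text(s):
    try:
        decoded = base64.b64decode(s)
    except Exception:
        return None
    if decoded and all(chr(b) in string.printable for b in decoded):
        return decoded.decode('ascii', 'ignore')
    return None

for candidate in ["aHR0cDovL2V4YW1wbGUuY29t", "endsWith", "not base64!"]:
    text = decode_if_text(candidate) if looks_like_base64(candidate) else None
    print(candidate, "->", text)
```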
#### File: tools/vectors/openportcheck.py
```python
from .. import *
from VulnerabilityVector import VulnerabilityVector
class OpenPortCheck(VulnerabilityVector):
def __init__(self,context):
self.context = context
def analyze(self):
#Openport checking:
"""
find open port code and add information to the context writer
Example Java code:
new ServerSocket(port)
Example Bytecode code:
tcp:
new-instance v0, Ljava/net/ServerSocket;
const/16 v1, 0x1388
invoke-direct {v0, v1}, Ljava/net/ServerSocket;-><init>(I)V
udp:
new-instance v1, Ljava/net/DatagramSocket;
const v0, 0xffde
invoke-direct {v1, v0}, Ljava/net/DatagramSocket;-><init>(I)V
"""
path_tcpport = self.context.vmx.get_tainted_packages().search_class_methods_exact_match("Ljava/net/ServerSocket;","<init>", "(I)V")
path_tcpport = self.context.filteringEngine.filter_list_of_paths(self.context.d, path_tcpport)
path_udpport = self.context.vmx.get_tainted_packages().search_class_methods_exact_match("Ljava/net/DatagramSocket;","<init>", "(I)V")
path_udpport = self.context.filteringEngine.filter_list_of_paths(self.context.d, path_udpport)
if (path_tcpport or path_udpport):
self.context.writer.startWriter("OPEN PORT INFO", LEVEL_CRITICAL, "Open Port Checking", "Open Port Code Found: ",["OPEN_PORT"])
if path_tcpport:
self.context.writer.write("TCP Port")
for path in path_tcpport:
self.context.writer.show_Path(self.context.d, path)
if path_udpport:
self.context.writer.write("UDP Port")
for path in path_udpport:
self.context.writer.show_Path(self.context.d, path)
```
#### File: tools/vectors/permissioncheck.py
```python
from .. import *
from VulnerabilityVector import VulnerabilityVector
class PermissionCheck(VulnerabilityVector):
def __init__(self,context):
self.context = context
def analyze(self):
self.Critical_permission_check()
self.dangerous_permission_check()
self.normal_permission_check()
def Critical_permission_check(self):
ACCESS_MOCK_LOCATION = "android.permission.ACCESS_MOCK_LOCATION"
if ACCESS_MOCK_LOCATION in self.context.all_permissions:
self.context.writer.startWriter("USE_PERMISSION_ACCESS_MOCK_LOCATION", LEVEL_CRITICAL, "Unnecessary Permission Checking",
"Permission 'android.permission.ACCESS_MOCK_LOCATION' only works in emulator environment. Please remove this permission if it is a released application.")
else:
self.context.writer.startWriter("USE_PERMISSION_ACCESS_MOCK_LOCATION", LEVEL_INFO, "Unnecessary Permission Checking",
"Permission 'android.permission.ACCESS_MOCK_LOCATION' sets correctly.")
#----------------------------------------------------------------------------------
permissionNameOfWrongPermissionGroup = self.context.a.get_permission_tag_wrong_settings_names()
if permissionNameOfWrongPermissionGroup: #If the list is not empty
            self.context.writer.startWriter("PERMISSION_GROUP_EMPTY_VALUE", LEVEL_CRITICAL, "AndroidManifest PermissionGroup Checking",
"Setting the 'permissionGroup' attribute an empty value will make the permission definition become invalid and no other apps will be able to use the permission.")
for name in permissionNameOfWrongPermissionGroup:
self.context.write("Permission name '%s' sets an empty value in `permissionGroup` attribute." % (name))
else:
self.context.writer.startWriter("PERMISSION_GROUP_EMPTY_VALUE", LEVEL_INFO, "AndroidManifest PermissionGroup Checking",
"PermissionGroup in permission tag of AndroidManifest sets correctly.")
#----------------------------------------------------------------------------------
#Critical use-permission check:
user_permission_critical_manufacturer = ["android.permission.INSTALL_PACKAGES",
"android.permission.WRITE_SECURE_SETTINGS"]
user_permission_critical = ["android.permission.MOUNT_FORMAT_FILESYSTEMS",
"android.permission.MOUNT_UNMOUNT_FILESYSTEMS", "android.permission.RESTART_PACKAGES"]
list_user_permission_critical_manufacturer = []
list_user_permission_critical = []
for permission in self.context.all_permissions:
if permission in user_permission_critical_manufacturer:
list_user_permission_critical_manufacturer.append(permission)
if permission in user_permission_critical:
list_user_permission_critical.append(permission)
if list_user_permission_critical_manufacturer or list_user_permission_critical:
if list_user_permission_critical_manufacturer:
self.context.writer.startWriter("USE_PERMISSION_SYSTEM_APP", LEVEL_CRITICAL,
"AndroidManifest System Use Permission Checking",
"This app should only be released and signed by device manufacturer or Google and put under '/system/app'. If not, it may be a malicious app.")
for permission in list_user_permission_critical_manufacturer:
self.context.writer.write("System use-permission found: \"" + permission + "\"")
if list_user_permission_critical:
self.context.writer.startWriter("USE_PERMISSION_CRITICAL", LEVEL_CRITICAL,
"AndroidManifest Critical Use Permission Checking",
"This app has very high privileges. Use it carefully.")
for permission in list_user_permission_critical:
self.context.writer.write("Critical use-permission found: \"" + permission + "\"")
else:
self.context.writer.startWriter("USE_PERMISSION_SYSTEM_APP", LEVEL_INFO, "AndroidManifest System Use Permission Checking",
"No system-level critical use-permission found.")
def dangerous_permission_check(self):
#Find all "dangerous" permission
"""
android:permission
android:readPermission (for ContentProvider)
android:writePermission (for ContentProvider)
"""
#Get a mapping dictionary
self.context.PermissionName_to_ProtectionLevel = self.context.a.get_PermissionName_to_ProtectionLevel_mapping()
dangerous_custom_permissions = []
for name, protectionLevel in self.context.PermissionName_to_ProtectionLevel.items():
if protectionLevel == PROTECTION_DANGEROUS: # 1:"dangerous"
dangerous_custom_permissions.append(name)
if dangerous_custom_permissions:
self.context.writer.startWriter("PERMISSION_DANGEROUS", LEVEL_CRITICAL,
"AndroidManifest Dangerous ProtectionLevel of Permission Checking",
"""The protection level of the below classes is "dangerous", allowing any other apps to access this permission (AndroidManifest.xml).
The app should declare the permission with an "android:protectionLevel" of "signature" or "signatureOrSystem" so that other apps cannot register for and receive messages intended for this app.
android:protectionLevel="signature" ensures that apps requesting the permission must be signed with the same certificate as the application that declared it.
Please check some related cases: http://www.wooyun.org/bugs/wooyun-2010-039697
Please change these permissions:""")
for class_name in dangerous_custom_permissions:
self.context.writer.write(class_name)
who_use_this_permission = get_all_components_by_permission(self.context.a.get_AndroidManifest(), class_name)
who_use_this_permission = collections.OrderedDict(sorted(who_use_this_permission.items()))
if who_use_this_permission:
for key, valuelist in who_use_this_permission.items():
for list_item in valuelist:
self.context.writer.write(" -> used by (" + key + ") " + self.context.a.format_value(list_item))
else:
self.context.writer.startWriter("PERMISSION_DANGEROUS", LEVEL_INFO,
"AndroidManifest Dangerous ProtectionLevel of Permission Checking",
"No \"dangerous\" protection level customized permission found (AndroidManifest.xml).")
def normal_permission_check(self):
#Find all "normal" or default permission
normal_or_default_custom_permissions = []
for name, protectionLevel in self.context.PermissionName_to_ProtectionLevel.items():
if protectionLevel == PROTECTION_NORMAL: # 0:"normal" or not set
normal_or_default_custom_permissions.append(name)
if normal_or_default_custom_permissions:
self.context.writer.startWriter("PERMISSION_NORMAL", LEVEL_WARNING,
"AndroidManifest Normal ProtectionLevel of Permission Checking",
"""The protection level of the below classes is "normal" or default (AndroidManifest.xml).
The app should declare the permission with an "android:protectionLevel" of "signature" or "signatureOrSystem" so that other apps cannot register for and receive messages intended for this app.
android:protectionLevel="signature" ensures that apps requesting the permission must be signed with the same certificate as the application that declared it.
Please make sure these permissions really need to be exported; otherwise, change them to the "signature" or "signatureOrSystem" protection level.
for class_name in normal_or_default_custom_permissions:
self.context.writer.write(class_name)
who_use_this_permission = get_all_components_by_permission(self.context.a.get_AndroidManifest(), class_name)
who_use_this_permission = collections.OrderedDict(sorted(who_use_this_permission.items()))
if who_use_this_permission:
for key, valuelist in who_use_this_permission.items():
for list_item in valuelist:
self.context.writer.write(" -> used by (" + key + ") " + self.context.a.format_value(list_item))
else:
self.context.writer.startWriter("PERMISSION_NORMAL", LEVEL_INFO,
"AndroidManifest Normal ProtectionLevel of Permission Checking",
"No default or \"normal\" protection level customized permission found (AndroidManifest.xml).")
```
#### File: tools/vectors/screenshotprentcheck.py
```python
from .. import *
from VulnerabilityVector import VulnerabilityVector
class ScreenshotprentCheck(VulnerabilityVector):
def __init__(self,context):
self.context = context
def analyze(self):
#Developers preventing screenshot capturing checking:
"""
Example:
const/16 v1, 0x2000
invoke-super {p0, p1}, Landroid/support/v7/app/AppCompatActivity;->onCreate(Landroid/os/Bundle;)V
invoke-virtual {p0}, Lcom/example/preventscreencapture/MainActivity;->getWindow()Landroid/view/Window;
move-result-object v0
invoke-virtual {v0, v1, v1}, Landroid/view/Window;->setFlags(II)V
getWindow().setFlags(WindowManager.LayoutParams.FLAG_SECURE, WindowManager.LayoutParams.FLAG_SECURE);
"""
list_code_for_preventing_screen_capture = []
path_code_for_preventing_screen_capture = self.context.vmx.get_tainted_packages().search_class_methods_exact_match(
"Landroid/view/Window;", "setFlags", "(I I)V")
path_code_for_preventing_screen_capture = self.context.filteringEngine.filter_list_of_paths(self.context.d,
path_code_for_preventing_screen_capture)
for i in analysis.trace_Register_value_by_Param_in_source_Paths(self.context.d, path_code_for_preventing_screen_capture):
if (i.getResult()[1] is None) or (i.getResult()[2] is None):
continue
if (not isinstance(i.getResult()[1], (int, long))) or (not isinstance(i.getResult()[2], (int, long))):
continue
if (i.getResult()[1] & 0x2000) and (i.getResult()[2] & 0x2000):
list_code_for_preventing_screen_capture.append(i.getPath())
if list_code_for_preventing_screen_capture:
self.context.writer.startWriter("HACKER_PREVENT_SCREENSHOT_CHECK", LEVEL_NOTICE,
"Code Setting Preventing Screenshot Capturing",
"""This app has code setting the preventing screenshot capturing.
Example: getWindow().setFlags(WindowManager.LayoutParams.FLAG_SECURE, WindowManager.LayoutParams.FLAG_SECURE);
It is used by the developers to protect the app:""", ["Hacker"])
for interesting_code in list_code_for_preventing_screen_capture:
self.context.writer.show_Path(self.context.d, interesting_code)
else:
self.context.writer.startWriter("HACKER_PREVENT_SCREENSHOT_CHECK", LEVEL_INFO,
"Code Setting Preventing Screenshot Capturing",
"Did not detect this app has code setting preventing screenshot capturing.", ["Hacker"])
```
#### File: tools/vectors/securitycheck.py
```python
from .. import *
from VulnerabilityVector import VulnerabilityVector
class URLs_check(VulnerabilityVector):
def __init__(self,context):
self.context = context
def analyze(self):
regexGerneralRestricted = ".*(config|setting|constant).*";
regexSecurityRestricted = ".*(encrypt|decrypt|encod|decod|aes|sha1|sha256|sha512|md5).*" #No need to add "sha1" and "des"
#show the user which package is excluded
prog = re.compile(regexGerneralRestricted, re.I)
prog_sec = re.compile(regexSecurityRestricted, re.I)
# Security methods finding:
if self.context.args.extra == 2: #The output may be too verbose, so make it an option
list_security_related_methods = []
for method in self.context.d.get_methods():
if prog.match(method.get_name()) or prog_sec.match(method.get_name()):
if self.context.filteringEngine.is_class_name_not_in_exclusion(method.get_class_name()):
# Need to exclude "onConfigurationChanged (Landroid/content/res/Configuration;)V"
if (method.get_name() != 'onConfigurationChanged') and (
method.get_descriptor() != '(Landroid/content/res/Configuration;)V'):
list_security_related_methods.append(method)
if list_security_related_methods:
self.context.startWriter("Security_Methods", LEVEL_NOTICE, "Security Methods Checking",
"Find some security-related method names:")
for method in list_security_related_methods:
self.context.write(method.get_class_name() + "->" + method.get_name() + method.get_descriptor())
else:
self.context.startWriter("Security_Methods", LEVEL_INFO, "Security Methods Checking",
"Did not detect method names containing security related string.")
if self.context.args.extra == 2: #The output may be too verbose, so make it an option
list_security_related_classes = []
for current_class in self.context.d.get_classes():
if prog.match(current_class.get_name()) or prog_sec.match(current_class.get_name()):
if self.context.filteringEngine.is_class_name_not_in_exclusion(current_class.get_name()):
list_security_related_classes.append(current_class)
if list_security_related_classes:
self.context.writer.startWriter("Security_Classes", LEVEL_NOTICE, "Security Classes Checking",
"Find some security-related class names:")
for current_class in list_security_related_classes:
self.context.writer.write(current_class.get_name())
else:
self.context.writer.startWriter("Security_Classes", LEVEL_INFO, "Security Classes Checking",
"Did not detect class names containing security related string.")
```
#### File: tools/vectors/SQLitecheck.py
```python
from .. import *
from VulnerabilityVector import VulnerabilityVector
class SQLiteCheck(VulnerabilityVector):
def __init__(self,context):
self.context = context
def analyze(self):
self.TransactionNonExclusive_check()
self.Information_Disclosure_check()
self.SSE_check()
self.PRAGMA_key_check()
def TransactionNonExclusive_check(self):
# SQLiteDatabase - beginTransactionNonExclusive() checking:
if (self.context.int_min_sdk is not None) and (self.context.int_min_sdk < 11):
path_SQLiteDatabase_beginTransactionNonExclusive = self.context.vmx.get_tainted_packages().search_class_methods_exact_match(
"Landroid/database/sqlite/SQLiteDatabase;", "beginTransactionNonExclusive", "()V")
path_SQLiteDatabase_beginTransactionNonExclusive = self.context.filteringEngine.filter_list_of_paths(self.context.d,
path_SQLiteDatabase_beginTransactionNonExclusive)
if path_SQLiteDatabase_beginTransactionNonExclusive:
output_string = StringHandler()
output_string.append(
"We detect you're using \"beginTransactionNonExclusive\" in your \"SQLiteDatabase\" but your minSdk supports down to " + str(
self.context.int_min_sdk) + ".")
output_string.append(
"\"beginTransactionNonExclusive\" is not supported by API < 11. Please make sure you use \"beginTransaction\" in the earlier version of Android.")
output_string.append(
"Reference: http://developer.android.com/reference/android/database/sqlite/SQLiteDatabase.html#beginTransactionNonExclusive()")
self.context.writer.startWriter("DB_DEPRECATED_USE1", LEVEL_CRITICAL, "SQLiteDatabase Transaction Deprecated Checking",
output_string.get(), ["Database"])
self.context.writer.show_Paths(self.context.d, path_SQLiteDatabase_beginTransactionNonExclusive)
else:
self.context.writer.startWriter("DB_DEPRECATED_USE1", LEVEL_INFO, "SQLiteDatabase Transaction Deprecated Checking",
"Ignore checking \"SQLiteDatabase:beginTransactionNonExclusive\" you're not using it.",
["Database"])
else:
self.context.writer.startWriter("DB_DEPRECATED_USE1", LEVEL_INFO, "SQLiteDatabase Transaction Deprecated Checking",
"Ignore checking \"SQLiteDatabase:beginTransactionNonExclusive\" because your set minSdk >= 11.",
["Database"])
def Information_Disclosure_check(self):
#SQLite databases
is_using_android_dbs = self.context.vmx.get_tainted_packages().has_android_databases(self.context.filteringEngine.get_filtering_regexp())
if is_using_android_dbs:
if self.context.int_min_sdk < 15:
self.context.writer.startWriter("DB_SQLITE_JOURNAL", LEVEL_NOTICE, "Android SQLite Databases Vulnerability Checking",
"""This app is using Android SQLite databases.
Prior to Android 4.0, Android has SQLite Journal Information Disclosure Vulnerability.
But it can only be solved by users upgrading to Android > 4.0 and YOU CANNOT SOLVE IT BY YOURSELF (though you can encrypt your databases and journals with "SQLCipher" or other libs).
Proof-Of-Concept Reference:
(1) http://blog.watchfire.com/files/androidsqlitejournal.pdf
(2) http://www.youtube.com/watch?v=oCXLHjmH5rY """, ["Database"], "CVE-2011-3901")
else:
self.context.writer.startWriter("DB_SQLITE_JOURNAL", LEVEL_NOTICE, "Android SQLite Databases Vulnerability Checking",
"This app is using Android SQLite databases but it's \"NOT\" suffering from SQLite Journal Information Disclosure Vulnerability.",
["Database"], "CVE-2011-3901")
else:
self.context.writer.startWriter("DB_SQLITE_JOURNAL", LEVEL_INFO, "Android SQLite Databases Vulnerability Checking",
"This app is \"NOT\" using Android SQLite databases.", ["Database"], "CVE-2011-3901")
def SSE_check(self):
#Find "SQLite Encryption Extension (SEE) on Android"
has_SSE_databases = False
for cls in self.context.d.get_classes():
if cls.get_name() == "Lorg/sqlite/database/sqlite/SQLiteDatabase;": #Don't do the exclusion checking on this one because it's not needed
has_SSE_databases = True
break
if has_SSE_databases:
self.context.writer.startWriter("DB_SEE", LEVEL_NOTICE,
"Android SQLite Databases Encryption (SQLite Encryption Extension (SEE))",
"This app is using SQLite Encryption Extension (SEE) on Android (http://www.sqlite.org/android) to encrypt or decrpyt databases.",
["Database"])
else:
self.context.writer.startWriter("DB_SEE", LEVEL_INFO,
"Android SQLite Databases Encryption (SQLite Encryption Extension (SEE))",
"This app is \"NOT\" using SQLite Encryption Extension (SEE) on Android (http://www.sqlite.org/android) to encrypt or decrpyt databases.",
["Database"])
def PRAGMA_key_check(self):
#Searching SQLite "PRAGMA key" encryption:
result_sqlite_encryption = self.context.efficientStringSearchEngine.get_search_result_by_match_id("$__sqlite_encryption__")
result_sqlite_encryption = self.context.filteringEngine.filter_efficient_search_result_value(result_sqlite_encryption)
if result_sqlite_encryption:
self.context.writer.startWriter("HACKER_DB_KEY", LEVEL_NOTICE, "Key for Android SQLite Databases Encryption",
"Found using the symmetric key(PRAGMA key) to encrypt the SQLite databases. \nRelated code:",
["Database", "Hacker"])
for found_string, method in result_sqlite_encryption:
self.context.writer.write(method.get_class_name() + "->" + method.get_name() + method.get_descriptor())
else:
self.context.writer.startWriter("HACKER_DB_KEY", LEVEL_INFO, "Key for Android SQLite Databases Encryption",
"Did not find using the symmetric key(PRAGMA key) to encrypt the SQLite databases (It's still possible that it might use but we did not find out).",
["Database", "Hacker"])
```
|
{
"source": "jdraiv/lock",
"score": 3
}
|
#### File: auth/helpers/LogAcc.py
```python
import bcrypt
from main import mongo_db
from global_helpers.response import internal_response
class LogAcc:
@staticmethod
def generate_tokens(user_id):
pass
@staticmethod
def valid_credentials(user_id, password):
user_obj = mongo_db.users.find_one({'user_id': user_id})
if user_obj != None:
if bcrypt.checkpw(str.encode(password), user_obj['password']):
return internal_response(status="success", message="User authenticated")
return internal_response(status="error", message="Incorrect username or password")
```
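`valid_credentials` above checks a submitted password against a bcrypt hash stored in the user document. A minimal sketch of how such a record could be created in the first place (field names follow the `user_id`/`password` keys used above; the salt rounds and the insert call are assumptions):
```python
import bcrypt

# bcrypt stores the salt inside the resulting hash, so checkpw() later needs
# nothing besides the plaintext attempt and the stored hash.
hashed = bcrypt.hashpw("s3cret".encode(), bcrypt.gensalt(rounds=12))
# mongo_db.users.insert_one({'user_id': 'alice', 'password': hashed, 'contacts': []})

# LogAcc.valid_credentials('alice', 's3cret') would then run the same check:
assert bcrypt.checkpw("s3cret".encode(), hashed)
```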
#### File: contacts/helpers/ContactsHandler.py
```python
from main import mongo_db
from global_helpers.response import internal_response
class ContactsHandler:
@staticmethod
def add_contact(user_id, contact_id):
# Verify if the contact exists
# Verify if the user already has the contact
user_obj = mongo_db.users.find_one({'user_id': user_id})
if mongo_db.users.find_one({'user_id': contact_id}) != None:
            if contact_id not in user_obj['contacts']:
mongo_db.users.update({'user_id': user_id}, {'$push': {'contacts': contact_id}})
return internal_response(status="success", message="The contact has been added")
else:
return internal_response(status="error", message="The contact already exists")
return internal_response(status="error", message="Unkown user id")
@staticmethod
def delete_contact(user_id, contact_id):
user_obj = mongo_db.users.find_one({'user_id': user_id})
if user_obj != None:
            if contact_id in user_obj['contacts']:
new_list = user_obj['contacts']
new_list.remove(contact_id)
mongo_db.users.update({'user_id': user_id}, {'$set': {'contacts': new_list}})
return internal_response(status="success", message="The contact has been deleted")
else:
return internal_response(status="success", message="The contact does not exists")
return internal_response(status="error", message="Unknown user id")
@staticmethod
def get_contacts(user_id):
user_obj = mongo_db.users.find_one({'user_id': user_id})
if user_obj != None:
return internal_response(status="success", message="Retrieved contacts", data=user_obj['contacts'])
return internal_response(status="success", message="Unknown user id")
```
#### File: lock/global_helpers/response.py
```python
def internal_response(status, message='None', data=None):
if data != None:
return {'status': status, 'message': message, 'data': data}
return {'status': status, 'message': message}
```
#### File: lock/global_helpers/tokens_controller.py
```python
from main import keys
import jwt
import pyrtk
def create_jwt(user_id):
return jwt.encode({'user_id': user_id}, keys['JWT_KEY'], algorithm="HS256").decode('utf-8')
def create_rtk(user_id):
return pyrtk.create_token(user_id, keys['RTK_KEY'])
def decode_jwt(user_id):
pass
def decode_rtk(user_id):
pass
```
#### File: jdraiv/lock/main.py
```python
from env_setup import set_keys
from sanic import Sanic
from sanic.response import json, text
from sanic.websocket import WebSocketProtocol
from pymongo import MongoClient
app = Sanic()
keys = set_keys('MONGO_URL', 'BCRYPT_KEY', 'JWT_KEY', 'RTK_KEY')
mongo_db = MongoClient(keys['MONGO_URL'])['lock-dev']
from blueprints.auth.main import auth_module
from blueprints.contacts.main import contacts_module
from decorators.auth_decorators import jwt_required
app.blueprint(auth_module)
app.blueprint(contacts_module)
@app.route('/')
async def homepage(request):
return text("test")
@app.route('/protected')
@jwt_required()
async def test(request):
return text("Protected route")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True, protocol=WebSocketProtocol)
```
|
{
"source": "jdraiv/PyGaming",
"score": 3
}
|
#### File: games/menu/main.py
```python
import json
import os
import pygame
# Font module
from modules.fonts.Fontom import Fontom
# Events module
from modules.events.Eventoral import Eventoral
from setup_vars import setup_vars
# This class helps the user select the game the user wants to play
class App:
def __init__(self, screen):
self.screen = screen
self.games = self.get_games()
self.fontom = Fontom(self.screen)
self.current_game_num = 0
def show_menu(self):
self.screen.fill([55, 56, 68])
self.fontom.draw_text(
text='PyEngine',
font_size=80,
y_pos=50,
hor_center=True
)
self.fontom.draw_text(
text='Select Game',
font_size=50,
y_pos=340,
hor_center=True
)
self.fontom.draw_text(
text="< %s >" % self.games[self.current_game_num],
font_size=30,
y_pos=400,
hor_center=True
)
def events(self):
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == 97 or event.key == 276:
self.current_game_num -= 1
elif event.key == 100 or event.key == 275:
self.current_game_num += 1
elif event.key == 32:
setup_vars['current_game'] = self.games[self.current_game_num]
print("Play")
# Update current_game index value
self.validate_game_num()
Eventoral().exit(event)
def validate_game_num(self):
if self.current_game_num == len(self.games):
self.current_game_num = 0
elif self.current_game_num < 0:
self.current_game_num = len(self.games) - 1
def get_games(self):
location = "%s/games/games.json" % os.getcwd()
games = json.load(open(location, 'r'))
return [dic['name'] for dic in games['games']]
def container(self):
self.show_menu()
self.events()
```
#### File: games/snake/main.py
```python
import pygame
from modules.events.Eventoral import Eventoral
from .modules.Snake import Snake
from .modules.Apple import Apple
from .modules.UI import UI
from setup_vars import setup_vars
from .game_vars import vars
"""
The body of the snake is implemented as a list. The segments are implemented as a sub_list inside the body list
The sub_list needs to contain the X and Y position
of the snake segment. Example: [x_pos, y_pos]
"""
class App:
def __init__(self, window):
self.window = window
self.apple_class = Apple(self.window)
self.snake_class = Snake(self.window, self.apple_class.apple_pos)
self.ui_class = UI(self.window)
self.events_class = Eventoral()
def events(self):
for event in pygame.event.get():
# Listen to events in the snake module
self.snake_class.snake_controller(event)
# Exit window event
self.events_class.exit(event)
# Pause event
self.events_class.pause(event, 'p')
# Restart event
self.events_class.restart(event, 'r')
def set_vars(self):
setup_vars['time'] = 25
def container(self):
self.set_vars()
self.events()
if not setup_vars['pause']:
self.snake_class.snake_movement()
self.snake_class.draw_snake()
self.ui_class.draw_score()
# Check if the apple is eaten
if self.snake_class.apple_collision():
# Set new apple position
self.apple_class.set_position()
self.snake_class.apple_pos = self.apple_class.apple_pos
# Add point to score
vars['score'] += 1
# Check snake collision
if self.snake_class.tail_collision():
setup_vars['pause'] = True
setup_vars['game_over'] = True
self.apple_class.draw_apple()
elif setup_vars['game_over']:
self.ui_class.game_over()
else:
self.ui_class.pause_menu()
```
#### File: snake/modules/UI.py
```python
from modules.fonts.Fontom import Fontom
from ..game_vars import vars
class UI:
def __init__(self, window):
self.window = window
self.fontom = Fontom(self.window)
def pause_menu(self):
self.fontom.draw_text(
text='Game paused',
font_size=50,
color=vars['main_blue'],
hor_center=True,
ver_center=True,
)
def game_over(self):
self.fontom.draw_text(
text='GAME OVER',
font_size=70,
color=vars['main_red'],
hor_center=True,
ver_center=True
)
def draw_score(self):
self.fontom.draw_text(
text=vars['score'],
font_size=30,
x_pos=30,
y_pos=30
)
```
#### File: PyGaming/src/launcher.py
```python
import pygame
import time
from setup_vars import setup_vars
"""
The Launcher class. The big boss. This class returns the initial screen in which a game is going to be rendered.
This class also contains some functions that affect the game rendering process, like FPS.
"""
"""
In order for a game to be read correctly by the launcher, the game needs to be in its own folder.
The folder needs to contain a file called "main". The launcher treats the "main" file as the entry point,
as its name suggests. The game needs to be inside a class called "App".
You need to bundle all the application code inside a function called "container".
"""
class Launcher:
def __init__(self):
self.window = pygame.display.set_mode([500, 500])
self.current_app = setup_vars['current_game']
self.game = self.read_app().App(self.window)
def set_caption(self):
pygame.display.set_caption(self.current_app.capitalize())
def read_app(self):
module = __import__('games.%s.main' % self.current_app, globals(), locals(), ['App'], 0)
return module
def start(self):
# Initialize fonts
pygame.font.init()
while True:
self.set_caption()
# Game execution time.
clock = pygame.time.Clock()
clock.tick(setup_vars['time'])
# Check if the application was changed or if the game needs to be restarted.
# If the application was changed or needs to be restarted, reload the new game module
# and clean the screen.
if self.current_app != setup_vars['current_game'] or setup_vars['restart']:
self.current_app = setup_vars['current_game']
self.game = self.read_app().App(self.window)
# Clean screen
self.window.fill((0, 0, 0))
pygame.display.flip()
# Reset restart variable in case that the game needed to be restarted
setup_vars['restart'] = False
# Execute game
self.game.container()
pygame.display.update()
Launcher().start()
```
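The docstrings above spell out the contract every game must follow: its own folder, a "main" file, an App class, and a container function that the launcher calls once per frame. A minimal skeleton satisfying that contract could look like the sketch below (the folder name `example` and the fill colour are invented; judging by the menu code earlier, the game would also need an entry in games/games.json to show up in the game list):
```python
# games/example/main.py -- hypothetical minimal game for this launcher
import pygame

from modules.events.Eventoral import Eventoral


class App:
    def __init__(self, window):
        # The launcher hands every game the shared pygame display surface.
        self.window = window
        self.events_class = Eventoral()

    def container(self):
        # Called by the launcher on every iteration of its main loop.
        for event in pygame.event.get():
            self.events_class.exit(event)
        self.window.fill((20, 20, 20))
```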
#### File: modules/fonts/Fontom.py
```python
import pygame
class Fontom(object):
def __init__(self, window, font_family='Lorem Ipsum'):
self.window = window
self.font_family = font_family
def draw_text(self, text, color=[255, 255, 255], x_pos=0, y_pos=0, font_size=30, hor_center=False, ver_center=False):
# This function checks if an object needs to be centered, if not, returns the normal position.
def should_center(pos, w_size, bool):
if bool:
return w_size / 2
else:
return pos
font = pygame.font.SysFont(self.font_family, font_size, True)
text_obj = font.render(str(text), True, color)
window_size = pygame.display.get_surface().get_size()
# Set text element.
text_rect = text_obj.get_rect(center=(
should_center(x_pos, window_size[0], hor_center),
should_center(y_pos, window_size[1], ver_center)))
# Draw text
self.window.blit(text_obj, text_rect)
```
|
{
"source": "jdramani/599-2",
"score": 3
}
|
#### File: q11/tika-similarity-edited/indexer.py
```python
import json
import os
def index_from_json(type, file_name, child_node):
with open(file_name, 'r') as f:
json_obj = json.load(f)
obj_dict = {}
walk_json(type, json_obj, "", child_node, obj_dict)
with open("fixed_%s" % file_name, 'w') as f:
json.dump(obj_dict.values(), f)
def walk_json(type, root, parent_name, child_node, obj_dict):
obj = {key: value for (key, value) in root.items() if key != child_node}
obj['type'] = type
if parent_name not in obj_dict:
obj['parent'] = []
else:
obj['parent'] = obj_dict[parent_name]['parent'] + [parent_name]
obj_dict[obj['name']] = obj
if child_node in root:
for child in root[child_node]:
walk_json(type, child, obj['name'], child_node, obj_dict)
if __name__ == "__main__":
# index_from_json("geo", "circle_geo.json", "children")
# index_from_json("ms", "circle_ms.json", "children")
index_from_json("sweet", "clusters_sweet.json", "children")
```
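To make the flattening performed by `walk_json` concrete, here is a small sketch with made-up data (the `name` and `children` keys mirror the ones used in the calls above):
```python
# Hypothetical two-level hierarchy using the same 'name'/'children' keys.
sample = {
    "name": "root",
    "children": [
        {"name": "leaf_a"},
        {"name": "leaf_b", "children": [{"name": "leaf_c"}]},
    ],
}

flattened = {}
walk_json("demo", sample, "", "children", flattened)
# flattened["root"]   == {'name': 'root',   'type': 'demo', 'parent': []}
# flattened["leaf_a"] == {'name': 'leaf_a', 'type': 'demo', 'parent': ['root']}
# flattened["leaf_c"] == {'name': 'leaf_c', 'type': 'demo', 'parent': ['root', 'leaf_b']}
```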
#### File: q11/tika-similarity-edited/vector.py
```python
import math
def stringify(attribute_value):
if isinstance(attribute_value, list):
attribute_value = [str(val) for val in attribute_value]
try:
return str((", ".join(attribute_value)).encode('utf-8').strip())
except:
return str(", ".join(attribute_value)).strip()
else:
try:
return str(attribute_value.encode('utf-8').strip())
except:
return str(attribute_value)
class Vector:
'''
An instance of this class represents a vector in n-dimensional space
'''
def __init__(self, filename=None, features=None):
'''
Create a vector
@param metadata features
'''
self.features = {}
if filename and features:
self.filename = filename
na_metadata = ["id", "_version_", "Name", "name"]
for na in na_metadata:
features.pop(na, None)
for key in features:
self.features[key] = len(stringify(features[key]))
'''
def __str__(self):
vector_str = "( {0} ): \n".format(self.)
if self.features:
for key in self.features:
vector_str += " {1}: {2} \n".format(key, self.features[key])
return vector_str+"\n"
'''
def getMagnitude(self):
totalMagnitude = 0.0
for key in self.features:
totalMagnitude += self.features[key] ** 2
return math.sqrt(totalMagnitude)
def dotProduct(self, anotherVector):
'''
A = ax+by+cz
B = mx+ny+oz
A.B = a*m + b*n + c*o
'''
dot_product = 0.0
intersect_features = set(self.features) & set(anotherVector.features)
for feature in intersect_features:
dot_product += self.features[feature] * anotherVector.features[feature]
return dot_product
def cosTheta(self, v2):
'''
cosTheta = (V1.V2) / (|V1| |V2|)
cos 0 = 1 implies identical documents
'''
return self.dotProduct(v2) / (self.getMagnitude() * v2.getMagnitude())
def euclidean_dist(self, anotherVector):
'''
dist = ((x1-x2)^2 + (y1-y2)^2 + (z1-z2)^2)^(0.5)
'''
intersect_features = set(self.features) & set(anotherVector.features)
dist_sum = 0.0
for feature in intersect_features:
dist_sum += (self.features[feature] - anotherVector.features[feature]) ** 2
setA = set(self.features) - intersect_features
for feature in setA:
dist_sum += self.features[feature] ** 2
setB = set(anotherVector.features) - intersect_features
for feature in setB:
dist_sum += anotherVector.features[feature] ** 2
return math.sqrt(dist_sum)
```
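A short usage sketch for the class above (the metadata dictionaries are invented; in the real pipeline they would presumably come from Tika-parsed file metadata):
```python
# Each feature value is reduced to the length of its stringified form, so the
# vectors live in a space whose axes are the shared metadata keys.
v1 = Vector("a.pdf", {"Content-Type": "application/pdf", "Author": "Alice"})
v2 = Vector("b.pdf", {"Content-Type": "application/pdf", "Author": "Bob"})

print(v1.cosTheta(v2))        # close to 1.0 for near-identical metadata
print(v1.euclidean_dist(v2))  # 0.0 only when all feature lengths match
```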
|
{
"source": "jdramirezl/Octree-Python",
"score": 3
}
|
#### File: jdramirezl/Octree-Python/main.py
```python
from Octree import Octree, Box, Point, Sphere
import random
import math
import pygame
import time
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
from itertools import product, combinations
def time_info(points, query):
time_tot = 0
ind = 0
print("Octree", query)
for point in points:
ind += 1
print("Iteration:", ind) if ind % 1000 == 0 else None
x_local, y_local, z_local = point.x, point.y, point.z
pnts = []
start = time.time()
for pnt in points:
x2, y2, z2 = pnt.x, pnt.y, pnt.z
difx, dify, difz = abs(
x_local - x2), abs(y_local - y2), abs(z_local - z2)
dis = math.sqrt(pow(difx, 2) + pow(dify, 2) + pow(difz, 2))
if dis > 100:
continue
pnts.append(pnt)
time_tot += time.time() - start
print("O(n^2)", time_tot)
def info(in_range_local, x_local, y_local, z_local, points_local):
print()
print("Points in range of [" + str(x_local) +
"," + str(y_local) + "," + str(z_local) + "]")
print("With Oct")
for pnt in in_range_local:
x2, y2, z2 = pnt.x, pnt.y, pnt.z
dis = math.sqrt(pow((x_local - x2), 2) +
pow(y_local - y2, 2) + pow(z_local - z2, 2))
print("\tpoint: [" + str(x2) + "," +
str(y2) + "," + str(z2) + "]", end=" ")
print("-> distance:", dis)
print("With O(n2)")
for pnt in points_local:
x2, y2, z2 = pnt.x, pnt.y, pnt.z
difx, dify, difz = abs(
x_local - x2), abs(y_local - y2), abs(z_local - z2)
dis = math.sqrt(pow(difx, 2) + pow(dify, 2) + pow(difz, 2))
if dis > 100:
continue
print("\tpoint: [" + str(x2) + "," +
str(y2) + "," + str(z2) + "]", end=" ")
print("-> distance:", dis, "dif in cords:", difx, dify, dify)
def pygame_show(screen, ot: Octree, points_in_range: list, point: Point, query: tuple):
# Colors
white = (255, 255, 255)
blue = (0, 0, 255)
red = (204, 0, 0)
black = (0, 0, 0)
green = (65, 199, 52)
clock = pygame.time.Clock()
fps = 3
# Refill Screen
screen.fill(white)
canvas = pygame.Rect((0, 0), (1200, 800))
pygame.draw.rect(screen, black, canvas)
ot.show(screen, points_in_range, point, query)
run = True
counter = 0
pygame.draw.line(screen, green, (0, 400), (1200, 400), 5)
pygame.draw.line(screen, green, (400, 0), (400, 800), 5)
pygame.draw.line(screen, green, (800, 0), (800, 800), 5)
while run:
clock.tick(fps)
pygame.draw.circle(screen, red, (point.x, point.y), 7)
# pygame.draw.rect(screen, blue, query, 1)
if counter == 5:
run = False
counter += 1
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.quit()
pygame.display.update()
def with_visualization():
# Data for pygame
pygame.init()
screen_width = 1200
screen_height = 800
screen = pygame.display.set_mode((screen_width, screen_height))
# Data for Octree
width, height, depth = 4000, 40000, 4000
is_box_collider = False
# Range of search (Or radius)
limit_query = (200, 200, 200) if is_box_collider else 100
# Create Octree
limit = Box(0, 0, 0, width, height, depth)
ot = Octree(limit, 4)
# Generate Points
n_of_points = 100
points = [
Point(-75.5495499995, 6.33545000045, 1378.75),
Point(-75.5504500004, 6.33545000045, 1326.07),
Point(-75.5495499995, 6.33454999955, 1318.85),
Point(-75.5504500004, 6.33454999955, 1377.63)
]
# Insert points in tree
for point in points:
ot.insert(point)
tot_time, ind = 0, 0
for point in points:
ind += 1
print("Iteration:", ind) if ind % 1000 == 0 else None
# Data of current point
x, y, z = point.x, point.y, point.z
# Create box or sphere range for point
if is_box_collider:
query = Box(x, y, z, *limit_query)
else:
query = Sphere(x, y, z, limit_query)
# Get points in range
start = time.time()
in_range = ot.query(query, set())
dif = time.time() - start
tot_time += dif
# Get info of Points
info(in_range, x, y, z, points)
# Visualize
pygame_show(screen, ot, in_range, point, limit_query)
time_info(points, tot_time)
pygame.quit()
def only_tree():
# Create Octree
width, height, depth = 0, 0, 0
limit = Box(0, 0, 0, width, height, depth)
ot = Octree(limit, 4)
# Create query
box_measures = [0, 0, 0]
radius = 0
x, y, z = 0, 0, 0
box_query = Box(x, y, z, *box_measures)
sphere_query = Sphere(x, y, z, radius)
list_of_points_in_range = ot.query(box_query)
if __name__ == "__main__":
with_visualization()
```
|
{
"source": "jdramirezl/Teacher-Assistant",
"score": 4
}
|
#### File: Computer Programming 2021-1 Python/Taller4/Problema10.py
```python
def temperatura2(grados, escala, convertir):
resultado = 0
if escala == "Farenheit":
if convertir == "Kelvin":
resultado = ((5*(grados-32))/(9)) + 273.15
elif convertir == "Celsius":
resultado = ((9/5)*grados)+32
elif escala == "Kelvin":
if convertir == "Farenheit":
resultado = (9*(grados-273.15)/(5))+32
elif convertir == "Celsius":
resultado = grados-273.15
elif escala == "Celsius":
if convertir == "Farenheit":
resultado = (9/5)*grados+32
elif convertir == "Kelvin":
resultado = grados + 273.15
return resultado
print(temperatura2(50, "Farenheit", "Kelvin"))
```
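A quick sanity check of the formulas above (illustrative only):
```python
# 50 degrees Fahrenheit to Kelvin: 5*(50-32)/9 + 273.15 = 10 + 273.15 = 283.15
assert abs(temperatura2(50, "Farenheit", "Kelvin") - 283.15) < 1e-9
# Celsius to Kelvin is a plain offset: 20 + 273.15 = 293.15
assert abs(temperatura2(20, "Celsius", "Kelvin") - 293.15) < 1e-9
```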
|
{
"source": "jdranczewski/Dimension-Surfer-Project",
"score": 3
}
|
#### File: jdranczewski/Dimension-Surfer-Project/sat.py
```python
import math
# Project a given polygon onto an axis.
def project(polygon, normal):
# Create a list of projected vertices.
projected = []
# We treat each vertex coordinates as a position vector
# and iterate on them.
for vect in polygon:
# Calculate the dot product of the position vector and the axis vector.
dp = vect[0] * normal[0] + vect[1] * normal[1]
# Calculate the projection of the position vector on the axis.
projected_v = [normal[0] * dp, normal[1] * dp]
# Calculate the projection's length - this is what we actually need.
projected_l = math.sqrt(projected_v[0] ** 2 + projected_v[1] ** 2)
# Get the direction of the projection relative to the axis direction.
sign_p = projected_v[0] * normal[0] + projected_v[1] * normal[1]
# Apply the direction to the projected length.
projected_l = math.copysign(projected_l, sign_p)
# Append the calculated projection to the list of projected vertices.
projected.append(projected_l)
# After all vertices are processed, return the boundaries of the projection.
return [min(projected), max(projected)]
# Check whether there is overlap.
def checkOverlap(obstacle, player, normal):
# Project the player and the obstacle onto the axis given by the normal vector.
obstacle_p = project(obstacle, normal)
player_p = project(player, normal)
# Test for overlap.
if (obstacle_p[1] < player_p[0]) or (obstacle_p[0] > player_p[1]) or obstacle_p[1]-obstacle_p[0] < 1:
# If the above condition is true,
# it means that the projections do not overlap.
return False
else:
# Else, it means that there is overlap.
return True
# Check for overlap and calculate projection vectors.
def calculateProjectionVectors(obstacle, player, normal):
# Project the player and the obstacle onto the axis given by the normal vector.
obstacle_p = project(obstacle, normal)
player_p = project(player, normal)
# Test for overlap.
if (obstacle_p[1] < player_p[0]) or (obstacle_p[0] > player_p[1]) or obstacle_p[1]-obstacle_p[0] < 1:
# If the above condition is true,
# it means that the projections do not overlap.
return False
else:
# Else, it means that there is overlap.
# Calculate the values of the projection vectors.
value1 = obstacle_p[0] - player_p[1]
value2 = obstacle_p[1] - player_p[0]
# Make them directed along the normal.
vector1 = [normal[0] * value1, normal[1] * value1]
vector2 = [normal[0] * value2, normal[1] * value2]
# Return the necessary data.
return [abs(value1), vector1, abs(value2), vector2]
# Calculate the normal vector for a given edge.
def getNormal(a, b):
# Obtain the vector representing the edge...
edge = [b[0] - a[0], b[1] - a[1]]
# ...and its length.
length = math.sqrt(edge[0]**2 + edge[1]**2)
# Turn the edge vector into a unit vector.
edge = [edge[0] / length, edge[1] / length]
# Create a vector perpendicular to the unit edge vector...
normal = [-edge[1], edge[0]]
# ...and return it.
return normal
```
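The three helpers above are the building blocks of a full Separating Axis Theorem test: two convex polygons collide only if their projections overlap on every axis normal to an edge of either polygon. A minimal sketch combining them (the square and triangle coordinates are arbitrary):
```python
def polygons_collide(poly_a, poly_b):
    # Every edge normal of either polygon is a candidate separating axis.
    for polygon in (poly_a, poly_b):
        for i in range(len(polygon)):
            normal = getNormal(polygon[i], polygon[(i + 1) % len(polygon)])
            # A single axis without overlap is enough to separate the shapes.
            if not checkOverlap(poly_a, poly_b, normal):
                return False
    return True


square = [[0, 0], [10, 0], [10, 10], [0, 10]]
triangle = [[5, 5], [20, 5], [5, 20]]
print(polygons_collide(square, triangle))  # True: the triangle overlaps the square
```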
|
{
"source": "jdranczewski/tkinter-experiments",
"score": 3
}
|
#### File: jdranczewski/tkinter-experiments/dialogclass.py
```python
from tkinter import *
import tkSimpleDialog
from tkinter import messagebox
class MyDialog(tkSimpleDialog.Dialog):
def body(self, master):
Label(master, text="First:").grid(row=0)
Label(master, text="Second:").grid(row=1)
self.e1 = Entry(master)
self.e2 = Entry(master)
self.e1.grid(row=0, column=1)
self.e2.grid(row=1, column=1)
self.var = IntVar()
self.cb = Checkbutton(master, text="Hardercopy", variable=self.var)
self.cb.grid(row=2, columnspan=2, sticky=W)
return self.e1
def validate(self):
try:
first = int(self.e1.get())
second = int(self.e2.get())
return 1
except ValueError:
messagebox.showerror("Error", "Both values need to be integers!")
return 0
def apply(self):
self.result = (self.e1.get(), self.e2.get(), self.var.get())
def showMyDialog(parent):
d = MyDialog(root)
print(d.result)
return d.result
root = Tk()
Button(root, text="MyDialog", command=lambda: showMyDialog(root)).pack()
root.mainloop()
```
|
{
"source": "JDRanpariya/pyworkable",
"score": 3
}
|
#### File: JDRanpariya/pyworkable/Workable.py
```python
import requests
import json
class Workable(object):
def __init__(self, account, apikey):
self.account = str(account).lower()
self.apikey = str(apikey)
# Authorization headers. Content-Type is not necessary,
# but should workable start providing alternate
# content types such as XML, this won't break
self.request_headers = {
'Content-Type': 'application/json',
'authorization': 'Bearer ' + self.apikey
}
# Base URL Endpoint for all API requests
self.api_base = 'https://www.workable.com/spi/v3/accounts/' + self.account + '/'
# API Endpoints for all jobs, a single job, gettig a list of stages and account members
self.endpoints = {
'jobs': self.api_base + 'jobs',
'job': self.api_base + 'jobs/',
'stages': self.api_base + 'stages/',
'members': self.api_base + 'members'
}
# Increase default limit for downloading lists from 50 to 100,
# so we need to make fewer requests
self.default_limit = 100
#############################################################################################
# Functions
#############################################################################################
def workable_depaginate(self, url, key):
"""
Returns one object based on a given key for a workable API endpoint.
Arguments:
url -- the API endpoint that returns paginated data
key -- the key that contains all the data
"""
        list = []
        paging = True
        # Request self.default_limit results per page on the first call; later
        # pages are fetched via the 'paging.next' link returned by the API.
        url = url + '?limit=' + str(self.default_limit)
        while paging == True:
            request = requests.get(url, headers=self.request_headers)
            response_json = request.json()
            list.extend(response_json[key])
            try:
                url = response_json['paging']['next']
            except KeyError:
                paging = False
            else:
                paging = True
        return list
def candidate_list(self, job):
"""
Download and return the basic list of all candidates for a given job
"""
job_candidates_url = self.endpoints['job'] + job + '/candidates'
candidate_list = self.workable_depaginate(job_candidates_url, 'candidates')
return candidate_list
def candidate_details(self, candidate_list, job):
"""
Download and return Details for all candidates in a candidate_list
"""
candidates = []
for candidate in candidate_list:
            detail = self.single_candidate_detail(candidate['id'], job)
candidates.append(detail['candidate'])
return candidates
def single_candidate_detail(self, candidate_id, job):
"""
Returns the candidate's detail information, for a given candidate identified by ID
"""
url = self.endpoints['job'] + job + '/candidates/' + candidate_id
request = requests.get(url, headers=self.request_headers)
response = request.json()
return response
def workable_write_json(self, object, filename):
"""
Save the output from workable to a file. Existing files will be overwritten without warning!
:param object: result from calling the workable API, JSON format
:param filename: name the file should be saved as, without .json extension
"""
full_name = filename + '.json'
open(full_name, 'w').close()
file = open(full_name, 'a', encoding='utf-8')
file.write(json.dumps(object, indent=2))
return
def job_list(self, state=''):
"""
Returns a list of all jobs matching the given state
:param state: one of the following: draft, published, archived, closed
:return: Job List
"""
jobs = []
if state != '':
url = self.endpoints['jobs'] + '?state=' + state
else:
url = self.endpoints['jobs']
        jobs = self.workable_depaginate(url, 'jobs')
return jobs
def job_detail(self, job):
"""
Returns detail info for a given job
:param job: Job Shortcode
:return: Job Info
"""
url = self.endpoints['job'] + job
request = requests.get(url, headers=self.request_headers)
job = request.json()
return job
```
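A short usage sketch for the wrapper above (the account name and API key are placeholders, and the presence of a 'shortcode' field on each job object is an assumption about the Workable v3 API):
```python
# Placeholder credentials; a real key comes from Workable's integration settings.
workable = Workable('my-account', 'my-api-key')

# List published jobs, then pull and save the candidates of the first one.
jobs = workable.job_list(state='published')
if jobs:
    shortcode = jobs[0]['shortcode']
    candidates = workable.candidate_list(shortcode)
    workable.workable_write_json(candidates, 'candidates_' + shortcode)
```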
|
{
"source": "jdraymon/python-engineio",
"score": 2
}
|
#### File: tests/asyncio/test_asyncio_client.py
```python
import asyncio
import ssl
import sys
import unittest
from unittest import mock
try:
import aiohttp
except ImportError:
aiohttp = None
import pytest
from engineio import asyncio_client
from engineio import client
from engineio import exceptions
from engineio import packet
from engineio import payload
def AsyncMock(*args, **kwargs):
"""Return a mock asynchronous function."""
m = mock.MagicMock(*args, **kwargs)
async def mock_coro(*args, **kwargs):
return m(*args, **kwargs)
mock_coro.mock = m
return mock_coro
def _run(coro):
"""Run the given coroutine."""
return asyncio.get_event_loop().run_until_complete(coro)
@unittest.skipIf(sys.version_info < (3, 5), 'only for Python 3.5+')
class TestAsyncClient(unittest.TestCase):
def test_is_asyncio_based(self):
c = asyncio_client.AsyncClient()
assert c.is_asyncio_based()
def test_already_connected(self):
c = asyncio_client.AsyncClient()
c.state = 'connected'
with pytest.raises(ValueError):
_run(c.connect('http://foo'))
def test_invalid_transports(self):
c = asyncio_client.AsyncClient()
with pytest.raises(ValueError):
_run(c.connect('http://foo', transports=['foo', 'bar']))
def test_some_invalid_transports(self):
c = asyncio_client.AsyncClient()
c._connect_websocket = AsyncMock()
_run(c.connect('http://foo', transports=['foo', 'websocket', 'bar']))
assert c.transports == ['websocket']
def test_connect_polling(self):
c = asyncio_client.AsyncClient()
c._connect_polling = AsyncMock(return_value='foo')
assert _run(c.connect('http://foo')) == 'foo'
c._connect_polling.mock.assert_called_once_with(
'http://foo', {}, 'engine.io'
)
c = asyncio_client.AsyncClient()
c._connect_polling = AsyncMock(return_value='foo')
assert _run(c.connect('http://foo', transports=['polling'])) == 'foo'
c._connect_polling.mock.assert_called_once_with(
'http://foo', {}, 'engine.io'
)
c = asyncio_client.AsyncClient()
c._connect_polling = AsyncMock(return_value='foo')
assert (
_run(c.connect('http://foo', transports=['polling', 'websocket']))
== 'foo'
)
c._connect_polling.mock.assert_called_once_with(
'http://foo', {}, 'engine.io'
)
def test_connect_websocket(self):
c = asyncio_client.AsyncClient()
c._connect_websocket = AsyncMock(return_value='foo')
assert _run(c.connect('http://foo', transports=['websocket'])) == 'foo'
c._connect_websocket.mock.assert_called_once_with(
'http://foo', {}, 'engine.io'
)
c = asyncio_client.AsyncClient()
c._connect_websocket = AsyncMock(return_value='foo')
assert _run(c.connect('http://foo', transports='websocket')) == 'foo'
c._connect_websocket.mock.assert_called_once_with(
'http://foo', {}, 'engine.io'
)
def test_connect_query_string(self):
c = asyncio_client.AsyncClient()
c._connect_polling = AsyncMock(return_value='foo')
assert _run(c.connect('http://foo?bar=baz')) == 'foo'
c._connect_polling.mock.assert_called_once_with(
'http://foo?bar=baz', {}, 'engine.io'
)
def test_connect_custom_headers(self):
c = asyncio_client.AsyncClient()
c._connect_polling = AsyncMock(return_value='foo')
assert _run(c.connect('http://foo', headers={'Foo': 'Bar'})) == 'foo'
c._connect_polling.mock.assert_called_once_with(
'http://foo', {'Foo': 'Bar'}, 'engine.io'
)
def test_wait(self):
c = asyncio_client.AsyncClient()
done = []
        async def fake_read_loop_task():
            done.append(True)
        c.read_loop_task = fake_read_loop_task()
_run(c.wait())
assert done == [True]
def test_wait_no_task(self):
c = asyncio_client.AsyncClient()
c.read_loop_task = None
_run(c.wait())
def test_send(self):
c = asyncio_client.AsyncClient()
saved_packets = []
async def fake_send_packet(pkt):
saved_packets.append(pkt)
c._send_packet = fake_send_packet
_run(c.send('foo'))
_run(c.send('foo'))
_run(c.send(b'foo'))
assert saved_packets[0].packet_type == packet.MESSAGE
assert saved_packets[0].data == 'foo'
assert not saved_packets[0].binary
assert saved_packets[1].packet_type == packet.MESSAGE
assert saved_packets[1].data == 'foo'
assert not saved_packets[1].binary
assert saved_packets[2].packet_type == packet.MESSAGE
assert saved_packets[2].data == b'foo'
assert saved_packets[2].binary
def test_disconnect_not_connected(self):
c = asyncio_client.AsyncClient()
c.state = 'foo'
c.sid = 'bar'
_run(c.disconnect())
assert c.state == 'disconnected'
assert c.sid is None
def test_disconnect_polling(self):
c = asyncio_client.AsyncClient()
client.connected_clients.append(c)
c.state = 'connected'
c.current_transport = 'polling'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c.queue.join = AsyncMock()
c.read_loop_task = AsyncMock()()
c.ws = mock.MagicMock()
c.ws.close = AsyncMock()
c._trigger_event = AsyncMock()
_run(c.disconnect())
c.ws.close.mock.assert_not_called()
assert c not in client.connected_clients
c._trigger_event.mock.assert_called_once_with(
'disconnect', run_async=False
)
def test_disconnect_websocket(self):
c = asyncio_client.AsyncClient()
client.connected_clients.append(c)
c.state = 'connected'
c.current_transport = 'websocket'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c.queue.join = AsyncMock()
c.read_loop_task = AsyncMock()()
c.ws = mock.MagicMock()
c.ws.close = AsyncMock()
c._trigger_event = AsyncMock()
_run(c.disconnect())
c.ws.close.mock.assert_called_once_with()
assert c not in client.connected_clients
c._trigger_event.mock.assert_called_once_with(
'disconnect', run_async=False
)
def test_disconnect_polling_abort(self):
c = asyncio_client.AsyncClient()
client.connected_clients.append(c)
c.state = 'connected'
c.current_transport = 'polling'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c.queue.join = AsyncMock()
c.read_loop_task = AsyncMock()()
c.ws = mock.MagicMock()
c.ws.close = AsyncMock()
_run(c.disconnect(abort=True))
c.queue.join.mock.assert_not_called()
c.ws.close.mock.assert_not_called()
assert c not in client.connected_clients
def test_disconnect_websocket_abort(self):
c = asyncio_client.AsyncClient()
client.connected_clients.append(c)
c.state = 'connected'
c.current_transport = 'websocket'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c.queue.join = AsyncMock()
c.read_loop_task = AsyncMock()()
c.ws = mock.MagicMock()
c.ws.close = AsyncMock()
_run(c.disconnect(abort=True))
c.queue.join.mock.assert_not_called()
c.ws.mock.assert_not_called()
assert c not in client.connected_clients
def test_background_tasks(self):
r = []
async def foo(arg):
r.append(arg)
c = asyncio_client.AsyncClient()
c.start_background_task(foo, 'bar')
pending = asyncio.all_tasks(loop=asyncio.get_event_loop()) \
if hasattr(asyncio, 'all_tasks') else asyncio.Task.all_tasks()
asyncio.get_event_loop().run_until_complete(asyncio.wait(pending))
assert r == ['bar']
def test_sleep(self):
c = asyncio_client.AsyncClient()
_run(c.sleep(0))
def test_create_queue(self):
c = asyncio_client.AsyncClient()
q = c.create_queue()
with pytest.raises(q.Empty):
q.get_nowait()
def test_create_event(self):
c = asyncio_client.AsyncClient()
e = c.create_event()
assert not e.is_set()
e.set()
assert e.is_set()
@mock.patch('engineio.client.time.time', return_value=123.456)
def test_polling_connection_failed(self, _time):
c = asyncio_client.AsyncClient()
c._send_request = AsyncMock(return_value=None)
with pytest.raises(exceptions.ConnectionError):
_run(c.connect('http://foo', headers={'Foo': 'Bar'}))
c._send_request.mock.assert_called_once_with(
'GET',
'http://foo/engine.io/?transport=polling&EIO=4&t=123.456',
headers={'Foo': 'Bar'},
timeout=5,
)
def test_polling_connection_404(self):
c = asyncio_client.AsyncClient()
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 404
c._send_request.mock.return_value.json = AsyncMock(
return_value={'foo': 'bar'}
)
try:
_run(c.connect('http://foo'))
except exceptions.ConnectionError as exc:
assert len(exc.args) == 2
assert (
exc.args[0] == 'Unexpected status code 404 in server response'
)
assert exc.args[1] == {'foo': 'bar'}
def test_polling_connection_404_no_json(self):
c = asyncio_client.AsyncClient()
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 404
c._send_request.mock.return_value.json = AsyncMock(
side_effect=aiohttp.ContentTypeError('foo', 'bar')
)
try:
_run(c.connect('http://foo'))
except exceptions.ConnectionError as exc:
assert len(exc.args) == 2
assert (
exc.args[0] == 'Unexpected status code 404 in server response'
)
assert exc.args[1] is None
def test_polling_connection_invalid_packet(self):
c = asyncio_client.AsyncClient()
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 200
c._send_request.mock.return_value.read = AsyncMock(return_value=b'foo')
with pytest.raises(exceptions.ConnectionError):
_run(c.connect('http://foo'))
def test_polling_connection_no_open_packet(self):
c = asyncio_client.AsyncClient()
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 200
c._send_request.mock.return_value.read = AsyncMock(
return_value=payload.Payload(
packets=[
packet.Packet(
packet.CLOSE,
{
'sid': '123',
'upgrades': [],
'pingInterval': 10,
'pingTimeout': 20,
},
)
]
).encode().encode('utf-8')
)
with pytest.raises(exceptions.ConnectionError):
_run(c.connect('http://foo'))
def test_polling_connection_successful(self):
c = asyncio_client.AsyncClient()
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 200
c._send_request.mock.return_value.read = AsyncMock(
return_value=payload.Payload(
packets=[
packet.Packet(
packet.OPEN,
{
'sid': '123',
'upgrades': [],
'pingInterval': 1000,
'pingTimeout': 2000,
},
)
]
).encode().encode('utf-8')
)
c._read_loop_polling = AsyncMock()
c._read_loop_websocket = AsyncMock()
c._write_loop = AsyncMock()
on_connect = AsyncMock()
c.on('connect', on_connect)
_run(c.connect('http://foo'))
c._read_loop_polling.mock.assert_called_once_with()
c._read_loop_websocket.mock.assert_not_called()
c._write_loop.mock.assert_called_once_with()
on_connect.mock.assert_called_once_with()
assert c in client.connected_clients
assert (
c.base_url
== 'http://foo/engine.io/?transport=polling&EIO=4&sid=123'
)
assert c.sid == '123'
assert c.ping_interval == 1
assert c.ping_timeout == 2
assert c.upgrades == []
assert c.transport() == 'polling'
def test_polling_https_noverify_connection_successful(self):
c = asyncio_client.AsyncClient(ssl_verify=False)
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 200
c._send_request.mock.return_value.read = AsyncMock(
return_value=payload.Payload(
packets=[
packet.Packet(
packet.OPEN,
{
'sid': '123',
'upgrades': [],
'pingInterval': 1000,
'pingTimeout': 2000,
},
)
]
).encode().encode('utf-8')
)
c._read_loop_polling = AsyncMock()
c._read_loop_websocket = AsyncMock()
c._write_loop = AsyncMock()
on_connect = AsyncMock()
c.on('connect', on_connect)
_run(c.connect('https://foo'))
c._read_loop_polling.mock.assert_called_once_with()
c._read_loop_websocket.mock.assert_not_called()
c._write_loop.mock.assert_called_once_with()
on_connect.mock.assert_called_once_with()
assert c in client.connected_clients
assert (
c.base_url
== 'https://foo/engine.io/?transport=polling&EIO=4&sid=123'
)
assert c.sid == '123'
assert c.ping_interval == 1
assert c.ping_timeout == 2
assert c.upgrades == []
assert c.transport() == 'polling'
def test_polling_connection_with_more_packets(self):
c = asyncio_client.AsyncClient()
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 200
c._send_request.mock.return_value.read = AsyncMock(
return_value=payload.Payload(
packets=[
packet.Packet(
packet.OPEN,
{
'sid': '123',
'upgrades': [],
'pingInterval': 1000,
'pingTimeout': 2000,
},
),
packet.Packet(packet.NOOP),
]
).encode().encode('utf-8')
)
c._read_loop_polling = AsyncMock()
c._read_loop_websocket = AsyncMock()
c._write_loop = AsyncMock()
c._receive_packet = AsyncMock()
on_connect = AsyncMock()
c.on('connect', on_connect)
_run(c.connect('http://foo'))
assert c._receive_packet.mock.call_count == 1
assert (
c._receive_packet.mock.call_args_list[0][0][0].packet_type
== packet.NOOP
)
def test_polling_connection_upgraded(self):
c = asyncio_client.AsyncClient()
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 200
c._send_request.mock.return_value.read = AsyncMock(
return_value=payload.Payload(
packets=[
packet.Packet(
packet.OPEN,
{
'sid': '123',
'upgrades': ['websocket'],
'pingInterval': 1000,
'pingTimeout': 2000,
},
)
]
).encode().encode('utf-8')
)
c._connect_websocket = AsyncMock(return_value=True)
on_connect = mock.MagicMock()
c.on('connect', on_connect)
_run(c.connect('http://foo'))
c._connect_websocket.mock.assert_called_once_with(
'http://foo', {}, 'engine.io'
)
on_connect.assert_called_once_with()
assert c in client.connected_clients
assert (
c.base_url
== 'http://foo/engine.io/?transport=polling&EIO=4&sid=123'
)
assert c.sid == '123'
assert c.ping_interval == 1
assert c.ping_timeout == 2
assert c.upgrades == ['websocket']
def test_polling_connection_not_upgraded(self):
c = asyncio_client.AsyncClient()
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 200
c._send_request.mock.return_value.read = AsyncMock(
return_value=payload.Payload(
packets=[
packet.Packet(
packet.OPEN,
{
'sid': '123',
'upgrades': ['websocket'],
'pingInterval': 1000,
'pingTimeout': 2000,
},
)
]
).encode().encode('utf-8')
)
c._connect_websocket = AsyncMock(return_value=False)
c._read_loop_polling = AsyncMock()
c._read_loop_websocket = AsyncMock()
c._write_loop = AsyncMock()
on_connect = mock.MagicMock()
c.on('connect', on_connect)
_run(c.connect('http://foo'))
c._connect_websocket.mock.assert_called_once_with(
'http://foo', {}, 'engine.io'
)
c._read_loop_polling.mock.assert_called_once_with()
c._read_loop_websocket.mock.assert_not_called()
c._write_loop.mock.assert_called_once_with()
on_connect.assert_called_once_with()
assert c in client.connected_clients
@mock.patch('engineio.client.time.time', return_value=123.456)
def test_websocket_connection_failed(self, _time):
c = asyncio_client.AsyncClient()
c.http = mock.MagicMock(closed=False)
c.http.ws_connect = AsyncMock(
side_effect=[aiohttp.client_exceptions.ServerConnectionError()]
)
with pytest.raises(exceptions.ConnectionError):
_run(
c.connect(
'http://foo',
transports=['websocket'],
headers={'Foo': 'Bar'},
)
)
c.http.ws_connect.mock.assert_called_once_with(
'ws://foo/engine.io/?transport=websocket&EIO=4&t=123.456',
headers={'Foo': 'Bar'},
)
@mock.patch('engineio.client.time.time', return_value=123.456)
def test_websocket_upgrade_failed(self, _time):
c = asyncio_client.AsyncClient()
c.http = mock.MagicMock(closed=False)
c.http.ws_connect = AsyncMock(
side_effect=[aiohttp.client_exceptions.ServerConnectionError()]
)
c.sid = '123'
assert not _run(c.connect('http://foo', transports=['websocket']))
c.http.ws_connect.mock.assert_called_once_with(
'ws://foo/engine.io/?transport=websocket&EIO=4&sid=123&t=123.456',
headers={},
)
def test_websocket_connection_no_open_packet(self):
c = asyncio_client.AsyncClient()
c.http = mock.MagicMock(closed=False)
c.http.ws_connect = AsyncMock()
ws = c.http.ws_connect.mock.return_value
ws.receive = AsyncMock()
ws.receive.mock.return_value.data = packet.Packet(
packet.CLOSE
).encode()
with pytest.raises(exceptions.ConnectionError):
_run(c.connect('http://foo', transports=['websocket']))
@mock.patch('engineio.client.time.time', return_value=123.456)
def test_websocket_connection_successful(self, _time):
c = asyncio_client.AsyncClient()
c.http = mock.MagicMock(closed=False)
c.http.ws_connect = AsyncMock()
ws = c.http.ws_connect.mock.return_value
ws.receive = AsyncMock()
ws.receive.mock.return_value.data = packet.Packet(
packet.OPEN,
{
'sid': '123',
'upgrades': [],
'pingInterval': 1000,
'pingTimeout': 2000,
},
).encode()
c._read_loop_polling = AsyncMock()
c._read_loop_websocket = AsyncMock()
c._write_loop = AsyncMock()
on_connect = mock.MagicMock()
c.on('connect', on_connect)
_run(c.connect('ws://foo', transports=['websocket']))
c._read_loop_polling.mock.assert_not_called()
c._read_loop_websocket.mock.assert_called_once_with()
c._write_loop.mock.assert_called_once_with()
on_connect.assert_called_once_with()
assert c in client.connected_clients
assert c.base_url == 'ws://foo/engine.io/?transport=websocket&EIO=4'
assert c.sid == '123'
assert c.ping_interval == 1
assert c.ping_timeout == 2
assert c.upgrades == []
assert c.transport() == 'websocket'
assert c.ws == ws
c.http.ws_connect.mock.assert_called_once_with(
'ws://foo/engine.io/?transport=websocket&EIO=4&t=123.456',
headers={},
)
@mock.patch('engineio.client.time.time', return_value=123.456)
def test_websocket_https_noverify_connection_successful(self, _time):
c = asyncio_client.AsyncClient(ssl_verify=False)
c.http = mock.MagicMock(closed=False)
c.http.ws_connect = AsyncMock()
ws = c.http.ws_connect.mock.return_value
ws.receive = AsyncMock()
ws.receive.mock.return_value.data = packet.Packet(
packet.OPEN,
{
'sid': '123',
'upgrades': [],
'pingInterval': 1000,
'pingTimeout': 2000,
},
).encode()
c._read_loop_polling = AsyncMock()
c._read_loop_websocket = AsyncMock()
c._write_loop = AsyncMock()
on_connect = mock.MagicMock()
c.on('connect', on_connect)
_run(c.connect('wss://foo', transports=['websocket']))
c._read_loop_polling.mock.assert_not_called()
c._read_loop_websocket.mock.assert_called_once_with()
c._write_loop.mock.assert_called_once_with()
on_connect.assert_called_once_with()
assert c in client.connected_clients
assert c.base_url == 'wss://foo/engine.io/?transport=websocket&EIO=4'
assert c.sid == '123'
assert c.ping_interval == 1
assert c.ping_timeout == 2
assert c.upgrades == []
assert c.transport() == 'websocket'
assert c.ws == ws
_, kwargs = c.http.ws_connect.mock.call_args
assert 'ssl' in kwargs
assert isinstance(kwargs['ssl'], ssl.SSLContext)
assert kwargs['ssl'].verify_mode == ssl.CERT_NONE
@mock.patch('engineio.client.time.time', return_value=123.456)
def test_websocket_connection_with_cookies(self, _time):
c = asyncio_client.AsyncClient()
c.http = mock.MagicMock(closed=False)
c.http.ws_connect = AsyncMock()
ws = c.http.ws_connect.mock.return_value
ws.receive = AsyncMock()
ws.receive.mock.return_value.data = packet.Packet(
packet.OPEN,
{
'sid': '123',
'upgrades': [],
'pingInterval': 1000,
'pingTimeout': 2000,
},
).encode()
c.http._cookie_jar = [mock.MagicMock(), mock.MagicMock()]
c.http._cookie_jar[0].key = 'key'
c.http._cookie_jar[0].value = 'value'
c.http._cookie_jar[1].key = 'key2'
c.http._cookie_jar[1].value = 'value2'
c._read_loop_polling = AsyncMock()
c._read_loop_websocket = AsyncMock()
c._write_loop = AsyncMock()
on_connect = mock.MagicMock()
c.on('connect', on_connect)
_run(c.connect('ws://foo', transports=['websocket']))
c.http.ws_connect.mock.assert_called_once_with(
'ws://foo/engine.io/?transport=websocket&EIO=4&t=123.456',
headers={},
)
@mock.patch('engineio.client.time.time', return_value=123.456)
def test_websocket_connection_with_cookie_header(self, _time):
c = asyncio_client.AsyncClient()
c.http = mock.MagicMock(closed=False)
c.http.ws_connect = AsyncMock()
ws = c.http.ws_connect.mock.return_value
ws.receive = AsyncMock()
ws.receive.mock.return_value.data = packet.Packet(
packet.OPEN,
{
'sid': '123',
'upgrades': [],
'pingInterval': 1000,
'pingTimeout': 2000,
},
).encode()
c.http._cookie_jar = []
c._read_loop_polling = AsyncMock()
c._read_loop_websocket = AsyncMock()
c._write_loop = AsyncMock()
on_connect = mock.MagicMock()
c.on('connect', on_connect)
_run(
c.connect(
'ws://foo',
headers={'Cookie': 'key=value; key2=value2; key3="value3="'},
transports=['websocket'],
)
)
c.http.ws_connect.mock.assert_called_once_with(
'ws://foo/engine.io/?transport=websocket&EIO=4&t=123.456',
headers={},
)
c.http.cookie_jar.update_cookies.assert_called_once_with(
{'key': 'value', 'key2': 'value2', 'key3': '"value3="'}
)
@mock.patch('engineio.client.time.time', return_value=123.456)
def test_websocket_connection_with_cookies_and_headers(self, _time):
c = asyncio_client.AsyncClient()
c.http = mock.MagicMock(closed=False)
c.http.ws_connect = AsyncMock()
ws = c.http.ws_connect.mock.return_value
ws.receive = AsyncMock()
ws.receive.mock.return_value.data = packet.Packet(
packet.OPEN,
{
'sid': '123',
'upgrades': [],
'pingInterval': 1000,
'pingTimeout': 2000,
},
).encode()
c.http._cookie_jar = [mock.MagicMock(), mock.MagicMock()]
c.http._cookie_jar[0].key = 'key'
c.http._cookie_jar[0].value = 'value'
c.http._cookie_jar[1].key = 'key2'
c.http._cookie_jar[1].value = 'value2'
c._read_loop_polling = AsyncMock()
c._read_loop_websocket = AsyncMock()
c._write_loop = AsyncMock()
on_connect = mock.MagicMock()
c.on('connect', on_connect)
_run(
c.connect(
'ws://foo',
headers={'Foo': 'Bar', 'Cookie': 'key3=value3'},
transports=['websocket'],
)
)
c.http.ws_connect.mock.assert_called_once_with(
'ws://foo/engine.io/?transport=websocket&EIO=4&t=123.456',
headers={'Foo': 'Bar'},
)
c.http.cookie_jar.update_cookies.assert_called_once_with(
{'key3': 'value3'}
)
def test_websocket_upgrade_no_pong(self):
c = asyncio_client.AsyncClient()
c.http = mock.MagicMock(closed=False)
c.http.ws_connect = AsyncMock()
ws = c.http.ws_connect.mock.return_value
ws.receive = AsyncMock()
ws.receive.mock.return_value.data = packet.Packet(
packet.OPEN,
{
'sid': '123',
'upgrades': [],
'pingInterval': 1000,
'pingTimeout': 2000,
},
).encode()
ws.send_str = AsyncMock()
c.sid = '123'
c.current_transport = 'polling'
c._read_loop_polling = AsyncMock()
c._read_loop_websocket = AsyncMock()
c._write_loop = AsyncMock()
on_connect = mock.MagicMock()
c.on('connect', on_connect)
assert not _run(c.connect('ws://foo', transports=['websocket']))
c._read_loop_polling.mock.assert_not_called()
c._read_loop_websocket.mock.assert_not_called()
c._write_loop.mock.assert_not_called()
on_connect.assert_not_called()
assert c.transport() == 'polling'
ws.send_str.mock.assert_called_once_with('2probe')
def test_websocket_upgrade_successful(self):
c = asyncio_client.AsyncClient()
c.http = mock.MagicMock(closed=False)
c.http.ws_connect = AsyncMock()
ws = c.http.ws_connect.mock.return_value
ws.receive = AsyncMock()
ws.receive.mock.return_value.data = packet.Packet(
packet.PONG, 'probe'
).encode()
ws.send_str = AsyncMock()
c.sid = '123'
c.base_url = 'http://foo'
c.current_transport = 'polling'
c._read_loop_polling = AsyncMock()
c._read_loop_websocket = AsyncMock()
c._write_loop = AsyncMock()
on_connect = mock.MagicMock()
c.on('connect', on_connect)
assert _run(c.connect('ws://foo', transports=['websocket']))
c._read_loop_polling.mock.assert_not_called()
c._read_loop_websocket.mock.assert_called_once_with()
c._write_loop.mock.assert_called_once_with()
on_connect.assert_not_called() # was called by polling
assert c not in client.connected_clients # was added by polling
assert c.base_url == 'http://foo' # not changed
assert c.sid == '123' # not changed
assert c.transport() == 'websocket'
assert c.ws == ws
assert ws.send_str.mock.call_args_list[0] == (('2probe',),) # ping
assert ws.send_str.mock.call_args_list[1] == (('5',),) # upgrade
def test_receive_unknown_packet(self):
c = asyncio_client.AsyncClient()
_run(c._receive_packet(packet.Packet(encoded_packet='9')))
# should be ignored
def test_receive_noop_packet(self):
c = asyncio_client.AsyncClient()
_run(c._receive_packet(packet.Packet(packet.NOOP)))
# should be ignored
def test_receive_ping_packet(self):
c = asyncio_client.AsyncClient()
c._send_packet = AsyncMock()
_run(c._receive_packet(packet.Packet(packet.PING)))
assert c._send_packet.mock.call_args_list[0][0][0].encode() == '3'
def test_receive_message_packet(self):
c = asyncio_client.AsyncClient()
c._trigger_event = AsyncMock()
_run(c._receive_packet(packet.Packet(packet.MESSAGE, {'foo': 'bar'})))
c._trigger_event.mock.assert_called_once_with(
'message', {'foo': 'bar'}, run_async=True
)
def test_receive_close_packet(self):
c = asyncio_client.AsyncClient()
c.disconnect = AsyncMock()
_run(c._receive_packet(packet.Packet(packet.CLOSE)))
c.disconnect.mock.assert_called_once_with(abort=True)
def test_send_packet_disconnected(self):
c = asyncio_client.AsyncClient()
c.queue = c.create_queue()
c.state = 'disconnected'
_run(c._send_packet(packet.Packet(packet.NOOP)))
assert c.queue.empty()
def test_send_packet(self):
c = asyncio_client.AsyncClient()
c.queue = c.create_queue()
c.state = 'connected'
_run(c._send_packet(packet.Packet(packet.NOOP)))
assert not c.queue.empty()
pkt = _run(c.queue.get())
assert pkt.packet_type == packet.NOOP
def test_trigger_event_function(self):
result = []
def foo_handler(arg):
result.append('ok')
result.append(arg)
c = asyncio_client.AsyncClient()
c.on('message', handler=foo_handler)
_run(c._trigger_event('message', 'bar'))
assert result == ['ok', 'bar']
def test_trigger_event_coroutine(self):
result = []
async def foo_handler(arg):
result.append('ok')
result.append(arg)
c = asyncio_client.AsyncClient()
c.on('message', handler=foo_handler)
_run(c._trigger_event('message', 'bar'))
assert result == ['ok', 'bar']
def test_trigger_event_function_error(self):
def connect_handler(arg):
return 1 / 0
def foo_handler(arg):
return 1 / 0
c = asyncio_client.AsyncClient()
c.on('connect', handler=connect_handler)
c.on('message', handler=foo_handler)
assert not _run(c._trigger_event('connect', '123'))
assert _run(c._trigger_event('message', 'bar')) is None
def test_trigger_event_coroutine_error(self):
async def connect_handler(arg):
return 1 / 0
async def foo_handler(arg):
return 1 / 0
c = asyncio_client.AsyncClient()
c.on('connect', handler=connect_handler)
c.on('message', handler=foo_handler)
assert not _run(c._trigger_event('connect', '123'))
assert _run(c._trigger_event('message', 'bar')) is None
def test_trigger_event_function_async(self):
result = []
def foo_handler(arg):
result.append('ok')
result.append(arg)
c = asyncio_client.AsyncClient()
c.on('message', handler=foo_handler)
fut = _run(c._trigger_event('message', 'bar', run_async=True))
asyncio.get_event_loop().run_until_complete(fut)
assert result == ['ok', 'bar']
def test_trigger_event_coroutine_async(self):
result = []
async def foo_handler(arg):
result.append('ok')
result.append(arg)
c = asyncio_client.AsyncClient()
c.on('message', handler=foo_handler)
fut = _run(c._trigger_event('message', 'bar', run_async=True))
asyncio.get_event_loop().run_until_complete(fut)
assert result == ['ok', 'bar']
def test_trigger_event_function_async_error(self):
result = []
def foo_handler(arg):
result.append(arg)
return 1 / 0
c = asyncio_client.AsyncClient()
c.on('message', handler=foo_handler)
fut = _run(c._trigger_event('message', 'bar', run_async=True))
with pytest.raises(ZeroDivisionError):
asyncio.get_event_loop().run_until_complete(fut)
assert result == ['bar']
def test_trigger_event_coroutine_async_error(self):
result = []
async def foo_handler(arg):
result.append(arg)
return 1 / 0
c = asyncio_client.AsyncClient()
c.on('message', handler=foo_handler)
fut = _run(c._trigger_event('message', 'bar', run_async=True))
with pytest.raises(ZeroDivisionError):
asyncio.get_event_loop().run_until_complete(fut)
assert result == ['bar']
def test_trigger_unknown_event(self):
c = asyncio_client.AsyncClient()
_run(c._trigger_event('connect', run_async=False))
_run(c._trigger_event('message', 123, run_async=True))
# should do nothing
def test_read_loop_polling_disconnected(self):
c = asyncio_client.AsyncClient()
c.state = 'disconnected'
c._trigger_event = AsyncMock()
c.write_loop_task = AsyncMock()()
_run(c._read_loop_polling())
c._trigger_event.mock.assert_not_called()
# should not block
@mock.patch('engineio.client.time.time', return_value=123.456)
def test_read_loop_polling_no_response(self, _time):
c = asyncio_client.AsyncClient()
c.ping_interval = 25
c.ping_timeout = 5
c.state = 'connected'
c.base_url = 'http://foo'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c._send_request = AsyncMock(return_value=None)
c._trigger_event = AsyncMock()
c.write_loop_task = AsyncMock()()
_run(c._read_loop_polling())
assert c.state == 'disconnected'
c.queue.put.mock.assert_called_once_with(None)
c._send_request.mock.assert_called_once_with(
'GET', 'http://foo&t=123.456', timeout=30
)
c._trigger_event.mock.assert_called_once_with(
'disconnect', run_async=False
)
@mock.patch('engineio.client.time.time', return_value=123.456)
def test_read_loop_polling_bad_status(self, _time):
c = asyncio_client.AsyncClient()
c.ping_interval = 25
c.ping_timeout = 5
c.state = 'connected'
c.base_url = 'http://foo'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 400
c.write_loop_task = AsyncMock()()
_run(c._read_loop_polling())
assert c.state == 'disconnected'
c.queue.put.mock.assert_called_once_with(None)
c._send_request.mock.assert_called_once_with(
'GET', 'http://foo&t=123.456', timeout=30
)
@mock.patch('engineio.client.time.time', return_value=123.456)
def test_read_loop_polling_bad_packet(self, _time):
c = asyncio_client.AsyncClient()
c.ping_interval = 25
c.ping_timeout = 60
c.state = 'connected'
c.base_url = 'http://foo'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 200
c._send_request.mock.return_value.read = AsyncMock(return_value=b'foo')
c.write_loop_task = AsyncMock()()
_run(c._read_loop_polling())
assert c.state == 'disconnected'
c.queue.put.mock.assert_called_once_with(None)
c._send_request.mock.assert_called_once_with(
'GET', 'http://foo&t=123.456', timeout=65
)
def test_read_loop_polling(self):
c = asyncio_client.AsyncClient()
c.ping_interval = 25
c.ping_timeout = 5
c.state = 'connected'
c.base_url = 'http://foo'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c._send_request = AsyncMock()
c._send_request.mock.side_effect = [
mock.MagicMock(
status=200,
read=AsyncMock(
return_value=payload.Payload(
packets=[
packet.Packet(packet.PING),
packet.Packet(packet.NOOP),
]
).encode().encode('utf-8')
),
),
None,
]
c.write_loop_task = AsyncMock()()
c._receive_packet = AsyncMock()
_run(c._read_loop_polling())
assert c.state == 'disconnected'
c.queue.put.mock.assert_called_once_with(None)
assert c._send_request.mock.call_count == 2
assert c._receive_packet.mock.call_count == 2
assert c._receive_packet.mock.call_args_list[0][0][0].encode() == '2'
assert c._receive_packet.mock.call_args_list[1][0][0].encode() == '6'
def test_read_loop_websocket_disconnected(self):
c = asyncio_client.AsyncClient()
c.state = 'disconnected'
c.write_loop_task = AsyncMock()()
_run(c._read_loop_websocket())
# should not block
def test_read_loop_websocket_timeout(self):
c = asyncio_client.AsyncClient()
c.ping_interval = 1
c.ping_timeout = 2
c.base_url = 'ws://foo'
c.state = 'connected'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c.ws = mock.MagicMock()
c.ws.receive = AsyncMock(side_effect=asyncio.TimeoutError())
c.write_loop_task = AsyncMock()()
_run(c._read_loop_websocket())
assert c.state == 'disconnected'
c.queue.put.mock.assert_called_once_with(None)
def test_read_loop_websocket_no_response(self):
c = asyncio_client.AsyncClient()
c.ping_interval = 1
c.ping_timeout = 2
c.base_url = 'ws://foo'
c.state = 'connected'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c.ws = mock.MagicMock()
c.ws.receive = AsyncMock(
side_effect=aiohttp.client_exceptions.ServerDisconnectedError()
)
c.write_loop_task = AsyncMock()()
_run(c._read_loop_websocket())
assert c.state == 'disconnected'
c.queue.put.mock.assert_called_once_with(None)
def test_read_loop_websocket_unexpected_error(self):
c = asyncio_client.AsyncClient()
c.ping_interval = 1
c.ping_timeout = 2
c.base_url = 'ws://foo'
c.state = 'connected'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c.ws = mock.MagicMock()
c.ws.receive = AsyncMock(side_effect=ValueError)
c.write_loop_task = AsyncMock()()
_run(c._read_loop_websocket())
assert c.state == 'disconnected'
c.queue.put.mock.assert_called_once_with(None)
def test_read_loop_websocket(self):
c = asyncio_client.AsyncClient()
c.ping_interval = 1
c.ping_timeout = 2
c.base_url = 'ws://foo'
c.state = 'connected'
c.queue = mock.MagicMock()
c.queue.put = AsyncMock()
c.ws = mock.MagicMock()
c.ws.receive = AsyncMock(
side_effect=[
mock.MagicMock(data=packet.Packet(packet.PING).encode()),
ValueError,
]
)
c.write_loop_task = AsyncMock()()
c._receive_packet = AsyncMock()
_run(c._read_loop_websocket())
assert c.state == 'disconnected'
assert c._receive_packet.mock.call_args_list[0][0][0].encode() == '2'
c.queue.put.mock.assert_called_once_with(None)
def test_write_loop_disconnected(self):
c = asyncio_client.AsyncClient()
c.state = 'disconnected'
_run(c._write_loop())
# should not block
def test_write_loop_no_packets(self):
c = asyncio_client.AsyncClient()
c.state = 'connected'
c.ping_interval = 1
c.ping_timeout = 2
c.queue = mock.MagicMock()
c.queue.get = AsyncMock(return_value=None)
_run(c._write_loop())
c.queue.task_done.assert_called_once_with()
c.queue.get.mock.assert_called_once_with()
def test_write_loop_empty_queue(self):
c = asyncio_client.AsyncClient()
c.state = 'connected'
c.ping_interval = 1
c.ping_timeout = 2
c.queue = mock.MagicMock()
c.queue.Empty = RuntimeError
c.queue.get = AsyncMock(side_effect=RuntimeError)
_run(c._write_loop())
c.queue.get.mock.assert_called_once_with()
def test_write_loop_polling_one_packet(self):
c = asyncio_client.AsyncClient()
c.base_url = 'http://foo'
c.state = 'connected'
c.ping_interval = 1
c.ping_timeout = 2
c.current_transport = 'polling'
c.queue = mock.MagicMock()
c.queue.Empty = RuntimeError
c.queue.get = AsyncMock(
side_effect=[
packet.Packet(packet.MESSAGE, {'foo': 'bar'}),
RuntimeError,
]
)
c.queue.get_nowait = mock.MagicMock(side_effect=RuntimeError)
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 200
_run(c._write_loop())
assert c.queue.task_done.call_count == 1
p = payload.Payload(
packets=[packet.Packet(packet.MESSAGE, {'foo': 'bar'})]
)
c._send_request.mock.assert_called_once_with(
'POST',
'http://foo',
body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=5,
)
def test_write_loop_polling_three_packets(self):
c = asyncio_client.AsyncClient()
c.base_url = 'http://foo'
c.state = 'connected'
c.ping_interval = 1
c.ping_timeout = 2
c.current_transport = 'polling'
c.queue = mock.MagicMock()
c.queue.Empty = RuntimeError
c.queue.get = AsyncMock(
side_effect=[
packet.Packet(packet.MESSAGE, {'foo': 'bar'}),
RuntimeError,
]
)
c.queue.get_nowait = mock.MagicMock(
side_effect=[
packet.Packet(packet.PING),
packet.Packet(packet.NOOP),
RuntimeError,
]
)
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 200
_run(c._write_loop())
assert c.queue.task_done.call_count == 3
p = payload.Payload(
packets=[
packet.Packet(packet.MESSAGE, {'foo': 'bar'}),
packet.Packet(packet.PING),
packet.Packet(packet.NOOP),
]
)
c._send_request.mock.assert_called_once_with(
'POST',
'http://foo',
body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=5,
)
def test_write_loop_polling_two_packets_done(self):
c = asyncio_client.AsyncClient()
c.base_url = 'http://foo'
c.state = 'connected'
c.ping_interval = 1
c.ping_timeout = 2
c.current_transport = 'polling'
c.queue = mock.MagicMock()
c.queue.Empty = RuntimeError
c.queue.get = AsyncMock(
side_effect=[
packet.Packet(packet.MESSAGE, {'foo': 'bar'}),
RuntimeError,
]
)
c.queue.get_nowait = mock.MagicMock(
side_effect=[packet.Packet(packet.PING), None]
)
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 200
_run(c._write_loop())
assert c.queue.task_done.call_count == 3
p = payload.Payload(
packets=[
packet.Packet(packet.MESSAGE, {'foo': 'bar'}),
packet.Packet(packet.PING),
]
)
c._send_request.mock.assert_called_once_with(
'POST',
'http://foo',
body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=5,
)
assert c.state == 'connected'
def test_write_loop_polling_bad_connection(self):
c = asyncio_client.AsyncClient()
c.base_url = 'http://foo'
c.state = 'connected'
c.ping_interval = 1
c.ping_timeout = 2
c.current_transport = 'polling'
c.queue = mock.MagicMock()
c.queue.Empty = RuntimeError
c.queue.get = AsyncMock(
side_effect=[packet.Packet(packet.MESSAGE, {'foo': 'bar'})]
)
c.queue.get_nowait = mock.MagicMock(side_effect=[RuntimeError])
c._send_request = AsyncMock(return_value=None)
_run(c._write_loop())
assert c.queue.task_done.call_count == 1
p = payload.Payload(
packets=[packet.Packet(packet.MESSAGE, {'foo': 'bar'})]
)
c._send_request.mock.assert_called_once_with(
'POST',
'http://foo',
body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=5,
)
assert c.state == 'connected'
def test_write_loop_polling_bad_status(self):
c = asyncio_client.AsyncClient()
c.base_url = 'http://foo'
c.state = 'connected'
c.ping_interval = 1
c.ping_timeout = 2
c.current_transport = 'polling'
c.queue = mock.MagicMock()
c.queue.Empty = RuntimeError
c.queue.get = AsyncMock(
side_effect=[packet.Packet(packet.MESSAGE, {'foo': 'bar'})]
)
c.queue.get_nowait = mock.MagicMock(side_effect=[RuntimeError])
c._send_request = AsyncMock()
c._send_request.mock.return_value.status = 500
_run(c._write_loop())
assert c.queue.task_done.call_count == 1
p = payload.Payload(
packets=[packet.Packet(packet.MESSAGE, {'foo': 'bar'})]
)
c._send_request.mock.assert_called_once_with(
'POST',
'http://foo',
body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=5,
)
assert c.state == 'disconnected'
def test_write_loop_websocket_one_packet(self):
c = asyncio_client.AsyncClient()
c.state = 'connected'
c.ping_interval = 1
c.ping_timeout = 2
c.current_transport = 'websocket'
c.queue = mock.MagicMock()
c.queue.Empty = RuntimeError
c.queue.get = AsyncMock(
side_effect=[
packet.Packet(packet.MESSAGE, {'foo': 'bar'}),
RuntimeError,
]
)
c.queue.get_nowait = mock.MagicMock(side_effect=[RuntimeError])
c.ws = mock.MagicMock()
c.ws.send_str = AsyncMock()
_run(c._write_loop())
assert c.queue.task_done.call_count == 1
assert c.ws.send_str.mock.call_count == 1
c.ws.send_str.mock.assert_called_once_with('4{"foo":"bar"}')
def test_write_loop_websocket_three_packets(self):
c = asyncio_client.AsyncClient()
c.state = 'connected'
c.ping_interval = 1
c.ping_timeout = 2
c.current_transport = 'websocket'
c.queue = mock.MagicMock()
c.queue.Empty = RuntimeError
c.queue.get = AsyncMock(
side_effect=[
packet.Packet(packet.MESSAGE, {'foo': 'bar'}),
RuntimeError,
]
)
c.queue.get_nowait = mock.MagicMock(
side_effect=[
packet.Packet(packet.PING),
packet.Packet(packet.NOOP),
RuntimeError,
]
)
c.ws = mock.MagicMock()
c.ws.send_str = AsyncMock()
_run(c._write_loop())
assert c.queue.task_done.call_count == 3
assert c.ws.send_str.mock.call_count == 3
assert c.ws.send_str.mock.call_args_list[0][0][0] == '4{"foo":"bar"}'
assert c.ws.send_str.mock.call_args_list[1][0][0] == '2'
assert c.ws.send_str.mock.call_args_list[2][0][0] == '6'
def test_write_loop_websocket_one_packet_binary(self):
c = asyncio_client.AsyncClient()
c.state = 'connected'
c.ping_interval = 1
c.ping_timeout = 2
c.current_transport = 'websocket'
c.queue = mock.MagicMock()
c.queue.Empty = RuntimeError
c.queue.get = AsyncMock(
side_effect=[packet.Packet(packet.MESSAGE, b'foo'), RuntimeError]
)
c.queue.get_nowait = mock.MagicMock(side_effect=[RuntimeError])
c.ws = mock.MagicMock()
c.ws.send_bytes = AsyncMock()
_run(c._write_loop())
assert c.queue.task_done.call_count == 1
assert c.ws.send_bytes.mock.call_count == 1
c.ws.send_bytes.mock.assert_called_once_with(b'foo')
def test_write_loop_websocket_bad_connection(self):
c = asyncio_client.AsyncClient()
c.state = 'connected'
c.ping_interval = 1
c.ping_timeout = 2
c.current_transport = 'websocket'
c.queue = mock.MagicMock()
c.queue.Empty = RuntimeError
c.queue.get = AsyncMock(
side_effect=[
packet.Packet(packet.MESSAGE, {'foo': 'bar'}),
RuntimeError,
]
)
c.queue.get_nowait = mock.MagicMock(side_effect=[RuntimeError])
c.ws = mock.MagicMock()
c.ws.send_str = AsyncMock(
side_effect=aiohttp.client_exceptions.ServerDisconnectedError()
)
_run(c._write_loop())
assert c.state == 'connected'
@mock.patch('engineio.client.original_signal_handler')
def test_signal_handler(self, original_handler):
clients = [mock.MagicMock(), mock.MagicMock()]
client.connected_clients = clients[:]
client.connected_clients[0].is_asyncio_based.return_value = False
client.connected_clients[1].is_asyncio_based.return_value = True
async def test():
asyncio_client.async_signal_handler()
asyncio.get_event_loop().run_until_complete(test())
clients[0].disconnect.assert_not_called()
clients[1].disconnect.assert_called_once_with()
```
|
{
"source": "jdrbc/comic-scraper",
"score": 3
}
|
#### File: jdrbc/comic-scraper/comicscraper.py
```python
import requests, os, re, logging, shelve, threading, math, sys
from bs4 import BeautifulSoup
from os import path
from dataclasses import dataclass
from abc import ABC
from itertools import zip_longest
def grouper(n, iterable, padvalue=None):
"grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
return zip_longest(*[iter(iterable)]*n, fillvalue=padvalue)
class ComicScraper(ABC):
"""
Extensible comic scraper that downloads images into folder with web comic's name
with format [six zero padded page number]__[page title].[image extension]
"""
def __init__(self):
self.logger = logging.getLogger(__name__)
self.headers = {'User-Agent': 'Mozilla/5.0'}
self.num_threads = 20
self.db = shelve.open('comic_scraper')
def enable_logging(self):
""" enables debug level logging to file and console """
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
os.makedirs('log', exist_ok=True)
fh = logging.FileHandler(f'log/{self.get_base_url()}.log')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
def get_base_url(self):
""" e.g. xkcd.com """
pass
def get_next_comic_url(self, page_soup, page_number):
""" return the url of the page following the given page """
pass
def get_image_url(self, page_soup, page_number):
""" return the image url on the given page """
pass
def get_page_name(self, page_soup, page_number):
""" return the string that you want to appear in the image file name for this page """
pass
def guess_urls_from_page_number(self, page_number):
""" return list of possible urls for the given page number, each will be tried e.g. ['xkcd.com/2249'] """
pass
def is_last_page(self, page):
""" return true if given ComicPage is the last page -- used to figure out when to stop when no stop page is given """
return page.next_page_url is None
def clear_db(self):
"""
clears the shelved comic pages
"""
if self.db.get(self.get_base_url()) is not None:
self.db[self.get_base_url()] = Comic()
def scrape(self, start=1, stop=None, comic=None):
"""
downloads images from the given page range (ints) into folder same as base url
Parameters
start (int): starting page number
stop (int): download this page and stop
comic (Comic): comic prepopulated with page info
"""
self.logger.info('\n\n============ START SCRAPE ============\n')
if stop is None:
self.logger.info(f'starting scrape of ALL pages')
else:
            self.logger.info(f'starting scrape of {stop - start + 1} pages')
if comic is None:
self.logger.debug('attempting to recover comic info from shelve')
comic = self.db.get(self.get_base_url())
if comic is None:
self.logger.debug('creating new comic')
comic = Comic()
# scrape image urls and comic urls
current_page_number = start - 1
while stop is None or current_page_number < stop:
current_page_number += 1
page = comic.getpage(current_page_number)
if page is None or page.image_url is None:
urls = [self._get_page_url(current_page_number, comic)]
if not isinstance(urls, list):
urls = [urls]
if urls[0] is None:
urls = self.guess_urls_from_page_number(current_page_number)
self.logger.debug(f'guessing urls {urls}')
                curr_url = None
                soup = None
                for url in urls:
                    curr_url = url
                    soup = self._download_page(url)
                    if soup is not None:
                        break
                if soup is None:
                    self.logger.warning(f'could not generate soup for page {current_page_number}')
                    break
page = ComicPage(current_page_number, curr_url)
page.next_page_url = self.get_next_comic_url(soup, current_page_number)
page.image_url = self.get_image_url(soup, current_page_number)
page.name = self.get_page_name(soup, current_page_number)
comic.addpage(page)
if page.next_page_url is None:
self.logger.warning(f'could not find next page url in page {current_page_number}')
if stop is None and self.is_last_page(page) :
self.logger.debug(f'found last page {current_page_number}')
break
self.logger.debug('comic page download complete - downloading images')
# download images up to last successfully downloaded comic
os.makedirs(self.get_base_url(), exist_ok=True)
self._start_download_image_threads(range(start, current_page_number + 1), comic)
self.logger.debug('download complete - updating shelve')
self.db[self.get_base_url()] = comic
def _get_image_extension(self, url):
if url is None:
return None
        match = re.search(r'(\.(gif|jpg|jpeg|tiff|png))$', url, re.IGNORECASE)
if match is None:
return None
return match.group(1)
def _get_image_filepath(self, page):
self.logger.debug(f'image url is {page.image_url}')
extension = self._get_image_extension(page.image_url)
self.logger.debug(f'page number {page.number}')
self.logger.debug(f'page name {page.name}')
self.logger.debug(f'ext {extension}')
if extension is None:
return None
return self.get_base_url() + path.sep + str(page.number).zfill(6) + '__' + page.name + extension
def _download_page(self, url):
""" download comic at given page and return resulting soup - return None on error """
self.logger.debug(f'downloading comic at {url}')
        if re.search(r'^https?://', url) is None:
url = f'http://{url}'
# download page
try:
resp = requests.get(url, headers=self.headers)
if not resp.ok:
self.logger.warning(f'invalid response {resp.status_code}')
self.logger.debug(f'body: {resp.content}')
return None
# return the soup
self.logger.debug(f'comic successfully downloaded')
return BeautifulSoup(resp.content, 'lxml')
        except Exception:
            self.logger.warning(f'error downloading {url} {sys.exc_info()[0]}')
            return None
def _download_image(self, page_number, comic):
self.logger.debug(f'downloading image for page {page_number}')
if page_number is None:
return
page = comic.getpage(page_number)
if page is None:
self.logger.warning(f'could not find page {page_number} information')
return
image_path = self._get_image_filepath(page)
if image_path is None:
self.logger.warning(f'could not discover file path for page {page_number}')
elif path.exists(image_path):
self.logger.debug(f'page {page_number} image already downloaded')
elif page.image_url is None:
self.logger.warning(f'page {page_number} has no image url yet')
else:
self.logger.debug(f'downloading {page.image_url}')
            if re.search(r'^https?://', page.image_url) is None:
                if re.search(r'^//', page.image_url) is None:
                    page.image_url = f'http://{page.image_url}'
                else:
                    page.image_url = f'http:{page.image_url}'
resp = requests.get(page.image_url, headers=self.headers)
if resp.ok:
with open(image_path, 'wb') as f:
self.logger.debug(f'saving to {image_path}')
f.write(resp.content)
else:
                self.logger.warning(f'invalid response {resp.status_code}')
def _download_images(self, page_numbers, comic):
self.logger.debug(f'downloading {len(page_numbers)} images')
for page_number in page_numbers:
self._download_image(page_number, comic)
def _start_download_image_threads(self, all_page_numbers, comic):
# todo - right number of threads?
self.logger.info(f'starting threads to download {len(all_page_numbers)} pages')
threads = []
max_elements_per_chunk = math.ceil(len(all_page_numbers) / self.num_threads)
self.logger.info(f'max elements per threads {max_elements_per_chunk} pages')
chunks = grouper(max_elements_per_chunk, all_page_numbers)
for chunk in chunks:
thread = threading.Thread(target=self._download_images, args=(chunk, comic))
threads.append(thread)
self.logger.info(f'starting {len(threads)} threads')
for thread in threads:
thread.start()
for index, thread in enumerate(threads):
self.logger.info("joining thread %d." % index)
thread.join()
            self.logger.info("Main : thread %d done" % index)
def _get_page_url(self, page_number, comic):
"""
checks target page and previous page for url of given page
"""
page = comic.getpage(page_number)
if page is not None and page.url is not None:
return page.url
prev_page = comic.getpage(page_number - 1)
if prev_page is not None and prev_page.next_page_url is not None:
return prev_page.next_page_url
return None
class Comic:
def __init__(self):
self.pages = {}
def addpage(self, page):
self.pages[str(page.number)] = page
def getpage(self, page_number):
return self.pages.get(str(page_number))
@dataclass
class ComicPage:
number: int
url: str
name: str = 'unknown'
next_page_url: str = None
image_url: str = None
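# Illustrative sketch (not part of the original scraper): a minimal ComicScraper
# subclass showing how the abstract hooks above fit together. The site name, CSS
# selectors and URL pattern below are assumptions for demonstration only and would
# need to match the real page markup before use.
class ExampleXkcdScraper(ComicScraper):
    def get_base_url(self):
        return 'xkcd.com'
    def get_next_comic_url(self, page_soup, page_number):
        # assumed selector for the "next" navigation link
        link = page_soup.select_one('a[rel="next"]')
        if link is None or link.get('href') in (None, '#'):
            return None
        return 'xkcd.com' + link['href']
    def get_image_url(self, page_soup, page_number):
        # assumed selector for the comic image
        img = page_soup.select_one('#comic img')
        return img.get('src') if img else None
    def get_page_name(self, page_soup, page_number):
        # assumed selector for the page title
        title = page_soup.select_one('#ctitle')
        return title.get_text(strip=True) if title else 'unknown'
    def guess_urls_from_page_number(self, page_number):
        return [f'xkcd.com/{page_number}']
# Hypothetical usage:
#   scraper = ExampleXkcdScraper()
#   scraper.enable_logging()
#   scraper.scrape(start=1, stop=10)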
```
|
{
"source": "jdreamerz/bhyve-home-assistant",
"score": 2
}
|
#### File: custom_components/bhyve/sensor.py
```python
import logging
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_TEMPERATURE,
TEMP_FAHRENHEIT,
ENTITY_CATEGORY_DIAGNOSTIC,
)
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.components.sensor import (
DEVICE_CLASS_TEMPERATURE,
)
from . import BHyveDeviceEntity
from .const import (
DATA_BHYVE,
DEVICE_SPRINKLER,
DEVICE_FLOOD,
EVENT_CHANGE_MODE,
EVENT_FS_ALARM,
EVENT_DEVICE_IDLE,
)
from .pybhyve.errors import BHyveError
from .util import orbit_time_to_local_time
_LOGGER = logging.getLogger(__name__)
ATTR_BUDGET = "budget"
ATTR_CONSUMPTION_GALLONS = "consumption_gallons"
ATTR_CONSUMPTION_LITRES = "consumption_litres"
ATTR_IRRIGATION = "irrigation"
ATTR_PROGRAM = "program"
ATTR_PROGRAM_NAME = "program_name"
ATTR_RUN_TIME = "run_time"
ATTR_START_TIME = "start_time"
ATTR_STATUS = "status"
async def async_setup_platform(hass, config, async_add_entities, _discovery_info=None):
"""Set up BHyve sensors based on a config entry."""
bhyve = hass.data[DATA_BHYVE]
sensors = []
devices = await bhyve.devices
for device in devices:
if device.get("type") == DEVICE_SPRINKLER:
sensors.append(BHyveStateSensor(hass, bhyve, device))
for zone in device.get("zones"):
sensors.append(BHyveZoneHistorySensor(hass, bhyve, device, zone))
if device.get("battery", None) is not None:
sensors.append(BHyveBatterySensor(hass, bhyve, device))
if device.get("type") == DEVICE_FLOOD:
sensors.append(BHyveTemperatureSensor(hass, bhyve, device))
sensors.append(BHyveBatterySensor(hass, bhyve, device))
async_add_entities(sensors, True)
class BHyveBatterySensor(BHyveDeviceEntity):
"""Define a BHyve sensor."""
def __init__(self, hass, bhyve, device):
"""Initialize the sensor."""
name = "{} battery level".format(device.get("name"))
_LOGGER.info("Creating battery sensor: %s", name)
super().__init__(
hass,
bhyve,
device,
name,
"battery",
DEVICE_CLASS_BATTERY,
)
self._unit = "%"
def _setup(self, device):
self._state = None
self._attrs = {}
self._available = device.get("is_connected", False)
battery = device.get("battery")
if battery is not None:
self._state = battery["percent"]
self._attrs[ATTR_BATTERY_LEVEL] = battery["percent"]
@property
def state(self):
"""Return the state of the entity"""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement for the sensor."""
return self._unit
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self._device_class == DEVICE_CLASS_BATTERY and self._state is not None:
return icon_for_battery_level(
battery_level=int(self._state), charging=False
)
return self._icon
@property
def should_poll(self):
"""Enable polling."""
return True
@property
def unique_id(self):
"""Return a unique, unchanging string that represents this sensor."""
return f"{self._mac_address}:{self._device_id}:battery"
@property
def entity_category(self):
"""Battery is a diagnostic category"""
return ENTITY_CATEGORY_DIAGNOSTIC
def _should_handle_event(self, event_name, data):
return event_name in [EVENT_CHANGE_MODE]
async def async_update(self):
"""Retrieve latest state."""
self._ws_unprocessed_events[:] = [] # We don't care about these
await self._refetch_device()
class BHyveZoneHistorySensor(BHyveDeviceEntity):
"""Define a BHyve sensor."""
def __init__(self, hass, bhyve, device, zone):
"""Initialize the sensor."""
self._history = None
self._zone = zone
self._zone_id = zone.get("station")
name = "{0} zone history".format(zone.get("name", "Unknown"))
_LOGGER.info("Creating history sensor: %s", name)
super().__init__(
hass,
bhyve,
device,
name,
"history",
)
def _setup(self, device):
self._state = None
self._attrs = {}
self._available = device.get("is_connected", False)
@property
def state(self):
"""Return the state of the entity"""
return self._state
@property
def should_poll(self):
"""Enable polling."""
return True
@property
def unique_id(self):
"""Return a unique, unchanging string that represents this sensor."""
return f"{self._mac_address}:{self._device_id}:{self._zone_id}:history"
@property
def entity_category(self):
"""History is a diagnostic category"""
return ENTITY_CATEGORY_DIAGNOSTIC
def _should_handle_event(self, event_name, data):
return event_name in [EVENT_DEVICE_IDLE]
async def async_update(self):
"""Retrieve latest state."""
force_update = True if list(self._ws_unprocessed_events) else False
self._ws_unprocessed_events[:] = [] # We don't care about these
try:
history = await self._fetch_device_history(force_update=force_update) or []
self._history = history
for history_item in history:
zone_irrigation = list(
filter(
lambda i: i.get("station") == self._zone_id,
history_item.get(ATTR_IRRIGATION, []),
)
)
if zone_irrigation:
# This is a bit crude - assumes the list is ordered by time.
latest_irrigation = zone_irrigation[-1]
gallons = latest_irrigation.get("water_volume_gal")
litres = round(gallons * 3.785, 2) if gallons else None
self._state = orbit_time_to_local_time(
latest_irrigation.get("start_time")
)
self._attrs = {
ATTR_BUDGET: latest_irrigation.get(ATTR_BUDGET),
ATTR_PROGRAM: latest_irrigation.get(ATTR_PROGRAM),
ATTR_PROGRAM_NAME: latest_irrigation.get(ATTR_PROGRAM_NAME),
ATTR_RUN_TIME: latest_irrigation.get(ATTR_RUN_TIME),
ATTR_STATUS: latest_irrigation.get(ATTR_STATUS),
ATTR_CONSUMPTION_GALLONS: gallons,
ATTR_CONSUMPTION_LITRES: litres,
ATTR_START_TIME: latest_irrigation.get(ATTR_START_TIME),
}
break
except BHyveError as err:
_LOGGER.warning(f"Unable to retreive data for {self._name}: {err}")
class BHyveStateSensor(BHyveDeviceEntity):
"""Define a BHyve sensor."""
def __init__(self, hass, bhyve, device):
"""Initialize the sensor."""
name = "{0} state".format(device.get("name"))
_LOGGER.info("Creating state sensor: %s", name)
super().__init__(hass, bhyve, device, name, "information")
def _setup(self, device):
self._attrs = {}
self._state = device.get("status", {}).get("run_mode")
self._available = device.get("is_connected", False)
_LOGGER.debug(
f"State sensor {self._name} setup: State: {self._state} | Available: {self._available}"
)
@property
def state(self):
"""Return the state of the entity"""
return self._state
@property
def unique_id(self):
"""Return a unique, unchanging string that represents this sensor."""
return f"{self._mac_address}:{self._device_id}:state"
@property
def entity_category(self):
"""Run state is a diagnostic category"""
return ENTITY_CATEGORY_DIAGNOSTIC
def _on_ws_data(self, data):
"""
{'event': 'change_mode', 'mode': 'auto', 'device_id': 'id', 'timestamp': '2020-01-09T20:30:00.000Z'}
"""
event = data.get("event")
if event == EVENT_CHANGE_MODE:
self._state = data.get("mode")
def _should_handle_event(self, event_name, data):
return event_name in [EVENT_CHANGE_MODE]
class BHyveTemperatureSensor(BHyveDeviceEntity):
"""Define a BHyve sensor."""
def __init__(self, hass, bhyve, device):
"""Initialize the sensor."""
name = "{0} temperature sensor".format(device.get("name"))
_LOGGER.info("Creating temperature sensor: %s", name)
super().__init__(
hass, bhyve, device, name, "thermometer", DEVICE_CLASS_TEMPERATURE
)
def _setup(self, device):
self._state = device.get("status", {}).get("temp_f")
self._available = device.get("is_connected", False)
self._unit = "°F"
self._attrs = {
"location": device.get("location_name"),
"rssi": device.get("status", {}).get("rssi"),
"temperature_alarm": device.get("status", {}).get("temp_alarm_status"),
}
_LOGGER.debug(
f"Temperature sensor {self._name} setup: State: {self._state} | Available: {self._available}"
)
@property
def state(self):
"""Return the state of the entity"""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement for the sensor."""
return self._unit
@property
def unique_id(self):
"""Return a unique, unchanging string that represents this sensor."""
return f"{self._mac_address}:{self._device_id}:temp"
def _on_ws_data(self, data):
"""
{"last_flood_alarm_at":"2021-08-29T16:32:35.585Z","rssi":-60,"onboard_complete":true,"temp_f":75.2,"provisioned":true,"phy":"le_1m_1000","event":"fs_status_update","temp_alarm_status":"ok","status_updated_at":"2021-08-29T16:33:17.089Z","identify_enabled":false,"device_id":"612ad9134f0c6c9c9faddbba","timestamp":"2021-08-29T16:33:17.089Z","flood_alarm_status":"ok","last_temp_alarm_at":null}
"""
_LOGGER.info("Received program data update {}".format(data))
event = data.get("event")
if event == EVENT_FS_ALARM:
self._state = data.get("temp_f")
self._attrs["rssi"] = data.get("rssi")
self._attrs["temperature_alarm"] = data.get("temp_alarm_status")
def _should_handle_event(self, event_name, data):
return event_name in [EVENT_FS_ALARM]
```
|
{
"source": "jdreichmann/synapse",
"score": 2
}
|
#### File: tests/server_notices/test_consent.py
```python
import os
import synapse.rest.admin
from synapse.rest.client.v1 import login, room
from synapse.rest.client.v2_alpha import sync
from tests import unittest
class ConsentNoticesTests(unittest.HomeserverTestCase):
servlets = [
sync.register_servlets,
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
room.register_servlets,
]
def make_homeserver(self, reactor, clock):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
self.consent_notice_message = "consent %(consent_uri)s"
config = self.default_config()
config["user_consent"] = {
"version": "1",
"template_dir": tmpdir,
"server_notice_content": {
"msgtype": "m.text",
"body": self.consent_notice_message,
},
}
config["public_baseurl"] = "https://example.com/"
config["form_secret"] = "123abc"
config["server_notices"] = {
"system_mxid_localpart": "notices",
"system_mxid_display_name": "test display name",
"system_mxid_avatar_url": None,
"room_name": "Server Notices",
}
hs = self.setup_test_homeserver(config=config)
return hs
def prepare(self, reactor, clock, hs):
self.user_id = self.register_user("bob", "<PASSWORD>")
self.access_token = self.login("bob", "<PASSWORD>")
def test_get_sync_message(self):
"""
When user consent server notices are enabled, a sync will cause a notice
to fire (in a room which the user is invited to). The notice contains
the notice URL + an authentication code.
"""
# Initial sync, to get the user consent room invite
request, channel = self.make_request(
"GET", "/_matrix/client/r0/sync", access_token=self.access_token
)
self.assertEqual(channel.code, 200)
# Get the Room ID to join
room_id = list(channel.json_body["rooms"]["invite"].keys())[0]
# Join the room
request, channel = self.make_request(
"POST",
"/_matrix/client/r0/rooms/" + room_id + "/join",
access_token=self.access_token,
)
self.assertEqual(channel.code, 200)
# Sync again, to get the message in the room
request, channel = self.make_request(
"GET", "/_matrix/client/r0/sync", access_token=self.access_token
)
self.assertEqual(channel.code, 200)
# Get the message
room = channel.json_body["rooms"]["join"][room_id]
messages = [
x for x in room["timeline"]["events"] if x["type"] == "m.room.message"
]
# One message, with the consent URL
self.assertEqual(len(messages), 1)
self.assertTrue(
messages[0]["content"]["body"].startswith(
"consent https://example.com/_matrix/consent"
)
)
```
#### File: tests/storage/test_redaction.py
```python
from mock import Mock
from canonicaljson import json
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
from synapse.types import RoomID, UserID
from tests import unittest
from tests.utils import create_room
class RedactionTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
config = self.default_config()
config["redaction_retention_period"] = "30d"
return self.setup_test_homeserver(
resource_for_federation=Mock(), http_client=None, config=config
)
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.storage = hs.get_storage()
self.event_builder_factory = hs.get_event_builder_factory()
self.event_creation_handler = hs.get_event_creation_handler()
self.u_alice = UserID.from_string("@alice:test")
self.u_bob = UserID.from_string("@bob:test")
self.room1 = RoomID.from_string("!abc123:test")
self.get_success(
create_room(hs, self.room1.to_string(), self.u_alice.to_string())
)
self.depth = 1
def inject_room_member(
self, room, user, membership, replaces_state=None, extra_content={}
):
content = {"membership": membership}
content.update(extra_content)
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": EventTypes.Member,
"sender": user.to_string(),
"state_key": user.to_string(),
"room_id": room.to_string(),
"content": content,
},
)
event, context = self.get_success(
self.event_creation_handler.create_new_client_event(builder)
)
self.get_success(self.storage.persistence.persist_event(event, context))
return event
def inject_message(self, room, user, body):
self.depth += 1
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": EventTypes.Message,
"sender": user.to_string(),
"state_key": user.to_string(),
"room_id": room.to_string(),
"content": {"body": body, "msgtype": "message"},
},
)
event, context = self.get_success(
self.event_creation_handler.create_new_client_event(builder)
)
self.get_success(self.storage.persistence.persist_event(event, context))
return event
def inject_redaction(self, room, event_id, user, reason):
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": EventTypes.Redaction,
"sender": user.to_string(),
"state_key": user.to_string(),
"room_id": room.to_string(),
"content": {"reason": reason},
"redacts": event_id,
},
)
event, context = self.get_success(
self.event_creation_handler.create_new_client_event(builder)
)
self.get_success(self.storage.persistence.persist_event(event, context))
return event
def test_redact(self):
self.get_success(
self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
)
msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t"))
# Check event has not been redacted:
event = self.get_success(self.store.get_event(msg_event.event_id))
self.assertObjectHasAttributes(
{
"type": EventTypes.Message,
"user_id": self.u_alice.to_string(),
"content": {"body": "t", "msgtype": "message"},
},
event,
)
self.assertFalse("redacted_because" in event.unsigned)
# Redact event
reason = "Because I said so"
self.get_success(
self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason)
)
event = self.get_success(self.store.get_event(msg_event.event_id))
self.assertEqual(msg_event.event_id, event.event_id)
self.assertTrue("redacted_because" in event.unsigned)
self.assertObjectHasAttributes(
{
"type": EventTypes.Message,
"user_id": self.u_alice.to_string(),
"content": {},
},
event,
)
self.assertObjectHasAttributes(
{
"type": EventTypes.Redaction,
"user_id": self.u_alice.to_string(),
"content": {"reason": reason},
},
event.unsigned["redacted_because"],
)
def test_redact_join(self):
self.get_success(
self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
)
msg_event = self.get_success(
self.inject_room_member(
self.room1, self.u_bob, Membership.JOIN, extra_content={"blue": "red"}
)
)
event = self.get_success(self.store.get_event(msg_event.event_id))
self.assertObjectHasAttributes(
{
"type": EventTypes.Member,
"user_id": self.u_bob.to_string(),
"content": {"membership": Membership.JOIN, "blue": "red"},
},
event,
)
self.assertFalse(hasattr(event, "redacted_because"))
# Redact event
reason = "Because I said so"
self.get_success(
self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason)
)
# Check redaction
event = self.get_success(self.store.get_event(msg_event.event_id))
self.assertTrue("redacted_because" in event.unsigned)
self.assertObjectHasAttributes(
{
"type": EventTypes.Member,
"user_id": self.u_bob.to_string(),
"content": {"membership": Membership.JOIN},
},
event,
)
self.assertObjectHasAttributes(
{
"type": EventTypes.Redaction,
"user_id": self.u_alice.to_string(),
"content": {"reason": reason},
},
event.unsigned["redacted_because"],
)
def test_circular_redaction(self):
redaction_event_id1 = "$redaction1_id:test"
redaction_event_id2 = "$redaction2_id:test"
class EventIdManglingBuilder:
def __init__(self, base_builder, event_id):
self._base_builder = base_builder
self._event_id = event_id
@defer.inlineCallbacks
def build(self, prev_event_ids, auth_event_ids):
built_event = yield defer.ensureDeferred(
self._base_builder.build(prev_event_ids, auth_event_ids)
)
built_event._event_id = self._event_id
built_event._dict["event_id"] = self._event_id
assert built_event.event_id == self._event_id
return built_event
@property
def room_id(self):
return self._base_builder.room_id
@property
def type(self):
return self._base_builder.type
event_1, context_1 = self.get_success(
self.event_creation_handler.create_new_client_event(
EventIdManglingBuilder(
self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": EventTypes.Redaction,
"sender": self.u_alice.to_string(),
"room_id": self.room1.to_string(),
"content": {"reason": "test"},
"redacts": redaction_event_id2,
},
),
redaction_event_id1,
)
)
)
self.get_success(self.storage.persistence.persist_event(event_1, context_1))
event_2, context_2 = self.get_success(
self.event_creation_handler.create_new_client_event(
EventIdManglingBuilder(
self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": EventTypes.Redaction,
"sender": self.u_alice.to_string(),
"room_id": self.room1.to_string(),
"content": {"reason": "test"},
"redacts": redaction_event_id1,
},
),
redaction_event_id2,
)
)
)
self.get_success(self.storage.persistence.persist_event(event_2, context_2))
# fetch one of the redactions
fetched = self.get_success(self.store.get_event(redaction_event_id1))
# it should have been redacted
self.assertEqual(fetched.unsigned["redacted_by"], redaction_event_id2)
self.assertEqual(
fetched.unsigned["redacted_because"].event_id, redaction_event_id2
)
def test_redact_censor(self):
"""Test that a redacted event gets censored in the DB after a month
"""
self.get_success(
self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
)
msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t"))
# Check event has not been redacted:
event = self.get_success(self.store.get_event(msg_event.event_id))
self.assertObjectHasAttributes(
{
"type": EventTypes.Message,
"user_id": self.u_alice.to_string(),
"content": {"body": "t", "msgtype": "message"},
},
event,
)
self.assertFalse("redacted_because" in event.unsigned)
# Redact event
reason = "Because I said so"
self.get_success(
self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason)
)
event = self.get_success(self.store.get_event(msg_event.event_id))
self.assertTrue("redacted_because" in event.unsigned)
self.assertObjectHasAttributes(
{
"type": EventTypes.Message,
"user_id": self.u_alice.to_string(),
"content": {},
},
event,
)
event_json = self.get_success(
self.store.db_pool.simple_select_one_onecol(
table="event_json",
keyvalues={"event_id": msg_event.event_id},
retcol="json",
)
)
self.assert_dict(
{"content": {"body": "t", "msgtype": "message"}}, json.loads(event_json)
)
# Advance by 30 days, then advance again to ensure that the looping call
# for updating the stream position gets called and then the looping call
# for the censoring gets called.
self.reactor.advance(60 * 60 * 24 * 31)
self.reactor.advance(60 * 60 * 2)
event_json = self.get_success(
self.store.db_pool.simple_select_one_onecol(
table="event_json",
keyvalues={"event_id": msg_event.event_id},
retcol="json",
)
)
self.assert_dict({"content": {}}, json.loads(event_json))
def test_redact_redaction(self):
"""Tests that we can redact a redaction and can fetch it again.
"""
self.get_success(
self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
)
msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t"))
first_redact_event = self.get_success(
self.inject_redaction(
self.room1, msg_event.event_id, self.u_alice, "Redacting message"
)
)
self.get_success(
self.inject_redaction(
self.room1,
first_redact_event.event_id,
self.u_alice,
"Redacting redaction",
)
)
# Now lets jump to the future where we have censored the redaction event
# in the DB.
self.reactor.advance(60 * 60 * 24 * 31)
# We just want to check that fetching the event doesn't raise an exception.
self.get_success(
self.store.get_event(first_redact_event.event_id, allow_none=True)
)
def test_store_redacted_redaction(self):
"""Tests that we can store a redacted redaction.
"""
self.get_success(
self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
)
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": EventTypes.Redaction,
"sender": self.u_alice.to_string(),
"room_id": self.room1.to_string(),
"content": {"reason": "foo"},
},
)
redaction_event, context = self.get_success(
self.event_creation_handler.create_new_client_event(builder)
)
self.get_success(
self.storage.persistence.persist_event(redaction_event, context)
)
# Now lets jump to the future where we have censored the redaction event
# in the DB.
self.reactor.advance(60 * 60 * 24 * 31)
# We just want to check that fetching the event doesn't raise an exception.
self.get_success(
self.store.get_event(redaction_event.event_id, allow_none=True)
)
```
|
{
"source": "jdrese/ProbeDeformerMaya",
"score": 2
}
|
#### File: jdrese/ProbeDeformerMaya/probeLocator.py
```python
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.OpenMayaRender as OpenMayaRender
nodeName = "probeLocator"
id = OpenMaya.MTypeId(0x00000111)
gl = OpenMayaRender.MHardwareRenderer.theRenderer().glFunctionTable()
class probeLocator(OpenMayaMPx.MPxLocatorNode):
type = OpenMaya.MObject()
def __init__(self):
OpenMayaMPx.MPxLocatorNode.__init__(self)
def draw(self, view, path, style, status):
view.beginGL()
gl.glBegin(OpenMayaRender.MGL_LINES)
gl.glColor3f(1, 0, 0)
gl.glVertex3f(-1.0, 0.0, 0.0)
gl.glVertex3f(1.0, 0.0, 0.0)
gl.glColor3f(0, 1, 0)
gl.glVertex3f(0.0, -1.0, 0.0)
gl.glVertex3f(0.0, 1.0, 0.0)
gl.glColor3f(0, 0, 1)
gl.glVertex3f(0.0, 0.0, -1.0)
gl.glVertex3f(0.0, 0.0, 1.0)
gl.glEnd()
view.endGL()
def nodeCreator():
return OpenMayaMPx.asMPxPtr(probeLocator())
def nodeInitializer():
    pass  # no attributes to initialize for this simple locator
def initializePlugin(obj):
plugin = OpenMayaMPx.MFnPlugin(obj)
plugin.registerNode(nodeName, id, nodeCreator, nodeInitializer, OpenMayaMPx.MPxNode.kLocatorNode)
def uninitializePlugin(obj):
plugin = OpenMayaMPx.MFnPlugin(obj)
plugin.deregisterNode(id)
```
#### File: jdrese/ProbeDeformerMaya/ui_probeDeformer.py
```python
import maya.cmds as cmds
import pymel.core as pm
#deformerTypes = ["probeDeformer","probeDeformerARAP","probeDeformerPy","probeLocator"]
deformerTypes = ["probeDeformer","probeDeformerARAP","probeLocator"]
for type in deformerTypes:
try:
cmds.loadPlugin(type)
    except Exception:
        print("Plugin %s already loaded" % (type))
## prepare interface
class UI_ProbeDeformer:
uiID = "ProbeDeformer"
title = "ProbeDeformerPlugin"
deformers = []
probes = {}
## Constructor
def __init__(self):
if pm.window(self.uiID, exists=True):
pm.deleteUI(self.uiID)
win = pm.window(self.uiID, title=self.title, menuBar=True)
with win:
pm.menu( label='Create', tearOff=True )
for type in deformerTypes:
pm.menuItem( label=type, c=pm.Callback( self.initPlugin, type) )
self._parentLayout = pm.columnLayout( adj=True )
with self._parentLayout:
self.createUISet()
def createUISet(self):
self._childLayout = pm.columnLayout( adj=True )
with self._childLayout:
self.deformers = [pm.ls(type=deformerTypes[i]) for i in range(len(deformerTypes))]
for i in range(len(deformerTypes)):
for node in self.deformers[i]:
self.probes[node] = pm.listConnections(node.pm)
# "probeDeformer" specific
for node in self.deformers[0]:
frameLayout = pm.frameLayout( label=node.name(), collapsable = True)
with frameLayout:
self.createRamp(node)
self.createCommonAttr(node, deformerTypes[0])
indices = cmds.getAttr(node+".pm", multiIndices=True)
if indices:
for j in indices:
with pm.rowLayout(numberOfColumns=1) :
pm.attrFieldSliderGrp(label=node.prw[j].getAlias(), min=0, max=10.0, attribute=node.prw[j])
with pm.rowLayout(numberOfColumns=3) :
pm.attrControlGrp( label="Frechet sum", attribute= node.fs)
pm.attrControlGrp( label="visualisation", attribute= node.vm)
pm.attrFieldSliderGrp( label="visualisation multiplier", min=0.001, max=1000, attribute=node.vmp)
# "probeDeformerARAP" specific
for node in self.deformers[1]:
frameLayout = pm.frameLayout( label=node.name(), collapsable = True)
with frameLayout:
self.createRamp(node)
self.createCommonAttr(node, deformerTypes[1])
indices = cmds.getAttr(node+".pm", multiIndices=True)
if indices:
for j in indices:
with pm.rowLayout(numberOfColumns=2) :
pm.attrFieldSliderGrp(label=node.prw[j].getAlias(), min=0, max=1.0, attribute=node.prw[j])
pm.attrFieldSliderGrp(label=node.prcr[j].getAlias(), min=0, max=1.0, attribute=node.prcr[j])
with pm.rowLayout(numberOfColumns=3) :
pm.button( l="Set supervisor", c=pm.Callback( self.setSupervisor, node))
pm.attrControlGrp( label="tet mode", attribute= node.tm)
pm.attrFieldSliderGrp( label="translation weight", min=0.0, max=1.0, attribute=node.tw)
with pm.rowLayout(numberOfColumns=3) :
pm.attrControlGrp( label="constraint mode", attribute= node.ctm)
pm.attrFieldSliderGrp( label="constraint weight", min=0.001, max=1000, attribute=node.cw)
pm.attrFieldSliderGrp(label="constraint radius", min=0.001, max=10.0, attribute=node.cr)
with pm.rowLayout(numberOfColumns=3) :
pm.attrFieldSliderGrp( label="iteration", min=1, max=20, attribute=node.it)
pm.attrControlGrp( label="visualisation", attribute= node.vm)
pm.attrFieldSliderGrp( label="visualisation multiplier", min=0.001, max=1000, attribute=node.vmp)
with pm.rowLayout(numberOfColumns=3) :
pm.attrControlGrp( label="stiffness mode", attribute=node.stiffnessMode)
# "probeDeformerPy" specific
# for node in self.deformers[2]:
# frameLayout = pm.frameLayout( label=node.name(), collapsable = True)
# with frameLayout:
# self.createRamp(node)
# self.createCommonAttr(node, deformerTypes[2])
# with pm.rowLayout(numberOfColumns=1) :
# pm.attrControlGrp( label="translation mode", attribute= node.tm)
# create deformer node and connection
def initPlugin(self, deformerType):
if deformerType=="probeLocator":
cmds.createNode('probeLocator')
return
# get transform nodes for the selected objects
transforms = pm.selected(tr=1)
if not transforms:
return
pm.select( transforms[-1]) # the deformer is attached to the last selected object
node = pm.ls(cmds.deformer(type=deformerType)[0])[0]
cmds.makePaintable(deformerType, 'weights', attrType='multiFloat', shapeMode='deformer')
if len(transforms)>1:
self.addProbe(node,deformerType,transforms[:-1])
self.updateUI()
# add selected transform as a new probe
def addProbe(self,node,deformerType,newProbes):
indexes = cmds.getAttr(node+".pm", multiIndices=True)
if not indexes:
n=0
else:
n=indexes[-1]+1
# connect pm first to avoid unnecessary arap computations
for j in range(len(newProbes)):
cmds.connectAttr(newProbes[j]+".worldMatrix", node+".pm[%s]" %(j+n))
if deformerType=="probeDeformerARAP" or deformerType=="probeDeformer":
pm.aliasAttr(newProbes[j].name()+"_weight%s" %(j+n), node.prw[j+n].name())
if deformerType=="probeDeformerARAP":
pm.aliasAttr(newProbes[j].name()+"_constraintRadius%s" %(j+n), node.prcr[j+n].name())
for j in range(len(newProbes)):
node.ipm[j+n].set(newProbes[j].worldMatrix.get())
# add selected transform as a new probe
def addSelectedProbe(self,node,deformerType):
newProbes = pm.selected(tr=1)
self.addProbe(node,deformerType,newProbes)
self.updateUI()
# delete deformer node
def deleteNode(self,node):
cmds.delete(node.name())
self.updateUI()
# set selected shapes as supervised mesh
def setSupervisor(self,node):
meshes = pm.selected(tr=1)
if not meshes:
return
for i in range(len(meshes)):
shape=meshes[i].getShapes()[0]
cmds.connectAttr(shape+".outMesh", node.name()+".supervisedMesh[%s]" %(i), force=True)
self.updateUI()
# delete a probe
def deleteProbe(self,node,j):
cmds.disconnectAttr(self.probes[node][j]+".worldMatrix", node+".pm[%s]" %(j) )
self.updateUI()
# redraw UI
def updateUI(self):
pm.deleteUI( self._childLayout )
pm.setParent(self._parentLayout)
self.createUISet()
# create common attributes
def createRamp(self,node):
with pm.rowLayout(numberOfColumns=6) :
pm.text(l='Weight Curve R')
pm.gradientControl( at='%s.wcr' % node.name() )
pm.text(l='S')
pm.gradientControl( at='%s.wcs' % node.name() )
pm.text(l='L')
pm.gradientControl( at='%s.wcl' % node.name() )
def createCommonAttr(self,node,deformerType):
with pm.rowLayout(numberOfColumns=len(self.probes[node])+2) :
pm.button( l="Delete deformer", c=pm.Callback( self.deleteNode, node))
pm.button( l="Add selection to probes", c=pm.Callback( self.addSelectedProbe, node, deformerType) )
for j in range(len(self.probes[node])):
pm.button( l=self.probes[node][j].name(), c=pm.Callback( self.deleteProbe, node, j) )
with pm.rowLayout(numberOfColumns=5) :
pm.attrControlGrp( label="blend mode", attribute= node.bm)
pm.attrControlGrp( label="world mode", attribute= node.worldMode)
pm.attrControlGrp( label="rotation consistency", attribute= node.rc)
pm.attrControlGrp( label="area weight", attribute= node.aw)
pm.attrControlGrp( label="neighbour weighting", attribute= node.nghbrw)
with pm.rowLayout(numberOfColumns=4) :
pm.attrControlGrp( label="Weight mode", attribute= node.wtm)
pm.attrFieldSliderGrp(label="effect radius", min=0.001, max=20.0, attribute=node.er)
pm.attrControlGrp( label="normalise weight", attribute= node.nw)
pm.attrControlGrp( label="normExponent", attribute=node.ne)
```
|
{
"source": "jdrese/ShapeFlowMaya",
"score": 2
}
|
#### File: jdrese/ShapeFlowMaya/ui_shapeFlow.py
```python
#import debugmaya
#debugmaya.startDebug()
# Maya modules
import maya.cmds as cmds
import pymel.core as pm
# load plugin
deformerTypes = ["ShapeFlow","ShapeMatching"]
for type in deformerTypes:
try:
cmds.loadPlugin(type)
except:
print("Plugin %s already loaded" %(type))
# GUI
class UI_ShapeFlow:
uiID = "ShapeFlow"
title = "ShapeFlowPlugin"
deformers = []
## Constructor
def __init__(self):
if pm.window(self.uiID, exists=True):
pm.deleteUI(self.uiID)
win = pm.window(self.uiID, title=self.title, menuBar=True)
with win:
pm.menu( label='Plugin', tearOff=True )
for type in deformerTypes:
pm.menuItem( label=type, c=pm.Callback( self.initPlugin, type) )
self._parentLayout = pm.columnLayout( adj=True )
with self._parentLayout:
self.createUISet()
def createUISet(self):
self._childLayout = pm.columnLayout( adj=True )
with self._childLayout:
pm.text(l="Click target mesh, then shift+click end mesh")
self.deformers = pm.ls(type=deformerTypes[0])
for node in self.deformers:
frameLayout = pm.frameLayout( label=node.name(), collapsable = True)
with frameLayout:
pm.button( l="Del", c=pm.Callback( self.deleteNode, node))
pm.attrControlGrp( label="active", attribute= node.active)
pm.attrFieldSliderGrp(label="delta", min=0.001, max=5.0, attribute=node.delta)
pm.attrFieldSliderGrp(label="shapeMatching", min=0.1, max=10.0, attribute=node.smw)
#
self.deformers = pm.ls(type=deformerTypes[1])
for node in self.deformers:
frameLayout = pm.frameLayout( label=node.name(), collapsable = True)
with frameLayout:
pm.button( l="Del", c=pm.Callback( self.deleteNode, node))
pm.attrControlGrp( label="active", attribute= node.active)
pm.attrFieldSliderGrp(label="delta", min=0.001, max=5.0, attribute=node.delta)
pm.attrFieldSliderGrp(label="stiffness", min=0.001, max=10.0, attribute=node.stf)
pm.attrFieldSliderGrp(label="attenuation", min=0.001, max=1.0, attribute=node.att)
# delete deformer node
def deleteNode(self,node):
cmds.delete(node.name())
self.updateUI()
def initPlugin(self,type):
meshes = pm.selected( type="transform" )
if len(meshes)<2:
return
pm.select( meshes[-1])
deformer = cmds.deformer(type=type)[0]
shape=meshes[-2].getShapes()[0]
cmds.connectAttr(shape+".outMesh", deformer+".startShape")
cmds.connectAttr("time1.outTime", deformer+".slider")
self.updateUI()
def updateUI(self):
pm.deleteUI( self._childLayout )
pm.setParent(self._parentLayout)
self.createUISet()
```
|
{
"source": "jdrese/SIWeightEditor",
"score": 2
}
|
#### File: scripts/siweighteditor/joint_rule_editor.py
```python
from . import qt
from . import lang
import os
import json
import imp
from maya.app.general.mayaMixin import MayaQWidgetBaseMixin
try:
imp.find_module('PySide2')
from PySide2.QtWidgets import *
from PySide2.QtGui import *
from PySide2.QtCore import *
except ImportError:
from PySide.QtGui import *
from PySide.QtCore import *
class Option():
def __init__(self):
global WINDOW
try:
WINDOW.closeEvent(None)
WINDOW.close()
except Exception as e:
print e.message
WINDOW = SubWindow()
#WINDOW.init_flag=False
WINDOW.resize(800, 500)
#WINDOW.move(WINDOW.pw-8, WINDOW.ph-31)
WINDOW.show()
class SubWindow(qt.SubWindow):
def __init__(self, parent = None, init_pos=False):
super(self.__class__, self).__init__(parent)
self.init_save()
self._init_ui()
def init_save(self):
self.dir_path = os.path.join(
os.getenv('MAYA_APP_dir'),
'Scripting_Files')
self.start_file = self.dir_path+'/joint_rule_start.json'
self.middle_file = self.dir_path+'/joint_rule_middle.json'
self.end_file = self.dir_path+'/joint_rule_end.json'
self.save_files = [self.start_file, self.middle_file, self.end_file]
def _init_ui(self):
sq_widget = QScrollArea(self)
sq_widget.setWidgetResizable(True)#whether the contents follow resizing
sq_widget.setFocusPolicy(Qt.NoFocus)#whether the scroll area can take focus
sq_widget.setMinimumHeight(1)#minimum window height
self.setWindowTitle(u'- Joint Label Rules Editor-')
self.setCentralWidget(sq_widget)
self.main_layout = QVBoxLayout()
sq_widget.setLayout(self.main_layout)
second_layout = QHBoxLayout()
self.main_layout.addLayout(second_layout)
start_layout = QVBoxLayout()
second_layout.addLayout(start_layout)
msg = lang.Lang(en='- Prefixed LR naming convention -', ja=u'- 先頭のLR命名規則 -').output()
start_layout.addWidget(QLabel(msg))
self.start_list_widget = QTableWidget(self)
start_layout.addWidget(self.start_list_widget)
middle_layout = QVBoxLayout()
second_layout.addLayout(middle_layout)
msg = lang.Lang(en='- between LR naming conventions -', ja=u'- 中間のLR命名規則 -').output()
middle_layout.addWidget(QLabel(msg))
self.middle_list_widget = QTableWidget(self)
middle_layout.addWidget(self.middle_list_widget)
end_layout = QVBoxLayout()
second_layout.addLayout(end_layout)
msg = lang.Lang(en='- Suffixed LR naming convention -', ja=u'- 末尾のLR命名規則 -').output()
end_layout.addWidget(QLabel(msg))
self.end_list_widget = QTableWidget(self)
end_layout.addWidget(self.end_list_widget)
self.rule_table_list = [self.start_list_widget, self.middle_list_widget, self.end_list_widget]
self.main_layout.addWidget(qt.make_h_line())
button_layout = QHBoxLayout()
self.main_layout.addLayout(button_layout)
tip = lang.Lang(en='Reset table to initial state',
ja=u'テーブルを初期状態にリセット').output()
self.reset_but = qt.make_flat_btton(name='- Reset Joint Label Rules -', border_col=180,
flat=True, hover=True, checkable=False, destroy_flag=True, tip=tip)
self.reset_but.clicked.connect(self.table_reset)
button_layout.addWidget(self.reset_but)
tip = lang.Lang(en='Clear table data',
ja=u'テーブルのデータをクリア').output()
self.clear_but = qt.make_flat_btton(name='- Clear Joint Label Rules -', border_col=180,
flat=True, hover=True, checkable=False, destroy_flag=True, tip=tip)
self.clear_but.clicked.connect(self.table_clear)
button_layout.addWidget(self.clear_but)
self.load_data()
self.set_rule_data()
self.table_setup()
def table_setup(self):
for table in self.rule_table_list:
table.verticalHeader().setDefaultSectionSize(20)
table.setSortingEnabled(True)
table.setAlternatingRowColors(True)
def set_rule_data(self):
for table, lr_list in zip(self.rule_table_list, self.all_lr_list):
self.set_table_data(table, lr_list)
table.setHorizontalHeaderLabels(self.header_labels)
def table_clear(self):
for table in self.rule_table_list:
table.clear()
table.setHorizontalHeaderLabels(self.header_labels)
def table_reset(self):
for table, lr_list in zip(self.rule_table_list, self.def_all_lr_list):
table.clear()
self.set_table_data(table, lr_list)
table.setHorizontalHeaderLabels(self.header_labels)
def set_table_data(self, table, data_list):
table.setColumnCount(2)
table.setRowCount(100)
for i, lr_list in enumerate(data_list):
for j, lr in enumerate(lr_list):
#print 'set rule :', lr
item = QTableWidgetItem(lr)
table.setItem(j, i, item)
#default values
header_labels = ['Left', 'Right']
start_l_list = ['L_', 'l_', 'Left_', 'left_']
start_r_list = ['R_', 'r_', 'Right_', 'right_']
mid_l_list = ['_L_', '_l_', '_Left_', '_left_']
mid_r_list = ['_R_', '_r_', '_Right_', '_right_']
end_l_list = ['_L', '_l', '_L.', '_l.', '_L..', '_l..', '_Left', '_left']
end_r_list = ['_R', '_r', '_R.', '_r.', '_R..', '_r..', '_Right', '_right']
start_lr_list = [start_l_list, start_r_list]
mid_lr_list = [mid_l_list, mid_r_list]
end_lr_list = [end_l_list, end_r_list]
def_all_lr_list = [start_lr_list, mid_lr_list, end_lr_list]
def load_data(self):
#create the default files when there is no save data or loading fails
self.all_lr_list = []
for i, save_file in enumerate(self.save_files):
if os.path.exists(save_file):#if the save file exists
try:
with open(save_file, 'r') as f:
save_data = json.load(f)
l_list = save_data.keys()
r_list = save_data.values()
self.all_lr_list.append([l_list, r_list])
except Exception as e:
print e.message
self.all_lr_list.append(self.def_all_lr_list[i])
else:
self.all_lr_list.append(self.def_all_lr_list[i])
def save_data(self):
if not os.path.exists(self.dir_path):
os.makedirs(self.dir_path)
for table, save_file in zip(self.rule_table_list, self.save_files):
save_data = {}
for row in range(100):
left_data = table.item(row, 0)
right_data = table.item(row, 1)
if left_data and right_data:
if not left_data.text() or not right_data.text():
continue
#print 'save data :', left_data.text(), right_data.text()
save_data[left_data.text()] = right_data.text()
with open(save_file, 'w') as f:
json.dump(save_data, f)
def closeEvent(self, e):
self.save_data()
self.deleteLater()
```
#### File: scripts/siweighteditor/weight.py
```python
from maya import mel
from maya import cmds
from . import lang
from . import common
import os
import json
import re
class WeightCopyPaste():
def main(self, skinMeshes, mode='copy', saveName='default', method='index', weightFile='auto',
threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False):
if viewmsg:
cmds.inViewMessage( amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5)
'''
Function to save and load skin weight data.
mode -> whether to copy or to paste: 'copy' or 'paste'
saveName -> folder name the weight data is saved under; specify it when you want to separate data per tool or model
method -> how to paste: 'index', 'nearest', 'barycentric' or 'over'
The 'index' method maps weights onto the object by vertex index. It is the most convenient method when the target object and the exported data share the same topology.
The 'nearest' method looks up the nearest vertex in the imported data and sets the weight to that value. It is best suited for mapping a high-resolution mesh onto a low-resolution mesh.
The 'barycentric' method is only supported for polygon meshes. It finds the nearest triangle of the target geometry and
rescales the weights according to the distance between the source point and the vertices. It is normally used for coarse meshes that are mapped onto high-resolution meshes.
The 'over' method is similar to 'index', but the weights of the target mesh are not cleared before mapping, so weights of unmatched indices are kept as they are.
nearest and barycentric are currently unusable because of a bug (the process never finishes), as of 2016/11/03
-> barycentric and bylinear are available from Maya 2016 Extension 2
weightFile -> specify a path when you want to pick the file manually instead of searching by mesh name; intended to be used together with the nearest/barycentric methods.
-> note that specifying a file name when copying in Maya prevents saving more than one file.
threshold -> position search range for nearest/barycentric
'''
self.skinMeshes = skinMeshes
self.saveName = saveName
self.method = method
self.weightFile = weightFile
self.threshold = threshold
self.engine = engine
self.memShapes = {}
self.target = tgt
self.pasteMode = {'index':1, 'nearest':3}
# convert to a list if it is not already one
if not isinstance(self.skinMeshes, list):
temp = self.skinMeshes
self.skinMeshes = []
self.skinMeshes.append(temp)
# build the file paths in advance
if path == 'default':
self.filePath = os.getenv('MAYA_APP_DIR') + '\\Scripting_Files\\weight\\' + self.saveName
elif path == 'project':
self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1])
self.protect_path = os.path.join(self.scene_path, 'weight_protector')
try:
if not os.path.exists(self.protect_path):
os.makedirs(self.protect_path)
except Exception as e:
print e.message
return
self.filePath = self.protect_path+'\\' + self.saveName
self.fileName = os.path.join(self.filePath, self.saveName + '.json')
self.apiName = os.path.join(self.filePath, self.saveName + '.skn')
# call copy or paste accordingly
if mode == 'copy':
self.weightCopy()
if mode == 'paste':
self.weightPaste()
def weightPaste(self):
dummy = cmds.spaceLocator()
for skinMesh in self.skinMeshes:
# save file name to read; use the mesh name when set to 'auto'
if self.weightFile == 'auto':
weightFile = skinMesh
else:
weightFile = self.weightFile
dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
# if there is no skinCluster, bind using the information gathered beforehand
if not dstSkinCluster:
meshName = str(weightFile).replace('|', '__pipe__')
if os.path.exists(self.fileName):
try:
with open(self.fileName, 'r') as f: # open the file: 'r' is read mode, 'w' is write mode
saveData = json.load(f) # load
# self.visibility = saveData['visibility']#load saved data
skinningMethod = saveData[';skinningMethod']
dropoffRate = saveData[';dropoffRate']
maintainMaxInfluences = saveData[';maintainMaxInfluences']
maxInfluences = saveData[';maxInfluences']
bindMethod = saveData[';bindMethod']
normalizeWeights = saveData[';normalizeWeights']
influences = saveData[';influences']
# if the child nodes are transforms, parent them to a dummy to move them out of the way
common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut')
influences = cmds.ls(influences, l=True, tr=True)
# bind
dstSkinCluster = cmds.skinCluster(
skinMesh,
influences,
omi=maintainMaxInfluences,
mi=maxInfluences,
dr=dropoffRate,
sm=skinningMethod,
nw=normalizeWeights,
tsb=True,
)
dstSkinCluster = dstSkinCluster[0]
# restore the parenting
common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent')
tempSkinNode = skinMesh#keep the node that has the skinCluster so its parent can be retrieved later
except Exception as e:
print e.message
print 'Error !! Skin bind failed : ' + skinMesh
continue
else:
dstSkinCluster = dstSkinCluster[0]
tempSkinNode = skinMesh#keep the node that has the skinCluster so its parent can be retrieved later
if self.engine == 'maya':
files = os.listdir(self.filePath)
print files
if len(files) == 2:
for file in files:
name, ext = os.path.splitext(file)
if ext == '.xml':
xml_name = file
else:
# '|' is not allowed in file names, so convert it
meshName = str(weightFile).replace('|', '__pipe__')
# ':' is not allowed in file names, so convert it
meshName = str(meshName).replace(':', '__colon__')
xml_name = meshName + '.xml'
if os.path.isfile(self.filePath + '\\' + xml_name):
if self.method == 'index' or self.method == 'over':
cmds.deformerWeights(xml_name,
im=True,
method=self.method,
deformer=dstSkinCluster,
path=self.filePath + '\\')
else:
cmds.deformerWeights(xml_name,
im=True,
deformer=dstSkinCluster,
method=self.method,
worldSpace=True,
positionTolerance=self.threshold,
path=self.filePath + '\\')
cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True)
print 'Weight paste to : ' + str(skinMesh)
else:
print 'Not exist seved weight XML file : ' + skinMesh
# delete the dummy parent
cmds.delete(dummy)
cmds.select(self.skinMeshes, r=True)
# function that saves the weight information
def weightCopy(self):
saveData = {}
# create the save directory if it does not exist
if not os.path.exists(self.filePath):
os.makedirs(os.path.dirname(self.filePath + '\\')) # note that the trailing \\ is required
else: # if it exists, delete its contents
files = os.listdir(self.filePath)
if files is not None:
for file in files:
os.remove(self.filePath + '\\' + file)
skinFlag = False
all_influences = []
for skinMesh in self.skinMeshes:
try:
cmds.bakePartialHistory(skinMesh, ppt=True)
except:
pass
# get the skinCluster from the node history#fixed because the node right above inMesh is not always the skinCluster
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
if not srcSkinCluster:
continue # move on to the next mesh if there is no skinCluster
tempSkinNode = skinMesh#keep the node that has the skinCluster so its parent can be retrieved later
# gather the various skinCluster parameters
srcSkinCluster = srcSkinCluster[0]
skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
saveData[';skinningMethod'] = skinningMethod
saveData[';dropoffRate'] = dropoffRate
saveData[';maintainMaxInfluences'] = maintainMaxInfluences
saveData[';maxInfluences'] = maxInfluences
saveData[';bindMethod'] = bindMethod
saveData[';normalizeWeights'] = normalizeWeights
all_influences += influences
#saveData[';influences'] = influences
skinFlag = True
all_influences = list(set(all_influences))
saveData[';influences'] = all_influences
#add the influences beforehand so the copy tolerates changes in the influence count
for skinMesh in self.skinMeshes:
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
if not srcSkinCluster:
continue # move on to the next iteration of the for loop if there is no skinCluster
srcSkinCluster = srcSkinCluster[0]
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
sub_influences = list(set(all_influences) - set(influences))
if sub_influences:
cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True, wt=0, ps=0)
if self.engine == 'maya':
# save file name to read; use the mesh name when set to 'auto'
if self.weightFile == 'auto':
weightFile = skinMesh
else:
weightFile = self.weightFile
# '|' is not allowed in file names, so convert it
meshName = str(weightFile).replace('|', '__pipe__')
# ':' is not allowed in file names, so convert it
meshName = str(meshName).replace(':', '__colon__')
cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster, path=self.filePath + '\\')
with open(self.fileName, 'w') as f: # open the file: 'r' is read mode, 'w' is write mode
json.dump(saveData, f)
def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True):
'''
Skin weight transfer function.
If the destination object is not bound, it is bound automatically using the bind information of the source.
Arguments:
skinMesh -> source mesh (one mesh; a list is also accepted)
transferedMesh -> destination meshes (list form, multiple allowed; a single object also works)
transferWeight -> whether to transfer the weights. Optional, defaults to True
logTransfer -> whether to print log messages
returnInfluences -> whether to return the bound influence information as the return value. Optional, defaults to False
'''
massege01 = lang.Lang(
en=': It does not perform the transfer of weight because it is not a skin mesh.',
ja=u': スキンメッシュではないのでウェイトの転送を行いません'
).output()
massege02 = lang.Lang(
en='Transfer the weight:',
ja=u'ウェイトを転送:'
).output()
massege03 = lang.Lang(
en='Transfer bind influences:',
ja=u'バインド状態を転送:'
).output()
if isinstance(skinMesh, list): # if the source is a list, take only the first mesh
skinMesh = skinMesh[0] # safeguard in case a list was passed
# get the skinCluster from the node history#fixed because the node right above inMesh is not always the skinCluster
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
# srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False)
if not srcSkinCluster:
if logTransfer:
print skinMesh + massege01
return False # leave the function if there is no skinCluster
# gather the various skinCluster parameters
srcSkinCluster = srcSkinCluster[0]
skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) # the q flag is query mode; e would be edit mode
# convert to a list if it is not already one
if not isinstance(transferedMesh, list):
temp = transferedMesh
transferedMesh = []
transferedMesh.append(temp)
for dst in transferedMesh:
#prepare a dummy parent to park the child nodes
dummy = common.TemporaryReparent().main(mode='create')
common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut')
shapes = cmds.listRelatives(dst, s=True, pa=True, type='mesh')
if not shapes: # if there is no mesh
continue # abort and move on to the next object
# check whether a skinCluster exists
dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster')
# if there is no skinCluster, bind using the information gathered beforehand
if not dstSkinCluster:
# bind
dstSkinCluster = cmds.skinCluster(
dst,
influences,
omi=maintainMaxInfluences,
mi=maxInfluences,
dr=dropoffRate,
sm=skinningMethod,
nw=normalizeWeights,
tsb=True,
)
if logTransfer:
print massege03 + '[' + skinMesh + '] >>> [' + dst + ']'
dstSkinCluster = dstSkinCluster[0]
if transferWeight:
cmds.copySkinWeights(
ss=srcSkinCluster,
ds=dstSkinCluster,
surfaceAssociation='closestPoint',
influenceAssociation=['name', 'closestJoint', 'oneToOne'],
normalize=True,
noMirror=True
)
if logTransfer:
print massege02 + '[' + skinMesh + '] >>> [' + dst + ']'
#restore the parenting
common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent')
#delete the dummy parent
common.TemporaryReparent().main(dummyParent=dummy, mode='delete')
if returnInfluences:
return influences
else:
return True
def symmetry_weight(srcNode=None, dstNode=None, symWeight=True):
'''
Function that mirrors (symmetrizes) weights.
srcNode -> mirror source
dstNode -> mirror destination
symWeight -> whether to mirror the weights
'''
# get the skinCluster
if srcNode is None:
return
srcShapes = cmds.listRelatives(srcNode, s=True, pa=True, type='mesh')
if srcShapes:
srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster')
# if there is a skinCluster, set the joint labels and mirror the weights
if srcSkinCluster:
# call the function that transfers the bind state
skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True) #get the joints
for skinJoint in skinJointAll:
# call the joint label setup function
joint_label(skinJoint, visibility=False)
if symWeight is False or dstNode is None:
return
transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True)
dstShapes = cmds.listRelatives(dstNode, s=True, pa=True, type='mesh')
dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False)
cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0],
mirrorMode='YZ', surfaceAssociation='closestComponent',
influenceAssociation='label', normalize=True)
def load_joint_label_rules():
#default values used when loading fails
start_l_list = ['L_', 'l_', 'Left_', 'left_']
start_r_list = ['R_', 'r_', 'Right_', 'right_']
mid_l_list = ['_L_', '_l_', '_Left_', '_left_']
mid_r_list = ['_R_', '_r_', '_Right_', '_right_']
end_l_list = ['_L', '_l', '_L.', '_l.', '_L..', '_l..', '_Left', '_left']
end_r_list = ['_R', '_r', '_R.', '_r.', '_R..', '_r..', '_Right', '_right']
def_left_list_list = [start_l_list, mid_l_list, end_l_list]
def_right_list_list = [start_r_list, mid_r_list, end_r_list]
#load the rules from the left/right symmetry settings files
dir_path = os.path.join(
os.getenv('MAYA_APP_dir'),
'Scripting_Files')
start_file = dir_path+'/joint_rule_start.json'
middle_file = dir_path+'/joint_rule_middle.json'
end_file = dir_path+'/joint_rule_end.json'
save_files = [start_file, middle_file, end_file]
left_list_list = []
right_list_list = []
for i, save_file in enumerate(save_files):
if os.path.exists(save_file):#if the save file exists
try:
with open(save_file, 'r') as f:
save_data = json.load(f)
l_list = save_data.keys()
r_list = save_data.values()
left_list_list.append(l_list)
right_list_list.append(r_list)
except Exception as e:
print e.message
left_list_list.append(def_left_list_list[i])
right_list_list.append(def_right_list_list[i])
else:
left_list_list.append(def_left_list_list[i])
right_list_list.append(def_right_list_list[i])
return left_list_list, right_list_list
def joint_label(object, visibility=False):
'''
Joint label setup function.
object -> object(s); a list is also accepted
visibility -> label visibility, optional. Defaults to False.
'''
#load the labelling rules
left_list_list, right_list_list = load_joint_label_rules()
# convert to a list if it is not already one
if not isinstance(object, list):
temp = object
object = []
object.append(temp)
for skinJoint in object:
objTypeName = cmds.objectType(skinJoint)
if objTypeName == 'joint':
split_name = skinJoint.split('|')[-1]
# determine whether the skeleton name contains an LR marker
side = 0
side_name = ''
for i, (l_list, r_list) in enumerate(zip(left_list_list, right_list_list)):
for j, lr_list in enumerate([l_list, r_list]):
for k, lr in enumerate(lr_list):
if i == 0:
if re.match(lr, split_name):
side = j + 1
if i == 1:
if re.search(lr, split_name):
side = j + 1
if i == 2:
if re.match(lr[::-1], split_name[::-1]):
side = j + 1
if side:#break out of every loop once a match is found
side_name = lr
break
if side:
break
if side:
break
#print 'joint setting :', split_name, side, side_name
# set the left/right label; centre when it is neither
cmds.setAttr(skinJoint + '.side', side)
# set the label type to "Other"
cmds.setAttr(skinJoint + '.type', 18)
new_joint_name = split_name.replace(side_name.replace('.', ''), '')
# set the skeleton name
cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string')
# set the label visibility
cmds.setAttr(skinJoint + '.drawLabel', visibility)
else:
print(str(skinJoint) + ' : ' + str(objTypeName) + ' Skip Command')
#toggle skinning mute on/off
def toggle_mute_skinning():
msg01 = lang.Lang(
en='No mesh selection.\nWould you like to process all of mesh in this scene?.',
ja=u'選択メッシュがありません。\nシーン内のすべてのメッシュを処理しますか?').output()
msg02 = lang.Lang(en='Yes', ja=u'はい').output()
msg03 = lang.Lang(en='No', ja=u'いいえ').output()
msg04 = lang.Lang(
en='Skinning is disabled',
ja=u'スキニングは無効になりました') .output()
msg05 = lang.Lang(
en='Skinning is enabled',
ja=u'スキニングが有効になりました') .output()
cmds.selectMode(o=True)
objects = cmds.ls(sl=True, l=True)
ad_node = []
for node in objects:
children = cmds.ls(cmds.listRelatives(node, ad=True, f=True), type ='transform')
ad_node += [node]+children
#print len(ad_node)
objects = set(ad_node)
#print len(objects)
if not objects:
all_mesh = cmds.confirmDialog(m=msg01, t='', b= [msg02, msg03], db=msg02, cb=msg03, icn='question',ds=msg03)
if all_mesh == msg02:
objects = cmds.ls(type='transform')
if not objects:
return
mute_flag = 1
skin_list = []
for node in objects:
skin = cmds.ls(cmds.listHistory(node), type='skinCluster')
if not skin:
continue
skin_list.append(skin)
if cmds.getAttr(skin[0]+'.envelope') > 0:
mute_flag = 0
for skin in skin_list:
cmds.setAttr(skin[0]+'.envelope', mute_flag)
if mute_flag == 0:
cmds.confirmDialog(m=msg04)
if mute_flag == 1:
cmds.confirmDialog(m=msg05)
```
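The `WeightCopyPaste.main` entry point above drives both the export and import paths, while `transfer_weight` and `joint_label` are standalone helpers. Below is a minimal usage sketch as it might be run from the Maya script editor; it assumes a Maya session with the plug-in's `siweighteditor` package on the script path, and the node names are examples only.
```python
# Hypothetical usage sketch (requires a running Maya session); node names are examples.
from maya import cmds
from siweighteditor import weight  # assumed import path for this package

# Export the skin weights of a mesh to an XML + JSON pair under MAYA_APP_DIR.
weight.WeightCopyPaste().main(['body_geo'], mode='copy', saveName='body_rig')

# Later, restore them; the mesh is rebound automatically if it lost its skinCluster.
weight.WeightCopyPaste().main(['body_geo'], mode='paste', saveName='body_rig', method='index')

# Copy the bind and weights from one mesh to another, then set mirror-friendly joint labels.
weight.transfer_weight('body_geo', ['body_lod1_geo'])
weight.joint_label(cmds.ls(type='joint'), visibility=False)
```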
|
{
"source": "jdrestre/spotify_project",
"score": 2
}
|
#### File: popular_songs/application/music_provider.py
```python
from abc import ABC, abstractmethod
from popular_songs.domain.models import Country
class MusicProvider(ABC):
@abstractmethod
def get_popular_songs(self, country: Country):
pass
```
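`MusicProvider` is the application-layer port that concrete adapters (for example a Spotify client) are expected to implement. A toy sketch of such an adapter is shown below; the `country.name` attribute and the return shape are assumptions for illustration, since the domain model and the real adapter are not shown here.
```python
from typing import Dict, List

from popular_songs.application.music_provider import MusicProvider
from popular_songs.domain.models import Country


class InMemoryMusicProvider(MusicProvider):
    """Toy adapter used to illustrate the port; a real adapter would call an external API."""

    def __init__(self, songs_by_country: Dict[str, List[str]]):
        self.songs_by_country = songs_by_country

    def get_popular_songs(self, country: Country) -> List[str]:
        # `country.name` is assumed to exist on the domain model for this example.
        return self.songs_by_country.get(country.name, [])
```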
|
{
"source": "JDReutt/siraj",
"score": 3
}
|
#### File: JDReutt/siraj/MovieRec.py
```python
import numpy as np
from lightfm.datasets import fetch_movielens
from lightfm import LightFM
# fetch data and format it
data = fetch_movielens(min_rating=4.0)
# print training and testing data
print(repr(data['train']))
print(repr(data['test']))
# create model
# warp = weighted approximate-rank pairwise (uses gradient descent to iteratively improve)
model = LightFM(loss='warp')
# train model
model.fit(data['train'], epochs=30, num_threads=2)
def sample_recommendation(model, data, user_ids):
# number of users and movies in training data
n_users, n_items = data['train'].shape
# generate recommendations for each user we input
for user_id in user_ids:
# movies they already like
known_positives = data['item_labels'][data['train'].tocsr()[user_id].indices]
# movies our model predicts they will like
scores = model.predict(user_id, np.arange(n_items))
# rank movies in order of most liked to least
top_items = data['item_labels'][np.argsort(-scores)]
print("User %s" % user_id)
print(" Known positives:")
for x in known_positives[:3]:
print(" %s" % x)
print(" Recommended:")
for x in top_items[:3]:
print(" %s" % x)
sample_recommendation(model, data, [3, 25, 88])
```
#### File: JDReutt/siraj/trippyimg.py
```python
import numpy as np
from functools import partial
import PIL.Image
import tensorflow as tf
import urllib.request
import os
import zipfile
def main():
#Step 1 - download pretrained neural network
data_dir = '.'  # where the model zip is downloaded and extracted; assumed here, the original never defines data_dir
url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip'
model_name = os.path.split(url)[-1]
local_zip_file = os.path.join(data_dir, model_name)
if not os.path.exists(local_zip_file):
#download
model_url = urllib.request.urlopen(url)
with open(local_zip_file, 'wb') as output:
output.write(model_url.read())
#Extract
with zipfile.ZipFile(local_zip_file, 'r') as zip_ref:
zip_ref.extractall(data_dir)
#start with a gray image with noise
img_noise = np.random.uniform(size=(224,224,3)) + 100.0
model_fn = 'tensorflow_inception_graph.pb'
#step2 creating tensorflow session and loading the model
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
with tf.gfile.FastGFile(os.path.join(data_dir, model_fn), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
t_input = tf.placeholder(np.float32, name='input') #define the input tensor
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-imagenet_mean,0)
tf.import_graph_def(graph_def, {'input':t_preprocessed})
layers = [op.name for op in graph.get_operations() if op.type=='Conv2D' and 'import/' in op.name]
feature_nums = [int(graph.get_tensor_by_name(name + ':0').get_shape()[-1]) for name in layers]
print('Number of layers', len(layers))
print('Total number of feature channels:', sum(feature_nums))
```
|
{
"source": "jdrew82/nautobot-plugin-tunnel-tracker",
"score": 2
}
|
#### File: nautobot-plugin-tunnel-tracker/nautobot_tunnel_tracker/filters.py
```python
import django_filters
from django.db.models import Q
from nautobot.extras.filters import CreatedUpdatedFilterSet
from .models import Tunnel
class TunnelFilter(CreatedUpdatedFilterSet):
"""Filter capabilities for Tunnel instances."""
q = django_filters.CharFilter(
method="search",
label="Search",
)
name = django_filters.ModelMultipleChoiceFilter(
field_name="name__slug",
queryset=Tunnel.objects.all(),
to_field_name="slug",
label="Tunnel Name (slug)",
)
status = django_filters.ModelMultipleChoiceFilter(
field_name="status__slug",
queryset=Tunnel.objects.all(),
to_field_name="slug",
label="Tunnel Status (slug)",
)
tunnel_type = django_filters.ModelMultipleChoiceFilter(
field_name="tunnel_type__slug",
queryset=Tunnel.objects.all(),
to_field_name="slug",
label="Tunnel Type (slug)",
)
class Meta:
"""Class to define what is used for filtering tunnels with the search box."""
model = Tunnel
fields = ["name", "status", "tunnel_type"]
def search(self, queryset, name, value): # pylint: disable=no-self-use
"""Perform the filtered search."""
if not value.strip():
return queryset
qs_filter = Q(name__icontains=value) | Q(status__icontains=value) | Q(tunnel_type__icontains=value)
return queryset.filter(qs_filter)
```
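The `q` parameter funnels free-text queries through `search()`, which ORs together `icontains` lookups on name, status, and tunnel type. A quick sketch of exercising the filterset directly, using standard django-filter usage and assuming some `Tunnel` rows already exist:
```python
from nautobot_tunnel_tracker.filters import TunnelFilter
from nautobot_tunnel_tracker.models import Tunnel

# Free-text search across name/status/tunnel_type via the `q` parameter.
filterset = TunnelFilter({"q": "branch-office"}, queryset=Tunnel.objects.all())
matching_tunnels = filterset.qs  # filtered queryset
```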
#### File: nautobot-plugin-tunnel-tracker/nautobot_tunnel_tracker/views.py
```python
from nautobot.core.views import generic
from .filters import TunnelFilter
from .forms import TunnelCreationForm, TunnelFilterForm, TunnelCreationCSVForm
from .models import Tunnel
from .tables import TunnelTable, TunnelBulkTable
class TunnelListView(generic.ObjectListView):
"""View for listing all Tunnels."""
queryset = Tunnel.objects.all()
filterset = TunnelFilter
filterset_form = TunnelFilterForm
table = TunnelTable
template_name = "nautobot_tunnel_tracker/tunnel_list.html"
class TunnelView(generic.ObjectView):
"""View for single Tunnel instance."""
queryset = Tunnel.objects.all()
def get_extra_context(self, request, instance):
"""Add extra data to detail view for Nautobot."""
return {}
class TunnelBulkImportView(generic.BulkImportView):
"""View for bulk-importing a CSV file to create Tunnels."""
queryset = Tunnel.objects.all()
model_form = TunnelCreationCSVForm
table = TunnelBulkTable
class TunnelEditView(generic.ObjectEditView):
"""View for managing Tunnels."""
queryset = Tunnel.objects.all()
model_form = TunnelCreationForm
class TunnelDeleteView(generic.ObjectDeleteView):
"""View for deleting Tunnels."""
queryset = Tunnel.objects.all()
class TunnelBulkDeleteView(generic.BulkDeleteView):
"""View for deleting one or more Tunnels."""
queryset = Tunnel.objects.filter()
table = TunnelTable
```
|
{
"source": "jdries/inspire-validation",
"score": 2
}
|
#### File: src/inspirevalidator/__init__.py
```python
import os
from pathlib import Path
os.environ['XML_DEBUG_CATALOG'] = 'TRUE'
from lxml import etree
gmi_schema = Path(__file__).parent / 'gmd' / 'iso19115' / 'gmi' / '1.0' / 'gmi.xsd'
catalog = Path(__file__).parent / 'gmd' / 'catalog.xml'
os.environ['XML_CATALOG_FILES'] = str(catalog)
xmlschema_doc = etree.parse(str(gmi_schema))
xmlschema = etree.XMLSchema(xmlschema_doc)
def assert_valid_inspire_metadata(my_xml_file):
doc = etree.parse(my_xml_file)
xmlschema.assertValid(doc)
```
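The module wires libxml2's catalog support to the bundled ISO 19115/GMI schemas, so validation needs no network access. A small usage sketch follows; the metadata file path is hypothetical.
```python
from lxml import etree

from inspirevalidator import assert_valid_inspire_metadata

try:
    assert_valid_inspire_metadata("metadata/dataset_record.xml")  # example path
    print("Metadata record is schema-valid")
except etree.DocumentInvalid as err:
    # assertValid raises DocumentInvalid describing the first schema violation.
    print("Validation failed:", err)
```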
|
{
"source": "jdries/openeo-python-client",
"score": 2
}
|
#### File: openeo/internal/process_graph_visitor.py
```python
import json
from abc import ABC
from typing import Union, Tuple, Any
from deprecated import deprecated
from openeo.rest import OpenEoClientException
class ProcessGraphVisitException(OpenEoClientException):
pass
class ProcessGraphVisitor(ABC):
"""
Hierarchical Visitor for (nested) process graphs structures.
"""
def __init__(self):
self.process_stack = []
@classmethod
def dereference_from_node_arguments(cls, process_graph: dict) -> str:
"""
Walk through the given (flat) process graph and replace (in-place) "from_node" references in
process arguments (dictionaries or lists) with the corresponding resolved subgraphs
:param process_graph: process graph dictionary to be manipulated in-place
:return: name of the "result" node of the graph
"""
# TODO avoid manipulating process graph in place? make it more explicit? work on a copy?
# TODO call it more something like "unflatten"?. Split this off of ProcessGraphVisitor?
# TODO implement this through `ProcessGraphUnflattener` ?
def resolve_from_node(process_graph, node, from_node):
if from_node not in process_graph:
raise ProcessGraphVisitException('from_node {f!r} (referenced by {n!r}) not in process graph.'.format(
f=from_node, n=node))
return process_graph[from_node]
result_node = None
for node, node_dict in process_graph.items():
if node_dict.get("result", False):
if result_node:
raise ProcessGraphVisitException("Multiple result nodes: {a}, {b}".format(a=result_node, b=node))
result_node = node
arguments = node_dict.get("arguments", {})
for arg in arguments.values():
if isinstance(arg, dict):
if "from_node" in arg:
arg["node"] = resolve_from_node(process_graph, node, arg["from_node"])
else:
for k, v in arg.items():
if isinstance(v, dict) and "from_node" in v:
v["node"] = resolve_from_node(process_graph, node, v["from_node"])
elif isinstance(arg, list):
for i, element in enumerate(arg):
if isinstance(element, dict) and "from_node" in element:
arg[i] = resolve_from_node(process_graph, node, element['from_node'])
if result_node is None:
dump = json.dumps(process_graph, indent=2)
raise ProcessGraphVisitException("No result node in process graph: " + dump[:1000])
return result_node
def accept_process_graph(self, graph: dict) -> 'ProcessGraphVisitor':
"""
Traverse a (flat) process graph
:param graph:
:return:
"""
# TODO: this is driver specific functionality, working on flattened graph structures. Make this more clear?
top_level_node = self.dereference_from_node_arguments(graph)
self.accept_node(graph[top_level_node])
return self
@deprecated(reason="Use accept_node() instead")
def accept(self, node: dict):
self.accept_node(node)
def accept_node(self, node: dict):
pid = node['process_id']
arguments = node.get('arguments', {})
namespace = node.get("namespace", None)
self._accept_process(process_id=pid, arguments=arguments, namespace=namespace)
def _accept_process(self, process_id: str, arguments: dict, namespace: Union[str, None]):
self.process_stack.append(process_id)
self.enterProcess(process_id=process_id, arguments=arguments, namespace=namespace)
for arg_id, value in sorted(arguments.items()):
if isinstance(value, list):
self.enterArray(argument_id=arg_id)
self._accept_argument_list(value)
self.leaveArray(argument_id=arg_id)
elif isinstance(value, dict):
self.enterArgument(argument_id=arg_id, value=value)
self._accept_argument_dict(value)
self.leaveArgument(argument_id=arg_id, value=value)
else:
self.constantArgument(argument_id=arg_id, value=value)
self.leaveProcess(process_id=process_id, arguments=arguments, namespace=namespace)
assert self.process_stack.pop() == process_id
def _accept_argument_list(self, elements: list):
for element in elements:
if isinstance(element, dict):
self._accept_argument_dict(element)
self.arrayElementDone(element)
else:
self.constantArrayElement(element)
def _accept_argument_dict(self, value: dict):
if 'node' in value and 'from_node' in value:
# TODO: this looks bit weird (or at least very specific).
self.accept_node(value['node'])
elif value.get("from_node"):
self.accept_node(value['from_node'])
elif "process_id" in value:
self.accept_node(value)
elif "from_parameter" in value:
self.from_parameter(value['from_parameter'])
else:
self._accept_dict(value)
def _accept_dict(self, value: dict):
pass
def from_parameter(self,parameter_id:str):
pass
def enterProcess(self, process_id: str, arguments: dict, namespace: Union[str, None]):
pass
def leaveProcess(self, process_id: str, arguments: dict, namespace: Union[str, None]):
pass
def enterArgument(self, argument_id: str, value):
pass
def leaveArgument(self, argument_id: str, value):
pass
def constantArgument(self, argument_id: str, value):
pass
def enterArray(self, argument_id: str):
pass
def leaveArray(self, argument_id: str):
pass
def constantArrayElement(self, value):
pass
def arrayElementDone(self, value: dict):
pass
def find_result_node(flat_graph: dict) -> Tuple[str, dict]:
"""
Find result node in flat graph
:return: tuple with node id (str) and node dictionary of the result node.
"""
result_nodes = [(key, node) for (key, node) in flat_graph.items() if node.get("result")]
if len(result_nodes) == 1:
return result_nodes[0]
elif len(result_nodes) == 0:
raise ProcessGraphVisitException("Found no result node in flat process graph")
else:
keys = [k for (k, n) in result_nodes]
raise ProcessGraphVisitException(
"Found multiple result nodes in flat process graph: {keys!r}".format(keys=keys))
class ProcessGraphUnflattener:
"""
Base class to process a flat graph representation of a process graph
and unflatten it by resolving the "from_node" references.
Subclassing and overriding certain methods allows to build a desired unflattened graph structure.
"""
# Sentinel object for flagging a node "under construction" and detect graph cycles.
_UNDER_CONSTRUCTION = object()
def __init__(self, flat_graph: dict):
self._flat_graph = flat_graph
self._nodes = {}
@classmethod
def unflatten(cls, flat_graph: dict, **kwargs):
"""Class method helper to unflatten given flat process graph"""
return cls(flat_graph=flat_graph, **kwargs).process()
def process(self):
"""Process the flat process graph: unflatten it."""
result_key, result_node = find_result_node(flat_graph=self._flat_graph)
return self.get_node(result_key)
def get_node(self, key: str) -> Any:
"""Get processed node by node key."""
if key not in self._nodes:
self._nodes[key] = self._UNDER_CONSTRUCTION
node = self._process_node(self._flat_graph[key])
self._nodes[key] = node
elif self._nodes[key] is self._UNDER_CONSTRUCTION:
raise ProcessGraphVisitException("Cycle in process graph")
return self._nodes[key]
def _process_node(self, node: dict) -> Any:
"""
Overridable: generate process graph node from flat_graph data.
"""
# Default implementation: basic validation/whitelisting, and only traverse arguments
return dict(
process_id=node["process_id"],
arguments=self._process_value(value=node["arguments"]),
**{k: node[k] for k in ["namespace", "description", "result"] if k in node}
)
def _process_from_node(self, key: str, node: dict) -> Any:
"""
Overridable: generate a node from a flat_graph "from_node" reference
"""
# Default/original implementation: keep "from_node" key and add resolved node under "node" key.
# TODO: just return `self.get_node(key=key)`
return {
"from_node": key,
"node": self.get_node(key=key)
}
def _process_from_parameter(self, name: str) -> Any:
"""
Overridable: generate a node from a flat_graph "from_parameter" reference
"""
# Default implementation:
return {"from_parameter": name}
def _resolve_from_node(self, key: str) -> dict:
if key not in self._flat_graph:
raise ProcessGraphVisitException("from_node reference {k!r} not found in process graph".format(k=key))
return self._flat_graph[key]
def _process_value(self, value) -> Any:
if isinstance(value, dict):
if "from_node" in value:
key = value["from_node"]
node = self._resolve_from_node(key=key)
return self._process_from_node(key=key, node=node)
elif "from_parameter" in value:
name = value["from_parameter"]
return self._process_from_parameter(name=name)
elif "process_graph" in value:
# Don't traverse child process graphs
# TODO: should/can we? Can we know available parameters for validation, or do we skip validation?
return value
else:
return {k: self._process_value(v) for (k, v) in value.items()}
elif isinstance(value, (list, tuple)):
return [self._process_value(v) for v in value]
else:
return value
```
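To make the "from_node" handling concrete, here is a small sketch that unflattens a two-node process graph with `ProcessGraphUnflattener`; the flat graph itself is made up for illustration.
```python
from openeo.internal.process_graph_visitor import ProcessGraphUnflattener

# Minimal flat graph: load a collection, then take a temporal maximum of it.
flat_graph = {
    "load1": {
        "process_id": "load_collection",
        "arguments": {"id": "S2"},
    },
    "reduce1": {
        "process_id": "max_time",
        "arguments": {"data": {"from_node": "load1"}},
        "result": True,
    },
}

unflattened = ProcessGraphUnflattener.unflatten(flat_graph)
# The default implementation keeps the "from_node" key and adds the resolved node under "node".
assert unflattened["arguments"]["data"]["from_node"] == "load1"
assert unflattened["arguments"]["data"]["node"]["process_id"] == "load_collection"
```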
#### File: openeo/udf/feature_collection.py
```python
from typing import Optional, Union, Any, List
import pandas
import shapely.geometry
# Geopandas is optional dependency for now
try:
from geopandas import GeoDataFrame
except ImportError:
class GeoDataFrame:
pass
class FeatureCollection:
"""
A feature collection that represents a subset or a whole feature collection
where single vector features may have time stamps assigned.
"""
def __init__(
self,
id: str,
data: GeoDataFrame,
start_times: Optional[Union[pandas.DatetimeIndex, List[str]]] = None,
end_times: Optional[Union[pandas.DatetimeIndex, List[str]]] = None
):
"""
Constructor of the of a vector collection
:param id: The unique id of the vector collection
:param data: A GeoDataFrame with geometry column and attribute data
:param start_times: The vector with start times for each spatial x,y slice
:param end_times: The pandas.DateTimeIndex vector with end times
for each spatial x,y slice, if no
end times are defined, then time instances are assumed not intervals
"""
self.id = id
self._data = data
self._start_times = self._as_datetimeindex(start_times, expected_length=len(self.data))
self._end_times = self._as_datetimeindex(end_times, expected_length=len(self.data))
def __repr__(self):
return f"<{type(self).__name__} with {type(self._data).__name__}>"
@staticmethod
def _as_datetimeindex(dates: Any, expected_length: int = None) -> Union[pandas.DatetimeIndex, None]:
if dates is None:
return dates
if not isinstance(dates, pandas.DatetimeIndex):
dates = pandas.DatetimeIndex(dates)
if expected_length is not None and expected_length != len(dates):
raise ValueError("Expected size {e} but got {a}: {d}".format(e=expected_length, a=len(dates), d=dates))
return dates
@property
def data(self) -> GeoDataFrame:
"""
Get the geopandas.GeoDataFrame that contains the geometry column and any number of attribute columns
:return: A data frame that contains the geometry column and any number of attribute columns
"""
return self._data
@property
def start_times(self) -> Union[pandas.DatetimeIndex, None]:
return self._start_times
@property
def end_times(self) -> Union[pandas.DatetimeIndex, None]:
return self._end_times
def to_dict(self) -> dict:
"""
Convert this FeatureCollection into a dictionary that can be converted into
a valid JSON representation
"""
data = {
"id": self.id,
"data": shapely.geometry.mapping(self.data),
}
if self.start_times is not None:
data["start_times"] = [t.isoformat() for t in self.start_times]
if self.end_times is not None:
data["end_times"] = [t.isoformat() for t in self.end_times]
return data
@classmethod
def from_dict(cls, data: dict) -> "FeatureCollection":
"""
Create a feature collection from a python dictionary that was created from
the JSON definition of the FeatureCollection
:param data: The dictionary that contains the feature collection definition
:return: A new FeatureCollection object
"""
return cls(
id=data["id"],
data=GeoDataFrame.from_features(data["data"]),
start_times=data.get("start_times"),
end_times=data.get("end_times"),
)
```
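A short sketch of constructing a `FeatureCollection` and serialising it is shown below; it needs the optional geopandas dependency, and the geometry and attribute values are invented for the example.
```python
import geopandas
import shapely.geometry

from openeo.udf.feature_collection import FeatureCollection

# Two point features with one attribute column (example data).
gdf = geopandas.GeoDataFrame(
    {"label": ["a", "b"]},
    geometry=[shapely.geometry.Point(5.0, 51.0), shapely.geometry.Point(5.1, 51.1)],
)

fc = FeatureCollection(
    id="points",
    data=gdf,
    start_times=["2021-01-01", "2021-02-01"],  # one timestamp per feature
)

as_dict = fc.to_dict()  # JSON-ready: GeoJSON-style geometry plus ISO-formatted start_times
```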
#### File: openeo/udf/udf_data.py
```python
from typing import Optional, List, Union
from openeo.udf.feature_collection import FeatureCollection
from openeo.udf.structured_data import StructuredData
from openeo.udf.xarraydatacube import XarrayDataCube
class UdfData:
"""
Container for data passed to a user defined function (UDF)
"""
# TODO: original implementation in `openeo_udf` project had `get_datacube_by_id`, `get_feature_collection_by_id`: is it still useful to provide this?
# TODO: original implementation in `openeo_udf` project had `server_context`: is it still useful to provide this?
def __init__(
self,
proj: dict = None,
datacube_list: Optional[List[XarrayDataCube]] = None,
feature_collection_list: Optional[List[FeatureCollection]] = None,
structured_data_list: Optional[List[StructuredData]] = None,
user_context: Optional[dict] = None,
):
"""
The constructor of the UDF argument class that stores all data required by the
user defined function.
:param proj: A dictionary of form {"proj type string": "projection description"} i. e. {"EPSG":4326}
:param datacube_list: A list of data cube objects
:param feature_collection_list: A list of VectorTile objects
:param structured_data_list: A list of structured data objects
"""
self.datacube_list = datacube_list
self.feature_collection_list = feature_collection_list
self.structured_data_list = structured_data_list
self.proj = proj
self._user_context = user_context or {}
def __repr__(self) -> str:
fields = " ".join(
f"{f}:{getattr(self, f)!r}" for f in
["datacube_list", "feature_collection_list", "structured_data_list"]
)
return f"<{type(self).__name__} {fields}>"
@property
def user_context(self) -> dict:
"""Return the user context that was passed to the run_udf function"""
return self._user_context
def get_datacube_list(self) -> Union[List[XarrayDataCube], None]:
"""Get the data cube list"""
return self._datacube_list
def set_datacube_list(self, datacube_list: Union[List[XarrayDataCube], None]):
"""
Set the data cube list
:param datacube_list: A list of data cubes
"""
self._datacube_list = datacube_list
datacube_list = property(fget=get_datacube_list, fset=set_datacube_list)
def get_feature_collection_list(self) -> Union[List[FeatureCollection], None]:
"""get all feature collections as list"""
return self._feature_collection_list
def set_feature_collection_list(self, feature_collection_list: Union[List[FeatureCollection], None]):
self._feature_collection_list = feature_collection_list
feature_collection_list = property(fget=get_feature_collection_list, fset=set_feature_collection_list)
def get_structured_data_list(self) -> Union[List[StructuredData], None]:
"""
Get all structured data entries
:return: A list of StructuredData objects
"""
return self._structured_data_list
def set_structured_data_list(self, structured_data_list: Union[List[StructuredData], None]):
"""
Set the list of structured data
:param structured_data_list: A list of StructuredData objects
"""
self._structured_data_list = structured_data_list
structured_data_list = property(fget=get_structured_data_list, fset=set_structured_data_list)
def to_dict(self) -> dict:
"""
Convert this UdfData object into a dictionary that can be converted into
a valid JSON representation
"""
return {
"datacubes": [x.to_dict() for x in self.datacube_list] \
if self.datacube_list else None,
"feature_collection_list": [x.to_dict() for x in self.feature_collection_list] \
if self.feature_collection_list else None,
"structured_data_list": [x.to_dict() for x in self.structured_data_list] \
if self.structured_data_list else None,
"proj": self.proj,
"user_context": self.user_context,
}
@classmethod
def from_dict(cls, udf_dict: dict) -> "UdfData":
"""
Create a udf data object from a python dictionary that was created from
the JSON definition of the UdfData class
:param udf_dict: The dictionary that contains the udf data definition
"""
datacubes = [XarrayDataCube.from_dict(x) for x in udf_dict.get("datacubes", [])]
feature_collection_list = [FeatureCollection.from_dict(x) for x in udf_dict.get("feature_collection_list", [])]
structured_data_list = [StructuredData.from_dict(x) for x in udf_dict.get("structured_data_list", [])]
udf_data = cls(
proj=udf_dict.get("proj"),
datacube_list=datacubes,
feature_collection_list=feature_collection_list,
structured_data_list=structured_data_list,
user_context=udf_dict.get("user_context")
)
return udf_data
```
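`UdfData` is mostly a plain container, so a pass through its dict form shows the shape a UDF runtime exchanges. A minimal sketch using only the fields defined above, with no data cubes or feature collections attached:
```python
from openeo.udf.udf_data import UdfData

# Container with only a projection and some user context; the cube/vector lists stay None.
udf_data = UdfData(proj={"EPSG": 4326}, user_context={"threshold": 0.5})

assert udf_data.user_context["threshold"] == 0.5
assert udf_data.datacube_list is None

serialized = udf_data.to_dict()
# List entries serialise to None when nothing was attached.
assert serialized["proj"] == {"EPSG": 4326}
assert serialized["datacubes"] is None
```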
#### File: rest/datacube/test_bandmath.py
```python
import numpy as np
import pytest
from openeo.rest import BandMathException
from .. import get_download_graph
from ..conftest import reset_graphbuilder
from ... import load_json_resource
from .test_datacube import _get_leaf_node
def test_band_basic(connection, api_version):
cube = connection.load_collection("SENTINEL2_RADIOMETRY_10M")
expected_graph = load_json_resource('data/%s/band0.json' % api_version)
assert cube.band(0).flat_graph() == expected_graph
reset_graphbuilder()
assert cube.band("B02").flat_graph() == expected_graph
def test_indexing_040(con040):
cube = con040.load_collection("SENTINEL2_RADIOMETRY_10M")
expected_graph = load_json_resource('data/0.4.0/band_red.json')
reset_graphbuilder()
assert cube.band("B04").flat_graph() == expected_graph
reset_graphbuilder()
assert cube.band("red").flat_graph() == expected_graph
reset_graphbuilder()
assert cube.band(2).flat_graph() == expected_graph
cube2 = cube.filter_bands(['B04', 'B03'])
expected_graph = load_json_resource('data/0.4.0/band_red_filtered.json')
reset_graphbuilder()
assert cube2.band("B04").flat_graph() == expected_graph
reset_graphbuilder()
assert cube2.band("red").flat_graph() == expected_graph
reset_graphbuilder()
assert cube2.band(0).flat_graph() == expected_graph
def test_indexing_100(con100):
cube = con100.load_collection("SENTINEL2_RADIOMETRY_10M")
expected_graph = load_json_resource('data/1.0.0/band_red.json')
assert cube.band("B04").flat_graph() == expected_graph
assert cube.band("red").flat_graph() == expected_graph
assert cube.band(2).flat_graph() == expected_graph
cube2 = cube.filter_bands(['red', 'green'])
expected_graph = load_json_resource('data/1.0.0/band_red_filtered.json')
assert cube2.band("B04").flat_graph() == expected_graph
assert cube2.band("red").flat_graph() == expected_graph
assert cube2.band(0).flat_graph() == expected_graph
def test_evi(connection, api_version):
cube = connection.load_collection("SENTINEL2_RADIOMETRY_10M")
B02 = cube.band('B02')
B04 = cube.band('B04')
B08 = cube.band('B08')
evi_cube = (2.5 * (B08 - B04)) / ((B08 + 6.0 * B04 - 7.5 * B02) + 1.0)
actual_graph = get_download_graph(evi_cube)
expected_graph = load_json_resource('data/%s/evi_graph.json' % api_version)
assert actual_graph == expected_graph
@pytest.mark.parametrize("process", [
(lambda b2: b2 ** 3.14),
(lambda b2: b2.power(3.14)),
])
def test_power(con100, process):
b2 = con100.load_collection("SENTINEL2_RADIOMETRY_10M").band("B02")
res = process(b2)
assert _get_leaf_node(res) == {
"process_id": "reduce_dimension",
"arguments": {
"data": {"from_node": "loadcollection1"},
"dimension": "bands",
"reducer": {"process_graph": {
"arrayelement1": {
"process_id": "array_element",
"arguments": {"data": {"from_parameter": "data"}, "index": 0},
},
"power1": {
"process_id": "power",
"arguments": {"base": {"from_node": "arrayelement1"}, "p": 3.14},
"result": True}
}}
},
"result": True}
@pytest.mark.parametrize("process", [
(lambda b2: 2 ** b2),
# TODO: non-operator way to express `2 ** b2` band math?
])
def test_power_reverse(con100, process):
b2 = con100.load_collection("SENTINEL2_RADIOMETRY_10M").band("B02")
res = process(b2)
assert _get_leaf_node(res) == {
"process_id": "reduce_dimension",
"arguments": {
"data": {"from_node": "loadcollection1"},
"dimension": "bands",
"reducer": {"process_graph": {
"arrayelement1": {
"process_id": "array_element",
"arguments": {"data": {"from_parameter": "data"}, "index": 0},
},
"power1": {
"process_id": "power",
"arguments": {"base": 2, "p": {"from_node": "arrayelement1"}},
"result": True}
}}
},
"result": True}
def test_db_to_natural(con100):
cube = con100.load_collection("SENTINEL2_RADIOMETRY_10M")
B02 = cube.band('B02')
natural = 10 ** ((B02 * 0.001 - 45) / 10)
expected_graph = load_json_resource('data/1.0.0/db_to_natural.json')
assert natural.flat_graph() == expected_graph
def test_ndvi_udf(connection, api_version):
s2_radio = connection.load_collection("SENTINEL2_RADIOMETRY_10M")
ndvi_coverage = s2_radio.reduce_bands_udf("def myfunction(tile):\n"
" print(tile)\n"
" return tile")
actual_graph = get_download_graph(ndvi_coverage)
expected_graph = load_json_resource('data/%s/udf_graph.json' % api_version)["process_graph"]
assert actual_graph == expected_graph
def test_ndvi_udf_v100(con100):
s2_radio = con100.load_collection("SENTINEL2_RADIOMETRY_10M")
ndvi_coverage = s2_radio.reduce_bands_udf("def myfunction(tile):\n"
" print(tile)\n"
" return tile")
actual_graph = get_download_graph(ndvi_coverage)
expected_graph = load_json_resource('data/1.0.0/udf_graph.json')["process_graph"]
assert actual_graph == expected_graph
@pytest.mark.parametrize(["process", "expected"], [
((lambda b: b + 3), {
"add1": {"process_id": "add", "arguments": {"x": {"from_node": "arrayelement1"}, "y": 3}, "result": True}
}),
((lambda b: 3 + b), {
"add1": {"process_id": "add", "arguments": {"x": 3, "y": {"from_node": "arrayelement1"}}, "result": True}
}),
((lambda b: 3 + b + 5), {
"add1": {"process_id": "add", "arguments": {"x": 3, "y": {"from_node": "arrayelement1"}}},
"add2": {"process_id": "add", "arguments": {"x": {"from_node": "add1"}, "y": 5}, "result": True}
}
),
((lambda b: b - 3), {
"subtract1": {"process_id": "subtract", "arguments": {"x": {"from_node": "arrayelement1"}, "y": 3},
"result": True}
}),
((lambda b: 3 - b), {
"subtract1": {"process_id": "subtract", "arguments": {"x": 3, "y": {"from_node": "arrayelement1"}},
"result": True}
}),
((lambda b: 2 * b), {
"multiply1": {"process_id": "multiply", "arguments": {"x": 2, "y": {"from_node": "arrayelement1"}},
"result": True}
}),
((lambda b: b * 6), {
"multiply1": {"process_id": "multiply", "arguments": {"x": {"from_node": "arrayelement1"}, "y": 6},
"result": True}
}),
((lambda b: -b), {
"multiply1": {"process_id": "multiply", "arguments": {"x": {"from_node": "arrayelement1"}, "y": -1},
"result": True}
}),
((lambda b: b / 8), {
"divide1": {"process_id": "divide", "arguments": {"x": {"from_node": "arrayelement1"}, "y": 8}, "result": True}
}),
])
def test_band_operation(con100, process, expected):
s2 = con100.load_collection("S2")
b = s2.band('B04')
c = process(b)
callback = {"arrayelement1": {
"process_id": "array_element", "arguments": {"data": {"from_parameter": "data"}, "index": 2}
}}
callback.update(expected)
assert c.flat_graph() == {
"loadcollection1": {
"process_id": "load_collection",
"arguments": {"id": "S2", "spatial_extent": None, "temporal_extent": None}
},
"reducedimension1": {
"process_id": "reduce_dimension",
"arguments": {
"data": {"from_node": "loadcollection1"},
"reducer": {"process_graph": callback},
"dimension": "bands",
},
"result": True,
}
}
def test_merge_issue107(con100):
"""https://github.com/Open-EO/openeo-python-client/issues/107"""
s2 = con100.load_collection("S2")
a = s2.filter_bands(['B02'])
b = s2.filter_bands(['B04'])
c = a.merge(b)
flat = c.flat_graph()
# There should be only one `load_collection` node (but two `filter_band` ones)
processes = sorted(n["process_id"] for n in flat.values())
assert processes == ["filter_bands", "filter_bands", "load_collection", "merge_cubes"]
def test_invert_band(connection, api_version):
cube = connection.load_collection("S2")
band = cube.band('B04')
result = (~band)
assert result.flat_graph() == load_json_resource('data/%s/bm_invert_band.json' % api_version)
def test_eq_scalar(connection, api_version):
cube = connection.load_collection("S2")
band = cube.band('B04')
result = (band == 42)
assert result.flat_graph() == load_json_resource('data/%s/bm_eq_scalar.json' % api_version)
def test_gt_scalar(connection, api_version):
cube = connection.load_collection("S2")
band = cube.band('B04')
result = (band > 42)
assert result.flat_graph() == load_json_resource('data/%s/bm_gt_scalar.json' % api_version)
@pytest.mark.parametrize(["operation", "expected"], (
(lambda b: b == 42, "eq"),
(lambda b: b != 42, "neq"),
(lambda b: b > 42, "gt"),
(lambda b: b >= 42, "gte"),
(lambda b: b < 42, "lt"),
(lambda b: b <= 42, "lte"),
))
def test_comparison(connection, api_version, operation, expected):
cube = connection.load_collection("S2")
band = cube.band('B04')
result = operation(band)
assert result.flat_graph() == load_json_resource(
'data/%s/bm_comparison.json' % api_version,
preprocess=lambda data: data.replace("OPERATOR", expected)
)
def test_add_sub_mul_div_scalar(connection, api_version):
cube = connection.load_collection("S2")
band = cube.band('B04')
result = (((band + 42) - 10) * 3) / 2
assert result.flat_graph() == load_json_resource('data/%s/bm_add_sub_mul_div_scalar.json' % api_version)
def test_negative(connection, api_version):
cube = connection.load_collection("S2")
band = cube.band('B04')
result = -band
assert result.flat_graph() == load_json_resource('data/%s/bm_negative.json' % api_version)
def test_add_bands(connection, api_version):
cube = connection.load_collection("S2")
b4 = cube.band("B04")
b3 = cube.band("B03")
result = b4 + b3
assert result.flat_graph() == load_json_resource('data/%s/bm_add_bands.json' % api_version)
def test_add_bands_different_collection(connection, api_version):
if api_version == "0.4.0":
pytest.skip("0.4.0 generates invalid result")
b4 = connection.load_collection("S2").band("B04")
b3 = connection.load_collection("SENTINEL2_RADIOMETRY_10M").band("B02")
with pytest.raises(BandMathException):
# TODO #123 implement band math with bands of different collections
b4 + b3
def test_logical_not_equal(connection, api_version):
s2 = connection.load_collection("SENTINEL2_SCF")
scf_band = s2.band("SCENECLASSIFICATION")
mask = scf_band != 4
actual = get_download_graph(mask)
assert actual == load_json_resource('data/%s/notequal.json' % api_version)
def test_logical_or(connection, api_version):
s2 = connection.load_collection("SENTINEL2_SCF")
scf_band = s2.band("SCENECLASSIFICATION")
mask = (scf_band == 2) | (scf_band == 5)
actual = get_download_graph(mask)
assert actual == load_json_resource('data/%s/logical_or.json' % api_version)
def test_logical_and(connection, api_version):
s2 = connection.load_collection("SENTINEL2_SCF")
b1 = s2.band("SCENECLASSIFICATION")
b2 = s2.band("MSK")
mask = (b1 == 2) & (b2 == 5)
actual = get_download_graph(mask)
assert actual == load_json_resource('data/%s/logical_and.json' % api_version)
def test_merge_cubes_or(connection, api_version):
s2 = connection.load_collection("S2")
b1 = s2.band("B02") > 1
b2 = s2.band("B03") > 2
b1 = b1.linear_scale_range(0, 1, 0, 2)
b2 = b2.linear_scale_range(0, 1, 0, 2)
combined = b1 | b2
actual = get_download_graph(combined)
assert actual == load_json_resource('data/%s/merge_cubes_or.json' % api_version)
def test_merge_cubes_multiple(connection, api_version):
if api_version == "0.4.0":
pytest.skip("doesn't work in 0.4.0")
s2 = connection.load_collection("S2")
b1 = s2.band("B02")
b1 = b1.linear_scale_range(0, 1, 0, 2)
combined = b1 + b1 + b1
actual = get_download_graph(combined)
assert sorted(n["process_id"] for n in actual.values()) == [
"apply", "load_collection",
"merge_cubes", "merge_cubes", "reduce_dimension", "save_result"]
assert actual == load_json_resource('data/%s/merge_cubes_multiple.json' % api_version)
def test_merge_cubes_no_resolver(connection, api_version):
s2 = connection.load_collection("S2")
mask = connection.load_collection("MASK")
merged = s2.merge(mask)
assert merged.flat_graph() == load_json_resource('data/%s/merge_cubes_no_resolver.json' % api_version)
def test_merge_cubes_max_resolver(connection, api_version):
s2 = connection.load_collection("S2")
mask = connection.load_collection("MASK")
merged = s2.merge(mask, overlap_resolver="max")
assert merged.flat_graph() == load_json_resource('data/%s/merge_cubes_max.json' % api_version)
def test_fuzzy_mask(connection, api_version):
s2 = connection.load_collection("SENTINEL2_SCF")
scf_band = s2.band("SCENECLASSIFICATION")
clouds = scf_band == 4
fuzzy = clouds.apply_kernel(kernel=0.1 * np.ones((3, 3)))
mask = fuzzy > 0.3
assert mask.flat_graph() == load_json_resource('data/%s/fuzzy_mask.json' % api_version)
def test_fuzzy_mask_band_math(connection, api_version):
s2 = connection.load_collection("SENTINEL2_SCF")
scf_band = s2.band("SCENECLASSIFICATION")
clouds = scf_band == 4
fuzzy = clouds.apply_kernel(kernel=0.1 * np.ones((3, 3)))
mask = fuzzy.add_dimension("bands", "mask", "bands").band("mask") > 0.3
assert mask.flat_graph() == load_json_resource('data/%s/fuzzy_mask_add_dim.json' % api_version)
def test_normalized_difference(connection, api_version):
cube = connection.load_collection("S2")
nir = cube.band("B08")
red = cube.band("B04")
result = nir.normalized_difference(red)
assert result.flat_graph() == load_json_resource('data/%s/bm_nd_bands.json' % api_version)
def test_ln(con100):
result = con100.load_collection("S2").band('B04').ln()
assert result.flat_graph() == load_json_resource('data/1.0.0/bm_ln.json' )
def test_log10(con100):
result = con100.load_collection("S2").band('B04').log10()
assert result.flat_graph() == load_json_resource('data/1.0.0/bm_log.json')
def test_log2(con100):
result = con100.load_collection("S2").band('B04').log2()
assert result.flat_graph() == load_json_resource(
'data/1.0.0/bm_log.json',
preprocess=lambda s: s.replace('"base": 10', '"base": 2')
)
def test_log3(con100):
result = con100.load_collection("S2").band('B04').logarithm(base=3)
assert result.flat_graph() == load_json_resource(
'data/1.0.0/bm_log.json',
preprocess=lambda s: s.replace('"base": 10', '"base": 3')
)
```
#### File: tests/udf/test_udf_data.py
```python
import numpy
import pytest
import xarray
from geopandas import GeoDataFrame
from shapely.geometry import Point
from openeo.udf import StructuredData, UdfData, XarrayDataCube, FeatureCollection
def test_structured_data_list():
sd1 = StructuredData([1, 2, 3, 5, 8])
sd2 = StructuredData({"a": [3, 5], "b": "red"})
udf_data = UdfData(structured_data_list=[sd1, sd2])
assert udf_data.to_dict() == {
"datacubes": None,
"feature_collection_list": None,
"structured_data_list": [
{"data": [1, 2, 3, 5, 8], "description": "list", "type": "list"},
{"data": {"a": [3, 5], "b": "red"}, "description": "dict", "type": "dict"}
],
"proj": None,
"user_context": {}
}
assert repr(udf_data) \
== '<UdfData datacube_list:None feature_collection_list:None structured_data_list:[<StructuredData with list>, <StructuredData with dict>]>'
def test_datacube_list():
xa = xarray.DataArray(numpy.zeros((2, 3)), coords={"x": [1, 2], "y": [3, 4, 5]}, dims=("x", "y"), name="testdata")
cube = XarrayDataCube(xa)
udf_data = UdfData(datacube_list=[cube], user_context={"kernel": 3})
assert udf_data.to_dict() == {
"datacubes": [
{
"id": "testdata",
"data": [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
"dimensions": [
{"name": "x", "coordinates": [1, 2]},
{"name": "y", "coordinates": [3, 4, 5]}
],
}],
"feature_collection_list": None,
"structured_data_list": None,
"proj": None,
"user_context": {"kernel": 3}
}
assert repr(udf_data) \
== '<UdfData datacube_list:[<XarrayDataCube shape:(2, 3)>] feature_collection_list:None structured_data_list:None>'
@pytest.mark.skipif(GeoDataFrame is None, reason="Requires geopandas")
def test_feature_collection_list():
data = GeoDataFrame({"a": [1, 4], "b": [2, 16]}, geometry=[Point(1, 2), Point(3, 5)])
fc = FeatureCollection(id="test", data=data)
udf_data = UdfData(feature_collection_list=[fc])
assert udf_data.to_dict() == {
'datacubes': None,
'feature_collection_list': [{
'data': {
'type': 'FeatureCollection',
'features': [
{'id': '0', 'type': 'Feature', 'geometry': {'coordinates': (1.0, 2.0), 'type': 'Point'},
'properties': {'a': 1, 'b': 2}, 'bbox': (1.0, 2.0, 1.0, 2.0), },
{'id': '1', 'type': 'Feature', 'geometry': {'coordinates': (3.0, 5.0), 'type': 'Point'},
'properties': {'a': 4, 'b': 16}, 'bbox': (3.0, 5.0, 3.0, 5.0), }
],
'bbox': (1.0, 2.0, 3.0, 5.0),
},
'id': 'test'}
],
'structured_data_list': None,
'proj': None,
'user_context': {}
}
assert repr(udf_data) \
== '<UdfData datacube_list:None feature_collection_list:[<FeatureCollection with GeoDataFrame>] structured_data_list:None>'
def test_udf_data_from_dict_empty():
udf_data = UdfData.from_dict({})
assert udf_data.to_dict() == {
'datacubes': None,
"feature_collection_list": None,
'structured_data_list': None,
'proj': None, 'user_context': {},
}
assert repr(udf_data) \
== "<UdfData datacube_list:[] feature_collection_list:[] structured_data_list:[]>"
def test_udf_data_from_dict_structured_data():
udf_data = UdfData.from_dict({"structured_data_list": [{"data": [1, 2, 3]}]})
assert udf_data.to_dict() == {
'datacubes': None,
"feature_collection_list": None,
'structured_data_list': [{"data": [1, 2, 3], "type": "list", "description": "list"}],
'proj': None, 'user_context': {},
}
assert repr(udf_data) \
== "<UdfData datacube_list:[] feature_collection_list:[] structured_data_list:[<StructuredData with list>]>"
def test_udf_data_from_dict_datacube():
udf_data = UdfData.from_dict({
"datacubes": [
{"data": [1, 2, 3], "dimensions": [{"name": "x"}]}
]
})
assert udf_data.to_dict() == {
'datacubes': [{"data": [1, 2, 3], "dimensions": [{"name": "x"}]}],
"feature_collection_list": None,
'structured_data_list': None,
'proj': None, 'user_context': {},
}
assert repr(udf_data) \
== "<UdfData datacube_list:[<XarrayDataCube shape:(3,)>] feature_collection_list:[] structured_data_list:[]>"
```
|
{
"source": "JDrit/AnomalyDetection",
"score": 3
}
|
#### File: JDrit/AnomalyDetection/parser.py
```python
import datetime
import time
import sys
def readFile(input_file, output_file):
output = open(output_file, 'w')
f = open(input_file) # the xml file to read from
start = False
pageTitle = '' # article's title
links = []
count = 0
line = f.readline()
while not line == '':
if '<text' in line or start:
start = True
while '[[' in line:
link = line[line.find('[[') + 2:line.find(']]')]
if '|' in link: # removes the second part of the link
link = link[:link.find('|')]
if '#' in link: # this removes the href second part of a link
link = link[:link.find('#')]
if ':' not in link: # if it has a ':', it is a file or something
if link != '':
link = link[0].upper() + link[1:] # uppercases the first letter
links.append(link)
line = line[line.find(']]') + 2:]
if '<title>' in line:
pageTitle = line[11:-9]
if '</text>' in line:
count += 1
output.write(pageTitle + "\t" + "\t".join(links) + "\n")
start = False
links = []
if count % 1000000 == 0:
print(format(count, ",d") + " done.")
line = f.readline()
f.close()
output.close()
print('element count: ' + format(count, ",d"))
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Need to specify the input and output files\n python %s [input] [output]" % sys.argv[0])
sys.exit(1)
start = time.perf_counter()
readFile(sys.argv[1], sys.argv[2])
end = time.perf_counter()
print('time diff: ' + str(datetime.timedelta(seconds=(end - start))))
```
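The inner `[[...]]` loop is the core of the parser; the following minimal sketch (the `extract_links` helper is not part of the original script) applies the same slicing logic to a single line of wikitext:

```python
def extract_links(line):
    """Extract wiki link targets from one line, mirroring readFile's inner loop."""
    links = []
    while '[[' in line:
        link = line[line.find('[[') + 2:line.find(']]')]
        if '|' in link:       # drop the display text after the pipe
            link = link[:link.find('|')]
        if '#' in link:       # drop the section anchor
            link = link[:link.find('#')]
        if ':' not in link and link != '':
            links.append(link[0].upper() + link[1:])  # uppercase the first letter
        line = line[line.find(']]') + 2:]
    return links

print(extract_links("See [[graph theory|graphs]] and [[tree (data structure)#Terminology]]."))
# ['Graph theory', 'Tree (data structure)']
```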
|
{
"source": "jdro10/ESTG-ML-Spam-Detection",
"score": 2
}
|
#### File: ESTG-ML-Spam-Detection/rest_api/app.py
```python
from flask import Flask
from flask import request
from rest_api.controllers.controller import *
app = Flask(__name__)
@app.route('/')
def index():
return index_test()
@app.route('/evaluateSpam', methods=['POST'])
def spam_or_not():
data = request.data
string_decoded = data.decode('UTF-8')
return spam_or_not_spam(string_decoded)
if __name__ == "__main__":
app.run(debug=True)
```
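A rough client-side sketch of calling this service (assuming the Flask development server above is running on its default host and port, and that `spam_or_not_spam` returns a serializable verdict):

```python
import requests

# The endpoint reads the raw request body and decodes it as UTF-8,
# so the message is sent as plain text rather than JSON.
resp = requests.post(
    "http://127.0.0.1:5000/evaluateSpam",
    data="Congratulations! You have won a free prize, click here".encode("utf-8"),
)
print(resp.status_code, resp.text)
```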
|
{
"source": "jdro10/ESTG-PF-AcessoAutomaticoParqueAutomovel",
"score": 3
}
|
#### File: plate_detection/detection_algorithm/menu.py
```python
import os
from threading import Thread
from park_access import ParkAccess
from alpr_exception import AlprException
from plate_detection import PlateDetection
class Menu:
def __init__(self):
self.entry_park_lights = None
self.exit_park_lights = None
self.entry_plate_detection = None
self.exit_plate_detection = None
def menu(self):
try:
print("DETEÇÃO DE MATRÍCULAS")
print("1 - Deteção de matrículas na entrada")
print("2 - Deteção de matrículas na saída")
print("0 - Sair")
print("Introduza a sua escolha (1/2/0): ", end='')
choice = input()
if choice == '1':
try:
self.entry_park_lights = ParkAccess("entry_queue_p")
self.entry_plate_detection = PlateDetection("entry_queue", '192.168.1.65', 9000, "Entrada")
Thread(target=self.entry_plate_detection.read_stream).start()
Thread(target=self.entry_park_lights.loop).start()
print("\nDeteção de matrículas iniciada na entrada.\n")
except AlprException:
print("\nERRO: Verifique se o OpenALPR está corretamente instalado.\n")
print("Programa terminado.\n\n")
os._exit(0)
except ConnectionRefusedError:
print("\nERRO: Verifique a ligação de rede com o computador responsável pela captura de imagens.\n")
print("Programa terminado.\n\n")
os._exit(0)
except IOError as e:
if e.errno == 101:
print("\nERRO: Verifique a sua conexão à rede.\n")
print("Programa terminado.\n\n")
os._exit(0)
elif choice == '2':
try:
self.exit_park_lights = ParkAccess("exit_queue_p")
self.exit_plate_detection = PlateDetection("exit_queue", '192.168.1.65', 9001, "Saida")
Thread(target=self.exit_plate_detection.read_stream).start()
Thread(target=self.exit_park_lights.loop).start()
print("\nDeteção de matrículas iniciada na saída.\n")
except AlprException:
print("\nERRO: Verifique se o OpenALPR está corretamente instalado.\n")
print("Programa terminado.\n\n")
os._exit(0)
except ConnectionRefusedError:
print("\nERRO: Verifique a ligação de rede com o computador responsável pela captura de imagens.\n")
print("Programa terminado.\n\n")
os._exit(0)
except IOError as e:
if e.errno == 101:
print("\nERRO: Verifique a sua conexão à rede.\n")
print("Programa terminado.\n\n")
os._exit(0)
elif choice == '0':
print("Programa terminado.\n\n")
os._exit(0)
else:
print("Escolha inválida.")
print("Programa terminado.\n\n")
os._exit(0)
end_program = None
print("Para terminar o programa, introduza um input.")
end_program = input()
if end_program != None:
print("Programa terminado.\n\n")
os._exit(0)
except KeyboardInterrupt:
print("Programa terminado.\n\n")
os._exit(0)
if __name__ == "__main__":
menu = Menu()
menu.menu()
```
|
{
"source": "jdrober9/wemake-python-styleguide",
"score": 2
}
|
#### File: test_ast/test_compares/test_heterogenous_compare.py
```python
import pytest
from wemake_python_styleguide.violations.best_practices import (
HeterogenousCompareViolation,
)
from wemake_python_styleguide.visitors.ast.compares import CompareSanityVisitor
@pytest.mark.parametrize('code', [
'x > y < z',
'x >= y < z',
'x > y <= z',
'x >= y <= z',
'x < y > z',
'x <= y > z',
'x < y >= z',
'x <= y >= z',
'x > y != 0',
'x < y == 0',
'x >= y != 0',
'x <= y == 0',
'x == y != z',
'long == x == y >= z',
'call() != attr.prop in array',
'item not in array == value',
])
def test_heterogenous_compare(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that compares with diffrent operators raise."""
tree = parse_ast_tree(code)
visitor = CompareSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [HeterogenousCompareViolation])
@pytest.mark.parametrize('code', [
'x == y == z',
'z != y != x',
'call() == other.prop',
'x in y',
'x not in y',
])
def test_correct_compare_operators(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that compares work well."""
tree = parse_ast_tree(code)
visitor = CompareSanityVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
```
|
{
"source": "jdrodriguezh/flask-template",
"score": 3
}
|
#### File: app/cars/routes.py
```python
from flask import Blueprint, request
from app import db
from app.models import CarsModel
cars_api = Blueprint('cars_api', __name__)
@cars_api.route('/cars', methods=['POST', 'GET'])
def handle_cars():
if request.method == 'POST':
if request.is_json:
data = request.get_json()
new_car = CarsModel(
name=data['name'], model=data['model'], doors=data['doors'])
db.session.add(new_car)
db.session.commit()
return {"message": f"car {new_car.name} has been created successfully."}
else:
return {"error": "The request payload is not in JSON format"}
elif request.method == 'GET':
cars = CarsModel.query.all()
results = [
{
"name": car.name,
"model": car.model,
"doors": car.doors
} for car in cars]
return {"count": len(results), "cars": results}
@cars_api.route('/cars/<car_id>', methods=['GET', 'PUT', 'DELETE'])
def handle_car(car_id):
car = CarsModel.query.get_or_404(car_id)
if request.method == 'GET':
response = {
"name": car.name,
"model": car.model,
"doors": car.doors
}
return {"message": "success", "car": response}
elif request.method == 'PUT':
data = request.get_json()
car.name = data['name']
car.model = data['model']
car.doors = data['doors']
db.session.add(car)
db.session.commit()
return {"message": f"car {car.name} successfully updated"}
elif request.method == 'DELETE':
db.session.delete(car)
db.session.commit()
return {"message": f"Car {car.name} successfully deleted."}
```
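A hypothetical client-side walk through the CRUD flow these handlers expose (note that `create_app` below mounts the blueprint with `url_prefix='/cars'`, so the effective path is `/cars/cars`; the id `1` is only an illustrative guess for the row just created):

```python
import requests

BASE = "http://127.0.0.1:5000/cars/cars"  # url_prefix '/cars' + route '/cars'

# Create a car, then list, fetch, update, and delete it
requests.post(BASE, json={"name": "Corolla", "model": "2020", "doors": 4})
print(requests.get(BASE).json())          # {"count": ..., "cars": [...]}

car_id = 1                                # hypothetical id of the new row
print(requests.get(f"{BASE}/{car_id}").json())
requests.put(f"{BASE}/{car_id}", json={"name": "Corolla", "model": "2021", "doors": 4})
requests.delete(f"{BASE}/{car_id}")
```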
#### File: app/employees/routes.py
```python
from flask import Blueprint
employees_api = Blueprint('employees_api', __name__)
@employees_api.route('/employees', methods=['GET'])
def hello():
return "This is the employees"
```
#### File: flask-template/app/__init__.py
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import config
db = SQLAlchemy()
# Create Application factory
def create_app(config_env=""):
app = Flask(__name__)
if not config_env:
config_env = app.env
# This could be changed dynamically with an env variable
# "config.{}Config".format(config_env.capitalize())
app.config.from_object(config.DevelopmentConfig)
db.init_app(app)
from app.cars.routes import cars_api
from app.employees.routes import employees_api
from app.main.routes import main_api
app.register_blueprint(cars_api, url_prefix='/cars')
app.register_blueprint(employees_api, url_prefix='/employees')
app.register_blueprint(main_api)
CORS(app)
return app
```
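A minimal entry point for this application factory might look like the sketch below (the module name `run.py` is an assumption; the template itself does not ship one):

```python
# run.py - assumed entry point for the factory defined in app/__init__.py
from app import create_app

app = create_app()  # falls back to app.env when no config_env is passed

if __name__ == "__main__":
    # Serves the blueprints registered in create_app (cars, employees, main)
    app.run(host="127.0.0.1", port=5000, debug=True)
```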
#### File: app/main/routes.py
```python
from flask import Blueprint
main_api = Blueprint('main_api', __name__)
@main_api.route('/')
def hello():
return {"hello": "world"}
```
#### File: flask-template/tests/test_main.py
```python
import unittest
from flask import Flask
from app.main.routes import main_api
from app import create_app
test_app = create_app()
test_app.register_blueprint(main_api, name="test_main_api")
class TestMainBlueprint(unittest.TestCase):
def setUp(self):
# happens before each test
self.app = test_app.test_client()
def tearDown(self):
# happens after each test
return
def test_home_route(self):
resp = self.app.get("/")
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JDRomano2/comptox_ai",
"score": 3
}
|
#### File: comptox_ai/db/graph_db.py
```python
from logging import warn
import os
import warnings
from pathlib import Path
from neo4j.api import Version
from yaml import load, Loader
from dataclasses import dataclass
from typing import List, Dict
from textwrap import dedent
import ipdb
from neo4j import GraphDatabase
from neo4j.exceptions import ClientError, AuthError, CypherSyntaxError, ServiceUnavailable
import comptox_ai.db
def _get_default_config_file():
root_dir = Path(__file__).resolve().parents[2]
if os.path.exists(os.path.join(root_dir, 'CONFIG.yaml')):
default_config_file = os.path.join(root_dir, 'CONFIG.yaml')
else:
default_config_file = os.path.join(root_dir, 'CONFIG-default.yaml')
return default_config_file
@dataclass
class Metagraph:
"""
A metagraph containing the node types and relationship types (and their
connectivity) in the overall graph database.
Parameters
----------
node_labels : list of str
A list of node labels in the graph database.
node_label_counts : dict of int
A mapping from all node labels in `node_labels` to the corresponding number
of nodes of that type in the graph database.
relationship_types : list of str
A list of relationship types in the graph database.
relationship_path_schema : dict of list
A mapping from each relationship type present in the graph database to a
list describing valid (subject, object) pairs (in other words, all existing
'from' node types and 'to' node types linked by the given relationship).
"""
node_labels: List[str]
node_label_counts: Dict[str, int]
relationship_types: List[str]
relationship_path_schema: Dict[str, Dict[str, int]]
class Graph(object):
"""
A Neo4j graph, as defined by the Neo4j Graph Data Science Library. In
general, users shouldn't instantiate this class directly - let a GraphDB
instance handle that for you instead.
See https://neo4j.com/docs/graph-data-science/current/management-ops/.
Parameters
----------
parent_db : comptox_ai.db.GraphDb
A Neo4j graph database object, as defined in ComptoxAI.
name : str
A name that can be used to identify the graph.
"""
def __init__(self, parent_db, name):
self._db = parent_db
self.name = name
def _validate_config_file_contents(conf):
"""Validate the contents of a ComptoxAI config file.
If anything doesn't pass the validations, a warning is displayed and this
function returns False. Otherwise, no warning is given and the function
returns True.
"""
if 'neo4j' not in conf:
warnings.warn("Warning: No `neo4j` block found in config file")
return False
for val in ['username', 'password', 'hostname', 'port']:
if (val not in conf['neo4j']):
warnings.warn("Warning: No value given for `{0}`".format(val))
return False
return True
class GraphDB(object):
"""
A Neo4j graph database containing ComptoxAI graph data.
Parameters
----------
config_file : str, default None
Relative path to a config file containing a "NEO4J" block, as described
below. If None, ComptoxAI will look in the ComptoxAI root directory for
either a "CONFIG.cfg" file or "CONFIG-default.cfg", in that order. If no
config file can be found in any of those locations, an exception will be
raised.
verbose: bool, default True
Sets verbosity to on or off. If True, status information will be returned
to the user occasionally.
"""
def __init__(self, config_file=None, verbose=False, username=None, password=None, hostname=None):
self.is_connected = False
self.verbose = verbose
if not config_file:
if hostname:
self.config_file = None
self.username = username
self.password = password
self.hostname = hostname
else:
self.config_file = _get_default_config_file()
else:
self.config_file = config_file
self._connect()
self.exporter = comptox_ai.db.GraphExporter(self)
def __repr__(self):
return(
dedent(f"""\
------------------------
ComptoxAI graph database
------------------------
Hostname: {self.hostname}
Username: {self.username}
Statistics
----------
Node count: {self.graph_stats['nodeCount']}
Edge count: {self.graph_stats['relCount']}
Node type count: {self.graph_stats['labelCount']}
Edge type count: {self.graph_stats['relTypeCount']}""")
)
def _connect(self):
if self.config_file is not None:
try:
with open(self.config_file, 'r') as fp:
cnf = load(fp, Loader=Loader)
except FileNotFoundError as e:
raise RuntimeError("Config file not found at the specified location - using default configuration")
if not _validate_config_file_contents(cnf):
raise RuntimeError("Config file has an invalid format. Please see `CONFIG-default.yaml`.")
username = cnf['neo4j']['username']
password = cnf['neo4j']['password']
hostname = cnf['neo4j']['hostname']
else:
username = self.username
password = self.password
hostname = self.hostname
if hostname == 'localhost':
uri = "bolt://localhost:7687"
else:
uri = f"neo4j://{hostname}"
# Create the graph database driver
try:
if (username is None) and (password is None):
self._driver = GraphDatabase.driver(uri)
else:
self._driver = GraphDatabase.driver(uri, auth=(username, password))
except AuthError as e:
raise RuntimeError("Could not find a database using the configuration provided.")
# Test the connection to make sure we are connected to a database
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "The configuration may change in the future.")
conn_result = self._driver.verify_connectivity()
except ServiceUnavailable:
raise RuntimeError("Neo4j driver created but we couldn't connect to any routing servers. You might be using an invalid hostname.")
except ValueError:
raise RuntimeError("Neo4j driver created but the host address couldn't be resolved. Check your hostname, port, and/or protocol.")
if (conn_result is None):
raise RuntimeError("Neo4j driver created but a valid connection hasn't been established. You might be using an invalid hostname.")
self.graph_stats = self.get_graph_statistics()
def _disconnect(self):
self._driver.close()
@staticmethod
def _run_transaction(tx, query):
result = tx.run(query)
return result.data()
def run_cypher(self, qry_str, verbose=True):
"""
Execute a Cypher query on the Neo4j graph database.
The query is executed inside a write transaction on the active driver session.
Parameters
----------
qry_str : str
A string containing the Cypher query to run on the graph database server.
Returns
-------
list
The data returned in response to the Cypher query.
Examples
--------
>>> from comptox_ai.db import GraphDB
>>> g = GraphDB()
>>> g.run_cypher("MATCH (c:Chemical) RETURN COUNT(c) AS num_chems;")
[{'num_chems': 719599}]
"""
with self._driver.session() as session:
if self.verbose or verbose:
print(f"Writing Cypher transaction: \n {qry_str}")
try:
res = session.write_transaction(self._run_transaction, qry_str)
except CypherSyntaxError as e:
warnings.warn("Neo4j returned a Cypher syntax error. Please check your query and try again.")
print(f"\nThe original error returned by Neo4j is:\n\n {e}")
return None
return res
def get_graph_statistics(self):
"""Fetch statistics for the connected graph database.
This method essentially calls APOC.meta.stats(); and formats the output.
Returns
-------
dict
Dict of statistics describing the graph database.
Raises
------
RuntimeError
If not currently connected to a graph database or the APOC.meta
procedures are not installed/available.
"""
qry = "CALL apoc.meta.stats();"
response = self.run_cypher(qry)
assert len(response) == 1
response = response[0]
stats = {k:response[k] for k in ('nodeCount', 'relCount', 'labelCount', 'relTypeCount') if k in response}
return stats
def fetch(self, field, operator, value, what='both', register_graph=True,
negate=False, query_type='cypher', **kwargs):
"""
Create and execute a query to retrieve nodes, edges, or both.
Warnings
--------
This function is incomplete and should not be used until we can fix its
behavior. Specifically, Neo4j's GDS library does not support non-numeric
node or edge properties in any of its graph catalog-related subroutines.
Parameters
----------
field : str
A property label.
what : {'both', 'nodes', edges'}
The type of objects to fetch from the graph database. Note that this
functions independently from any subgraph registered in Neo4j during
query execution - if `register_graph` is `True`, an induced subgraph
will be registered in the database, but the components returned by this
method call may be only the nodes or edges contained in that subgraph.
filter : str
'Cypher-like' filter statement, equivalent to a `WHERE` clause used in
a Neo4j Cypher query (analogous to SQL `WHERE` clauses).
query_type : {'cypher', 'native'}
Whether to create a graph using a Cypher projection or a native
projection. The 'standard' approach is to use a Cypher projection, but
native projections can be (a.) more highly performant and (b.) easier
for creating very large subgraphs (e.g., all nodes of several or more
types that exist in all of ComptoxAI). See "Notes", below, for more
information, as well as https://neo4j.com/docs/graph-data-science/current/management-ops/graph-catalog-ops/#catalog-graph-create.
"""
raise NotImplementedError("Error: GraphDB.fetch() not yet implemented - see documentation notes.")
if query_type == 'cypher':
new_graph = self.build_graph_cypher_projection()
elif query_type == 'native':
new_graph = self.build_graph_native_projection()
else:
raise ValueError("'query_type' must be either 'cypher' or 'native'")
# consume results
# (optionally) build Graph object
# Return results to user
def find_node(self, name=None, properties=None):
"""
Find a single node either by name or by property filter(s).
"""
if name:
# search by name
query = "MATCH (n {{ commonName: \"{0}\" }}) RETURN n LIMIT 1;".format(name)
else:
if not properties:
raise ValueError("Error: Must provide a value for `name` or `properties`.")
# search by properties
# first, separate out properties with special meaning (e.g., `id`)
# then, construct a MATCH clause using the remaining properties
# (string property values are wrapped in quotes in the Cypher literal)
prop_string = ", ".join([f"{k}: '{v}'" if type(v) == str else f"{k}: {v}" for k, v in properties.items()])
match_clause = f"MATCH (n {{ {prop_string} }})"
# assemble the complete query
query = f"{match_clause} RETURN n;"
node_response = self.run_cypher(query)
if len(node_response) < 1:
warnings.warn("Warning: No node found matching the query you provided.")
return False
elif len(node_response) > 1:
warnings.warn("Warning: Multiple nodes found for query - only returning one (see `find_nodes` if you want all results).")
return node_response[0]['n']
def find_nodes(self, properties={}, node_types=[]):
"""
Find multiple nodes by node properties and/or labels.
Parameters
----------
properties : dict
Dict of property values to match in the database query. Each key of
`properties` should be a (case-sensitive) node property, and each value
should be the value of that property (case- and type-sensitive).
node_types : list of str
Case sensitive list of strings representing node labels (node types) to
include in the results. Two or more node types in a single query may
significantly increase runtime. When multiple node labels are given, the
results will be the union of the property query applied to each node type.
Returns
-------
generator of dict
A generator containing dict representations of nodes matching the given
query.
Notes
-----
The value returned in the event of a successful query can be extremely
large. To improve performance, the results are returned as a generator
rather than a list.
"""
if (not properties) and (len(node_types) == 0):
raise ValueError("Error: Query must contain at least one node property or node type.")
if not properties:
warnings.warn("Warning: No property filters given - the query result may be very large!")
prop_string = ", ".join([f"{k}: '{v}'" if type(v) == str else f"{k}: {v}" for k, v in properties.items()])
# Use a WHERE clause when multiple node types are given
if len(node_types) == 1:
# Only 1 node label - include it in the MATCH clause
match_clause = f"MATCH (n:{node_types[0]} {{ {prop_string} }})"
where_clause = ""
elif len(node_types) > 1:
# Multiple node labels - include them in the WHERE clause
match_clause = f"MATCH (n {{ {prop_string} }})"
where_clause = " WHERE n:"+" OR n:".join(node_types)
else:
# No node labels - just use bare MATCH clause and no WHERE clause
match_clause = f"MATCH (n {{ {prop_string} }})"
where_clause = ""
query = match_clause + where_clause + " RETURN n;"
print(query)
nodes_response = self.run_cypher(query)
return (n['n'] for n in nodes_response)
def find_relationships(self):
"""
Find relationships by subject/object nodes and/or relationship type.
"""
pass
def build_graph_native_projection(self, graph_name, node_types,
relationship_types="all", config_dict=None):
"""
Create a new graph in the Neo4j Graph Catalog via a native projection.
Parameters
----------
graph_name : str
A (string) name for identifying the new graph. If a graph already exists
with this name, a ValueError will be raised.
node_types : str, list of str, or dict
Node projection for the new graph. This can be either a single node
label, a list of node labels, or a node projection dict (see Notes below).
Notes
-----
ComptoxAI is meant to hide the implementation and usage details of graph
databases from the user, but some advanced features do expose the syntax
used in the Neo4j and MongoDB internals. This is especially true when
building graph projections in the graph catalog. The following components are specified using Neo4j GDS conventions:
NODE PROJECTIONS:
*(corresponding argument: `node_proj`)*
Node projections take the following format::
{
<node-label-1>: {
label: <neo4j-label>,
properties: <node-property-mappings>
},
<node-label-2>: {
label: <neo4j-label>,
properties: <node-property-mappings>
},
// ...
<node-label-n>: {
label: <neo4j-label>,
properties: <node-property-mappings>
}
}
where ``node-label-i`` is a name for a node label in the projected graph
(it can be the same as or different from the label already in neo4j),
``neo4j-label`` is a node label to match against in the graph database, and
``node-property-mappings`` are filters against Neo4j node properties, as
defined below.
NODE PROPERTY MAPPINGS:
RELATIONSHIP PROJECTIONS:
Examples
--------
>>> g = GraphDB()
>>> g.build_graph_native_projection(
graph_name = "g1",
node_types = ['Gene', 'StructuralEntity'],
relationship_types = "all"
)
>>>
"""
create_graph_query_template = """
CALL gds.graph.create({0},{1},{2}{3})
YIELD graphName, nodeCount, relationshipCount, createMillis;
"""[1:-1]
graph_name_str = "'{0}'".format(graph_name)
node_proj_str = self._make_node_projection_str(node_types)
# relationship_proj_str = "'{0}'".format(relationship_proj)
relationship_proj_str = self._make_node_projection_str(relationship_types)
#config_dict_str = str(config_dict)
if config_dict is None:
config_dict_str = ""
else:
config_dict_str = ", {0}".format(str(config_dict))
create_graph_query = create_graph_query_template.format(
graph_name_str,
node_proj_str,
relationship_proj_str,
config_dict_str
)
if self.verbose:
print(create_graph_query)
res = self.run_cypher(create_graph_query)
return res
def build_graph_cypher_projection(self, graph_name, node_query,
relationship_query, config_dict=None):
"""
Create a new graph in the Neo4j Graph Catalog via a Cypher projection.
Examples
--------
>>> g = GraphDB()
>>> g.build_graph_cypher_projection(...)
>>>
"""
create_graph_query_template = """
CALL gds.graph.create.cypher({0},{1},{2}{3})
YIELD graphName, nodeCount, relationshipCount, createMillis;
"""[1:-1]
graph_name_str = "'{0}'".format(graph_name)
node_query_str = "'{0}'".format(node_query)
relationship_query_str = "'{0}'".format(relationship_query)
if config_dict is None:
config_dict_str = ""
else:
config_dict_str = ", configuration: {0}".format(str(config_dict))
create_graph_query = create_graph_query_template.format(
graph_name_str,
node_query_str,
relationship_query_str,
config_dict_str
)
if self.verbose:
print(create_graph_query)
res = self.run_cypher(create_graph_query)
return res
def _make_node_projection_str(self, node_proj_arg):
if isinstance(node_proj_arg, str):
# We need to wrap any string in double quotes
if node_proj_arg == 'all':
return '"*"'
# e.g., 'Chemical'
return f'"{node_proj_arg}"'
elif isinstance(node_proj_arg, list):
# e.g., ['Chemical', 'Disease']
return '{0}'.format(str(node_proj_arg))
elif isinstance(node_proj_arg, dict):
return
def _make_rel_projection_str(self, rel_proj_arg):
pass
def fetch_nodes(self, node_type, property, values):
"""
Fetch nodes by node property value.
Allows users to filter by a single node type (i.e., ontology class).
Parameters
----------
node_type : str
Node type on which to filter all results. Can speed up queries
significantly.
property : str
Node property to match against.
values : str or list
Value or list of values on which to match `property`.
Returns
-------
list of dict
Each element in the list corresponds to a single node. If no matches are
found in the database, an empty list will be returned.
"""
def fetch_chemical_list(self, list_name):
"""
Fetch all chemicals that are members of a chemical list.
Parameters
----------
list_name : str
Name (or acronym) corresponding to a Chemical List in ComptoxAI's graph
database.
Returns
-------
list_data : dict
Metadata corresponding to the matched list
chemicals : list of dict
Chemical nodes that are members of the chemical list
"""
res = self.run_cypher(f"MATCH (l:ChemicalList {{ listAcronym: \"{list_name}\" }})-[:LISTINCLUDESCHEMICAL]->(c:Chemical) RETURN l, c")
return (res[0]['l'], [r['c'] for r in res])
def fetch_node_type(self, node_label):
"""
Fetch an entire class of nodes from the Neo4j graph database.
Parameters
----------
node_label : str
Node label corresponding to a class of entities in the database.
Returns
-------
generator of dict
Warnings
--------
Since many entities may be members of a single class, users are cautioned
that this method may take a very long time to run and/or be very demanding
on computing resources.
"""
res = self.run_cypher(f"MATCH (n:{node_label}) RETURN n;")
return (r['n'] for r in res)
def fetch_relationships(self, relationship_type, from_label, to_label):
"""
Fetch edges (relationships) from the Neo4j graph database.
"""
res = self.run_cypher(f"MATCH (s:{from_label})-[r:{relationship_type}]->(o:{to_label}) RETURN s, r, o;")
return ((r['r'][0]['uri'], r['r'][1], r['r'][2]['uri']) for r in res)
def get_metagraph(self):
"""
Examine the graph and construct a metagraph, which describes all of the
node types and relationship types in the overall graph database.
Notes
-----
We currently don't run this upon GraphDB instantiation, but it may be
prudent to start doing that at some point in the future. It's not an
extremely quick operation, but it's also not prohibitively slow.
"""
meta = self.run_cypher("CALL apoc.meta.graph();")[0]
node_labels = [n['name'] for n in meta['nodes']]
node_label_counts = dict([(n['name'], n['count']) for n in meta['nodes']])
rel_types = []
rel_path_schema = dict()
for r in meta['relationships']:
if r[1] not in rel_types:
rel_types.append(r[1])
rel_path_schema[r[1]] = []
rel_path_schema[r[1]].append({
'from': r[0]['name'],
'to': r[2]['name']
})
metagraph = Metagraph(
node_labels=node_labels,
node_label_counts=node_label_counts,
relationship_types=rel_types,
relationship_path_schema=rel_path_schema
)
return metagraph
def list_existing_graphs(self):
"""
Fetch a list of projected subgraphs stored in the GDS graph catalog.
Returns
-------
list
A list of graphs in the GDS graph catalog. If no graphs exist, this will
be the empty list ``[]``.
"""
graphs = self.run_cypher("CALL gds.graph.list();")
if self.verbose:
if len(graphs) == 0:
print("Graph catalog is currently empty.")
else:
print("Number of graphs currently in GDS graph catalog: {0}".format(len(graphs)))
return graphs
def drop_existing_graph(self, graph_name):
"""
Delete a single graph from the GDS graph catalog by graph name.
Parameters
----------
graph_name : str
A name of a graph, corresponding to the `'graphName'` field in the
graph's entry within the GDS graph catalog.
Returns
-------
dict
A dict object describing the graph that was dropped as a result of
calling this method. The dict follows the same format as one of the list
elements returned by calling `list_current_graphs()`.
"""
try:
res = self.run_cypher(
"CALL gds.graph.drop(\"{0}\")".format(graph_name)
)
return res[0]
except ClientError:
if self.verbose:
print("Error: Graph {0} does not exist.".format(graph_name))
return None
def drop_all_existing_graphs(self):
"""
Delete all graphs currently stored in the GDS graph catalog.
Returns
-------
list
A list of dicts describing the graphs that were dropped as a result of
calling this method. The dicts follow the same format as one of the list
elements returned by calling `list_current_graphs()`.
"""
current_graphs = self.list_existing_graphs()
deleted_graphs = list()
if current_graphs is None:
if self.verbose:
print("Warning - the graph catalog is already empty.")
else:
for cg in current_graphs:
deleted_graph = self.drop_existing_graph(cg['graphName'])
deleted_graphs.append(deleted_graph)
return deleted_graphs
def export_graph(self, graph_name, to='db'):
"""
Export a graph stored in the GDS graph catalog to a set of CSV files.
Parameters
----------
graph_name : str
A name of a graph, corresponding to the `'graphName'` field in the
graph's entry within the GDS graph catalog.
"""
if to == 'csv':
res = self.run_cypher(f"CALL gds.beta.graph.export('{graph_name}', {{exportName: '{graph_name}'}})")
elif to == 'db':
res = self.run_cypher(f"CALL gds.graph.export('{graph_name}', {{dbName: '{graph_name}'}});")
return res
def stream_named_graph(self, graph_name):
"""
Stream a named GDS graph into Python for further processing.
Parameters
----------
graph_name : str
A name of a graph in the GDS catalog.
"""
self.exporter.stream_subgraph
# TODO: Recycle this code to send graphs to DGL instead of Pytorch Geometric
# def to_pytorch(self, graph_name, node_list):
# """
# Construct dataset from exported graph to be used by PyTorch Geometric.
# Parameters
# ----------
# graph_name : str
# A name of a graph, corresponding to the `'graphName'` field in the
# graph's entry within the GDS graph catalog.
# """
# dir_abspath = os.path.join(os.getcwd(), 'comptox_ai/db/exports', f"{graph_name}")
# ## create dataframe containing all nodes
# node_df = pd.DataFrame()
# for node in node_list:
# node_files = glob.glob(f"{dir_abspath}/nodes_{node}_[0-9].csv")
# curr_df = pd.concat([pd.read_csv(fp, names=['id'], index_col=False) for fp in node_files])
# curr_df.insert(loc=1, column='type', value=f"{node}")
# node_df = pd.concat([node_df, curr_df])
# ## map node IDs to indices
# node_indices = dict(zip(node_df['id'].to_list(), range(len(node_df['id']))))
# node_df['index'] = node_df['id'].map(node_indices)
# ## convert node type to one hot encoded values
# node_df['type_encoded'] = LabelEncoder().fit_transform(node_df['type'])
# ohe = OneHotEncoder(sparse=False).fit_transform(node_df['type_encoded'].values.reshape(len(node_df), 1))
# x = torch.LongTensor(ohe)
# ## create dataframe containing all edges
# edge_files = glob.glob(f"{dir_abspath}/relationships_*_[0-9].csv")
# edge_df = pd.concat([pd.read_csv(fp, names=['start_id', 'end_id'], index_col=False) for fp in edge_files])
# ## map edges to indices
# edge_df['start_index'] = edge_df['start_id'].map(node_indices)
# edge_df['end_index'] = edge_df['end_id'].map(node_indices)
# edge_index = torch.tensor([edge_df['start_index'].to_numpy(), edge_df['end_index'].to_numpy()], dtype=torch.long)
# ## create torch_geometric data object
# data = Data(x=x, edge_index=edge_index)
# data.train_mask = data.val_mask = data.test_mask = data.y = None
# return data
```
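Putting the pieces together, a typical session against a running Neo4j instance could look like the following sketch (hostname, credentials, and the example chemical name are placeholders; `find_nodes` and `get_metagraph` behave as documented above):

```python
from comptox_ai.db import GraphDB

# Connect with explicit credentials instead of a CONFIG.yaml file
db = GraphDB(username="neo4j", password="example-password", hostname="localhost")

# Inspect the schema: node labels, counts, and relationship connectivity
meta = db.get_metagraph()
print(meta.node_labels[:5])
print(meta.relationship_types[:5])

# Generator over Chemical nodes whose commonName matches exactly
for chem in db.find_nodes(properties={"commonName": "Aspirin"}, node_types=["Chemical"]):
    print(chem)
```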
|
{
"source": "jdrouet/sailfish",
"score": 3
}
|
#### File: sailfish/scripts/build-docs.py
```python
import os
import shutil
import subprocess
def build_docs(input_dir: str, output_dir: str):
subprocess.call('python3 -m pip install --upgrade pip', shell=True, cwd=input_dir)
subprocess.call('python3 -m pip install mkdocs', shell=True, cwd=input_dir)
subprocess.call('python3 -m mkdocs build', shell=True, cwd=input_dir)
site_dir = os.path.join(input_dir, 'site')
shutil.copytree(site_dir, output_dir)
def main() -> None:
if os.path.exists('site'):
if os.path.isfile('site') or os.path.islink('site'):
os.unlink('site')
else:
shutil.rmtree('site')
os.mkdir('site')
# build the absolute path to the English docs directory
docs_path = os.path.join(os.getcwd(), "docs/en")
print(docs_path)
build_docs(docs_path, output_dir='site/en')
if __name__ == '__main__':
main()
```
|
{
"source": "jdrowne/monorepo",
"score": 2
}
|
#### File: default/tests/section_1_1_2.py
```python
import pytest
# 1.1.2 Ensure separate partition exists for /tmp
def test_cis_benchmark_1_1_2(host):
assert host.mount_point('/tmp').exists
```
|
{
"source": "jdrprod/deep_checker",
"score": 3
}
|
#### File: deep_checker/statistics/mlp_old.py
```python
from utils import vectorize_move
import numpy as np
import joblib
import sys
def usage():
print(
f"{sys.argv[0]} init_p1 init_p2 state1_p1 state1_p2 [state2_p1 state2_p2 ...]"
)
if __name__ == "__main__":
mlp = joblib.load("mlp.save", "r")
if len(sys.argv) < 5 or (len(sys.argv) - 3) % 2 == 1:
usage()
exit(1)
init_p1 = int(sys.argv[1], 16)
init_p2 = int(sys.argv[2], 16)
scores = []
imax = 0
for i in range(3, len(sys.argv) - 1, 2):
state_p1 = int(sys.argv[i], 16)
state_p2 = int(sys.argv[i + 1], 16)
move = vectorize_move(((init_p1, init_p2), (state_p1, state_p2)))
move_vec = np.array([move])
score = mlp.predict(move_vec)
scores.append(score)
if score > scores[imax]:
imax = (i - 3) // 2
print(imax)
```
|
{
"source": "jdrprod/Logik",
"score": 4
}
|
#### File: jdrprod/Logik/evaluator.py
```python
from parser import *
from itertools import product
def get_vars(ast):
"""List all free variables in an expression"""
var_list = []
def r_get_var(ast):
typ = ast[0]
if typ == 'value':
return
elif typ == 'symb':
if ast[1] not in var_list:
var_list.append(ast[1])
elif typ == 'non':
r_get_var(ast[1])
else:
r_get_var(ast[1])
r_get_var(ast[2])
r_get_var(ast)
return var_list
def make_env(var_list):
"""Build a truth table for an expression"""
tab = list(product([0, 1], repeat=len(var_list)))
env_list = []
for lines in tab:
env = {}
for i, v in enumerate(var_list):
env[v] = lines[i]
env_list.append(env)
return (env_list, tab)
def evaluate(ast, env):
"""
Evaluate an expression according to the valuation
described by 'env'
"""
typ = ast[0]
if typ == 'symb':
return env[ast[1]]
elif typ == 'value':
return ast[1]
elif typ == 'non':
return 1 - evaluate(ast[1], env)
elif typ == 'ou':
return max(evaluate(ast[1], env), evaluate(ast[2], env))
elif typ == 'et':
return min(evaluate(ast[1], env), evaluate(ast[2], env))
elif typ == '->':
return max(1 - evaluate(ast[1], env), evaluate(ast[2], env))
def evaluate_all(ast):
"""
Evaluate an expression and if it contains free variables,
display the truth table.
"""
var_list = get_vars(ast)
envs, tab = make_env(var_list)
if len(var_list) > 0:
print("\nTruth table : \n")
print(*var_list)
print('--'*(len(var_list)))
for i, row in enumerate(envs):
print(*tab[i], end=' ')
print(evaluate(ast, row))
else:
print("\nValue : \n")
print(evaluate(ast, {}))
if __name__ == '__main__':
# input
string = input('>>> ')
# lexing
tokens = lex(string)
# parsing
seq = Seq(list(tokens))
ast = parse(seq)
# display the AST
print('\nSyntax tree :\n')
pprint(ast)
evaluate_all(ast)
```
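Because the evaluator works directly on the tuple-based AST produced by the parser (`('symb', name)`, `('value', v)`, `('non', e)`, `('et', a, b)`, `('ou', a, b)`, `('->', a, b)`), it can be exercised without lexing; a small hand-built example:

```python
# (a et b) -> c, written by hand in the same tuple format the parser emits
ast = ('->', ('et', ('symb', 'a'), ('symb', 'b')), ('symb', 'c'))

print(get_vars(ast))                            # ['a', 'b', 'c']
print(evaluate(ast, {'a': 1, 'b': 1, 'c': 0}))  # 0: premise true, conclusion false
print(evaluate(ast, {'a': 1, 'b': 0, 'c': 0}))  # 1: premise false, implication holds
```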
|
{
"source": "jdrprod/Pym-s",
"score": 3
}
|
#### File: Pym-s/examples/bintree.py
```python
class binary_tree:
def __init__(self):
pass
class Node(binary_tree):
__field_0 : int
__field_1 : binary_tree
__field_2 : binary_tree
def __init__(self, arg_0 : int , arg_1 : binary_tree , arg_2 : binary_tree ):
self.__field_0 = arg_0
self.__field_1 = arg_1
self.__field_2 = arg_2
def __iter__(self):
yield self.__field_0
yield self.__field_1
yield self.__field_2
class Leaf(binary_tree):
__field_0 : int
def __init__(self, arg_0 : int ):
self.__field_0 = arg_0
def __iter__(self):
yield self.__field_0
```
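The generated classes behave like algebraic-datatype constructors; because each variant defines `__iter__` over its fields, an instance unpacks directly (a small illustrative sketch):

```python
# A Node carrying 1, with Leaf children carrying 2 and 3
tree = Node(1, Leaf(2), Leaf(3))

value, left, right = tree   # Node yields its three fields in order
(leaf_value,) = left        # Leaf yields its single field
print(value, leaf_value)    # 1 2
```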
|
{
"source": "jdrtommey/adiabatic_evolution",
"score": 3
}
|
#### File: hohi/floquet/floquet_basis.py
```python
def floquet_basis(index,q,space_length,q_bands):
"""
finds the index of a |state,q> in the expanded floquet basis.
finds the first index in a subblock then adds the index of the
state in the non-expanded basis.
Parameters
----------
index: int
the index in the matrix before expanding to floquet basis
q: int
the sideband order
space_length: int
the dimension of the state-space before expansion.
q_bands:
the total number of q bands in space
"""
if index >= space_length:
raise IndexError("index larger than dimension of space. index: " + str(index) + " space length: " + str(space_length))
if abs(q) > abs(q_bands):
raise IndexError("sideband q larger than dimension of space. index: " + str(q) + "space length:" + str(q_bands))
return (q+q_bands)*space_length + index
```
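As a quick sanity check of the indexing formula `(q + q_bands) * space_length + index`: with a 4-dimensional state space and sidebands q = -2..2 the expanded basis has 5 * 4 = 20 entries, and the illustrative values below (chosen only for this example) land where expected:

```python
# q = -2 block occupies indices 0..3, q = 0 occupies 8..11, q = +2 occupies 16..19
assert floquet_basis(0, -2, space_length=4, q_bands=2) == 0
assert floquet_basis(1,  0, space_length=4, q_bands=2) == 9
assert floquet_basis(3,  2, space_length=4, q_bands=2) == 19
```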
|