{
"source": "Jetsie/sherlock",
"score": 3
}
#### File: sherlock/sherlock/notify.py
```python
from result import QueryStatus
class QueryNotify():
def __init__(self, result=None):
self.result = result
return
def start(self, message=None):
return
def update(self, result):
self.result = result
def finish(self, message=None):
return
def __str__(self):
result = str(self.result)
return result
class QueryNotifyPrint(QueryNotify):
def __init__(self):
super().__init__(None)
return
def start(self, message):
print(f"[*] Checking username {message} on:")
return
def update(self, result):
self.result = result
if self.result.query_time is None:
response_time_text = ""
else:
response_time_text = f" [{round(self.result.query_time * 1000)} ms]"
        # Only report sites where the username was detected (CLAIMED).
        if result.status == QueryStatus.CLAIMED:
            print(f"[+]{response_time_text} {self.result.site_name}: {self.result.site_url_user}")
return
def __str__(self):
result = str(self.result)
return result
```
#### File: sherlock/sherlock/result.py
```python
from enum import Enum
class QueryStatus(Enum):
CLAIMED = "Claimed" # Username Detected
AVAILABLE = "Available" # Username Not Detected
UNKNOWN = "Unknown" # Error Occurred While Trying To Detect Username
ILLEGAL = "Illegal" # Username Not Allowable For This Site
def __str__(self):
return self.value
class QueryResult():
def __init__(self, username, site_name, site_url_user, status, query_time=None, context=None):
self.username = username
self.site_name = site_name
self.site_url_user = site_url_user
self.status = status
self.query_time = query_time
self.context = context
return
def __str__(self):
status = str(self.status)
if self.context is not None:
status += f" ({self.context})"
return status
```
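Together the two modules form a small observer pattern: one `QueryResult` is produced per site and pushed through a notify object. A minimal usage sketch (the module names match the file names above; the username, site, and timing values are illustrative):
```python
from result import QueryResult, QueryStatus
from notify import QueryNotifyPrint

notifier = QueryNotifyPrint()
notifier.start("alice")  # prints: [*] Checking username alice on:
r = QueryResult("alice", "GitHub", "https://github.com/alice",
                QueryStatus.CLAIMED, query_time=0.1234)
notifier.update(r)       # prints: [+] [123 ms] GitHub: https://github.com/alice
notifier.finish()        # no-op in the base class
```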
{
"source": "jetske-beks/celp",
"score": 4
}
#### File: jetske-beks/celp/recommender.py
```python
from data import UTILITY, SIMILARITY, UTILITY_CATEGORIES, SIMILARITY_CATEGORIES, CITIES
from operator import itemgetter
import data
import numpy
import pandas
import random
def select_neighborhood(user, business):
""" Selects all items with similarity > 0. """
# error handling
if user not in UTILITY or business not in SIMILARITY:
return pandas.Series()
# get all businesses that the target user has rated
rated = UTILITY[user].dropna().index
# get all similarity scores of those businesses with the target
scores = SIMILARITY[business].loc[rated]
# drop those lower than 0
return scores[scores > 0]
def weighted_mean(neighborhood, user):
# error handling
if neighborhood.empty:
return 0
# get the user ratings for the neighborhood
ratings = UTILITY[user].loc[neighborhood.index]
# calculate the predicted rating
return (ratings * neighborhood).sum() / neighborhood.sum()
def mse(predicted):
# get the difference
difference = predicted['stars'] - predicted['prediction']
# return the mean square error
return numpy.square(difference).sum() / len(predicted)
def baseline():
    # Stub: builds an empty prediction frame and per-business rating averages,
    # but neither is used or returned (see baseline_prediction below).
    data = pandas.DataFrame(index=UTILITY.index, columns=UTILITY.columns)
    avg = UTILITY.apply(numpy.mean)
def baseline_prediction(data):
# get all unique ids
business_ids = data['business_id'].unique()
user_ids = data['user_id'].unique()
# add a 'predicted rating' column
data['prediction'] = pandas.Series(numpy.nan, index=data.index)
print(" * Starting predict test for %i businesses..." % len(business_ids))
# predict a rating for every business
count = 0
for business in business_ids:
count += 1
print(" %i" % count)
for user in user_ids:
# calculate neighborhood & get prediction
prediction = data.loc[data['business_id'] == business, 'stars'].mean()
# add to the data
data.loc[(data['business_id'] == business) & (data['user_id'] == user), 'prediction'] = prediction
return data
def predict(data):
""" Predict the ratings for all items in the data. """
# get all unique ids
business_ids = data['business_id'].unique()
user_ids = data['user_id'].unique()
# add a 'predicted rating' column
data['prediction'] = pandas.Series(numpy.nan, index=data.index)
print(" * Starting predict test for %i businesses..." % len(business_ids))
# predict a rating for every business
count = 0
for business in business_ids:
count += 1
print(" %i" % count)
for user in user_ids:
# calculate neighborhood & get prediction
prediction = weighted_mean(select_neighborhood(user, business), user)
# add to the data
data.loc[(data['business_id'] == business) & (data['user_id'] == user), 'prediction'] = prediction
return data
def content_prediction(user_id, business_ids, utility, similarity):
"""
Make prediction for all businesses based on content similarity
"""
ratings = utility[user_id].dropna()
predictions = pandas.Series()
for business in business_ids:
        if business not in similarity:
continue
sim = similarity[business]
predictions.at[business] = (ratings * sim.loc[ratings.index]).mean()
return predictions
def recommend_collab(businesses, user, business_id=None):
prediction_list = []
for business in businesses:
if business['business_id'] == business_id:
continue
# save info about the business
prediction = {
'id': business['business_id'],
'count': business['review_count'],
'avg': business['stars'],
'city': business['city'].lower()
}
# get prediction
prediction['rating'] = weighted_mean(select_neighborhood(user['user_id'], prediction['id']), user['user_id'])
prediction_list.append(prediction)
sorted_list = sorted(prediction_list, key=itemgetter('city', 'rating', 'count'))
return sorted_list
def recommend_content(businesses, user, business_id=None, utility=None):
business_ids = [b['business_id'] for b in businesses]
predictions = content_prediction(user['user_id'], business_ids, UTILITY, SIMILARITY_CATEGORIES)
    predictions.sort_values(inplace=True)
return predictions
def recommend(user=None, business_id=None, city=None, n=10, method='collab'):
"""
Returns n recommendations as a list of dicts.
Optionally takes in a user, business_id and/or city.
A recommendation is a dictionary in the form of:
{
business_id:str
stars:str
name:str
city:str
        address:str
}
"""
global UTILITY, SIMILARITY, UTILITY_CATEGORIES, SIMILARITY_CATEGORIES, CITIES
# read in matrices
if not UTILITY:
UTILITY = pandas.read_pickle('utility.pkl')
    if not SIMILARITY:
        SIMILARITY = pandas.read_pickle('similarity.pkl')
if not UTILITY_CATEGORIES:
UTILITY_CATEGORIES = pandas.read_pickle('utility_content.pkl')
if not SIMILARITY_CATEGORIES:
SIMILARITY_CATEGORIES = pandas.read_pickle('similarity_content.pkl')
# fill in user, city
if not user:
user = data.get_city_users('eastlake')[0]
if not city:
city = 'eastlake'
state = data.get_state(city)
cities = data.load_cities(state)
# get state businesses for recommendations
businesses = data.get_businesses(cities)
# make predictions for all businesses in the cities
    if method == 'collab':
        predictions = recommend_collab(businesses, user, business_id)
    elif method == 'content':
        predictions = recommend_content(businesses, user)
    elif method == 'hybrid':
        # NOTE: recommend_hybrid is referenced here but not defined in this file.
        predictions = recommend_hybrid(businesses, user)
if predictions.empty:
raise Exception("Predictions are empty!")
recommend_list = []
for p in predictions[:n].index:
business = [b for b in businesses if b['business_id'] == p][0]
recommend_list.append({
'business_id': p,
'stars': business['stars'],
'name': business['name'],
'city': business['city'],
'address': business['address']
})
return (recommend_list * n)[:n]
if __name__ == '__main__':
recommend()
```
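The core of the collaborative path is a neighborhood-weighted mean: the user's existing ratings are weighted by each rated business's similarity to the target business. A self-contained sketch of that computation on toy data (it mirrors `select_neighborhood` and `weighted_mean` without the pickled matrices; all names and values are illustrative):
```python
import pandas

# Toy utility matrix: columns are users, rows are businesses
# (matching the UTILITY[user] indexing above), values are star ratings.
UTILITY = pandas.DataFrame(
    {"user1": [5.0, 3.0, None]},
    index=["biz_a", "biz_b", "biz_c"])
# Toy item-item similarity matrix.
SIMILARITY = pandas.DataFrame(
    {"biz_c": [0.9, 0.2, 1.0]},
    index=["biz_a", "biz_b", "biz_c"])

rated = UTILITY["user1"].dropna().index           # ['biz_a', 'biz_b']
neighborhood = SIMILARITY["biz_c"].loc[rated]     # similarities 0.9, 0.2
neighborhood = neighborhood[neighborhood > 0]
ratings = UTILITY["user1"].loc[neighborhood.index]
prediction = (ratings * neighborhood).sum() / neighborhood.sum()
print(prediction)  # (5*0.9 + 3*0.2) / (0.9 + 0.2) ≈ 4.64
```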
{
"source": "jetskipenguin/Scweet",
"score": 3
}
#### File: Scweet/Scweet/utils.py
```python
from io import StringIO, BytesIO
import os
import re
from time import sleep
import random
import chromedriver_autoinstaller
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import datetime
import pandas as pd
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from . import const
import urllib
from .const import get_username, get_password
def get_data(card, save_images=False, save_dir=None):
"""Extract data from tweet card"""
image_links = []
try:
username = card.find_element_by_xpath('.//span').text
except:
return
try:
handle = card.find_element_by_xpath('.//span[contains(text(), "@")]').text
except:
return
try:
postdate = card.find_element_by_xpath('.//time').get_attribute('datetime')
except:
return
try:
text = card.find_element_by_xpath('.//div[2]/div[2]/div[1]').text
except:
text = ""
try:
embedded = card.find_element_by_xpath('.//div[2]/div[2]/div[2]').text
except:
embedded = ""
try:
reply_cnt = card.find_element_by_xpath('.//div[@data-testid="reply"]').text
except:
reply_cnt = 0
try:
retweet_cnt = card.find_element_by_xpath('.//div[@data-testid="retweet"]').text
except:
retweet_cnt = 0
try:
like_cnt = card.find_element_by_xpath('.//div[@data-testid="like"]').text
except:
like_cnt = 0
try:
        elements = card.find_elements_by_xpath('.//div[2]/div[2]//img[contains(@src, "https://pbs.twimg.com/")]')
        for element in elements:
            # Rewrite the URL to request the larger image variant.
            tmp = element.get_attribute('src')
            index = tmp.find('name')
            if index == -1:
                # No size parameter in the URL; keep it as-is.
                image_links.append(tmp)
            else:
                image_links.append(tmp[:index] + 'name=large')
            print("Image URL", image_links)
print("Image URL", image_links)
except:
image_links = []
print("Couldn't resolve URL, skipping..")
    try:
        # NOTE: '/[last()]' is not valid XPath, so this lookup always raises
        # and 'promoted' falls through to False.
        promoted = card.find_element_by_xpath('.//div[2]/div[2]/[last()]//span').text == "Promoted"
    except:
        promoted = False
if promoted:
return
# get a string of all emojis contained in the tweet
try:
emoji_tags = card.find_elements_by_xpath('.//img[contains(@src, "emoji")]')
except:
return
emoji_list = []
for tag in emoji_tags:
try:
filename = tag.get_attribute('src')
emoji = chr(int(re.search(r'svg\/([a-z0-9]+)\.svg', filename).group(1), base=16))
except AttributeError:
continue
if emoji:
emoji_list.append(emoji)
emojis = ' '.join(emoji_list)
# tweet url
try:
element = card.find_element_by_xpath('.//a[contains(@href, "/status/")]')
tweet_url = element.get_attribute('href')
except:
return
tweet = (username, handle, postdate, text, embedded, emojis, reply_cnt, retweet_cnt, like_cnt, image_links, tweet_url)
return tweet
def init_driver(headless=True, proxy=None, show_images=False, option=None):
""" initiate a chromedriver instance
--option : other option to add (str)
"""
# create instance of web driver
chromedriver_path = chromedriver_autoinstaller.install()
# options
options = Options()
if headless is True:
print("Scraping on headless mode.")
options.add_argument('--disable-gpu')
options.headless = True
else:
options.headless = False
options.add_argument('log-level=3')
if proxy is not None:
options.add_argument('--proxy-server=%s' % proxy)
print("using proxy : ", proxy)
if show_images == False:
prefs = {"profile.managed_default_content_settings.images": 2}
options.add_experimental_option("prefs", prefs)
if option is not None:
options.add_argument(option)
driver = webdriver.Chrome(options=options, executable_path=chromedriver_path)
driver.set_page_load_timeout(100)
return driver
def log_search_page(driver, since, until_local, lang, display_type, words, to_account, from_account, mention_account, hashtag, filter_replies, proximity,
                    geocode, minreplies, minlikes, minretweets, userprofile):
""" Search for this query between since and until_local"""
# format the <from_account>, <to_account> and <hash_tags>
from_account = "(from%3A" + from_account + ")%20" if from_account is not None else ""
to_account = "(to%3A" + to_account + ")%20" if to_account is not None else ""
mention_account = "(%40" + mention_account + ")%20" if mention_account is not None else ""
hash_tags = "(%23" + hashtag + ")%20" if hashtag is not None else ""
if words is not None:
if len(words)==1:
words = "(" + str(''.join(words)) + ")%20"
else :
words = "(" + str('%20OR%20'.join(words)) + ")%20"
else:
words = ""
if lang is not None:
lang = 'lang%3A' + lang
else:
lang = ""
until_local = "until%3A" + until_local + "%20"
since = "since%3A" + since + "%20"
if display_type == "Latest" or display_type == "latest":
display_type = "&f=live"
elif display_type == "Image" or display_type == "image":
display_type = "&f=image"
else:
display_type = ""
# filter replies
if filter_replies == True:
filter_replies = "%20-filter%3Areplies"
else :
filter_replies = ""
# geo
if geocode is not None:
geocode = "%20geocode%3A"+geocode
else:
geocode=""
# min number of replies
if minreplies is not None:
minreplies = "%20min_replies%3A"+str(minreplies)
else:
minreplies = ""
# min number of likes
if minlikes is not None:
minlikes = "%20min_faves%3A"+str(minlikes)
else:
minlikes = ""
# min number of retweets
if minretweets is not None:
minretweets = "%20min_retweets%3A"+str(minretweets)
else:
minretweets = ""
# proximity
if proximity == True:
proximity = "&lf=on" # at the end
else :
proximity = ""
path = 'https://twitter.com/search?q='+words+from_account+to_account+mention_account+hash_tags+until_local+since+lang+filter_replies+geocode+minreplies+minlikes+minretweets+'&src=typed_query'+display_type+proximity
    if userprofile:
        path = userprofile
driver.get(path)
return path
def get_last_date_from_csv(path):
df = pd.read_csv(path)
return datetime.datetime.strftime(max(pd.to_datetime(df["Timestamp"])), '%Y-%m-%dT%H:%M:%S.000Z')
def log_in(driver, env, timeout=10):
username = get_username(env) #const.USERNAME
    password = get_password(env)  # const.PASSWORD
driver.get('https://twitter.com/i/flow/login')
username_css = 'label input'
password_css = 'label input[type="password"]'
username_el = WebDriverWait(driver, timeout).until(EC.presence_of_element_located((By.CSS_SELECTOR, username_css)))
sleep(1)
username_el.send_keys(username)
sleep(1)
username_el.send_keys(Keys.RETURN)
sleep(1)
password_el = WebDriverWait(driver, timeout).until(EC.presence_of_element_located((By.CSS_SELECTOR, password_css)))
sleep(1)
password_el.send_keys(password)
password_el.send_keys(Keys.RETURN)
def keep_scrolling(driver, data, writer, tweet_ids, scrolling, tweet_parsed, limit, scroll, last_position, save_images = False):
""" scrolling function for tweets crawling"""
save_images_dir = "/images"
if save_images == True:
if not os.path.exists(save_images_dir):
os.mkdir(save_images_dir)
while scrolling and tweet_parsed < limit:
sleep(random.uniform(0.5, 1.5))
# get the card of tweets
page_cards = driver.find_elements_by_xpath('//article[@data-testid="tweet"]') # changed div by article
for card in page_cards:
tweet = get_data(card, save_images, save_images_dir)
if tweet:
# check if the tweet is unique
tweet_id = ''.join(str(tweet[:-2]))
if tweet_id not in tweet_ids:
tweet_ids.add(tweet_id)
data.append(tweet)
last_date = str(tweet[2])
print("Tweet made at: " + str(last_date) + " is found.")
writer.writerow(tweet)
tweet_parsed += 1
if tweet_parsed >= limit:
break
scroll_attempt = 0
while tweet_parsed < limit:
# check scroll position
scroll += 1
print("scroll ", scroll)
sleep(random.uniform(0.5, 1.5))
driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
curr_position = driver.execute_script("return window.pageYOffset;")
if last_position == curr_position:
scroll_attempt += 1
# end of scroll region
if scroll_attempt >= 2:
scrolling = False
break
else:
sleep(random.uniform(0.5, 1.5)) # attempt another scroll
else:
last_position = curr_position
break
return driver, data, writer, tweet_ids, scrolling, tweet_parsed, scroll, last_position
def download_images(urls, save_dir):
count = 0
for i, url_v in enumerate(urls):
sleep(2)
for j, url in enumerate(url_v):
count += 1
print("Saving image {}".format(count))
urllib.request.urlretrieve(url, save_dir + '/' + str(i+1) + '_' + str(j+1) + ".jpg")
print("Saved {} images".format(count))
```
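`log_search_page` hand-encodes each query fragment (`%3A` for `:`, `%20` for spaces, `%40` for `@`, `%23` for `#`). The same URL can be built by composing a plain-text query and letting `urllib.parse.quote` do the escaping; a sketch of that alternative (illustrative only, not how Scweet itself does it):
```python
from urllib.parse import quote

def build_search_url(words=None, from_account=None, since=None, until=None, lang=None):
    """Compose a Twitter search URL from plain-text fragments (illustrative)."""
    parts = []
    if words:
        parts.append("(" + " OR ".join(words) + ")")
    if from_account:
        parts.append(f"(from:{from_account})")
    if until:
        parts.append(f"until:{until}")
    if since:
        parts.append(f"since:{since}")
    if lang:
        parts.append(f"lang:{lang}")
    return "https://twitter.com/search?q=" + quote(" ".join(parts)) + "&src=typed_query"

print(build_search_url(words=["python", "selenium"], since="2021-01-01", until="2021-06-01"))
# https://twitter.com/search?q=%28python%20OR%20selenium%29%20until%3A2021-06-01%20since%3A2021-01-01&src=typed_query
```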
{
"source": "jetsmith/Estimator_Test",
"score": 2
}
#### File: test_estimator/nets/vgg16.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def slim_repeat(input_x, repeat_num, layer_type, layer_width, kernel_shape, scope, \
clip_flag=True, min_value=-128.0, max_value=128.0):
net = input_x
for i in range(repeat_num):
net = slim.conv2d(net, layer_width, kernel_shape, scope='{0}/{0}_{1}'.format(scope, i+1))
if clip_flag:
net = tf.clip_by_value(net, min_value, max_value)
return net
def vgg_arg_scope(weight_decay=0.0005):
"""Defines the VGG arg scope.
Args:
weight_decay: The l2 regularization coefficient.
Returns:
An arg_scope.
"""
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
return arg_sc
def vgg16_clip(inputs,
num_classes=None,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_16',
fc_conv_padding='VALID',
global_pool=True):
"""Oxford Net VGG 16-Layers version D Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not should squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output.
Otherwise, the output prediction map will be (input / 32) - 6 in case of
'VALID' padding.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original VGG architecture.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the input to the logits layer (if num_classes is 0 or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim_repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim_repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim_repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim_repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim_repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
# # Use conv2d instead of fully_connected layers.
# net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
# net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
# scope='dropout6')
# net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
# Convert end_points_collection into a end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if global_pool:
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
# net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
# scope='dropout7')
#net = slim.conv2d(net, num_classes, [1, 1],
# activation_fn=None,
# normalizer_fn=None,
# scope='fc1')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc1/squeezed')
end_points[sc.name + '/fc1'] = net
net = slim.batch_norm(net, is_training=is_training)
end_points['BN']=net
return net, end_points
vgg16_clip.default_image_size = 224
def vgg16_face0(inputs,
num_classes=None,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_16',
fc_conv_padding='VALID',
global_pool=True):
"""Oxford Net VGG 16-Layers version D Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not should squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output.
Otherwise, the output prediction map will be (input / 32) - 6 in case of
'VALID' padding.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original VGG architecture.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the input to the logits layer (if num_classes is 0 or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
# # Use conv2d instead of fully_connected layers.
# net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
# net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
# scope='dropout6')
# net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
# Convert end_points_collection into a end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if global_pool:
net = tf.reduce_mean(net, [1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
# net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
# scope='dropout7')
net = slim.conv2d(net, 512, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc1')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc1/squeezed')
end_points[sc.name + '/fc1'] = net
#net = slim.batch_norm(net, is_training=is_training)
#end_points['BN']=net
return net, end_points
vgg16_face0.default_image_size = 224
def vgg16_BN(inputs,
num_classes=None,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_16',
fc_conv_padding='VALID',
global_pool=True):
"""Oxford Net VGG 16-Layers version D Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not should squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output.
Otherwise, the output prediction map will be (input / 32) - 6 in case of
'VALID' padding.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original VGG architecture.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the input to the logits layer (if num_classes is 0 or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm):
with slim.arg_scope([slim.batch_norm], is_training=is_training, center=True, scale=True, decay=0.99):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
#net = slim.flatten(net, scope='flatten')
# Use conv2d instead of fully_connected layers.
#net = slim.conv2d(net, 4096, [4, 4], padding=fc_conv_padding, scope='fc6')
#net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
# scope='dropout6')
#net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
# Convert end_points_collection into a end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if global_pool:
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
# net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
# scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='fc1')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc1/squeezed')
end_points[sc.name + '/fc1'] = net
print('net shape: ', net.get_shape())
return net, end_points
vgg16_BN.default_image_size = 224
def vgg16_MT(inputs,
num_classes=None,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_16',
fc_conv_padding='VALID',
global_pool=True):
"""Oxford Net VGG 16-Layers version D Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not should squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output.
Otherwise, the output prediction map will be (input / 32) - 6 in case of
'VALID' padding.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original VGG architecture.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the input to the logits layer (if num_classes is 0 or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm):
with slim.arg_scope([slim.batch_norm], is_training=is_training, center=True, scale=True, decay=0.99):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
#net = slim.flatten(net, scope='flatten')
#Use conv2d instead of fully_connected layers.
#net = slim.conv2d(net, 4096, [4, 4], padding=fc_conv_padding, scope='fc6')
#net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
# scope='dropout6')
#net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
# Convert end_points_collection into a end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if global_pool:
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
end_points['global_pool'] = net
logits = {}
num_classes = num_classes.split(',')
#for i, attribute in enumerate(['jc', 'pc', 'pt', 'age', 'gender']):
for i, attribute in enumerate(['age', 'gender']):
if num_classes:
# net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
# scope='dropout7')
#tmp_net = slim.fully_connected(net,
# int(num_classes[i]), activation_fn=None,
# normalizer_fn=None, scope= attribute + '/fc1')
tmp_net = slim.conv2d(net, int(num_classes[i]), [1, 1],
activation_fn=None,
normalizer_fn=None,
scope= attribute + '/fc1')
if spatial_squeeze:
logits[attribute] = tf.squeeze(tmp_net, [1, 2], name='fc1/squeezed/' + attribute)
end_points[sc.name + '/fc1/' + attribute] = logits[attribute]
#logits[attribute] = tmp_net
return logits, end_points
vgg16_MT.default_image_size = 224
def vgg16_mt_fc(inputs,
num_classes=None,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_16',
fc_conv_padding='VALID',
global_pool=True):
"""Oxford Net VGG 16-Layers version D Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not should squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output.
Otherwise, the output prediction map will be (input / 32) - 6 in case of
'VALID' padding.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original VGG architecture.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the input to the logits layer (if num_classes is 0 or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.conv2d, slim.fully_connected], normalizer_fn=slim.batch_norm):
with slim.arg_scope([slim.batch_norm], is_training=is_training, center=True, scale=True, decay=0.99):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
#net = slim.flatten(net, scope='flatten')
#net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
#net = slim.max_pool2d(net, [2, 2], scope='pool4')
#net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
#net = slim.max_pool2d(net, [2, 2], scope='pool5')
# Convert end_points_collection into a end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if global_pool:
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
end_points['global_pool'] = net
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='squeezed')
logits = {}
num_classes = num_classes.split(',')
for i, attribute in enumerate(['jc', 'pc', 'pt', 'age', 'gender']):
tmp_net = slim.fully_connected(net, 1024, scope=
attribute + '/fc1')
tmp_net = slim.dropout(tmp_net, dropout_keep_prob, is_training=is_training,
scope= attribute + '/dropout1')
tmp_net = slim.fully_connected(tmp_net, 1024, scope=
attribute + '/fc2')
if num_classes:
tmp_net = slim.dropout(tmp_net, dropout_keep_prob, is_training=is_training,
scope= attribute + '/dropout2')
tmp_net = slim.fully_connected(tmp_net,
int(num_classes[i]), activation_fn=None,
normalizer_fn=None, scope= attribute + '/fc3')
#if spatial_squeeze:
# logits[attribute] = tf.squeeze(tmp_net, [1, 2], name='fc1/squeezed' + '/' + attribute)
logits[attribute] = tmp_net
end_points[sc.name + '/fc3/' + attribute] = logits[attribute]
return logits, end_points
vgg16_mt_fc.default_image_size = 224
def vgg16_mt_conv(inputs,
num_classes=None,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='vgg_16',
fc_conv_padding='VALID',
global_pool=True):
"""Oxford Net VGG 16-Layers version D Example.
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer are returned instead.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not should squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
fc_conv_padding: the type of padding to use for the fully connected layer
that is implemented as a convolutional layer. Use 'SAME' padding if you
are applying the network in a fully convolutional manner and want to
get a prediction map downsampled by a factor of 32 as an output.
Otherwise, the output prediction map will be (input / 32) - 6 in case of
'VALID' padding.
global_pool: Optional boolean flag. If True, the input to the classification
layer is avgpooled to size 1x1, for any input size. (This is not part
of the original VGG architecture.)
Returns:
net: the output of the logits layer (if num_classes is a non-zero integer),
or the input to the logits layer (if num_classes is 0 or None).
end_points: a dict of tensors with intermediate activations.
"""
with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm):
with slim.arg_scope([slim.batch_norm], is_training=is_training, center=True, scale=True, decay=0.99):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
logits = {}
num_classes = num_classes.split(',')
for i, attribute in enumerate(['jc', 'pc', 'pt', 'age', 'gender']):
tmp_net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
scope= attribute + '/conv4')
tmp_net = slim.max_pool2d(tmp_net, [2, 2], scope= attribute
+ '/pool4')
tmp_net = slim.repeat(tmp_net, 3, slim.conv2d, 512, [3, 3], scope= attribute + '/conv5')
tmp_net = slim.max_pool2d(tmp_net, [2, 2], scope=attribute + '/pool5')
# Convert end_points_collection into a end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if global_pool:
tmp_net = tf.reduce_mean(tmp_net, [1, 2],
keep_dims=True, name=attribute + '/global_pool')
end_points[attribute + '/global_pool'] = tmp_net
if num_classes:
#net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
# scope='dropout7')
tmp_net = slim.conv2d(tmp_net, int(num_classes[i]), [1, 1],
activation_fn=None,
normalizer_fn=None,
scope=attribute + '/fc1')
if spatial_squeeze:
tmp_net = tf.squeeze(tmp_net, [1, 2], name=attribute +
'/fc1/squeezed')
end_points[sc.name + '/fc1'] = tmp_net
logits[attribute] = tmp_net
return logits, end_points
vgg16_mt_conv.default_image_size = 224
def test_net(inputs,
num_classes=None,
is_training=True,
spatial_squeeze=True,
scope='test_net',
global_pool=True):
with tf.variable_scope(scope, 'test_net', [inputs]) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.conv2d(inputs, 32, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net_branch = slim.conv2d(net, 64, [3, 3], scope='conv2')
net_branch = slim.conv2d(net_branch, 64, [3, 3], scope='conv3')
net = tf.concat([net, net_branch], 3, name='concat')
print('net shape: ', net.get_shape())
net = slim.conv2d(net, 64, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
# Convert end_points_collection into a end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
net = slim.conv2d(net, num_classes, [3, 3],
activation_fn=None,
scope='logit')
if global_pool:
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
end_points['global_pool'] = net
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='squeezed')
end_points[sc.name + '/squeeze'] = net
return net, end_points
test_net.default_image_size = 64
```
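These constructors all follow the same slim pattern: enter the arg scope, then call the network function to get logits plus an end-point dict. A minimal graph-construction sketch, assuming TensorFlow 1.x with `tf.contrib.slim` available and this file importable as `vgg16` (the import path is an assumption):
```python
import tensorflow as tf
import vgg16  # this module; the import path is an assumption

slim = tf.contrib.slim
images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
with slim.arg_scope(vgg16.vgg_arg_scope(weight_decay=0.0005)):
    logits, end_points = vgg16.vgg16_BN(images, num_classes=10, is_training=False)
print(logits)                     # Tensor of shape [None, 10] after spatial squeeze
print(sorted(end_points.keys()))  # conv/pool activations plus 'global_pool'
```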
{
"source": "jetsnake/spiral_matrix",
"score": 3
}
#### File: spiral_matrix/spiral_matrix/spiral_matrix.py
```python
import aiohttp
import re
from typing import List
async def get_matrix(url: str) -> List[int]:
async with aiohttp.ClientSession() as session:
try:
async with session.get(url) as response:
response.raise_for_status()
matrix = await response.text()
listTraversingMatrixByRows = list(map(int, filter(None, re.split('\\D+', matrix))))
n = int(len(listTraversingMatrixByRows) ** 0.5)
result = []
left = 0
top = 0
right = n - 1
bottom = n - 1
while left <= right and top <= bottom:
for i in range(top, bottom + 1):
result.append(listTraversingMatrixByRows[left + i * n])
left += 1
for i in range(left, right + 1):
result.append(listTraversingMatrixByRows[bottom * n + i])
bottom -= 1
for i in range(bottom, top - 1, -1):
result.append(listTraversingMatrixByRows[right + i * n])
right -= 1
for i in range(right, left - 1, -1):
result.append(listTraversingMatrixByRows[top * n + i])
top += 1
return result
        except aiohttp.InvalidURL as err:
            print("The specified URL is invalid!")
            print(err)
        except aiohttp.ClientResponseError as err:
            print(f"Status code: {err.status}")
            print(err.message)
        except aiohttp.ClientConnectorError as err:
            print(err)
        # The more specific server errors must be caught before the
        # aiohttp.ClientError base class, or their handlers are unreachable.
        except aiohttp.ServerTimeoutError as err:
            print(err)
        except aiohttp.ServerConnectionError as err:
            print(err)
        except aiohttp.ClientError as err:
            print(err)
        except Exception:
            print("Unknown error!")
```
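The while loop walks the flattened row-major matrix counterclockwise: down the left column, along the bottom row, up the right column, then back across the top. A standalone sketch of just that traversal on a 3x3 matrix (no aiohttp involved), with the resulting order shown:
```python
# Flattened row-major 3x3 matrix: rows [1,2,3], [4,5,6], [7,8,9].
flat = [1, 2, 3,
        4, 5, 6,
        7, 8, 9]
n = 3
result = []
left, top, right, bottom = 0, 0, n - 1, n - 1
while left <= right and top <= bottom:
    for i in range(top, bottom + 1):      # down the left column
        result.append(flat[left + i * n])
    left += 1
    for i in range(left, right + 1):      # along the bottom row
        result.append(flat[bottom * n + i])
    bottom -= 1
    for i in range(bottom, top - 1, -1):  # up the right column
        result.append(flat[right + i * n])
    right -= 1
    for i in range(right, left - 1, -1):  # back across the top row
        result.append(flat[top * n + i])
    top += 1
print(result)  # [1, 4, 7, 8, 9, 6, 3, 2, 5] — counterclockwise spiral
```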
{
"source": "jetsonhacks/CSI-Camera",
"score": 3
}
#### File: jetsonhacks/CSI-Camera/dual_camera.py
```python
import cv2
import threading
import numpy as np
class CSI_Camera:
def __init__(self):
# Initialize instance variables
# OpenCV video capture element
self.video_capture = None
# The last captured image from the camera
self.frame = None
self.grabbed = False
# The thread where the video capture runs
self.read_thread = None
self.read_lock = threading.Lock()
self.running = False
def open(self, gstreamer_pipeline_string):
try:
self.video_capture = cv2.VideoCapture(
gstreamer_pipeline_string, cv2.CAP_GSTREAMER
)
# Grab the first frame to start the video capturing
self.grabbed, self.frame = self.video_capture.read()
except RuntimeError:
self.video_capture = None
print("Unable to open camera")
print("Pipeline: " + gstreamer_pipeline_string)
def start(self):
if self.running:
print('Video capturing is already running')
return None
# create a thread to read the camera image
        if self.video_capture is not None:
self.running = True
self.read_thread = threading.Thread(target=self.updateCamera)
self.read_thread.start()
return self
def stop(self):
self.running = False
# Kill the thread
self.read_thread.join()
self.read_thread = None
def updateCamera(self):
# This is the thread to read images from the camera
while self.running:
try:
grabbed, frame = self.video_capture.read()
with self.read_lock:
self.grabbed = grabbed
self.frame = frame
except RuntimeError:
print("Could not read image from camera")
# FIX ME - stop and cleanup thread
# Something bad happened
def read(self):
with self.read_lock:
frame = self.frame.copy()
grabbed = self.grabbed
return grabbed, frame
def release(self):
        if self.video_capture is not None:
self.video_capture.release()
self.video_capture = None
# Now kill the thread
        if self.read_thread is not None:
self.read_thread.join()
"""
gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
Flip the image by setting the flip_method (most common values: 0 and 2)
display_width and display_height determine the size of each camera pane in the window on the screen
Default 1920x1080
"""
def gstreamer_pipeline(
sensor_id=0,
capture_width=1920,
capture_height=1080,
display_width=1920,
display_height=1080,
framerate=30,
flip_method=0,
):
return (
"nvarguscamerasrc sensor-id=%d ! "
"video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, framerate=(fraction)%d/1 ! "
"nvvidconv flip-method=%d ! "
"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
"videoconvert ! "
"video/x-raw, format=(string)BGR ! appsink"
% (
sensor_id,
capture_width,
capture_height,
framerate,
flip_method,
display_width,
display_height,
)
)
def run_cameras():
window_title = "Dual CSI Cameras"
left_camera = CSI_Camera()
left_camera.open(
gstreamer_pipeline(
sensor_id=0,
capture_width=1920,
capture_height=1080,
flip_method=0,
display_width=960,
display_height=540,
)
)
left_camera.start()
right_camera = CSI_Camera()
right_camera.open(
gstreamer_pipeline(
sensor_id=1,
capture_width=1920,
capture_height=1080,
flip_method=0,
display_width=960,
display_height=540,
)
)
right_camera.start()
if left_camera.video_capture.isOpened() and right_camera.video_capture.isOpened():
cv2.namedWindow(window_title, cv2.WINDOW_AUTOSIZE)
try:
while True:
_, left_image = left_camera.read()
_, right_image = right_camera.read()
# Use numpy to place images next to each other
camera_images = np.hstack((left_image, right_image))
# Check to see if the user closed the window
# Under GTK+ (Jetson Default), WND_PROP_VISIBLE does not work correctly. Under Qt it does
# GTK - Substitute WND_PROP_AUTOSIZE to detect if window has been closed by user
if cv2.getWindowProperty(window_title, cv2.WND_PROP_AUTOSIZE) >= 0:
cv2.imshow(window_title, camera_images)
else:
break
                # This also acts as a frame limiter: waitKey blocks for up to 30 ms
                keyCode = cv2.waitKey(30) & 0xFF
# Stop the program on the ESC key
if keyCode == 27:
break
finally:
left_camera.stop()
left_camera.release()
right_camera.stop()
right_camera.release()
cv2.destroyAllWindows()
else:
print("Error: Unable to open both cameras")
left_camera.stop()
left_camera.release()
right_camera.stop()
right_camera.release()
if __name__ == "__main__":
run_cameras()
```
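For reference, `gstreamer_pipeline` is a pure string formatter, so it can be inspected without cameras attached. Calling it with the same arguments `run_cameras` uses yields:
```python
print(gstreamer_pipeline(sensor_id=0, capture_width=1920, capture_height=1080,
                         flip_method=0, display_width=960, display_height=540))
# Output (a single line, wrapped here for readability):
# nvarguscamerasrc sensor-id=0 ! video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080,
# framerate=(fraction)30/1 ! nvvidconv flip-method=0 ! video/x-raw, width=(int)960,
# height=(int)540, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink
```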
{
"source": "jetsonhacks/gst-explorer",
"score": 2
}
#### File: jetsonhacks/gst-explorer/gst-inspector.py
```python
import base64
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtWidgets import (QAction, QApplication,
QComboBox, QFrame, QHBoxLayout, QLabel, QLineEdit,
QListWidget, QMainWindow, QMenu,
QMenuBar, QSizePolicy, QSplitter, QTextEdit,
QVBoxLayout)
from gst_controller import Gst_Inspector_Controller
from gst_model import Gst_Inspector_Model
""" We use a Model, View, Controller pattern.
Gst_Inspector_Model contains the data. The data is gathered from running gst-inspect-1.0
Gst_Inspector_Controller is the controller which arbitrates between the model and the view """
class Gst_Inspector_View(QMainWindow):
LIST_WIDTH = 280
WINDOW_WIDTH = 960
WINDOW_HEIGHT = 540
def setup(self, controller):
""" Initialize the main parts of the UI. These are two frames placed in a horizontal splitter."""
# The Left frame is the list of plugins, elements and types.
left_frame = self.setup_left_frame(controller)
# The right frame is a text frame which displays the detailed info of the above.
right_frame = self.setup_right_frame(controller)
# Make up the horizontal splitter, then add the left and right frames
horizontal_splitter = QSplitter(Qt.Horizontal)
horizontal_splitter.addWidget(left_frame)
horizontal_splitter.addWidget(right_frame)
horizontal_splitter.setChildrenCollapsible(False)
# With point size 12, the list widget should be large enough to
# avoid a horizontal scroller.
horizontal_splitter.setSizes(
[Gst_Inspector_View.LIST_WIDTH, Gst_Inspector_View.WINDOW_WIDTH-Gst_Inspector_View.LIST_WIDTH])
# Add the splitter to the main window
self.setCentralWidget(horizontal_splitter)
# Set the font of the window a little larger; 12 point
qfont = self.font()
qfont.setPointSize(12)
self.setFont(qfont)
# Initial layout window size
self.setGeometry(100, 100, Gst_Inspector_View.WINDOW_WIDTH,
Gst_Inspector_View.WINDOW_HEIGHT)
self.setWindowTitle('GStreamer Inspector')
self.setup_menu_bar()
def setup_left_frame(self, controller: Gst_Inspector_Controller):
""" The Left frame contains the list of plugins, elements and types.
There is a list, a search box and a combo box which select the type of element """
left_frame = QFrame()
left_vbox = QVBoxLayout()
left_frame.setLayout(left_vbox)
# Create the text filter box
# Note: We should be able to add a Completer here also
self.search_widget = QLineEdit()
self.search_widget.setClearButtonEnabled(True)
self.search_widget.setPlaceholderText("Search")
self.search_widget.textChanged.connect(
controller.on_filter_text_changed)
left_vbox.addWidget(self.search_widget)
# Gst type of element filter
# Combo box that selects the type of Gst element to be examined
filter_box = QHBoxLayout()
filter_label = QLabel("Filter")
filter_label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
filter_box.addWidget(filter_label)
self.filter_combo_box = QComboBox(self)
self.filter_combo_box.addItems(["All", "Plugins", "Elements", "Types"])
self.filter_combo_box.currentTextChanged.connect(
controller.on_filter_box_changed)
filter_box.addWidget(self.filter_combo_box)
left_vbox.addLayout(filter_box)
# The list of plugins, elements and types
self.list_widget = QListWidget()
self.list_widget.setSortingEnabled(True)
self.list_widget.setMinimumWidth(280)
self.list_widget.itemClicked.connect(controller.on_list_clicked)
left_vbox.addWidget(self.list_widget)
# A label that displays the number of items being displayed
self.info_label = QLabel("")
left_vbox.addWidget(self.info_label)
return left_frame
def setup_right_frame(self, controller):
""" The right frame is a text display which displays
detailed info about the plugin, element or type """
right_frame = QFrame()
right_vbox = QVBoxLayout()
right_frame.setLayout(right_vbox)
self.element_text_edit = QTextEdit()
# Use the default Terminal font
# This is a monospaced font, also called fixed-pitch or fixed-width
# Fixed width characters are used so that the columns 'line up' in the text
self.element_text_edit.setFontFamily('Monospace')
self.element_text_edit.setFontPointSize(11)
self.element_text_edit.setReadOnly(True)
right_vbox.addWidget(self.element_text_edit)
return right_frame
# For demonstration purposes, we introduce the Google "baseline_list_alt_black_24dp.png" directly
# into the Python script as a binary string. This is so we don't have to depend on the file being present.
def get_plugin_icon(self):
""" Return an icon with the Google 'baseline_list_alt_black_24dp.png' as its image """
encoded_list_png = b'<KEY>
decoded_list_png = base64.b64decode(encoded_list_png)
pix_map = QPixmap()
pix_map.loadFromData(decoded_list_png)
icon = QIcon(pix_map)
return icon
def setup_menu_bar(self):
""" Mostly here as a placeholder """
menuBar = QMenuBar(self)
self.setMenuBar(menuBar)
fileMenu = QMenu("&File", self)
menuBar.addMenu(fileMenu)
fileMenu.addSeparator()
self.exitAction = QAction("&Exit", self)
fileMenu.addAction(self.exitAction)
# This closes the window, then exits the program
self.exitAction.triggered.connect(self.close)
def main():
app = QApplication(sys.argv)
controller = Gst_Inspector_Controller(
Gst_Inspector_Model(), Gst_Inspector_View())
controller.setup()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
```
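The view wires three callbacks into the controller (`on_filter_text_changed`, `on_filter_box_changed`, `on_list_clicked`) plus `setup()`. The controller and model sources are not shown in this excerpt; the skeleton below is hypothetical, matching only those call sites, purely to illustrate the MVC wiring (every method body is an assumption):
```python
class Gst_Inspector_Controller:
    """Hypothetical skeleton; only the method names are taken from the view above."""

    def __init__(self, model, view):
        self.model = model
        self.view = view

    def setup(self):
        # Build the UI, populate the list from the model, show the window.
        self.view.setup(self)
        self.view.show()

    def on_filter_text_changed(self, text):
        # Re-filter the list widget as the user types in the search box.
        pass

    def on_filter_box_changed(self, selection):
        # Switch between the All / Plugins / Elements / Types views.
        pass

    def on_list_clicked(self, item):
        # Fetch detailed gst-inspect output for the item and display it.
        pass
```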
{
"source": "jetsonhacks/USB-Camera",
"score": 3
}
#### File: jetsonhacks/USB-Camera/usb-camera-gst.py
```python
import sys
import cv2
window_title = "USB Camera"
# ASSIGN CAMERA ADDRESS to DEVICE HERE!
pipeline = " ! ".join(["v4l2src device=/dev/video0",
"video/x-raw, width=640, height=480, framerate=30/1",
"videoconvert",
"video/x-raw, format=(string)BGR",
"appsink"
])
# Sample pipeline for H.264 video, tested on Logitech C920
h264_pipeline = " ! ".join(["v4l2src device=/dev/video0",
"video/x-h264, width=1280, height=720, framerate=30/1, format=H264",
"avdec_h264",
"videoconvert",
"video/x-raw, format=(string)BGR",
"appsink sync=false"
])
def show_camera():
# Full list of Video Capture APIs (video backends): https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html
# For webcams, we use V4L2
video_capture = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
if video_capture.isOpened():
try:
window_handle = cv2.namedWindow(
window_title, cv2.WINDOW_AUTOSIZE)
# Window
while True:
ret_val, frame = video_capture.read()
# Check to see if the user closed the window
# Under GTK+ (Jetson Default), WND_PROP_VISIBLE does not work correctly. Under Qt it does
# GTK - Substitute WND_PROP_AUTOSIZE to detect if window has been closed by user
if cv2.getWindowProperty(window_title, cv2.WND_PROP_AUTOSIZE) >= 0:
cv2.imshow(window_title, frame)
else:
break
keyCode = cv2.waitKey(10) & 0xFF
# Stop the program on the ESC key or 'q'
if keyCode == 27 or keyCode == ord('q'):
break
finally:
video_capture.release()
cv2.destroyAllWindows()
else:
print("Error: Unable to open camera")
if __name__ == "__main__":
show_camera()
```
#### File: jetsonhacks/USB-Camera/usb-camera-simple.py
```python
import sys
import cv2
window_title = "USB Camera"
def show_camera():
# ASSIGN CAMERA ADDRESS HERE
camera_id = "/dev/video0"
# Full list of Video Capture APIs (video backends): https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html
# For webcams, we use V4L2
video_capture = cv2.VideoCapture(camera_id, cv2.CAP_V4L2)
"""
# How to set video capture properties using V4L2:
# Full list of Video Capture Properties for OpenCV: https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html
#Select Pixel Format:
# video_capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'YUYV'))
# Two common formats, MJPG and H264
# video_capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
# Default libopencv on the Jetson is not linked against libx264, so H.264 is not available
# video_capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'H264'))
# Select frame size, FPS:
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
video_capture.set(cv2.CAP_PROP_FPS, 30)
"""
if video_capture.isOpened():
try:
window_handle = cv2.namedWindow(
window_title, cv2.WINDOW_AUTOSIZE )
# Window
while True:
ret_val, frame = video_capture.read()
# Check to see if the user closed the window
# Under GTK+ (Jetson Default), WND_PROP_VISIBLE does not work correctly. Under Qt it does
# GTK - Substitute WND_PROP_AUTOSIZE to detect if window has been closed by user
if cv2.getWindowProperty(window_title, cv2.WND_PROP_AUTOSIZE) >= 0:
cv2.imshow(window_title, frame)
else:
break
keyCode = cv2.waitKey(10) & 0xFF
# Stop the program on the ESC key or 'q'
if keyCode == 27 or keyCode == ord('q'):
break
finally:
video_capture.release()
cv2.destroyAllWindows()
else:
print("Unable to open camera")
if __name__ == "__main__":
show_camera()
```
{
"source": "JetStarBlues/Intel-8080-Emulator",
"score": 2
}
#### File: JetStarBlues/Intel-8080-Emulator/cpu_8080.py
```python
'''
From 8080 Docs...
Flags
0 - carry, set when addition result overflows or rotate/shift has shifted-out a '1'
2 - parity, set when modulo 2 sum of bits is zero
4 - auxiliary carry, set when half carry occurs
6 - zero, set when result of operation is 0
7 - sign, set when MSB of result is 1
OpCodes
- Destination / Source
. B -> 000
. C -> 001
. D -> 010
. E -> 011
. H -> 100
. L -> 101
. Memory -> 110
. A -> 111
- Move, load, store
. MOV R1,R2 -> 01DDDSSS -> 1 -> Move register to register
. MOV M,R -> 01110SSS -> 2 -> Move register to memory
. MOV R,M -> 01DDD110 -> 2 -> Move memory to register
. MVI R,data -> 00DDD110 -> 2 -> Move immediate to register
. MVI M,data -> 00110110 -> 3 -> Move immediate to memory
. LXI BC,data16 -> 00000001 -> 3 -> Load immediate to register pair B & C
. LXI DE,data16 -> 00010001 -> 3 -> Load immediate to register pair D & E
. LXI HL,data16 -> 00100001 -> 3 -> Load immediate to register pair H & L
. LXI SP,data16 -> 00110001 -> 3 -> Load immediate to stack pointer
. LDA addr -> 00111010 -> 4 -> Load A direct
. STA addr -> 00110010 -> 4 -> Store A direct
. LHLD addr -> 00101010 -> 5 -> Load H & L direct
. SHLD addr -> 00100010 -> 5 -> Store H & L direct
. LDAX BC -> 00001010 -> 2 -> Load A indirect
. LDAX DE -> 00011010 -> 2 -> Load A indirect
. STAX BC -> 00000010 -> 2 -> Store A indirect
. STAX DE -> 00010010 -> 2 -> Store A indirect
. XCHG -> 11101011 -> 1 -> Exchange register pair D & E with H & L
- Stack ops
. PUSH BC -> 11000101 -> 3 -> Push register pair B & C on stack
. PUSH DE -> 11010101 -> 3 -> Push register pair D & E on stack
. PUSH HL -> 11100101 -> 3 -> Push register pair H & L on stack
. PUSH PSW -> 11110101 -> 3 -> Push A and Flags on stack
. POP BC -> 11000001 -> 3 -> Pop top of stack onto register pair B & C
. POP DE -> 11010001 -> 3 -> Pop top of stack onto register pair D & E
. POP HL -> 11100001 -> 3 -> Pop top of stack onto register pair H & L
. POP PSW -> 11110001 -> 3 -> Pop top of stack onto A and Flags
. XTHL -> 11100011 -> 5 -> Exchange H & L with contents of location specified by stack pointer
. SPHL -> 11111001 -> 1 -> H & L to stack pointer
- Jump
. JMP addr -> 11000011 -> 3 -> Jump unconditional
. JNZ addr -> 11000010 -> 3 -> Jump on not zero
. JZ addr -> 11001010 -> 3 -> Jump on zero
. JNC addr -> 11010010 -> 3 -> Jump on no carry
. JC addr -> 11011010 -> 3 -> Jump on carry
. JPO addr -> 11100010 -> 3 -> Jump on parity odd
. JPE addr -> 11101010 -> 3 -> Jump on parity even
. JP addr -> 11110010 -> 3 -> Jump on positive
. JM addr -> 11111010 -> 3 -> Jump on minus
. PCHL -> 11101001 -> 1 -> H & L to program counter
- Call
. CALL addr -> 11001101 -> 5 -> Call unconditional
. CNZ addr -> 11000100 -> 3/5 -> Call on not zero
. CZ addr -> 11001100 -> 3/5 -> Call on zero
. CNC addr -> 11010100 -> 3/5 -> Call on no carry
. CC addr -> 11011100 -> 3/5 -> Call on carry
. CPO addr -> 11100100 -> 3/5 -> Call on parity odd
. CPE addr -> 11101100 -> 3/5 -> Call on parity even
. CP addr -> 11110100 -> 3/5 -> Call on positive
. CM addr -> 11111100 -> 3/5 -> Call on minus
- Return
. RET -> 11001001 -> 3 -> Return unconditional
. RNZ -> 11000000 -> 1/3 -> Return on not zero
. RZ -> 11001000 -> 1/3 -> Return on zero
. RNC -> 11010000 -> 1/3 -> Return on no carry
. RC -> 11011000 -> 1/3 -> Return on carry
. RPO -> 11100000 -> 1/3 -> Return on parity odd
. RPE -> 11101000 -> 1/3 -> Return on parity even
. RP -> 11110000 -> 1/3 -> Return on positive
. RM -> 11111000 -> 1/3 -> Return on minus
- Restart
. RST -> 11NNN111 -> 3 -> Restart
- Increment and decrement
. INR R -> 00DDD100 -> 1 -> Increment register
. INR M -> 00110100 -> 3 -> Increment memory
. INX BC -> 00000011 -> 1 -> Increment B & C registers
. INX DE -> 00010011 -> 1 -> Increment D & E registers
. INX HL -> 00100011 -> 1 -> Increment H & L registers
. INX SP -> 00110011 -> 1 -> Increment stack pointer
. DCR R -> 00DDD101 -> 1 -> Decrement register
. DCR M -> 00110101 -> 3 -> Decrement memory
. DCX BC -> 00001011 -> 1 -> Decrement B & C registers
. DCX DE -> 00011011 -> 1 -> Decrement D & E registers
. DCX HL -> 00101011 -> 1 -> Decrement H & L registers
. DCX SP -> 00111011 -> 1 -> Decrement stack pointer
- Add
. ADD R -> 10000SSS -> 1 -> Add register to A
. ADD M -> 10000110 -> 2 -> Add memory to A
. ADI data -> 11000110 -> 2 -> Add immediate to A
. ADC R -> 10001SSS -> 1 -> Add register to A with carry
. ADC M -> 10001110 -> 2 -> Add memory to A with carry
. ACI data -> 11001110 -> 2 -> Add immediate to A with carry
. DAD BC -> 00001001 -> 3 -> Add B & C to H & L
. DAD DE -> 00011001 -> 3 -> Add D & E to H & L
. DAD HL -> 00101001 -> 3 -> Add H & L to H & L
. DAD SP -> 00111001 -> 3 -> Add stack pointer to H & L
- Subtract
. SUB R -> 10010SSS -> 1 -> Subtract register from A
. SUB M -> 10010110 -> 2 -> Subtract memory from A
. SUI data -> 11010110 -> 2 -> Subtract immediate from A
. SBB R -> 10011SSS -> 1 -> Subtract register from A with borrow
. SBB M -> 10011110 -> 2 -> Subtract memory from A with borrow
. SBI data -> 11011110 -> 2 -> Subtract immediate from A with borrow
- Logical
. ANA R -> 10100SSS -> 1 -> And register with A
. ANA M -> 10100110 -> 2 -> And memory with A
. ANI data -> 11100110 -> 2 -> And immediate with A
. XRA R -> 10101SSS -> 1 -> Exclusive or register with A
. XRA M -> 10101110 -> 2 -> Exclusive or memory with A
. XRI data -> 11101110 -> 2 -> Exclusive or immediate with A
. ORA R -> 10110SSS -> 1 -> Or register with A
. ORA M -> 10110110 -> 2 -> Or memory with A
. ORI data -> 11110110 -> 2 -> Or immediate with A
. CMP R -> 10111SSS -> 1 -> Compare register with A
. CMP M -> 10111110 -> 2 -> Compare memory with A
. CPI data -> 11111110 -> 2 -> Compare immediate with A
- Rotate
. RLC -> 00000111 -> 1 -> Rotate A left
. RRC -> 00001111 -> 1 -> Rotate A right
. RAL -> 00010111 -> 1 -> Rotate A left through carry
. RAR -> 00011111 -> 1 -> Rotate A right through carry
- Specials
. CMA -> 00101111 -> 1 -> Complement A
. CMC -> 00111111 -> 1 -> Complement carry
. STC -> 00110111 -> 1 -> Set carry
. DAA -> 00100111 -> x -> Decimal adjust A
- Input / Output
. IN port -> 11011011 -> 3 -> Input. Read byte from specified port and load to A
. OUT port -> 11010011 -> 3 -> Output. Places contents of A onto data bus and the
selected port number onto the address bus
- Control
. EI -> 11111011 -> 1 -> Enable interrupts
. DI -> 11110011 -> 1 -> Disable interrupt
. HLT -> 01110110 -> 1 -> Halt
. NOP -> 00000000 -> 1 -> No operation
- 8085 instructions
. SIM -> .. -> Set interrupt mask
. RIM -> .. -> Read interrupt mask
. review AND/ANI operation, which sets the AC flag differently
- 8051 instructions
. interrupts
. timers
Instructions greater than 8 bits
There are times when execution of an instruction requires more information than 8 bits can convey.
In such a case, two or three byte instructions are used. Successive instruction bytes are stored
sequentially in adjacent memory locations, and the processor performs two or three fetches in
succession to obtain the full instruction. The first byte retrieved is placed in the instruction
register and subsequent bytes in temporary storage. The processor then proceeds with execution.
Control circuitry
Using clock inputs, the cc maintains proper sequence of events required to process a task. After
an instruction is fetched and decoded, the cc issues appropriate signals (internal and external to CPU)
for initiating proper processing action.
Signals:
. Data bus in (output) - indicates to external circuits that databus is in input mode
. Ready (input) - indicates that valid data is available on the databus
. Wait (output) - indicates CPU is in a wait_state
. Write (output) - ?. Active low
. Hold (input) - requests CPU to enter hold_state
. Hold acknowledge (output) - indicates ...
. Interrupt enable (output) - indicates content of interruptEn flipflop
. Interrupt request (input) - ...
. Reset (input) - when toggles to high, PC address is cleared to 0.
The interruptEn and holdAck flipflops are also reset.
Interrupts
An interrupt request will cause the control circuitry to temporarily
interrupt main program execution, jump to a special routine to service the interrupting device,
then automatically return to the main program.
A wait request is often issued by a memory or IO device that operates slower than the CPU. The cc
will idle the CPU until the memory or IO port device frees the WAIT line.
In principle, an interrupt is similar to a subroutine call, except that the jump is initiated
externally rather than by the program
Direct Memory Access
In ordinary IO operations, the processor supervises data transfer. Info to be placed in memory
is transferred from the input device to the processor, then from the processor to memory (and vice versa for output).
If a large quantity of data must be transferred, it's ideal to have the device interface with memory
directly. To achieve this, the processor must temporarily suspend its operation during such a transfer
to prevent conflict. This is achieved via a HOLD signal sent to CPU.
Instruction Cycle
Instruction fetch
- instruction retrieved from memory (address from PC) and stored in instruction register
- once fetched, pc incremented
Instruction decode
-
Instruction execution
-
A machine cycle is required each time the CPU accesses memory or an IO port.
Thus the duration of an instruction cycle consists of one machine cycle to fetch the instruction plus
n machine cycles for subsequent mem / IO accesses needed to accomplish instruction.
Stack pointer
Decremented when data pushed onto stack, incremented when popped (i.e. grows downward)
Registers
PC, SP, BC, DE, HL, WZ
Data bytes can be transferred from the internal bus (8bit) to a register via the
register-select multiplexer.
16 bit transfers can proceed between the register array and address buffer
Instruction register
During an instruction fetch, the first byte of an instruction is transferred to the
instruction register. This is in turn available to the instruction decoder.
Data bus buffer
Bidirectional
3-state
Used to isolate the CPU's internal bus from the external data bus
In output mode, internal bus content is loaded
In input mode, external bus content is loaded
Processor cycle
Instruction cycle
Time it takes to fetch and execute an instruction.
Every instruction cycle consists of one to five machine cycles.
Machine cycle
Time it takes for the CPU to access memory or an IO port.
Fetching an instruction takes one machine cycle per byte
Some instructions do not require additional machine cycles, while
others do, such as those that read/write memory or IO.
Each machine cycle consists of three to five states
Events with one machine cycle duration:
instruction fetch, memory read, memory write, stack read, stack write,
input, output, interrupt, halt
States
Smallest unit of processing activity.
Duration is interval between two successive rising edges (i.e. one clock period) of the
delta1 clock signal.
Exceptions to the duration are wait_state, hold_state, and halt_state. All three
depend on external events and thus are of indeterminate length.
Machine cycle identification
The processor identifies the machine cycle in progress by transmitting a status byte
during the first state of each machine cycle.
D7 - Indicates that databus will be used for memory read
D6 - Indicates address bus contains address of an input device and input data should
be placed on the databus when DBIN (databusIn) is active
D5 - Indicates CPU is in fetch cycle for first byte of instruction
D4 - Indicates address bus contains address of an output device and the databus will
contain the output data when WR (write) is active
D3 - Indicates acknowledge of HALT instruction
D2 - Indicates address bus holds the pushdown stack address from SP
D1 - When low, indicates operation in current machine cycle will be a memory write or
output function. When high, memory read or input.
D0 - Indicates acknowledge of interrupt request.
Machine cycle 76543210
------------- --------
Instruction fetch 10100010
Memory read 10000010
Memory write 00000000
Stack read 10000110
Stack write 00000100
Input read 01000010
Output write 00010000
Interrupt Ack 00100011
Halt Ack 10001010
Interrupt Ack while halt 00101011
State Transition Sequence
...
'''
from memory import *
from time import sleep
class CPU():
def __init__( self, memory ):
# Control signals (wip) ----------------------------------
self.halt = False # temp for now
# self.hold = False # input, facilitates DMA by peripherals
# IO
self.IO_RD = 0 # output
self.IO_WR = 0 # output
# Peripherals --------------------------------------
self.memory = memory
self.ioDevices = [ # index simulates port number
# terminal,
]
# Components ---------------------------------------
self.addressBus = None # 16bit. However only using it for IO so treat as 8bit
self.dataBus = None # 8bit, bidirectional
self.register_AF = Register() # accumulator & flags
self.register_BC = Register() # general purpose
self.register_DE = Register() # general purpose
self.register_HL = Register() # general purpose
self.register_SP = Register() # stack pointer
self.register_PC = Register() # program counter
# Helpers ------------------------------------------
self.nBits = 8
self.largestPositiveInt = 2 ** ( self.nBits - 1 ) - 1
self.negativeOne = 2 ** self.nBits - 1 # two's complement
self.flagALU_carry = 0
self.flagALU_parity = 0
self.flagALU_auxCarry = 0
self.flagALU_zero = 0
self.flagALU_sign = 0
self.instruction = None
# Instruction decode -------------------------------
self.instructionLookup = {
# Move, load, store ---
# MOV R1,R2 -> 01DDDSSS -> Move register to register
0b01000000 : ( self.MOV_R1R2, ( self.register_BC, self.register_BC, True, True ) ),
0b01000001 : ( self.MOV_R1R2, ( self.register_BC, self.register_BC, True, False ) ),
0b01000010 : ( self.MOV_R1R2, ( self.register_BC, self.register_DE, True, True ) ),
0b01000011 : ( self.MOV_R1R2, ( self.register_BC, self.register_DE, True, False ) ),
0b01000100 : ( self.MOV_R1R2, ( self.register_BC, self.register_HL, True, True ) ),
0b01000101 : ( self.MOV_R1R2, ( self.register_BC, self.register_HL, True, False ) ),
0b01000111 : ( self.MOV_R1R2, ( self.register_BC, self.register_AF, True, True ) ),
0b01001000 : ( self.MOV_R1R2, ( self.register_BC, self.register_BC, False, True ) ),
0b01001001 : ( self.MOV_R1R2, ( self.register_BC, self.register_BC, False, False ) ),
0b01001010 : ( self.MOV_R1R2, ( self.register_BC, self.register_DE, False, True ) ),
0b01001011 : ( self.MOV_R1R2, ( self.register_BC, self.register_DE, False, False ) ),
0b01001100 : ( self.MOV_R1R2, ( self.register_BC, self.register_HL, False, True ) ),
0b01001101 : ( self.MOV_R1R2, ( self.register_BC, self.register_HL, False, False ) ),
0b01001111 : ( self.MOV_R1R2, ( self.register_BC, self.register_AF, False, True ) ),
0b01010000 : ( self.MOV_R1R2, ( self.register_DE, self.register_BC, True, True ) ),
0b01010001 : ( self.MOV_R1R2, ( self.register_DE, self.register_BC, True, False ) ),
0b01010010 : ( self.MOV_R1R2, ( self.register_DE, self.register_DE, True, True ) ),
0b01010011 : ( self.MOV_R1R2, ( self.register_DE, self.register_DE, True, False ) ),
0b01010100 : ( self.MOV_R1R2, ( self.register_DE, self.register_HL, True, True ) ),
0b01010101 : ( self.MOV_R1R2, ( self.register_DE, self.register_HL, True, False ) ),
0b01010111 : ( self.MOV_R1R2, ( self.register_DE, self.register_AF, True, True ) ),
0b01011000 : ( self.MOV_R1R2, ( self.register_DE, self.register_BC, False, True ) ),
0b01011001 : ( self.MOV_R1R2, ( self.register_DE, self.register_BC, False, False ) ),
0b01011010 : ( self.MOV_R1R2, ( self.register_DE, self.register_DE, False, True ) ),
0b01011011 : ( self.MOV_R1R2, ( self.register_DE, self.register_DE, False, False ) ),
0b01011100 : ( self.MOV_R1R2, ( self.register_DE, self.register_HL, False, True ) ),
0b01011101 : ( self.MOV_R1R2, ( self.register_DE, self.register_HL, False, False ) ),
0b01011111 : ( self.MOV_R1R2, ( self.register_DE, self.register_AF, False, True ) ),
0b01100000 : ( self.MOV_R1R2, ( self.register_HL, self.register_BC, True, True ) ),
0b01100001 : ( self.MOV_R1R2, ( self.register_HL, self.register_BC, True, False ) ),
0b01100010 : ( self.MOV_R1R2, ( self.register_HL, self.register_DE, True, True ) ),
0b01100011 : ( self.MOV_R1R2, ( self.register_HL, self.register_DE, True, False ) ),
0b01100100 : ( self.MOV_R1R2, ( self.register_HL, self.register_HL, True, True ) ),
0b01100101 : ( self.MOV_R1R2, ( self.register_HL, self.register_HL, True, False ) ),
0b01100111 : ( self.MOV_R1R2, ( self.register_HL, self.register_AF, True, True ) ),
0b01101000 : ( self.MOV_R1R2, ( self.register_HL, self.register_BC, False, True ) ),
0b01101001 : ( self.MOV_R1R2, ( self.register_HL, self.register_BC, False, False ) ),
0b01101010 : ( self.MOV_R1R2, ( self.register_HL, self.register_DE, False, True ) ),
0b01101011 : ( self.MOV_R1R2, ( self.register_HL, self.register_DE, False, False ) ),
0b01101100 : ( self.MOV_R1R2, ( self.register_HL, self.register_HL, False, True ) ),
0b01101101 : ( self.MOV_R1R2, ( self.register_HL, self.register_HL, False, False ) ),
0b01101111 : ( self.MOV_R1R2, ( self.register_HL, self.register_AF, False, True ) ),
0b01111000 : ( self.MOV_R1R2, ( self.register_AF, self.register_BC, True, True ) ),
0b01111001 : ( self.MOV_R1R2, ( self.register_AF, self.register_BC, True, False ) ),
0b01111010 : ( self.MOV_R1R2, ( self.register_AF, self.register_DE, True, True ) ),
0b01111011 : ( self.MOV_R1R2, ( self.register_AF, self.register_DE, True, False ) ),
0b01111100 : ( self.MOV_R1R2, ( self.register_AF, self.register_HL, True, True ) ),
0b01111101 : ( self.MOV_R1R2, ( self.register_AF, self.register_HL, True, False ) ),
0b01111111 : ( self.MOV_R1R2, ( self.register_AF, self.register_AF, True, True ) ),
# MOV M,R -> 01110SSS -> Move register to memory
0b01110000 : ( self.MOV_MR, ( self.register_BC, True ) ),
0b01110001 : ( self.MOV_MR, ( self.register_BC, False ) ),
0b01110010 : ( self.MOV_MR, ( self.register_DE, True ) ),
0b01110011 : ( self.MOV_MR, ( self.register_DE, False ) ),
0b01110100 : ( self.MOV_MR, ( self.register_HL, True ) ),
0b01110101 : ( self.MOV_MR, ( self.register_HL, False ) ),
0b01110111 : ( self.MOV_MR, ( self.register_AF, True ) ),
# MOV R,M -> 01DDD110 -> Move memory to register
0b01000110 : ( self.MOV_RM, ( self.register_BC, True ) ),
0b01001110 : ( self.MOV_RM, ( self.register_BC, False ) ),
0b01010110 : ( self.MOV_RM, ( self.register_DE, True ) ),
0b01011110 : ( self.MOV_RM, ( self.register_DE, False ) ),
0b01100110 : ( self.MOV_RM, ( self.register_HL, True ) ),
0b01101110 : ( self.MOV_RM, ( self.register_HL, False ) ),
0b01111110 : ( self.MOV_RM, ( self.register_AF, True ) ),
# MVI R,data -> 00DDD110 -> Move immediate to register
0b00000110 : ( self.MVI_RData, ( self.register_BC, True ) ),
0b00001110 : ( self.MVI_RData, ( self.register_BC, False ) ),
0b00010110 : ( self.MVI_RData, ( self.register_DE, True ) ),
0b00011110 : ( self.MVI_RData, ( self.register_DE, False ) ),
0b00100110 : ( self.MVI_RData, ( self.register_HL, True ) ),
0b00101110 : ( self.MVI_RData, ( self.register_HL, False ) ),
0b00111110 : ( self.MVI_RData, ( self.register_AF, True ) ),
# MVI M,data -> 00110110 -> Move immediate to memory
0b00110110 : ( self.MVI_MData, () ),
# LXI BC,data16 -> 00000001 -> Load immediate to register pair B & C
0b00000001 : ( self.LXI_RData16, ( self.register_BC, ) ),
# LXI DE,data16 -> 00010001 -> Load immediate to register pair D & E
0b00010001 : ( self.LXI_RData16, ( self.register_DE, ) ),
# LXI HL,data16 -> 00100001 -> Load immediate to register pair H & L
0b00100001 : ( self.LXI_RData16, ( self.register_HL, ) ),
# LXI SP,data16 -> 00110001 -> Load immediate to stack pointer
0b00110001 : ( self.LXI_RData16, ( self.register_SP, ) ),
# LDA addr -> 00111010 -> Load A direct
0b00111010 : ( self.LDA_Addr, () ),
# STA addr -> 00110010 -> Store A direct
0b00110010 : ( self.STA_Addr, () ),
# LHLD addr -> 00101010 -> Load H & L direct
0b00101010 : ( self.LHLD_Addr, () ),
# SHLD addr -> 00100010 -> Store H & L direct
0b00100010 : ( self.SHLD_Addr, () ),
# LDAX BC -> 00001010 -> Load A indirect
# LDAX DE -> 00011010 -> Load A indirect
0b00001010 : ( self.LDAX_R, ( self.register_BC, ) ),
0b00011010 : ( self.LDAX_R, ( self.register_DE, ) ),
# STAX BC -> 00000010 -> Store A indirect
# STAX DE -> 00010010 -> Store A indirect
0b00000010 : ( self.STAX_R, ( self.register_BC, ) ),
0b00010010 : ( self.STAX_R, ( self.register_DE, ) ),
# XCHG -> 11101011 -> Exchange register pair D & E with H & L
0b11101011 : ( self.XCHG, () ),
# Stack ops ---
# PUSH BC -> 11000101 -> Push register pair B & C on stack
0b11000101 : ( self.PUSH_R, ( self.register_BC, ) ),
# PUSH DE -> 11010101 -> Push register pair D & E on stack
0b11010101 : ( self.PUSH_R, ( self.register_DE, ) ),
# PUSH HL -> 11100101 -> Push register pair H & L on stack
0b11100101 : ( self.PUSH_R, ( self.register_HL, ) ),
# PUSH PSW -> 11110101 -> Push A and Flags on stack
0b11110101 : ( self.PUSH_PSW, () ),
# POP BC -> 11000001 -> Pop top of stack onto register pair B & C
0b11000001 : ( self.POP_R, ( self.register_BC, ) ),
# POP DE -> 11010001 -> Pop top of stack onto register pair D & E
0b11010001 : ( self.POP_R, ( self.register_DE, ) ),
# POP HL -> 11100001 -> Pop top of stack onto register pair H & L
0b11100001 : ( self.POP_R, ( self.register_HL, ) ),
# POP PSW -> 11110001 -> Pop top of stack onto A and Flags
0b11110001 : ( self.POP_PSW, () ),
# XTHL -> 11100011 -> Exchange H & L with contents of location specified by stack pointer
0b11100011 : ( self.XTHL, () ),
# SPHL -> 11111001 -> H & L to stack pointer
0b11111001 : ( self.SPHL, () ),
# Jump ---
# JMP addr -> 11000011 -> Jump unconditional
0b11000011 : ( self.JMP, () ),
# JNZ addr -> 11000010 -> Jump on not zero
0b11000010 : ( self.JNZ, () ),
# JZ addr -> 11001010 -> Jump on zero
0b11001010 : ( self.JZ, () ),
# JNC addr -> 11010010 -> Jump on no carry
0b11010010 : ( self.JNC, () ),
# JC addr -> 11011010 -> Jump on carry
0b11011010 : ( self.JC, () ),
# JPO addr -> 11100010 -> Jump on parity odd
0b11100010 : ( self.JPO, () ),
# JPE addr -> 11101010 -> Jump on parity even
0b11101010 : ( self.JPE, () ),
# JP addr -> 11110010 -> Jump on positive
0b11110010 : ( self.JP, () ),
# JM addr -> 11111010 -> Jump on minus
0b11111010 : ( self.JM, () ),
# PCHL -> 11101001 -> H & L to program counter
0b11101001 : ( self.PCHL, () ),
# Call ---
# CALL addr -> 11001101 -> Call unconditional
0b11001101 : ( self.CALL, () ),
# CNZ addr -> 11000100 -> Call on not zero
0b11000100 : ( self.CNZ, () ),
# CZ addr -> 11001100 -> Call on zero
0b11001100 : ( self.CZ, () ),
# CNC addr -> 11010100 -> Call on no carry
0b11010100 : ( self.CNC, () ),
# CC addr -> 11011100 -> Call on carry
0b11011100 : ( self.CC, () ),
# CPO addr -> 11100100 -> Call on parity odd
0b11100100 : ( self.CPO, () ),
# CPE addr -> 11101100 -> Call on parity even
0b11101100 : ( self.CPE, () ),
# CP addr -> 11110100 -> Call on positive
0b11110100 : ( self.CP, () ),
# CM addr -> 11111100 -> Call on minus
0b11111100 : ( self.CM, () ),
# Return ---
# RET -> 11001001 -> Return unconditional
0b11001001 : ( self.RET, () ),
# RNZ -> 11000000 -> Return on not zero
0b11000000 : ( self.RNZ, () ),
# RZ -> 11001000 -> Return on zero
0b11001000 : ( self.RZ, () ),
# RNC -> 11010000 -> Return on no carry
0b11010000 : ( self.RNC, () ),
# RC -> 11011000 -> Return on carry
0b11011000 : ( self.RC, () ),
# RPO -> 11100000 -> Return on parity odd
0b11100000 : ( self.RPO, () ),
# RPE -> 11101000 -> Return on parity even
0b11101000 : ( self.RPE, () ),
# RP -> 11110000 -> Return on positive
0b11110000 : ( self.RP, () ),
# RM -> 11111000 -> Return on minus
0b11111000 : ( self.RM, () ),
# Restart ---
# RST -> 11NNN111 -> Restart
0b11000111 : ( self.RST, ( 0, ) ),
0b11001111 : ( self.RST, ( 1, ) ),
0b11010111 : ( self.RST, ( 2, ) ),
0b11011111 : ( self.RST, ( 3, ) ),
0b11100111 : ( self.RST, ( 4, ) ),
0b11101111 : ( self.RST, ( 5, ) ),
0b11110111 : ( self.RST, ( 6, ) ),
0b11111111 : ( self.RST, ( 7, ) ),
# Increment and decrement ---
# INR R -> 00DDD100 -> Increment register
0b00000100 : ( self.INR_R, ( self.register_BC, True ) ),
0b00001100 : ( self.INR_R, ( self.register_BC, False ) ),
0b00010100 : ( self.INR_R, ( self.register_DE, True ) ),
0b00011100 : ( self.INR_R, ( self.register_DE, False ) ),
0b00100100 : ( self.INR_R, ( self.register_HL, True ) ),
0b00101100 : ( self.INR_R, ( self.register_HL, False ) ),
0b00111100 : ( self.INR_R, ( self.register_AF, True ) ),
# INR M -> 00110100 -> Increment memory
0b00110100 : ( self.INR_M, () ),
# INX BC -> 00000011 -> Increment B & C registers
0b00000011 : ( self.INX_R, ( self.register_BC, ) ),
# INX DE -> 00010011 -> Increment D & E registers
0b00010011 : ( self.INX_R, ( self.register_DE, ) ),
# INX HL -> 00100011 -> Increment H & L registers
0b00100011 : ( self.INX_R, ( self.register_HL, ) ),
# INX SP -> 00110011 -> Increment stack pointer
0b00110011 : ( self.INX_R, ( self.register_SP, ) ),
# DCR R -> 00DDD101 -> Decrement register
0b00000101 : ( self.DCR_R, ( self.register_BC, True ) ),
0b00001101 : ( self.DCR_R, ( self.register_BC, False ) ),
0b00010101 : ( self.DCR_R, ( self.register_DE, True ) ),
0b00011101 : ( self.DCR_R, ( self.register_DE, False ) ),
0b00100101 : ( self.DCR_R, ( self.register_HL, True ) ),
0b00101101 : ( self.DCR_R, ( self.register_HL, False ) ),
0b00111101 : ( self.DCR_R, ( self.register_AF, True ) ),
# DCR M -> 00110101 -> Decrement memory
0b00110101 : ( self.DCR_M, () ),
# DCX BC -> 00001011 -> Decrement B & C registers
0b00001011 : ( self.DCX_R, ( self.register_BC, ) ),
# DCX DE -> 00011011 -> Decrement D & E registers
0b00011011 : ( self.DCX_R, ( self.register_DE, ) ),
# DCX HL -> 00101011 -> Decrement H & L registers
0b00101011 : ( self.DCX_R, ( self.register_HL, ) ),
# DCX SP -> 00111011 -> Decrement stack pointer
0b00111011 : ( self.DCX_R, ( self.register_SP, ) ),
# Add ---
# ADD R -> 10000SSS -> Add register to A
0b10000000 : ( self.ADD_R, ( self.register_BC, True ) ),
0b10000001 : ( self.ADD_R, ( self.register_BC, False ) ),
0b10000010 : ( self.ADD_R, ( self.register_DE, True ) ),
0b10000011 : ( self.ADD_R, ( self.register_DE, False ) ),
0b10000100 : ( self.ADD_R, ( self.register_HL, True ) ),
0b10000101 : ( self.ADD_R, ( self.register_HL, False ) ),
0b10000111 : ( self.ADD_R, ( self.register_AF, True ) ),
# ADD M -> 10000110 -> Add memory to A
0b10000110 : ( self.ADD_M, () ),
# ADI data -> 11000110 -> Add immediate to A
0b11000110 : ( self.ADI_Data, () ),
# ADC R -> 10001SSS -> Add register to A with carry
0b10001000 : ( self.ADC_R, ( self.register_BC, True ) ),
0b10001001 : ( self.ADC_R, ( self.register_BC, False ) ),
0b10001010 : ( self.ADC_R, ( self.register_DE, True ) ),
0b10001011 : ( self.ADC_R, ( self.register_DE, False ) ),
0b10001100 : ( self.ADC_R, ( self.register_HL, True ) ),
0b10001101 : ( self.ADC_R, ( self.register_HL, False ) ),
0b10001111 : ( self.ADC_R, ( self.register_AF, True ) ),
# ADC M -> 10001110 -> Add memory to A with carry
0b10001110 : ( self.ADC_M, () ),
# ACI data -> 11001110 -> Add immediate to A with carry
0b11001110 : ( self.ACI_Data, () ),
# DAD BC -> 00001001 -> Add B & C to H & L
0b00001001 : ( self.DAD_R, ( self.register_BC, ) ),
# DAD DE -> 00011001 -> Add D & E to H & L
0b00011001 : ( self.DAD_R, ( self.register_DE, ) ),
# DAD HL -> 00101001 -> Add H & L to H & L
0b00101001 : ( self.DAD_R, ( self.register_HL, ) ),
# DAD SP -> 00111001 -> Add stack pointer to H & L
0b00111001 : ( self.DAD_R, ( self.register_SP, ) ),
# Subtract ---
# SUB R -> 10010SSS -> Subtract register from A
0b10010000 : ( self.SUB_R, ( self.register_BC, True ) ),
0b10010001 : ( self.SUB_R, ( self.register_BC, False ) ),
0b10010010 : ( self.SUB_R, ( self.register_DE, True ) ),
0b10010011 : ( self.SUB_R, ( self.register_DE, False ) ),
0b10010100 : ( self.SUB_R, ( self.register_HL, True ) ),
0b10010101 : ( self.SUB_R, ( self.register_HL, False ) ),
0b10010111 : ( self.SUB_R, ( self.register_AF, True ) ),
# SUB M -> 10010110 -> Subtract memory from A
0b10010110 : ( self.SUB_M, () ),
# SUI data -> 11010110 -> Subtract immediate from A
0b11010110 : ( self.SUI_Data, () ),
# SBB R -> 10011SSS -> Subtract register from A with borrow
0b10011000 : ( self.SBB_R, ( self.register_BC, True ) ),
0b10011001 : ( self.SBB_R, ( self.register_BC, False ) ),
0b10011010 : ( self.SBB_R, ( self.register_DE, True ) ),
0b10011011 : ( self.SBB_R, ( self.register_DE, False ) ),
0b10011100 : ( self.SBB_R, ( self.register_HL, True ) ),
0b10011101 : ( self.SBB_R, ( self.register_HL, False ) ),
0b10011111 : ( self.SBB_R, ( self.register_AF, True ) ),
# SBB M -> 10011110 -> Subtract memory from A with borrow
0b10011110 : ( self.SBB_M, () ),
# SBI data -> 11011110 -> Subtract immediate from A with borrow
0b11011110 : ( self.SBI_Data, () ),
# Logical ---
# ANA R -> 10100SSS -> And register with A
0b10100000 : ( self.ANA_R, ( self.register_BC, True ) ),
0b10100001 : ( self.ANA_R, ( self.register_BC, False ) ),
0b10100010 : ( self.ANA_R, ( self.register_DE, True ) ),
0b10100011 : ( self.ANA_R, ( self.register_DE, False ) ),
0b10100100 : ( self.ANA_R, ( self.register_HL, True ) ),
0b10100101 : ( self.ANA_R, ( self.register_HL, False ) ),
0b10100111 : ( self.ANA_R, ( self.register_AF, True ) ),
# ANA M -> 10100110 -> And memory with A
0b10100110 : ( self.ANA_M, () ),
# ANI data -> 11100110 -> And immediate with A
0b11100110 : ( self.ANI_Data, () ),
# XRA R -> 10101SSS -> Exclusive or register with A
0b10101000 : ( self.XRA_R, ( self.register_BC, True ) ),
0b10101001 : ( self.XRA_R, ( self.register_BC, False ) ),
0b10101010 : ( self.XRA_R, ( self.register_DE, True ) ),
0b10101011 : ( self.XRA_R, ( self.register_DE, False ) ),
0b10101100 : ( self.XRA_R, ( self.register_HL, True ) ),
0b10101101 : ( self.XRA_R, ( self.register_HL, False ) ),
0b10101111 : ( self.XRA_R, ( self.register_AF, True ) ),
# XRA M -> 10101110 -> Exclusive or memory with A
0b10101110 : ( self.XRA_M, () ),
# XRI data -> 11101110 -> Exclusive or immediate with A
0b11101110 : ( self.XRI_Data, () ),
# ORA R -> 10110SSS -> Or register with A
0b10110000 : ( self.ORA_R, ( self.register_BC, True ) ),
0b10110001 : ( self.ORA_R, ( self.register_BC, False ) ),
0b10110010 : ( self.ORA_R, ( self.register_DE, True ) ),
0b10110011 : ( self.ORA_R, ( self.register_DE, False ) ),
0b10110100 : ( self.ORA_R, ( self.register_HL, True ) ),
0b10110101 : ( self.ORA_R, ( self.register_HL, False ) ),
0b10110111 : ( self.ORA_R, ( self.register_AF, True ) ),
# ORA M -> 10110110 -> Or memory with A
0b10110110 : ( self.ORA_M, () ),
# ORI data -> 11110110 -> Or immediate with A
0b11110110 : ( self.ORI_Data, () ),
# CMP R -> 10111SSS -> Compare register with A
0b10111000 : ( self.CMP_R, ( self.register_BC, True ) ),
0b10111001 : ( self.CMP_R, ( self.register_BC, False ) ),
0b10111010 : ( self.CMP_R, ( self.register_DE, True ) ),
0b10111011 : ( self.CMP_R, ( self.register_DE, False ) ),
0b10111100 : ( self.CMP_R, ( self.register_HL, True ) ),
0b10111101 : ( self.CMP_R, ( self.register_HL, False ) ),
0b10111111 : ( self.CMP_R, ( self.register_AF, True ) ),
# CMP M -> 10111110 -> Compare memory with A
0b10111110 : ( self.CMP_M, () ),
# CPI data -> 11111110 -> Compare immediate with A
0b11111110 : ( self.CPI_Data, () ),
# Rotate ---
# RLC -> 00000111 -> Rotate A left
0b00000111 : ( self.RLC, () ),
# RRC -> 00001111 -> Rotate A right
0b00001111 : ( self.RRC, () ),
# RAL -> 00010111 -> Rotate A left through carry
0b00010111 : ( self.RAL, () ),
# RAR -> 00011111 -> Rotate A right through carry
0b00011111 : ( self.RAR, () ),
# Specials ---
# CMA -> 00101111 -> Complement A
0b00101111 : ( self.CMA, () ),
# CMC -> 00111111 -> Complement carry
0b00111111 : ( self.CMC, () ),
# STC -> 00110111 -> Set carry
0b00110111 : ( self.STC, () ),
# DAA -> 00100111 -> Decimal adjust A
0b00100111 : ( self.DAA, () ),
# Input / Output ---
# IN port -> 11011011 -> Input
0b11011011 : ( self.IN, () ),
# OUT port -> 11010011 -> Output
0b11010011 : ( self.OUT, () ),
# Control ---
# EI -> 11111011 -> Enable interrupts
0b11111011 : ( self.EI, () ),
# DI -> 11110011 -> Disable interrupt
0b11110011 : ( self.DI, () ),
# HLT -> 01110110 -> Halt
0b01110110 : ( self.HLT, () ),
# NOP -> 00000000 -> No operation
0b00000000 : ( self.NOP, () ),
}
# Serial simulation ----------------------------------------
def receive( self ): # IN command
# Bypass need for UART and get data directly from IO device
ioDevice = self.ioDevices[ self.addressBus ]
data = ioDevice.transmit()
return data
def transmit( self, data ): # OUT command
# Bypass need for UART and send data directly to IO device
ioDevice = self.ioDevices[ self.addressBus ]
ioDevice.receive( data )
def jumpToISR( self, loc ):
# Simulate interrupt handling
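# e.g. jumpToISR( 2 ) pushes the current PC onto the stack and jumps to 8 * 2 = 0x0010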
self.RST( loc )
# Helpers --------------------------------------------------
def toWord( self, lo, hi ):
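# e.g. toWord( 0x34, 0x12 ) -> 0x1234 (8080 is little-endian: low byte fetched first)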
return ( hi << 8 ) | lo
def getUpperByte( self, x ):
return ( x >> 8 ) & 0xff
def getLowerByte( self, x ):
return x & 0xff
def read_A( self ):
return self.register_AF.readUpperByte()
def write_A( self, value ):
self.register_AF.writeUpperByte( value )
def read_M( self, address ):
return self.memory[ address ]
def write_M( self, address, value ):
self.memory[ address ] = value
def toBin( self, x ):
return bin( x )[ 2 : ].zfill( self.nBits )
def toInt( self, x ):
return int( x, 2 )
def getParity( self, x ):
# modulo 2 sum of bits
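# e.g. getParity( 0b01100001 ) -> 1 (three bits set); the parity flag is set only when this returns 0 (even parity)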
return sum( map( int, self.toBin( x ) ) ) % 2
def negate( self, x ):
# two's complement
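# e.g. with nBits = 8: negate( 1 ) -> 0xFF, negate( 0xFF ) -> 1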
if x == 0 :
return x
else:
return ( abs( x ) ^ self.negativeOne ) + 1
def add( self, a, b, c = 0 ):
z = a + b + c
# Update carry flag
if z > self.negativeOne:
self.flagALU_carry = 1
else:
self.flagALU_carry = 0
z &= self.negativeOne # discard overflow bits
# Update parity, zero, sign flags
self.updateALUFlags_PZS( z )
return z
def sub( self, a, b, c = 0 ):
z = a + self.negate( b + c )
# Update carry flag
# https://retrocomputing.stackexchange.com/a/5956/
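# e.g. sub( 0x00, 0x01 ) -> 0xFF with the carry flag set, indicating a borrow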
if ( b + c ) > a:
self.flagALU_carry = 1
else:
self.flagALU_carry = 0
z &= self.negativeOne # discard overflow bits
# Update parity, zero, sign flags
self.updateALUFlags_PZS( z )
return z
def and_( self, a, b ):
z = a & b
# Update carry flag
self.flagALU_carry = 0
# Update parity, zero, sign flags
self.updateALUFlags_PZS( z )
return z
def or_( self, a, b ):
z = a | b
# Update carry flag
self.flagALU_carry = 0
# Update parity, zero, sign flags
self.updateALUFlags_PZS( z )
return z
def xor_( self, a, b ):
z = a ^ b
# Update carry flag
self.flagALU_carry = 0
# Update parity, zero, sign flags
self.updateALUFlags_PZS( z )
return z
def skip2Bytes( self ):
self.fetchInstruction()
self.fetchInstruction()
# Flags ----------------------------------------------------
def genByteFromALUFlags( self ):
# SZ0A0P1C (Order as seen in pg.4-13 of 8080 User Manual)
b = self.flagALU_carry
b |= 1 << 1 # bit 1 of the 8080 flag byte is always 1 (the constant '1' in SZ0A0P1C)
b |= self.flagALU_parity << 2
b |= self.flagALU_auxCarry << 4
b |= self.flagALU_zero << 6
b |= self.flagALU_sign << 7
return b
def setALUFlagsFromByte( self, b ):
self.flagALU_carry = b & 1
self.flagALU_parity = ( b >> 2 ) & 1
self.flagALU_auxCarry = ( b >> 4 ) & 1
self.flagALU_zero = ( b >> 6 ) & 1
self.flagALU_sign = ( b >> 7 ) & 1
def updateALUFlags_Register( self ):
self.register_AF.writeLowerByte( self.genByteFromALUFlags() )
def updateALUFlags_Variables( self ):
self.setALUFlagsFromByte( self.register_AF.readLowerByte() )
def updateALUFlags_PZS( self, value ):
# Called after arithmetic or logical operation performed by ALU
# i.e. flags set according to value ALU outputs
self.flagALU_parity = 0
self.flagALU_zero = 0
self.flagALU_sign = 0
if self.getParity( value ) == 0:
self.flagALU_parity = 1
if value == 0:
self.flagALU_zero = 1
if value > self.largestPositiveInt: # two's complement
self.flagALU_sign = 1
self.updateALUFlags_Register()
# Run ------------------------------------------------------
def run( self ):
while not self.halt:
self.fetchInstruction()
self.executeInstruction()
print( '8080 has halted' )
def step( self ):
self.fetchInstruction()
self.executeInstruction()
# Fetch Instruction ----------------------------------------
def fetchInstruction( self ):
instructionAddress = self.register_PC.read()
self.register_PC.write( instructionAddress + 1 ) # increment
self.instruction = self.memory[ instructionAddress ]
# print( instructionAddress, self.instruction )
return self.instruction
# Execute Instruction --------------------------------------
def executeInstruction( self ):
func, args = self.instructionLookup[ self.instruction ]
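# e.g. opcode 0b01000111 (MOV B,A) dispatches to MOV_R1R2( self.register_BC, self.register_AF, True, True )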
# print( '>', func.__name__ )
# print( func, args )
func( *args )
# Move, load, store ---
# MOV R1,R2 -> 01DDDSSS -> 1 -> Move register to register
def MOV_R1R2( self, R1, R2, R1_upper, R2_upper ):
if R1_upper:
if R2_upper:
R1.writeUpperByte( R2.readUpperByte() )
else:
R1.writeUpperByte( R2.readLowerByte() )
else:
if R2_upper:
R1.writeLowerByte( R2.readUpperByte() )
else:
R1.writeLowerByte( R2.readLowerByte() )
# MOV M,R -> 01110SSS -> 2 -> Move register to memory
def MOV_MR( self, R, R_upper ):
address = self.register_HL.read()
if R_upper:
self.write_M( address, R.readUpperByte() )
else:
self.write_M( address, R.readLowerByte() )
# MOV R,M -> 01DDD110 -> 2 -> Move memory to register
def MOV_RM( self, R, R_upper ):
address = self.register_HL.read()
if R_upper:
R.writeUpperByte( self.read_M( address ) )
else:
R.writeLowerByte( self.read_M( address ) )
# MVI R,data -> 00DDD110 -> 2 -> Move immediate to register
def MVI_RData( self, R, R_upper ):
byte2 = self.fetchInstruction()
if R_upper:
R.writeUpperByte( byte2 )
else:
R.writeLowerByte( byte2 )
# MVI M,data -> 00110110 -> 3 -> Move immediate to memory
def MVI_MData( self ):
byte2 = self.fetchInstruction()
address = self.register_HL.read()
self.write_M( address, byte2 )
# LXI BC,data16 -> 00000001 -> 3 -> Load immediate to register pair B & C
# LXI DE,data16 -> 00010001 -> 3 -> Load immediate to register pair D & E
# LXI HL,data16 -> 00100001 -> 3 -> Load immediate to register pair H & L
# LXI SP,data16 -> 00110001 -> 3 -> Load immediate to stack pointer
def LXI_RData16( self, R ):
byte2 = self.fetchInstruction()
byte3 = self.fetchInstruction()
R.writeLowerByte( byte2 )
R.writeUpperByte( byte3 )
# LDA addr -> 00111010 -> 4 -> Load A direct
def LDA_Addr( self ):
byte2 = self.fetchInstruction()
byte3 = self.fetchInstruction()
address = self.toWord( byte2, byte3 )
self.write_A( self.read_M( address ) )
# STA addr -> 00110010 -> 4 -> Store A direct
def STA_Addr( self ):
byte2 = self.fetchInstruction()
byte3 = self.fetchInstruction()
address = self.toWord( byte2, byte3 )
self.write_M( address, self.read_A() )
# LHLD addr -> 00101010 -> 5 -> Load H & L direct
def LHLD_Addr( self ):
byte2 = self.fetchInstruction()
byte3 = self.fetchInstruction()
address = self.toWord( byte2, byte3 )
self.register_HL.writeLowerByte( self.read_M( address ) )
address += 1
self.register_HL.writeUpperByte( self.read_M( address ) )
# SHLD addr -> 00100010 -> 5 -> Store H & L direct
def SHLD_Addr( self ):
byte2 = self.fetchInstruction()
byte3 = self.fetchInstruction()
address = self.toWord( byte2, byte3 )
self.write_M( address, self.register_HL.readLowerByte() )
address += 1
self.write_M( address, self.register_HL.readUpperByte() )
# LDAX BC -> 00001010 -> 2 -> Load A indirect
# LDAX DE -> 00011010 -> 2 -> Load A indirect
def LDAX_R( self, R ):
address = R.read()
self.write_A( self.read_M( address ) )
# STAX BC -> 00000010 -> 2 -> Store A indirect
# STAX DE -> 00010010 -> 2 -> Store A indirect
def STAX_R( self, R ):
address = R.read()
self.write_M( address, self.read_A() )
# XCHG -> 11101011 -> 1 -> Exchange register pair D & E with H & L
def XCHG( self ):
temp = self.register_HL.read()
self.register_HL.write( self.register_DE.read() )
self.register_DE.write( temp )
# Stack ops ---
# PUSH BC -> 11000101 -> 3 -> Push register pair B & C on stack
# PUSH DE -> 11010101 -> 3 -> Push register pair D & E on stack
# PUSH HL -> 11100101 -> 3 -> Push register pair H & L on stack
def PUSH_R( self, R ):
SP = self.register_SP.read()
self.write_M( SP - 1, R.readUpperByte() )
self.write_M( SP - 2, R.readLowerByte() )
self.register_SP.write( SP - 2 )
# PUSH PSW -> 11110101 -> 3 -> Push A and Flags on stack
def PUSH_PSW( self ):
SP = self.register_SP.read()
self.write_M( SP - 1, self.register_AF.readUpperByte() )
self.write_M( SP - 2, self.register_AF.readLowerByte() )
self.register_SP.write( SP - 2 )
# POP BC -> 11000001 -> 3 -> Pop top of stack onto register pair B & C
# POP DE -> 11010001 -> 3 -> Pop top of stack onto register pair D & E
# POP HL -> 11100001 -> 3 -> Pop top of stack onto register pair H & L
def POP_R( self, R ):
SP = self.register_SP.read()
R.writeLowerByte( self.read_M( SP ) )
R.writeUpperByte( self.read_M( SP + 1 ) )
self.register_SP.write( SP + 2 )
# POP PSW -> 11110001 -> 3 -> Pop top of stack onto A and Flags
def POP_PSW( self ):
SP = self.register_SP.read()
self.register_AF.writeLowerByte( self.read_M( SP ) )
self.register_AF.writeUpperByte( self.read_M( SP + 1 ) )
self.register_SP.write( SP + 2 )
self.updateALUFlags_Variables()
# XTHL -> 11100011 -> 5 -> Exchange H & L with contents of location specified by stack pointer
def XTHL( self ):
SP = self.register_SP.read()
temp_lo = self.read_M( SP )
temp_hi = self.read_M( SP + 1 )
self.write_M( SP, self.register_HL.readLowerByte() )
self.write_M( SP + 1, self.register_HL.readUpperByte() )
self.register_HL.writeLowerByte( temp_lo )
self.register_HL.writeUpperByte( temp_hi )
# SPHL -> 11111001 -> 1 -> H & L to stack pointer
def SPHL( self ):
self.register_SP.write( self.register_HL.read() )
# Jump ---
# JMP addr -> 11000011 -> 3 -> Jump unconditional
def JMP( self ):
byte2 = self.fetchInstruction()
byte3 = self.fetchInstruction()
address = self.toWord( byte2, byte3 )
self.register_PC.write( address )
# JNZ addr -> 11000010 -> 3 -> Jump on not zero
def JNZ( self ):
if self.flagALU_zero == 0: self.JMP()
else: self.skip2Bytes()
# JZ addr -> 11001010 -> 3 -> Jump on zero
def JZ( self ):
if self.flagALU_zero == 1: self.JMP()
else: self.skip2Bytes()
# JNC addr -> 11010010 -> 3 -> Jump on no carry
def JNC( self ):
if self.flagALU_carry == 0: self.JMP()
else: self.skip2Bytes()
# JC addr -> 11011010 -> 3 -> Jump on carry
def JC( self ):
if self.flagALU_carry == 1: self.JMP()
else: self.skip2Bytes()
# JPO addr -> 11100010 -> 3 -> Jump on parity odd
def JPO( self ):
if self.flagALU_parity == 0: self.JMP()
else: self.skip2Bytes()
# JPE addr -> 11101010 -> 3 -> Jump on parity even
def JPE( self ):
if self.flagALU_parity == 1: self.JMP()
else: self.skip2Bytes()
# JP addr -> 11110010 -> 3 -> Jump on positive
def JP( self ):
if self.flagALU_sign == 0: self.JMP()
else: self.skip2Bytes()
# JM addr -> 11111010 -> 3 -> Jump on minus
def JM( self ):
if self.flagALU_sign == 1: self.JMP()
else: self.skip2Bytes()
# PCHL -> 11101001 -> 1 -> H & L to program counter
def PCHL( self ):
self.register_PC.write( self.register_HL.read() )
# Call ---
# CALL addr -> 11001101 -> 5 -> Call unconditional
def CALL( self ):
byte2 = self.fetchInstruction()
byte3 = self.fetchInstruction()
# Save return address onto stack
SP = self.register_SP.read()
self.write_M( SP - 1, self.register_PC.readUpperByte() )
self.write_M( SP - 2, self.register_PC.readLowerByte() )
self.register_SP.write( SP - 2 )
# Goto called address
address = self.toWord( byte2, byte3 )
self.register_PC.write( address )
# CNZ addr -> 11000100 -> 3/5 -> Call on not zero
def CNZ( self ):
if self.flagALU_zero == 0: self.CALL()
else: self.skip2Bytes()
# CZ addr -> 11001100 -> 3/5 -> Call on zero
def CZ( self ):
if self.flagALU_zero == 1: self.CALL()
else: self.skip2Bytes()
# CNC addr -> 11010100 -> 3/5 -> Call on no carry
def CNC( self ):
if self.flagALU_carry == 0: self.CALL()
else: self.skip2Bytes()
# CC addr -> 11011100 -> 3/5 -> Call on carry
def CC( self ):
if self.flagALU_carry == 1: self.CALL()
else: self.skip2Bytes()
# CPO addr -> 11100100 -> 3/5 -> Call on parity odd
def CPO( self ):
if self.flagALU_parity == 0: self.CALL()
else: self.skip2Bytes()
# CPE addr -> 11101100 -> 3/5 -> Call on parity even
def CPE( self ):
if self.flagALU_parity == 1: self.CALL()
else: self.skip2Bytes()
# CP addr -> 11110100 -> 3/5 -> Call on positive
def CP( self ):
if self.flagALU_sign == 0: self.CALL()
else: self.skip2Bytes()
# CM addr -> 11111100 -> 3/5 -> Call on minus
def CM( self ):
if self.flagALU_sign == 1: self.CALL()
else: self.skip2Bytes()
# Return ---
# RET -> 11001001 -> 3 -> Return unconditional
def RET( self ):
# Goto return address saved on stack
SP = self.register_SP.read()
self.register_PC.writeLowerByte( self.read_M( SP ) )
self.register_PC.writeUpperByte( self.read_M( SP + 1 ) )
self.register_SP.write( SP + 2 )
# RNZ -> 11000000 -> 1/3 -> Return on not zero
def RNZ( self ):
if self.flagALU_zero == 0: self.RET()
# RZ -> 11001000 -> 1/3 -> Return on zero
def RZ( self ):
if self.flagALU_zero == 1: self.RET()
# RNC -> 11010000 -> 1/3 -> Return on no carry
def RNC( self ):
if self.flagALU_carry == 0: self.RET()
# RC -> 11011000 -> 1/3 -> Return on carry
def RC( self ):
if self.flagALU_carry == 1: self.RET()
# RPO -> 11100000 -> 1/3 -> Return on parity odd
def RPO( self ):
if self.flagALU_parity == 0: self.RET()
# RPE -> 11101000 -> 1/3 -> Return on parity even
def RPE( self ):
if self.flagALU_parity == 1: self.RET()
# RP -> 11110000 -> 1/3 -> Return on positive
def RP( self ):
if self.flagALU_sign == 0: self.RET()
# RM -> 11111000 -> 1/3 -> Return on minus
def RM( self ):
if self.flagALU_sign == 1: self.RET()
# Restart ---
# RST -> 11NNN111 -> 3 -> Restart
def RST( self, NNN ):
# Save return address onto stack
SP = self.register_SP.read()
self.write_M( SP - 1, self.register_PC.readUpperByte() )
self.write_M( SP - 2, self.register_PC.readLowerByte() )
self.register_SP.write( SP - 2 )
# Goto address 8 * NNN
self.register_PC.write( 8 * NNN )
# Increment and decrement ---
# INR R -> 00DDD100 -> 1 -> Increment register
def INR_R( self, R, R_upper ):
savedCarry = self.flagALU_carry # according to docs, all condition flags affected except carry
if R_upper:
z = self.add( R.readUpperByte(), 1 )
R.writeUpperByte( z )
else:
z = self.add( R.readLowerByte(), 1 )
R.writeLowerByte( z )
self.flagALU_carry = savedCarry
self.updateALUFlags_Register()
# INR M -> 00110100 -> 3 -> Increment memory
def INR_M( self ):
savedCarry = self.flagALU_carry # according to docs, all condition flags affected except carry
address = self.register_HL.read()
z = self.add( self.read_M( address ), 1 )
self.write_M( address, z )
self.flagALU_carry = savedCarry
self.updateALUFlags_Register()
# INX BC -> 00000011 -> 1 -> Increment B & C register pair
# INX DE -> 00010011 -> 1 -> Increment D & E register pair
# INX HL -> 00100011 -> 1 -> Increment H & L register pair
# INX SP -> 00110011 -> 1 -> Increment stack pointer
def INX_R( self, R ):
z = R.read() + 1 # according to docs, no ALU flags are affected
z &= 0xffff # discard overflow bits
R.write( z )
# DCR R -> 00DDD101 -> 1 -> Decrement register
def DCR_R( self, R, R_upper ):
savedCarry = self.flagALU_carry # according to docs, all condition flags affected except carry
if R_upper:
z = self.sub( R.readUpperByte(), 1 )
R.writeUpperByte( z )
else:
z = self.sub( R.readLowerByte(), 1 )
R.writeLowerByte( z )
self.flagALU_carry = savedCarry
self.updateALUFlags_Register()
# DCR M -> 00110101 -> 3 -> Decrement memory
def DCR_M( self ):
savedCarry = self.flagALU_carry # according to docs, all condition flags affected except carry
address = self.register_HL.read()
z = self.sub( self.read_M( address ), 1 )
self.write_M( address, z )
self.flagALU_carry = savedCarry
self.updateALUFlags_Register()
# DCX BC -> 00001011 -> 1 -> Decrement B & C register pair
# DCX DE -> 00011011 -> 1 -> Decrement D & E register pair
# DCX HL -> 00101011 -> 1 -> Decrement H & L register pair
# DCX SP -> 00111011 -> 1 -> Decrement stack pointer
def DCX_R( self, R ):
curVal = R.read()
# according to docs, no ALU flags are affected
if curVal == 0:
z = 0xffff # two's complement negative one
else:
z = curVal - 1
R.write( z )
# Add ---
# ADD R -> 10000SSS -> 1 -> Add register to A
def ADD_R( self, R, R_upper ):
curVal = self.read_A()
if R_upper:
z = self.add( curVal, R.readUpperByte() )
else:
z = self.add( curVal, R.readLowerByte() )
self.write_A( z )
# ADD M -> 10000110 -> 2 -> Add memory to A
def ADD_M( self ):
curVal = self.read_A()
address = self.register_HL.read()
z = self.add( curVal, self.read_M( address ) )
self.write_A( z )
# ADI data -> 11000110 -> 2 -> Add immediate to A
def ADI_Data( self ):
byte2 = self.fetchInstruction()
curVal = self.read_A()
z = self.add( curVal, byte2 )
self.write_A( z )
# ADC R -> 10001SSS -> 1 -> Add register to A with carry
def ADC_R( self, R, R_upper ):
curVal = self.read_A()
if R_upper:
z = self.add( curVal, R.readUpperByte(), self.flagALU_carry )
else:
z = self.add( curVal, R.readLowerByte(), self.flagALU_carry )
self.write_A( z )
# ADC M -> 10001110 -> 2 -> Add memory to A with carry
def ADC_M( self ):
curVal = self.read_A()
address = self.register_HL.read()
z = self.add( curVal, self.read_M( address ), self.flagALU_carry )
self.write_A( z )
# ACI data -> 11001110 -> 2 -> Add immediate to A with carry
def ACI_Data( self ):
byte2 = self.fetchInstruction()
curVal = self.read_A()
z = self.add( curVal, byte2, self.flagALU_carry )
self.write_A( z )
# DAD BC -> 00001001 -> 3 -> Add B & C to H & L
# DAD DE -> 00011001 -> 3 -> Add D & E to H & L
# DAD HL -> 00101001 -> 3 -> Add H & L to H & L
# DAD SP -> 00111001 -> 3 -> Add stack pointer to H & L
def DAD_R( self, R ):
z = self.register_HL.read() + R.read()
# according to docs, only carry flag is affected
if z > 0xffff:
self.flagALU_carry = 1
else:
self.flagALU_carry = 0
z &= 0xffff # discard overflow bits
self.register_HL.write( z )
self.updateALUFlags_Register()
# Subtract ---
# SUB R -> 10010SSS -> 1 -> Subtract register from A
def SUB_R( self, R, R_upper ):
curVal = self.read_A()
if R_upper:
z = self.sub( curVal, R.readUpperByte() )
else:
z = self.sub( curVal, R.readLowerByte() )
self.write_A( z )
# SUB M -> 10010110 -> 2 -> Subtract memory from A
def SUB_M( self ):
curVal = self.read_A()
address = self.register_HL.read()
z = self.sub( curVal, self.read_M( address ) )
self.write_A( z )
# SUI data -> 11010110 -> 2 -> Subtract immediate from A
def SUI_Data( self ):
byte2 = self.fetchInstruction()
curVal = self.read_A()
z = self.sub( curVal, byte2 )
self.write_A( z )
# SBB R -> 10011SSS -> 1 -> Subtract register from A with borrow
def SBB_R( self, R, R_upper ):
curVal = self.read_A()
if R_upper:
z = self.sub( curVal, R.readUpperByte(), self.flagALU_carry )
else:
z = self.sub( curVal, R.readLowerByte(), self.flagALU_carry )
self.write_A( z )
# SBB M -> 10011110 -> 2 -> Subtract memory from A with borrow
def SBB_M( self ):
curVal = self.read_A()
address = self.register_HL.read()
z = self.sub( curVal, self.read_M( address ), self.flagALU_carry )
self.write_A( z )
# SBI data -> 11011110 -> 2 -> Subtract immediate from A with borrow
def SBI_Data( self ):
byte2 = self.fetchInstruction()
curVal = self.read_A()
z = self.sub( curVal, byte2, self.flagALU_carry )
self.write_A( z )
# Logical ---
# ANA R -> 10100SSS -> 1 -> And register with A
def ANA_R( self, R, R_upper ):
curVal = self.read_A()
if R_upper:
z = self.and_( curVal, R.readUpperByte() )
else:
z = self.and_( curVal, R.readLowerByte() )
self.write_A( z )
# ANA M -> 10100110 -> 2 -> And memory with A
def ANA_M( self ):
curVal = self.read_A()
address = self.register_HL.read()
z = self.and_( curVal, self.read_M( address ) )
self.write_A( z )
# ANI data -> 11100110 -> 2 -> And immediate with A
def ANI_Data( self ):
byte2 = self.fetchInstruction()
curVal = self.read_A()
z = self.and_( curVal, byte2 )
self.write_A( z )
# XRA R -> 10101SSS -> 1 -> Exclusive or register with A
def XRA_R( self, R, R_upper ):
curVal = self.read_A()
if R_upper:
z = self.xor_( curVal, R.readUpperByte() )
else:
z = self.xor_( curVal, R.readLowerByte() )
self.write_A( z )
# XRA M -> 10101110 -> 2 -> Exclusive or memory with A
def XRA_M( self ):
curVal = self.read_A()
address = self.register_HL.read()
z = self.xor_( curVal, self.read_M( address ) )
self.write_A( z )
# XRI data -> 11101110 -> 2 -> Exclusive or immediate with A
def XRI_Data( self ):
byte2 = self.fetchInstruction()
curVal = self.read_A()
z = self.xor_( curVal, byte2 )
self.write_A( z )
# ORA R -> 10110SSS -> 1 -> Or register with A
def ORA_R( self, R, R_upper ):
curVal = self.read_A()
if R_upper:
z = self.or_( curVal, R.readUpperByte() )
else:
z = self.or_( curVal, R.readLowerByte() )
self.write_A( z )
# ORA M -> 10110110 -> 2 -> Or memory with A
def ORA_M( self ):
curVal = self.read_A()
address = self.register_HL.read()
z = self.or_( curVal, self.read_M( address ) )
self.write_A( z )
# ORI data -> 11110110 -> 2 -> Or immediate with A
def ORI_Data( self ):
byte2 = self.fetchInstruction()
curVal = self.read_A()
z = self.or_( curVal, byte2 )
self.write_A( z )
# CMP R -> 10111SSS -> 1 -> Compare register with A
def CMP_R( self, R, R_upper ):
if R_upper:
r = R.readUpperByte()
else:
r = R.readLowerByte()
self.sub( self.read_A(), r )
# CMP M -> 10111110 -> 2 -> Compare memory with A
def CMP_M( self ):
address = self.register_HL.read()
self.sub( self.read_A(), self.read_M( address ) )
# CPI data -> 11111110 -> 2 -> Compare immediate with A
def CPI_Data( self ):
byte2 = self.fetchInstruction()
self.sub( self.read_A(), byte2 )
# Rotate ---
# RLC -> 00000111 -> 1 -> Rotate A left
def RLC( self ):
b = self.toBin( self.read_A() )
sout = b[ 0 ]
b = b[ 1 : ] + sout
self.write_A( self.toInt( b ) )
self.flagALU_carry = int( sout )
self.updateALUFlags_Register()
# RRC -> 00001111 -> 1 -> Rotate A right
def RRC( self ):
b = self.toBin( self.read_A() )
sout = b[ self.nBits - 1 ]
b = sout + b[ : - 1 ]
self.write_A( self.toInt( b ) )
self.flagALU_carry = int( sout )
self.updateALUFlags_Register()
# RAL -> 00010111 -> 1 -> Rotate A left through carry
def RAL( self ):
b = self.toBin( self.read_A() )
sout = b[ 0 ]
b = b[ 1 : ] + str( self.flagALU_carry )
self.write_A( self.toInt( b ) )
self.flagALU_carry = int( sout )
self.updateALUFlags_Register()
# RAR -> 00011111 -> 1 -> Rotate A right through carry
def RAR( self ):
b = self.toBin( self.read_A() )
sout = b[ self.nBits - 1 ]
b = str( self.flagALU_carry ) + b[ : - 1 ]
self.write_A( self.toInt( b ) )
self.flagALU_carry = int( sout )
self.updateALUFlags_Register()
# Specials ---
# CMA -> 00101111 -> 1 -> Complement A
def CMA( self ):
self.write_A( self.read_A() ^ self.negativeOne ) # flip bits
# CMC -> 00111111 -> 1 -> Complement carry
def CMC( self ):
self.flagALU_carry ^= 1 # flip
self.updateALUFlags_Register()
# STC -> 00110111 -> 1 -> Set carry
def STC( self ):
self.flagALU_carry = 1
self.updateALUFlags_Register()
# DAA -> 00100111 -> x -> Decimal adjust A
def DAA( self ):
raise Exception( 'DAA instruction not implemented' )
# Input / Output ---
# IN port -> 11011011 -> 3 -> Input. Read byte from specified port and load to A
def IN( self ):
byte2 = self.fetchInstruction() # get port number. Used to select IO device
self.addressBus = byte2
# self.IO_WR = 1 # signal IO device to write to databus
# self.write_A( self.dataBus ) # get data placed on databus by IO device
# self.IO_WR = 0 # signal IO device to stop writing to databus
data = self.receive() # simulate
self.write_A( data )
# OUT port -> 11010011 -> 3 -> Output. Places contents of A onto data bus and the
# selected port number onto the address bus
def OUT( self ):
byte2 = self.fetchInstruction() # get port number. Used to select IO device
self.addressBus = byte2
# self.dataBus = self.read_A() # place contents of A onto databus
# self.IO_RD = 1 # signal IO device to read from databus
# sleep( 0.1 ) # wait for IO device to read?? one clock pulse??
# self.IO_RD = 0 # signal IO device to stop reading from databus
# simulate
data = self.read_A()
self.transmit( data )
# Control ---
# EI -> 11111011 -> 1 -> Enable interrupts (takes effect next instruction)
def EI( self ):
# self.flagCC_interruptEnable = 1
pass
# DI -> 11110011 -> 1 -> Disable interrupt (takes effect immediately)
def DI( self ):
# self.flagCC_interruptEnable = 0
pass
# HLT -> 01110110 -> 1 -> Halt
def HLT( self ):
# Temp for now
self.halt = True
print( 'HLT instruction executed' )
# NOP -> 00000000 -> 1 -> No operation
def NOP( self ): pass
```
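A minimal usage sketch for the CPU above. This assumes `memory.py` is importable and provides the `Register` class with the `read`/`write` and upper/lower byte accessors used throughout, that registers initialize to zero (so execution starts at address 0), and that memory can be any mutable indexable byte store such as a plain list:
```python
from cpu_8080 import CPU

# Hypothetical 3-byte program:
#   MVI A,0x2A -> 0b00111110 0x2A (load 42 into the accumulator)
#   HLT        -> 0b01110110      (sets self.halt, ending the run loop)
program = [0b00111110, 0x2A, 0b01110110]
memory = program + [0] * (256 - len(program))  # pad out a small address space

cpu = CPU(memory)
cpu.run()                 # fetch/execute until HLT; prints '8080 has halted'
print(hex(cpu.read_A()))  # -> 0x2a
```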
#### File: Intel-8080-Emulator/OldFiles/terminal_old.py
```python
'''
________ _________
| | | |
| | | | txData
| | databus | | ----------->
| DEVICE | <-----------> | |
| | | | rxData
| | rd | | <----------
| | ------------> | UART |
| | | |
| | wr | |
| | ------------> | |
| | | |
| | | |
|________| |_________|
^
|
|
clk
'''
import threading
class Terminal:
def __init__( self ):
self.on = True
self.txData = None
self.rxData = None
self.rxBuffer = ''
# self.rxReady = 0
self.clk = 0
# self.sampleFrequency = 2 # seconds
self.setupThreads()
def setupThreads( self ):
# Handle as threads to emulate concurrency of physical wires
t1 = threading.Thread(
name = 'tx_thread',
target = self.transmit
)
# t2 = threading.Thread(
# name = 'rx_thread',
# target = self.receive
# )
# t3 = threading.Thread(
# name = 'rxReady_thread',
# target = self.sampleRxReady
# )
t1.start()
# t2.start()
# t3.start()
def sampleRxReady( self ):
# TODO, instead of sampling trigger on signal
while self.on:
if self.clk and self.rxReady:
self.receive()
print( '{} has exited'.format( threading.current_thread().getName() ) ) # debug
def receive( self ):
# print( 'Receiving {}'.format( self.rxData ) ) # debug
# Gather until newline character, then print
# if self.rxData == 10: # newline
# print( self.rxBuffer )
# self.rxBuffer = ''
# else:
# self.rxBuffer += chr( self.rxData )
print( chr( self.rxData ), end = '' )
def transmit( self ):
while self.on:
# Get user input
try:
user_input = input()
# Exit conditions
except EOFError: # User pressed CTRL+D (or CTRL+Z on Windows); CTRL+C raises KeyboardInterrupt instead
self.exit()
break
# Append newline character (omitted by input())
# user_input += '\n'
# Python input() can receive arbitrary length input from user
# We want to send it out one character at a time
for c in user_input:
# Drive TX line
self.txData = c
print( 'Transmitting {}'.format( c ) ) # debug
# sleep( self.clockPeriod )
print( '{} has exited'.format( threading.current_thread().getName() ) ) # debug
def exit( self ):
self.on = False
print( 'See you later!' )
# Tests ------------------------------------------
from time import sleep, time
term = Terminal()
# term.rxReady = 1
# term.rxData = "greetings"
t_idx = 0
t_string = "Hello\nPlants\nOk"
clock = 0
startTime = time()
while True:
elapsedTime = time() - startTime
# print( elapsedTime, int( elapsedTime % 2 ) )
if elapsedTime >= 35:
break
if int( elapsedTime % 2 ) == 0 and t_idx < len( t_string ):
# print( '.' )
term.rxData = ord( t_string[ t_idx ] )
term.receive()
t_idx += 1
clock ^= 1 # tick/tock
sleep( 1 ) # pulse width
term.clk = clock
term.exit()
```
#### File: JetStarBlues/Intel-8080-Emulator/terminal.py
```python
import tkinter
import threading
class Terminal():
def __init__ ( self, width=400, height=400 ):
self.keyBuffer = []
self.displayBuffer = ''
self.K_CTRL_C = 0x03
self.K_BACKSPACE = 0x08
self.K_LF = 0x0A
self.K_CR = 0x0D # used as end-of-(line,string,input) marker in Tiny BASIC
self.K_RUB_OUT = 0x7F # backspace
self.debugMode = False
self.width = width
self.height = height
self.textColor = '#689497'
self.bgColor = '#fff4dc'
self.tkRoot = None
self.tkTextBox = None
self.tkCanvas = None
self.tkCanvasFrame = None
# self.tkThread = threading.Thread(
# target = self.setupTkinter,
# name = 'tk_thread'
# ).start()
# Communication -----------------------------------
def transmit ( self ):
# Send data to CPU
if len( self.keyBuffer ) > 0:
return self.keyBuffer.pop()
else:
return 0
def receive ( self, data ):
# Receive data from CPU
self.displayCharacter( data )
def sendInterrupt ( self, isrLoc ):
'''
Real procedure,
set INT high
send RST instr
set INT low
ISR code will call IN to get data
'''
self.CPU.jumpToISR( isrLoc )
# Helper ------------------------------------------
def tooLazyToType ( self, filepath ):
with open( filepath, 'r' ) as file:
for line in file:
for char in line:
keyCode = ord( char )
if keyCode == self.K_LF:
keyCode = self.K_CR # tiny basic expects CR as delimiter
# print( char, keyCode )
self.addKeyToBuffer( keyCode )
# Keypress ----------------------------------------
def addKeyToBuffer ( self, key ):
self.keyBuffer.insert( 0, key )
# self.sendInterrupt() # interrupt vs waiting for poll
def handleKeypress ( self, event ):
if event.char: # modifier keys like SHIFT are None?
char = event.char
keyCode = ord( char )
# print( char, keyCode )
if keyCode == self.K_CTRL_C:
self.addKeyToBuffer( keyCode )
elif keyCode == self.K_BACKSPACE:
self.addKeyToBuffer( self.K_RUB_OUT )
elif keyCode == self.K_CR or keyCode == self.K_LF:
self.addKeyToBuffer( self.K_CR )
elif keyCode >= 32 and keyCode <= 126:
self.addKeyToBuffer( keyCode )
else:
# print( 'Key not handled - {}'.format( keyCode ) )
pass
# Display -----------------------------------------
def displayCharacter ( self, keyCode ):
if self.debugMode:
print( 'tkRaw ->', keyCode )
if keyCode == 0:
return
if keyCode >= 32 and keyCode <= 126:
self.displayBuffer += chr( keyCode )
elif keyCode == self.K_CR:
pass
elif keyCode == self.K_LF:
self.displayBuffer += '\n'
elif keyCode == self.K_BACKSPACE:
if len( self.displayBuffer ) > 0:
self.displayBuffer = self.displayBuffer[ : - 1 ] # remove from display buffer
self.updateDisplay()
def tk_onFrameConfigure( self, event ):
# resize canvas
self.tkCanvas.configure( scrollregion = self.tkCanvas.bbox( 'all' ) )
def tk_onCanvasConfigure( self, event ):
# resize frame
self.tkCanvas.itemconfigure( self.tkCanvasFrame, width = event.width )
def setupTkinter( self ):
self.tkRoot = tkinter.Tk()
self.tkRoot.title( '8080 Sim' )
self.tkCanvas = tkinter.Canvas( self.tkRoot )
self.tkCanvas.pack( side = tkinter.LEFT, expand = True, fill = 'both' )
self.tkCanvas.configure(
width = self.width,
height = self.height,
highlightthickness = 0,
bg = self.bgColor
)
scrollbar = tkinter.Scrollbar( self.tkRoot )
scrollbar.pack( side = tkinter.RIGHT, fill = 'y' )
scrollbar.configure(
orient = 'vertical',
command = self.tkCanvas.yview
)
self.tkCanvas.configure( yscrollcommand = scrollbar.set )
frame = tkinter.Frame( self.tkCanvas )
self.tkCanvasFrame = self.tkCanvas.create_window(
( 0, 0 ),
window = frame,
anchor = 'nw'
)
self.tkTextBox = tkinter.Label( frame )
self.tkTextBox.pack( expand = True, fill = 'both' )
self.tkTextBox[ 'text' ] = self.displayBuffer
self.tkTextBox.config(
fg = self.textColor,
bg = self.bgColor,
anchor = 'nw',
justify = tkinter.LEFT,
wraplength = self.width - 5
)
frame.bind( '<Configure>', self.tk_onFrameConfigure )
self.tkCanvas.bind( '<Configure>', self.tk_onCanvasConfigure )
self.tkRoot.bind( '<KeyPress>', self.handleKeypress )
self.tkRoot.protocol( 'WM_DELETE_WINDOW', self.quitTkinter )
self.tkRoot.mainloop()
def quitTkinter( self ):
self.tkRoot.quit()
print( 'Tkinter has exited' )
def updateDisplay( self ):
self.tkTextBox[ 'text' ] = self.displayBuffer
self.tkCanvas.yview_moveto( 1 ) # scroll to bottom (show latest)
# t = Terminal()
```
|
{
"source": "JetStarBlues/Nand-2-Tetris",
"score": 2
}
|
#### File: JetStarBlues/Nand-2-Tetris/commonHelpers.py
```python
import Components._0__globalConstants as GC
'''----------------------------- Main -----------------------------------'''
negativeOne = 2 ** GC.N_BITS - 1
largestInt = 2 ** ( GC.N_BITS - 1 ) - 1
def trim( x ):
return x & negativeOne # discard overflow bits
def negate( x ):
return trim( ( x ^ negativeOne ) + 1 ) # twos complement
def isNegative( x ):
return x > largestInt
def toBinary( x, N ):
return bin( x )[ 2 : ].zfill( N )
# return '{:b}'.format( x ).zfill( N )
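# Worked example (sketch, assuming GC.N_BITS == 16):
#   trim( 0x1FFFF )      -> 0xFFFF (overflow bit discarded)
#   negate( 1 )          -> 0xFFFF (two's complement encoding of -1)
#   isNegative( 0xFFFF ) -> True
#   toBinary( 5, 8 )     -> '00000101'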
```
#### File: Nand-2-Tetris/Components/_4__flipFlops.py
```python
import threading, time
from random import random
# Hack computer
from ._x__components import *
'''--------------------------- Flip flops ---------------------------'''
class JKFlipFlop():
def __init__( self ):
# Assign random start values
# Truer to indeterminate startup state
# http://forum.allaboutcircuits.com/threads/sr-latch-initial-output-values.80855/
self.q0 = 0 if random() >= 0.5 else 1
self._q0 = not_( self.q0 )
# Output. Initialize with random value
self.q1 = 0 if random() >= 0.5 else 1
self._q1 = not_( self.q1 )
# Faux mechanical delay
self.propogationDelay = CLOCK_HALF_PERIOD * 0.2 #seconds
def doTheThing( self, e, j, k ):
# https://pymotw.com/2/threading/
# execute only after delay...
t = threading.Timer(
self.propogationDelay,
self.doTheThing_,
args = ( e, j, k )
)
t.setName( 'jkff_thread' )
t.start()
def doTheThing_( self, e, j, k ):
# print( "executing ", threading.currentThread().getName() )
#
r = and_( e, k )
s = and_( e, j )
# feedback to prevent 'invalid'
r = and_( r, self.q0 )
s = and_( s, self._q0 )
#
self.q1 = nor_( r, self._q0 )
self._q1 = nor_( s, self.q0 )
# do it twice (a write needs two passes through the latch to settle)
# see math/logic here https://youtu.be/XETZoRYdtkw
self.q0 = self.q1;
self._q0 = self._q1;
self.q1 = nor_( r, self._q0 );
self._q1 = nor_( s, self.q0 );
# Set cur to prev in prep for next call
self.q0 = self.q1
self._q0 = self._q1
def clear( self ):
self.q1 = 0
self._q1 = 1
def set( self ):
self.q1 = 1
self._q1 = 0
class DFlipFlop():
def __init__( self ):
# Assign random start values
# Truer to indeterminate startup state
# http://forum.allaboutcircuits.com/threads/sr-latch-initial-output-values.80855/
self.q0 = 0 if random() >= 0.5 else 1
self._q0 = not_( self.q0 )
# Output. Initialize with random value
self.q1 = 0 if random() >= 0.5 else 1
self._q1 = not_( self.q1 )
# Faux mechanical delay
self.propogationDelay = CLOCK_HALF_PERIOD * 0.2 #seconds
def doTheThing( self, e, clr, sett, d ):
# execute only after delay...
t = threading.Timer(
self.propogationDelay,
self.doTheThing_,
args = ( e, clr, sett, d )
)
t.setName( 'dff_thread' )
t.start()
def doTheThing_( self, e, clear_, set_, d ):
if clear_:
self.clear()
return
elif set_:
self.set()
return
#
r = and_( e, not_( d ) )
s = and_( e, d )
#
self.q1 = nor_( r, self._q0 )
self._q1 = nor_( s, self.q0 )
# do it twice (a write needs two passes through the latch to settle)
# see math/logic here https://youtu.be/XETZoRYdtkw
self.q0 = self.q1;
self._q0 = self._q1;
self.q1 = nor_( r, self._q0 );
self._q1 = nor_( s, self.q0 );
# Set cur to prev in prep for next call
self.q0 = self.q1
self._q0 = self._q1
def read( self ):
return self.q1
def clear( self ):
self.q1 = 0
self._q1 = 1
def set( self ):
self.q1 = 1
self._q1 = 0
```
#### File: Nand-2-Tetris/Components/_6__counter.py
```python
from ._x__components import *
'''----------------------------- Counter -----------------------------'''
class CounterN_():
''' N bit counter
if rst(t-1) : out(t) = 0
elif write(t-1) : out(t) = in(t-1)
elif inc(t-1) : out(t) = out(t-1) + 1
else : out(t) = out(t-1)
'''
def __init__( self, N ):
self.N = N
self.register = RegisterN_( N )
def doTheThing( self, clk, rst, x, write, inc ):
change = or3_( write, inc, rst )
d = muxN_(
# Reset
self.N,
zeroN_( self.N ),
muxN_(
# Jump
self.N,
x,
muxN_(
# Increment
self.N,
incrementN_( self.N, self.register.read() ),
self.register.read(),
inc
),
write
),
rst
)
self.register.write( clk, d, change )
def read( self ):
return self.register.read()
def readDecimal( self ):
return self.register.readDecimal()
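# Usage sketch (control priority is rst > write > inc, per the muxN_ nesting above):
#   pc = CounterN_( 16 )
#   pc.doTheThing( clk = 1, rst = 0, x = zeroN_( 16 ), write = 0, inc = 1 )
#   # -> on the next read, out(t) == out(t-1) + 1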
```
#### File: Nand-2-Tetris/Components/_7__cpu.py
```python
import math
# Hack computer
from ._x__components import *
import Assembler.disassembler as dis
'''------------------------------- CPU -------------------------------'''
'''
Instruction - FEDCBA9876543210 // msb to lsb
0123456789ABCDEF // array indexing
F . 0 -> TECS instruction type (C if 1, @ if 0)
E . 1 -> op
D . 2 -> op
C . 3 -> op
B . 4 -> op
A . 5 -> op
9 . 6 -> xSel
8 . 7 -> xSel
7 . 8 -> ySel
6 . 9 -> ySel
5 . A -> dst
4 . B -> dst
3 . C -> dst
2 . D -> jmp
1 . E -> jmp
0 . F -> jmp
x/y sel
0 D
1 A
2 B
3 M
dst
0 NULL
1 D
2 A
3 B
4 M
5 unused
6 unused
7 unused
jmp
0 NULL
1 JGT
2 JEQ
3 JGE
4 JLT
5 JNE
6 JLE
7 JMP
'''
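# Decode example (sketch, using the field positions listed above):
#   a C-instruction computing 'D = D + A' would be the bit tuple
#   ( 1,           # TECS instruction type: C
#     0,1,0,0,0,   # op: x + y
#     0,0,         # xSel: D
#     0,1,         # ySel: A
#     0,0,1,       # dst: D
#     0,0,0 )      # jmp: NULL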
class CPU_():
''' Fetches and executes program instructions '''
def __init__( self, N ):
self.debugMode = False
self.N = N
# Program counter
self.programCounter = CounterN_( 2 * N ) # TODO...this can be 26 instead
# Microstep counter
nStepsPerInstruction = 4
nBitsInCounter = 2 # int( math.log( nStepsPerInstruction, 2 ) )
self.microCounter = CounterN_( nBitsInCounter )
# Microcode ROM
nControlSignals = 18
nInstructionTypes = 8
# self.nBitsInInstructionType = 3 # math.ceil( math.log( nInstructionTypes, 2 ) )
nEntriesMicrocodeROM = nInstructionTypes * nStepsPerInstruction
self.microcodeROM = ROMXN_( nEntriesMicrocodeROM, nControlSignals )
# ALU ROM
nEntriesALUROM = 32
nBitsInFxSel = 4
nBitsInFxFlags = 5
self.ALUROM = ROMXN_( nEntriesALUROM, nBitsInFxSel + nBitsInFxFlags )
self.initInternalROM()
# Registers
self.A_register = RegisterN_( N )
self.D_register = RegisterN_( N )
self.B_register = RegisterN_( N )
self.AA_register = RegisterN_( N )
self.instruction_register = RegisterN_( N )
self.IOInput_register = RegisterN_( N )
self.ABkp_register = RegisterN_( N )
self.DBkp_register = RegisterN_( N )
self.BBkp_register = RegisterN_( N )
self.AABkp_register = RegisterN_( N )
self.instructionBkp_register = RegisterN_( N )
self.PCBkp_register = RegisterN_( 2 * N )
# Flip flops
self.interruptsEnabled_ff = DFlipFlop()
self.interruptAcknowledged_ff = DFlipFlop()
self.backupEnabled_ff = DFlipFlop()
# Instruction decode
self.TECSInstrType = 0
self.op = 1
self.xSel = 6
self.ySel = 8
self.dst = 10
self.jmp = 13
self.nBitsInOp = 5
# Instruction types
self.i_Aimmed = ( 1, 1, 0, 0, 0 )
self.i_AAimmed = ( 1, 1, 0, 0, 1 )
self.i_dstEqCmpJmp = ( 1, 1, 0, 1, 0 )
self.i_dstEqIOBus = ( 1, 1, 0, 1, 1 )
self.i_intAck = ( 1, 1, 1, 0, 0 )
self.i_reti = ( 1, 1, 1, 0, 1 )
self.i_nop = ( 1, 1, 1, 1, 0 )
self.i_halt = ( 1, 1, 1, 1, 1 )
# Location of ISRHandler in program
self.ISRHandlerAddress = self.intToBitArray( 0, 2 * N ) # TODO
# Miscellaneous
self.zero = self.intToBitArray( 0, N )
self.AA_registerMask = ( 0, ) * 6 + ( 1, ) * 10 # ???
# Temp debug
self.instructionTypeLookup = {
( 1, 1, 0, 0, 0 ) : 'i_Aimmed',
( 1, 1, 0, 0, 1 ) : 'i_AAimmed',
( 1, 1, 0, 1, 0 ) : 'i_dstEqCmpJmp',
( 1, 1, 0, 1, 1 ) : 'i_dstEqIOBus',
( 1, 1, 1, 0, 0 ) : 'i_intAck',
( 1, 1, 1, 0, 1 ) : 'i_reti',
( 1, 1, 1, 1, 0 ) : 'i_nop',
( 1, 1, 1, 1, 1 ) : 'i_halt',
}
self.ALUFxLookup = {
( 0, 0, 0, 0, 0 ) : '0',
( 0, 0, 0, 0, 1 ) : '1',
( 0, 0, 0, 1, 0 ) : '-1',
( 0, 0, 0, 1, 1 ) : 'x',
( 0, 0, 1, 0, 0 ) : '! x',
( 0, 0, 1, 0, 1 ) : '- x',
( 0, 0, 1, 1, 0 ) : 'x + 1',
( 0, 0, 1, 1, 1 ) : 'x - 1',
( 0, 1, 0, 0, 0 ) : 'x + y',
( 0, 1, 0, 0, 1 ) : 'x - y',
( 0, 1, 0, 1, 0 ) : 'x & y',
( 0, 1, 0, 1, 1 ) : 'x | y',
( 0, 1, 1, 0, 0 ) : 'x ^ y',
( 0, 1, 1, 0, 1 ) : 'x >> y',
( 0, 1, 1, 1, 0 ) : 'x << y',
( 0, 1, 1, 1, 1 ) : 'x * y',
( 1, 0, 0, 0, 0 ) : 'x / y',
}
self.xyLookup = {
( 0, 0 ) : 'D',
( 0, 1 ) : 'A',
( 1, 0 ) : 'B',
( 1, 1 ) : 'M',
}
def intToBitArray( self, x, N ):
z = bin( x )[ 2 : ].zfill( N )
return tuple( map( int, z ) )
def bitArrayToBinaryString( self, x ):
return ''.join( map( str, x ) )
def bitArrayToInt( self, x ):
return int( ''.join( map( str, x ) ), 2 )
def initInternalROM( self ):
# Microcode ROM
'''
| i_Aimmed | i_AAimmed | i_dstEqCmpJmp | i_dstEqIOBus | i_intAck | i_reti | i_nop | i_halt |
| 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 | 0 1 2 3 |
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
c_cInst | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_ARegisterWr | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_ARegisterInSel_instructionRegister | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_AARegisterWr | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_instructionRegisterWr | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 |
c_PCIncrement | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 | 1 0 0 0 |
c_PCWr | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_PCInSel_ISRHandler | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_readIODatabus | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_dstInSel_IOInputRegister | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_enableInterrupts | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 |
c_disableInterrupts | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_acknowledgeInterrupt | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_servicedInterrupt | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 |
c_enableRegisterBackup | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 1 0 | 0 0 0 0 | 0 0 0 0 |
c_disableRegisterBackup | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 |
c_restoreRegisters | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 0 0 | 0 0 0 0 | 0 0 0 0 |
c_halt | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 1 1 1 |
'''
# i_Aimmed
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 0 )
self.microcodeROM.write( 1, ( 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 1 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 2 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 3 )
# i_AAimmed
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 4 )
self.microcodeROM.write( 1, ( 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 5 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 6 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 7 )
# i_dstEqCmpJmp
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 8 )
self.microcodeROM.write( 1, ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 9 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 10 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 11 )
# i_dstEqIOBus
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 12 )
self.microcodeROM.write( 1, ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 13 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 14 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 15 )
# i_intAck
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 16 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0 ), 1, 17 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 18 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 19 )
# i_reti
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 20 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0 ), 1, 21 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0 ), 1, 22 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 23 )
# i_nop
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 24 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 25 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 26 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 27 )
# i_halt
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 28 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 1, 29 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 1, 30 )
self.microcodeROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ), 1, 31 )
# ALU ROM
'''
op fsel flags composite
----- ---- ----- ----------
0 add zx, zy 0000 10100
1 add zx, nx, zy, ny, no 0000 11111
- 1 add zx, nx, zy 0000 11100
x and zy, ny 0001 00110
! x and zy, ny, no 0001 00111
- x add zy, ny, no 0000 00111
x + 1 add nx, zy, ny, no 0000 01111
x - 1 add zy, ny 0000 00110
x + y add 0000 00000
x - y add nx, no 0000 01001
x & y and 0001 00000
x | y and nx, ny, no 0001 01011
x ^ y xor 0010 00000
x >> y lsr 0011 00000
x << y lsl 0100 00000
x * y mul 0101 00000
x / y div 0110 00000
'''
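# Reading the table (sketch): the 5-bit op field of the instruction indexes this
# ROM directly; e.g. op 0b01000 ('x + y') reads entry 8 -> fsel 0000 (add) with
# no flags set, while op 0b01001 ('x - y') reads entry 9 -> fsel 0000 with nx and no set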
self.ALUROM.write( 1, ( 0, 0, 0, 0, 1, 0, 1, 0, 0 ), 1, 0 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 1, 1, 1, 1, 1 ), 1, 1 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 1, 1, 1, 0, 0 ), 1, 2 )
self.ALUROM.write( 1, ( 0, 0, 0, 1, 0, 0, 1, 1, 0 ), 1, 3 )
self.ALUROM.write( 1, ( 0, 0, 0, 1, 0, 0, 1, 1, 1 ), 1, 4 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 0, 0, 1, 1, 1 ), 1, 5 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 0, 1, 1, 1, 1 ), 1, 6 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 0, 0, 1, 1, 0 ), 1, 7 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 0, 0, 0, 0, 0 ), 1, 8 )
self.ALUROM.write( 1, ( 0, 0, 0, 0, 0, 1, 0, 0, 1 ), 1, 9 )
self.ALUROM.write( 1, ( 0, 0, 0, 1, 0, 0, 0, 0, 0 ), 1, 10 )
self.ALUROM.write( 1, ( 0, 0, 0, 1, 0, 1, 0, 1, 1 ), 1, 11 )
self.ALUROM.write( 1, ( 0, 0, 1, 0, 0, 0, 0, 0, 0 ), 1, 12 )
self.ALUROM.write( 1, ( 0, 0, 1, 1, 0, 0, 0, 0, 0 ), 1, 13 )
self.ALUROM.write( 1, ( 0, 1, 0, 0, 0, 0, 0, 0, 0 ), 1, 14 )
self.ALUROM.write( 1, ( 0, 1, 0, 1, 0, 0, 0, 0, 0 ), 1, 15 )
self.ALUROM.write( 1, ( 0, 1, 1, 0, 0, 0, 0, 0, 0 ), 1, 16 )
def compareOp( self, a, b ):
# if a == b, a ^ b == 0
# submodule, dry
c = xorN_( self.nBitsInOp, a, b )
d = not_( orNto1_( self.nBitsInOp, c ) )
return d
def doTheThing(
self,
computer, # ...
clk, # input
RESET, # input
interruptRequested, # input
IODatabus # bidirectional
):
'''
. Everything happens at once/simultaneously
. Assumes all memory modules can be read asynchronously
'''
# Alias -
data_memory = computer.data_memory
program_memory = computer.program_memory
# Constants -
# Always increment microCounter
microCounterIn = self.zero
microCounterWr = 0
microCounterIncrement = 1
# Read memory -
D_registerOut = self.D_register.read()
A_registerOut = self.A_register.read()
B_registerOut = self.B_register.read()
AA_registerOut = self.AA_register.read()
instruction_registerOut = self.instruction_register.read()
IOInput_registerOut = self.IOInput_register.read()
ABkp_registerOut = self.ABkp_register.read()
DBkp_registerOut = self.DBkp_register.read()
BBkp_registerOut = self.BBkp_register.read()
AABkp_registerOut = self.AABkp_register.read()
instructionBkp_registerOut = self.instructionBkp_register.read()
PCBkp_registerOut = self.PCBkp_register.read()
# interruptsEnabled = self.interruptsEnabled_ff.read()
# interruptAcknowledged = self.interruptAcknowledged_ff.read()
# backupEnabled = self.backupEnabled_ff.read()
instruction = instruction_registerOut
lowerAddress = A_registerOut
upperAddress = AA_registerOut
dataMemoryOut = data_memory.read( lowerAddress )
instructionAddress = self.programCounter.read()
microStep = self.microCounter.read()
if self.debugMode:
print( 'instruction {}'.format( self.bitArrayToBinaryString( instruction ) ) )
print( ' {}'.format( dis.disassemble( self.bitArrayToBinaryString( instruction ) ) ) )
print( 'instructionAddress {}'.format( self.programCounter.readDecimal() ) )
print( 'microStep {}'.format( self.bitArrayToInt( microStep ) ) )
programMemoryOut = program_memory.read( self.programCounter.read() )
# Decode -
interruptsEnabled = 1 # TODO, fix me!
op = instruction[ self.op : self.op + self.nBitsInOp ]
isAimmed = not_( instruction[ self.TECSInstrType ] )
iDecode2 = muxN_(
self.nBitsInOp,
op, # 11xxx (special op)
self.i_dstEqCmpJmp, # everything else ('dst=cmp;jmp')
and_( instruction[ self.op ], instruction[ self.op + 1 ] )
)
iDecode1 = muxN_(
self.nBitsInOp,
self.i_Aimmed, # '@' instruction
iDecode2,
isAimmed
)
instructionType = muxN_(
self.nBitsInOp,
self.i_intAck, # interrupt acknowledge
iDecode1,
and_( interruptRequested, interruptsEnabled )
)
microAddress = instructionType[ 2 : ] + microStep # 3bits(8) + 2bits(4)
microInstruction = self.microcodeROM.read( microAddress )
if self.debugMode:
print( 'instructionType {} {}'.format( instructionType, self.instructionTypeLookup[ instructionType ] ) )
if instructionType == self.i_dstEqCmpJmp:
print( ' alu op {}'.format( self.ALUFxLookup[ op ] ) )
# Control signals -
c_cInst = microInstruction[ 0 ]
c_ARegisterWr = microInstruction[ 1 ]
c_ARegisterInSel_instructionRegister = microInstruction[ 2 ]
c_AARegisterWr = microInstruction[ 3 ]
c_instructionRegisterWr = microInstruction[ 4 ]
c_PCIncrement = microInstruction[ 5 ]
c_PCWr = microInstruction[ 6 ]
c_PCInSel_ISRHandler = microInstruction[ 7 ]
c_readIODatabus = microInstruction[ 8 ]
c_dstInSel_IOInputRegister = microInstruction[ 9 ]
c_enableInterrupts = microInstruction[ 10 ]
c_disableInterrupts = microInstruction[ 11 ]
c_acknowledgeInterrupt = microInstruction[ 12 ]
c_servicedInterrupt = microInstruction[ 13 ]
c_enableRegisterBackup = microInstruction[ 14 ]
c_disableRegisterBackup = microInstruction[ 15 ]
c_restoreRegisters = microInstruction[ 16 ]
c_halt = microInstruction[ 17 ]
if self.debugMode:
print( 'controlSignals ', end='' )
if c_cInst: print( 'c_cInst', end = ' | ' )
if c_ARegisterWr: print( 'c_ARegisterWr', end = ' | ' )
if c_ARegisterInSel_instructionRegister: print( 'c_ARegisterInSel_instructionRegister', end = ' | ' )
if c_AARegisterWr: print( 'c_AARegisterWr', end = ' | ' )
if c_instructionRegisterWr: print( 'c_instructionRegisterWr', end = ' | ' )
if c_PCIncrement: print( 'c_PCIncrement', end = ' | ' )
if c_PCWr: print( 'c_PCWr', end = ' | ' )
if c_PCInSel_ISRHandler: print( 'c_PCInSel_ISRHandler', end = ' | ' )
if c_readIODatabus: print( 'c_readIODatabus', end = ' | ' )
if c_dstInSel_IOInputRegister: print( 'c_dstInSel_IOInputRegister', end = ' | ' )
if c_enableInterrupts: print( 'c_enableInterrupts', end = ' | ' )
if c_disableInterrupts: print( 'c_disableInterrupts', end = ' | ' )
if c_acknowledgeInterrupt: print( 'c_acknowledgeInterrupt', end = ' | ' )
if c_servicedInterrupt: print( 'c_servicedInterrupt', end = ' | ' )
if c_enableRegisterBackup: print( 'c_enableRegisterBackup', end = ' | ' )
if c_disableRegisterBackup: print( 'c_disableRegisterBackup', end = ' | ' )
if c_restoreRegisters: print( 'c_restoreRegisters', end = ' | ' )
if c_halt: print( 'c_halt', end = ' | ' )
print()
# Hold value over time (via register), but switch immediately with control signal
'''
en | 100x
dis | 001x
regOut | x110
desired | 110x
'''
interruptsEnabled = and_(
or_( c_enableInterrupts, self.interruptsEnabled_ff.read() ),
not_( c_disableInterrupts )
)
interruptAcknowledged = and_(
or_( c_acknowledgeInterrupt, self.interruptAcknowledged_ff.read() ),
not_( c_servicedInterrupt )
)
backupEnabled = and_(
or_( c_enableRegisterBackup, self.backupEnabled_ff.read() ),
not_( c_disableRegisterBackup )
)
# x,y select -
x = muxN4to1_(
self.N,
dataMemoryOut,
B_registerOut,
A_registerOut,
D_registerOut,
instruction[ self.xSel + 0 ], instruction[ self.xSel + 1 ]
)
y = muxN4to1_(
self.N,
dataMemoryOut,
B_registerOut,
A_registerOut,
D_registerOut,
instruction[ self.ySel + 0 ], instruction[ self.ySel + 1 ]
)
# ALU -
ALU_control = self.ALUROM.read( op )
ALU_out = ALU_( self.N, x, y, ALU_control )
z = ALU_out[ 0 ] # result of computation
zr = ALU_out[ 1 ] # result is zero
ng = ALU_out[ 2 ] # result is negative
if self.debugMode:
# print( 'ALU_control {}'.format( ALU_control ) )
print( 'x {} {} {}'.format( x, self.xyLookup[ instruction[ self.xSel : self.xSel + 2 ] ], self.bitArrayToInt( x ) ) )
print( 'y {} {} {}'.format( y, self.xyLookup[ instruction[ self.ySel : self.ySel + 2 ] ], self.bitArrayToInt( y ) ) )
print( 'z {} {}'.format( z, self.bitArrayToInt( z ) ) )
# Jump -
jump = mux8to1_(
1, # JMP
or_( zr, ng ), # JLE
not_( zr ), # JNE
ng, # JLT
not_( ng ), # JGE
zr, # JEQ
not_( or_( zr, ng ) ), # JGT
0, # NULL
instruction[ self.jmp + 0 ], instruction[ self.jmp + 1 ], instruction[ self.jmp + 2 ]
)
# Write data select -
D_registerIn = muxN4to1_(
self.N,
self.zero,
DBkp_registerOut,
IOInput_registerOut,
z,
c_restoreRegisters, c_dstInSel_IOInputRegister
)
B_registerIn = muxN4to1_(
self.N,
self.zero,
BBkp_registerOut,
IOInput_registerOut,
z,
c_restoreRegisters, c_dstInSel_IOInputRegister
)
A_registerIn = muxN8to1_(
self.N,
self.zero,
self.zero,
self.zero,
instruction,
self.zero,
ABkp_registerOut,
IOInput_registerOut,
z,
c_ARegisterInSel_instructionRegister, c_restoreRegisters, c_dstInSel_IOInputRegister
)
AA_registerIn = andN_( self.N, instruction, self.AA_registerMask )
IOInput_registerIn = bufferN_( self.N, IODatabus, c_readIODatabus )
dataMemoryIn = muxN_(
self.N,
IOInput_registerOut,
z,
c_dstInSel_IOInputRegister
)
PCIn = muxN4to1_(
self.N * 2,
self.zero + self.zero,
PCBkp_registerOut,
self.zero + self.ISRHandlerAddress,
upperAddress + lowerAddress,
c_restoreRegisters, c_PCInSel_ISRHandler
)
# Write dst select -
dst = decoder3to8_( # returns ( q7, q6, q5, q4, q3, q2, q1, q0 )
instruction[ self.dst + 0 ],
instruction[ self.dst + 1 ],
instruction[ self.dst + 2 ],
)
D_registerWr = and_( dst[ 7 - 1 ], c_cInst )
A_registerWr = or_( and_( dst[ 7 - 2 ], c_cInst ), c_ARegisterWr )
B_registerWr = and_( dst[ 7 - 3 ], c_cInst )
dataMemoryWr = and_( dst[ 7 - 4 ], c_cInst )
PCWr = or_( and_( jump, c_cInst ), c_PCWr )
# Write memory -
self.D_register.write ( clk, D_registerIn, D_registerWr )
self.A_register.write ( clk, A_registerIn, A_registerWr )
self.B_register.write ( clk, B_registerIn, B_registerWr )
self.AA_register.write ( clk, AA_registerIn, c_AARegisterWr )
self.instruction_register.write ( clk, programMemoryOut, c_instructionRegisterWr )
self.IOInput_register.write ( clk, IOInput_registerIn, c_readIODatabus )
self.DBkp_register.write ( clk, D_registerIn, and_( backupEnabled, D_registerWr ) )
self.ABkp_register.write ( clk, A_registerIn, and_( backupEnabled, A_registerWr ) )
self.BBkp_register.write ( clk, B_registerIn, and_( backupEnabled, B_registerWr ) )
self.AABkp_register.write ( clk, AA_registerOut, and_( backupEnabled, c_AARegisterWr ) )
self.instructionBkp_register.write( clk, instruction_registerOut, and_( backupEnabled, c_instructionRegisterWr ) )
self.PCBkp_register.write ( clk, instructionAddress, and_( backupEnabled, c_instructionRegisterWr ) )
self.interruptsEnabled_ff.doTheThing ( clk, c_disableInterrupts, or_( RESET, c_enableInterrupts ), 0 )
self.interruptAcknowledged_ff.doTheThing( clk, or_( RESET, c_servicedInterrupt ), c_acknowledgeInterrupt, 0 )
self.backupEnabled_ff.doTheThing ( clk, c_disableRegisterBackup, or_( RESET, c_enableRegisterBackup ), 0 )
data_memory.write( clk, dataMemoryIn, dataMemoryWr, lowerAddress )
if self.debugMode:
print( 'dataMemoryWr {}'.format( dataMemoryWr ) )
print( 'dataMemoryIn {} {}'.format( dataMemoryIn, self.bitArrayToInt( dataMemoryIn ) ) )
# print( 'lowerAddress', lowerAddress )
self.programCounter.doTheThing( clk, RESET, PCIn, PCWr, c_PCIncrement )
self.microCounter.doTheThing( clk, RESET, microCounterIn, microCounterWr, microCounterIncrement )
if self.debugMode:
print( 'ARegOut {}'.format( self.A_register.readDecimal() ) )
print( 'DRegOut {}'.format( self.D_register.readDecimal() ) )
print( 'BRegOut {}'.format( self.B_register.readDecimal() ) )
# print( 'mem_16 ', data_memory.readDecimal( 16 ) )
# print( 'mem_17 ', data_memory.readDecimal( 17 ) )
# print( 'mem_0 ', data_memory.readDecimal( 0 ) )
# print( 'mem_1 ', data_memory.readDecimal( 1 ) )
print()
# Set output signals -
computer.halted = c_halt
```
#### File: Nand-2-Tetris/Emulators/pythonNBitArithmetic.py
```python
class NBitArithmetic ():
'''
Emulate N-bit arithmetic using Python's standard arithmetic
Used by the VM and HL emulators
'''
def __init__ ( self, N ):
self.N = N
# Keep in sync with 'commonHelpers.py'
self.negativeOne = 2 ** N - 1
self.largestInt = 2 ** ( N - 1 ) - 1
# Keep in sync with 'commonHelpers.py'
def trim( self, x ):
return x & self.negativeOne # discard overflow bits
def negate( self, x ):
return self.trim( ( x ^ self.negativeOne ) + 1 ) # twos complement
def isNegative( self, x ):
return x > self.largestInt
# Arithmetic and logic
def _not( self, a ):
return a ^ self.negativeOne # flip bits
def _neg( self, a ):
return self.negate( a )
def _and( self, a, b ):
return a & b
def _or( self, a, b ):
return a | b
def _xor( self, a, b ):
return a ^ b
def _lsl( self, a, b ):
return self.trim( a << b )
def _lsr( self, a, b ):
return a >> b # logical shift (assuming a is positive)
def _add( self, a, b ):
return self.trim( a + b )
def _sub( self, a, b ):
return self.trim( a + self.negate( b ) )
def _mul( self, a, b ):
return self.trim( a * b )
def _div( self, a, b ):
# Divide using absolutes and add signs after
aIsNeg = self.isNegative( a )
bIsNeg = self.isNegative( b )
# Get absolute values
if aIsNeg: a = self.negate( a )
if bIsNeg: b = self.negate( b )
# Divide
value = a // b
# If opposite signs, negate answer
if aIsNeg ^ bIsNeg:
value = self.negate( value )
return value
def _eq ( self, a, b ):
d = self._sub( a, b )
return d == 0
def _ne ( self, a, b ):
d = self._sub( a, b )
return d != 0
# For gt, gte, lt, lte see discussion here,
# http://nand2tetris-questions-and-answers-forum.32033.n3.nabble.com/Greater-or-less-than-when-comparing-numbers-with-different-signs-td4031520.html
# Code based on @cadet1620's answer
def _lt ( self, a, b ):
# return ng # simple, but inaccurate for opposite signs
if self.isNegative( a ):
if not self.isNegative( b ):
return True
# aIsNeg and bIsNeg
else:
if self.isNegative( b ):
return False
# aIsNotNeg and bIsNotNeg
# same signs
d = self._sub( a, b ) # won't overflow
if self.isNegative( d ):
return True
else:
return False
def _lte ( self, a, b ):
# return zr or ng # simple, but inaccurate for opposite signs
if self.isNegative( a ):
if not self.isNegative( b ):
return True
# aIsNeg and bIsNeg
else:
if self.isNegative( b ):
return False
# aIsNotNeg and bIsNotNeg
# same signs
d = self._sub( a, b ) # won't overflow
if self.isNegative( d ):
return True
else:
return d == 0
def _gt ( self, a, b ):
# return not( zr or ng ) # simple, but inaccurate for opposite signs
return not self._lte( a, b )
def _gte ( self, a, b ):
# return not( ng ) # simple, but inaccurate for opposite signs
return not self._lt( a, b )
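# Worked example (sketch):
#   alu = NBitArithmetic( 16 )
#   alu._sub( 3, 5 )              # -> 65534, the 16-bit encoding of -2
#   alu._lt( 3, alu.negate( 5 ) ) # -> False; opposite signs are handled explicitly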
```
#### File: Nand-2-Tetris/Emulators/vmEmulator.py
```python
import re
import time
import yappi
# Hack computer
import Components
from commonHelpers import *
from .pythonNBitArithmetic import *
# Configure computer ---------------
# VMX file containing all necessary program code
programPath = ''
debugPath = 'Debug/VMEmulator/' # Folder where logs go
debugMode = False
runYappiProfile = False
# Setup computer -------------------
nBits = Components.N_BITS
ALU = NBitArithmetic( nBits )
PC = 0
PC_prev = 0
PC_jump = False
RAM = [ 0 ] * ( 2 ** 16 )
ROM = [] # Pseudo ROM, loaded with VM code
clock = None
io = None
startTime = None
sysHalt = None
yieldToExternal = False # Suspend tick
static_segment_start = Components.STATIC_START
static_segment_end = Components.STATIC_END
stack_segment_start = Components.STACK_END
heap_segment_start = Components.HEAP_START
heap_segment_end = Components.HEAP_END
# Setup pointers -------------------
SP = 0
LCL = 1
ARG = 2
THIS = 3
THAT = 4
TEMP = 5
# GP = 13
STATIC = 16
# IO Helpers ------------------------
class RAMWrapper():
def __init__( self, ram ):
self.ram = ram
def read( self, address ):
return self.ram[ address ]
def write( self, clk, x, write, address ):
if clk == 1 and write == 1:
self.ram[ address ] = x
# VM Helpers ------------------------
# unaryOps = [ 'not', 'neg' ]
# binaryOps = [ 'and', 'or', 'add', 'sub', 'xor', 'lsl', 'lsr' ]
# comparisonOps = [ 'eq', 'gt', 'lt', 'gte', 'lte', 'ne' ]
# operations = [ unaryOps + binaryOps + comparisonOps ]
unaryOps = set( [ 'not', 'neg' ] )
binaryOps = set( [ 'and', 'or', 'add', 'sub', 'xor', 'lsl', 'lsr', 'mul', 'div' ] )
comparisonOps = set( [ 'eq', 'gt', 'lt', 'gte', 'lte', 'ne' ] )
operations = unaryOps | binaryOps | comparisonOps # Set membership checks are marginally faster than list scans
addressLookup = {}
staticLookup = {}
# VM instructions -------------------
def executeInstruction( cmd ):
cmdType = cmd[ 0 ]
if cmdType == 'push':
push( cmd[ 1 ], cmd[ 2 ], cmd )
elif cmdType == 'pop':
pop( cmd[ 1 ], cmd[ 2 ], cmd )
elif cmdType in operations:
operation( cmdType )
elif cmdType == 'goto':
goto( cmd[ 1 ] )
elif cmdType == 'if-goto':
ifgoto( cmd[ 1 ] )
elif cmdType == 'call':
call( cmd[ 1 ], cmd[ 2 ] )
elif cmdType == 'return':
ret()
elif cmdType == 'label':
label( cmd[ 1 ] )
elif cmdType == 'function':
function( cmd[ 1 ], cmd[ 2 ] )
else:
raise Exception( "Don't know how to execute the command - {}".format( cmd ) )
def push( seg, index, cmd ):
addr = RAM[ SP ]
if seg == 'constant':
RAM[ addr ] = index
elif seg == 'pointer':
if index == 0: RAM[ addr ] = RAM[ THIS ]
else: RAM[ addr ] = RAM[ THAT ]
elif seg == 'static':
RAM[ addr ] = RAM[ staticLookup[ cmd[ 3 ] ] ]
elif seg == 'temp':
RAM[ addr ] = RAM[ TEMP + index ]
elif seg == 'argument':
RAM[ addr ] = RAM[ RAM[ ARG ] + index ]
elif seg == 'local':
RAM[ addr ] = RAM[ RAM[ LCL ] + index ]
elif seg == 'this':
RAM[ addr ] = RAM[ RAM[ THIS ] + index ]
elif seg == 'that':
RAM[ addr ] = RAM[ RAM[ THAT ] + index ]
else:
raise Exception( 'Unknown segment - {}'.format( seg ) )
# Update SP
RAM[ SP ] += 1
# if RAM[ SP ] >= heap_segment_start:
# raiseException( 'Stack overflow' )
def pop( seg, index, cmd ):
addr = RAM[ SP ] - 1
value = RAM[ addr ]
if seg == 'pointer':
if index == 0: RAM[ THIS ] = value
else: RAM[ THAT ] = value
elif seg == 'static':
RAM[ staticLookup[ cmd[ 3 ] ] ] = value
elif seg == 'temp':
RAM[ TEMP + index ] = value
elif seg == 'argument':
RAM[ RAM[ ARG ] + index ] = value
elif seg == 'local':
RAM[ RAM[ LCL ] + index ] = value
elif seg == 'this':
RAM[ RAM[ THIS ] + index ] = value
elif seg == 'that':
RAM[ RAM[ THAT ] + index ] = value
else:
raise Exception( 'Unknown segment - {}'.format( seg ) )
# Update SP
RAM[ SP ] -= 1
def operation( op ):
if op in unaryOps:
addr = RAM[ SP ] - 1
a = RAM[ addr ]
if op == 'not':
RAM[ addr ] = ALU._not( a )
elif op == 'neg':
RAM[ addr ] = ALU._neg( a )
elif op in binaryOps:
addr_a = RAM[ SP ] - 2
addr_b = RAM[ SP ] - 1
a = RAM[ addr_a ]
b = RAM[ addr_b ]
value = None
if op == 'and':
value = ALU._and( a, b )
elif op == 'or':
value = ALU._or( a, b )
elif op == 'xor':
value = ALU._xor( a, b )
elif op == 'lsl':
value = ALU._lsl( a, b )
elif op == 'lsr':
value = ALU._lsr( a, b )
elif op == 'add':
value = ALU._add( a, b )
elif op == 'sub':
value = ALU._sub( a, b )
elif op == 'mul':
value = ALU._mul( a, b )
elif op == 'div':
value = ALU._div( a, b )
RAM[ addr_a ] = value
# Update SP
RAM[ SP ] -= 1
elif op in comparisonOps:
addr_a = RAM[ SP ] - 2
addr_b = RAM[ SP ] - 1
a = RAM[ addr_a ]
b = RAM[ addr_b ]
value = None
if op == 'eq':
value = ALU._eq( a, b )
elif op == 'ne':
value = ALU._ne( a, b )
elif op == 'gt':
value = ALU._gt( a, b )
elif op == 'gte':
value = ALU._gte( a, b )
elif op == 'lt':
value = ALU._lt( a, b )
elif op == 'lte':
value = ALU._lte( a, b )
if value:
RAM[ addr_a ] = negativeOne # all 1s, so that bitwise NOT of true gives 0
else:
RAM[ addr_a ] = 0
# Update SP
RAM[ SP ] -= 1
def goto( loc ):
global PC
global PC_jump
PC = addressLookup[ loc ]
PC_jump = True
def ifgoto( loc ):
global PC
global PC_jump
addr = RAM[ SP ] - 1
value = RAM[ addr ]
if value != 0:
# if value:
PC = addressLookup[ loc ]
PC_jump = True
# Update SP
RAM[ SP ] -= 1
def call( fxName, nArgs ):
addr = RAM[ SP ]
# Save return position
RAM[ addr ] = PC + 1
addr += 1
# Save segment pointers
RAM[ addr ] = RAM[ LCL ]
addr += 1
RAM[ addr ] = RAM[ ARG ]
addr += 1
RAM[ addr ] = RAM[ THIS ]
addr += 1
RAM[ addr ] = RAM[ THAT ]
addr += 1
# Set ARG pointer
RAM[ ARG ] = RAM[ SP ] - nArgs
# Set LCL pointer
RAM[ LCL ] = addr
# Set SP
RAM[ SP ] = addr
# Goto function
goto( fxName )
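# Resulting stack frame (sketch), matching what ret() below unwinds:
#   ..., arg0 .. argN-1, retAddr, savedLCL, savedARG, savedTHIS, savedTHAT, locals...
#   RAM[ARG] points at arg0; RAM[LCL] == RAM[SP] point just past savedTHAT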
def ret():
global PC
global PC_jump
global yieldToExternal
# Save current LCL pointer
curLCL = RAM[ LCL ]
# Save return address
retAddr = RAM[ curLCL - 5 ]
# Copy return value into arg0
addr_a = RAM[ ARG ]
addr_r = RAM[ SP ] - 1
RAM[ addr_a ] = RAM[ addr_r ]
# Reposition SP for caller (to just after return value)
RAM[ SP ] = addr_a + 1
# Restore segment pointers of caller
curLCL -= 1
RAM[ THAT ] = RAM[ curLCL ]
curLCL -= 1
RAM[ THIS ] = RAM[ curLCL ]
curLCL -= 1
RAM[ ARG ] = RAM[ curLCL ]
curLCL -= 1
RAM[ LCL ] = RAM[ curLCL ]
# Jump to return position
PC = retAddr
PC_jump = True
yieldToExternal = False # temp...
def label( loc ): pass
def function( fxName, nLocals ):
global yieldToExternal
# print( 'curFx - ', fxName )
# Init locals to zeros
for i in range( nLocals ):
addr = RAM[ LCL ] + i
RAM[ addr ] = 0
RAM[ SP ] += nLocals
# If exists, execute python equivalent
if fxName in OSWrappers:
yieldToExternal = True
OSWrappers[ fxName ]()
# OS Wrappers -----------------------
# Sys ---
def Sys_wait():
# Retrieve args ---
argBase = RAM[ ARG ]
duration = RAM[ argBase ]
# Subroutine body ---
'''
if ( duration <= 0 ) {
Sys.error( 1 );
// Sys.raiseException( 'Sys.wait duration must be greater than zero' );
}
'''
if duration <= 0:
print( 'ERROR: Sys.wait duration must be greater than zero' )
# Halt program
haltOnError()
return
# print( 'About to sleep for {} ms'.format( duration ) )
time.sleep( duration / 1000 ) # convert msec to sec
# Return ---
push( 'constant', 0, None )
ret()
# ---
OSWrappers = {
'Sys.wait' : Sys_wait
}
# Load program ----------------------
cmdPattern = '''
^ # from beginning of string
.*? # select all characters until
(?=\/\/|[\r\n]) # reach start of a comment or the string's end
'''
cmdPattern = re.compile( cmdPattern, re.X )
def extractCmd( line ):
found = re.search( cmdPattern, line ) # select everything that is not a comment
if found:
cmd = found.group( 0 )
cmd = cmd.strip() # remove leading and trailing whitespace
return cmd.split( ' ' ) # split on spaces
else:
return None
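# Example (sketch):
#   extractCmd( 'push constant 7 // set up arg\n' ) -> [ 'push', 'constant', '7' ]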
def extractProgram( inputFilePath ):
addr = 0
curFx = ''
curClass = ''
freeAddress = static_segment_start
with open( inputFilePath, 'r' ) as file:
for line in file:
cmd = extractCmd( line )
if cmd:
cmdType = cmd[ 0 ]
if cmdType == 'function':
curFx = cmd[ 1 ]
curClass = curFx.split( '.' )[ 0 ]
addressLookup[ cmd[ 1 ] ] = addr
cmd[ 2 ] = int( cmd[ 2 ] ) # cast nLocals to int
ROM.append( cmd )
elif cmdType == 'label' or cmdType == 'goto' or cmdType == 'if-goto':
# Make labels globally unique
newLabel = '{}_{}'.format( curFx, cmd[ 1 ] )
if cmdType == 'label':
addressLookup[ newLabel ] = addr
ROM.append( [ cmdType, newLabel ] )
elif cmdType == 'push' or cmdType == 'pop':
cmd[ 2 ] = int( cmd[ 2 ] ) # cast index to int
if cmd[ 1 ] == 'static':
# Make static references globally unique
if len( cmd ) == 4: # 'push/pop static index className' vs 'push/pop static index'
className = cmd[ 3 ]
else:
className = curClass
refName = '{}_{}'.format( className, cmd[ 2 ] )
if refName not in staticLookup:
if freeAddress <= static_segment_end:
staticLookup[ refName ] = freeAddress
freeAddress += 1
else:
raise Exception( 'Ran out of static space' )
if len( cmd ) == 4: # 'push/pop static index className' vs 'push/pop static index'
cmd[ 3 ] = refName
else:
cmd += [ refName ]
ROM.append( cmd )
elif cmdType == 'call':
cmd[ 2 ] = int( cmd[ 2 ] ) # cast nArgs to int
ROM.append( cmd )
else:
ROM.append( cmd )
addr += 1
# Debug -----------------------------
def updateWithDebug():
update()
if breakpoint():
clock.stop()
# print( 'Breakpoint reached' )
print( 'Breakpoint reached after {} clock cycles'.format( clock.currentCycle ) )
print( 'Took {} seconds to reach breakpoint'.format( time.time() - startTime ) )
debug2File()
def breakpoint():
# pass
return PC == sysHalt
# return PC == addressLookup[ 'GFX.fillRect' ]
# return clock.currentCycle == 384381
def debug2File():
filePath = debugPath + str( clock.currentCycle )
with open( filePath, 'w' ) as file:
file.write( '{} ------------'.format( PC_prev ) + '\n' )
file.write( ' '.join( map( str, ROM[ PC_prev ] ) ) + '\n' )
file.write( '' + '\n' )
file.write( 'SP {}'.format( RAM[ 0 ] ) + '\n' )
file.write( 'LCL {}'.format( RAM[ 1 ] ) + '\n' )
file.write( 'ARG {}'.format( RAM[ 2 ] ) + '\n' )
file.write( 'THIS {}'.format( RAM[ 3 ] ) + '\n' )
file.write( 'THAT {}'.format( RAM[ 4 ] ) + '\n' )
file.write( 'TMP0 {}'.format( RAM[ 5 ] ) + '\n' )
file.write( 'TMP1 {}'.format( RAM[ 6 ] ) + '\n' )
file.write( 'TMP2 {}'.format( RAM[ 7 ] ) + '\n' )
file.write( 'TMP3 {}'.format( RAM[ 8 ] ) + '\n' )
file.write( 'TMP4 {}'.format( RAM[ 9 ] ) + '\n' )
file.write( 'TMP5 {}'.format( RAM[ 10 ] ) + '\n' )
file.write( 'TMP6 {}'.format( RAM[ 11 ] ) + '\n' )
file.write( 'TMP7 {}'.format( RAM[ 12 ] ) + '\n' )
file.write( 'GP0 {}'.format( RAM[ 13 ] ) + '\n' )
file.write( 'GP1 {}'.format( RAM[ 14 ] ) + '\n' )
file.write( 'GP2 {}'.format( RAM[ 15 ] ) + '\n' )
file.write( '' + '\n' )
# static
file.write( 'Static' + '\n' )
for i in range( static_segment_start, stack_segment_start ):
file.write( '\t{:<3} {}'.format( i, RAM[ i ] ) + '\n' )
file.write( '' + '\n' )
# stack
sp = RAM[ 0 ]
file.write( 'Stack' + '\n' )
for i in range( stack_segment_start, sp ):
file.write( '\t{:<4} {}'.format( i, RAM[ i ] ) + '\n' )
file.write( '\t{:<4} .. ({})'.format( sp, RAM[ sp ] ) + '\n' )
file.write( '' + '\n' )
# heap
file.write( 'Heap' + '\n' )
for i in range( heap_segment_start, heap_segment_end + 1 ):
file.write( '\t{:<5} {}'.format( i, RAM[ i ] ) + '\n' )
file.write( '' + '\n' )
def dumpROMnAddresses():
# Dump ROM
with open( debugPath + 'romDump', 'w' ) as file:
for e in ROM:
file.write( ' '.join( map( str, e ) ) + '\n' )
# Dump addresses
with open( debugPath + 'addressDump', 'w' ) as file:
# Dump generated label addresses
for kv in sorted( addressLookup.items(), key = lambda x : x[ 1 ] ):
file.write( '{:<5} - {}\n'.format( kv[ 1 ], kv[ 0 ] ) )
file.write( '\n\n' )
# Dump generated static addresses
for kv in sorted( staticLookup.items(), key = lambda x : x[ 1 ] ):
file.write( '{:<3} - {}\n'.format( kv[ 1 ], kv[ 0 ] ) )
# Computer --------------------------
def haltOnError():
global PC
global yieldToExternal
PC = sysHalt # end program
yieldToExternal = True # prevent tick
if debugMode:
debug2File()
update()
def setup():
global clock
global io
global startTime
global sysHalt
#
if not Components.PERFORMANCE_MODE:
raise Exception( 'The VM Emulator only works when GC.PERFORMANCE_MODE is True' )
# Setup RAM
RAM[ SP ] = 256
RAM[ LCL ] = 256
RAM[ ARG ] = 256
RAM[ THIS ] = 9999
RAM[ THAT ] = 9999
# Setup ROM
startTime = time.time()
extractProgram( programPath )
print( 'Completed ROM flash. Took {0:.2f} seconds.'.format( time.time() - startTime ) )
if debugMode:
# Dump ROM and addresses
dumpROMnAddresses()
# Retrieve location of Sys.halt
sysHalt = addressLookup[ 'Sys.halt' ]
# Initialize clock
clock = Components.Clock()
# Setup callbacks
if debugMode:
clock.callbackRising = updateWithDebug
else:
clock.callbackRising = update
# Initialize IO
io = Components.IO( nBits, RAMWrapper( RAM ) )
def tick():
global PC
global PC_prev
global PC_jump
PC_prev = PC # helps with debugging
# Fetch instruction
instruction = ROM[ PC ]
# print( '{} - {}'.format( PC, instruction ) )
# Execute instruction
executeInstruction( instruction )
# Increment PC
if PC_jump == False:
PC += 1
else:
PC_jump = False
''' Kinda hacky, workaround for different clocks.
Make IO screen updates run on CPU clock.
'''
io.updateScreen()
def update():
if not yieldToExternal:
tick()
# Handle exit via IO
if io.hasExited:
if debugMode:
debug2File()
clock.stop()
print( 'See you later!' )
# Profile... temp
if runYappiProfile:
yappi.get_func_stats().print_all()
# Stop running when reach Sys.halt
if PC == sysHalt:
# Stop clock
clock.stop()
# Stop (lower) screen update
io.maxFps = 1 # lowest can go is 1 FPS
print( 'Sys.halt reached. Took {0:.2f} seconds.'.format( time.time() - startTime ) )
# Profile... temp
if runYappiProfile:
yappi.get_func_stats().print_all()
# Run -------------------------------
def run( programPath_ ):
global programPath
global startTime # so the reset below rebinds the module-level timer used by update()
# Specify program
if programPath_:
programPath = programPath_
# Setup
setup()
# Profile... temp
if runYappiProfile:
yappi.start()
# Start IO
io.runAsThread()
# Start clock
clock.run()
print( 'Program has started' )
startTime = time.time()
```
#### File: v1.0/Assembler/vm2asm.py
```python
'''
TODO
- optimize for 'x+1', 'x-1', 0, 1, -1
'''
# == Imports =======================================================
# Built ins
import re
import os
# == Main ==========================================================
# -- Lookup tables ---------------------------------
segmentPointer = {
'argument' : '@ARG',
'local' : '@LCL',
'this' : '@THIS',
'that' : '@THAT',
}
unaryOps = {
'not' : '!',
'neg' : '-',
}
binaryOps = {
'and' : '&',
'or' : '|',
'add' : '+',
'sub' : '-',
'xor' : '^',
'lsr' : '>>',
'lsl' : '<<',
}
comparisonOps = {
'eq' : 'JEQ',
'gt' : 'JGT',
'lt' : 'JLT',
'gte' : 'JGE',
'lte' : 'JLE',
'ne' : 'JNE',
}
# operations = unaryOps + binaryOps + comparisonOps
operations = dict( unaryOps )
operations.update( binaryOps )
operations.update( comparisonOps )
# -- Extraction -------------------------------------
# With regex --
# Select everything that is not a comment
cmdPattern = '''
^ # from beginning of string
.*? # select all characters until
(?=\/\/|[\r\n]) # reach start of a comment or the string's end
'''
cmdPattern = re.compile( cmdPattern, re.X )
# def extractCmd_wRe( line ):
def extractCmd( line ):
found = re.search( cmdPattern, line ) # select everything that is not a comment
if found:
cmd = found.group( 0 )
cmd = cmd.strip() # remove leading and trailing whitespace
return cmd.split( ' ' ) # split on spaces
else:
return None
# Without regex --
def strip ( s ):
# Remove leading and trailing whitespace
whitespace = ' \t\r\n\f'
start = 0
end = len( s ) - 1
while s[ start ] in whitespace:
start += 1
while s[ end ] in whitespace:
end -= 1
return s[ start : end + 1 ]
def split ( s, sep ):
# Split into substrings using sep (single char) as delimiter
items = []
i = 0
sLen = len( s )
while i < sLen:
item = ''
while i < sLen and s[ i ] != sep:
item += s[i]
i += 1
items.append( item )
i += 1 # skip sep
if s[ sLen - 1 ] == sep:
items.append( '' ) # make return length consistent
return items
# s = 'pants'
# s = "----"
# s = 'pants-are-cool'
# s = 'pants-are--cool'
# s = 'pants--are-cool-----'
# print( split( s, '-' ) )
# print( s.split( '-' ) )
def extractCmd_woutRe( line ):
# def extractCmd( line ):
s = ''
i = 0
tok = line[ i ]
# Select everything that is not a comment
while tok != '\n' and tok != '/':
s += tok
i += 1
tok = line[ i ]
if i:
s = strip( s ) # remove leading and trailing whitespace
return split( s, ' ' ) # split on spaces
else:
return None
# -- Translation -------------------------------------
class Feed():
def __init__( self, cmdList ):
self.pos = 0
self.cmdList = cmdList
self.end = len( cmdList )
def next( self ):
if self.eof( self.pos ):
return None
else:
cmd = self.cmdList[ self.pos ]
self.pos += 1
return cmd
def peek( self ):
if self.eof( self.pos ):
return None
return self.cmdList[ self.pos ]
def peekpeek( self ):
if self.eof( self.pos + 1 ):
return None
return self.cmdList[ self.pos + 1 ]
def eof( self, pos ):
return pos >= self.end
class Compiler():
def __init__( self ):
# Set SP
self.SP = 256
# Set corresponding RAM addresses
self.TEMP = 5 # 5..12
self.GP = 13 # 13..15
# Track scope
self.curClassName = None
self.curFunctionName = None
# Use generic functions
self.useGenerics = True
# Include comments in generated asm file
self.debug = False
# Skip cmd
self.skip = False
self.skip2 = False
def setup( self ):
self.compCount = 0 # track jump positions of comparison operations
self.returnPosCount = 0 # track return positions of calls
self.returnGenericPosCount = 0 # track return positions of generic ASM calls
def compile( self, cmds, debug = False ):
self.debug = debug
self.setup()
out = []
out.append( self.compile_bootstrap() )
for className in cmds:
if self.debug:
out.append( '\n// === {} ===\n'.format( className ) )
out.append( self.compile_topLevel( className, cmds[ className ] ) )
return self.a2s( out )
def compile_topLevel( self, className, cmdList ):
self.curClassName = className
self.input = Feed( cmdList )
return self.compile_statements()
# --------------------------------------
def a2s( self, a ):
# Return newline delimited string
return '\n'.join( a )
def at( self, x ):
return '@' + str( x )
def atTemp( self, index ):
return self.at( self.TEMP + index )
def atGP( self, index ):
return self.at( self.GP + index )
# def atStatic( self, index ):
def atStatic( self, index, className = None ):
return '@{}.{}'.format( self.curClassName, index )
# TODO, support for static access outside class
# return '@{}.{}'.format( className, index )
def label( self, loc ):
return '({})'.format( loc )
def pushDToStack( self ):
s = []
# Push it to stack
s.append( '@SP' )
s.append( 'A = M' ) # set A reg to address held by SP
s.append( 'M = D' ) # set value at said address
# Increment address held by SP
s.append( '@SP' )
s.append( 'M = M + 1' )
return self.a2s( s )
def popStackToD( self ):
s = []
# Decrement address held by SP, and get the value
s.append( '@SP' )
s.append( 'AM = M - 1' )
s.append( 'D = M' )
return self.a2s( s )
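# Example output (sketch): compile_push( 'constant', 7 ) below emits roughly
#   @7
#   D = A
#   @SP
#   A = M
#   M = D
#   @SP
#   M = M + 1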
# --------------------------------------
def compile_statements( self, exitCondition = None ):
s = []
while True:
cmd = self.input.next()
if cmd:
if self.skip2:
self.skip = True
self.skip2 = False
continue # go to next iteration
if self.skip:
self.skip = False
continue # go to next iteration
if self.debug:
s.append( '// {}'.format( ' '.join( cmd ) ) )
s.append( self.compile_statement( cmd ) )
else:
break
return self.a2s( s )
def compile_statement( self, cmd ):
cmdType = cmd[0]
if cmdType == 'push':
cmd2 = self.input.peek()
if ( cmd2 and cmd2[0] == 'pop' and
( cmd2[1] == 'pointer' or cmd2[1] == 'static' or cmd2[1] == 'temp' ) ):
# 'compile_push_pop' generates less code than separate 'compile_push' and 'compile_pop'
self.skip = True
return self.compile_push_pop( cmd[1], int( cmd[2] ), cmd2[1], int( cmd2[2] ) )
# elif cmd2 and cmd2[0] in binaryOps:
elif ( cmd2 and cmd2[0] in binaryOps and
cmd2[0] != 'lsl' and cmd2[0] != 'lsr' ):
cmd3 = self.input.peekpeek()
if cmd3 and cmd3[0] == 'if-goto':
# compile_push_binaryOp_ifgoto generates less code than separate
# 'compile_push', 'compile_operation', and 'compile_ifgoto'
self.skip2 = True
return self.compile_push_binaryOp_ifgoto( cmd[1], int( cmd[2] ), cmd2[0], cmd3[1] )
else:
# compile_push_binaryOp generates less code than separate 'compile_push' and 'compile_operation'
self.skip = True
return self.compile_push_binaryOp( cmd[1], int( cmd[2] ), cmd2[0] )
elif cmd2 and cmd2[0] in comparisonOps:
cmd3 = self.input.peekpeek()
if cmd3 and cmd3[0] == 'if-goto':
# compile_push_comparisonOp_ifgoto generates less code than separate
# 'compile_push', 'compile_operation', and 'compile_ifgoto'
self.skip2 = True
return self.compile_push_comparisonOp_ifgoto( cmd[1], int( cmd[2] ), cmd2[0], cmd3[1] )
else:
# compile_push_comparisonOp generates less code than separate 'compile_push' and 'compile_operation'
self.skip = True
return self.compile_push_comparisonOp( cmd[1], int( cmd[2] ), cmd2[0] )
else:
return self.compile_push( cmd[1], int( cmd[2] ) )
elif cmdType == 'pop':
return self.compile_pop( cmd[1], int( cmd[2] ) )
elif cmdType in operations:
cmd2 = self.input.peek()
if cmd2 and cmd2[0] == 'if-goto':
# compile_yyyyOp_ifgoto generates less code than separate 'compile_operation' and 'compile_ifgoto'
self.skip = True
if cmdType in unaryOps:
return self.compile_unaryOp_ifgoto( cmdType, cmd2[1] )
elif cmdType in binaryOps:
return self.compile_binaryOp_ifgoto( cmdType, cmd2[1] )
elif cmdType in comparisonOps:
return self.compile_comparisonOp_ifgoto( cmdType, cmd2[1] )
else:
return self.compile_operation( cmdType )
elif cmdType == 'label':
return self.compile_label( cmd[1] )
elif cmdType == 'goto':
return self.compile_goto( cmd[1] )
elif cmdType == 'if-goto':
return self.compile_ifgoto( cmd[1] )
elif cmdType == 'call':
return self.compile_call( cmd[1], int( cmd[2] ) )
elif cmdType == 'function':
return self.compile_function( cmd[1], int( cmd[2] ) )
elif cmdType == 'return':
return self.compile_return()
else:
raise Exception( "Don't know how to compile the command - {}".format( cmd ) )
def compile_push_( self, seg, index ):
s = []
# Get value from segment
if seg == 'constant':
s.append( self.at( index ) )
s.append( 'D = A' )
elif seg == 'pointer':
if index == 0: s.append( '@THIS' )
else: s.append( '@THAT' )
s.append( 'D = M' )
elif seg == 'static':
s.append( self.atStatic( index ) )
s.append( 'D = M' )
elif seg == 'temp':
s.append( self.atTemp( index ) )
s.append( 'D = M' )
else: # arg, local, this, that
if index == 0:
s.append( segmentPointer[ seg ] )
s.append( 'A = M' )
else:
s.append( self.at( index ) )
s.append( 'D = A' )
s.append( segmentPointer[ seg ] )
s.append( 'A = M + D' )
s.append( 'D = M' )
return self.a2s( s )
def compile_push( self, seg, index ):
s = []
# Get value from segment
s.append( self.compile_push_( seg, index ) )
# Push it to stack
s.append( self.pushDToStack() )
return self.a2s( s )
def compile_pop( self, seg, index ):
s = []
if seg == 'pointer' or seg == 'static' or seg == 'temp':
# Get value from stack
s.append( self.popStackToD() )
# Get target address
if seg == 'pointer':
if index == 0: s.append( '@THIS' )
else: s.append( '@THAT' )
elif seg == 'static':
s.append( self.atStatic( index ) )
elif seg == 'temp':
s.append( self.atTemp( index ) )
# Pop value to target address
s.append( 'M = D' )
else: # arg, local, this, that
# Get target address
if index == 0:
s.append( segmentPointer[ seg ] )
s.append( 'D = M' )
else:
s.append( self.at( index ) )
s.append( 'D = A' )
s.append( segmentPointer[ seg ] )
s.append( 'D = M + D' )
# Save address to temporary location
s.append( self.atGP( 0 ) )
s.append( 'M = D' )
# Get value from stack
s.append( self.popStackToD() )
# Pop value to target address
s.append( self.atGP( 0 ) )
s.append( 'A = M' )
s.append( 'M = D' )
return self.a2s( s )
def compile_push_pop( self, seg1, index1, seg2, index2 ):
s = []
# Get value from segment
s.append( self.compile_push_( seg1, index1 ) )
# Get target address
if seg2 == 'pointer':
if index2 == 0: s.append( '@THIS' )
else: s.append( '@THAT' )
elif seg2 == 'static':
s.append( self.atStatic( index2 ) )
elif seg2 == 'temp':
s.append( self.atTemp( index2 ) )
else:
raise Exception( "Shouldn't reach here" )
# Pop value to target address
s.append( 'M = D' )
return self.a2s( s )
def compile_operation( self, op ):
'''
12 12 12
7 sub -> 4 neg -> -4
3 SP SP
SP
'''
s = []
if op in unaryOps:
s.append( '@SP' )
s.append( 'A = M - 1' )
s.append( 'M = {} M'.format( unaryOps[ op ] ) )
elif op in binaryOps:
if op == 'lsl' or op == 'lsr':
# Workaround for available shift instructions ( D << M or D >> M )
# Get prev value
s.append( self.popStackToD() ) # D = prev_val
s.append( self.atGP( 0 ) )
s.append( 'M = D' ) # GP0 = prev_val
# Get prevprev value
s.append( '@SP' )
s.append( 'A = M - 1' ) # aReg = prevprev_addr
s.append( 'D = M' ) # D = prevprev_val
# Apply op
s.append( self.atGP( 0 ) )
s.append( 'D = D {} M'.format( binaryOps[ op ] ) ) # D = prevprev_val op prev_val
s.append( '@SP' )
s.append( 'A = M - 1' ) # aReg = prevprev_addr
s.append( 'M = D' ) # stack = D
else:
# Get prev value
s.append( self.popStackToD() ) # D = prev_val
# Get prevprev value
s.append( 'A = A - 1' ) # aReg = prevprev_addr
# Apply op
s.append( 'M = M {} D'.format( binaryOps[ op ] ) ) # stack = prevprev_val op prev_val
elif op in comparisonOps:
s.append( self.compile_comparisonOp( op ) )
return self.a2s( s )
def compile_push_binaryOp( self, seg, index, op ):
s = []
# Get value from segment
s.append( self.compile_push_( seg, index ) )
# Get prevprev value
s.append( '@SP' )
s.append( 'A = M - 1' ) # aReg = prevprev_addr
# Apply op
s.append( 'M = M {} D'.format( binaryOps[ op ] ) ) # stack = prevprev_val op segment_val
return self.a2s( s )
def compile_push_binaryOp_ifgoto( self, seg, index, op, loc ):
s = []
# Get value from segment
s.append( self.compile_push_( seg, index ) )
# Get prevprev value, and
# decrement address held by SP (this would usually be done by ifgoto when retrieving comparison result)
s.append( '@SP' )
s.append( 'AM = M - 1' ) # aReg = prevprev_addr
# Apply op
s.append( 'D = M {} D'.format( binaryOps[ op ] ) ) # D = prevprev_val op segment_val
# Conditional jump
s.append( self.compile_ifgoto_( loc ) )
return self.a2s( s )
def compile_binaryOp_ifgoto( self, op, loc ):
# Only one line saved by doing this instead of using popStackToD in ifgoto
s = []
# Get prev value
s.append( self.popStackToD() ) # D = prev_val
# Get prevprev value
s.append( 'A = A - 1' ) # aReg = prevprev_addr
# Apply op
s.append( 'D = M {} D'.format( binaryOps[ op ] ) ) # D = prevprev_val op prev_val
# Decrement address held by SP (this would usually be done by ifgoto when retrieving comparison result)
s.append( '@SP' )
s.append( 'M = M - 1' )
# Conditional jump
s.append( self.compile_ifgoto_( loc ) )
return self.a2s( s )
def compile_unaryOp_ifgoto( self, op, loc ):
# Three lines saved by doing this instead of using popStackToD in ifgoto
s = []
# Apply op, and
# decrement address held by SP (this would usually be done by ifgoto when retrieving comparison result)
s.append( '@SP' )
s.append( 'AM = M - 1' )
s.append( 'D = {} M'.format( unaryOps[ op ] ) )
# Conditional jump
s.append( self.compile_ifgoto_( loc ) )
return self.a2s( s )
'''
def compile_comparisonOp_( self, op ):
s = []
cTrue = 'comp_true{}'.format( self.compCount )
cEnd = 'comp_end{}'.format( self.compCount )
self.compCount += 1
# Compare
s.append( 'D = M - D' ) # D = prevprev_val - prev_val
s.append( self.at( cTrue ) )
s.append( 'D ; {}'.format( comparisonOps[ op ] ) )
# False
s.append( 'D = 0' )
s.append( self.at( cEnd ) )
s.append( '0 ; JMP' )
# True
s.append( self.label( cTrue ) )
s.append( 'D = 1' )
# End/continue
s.append( self.label( cEnd ) )
def compile_comparisonOp( self, op ):
s = []
# Get prev value
s.append( self.popStackToD() ) # D = prev_val
# Get prevprev value
s.append( 'A = A - 1' ) # aReg = prevprev_addr
# Compare
s.append( self.compile_comparisonOp_( op ) )
# Update stack
s.append( '@SP' )
s.append( 'A = M - 1' )
s.append( 'M = D' )
return self.a2s( s )
'''
def compile_comparisonOp2_generic( self, op ):
# TODO - Change gt, gte, lt, lte to reflect,
# http://nand2tetris-questions-and-answers-forum.32033.n3.nabble.com/Greater-or-less-than-when-comparing-numbers-with-different-signs-td4031520.html
s = []
# Diff
s.append( self.atTemp( 2 ) ) # b
s.append( 'D = M' )
s.append( self.atTemp( 1 ) ) # a
s.append( 'D = M - D' )
# Diff is zero (label placeholders below still need unique suffixes, e.g. via a counter like self.compCount)
s.append( self.at( '..._true{}' ) )
s.append( 'D ; JEQ' )
s.append( self.atTemp( 3 ) )
s.append( 'M = 0' )
s.append( self.at( '..._end{}' ) )
s.append( '0 ; JMP' )
s.append( self.label( '..._true{}' ) )
s.append( self.atTemp( 3 ) )
s.append( 'M = - 1' )
s.append( self.label( '..._end{}' ) )
# Diff is negative
s.append( self.at( '..._true{}' ) )
s.append( 'D ; JLT' )
s.append( self.atTemp( 4 ) )
s.append( 'M = 0' )
s.append( self.at( '..._end{}' ) )
s.append( '0 ; JMP' )
s.append( self.label( '..._true{}' ) )
s.append( self.atTemp( 4 ) )
s.append( 'M = - 1' )
s.append( self.label( '..._end{}' ) )
# a is negative
s.append( self.atTemp( 1 ) )
s.append( 'D = M' )
s.append( self.at( '..._true{}' ) )
s.append( 'D ; JLT' )
s.append( self.atTemp( 1 ) )
s.append( 'M = 0' )
s.append( self.at( '..._end{}' ) )
s.append( '0 ; JMP' )
s.append( self.label( '..._true{}' ) )
s.append( self.atTemp( 1 ) )
s.append( 'M = - 1' )
s.append( self.label( '..._end{}' ) )
# b is negative
s.append( self.atTemp( 2 ) )
s.append( 'D = M' )
s.append( self.at( '..._true{}' ) )
s.append( 'D ; JLT' )
s.append( self.atTemp( 2 ) )
s.append( 'M = 0' )
s.append( self.at( '..._end{}' ) )
s.append( '0 ; JMP' )
s.append( self.label( '..._true{}' ) )
s.append( self.atTemp( 2 ) )
s.append( 'M = - 1' )
s.append( self.label( '..._end{}' ) )
# Signs are opposite
s.append( self.atTemp( 1 ) )
s.append( 'D = M' )
s.append( self.atTemp( 2 ) )
s.append( 'M = M ^ D' )
if op == 'gt':
# if opposite signs
s.append( self.atTemp( 2 ) )
s.append( 'D = M' )
s.append( self.at( '..false1' ) )
s.append( 'D ; JEQ' )
# true1
s.append( self.atTemp( 1 ) )
s.append( 'D = M' )
s.append( self.at( '..false2' ) )
s.append( 'D ; JEQ' )
#true2
s.append( 'D = 0' )
s.append( self.at( '..end2' ) )
s.append( '0 ; JMP' )
#false2
s.append( self.label( '..false2' ) )
s.append( 'D = - 1' )
#end2
s.append( self.label( '..end2' ) )
s.append( self.at( '..end1' ) )
s.append( '0 ; JMP' )
# false1
s.append( self.label( '..false1' ) )
s.append( self.atTemp( 3 ) )
s.append( 'D = M' )
s.append( self.atTemp( 4 ) )
s.append( 'D = M | D' )
s.append( 'D = ! D' )
# end1
s.append( self.label( '..end1' ) )
# _ if aIsNeg : value = 0
# _ else : value = -1
# else : value = ! ( zr | ng )
elif op == 'gte': pass
elif op == 'lt': pass
elif op == 'lte': pass
return self.a2s( s )
def compile_comparisonOp( self, op ):
s = []
if op == 'eq' or op == 'ne':
# Get prev value
s.append( self.popStackToD() ) # D = prev_val
# Get prevprev value
s.append( 'A = A - 1' ) # aReg = prevprev_addr
# Compare
s.append( self.compile_comparisonOp_( op ) )
else:
# Get prev value
s.append( self.popStackToD() ) # D = prev_val
s.append( self.atTemp( 2 ) ) # b
s.append( 'M = D' )
# Get prevprev value
s.append( '@SP' )
s.append( 'A = M - 1' ) # aReg = prevprev_addr
s.append( 'D = M' )
s.append( self.atTemp( 1 ) ) # a
s.append( 'M = D' )
# Compare
s.append( self.compile_comparisonOp2_( op ) )
# Update stack
s.append( '@SP' )
s.append( 'A = M - 1' )
s.append( 'M = D' )
return self.a2s( s )
def compile_push_comparisonOp( self, seg, index, op ):
s = []
# Get value from segment
s.append( self.compile_push_( seg, index ) )
# Get prevprev value
s.append( '@SP' )
s.append( 'A = M - 1' ) # aReg = prevprev_addr
# Compare
s.append( self.compile_comparisonOp_( op ) )
# Update stack
s.append( '@SP' )
s.append( 'A = M - 1' )
s.append( 'M = D' )
return self.a2s( s )
def compile_push_comparisonOp_ifgoto( self, seg, index, op, loc ):
s = []
# Get value from segment
s.append( self.compile_push_( seg, index ) )
# Get prevprev value
s.append( '@SP' )
s.append( 'A = M - 1' ) # aReg = prevprev_addr
# Compare
s.append( self.compile_comparisonOp_( op ) )
# Decrement address held by SP (this would usually be done by ifgoto when retrieving comparison result)
s.append( '@SP' )
s.append( 'M = M - 1' )
# Conditional jump
s.append( self.compile_ifgoto_( loc ) )
return self.a2s( s )
def compile_comparisonOp_ifgoto( self, op, loc ):
# Only one line saved by doing this instead of using popStackToD in ifgoto.
# However, also save 3 lines by not pushing comparison result onto stack
s = []
# Get prev value
s.append( self.popStackToD() ) # D = prev_val
# Get prevprev value
s.append( 'A = A - 1' ) # aReg = prevprev_addr
# Compare
s.append( self.compile_comparisonOp_( op ) )
# Decrement address held by SP (this would usually be done by ifgoto when retrieving comparison result)
s.append( '@SP' )
s.append( 'M = M - 1' )
# Conditional jump
s.append( self.compile_ifgoto_( loc ) )
return self.a2s( s )
def compile_label( self, loc ):
return self.label( '{}.{}'.format( self.curFunctionName, loc ) )
def compile_goto( self, loc ):
s = []
s.append( self.at( '{}.{}'.format( self.curFunctionName, loc ) ) )
s.append( '0 ; JMP' )
return self.a2s( s )
def compile_ifgoto_( self, loc ):
s = []
# Jump only when cond != 0 i.e. when true
s.append( self.at( '{}.{}'.format( self.curFunctionName, loc ) ) )
s.append( 'D ; JNE' )
return self.a2s( s )
def compile_ifgoto( self, loc ):
s = []
# Condition
s.append( self.popStackToD() )
# Conditional jump
s.append( self.compile_ifgoto_( loc ) )
return self.a2s( s )
def compile_call( self, fxName, nArgs ):
if self.useGenerics:
return self.compile_call_generic( fxName, nArgs )
else:
return self.compile_call_inline( fxName, nArgs )
def compile_call_inline( self, fxName, nArgs ):
s = []
returnPos = 'returnPosition{}'.format( self.returnPosCount )
self.returnPosCount += 1
# Save return position
s.append( self.at( returnPos ) )
s.append( 'D = A' )
s.append( self.pushDToStack() )
# Save segment pointers
for ptr in [ 'LCL', 'ARG', 'THIS', 'THAT' ]:
# get current val
s.append( self.at( ptr ) )
s.append( 'D = M' )
# push to stack
s.append( self.pushDToStack() )
# Set ARG
# ARG = SP - ( nArgs + 5 )
s.append( self.at( nArgs + 5 ) )
s.append( 'D = A' )
s.append( '@SP' )
s.append( 'D = M - D' )
s.append( '@ARG' )
s.append( 'M = D' )
# Set LCL
s.append( '@SP' )
s.append( 'D = M' )
s.append( '@LCL' )
s.append( 'M = D' )
# Goto function
s.append( self.at( fxName ) )
s.append( '0 ; JMP' )
# Create label for returnAddress
s.append( self.label( returnPos ) )
return self.a2s( s )
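# Stack frame produced by the sequence above (addresses increase downward):
#   arg 0 .. arg nArgs-1    <- ARG points at arg 0
#   return address
#   saved LCL
#   saved ARG
#   saved THIS
#   saved THAT              <- LCL and SP point just past here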
def compile_call_generic( self, fxName, nArgs ):
s = []
# Save fx position at R14
s.append( self.at( fxName ) )
s.append( 'D = A' )
s.append( self.atGP( 1 ) )
s.append( 'M = D' )
# Save nArgs at R15
s.append( self.at( nArgs ) )
s.append( 'D = A' )
s.append( self.atGP( 2 ) )
s.append( 'M = D' )
# Save return position
returnPos = '$_returnFromGenericFunction{}'.format( self.returnGenericPosCount )
self.returnGenericPosCount += 1
s.append( self.at( returnPos ) )
s.append( 'D = A' )
# Goto genericCall function
s.append( self.at( '$_genericCall' ) )
s.append( '0 ; JMP' )
# Create return position
s.append( self.label( returnPos ) )
return self.a2s( s )
def compile_call_bootstrap( self ):
s = []
# Create label
s.append( self.label( '$_genericCall' ) )
# Save return position
s.append( self.pushDToStack() )
# Save segment pointers
for ptr in [ 'LCL', 'ARG', 'THIS', 'THAT' ]:
# get current val
s.append( self.at( ptr ) )
s.append( 'D = M' )
# push to stack
s.append( self.pushDToStack() )
# Set ARG
# ARG = SP - ( nArgs + 5 )
s.append( self.atGP( 2 ) ) # get nArgs from R15
s.append( 'D = M' )
s.append( self.at( 5 ) )
s.append( 'D = D + A' ) # nArgs + 5
s.append( '@SP' )
s.append( 'D = M - D' )
s.append( '@ARG' )
s.append( 'M = D' )
# Set LCL
s.append( '@SP' )
s.append( 'D = M' )
s.append( '@LCL' )
s.append( 'M = D' )
# Goto function
s.append( self.atGP( 1 ) ) # get fx position from R14
s.append( 'A = M' )
s.append( '0 ; JMP' )
return self.a2s( s )
def compile_function( self, fxName, nLocals ):
s = []
self.curFunctionName = fxName
# Create label
s.append( self.label( fxName ) )
# Init locals to zeros
if nLocals == 1:
s.append( '@SP' )
s.append( 'A = M' )
s.append( 'M = 0' )
s.append( '@SP' )
s.append( 'M = M + 1' )
elif nLocals > 1:
s.append( '@SP' )
s.append( 'A = M' )
s.append( 'M = 0' )
for i in range( nLocals - 1 ):
s.append( '@SP' )
s.append( 'AM = M + 1' )
s.append( 'M = 0' )
s.append( '@SP' )
s.append( 'M = M + 1' )
return self.a2s( s )
def compile_return( self ):
if self.useGenerics:
return self.compile_return_generic()
else:
return self.compile_return_inline()
def compile_return_inline( self ):
s = []
curLCL = 'curLCL_' + self.curFunctionName
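# Frame offsets used below, relative to the callee's LCL (cf. compile_call_inline):
#   *(LCL-1) = saved THAT, *(LCL-2) = saved THIS, *(LCL-3) = saved ARG,
#   *(LCL-4) = saved LCL,  *(LCL-5) = return address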
# Save current LCL
s.append( '@LCL' )
s.append( 'D = M' )
s.append( self.at( curLCL ) )
s.append( 'M = D' )
# Save return address, @(curLCL - 5)
s.append( self.at( 5 ) )
s.append( 'A = D - A' )
s.append( 'D = M' )
s.append( self.atGP( 0 ) )
s.append( 'M = D' )
# Copy return value onto arg0
s.append( '@SP' )
s.append( 'A = M - 1' )
s.append( 'D = M' )
s.append( '@ARG' )
s.append( 'A = M' )
s.append( 'M = D' )
# Reposition SP for caller (to just after return value)
s.append( '@ARG' )
s.append( 'D = M' )
s.append( '@SP' )
s.append( 'M = D + 1' )
# Restore segment pointers of caller
s.append( self.at( curLCL ) )
s.append( 'A = M - 1' )
s.append( 'D = M' )
s.append( '@THAT' )
s.append( 'M = D' )
segs = [ None, None, 'THIS', 'ARG', 'LCL' ]
for i in range( 2, 5 ):
s.append( self.at( i ) )
s.append( 'D = A' )
s.append( self.at( curLCL ) )
s.append( 'A = M - D' )
s.append( 'D = M' )
s.append( self.at( segs[i] ) )
s.append( 'M = D' )
# Jump to return position
s.append( self.atGP( 0 ) )
s.append( 'A = M' )
s.append( '0 ; JMP' )
return self.a2s( s )
def compile_return_generic( self ):
s = []
# Goto genericReturn function
s.append( self.at( '$_genericReturn' ) )
s.append( '0 ; JMP' )
return self.a2s( s )
def compile_return_bootstrap( self ):
s = []
curLCL = 'curLCL'
# Create label
s.append( self.label( '$_genericReturn' ) )
# Save current LCL
s.append( '@LCL' )
s.append( 'D = M' )
s.append( self.at( curLCL ) )
s.append( 'M = D' )
# Save return address, @(curLCL - 5)
s.append( self.at( 5 ) )
s.append( 'A = D - A' )
s.append( 'D = M' )
s.append( self.atGP( 0 ) )
s.append( 'M = D' )
# Copy return value onto arg0
s.append( '@SP' )
s.append( 'A = M - 1' )
s.append( 'D = M' )
s.append( '@ARG' )
s.append( 'A = M' )
s.append( 'M = D' )
# Reposition SP for caller (to just after return value)
s.append( '@ARG' )
s.append( 'D = M' )
s.append( '@SP' )
s.append( 'M = D + 1' )
# Restore segment pointers of caller
# s.append( 'A = D - 1' )
s.append( self.at( curLCL ) )
s.append( 'A = M - 1' )
s.append( 'D = M' )
s.append( '@THAT' )
s.append( 'M = D' )
segs = [ None, None, 'THIS', 'ARG', 'LCL' ]
for i in range( 2, 5 ):
s.append( self.at( i ) )
s.append( 'D = A' )
s.append( self.at( curLCL ) )
s.append( 'A = M - D' )
s.append( 'D = M' )
s.append( self.at( segs[i] ) )
s.append( 'M = D' )
# Jump to return position
s.append( self.atGP( 0 ) )
s.append( 'A = M' )
s.append( '0 ; JMP' )
return self.a2s( s )
# --------------------------------------
def compile_bootstrap( self ):
# Present in every asm file generated
s = []
# Setup pointers
if self.debug:
s.append( '// --- Begin pointer setup' )
s.append( '\n// set SP' )
s.append( self.at( self.SP ) )
s.append( 'D = A' )
s.append( '@SP' )
s.append( 'M = D' )
if self.debug:
s.append( '\n// set LCL' )
s.append( '@LCL' )
s.append( 'M = D' )
if self.debug:
s.append( '\n// set ARG' )
s.append( '@ARG' )
s.append( 'M = D' )
if self.debug:
s.append( '\n// set THIS' )
s.append( self.at( 9999 ) ) # arbitrary
s.append( 'D = A' )
s.append( '@THIS' )
s.append( 'M = D' )
if self.debug:
s.append( '\n// set THAT' )
s.append( '@THAT' )
s.append( 'M = D' )
if self.debug:
s.append( '\n// --- end pointer setup\n' )
# Insert call to Sys.init
if self.debug:
s.append( '\n// --- Call Sys.init()' )
s.append( '@Sys.init' )
s.append( '0 ; JMP' )
if self.debug:
s.append( '' )
# Insert generic functions
# Generate often used and generic code once.
# Based on @cadet1620's idea and code
# http://nand2tetris-questions-and-answers-forum.32033.n3.nabble.com/What-is-a-reasonable-amount-of-assembly-code-to-implement-gt-lt-eq-td4030839.html
if self.useGenerics:
if self.debug:
s.append( '\n// --- Begin generic functions' )
s.append( '\n// genericReturn' )
s.append( self.compile_return_bootstrap() )
if self.debug:
s.append( '\n// genericCall' )
s.append( self.compile_call_bootstrap() )
if self.debug:
s.append( '\n// --- end generic functions\n' )
return self.a2s( s )
# -- Run ------------------------------------------
def genASMFile_single( inputFilePath, outputFilePath, debug = False ):
compiler = Compiler() # init compiler
fileName = inputFilePath.split( '/' )[ -1 ]
className = fileName[ : -3 ]
vmCode = { className : [] }
# Read
with open( inputFilePath, 'r' ) as file:
for line in file:
cmd = extractCmd( line )
if cmd:
vmCode[ className ].append( cmd )
# Translate
asmCode = compiler.compile( vmCode, debug )
# Write
with open( outputFilePath, 'w' ) as file:
file.write( asmCode )
file.write( '\n' )
print( 'Done' )
def getVMFilesFromDir( dirPath ):
fileNames = os.listdir( dirPath )
filePaths = []
for fileName in fileNames:
if fileName[ -2 : ] == 'vm':
filePath = dirPath + '/' + fileName
filePaths.append( filePath )
return filePaths
def genASMFile( inputDirPath, debug = False, libraryPaths = None ):
''' Translate the vm code in a directory to assembly code,
and generate a single outputFile containing the translated code '''
# Init compiler
compiler = Compiler()
# Get input file paths
inputFilePaths = getVMFilesFromDir( inputDirPath )
if libraryPaths:
inputFilePaths.extend( libraryPaths )
# Read
vmCode = {}
for inputFilePath in inputFilePaths:
className = re.search( '\w+(?=\.vm)', inputFilePath ).group( 0 )
if className in vmCode:
raise Exception( 'Error: More than one class is named {}\n\t{}\n'.format(
className,
'\n\t'.join( inputFilePaths )
) )
print( ' - Translating {}'.format( inputFilePath ) )
vmCode[ className ] = []
with open( inputFilePath, 'r' ) as file:
for line in file:
cmd = extractCmd( line )
if cmd:
vmCode[ className ].append( cmd )
# Translate
asmCode = compiler.compile( vmCode, debug )
# Write
outputFilePath = inputDirPath + '/Main.asm'
with open( outputFilePath, 'w' ) as file:
file.write( asmCode )
file.write( '\n' )
# print( 'Done' )
# inputFilePath = ''
# outputFilePath = ''
# genASMFile_single( inputFilePath, outputFilePath )
# inputDirPath = ''
# genASMFile( inputDirPath )
```
#### File: v1.0/Components/_9__inputOutput_performance.py
```python
import threading
import pygame, numpy
from math import ceil
# Hack computer
from ._x__components import *
from commonHelpers import *
'''------------------------------- I/O -------------------------------'''
class IO():
''' Input and output devices '''
''' Currently consists of,
input: keyboard, mouse
output: screen
'''
'''
Input devices bypass I/O interrupt handling by CPU as they
write directly to main_memory. In the physical implementation,
only the CPU would have access to main_memory requiring the use
of interrupt handling logic.
See, www.cs.umd.edu/class/sum2003/cmsc311/Notes/IO/extInt.html
'''
def __init__( self, N, main_memory ):
# General ---
self.N = N
self.main_memory = main_memory
self.hasExited = False
# Pygame ---
self.maxFps = SCREEN_FPS
self.surface = None
self.clock = None
# Screen ---
self.width = 512
self.height = 256
self.screenMemStart = SCREEN_MEMORY_MAP
self.newContent = False
# 1Bit color mode ---
self.fgColor = self.hex2rgb( SCREEN_FOREGROUND_COLOR )
self.bgColor = self.hex2rgb( SCREEN_BACKGROUND_COLOR )
self.colors = {
'0' : self.bgColor,
'1' : self.fgColor
}
self.nRegistersPerRow = self.width // self.N
self.nPixelsPerWord = self.N
self.screenMemEnd = self.screenMemStart + self.height * self.nRegistersPerRow
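# e.g. with N = 16 in 1-bit mode: 512 // 16 = 32 registers per row, and 256 * 32 = 8192 words of display RAM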
# 4Bit color mode ---
if COLOR_MODE_4BIT:
self.colors = {}
for key, value in COLOR_PALETTE_4BIT.items():
self.colors[ key ] = self.hex2rgb( value )
self.fgColor = self.colors[ '0111' ]
self.bgColor = self.colors[ '0000' ]
self.nRegistersPerRow *= 4
self.nPixelsPerWord = 4
self.screenMemEnd = self.screenMemStart + self.height * self.nRegistersPerRow
# Pixel array ---
# Pygame 'blit_array' expects a numpy array with [x][y] indexing (i.e. [column][row])
self.pixelArray = numpy.full( ( self.width, self.height, 3 ), self.bgColor ) # np( nCols, nRows, z )
self.curColor = self.fgColor
# Initialize Pygame ---
# threading.Thread(
# target = self.initPygame,
# name = 'io_thread'
# ).start()
def hex2rgb( self, h ):
r = int( h[ -6 : -4 ], 16 )
g = int( h[ -4 : -2 ], 16 )
b = int( h[ -2 : ], 16 )
return( r, g, b )
def runAsThread( self ):
threading.Thread(
target = self.initPygame,
name = 'io_thread'
).start()
def initPygame( self ):
pygame.init()
pygame.display.set_caption( 'Hack Computer' )
icon = pygame.image.load( 'Components/favicon.png' )
pygame.display.set_icon( icon )
pygame.display.set_mode( ( self.width, self.height ) )
self.surface = pygame.display.get_surface()
self.clock = pygame.time.Clock()
# Init background
self.surface.fill( self.bgColor )
pygame.display.flip()
# Start loop
self.run()
def quitPygame( self ):
pygame.quit()
self.hasExited = True
print( 'Exited Pygame' )
def run( self ):
while not self.hasExited:
# Poll input devices (mouse, keyboard)
for event in pygame.event.get():
if event.type == pygame.QUIT: # click X
self.quitPygame()
return
if event.type == pygame.KEYDOWN:
if event.key == 27: # Escape
self.quitPygame()
return
else:
modifier = pygame.key.get_mods()
self.handleKeyPressed( event.key, modifier )
if event.type == pygame.KEYUP:
self.handleKeyReleased()
if event.type == pygame.MOUSEBUTTONDOWN:
self.handleMousePressed( event.button, event.pos )
if event.type == pygame.MOUSEBUTTONUP:
self.handleMouseReleased( event.button )
# Update screen
self.updateScreen()
# Tick
self.clock.tick( self.maxFps )
# Screen ----------------------------------------------------
def updateScreen( self ):
if self.newContent: # update only if there's a change
# Blit pixel values
pygame.surfarray.blit_array( self.surface, self.pixelArray )
# Update display
pygame.display.flip()
self.newContent = False
def setColor( self, colorCode ):
if COLOR_MODE_4BIT:
key = toBinary( colorCode, 4 )
if not key in self.colors:
raise Exception( 'Color selection is not valid' )
self.curColor = self.colors[ key ] # temp
else:
key = str( colorCode )
if not key in self.colors:
raise Exception( 'Color selection is not valid - {}'.format( colorCode ) )
self.curColor = self.colors[ key ] # temp
def drawPixel( self, x, y ):
''' Update only the relevant pixel '''
# Check if coordinates are valid
if( x < 0 or x >= self.width or y < 0 or y >= self.height ):
raise Exception( 'drawPixel received invalid argument(s): ( {}, {} )'.format( x, y ) )
# Draw pixel
self.pixelArray[ x ][ y ] = self.curColor
# self.surface.set_at( ( x, y ), self.curColor )
# Mark screen for update
self.newContent = True
def flood( self, x, y, len ):
''' Write words to display RAM
Assumes display RAM allocates one register per pixel '''
for i in range( len ):
self.pixelArray[ x ][ y ] = self.curColor
x += 1
if ( x == self.width ):
x = 0
y += 1
def drawFastVLine( self, x, y, h ):
# I can easily make this fast for numpy array, but what would be hardware equivalent?
pass
def drawFastHLine( self, x, y, w ):
# Check if coordinates are valid
if(
w <= 0 or
x < 0 or ( x + w ) > self.width or
y < 0 or y > self.height
):
raise Exception( 'drawFastHLine received invalid argument(s): ( {}, {}, {} )'.format( x, y, w ) )
# Draw line
self.flood( x, y, w )
# Mark screen for update
self.newContent = True
def fillScreen( self ):
self.flood( 0, 0, self.width * self.height )
# Mark screen for update
self.newContent = True
def getPixel( self, x, y ):
# Check if coordinates are valid
if(
x < 0 or x >= self.width or
y < 0 or y >= self.height
):
raise Exception( 'getPixel received invalid argument(s): ( {}, {} )'.format( x, y ) )
# Get color
color = self.pixelArray[ x ][ y ]
# Lookup colorCode
for key, value in self.colors.items():
if value == color:
colorCode = key
# Write to memory
self.main_memory.write( 1, colorCode, 1, SCREEN_MEMORY_MAP ) # TODO, get better location
def replaceMainWithDisplayMemory( self, x, y, w, h ):
#TODO
pass
def replaceDisplayWithMainMemory( self, x, y, w, h ):
# Slow because iterating per pixel to convert between the two representations
# Check if coordinates are valid
if(
w <= 0 or
h <= 0 or
x < 0 or ( x + w ) > self.width or
y < 0 or ( y + h ) > self.height
):
raise Exception( 'replaceDisplayWithMainMemory received invalid argument(s): ( {}, {}, {}, {} )'.format( x, y, w, h ) )
# Replace
if( w == self.width and h == self.height ):
if COLOR_MODE_4BIT:
self.getPixelsFromMain_4BitMode_fast() # full-screen fast path takes no args
else:
self.getPixelsFromMain_1BitMode_fast()
else:
if COLOR_MODE_4BIT:
self.getPixelsFromMain_4BitMode( x, y, w, h )
else:
self.getPixelsFromMain_1BitMode( x, y, w, h )
# Mark screen for update
self.newContent = True
def getPixelsFromMain_1BitMode( self, x, y, w, h ):
startX = x
endX = x + w
startWord = startX // self.nPixelsPerWord
endWord = ceil( endX / self.nPixelsPerWord )
startWordOffset = startX % self.nPixelsPerWord
endWordOffset = endX % self.nPixelsPerWord
regIdx = self.screenMemStart + ( y * self.nRegistersPerRow )
for y in range( y, y + h ):
x = startX
word = startWord
for word in range( startWord, endWord + 1 ):
register = self.main_memory.read( regIdx + word )
register = toBinary( register, self.N )
for i in range( self.nPixelsPerWord ):
if word == startWord and i < startWordOffset:
continue # skip
elif word == endWord and i >= endWordOffset:
break # done with word
pixel = register[ i ]
color = self.colors[ pixel ] # look up corresponding color
self.pixelArray[ x ][ y ] = color
x += 1
regIdx += self.nRegistersPerRow
def getPixelsFromMain_1BitMode_fast( self ):
x = 0
y = 0
for regIdx in range( self.screenMemStart, self.screenMemEnd ):
register = self.main_memory.read( regIdx )
register = toBinary( register, self.N )
for i in range( self.N ):
pixel = register[ i ]
color = self.colors[ pixel ] # look up corresponding color
self.pixelArray[ x ][ y ] = color
x += 1
if ( x == self.width ):
x = 0
y += 1
def getPixelsFromMain_4BitMode( self, x, y, w, h ):
# TODO, and test!
pass
def getPixelsFromMain_4BitMode_fast( self ):
x = 0
y = 0
for idx in range( self.screenMemStart, self.screenMemEnd ):
register = self.main_memory.read( idx )
register = toBinary( register, self.N )
for i in range( 0, self.N, 4 ):
pixel = register[ i : i + 4 ]
color = self.colors[ pixel ] # look up corresponding color
self.pixelArray[ x ][ y ] = color
x += 1
if ( x == self.width ):
x = 0
y += 1
# Mouse -----------------------------------------------------
def handleMousePressed( self, button, pos ):
''' If mouse button is pressed, write 1 and update mouseX and mouseY '''
# print( 'Mouse pressed', pos )
if button == 1: # left button
# Write to memory
# clk, data, write, address
self.main_memory.write( 1, 1, 1, MOUSE_MEMORY_MAP )
self.main_memory.write( 1, pos[ 0 ], 1, MOUSEX_MEMORY_MAP )
self.main_memory.write( 1, pos[ 1 ], 1, MOUSEY_MEMORY_MAP )
def handleMouseReleased( self, button ):
''' If mouse button is released, write 0 '''
# if button == 1: # left button
# # Write to memory
# self.main_memory.write( 1, 0, 1, MOUSE_MEMORY_MAP )
pass # Too fast, cleared long before Hack program has chance to see
# Keyboard --------------------------------------------------
def handleKeyPressed( self, key, modifier ):
''' If key is pressed, write keyCode '''
# Lookup keyCode
keyCode = self.lookupKey( key, modifier )
print( 'Key pressed', key, modifier, keyCode )
# Write to memory
self.main_memory.write( 1, keyCode, 1, KBD_MEMORY_MAP )
def handleKeyReleased( self ):
''' If key is released, write 0 '''
# Write to memory
# self.main_memory.write( 1, 0, 1, KBD_MEMORY_MAP )
pass # Too fast, cleared long before Hack program has chance to see
def lookupKey( self, key, modifier ):
# If valid, return relevant keyCode
if key in lookup_keys:
# Handle shift modified presses
if modifier == 3 :
if key in lookup_shiftModifiedKeys:
return lookup_shiftModifiedKeys[ key ][ 0 ]
else:
'''
Ideally the shift modifier would be 0 when the shift key is pressed alone.
However, that's not the case: sometimes it's 0, sometimes it's set (1 or 2).
Not sure how to work around this. For now, ignoring all shift key presses
where the shift modifier is set.
TLDR, the shift key will not register consistently unless used as a modifier
'''
return 0
# Handle caps_lock modified presses
elif modifier == 8192:
if key in range( 97, 123 ): # is a letter
return lookup_shiftModifiedKeys[ key ][ 0 ]
else:
return lookup_keys[ key ][ 0 ]
else:
return lookup_keys[ key ][ 0 ]
else:
return 0
'''
Pygame keyConstants
www.pygame.org/docs/ref/key.html
'''
lookup_keyModifiers = [
0, # None
1, # Shift_left
2, # Shift_right
3, # Shift
8192, # Caps
64, # Ctrl_left
128, # Ctrl_right
192, # Ctrl
256, # Alt_left
512, # Alt_right
768, # Alt
# 1024, # Meta_left
# 2048, # Meta_right
# 3072, # Meta
# 4096, # Num
# 16384, # Mode
]
lookup_keys = {
# Pygame_keyConstant : [ Hack_keyCode, ASCII_character ]
32 : [ 32 , ' ' ], # Space
39 : [ 39 , "'" ], # Quote right
44 : [ 44 , ',' ], # Comma
45 : [ 45 , '-' ], # Minus
46 : [ 46 , '.' ], # Period
47 : [ 47 , '/' ], # Slash
48 : [ 48 , '0' ],
49 : [ 49 , '1' ],
50 : [ 50 , '2' ],
51 : [ 51 , '3' ],
52 : [ 52 , '4' ],
53 : [ 53 , '5' ],
54 : [ 54 , '6' ],
55 : [ 55 , '7' ],
56 : [ 56 , '8' ],
57 : [ 57 , '9' ],
59 : [ 59 , ';' ], # Semicolon
61 : [ 61 , '=' ], # Equal
91 : [ 91 , '[' ], # Bracket left
92 : [ 92 , '\\' ], # Backslash
93 : [ 93 , ']' ], # Bracket right
96 : [ 96 , '`' ], # Quote left
97 : [ 97 , 'a' ],
98 : [ 98 , 'b' ],
99 : [ 99 , 'c' ],
100 : [ 100 , 'd' ],
101 : [ 101 , 'e' ],
102 : [ 102 , 'f' ],
103 : [ 103 , 'g' ],
104 : [ 104 , 'h' ],
105 : [ 105 , 'i' ],
106 : [ 106 , 'j' ],
107 : [ 107 , 'k' ],
108 : [ 108 , 'l' ],
109 : [ 109 , 'm' ],
110 : [ 110 , 'n' ],
111 : [ 111 , 'o' ],
112 : [ 112 , 'p' ],
113 : [ 113 , 'q' ],
114 : [ 114 , 'r' ],
115 : [ 115 , 's' ],
116 : [ 116 , 't' ],
117 : [ 117 , 'u' ],
118 : [ 118 , 'v' ],
119 : [ 119 , 'w' ],
120 : [ 120 , 'x' ],
121 : [ 121 , 'y' ],
122 : [ 122 , 'z' ],
9 : [ 127 , None ], # Tab
13 : [ 128 , None ], # Enter
8 : [ 129 , None ], # Backspace
276 : [ 130 , None ], # Arrow left
273 : [ 131 , None ], # Arrow up
275 : [ 132 , None ], # Arrow right
274 : [ 133 , None ], # Arrow down
278 : [ 134 , None ], # Home
279 : [ 135 , None ], # End
280 : [ 136 , None ], # Page up
281 : [ 137 , None ], # Page down
277 : [ 138 , None ], # Insert
127 : [ 139 , None ], # Delete
27 : [ 140 , None ], # Escape
282 : [ 141 , None ], # F1
283 : [ 142 , None ], # F2
284 : [ 143 , None ], # F3
285 : [ 144 , None ], # F4
286 : [ 145 , None ], # F5
287 : [ 146 , None ], # F6
288 : [ 147 , None ], # F7
289 : [ 148 , None ], # F8
290 : [ 149 , None ], # F9
291 : [ 150 , None ], # F10
292 : [ 151 , None ], # F11
293 : [ 152 , None ], # F12
304 : [ 153 , None ], # Shift left
303 : [ 154 , None ], # Shift right
306 : [ 155 , None ], # Control left
305 : [ 156 , None ], # Control right
308 : [ 157 , None ], # Alt left
307 : [ 158 , None ], # Alt right
268 : [ 42 , '*' ], # keypad asterisk
270 : [ 43 , '+' ], # keypad plus
269 : [ 45 , '-' ], # keypad minus
266 : [ 46 , '.' ], # keypad period
267 : [ 47 , '/' ], # keypad slash
256 : [ 48 , '0' ], # keypad 0
257 : [ 49 , '1' ], # keypad 1
258 : [ 50 , '2' ], # keypad 2
259 : [ 51 , '3' ], # keypad 3
260 : [ 52 , '4' ], # keypad 4
261 : [ 53 , '5' ], # keypad 5
262 : [ 54 , '6' ], # keypad 6
263 : [ 55 , '7' ], # keypad 7
264 : [ 56 , '8' ], # keypad 8
265 : [ 57 , '9' ], # keypad 9
271 : [ 128 , None ], # keypad enter
}
lookup_shiftModifiedKeys = {
# Pygame_keyConstant : [ Hack_keyCode, ASCII_character ]
49 : [ 33 , '!' ], # Exclamation, S + 1
39 : [ 34 , '"' ], # Quote double, S + Quote right
51 : [ 35 , '#' ], # Number sign, S + 3
52 : [ 36 , '$' ], # Dollar, S + 4
53 : [ 37 , '%' ], # Percent, S + 5
55 : [ 38 , '&' ], # Ampersand, S + 7
57 : [ 40 , '(' ], # Parenthesis left, S + 9
48 : [ 41 , ')' ], # Parenthesis right, S + 0
56 : [ 42 , '*' ], # Asterisk, S + 8
61 : [ 43 , '+' ], # Plus, S + Equal
59 : [ 58 , ':' ], # Colon, S + Semicolon
44 : [ 60 , '<' ], # Less, S + Comma
46 : [ 62 , '>' ], # Greater, S + Period
47 : [ 63 , '?' ], # Question, S + Slash
50 : [ 64 , '@' ], # At, S + 2
97 : [ 65 , 'A' ],
98 : [ 66 , 'B' ],
99 : [ 67 , 'C' ],
100 : [ 68 , 'D' ],
101 : [ 69 , 'E' ],
102 : [ 70 , 'F' ],
103 : [ 71 , 'G' ],
104 : [ 72 , 'H' ],
105 : [ 73 , 'I' ],
106 : [ 74 , 'J' ],
107 : [ 75 , 'K' ],
108 : [ 76 , 'L' ],
109 : [ 77 , 'M' ],
110 : [ 78 , 'N' ],
111 : [ 79 , 'O' ],
112 : [ 80 , 'P' ],
113 : [ 81 , 'Q' ],
114 : [ 82 , 'R' ],
115 : [ 83 , 'S' ],
116 : [ 84 , 'T' ],
117 : [ 85 , 'U' ],
118 : [ 86 , 'V' ],
119 : [ 87 , 'W' ],
120 : [ 88 , 'X' ],
121 : [ 89 , 'Y' ],
122 : [ 90 , 'Z' ],
54 : [ 94 , '^' ], # Caret, S + 6
45 : [ 95 , '_' ], # Underscore, S + Minus
91 : [ 123 , '{' ], # Brace left, S + Bracket left
92 : [ 124 , '|' ], # Bar, S + Backslash
93 : [ 125 , '}' ], # Brace right, S + Bracket right
96 : [ 126 , '~' ], # Tilde, S + Quote left
}
```
#### File: compiler/hl2py/hl2py.py
```python
'''
Python class not same as Jack class...
this does not refer to same thing as self
this
. Array idx 0
. Object instance field 0
self
. ???
Need some kind of custom Python class that emulates Jack class
'''
'''
For performance, bypass the vm language
Generate equivalent python code
ex. def fxName ( arg0, arg1 ):
...
Use arithmetic class for functions
ex.
ALU = NBitArith ( N )
...
z = ALU._add( x, y )
Python/HL emulator
. RAM as array
. ...
'''
# TODO: tab levels
'ALU = NBitArithmetic( nBits )'
def compile_classDeclaration ( self, exp ):
s = ''
className = exp[ 'name' ]
s += 'class {}:\n'.format( className )
s += '\n'
# constant declarations
for constant in exp[ 'constDec' ]:
s += '\t{} = {}\n'.format( constant[ 'name' ], constant[ 'value' ] )
# static declarations
for static in exp[ 'varDec' ]:
s += '\t{} = None\n'.format( static[ 'name' ] )
#
s += '\n'
# subroutine declarations
for subDec in exp[ 'subDec' ]:
s += self.compile_subroutineDeclaration( subDec )
return s
def compile_subroutineDeclaration ( self, exp ):
s = ''
fxType = exp[ 'fx_type' ]
fxName = exp[ 'name' ]
params = []
for param in exp[ 'params' ]:
params.append( param[ 'name' ] )
'''
Super kludgy
1) Treat quirky 'Array.new' as 'constructor' instead of 'function'
Kludgy because any code that relies on similar behaviour will not work
... no __init__ unless 'constructor'
2) Overwriting the whole function so that we can change
'return DataMemory.alloc( size )' to
'self.base = DataMemory.alloc( size )'
This means any changes in Array.new code need to be mirrored here
'''
if self.curClassName == 'Array' and fxName == 'new':
s += '\tdef __init__ ( self, size ):\n'
s += '\t\n'
s += '\t\tif ALU._lte( size, 0 ):\n'
s += '\t\n'
s += '\t\t\tSys.error( 2 )\n'
s += '\t\n'
s += '\t\tself.base = DataMemory.alloc( size )\n'
s += '\n'
return s
# def
if fxType == 'method':
s += 'def {} ( self{} ):\n'.format(
fxName,
( ', ' + ', '.join( params ) ) if params else ''
)
elif fxType == 'constructor': # TODO... check multiple constructors in class???
s += 'def __init__ ( self{} ):\n'.format(
( ', ' + ', '.join( params ) ) if params else ''
)
elif fxType == 'function':
s += 'def {} ({}):\n'.format(
fxName,
( ' ' + ', '.join( params ) + ' ' ) if params else ''
)
s += '\n'
# local constant declarations
for constant in exp[ 'constDec' ]:
s += '\t{} = {}\n'.format( constant[ 'name' ], constant[ 'value' ] )
# local variable declarations
for local in exp[ 'varDec' ]:
s += '\t{} = None\n'.format( local[ 'name' ] )
s += '\n'
# statements
s += self.compile_statements( exp[ 'statements' ] )
return s
def compile_statements ( self, statements ):
s = ''
if statements:
for statement in statements:
s += self.compile_statement( statement )
return s
def compile_statement ( self, exp ):
if exp[ 'type' ] == 'subroutineCall' : return self.compile_subroutineCall( exp )
elif exp[ 'type' ] == 'assignment' : return self.compile_letStatement( exp )
elif exp[ 'type' ] == 'whileStatement' : return self.compile_whileStatement( exp )
elif exp[ 'type' ] == 'ifStatement' : return self.compile_ifStatement( exp )
elif exp[ 'type' ] == 'forStatement' : return self.compile_forStatement( exp )
elif exp[ 'type' ] == 'returnStatement' : return self.compile_returnStatement( exp )
elif exp[ 'type' ] == 'breakStatement' : return self.compile_breakStatement( exp )
elif exp[ 'type' ] == 'continueStatement' : return self.compile_continueStatement( exp )
else:
raise Exception ( "Error: Don't know how to compile the statement " + exp[ 'type' ] )
def compile_subroutineCall ( self, exp ):
s = ''
subName = exp[ 'name' ]
if '.' not in subName:
# Method of current class
subName = 'self.' + subName
# Kludgy workaround for use of 'this' to reference an array's base address
if subName == 'DataMemory.dealloc':
if self.curClassName == 'Array':
# replace 'this' keyword with 'self.base'
s = 'DataMemory.dealloc( self.base )'
else:
# ignore all other calls (typically by object instances to dispose self)
s = ''
return s
# args
args = exp[ 'args' ]
s_args = []
if args:
for expr in args:
s_args.append( self.compile_expression( expr ) )
s = '{}({})'.format(
subName,
( ' ' + ', '.join( s_args ) + ' ' ) if args else ''
)
return s
def compile_letStatement ( self, exp ):
s = ''
return s
....
ALUFxLookup_unary = {
'!' : '_not',
'~' : '_not',
'-' : '_neg'
}
ALUFxLookup_binary = {
# logic
'&' : '_and',
'|' : '_or',
'^' : '_xor',
# arithmetic
'+' : '_add',
'-' : '_sub',
'*' : '_mul',
'/' : '_div',
'>>' : '_lsr',
'<<' : '_lsl',
# '%' has no ALU primitive; it is handled separately via Math.mod in compile_binaryOp
# comparison
'=' : '_eq',
'==' : '_eq',
'>' : '_gt',
'<' : '_lt',
'>=' : '_gte',
'<=' : '_lte',
'!=' : '_ne'
}
def compile_binaryOp ( self, op, a, b ):
if op in ALUFxLookup_binary:
ALUFx = ALUFxLookup_binary[ op ]
return 'ALU.{}( {}, {} )'.format( ALUFx, a, b )
elif op == '%':
return 'Math.mod( {}, {} )'.format( a, b )
else:
raise Exception ( "Error: Don't know how to compile the binaryOp " + op )
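# e.g. compile_binaryOp( '+', 'x', 'y' ) -> 'ALU._add( x, y )'
#      compile_binaryOp( '%', 'x', 'y' ) -> 'Math.mod( x, y )'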
def compile_expression ( self, exp ):
s = ''
if exp == None:
return ''
elif isinstance( exp, dict ):
return self.compile_expressionTerm( exp )
else:
s_a = self.compile_expression( exp[ 0 ] )
s = s_a
for i in range( 1, len( exp ), 2 ):
s_b = self.compile_expression( exp[ i + 1 ] )
s_ab = self.compile_binaryOp( exp[ i ][ 'value' ], s_a, s_b )
s = s_ab # left-fold: each op wraps the result built so far
s_a = s_ab
return s
def compile_expressionTerm ( self, exp ):
s = ''
expType = exp[ 'type' ]
expValue = exp[ 'value' ]
if expType == 'integerConstant':
s = exp[ 'value' ]
elif expType == 'stringConstant':
'String()' ... ??
for c in expValue:
'?.appendChar( {} )'.format( ord( c ) )
elif expType == 'charConstant':
s = str( ord( expValue ) ) # Ascii code
elif expType == 'keywordConstant':
if expValue == 'true':
s = 'ALU.negativeOne'
elif expValue == 'false' or expValue == 'null':
s = '0'
elif expValue == 'this':
... self?
elif expType == 'identifier':
name = exp[ 'name' ]
arrIdx = exp[ 'arrIdx' ]
if arrIdx:
s_idx = self.compile_expression( arrIdx )
s = '{}[ {} ]'.format( name, s_idx )
... ???
else:
s = name
elif expType == 'subroutineCall':
return self.compile_subroutineCall( exp )
elif expType == 'unaryOp':
s_operand = self.compile_expression( exp[ 'operand' ] )
op = exp[ 'op' ]
ALUFx = ALUFxLookup_unary[ op ]
s = 'ALU.{}( {} )'.format( ALUFx, s_operand )
else:
raise Exception ( "Error: Don't know how to compile the expressionTerm " + expType )
return s
```
#### File: OtherExperiments/Components/comparator.py
```python
''''''''''''''' imports '''''''''''''''''''''''''''''
# Built ins
import sys
# Computer files
sys.path.append('../../Modules')
from _1__elementaryGates import *
from _2__arithmeticGates import *
''''''''''''''''''''''''' main '''''''''''''''''''''''''
# https://en.wikipedia.org/wiki/Digital_comparator
'''
doesn't work when comparing (1)negative to (0)positive numbers
as under 2's complement, negative will always be larger
positive range(0, 2**(n-1))
negative range(2**(n-1), 2**n)
MSB doubles as sign... maybe could use to make a workaround ? =/
'''
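# Worked example of the failure: in 5-bit two's complement, -9 encodes as 10111,
# which reads as 23 unsigned, so a bit-by-bit compare of -9 vs 0 (00000) reports
# gt instead of lt -- see the failing test case below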
# -- using logic gates ---
# 1Bit
def comp_( a, b ):
eq = xnor_( a, b ) # a == b
lt = and_( not_(a), b ) # a < b
gt = and_( a, not_(b) ) # a > b
return( eq, lt, gt )
# nBit
def compN_( N, a, b ):
# individual
comps = []
for i in range(N):
comps.append( comp_( a[i], b[i] ) )
# print(comps)
# equal
eqB = [ c[0] for c in comps ]
eq = andNto1_( eqB )
# print( eq, eqB )
# less than
parts = []
for i in range(N):
part = eqB[:i]
# print(part)
part.insert( 0, comps[i][1] )
parts.append( andNto1_( part ) )
lt = orNto1_( parts )
# greater than
gt = xnor_( eq, lt )
#
return( eq, lt, gt )
# --- using mux ---
# 1Bit
def comp2_( a, b ):
eq = mux4to1_( 1, 0, 0, 1, a, b ) # a == b
lt = mux4to1_( 0, 1, 0, 0, a, b ) # a < b
gt = mux4to1_( 0, 0, 1, 0, a, b ) # a > b
return( eq, lt, gt )
# nBit
def compN2_( N, a, b ):
''' Exact same as compN_(),
just swap comp_() with comp2_() '''
pass
''''''''''''''''''''''''' test '''''''''''''''''''''''''
def dec2BinN( N, x ):
if x < 0: x = 2**N - abs(x) # two's complement negation
b = bin(x)[2:] # strip leading '0b'
return b.zfill( N ) # pad with zeros as needed
testVals = [
[ 1, 0, 1 ],
[10, 15, 4 ],
[-5, -8, 3+1],
[-7, -7, 3+1],
[-9, 0, 4+1], # fails
[55, 80, 7 ]
]
for vals in testVals:
# for i in range(3):
# vals = testVals[i]
v1 = vals[0]
v2 = vals[1]
N = vals[2]
expected = [ v1 == v2, v1 < v2, v1 > v2 ]
result = compN_( N, dec2BinN( N, v1 ), dec2BinN( N, v2 ) )
print( expected[0] == result[0], ">", expected[0], result[0] )
print( expected[1] == result[1], ">", expected[1], result[1] )
print( expected[2] == result[2], ">", expected[2], result[2] )
print( "---" )
```
#### File: OtherExperiments/FSMs/sequenceDetector6a_HLP.py
```python
''''''''''''''''''' Helpers '''''''''''''''''''''''''''''''''
class StackFSM():
''' http://gamedevelopment.tutsplus.com/tutorials/finite-state-machines-theory-and-implementation--gamedev-11867 '''
def __init__(self):
self.stack = []
def getCurrentState(self):
if len(self.stack) > 0 :
return self.stack[-1]
else:
return None
def popState(self):
self.stack.pop()
def pushState(self, state):
if self.getCurrentState() != state :
self.stack.append(state)
def update(self):
currentStateFunction = self.getCurrentState()
if currentStateFunction != None :
currentStateFunction()
''''''''''''''''''''''''' Main '''''''''''''''''''''''''''''''''
# FSM as shown in notes
class Thing():
# detect 101 or 110 or 011
def __init__(self):
self.sManager = StackFSM()
self.sManager.pushState(self.state0)
self.input = None
def update(self):
self.sManager.update()
def state0(self):
print("reset")
if self.input :
self.sManager.popState()
self.sManager.pushState(self.state2)
else :
self.sManager.popState()
self.sManager.pushState(self.state1)
def state1(self):
print("detected 0")
if self.input :
self.sManager.popState()
self.sManager.pushState(self.state4)
else :
self.sManager.popState()
self.sManager.pushState(self.state3)
def state2(self):
print("detected 1")
if self.input :
self.sManager.popState()
self.sManager.pushState(self.state6)
else :
self.sManager.popState()
self.sManager.pushState(self.state5)
def state3(self):
print("detected 00")
if self.input :
self.sManager.popState()
self.sManager.pushState(self.state4)
else :
self.sManager.popState()
self.sManager.pushState(self.state3)
def state4(self):
print("detected 01")
if self.input :
print("detected 011")
self.sManager.popState()
self.sManager.pushState(self.state6)
else :
self.sManager.popState()
self.sManager.pushState(self.state5)
def state5(self):
print("detected 10")
if self.input :
print("detected 101")
self.sManager.popState()
self.sManager.pushState(self.state4)
else :
self.sManager.popState()
self.sManager.pushState(self.state3)
def state6(self):
print("detected 11")
if self.input :
self.sManager.popState()
self.sManager.pushState(self.state6)
else :
print("detected 110")
self.sManager.popState()
self.sManager.pushState(self.state5)
class Thing_stateReduced():
# detect 101 or 110 or 011
def __init__(self):
self.sManager = StackFSM()
self.sManager.pushState(self.state0)
self.input = None
def update(self):
self.sManager.update()
def state0(self):
print("reset")
if self.input :
self.sManager.popState()
self.sManager.pushState(self.state2)
else :
self.sManager.popState()
self.sManager.pushState(self.state1)
def state1(self):
print("detected 0")
if self.input :
self.sManager.popState()
self.sManager.pushState(self.state4)
else :
self.sManager.popState()
self.sManager.pushState(self.state3)
def state2(self):
print("detected 1")
if self.input :
self.sManager.popState()
self.sManager.pushState(self.state6)
else :
self.sManager.popState()
self.sManager.pushState(self.state5)
def state4(self):
print("detected 01")
if self.input :
print("detected 011")
self.sManager.popState()
self.sManager.pushState(self.state6)
else :
self.sManager.popState()
self.sManager.pushState(self.state5)
def state5(self):
print("detected 10")
if self.input :
print("detected 101")
self.sManager.popState()
self.sManager.pushState(self.state4)
else :
self.sManager.popState()
self.sManager.pushState(self.state1)
def state6(self):
print("detected 11")
if self.input :
self.sManager.popState()
self.sManager.pushState(self.state6)
else :
print("detected 110")
self.sManager.popState()
self.sManager.pushState(self.state5)
''''''''''''''''''''''''' Run '''''''''''''''''''''''''''''''''
sequence = [1,1,0,1,0,1,1,0,1,1,0,1,0,0,1,1,0]
# sequence = [0,0,1,1,1,0,1,1,0,1,0,0,1,1,0]
# sequence.reverse()
thing = Thing()
for input in sequence:
thing.input = input
thing.update()
print( input )
print("\n---")
thing2 = Thing_stateReduced()
for input in sequence:
thing2.input = input
thing2.update()
print( input )
```
#### File: OtherExperiments/ShiftRegisters/ringCounter.py
```python
''''''''''''''' imports '''''''''''''''''''''''''''''
# Built ins
import sys
# Computer files
sys.path.append('../../Modules')
from _1__elementaryGates import *
from _2__arithmeticGates import *
from _3__clock import *
from _4__flipFlops import *
''''''''''''''''''''''''' helpers '''''''''''''''''''''''''
def toString(array):
return ''.join( map(str, array) )
def toDecimal(bitSeq):
return int(bitSeq, 2)
''''''''''''''''''''''''' main '''''''''''''''''''''''''
clock = Clock()
nStages = 4
dff = []
for i in range(nStages): dff.append( DFlipFlop() )
# start with '1000'
for i in range(nStages): dff[i].clear()
dff[0].preset()
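# Seeded with 1000, the ring rotates one place per clock: 1000 -> 0100 -> 0010 -> 0001 -> 1000 -> ...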
n_iterations = 0
def SR(clk):
# Shift
dff[0].doTheThing( clk, dff[3].q1 )
dff[1].doTheThing( clk, dff[0].q1 )
dff[2].doTheThing( clk, dff[1].q1 )
dff[3].doTheThing( clk, dff[2].q1 )
global n_iterations
n_iterations += 1
if n_iterations >= 100: clock.stop() # stop the clock
def record():
print( toString( [dff[0].q1, dff[1].q1, dff[2].q1, dff[3].q1] ) )
''''''''''''''''''''''''' run '''''''''''''''''''''''''
# Things to execute on clock edges
def callOnRising():
SR( clock.value )
def callOnFalling():
record()
clock.callbackRising = callOnRising
clock.callbackFalling = callOnFalling
if __name__ == '__main__':
# Start program
clock.run()
```
#### File: PlayArea/shifters/shift16.py
```python
# mux_ is used below but was never imported in this file; assuming the shared
# gate modules live where the sibling experiments expect them:
import sys
sys.path.append( '../../Modules' )
from _1__elementaryGates import *
def shiftRight16_( x, y ):
''' 16 bit barrel shifter (right) '''
N = 16
t0 = [ None ] * N
t1 = [ None ] * N
t2 = [ None ] * N
t3 = [ None ] * N
y = y[::-1] # make life simpler by matching array access to MSB-to-LSB format
#
for i in range( N - 1, 0, -1 ):
t0[i] = mux_( x[ i - 1 ], x[i], y[0] )
t0[0] = mux_( 0, x[0], y[0] )
#
for i in range( N - 1, 1, -1 ):
t1[i] = mux_( t0[ i - 2 ], t0[i], y[1] )
t1[1] = mux_( 0, t0[1], y[1] )
t1[0] = mux_( 0, t0[0], y[1] )
#
for i in range( N - 1, 3, -1 ):
t2[i] = mux_( t1[ i - 4 ], t1[i], y[2] )
t2[3] = mux_( 0, t1[3], y[2] )
t2[2] = mux_( 0, t1[2], y[2] )
t2[1] = mux_( 0, t1[1], y[2] )
t2[0] = mux_( 0, t1[0], y[2] )
#
for i in range( N - 1, 7, -1 ):
t3[i] = mux_( t2[ i - 8 ], t2[i], y[3] )
t3[7] = mux_( 0, t2[7], y[3] )
t3[6] = mux_( 0, t2[6], y[3] )
t3[5] = mux_( 0, t2[5], y[3] )
t3[4] = mux_( 0, t2[4], y[3] )
t3[3] = mux_( 0, t2[3], y[3] )
t3[2] = mux_( 0, t2[2], y[3] )
t3[1] = mux_( 0, t2[1], y[3] )
t3[0] = mux_( 0, t2[0], y[3] )
#
return t3
def shiftLeft16_( x, y ):
''' 16 bit barrel shifter (left) '''
N = 16
t0 = [ None ] * N
t1 = [ None ] * N
t2 = [ None ] * N
t3 = [ None ] * N
y = y[::-1] # make life simpler by matching array access to MSB-to-LSB format
#
t0[N - 1] = mux_( 0, x[N - 1], y[0] )
for i in range( N - 2, -1, -1 ):
t0[i] = mux_( x[ i + 1 ], x[i], y[0] )
#
t1[ N - 1 ] = mux_( 0, t0[ N - 1 ], y[1] )
t1[ N - 2 ] = mux_( 0, t0[ N - 2 ], y[1] )
for i in range( N - 3, -1, -1 ):
t1[i] = mux_( t0[ i + 2 ], t0[i], y[1] )
#
t2[ N - 1 ] = mux_( 0, t1[ N - 1 ], y[2] )
t2[ N - 2 ] = mux_( 0, t1[ N - 2 ], y[2] )
t2[ N - 3 ] = mux_( 0, t1[ N - 3 ], y[2] )
t2[ N - 4 ] = mux_( 0, t1[ N - 4 ], y[2] )
for i in range( N - 5, -1, -1 ):
t2[i] = mux_( t1[ i + 4 ], t1[i], y[2] )
#
t3[ N - 1 ] = mux_( 0, t2[ N - 1 ], y[3] )
t3[ N - 2 ] = mux_( 0, t2[ N - 2 ], y[3] )
t3[ N - 3 ] = mux_( 0, t2[ N - 3 ], y[3] )
t3[ N - 4 ] = mux_( 0, t2[ N - 4 ], y[3] )
t3[ N - 5 ] = mux_( 0, t2[ N - 5 ], y[3] )
t3[ N - 6 ] = mux_( 0, t2[ N - 6 ], y[3] )
t3[ N - 7 ] = mux_( 0, t2[ N - 7 ], y[3] )
t3[ N - 8 ] = mux_( 0, t2[ N - 8 ], y[3] )
for i in range( N - 9, -1, -1 ):
t3[i] = mux_( t2[ i + 8 ], t2[i], y[3] )
#
return t3
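# Example usage (bit arrays are MSB-to-LSB lists; y is the 4-bit shift amount, also MSB-to-LSB):
#   shiftRight16_( [0]*12 + [1, 0, 0, 0], [0, 0, 1, 0] )   # 8 >> 2 -> 0000000000000010
#   shiftLeft16_(  [0]*15 + [1],          [0, 0, 1, 1] )   # 1 << 3 -> 0000000000001000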
```
|
{
"source": "jetstream5500/FFT",
"score": 3
}
|
#### File: FFT/src/spectogram.py
```python
import sys
import argparse
import sounddevice as sd
import queue
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import r2fft
from random import *
# Global queue
q = queue.Queue()
data = np.zeros((512,512)).tolist()
im = None
def audio_callback(indata, frames, time, status):
#print(len(indata))
"""This is called (from a separate thread) for each audio block."""
if status:
print(status, file=sys.stderr)
# fancy slicing: indata[::1, 0] takes every sample of channel 0 (same as indata[:, 0])
#print(indata[::1, 0])
q.put(indata[::1, 0])
#print(q)
def update_spectogram(frame):
while True:
if q.qsize() > 16:
#print(q.qsize())
a = im.get_array()
data_seg = []
for i in range(16):
data_seg.extend(q.get_nowait())
fft_data = abs(r2fft.fft(data_seg))[::16]
#print(len(fft_data))
#for i in range(512):
# a[i] = fft_data
a = np.append(a[1:], [fft_data], axis=0) # scroll: drop the oldest row, append the new spectrum (axis=0 keeps the 2D shape)
im.set_array(a)
im.set_clim(vmax=np.amax(a))
else:
break
return [im]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Live audio spectrogram using the radix-2 FFT in r2fft")
parser.add_argument(
'-l', '--list-devices', action='store_true',
help='show list of audio devices and exit')
parser.add_argument(
'-d', '--device', type=int,
help='input device (numeric ID or substring)')
#parser.add_argument(
# '-w', '--window', type=float, default=200, metavar='DURATION',
# help='visible time slot (default: %(default)s ms)')
parser.add_argument(
'-i', '--interval', type=float, default=100,
help='minimum time between plot updates (default: %(default)s ms)')
#parser.add_argument(
# '-b', '--blocksize', type=int, help='block size (in samples)')
parser.add_argument(
'-r', '--samplerate', type=float, help='sampling rate of audio device')
#parser.add_argument(
# '-n', '--downsample', type=int, default=10, metavar='N',
# help='display every Nth sample (default: %(default)s)')
#parser.add_argument(
# 'channels', type=int, default=[1], nargs='*', metavar='CHANNEL',
# help='input channels to plot (default: the first)')
args = parser.parse_args()
if args.list_devices:
print(sd.query_devices())
parser.exit(0)
if args.samplerate is None:
device_info = sd.query_devices(args.device, 'input')
args.samplerate = device_info['default_samplerate']
print(args)
stream = sd.InputStream(
device=args.device, channels=1,
samplerate=args.samplerate, callback=audio_callback)
fig = plt.figure()
im = plt.imshow(data, animated=True, vmin=0, vmax=1)
ani = animation.FuncAnimation(fig, update_spectogram, interval=args.interval, blit=True)
with stream:
print('Recording started...')
plt.show()
```
|
{
"source": "Jet-Streaming/gyp",
"score": 2
}
|
#### File: test/mac/gyptest-app-error.py
```python
"""
Verifies that invalid strings files cause the build to fail.
"""
import TestCmd
import TestGyp
import sys
if sys.platform == 'darwin':
expected_error = 'Old-style plist parser: missing semicolon in dictionary'
saw_expected_error = [False] # Python2 has no "nonlocal" keyword.
def match(a, b):
if a == b:
return True
if not TestCmd.is_List(a):
a = a.split('\n')
if not TestCmd.is_List(b):
b = b.split('\n')
if expected_error in '\n'.join(a) + '\n'.join(b):
saw_expected_error[0] = True
return True
return False
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match=match)
test.run_gyp('test-error.gyp', chdir='app-bundle')
test.build('test-error.gyp', test.ALL, chdir='app-bundle')
# Ninja pipes stderr of subprocesses to stdout.
if test.format in ['ninja', 'xcode-ninja'] \
and expected_error in test.stdout():
saw_expected_error[0] = True
if saw_expected_error[0]:
test.pass_test()
else:
test.fail_test()
```
|
{
"source": "jetstreamin/sqlitebiter",
"score": 2
}
|
#### File: jetstreamin/sqlitebiter/setup.py
```python
import io
import os.path
import sys
import setuptools
MODULE_NAME = "sqlitebiter"
REPOSITORY_URL = "https://github.com/thombashi/{:s}".format(MODULE_NAME)
REQUIREMENT_DIR = "requirements"
ENCODING = "utf8"
pkg_info = {}
def need_pytest():
return set(["pytest", "test", "ptr"]).intersection(sys.argv)
def get_release_command_class():
try:
from releasecmd import ReleaseCommand
except ImportError:
return {}
return {"release": ReleaseCommand}
with open(os.path.join(MODULE_NAME, "__version__.py")) as f:
exec(f.read(), pkg_info)
with io.open("README.rst", encoding=ENCODING) as fp:
long_description = fp.read()
with io.open(os.path.join("docs", "pages", "introduction", "summary.txt"), encoding=ENCODING) as f:
summary = f.read().strip()
with open(os.path.join(REQUIREMENT_DIR, "requirements.txt")) as f:
install_requires = [line.strip() for line in f if line.strip()]
with open(os.path.join(REQUIREMENT_DIR, "test_requirements.txt")) as f:
tests_requires = [line.strip() for line in f if line.strip()]
with open(os.path.join(REQUIREMENT_DIR, "docs_requirements.txt")) as f:
docs_requires = [line.strip() for line in f if line.strip()]
build_exe_requires = ["pyinstaller>=3.4"]
build_wheel_requires = ["twine", "wheel"]
build_requires = build_exe_requires + build_wheel_requires
SETUPTOOLS_REQUIRES = ["setuptools>=38.3.0"]
PYTEST_RUNNER_REQUIRES = ["pytest-runner"] if need_pytest() else []
setuptools.setup(
name=MODULE_NAME,
version=pkg_info["__version__"],
url=REPOSITORY_URL,
author=pkg_info["__author__"],
author_email=pkg_info["__email__"],
description=summary,
include_package_data=True,
keywords=[
"SQLite", "converter",
"CSV", "Excel", "Google Sheets", "HTML", "JSON", "LTSV", "TSV",
],
license=pkg_info["__license__"],
long_description=long_description,
packages=setuptools.find_packages(exclude=['test*']),
project_urls={
"Documentation": "https://{:s}.rtfd.io/".format(MODULE_NAME),
"Download": "{:s}/releases".format(REPOSITORY_URL),
"Source": REPOSITORY_URL,
"Tracker": "{:s}/issues".format(REPOSITORY_URL),
},
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
install_requires=SETUPTOOLS_REQUIRES + install_requires,
setup_requires=SETUPTOOLS_REQUIRES + PYTEST_RUNNER_REQUIRES,
tests_require=tests_requires,
extras_require={
"build": build_requires,
"buildexe": build_exe_requires,
"buildwhl": build_wheel_requires,
"docs": docs_requires,
"gs": ["gspread", "oauth2client", "pyOpenSSL"],
"mediawiki": ["pypandoc"],
"release": ["releasecmd>=0.0.18,<0.1.0"],
"test": tests_requires,
},
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Framework :: Jupyter",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS",
"Operating System :: Microsoft",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Database",
],
entry_points={
"console_scripts": [
"sqlitebiter=sqlitebiter.sqlitebiter:cmd",
],
},
cmdclass=get_release_command_class())
```
#### File: sqlitebiter/sqlitebiter/_common.py
```python
from __future__ import absolute_import, unicode_literals
from ._clrm import bright, cyan, green
DEFAULT_DUP_COL_HANDLER = "rename"
class ResultLogger(object):
@property
def verbosity_level(self):
return self.__verbosity_level
def __init__(self, logger, schema_extractor, result_counter, verbosity_level):
self.__logger = logger
self.__schema_extractor = schema_extractor
self.__result_counter = result_counter
self.__verbosity_level = verbosity_level
def logging_success(self, source, table_name, is_create_table):
table_schema = self.__schema_extractor.fetch_table_schema(table_name.strip())
self.__result_counter.inc_success(is_create_table)
self.__logger.info(
"convert '{source:s}' to '{table_info:s}' table".format(
source=cyan(source),
table_info=bright(
green(
table_schema.dumps(
output_format="text", verbosity_level=self.__verbosity_level
)
)
),
)
)
```
#### File: sqlitebiter/sqlitebiter/_counter.py
```python
from __future__ import absolute_import, unicode_literals
from ._const import ExitCode
class ResultCounter(object):
@property
def success_count(self):
return self.__success_count
@property
def fail_count(self):
return self.__fail_count
@property
def skip_count(self):
return self.__skip_count
@property
def total_count(self):
return self.success_count + self.fail_count + self.skip_count
@property
def created_table_count(self):
return self.__create_table_count
def __init__(self):
self.__create_table_count = 0
self.__success_count = 0
self.__fail_count = 0
self.__skip_count = 0
def __repr__(self):
return "results: " + ", ".join(
[
"success={:d}".format(self.__success_count),
"failed={:d}".format(self.__fail_count),
"skip={:d}".format(self.__skip_count),
"return_code={:d}".format(self.get_return_code()),
]
)
def inc_success(self, is_create_table):
self.__success_count += 1
if is_create_table:
self.__create_table_count += 1
def inc_fail(self):
self.__fail_count += 1
def inc_skip(self):
self.__skip_count += 1
def get_return_code(self):
if self.__success_count > 0:
return ExitCode.SUCCESS
if self.__fail_count > 0:
return ExitCode.FAILED_CONVERT
return ExitCode.NO_INPUT
```
#### File: sqlitebiter/subcommand/_gs.py
```python
from __future__ import absolute_import, unicode_literals
import msgfy
import pytablereader as ptr
import six
from ._base import SourceInfo, TableConverter
class GoogleSheetsConverter(TableConverter):
def convert(self, credentials, title):
logger = self._logger
result_counter = self._result_counter
source_id = self._fetch_next_source_id()
loader = ptr.GoogleSheetsTableLoader()
loader.source = credentials
loader.title = title
# if typepy.is_null_string(loader.source):
# loader.source = app_config_mgr.load().get(
# ConfigKey.GS_CREDENTIALS_FILE_PATH)
try:
for table_data in loader.load():
logger.debug("loaded table_data: {}".format(six.text_type(table_data)))
sqlite_tabledata = self.normalize_table(table_data)
source_info = SourceInfo(
base_name=title,
dst_table=sqlite_tabledata.table_name,
format_name="google sheets",
source_id=source_id,
)
try:
self._table_creator.create(
sqlite_tabledata, self._index_list, source_info=source_info
)
SourceInfo.insert(source_info)
except (ptr.ValidationError, ptr.DataError):
result_counter.inc_fail()
except ptr.OpenError as e:
logger.error(msgfy.to_error_message(e))
result_counter.inc_fail()
except (ptr.ValidationError, ptr.DataError) as e:
logger.error(
"invalid credentials data: path={}, message={}".format(credentials, str(e))
)
result_counter.inc_fail()
except ptr.APIError as e:
logger.error(msgfy.to_error_message(e))
result_counter.inc_fail()
```
|
{
"source": "jetsunwhitton/pico-ner-relations",
"score": 2
}
|
#### File: pico-ner-relations/scripts/custom_functions.py
```python
from functools import partial
from pathlib import Path
from typing import Iterable, Callable
import spacy
from spacy.training import Example
from spacy.tokens import DocBin, Doc
from spacy.language import Language
# make the factory work
from scripts.rel_pipe import make_relation_extractor
# make the config work
from scripts.rel_model import create_relation_model, create_classification_layer, create_instances, create_tensors
from scripts.entity_ruler import custom_entity_ruler
@Language.component("custom_sentencizer")
def custom_sentencizer(doc):
for i, token in enumerate(doc[:-2]):
        # Mark a sentence start after "." unless the next token is numeric (i.e. the "." is a decimal point)
if token.text == "." and not doc[i + 1].text.isnumeric():
doc[i + 1].is_sent_start = True
else:
# Explicitly set sentence start to False otherwise, to tell
# the parser to leave those tokens alone
doc[i + 1].is_sent_start = False
return doc
# This function was sourced from the spaCy relation component
# template: https://github.com/explosion/projects/tree/v3/tutorials
@spacy.registry.readers("Gold_ents_Corpus.v1")
def create_docbin_reader(file: Path) -> Callable[["Language"], Iterable[Example]]:
return partial(read_files, file)
# This function was adapted from the spaCy relation component
# template: https://github.com/explosion/projects/tree/v3/tutorials
def read_files(file: Path, nlp: "Language") -> Iterable[Example]:
"""Custom reader that keeps the tokenization of the gold data,
and also adds the gold PICO annotations as we do not attempt to predict these."""
doc_bin = DocBin().from_disk(file)
docs = doc_bin.get_docs(nlp.vocab)
for gold in docs:
pred = Doc(
nlp.vocab,
words=[t.text for t in gold],
spaces=[t.whitespace_ for t in gold],
)
pred.ents = gold.ents
yield Example(pred, gold)
```
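A small sketch of wiring the sentencizer above into a blank pipeline; the import path is an assumption (whatever module registers the component must be imported first):
```python
import spacy
import scripts.custom_functions  # noqa: F401 -- importing registers "custom_sentencizer"

nlp = spacy.blank("en")
nlp.add_pipe("custom_sentencizer")
doc = nlp("Pressure fell by 3 . 5 mmHg . A second sentence follows .")
print([sent.text for sent in doc.sents])
# The "." before "5" is treated as a decimal point, not a sentence boundary,
# so only two sentences are produced.
```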
#### File: pico-ner-relations/scripts/demo.py
```python
import spacy_streamlit, spacy, operator
import streamlit as st
from spacy import displacy
from spacy.pipeline import merge_entities
# make the factory work
from rel_pipe import make_relation_extractor, score_relations
# make the config work
from rel_model import create_relation_model, create_classification_layer, create_instances, create_tensors
from tabulate import relation_extraction, tabulate_pico_entities
import base64
# set page config
st.set_page_config(
page_title="RCT-ART",
page_icon="logo.jpg"
)
st.sidebar.image("logo.jpg")
st.sidebar.markdown("RCT-ART is an NLP pipeline built with spaCy for converting clinical trial result sentences into tables through jointly extracting intervention, outcome and outcome measure entities and their relations. ")
st.sidebar.subheader("Current constraints:")
st.sidebar.markdown("""
- Only abstracts from studies with 2 trial arms
- Must be a sentence with study results
- Sentence must contain at least one intervention (e.g. drug name), outcome description (e.g. blood pressure) and at least one non-comparative outcome measure
""")
st.title("Demo")
st.header("Randomised Controlled Trial Abstract Result Tabulator")
ner_model = "trained_models/biobert/ner/all_domains/model-best"
rel_model = "trained_models/biobert/rel/all_domains/model-best"
default_text = "Somnolence , the most frequently reported adverse event , was noted in 72.5 % versus 7.7 % of subjects ( risperidone vs placebo ) and seemed manageable with dose/dose-schedule modification ."
st.subheader("Enter result sentence for analysis")
text = st.text_area("Input should follow constraints outlined in sidebar", default_text, height=200)
nlp = spacy.load("trained_models/biobert/ner/all_domains/model-best")
ent_doc = nlp(text)
st.subheader("NER analysis")
spacy_streamlit.visualize_ner(
ent_doc,
labels=["INTV", "OC", "MEAS"],
show_table=False,
title=False
)
rel_doc = relation_extraction(rel_model,[ent_doc])[0]
deps = {"words": [],"arcs": []}
for tok in rel_doc:
deps["words"].append({"text": tok.text, "tag": tok.ent_type_})
for key in rel_doc._.rel:
rel = rel_doc._.rel[key] # get relation
pred_rel = max(rel.items(), key=operator.itemgetter(1)) # selects relation type with highest probability
if pred_rel[1] > 0.5: # includes relation if above set threshold for probability
if key[0] > key[1] and rel_doc[key[1]].ent_type_ != "MEAS":
deps["arcs"].append({"start": key[1], "end": key[0], "label": pred_rel[0], "dir": "right"})
elif key[0] > key[1]:
deps["arcs"].append({"start": key[1], "end": key[0], "label": pred_rel[0], "dir": "left"})
elif rel_doc[key[1]].ent_type_ != "MEAS":
deps["arcs"].append({"start": key[0], "end": key[1], "label": pred_rel[0], "dir": "left"})
else:
deps["arcs"].append({"start": key[0], "end": key[1], "label": pred_rel[0], "dir": "right"})
html = displacy.render(deps, style="dep", manual=True, options={'distance':80})
st.subheader("RE analysis")
st.write(spacy_streamlit.util.get_svg(html), unsafe_allow_html=True)
heading_properties = [('font-size', '16px')]
cell_properties = [('font-size', '16px')]
dfstyle = [dict(selector="th", props=heading_properties),dict(selector="td", props=cell_properties)]
df = tabulate_pico_entities(rel_doc)
print(rel_doc._.rel)
#df.style.set_table_styles([cell_hover, index_names, headers])
st.subheader("Tabulation")
st.table(df.style.set_table_styles(dfstyle))
def get_table_download_link(df):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here
href = f'<a href="data:file/csv;base64,{b64}" download="result_sentence.csv">Download csv file</a>'
return href
st.markdown(get_table_download_link(df), unsafe_allow_html=True)
```
#### File: pico-ner-relations/scripts/evaluate.py
```python
import spacy, operator, csv, os
from spacy.tokens import DocBin, Doc
from spacy.training.example import Example
from spacy.scorer import Scorer,PRFScore
from spacy.vocab import Vocab
# make the factory work
from rel_pipe import make_relation_extractor, score_relations
# make the config work
from rel_model import create_relation_model, create_classification_layer, create_instances, create_tensors
def ner_evaluate(ner_model_path,test_data):
""" Evaluates NER scores of model on test data, can output to console and/or file"""
print("|| Loading model for NER task")
nlp = spacy.load(ner_model_path)
doc_bin = DocBin(store_user_data=True).from_disk(test_data)
docs = doc_bin.get_docs(nlp.vocab)
examples = []
for gold in docs:
pred = nlp(gold.text)
examples.append(Example(pred, gold))
print("|| Evaluating NER task performance")
    results = nlp.evaluate(examples)
    print(results)
    outfile.write("NER_evaluation\n")
    outfile.write(f"{results}\n")
# This function was extensively adapted from the spaCy relation component
# template: https://github.com/explosion/projects/tree/v3/tutorials
# it can now be used to evaluate joint entity--relation extraction performance
def joint_ner_rel_evaluate(ner_model_path, rel_model_path, test_data, print_details: bool):
"""Evaluates joint performance of ner and rel extraction model,
as well as the rel model alone if only rel model provided"""
    if ner_model_path is not None:
print("|| Loading models for joint task")
ner = spacy.load(ner_model_path)
print("|| Evaluating joint task performance")
else:
print("|| Loading models for rel task")
print("|| Evaluating rel task performance")
rel = spacy.load(rel_model_path)
doc_bin = DocBin(store_user_data=True).from_disk(test_data)
docs = doc_bin.get_docs(rel.vocab)
examples = []
for gold in docs:
pred = Doc(
rel.vocab,
words=[t.text for t in gold],
spaces=[t.whitespace_ for t in gold],
)
        if ner_model_path is not None:
pred.ents = ner(gold.text).ents
else:
pred.ents = gold.ents
for name, proc in rel.pipeline:
pred = proc(pred)
examples.append(Example(pred, gold))
# Print the gold and prediction, if gold label is not 0
if print_details:
print()
print(f"Text: {gold.text}")
print(f"gold_spans: {[(e.start, e.text, e.label_) for e in gold.ents]}")
print(f"pred_spans: {[(e.start, e.text, e.label_) for e in pred.ents]}")
gold_ents = [e.text for e in gold.ents]
assessed_ents = []
for value, rel_dict in pred._.rel.items():
try:
gold_labels = [k for (k, v) in gold._.rel[value].items() if v == 1.0]
if gold_labels:
print(
f" pair: {value} --> gold labels: {gold_labels} --> predicted values: {rel_dict}"
)
except KeyError:
pred_rel = max(rel_dict.items(),key=operator.itemgetter(1))
if pred_rel[1] > 0.5:
print("Relation mapped with wrong entity pair")
else:
parent_ent = list(filter(lambda x: x.start == value[0], pred.ents))[0].text
child_ent = list(filter(lambda x: x.start == value[1], pred.ents))[0].text
if parent_ent not in assessed_ents:
if parent_ent in gold_ents:
print(parent_ent," Correct entity and correctly didn't map relation")
else:
print(parent_ent," incorrect entity")
assessed_ents.append(parent_ent)
if child_ent not in assessed_ents:
if child_ent in gold_ents:
print(child_ent, "Correct entity and correctly didn't map relation")
else:
print(child_ent, "incorrect entity")
assessed_ents.append(child_ent)
print()
thresholds = [0.000, 0.050, 0.100, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99, 0.999]
print()
print("Results of the trained model:")
#outfile.write()
    task = ner_model_path is not None
_score_and_format(examples, thresholds, task)
def _score_and_format(examples, thresholds, task):
"""outputs rel and joint performance scores, to console and/or txt file"""
#if task:
#outfile.write("Joint\n")
#else:
#outfile.write("Rel alone\n")
for threshold in thresholds:
r = score_relations(examples, threshold)
results = {k: "{:.2f}".format(v * 100) for k, v in r.items()}
print(f"threshold {'{:.2f}'.format(threshold)} \t {results}")
#outfile.write(f"threshold {'{:.2f}'.format(threshold)} \t {results}\n")
def evaluate_result_tables(gold_path, predicted_path, strict = True):
""" Evaluates performance of model on tabulation task, compares prediction tables
vs gold tables, can output to console and/or txt file"""
print("|| Evaluating table task performance")
prf = PRFScore()
examples = []
for gold_csv, pred_csv in zip(os.listdir(gold_path), os.listdir(predicted_path)):
gold_open = open(os.path.join(gold_path, gold_csv), newline='')
pred_open = open(os.path.join(predicted_path, pred_csv), newline='')
gold_list = [d for d in csv.DictReader(gold_open)]
pred_list = [d for d in csv.DictReader(pred_open)]
        if gold_list == []:
            continue  # empty gold lists are data errors and should be skipped
        for gold, pred in zip(gold_list, pred_list):
            del gold['']
            del pred['']
            examples.append({"gold": gold, "pred": pred})
if pred_list == []: # empty lists in pred are false negatives if not empty in gold
for gold in gold_list:
del gold['']
examples.append({"gold": gold, "pred": {}})
if strict: # assess table with exact entity matches
for example in examples:
if not example["pred"]: prf.fn += 1
else:
if example["pred"] == example["gold"]: prf.tp += 1
else: prf.fp += 1
else: # assess tables with less strict entity criteria -- gold/pred entity boundary overlap
for example in examples:
relaxed_match = True
if not example["pred"]: prf.fn += 1 # empty prediction --> false negative
else:
for pval, gval in zip(example["pred"].values(), example["gold"].values()):
if gval not in pval and pval not in gval:
relaxed_match = False
if relaxed_match: prf.tp += 1
else: prf.fp += 1
output = {"rel_micro_p": prf.precision,
"rel_micro_r": prf.recall,
"rel_micro_f": prf.fscore,}
outfile.write("Table_evaluation")
if strict: outfile.write("strict\n")
else: outfile.write("relaxed\n")
outfile.write(f"{output}\n")
print(output)
def create_ner_confusion_matrix(model_path, test_path):
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
import matplotlib.pyplot as plt
ner = spacy.load(model_path)
doc_bin = DocBin(store_user_data=True).from_disk(test_path)
gold_docs = list(doc_bin.get_docs(ner.vocab))
pred_docs = [ner(gold_doc.text) for gold_doc in gold_docs]
gold_array = []
pred_array = []
for gold_doc, pred_doc in zip(gold_docs, pred_docs):
for g_tok,p_tok in zip(gold_doc, pred_doc):
if g_tok.ent_type_ == '':
gold_array.append("NO_ENT")
else:
gold_array.append(g_tok.ent_type_)
if p_tok.ent_type_ == '':
pred_array.append("NO_ENT")
else:
pred_array.append(p_tok.ent_type_)
cm = confusion_matrix(gold_array, pred_array,
labels=["OC","INTV","MEAS","NO_ENT"],
sample_weight=None, normalize='true')
disp = ConfusionMatrixDisplay(confusion_matrix=cm,
display_labels=["OC","INTV","MEAS","NO_ENT"])
font = {'family': 'normal',
'weight': 'normal',
'size': 24}
plt.rc('font', **font)
fig, ax = plt.subplots(figsize=(12, 12))
disp = disp.plot(include_values=True,
cmap=plt.cm.Blues, ax=ax, xticks_rotation='vertical')
plt.show()
def create_rel_confusion_matrix(model_path, test_path):
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
import matplotlib.pyplot as plt
from scripts.tabulate import relation_extraction
vocab = Vocab()
doc_bin = DocBin(store_user_data=True).from_disk(test_path)
for_pred = list(doc_bin.get_docs(vocab))
pred_docs = relation_extraction(model_path, for_pred)
doc_bin = DocBin(store_user_data=True).from_disk(test_path)
gold_docs = list(doc_bin.get_docs(vocab))
pred_array, pred_keys, gold_keys, gold_array = [], [], [], []
for pred_doc, gold_doc in zip(pred_docs,gold_docs):
for pkey, p_rel_dict in pred_doc._.rel.items():
pred_keys.append(pkey)
if pkey in gold_doc._.rel.keys():
gold_keys.append(pkey)
gold_rel = gold_doc._.rel[pkey] # get relation
max_gold = max(gold_rel.items(),
key=operator.itemgetter(1)) # selects highest probability relation
if max_gold[1] > 0.5: # includes relation if above set threshold for probability
gold_array.append(max_gold[0])
else:
gold_array.append("NO_RE")
pred_rel = pred_doc._.rel[pkey] # get relation
max_pred = max(pred_rel.items(),
key=operator.itemgetter(1)) # selects highest probability relation
if max_pred[1] > 0.5: # includes relation if above set threshold for probability
pred_array.append(max_pred[0])
else:
pred_array.append("NO_RE")
cm = confusion_matrix(gold_array, pred_array, labels=["A1_RES", "A2_RES", "OC_RES", "NO_RE"],
sample_weight=None, normalize='true')
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=["A1_RES", "A2_RES", "OC_RES", "NO_RE"])
font = {'family': 'normal',
'weight': 'normal',
'size': 24}
plt.rc('font', **font)
fig, ax = plt.subplots(figsize=(12, 12))
disp = disp.plot(include_values=True,
cmap=plt.cm.Blues, ax=ax, xticks_rotation='vertical')
plt.show()
if __name__ == "__main__":
# some of these paths require trained models to be in place already
doc_path = "../datasets/preprocessed/out_of_domain/solid_tumour_cancer_as_test/test.spacy"
gold_table_path = "../datasets/preprocessed/all_domains/gold_tables"
pred_table_path = "../output_tables/all_domains_"
model_bases = ["biobert","scibert","roberta"]
# evaluate different model-bases
for model_base in model_bases:
outfile = open(f"../evaluation_results/{model_base}.txt", "w")
# assess ner performance
ner_evaluate(f"../trained_models/biobert/ner/all_domains/model-best",doc_path)
# assess rel performance
joint_ner_rel_evaluate(None,f"../trained_models/biobert/rel/all_domains/model-best",doc_path,False)
# assess joint performance
joint_ner_rel_evaluate(None,
f"../trained_models/rel/all_domains/model-best",doc_path,True)
# assess table strict performance
evaluate_result_tables(gold_table_path, f"{pred_table_path}{model_base}", strict=True)
# assess table relaxed performance
evaluate_result_tables(gold_table_path, f"{pred_table_path}{model_base}", strict=False)
outfile.close()
create_ner_confusion_matrix("../trained_models/ner/all_domains/model-best", doc_path)
create_rel_confusion_matrix("../trained_models/rel/all_domains/model-best", doc_path)
```
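A toy illustration (values invented) of the relaxed matching criterion in `evaluate_result_tables`: a cell pair matches when either string contains the other, rather than requiring exact equality:
```python
gold = {"intervention": "risperidone", "outcome": "somnolence"}
pred = {"intervention": "risperidone", "outcome": "somnolence , the most frequent AE"}

# mirrors the zip over .values() in the relaxed branch above
relaxed_match = all(g in p or p in g for g, p in zip(gold.values(), pred.values()))
print(relaxed_match)  # True -- "somnolence" is contained in the predicted cell
```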
#### File: pico-ner-relations/scripts/preprocessing.py
```python
import json, random, os, ast
from spacy.tokens import DocBin, Doc
from spacy.vocab import Vocab
from wasabi import Printer
def merge_jsonl(jsonl_dirs, output_path):
"""Merges gold standard JSONL files from different disease area domains"""
merge_list = []
for path in jsonl_dirs:
loaded = open(path,"r").read()
merge_list += loaded.split("\n")
with open(output_path, "w") as output:
for dict in merge_list:
if dict == "\n":
pass
else:
try:
dict = json.loads(dict)
output.write(json.dumps(dict) + "\n")
except:
print(dict)
output.close()
# This function was adapted from the spaCy relation component
# template: https://github.com/explosion/projects/tree/v3/tutorials
def annotations_to_spacy(json_loc):
"""Converts Prodigy annotations into doc object with custom rel attribute."""
msg = Printer()
MAP_LABELS = {
"A1_RES": "A1_RES",
"A2_RES": "A2_RES",
"OC_RES": "OC_RES",
}
try:
Doc.set_extension("rel", default={})
except ValueError:
print("Rel extension already set on doc")
vocab = Vocab()
docs = []
with open(json_loc, "r", encoding="utf8") as jsonfile:
for line in jsonfile:
example = json.loads(line)
span_starts = set()
if example["answer"] == "accept":
try:
# Parse the tokens
words = [t["text"] for t in example["tokens"]]
spaces = [t["ws"] for t in example["tokens"]]
doc = Doc(vocab, words=words, spaces=spaces)
# Parse the PICO entities
spans = example["spans"]
entities = []
span_end_to_start = {}
for span in spans:
entity = doc.char_span(
span["start"], span["end"], label=span["label"]
)
span_end_to_start[span["token_end"]] = span["token_start"]
entities.append(entity)
span_starts.add(span["token_start"])
doc.ents = entities
# Parse the PICO relations
rels = {}
for x1 in span_starts:
for x2 in span_starts:
rels[(x1, x2)] = {}
relations = example["relations"]
for relation in relations:
# swaps tokens to correct relation positions
start = span_end_to_start[relation["head"]]
end = span_end_to_start[relation["child"]]
label = relation["label"]
label = MAP_LABELS[label]
if label not in rels[(start, end)]:
rels[(start, end)][label] = 1.0
# The annotation is complete, so fill in zero's where the data is missing
for x1 in span_starts:
for x2 in span_starts:
for label in MAP_LABELS.values():
if label not in rels[(x1, x2)]:
rels[(x1, x2)][label] = 0.0
doc._.rel = rels
try:
pmid = ast.literal_eval(example["user_data"])
doc.user_data["pmid"] = pmid["pmid"]
except KeyError:
pass # pmids have not been added to glaucoma dataset
docs.append(doc)
except KeyError as e:
msg.fail(f"Skipping doc because of key error")
print(len(docs))
return docs
def train_dev_test_split(docs,output_dir):
"""
Splits spaCy docs collection into train test and dev datasets based on pmid
:param docs: list
:return: train dev and test spacy files for model training and testing
"""
pmid_set = set()
with_pmid = []
without_pmid = []
for doc in docs:
try:
pmid_set.add(doc.user_data["pmid"])
with_pmid.append(doc)
        except KeyError:  # primarily for the glaucoma dataset, which lacks pmids in this study
without_pmid.append(doc)
pmid_list = list(pmid_set)
random.shuffle(pmid_list) # randomise pmids before train, dev, test split
l = len(pmid_list)
train = pmid_list[0:int(l * 0.7)]
dev = pmid_list[int(l * 0.7):int(l * 0.8)]
w_train, w_dev, w_test = [],[],[]
for doc in with_pmid:
if doc.user_data["pmid"] in train:
w_train.append(doc)
elif doc.user_data["pmid"] in dev:
w_dev.append(doc)
else:
w_test.append(doc)
random.shuffle(without_pmid) # randomise sentences without pubmed ids for dividing across sets
l = len(without_pmid)
wo_train = without_pmid[0:int(l * 0.7)]
wo_dev = without_pmid[int(l * 0.7):int(l * 0.8)]
wo_test = without_pmid[int(l * 0.8):]
joined_train = w_train + wo_train
joined_dev = w_dev + wo_dev
joined_test = w_test + wo_test
docbin = DocBin(docs=joined_train, store_user_data=True)
docbin.to_disk(f"{output_dir}/train.spacy")
print(f"{len(joined_train)} training sentences")
docbin = DocBin(docs=joined_dev, store_user_data=True)
docbin.to_disk(f"{output_dir}/dev.spacy")
print(f"{len(joined_dev)} dev sentences")
docbin = DocBin(docs=joined_test, store_user_data=True)
docbin.to_disk(f"{output_dir}/test.spacy")
print(f"{len(joined_test)} test sentences")
def stratify_train_examples(doc_path, strats):
"""Stratifies input docs and binaries them into new spacy files"""
vocab = Vocab()
doc_bin = DocBin(store_user_data=True).from_disk(doc_path)
docs = list(doc_bin.get_docs(vocab))
l = len(docs)
for strat in strats:
if str(strat)[-2] != "0":
name = str(strat)[-1] + "0%"
else:
name = str(strat)[-1] + "%"
doc_strat = docs[:int(l*strat)]
docbin = DocBin(docs=doc_strat, store_user_data=True)
docbin.to_disk(f"../datasets/preprocessed/all_domains/training_stratifications/"
f"train_strat_{name}.spacy")
def out_of_domain_split(doc_dirs, exclude):
"""excludes one domain from full domain train and dev sets for use as test set"""
merged_docs = []
vocab = Vocab()
for dir in doc_dirs:
for files in os.listdir(dir):
doc_bin = DocBin(store_user_data=True).from_disk(f"{dir}/{files}")
merged_docs += list(doc_bin.get_docs(vocab))
l = len(merged_docs)
train = merged_docs[0:int(l * 0.9)]
dev = merged_docs[int(l * 0.9):]
test = []
test_dir = f"../datasets/preprocessed/{exclude}/results_only"
for files in os.listdir(test_dir):
doc_bin = DocBin(store_user_data=True).from_disk(f"{test_dir}/{files}")
test += list(doc_bin.get_docs(vocab))
docbin = DocBin(docs=train, store_user_data=True)
docbin.to_disk(f"../datasets/preprocessed/out_of_domain/{exclude}_as_test/train.spacy")
print(f"{len(train)} training sentences")
docbin = DocBin(docs=dev, store_user_data=True)
docbin.to_disk(f"../datasets/preprocessed/out_of_domain/{exclude}_as_test/dev.spacy")
print(f"{len(dev)} dev sentences")
docbin = DocBin(docs=test, store_user_data=True)
docbin.to_disk(f"../datasets/preprocessed/out_of_domain/{exclude}_as_test/test.spacy")
print(f"{len(test)} test sentences")
def cap_docs(doc_dirs, names, cap):
"""Caps number of examples in datasets for comparison, outputting both individual
sets and merged sets for incremental evaluation"""
vocab = Vocab()
merged_docs = []
merged_names = ""
count = 0
for docs, name in zip(doc_dirs, names):
doc_bin = DocBin(store_user_data=True).from_disk(docs)
        capped = list(doc_bin.get_docs(vocab))[:cap]
        random.shuffle(capped)
        l = len(capped)
        # 70/10/20 split, matching the convention in train_dev_test_split
        train = capped[:int(l*0.7)]
        dev = capped[int(l*0.7):int(l*0.8)]
        test = capped[int(l*0.8):]
capped_train = DocBin(docs=train, store_user_data=True)
capped_train.to_disk(f"../datasets/preprocessed/capped_for_comparison/{name}/train.spacy")
capped_dev = DocBin(docs=dev, store_user_data=True)
capped_dev.to_disk(f"../datasets/preprocessed/capped_for_comparison/{name}/dev.spacy")
capped_test = DocBin(docs=test, store_user_data=True)
capped_test.to_disk(f"../datasets/preprocessed/capped_for_comparison/{name}/test.spacy")
# output merged sets of capped docs for incremental domain increase evaluation
merged_docs += capped
merged_names += name + "_"
if count > 0:
random.shuffle(merged_docs)
train = merged_docs[:int(len(merged_docs)*0.9)]
dev = merged_docs[int(len(merged_docs)*0.9):]
merged_train = DocBin(docs= train, store_user_data=True)
merged_train.to_disk(f"../datasets/preprocessed/capped_mix/{merged_names[:-1]}/train.spacy")
merged_dev = DocBin(docs=dev, store_user_data=True)
merged_dev.to_disk(f"../datasets/preprocessed/capped_mix/{merged_names[:-1]}/dev.spacy")
count += 1
if __name__ == "__main__":
gold_dir = "../datasets/gold_result_annotations"
cardiovascular = f"{gold_dir}/cardiovascular_disease/cardiovascular_disease_gold.jsonl"
glaucoma = f"{gold_dir}/glaucoma/glaucoma_gold.jsonl"
merge_all_list = [f'../datasets/gold_result_annotations/{domain}/{domain}_gold.jsonl'
for domain in os.listdir("../datasets/gold_result_annotations")
if domain != "all_domains"]
stratify_train_examples("../datasets/preprocessed/all_domains/results_only/train.spacy",[0.05,0.5])
merge_jsonl(merge_all_list, "../datasets/gold_result_annotations/all_domains/all_domains_gold.jsonl")
for domain in os.listdir("../datasets/gold_result_annotations"):
docs = annotations_to_spacy(f"../datasets/gold_result_annotations/{domain}/{domain}_gold.jsonl")
train_dev_test_split(docs,(f"../datasets/preprocessed/{domain}/results_only"))
names = ["glaucoma","cardiovascular_disease","solid_tumour_cancer"]
docs = ["../datasets/preprocessed/out_of_domain/glaucoma_as_test/test.spacy",
"../datasets/preprocessed/out_of_domain/cardiovascular_disease_as_test/test.spacy",
"../datasets/preprocessed/out_of_domain/solid_tumour_cancer_as_test/test.spacy"]
cap_docs(docs,names, 72)
```
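For reference, the custom `rel` attribute built by `annotations_to_spacy` has this shape (toy values): keys are `(head_token_start, child_token_start)` pairs and values map each relation label to a 0/1 score:
```python
rels = {
    (0, 7): {"A1_RES": 1.0, "A2_RES": 0.0, "OC_RES": 0.0},  # annotated relation
    (7, 0): {"A1_RES": 0.0, "A2_RES": 0.0, "OC_RES": 0.0},  # no relation between this pair
}
```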
|
{
"source": "jettan/form2fit",
"score": 3
}
|
#### File: form2fit/benchmark/metrics.py
```python
import cv2
import numpy as np
from form2fit.code.utils.pointcloud import transform_xyz
def rotational_error(R1, R2):
r, _ = cv2.Rodrigues(R1.dot(R2.T))
return np.degrees(np.linalg.norm(r))
def translational_error(t1, t2):
return np.linalg.norm(t1 - t2)
def compute_ADD(pose_true, pred_pose, obj_xyz):
"""Computes the Average Distance Metric (ADD) [1].
[1]: https://arxiv.org/pdf/1711.00199.pdf
"""
obj_xyz_pred = transform_xyz(obj_xyz, pred_pose)
obj_xyz_true = transform_xyz(obj_xyz, pose_true)
return np.linalg.norm(obj_xyz_pred - obj_xyz_true, axis=1).mean()
def reprojection_error(pose_true, pred_pose, obj_xyz, view_bounds, pixel_size):
obj_xyz_pred = transform_xyz(obj_xyz, pred_pose)
obj_xyz_true = transform_xyz(obj_xyz, pose_true)
obj_xyz_pred[:, 0] = (obj_xyz_pred[:, 0] - view_bounds[0, 0]) / pixel_size
obj_xyz_pred[:, 1] = (obj_xyz_pred[:, 1] - view_bounds[1, 0]) / pixel_size
obj_idx_pred = obj_xyz_pred[:, [1, 0]]
obj_xyz_true[:, 0] = (obj_xyz_true[:, 0] - view_bounds[0, 0]) / pixel_size
obj_xyz_true[:, 1] = (obj_xyz_true[:, 1] - view_bounds[1, 0]) / pixel_size
obj_idx_true = obj_xyz_true[:, [1, 0]]
return np.linalg.norm(obj_idx_true - obj_idx_pred, axis=1).mean()
```
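A quick sanity check of the metrics above (synthetic poses, not from the repo): a 10-degree rotation about z against the identity should give a rotational error of about 10 degrees:
```python
import numpy as np
from form2fit.benchmark.metrics import rotational_error, compute_ADD

theta = np.radians(10)
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
print(rotational_error(R, np.eye(3)))  # ~10.0 degrees

pose_true = np.eye(4)
pose_pred = np.eye(4)
pose_pred[:3, :3] = R
obj_xyz = np.random.rand(100, 3)
print(compute_ADD(pose_true, pose_pred, obj_xyz))  # mean displacement of the rotated points
```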
#### File: code/gui/main.py
```python
import argparse
import os
import sys
import glob
import pickle
import torch
import torch.nn as nn
import numpy as np
import matplotlib.cm as cm
from form2fit import config
from form2fit.code.ml.models import *
from form2fit.code.ml.dataloader import get_corr_loader
from form2fit.code.utils import ml, misc
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class Debugger(QDialog):
"""A PyQt5 GUI for debugging a descriptor network.
"""
USE_CUDA = True
WINDOW_WIDTH = 1500
WINDOW_HEIGHT = 1000
WINDOW_TITLE = "Debug Descriptor Network"
def __init__(self, args):
super().__init__()
self._foldername = args.foldername
self._dtype = args.dtype
self._num_desc = args.num_desc
self._background_subtract = args.background_subtract
self._augment = args.augment
self._num_channels = args.num_channels
self._init_loader_and_network()
self._reset()
self._init_UI()
self.show()
def _init_loader_and_network(self):
"""Initializes the data loader and network.
"""
self._dev = torch.device("cuda" if Debugger.USE_CUDA and torch.cuda.is_available() else "cpu")
self._data = get_corr_loader(
self._foldername,
batch_size=1,
sample_ratio=1,
dtype=self._dtype,
shuffle=False,
num_workers=0,
augment=self._augment,
num_rotations=20,
background_subtract=config.BACKGROUND_SUBTRACT[self._foldername],
num_channels=self._num_channels,
)
self._net = CorrespondenceNet(self._num_channels, self._num_desc, 20).to(self._dev)
self._net.eval()
stats = self._data.dataset.stats
self._color_mean = stats[0][0]
self._color_std = stats[0][1]
self._resolve_data_dims()
def _resolve_data_dims(self):
"""Reads the image dimensions from the data loader.
"""
x, _, _ = next(iter(self._data))
self._h, self._w = x.shape[2:]
self._c = 3
self._zeros = np.zeros((self._h, self._w, self._c), dtype="uint8")
self.xs = None
self.xt = None
def _reset(self):
"""Resets the GUI.
"""
def _he_init(m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(m.weight, mode="fan_in")
self._is_switch = False
self._pair_idx = 0
self._dloader = iter(self._data)
self._get_network_names()
self._net.apply(_he_init)
def _get_network_names(self):
"""Reads all saved model weights.
"""
self.weights_dir = os.path.join(config.weights_dir, "matching")
filenames = glob.glob(os.path.join(self.weights_dir, "*.tar"))
self._model_names = [os.path.basename(x).split(".")[0] for x in filenames]
def _load_selected_network(self, name):
"""Loads a trained network.
"""
if name:
self._model_name = name
state_dict = torch.load(os.path.join(self.weights_dir, name + ".tar"), map_location=self._dev)
self._net.load_state_dict(state_dict['model_state'])
self._set_prediction_text("{} was loaded...".format(name))
def _init_UI(self):
"""Initializes the UI.
"""
self.setWindowTitle(Debugger.WINDOW_TITLE)
# self.setFixedSize(Debugger.WINDOW_WIDTH, Debugger.WINDOW_HEIGHT)
self._create_menu()
self._create_main()
self._create_progress()
self._all_layout = QVBoxLayout(self)
self._all_layout.addLayout(self._menu_layout)
self._all_layout.addLayout(self._main_layout)
self._all_layout.addLayout(self._progress_layout)
def _create_menu(self):
"""Creates the top horizontal menu bar.
"""
# buttons
next_button = QPushButton("Next Pair", self)
next_button.clicked.connect(self._next_click)
reset_button = QPushButton("Reset", self)
reset_button.clicked.connect(self._reset_click)
sample_button = QPushButton("Sample", self)
sample_button.clicked.connect(self._sample_click)
colorize_button = QPushButton("Rotation Error", self)
colorize_button.clicked.connect(self._colorize_click)
self._switch_button = QPushButton("View RGB", self)
self._switch_button.clicked.connect(self._switch_click)
# boxes
self._is_correct_box = QLabel(self)
self._networks_box = QComboBox(self)
self._networks_box.addItems([""] + self._model_names)
self._networks_box.activated[str].connect(self._load_selected_network)
self._networks_box_label = QLabel("Network Name", self)
self._networks_box_label.setBuddy(self._networks_box)
# add to layout
self._menu_layout = QHBoxLayout()
self._menu_layout.addWidget(self._networks_box_label)
self._menu_layout.addWidget(self._networks_box)
self._menu_layout.addWidget(next_button)
self._menu_layout.addWidget(sample_button)
self._menu_layout.addWidget(colorize_button)
self._menu_layout.addWidget(self._is_correct_box)
self._menu_layout.addStretch(1)
self._menu_layout.addWidget(self._switch_button)
self._menu_layout.addWidget(reset_button)
def _create_main(self):
"""Creates the main layout.
"""
vbox_left = QVBoxLayout()
grid_right = QGridLayout()
self._target_widget = QLabel(self)
self._source_widget = QLabel(self)
self._grid_widgets = [QLabel(self) for _ in range(20)]
self._draw_target(init=True)
self._draw_source(init=True)
vbox_left.addWidget(self._target_widget)
vbox_left.addWidget(self._source_widget)
self._target_widget.mousePressEvent = self._get_mouse_pos
self._draw_rotations(init=True)
for col in range(5):
for row in range(4):
grid_right.addWidget(self._grid_widgets[col * 4 + row], col, row)
self._main_layout = QHBoxLayout()
self._main_layout.addLayout(vbox_left)
self._main_layout.addLayout(grid_right)
def _create_progress(self):
"""A progress bar for the data loader.
"""
self._progress_bar = QProgressBar(self)
self._progress_bar.setRange(0, len(self._dloader))
self._progress_bar.setValue(0)
self._progress_layout = QHBoxLayout()
self._progress_layout.addWidget(self._progress_bar)
self._advance_progress_bar()
def _draw_target(self, uv=None, init=False):
img_target = self._zeros.copy() if init else self._xt_np.copy()
if uv is not None:
img_target[uv[0] - 1 : uv[0] + 1, uv[1] - 1 : uv[1] + 1] = [255, 0, 0]
self._target_img = QImage(
img_target.data, self._w, self._h, self._c * self._w, QImage.Format_RGB888
)
self._target_pixmap = QPixmap.fromImage(self._target_img)
self._target_widget.setPixmap(self._target_pixmap)
self._target_widget.setScaledContents(True)
def _draw_source(self, uvs=None, init=False):
if uvs is None:
img_source = self._zeros.copy() if init else self._xs_np.copy()
else:
img_source = self._xt_np.copy()
colors = [[0, 255, 0], [0, 0, 255], [255, 0, 0]]
color_names = ["green", "blue", "red"]
for i in range(3):
mask = np.where(uvs[:, 2] == i)[0]
idxs = uvs[mask]
img_source[idxs[:, 0], idxs[:, 1]] = colors[i]
self._source_img = QImage(
img_source.data, self._w, self._h, self._c * self._w, QImage.Format_RGB888
)
self._source_pixmap = QPixmap.fromImage(self._source_img)
self._source_widget.setPixmap(self._source_pixmap)
self._source_widget.setScaledContents(True)
def _draw_rotations(self, init=False, heatmap=True):
def _hist_eq(img):
from skimage import exposure
img_cdf, bin_centers = exposure.cumulative_distribution(img)
return np.interp(img, bin_centers, img_cdf)
for col in range(5):
for row in range(4):
offset = col * 4 + row
if init:
img = self._zeros.copy()
else:
if heatmap:
img = self.heatmaps[offset].copy()
img = img / img.max()
img = _hist_eq(img)
img = np.uint8(cm.viridis(img) * 255)[..., :3]
img = img.copy()
else:
img = misc.rotate_img(self._xs_np, -(360 / 20) * offset, center=(self.center[1], self.center[0]))
img = img.copy()
if offset == self._uv[-1]:
img[
self._uv[0] - 1 : self._uv[0] + 1,
self._uv[1] - 1 : self._uv[1] + 1,
] = [255, 0, 0]
self._add_border_clr(img, [255, 0, 0])
if offset == self.best_rot_idx:
self._add_border_clr(img, [0, 255, 0])
self._img = QImage(
img.data, self._w, self._h, self._c * self._w, QImage.Format_RGB888
)
pixmap = QPixmap.fromImage(self._img)
self._grid_widgets[offset].setPixmap(pixmap)
self._grid_widgets[offset].setScaledContents(True)
def _switch_click(self):
if not self._is_switch:
self._switch_button.setText("Heatmap View")
self._is_switch = True
self._draw_rotations(heatmap=False)
else:
self._switch_button.setText("RGB View")
self._is_switch = False
self._draw_rotations(heatmap=True)
def _next_click(self):
if self._pair_idx == len(self._dloader):
self.close()
else:
self._get_next_data()
self._draw_target()
self._draw_source()
self._draw_rotations(init=True)
self._advance_progress_bar()
def _reset_click(self):
self._reset()
self._networks_box.setCurrentIndex(0)
self._draw_target(init=True)
self._draw_source(init=True)
self._draw_rotations(init=True)
self._advance_progress_bar()
def _colorize_click(self):
filename = os.path.join(
config.rot_stats_dir,
self._model_name,
self._dtype,
str(self._pair_idx - 1),
"rot_color.npy",
)
pixel_colors = np.load(filename)
self._draw_source(pixel_colors)
def _set_prediction_text(self, msg):
self._is_correct_box.setText(msg)
def _sample_click(self):
if self._pair_idx > 0:
self._forward_network()
rand_idx = np.random.choice(np.arange(len(self.target_pixel_idxs)))
u_rand, v_rand = self.target_pixel_idxs[rand_idx]
self._draw_target([u_rand, v_rand])
u_s, v_s = self.source_pixel_idxs[rand_idx]
target_vector = self.out_t[:, :, u_rand, v_rand]
outs_flat = self.outs.view(self.outs.shape[0], self.outs.shape[1], -1)
target_vector_flat = target_vector.unsqueeze_(2).repeat(
(outs_flat.shape[0], 1, outs_flat.shape[2])
)
diff = outs_flat - target_vector_flat
dist = diff.pow(2).sum(1).sqrt()
self.heatmaps = dist.view(dist.shape[0], self._h, self._w).cpu().numpy()
predicted_best_idx = dist.min(dim=1)[0].argmin()
is_correct = predicted_best_idx == self.best_rot_idx
msg = "Correct!" if is_correct else "Wrong!"
self._set_prediction_text(msg)
min_val = self.heatmaps[predicted_best_idx].argmin()
u_min, v_min = misc.make2d(min_val, self._w)
self._uv = [u_min, v_min, predicted_best_idx]
self._draw_rotations(heatmap=not self._is_switch)
else:
print("[!] You must first click next to load a data sample.")
def _get_mouse_pos(self, event):
v = event.pos().x()
u = event.pos().y()
u = int(u * (self._h / self._target_widget.height()))
v = int(v * (self._w / self._target_widget.width()))
uv = [u, v]
if self.xs is not None and self.xt is not None:
self._forward_network()
row_idx = np.where((self.target_pixel_idxs == uv).all(axis=1))[0]
if row_idx.size != 0:
row_idx = row_idx[0]
self._draw_target(uv)
u_s, v_s = self.source_pixel_idxs[row_idx]
target_vector = self.out_t[:, :, uv[0], uv[1]]
outs_flat = self.outs.view(self.outs.shape[0], self.outs.shape[1], -1)
target_vector_flat = target_vector.unsqueeze_(2).repeat(
(outs_flat.shape[0], 1, outs_flat.shape[2])
)
diff = outs_flat - target_vector_flat
dist = diff.pow(2).sum(1).sqrt()
self.heatmaps = dist.view(dist.shape[0], self._h, self._w).cpu().numpy()
predicted_best_idx = dist.min(dim=1)[0].argmin()
is_correct = predicted_best_idx == self.best_rot_idx
msg = "Correct!" if is_correct else "Wrong!"
self._set_prediction_text(msg)
min_val = self.heatmaps[predicted_best_idx].argmin()
u_min, v_min = misc.make2d(min_val, self._w)
self._uv = [u_min, v_min, predicted_best_idx]
self._draw_rotations(heatmap=not self._is_switch)
def _get_next_data(self):
"""Grabs a fresh pair of source and target data points.
"""
self._pair_idx += 1
self.imgs, labels, center = next(self._dloader)
self.center = center[0]
label = labels[0]
self.xs, self.xt = self.imgs[:, :self._num_channels, :, :], self.imgs[:, self._num_channels:, :, :]
if self._num_channels == 4:
self._xs_np = ml.tensor2ndarray(self.xs[:, :3], [self._color_mean * 3, self._color_std * 3])
self._xt_np = ml.tensor2ndarray(self.xt[:, :3], [self._color_mean * 3, self._color_std * 3])
else:
self._xs_np = ml.tensor2ndarray(self.xs[:, :1], [self._color_mean, self._color_std], False)
self._xt_np = ml.tensor2ndarray(self.xt[:, :1], [self._color_mean, self._color_std], False)
self._xs_np = np.uint8(cm.viridis(self._xs_np) * 255)[..., :3]
self._xt_np = np.uint8(cm.viridis(self._xt_np) * 255)[..., :3]
source_idxs = label[:, 0:2]
target_idxs = label[:, 2:4]
rot_idx = label[:, 4]
is_match = label[:, 5]
self.best_rot_idx = rot_idx[0].item()
mask = (is_match == 1) & (rot_idx == self.best_rot_idx)
self.source_pixel_idxs = source_idxs[mask].numpy()
self.target_pixel_idxs = target_idxs[mask].numpy()
def _forward_network(self):
"""Forwards the current source-target pair through the network.
"""
self.imgs = self.imgs.to(self._dev)
with torch.no_grad():
self.outs, self.out_t = self._net(self.imgs, *self.center)
self.outs = self.outs[0]
def _advance_progress_bar(self):
"""Advances the progress bar.
"""
        curr_val = self._pair_idx
        # pair_idx already counts completed pairs out of len(dloader), and
        # QProgressBar.setValue expects an int
        self._progress_bar.setValue(curr_val)
def _add_border_clr(self, img, color):
"""Adds a border color to an image.
"""
        img[0 : self._h - 1, 0:10] = color  # left
        img[0:10, 0 : self._w - 1] = color  # top
        img[self._h - 11 : self._h - 1, 0 : self._w - 1] = color  # bottom
        img[0 : self._h - 1, self._w - 11 : self._w - 1] = color  # right
return img
if __name__ == "__main__":
def str2bool(s):
return s.lower() in ["1", "true"]
parser = argparse.ArgumentParser(description="Descriptor Network Visualizer")
parser.add_argument("foldername", type=str)
parser.add_argument("--dtype", type=str, default="valid")
parser.add_argument("--num_desc", type=int, default=64)
parser.add_argument("--num_channels", type=int, default=2)
parser.add_argument("--background_subtract", type=tuple, default=None)
parser.add_argument("--augment", type=str2bool, default=False)
args, unparsed = parser.parse_known_args()
app = QApplication(sys.argv)
window = Debugger(args)
window.show()
sys.exit(app.exec_())
```
#### File: ml/dataloader/placement.py
```python
import glob
import logging
import multiprocessing
import os
import pickle
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from pathlib import Path
from PIL import Image
from skimage.draw import circle
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from walle.core import RotationMatrix
from form2fit import config
from form2fit.code.utils import misc
class PlacementDataset(Dataset):
"""The placement network dataset.
"""
def __init__(self, root, sample_ratio, stateless, augment, background_subtract, num_channels, radius):
"""Initializes the dataset.
Args:
root: (str) Root directory path.
sample_ratio: (float) The ratio of negative to positive
labels.
stateless: (bool) Whether to use just the current placement
point and ignore all the ones from the previous objects
in the sequence.
augment: (bool) Whether to apply data augmentation.
            background_subtract: (tuple) Depth threshold used for background
                subtraction, or None to disable it.
            num_channels: (int) If 4, the grayscale heightmap is cloned across
                3 channels to mimic an RGB image (plus 1 depth channel).
            radius: (int) The radius of the disk of pixels labeled positive
                around each placement point.
        """
self._root = root
self._sample_ratio = sample_ratio
self._augment = augment
self._stateless = stateless
self._background_subtract = background_subtract
self._num_channels = num_channels
self.radius = radius
# figure out how many data samples we have
self._get_filenames()
stats = pickle.load(open(os.path.join(Path(self._root).parent, "mean_std.p"), "rb"))
if self._num_channels == 4:
self._c_norm = transforms.Normalize(mean=stats[0][0] * 3, std=stats[0][1] * 3)
else:
self._c_norm = transforms.Normalize(mean=stats[0][0], std=stats[0][1])
self._d_norm = transforms.Normalize(mean=stats[1][0], std=stats[1][1])
self._transform = transforms.ToTensor()
def __len__(self):
return len(self._filenames)
def _get_filenames(self):
self._filenames = glob.glob(os.path.join(self._root, "*/"))
self._filenames.sort(key=lambda x: int(x.split("/")[-2]))
def _load_state(self, name):
"""Loads the raw state variables.
"""
        # load the list of placement points
placement_points = np.loadtxt(os.path.join(name, "placement_points.txt"), ndmin=2)
# we just want the current timestep place point
placement_points = np.round(placement_points)
if self._stateless:
placement_points = placement_points[-1:]
# load heightmaps
c_height = np.asarray(Image.open(os.path.join(name, "final_color_height.png")))
d_height = np.asarray(Image.open(os.path.join(name, "final_depth_height.png")))
c_height_i = np.asarray(Image.open(os.path.join(name, "init_color_height.png")))
d_height_i = np.asarray(Image.open(os.path.join(name, "init_depth_height.png")))
# convert depth to meters
d_height_i = (d_height_i * 1e-3).astype("float32")
d_height = (d_height * 1e-3).astype("float32")
# load kit mask
kit_mask = np.load(os.path.join(name, "curr_kit_plus_hole_mask.npy"))
return c_height, d_height, placement_points, kit_mask, c_height_i, d_height_i
def _split_heightmap(self, height):
"""Splits a heightmap into a source and target.
For placement, we just need the source heightmap.
"""
half = height.shape[1] // 2
self._half = half
height_s = height[:, half:].copy()
return height_s
def _sample_negative(self, positives):
"""Randomly samples negative pixel indices.
"""
max_val = self._H * self._W
num_pos = len(positives)
num_neg = int(num_pos * self._sample_ratio)
positives = np.round(positives).astype("int")
positives = positives[:, :2]
positives = np.ravel_multi_index((positives[:, 0], positives[:, 1]), (self._H, self._W))
if self._sample_ratio < 70:
negative_indices = []
while len(negative_indices) < num_neg:
negative = np.random.randint(0, max_val)
if negative not in positives:
negative_indices.append(negative)
else:
allowed = list(set(np.arange(0, max_val)) - set(positives.ravel()))
np.random.shuffle(allowed)
negative_indices = allowed[:num_neg]
negative_indices = np.unravel_index(negative_indices, (self._H, self._W))
return negative_indices
def _sample_free_negative(self, kit_mask):
"""Randomly samples negative pixel indices.
"""
max_val = self._H * self._W
num_neg = int(100 * self._sample_ratio)
negative_indices = []
while len(negative_indices) < num_neg:
negative_indices.append(np.random.randint(0, max_val))
negative_indices = np.vstack(np.unravel_index(negative_indices, (self._H, self._W))).T
idxs = np.random.choice(np.arange(len(kit_mask)), size=30, replace=False)
inside = kit_mask[idxs]
negative_indices = np.vstack([negative_indices, inside])
return negative_indices
def _sample_translation(self, corrz, angle):
aff_1 = np.eye(3)
aff_1[:2, 2] = [-self._uc, -self._vc]
aff_2 = RotationMatrix.rotz(-angle)
aff_3 = np.eye(3, 3)
aff_3[:2, 2] = [self._uc, self._vc]
affine = aff_3 @ aff_2 @ aff_1
affine = affine[:2, :]
corrs = []
for corr in corrz:
ones = np.ones((len(corr), 1))
corrs.append((affine @ np.hstack((corr, ones)).T).T)
max_vv = corrs[0][:, 1].max()
max_vu = corrs[0][corrs[0][:, 1].argmax()][0]
min_vv = corrs[0][:, 1].min()
min_vu = corrs[0][corrs[0][:, 1].argmin()][0]
max_uu = corrs[0][:, 0].max()
max_uv = corrs[0][corrs[0][:, 0].argmax()][1]
min_uu = corrs[0][:, 0].min()
min_uv = corrs[0][corrs[0][:, 0].argmin()][1]
for t in corrs[1:]:
if t[:, 1].max() > max_vv:
max_vv = t[:, 1].max()
max_vu = t[t[:, 1].argmax()][0]
if t[:, 1].min() < min_vv:
min_vv = t[:, 1].min()
min_vu = t[t[:, 1].argmin()][0]
if t[:, 0].max() > max_uu:
max_uu = t[:, 0].max()
max_uv = t[t[:, 0].argmax()][1]
if t[:, 0].min() < min_uu:
min_uu = t[:, 0].min()
min_uv = t[t[:, 0].argmin()][1]
tu = np.random.uniform(-min_vv + 10, self._W - max_vv - 10)
tv = np.random.uniform(-min_uu + 10, self._H - max_uu - 10)
return tu, tv
def __getitem__(self, idx):
name = self._filenames[idx]
# load state
c_height, d_height, positives, kit_mask, c_height_i, d_height_i = self._load_state(name)
# split heightmap into source and target
c_height = self._split_heightmap(c_height)
d_height = self._split_heightmap(d_height)
c_height_i = self._split_heightmap(c_height_i)
d_height_i = self._split_heightmap(d_height_i)
self._H, self._W = c_height.shape[:2]
pos_placement = []
for pos in positives:
rr, cc = circle(pos[0], pos[1], self.radius)
pos_placement.append(np.vstack([rr, cc]).T)
pos_placement = np.concatenate(pos_placement)
# offset placement point to adjust for splitting
pos_placement[:, 1] = pos_placement[:, 1] - self._half
kit_mask[:, 1] = kit_mask[:, 1] - self._half
# center of rotation is the center of the kit
self._uc = int((kit_mask[:, 0].max() + kit_mask[:, 0].min()) // 2)
self._vc = int((kit_mask[:, 1].max() + kit_mask[:, 1].min()) // 2)
if self._augment:
shape = (self._W, self._H)
angle = np.radians(np.random.uniform(0, 360))
tu, tv = self._sample_translation([kit_mask], angle)
aff_1 = np.eye(3)
aff_1[:2, 2] = [-self._vc, -self._uc]
aff_2 = RotationMatrix.rotz(angle)
aff_2[:2, 2] = [tu, tv]
aff_3 = np.eye(3, 3)
aff_3[:2, 2] = [self._vc, self._uc]
affine = aff_3 @ aff_2 @ aff_1
affine = affine[:2, :]
c_height = cv2.warpAffine(c_height, affine, shape, flags=cv2.INTER_NEAREST)
d_height = cv2.warpAffine(d_height, affine, shape, flags=cv2.INTER_NEAREST)
aff_1[:2, 2] = [-self._uc, -self._vc]
aff_2 = RotationMatrix.rotz(-angle)
aff_2[:2, 2] = [tv, tu]
aff_3[:2, 2] = [self._uc, self._vc]
affine = aff_3 @ aff_2 @ aff_1
affine = affine[:2, :]
pos_placement = (affine @ np.hstack((pos_placement, np.ones((len(pos_placement), 1)))).T).T
kit_mask = (affine @ np.hstack((kit_mask, np.ones((len(kit_mask), 1)))).T).T
# update center of rotation
self._uc = int((kit_mask[:, 0].max() + kit_mask[:, 0].min()) // 2)
self._vc = int((kit_mask[:, 1].max() + kit_mask[:, 1].min()) // 2)
if self._background_subtract is not None:
idxs = np.vstack(np.where(d_height > self._background_subtract[0])).T
mask = np.zeros_like(d_height)
mask[idxs[:, 0], idxs[:, 1]] = 1
mask = misc.largest_cc(np.logical_not(mask))
idxs = np.vstack(np.where(mask == 1)).T
c_height[idxs[:, 0], idxs[:, 1]] = 0
d_height[idxs[:, 0], idxs[:, 1]] = 0
idxs = np.vstack(np.where(d_height_i > self._background_subtract[0])).T
mask = np.zeros_like(d_height)
mask[idxs[:, 0], idxs[:, 1]] = 1
mask = misc.largest_cc(np.logical_not(mask))
idxs = np.vstack(np.where(mask == 1)).T
c_height_i[idxs[:, 0], idxs[:, 1]] = 0
d_height_i[idxs[:, 0], idxs[:, 1]] = 0
if self._num_channels == 2:
c_height = c_height[..., np.newaxis]
c_height_i = c_height_i[..., np.newaxis]
else: # clone the gray channel 3 times
c_height = np.repeat(c_height[..., np.newaxis], 3, axis=-1)
c_height_i = np.repeat(c_height_i[..., np.newaxis], 3, axis=-1)
# convert heightmaps tensors
c_height = self._c_norm(self._transform(c_height))
d_height = self._d_norm(self._transform(d_height[..., np.newaxis]))
c_height_i = self._c_norm(self._transform(c_height_i))
d_height_i = self._d_norm(self._transform(d_height_i[..., np.newaxis]))
# concatenate height and depth into a 4-channel tensor
# img_tensor = torch.cat([c_height, d_height], dim=0)
img_tensor_i = torch.cat([c_height_i, d_height_i], dim=0)
img_tensor = torch.cat([c_height, d_height], dim=0)
img_tensor = torch.stack([img_tensor_i, img_tensor], dim=0)
# add columns of 1 (positive labels)
pos_label = np.hstack((pos_placement, np.ones((len(pos_placement), 1))))
# generate negative labels
neg_placement = np.vstack(self._sample_negative(pos_label)).T
neg_label = np.hstack((neg_placement, np.zeros((len(neg_placement), 1))))
# stack positive and negative into a single array
label = np.vstack((pos_label, neg_label))
neg_placement_i = self._sample_free_negative(kit_mask)
neg_label_i = np.hstack((neg_placement_i, np.zeros((len(neg_placement_i), 1))))
label_tensor_i = torch.LongTensor(neg_label_i)
label_tensor_f = torch.LongTensor(label)
label_tensor = [label_tensor_i, label_tensor_f]
        # convert placement labels to tensors
# label_tensor = torch.LongTensor(label)
return img_tensor, label_tensor
def get_placement_loader(
foldername,
dtype="train",
batch_size=1,
sample_ratio=1.0,
shuffle=True,
stateless=True,
augment=False,
background_subtract=None,
num_channels=2,
radius=2,
num_workers=4,
use_cuda=True,
):
"""Returns a dataloader over the `Placement` dataset.
Args:
foldername: (str) The name of the folder containing the data.
dtype: (str) Whether to use the train, validation or test partition.
batch_size: (int) The number of data samples in a batch.
sample_ratio: (float) The ratio of negative to positive
labels.
shuffle: (bool) Whether to shuffle the dataset at the end
of every epoch.
stateless: (bool) Whether to use just the current placement
point and ignore all the ones from the previous objects
in the sequence.
augment: (bool) Whether to apply data augmentation.
        background_subtract: (tuple) Depth threshold used for background
            subtraction, or None to disable it.
        num_channels: (int) If 4, the grayscale heightmap is cloned across
            3 channels to mimic an RGB image.
        radius: (int) The radius of the disk of pixels labeled positive
            around each placement point.
num_workers: (int) How many processes to use. Each workers
is responsible for loading a batch.
use_cuda: (bool) Whether to use the GPU.
"""
def _collate_fn(batch):
"""A custom collate function.
This is to support variable length suction labels.
"""
# imgs = [b[0] for b in batch]
# labels = [b[1] for b in batch]
# imgs = torch.stack(imgs, dim=0)
# return [imgs, labels]
imgs = [b[0] for b in batch]
labels = [b[1] for b in batch]
imgs = torch.cat(imgs, dim=0)
labels = [l for sublist in labels for l in sublist]
return [imgs, labels]
num_workers = min(num_workers, multiprocessing.cpu_count())
root = os.path.join(config.ml_data_dir, foldername, dtype)
dataset = PlacementDataset(
root,
sample_ratio,
stateless,
augment,
background_subtract,
num_channels,
radius,
)
loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
collate_fn=_collate_fn,
pin_memory=True,
num_workers=num_workers,
)
return loader
```
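A minimal loader sketch; `"kit-example"` is a hypothetical folder name that would need to exist under `config.ml_data_dir` with the layout `PlacementDataset` expects:
```python
loader = get_placement_loader("kit-example", dtype="train", batch_size=1,
                              background_subtract=None, num_channels=2)
imgs, labels = next(iter(loader))
print(imgs.shape)   # (2, 2, H, W): initial and final (grayscale + depth) heightmaps
print(len(labels))  # 2: the initial-frame and final-frame label tensors
```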
#### File: code/utils/pointcloud.py
```python
from functools import reduce
import numpy as np
def transform_xyzrgb(xyzrgb, transform):
"""Applies a rigid transform to a colored pointcloud.
Args:
xyzrgb: (ndarray) The colored pointcloud of shape (N, 6).
transform: (ndarray) The rigid transform of shape (4, 4).
Returns:
xyzrgb_t: (ndarray) The transformed colored pointcloud.
"""
num_pts = xyzrgb.shape[0]
xyz = xyzrgb[:, :3]
rgb = xyzrgb[:, 3:]
xyz_h = np.hstack([xyz, np.ones((num_pts, 1))])
xyz_t = (transform @ xyz_h.T).T
xyzrgb_t = np.hstack([xyz_t[:, :3], rgb])
return xyzrgb_t
def transform_xyzg(xyzg, transform):
"""Applies a rigid transform to a grayscale pointcloud.
Args:
xyzg: (ndarray) The grayscale pointcloud of shape (N, 4).
transform: (ndarray) The rigid transform of shape (4, 4).
Returns:
xyzg_t: (ndarray) The transformed colored pointcloud.
"""
num_pts = xyzg.shape[0]
xyz = xyzg[:, :3]
g = xyzg[:, 3:]
xyz_h = np.hstack([xyz, np.ones((num_pts, 1))])
xyz_t = (transform @ xyz_h.T).T
xyzg_t = np.hstack([xyz_t[:, :3], g])
return xyzg_t
def transform_xyz(xyz, transform):
"""Applies a rigid transform to a pointcloud.
Args:
xyz: (ndarray) The pointcloud of shape (N, 3).
transform: (ndarray) The rigid transform of shape (4, 4).
Returns:
(ndarray) The transformed pointcloud.
"""
xyz_h = np.hstack([xyz, np.ones((xyz.shape[0], 1))])
xyz_t = (transform @ xyz_h.T).T
xyz_t = xyz_t[:, :3]
return xyz_t
def deproject(uv, depth, intr, extr):
"""2D -> 3D.
"""
z = depth[uv[:, 0], uv[:, 1]]
zero_zs = z == 0.0
z = z[~zero_zs]
uv = uv[~zero_zs]
x = z * (uv[:, 1] - intr[0, 2]) / intr[0, 0]
y = z * (uv[:, 0] - intr[1, 2]) / intr[1, 1]
xyz = np.vstack([x, y, z]).T
xyz_tr = transform_xyz(xyz, extr)
return xyz_tr
```
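A tiny sketch of `transform_xyz` with a pure translation (illustrative values):
```python
import numpy as np
from form2fit.code.utils.pointcloud import transform_xyz

transform = np.eye(4)
transform[0, 3] = 1.0   # translate +1 along x
xyz = np.zeros((5, 3))  # five points at the origin
print(transform_xyz(xyz, transform))  # every point moves to (1, 0, 0)
```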
|
{
"source": "jettbui/JettBot-py",
"score": 3
}
|
#### File: JettBot-py/cogs/customization.py
```python
import discord
from discord.ext import commands
class Customization(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("Customization commands loaded.")
@commands.command()
async def status(self, ctx, *, args):
activity = discord.Game(name=args)
await self.client.change_presence(status=discord.Status.online, activity=activity)
await ctx.send(f"Status changed to '{args}'")
@status.error
async def handleStatusError(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Missing argument; must specify the status message to be set.\n"
"Usage: !status <message>")
elif isinstance(error, commands.BadArgument):
await ctx.send("Invalid argument.")
def setup(client):
client.add_cog(Customization(client))
```
#### File: JettBot-py/cogs/fun.py
```python
import discord
import random
from discord.ext import commands
class Fun(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("Fun commands loaded.")
@commands.command(aliases=["8ball"])
async def _8ball(self, ctx, *, question):
responses = ["For sure.",
"YEP",
"Probably.",
"I think so.",
"Maybe... maybe not.",
"I don't even know.",
"Probably not.",
"I don't think so.",
"NO."]
await ctx.send(f"**Magic 8Ball**\nQuestion: _{question}_\n<:8ball:734976384782565447>: {random.choice(responses)}")
@_8ball.error
async def handle8BallError(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Missing argument; must specify a question.\n"
"Usage: !8ball <question>")
@commands.command(aliases=["coin"])
async def flip(self, ctx):
responses = ["<:orange_circle:735583801249497138> Heads.",
"<:blue_circle:735725260321718302> Tails."]
await ctx.send(f"**Flip a Coin**\n{random.choice(responses)}")
@commands.command(aliases=["dice"])
async def roll(self, ctx):
responses = ["<:one:735714655934349385> Rolled a one.",
"<:two:735714655934349385> Rolled a two.",
"<:three:735714655934349385> Rolled a three.",
"<:four:735714655934349385> Rolled a four.",
"<:five:735714655934349385> Rolled a five.",
"<:six:735714655934349385> Rolled a six."]
await ctx.send(f"**Roll the Dice**\n{random.choice(responses)}")
def setup(client):
client.add_cog(Fun(client))
```
#### File: JettBot-py/cogs/informational.py
```python
import discord
from discord.ext import commands
class Informational(commands.Cog):
FOOTER = "JettBot by <NAME>, © 2020"
THUMBNAIL = 'https://cdn.discordapp.com/avatars/734847208154857493/30a1820b464b373e85a9a2f66233d7e4.png'
INVITE = 'https://discord.com/api/oauth2/authorize?client_id=734847208154857493&permissions=8&scope=bot'
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("Informational commands loaded.")
@commands.command()
async def about(self, ctx):
embed = discord.Embed(
title="JettBot",
description="A simple and straightforward Discord bot for your Discord server.",
colour=discord.Colour.blue()
)
embed.set_thumbnail(
url=Informational.THUMBNAIL)
embed.add_field(name="Author", value="<NAME>", inline=False)
embed.add_field(name="Invite Link", value=Informational.INVITE, inline=False)
await ctx.send(embed=embed)
@commands.command()
async def help(self, ctx, category=None):
author = ctx.message.author # user to send help message to
if category == None:
general_embed = discord.Embed(
title="Commands",
color=discord.Color.blurple()
)
general_embed.set_footer(text=Informational.FOOTER)
general_embed.set_author(name="JettBot", icon_url=Informational.THUMBNAIL)
general_embed.add_field(name="Customization", value="!help customization", inline=True)
general_embed.add_field(name="Fun", value="!help fun", inline=True)
general_embed.add_field(name="Informational", value="!help informational", inline=True)
general_embed.add_field(name="Moderation", value="!help moderation", inline=True)
general_embed.add_field(name="Utility", value="!help utility", inline=True)
await author.send(embed=general_embed)
elif category == "customization":
customization_embed = discord.Embed(
title="Customization Commands",
color=discord.Color.blue()
)
customization_embed.set_footer(text=Informational.FOOTER)
customization_embed.set_author(name="JettBot", icon_url=Informational.THUMBNAIL)
customization_embed.add_field(name="!status <message>", value="Changes the status of the bot to the given argument", inline=True)
await author.send(embed=customization_embed)
elif category == "fun":
fun_embed = discord.Embed(
title="Fun Commands",
color=discord.Color.magenta()
)
fun_embed.set_footer(text=Informational.FOOTER)
fun_embed.set_author(name='JettBot', icon_url=Informational.THUMBNAIL)
fun_embed.add_field(name="!8ball <question>", value="Ask the magic 8ball a question", inline=True)
fun_embed.add_field(name="!flip", value="Flip a coin", inline=True)
fun_embed.add_field(name="!roll", value="Roll a dice", inline=True)
await author.send(embed=fun_embed)
elif category == "informational":
informational_embed = discord.Embed(
title="Informational Commands",
color=discord.Color.teal()
)
informational_embed.set_footer(text=Informational.FOOTER)
informational_embed.set_author(name='JettBot', icon_url=Informational.THUMBNAIL)
informational_embed.add_field(name="!about", value="Sends information about JettBot", inline=True)
informational_embed.add_field(name="!help", value="Sends available commands for JettBot", inline=True)
await author.send(embed=informational_embed)
elif category == "moderation":
moderation_embed = discord.Embed(
title="Moderation Commands",
color=discord.Color.green()
)
moderation_embed.set_footer(text=Informational.FOOTER)
moderation_embed.set_author(name='JettBot', icon_url=Informational.THUMBNAIL)
moderation_embed.add_field(name="!ban <user>", value="Ban a user from the server", inline=False)
moderation_embed.add_field(name="Requires Permissions:", value="Ban Members", inline=False)
moderation_embed.add_field(name="!clear <amount>", value="Clears a given number of messages in the current channel", inline=False)
moderation_embed.add_field(name="Requires Permissions:", value="Manage Messages", inline=False)
moderation_embed.add_field(name="!kick <user>", value="Kick a user from the server", inline=False)
moderation_embed.add_field(name="Requires Permissions:", value="Kick Members", inline=False)
await author.send(embed=moderation_embed)
elif category == "utility":
utility_embed = discord.Embed(
title="Utility Commands",
color=discord.Color.purple()
)
utility_embed.set_footer(text=Informational.FOOTER)
utility_embed.set_author(name='JettBot', icon_url=Informational.THUMBNAIL)
utility_embed.add_field(name="!echo <message>", value="Repeats message in the given argument", inline=True)
utility_embed.add_field(name="!latency", value="Returns the latency of the bot", inline=True)
utility_embed.add_field(name="!ping", value="Returns a ping message", inline=True)
await author.send(embed=utility_embed)
else:
await author.send("Invalid argument, use '!help' for more info.")
@commands.command()
async def whomadeyou(self, ctx):
        if ctx.message.author.id == 80509831487692800:
await ctx.send("You, <@80509831487692800>")
else:
await ctx.send(f"Not you, <@{ctx.message.author.id}>")
def setup(client):
client.add_cog(Informational(client))
```
|
{
"source": "JettChenT/CommentAnalysis_En",
"score": 3
}
|
#### File: JettChenT/CommentAnalysis_En/getcomment.py
```python
import re, os, json
import googleapiclient.discovery
from urllib.parse import urlparse, parse_qs
with open("key", "r") as f:
    APIKEY = f.readline().strip()
with open("config.json", "r") as f:
cfg = json.load(f)
COMMENTSMAX = cfg["commentsMax"]
# youtube api stuff
def getCommentJson(vId):
# Disable OAuthlib's HTTPS verification when running locally.
# *DO NOT* leave this option enabled in production.
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
api_service_name = "youtube"
api_version = "v3"
DEVELOPER_KEY = APIKEY
youtube = googleapiclient.discovery.build(
api_service_name, api_version, developerKey=DEVELOPER_KEY
)
request = youtube.commentThreads().list(
part="replies", maxResults=COMMENTSMAX, order="relevance", videoId=vId
)
response = request.execute()
return response
# the getcomment function
def getVideoId(value):
"""a function to get the id of a youtube video(copied from stackoverflow)"""
query = urlparse("//" + value)
if query.netloc == "youtu.be":
return query.path[1:]
if query.netloc in ("www.youtube.com", "youtube.com"):
if query.path == "/watch":
p = parse_qs(query.query)
return p["v"][0]
if query.path[:7] == "/embed/":
return query.path.split("/")[2]
if query.path[:3] == "/v/":
return query.path.split("/")[2]
# fail?
return None
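# (comment added for illustration) The "//" prefix above lets urlparse treat
# scheme-less links as network paths, so both of these resolve:
#   getVideoId("www.youtube.com/watch?v=abc123xyz00") -> "abc123xyz00"
#   getVideoId("youtu.be/abc123xyz00")                -> "abc123xyz00"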
def getcomment(url):
VideoId = getVideoId(url)
if VideoId == None:
print("fail!!")
return [], 0
commentJson = getCommentJson(VideoId)
print(commentJson)
```
|
{
"source": "JettChenT/Encrypto",
"score": 3
}
|
#### File: JettChenT/Encrypto/app.py
```python
import re
from fastapi import FastAPI, Response, status
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import RedirectResponse
from encryptMessage import Encryptor
from pydantic import BaseModel
app = FastAPI(title="The encryptii api")
enc = Encryptor()
origins = [
"http://127.0.0.1",
"http://localhost:3000",
"https://.*"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class encryptINP(BaseModel):
msg: str
auto_destroy: bool = False
emoji: bool = False
class decryptINP(BaseModel):
dec: str
destroy: bool = True
@app.get("/", status_code=status.HTTP_307_TEMPORARY_REDIRECT)
async def routePage():
response = RedirectResponse(url="/docs")
return response
@app.get("/ping")
def pingpong():
return {"ping": "pong"}
@app.post("/encrypt", status_code=status.HTTP_201_CREATED)
async def encryption(pmt: encryptINP):
msg = pmt.msg
encrypted = enc.encrypt(msg, pmt.emoji)
resp = {"encrypted": encrypted}
return resp
@app.get("/encrypt", status_code=status.HTTP_201_CREATED)
async def getEncryption(msg: str):
encrypted = enc.encrypt(msg)
resp = {"encrypted": encrypted}
return resp
@app.post("/decrypt", status_code=status.HTTP_200_OK)
async def decryption(pmt: decryptINP, response: Response):
dec = pmt.dec.encode()
des = pmt.destroy
d = enc.decrypt(dec, des)
if d == -1:
response.status_code = status.HTTP_404_NOT_FOUND
return {"msg": "Message does not exist or was destroyed"}
else:
return {"msg": str(d)}
@app.get("/decrypt", status_code=status.HTTP_200_OK)
async def getDecryption(dec: str, response: Response, des: bool = True):
dec = dec.encode()
d = enc.decrypt(dec, des)
if d == -1:
response.status_code = status.HTTP_404_NOT_FOUND
return {"msg": "Message does not exist or was destroyed"}
else:
return {"msg": str(d)}
```
#### File: JettChenT/Encrypto/test.py
```python
import random
import pprint
from fastapi.testclient import TestClient
from app import app
client = TestClient(app)
def getRandLetter():
return chr(random.randint(ord("a"), ord("z")))
def getRandWord():
wlen = 10
wlst = [getRandLetter() for _ in range(wlen)]
word = "".join(wlst)
return word
def dec(curWrd):
diffList = []
prm = {"msg": curWrd}
    rsp = client.post("/encrypt", json=prm).json()
encrypted = rsp["encrypted"]
diffList.append(len(encrypted) / len(curWrd))
prm = {"dec": encrypted, "destroy": "True"}
rbp = client.post("/decrypt", json=prm).json()
pprint.pprint(rbp)
decrypted = rbp["msg"]
return decrypted
def test_answer():
curWrd = getRandWord()
assert dec(curWrd) == curWrd
```
|
{
"source": "JettChenT/realthink",
"score": 4
}
|
#### File: HighA/Algorithm/binTree.py
```python
class CLS_node():
"""
A Node in binary tree
"""
def __init__(self,v,l,r):
self.value = v
self.left = l
self.right = r
def __str__(self):
if self.left == None and self.right == None:
return str(self.value)
elif self.left == None:
return str(self.value)+str(self.right)
elif self.right == None:
return str(self.left)+str(self.value)
else:
            return str(self.left)+str(self.value)+str(self.right)
class CLS_tree():
"""
The binary tree
"""
def __init__(self,r):
self.root = r
self.widLst = [0 for _ in range(self.dep(self.root))]
def sortInsert(self,node,start):
# print(start)
if node.value>start.value:
if start.right==None:
start.right = node
else:
self.sortInsert(node,start.right)
else:
if start.left==None:
start.left = node
else:
self.sortInsert(node,start.left)
# print(root)
return
def dep(self,start,dept=1):
if start == None:
return dept
ld = self.dep(start.left,dept+1)
rd = self.dep(start.right,dept+1)
if ld>rd:
return ld
else:
return rd
def wid(self,start,dept=0):
# print(dept)
if(dept==0):
self.widLst = [0 for _ in range(self.dep(self.root))]
if start == None:
return
self.widLst[dept]+=1
ld = self.wid(start.left,dept+1)
rd = self.wid(start.right,dept+1)
return max(self.widLst)
def __repr__(self):
        return str(self.root)
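# Minimal demo (added for illustration): build a small BST with sortInsert and
# query its depth/width. Note dep() counts the trailing None level, so a lone
# root already reports depth 2.
if __name__ == "__main__":
    tree = CLS_tree(CLS_node(5, None, None))
    for v in (3, 8, 1, 4):
        tree.sortInsert(CLS_node(v, None, None), tree.root)
    print(tree.root)            # in-order concatenation: 13458
    print(tree.dep(tree.root))  # 4
    print(tree.wid(tree.root))  # 2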
```
#### File: HighA/Algorithm/quicksort.py
```python
from random import randint
lis = []
for i in range(100):
lis.append(randint(0,100))
def quick_one(lis):
"""
the first item of the list is the key, all items that are less than the key will be on the left of the key,
all items that are larger will be on the right of the key, returns the list and the index of the key.
"""
nList = lis.copy()
key = lis[0]
keyI = 0
pMin = 0
pMax = len(lis)-1
while(pMin!=pMax):
while pMax>keyI:
if nList[pMax]<=key:
nList[pMax],nList[keyI] = nList[keyI],nList[pMax]
keyI = pMax
break
pMax-=1
while pMin<keyI:
if nList[pMin]>key:
nList[pMin],nList[keyI] = nList[keyI],nList[pMin]
keyI = pMin
break
pMin+=1
return nList,keyI
def quick_onePy(lis):
"""python styled quick one"""
key = lis[0]
smLis = []
bgLis = []
for n in lis[1:]:
if n<=key:
smLis.append(n)
else:
bgLis.append(n)
return smLis+[key]+bgLis,len(smLis)
def quick_sortPy(lis):
"""
quick sort with recursion, python styled
"""
nLis = lis.copy()
if len(lis) <= 1:
return lis
else:
nLis,keyI = quick_onePy(nLis)
        return quick_sortPy(nLis[:keyI])+[nLis[keyI]]+quick_sortPy(nLis[keyI+1:])
def quick_sort(lis):
"""
quick sort with recursion
"""
nLis = lis.copy()
if len(lis) <= 1:
return lis
else:
nLis,keyI = quick_one(nLis)
# print(quick_sort(nLis[:keyI])+[nLis[keyI]]+quick_sort(nLis[keyI+1:]))
return quick_sort(nLis[:keyI])+[nLis[keyI]]+quick_sort(nLis[keyI+1:])
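# Quick self-check (added for illustration): the recursive variants should
# agree with Python's built-in sorting on a tiny input.
assert quick_sort([3, 1, 2]) == [1, 2, 3]
assert quick_sortPy([3, 1, 2]) == [1, 2, 3]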
def quick_oneNR(lis,pMin,keyI,pMax):
nList = lis.copy()
key = lis[keyI]
while(pMin!=pMax):
while pMax>keyI:
if nList[pMax]<=key:
nList[pMax],nList[keyI] = nList[keyI],nList[pMax]
keyI = pMax
break
pMax-=1
while pMin<keyI:
if nList[pMin]>key:
nList[pMin],nList[keyI] = nList[keyI],nList[pMin]
keyI = pMin
break
pMin+=1
# print(nList)
return nList,keyI
def quick_sortNR(lis):
"""quick sort without recursion"""
mem = [(0,0,len(lis)-1)]
c = 1
while len(mem)!=0:
p = 0
# print(c)
c+=1
# print(mem[p])
lis,keyI = quick_oneNR(lis,mem[p][0],mem[p][0],mem[p][2])
if len(lis) == 0:
# print("break")
break
else:
if (keyI-1)-mem[p][0]>0:
mem.append((mem[p][0],mem[p][0],keyI-1))
if (mem[p][-1])-(keyI+1)>0:
mem.append((keyI+1,keyI+1,mem[p][-1]))
mem.pop(p)
# print("ed")
# print("out of while loop")
return lis
print(lis)
nlis = quick_sortNR(lis)
print("done")
print(nlis)
if nlis==sorted(lis):
print("YAY!")
```
#### File: HighA/Function/main.py
```python
import pygame,random,math
# A program that draws a function
# Some basic settings
PRECISION = 0.1
WIDTH,HEIGHT = 1024,768
SCALE = 50
BG_COLOR = (0,0,0)
START_X = 0-WIDTH//SCALE//2
AXIS_COLOR = (225,225,225)
pointColor = (92,167,86)
LINE_COLOR = (random.randint(0,225),random.randint(0,225),random.randint(0,225))
ORIG_POINT = (WIDTH//2,HEIGHT//2)
# FUNCTION: draw a point
def disText(text,scr,x,y,font,color):
print(text)
textSurface = font.render(text,False,color)
screen.blit(textSurface,(x,y))
def draw_point(screen,c,x,y):
nx = int(ORIG_POINT[0]+x*SCALE)
ny = int(ORIG_POINT[1]+y*SCALE)
global lastx,lasty
# print(nx,ny)
pygame.draw.line(screen,pointColor,(lastx,lasty),(nx,ny))
lastx,lasty = nx,ny
return
def cal(x,fn):
if fn == 0:
return 0-x
elif fn == 1:
return 0-x*0.5
elif fn == 2:
return 0-math.cos(x)
elif fn == 3:
return 0-x**2
else:
return None
# init
curX = START_X
pygame.init()
screen = pygame.display.set_mode((WIDTH,HEIGHT))
clock = pygame.time.Clock()
screen.fill(BG_COLOR)
fontScore = pygame.font.Font(None,10)
fn = 0
lastx,lasty = int(ORIG_POINT[0]+START_X*SCALE),int(ORIG_POINT[1]+cal(curX,fn)*SCALE)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_a:
curX = START_X
fn = (fn+1)%4
                lastx,lasty = int(ORIG_POINT[0]+START_X*SCALE),int(ORIG_POINT[1]+cal(curX,fn)*SCALE)
pointColor = (random.randint(0,255),random.randint(0,255),random.randint(0,255))
# screen.fill(BG_COLOR)
for n in range(-50,50):
nx = int(ORIG_POINT[0]+n*SCALE)
ny = int(ORIG_POINT[1]+n*SCALE)
disText(str(n),screen,nx,HEIGHT//2,fontScore,(255,255,255))
disText(str(n),screen,WIDTH//2,ny,fontScore,(255,255,255))
pygame.draw.line(screen,(255,255,255),(0,HEIGHT//2),(WIDTH,HEIGHT//2))
pygame.draw.line(screen,(255,255,255),(WIDTH//2,0),(WIDTH//2,HEIGHT))
pygame.draw.line(screen,(255,255,255),(WIDTH//2-20,30),(WIDTH//2,0))
pygame.draw.line(screen,(255,255,255),(WIDTH//2+20,30),(WIDTH//2,0))
pygame.draw.line(screen,(255,255,255),(WIDTH-20,HEIGHT//2+20),(WIDTH,HEIGHT//2))
pygame.draw.line(screen,(255,255,255),(WIDTH-20,HEIGHT//2-20),(WIDTH,HEIGHT//2))
curY = cal(curX,fn)
print(curX,curY)
draw_point(screen,pointColor,curX,curY)
curX+=PRECISION
pygame.display.update()
# clock.tick(60)
```
#### File: HighA/LodeRunner/read.py
```python
from PIL import Image
from pprint import pprint
import math
def get_image(im):
if type(im) == str:
image = Image.open(im,'r')
else:
image = im
width, height = image.size
pixel_values = list(image.getdata())
# if image.mode == 'RGB':
# channels = 3
# elif image.mode == 'L':
# channels = 1
# else:
# print("Unknown mode: %s" % image.mode)
# return None
# # pixel_values = numpy.array(pixel_values).reshape((width, height, channels))
return pixel_values
def get_size(image_path):
"""Get a numpy array of an image so that one can access values[x][y]."""
image = Image.open(image_path, 'r')
width, height = image.size
return width,height
def avgCl(lst):
"""return the average RGB of a RGB list"""
c1=c2=c3=0
n = len(lst)
for c in lst:
c1+=c[0]
c2+=c[1]
c3+=c[2]
c1,c2,c3=c1/n,c2/n,c3/n
return [c1,c2,c3]
def dist(x1,y1,z1,x2,y2,z2):
return math.sqrt((x2-x1)**2+(y2-y1)**2+(z2-z1)**2)
def blockDist(block1,block2):
return dist(block1.avg[0],block1.avg[1],block1.avg[2],block2.avg[0],block2.avg[1],block2.avg[2])
def find(lst,num):
for i in range(len(lst)):
if lst[i]==num:
return i
return None
def reco(blst,target):
dislist = []
for b in blst:
dislist.append(blockDist(b,target))
return str(blst[find(dislist,min(dislist))])
class block(object):
def __init__(self,url,rstr=''):
self.url = url
self.pixel_values = get_image(url)
self.avg = avgCl(self.pixel_values)
self.rstr = rstr
def __repr__(self):
return self.url
def __str__(self):
return self.rstr
box = block('./blocks/box.bmp','B')
floor = block('./blocks/floor.bmp','#')
ground = block('./blocks/ground.bmp','=')
ladder = block('./blocks/ladder.bmp','H')
bar = block('./blocks/bar.bmp','-')
police = block('./blocks/lp02.bmp','P')
runner = block('./blocks/lr01.bmp','R')
vground = block('./blocks/vground.bmp','.')
void = block('./blocks/void.bmp',' ')
# test = block('./blocks/test.png')
# print(test)
blockList = [box,floor,ground,ladder,bar,police,runner,vground,void]
# print(reco(blockList,test))
im = Image.open('./blocks/level06.png')
sampleBlock = Image.open('./blocks/floor.bmp')
print(sampleBlock.size)
print(im.size)
nMap = [[0 for n in range(im.size[1]//sampleBlock.size[1])]for _ in range(im.size[0]//sampleBlock.size[0])]
cnt = 0
print(im.size[0]//sampleBlock.size[0])
fout = open('./maps/level06.txt','w')
for y in range(im.size[1]//sampleBlock.size[1]):
tstr = ''
for x in range(im.size[0]//sampleBlock.size[0]):
cnt+=1
area = (x*sampleBlock.size[0],y*sampleBlock.size[1],x*sampleBlock.size[0]+sampleBlock.size[0],y*sampleBlock.size[1]+sampleBlock.size[1])
nimg = im.crop(box=area)
nimg.save('./tmp/block{c}.bmp'.format(c=cnt),'BMP')
# pprint(get_image(im))
nBlock = block(nimg)
# print(cnt)
# if cnt==34:
# print(reco(blockList,nBlock),end='')
tstr+=reco(blockList,nBlock)
tstr+='\n'
fout.write(tstr)
fout.close()
```
#### File: MidC/games/app.py
```python
import pygame
import sys
from random import randint,random
SCREEN_WIDTH, SCREEN_HEIGHT = 1024, 768
# The definition of class CLS_disk
class CLS_disk(object):
group = []
def __init__(self, rect, color, speedX, speedY):
self.rect = pygame.Rect(rect)
self.color = color
self.speedX, self.speedY, self.accY = speedX, speedY, 0.02
CLS_disk.group.append(self)
def run(self):
self.speedY += self.accY
self.rect.x += self.speedX
self.rect.y += self.speedY
def draw(self, scr):
pygame.draw.ellipse(scr, self.color, self.rect, 0)
# the definition of class CLS_gun
class CLS_gun(object):
def __init__(self, x, y, r):
self.x, self.y, self.r = x, y, r
self.score = 0
self.diskNum = 20
self.bulletNum = 0
self.fireTime = 0
def update(self):
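        # (comment added) fireTime is cleared 100 ms after firing: the boolean
        # below evaluates to 0 or 1, so the product keeps or zeroes the stamp.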
self.fireTime *= (1-(pygame.time.get_ticks() - self.fireTime > 100))
def draw(self, scr):
self.update()
x, y, r = self.x, self.y, self.r
pygame.draw.circle(scr, (225, 225, 224), (x, y), r, 1)
pygame.draw.circle(scr, (225, 225, 225), (x, y), int(r*0.4), 1)
pygame.draw.line(scr, (225, 225, 225), (x-r, y), (x+r, y), 1)
pygame.draw.line(scr, (225, 225, 225), (x, y-r), (x, y+1), 1)
if self.fireTime > 0:
pygame.draw.polygon(scr, (225, 0, 0), [
(x-int(r*0.4), y-4), (x-int(r*0.4), y+4),(x,y)], 0)
pygame.draw.polygon(scr, (225, 0, 0), [
(x+int(r*0.4), y-4), (x-int(r*0.4), y-4),(x,y)], 0)
def RT_draw(screen,data,clrList,x0,y0,w,scale):
for dy in range(len(data)):
line = data[dy]
for dx in range(w):
clr = clrList[line&1]
tx,ty = x0+(w-dx-1)*scale,y0+dy*scale
if scale > 1:
pygame.draw.rect(screen,clr,(tx,ty,scale,scale),0)
else:
                screen.set_at((tx,ty),clr)
line = line >>1
return
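# (comment added for illustration) Each entry of `data` is one bit-packed row;
# the least significant bit lands at the right edge because bits are consumed
# low-to-high while tx runs right-to-left. E.g. 0x04 == 0b00000100 lights the
# third cell from the right of an 8-wide row.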
# run
pygame.init()
aData = [0x04,0x0a,0x11,0x11,0x1f,0x11,0x11,0x00]
brickData = [0xff,0x04,0x04,0x04,0xff,0x80,0x80,0x80]
brickColor = [[64,64,64],[225,127,80]]
brickScale = 4
treeData =[0x02,0x15,0x07,0x19,0x2e,0x1f,0xfb,0x6e]
treeColor = [[0,50,0],[0,120,0]]
treeScale = 4
screen = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
clock = pygame.time.Clock()
font = pygame.font.Font(None, 32)
pygame.mouse.set_visible(False)
gun = CLS_gun(SCREEN_WIDTH//2,SCREEN_HEIGHT//2,30)
t0 = pygame.time.get_ticks()
t1 = randint(0,3000)+3000
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN and gun.bulletNum>0:
gun.bulletNum-=1
i=0
gun.fireTime = pygame.time.get_ticks()
            while i<len(CLS_disk.group):
                d = CLS_disk.group[i]
                if d.rect.collidepoint(gun.x,gun.y):
                    CLS_disk.group.pop(i)
                    gun.score+=1
                    gun.diskNum+=2
                else:
                    i+=1
if event.type == pygame.MOUSEMOTION:
gun.x,gun.y= event.pos
if event.type == pygame.QUIT:
pygame.quit()
if pygame.time.get_ticks()-t0 > t1 and gun.diskNum>0:
gun.diskNum -= 1
gun.bulletNum = 2
w = randint(40,80)
h = w//2
disk = CLS_disk((0,SCREEN_HEIGHT,w,h),(0,225,0),random()+1.5,random()-4.5)
t0 = pygame.time.get_ticks()
t1 = randint(0,3000)+3000
if random()<0.3:
disk = CLS_disk((SCREEN_WIDTH,SCREEN_HEIGHT,w,h),(255,0,0),random()-2.5,random()-4.5)
screen.fill((0,0,225))
for x in range(0,SCREEN_WIDTH,32):
for y in range((SCREEN_HEIGHT//4)*3,SCREEN_HEIGHT,32):
RT_draw(screen,brickData,brickColor,x,y,8,4)
for x in range(0,SCREEN_WIDTH,32):
for y in range(SCREEN_HEIGHT//2,SCREEN_HEIGHT//4*3,32):
RT_draw(screen,treeData,treeColor,x,y,8,4)
for disk in CLS_disk.group:
disk.run()
disk.draw(screen)
gun.draw(screen)
img = font.render('score:'+str(gun.score)+' disks:'+str(gun.diskNum),True,(240,0,140))
screen.blit(img,(0,0))
pygame.display.update()
clock.tick(300)
```
#### File: MidC/games/editor.py
```python
import pygame,sys
SCREEN_WIDTH,SCREEN_HEIGHT = 1000,700
def RT_draw(screen,data,clrList,x0,y0,w,scale):
for dy in range(len(data)):
line = data[dy]
for dx in range(w):
clr = clrList[line&1]
tx,ty = x0+(w-dx-1)*scale,y0+dy*scale
if scale > 1:
pygame.draw.rect(screen,clr,(tx,ty,scale,scale),0)
else:
                screen.set_at((tx,ty),clr)
line = line >>1
return
def RT_block_read(fn):
with open (fn,'r') as f:
txtLine = f.readlines()[0]
dataList = txtLine.split()
block = [int(dataList[p],16) for p in range(len(dataList))]
return block
def RT_block_init(fn,cList,dm,scale):
    pic = pygame.Surface((dm*scale, dm*scale))
blockList = RT_block_read(fn)
if blockList == None:
return None
RT_draw(pic,blockList,cList,0,0,dm,scale)
return pic
class CLS_step(object):
def __init__(self,dx,dy):
self.dx, self.dy = dx,dy
return
class CLS_stack(object):
def __init__(self):
self.nList = []
return
def PUSH(self,step):
self.nList.append(step)
return
def POP(self):
if len(self.nList) == 0:
return None
step= self.nList[-1]
self.nList.pop()
return step
class CLS_grid(object):
def __init__(self,x0,y0,n,scale,d,cList=[(0,0,0),(255,255,255)]):
self.x0,self.y0 = x0,y0
self.n, self.scale = n,scale
self.data = [[0 for x in range(n)] for y in range(n)]
self.block=['0']*n
self.cList = cList
self.d = d
return
def draw(self,scr):
lineC=(255,255,255)
x0,y0,scl = self.x0,self.y0,self.scale
d=self.n*scl + 1
pygame.draw.line(scr, lineC, (x0,y0+d), (x0+d,y0+d), 1)
pygame.draw.line(scr, lineC, (x0+d,y0), (x0+d,y0+d), 1)
for y in range(self.n):
n=0
for x in range(self.n):
n+=2**(self.n-x-1)*self.data[y][x]
gx,gy = x*self.scale, y*self.scale
pygame.draw.line(scr, lineC, (x0+gx,y0+gy),\
(x0+gx,y0+gy+scl), 1)
pygame.draw.line(scr, lineC, (x0+gx, y0+gy),\
(x0+gx+scl,y0+gy), 1)
pygame.draw.rect(scr, self.cList[self.data[y][x]], (x0+gx+1,y0+gy+1,scl-1,scl-1), 0)
                scr.set_at((x+100,y+40),self.cList[self.data[y][x]])
self.block[y]=hex(n)
img = font.render(self.block[y],True,lineC)
bx,by = 500,y*self.scale+150
            scr.blit(img,(bx,by))
return
def mousedown(self,mx,my):
x,y = (mx-15)//self.scale,(my-15)//self.scale-self.d
if 0<=x<self.n and 0<=y<self.n:
self.data[y][x] = 1-self.data[y][x]
def save(self,fname):
# save
# generate bytestr
byteStr = ' '.join(self.block)
with open(fname,'w') as f:
f.write(byteStr)
return
def clear(self):
self.data = [[0 for x in range(self.n)] for y in range(self.n)]
return
# init
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))
font = pygame.font.Font(None, 32)
cList = [(0,0,0),(0,255,255)]
grid = CLS_grid(20,120,16,20,5,cList)
grid.draw(screen)
stack = CLS_stack()
clock = pygame.time.Clock()
# main program
while True:
screen.fill((0,0,0))
grid.draw(screen)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONUP and event.button == 1:
mx,my = event.pos[0],event.pos[1]
grid.mousedown(mx,my)
step = CLS_step(mx,my)
stack.PUSH(step)
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_s:
print('saved😊😊')
grid.save('demo.txt')
elif event.key == pygame.K_c:
print("cleared")
grid.clear()
elif event.key == pygame.K_z:
last_step = stack.POP()
if last_step != None:
grid.mousedown(last_step.dx,last_step.dy)
elif event.key == pygame.K_p:
grid.clear()
for step in stack.nList:
grid.mousedown(step.dx,step.dy)
clock.tick(10)
pygame.display.update()
elif event.type == pygame.QUIT:
pygame.quit()
```
|
{
"source": "jett/couchbase-lite-C",
"score": 2
}
|
#### File: python/CouchbaseLite/Blob.py
```python
from ._PyCBL import ffi, lib
from .Collections import *
from .common import *
class Blob (object):
def __init__(self, data, *, contentType =None, fdict =None):
if fdict != None:
super.__init__(lib.CBLBlob_Get(fdict), "Dict is not a Blob")
else:
super.__init__(lib.CBLBlob_CreateWithData(contentType, asSlice(data)),
"Failed to create Blob")
@property
def digest(self):
return pystr(lib.CBLBlob_Digest(self._ref))
@property
def length(self):
return lib.CBLBlob_Length(self._ref)
@property
def contentType(self):
return pystr(lib.CBLBlob_ContentType(self._ref))
@property
def data(self):
if "_data" in self.__dict__:
return self._data
elif self.digest != None:
sliceResult = lib.CBLBlob_LoadContent(self._ref, gError)
# OPT: This copies the bytes
result = sliceResultToBytes(sliceResult)
lib.FLSliceResult_Release(sliceResult)
return result
else:
return None
def __repr__(self):
r = "Blob["
if self.contentType != None:
r += self.contentType
if self.length != None:
if self.contentType != None:
r += ", "
            r += str(self.length) + " bytes"
return r + "]"
def _jsonEncodable(self):
return decodeFleeceDict( lib.CBLBlob_Properties(self._ref), depth=99 )
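# Illustrative usage (comment added; assumes the native CouchbaseLite library
# is built and loadable through _PyCBL):
#   b = Blob(b"hello world", contentType="text/plain")
#   print(b.contentType, b.length, b.digest)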
```
|
{
"source": "jett-crowdis/deTiN",
"score": 2
}
|
#### File: deTiN/deTiN/deTiN.py
```python
import argparse
import os
import sys
import numpy as np
import pandas as pd
from itertools import compress
import copy
import deTiN_utilities as du
import deTiN_SSNV_based_estimate as dssnv
import deTiN_aSCNA_based_estimate as dascna
import numpy.ma as ma
class input:
"""class which holds the required detin somatic data prior to model"""
def __init__(self, args, ascna_probe_number_filter=200, ascna_SNP_number_filter=20, coverage_threshold=15,
SSNV_af_threshold=0.15, aSCNA_variance_threshold=0.025):
# related to inputs from command line
self.call_stats_file = args.mutation_data_path
self.seg_file = args.cn_data_path
self.tumor_het_file = args.tumor_het_data_path
self.normal_het_file = args.normal_het_data_path
self.exac_db_file = args.exac_data_path
self.indel_file = args.indel_data_path
self.indel_type = args.indel_data_type
self.only_ascnas = args.only_ascnas
        if type(args.weighted_classification) == str:
            self.weighted_classification = args.weighted_classification.lower() == 'true'
else:
self.weighted_classification = args.weighted_classification
if type(args.mutation_prior) == str:
self.mutation_prior = float(args.mutation_prior)
else:
self.mutation_prior = args.mutation_prior
if type(args.TiN_prior) == str:
self.TiN_prior = float(args.TiN_prior)
else:
self.TiN_prior = args.TiN_prior
if type(args.resolution) == str:
self.resolution = int(args.resolution)
else:
self.resolution = args.resolution
self.output_path = args.output_dir
self.output_name = args.output_name
if type(args.use_outlier_removal) == str:
if args.use_outlier_removal.lower() == 'false':
self.use_outlier_removal = False
else:
self.use_outlier_removal = True
else:
self.use_outlier_removal = args.use_outlier_removal
if type(args.aSCNA_threshold) == str:
self.aSCNA_thresh = float(args.aSCNA_threshold)
else:
self.aSCNA_thresh = args.aSCNA_threshold
try:
self.ascna_probe_number_filter = float(args.ascna_probe_number_filter)
except AttributeError:
self.ascna_probe_number_filter = ascna_probe_number_filter
try:
self.ascna_SNP_number_filter = float(args.ascna_SNP_number_filter)
except AttributeError:
self.ascna_SNP_number_filter = ascna_SNP_number_filter
try:
self.coverage_threshold = float(args.coverage_threshold)
except AttributeError:
self.coverage_threshold = coverage_threshold
try:
self.SSNV_af_threshold = float(args.SSNV_af_threshold)
except AttributeError:
self.SSNV_af_threshold = SSNV_af_threshold
try:
self.aSCNA_variance_threshold = float(args.aSCNA_variance_threshold)
except AttributeError:
self.aSCNA_variance_threshold = aSCNA_variance_threshold
try:
self.CancerHotSpotsBED = args.cancer_hot_spots
except AttributeError:
            self.CancerHotSpotsBED = 'NA'
# related to inputs from class functions
self.call_stats_table = []
self.seg_table = []
self.het_table = []
self.candidates = []
self.indel_table = []
self.skew = 0.5
def read_call_stats_file(self):
fields = ['contig', 'position', 'ref_allele', 'alt_allele', 'tumor_name', 'normal_name', 't_alt_count',
't_ref_count'
, 'n_alt_count', 'n_ref_count', 'failure_reasons', 'judgement','t_lod_fstar']
fields_type = {'contig': str, 'position': np.int, 'ref_allele': str, 'alt_allele': str, 'tumor_name': str,
'normal_name': str,
't_alt_count': np.int, 't_ref_count': np.int, 'n_alt_count': np.int, 'n_ref_count': np.int,
'failure_reasons': str, 'judgement': str}
try:
self.call_stats_table = pd.read_csv(self.call_stats_file, '\t', index_col=False,
comment='#', usecols=fields, dtype=fields_type)
except (ValueError, LookupError):
try:
fields = ['contig', 'position', 'ref_allele', 'alt_allele', 'tumor_name', 'normal_name', 't_alt_count',
't_ref_count'
, 'n_alt_count', 'n_ref_count', 'failure_reasons', 'judgement']
self.call_stats_table = pd.read_csv(self.call_stats_file, '\t', index_col=False,
comment='#', usecols=fields, dtype=fields_type)
except (ValueError, LookupError):
print('Error reading call stats skipping first two rows and trying again')
self.call_stats_table = pd.read_csv(self.call_stats_file, '\t', index_col=False,
comment='#', skiprows=2, usecols=fields, dtype=fields_type)
if type(self.call_stats_table['contig'][0]) == str:
self.call_stats_table['Chromosome'] = du.chr2num(np.array(self.call_stats_table['contig']))
else:
self.call_stats_table['Chromosome'] = np.array(self.call_stats_table['contig']) - 1
self.call_stats_table = self.call_stats_table[np.isfinite(self.call_stats_table['Chromosome'])]
self.call_stats_table['genomic_coord_x'] = du.hg19_to_linear_positions(
np.array(self.call_stats_table['Chromosome']), np.array(self.call_stats_table['position']))
self.n_calls_in = len(self.call_stats_table)
self.call_stats_table.reset_index(inplace=True, drop=True)
def read_het_file(self):
t_het_header = du.read_file_header(self.tumor_het_file)
n_het_header = du.read_file_header(self.normal_het_file)
cols_t_type = {t_het_header[0]: str}
cols_n_type = {n_het_header[0]: str}
tumor_het_table = pd.read_csv(self.tumor_het_file, '\t', index_col=False, low_memory=False, comment='#',
dtype=cols_t_type)
normal_het_table = pd.read_csv(self.normal_het_file, '\t', index_col=False, low_memory=False, comment='#',
dtype=cols_n_type)
tumor_het_table = du.fix_het_file_header(tumor_het_table)
normal_het_table = du.fix_het_file_header(normal_het_table)
tumor_het_table['Chromosome'] = du.chr2num(np.array(tumor_het_table['CONTIG']))
normal_het_table['Chromosome'] = du.chr2num(np.array(normal_het_table['CONTIG']))
tumor_het_table = tumor_het_table[np.isfinite(tumor_het_table['Chromosome'])]
tumor_het_table['genomic_coord_x'] = du.hg19_to_linear_positions(np.array(tumor_het_table['Chromosome']),
np.array(tumor_het_table['POSITION']))
normal_het_table = normal_het_table[np.isfinite(normal_het_table['Chromosome'])]
normal_het_table['genomic_coord_x'] = du.hg19_to_linear_positions(np.array(normal_het_table['Chromosome']),
np.array(normal_het_table['POSITION']))
tumor_het_table['AF'] = np.true_divide(tumor_het_table['ALT_COUNT'],
tumor_het_table['ALT_COUNT'] + tumor_het_table['REF_COUNT'])
normal_het_table['AF'] = np.true_divide(normal_het_table['ALT_COUNT'],
normal_het_table['ALT_COUNT'] + normal_het_table['REF_COUNT'])
self.het_table = pd.merge(normal_het_table, tumor_het_table, on='genomic_coord_x', suffixes=('_N', '_T'))
def read_seg_file(self):
if self.seg_file == 'NULL':
self.seg_table = pd.DataFrame(index=[0],columns=['Chromosome','Start.bp','End.bp','n_probes','length','f','tau','genomic_coord_start','genomic_coord_end'])
            self.het_table = pd.DataFrame(index=[0],columns=['seg_id','tau','f','d','AF_T','AF_N','Chromosome','genomic_coord_x','ALT_COUNT_N',
            'ALT_COUNT_T','REF_COUNT_N','REF_COUNT_T'])
else:
seg_header = du.read_file_header(self.seg_file)
cols_seg_type = {seg_header[0]: str}
self.seg_table = pd.read_csv(self.seg_file, '\t', index_col=False, low_memory=False, comment='#',
dtype=cols_seg_type)
self.seg_table = du.fix_seg_file_header(self.seg_table)
self.seg_table['Chromosome'] = du.chr2num(np.array(self.seg_table['Chromosome']))
self.seg_table['genomic_coord_start'] = du.hg19_to_linear_positions(np.array(self.seg_table['Chromosome']),
np.array(self.seg_table['Start.bp']))
self.seg_table['genomic_coord_end'] = du.hg19_to_linear_positions(np.array(self.seg_table['Chromosome']),
np.array(self.seg_table['End.bp']))
def annotate_call_stats_with_allelic_cn_data(self):
f_acs = np.zeros([self.n_calls_in, 1]) + 0.5
tau = np.zeros([self.n_calls_in, 1]) + 2
for i, r in self.seg_table.iterrows():
f_acs[np.logical_and(np.array(self.call_stats_table['genomic_coord_x']) >= r['genomic_coord_start'],
np.array(self.call_stats_table['genomic_coord_x']) <= r['genomic_coord_end'])] = r.f
tau[np.logical_and(np.array(self.call_stats_table['genomic_coord_x']) >= r['genomic_coord_start'],
np.array(self.call_stats_table['genomic_coord_x']) <= r[
'genomic_coord_end'])] = r.tau + 0.001
self.call_stats_table['tau'] = tau
self.call_stats_table['f_acs'] = f_acs
def annotate_het_table(self):
seg_id = np.zeros([len(self.het_table), 1]) - 1
tau = np.zeros([len(self.het_table), 1]) + 2
f = np.zeros([len(self.het_table), 1]) + 0.5
for seg_index, seg in self.seg_table.iterrows():
het_index = np.logical_and(self.het_table['genomic_coord_x'] >= seg['genomic_coord_start'],
self.het_table['genomic_coord_x'] <= seg['genomic_coord_end'])
ix = list(compress(range(len(het_index)), het_index))
seg_id[ix] = seg_index
tau[ix] = seg['tau']
f[ix] = seg['f']
self.het_table['seg_id'] = seg_id
self.het_table['tau'] = tau
self.het_table['f'] = f
d = np.ones([len(self.het_table), 1])
d[np.array(self.het_table['AF_T'] <= 0.5, dtype=bool)] = -1
self.skew = 0.5
self.het_table['d'] = d
def read_and_preprocess_SSNVs(self):
self.read_call_stats_file()
self.read_seg_file()
self.annotate_call_stats_with_allelic_cn_data()
if not self.indel_file == 'None':
if not self.indel_type == 'None':
self.indel_table = du.read_indel_vcf(self.indel_file, self.seg_table, self.indel_type)
else:
print('Warning: if indels are provided you must also specify indel data source using --indel_data_type')
print('no indels will be returned')
self.indel_file = 'None'
self.indel_type = 'None'
def read_and_preprocess_aSCNAs(self):
self.read_seg_file()
self.read_het_file()
self.seg_table = du.filter_segments_based_on_size_f_and_tau(self.seg_table, self.aSCNA_thresh,
self.ascna_probe_number_filter)
self.annotate_het_table()
self.het_table = du.remove_sites_near_centromere_and_telomeres(self.het_table)
def read_and_preprocess_data(self):
self.read_and_preprocess_SSNVs()
self.read_and_preprocess_aSCNAs()
class output:
""" combined from deTiN's models
reclassified SSNVs based on TiN estimate are labeled KEEP in judgement column
self.SSNVs['judgement'] == KEEP
confidence intervals (CI_tin_high/low) represent 95% interval
"""
def __init__(self, input, ssnv_based_model, ascna_based_model):
# previous results
self.input = input
self.ssnv_based_model = ssnv_based_model
self.ascna_based_model = ascna_based_model
# useful outputs
self.SSNVs = input.candidates
self.joint_log_likelihood = np.zeros([self.input.resolution, 1])
self.joint_posterior = np.zeros([self.input.resolution, 1])
self.CI_tin_high = []
self.CI_tin_low = []
self.TiN = []
self.p_null = 1
# variables
self.TiN_range = np.linspace(0, 1, num=self.input.resolution)
self.TiN_int = 0
# threshold for accepting variants based on the predicted somatic assignment
# if p(S|TiN) exceeds threshold we keep the variant.
self.threshold = 0.5
# defines whether to remove events based on predicted exceeding predicted allele fractions
# if Beta_cdf(predicted_normal_af;n_alt_count+1,n_ref_count+1) <= 0.01 we remove the variant
self.use_outlier_threshold = input.use_outlier_removal
if self.input.indel_file != 'None':
if self.input.indel_table.isnull().values.sum() == 0:
self.indels = self.input.indel_table
def calculate_joint_estimate(self):
# do not use SSNV based estimate if it exceeds 0.3 (this estimate can be unreliable at high TiNs due to
# germline events)
if self.ssnv_based_model.TiN <= 0.3 and ~np.isnan(self.ascna_based_model.TiN):
if len(self.ascna_based_model.centroids) > 1:
reselect_cluster = np.argmin(np.abs(self.ascna_based_model.centroids / 100 - self.ssnv_based_model.TiN))
self.ascna_based_model.TiN_likelihood = self.ascna_based_model.cluster_TiN_likelihoods[reselect_cluster]
print('reselected cluster based on SSNVs')
# combine independent likelihoods
self.joint_log_likelihood = self.ascna_based_model.TiN_likelihood + self.ssnv_based_model.TiN_likelihood
# normalize likelihood to calculate posterior
self.joint_posterior = np.exp(self.ascna_based_model.TiN_likelihood + self.ssnv_based_model.TiN_likelihood
- np.nanmax(
self.ascna_based_model.TiN_likelihood + self.ssnv_based_model.TiN_likelihood))
self.joint_posterior = np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))
self.CI_tin_low = self.TiN_range[next(x[0] for x in enumerate(
np.cumsum(np.ma.masked_array(np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))))) if
x[1] > 0.025)]
self.CI_tin_high = self.TiN_range[
next(x[0] for x in enumerate(np.cumsum(
np.ma.masked_array(np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))))) if
x[1] > 0.975)]
self.TiN_int = np.nanargmax(self.joint_posterior)
self.TiN = self.TiN_range[self.TiN_int]
zero_tin_ssnv_model = copy.deepcopy(self.ssnv_based_model)
zero_tin_ssnv_model.TiN = 0
zero_tin_ssnv_model.expectation_of_z_given_TiN()
zero_tin_ssnv_model.maximize_TiN_likelihood()
zero_total_l = zero_tin_ssnv_model.TiN_likelihood + self.ascna_based_model.TiN_likelihood
zero_total_l = np.exp(zero_total_l - np.nanmax(zero_total_l))
self.p_null = np.true_divide(zero_total_l,np.nansum(zero_total_l))[0]
print('joint TiN estimate = ' + str(self.TiN))
# use only ssnv based model
elif ~np.isnan(self.ascna_based_model.TiN):
# otherwise TiN estimate is = to aSCNA estimate
            print('SSNV based TiN estimate exceeds 0.3; using only aSCNA based estimate')
self.joint_log_likelihood = self.ascna_based_model.TiN_likelihood
self.joint_posterior = np.exp(
self.ascna_based_model.TiN_likelihood - np.nanmax(self.ascna_based_model.TiN_likelihood))
self.joint_posterior = np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))
self.CI_tin_low = self.TiN_range[next(x[0] for x in enumerate(
np.cumsum(np.ma.masked_array(np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))))) if
x[1] > 0.025)]
self.CI_tin_high = self.TiN_range[
next(x[0] for x in enumerate(np.cumsum(
np.ma.masked_array(np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))))) if
x[1] > 0.975)]
self.TiN_int = np.nanargmax(self.joint_posterior)
self.TiN = self.TiN_range[self.TiN_int]
self.p_null = self.joint_posterior[0]
# use only aSCNA based estimate
elif ~np.isnan(self.ssnv_based_model.TiN) and self.ssnv_based_model.TiN <= 0.3:
print('No aSCNAs only using SSNV based model')
self.joint_log_likelihood = self.ssnv_based_model.TiN_likelihood
self.joint_posterior = np.exp(
self.ssnv_based_model.TiN_likelihood - np.nanmax(self.ssnv_based_model.TiN_likelihood))
self.joint_posterior = np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))
self.CI_tin_low = self.TiN_range[next(x[0] for x in enumerate(
np.cumsum(np.ma.masked_array(np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))))) if
x[1] > 0.025)]
self.CI_tin_high = self.TiN_range[
next(x[0] for x in enumerate(np.cumsum(
np.ma.masked_array(np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))))) if
x[1] > 0.975)]
self.TiN_int = np.nanargmax(self.joint_posterior)
self.TiN = self.TiN_range[self.TiN_int]
zero_tin_ssnv_model = copy.deepcopy(self.ssnv_based_model)
zero_tin_ssnv_model.TiN = 0
zero_tin_ssnv_model.expectation_of_z_given_TiN()
zero_tin_ssnv_model.maximize_TiN_likelihood()
zero_total_l = zero_tin_ssnv_model.TiN_likelihood
zero_total_l = np.exp(zero_total_l - np.nanmax(zero_total_l))
self.p_null = np.true_divide(zero_total_l, np.nansum(zero_total_l))[0]
else:
            print('insufficient data to generate TiN estimate.')
self.CI_tin_high = 0
self.CI_tin_low = 0
self.joint_posterior = np.zeros([self.input.resolution, 1])
self.joint_posterior[0] = 1
self.TiN_int = 0
self.TiN = 0
self.p_null = 1
pH1 = self.joint_posterior[self.TiN_int]
#print(self.joint_posterior)
#print(self.p_null)
# code to deal with underflows
if ma.is_masked(self.p_null):
self.p_null = 0
pH0 = self.p_null
p_model = np.true_divide(self.input.TiN_prior * pH1,
(self.input.TiN_prior * pH1) + ((1 - self.input.TiN_prior) * pH0))
if p_model < 0.5 or ~np.isfinite(p_model):
print('insufficient evidence to justify TiN > 0')
self.joint_posterior = np.zeros([self.input.resolution, 1])
self.joint_posterior[0] = 1
self.TiN_int = 0
self.TiN = 0
self.CI_tin_high = 0
self.CI_tin_low = 0
def reclassify_mutations(self):
# calculate p(Somatic | given joint TiN estimate)
if self.input.weighted_classification == True:
numerator = np.zeros(len(self.ssnv_based_model.p_TiN_given_S))
denominator = np.zeros(len(self.ssnv_based_model.p_TiN_given_S))
for idx, p in enumerate(self.joint_posterior):
if p > 0.001:
num_iter = (p * self.ssnv_based_model.p_somatic * self.ssnv_based_model.p_TiN_given_S[:, idx])
numerator = numerator + num_iter
denom_iter = num_iter + p * (np.array(
[1 - self.ssnv_based_model.p_somatic] * np.nan_to_num(
self.ssnv_based_model.p_TiN_given_G[:, idx])))
denominator = denominator + denom_iter
else:
numerator = self.ssnv_based_model.p_somatic * np.expand_dims(
self.ssnv_based_model.p_TiN_given_S[:, self.TiN_int], 1)
denominator = numerator + np.array(
[1 - self.ssnv_based_model.p_somatic] * np.expand_dims(
np.nan_to_num(self.ssnv_based_model.p_TiN_given_G[:, self.TiN_int]), 1))
self.SSNVs.loc[:, ('p_somatic_given_TiN')] = np.nan_to_num(np.true_divide(numerator, denominator))
# expected normal allele fraction given TiN and tau
af_n_given_TiN = np.multiply(self.ssnv_based_model.tumor_f, self.ssnv_based_model.CN_ratio[:, self.TiN_int])
# probability of normal allele fraction less than or equal to predicted fraction
self.SSNVs.loc[:, 'p_outlier'] = self.ssnv_based_model.rv_normal_af.cdf(af_n_given_TiN + 0.01)
if self.TiN_int == 0:
            print('Estimated 0 TiN; no SSNVs will be recovered, outputting deTiN statistics for each site')
elif self.use_outlier_threshold:
# remove outliers mutations p(af_n >= E[af_n|TiN]) < 0.05
self.SSNVs['judgement'][np.logical_and(self.SSNVs['p_somatic_given_TiN'] > self.threshold,
self.SSNVs['p_outlier'] >= 0.01)] = 'KEEP'
else:
self.SSNVs['judgement'][self.SSNVs['p_somatic_given_TiN'] > self.threshold] = 'KEEP'
if self.input.indel_file != 'None':
if self.input.indel_table.isnull().values.sum() == 0:
indel_model = dssnv.model(self.input.indel_table, self.input.mutation_prior, self.input.resolution)
indel_model.generate_conditional_ps()
self.indels = self.input.indel_table
numerator = indel_model.p_somatic * np.expand_dims(indel_model.p_TiN_given_S[:, self.TiN_int], 1)
denominator = numerator + np.array(
[1 - indel_model.p_somatic] * np.expand_dims(np.nan_to_num(
indel_model.p_TiN_given_G[:, self.TiN_int]), 1))
af_n_given_TiN = np.multiply(indel_model.tumor_f, indel_model.CN_ratio[:, self.TiN_int])
self.indels.loc[:, ('p_somatic_given_TiN')] = np.nan_to_num(np.true_divide(numerator, denominator))
self.indels.loc[:, 'p_outlier'] = indel_model.rv_normal_af.cdf(af_n_given_TiN)
if self.TiN_int == 0:
                    print('Estimated 0 TiN; no indels will be recovered, outputting deTiN statistics for each site')
elif self.use_outlier_threshold:
# remove outliers mutations p(af_n >= E[af_n|TiN]) < 0.05
self.indels['filter'][np.logical_and(self.indels['p_somatic_given_TiN'] > self.threshold,
self.indels['p_outlier'] >= 0.01)] = 'PASS'
else:
self.indels['filter'][self.indels['p_somatic_given_TiN'] > self.threshold] = 'PASS'
elif self.input.indel_table.isnull().values.sum() > 0:
self.indels = self.input.indel_table
__version__ = '1.0'
def main():
""" deTiN pipeline. Method operates in two stages (1) estimating tumor in normal via candidate SSNVs and SCNAS.
(2) Performing variant re-classification using bayes rule.
"""
parser = argparse.ArgumentParser(description='Estimate tumor in normal (TiN) using putative somatic'
' events see Taylor-Weiner & Stewart et al. 2017')
# input files
parser.add_argument('--mutation_data_path',
help='Path to mutation candidate SSNV data.'
'Supported formats: MuTect call-stats', required=False, default = 'NULL')
parser.add_argument('--cn_data_path',
help='Path to copy number data.'
'Supported format: AllelicCapseg .seg file. Generated by GATK4 AllelicCNV.',
required=False, default='NULL')
parser.add_argument('--tumor_het_data_path',
help='Path to heterozygous site allele count data in tumor. Generated by GATK4 GetBayesianHetCoverage.'
'Required columns: CONTIG,POS,REF_COUNT and ALT_COUNT', required=False,
default='NULL')
parser.add_argument('--normal_het_data_path',
help='Path to heterozygous site allele count data in normal. Generated by GATK4 GetBayesianHetCoverage'
'Required columns: CONTIG,POS,REF_COUNT and ALT_COUNT', required=False,
default = 'NULL')
parser.add_argument('--exac_data_path',
help='Path to exac af > 0.01 pickle. Can be generated by downloading ExAC VCF and running build_exac_pickle',
required=False)
parser.add_argument('--indel_data_path',
help='Path to candidate indels data.'
'Supported formats: Strelka / MuTect2 VCFs', required=False, default='None')
parser.add_argument('--indel_data_type',
help='MuTect2 or Strelka'
'Caller used to generate indels', required=False, default='None')
# output related arguments
parser.add_argument('--output_name', required=True,
help='sample name')
parser.add_argument('--output_dir', help='directory to put plots and TiN solution', required=False, default='.')
# model related parameters
parser.add_argument('--mutation_prior', help='prior expected ratio of somatic mutations to rare germline events'
, required=False, default=0.05)
parser.add_argument('--aSCNA_threshold', help='minor allele fraction threshold for calling aSCNAs.'
, required=False, default=0.1)
parser.add_argument('--TiN_prior',
help='expected frequency of TiN contamination in sequencing setting used for model selection',
required=False, default=0.5)
parser.add_argument('--use_outlier_removal',
help='remove sites from recovered SSNVs where allele fractions significantly exceed predicted fraction',
required=False, default=True)
parser.add_argument('--resolution',
help='number of TiN bins to consider default = 101 corresponds to 0.01 TiN levels'
, required=False, default=101)
parser.add_argument('--weighted_classification',
help='integrate variant classification over all values of TiN'
, required=False, default=False)
parser.add_argument('--ascna_probe_number_filter', help='number of probes to require for an aSCNA to be considered'
, required=False, default=200)
parser.add_argument('--ascna_SNP_number_filter', help='number of probes to require for an aSCNA to be considered'
, required=False, default=20)
parser.add_argument('--coverage_threshold', help='number of reads required to use a site for TiN estimation',
required=False,
default=15)
parser.add_argument('--SSNV_af_threshold', help='fraction of alternate alleles required for site to be used '
'for SSNV TiN estimation',
required=False,
default=.2)
parser.add_argument('--aSCNA_variance_threshold',
help='variance of segment allele shift tolerated before removing segment '
'as artifact', required=False, default=0.025)
parser.add_argument('--cancer_hot_spots',
help='Optional BED file of cancer hot spot mutations which the user has a stronger prior on being somatic e.g. BRAF v600E mutations.'
'The format of this file is Chromosome\tPosition\tProbability. Note this will override the mutation prior at these locations'
, required=False, default='NA')
parser.add_argument('--only_ascnas',
help='only use ascna data for TiN estimation',required=False, action='store_true')
args = parser.parse_args()
if args.cn_data_path == 'NULL' and args.mutation_data_path == 'NULL':
print('One of CN data or SSNV data are required.')
sys.exit()
elif args.cn_data_path =='NULL':
di = input(args)
di.read_and_preprocess_SSNVs()
di.candidates = du.select_candidate_mutations(di.call_stats_table, di.exac_db_file)
n_calls_pre = np.sum(di.candidates['judgement'] == "KEEP")
ssnv_based_model = dssnv.model(di.candidates, di.mutation_prior, di.resolution, di.SSNV_af_threshold,
di.coverage_threshold, di.CancerHotSpotsBED, skew = di.skew)
ssnv_based_model.perform_inference()
ascna_based_model = dascna.model(di.seg_table, di.het_table, di.resolution)
ascna_based_model.TiN = np.nan
elif args.mutation_data_path=='NULL':
di = input(args)
di.read_and_preprocess_aSCNAs()
di.candidates = pd.DataFrame(index=[0],columns=['contig', 'position', 'ref_allele', 'alt_allele', 'tumor_name', 'normal_name',
            't_alt_count','t_ref_count', 'n_alt_count', 'n_ref_count', 'failure_reasons', 'judgement','genomic_coord_x','f_acs','tau'])
        n_calls_pre = 0
ssnv_based_model = dssnv.model(di.candidates, di.mutation_prior, di.resolution, di.SSNV_af_threshold,
di.coverage_threshold, di.CancerHotSpotsBED)
ssnv_based_model.TiN = np.nan
ascna = False
# identify aSCNAs and filter hets
if len(di.seg_table) > 0:
di.aSCNA_hets = du.ensure_balanced_hets(di.seg_table, di.het_table)
if len(di.aSCNA_hets) > 0:
di.aSCNA_segs, di.convergent_segs = du.identify_aSCNAs(di.seg_table, di.aSCNA_hets, di.aSCNA_thresh,
di.ascna_SNP_number_filter,
di.aSCNA_variance_threshold)
if len(di.aSCNA_segs) > 0:
ascna_based_model = dascna.model(di.aSCNA_segs, di.aSCNA_hets, di.resolution)
ascna_based_model.perform_inference()
ascna = True
if not ascna:
ascna_based_model = dascna.model(di.seg_table, di.het_table, di.resolution)
ascna_based_model.TiN = np.nan
else:
di = input(args)
di.read_and_preprocess_data()
# identify candidate mutations based on MuTect flags.
# kept sites are flagged as KEEP or rejected for normal lod and/or alt_allele_in_normal
di.candidates = du.select_candidate_mutations(di.call_stats_table, di.exac_db_file)
n_calls_pre = np.sum(di.candidates['judgement'] == "KEEP")
# generate SSNV based model using candidate sites
ssnv_based_model = dssnv.model(di.candidates, di.mutation_prior, di.resolution, di.SSNV_af_threshold,
di.coverage_threshold, di.CancerHotSpotsBED)
ssnv_based_model.perform_inference()
if di.only_ascnas == True:
ssnv_based_model.TiN = np.nan
print('Only using aSCNA data')
ascna = False
# identify aSCNAs and filter hets
if len(di.seg_table) > 0:
di.aSCNA_hets = du.ensure_balanced_hets(di.seg_table, di.het_table)
if len(di.aSCNA_hets) > 0:
di.aSCNA_segs,di.convergent_segs = du.identify_aSCNAs(di.seg_table, di.aSCNA_hets, di.aSCNA_thresh, di.ascna_SNP_number_filter,
di.aSCNA_variance_threshold)
if len(di.aSCNA_segs) > 0:
ascna_based_model = dascna.model(di.aSCNA_segs, di.aSCNA_hets, di.resolution)
ascna_based_model.perform_inference()
ascna = True
if not ascna:
ascna_based_model = dascna.model(di.seg_table, di.het_table, di.resolution)
ascna_based_model.TiN = np.nan
# combine models and reclassify mutations
do = output(di, ssnv_based_model, ascna_based_model)
do.calculate_joint_estimate()
if len(do.SSNVs)>1:
do.reclassify_mutations()
do.SSNVs.drop('Chromosome', axis=1, inplace=True)
n_calls_post = np.sum(do.SSNVs['judgement']=="KEEP")
n_calls_added = n_calls_post - n_calls_pre
# make output directory if needed
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# write deTiN reclassified SSNVs
do.SSNVs.to_csv(path_or_buf=do.input.output_path + '/' + do.input.output_name + '.deTiN_SSNVs.txt', sep='\t',
index=None)
if not di.indel_file == 'None':
#if 'Chromosome' in do.indels.columns:
do.indels.drop('Chromosome', axis=1, inplace=True)
do.indels.to_csv(path_or_buf=do.input.output_path + '/' + do.input.output_name + '.deTiN_indels.txt', sep='\t',
index=None)
# write plots
if not np.isnan(ascna_based_model.TiN):
do.ascna_based_model.segs['Chromosome'] = do.ascna_based_model.segs['Chromosome'] + 1
do.ascna_based_model.segs.to_csv(path_or_buf=do.input.output_path + '/' + do.input.output_name + '.deTiN_aSCNAs.txt', sep='\t',
index=None)
du.plot_kmeans_info(ascna_based_model, do.input.output_path, do.input.output_name)
du.plot_TiN_models(do)
du.plot_aSCNA_het_data(do)
if not np.isnan(ssnv_based_model.TiN):
du.plot_SSNVs(do)
# write TiN and CIs
file = open(do.input.output_path + '/' + do.input.output_name + '.TiN_estimate.txt', 'w')
file.write('%s' % (do.TiN))
file.close()
file = open(do.input.output_path + '/' + do.input.output_name + '.TiN_estimate_CI.txt', 'w')
file.write('%s - %s' % (str(do.CI_tin_low), str(do.CI_tin_high)))
file.close()
file = open(do.input.output_path + '/' + do.input.output_name + '.number_of_SSNVs_added.txt','w')
    file.write('%s\n'% int(n_calls_added))
    file.close()
if __name__ == "__main__":
main()
```
#### File: deTiN/deTiN/deTiN_utilities.py
```python
import numpy as np
import sys
from scipy.stats import beta
from scipy.stats import fisher_exact
from itertools import compress
import gzip
import random
import pandas as pd
import matplotlib
import pickle
matplotlib.use('agg')
import matplotlib.pyplot as plt
from scipy.special import gammaln
random.seed(1)
def beta_binomial_pdf(x,n,a,b):
x = x.reshape(-1,1)
n = n.reshape(-1,1)
return gammaln(n+1) + gammaln(x+a) + gammaln(n-x+b) + gammaln(a+b) - \
(gammaln(x+1) + gammaln(n-x+1) + gammaln(a) + gammaln(b) + gammaln(n+a+b))
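# Sanity note (comment added): with a flat Beta(1, 1) prior the beta-binomial
# is uniform over x = 0..n, so
#   np.exp(beta_binomial_pdf(np.array([3]), np.array([10]), 1, 1)) ~= 1/11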
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def is_member(a, b):
# based on the matlab is_member function
# code from stackoverflow user <NAME>
bind = {}
for i, elt in enumerate(b):
if elt not in bind:
bind[elt] = i
return [bind.get(itm, np.nan) for itm in a]
def chr2num(chr):
if chr[0][0:2] == 'ch':
chr = [c[3:] for c in chr]
# convert chromosome from strings to ints
chr = ['23' if x == 'X' else x for x in chr]
chr = ['24' if x == 'Y' else x for x in chr]
chr = ['25' if x == 'M' else x for x in chr]
chr = ['25' if x == 'MT' else x for x in chr]
chromosomes = np.array(range(1, 26))
    return np.array(is_member(chr, chromosomes.astype(str)))
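# (comment added for illustration) chr2num maps chromosome labels to
# zero-based indices, e.g. ['1', 'X', 'MT'] -> 0, 22, 24.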
def remove_sites_near_centromere_and_telomeres(het_table):
positions = het_table['genomic_coord_x']
centromere_positions = [125000001, 1718373143, 1720573143, 1867507890,
1869607890, 1984214406, 1986714406, 2101066301,
2102666301, 2216036179, 2217536179, 2323085719, 2326285719, 2444417111,
2446417111, 2522371864, 2524171864, 2596767074, 2598567074, 2683844322,
2685944322, 339750621, 342550621, 2744173305, 2746073305, 2792498825, 2794798825,
2841928720,
2844428720, 580349994, 583449994, 738672424, 740872424, 927726700, 930026700, 1121241960,
1123541960, 1291657027, 1293557027, 1435895690, 1438395690, 1586459712, 1588159712]
lengths = np.array([249250621, 243199373, 198022430, 191154276, 180915260, 171115067, 159138663,
146364022, 141213431, 135534747, 135006516, 133851895, 115169878, 107349540,
102531392, 90354753, 81195210, 78077248, 59128983, 63025520, 48129895, 51304566,
155270560, 59373566, 16569]) # chromosome lengths from genome-mysql.cse.ucsc.edu
telomere_positions = np.append(1, np.cumsum(lengths))
distance_centromere = np.zeros([len(positions), len(centromere_positions)])
distance_telomere = np.zeros([len(positions), len(telomere_positions)])
for i, centromere in enumerate(centromere_positions):
distance_centromere[:, i] = np.abs(positions - centromere)
distance_centromere = np.min(distance_centromere, axis=1)
for i, telomere in enumerate(telomere_positions):
distance_telomere[:, i] = np.abs(positions - telomere)
distance_telomere = np.min(distance_telomere, axis=1)
het_table = het_table[np.logical_and(distance_centromere > 5000000, distance_telomere > 5000000)]
het_table.reset_index(inplace=True, drop=True)
return het_table
def filter_hets_based_on_coverage(het_table, depth = 10):
het_table = het_table[np.logical_and(het_table['READ_DEPTH_N'] > depth, het_table['READ_DEPTH_T'] > depth)]
het_table.reset_index(inplace=True, drop=True)
return het_table
def filter_segments_based_on_size_f_and_tau(seg_table, aSCNA_thresh, n_probes = 200):
seg_table = seg_table[np.logical_and.reduce(np.array([np.array(seg_table['f']) < 0.5 - aSCNA_thresh,
seg_table['n_probes'] > n_probes, seg_table['tau'] > 0]))]
seg_table.reset_index(inplace=True, drop=True)
return seg_table
def alternate_file_headers():
headers = {'alternate_headers_position' : ['Start', 'Start_bp', 'start','position','pos','POS','Start_position'],
'alternate_headers_start_position' : ['Start', 'Start_bp', 'start','position','pos','POS','Start_position','START'],
'alternate_headers_end_position' : ['End', 'End_bp', 'end','END'],
'alternate_headers_chromosome' : ['Contig', 'chrom', 'CONTIG', 'chr', 'Chrom', 'CHROMOSOME','Chromosome','contig'],
'alternate_headers_f' : ['f_acs', 'MAF_Post_Mode','MINOR_ALLELE_FRACTION_POSTERIOR_50'],
'alternate_headers_tau' : ['CN', 'Segment_Mean_Post_Mode','LOG2_COPY_RATIO_POSTERIOR_50'],
'alternate_headers_alt_count' : ['t_alt_count', 'n_alt_count', 'alt_count', 'i_t_alt_count', 'i_n_alt_count'],
'alternate_headers_ref_count' : ['t_ref_count', 'n_ref_count', 'ref_count', 'i_t_ref_count', 'i_n_ref_count'],
'alternate_headers_n_probes': ['n_probes','NUM_POINTS_COPY_RATIO','Num_Targets']}
return headers
def read_file_header(text_file):
headers = alternate_file_headers()
with open(text_file, 'rt') as f:
for header_lines, line in enumerate(f):
line = line.strip()
if not line[0] == '#':
break
file_head = line.split('\t')
try :
headers['alternate_headers_chromosome'].index(file_head[0])
except ValueError:
sys.exit('The first column of all input files should be chromosome: could not find any of the chromosome headers in the first column of '+
text_file)
return file_head
def identify_aSCNAs(seg_table, het_table, aSCNA_thresh = 0.1, n_snps = 20, var_thresh = 0.025):
# identify aSCNAs based on minor allele fraction of segments
mu_af_n = np.mean(het_table['AF_N'])
f_detin = np.zeros([len(seg_table), 1])
f_variance = np.zeros([len(seg_table), 1])
n_snps_above_mu = np.zeros([len(seg_table), 1])
n_snps_below_mu = np.zeros([len(seg_table), 1])
fishers_p_convergent_seg = np.ones([len(seg_table), 1])
thresh_snps = np.round(np.true_divide(n_snps,2))
for seg_id, seg in seg_table.iterrows():
seg_hets = het_table[het_table['seg_id'] == seg_id]
f_detin[seg_id] = mu_af_n - np.mean(np.abs(seg_hets['AF_T'] - mu_af_n))
f_variance[seg_id] = np.var(np.abs(seg_hets['AF_T'] - mu_af_n))
n_snps_above_mu[seg_id] = np.sum(seg_hets['AF_T'] > mu_af_n)
n_snps_below_mu[seg_id] = np.sum(seg_hets['AF_T'] <= mu_af_n)
try:
fe_tuple = fisher_exact([[np.sum(np.logical_and(seg_hets['AF_T'] > mu_af_n,
seg_hets['AF_N'] > mu_af_n)),
np.sum(np.logical_and(seg_hets['AF_T'] > mu_af_n,
seg_hets['AF_N'] <= mu_af_n))],
[np.sum(np.logical_and(seg_hets['AF_T'] <= mu_af_n,
seg_hets['AF_N'] > mu_af_n)),
np.sum(np.logical_and(seg_hets['AF_T'] <= mu_af_n,
seg_hets['AF_N'] <= mu_af_n))]], 'less')
fishers_p_convergent_seg[seg_id] = fe_tuple[1]
except ValueError:
fishers_p_convergent_seg[seg_id] = 1
seg_table['f_detin'] = f_detin
seg_table['f_variance'] = f_variance
seg_table['n_snps_above_mu'] = n_snps_above_mu
seg_table['n_snps_below_mu'] = n_snps_below_mu
seg_table['fishers_p_convergent_seg'] = fishers_p_convergent_seg
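    # multiplying each Fisher's exact p-value by the number of segments below is a
    # Bonferroni correction for having tested every segment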
if any((seg_table['fishers_p_convergent_seg'] * len(seg_table)) < 0.05):
segs = (seg_table['fishers_p_convergent_seg'] * len(seg_table)) < 0.05
ix = list(compress(range(len(segs)), segs))
print('identified convergent aSCNA in normal on chromosomes:' + str(np.unique(seg_table['Chromosome'][ix] + 1)))
        convergent_segs = seg_table[seg_table['fishers_p_convergent_seg'] * len(seg_table) < 0.05]
else:
convergent_segs = None
aSCNAs = seg_table[
np.logical_and.reduce(np.array([np.array(seg_table['fishers_p_convergent_seg'] * len(seg_table)) > 0.05,
seg_table['n_snps_above_mu'] > thresh_snps,
seg_table['n_snps_below_mu'] > thresh_snps,
seg_table['f_detin'] <= 0.5 - aSCNA_thresh,
seg_table['f_variance'] < var_thresh]))]
return aSCNAs,convergent_segs
def ensure_balanced_hets(seg_table, het_table):
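    # downsample each segment so that het sites above and below AF = 0.5 are
    # balanced; unbalanced counts would bias the allelic-shift estimate toward
    # one haplotype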
seg_table['aSCNA'] = np.zeros([len(seg_table), 1])
aSCNA_hets = []
for seg_id, seg in seg_table.iterrows():
seg_hets = het_table[het_table['seg_id'] == seg_id]
if np.sum(seg_hets['d'] == -1) > 10 and np.sum(seg_hets['d'] == 1) > 10:
if sum(seg_hets['AF_T'] > 0.5) < sum(seg_hets['AF_T'] <= 0.5):
sites = seg_hets['AF_T'] <= 0.5
index = list(compress(range(len(sites)), sites))
ixs = random.sample(index, (sum(seg_hets['AF_T'] <= 0.5) - sum(seg_hets['AF_T'] > 0.5)))
seg_hets = seg_hets.drop(seg_hets.index[ixs])
seg_hets.reset_index(inplace=True, drop=True)
if sum(seg_hets['AF_T'] > 0.5) > sum(seg_hets['AF_T'] <= 0.5):
sites = seg_hets['AF_T'] > 0.5
index = list(compress(range(len(sites)), sites))
ixs = random.sample(index, (sum(seg_hets['AF_T'] > 0.5) - sum(seg_hets['AF_T'] <= 0.5)))
seg_hets = seg_hets.drop(seg_hets.index[ixs])
seg_hets.reset_index(inplace=True, drop=True)
if len(aSCNA_hets) == 0:
aSCNA_hets = seg_hets
else:
aSCNA_hets = pd.concat([aSCNA_hets, seg_hets])
aSCNA_hets.reset_index(inplace=True, drop=True)
return aSCNA_hets
def plot_kmeans_info(ascna_based_model, output_path, sample_name):
# method for plotting clustering results of aSCNA TiN estimates
X = np.array(ascna_based_model.segs['TiN_MAP'])
X_low = np.array(ascna_based_model.segs['TiN_ci_l'])
X_high = np.array(ascna_based_model.segs['TiN_ci_h'])
Y = np.array(ascna_based_model.segs['Chromosome'])
kIdx = int(np.max(ascna_based_model.cluster_assignment))
K = range(1, 4)
# variance explained by incorporating additional clusters
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(K, ascna_based_model.sum_squared_distance, 'b.-')
ax.plot(K[kIdx], ascna_based_model.sum_squared_distance[kIdx], marker='o', markersize=12,
markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')
plt.grid(True)
plt.xlabel('Number of clusters')
plt.ylabel('Average within-cluster sum of squares')
plt.title('KMeans residual')
plt.xticks([1,2,3])
fig.set_dpi(150)
fig.savefig(output_path + '/' + sample_name + '_KmeansEval_plot.png', bbox_inches='tight')
# scatter plot of TiN estimates per segment by chromosome location and cluster
fig = plt.figure()
ax = fig.add_subplot(111)
clr = ['b', 'g', 'r']
if len(X) > 1:
for i in range(K[kIdx]):
ind = (ascna_based_model.cluster_assignment == i)
ax.errorbar(X[ind], Y[ind], xerr=[X[ind]-X_low[ind],X_high[ind]-X[ind]] , c=clr[i], label='Cluster %d' % i,ls='None',marker='.')
else:
ax.errorbar(X,Y+1,xerr=[X-X_low,X_high-X],c='b',label='Cluster 1',ls='None',marker='.')
plt.xlabel('MAP tumor in normal estimate (%)')
plt.ylabel('Chromosome')
plt.title('Cluster by chromosome and TiN')
plt.yticks(np.arange(min(Y) , max(Y) + 2, 2.0))
plt.xticks(np.arange(0, max(X) + 1, np.max([np.round(np.true_divide(np.max(X), 10)), 1])))
ax.set_xlim([-2, np.max(X) + 2])
fig.set_dpi(150)
fig.savefig(output_path + '/' + sample_name + '_KmeansEval_scatter_plot.png', bbox_inches='tight')
def plot_aSCNA_het_data(do):
fig, ax = plt.subplots(1, 1)
ax.plot(do.input.het_table['genomic_coord_x'], do.input.het_table['AF_T'], c=[0.5, 0.5, 0.5], marker='.', ls='None',
ms=1, alpha=0.5)
tumor_af = ax.plot(do.ascna_based_model.hets['genomic_coord_x'], do.ascna_based_model.hets['AF_T'], c=[0, 0, 1], marker='.',
ls='None', ms=5)
normal_af = ax.plot(do.ascna_based_model.hets['genomic_coord_x'], do.ascna_based_model.hets['AF_N'], c=[1, 0, 0], marker='.',
ls='None', ms=5)
fig.set_dpi(300)
    chrs = hg19_to_linear_positions(np.arange(0, 25), np.zeros([25]))  # 25 boundaries -> 24 chromosome midpoints, matching the 24 tick labels below
for c in chrs:
ax.plot([c, c], [0, 1], 'k--')
plt.legend(handles=[tumor_af[0], normal_af[0]],labels=['Tumor', 'Normal'])
ax.set_xticks((chrs[1:] + chrs[:-1]) / 2)
ax.set_xticklabels((np.linspace(1, 24, 24, dtype=int)), size=5, rotation=90)
ax.set_yticks(np.linspace(0, 1, 5))
ax.set_yticklabels(np.linspace(0, 1, 5), size=5)
ax.set_xlabel('Chromosomes')
ax.set_ylabel('Allele fraction')
fig.set_dpi(150)
fig.savefig(do.input.output_path + '/' + do.input.output_name + '_TiN_hets_aSCNA_model.png', bbox_inches='tight')
fig.savefig(do.input.output_path + '/' + do.input.output_name + '_TiN_hets_aSCNA_model.eps', bbox_inches='tight')
def plot_TiN_models(do):
fig, ax = plt.subplots(1, 1)
TiN_range = np.linspace(0, 1, num=do.input.resolution)
if ~np.isnan(do.ascna_based_model.TiN):
ascna = ax.plot(TiN_range,
np.true_divide(np.exp(
do.ascna_based_model.TiN_likelihood - np.nanmax(do.ascna_based_model.TiN_likelihood)),
np.nansum(np.exp(do.ascna_based_model.TiN_likelihood - np.nanmax(
do.ascna_based_model.TiN_likelihood))))
, 'r--', lw=1)
ssnv = ax.plot(TiN_range,
np.true_divide(
np.exp(do.ssnv_based_model.TiN_likelihood - np.nanmax(do.ssnv_based_model.TiN_likelihood)),
np.nansum(
np.exp(do.ssnv_based_model.TiN_likelihood - np.nanmax(do.ssnv_based_model.TiN_likelihood))))
, 'b--', lw=1)
joint = ax.plot(TiN_range, do.joint_posterior
, 'k-', lw=2)
plt.xlabel('Tumor in normal estimate')
plt.ylabel('p(TiN=x)')
plt.title('TiN estimate posterior')
if ~np.isnan(do.ascna_based_model.TiN):
plt.legend(handles=[ascna[0], ssnv[0], joint[0]], labels=['aSCNA', 'SSNV', 'Joint Est.'])
else:
plt.legend(handles=[ssnv[0], joint[0]], labels=['SSNV', 'Joint Est.'])
fig.set_dpi(150)
fig.savefig(do.input.output_path + '/' + do.input.output_name + '_TiN_models_plot.png', bbox_inches='tight')
def plot_SSNVs(do):
fig, ax = plt.subplots(1, 1)
TiN_fit = ax.plot(np.linspace(0, 1, do.input.resolution), np.multiply(do.TiN, np.linspace(0, 1, do.input.resolution)), '--', lw=1, alpha=1,
color='#1D1D1D')
background = ax.plot(do.ssnv_based_model.tumor_f, do.ssnv_based_model.normal_f
, '.', lw=0.1, alpha=0.75, color=[0.75, 0.75, 0.75])
nod_kept = np.logical_and(do.SSNVs['judgement'] == 'KEEP', do.SSNVs.isnull()['failure_reasons']).values
cis = do.ssnv_based_model.rv_normal_af.interval(0.6825)
kept_def = ax.plot(do.ssnv_based_model.tumor_f[nod_kept], do.ssnv_based_model.normal_f[nod_kept],
'b.', lw=0.1)
d_kept = np.logical_and(do.SSNVs['judgement'] == 'KEEP', ~do.SSNVs.isnull()['failure_reasons']).values
yerr_low = do.ssnv_based_model.normal_f[d_kept] - cis[0][d_kept]
yerr_low[yerr_low<0] = 0
detin_kept = ax.errorbar(do.ssnv_based_model.tumor_f[d_kept], do.ssnv_based_model.normal_f[d_kept],
yerr=[yerr_low,
cis[1][d_kept] - do.ssnv_based_model.normal_f[d_kept]], fmt='r.', capsize=2)
plt.xlabel('Tumor AF')
plt.ylabel('Normal AF')
plt.title('SSNVs considered and recovered')
plt.legend(handles=[background[0], kept_def[0], detin_kept[0], TiN_fit[0]],
labels=['Candidate Sites', 'Called w/o deTiN ', 'deTiN recovered', 'TiN_fit'])
fig.set_dpi(300)
fig.savefig(do.input.output_path + '/' + do.input.output_name + '_SSNVs_plot.png', bbox_inches='tight')
fig.savefig(do.input.output_path + '/' + do.input.output_name + '_SSNVs_plot.eps', format='eps',
bbox_inches='tight')
def select_candidate_mutations(call_stats_table, exac_db_file):
# filter sites in call stats table to those only rejected for presence in the normal
failure_reasons = np.array(call_stats_table['failure_reasons'])
candidate_sites = call_stats_table[np.logical_or.reduce(np.array([np.array(call_stats_table['judgement']) == 'KEEP',
failure_reasons == 'normal_lod,alt_allele_in_normal',
failure_reasons == 'alt_allele_in_normal']))]
candidate_sites['t_depth'] = candidate_sites['t_alt_count'] + candidate_sites['t_ref_count']
candidate_sites['n_depth'] = candidate_sites['n_alt_count'] + candidate_sites['n_ref_count']
candidate_sites.reset_index(inplace=True, drop=True)
candidate_sites = remove_exac_sites_from_call_stats(candidate_sites, exac_db_file)
candidate_sites.reset_index(inplace=True, drop=True)
return candidate_sites
def hg19_to_linear_positions(chromosome, position, **keyword_parameters):
    # type: (np.ndarray, np.ndarray, str) -> np.ndarray
"""
Change chromosome-position to continuous linear coordinates
"""
if ('build' in keyword_parameters):
build = keyword_parameters['build']
else:
build = 'hg19'
if build == 'hg19':
L = np.array([249250621, 243199373, 198022430, 191154276, 180915260, 171115067, 159138663,
146364022, 141213431, 135534747, 135006516, 133851895, 115169878, 107349540,
102531392, 90354753, 81195210, 78077248, 59128983, 63025520, 48129895, 51304566,
155270560, 59373566, 16569]) # chromosome lengths from genome-mysql.cse.ucsc.edu
if build == 'hg38':
L = np.array([248956422,242193529,198295559,190214555,181538259,170805979,159345973,
145138636,138394717,133797422,135086622,133275309,114364328,107043718,
101991189,90338345,83257441,80373285,58617616,64444167,46709983,50818468,
156040895,57227415,16569])
C = np.append(1, np.cumsum(L))
x = np.array([chromosome[int(i)] for i in np.arange(0, len(position))],dtype=int)
return C[x] + position
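# Illustrative example (not part of the original pipeline): chromosomes are
# 0-based here (0 == chr1), so a position on chr2 is offset by the length of chr1.
#   >>> hg19_to_linear_positions(np.array([0, 1]), np.array([100, 100]))
#   array([      101, 249250721])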
def fix_het_file_header(het_file):
# allowing flexibility in het file headers to accommodate changing versions of GATK4 and other CN tools
# in order to add support for your het file headers please modify the alternate header lists above
headers = alternate_file_headers()
required_headers = ['CONTIG', 'POSITION', 'ALT_COUNT', 'REF_COUNT']
if np.sum(np.isfinite((is_member(required_headers, het_file.columns)))) == 4:
return het_file
else:
missing_idx = np.where(~np.isfinite((is_member(required_headers, het_file.columns))))
for i in missing_idx[0]:
if required_headers[i] == 'POSITION':
if np.sum(np.isfinite(is_member(headers['alternate_headers_position'], het_file.columns))) == 0 or np.sum(
np.isfinite(is_member(headers['alternate_headers_position'], het_file.columns))) > 1:
sys.exit('missing required header POSITION and could not replace with POS,position, or pos!')
else:
idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_position'], het_file.columns)))
het_file.rename(columns={headers['alternate_headers_position'][idx_replace[0][0]]: 'POSITION'}, inplace=True)
print('changing header of het file from ' + headers['alternate_headers_position'][
idx_replace[0][0]] + ' to POSITION')
if required_headers[i] == 'CONTIG':
if np.sum(np.isfinite(is_member(headers['alternate_headers_chromosome'], het_file.columns))) == 0 or np.sum(
np.isfinite(is_member(headers['alternate_headers_chromosome'], het_file.columns))) > 1:
sys.exit(
'missing required header CONTIG and could not replace with any one of CHR, chrom, Chromosome, chr, Chrom!')
else:
idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_chromosome'], het_file.columns)))
het_file.rename(columns={headers['alternate_headers_chromosome'][idx_replace[0][0]]: 'CONTIG'}, inplace=True)
print('changing header of het file from ' + headers['alternate_headers_chromosome'][
idx_replace[0][0]] + ' to CONTIG')
if required_headers[i] == 'ALT_COUNT':
if np.sum(np.isfinite(is_member(headers['alternate_headers_alt_count'], het_file.columns))) == 0 or np.sum(
np.isfinite(is_member(headers['alternate_headers_alt_count'], het_file.columns))) > 1:
sys.exit(
'missing required header ALT_COUNT and could not replace with any one of t_alt_count, n_alt_count, alt_count')
else:
idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_alt_count'], het_file.columns)))
het_file.rename(columns={headers['alternate_headers_alt_count'][idx_replace[0][0]]: 'ALT_COUNT'}, inplace=True)
print('changing header of het file from ' + headers['alternate_headers_alt_count'][
idx_replace[0][0]] + ' to ALT_COUNT')
if required_headers[i] == 'REF_COUNT':
if np.sum(np.isfinite(is_member(headers['alternate_headers_ref_count'], het_file.columns))) == 0 or np.sum(
np.isfinite(is_member(headers['alternate_headers_ref_count'], het_file.columns))) > 1:
sys.exit(
                    'missing required header REF_COUNT and could not replace with any one of t_ref_count, n_ref_count, ref_count')
else:
idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_ref_count'], het_file.columns)))
het_file.rename(columns={headers['alternate_headers_ref_count'][idx_replace[0][0]]: 'REF_COUNT'}, inplace=True)
print('changing header of het file from ' + headers['alternate_headers_ref_count'][
idx_replace[0][0]] + ' to REF_COUNT')
return het_file
def fix_seg_file_header(seg_file):
# allowing flexibility in seg file headers to accommodate changing versions of GATK4 and other CN tools
# in order to add support for your seg file headers please modify the alternate header lists above
headers = alternate_file_headers()
required_headers = ['Chromosome', 'Start.bp', 'End.bp', 'f', 'tau','n_probes']
    if np.sum(np.isfinite((is_member(required_headers, seg_file.columns)))) == 6:
return seg_file
else:
missing_idx = np.where(~np.isfinite((is_member(required_headers, seg_file.columns))))
for i in missing_idx[0]:
if required_headers[i] == 'Start.bp':
if np.sum(np.isfinite(is_member(headers['alternate_headers_start_position'], seg_file.columns))) == 0 or np.sum(
np.isfinite(is_member(headers['alternate_headers_start_position'], seg_file.columns))) > 1:
sys.exit('missing required header Start.bp and could not replace with Start or Start_bp')
else:
idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_start_position'], seg_file.columns)))
seg_file.rename(columns={headers['alternate_headers_start_position'][idx_replace[0][0]]: 'Start.bp'},
inplace=True)
print('changing header of seg file from ' + headers['alternate_headers_start_position'][
idx_replace[0][0]] + ' to Start.bp')
if required_headers[i] == 'End.bp':
if np.sum(np.isfinite(is_member(headers['alternate_headers_end_position'], seg_file.columns))) == 0 or np.sum(
np.isfinite(is_member(headers['alternate_headers_end_position'], seg_file.columns))) > 1:
sys.exit('missing required header End.bp and could not replace with End or End_bp')
else:
idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_end_position'], seg_file.columns)))
seg_file.rename(columns={headers['alternate_headers_end_position'][idx_replace[0][0]]: 'End.bp'}, inplace=True)
print('changing header of seg file from ' + headers['alternate_headers_end_position'][
idx_replace[0][0]] + ' to End.bp')
if required_headers[i] == 'Chromosome':
if np.sum(np.isfinite(is_member(headers['alternate_headers_chromosome'], seg_file.columns))) == 0 or np.sum(
np.isfinite(is_member(headers['alternate_headers_chromosome'], seg_file.columns))) > 1:
sys.exit(
'missing required header Chromosome and could not replace with any other header')
else:
idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_chromosome'], seg_file.columns)))
seg_file.rename(columns={headers['alternate_headers_chromosome'][idx_replace[0][0]]: 'Chromosome'},
inplace=True)
print('changing header of seg file from ' + headers['alternate_headers_chromosome'][
idx_replace[0][0]] + ' to Chromosome')
if required_headers[i] == 'f':
if np.sum(np.isfinite(is_member(headers['alternate_headers_f'], seg_file.columns))) == 0 or np.sum(
np.isfinite(is_member(headers['alternate_headers_f'], seg_file.columns))) > 1:
sys.exit(
'missing required header f and could not replace with any one of f_acs')
else:
idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_f'], seg_file.columns)))
seg_file.rename(columns={headers['alternate_headers_f'][idx_replace[0][0]]: 'f'}, inplace=True)
print('changing header of seg file from ' + headers['alternate_headers_f'][idx_replace[0][0]] + ' to f')
if required_headers[i] == 'tau':
if np.sum(np.isfinite(is_member(headers['alternate_headers_tau'], seg_file.columns))) == 0 or np.sum(
np.isfinite(is_member(headers['alternate_headers_tau'], seg_file.columns))) > 1:
sys.exit(
'missing required header tau and could not replace with any one of CN')
else:
idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_tau'], seg_file.columns)))
seg_file.rename(columns={headers['alternate_headers_tau'][idx_replace[0][0]]: 'tau'}, inplace=True)
if headers['alternate_headers_tau'][idx_replace[0][0]] == 'LOG2_COPY_RATIO_POSTERIOR_50':
print('transforming log2 data tau column to 2 centered: 2^(CNratio)+1')
seg_file['tau'] = np.power(2,seg_file['tau'])+1
print('changing header of seg file from ' + headers['alternate_headers_tau'][idx_replace[0][0]] + ' to tau')
if required_headers[i] == 'n_probes':
if np.sum(np.isfinite(is_member(headers['alternate_headers_n_probes'], seg_file.columns))) == 0 or np.sum(
np.isfinite(is_member(headers['alternate_headers_n_probes'], seg_file.columns))) > 1:
sys.exit(
'missing required header n_probes and could not replace with any one of alternates')
else:
idx_replace = np.where(np.isfinite(is_member(headers['alternate_headers_n_probes'], seg_file.columns)))
seg_file.rename(columns={headers['alternate_headers_n_probes'][idx_replace[0][0]]: 'n_probes'}, inplace=True)
print('changing header of seg file from ' + headers['alternate_headers_n_probes'][idx_replace[0][0]] + ' to n_probes')
return seg_file
def read_indel_vcf(vcf,seg_table,indel_type):
content = []
if vcf[-2:] == 'gz':
with gzip.open(vcf, 'r') as f:
content = f.readlines()
else:
with open(vcf) as f:
content = f.readlines()
cols_type = {0: str}
for line in content:
if line[0] == '#' and line[1] != '#':
headerline = line.split('\t')
break
if indel_type.lower() == 'strelka':
indel_table = pd.read_csv(vcf, sep='\t', comment='#', header=None, low_memory=False, dtype=cols_type)
indel_table.rename(columns={0: 'contig', 1: 'position',2:'ID',3:'REF',4:'ALT',5:'QUAL',7:'INFO', 8: 'format', 6: 'filter', 9: headerline[9].lower(), 10: headerline[10][0:-1].lower()},
inplace=True)
counts_format = indel_table['format'][0].split(':')
depth_ix = counts_format.index('DP')
alt_indel_ix = counts_format.index('TIR')
ref_indel_ix = counts_format.index('TAR')
indel_table = indel_table[np.isfinite(is_member(indel_table['filter'], ['PASS', 'QSI_ref']))]
indel_table.reset_index(inplace=True, drop=True)
elif indel_type.lower() == 'mutect2':
indel_table = pd.read_csv(vcf, sep='\t', comment='#', header=None, low_memory=False, dtype=cols_type)
# CHROM POS ID REF ALT QUAL FILTER INFO FORMAT TUMOR NORMAL
normal_sample = 'normal'
tumor_sample = 'tumor'
for line in content:
if line[0:15] == '##normal_sample':
normal_sample = line.split('=')[1][0:-1]
if line[0:14] == '##tumor_sample':
tumor_sample = line.split('=')[1][0:-1]
if tumor_sample == 'tumor' and normal_sample == 'normal':
indel_table.rename(
columns={0: 'contig', 1: 'position', 2: 'ID', 3: 'REF', 4: 'ALT', 5: 'QUAL', 7: 'INFO', 8: 'format',
6: 'filter', 9: 'tumor', 10: 'normal'},
inplace=True)
else:
if tumor_sample == headerline[9]:
indel_table.rename(
columns={0: 'contig', 1: 'position', 2: 'ID', 3: 'REF', 4: 'ALT', 5: 'QUAL', 7: 'INFO', 8: 'format',
6: 'filter', 9: 'tumor', 10: 'normal'},
inplace=True)
elif tumor_sample == headerline[10][0:-1]:
indel_table.rename(
columns={0: 'contig', 1: 'position', 2: 'ID', 3: 'REF', 4: 'ALT', 5: 'QUAL', 7: 'INFO', 8: 'format',
6: 'filter', 9: 'normal', 10: 'tumor'},
inplace=True)
else:
print('failed to read MuTect 2 indels VCF')
sys.exit()
counts_format = indel_table['format'][0].split(':')
depth_ix = counts_format.index('AD')
indel_table = indel_table[np.isfinite(is_member(indel_table['filter'], ['PASS', 'alt_allele_in_normal','artifact_in_normal']))]
indel_table.reset_index(inplace=True, drop=True)
elif indel_type.lower() == 'sanger':
indel_table = pd.read_csv(vcf, sep='\t', comment='#', header=None, low_memory=False, dtype=cols_type)
# CHROM POS ID REF ALT QUAL FILTER INFO FORMAT NORMAL TUMOUR
indel_table.rename(columns={0: 'contig', 1: 'position',2:'ID',3:'REF',4:'ALT',5:'QUAL',7:'INFO',8: 'format', 6: 'filter', 9: headerline[9].lower(), 10: headerline[10][0:-1].lower()},
inplace=True)
b1 = np.logical_or.reduce([indel_table['filter'] == 'F012', indel_table['filter'] == 'F012;F015'])
b2 = np.logical_or.reduce([indel_table['filter'] == 'PASS', indel_table['filter'] == 'F015'])
indel_table = indel_table[np.logical_or.reduce([b1, b2])]
indel_table.reset_index(inplace=True,drop=True)
format_string = indel_table['format'][0].split(':')
total_depth_idx = [format_string.index('PR'), format_string.index('NR')]
alt_count_idx = [format_string.index('PU'), format_string.index('NU')]
# parsing format line and file to determine required alt and ref columns
# we use "tier 1" read counts for varaints
n_depth = np.zeros([len(indel_table), 1])
n_alt_count = np.zeros([len(indel_table), 1])
n_ref_count = np.zeros([len(indel_table), 1])
t_depth = np.zeros([len(indel_table), 1])
t_alt_count = np.zeros([len(indel_table), 1])
t_ref_count = np.zeros([len(indel_table), 1])
for index, row in indel_table.iterrows():
spl_n = row['normal'].split(':')
spl_t = row['tumor'].split(':')
if indel_type.lower() == 'strelka':
n_depth[index] = int(spl_n[depth_ix])
n_alt_count[index] = int(spl_n[alt_indel_ix].split(',')[0])
n_ref_count[index] = int(spl_n[ref_indel_ix].split(',')[0])
t_depth[index] = int(spl_t[depth_ix])
t_alt_count[index] = int(spl_t[alt_indel_ix].split(',')[0])
t_ref_count[index] = int(spl_t[ref_indel_ix].split(',')[0])
if indel_type.lower() == 'mutect2':
n_alt_count[index] = int(spl_n[depth_ix].split(',')[1])
n_ref_count[index] = int(spl_n[depth_ix].split(',')[0])
n_depth[index] = n_alt_count[index]+n_ref_count[index]
t_alt_count[index] = int(spl_t[depth_ix].split(',')[1])
t_ref_count[index] = int(spl_t[depth_ix].split(',')[0])
t_depth[index] = t_alt_count[index] + t_ref_count[index]
if indel_type.lower() == 'sanger':
n_depth[index] = np.sum([int(spl_n[i]) for i in total_depth_idx])
n_alt_count[index] = np.sum([int(spl_n[i]) for i in alt_count_idx])
n_ref_count[index] = n_depth[index] - n_alt_count[index]
t_depth[index] = np.sum([int(spl_t[i]) for i in total_depth_idx])
t_alt_count[index] = np.sum([int(spl_t[i]) for i in alt_count_idx])
t_ref_count[index] = t_depth[index] - t_alt_count[index]
if len(indel_table) == 0:
indel_table = pd.DataFrame(index=[0],columns=['contig', 'position','ID','REF','ALT','QUAL','INFO','format', 'filter',headerline[9].lower(), headerline[10][0:-1].lower(),
't_depth','t_alt_count','t_ref_count','n_alt_count','n_depth','n_ref_count','tau','f_acs','Chromosome','genomic_coord_x'])
else:
indel_table['t_depth'] = t_alt_count + t_ref_count
indel_table['t_alt_count'] = t_alt_count
indel_table['t_ref_count'] = t_ref_count
indel_table['n_depth'] = n_alt_count + n_ref_count
indel_table['n_alt_count'] = n_alt_count
indel_table['n_ref_count'] = n_ref_count
# only consider sites which were rejected as germline or were passed
    if isinstance(indel_table['contig'][0], str):
indel_table['Chromosome'] = chr2num(indel_table['contig'])
else:
indel_table['Chromosome'] = indel_table['contig']-1
# add linear position field and consider only sites which are rejected as germline i.e. PASS or QSI_ref
indel_table = indel_table[np.isfinite(indel_table['Chromosome'])]
indel_table.reset_index(inplace=True, drop=True)
indel_table['genomic_coord_x'] = hg19_to_linear_positions(indel_table['Chromosome'], indel_table['position'])
# annotate with acs data
f_acs = np.zeros([len(indel_table), 1]) + 0.5
tau = np.zeros([len(indel_table), 1]) + 2
for i, r in seg_table.iterrows():
f_acs[np.logical_and(np.array(indel_table['genomic_coord_x']) >= r['genomic_coord_start'],
np.array(indel_table['genomic_coord_x']) <= r['genomic_coord_end'])] = r.f
tau[np.logical_and(np.array(indel_table['genomic_coord_x']) >= r['genomic_coord_start'],
np.array(indel_table['genomic_coord_x']) <= r[
'genomic_coord_end'])] = r.tau + 0.001
indel_table['tau'] = tau
indel_table['f_acs'] = f_acs
return indel_table
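# Illustrative usage (hypothetical file path; indel_type must be one of
# 'strelka', 'mutect2', or 'sanger'):
#   indel_table = read_indel_vcf('somatic_indels.vcf.gz', seg_table, 'strelka')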
def build_exac_pickle(exac_file):
# create ExAC site dictionary from VCF file
exac_site_info = {}
print('Filtering ExAC sites from candidate mutations')
with gzip.open(exac_file, "rb") as vcf_file:
for line_index, line in enumerate(vcf_file):
if line_index % 10000 == 0:
print('processed ' + str(line_index) + ' ExAC sites')
spl = line.strip("\n").split("\t")
# line is a comment
if line[0] == '#':
continue
site = spl[0] + '_' + spl[1]
info = spl[7]
info_dict = {}
info_dict['ref_allele'] = spl[3]
info_dict['alt_allele'] = spl[4]
for field in info.strip("\n").split(";"):
if field.split("=")[0] not in ['AC', 'AF']: continue
                try:
                    info_dict[field.split("=")[0]] = field.split("=")[1]
                except IndexError:
                    # malformed INFO field without a value; skip it
                    pass
# select only sites where population allele fractions exceeds 0.01
if np.sum(np.array(info_dict['AF'].split(','), dtype=float)) >= 0.01:
exac_site_info[site] = info_dict
with open('exac.pickle', 'wb') as handle:
pickle.dump(exac_site_info, handle, protocol=pickle.HIGHEST_PROTOCOL)
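# Illustrative usage (hypothetical ExAC VCF path; writes exac.pickle into the
# working directory):
#   build_exac_pickle('ExAC.r1.sites.vep.vcf.gz')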
def remove_exac_sites_from_call_stats(call_stats_table, exac_file):
# use ExAC vcf to filter likely germline variants out of candidate sites
with open(exac_file, 'rb') as handle:
exac_dict = pickle.load(handle)
keep = np.ones_like(call_stats_table['position'], dtype=bool)
for index, row in call_stats_table.iterrows():
key = str(row['contig']) + '_' + str(row['position'])
if row['judgement'] == 'REJECT':
            if key in exac_dict:
                # site present in ExAC at population AF >= 0.01; treat as likely germline
                keep[index] = False
return call_stats_table[keep]
```
|
{
"source": "Jette16/spacy-course",
"score": 3
}
|
#### File: exercises/de/exc_03_10_02.py
```python
from spacy.lang.de import German
from spacy.tokens import Span
nlp = German()
# Definiere die Methode
def to_html(span, tag):
# Verpacke den Text der Span in einem HTML-Tag und gebe ihn zurück
return f"<{tag}>{span.text}</{tag}>"
# Registriere die Span-Erweiterung "to_html" mit der Methode to_html
____.____(____, ____=____)
# Verarbeite den Text und rufe die Methode to_html mit dem Tag "strong" auf
doc = nlp("<NAME>, dies ist ein Satz.")
span = doc[0:2]
print(____)
```
#### File: exercises/de/solution_03_10_01.py
```python
from spacy.lang.de import German
from spacy.tokens import Doc
nlp = German()
# Definiere die Getter-Funktion
def get_has_number(doc):
# Gebe zurück, ob einer der Tokens im Doc True für token.like_num zurückgibt
return any(token.like_num for token in doc)
# Registriere die Doc-Erweiterung "has_number" mit Getter-Funktion get_has_number
Doc.set_extension("has_number", getter=get_has_number)
# Verarbeite den Text und drucke den Wert des Attributs has_number
doc = nlp("Das Museum war ab 2012 fünf Jahre lang geschlossen.")
print("has_number:", doc._.has_number)
```
#### File: exercises/de/test_01_02_02.py
```python
def test():
import spacy.tokens
import spacy.lang.de
assert isinstance(
nlp, spacy.lang.de.German
), "Das nlp-Objekt sollte eine Instanz der Klasse German sein."
assert isinstance(
doc, spacy.tokens.Doc
), "Hast du den Text mit dem nlp-Objekt verarbeitet und ein Doc erstellt?"
assert "print(doc.text)" in __solution__, "Hast du doc.text gedruckt?"
__msg__.good("Gut gemacht!")
```
#### File: exercises/de/test_01_03_02.py
```python
def test():
assert (
doc.text == "Ich mag niedliche Katzen und Faultiere."
), "Bist du dir sicher, dass du den Text korrekt verarbeitet hast?"
assert (
niedliche_katzen == doc[2:4]
), "Bist du dir sicher, dass du die korrekte Span für niedliche_katzen ausgewählt hast?"
assert (
niedliche_katzen_und_faultiere == doc[2:6]
), "Bist du dir sicher, dass du die korrekte Span für niedliche_katzen_und_faultiere ausgewählt hast?"
__msg__.good("Gut gemacht!")
```
#### File: exercises/de/test_01_07.py
```python
def test():
assert "spacy.load" in __solution__, "Rufst du spacy.load auf?"
assert nlp.meta["lang"] == "de", "Lädst du das korrekte Modell?"
assert nlp.meta["name"] == "core_news_sm", "Lädst du das korrekte Modell?"
assert "nlp(text)" in __solution__, "Verarbeitest du den Text korrekt?"
assert "print(doc.text)" in __solution__, "Druckst du den Text des Doc?"
__msg__.good(
"Gut gemacht! Jetzt wo du das Laden von Modellen geübt hast, lass uns "
"mal ein paar ihrer Vorhersagen anschauen."
)
```
#### File: exercises/de/test_02_09.py
```python
def test():
assert (
'spacy.load("en_core_web_md")' in __solution__
), "Lädst du das mittelgroße Modell?"
assert "doc[1].vector" in __solution__, "Greifst du auf den richtigen Vector zu?"
__msg__.good(
"Bravo! In der nächsten Übung wirst du spaCy benutzen, um mithilfe von "
"Wortvektoren Ähnlichkeiten von Dokumenten, Spans und Tokens zu berechnen."
)
```
#### File: exercises/de/test_02_10_02.py
```python
def test():
assert (
"token1.similarity(token2)" or "token2.similarity(token1)" in __solution__
), "Vergleichst du die Ähnlichkeit der zwei Tokens?"
assert (
0 <= float(similarity) <= 1
), "Der Ähnlichkeitswert muss eine Zahl zwischen 0 und 1 sein. Hast du ihn korrekt berechnet?"
__msg__.good("Prima!")
```
#### File: exercises/de/test_03_09_01.py
```python
def test():
assert Token.has_extension(
"is_country"
), "Hast du die Token-Erweiterung korrekt registriert?"
ext = Token.get_extension("is_country")
assert ext[0] == False, "Hast du den default-Wert korrekt angegeben?"
country_values = [False, False, False, True, False]
assert [
t._.is_country for t in doc
] == country_values, "Hast du den Wert für den richtigen Token geändert?"
assert (
"print([(token.text, token._.is_country)" in __solution__
), "Druckst du die richtigen Token-Attribute?"
__msg__.good("Gut gemacht!")
```
#### File: exercises/de/test_03_11.py
```python
def test():
assert Span.has_extension(
"wikipedia_url"
), "Hast du die Span-Erweiterung korrekt registriert?"
ext = Span.get_extension("wikipedia_url")
assert ext[2] is not None, "Hast du die Getter-Funktion korrekt angegeben?"
assert (
"getter=get_wikipedia_url" in __solution__
), "Hast du die Funktion get_wikipedia_url as Getter-Funktion angegeben?"
assert (
"(ent.text, ent._.wikipedia_url)" in __solution__
), "Greifst du auf das richtige Attribut zu?"
assert (
doc.ents[-1]._.wikipedia_url
== "https://de.wikipedia.org/w/index.php?search=David_Bowie"
), "Es scheint, als ob der Wert des Attributs nicht korrekt ist."
__msg__.good(
"Sehr schön! Du hast nun eine Pipeline-Komponente, die vom Modell "
"vorhergesagte Entitäten verwendet, um Wikipedia-URLs zu generieren und "
"diese als benutzerdefiniertes Attribut hinzufügt. Versuche mal, den Link "
"in deinem Browser zu öffnen und schau, was passiert!"
)
```
#### File: exercises/de/test_03_12.py
```python
def test():
assert (
len(nlp.pipeline) == 1 and nlp.pipe_names[0] == "countries_component"
), "Hast du die Komponente korrekt hinzugefügt?"
assert Span.has_extension(
"capital"
), "Hast du die Span-Erweiterung korrekt registriert?"
ext = Span.get_extension("capital")
assert (
ext[2] is not None
), "Hast du die Funktion get_capital als Getter-Funktion angegeben?"
assert (
"(ent.text, ent.label_, ent._.capital)" in __solution__
), "Druckst du die richtigen Attribute?"
assert (
len(doc.ents) == 2
), "Es sieht so aus, als ob die Entitäten nicht korrekt hinzugefügt wurden?"
assert (
doc.ents[0]._.capital == "Prag" and doc.ents[1]._.capital == "Bratislava"
), "Das Attribut capital scheint nicht korrekt zu funktionieren."
__msg__.good(
"Bravo! Dies ist ein gutes Beispiel dafür, wie du strukturierte Daten "
"zu deiner spaCy-Pipeline hinzufügen kannst."
)
```
#### File: exercises/de/test_04_03.py
```python
def test():
assert len(pattern1) == 2, "pattern1 sollte zwei Tokens beschreiben."
assert len(pattern2) == 2, "pattern2 sollte zwei Tokens beschreiben."
assert (
len(pattern1[0]) == 1
), "Der erste Token von pattern1 sollte ein einziges Attribut beschreiben."
assert any(
pattern1[0].get(l) == "iphone" for l in ("LOWER", "lower")
), "Der erste Token von pattern1 sollte die kleingeschriebene Form von 'iphone' beschreiben."
assert (
len(pattern1[1]) == 1
), "Der zweite Token von pattern1 sollte ein einziges Attribut beschreiben."
assert any(
pattern1[1].get(l) == "x" for l in ("LOWER", "lower")
), "Der zweite Token von pattern1 sollte die kleingeschriebene form von 'x' beschreiben."
assert (
len(pattern2[0]) == 1
), "Der erste Token von pattern2 sollte ein einziges Attribut beschreiben."
assert any(
pattern2[0].get(l) == "iphone" for l in ("LOWER", "lower")
), "Der erste Token von pattern2 sollte die kleingeschriebene Form von 'iphone' beschreiben."
assert (
len(pattern2[1]) == 1
), "Der zweite Token von pattern2 sollte ein einziges Attribute beschreiben."
assert any(
pattern2[1].get(l) == True for l in ("IS_DIGIT", "is_digit")
), "Der zweite Token von pattern2 sollte eine Ziffer beschreiben."
__msg__.good(
"Super! Lass uns nun diese Patterns verwenden, um schnell ein paar "
"Trainingsdaten für unser Modell zu erstellen."
)
```
#### File: exercises/de/test_04_07.py
```python
def test():
assert "nlp.begin_training()" in __solution__, "Hast du nlp.begin_training aufgerufen?"
assert (
"range(10)" in __solution__
), "Trainierst du mit der richten Anzahl an Iterationen?"
assert (
"spacy.util.minibatch(TRAINING_DATA" in __solution__
), "Verwendest du die minibatch Hilfsfunktion, um Batches zu erstellen?"
assert (
"text for text" in __solution__ and "entities for text" in __solution__
), "Teilst du die Texte und Annotationen korrekt auf?"
assert "nlp.update" in __solution__, "Aktualisierst du das Modell mit den Beispielen?"
__msg__.good(
'Gut gemacht – du hast erfolgreich dein erstes spaCy-Modell trainiert. '
'Die Zahlen, die du hier siehst werden auch "loss" genannt und bezeichnen '
'quasi die Arbeit, die der Optimizer noch zu erledigen hat. Je niedriger '
'die Zahl, desto besser. Im echten Leben würdest du normalerweise *viel '
'mehr* Daten verwenden als hier, idealerweise ein paar Hundert oder sogar '
'ein paar Tausend Beispiele.'
)
```
#### File: exercises/de/test_04_11_01.py
```python
def test():
assert (
len(TRAINING_DATA) == 3
), "Irgendetwas scheint mit deinen Daten nicht zu stimmen. Erwartet werden 3 Beispiele."
assert all(
len(entry) == 2 and isinstance(entry[1], dict) for entry in TRAINING_DATA
), "Die Trainingsdaten haben nicht das richtige Format. Erwartet wird eine Liste von Tuples, bestehend aus Text und einem Dictionary als zweites Element."
ents = [entry[1].get("entities", []) for entry in TRAINING_DATA]
    assert len(ents[0]) == 2, "Das erste Beispiel sollte zwei Entitäten enthalten."
ent_0_0 = (0, 6, "WEBSITE")
ent_0_1 = (11, 18, "WEBSITE")
assert (
ents[0][0] == ent_0_0
), "Überprüfe nochmal die erste Entität im ersten Beispiel."
assert (
ents[0][1] == ent_0_1
), "Überprüfe nochmal die zweite Entität im ersten Beispiel."
assert len(ents[1]) == 1, "Das zweite Beispiel sollte eine Entität enthalten."
assert ents[1] == [
(28, 35, "WEBSITE",)
], "Überprüfe nochmal die Entität im zweiten Beispiel."
assert len(ents[2]) == 1, "Das dritte Beispiel sollte eine Entität enthalten."
assert ents[2] == [
(15, 21, "WEBSITE",)
], "Überprüfe nochmal die Entität im dritten Beispiel."
__msg__.good("Sehr schön!")
```
#### File: exercises/en/test_01_12_03.py
```python
def test():
assert (
len(pattern) == 3
), "The pattern should describe three tokens (three dictionaries)."
assert (
isinstance(pattern[0], dict)
and isinstance(pattern[1], dict)
and isinstance(pattern[2], dict)
), "Each entry in a pattern should be a dictionary."
assert (
len(pattern[0]) == 1 and len(pattern[1]) == 1
), "The first two pattern entries should have only one key."
assert len(pattern[2]) == 2, "The third pattern entry should have two keys."
assert any(
pattern[0].get(key) == "ADJ" for key in ["pos", "POS"]
), "Are you matching on the first token's part-of-speech tag with the correct label?"
assert any(
pattern[1].get(key) == "NOUN" for key in ["pos", "POS"]
), "Are you matching on the second token's part-of-speech tag with the correct label?"
assert any(
pattern[2].get(key) == "NOUN" for key in ["pos", "POS"]
), "Are you matching on the third token's part-of-speech tag with the correct label?"
assert (
pattern[2].get("OP") == "?"
), "Are you using the correct operator for the third token?"
__msg__.good(
"Great work – those were some pretty complex patterns! Let's move on "
"to the next chapter and take a look at how to use spaCy for more "
"advanced text analysis."
)
```
#### File: exercises/en/test_02_14.py
```python
def test():
assert (
"from spacy.matcher import PhraseMatcher" in __solution__
), "Did you import the PhraseMatcher correctly?"
assert (
"PhraseMatcher(nlp.vocab)" in __solution__
), "Did you initialize the PhraseMatcher correctly?"
assert "matcher(doc)" in __solution__, "Did you call the matcher on the doc?"
assert len(matches) == 2, "Incorrect number of matches – expected 2."
__msg__.good("Well done! Let's use this matcher to add some custom entities.")
```
#### File: exercises/en/test_03_12.py
```python
def test():
assert (
len(nlp.pipeline) == 1 and nlp.pipe_names[0] == "countries_component"
), "Did you add the component correctly?"
assert Span.has_extension("capital"), "Did you set the extension on the span?"
ext = Span.get_extension("capital")
assert ext[2] is not None, "Did you register get_capital as the getter?"
assert (
"(ent.text, ent.label_, ent._.capital)" in __solution__
), "Are you printing the correct attributes?"
assert len(doc.ents) == 2, "Looks like the entities didn't get set correctly?"
assert (
doc.ents[0]._.capital == "Prague" and doc.ents[1]._.capital == "Bratislava"
), "Looks like the capital attribute isn't working correctly."
__msg__.good(
"Well done! This is a great example of how you can add structured "
"data to your spaCy pipeline."
)
```
#### File: exercises/en/test_03_14_01.py
```python
def test():
assert (
"for doc in nlp.pipe(TEXTS)" in __solution__
), "Are you iterating over docs yielded by nlp.pipe?"
__msg__.good("Nice!")
```
#### File: exercises/en/test_04_04.py
```python
def test():
assert (
"for doc in nlp.pipe(TEXTS)" in __solution__
), "Are you calling nlp.pipe on the texts?"
assert (
"TRAINING_DATA.append" in __solution__
), "Are you appending to the TRAINING_DATA?"
assert (
len(TRAINING_DATA) == 6
), "Looks like the training data isn't correct. Expected 6 examples."
for entry in TRAINING_DATA:
assert (
len(entry) == 2
and isinstance(entry[0], str)
and isinstance(entry[1], dict)
and "entities" in entry[1]
), "Looks like examples have the wrong format. It should be a tuple with a text and a dict with the key 'entities'."
assert TRAINING_DATA[0][1]["entities"] == [
(20, 28, "GADGET")
], "Double-check the entities in example 1."
assert TRAINING_DATA[1][1]["entities"] == [
(0, 8, "GADGET")
], "Double-check the entities in example 2."
assert TRAINING_DATA[2][1]["entities"] == [
(28, 36, "GADGET")
], "Double-check the entities in example 3."
assert TRAINING_DATA[3][1]["entities"] == [
(4, 12, "GADGET")
], "Double-check the entities in example 4."
assert TRAINING_DATA[4][1]["entities"] == [
(0, 9, "GADGET"),
(13, 21, "GADGET"),
], "Double-check the entities in example 5."
assert (
TRAINING_DATA[5][1]["entities"] == []
), "Double-check the entities in example 6."
__msg__.good(
"Well done! Before you train a model with the data, you always want "
"to double-check that your matcher didn't identify any false "
"positives. But that process is still much faster than doing "
"*everything* manually."
)
```
#### File: exercises/en/test_04_11_02.py
```python
def test():
assert (
len(TRAINING_DATA) == 3
), "Looks like there's something wrong with the data. Expected 3 examples."
assert all(
len(entry) == 2 and isinstance(entry[1], dict) for entry in TRAINING_DATA
), "Incorrect training data format. Expected a list of tuples where the second element is a dict."
ents = [entry[1].get("entities", []) for entry in TRAINING_DATA]
assert all(len(e) == 2 for e in ents), "Expected all examples to have two entities"
assert any(
e == (0, 9, "PERSON") for e in ents[1]
), "Did you label the PERSON correctly?"
assert any(
e == (15, 29, "PERSON") for e in ents[2]
), "Did you label the PERSON correctly?"
__msg__.good(
"Good job! After including both examples of the new WEBSITE "
"entities, as well as existing entity types like PERSON, the model "
"now performs much better."
)
```
#### File: exercises/es/exc_03_06.py
```python
import spacy
# Define el componente personalizado
def length_component(doc):
# Obtén la longitud del doc
doc_length = ____
print(f"Este documento tiene {doc_length} tokens.")
# Devuelve el doc
____
# Carga el modelo pequeño de español
nlp = spacy.load("es_core_news_sm")
# Añade el componente en el primer lugar del pipeline e imprime
# los nombres de los pipes en pantalla
____.____(____)
print(nlp.pipe_names)
# Procesa un texto
doc = ____
```
#### File: exercises/es/test_01_02_02.py
```python
def test():
import spacy.tokens
import spacy.lang.de
assert isinstance(
nlp, spacy.lang.de.German
), "El objeto nlp debería ser un instance de la clase de alemán."
assert isinstance(
doc, spacy.tokens.Doc
), "¿Procesaste el texto con el objeto nlp para crear un doc?"
assert "print(doc.text)" in __solution__, "¿Imprimiste en pantalla el doc.text?"
__msg__.good("Sehr gut! :)")
```
#### File: exercises/es/test_01_03_02.py
```python
def test():
assert (
doc.text == "Me gustan las panteras negras y los leones."
), "¿Procesaste el texto correctamente?"
assert (
panteras_negras == doc[3:5]
), "¿Seleccionaste el span correcto para 'panteras_negras'?"
assert (
panteras_negras_y_leones == doc[3:8]
), "¿Seleccionaste el span correcto para 'panteras_negras_y_leones'?"
__msg__.good("¡Buen trabajo!")
```
#### File: exercises/es/test_01_08_02.py
```python
def test():
assert "for ent in doc.ents" in __solution__, "¿Estás iterando sobre las entidades?"
assert (
"print(ent.text, ent.label_)" in __solution__
), "¿Estás imprimiendo en pantalla el texto y el label?"
__msg__.good(
"¡Muy buen trabajo! Hasta ahora el modelo a estado correcto todas las veces. "
"En el siguiente ejercicio verás que sucede cuando el modelo se equivoca "
"y cómo ajustarlo."
)
```
#### File: exercises/es/test_01_11.py
```python
def test():
import spacy.matcher
assert isinstance(
matcher, spacy.matcher.Matcher
), "¿Estás inicializando el matcher correctamente?"
assert (
"Matcher(nlp.vocab)" in __solution__
), "¿Estás inicializando el matcher correctamente con el vocabulario compartido?"
assert (
len(pattern) == 2
), "El patrón debería describir dos tokens (dos diccionarios)."
assert isinstance(pattern[0], dict) and isinstance(
pattern[1], dict
), "Cada entrada en el patrón debería ser un diccionario."
assert (
len(pattern[0]) == 1 and len(pattern[1]) == 1
), "Cada entrada en el patrón debería tener solo un key."
assert any(
pattern[0].get(key) == "adidas" for key in ["text", "TEXT"]
), "¿Estás encontrando el texto del token?"
assert any(
pattern[1].get(key) == "zx" for key in ["text", "TEXT"]
), "¿Estás encontrando el texto del token?"
assert (
'matcher.add("ADIDAS_ZX_PATTERN"' in __solution__
), "¿Estás añadiendo el patrón correctamente?"
assert (
"matches = matcher(doc)" in __solution__
), "¿Estás llamando al matcher sobre el doc?"
__msg__.good(
"¡Bien hecho! Encontraste un resultado exitosamente: los tokens en doc[14:16] "
'que describen el span de "adidas ZX".'
)
```
#### File: exercises/es/test_02_05_02.py
```python
def test():
assert (
"from spacy.tokens import Doc" in __solution__
), "¿Estás importando la clase Doc correctamente?"
assert (
len(spaces) == 5
), "Parece que el número de espacios no concuerda con el número de palabras."
assert all(isinstance(s, bool) for s in spaces), "Los espacios tienen que ser booleanos."
assert [int(s) for s in spaces] == [0, 0, 1, 0, 0], "¿Están correctos los espacios?"
assert doc.text == "¡Vamos, empieza!", "¿Creaste el Doc correctamente?"
__msg__.good("¡Bien!")
```
#### File: exercises/es/test_03_09_01.py
```python
def test():
assert Token.has_extension(
"is_country"
), "¿Registraste la extensión en el token?"
ext = Token.get_extension("is_country")
assert ext[0] == False, "¿Añadiste correctamente el valor por defecto?"
country_values = [False, False, True, False]
assert [
t._.is_country for t in doc
] == country_values, "¿Cambiaste el valor en el token correcto?"
assert (
"print([(token.text, token._.is_country)" in __solution__
), "¿Estás imprimiendo en pantalla los atributos del token correctos?"
__msg__.good("¡Bien hecho!")
```
#### File: exercises/es/test_03_16_02.py
```python
def test():
assert (
'with nlp.disable_pipes("tagger", "parser")' in __solution__
or 'with nlp.disable_pipes("parser", "tagger")' in __solution__
), "¿Estás usando nlp.disable_pipes con los componentes correctos?"
__msg__.good(
"¡Perfecto! Ahora que has practicado los consejos y trucos de rendimiento, "
"puedes pasar al siguiente capítulo y entrenar modelos de redes neuronales de spaCy."
)
```
#### File: exercises/fr/exc_03_11.py
```python
import spacy
from spacy.tokens import Span
nlp = spacy.load("fr_core_news_sm")
def get_wikipedia_url(span):
# Retourne une URL Wikipédia si le span possède un des libellés
if ____ in ("PER", "ORG", "GPE", "LOCATION"):
entity_text = span.text.replace(" ", "_")
return "https://fr.wikipedia.org/w/index.php?search=" + entity_text
# Définis l'extension de Span wikipedia_url avec le getter get_wikipedia_url
____.____(____, ____=____)
doc = nlp(
"Pendant plus de cinquante ans depuis ses tout premiers enregistrements "
"jusqu'à son dernier album, <NAME> a toujours été à l'avant-garde "
"de la culture contemporaine."
)
for ent in doc.ents:
# Affiche le text et l'URL Wikipédia de l'entité
print(____, ____)
```
#### File: exercises/fr/solution_03_06.py
```python
import spacy
# Définis le composant personnalisé
def length_component(doc):
# Obtiens la longueur du doc
doc_length = len(doc)
print(f"Ce document comporte {doc_length} tokens.")
# Retourne le doc
return doc
# Charge le petit modèle français
nlp = spacy.load("fr_core_news_sm")
# Ajoute le composant en premier dans le pipeline
# et affiche les noms des composants
nlp.add_pipe(length_component, first=True)
print(nlp.pipe_names)
# Traite un texte
doc = nlp("Ceci est une phrase.")
```
#### File: exercises/fr/solution_03_09_02.py
```python
from spacy.lang.fr import French
from spacy.tokens import Token
nlp = French()
# Définis la fonction getter qui prend en argument un token
# et retourne son texte inversé
def get_reversed(token):
return token.text[::-1]
# Déclare l'extension de propriété de Token "reversed"
# avec le getter get_reversed
Token.set_extension("reversed", getter=get_reversed)
# Traite le texte et affiche l'attribut inversé pour chaque token
doc = nlp("Toutes les généralisations sont fausses, celle-ci aussi.")
for token in doc:
print("reversed :", token._.reversed)
```
#### File: exercises/fr/test_02_06.py
```python
def test():
assert (
"import Doc, Span" or "import Span, Doc" in __solution__
), "As-tu correctement importé Doc et Span ?"
assert doc.text == "<NAME> <NAME>", "As-tu correctement créé le Doc ?"
assert span.text == "<NAME>", "As-tu correctement créé le span ?"
assert span.label_ == "PERSON", "As-tu ajouté le label PERSON au span?"
assert "doc.ents =" in __solution__, "As-tu réécrit doc.ents ?"
assert len(doc.ents) == 1, "As-tu ajouté le span à doc.ents ?"
assert (
list(doc.ents)[0].text == "<NAME>"
), "As-tu ajouté le span à doc.ents ?"
__msg__.good(
"Parfait ! Savoir créer manuellement des objets de spaCy et modifier "
"les entités sera utile plus tard quand tu créeras tes propres "
"pipelines d'extraction d'informations."
)
```
#### File: exercises/fr/test_02_07.py
```python
def test():
assert "token_texts" not in __solution__, "As-tu supprimé la variable token_texts ?"
assert "pos_tags" not in __solution__, "As-tu supprimé la variable pos_tags ?"
assert (
"token.pos_ ==" in __solution__
), "Vérifies-tu si l'étiquette de partie de discours du token est un nom propre ?"
assert (
"token.i + 1" in __solution__ or "token.i+1" in __solution__
), "Utilises-tu l'attribut index du token pour vérifier le token suivant ?"
__msg__.good(
"Excellent travail ! Si la solution fonctionne bien ici pour l'exemple "
"donné, il y a encore des choses qui peuvent être améliorées. Si le "
"doc se termine par un nom propre, doc[token.i + 1] va générer une "
"erreur. Pour être certain de pouvoir généraliser, tu devrais d'abord "
"vérifier si token.i + 1 < len(doc)."
)
```
#### File: exercises/fr/test_02_10_01.py
```python
def test():
assert (
"doc1.similarity(doc2)" or "doc2.similarity(doc1)" in __solution__
), "Compares-tu la similarité entre les deux docs ?"
assert (
0 <= float(similarity) <= 1
), "La valeur de similarité doit être un nombre flottant. L'as-tu calculé correctement ?"
__msg__.good("Bien joué !")
```
#### File: exercises/fr/test_02_15.py
```python
def test():
assert (
"list(doc.ents) + [span]" in __solution__
), "As-tu ajouté le span à doc.ents ?"
assert (
"span_root_head = span.root.head" in __solution__
), "Obtiens-tu la tête du token racine du span ?"
assert (
"print(span_root_head.text" in __solution__
), "Affiches-tu le texte de la tête de la racine du span ?"
ents = [ent for ent in doc.ents if ent.label_ == "GPE"]
assert len(ents) == 19, "Nombre incorrect d'entités. Attendu 19."
__msg__.good(
"Bien joué ! Maintenant que tu as pratiqué la combinaison de "
"prédictions avec des extractions basées sur des règles, tu es prêt "
"pour le chapitre 3, qui va tout t'apprendre sur les pipelines de "
"traitement de spaCy."
)
```
#### File: exercises/fr/test_03_14_01.py
```python
def test():
assert (
"for doc in nlp.pipe(TEXTS)" in __solution__
), "Itères-tu sur les docs générés par nlp.pipe ?"
__msg__.good("Joli !")
```
#### File: exercises/fr/test_04_03.py
```python
def test():
assert len(pattern1) == 2, "pattern1 doit décrire deux tokens."
assert len(pattern2) == 2, "pattern2 doit décrire deux tokens."
assert (
len(pattern1[0]) == 1
), "Le premier token de pattern1 nécessite un attribut seulement."
assert any(
pattern1[0].get(l) == "iphone" for l in ("LOWER", "lower")
), "Le premier token de pattern1 doit rechercher 'iphone' en minuscules."
assert (
len(pattern1[1]) == 1
), "Le second token de pattern1 nécessite un attribut seulement."
assert any(
pattern1[1].get(l) == "x" for l in ("LOWER", "lower")
), "Le second token de pattern1 doit rechercher 'x' en minuscules."
assert (
len(pattern2[0]) == 1
), "Le premier token de pattern2 nécessite un attribut seulement."
assert any(
pattern2[0].get(l) == "iphone" for l in ("LOWER", "lower")
), "Le premier token de pattern2 doit rechercher 'iphone' en minuscules."
assert (
len(pattern2[1]) == 1
), "Le second token de pattern2 nécessite un attribut seulement."
assert any(
pattern2[1].get(l) == True for l in ("IS_DIGIT", "is_digit")
), "Le second token de pattern2 doit rechercher un nombre."
__msg__.good(
"Bien ! Mainteant utilisons ces motifs pour démarrer rapidement un "
"apprentissage de données pour notre modèle."
)
```
#### File: exercises/fr/test_general.py
```python
import spacy
from spacy.matcher import Matcher
import pytest
@pytest.fixture
def nlp():
return spacy.load("fr_core_news_sm")
def test_01_08_02_predictions(nlp):
text = "Apple a été créée en 1976 par <NAME>, <NAME> et <NAME>."
doc = nlp(text)
ents = [(ent.text, ent.label_) for ent in doc.ents]
assert len(ents) == 4
assert ents[0] == ("Apple", "ORG")
assert ents[1] == ("<NAME>", "PER")
assert ents[2] == ("<NAME>", "PER")
assert ents[3] == ("<NAME>", "PER")
def test_01_09_predictions(nlp):
text = "Le constructeur Citröen présente la e-Méhari Courrèges au public."
doc = nlp(text)
ents = [(ent.text, ent.label_) for ent in doc.ents]
assert len(ents) == 1
assert ents[0] == ("Citröen", "MISC")
assert doc[5].ent_type == 0
assert doc[6].ent_type == 0
def test_slides_01_03(nlp):
doc = nlp("Avant elle mangeait des pâtes. Désormais elle mange des légumes.")
pattern = [{"LEMMA": "manger", "POS": "VERB"}, {"POS": "DET"}, {"POS": "NOUN"}]
matcher = Matcher(nlp.vocab)
matcher.add("TEST", None, pattern)
matches = [doc[start:end].text for _, start, end in matcher(doc)]
assert matches == ["mangeait des pâtes", "mange des légumes"]
def test_03_16_02_predictions(nlp):
text = (
"Le groupe aéronautique Airbus construit des avions et des "
"hélicoptères vendus dans le monde entier. Le siège opérationnel du "
"groupe est situé en France à Toulouse dans la région Occitanie."
)
doc = nlp(text)
assert [ent.text for ent in doc.ents] == ["Airbus", "France", "Toulouse", "Occitanie"]
```
#### File: exercises/ja/test_01_03_02.py
```python
def test():
assert doc.text == "私はツリーカンガルーとイルカが好きです。", "テキストをちゃんと処理しましたか?"
assert tree_kangaroos == doc[2:4], "ツリーカンガルーのスパンを選択しましたか?"
assert (
tree_kangaroos_and_dolphins == doc[2:6]
), "ツリーカンガルーとイルカのスパンを選択しましたか?"
__msg__.good("よくできました!")
```
#### File: exercises/ja/test_01_04.py
```python
def test():
assert "if token.like_num" in __solution__, "トークンのlike_num属性を調べましたか?"
assert 'next_token.text == "%"' in __solution__, "次のトークンが%記号であるかどうかを調べましたか?"
assert next_token.text == "%", "次のトークンが%記号であるかどうかを調べましたか?"
__msg__.good("よくできました!今見たように、トークンとその属性を使うことで非常に強力な解析が可能です。")
```
#### File: exercises/ja/test_02_09.py
```python
def test():
assert 'spacy.load("ja_core_news_md")' in __solution__, "中サイズのモデルをロードしましたか?"
assert "doc[7].vector" in __solution__, "正しいベクトルを取得しましたか?"
__msg__.good("Well done!次章では、単語ベクトルを用いたdoc、スパン、トークン間の類似度の予測を行います。")
```
#### File: exercises/ja/test_02_14.py
```python
def test():
assert (
"from spacy.matcher import PhraseMatcher" in __solution__
), "PhraseMatcherをちゃんとインポートしましたか?"
assert "PhraseMatcher(nlp.vocab)" in __solution__, "PhraseMatcherをきちんと初期化しましたか?"
assert "matcher(doc)" in __solution__, "docに対してMatcherを呼び出しましたか?"
assert len(matches) == 2, "matchesの数がただしくありません。正しくは2です。"
__msg__.good("Well done!このmatcherを使って、カスタムの固有表現を追加してみましょう。")
```
#### File: exercises/pt/test_01_07.py
```python
def test():
assert "spacy.load" in __solution__, "Você está usando spacy.load?"
assert nlp.meta["lang"] == "en", "Você está carregando o modelo correto?"
assert nlp.meta["name"] == "core_web_sm", "Você está carregando o modelo correto?"
assert "nlp(text)" in __solution__, "Você está processando o texto corretamente?"
assert "print(doc.text)" in __solution__, "Você está imprimindo o texto do documento Doc?"
__msg__.good(
"Muito bem! Agora que você já exercitou como carregar modelos, vamos dar"
"uma olhada nas previsões."
)
```
#### File: exercises/pt/test_01_09.py
```python
def test():
assert "in doc.ents" in __solution__, "Você está iterando nas entidades?"
assert iphone_x.text == "iPhone X", "Verifique se iphone_x aponta para o intervalo de tokens correto."
__msg__.good(
"Perfeito! É claro que você não precisa fazer tudo isso manualmente. No "
"próximo exercício, você vai conhecer o comparador baseado em regras da spaCy, "
"que poderá ajudá-lo a encontrar determinadas palavras e frases em um texto."
)
```
#### File: exercises/pt/test_01_11.py
```python
def test():
import spacy.matcher
assert isinstance(
matcher, spacy.matcher.Matcher
), "Você está inicializando o Comparador corretamente?"
assert (
"Matcher(nlp.vocab)" in __solution__
), "Você está inicializando o Comparador corretamente com o vocabulário compartilhado?"
assert (
len(pattern) == 2
), "A expressão deve descrever dois tokens (dois dicionários)."
assert isinstance(pattern[0], dict) and isinstance(
pattern[1], dict
), "Cada item da expressão deve conter um dicionário."
assert (
len(pattern[0]) == 1 and len(pattern[1]) == 1
), "Cada item na expressão deve conter apenas uma chave."
assert any(
pattern[0].get(key) == "iPhone" for key in ["text", "TEXT"]
), "Você está fazendo a comparação com o texto do token?"
assert any(
pattern[1].get(key) == "X" for key in ["text", "TEXT"]
), "Você está fazendo a comparação com o texto do token?"
assert (
'matcher.add("IPHONE_X_PATTERN"' in __solution__
), "Você está adicionando a expressão corretamente?"
assert (
"matches = matcher(doc)" in __solution__
), "Você está chamando o Comparador passando o doc como parâmetro?"
__msg__.good(
"Parabéns! Você identificou uma correspondência com sucesso: dois tokens "
"em doc[1:3] que correspondem a partição 'iPhone X'. "
)
```
#### File: exercises/pt/test_02_02_02.py
```python
def test():
assert (
person_hash == nlp.vocab.strings["PERSON"]
), "Você atribuiu o código hash corretamente?"
assert (
'nlp.vocab.strings["PERSON"]' in __solution__
), "Você atribuiu o código hash corretamente?"
assert person_string == "PERSON", "Você selecionou a string corretamente?"
assert (
"nlp.vocab.strings[person_hash]" in __solution__
), "Você obteve a string a partir do código hash?"
__msg__.good("Good job!")
```
#### File: exercises/pt/test_02_05_02.py
```python
def test():
assert (
"from spacy.tokens import Doc" in __solution__
), "Você fez a importação da classe Doc corretamente?"
assert (
len(spaces) == 5
), "Parece que o número de espaços não coincide com o número de palavras..."
assert all(isinstance(s, bool) for s in spaces), "Os espaços devem ser boleanos."
assert [int(s) for s in spaces] == [0, 1, 1, 0, 0], "Os espaços estão corretos?"
assert doc.text == "Go, get started!", "Tem certeza que você criou o Doc corretamente?"
__msg__.good("Bom!")
```
#### File: exercises/pt/test_02_10_01.py
```python
def test():
assert (
"doc1.similarity(doc2)" or "doc2.similarity(doc1)" in __solution__
), "Você está comparando a similaridade entre os dois documentos?"
assert (
0 <= float(similarity) <= 1
), "O valor da similaridade deve ser um número de ponto flutuante. Você fez este cálculo corretamente?"
__msg__.good("Muito bem!")
```
#### File: exercises/pt/test_03_10_01.py
```python
def test():
assert Doc.has_extension("has_number"), "Você registrou a extensão no doc?"
ext = Doc.get_extension("has_number")
assert ext[2] is not None, "Você definiu o getter corretamente?"
assert (
"getter=get_has_number" in __solution__
), "Você atribuiu a função get_has_number como a função getter?"
assert "doc._.has_number" in __solution__, "Você está acessando o atributo personalizado?"
assert doc._.has_number, "Parece que a função getter está retornando o valor errado."
__msg__.good("Bom trabalho!")
```
#### File: exercises/pt/test_03_14_03.py
```python
def test():
assert (
"patterns = list(nlp.pipe(people))" in __solution__
), "Você está usando nlp.pipe envolvido em uma lista (list)?"
__msg__.good(
"Bom trabalho! Vamos seguir agora com um exemplo prático que "
"usa nlp.pipe para processar documentos com metadados adicionais."
)
```
#### File: exercises/pt/test_04_04.py
```python
def test():
assert (
"for doc in nlp.pipe(TEXTS)" in __solution__
), "Você está chamando nlp.pipe passando TEXTS como parâmetros?"
assert (
"TRAINING_DATA.append" in __solution__
), "Você está adicionando novos dados aos dados de treinamento (TRAINING_DATA)?"
assert (
len(TRAINING_DATA) == 6
), "Parece que os dados de treinamento não estão corretos. Deveria haver 6 exemplos."
for entry in TRAINING_DATA:
assert (
len(entry) == 2
and isinstance(entry[0], str)
and isinstance(entry[1], dict)
and "entities" in entry[1]
), "Parece que os exemplos estão em um formato errado. Deve ser uma tupla com o texto e um dicionário com a chave 'entities'."
assert TRAINING_DATA[0][1]["entities"] == [
(20, 28, "GADGET")
], "Verifique as entidades do exemplo 1."
assert TRAINING_DATA[1][1]["entities"] == [
(0, 8, "GADGET")
], "Verifique as entidades do exemplo 2."
assert TRAINING_DATA[2][1]["entities"] == [
(28, 36, "GADGET")
], "Verifique as entidades do exemplo 3."
assert TRAINING_DATA[3][1]["entities"] == [
(4, 12, "GADGET")
], "Verifique as entidades do exemplo 4."
assert TRAINING_DATA[4][1]["entities"] == [
(0, 9, "GADGET"),
(13, 21, "GADGET"),
], "Verifique as entidades do exemplo 5."
assert (
TRAINING_DATA[5][1]["entities"] == []
), "Verifique as entidades do exemplo 6."
__msg__.good(
"Muito bem! Antes de treinar um modelo com alguns dados, você deve sempre "
"confirmar se seu comparador não identificou nenhum falso positivo. "
"Mas ainda assim, este processo é bem mais rápido do que fazer *tudo* "
"manualmente."
)
```
#### File: exercises/pt/test_04_11_02.py
```python
def test():
assert (
len(TRAINING_DATA) == 3
), "Parece que há algo errado com os dados de treinamento. O esperado são 3 exemplos."
assert all(
len(entry) == 2 and isinstance(entry[1], dict) for entry in TRAINING_DATA
), "Dados do treinamento com formato errado. Esperado uma lista de tuplas com um dicionário como segundo elemento."
ents = [entry[1].get("entities", []) for entry in TRAINING_DATA]
assert all(len(e) == 2 for e in ents), "Todos os exemplos devem conter duas entidades."
assert any(
e == (0, 9, "PERSON") for e in ents[1]
), "Você rotulou a entidade PERSON corretamente?"
assert any(
e == (15, 29, "PERSON") for e in ents[2]
), "Você rotulou a entidade PERSON corretamente?"
__msg__.good(
"Bom trabalho! Depois de incluir ambos os exemplos na nova entidade "
"WEBSITE, bem como exemplos para a entidade existemte PERSON, o "
"modelo agora está com uma performance bem melhor."
)
```
#### File: exercises/zh/exc_03_06.py
```python
import spacy
# 定义定制化组件
def length_component(doc):
# 获取doc的长度
doc_length = ____
print(f"This document is {doc_length} tokens long.")
# 返回这个doc
____
# 读取小规模的中文模型
nlp = spacy.load("zh_core_web_sm")
# 将组件加入到流程的最前面,打印流程组件名
____.____(____)
print(nlp.pipe_names)
# 处理一段文本
doc = ____
```
#### File: exercises/zh/test_01_02_04.py
```python
def test():
import spacy.tokens
    import spacy.lang.zh
assert isinstance(
nlp, spacy.lang.zh.Chinese
), "nlp应该是中文类的一个实例。"
assert isinstance(
doc, spacy.tokens.Doc
), "你用nlp实例处理过文本并且创建了一个doc吗?"
assert "print(doc.text)" in __solution__, "你打印doc.text了吗?"
__msg__.good("Perfecto! 我们现在继续试试documents,spans和tokens.")
```
#### File: exercises/zh/test_02_06.py
```python
def test():
assert (
"import Doc, Span" or "import Span, Doc" in __solution__
), "你有正确导入Doc和Span吗?"
assert doc.text == "我喜欢周杰伦", "你有正确创建Doc吗?"
assert span.text == "周杰伦", "有正确创建span吗?"
assert span.label_ == "PERSON", "你有把标签PERSON加到span中吗?"
assert "doc.ents =" in __solution__, "你有覆盖doc.ents吗?"
assert len(doc.ents) == 1, "你有把span加入到doc.ents吗?"
assert (
list(doc.ents)[0].text == "周杰伦"
), "你有把span加入到doc.ents吗?"
__msg__.good(
"完美!之后我们学习编码信息提取流程的时候,我们就会发现"
"手动创建spaCy的实例并改变其中的实体会非常方便有用。"
)
```
#### File: exercises/zh/test_03_10_02.py
```python
def test():
assert Span.has_extension("to_html"), "你有在span上注册这个扩展吗?"
ext = Span.get_extension("to_html")
assert ext[1] is not None, "你有正确设置这个方法吗?"
assert "method=to_html" in __solution__, "你有把to_html设置成为方法吗?"
assert (
'span._.to_html("strong")' in __solution__
), "你有读取到定制化属性了吗?"
assert (
span._.to_html("strong") == "<strong>大家好</strong>"
), "貌似这个方法返回的值是错误的。"
__msg__.good(
"完美!下一个练习中我们要结合使用定制化属性与定制化的模型组件。"
)
```
#### File: exercises/zh/test_03_14_02.py
```python
def test():
assert (
"docs = list(nlp.pipe(TEXTS))" in __solution__
), "你有用list将nlp.pipe的结果变为列表吗?"
__msg__.good("美美哒!")
```
#### File: exercises/zh/test_03_14_03.py
```python
def test():
assert (
"patterns = list(nlp.pipe(people))" in __solution__
), "你有用list将nlp.pipe的结果变为列表吗?"
__msg__.good(
"干得漂亮!接下来我们看一个实际例子,用nlp.pipe来处理文档生成更多的元数据。"
)
```
|
{
"source": "jettej/Python",
"score": 4
}
|
#### File: jettej/Python/ceasar_cipher.py
```python
alpha = range(ord("A"), ord("Z")+1)
# Ceasar Cipher
for x in alpha:
print(chr(x), end = "; ")
print("")
for x in alpha[5:]:
print(chr(x), end = "; ")
print("")
def create_ceaser_wheel(displacement = 0):
global alpha
wheel = {" ": " "}
index = 0
for x in alpha[displacement:]:
wheel[chr(alpha[index])] = chr(x)
index += 1
for x in alpha[0:displacement]:
wheel[chr(alpha[index])] = chr(x)
index += 1
return wheel
def encrypt(text, displacement):
wheel = create_ceaser_wheel(displacement)
for l in text:
print(wheel[l.upper()], end = " " )
print("")
print(create_ceaser_wheel(10))
text = input("Type some text to be encrypted: ")
for x in range(0,26):
encrypt(text, x)
```
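A decryption pass is the same substitution run in reverse: shifting back by the original displacement (equivalently, shifting forward by 26 minus it). Below is a minimal standalone sketch; the `decrypt` helper is hypothetical and not part of the script above:
```python
# Hypothetical inverse of the cipher above: undo a shift of `displacement`.
def decrypt(text: str, displacement: int) -> str:
    result = []
    for ch in text.upper():
        if ch == " ":
            result.append(" ")
        else:
            shifted = (ord(ch) - ord("A") - displacement) % 26
            result.append(chr(ord("A") + shifted))
    return "".join(result)

print(decrypt("KHOOR", 3))  # -> HELLO
```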
#### File: Python/challenge1/challenge.py
```python
import socket
import select
import sys
import pickle
import codecs
import time
from _thread import *
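# Wire format used by send_data/rcv_data below: each payload is pickled,
# base64-encoded, and terminated by a fixed delimiter byte string, so the
# receiver can find message boundaries on the TCP stream.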
def send_data(sock, data, delimitter=b"!##2?3##!!!##!!!"):
    data = pickle.dumps(data)
    send_da = codecs.encode(data, "base64").strip() + delimitter
    sock.sendall(send_da)
    return 0
def rcv_data(sock, delimitter=b"!##2?3##!!!##!!!"):
    data = b""
    while not data.endswith(delimitter):
        data += sock.recv(2048)
    data = codecs.decode(data[:-len(delimitter)], "base64")
    data = pickle.loads(data)
    return data
###############################################
class client():
def __init__(self,ip_address="localhost", port=10000):
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ip_address = ip_address
self.port = port
self.server.connect((self.ip_address, self.port))
print(rcv_data(self.server))
def login(self,username,password):
send_data(self.server,"LOGIN:-:"+username+":#:"+password)
print(rcv_data(self.server))
def getusers(self):
send_data(self.server,"GETUSERS:-:xxxx")
print(rcv_data(self.server))
def score(self):
send_data(self.server,"SCORE:-:xxxx")
print(rcv_data(self.server))
def question(self,num):
send_data(self.server,"QUESTION:-:"+str(num))
print(rcv_data(self.server))
def answer(self,num,ans):
send_data(self.server,"ANSWER:-:"+str(num)+":#:"+str(ans))
print(rcv_data(self.server))
def data(self,num):
send_data(self.server,"DATA:-:"+str(num))
rcv=rcv_data(self.server)
# print(str(rcv_data(self.server)))
return rcv
def close(self):
self.server.close()
```
|
{
"source": "jettify/aiocluster",
"score": 2
}
|
#### File: aiocluster/aioc/gossiper.py
```python
import time
from .state import (
make_compaund,
Alive, Suspect,
add_msg_size, NodeMeta,
EventType,
NodeStatus)
from .dissemination_queue import DisseminationQueue
__all__ = ('Gossiper',)
MAX_UDP_PACKET_SIZE = 508
class Gossiper:
def __init__(self, mlist, listener, lclock):
self._mlist = mlist
retransmit_mult = self._mlist.config.retransmit_mult
self._queue = DisseminationQueue(self._mlist, retransmit_mult)
self._listener = listener
self._suspicions = {}
self._lclock = lclock
@property
def queue(self):
return self._queue
async def gossip(self, udp_server):
for node_meta in self._mlist.select_gossip_nodes():
raw_payloads = self.queue.get_update_up_to(MAX_UDP_PACKET_SIZE)
if not raw_payloads:
return
raw = add_msg_size(make_compaund(*raw_payloads))
host, port = node_meta.node
addr = (host, int(port))
udp_server.send_raw_message(addr, raw)
def alive(self, message, waiter=None):
a = message
node = a.node
node_meta = self._mlist.node_meta(a.node)
if node_meta is None:
new_node_meta = NodeMeta(
node,
a.incarnation,
a.meta,
NodeStatus.ALIVE,
time.time(),
False)
self._mlist.update_node(new_node_meta)
self._listener.notify(EventType.JOIN, node)
else:
# TODO: check corner case here
if a.incarnation <= node_meta.incarnation:
return
new_node_meta = node_meta._replace(
incarnation=a.incarnation,
meta=a.meta, status=NodeStatus.ALIVE,
state_change=time.time())
self._mlist.update_node(new_node_meta)
self._suspicions.pop(a.node, None)
self.queue.put(message, waiter=waiter)
self._listener.notify(EventType.UPDATE, new_node_meta)
def dead(self, message, waiter=None):
node = message.node
node_meta = self._mlist.node_meta(node)
if node_meta is None:
set_waiter(waiter)
return
if message.incarnation <= node_meta.incarnation:
set_waiter(waiter)
return
is_local = message.node == self._mlist.local_node
if node_meta.status == NodeStatus.DEAD and not is_local:
set_waiter(waiter)
return
node_meta = node_meta._replace(
status=NodeStatus.DEAD,
incarnation=message.incarnation,
state_change=time.time())
self._mlist.update_node(node_meta)
self.queue.put(message, waiter=waiter)
self._listener.notify(EventType.LEAVE, node_meta.node)
def suspect(self, message: Suspect, waiter=None):
s = message
node = self._mlist.node_meta(s.node)
if node is None:
set_waiter(waiter)
return
if s.incarnation < node.incarnation:
set_waiter(waiter)
return
if s.node in self._suspicions:
suspicion = self._suspicions[s.node]
suspicion.confirm(s.sender)
self.queue.put(message, waiter=waiter)
return
if s.node == self._mlist.local_node:
self.refute(s)
return
        # TODO: wire up a real Suspicion timer here; for now only the node's
        # status is flipped to SUSPECT so the change is disseminated.
        node = node._replace(
            status=NodeStatus.SUSPECT,
            incarnation=s.incarnation,
            state_change=time.time())
        self._mlist.update_node(node)
        self.queue.put(message, waiter=waiter)
        self._listener.notify(EventType.UPDATE, node)
def merge(self, message):
for n in message.nodes:
if n.status == NodeStatus.ALIVE:
a = Alive(message.sender, n.node, n.incarnation, n.meta)
self.alive(a)
elif n.status in (NodeStatus.DEAD, NodeStatus.SUSPECT):
# TODO: fix incorrect from_node address
s = Suspect(message.sender, n.node, n.incarnation)
self.suspect(s)
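    # SWIM-style refutation: when this node hears a rumor (suspect/dead)
    # about itself, it broadcasts an Alive message with a higher incarnation
    # number so its own fresher state wins during gossip.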
def refute(self, msg):
incarnation = self._lclock.next_incarnation()
if msg.incarnation >= incarnation:
incarnation = self._lclock.skip_incarnation(msg.incarnation)
node_meta = self._mlist.local_node_meta
a = Alive(node_meta.node, node_meta.node, incarnation, node_meta.meta)
self.queue.put(a, waiter=None)
def set_waiter(fut):
if (fut is not None) and (not fut.cancelled()):
fut.set_result(True)
```
#### File: aiocluster/tests/test_serialization.py
```python
from aioc.state import (encode_message, decode_message, encode_messages,
decode_messages)
from aioc.state import (Ping, Suspect, Node,
IndirectPingReq, AckResp, NackResp, Alive,
Dead, PushPull, NodeMeta
)
def test_basic():
messages = [
Ping(Node("host", 9001), 1, Node("host", 9001)),
IndirectPingReq(Node("host", 9001), 1, Node("host", 9001), True),
AckResp(Node("host", 9001), 1, "data"),
NackResp(Node("host", 9001), 1),
Suspect(Node("host", 9001), Node("host", 9001), 1),
Alive(Node("host", 9001), Node("host", 9001), 1, "data"),
Dead(Node("host", 9001), 1, Node("host", 9001), Node("host", 9001)),
PushPull(Node("host", 9001),
[NodeMeta(Node("host", 9001), 1, "data", "ALIVE", 0)],
True)
]
for msg in messages:
raw_msg = encode_message(msg)
decoded_msg = decode_message(raw_msg)
assert decoded_msg == msg
def test_compaund():
ping = Ping(Node("host", 9001), 1, Node("host", 9001))
ack = AckResp(Node("host", 9001), 1, "data")
raw_msg = encode_messages(ping, ack)
ms = decode_messages(raw_msg)
assert [ping, ack] == ms
```
|
{
"source": "jettify/aiogearman",
"score": 2
}
|
#### File: aiogearman/tests/test_client.py
```python
from ._testutil import GearmanTest, run_until_complete
from aiogearman import create_client
class ClientTest(GearmanTest):
@run_until_complete
def test_client_ctor(self):
client = yield from create_client(loop=self.loop)
job = yield from client.submit(b'rev', b'submit', unique_id=1)
yield from self.worker.do_job()
result = yield from job.wait_result()
self.assertEqual(result, job.result)
self.assertEqual(result, b'timbus')
self.assertTrue('Gearman' in client.__repr__())
@run_until_complete
def test_job_handle_attributes(self):
client = yield from create_client(loop=self.loop)
job = yield from client.submit(b'rev', b'submit', unique_id=1)
self.assertEqual(job.function, b'rev')
self.assertEqual(job.data, b'submit')
self.assertEqual(job.unique_id, 1)
        self.assertTrue(job.job_id)
with self.assertRaises(RuntimeError):
print(job.result)
self.assertTrue(str(job.job_id) in job.__repr__())
yield from self.worker.do_job()
result = yield from job.wait_result()
self.assertEqual(result, job.result)
self.assertEqual(result, b'timbus')
self.assertTrue('Gearman' in client.__repr__())
@run_until_complete
def test_client_submit_high(self):
client = yield from create_client(loop=self.loop)
job = yield from client.submit_high(b'rev', b'submit_high',
unique_id=4)
yield from self.worker.do_job()
result = yield from job.wait_result()
self.assertEqual(result, b'hgih_timbus')
@run_until_complete
def test_client_submit_low(self):
client = yield from create_client(loop=self.loop)
job = yield from client.submit_low(b'rev', b'submit_low',
unique_id=5)
yield from self.worker.do_job()
result = yield from job.wait_result()
self.assertEqual(result, b'wol_timbus')
@run_until_complete
def test_client_submit_bg(self):
client = yield from create_client(loop=self.loop)
job = yield from client.submit_bg(b'rev', b'submit_bg',
unique_id=6)
yield from self.worker.do_job()
self.assertTrue(job)
@run_until_complete
def test_client_submit_high_bg(self):
client = yield from create_client(loop=self.loop)
job = yield from client.submit_high_bg(b'rev', b'submit_high_bg',
unique_id=7)
yield from self.worker.do_job()
self.assertTrue(job)
@run_until_complete
def test_client_submit_low_bg(self):
client = yield from create_client(loop=self.loop)
job = yield from client.submit_low_bg(b'rev', b'submit_low',
unique_id=8)
yield from self.worker.do_job()
self.assertTrue(job)
@run_until_complete
def test_send_work_data_couple_of_times(self):
client = yield from create_client(loop=self.loop)
job = yield from client.submit(b'foobar', b'submit_low', unique_id=9)
yield from self.worker.do_job()
result = yield from job.wait_result()
self.assertEqual(result, [b'foo', b'baz', b'bar'])
self.assertTrue(job.result, [b'foo', b'baz', b'bar'])
```
#### File: aiogearman/tests/test_connection.py
```python
import asyncio
from unittest import mock
from ._testutil import BaseTest, run_until_complete
from aiogearman.consts import ECHO_REQ, ECHO_RES, REQ
from aiogearman.connection import create_connection, GearmanConnection
from aiogearman.utils import encode_command
class ConnectionTest(BaseTest):
@run_until_complete
def test_connect(self):
conn = yield from create_connection(loop=self.loop)
self.assertTrue(conn._loop, self.loop)
packet_type, data = yield from conn.execute(ECHO_REQ, b'foo')
self.assertEqual(packet_type, ECHO_RES)
self.assertEqual(data, b'foo')
conn.close()
@run_until_complete
def test_connection_ctor_global_loop(self):
asyncio.set_event_loop(self.loop)
conn = yield from create_connection()
self.assertEqual(conn.loop, self.loop)
self.assertEqual(conn.host, 'localhost')
self.assertEqual(conn.port, 4730)
self.assertEqual(conn.closed, False)
self.assertTrue('Gearman' in conn.__repr__())
conn.close()
conn.close()
self.assertEqual(conn.closed, True)
@run_until_complete
def test_connect_close(self):
conn = yield from create_connection(loop=self.loop)
self.assertTrue(conn._loop, self.loop)
resp_fut1 = conn.execute(ECHO_REQ, b'foo')
resp_fut2 = conn.execute(ECHO_REQ, b'foo')
conn.close()
self.assertTrue(resp_fut1.cancelled)
self.assertTrue(resp_fut2.cancelled)
@run_until_complete
def test_execute_cancel_future(self):
conn = yield from create_connection(loop=self.loop)
self.assertTrue(conn._loop, self.loop)
resp_fut1 = conn.execute(ECHO_REQ, b'foo')
resp_fut1.cancel()
packet_type, data = yield from conn.execute(ECHO_REQ, b'foo')
conn.close()
self.assertEqual(packet_type, ECHO_RES)
self.assertEqual(data, b'foo')
def test_encode_command(self):
res = encode_command(REQ, ECHO_RES, 'foo', 3.14)
expected = b'\x00REQ\x00\x00\x00\x11\x00\x00\x00\x08foo\x003.14'
self.assertEqual(res, expected)
res = encode_command(REQ, ECHO_RES, b'foo', bytearray(b'Q'))
expected = b'\x00REQ\x00\x00\x00\x11\x00\x00\x00\x05foo\x00Q'
self.assertEqual(res, expected)
with self.assertRaises(TypeError):
encode_command(REQ, ECHO_RES, object())
@run_until_complete
def test_osserror(self):
conn = GearmanConnection(loop=self.loop)
@asyncio.coroutine
def invoke_osserror(*a, **kw):
yield from asyncio.sleep(0.1, loop=self.loop)
raise OSError
# setup reader
reader = mock.MagicMock()
reader.readexactly.return_value = invoke_osserror()
reader.at_eof.return_value = False
writer = mock.MagicMock()
conn._reader = reader
conn._writer = writer
conn._read_task = asyncio.async(conn._read_data(), loop=self.loop)
with self.assertRaises(ConnectionError):
yield from conn.execute(ECHO_RES, 'foo')
```
#### File: aiogearman/tests/_testutil.py
```python
import asyncio
import unittest
from functools import wraps
from aiogearman import create_worker
from aiogearman.worker import Job
def run_until_complete(fun):
if not asyncio.iscoroutinefunction(fun):
fun = asyncio.coroutine(fun)
@wraps(fun)
def wrapper(test, *args, **kw):
loop = test.loop
ret = loop.run_until_complete(fun(test, *args, **kw))
return ret
return wrapper
class BaseTest(unittest.TestCase):
"""Base test case for unittests.
"""
def setUp(self):
asyncio.set_event_loop(None)
self.loop = asyncio.new_event_loop()
def tearDown(self):
self.loop.close()
del self.loop
class GearmanTest(BaseTest):
def setUp(self):
super().setUp()
self.worker = self.loop.run_until_complete(self._create_worker())
def tearDown(self):
self.worker.close()
super().tearDown()
@asyncio.coroutine
def _create_worker(self):
worker = yield from create_worker(loop=self.loop)
class RevJob(Job):
@asyncio.coroutine
def function(self, data):
return data[::-1]
class FooJob(Job):
@asyncio.coroutine
def function(self, data):
yield from self.send_work_data('foo')
yield from self.send_work_data('baz')
return 'bar'
yield from worker.register_function(b'rev', RevJob)
yield from worker.register_function(b'foobar', FooJob)
return worker
```
|
{
"source": "jettify/aionsq",
"score": 2
}
|
#### File: aionsq/aionsq/consumer.py
```python
import asyncio
import random
from collections import deque
import time
from aionsq.http import NsqLookupd
from aionsq.nsq import create_nsq
from aionsq.utils import RdyControl
class NsqConsumer:
"""Experiment purposes"""
def __init__(self, nsqd_tcp_addresses=None, lookupd_http_addresses=None,
max_in_flight=42, loop=None):
self._nsqd_tcp_addresses = nsqd_tcp_addresses or []
self._lookupd_http_addresses = lookupd_http_addresses or []
self._max_in_flight = max_in_flight
self._loop = loop or asyncio.get_event_loop()
self._queue = asyncio.Queue(loop=self._loop)
self._connections = {}
self._idle_timeout = 10
self._rdy_control = None
self._is_subscribe = False
self._redistribute_timeout = 5 # sec
self._lookupd_poll_time = 30 # sec
@asyncio.coroutine
def connect(self):
if self._lookupd_http_addresses:
self._lookupd_task = asyncio.Task(self._lookupd(), loop=self._loop)
if self._nsqd_tcp_addresses:
for host, port in self._nsqd_tcp_addresses:
conn = yield from create_nsq(host, port, queue=self._queue,
loop=self._loop)
self._connections[conn.id] = conn
self._rdy_control = RdyControl(idle_timeout=self._idle_timeout,
max_in_flight=self._max_in_flight,
loop=self._loop)
self._rdy_control.add_connections(self._connections)
@asyncio.coroutine
    def _poll_lookupd(self, host, port):
        # TODO: the topic should not be hard-coded here
        lookupd_conn = NsqLookupd(host, port, loop=self._loop)
        res = yield from lookupd_conn.lookup('foo')
        for producer in res['producers']:
            host = producer['broadcast_address']
            port = producer['tcp_port']
            conn = yield from create_nsq(host, port, queue=self._queue,
                                         loop=self._loop)
            self._connections[conn.id] = conn
        lookupd_conn.close()
@asyncio.coroutine
def subscribe(self, topic, channel):
self._is_subscribe = True
for conn in self._connections.values():
yield from conn.sub(topic, channel)
self._redistribute_task = asyncio.Task(self._redistribute(),
loop=self._loop)
def wait_messages(self):
if not self._is_subscribe:
raise ValueError('You must subscribe to the topic first')
while self._is_subscribe:
fut = asyncio.async(self._queue.get(), loop=self._loop)
yield fut
def is_starved(self):
conns = self._connections.values()
return any(conn.is_starved() for conn in conns)
@asyncio.coroutine
def _redistribute(self):
while self._is_subscribe:
self._rdy_control.redistribute()
yield from asyncio.sleep(self._redistribute_timeout,
loop=self._loop)
@asyncio.coroutine
def _lookupd(self):
while self._is_subscribe:
yield from asyncio.sleep(self._redistribute_timeout,
loop=self._loop)
host, port = random.choice(self._lookupd_http_addresses)
yield from self._poll_lookupd(host, port)
```
#### File: aionsq/http/base.py
```python
import asyncio
import json
import aiohttp
from .http_exceptions import HTTP_EXCEPTIONS, NsqHttpException
from ..utils import _convert_to_str
class NsqHTTPConnection:
"""XXX"""
def __init__(self, host='127.0.0.1', port=4150, *, loop):
self._loop = loop
self._endpoint = (host, port)
self._connector = aiohttp.TCPConnector(resolve=True, loop=loop)
self._base_url = 'http://{0}:{1}/'.format(*self._endpoint)
self._request = aiohttp.request
@property
def endpoint(self):
return 'http://{0}:{1}'.format(*self._endpoint)
def close(self):
self._connector.close()
@asyncio.coroutine
def perform_request(self, method, url, params, body):
_body = _convert_to_str(body) if body else body
url = self._base_url + url
resp = yield from self._request(method, url, params=params,
data=_body, loop=self._loop,
connector=self._connector)
resp_body = yield from resp.text()
try:
response = json.loads(resp_body)
except ValueError:
return resp_body
        if not (200 <= resp.status < 300):
extra = None
try:
extra = json.loads(resp_body)
except ValueError:
pass
exc_class = HTTP_EXCEPTIONS.get(resp.status, NsqHttpException)
raise exc_class(resp.status, resp_body, extra)
return response['data']
def __repr__(self):
cls_name = self.__class__.__name__
return '<{}: {}>'.format(cls_name, self._endpoint)
```
#### File: aionsq/http/nsqd.py
```python
import asyncio
from .base import NsqHTTPConnection
from ..utils import _convert_to_str
class Nsqd(NsqHTTPConnection):
"""
:see: http://nsq.io/components/nsqd.html
"""
@asyncio.coroutine
def ping(self):
"""Monitoring endpoint.
:returns: should return `"OK"`, otherwise raises an exception.
"""
return self.perform_request('GET', 'ping', None, None)
@asyncio.coroutine
def info(self):
"""Returns version information."""
resp = yield from self.perform_request('GET', 'info', None, None)
return resp
@asyncio.coroutine
def stats(self):
"""Returns version information."""
resp = yield from self.perform_request(
'GET', 'stats', {'format': 'json'}, None)
return resp
@asyncio.coroutine
def pub(self, topic, message):
"""Returns version information."""
resp = yield from self.perform_request(
'POST', 'pub', {'topic': topic}, message)
return resp
@asyncio.coroutine
def mpub(self, topic, *messages):
"""Returns version information."""
assert len(messages), "Specify one or mor message"
_msgs = [_convert_to_str(m) for m in messages]
msgs = '\n'.join(_msgs)
resp = yield from self.perform_request(
'POST', 'mpub', {'topic': topic}, msgs)
return resp
@asyncio.coroutine
def create_topic(self, topic):
resp = yield from self.perform_request(
'POST', 'topic/create', {'topic': topic}, None)
return resp
@asyncio.coroutine
def delete_topic(self, topic):
resp = yield from self.perform_request(
'POST', 'topic/delete', {'topic': topic}, None)
return resp
@asyncio.coroutine
def create_channel(self, topic, channel):
resp = yield from self.perform_request(
'POST', 'channel/create', {'topic': topic, 'channel': channel},
None)
return resp
@asyncio.coroutine
def delete_channel(self, topic, channel):
resp = yield from self.perform_request(
'GET', 'channel/delete', {'topic': topic, 'channel': channel},
None)
return resp
@asyncio.coroutine
def empty_topic(self, topic):
resp = yield from self.perform_request(
'GET', 'topic/empty', {'topic': topic}, None)
return resp
@asyncio.coroutine
def topic_pause(self, topic):
resp = yield from self.perform_request(
'GET', 'topic/pause', {'topic': topic}, None)
return resp
@asyncio.coroutine
def topic_unpause(self, topic):
resp = yield from self.perform_request(
'GET', 'topic/unpause', {'topic': topic}, None)
return resp
@asyncio.coroutine
def pause_channel(self, channel, topic):
resp = yield from self.perform_request(
'GET', 'channel/pause', {'topic': topic, 'channel': channel},
None)
return resp
@asyncio.coroutine
def unpause_channel(self, channel, topic):
resp = yield from self.perform_request(
'GET', '/channel/unpause', {'topic': topic, 'channel': channel},
None)
return resp
@asyncio.coroutine
def debug_pprof(self):
resp = yield from self.perform_request(
'GET', 'debug/pprof', None, None)
return resp
@asyncio.coroutine
def debug_pprof_profile(self):
resp = yield from self.perform_request(
'GET', 'debug/pprof/profile', None, None)
return resp
@asyncio.coroutine
def debug_pprof_goroutine(self):
resp = yield from self.perform_request(
'GET', '/debug/pprof/goroutine', None, None)
return resp
@asyncio.coroutine
def debug_pprof_heap(self):
resp = yield from self.perform_request(
'GET', '/debug/pprof/heap', None, None)
return resp
@asyncio.coroutine
def debug_pprof_block(self):
resp = yield from self.perform_request(
'GET', '/debug/pprof/block', None, None)
return resp
@asyncio.coroutine
def debug_pprof_threadcreate(self):
resp = yield from self.perform_request(
'GET', '/debug/pprof/threadcreate', None, None)
return resp
```
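A usage sketch for the HTTP client above. It assumes `Nsqd` is exported from `aionsq.http` the same way `NsqLookupd` is, and that nsqd serves its HTTP API on port 4151 (the 4150 default in the base class is the TCP port), so the port is passed explicitly; host and topic names are illustrative:
```python
import asyncio

from aionsq.http import Nsqd


@asyncio.coroutine
def main(loop):
    # talk to the HTTP API of a local nsqd instance
    nsqd = Nsqd('127.0.0.1', 4151, loop=loop)
    yield from nsqd.create_topic('demo')
    ok = yield from nsqd.pub('demo', 'hello world')
    print(ok)
    nsqd.close()


loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
```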
#### File: aionsq/aionsq/nsq.py
```python
import asyncio
from . import consts
import time
from .log import logger
from .utils import retry_iterator
from .connection import create_connection
from .consts import TOUCH, REQ, FIN, RDY, CLS, MPUB, PUB, SUB, AUTH
@asyncio.coroutine
def create_nsq(host='127.0.0.1', port=4150, loop=None, queue=None,
heartbeat_interval=30000, feature_negotiation=True,
tls_v1=False, snappy=False, deflate=False, deflate_level=6,
sample_rate=0):
# TODO: add parameters type and value validation
queue = queue or asyncio.Queue(loop=loop)
conn = Nsq(host=host, port=port, queue=queue,
heartbeat_interval=heartbeat_interval,
feature_negotiation=feature_negotiation,
tls_v1=tls_v1, snappy=snappy, deflate=deflate,
deflate_level=deflate_level,
sample_rate=sample_rate, loop=loop)
yield from conn.connect()
return conn
class Nsq:
def __init__(self, host='127.0.0.1', port=4150, loop=None, queue=None,
heartbeat_interval=30000, feature_negotiation=True,
tls_v1=False, snappy=False, deflate=False, deflate_level=6,
sample_rate=0):
# TODO: add parameters type and value validation
self._config = {
"deflate": deflate,
"deflate_level": deflate_level,
"sample_rate": sample_rate,
"snappy": snappy,
"tls_v1": tls_v1,
"heartbeat_interval": heartbeat_interval,
'feature_negotiation': feature_negotiation,
}
self._host = host
self._port = port
self._conn = None
self._loop = loop
self._queue = queue or asyncio.Queue(loop=self._loop)
self._status = consts.INIT
self._reconnect = True
self._rdy_state = 0
self._last_message = None
self._on_rdy_changed_cb = None
self._last_rdy = 0
@asyncio.coroutine
def connect(self):
self._conn = yield from create_connection(self._host, self._port,
self._queue, loop=self._loop)
self._conn._on_message = self._on_message
yield from self._conn.identify(**self._config)
self._status = consts.CONNECTED
def _on_message(self, msg):
# should not be coroutine
# update connections rdy state
self.rdy_state = int(self.rdy_state) - 1
self._last_message = time.time()
if self._on_rdy_changed_cb is not None:
self._on_rdy_changed_cb(self.id)
return msg
@property
def rdy_state(self):
return self._rdy_state
@rdy_state.setter
def rdy_state(self, value):
self._rdy_state = value
@property
def in_flight(self):
return self._conn.in_flight
@property
def last_message(self):
return self._last_message
@asyncio.coroutine
def reconnect(self):
timeout_generator = retry_iterator(init_delay=0.1, max_delay=10.0)
        while self._status != consts.CONNECTED:
            try:
                yield from self.connect()
            except ConnectionError:
                logger.error("Can not connect to: {}:{} ".format(
                    self._host, self._port))
                # back off with an increasing delay before the next attempt
                t = next(timeout_generator)
                yield from asyncio.sleep(t, loop=self._loop)
@asyncio.coroutine
def execute(self, command, *args, data=None):
        if self._status <= consts.CONNECTED and self._reconnect:
yield from self.reconnect()
response = self._conn.execute(command, *args, data=data)
return response
@property
def id(self):
return self._conn.endpoint
def wait_messages(self):
while True:
future = asyncio.async(self._queue.get(), loop=self._loop)
yield future
@asyncio.coroutine
def auth(self, secret):
"""
:param secret:
:return:
"""
return (yield from self._conn.execute(AUTH, data=secret))
@asyncio.coroutine
def sub(self, topic, channel):
"""
:param topic:
:param channel:
:return:
"""
return (yield from self._conn.execute(SUB, topic, channel))
@asyncio.coroutine
def pub(self, topic, message):
"""
:param topic:
:param message:
:return:
"""
return (yield from self._conn.execute(PUB, topic, data=message))
@asyncio.coroutine
def mpub(self, topic, message, *messages):
"""
:param topic:
:param message:
:param messages:
:return:
"""
msgs = [message] + list(messages)
return (yield from self._conn.execute(MPUB, topic, data=msgs))
@asyncio.coroutine
def rdy(self, count):
"""
:param count:
:return:
"""
if not isinstance(count, int):
raise TypeError('count argument must be int')
self._last_rdy = count
self.rdy_state = count
return (yield from self._conn.execute(RDY, count))
@asyncio.coroutine
def fin(self, message_id):
"""
:param message_id:
:return:
"""
return (yield from self._conn.execute(FIN, message_id))
@asyncio.coroutine
def req(self, message_id, timeout):
"""
:param message_id:
:param timeout:
:return:
"""
return (yield from self._conn.execute(REQ, message_id, timeout))
@asyncio.coroutine
def touch(self, message_id):
"""
:param message_id:
:return:
"""
return (yield from self._conn.execute(TOUCH, message_id))
@asyncio.coroutine
def cls(self):
"""
:return:
"""
yield from self._conn.execute(CLS)
self.close()
def close(self):
self._conn.close()
def is_starved(self):
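        # Heuristic: treat the consumer as starved when the local queue is
        # empty while at least 85% of the last RDY allowance is still in
        # flight, i.e. processing lags behind what the server may deliver.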
if self._queue.qsize():
starved = False
else:
starved = (self.in_flight > 0 and
self.in_flight >= (self._last_rdy * 0.85))
return starved
def __repr__(self):
return '<Nsq{}>'.format(self._conn.__repr__())
```
#### File: aionsq/aionsq/selectors.py
```python
import abc
import random
class AbstractSelector(metaclass=abc.ABCMeta):
@abc.abstractmethod
def select(self, connections):
pass # pragma: no cover
class RandomSelector(AbstractSelector):
def select(self, connections):
return random.choice(connections)
class RoundRobinSelector(AbstractSelector):
def __init__(self):
self._current = 0
def select(self, connections):
self._current += 1
if self._current >= len(connections):
self._current = 0
return connections[self._current]
```
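A quick usage sketch for the selectors above (the connections are plain strings for illustration; in the consumer they are Nsq connection objects):
```python
from aionsq.selectors import RandomSelector, RoundRobinSelector

connections = ['conn-a', 'conn-b', 'conn-c']

rr = RoundRobinSelector()
print([rr.select(connections) for _ in range(4)])
# -> ['conn-b', 'conn-c', 'conn-a', 'conn-b']

rnd = RandomSelector()
print(rnd.select(connections))  # any of the three, chosen uniformly
```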
#### File: aionsq/tests/test_producer.py
```python
from ._testutils import run_until_complete, BaseTest
from aionsq.producer import create_producer
class NsqTCPProducerTest(BaseTest):
@run_until_complete
def test_publish(self):
endpoints = [('127.0.0.1', 4150)]
config = {'tls_v1': False}
nsq_producer = yield from create_producer(endpoints, config,
loop=self.loop)
ok = yield from nsq_producer.publish('baz', 'producer msg')
self.assertEqual(ok, b'OK')
@run_until_complete
def test_mpublish(self):
endpoints = [('127.0.0.1', 4150)]
config = {'tls_v1': False}
nsq_producer = yield from create_producer(endpoints, config,
loop=self.loop)
messages = ['baz:1', b'baz:2', 3.14, 42]
ok = yield from nsq_producer.mpublish('baz', *messages)
self.assertEqual(ok, b'OK')
```
#### File: aionsq/tests/_testutils.py
```python
import asyncio
import unittest
from functools import wraps
def run_until_complete(fun):
if not asyncio.iscoroutinefunction(fun):
fun = asyncio.coroutine(fun)
@wraps(fun)
def wrapper(test, *args, **kw):
loop = test.loop
ret = loop.run_until_complete(fun(test, *args, **kw))
return ret
return wrapper
class BaseTest(unittest.TestCase):
"""Base test case for unittests.
"""
def setUp(self):
asyncio.set_event_loop(None)
self.loop = asyncio.new_event_loop()
def tearDown(self):
self.loop.close()
del self.loop
```
|
{
"source": "jettify/dclock",
"score": 3
}
|
#### File: dclock/dclock/vector.py
```python
class Vector:
__slots__ = ('_vector', )
    def __init__(self) -> None:
        # Placeholder: vector clock state is not implemented yet, but the
        # attribute must exist for __repr__ below.
        self._vector: dict = {}
def __repr__(self):
return "{}('{}')".format(self.__class__.__name__, str(self._vector))
```
#### File: dclock/tests/test_lamport.py
```python
import pytest
from dclock import Lamport
def test_basic():
lclock = Lamport()
assert lclock.time() == 0
lclock.increment()
assert lclock.time() == 1
other = Lamport(10)
lclock.witness(other)
assert other.time() == 10
assert lclock.time() == 11
other2 = Lamport(5)
lclock.witness(other2)
assert lclock.time() == 12
assert other2.time() == 5
assert lclock.__repr__() == "Lamport('12')"
def test_errors():
with pytest.raises(ValueError):
Lamport(-1)
```
|
{
"source": "jettify/isoforest",
"score": 2
}
|
#### File: isoforest/py-isoforest/setup.py
```python
import os
import re
from setuptools import find_packages, setup
from setuptools_rust import Binding, RustExtension
install_requires = []
def _read(f):
with open(os.path.join(os.path.dirname(__file__), f)) as f_:
return f_.read().strip()
def _read_version():
regexp = re.compile(r"^__version__\W*=\W*'([\d.abrc]+)'")
init_py = os.path.join(
os.path.dirname(__file__), 'isoforest', '__init__.py'
)
with open(init_py) as f:
for line in f:
match = regexp.match(line)
if match is not None:
return match.group(1)
raise RuntimeError('Cannot find version in isoforest/__init__.py')
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Operating System :: OS Independent',
'Development Status :: 3 - Alpha',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
]
keywords = ['isolation forest', 'linfa']
project_urls = {
'Website': 'https://github.com/jettify/isoforest',
'Documentation': 'https://isoforest.readthedocs.io',
'Issues': 'https://github.com/jettify/isoforest/issues',
}
setup(
name='isoforest',
version=_read_version(),
description=('isoforest'),
long_description='\n\n'.join((_read('README.rst'), _read('CHANGES.rst'))),
long_description_content_type='text/x-rst',
classifiers=classifiers,
platforms=['POSIX'],
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/jettify/isoforest',
download_url='https://pypi.org/project/isoforest/',
license='Apache 2',
packages=['isoforest'],
install_requires=install_requires,
keywords=keywords,
include_package_data=True,
project_urls=project_urls,
python_requires='>=3.6.0',
rust_extensions=[
RustExtension("isoforest._isoforest", binding=Binding.PyO3)
],
# rust extensions are not zip safe, just like C-extensions.
zip_safe=False,
)
```
|
{
"source": "jettify/mlserve",
"score": 3
}
|
#### File: multiple/toxic_lr/toxic_lr.py
```python
import json
import pandas as pd
import numpy as np
import cloudpickle
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline
from mlserve import build_schema
def read_data(dataset_path):
class_names = ['toxic', 'severe_toxic', 'obscene',
'insult', 'identity_hate']
train = pd.read_csv(dataset_path).fillna(' ')
train_text = train[['comment_text']]
train_targets = train[class_names]
return train_text, train_targets
class ColumnSelector(BaseEstimator, TransformerMixin):
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, df):
return df[self.key]
dataset_path = 'data/train.csv'
train, targets = read_data(dataset_path)
original = pd.concat([train, targets], axis=1)
seed = 1234
word_vectorizer = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='word',
token_pattern=r'\w{1,}',
stop_words='english',
ngram_range=(1, 1),
max_features=10000,
)
logistic = LogisticRegression(C=0.1, solver='sag', random_state=seed)
classifier = MultiOutputClassifier(logistic)
pipeline = Pipeline(steps=[
('selector', ColumnSelector(key='comment_text')),
('word_tfidf', word_vectorizer),
('logistic', classifier)
])
pipeline.fit(train, targets)
scores = cross_val_score(
pipeline,
train,
targets,
cv=5,
scoring='roc_auc')
score = np.mean(scores)
print(score)
print('Writing model')
with open('toxic_lr.pkl', 'wb') as f:
cloudpickle.dump(pipeline, f)
print('Writing dataset schema')
schema = build_schema(original)
with open('toxic_lr.json', 'w') as f:
json.dump(schema, f, indent=4, sort_keys=True)
```
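To reuse the trained pipeline, the cloudpickle artifact can be loaded back with the standard pickle module; a minimal sketch (the sample comment text is made up):
```python
import pickle

import pandas as pd

with open('toxic_lr.pkl', 'rb') as f:
    pipeline = pickle.load(f)

sample = pd.DataFrame({'comment_text': ['thanks, this was really helpful']})
# MultiOutputClassifier returns one (n_samples, 2) probability array per label
probs = pipeline.predict_proba(sample)
print([p[0, 1] for p in probs])
```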
#### File: mlserve/mlserve/utils.py
```python
import json
import os
import trafaret as t
import yaml
from dataclasses import dataclass, asdict
from pathlib import Path
from typing import Any, List, Dict
ModelMeta = t.Dict(
{
t.Key('name'): t.String,
t.Key('description'): t.String,
t.Key('model_path'): t.String,
t.Key('data_schema_path'): t.String,
t.Key('target'): t.String | t.List(t.String),
t.Key('loader', default='pickle'): t.Enum('pickle', 'joblib'),
}
)
# TODO: rename to something more general
ModelConfig = t.Dict({
t.Key('host', default='127.0.0.1'): t.String,
t.Key('port', default=9000): t.Int[0: 65535],
t.Key('workers', default=2): t.Int[1:127],
t.Key('models'): t.List(ModelMeta),
})
ServerConfigTrafaret = t.Dict({
t.Key('host', default='127.0.0.1'): t.String,
t.Key('port', default=9000): t.Int[0: 65535],
t.Key('workers', default=2): t.Int[1:127],
}).ignore_extra('*')
@dataclass(frozen=True)
class ServerConfig:
host: str
port: int
workers: int
@dataclass(frozen=True)
class ModelDescriptor:
name: str
description: str
target: List[str]
features: List[str]
schema: Dict[Any, Any]
model_path: Path
model_size: int
data_schema_path: Path
schema_size: int
loader: str
def asdict(self) -> Dict[str, Any]:
return asdict(self)
def load_model_config(fname: Path) -> Dict[str, Any]:
with open(fname, 'rt') as f:
raw_data = yaml.safe_load(f)
data: Dict[str, Any] = ModelConfig(raw_data)
return data
def load_models(model_conf: List[Dict[str, str]]) -> List[ModelDescriptor]:
result: List[ModelDescriptor] = []
for m in model_conf:
with open(m['data_schema_path'], 'rb') as f:
schema = json.load(f)
_target = m['target']
target: List[str] = _target if isinstance(_target, list) else [_target]
schema = drop_columns(schema, target)
schema_size = os.path.getsize(m['data_schema_path'])
model_size = os.path.getsize(m['model_path'])
features = list(schema['schema']['properties'].keys())
model_desc = ModelDescriptor(
name=m['name'],
description=m['description'],
target=target,
features=features,
schema=schema,
model_path=Path(m['model_path']),
model_size=model_size,
data_schema_path=Path(m['data_schema_path']),
schema_size=schema_size,
loader=m['loader'],
)
result.append(model_desc)
return result
def drop_columns(schema: Dict[str, Any], columns: List[str]) -> Dict[str, Any]:
for col in columns:
schema['schema']['properties'].pop(col, None)
schema['ui_schema'].pop(col, None)
schema['example_data'].pop(col, None)
if col in schema['schema']['required']:
schema['schema']['required'].remove(col)
return schema
```
|
{
"source": "jettify/mung",
"score": 3
}
|
#### File: mung/examples/catb2.py
```python
import pandas as pd
import numpy as np
from itertools import combinations
from catboost import CatBoostClassifier
train_df = pd.read_csv('input/train.csv')
test_df = pd.read_csv('input/test.csv')
print('read complete')
labels = train_df.target
test_id = test_df.ID
selected_features = [
'v10', 'v12', 'v14', 'v21', 'v22', 'v24', 'v30', 'v31', 'v34', 'v38', 'v40', 'v47', 'v50',
'v52', 'v56', 'v62', 'v66', 'v72', 'v75', 'v79', 'v91', 'v112', 'v113', 'v114', 'v129'
]
# drop some of the features that were not selected
train_df = train_df[selected_features]
test_df = test_df[selected_features]
import ipdb
ipdb.set_trace()
train_df.fillna(-9999, inplace=True)
test_df.fillna(-9999, inplace=True)
# update the list of categorical features
cat_features_ids = np.where(train_df.apply(pd.Series.nunique) < 30000)[0].tolist()
char_features = list(train_df.columns[train_df.dtypes == np.object])
char_features_without_v22 = list(train_df.columns[(train_df.dtypes == np.object) & (train_df.columns != 'v22')])
cmbs = list(combinations(char_features, 2)) + list(map(lambda x: ("v22",) + x, combinations(char_features_without_v22, 2)))
def concat_columns(df, columns):
value = df[columns[0]].astype(str) + ' '
for col in columns[1:]:
value += df[col].astype(str) + ' '
return value
# add new features based on combinations/interactions
for cols in cmbs:
train_df["".join(cols)] = concat_columns(train_df, cols)
test_df["".join(cols)] = concat_columns(test_df, cols)
# add new engineered features to the list of categorical features in dataframe
cat_features_ids += range(len(selected_features), train_df.shape[1])
clf = CatBoostClassifier(
learning_rate=0.1, iterations=1000, random_seed=0,
# logging_level='Silent'
)
print('start training')
clf.fit(train_df, labels, cat_features=cat_features_ids)
prediction = clf.predict_proba(test_df)[:,1]
pd.DataFrame(
{'ID':test_id, 'PredictedProb':prediction}
).to_csv(
'submission_improved.csv', index=False
)
```
#### File: mung/mung/utils.py
```python
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.compose import make_column_transformer
from sklearn.datasets import load_boston as sk_load_boston
from sklearn.impute import SimpleImputer
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.utils import shuffle
def make_advesarial_dataset(X_train, X_train_new, seed=None):
# add warning
# assert X_train.shape == X_train_new.shape
y_train_new = np.zeros(X_train_new.shape[0])
y_train = np.ones(X_train.shape[0])
X = np.concatenate((X_train, X_train_new), axis=0)
y = np.concatenate((y_train, y_train_new), axis=0)
X, y = shuffle(X, y, random_state=seed)
return X, y
def advesarial_validator(
X_train, X_train_new, categorical_features=None, seed=None):
X, y = make_advesarial_dataset(X_train, X_train_new, seed=seed)
params = {
'n_estimators': [100, 200, 300],
'random_state': [seed],
'learning_rate': [0.1, 0.01],
'categorical_feature': [categorical_features],
}
clf = lgb.LGBMClassifier(objective='binary', metric='auc')
grid = GridSearchCV(clf, params, scoring='roc_auc', cv=3)
grid_result = grid.fit(X, y)
return grid_result.best_score_
def load_boston(seed=None):
boston = sk_load_boston()
X, y = shuffle(boston.data, boston.target, random_state=seed)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.6)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
return X_train, y_train, X_test, y_test
def load_adult(seed=None):
dtypes = [
('Age', 'float32'),
('Workclass', 'category'),
('fnlwgt', 'float32'),
('Education', 'category'),
('Education-Num', 'float32'),
('Marital Status', 'category'),
('Occupation', 'category'),
('Relationship', 'category'),
('Race', 'category'),
('Sex', 'category'),
('Capital Gain', 'float32'),
('Capital Loss', 'float32'),
('Hours per week', 'float32'),
('Country', 'category'),
('Target', 'category'),
]
raw_data = pd.read_csv(
'tests/data/adult.data',
names=[d[0] for d in dtypes],
na_values=['?', ' ?', ' ?'],
dtype=dict(dtypes),
skipinitialspace=True,
)
# redundant with Education-Num
exclude = ['Education', 'fnlwgt', 'Target']
X = raw_data.drop(exclude, axis=1)
y = (raw_data['Target'] == '>50K').astype(int)
cats = [d[0] for d in dtypes if d[1] == 'category' and d[0] not in exclude]
nums = [d[0] for d in dtypes if d[1] != 'category' and d[0] not in exclude]
pipeline = make_pipeline(
SimpleImputer(strategy='constant', fill_value='na'),
OrdinalEncoder(),
)
X = X[nums + cats]
transformer = make_column_transformer(
(pipeline, cats),
remainder='passthrough',
sparse_threshold=0,
)
X = transformer.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.6, random_state=seed)
return X_train, X_test, y_train, y_test
```
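A usage sketch of the adversarial-validation helpers above: `make_advesarial_dataset` labels old rows 1 and new rows 0, and `advesarial_validator` then reports how well a classifier separates them; an AUC near 0.5 means the two samples are indistinguishable, while values near 1.0 signal drift. The synthetic arrays here are purely illustrative:
```python
import numpy as np

from mung.utils import make_advesarial_dataset

rng = np.random.RandomState(0)
X_old = rng.normal(0.0, 1.0, size=(500, 5))
X_new = rng.normal(0.5, 1.0, size=(500, 5))  # mean shift -> detectable drift

X, y = make_advesarial_dataset(X_old, X_new, seed=0)
print(X.shape, y.mean())  # (1000, 5) with balanced 0/1 labels
```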
|
{
"source": "jettify/pytorch-inspect",
"score": 2
}
|
#### File: pytorch-inspect/tests/test_basic.py
```python
import io
import torch
from torch_inspect import inspect, summary
from torch_inspect.inspect import LayerInfo as L, NetworkInfo
def test_inspect(simple_model):
bs = 2 # default batch size
r = inspect(simple_model, (1, 32, 32))
expected = [
L('Conv2d-1', [bs, 1, 32, 32], [bs, 6, 30, 30], 60, 0),
L('Conv2d-2', [bs, 6, 15, 15], [bs, 16, 13, 13], 880, 0),
L('Linear-3', [bs, 576], [bs, 120], 69240, 0),
L('Linear-4', [bs, 120], [bs, 84], 10164, 0),
L('Linear-5', [bs, 84], [bs, 10], 850, 0),
]
assert r == expected
bsize = 10
r = inspect(
simple_model,
(1, 32, 32),
input_dtype=torch.FloatTensor,
input_initializer=torch.zeros,
batch_size=bsize,
)
expected = [
L('Conv2d-1', [bsize, 1, 32, 32], [bsize, 6, 30, 30], 60, 0),
L('Conv2d-2', [bsize, 6, 15, 15], [bsize, 16, 13, 13], 880, 0),
L('Linear-3', [bsize, 576], [bsize, 120], 69240, 0),
L('Linear-4', [bsize, 120], [bsize, 84], 10164, 0),
L('Linear-5', [bsize, 84], [bsize, 10], 850, 0),
]
assert r == expected
def test_inspect_multi_input(multi_input_net):
bs = 10
r = inspect(multi_input_net, [(1, 16, 16), (1, 28, 28)], batch_size=bs)
expected = [
L('Conv2d-1', [bs, 1, 16, 16], [bs, 1, 16, 16], 10, 0),
L('ReLU-2', [bs, 1, 16, 16], [bs, 1, 16, 16], 0, 0),
L('Conv2d-3', [bs, 1, 28, 28], [bs, 1, 28, 28], 10, 0),
L('ReLU-4', [bs, 1, 28, 28], [bs, 1, 28, 28], 0, 0),
]
assert r == expected
expected_summary = """
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [2, 6, 30, 30] 60
Conv2d-2 [2, 16, 13, 13] 880
Linear-3 [2, 120] 69,240
Linear-4 [2, 84] 10,164
Linear-5 [2, 10] 850
================================================================
Total params: 81,194
Trainable params: 81,194
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.01
Forward/backward pass size (MB): 0.13
Params size (MB): 0.31
Estimated Total Size (MB): 0.44
----------------------------------------------------------------
"""
def test_summary(simple_model):
with io.StringIO() as buf:
summary(
simple_model,
[(1, 32, 32)],
input_dtype=torch.FloatTensor,
input_initializer=torch.zeros,
file=buf,
flush=True,
)
r = buf.getvalue()
assert r == expected_summary
def test_inspect_net_with_batch_norm(netbatchnorm):
bs = 10
r = inspect(netbatchnorm, (20,), batch_size=bs)
expected = [
L('Linear-1', [bs, 20], [bs, 15], 300, 0),
L('BatchNorm1d-2', [bs, 15], [bs, 15], 30, 30),
L('Linear-3', [bs, 15], [bs, 15], 225, 0),
L('BatchNorm1d-4', [bs, 15], [bs, 15], 30, 30),
L('Linear-5', [bs, 15], [bs, 1], 16, 0),
]
assert r == expected
with io.StringIO() as buf:
network_info = summary(netbatchnorm, (20,), file=buf, batch_size=bs)
expected_info = NetworkInfo(661, 601, 80 * bs, 488 * bs, 2644, 8324)
assert expected_info == network_info
def test_simpleconv(simpleconv):
bs = 2
r = inspect(simpleconv, [(1, 16, 16), (1, 28, 28)], batch_size=bs)
expected = [
L('Conv2d-1', [bs, 1, 16, 16], [bs, 1, 16, 16], 10, 0),
L('ReLU-2', [bs, 1, 16, 16], [bs, 1, 16, 16], 0, 0),
L('Conv2d-3', [bs, 1, 28, 28], [bs, 1, 28, 28], 10, 0),
L('ReLU-4', [bs, 1, 28, 28], [bs, 1, 28, 28], 0, 0),
]
assert r == expected
def test_autoencoder(autoencoder):
bs = 10
r = inspect(autoencoder, [(3, 32, 32)], batch_size=bs)
expected = [
L('Conv2d-1', [bs, 3, 32, 32], [bs, 6, 28, 28], 456, 0),
L('ReLU-2', [bs, 6, 28, 28], [bs, 6, 28, 28], 0, 0),
L('Conv2d-3', [bs, 6, 28, 28], [bs, 16, 24, 24], 2416, 0),
L('ReLU-4', [bs, 16, 24, 24], [bs, 16, 24, 24], 0, 0),
L('ConvTranspose2d-5', [bs, 16, 24, 24], [bs, 6, 28, 28], 2406, 0),
L('ReLU-6', [bs, 6, 28, 28], [bs, 6, 28, 28], 0, 0),
L('ConvTranspose2d-7', [bs, 6, 28, 28], [bs, 3, 32, 32], 453, 0),
L('ReLU-8', [bs, 3, 32, 32], [bs, 3, 32, 32], 0, 0),
L('Sigmoid-9', [bs, 3, 32, 32], [bs, 3, 32, 32], 0, 0),
]
assert r == expected
def test_rnn(rnn):
bs = 12
r = inspect(rnn, [(6, 3)], batch_size=bs, input_initializer=torch.zeros)
expected = [
L('RNN-1', [bs, 6, 3], [[bs, 6, 5], [3, bs, 5]], 170, 0),
L('Linear-2', [6 * bs, 5], [6 * bs, 1], 6, 0),
]
assert r == expected
def test_multi_input_net2(multi_input_net2):
bs = 10
r = inspect(multi_input_net2, [(3, 128, 1024), (4,)], batch_size=bs)
expected = [
L('Conv2d-1', [10, 3, 128, 1024], [10, 64, 128, 1024], 1792, 0),
L('MaxPool2d-2', [10, 64, 128, 1024], [10, 64, 64, 512], 0, 0),
L('Conv2d-3', [10, 64, 64, 512], [10, 96, 64, 512], 55392, 0),
L('MaxPool2d-4', [10, 96, 64, 512], [10, 96, 32, 256], 0, 0),
L('Conv2d-5', [10, 96, 32, 256], [10, 128, 32, 256], 110720, 0),
L('MaxPool2d-6', [10, 128, 32, 256], [10, 128, 16, 128], 0, 0),
L('Conv2d-7', [10, 128, 16, 128], [10, 192, 16, 128], 221376, 0),
L('AdaptiveAvgPool2d-8', [10, 192, 16, 128], [10, 192, 1, 1], 0, 0),
L('Linear-9', [10, 192], [10, 64], 12352, 0),
L('Linear-10', [10, 64], [10, 4], 260, 0),
]
assert r == expected
expected_info = NetworkInfo(
401892, 401892, 62914560, 1289769280, 1607568, 1354291408
)
with io.StringIO() as buf:
net_info = summary(
multi_input_net2, [(3, 128, 1024), (4,)], batch_size=bs, file=buf
)
assert net_info == expected_info
def test_lstm_model(lstm_model):
bs = 10
r = inspect(
lstm_model, [(1, 28)], batch_size=bs, input_initializer=torch.zeros
)
out = [[10, 1, 100], [[1, 10, 100], [1, 10, 100]]]
expected = [
L('LSTM-1', [10, 1, 28], out, 52000, 0),
L('Linear-2', [10, 100], [10, 10], 1010, 0),
]
assert r == expected
def test_lstm_tagger_with_embedding(lstm_tagger):
bs = 10
r = inspect(
lstm_tagger,
[(1, 1)],
batch_size=bs,
input_initializer=torch.zeros,
input_dtype=torch.LongTensor,
)
expected = [
L('Embedding-1', [bs, 1, 1], [bs, 1, 1, 6], 30, 0),
L('LSTM-2', [bs, 1, 6], [[bs, 1, 6], [[1, 1, 6], [1, 1, 6]]], 336, 0),
L('Linear-3', [bs, 6], [bs, 3], 21, 0),
]
assert r == expected
```
|
{
"source": "jettify/uddsketch-py",
"score": 2
}
|
#### File: jettify/uddsketch-py/setup.py
```python
import os
from setuptools import find_packages, setup
def _read(f):
with open(os.path.join(os.path.dirname(__file__), f)) as f_:
return f_.read().strip()
classifiers = [
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries",
]
keywords = ["uddsketch", "ddsketch"]
project_urls = {
"Website": "https://github.com/jettify/uddsketch",
"Documentation": "https://uddsketch.readthedocs.io",
"Issues": "https://github.com/jettify/uddsketch/issues",
}
setup(
name="uddsketch",
description=("uddsketch"),
long_description="\n\n".join((_read("README.rst"), _read("CHANGES.rst"))),
long_description_content_type="text/x-rst",
classifiers=classifiers,
platforms=["POSIX"],
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/jettify/uddsketch-py",
download_url="https://pypi.org/project/uddsketch/",
license="Apache 2",
packages=find_packages(exclude=("tests",)),
install_requires=[],
setup_requires=[
"setuptools>=45",
"setuptools_scm",
"setuptools_scm_git_archive",
"wheel",
],
keywords=keywords,
zip_safe=True,
include_package_data=True,
project_urls=project_urls,
python_requires=">=3.7.0",
use_scm_version=True,
)
```
#### File: uddsketch-py/uddsketch/__init__.py
```python
import math
from dataclasses import dataclass
from typing import Dict, Optional, Union
from ._version import version as _version
__all__ = ("UDDSketch",)
__version__ = _version
@dataclass
class _Entry:
count: int
next_bucket: Optional[int]
class _Store:
def __init__(self) -> None:
self._store: Dict[int, _Entry] = {}
self._head: Optional[int] = None
self._tail: Optional[int] = None
self._count: int = 0
@property
def num_values(self) -> int:
return self._count
def add_to_bucket(self, bucket: int, count: int = 1) -> None:
self._count += count
next_ = self._head
if not self._store:
self._store[bucket] = _Entry(count, None)
self._head = bucket
self._tail = bucket
elif bucket in self._store:
self._store[bucket].count += count
else:
if self._head is not None and bucket < self._head:
self._store[bucket] = _Entry(count, self._head)
self._head = bucket
else:
prev = next_
while next_ is not None and bucket > next_:
prev = next_
next_ = self._store[next_].next_bucket
assert prev is not None # nosec for mypy
self._store[prev].next_bucket = bucket
self._store[bucket] = _Entry(count, next_)
if next_ is None:
self._tail = bucket
def bucket_at_count(
self, count: Union[int, float], lower: bool = True
) -> int:
assert self._tail is not None # nosec for mypy
next_ = self._head
running_count = 0
if count >= self.num_values:
return self._tail
while next_ is not None:
entry = self._store[next_]
running_count += entry.count
if lower and running_count >= count:
return next_
elif not lower and running_count > count:
return next_
next_ = entry.next_bucket
return self._tail
    def compact(self):
        # placeholder: bucket compaction (via _compact_bucket) is not implemented yet
        return
def _value_to_bucket(value: float, gamma: float) -> int:
value = math.fabs(value)
return math.ceil(math.log(value, gamma))
def _bucket_to_value(alpha: float, gamma: float, bucket: int) -> float:
return (1.0 + alpha) * gamma ** (bucket - 1)
def _compact_bucket(bucket):
return (bucket + 1 if bucket > 0 else bucket) // 2
class UDDSketch:
def __init__(
self, max_buckets: int = 256, initial_error: float = 0.01
) -> None:
self._max_buckets: int = max_buckets
self._initial_error: float = initial_error
self._alpha: float = initial_error
self._gamma: float = (1.0 + initial_error) / (1.0 - initial_error)
self._compactions = 0
        self._values_sum: float = 0.0
        self._values_sq_sum: float = 0.0  # running sum of squares, used by std()
        self._running_mean: float = 0.0
self._min = float("inf")
self._max = float("-inf")
# storage
self._neg_storage = _Store()
self._zero_counts = 0
self._pos_storage = _Store()
def min(self):
return self._min
def max(self):
return self._max
@property
def num_values(self):
return (
self._neg_storage.num_values
+ self._pos_storage.num_values
+ self._zero_counts
)
    def add(self, value: float, count: int = 1) -> None:
        self._values_sum += value * count
        self._values_sq_sum += value * value * count
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
old_count = self.num_values
new_count = old_count + count
self._running_mean = (self._running_mean * old_count) / new_count + (
value * count
) / new_count
self._min = min(self._min, value)
self._max = max(self._max, value)
if value > 0.0:
bucket = _value_to_bucket(value, self._gamma)
self._pos_storage.add_to_bucket(bucket, count=count)
elif value < 0.0:
bucket = _value_to_bucket(value, self._gamma)
self._neg_storage.add_to_bucket(bucket, count=count)
else:
self._zero_counts += count
@property
def num_compactions(self) -> int:
return self._compactions
def max_error(self) -> float:
return self._initial_error
def quantile(self, q: float) -> float:
        if not 0.0 <= q <= 1.0:
            raise ValueError("Quantile should be a value from 0 to 1.")
rank = q * (self.num_values)
val: float
if self._neg_storage.num_values > rank:
reversed_rank = self._neg_storage.num_values - rank
bucket = self._neg_storage.bucket_at_count(
reversed_rank, lower=False
)
val = -_bucket_to_value(self._alpha, self._gamma, bucket)
elif self._neg_storage.num_values + self._zero_counts > rank:
val = 0.0
else:
pos_count = rank - (
self._neg_storage.num_values + self._zero_counts
)
bucket = self._pos_storage.bucket_at_count(pos_count, lower=True)
val = _bucket_to_value(self._alpha, self._gamma, bucket)
return val
def median(self) -> float:
return self.quantile(0.5)
def mean(self) -> float:
return self._running_mean
    def std(self) -> float:
        # standard deviation from the running sums: sqrt(E[x^2] - E[x]^2);
        # the previous body returned the mean of the values, not the std
        variance = self._values_sq_sum / self.num_values - self._running_mean ** 2
        return math.sqrt(max(variance, 0.0))
    def merge(self, other: "UDDSketch") -> "UDDSketch":
        # placeholder: merging of sketches is not implemented yet
        return self
```
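A minimal usage sketch for the class above; the inputs (1000 samples from a normal distribution) are illustrative choices of mine, not taken from the project's docs:

```python
import random

from uddsketch import UDDSketch

sketch = UDDSketch(initial_error=0.01)
for _ in range(1000):
    sketch.add(random.gauss(0.0, 1.0))

# the median of a standard-normal sample should be close to 0
print(sketch.median())
print(sketch.quantile(0.95))
print(sketch.mean(), sketch.min(), sketch.max())
```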
|
{
"source": "JettJones/test-py-retry",
"score": 3
}
|
#### File: test-py-retry/tests/test_retrying.py
```python
import functools
import time
import unittest
from unittest.mock import patch
import retrying
from util import no_error, fail_n, timed_retry, FakeTime
class TestRetrying(unittest.TestCase):
def test_retry(self):
""" Timing with retry. """
r = retrying.retry(no_error)
timed_retry(r)
def test_n_retry(self):
""" Retries a fixed number of times. """
r = retrying.retry(stop_max_attempt_number=10)(fail_n(9))
fake_time = FakeTime()
with fake_time:
r()
self.assertEqual(fake_time.mock_sleep.calls, 9)
def test_backoff(self):
""" Retries with exponential backoff. """
r = retrying.retry(wait_exponential_multiplier=1000)(fail_n(9))
fake_time = FakeTime()
with fake_time:
r()
self.assertGreaterEqual(fake_time.mock_sleep.total, 2**9 - 1)
def test_deadline(self):
""" Retry limit based on total time. """
r = retrying.retry(stop_max_delay=1000, wait_fixed=200)(fail_n(5))
fake_time = FakeTime()
with fake_time:
r()
self.assertGreaterEqual(fake_time.mock_sleep.total, 1.0)
```
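The `util` module imported above is not shown in this entry; a plausible minimal reconstruction of the helpers these tests rely on (names and behavior are inferred from usage, so treat every detail as an assumption) might look like:

```python
# util.py -- hypothetical sketch, inferred from the tests above
from unittest.mock import patch

def no_error():
    """A function that always succeeds."""
    return True

def fail_n(n):
    """Return a function that raises on its first n calls, then succeeds."""
    state = {"left": n}
    def inner():
        if state["left"] > 0:
            state["left"] -= 1
            raise ValueError("failing on purpose")
        return True
    return inner

class FakeTime:
    """Context manager that replaces time.sleep and records the calls."""
    def __enter__(self):
        self._patcher = patch("time.sleep", side_effect=self._sleep)
        self.mock_sleep = self._patcher.start()
        self.mock_sleep.calls = 0
        self.mock_sleep.total = 0.0
        return self
    def _sleep(self, secs):
        self.mock_sleep.calls += 1
        self.mock_sleep.total += secs
    def __exit__(self, *exc):
        self._patcher.stop()
```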
|
{
"source": "jettom/JtSpider",
"score": 3
}
|
#### File: JtSpider/book_spiderbook/1.4.3.1.py
```python
from gevent import monkey; monkey.patch_all()
import gevent
import urllib.request, urllib.error, urllib.parse
def run_task(urlP):
print('Visit --> %s' % urlP)
try:
response = urllib.request.urlopen(urlP)
data = response.read()
print('%d bytes received from %s.' % (len(data), urlP))
except Exception as e:
print(e)
if __name__ == '__main__':
urls = ['https://github.com/', 'https://www.python.org/', 'http://www.cnblogs.com/']
greenlets = [gevent.spawn(run_task, url) for url in urls]
gevent.joinall(greenlets)
#TODO RUN ERROR
'''
Visit --> https://github.com/
Visit --> https://www.python.org/
Visit --> http://www.cnblogs.com/
<urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:726)>
<urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:726)>
<urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:726)>
Process finished with exit code 0
'''
```
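The `CERTIFICATE_VERIFY_FAILED` errors recorded above usually mean Python cannot locate a CA bundle. One common workaround for local experiments is to pass an unverified SSL context to `urlopen`; this disables certificate verification, so it is a sketch for a demo, not production code:

```python
import ssl
import urllib.request

# NOTE: disables certificate verification -- acceptable for a local demo only
insecure_ctx = ssl._create_unverified_context()

def run_task_insecure(url):
    try:
        response = urllib.request.urlopen(url, context=insecure_ctx)
        print('%d bytes received from %s.' % (len(response.read()), url))
    except Exception as e:
        print(e)
```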
#### File: jettom/JtSpider/csdn.py
```python
import re
import urllib.request, urllib.error, urllib.parse
class GetHtmlPage():
    def __init__(self, strPage):
        self.strPage = strPage
    def GetPage(self):
        req = urllib.request.Request(self.strPage)
        # add_header() returns None, so its result must not be assigned or returned
        req.add_header("User-Agent",
                       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36 SE 2.X MetaSr 1.0")
        try:
            cn = urllib.request.urlopen(req)
            page = cn.read()
            uPage = page.decode("utf-8")
            cn.close()
            return uPage
        # HTTPError is a subclass of URLError, so it must be caught first;
        # HTTPError carries .code, URLError carries .reason
        except urllib.error.HTTPError as e:
            print('HTTP Error:', e.code)
            return
        except urllib.error.URLError as e:
            print('URLError:', e.reason)
            return
class RePage():
def GetReText(self, page, recode):
# print(page,recode)
rePage = re.findall(recode, page, re.S)
for m in rePage:
print(m)
return rePage
class SaveText():
    def Save(self, text, title):
        try:
            t = "blog\\" + title + ".html"
            # the Python 2 built-in file() no longer exists; use open() instead
            with open(t, "a", encoding="utf-8") as f:
                f.write(text)
        except IOError as e:
            print(e)
if __name__ == "__main__":
    s = SaveText()
    # the Python 2 reload(sys)/setdefaultencoding hack is unnecessary on Python 3
page = GetHtmlPage("http://blog.csdn.net/u013088062/article/list/1")
htmlPage = page.GetPage()
reServer = RePage()
reBlog = reServer.GetReText(htmlPage, r'<a href="(.+?)">.*?(\s.+?)</a>')
print(reBlog)
    for ref in reBlog:
        print(ref)
        page_head = "http://blog.csdn.net/"
        strPage = page_head + ref[0]
        title = ref[1].replace('<font color="red">[TOP]</font>', "")
        title = title.replace("\r\n", "").lstrip().rstrip()
        #
        htmlPage = GetHtmlPage(strPage)
        htmlPageData = htmlPage.GetPage()
        reBlogText = reServer.GetReText(htmlPageData, '<div id="article_content" class="article_content">(.+?)</div>')
        #
        for s1 in reBlogText:
            s1 = '<meta charset="UTF-8">\n' + s1
            s.Save(s1, title)
```
#### File: JtSpider/data/getNetease.py
```python
# NOTE: the imports below are inferred from usage; the helpers
# get_content_from_internet, is_today_trading_day and
# get_all_today_stock_data_from_sina_marketcenter belong to the course code
# this script references and are not defined in this file.
import json
import os
import time
from datetime import datetime

import pandas as pd


def get_all_today_cap_from_163():
    """
    Fetch each stock's float market cap and total market cap for the day from
    NetEase Finance, to be merged with the data fetched from Sina Finance.
    :return: df['SYMBOL', 'MCAP', 'TCAP']
    """
    def deal_content(count):
        """
        Fetch data from NetEase Finance and parse it into a dict
        :param count: number of stocks to request
        :return:
        """
        # NetEase Finance "magic URL";
        # the browser-facing equivalent is: http://quotes.money.163.com/old/#query=EQA&DataType=HS_RANK&sort=PERCENT&order=desc&count=24&page=0
url = r'http://quotes.money.163.com/hs/service/diyrank.php?host=http%3A%2F%2Fquotes.money.163.com%2' \
r'Fhs%2Fservice%2Fdiyrank.php&page=0&query=STYPE%3AEQA&fields=NO%2CSYMBOL%2CNAME%2CPRICE%2CPE' \
r'RCENT%2CUPDOWN%2CFIVE_MINUTE%2COPEN%2CYESTCLOSE%2CHIGH%2CLOW%2CVOLUME%2CTURNOVER%2CHS%2CLB%' \
r'2CWB%2CZF%2CPE%2CMCAP%2CTCAP%2CMFSUM%2CMFRATIO.MFRATIO2%2CMFRATIO.MFRATIO10%2CSNAME%2CCODE%' \
rf'2CANNOUNMT%2CUVSNEWS&sort=SYMBOL&order=asc&count={count}&type=query'
        ctn = get_content_from_internet(url)  # raw payload is bytes and must be converted to str
        ctn = ctn.decode('gbk')  # decode: bytes -> str; encode: str -> bytes
        ctn = ctn.strip()  # strip leading/trailing whitespace and newlines
        # convert the dict-shaped string into a dict (loads picks the best-fitting Python types)
        ctn = json.loads(ctn)  # see appendix 2 of the original post for the exact dict layout
        return ctn
    page_count = deal_content(count=2)['total']  # request only 2 records to learn how many stocks exist
    time.sleep(1)
    content = deal_content(page_count)  # fetch every stock at once: daily data for 4000+ tickers
    k_lines = content['list']
    df = pd.DataFrame(k_lines)
    # only three columns are kept; edit the line below to retain more fields
    # -------- modify here if you need more data: start --------
    df = df[['SYMBOL', 'MCAP', 'TCAP']]  # ticker, float market cap, total market cap
    rename_dict = {'SYMBOL': '股票代码_m', 'MCAP': '流通市值', 'TCAP': '总市值'}
    # -------- modify here if you need more data: end --------
df.rename(columns=rename_dict, inplace=True)
return df
# main program
if __name__ == '__main__':
    if is_today_trading_day() is False:
        print('今天不是交易日,不需要更新股票数据,退出程序')
        exit()
    # check whether the market has closed (past 15:00; 16:00 is used to be safe)
    if datetime.now().hour < 16:
        print('今天股票尚未收盘,不更新股票数据,退出程序')
        exit()
    df_sina = get_all_today_stock_data_from_sina_marketcenter()  # course code
    df_sina['股票代码_m'] = df_sina['股票代码'].str[2:]
    df_163 = get_all_today_cap_from_163()
    # merge the two tables
    df = pd.merge(left=df_sina, right=df_163, on='股票代码_m', how='left', indicator=True, sort=True)
    df.drop(['股票代码_m', '_merge'], axis=1, inplace=True)
    # persist the data
    for i in df.index:
        t = df.iloc[i:i+1, :]
        stock_code = t.iloc[0]['股票代码']
        # build the output file path
        path = r'e:\data\stockDB\D_candles\\' + stock_code + '.csv'
        # file exists: not a newly listed stock, append without the header
        if os.path.exists(path):
            t.to_csv(path, header=None, index=False, mode='a')
        # file missing: a new listing, write the header first
        else:
            t.to_csv(path, index=False, mode='a')
        print(stock_code)
'''
{'count': 2,
'list': [{'CODE': '1000001',
'FIVE_MINUTE': 0.0028360748723767,
'HIGH': 18.34,
'HS': 0.0049360011082477,
'LB': 0.94584803647406,
'LOW': 17.7,
'MCAP': 348527486471.88,
'MFRATIO': {'MFRATIO10': 116564000000, 'MFRATIO2': 22398000000},
'MFSUM': 1.33,
'NAME': '平安银行',
'NO': 1,
'OPEN': 17.71,
'PE': 13.503759398496,
'PERCENT': 0.018718,
'PRICE': 17.96,
'SNAME': '平安银行',
'SYMBOL': '000001',
'TCAP': 348530290836.08,
'TURNOVER': 1727488481.5,
'UPDOWN': 0.33,
'VOLUME': 95786863,
'WB': -0.4374377768793,
'YESTCLOSE': 17.63,
'ZF': 0.036301758366421},
{'CODE': '1000002',
'FIVE_MINUTE': 0.00035778175313065,
'HIGH': 28.15,
'HS': 0.0063518461181229,
'LB': 0.82576904894833,
'LOW': 27.81,
'MCAP': 272569228819.99,
'MFRATIO': {'MFRATIO10': 241491467190.53,
'MFRATIO2': 19862827129.73},
'MFSUM': 3.58,
'NAME': '<NAME>',
'NO': 2,
'OPEN': 28,
'PE': 7.8296089385475,
'PERCENT': 0.002862,
'PRICE': 28.03,
'SNAME': '万科A',
'SYMBOL': '000002',
'TCAP': 325645033594.03,
'TURNOVER': 1728820912.73,
'UPDOWN': 0.08,
'VOLUME': 61766600,
'WB': -0.26104545492617,
'YESTCLOSE': 27.95,
'ZF': 0.01216457960644}],
'order': 'asc',
'page': 0,
'pagecount': 2042,
'time': '2020-11-03 23:10:46',
'total': 4083}
'''
```
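`get_content_from_internet` is part of the course code this script references and is not shown here; a plausible minimal stand-in (the retry count, timeout and error handling are my assumptions, not the original implementation) could be:

```python
# hypothetical stand-in for the course helper get_content_from_internet
import time

import requests

def get_content_from_internet(url, max_try_num=10, sleep_time=5):
    """Fetch url and return the raw bytes, retrying on transient errors."""
    for attempt in range(max_try_num):
        try:
            resp = requests.get(url, timeout=10)
            resp.raise_for_status()
            return resp.content  # bytes; the caller decodes them ('gbk' above)
        except requests.RequestException as e:
            print('fetch failed (%s), retry %d/%d' % (e, attempt + 1, max_try_num))
            time.sleep(sleep_time)
    raise RuntimeError('failed to fetch %s after %d tries' % (url, max_try_num))
```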
|
{
"source": "jettom/numpy-unittest-100",
"score": 3
}
|
#### File: jettom/numpy-unittest-100/test_array_slicing.py
```python
import unittest
import numpy as np
from numpy.testing import assert_array_equal
class TestArraySlicing(unittest.TestCase):
def test_slicing_1d(self):
vector = np.arange(10)
assert_array_equal(vector[2:5], np.array([2, 3, 4]))
def test_slicing_1d_with_step(self):
vector = np.arange(10)
assert_array_equal(vector[0:10:2], np.array([0, 2, 4, 6, 8]))
def test_slicing_1d_reverse(self):
vector = np.arange(10)
assert_array_equal(vector[::-1], np.array([9, 8, 7, 6, 5, 4, 3, 2, 1, 0]))
def test_slicing_2d(self):
metrix = np.arange(9).reshape(3, 3)
assert_array_equal(metrix[0:2, 0:2], np.array([[0, 1], [3, 4]]))
if __name__ == '__main__':
unittest.main()
```
#### File: jettom/numpy-unittest-100/test_array_ufunc.py
```python
import unittest
import numpy as np
from numpy.testing import assert_array_equal
class TestArrayUfunc(unittest.TestCase):
def test_abs(self):
vector = np.array([-1, 1, -7])
assert_array_equal(np.abs(vector), np.array([1, 1, 7]))
def test_sqrt(self):
vector = np.array([9, 16, 25])
assert_array_equal(np.sqrt(vector), np.array([3, 4, 5]))
def test_sort(self):
vector = np.array([1, 6, 2, 3, 8, 1])
assert_array_equal(np.sort(vector), np.array([1, 1, 2, 3, 6, 8]))
def test_argsort(self):
vector = np.array([2, 6, 1])
assert_array_equal(np.argsort(vector), np.array([2, 0, 1]))
def test_add(self):
A = np.array([0, 1, 2])
B = np.array([2, -1, 4])
assert_array_equal(np.add(A, B), np.array([2, 0, 6]))
def test_subtract(self):
A = np.array([0, 1, 2])
B = np.array([2, -1, 4])
assert_array_equal(np.subtract(A, B), np.array([-2, 2, -2]))
def test_maximum(self):
A = np.array([0, 1, 2])
B = np.array([2, -1, 4])
assert_array_equal(np.maximum(A, B), np.array([2, 1, 4]))
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jettpettus/modularizationandtesting",
"score": 3
}
|
#### File: project_2_packages/iv_jett/iv_standard_error.py
```python
import numpy as np
from iv_jett.iv_init import projection_matrix
def calculate_resid(Z, X, Y, beta_iv, nocons = False):
    N = X.shape[0]  # number of observations (was previously undefined here)
    if nocons == False:
        X_constant = np.ones((N, 1))
        X = np.hstack((X_constant, X))
    resid = Y - X @ beta_iv
    return resid
def calculate_sigma(Z, X, Y, beta_iv, nocons = False):
'''
Description:
Estimates the variance of the errors.
Inputs:
Z (matrix) -- Instrumental Variables
X (matrix) -- Endogenous Variables
Y (matrix) -- Outcome Variable
beta_iv -- IV results
nocons -- Option for constant (default False)
'''
N = Z.shape[0]
if nocons == False:
X_constant = np.ones((N, 1))
X = np.hstack((X_constant, X))
resid = Y - X @ beta_iv
sigma_iv = (1/N) * np.transpose(resid) @ resid
return np.sqrt(sigma_iv)
def calculate_var_beta(sigma, X, Z, resid, nocons = False, robust = False):
'''
Description:
Calculates the variance of beta, the IV estimator
Inputs:
sigma
Z (matrix) -- Instrumental Variables
X (matrix) -- Endogenous Variables
nocons -- Option for constant (default False)
    Outputs:
se_beta (array) -- Standard error
'''
N = Z.shape[0]
if nocons == False:
X_constant = np.ones((N, 1))
X = np.hstack((X_constant, X))
Z_constant = np.ones((N, 1))
Z = np.hstack((Z_constant, Z))
    if robust == False:
        # np.asscalar was removed from NumPy; float() achieves the same conversion
        var_beta = (float(sigma**2) * np.identity(X.shape[1])) @ np.linalg.inv(np.transpose(X) @ projection_matrix(Z) @ X)
        se_beta = np.sqrt(np.diag(var_beta))
if robust == True:
resid_sq = resid @ np.transpose(resid)
print("resid sq:")
print(resid_sq)
sandwich_bread = np.linalg.inv(np.transpose(Z) @ X)
var_beta = (sandwich_bread @ np.transpose(Z) @ np.diag(np.diag(resid_sq)) @ Z @ sandwich_bread)
print("var_beta")
print(var_beta)
se_beta = np.sqrt(np.diag(var_beta))
return se_beta
```
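A small usage sketch with simulated data; it assumes `projection_matrix(Z)` in `iv_jett.iv_init` returns the usual projection Z(Z'Z)^{-1}Z', and the IV estimate itself is computed inline here because the estimation routine is not shown in this file:

```python
import numpy as np
from iv_jett.iv_standard_error import calculate_resid, calculate_sigma, calculate_var_beta

rng = np.random.default_rng(0)
N = 500
Z = rng.normal(size=(N, 1))              # instrument
X = 0.8 * Z + rng.normal(size=(N, 1))    # endogenous regressor
Y = 1.0 + 2.0 * X + rng.normal(size=(N, 1))

# 2SLS estimate with a constant, computed inline for the example
Zc = np.hstack((np.ones((N, 1)), Z))
Xc = np.hstack((np.ones((N, 1)), X))
beta_iv = np.linalg.solve(Zc.T @ Xc, Zc.T @ Y)

resid = calculate_resid(Z, X, Y, beta_iv)
sigma = calculate_sigma(Z, X, Y, beta_iv)
se = calculate_var_beta(sigma, X, Z, resid)
print(beta_iv.ravel(), se)
```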
|
{
"source": "jettro/semi-website",
"score": 3
}
|
#### File: semi-website/script/createSitemap.py
```python
from bs4 import BeautifulSoup
import requests
from urllib.request import urlopen
import re
from datetime import datetime
LOADEDLINKS = []
def getLinks(i):
global LOADEDLINKS
url = i.split('#')[0]
if url not in LOADEDLINKS:
if ('.jpg' in url) or ('.zip' in url) or ('.svg' in url):
return
LOADEDLINKS.append(url)
        try:
            request = urlopen('https://www.semi.technology' + url)
        except Exception:
            return
        if request.status == 200:  # 'is 200' relied on int interning; compare with ==
soup = BeautifulSoup(request.read(), "lxml")
for link in soup.findAll('a'):
link = link.get('href')
if link is not None:
if ('https://' not in link) and ('http://' not in link) and ('mailto:' not in link) and ('javascript:' not in link):
link = link.split('#')[0]
if link.startswith('/'):
getLinks(link)
elif link.startswith('./'):
getLinks(link.replace('./', '/'))
elif link != '':
getLinks(url+link)
# Get the links
getLinks("/")
# create sitemap
sitemap = open('sitemap.xml', 'w+')
sitemap.write('<?xml version="1.0" encoding="UTF-8"?><urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'+"\n")
for link in LOADEDLINKS:
sitemap.write('<url>'+"\n")
sitemap.write('<loc>https://semi.technology'+link+'</loc>'+"\n")
sitemap.write('<lastmod>'+datetime.today().strftime('%Y-%m-%d')+'</lastmod>'+"\n")
sitemap.write('</url>'+"\n")
sitemap.write('</urlset>')
# post links to the wayback machine
for link in LOADEDLINKS:
url = 'https://www.semi.technology' + link
request = requests.post(url = 'https://pragma.archivelab.org', json = { 'url': url })
print(request.status_code, url)
print('DONE')
```
|
{
"source": "Jettsteel/terminal-todo",
"score": 4
}
|
#### File: terminal-todo/src/db.py
```python
import sqlite3
import os
from datetime import *
import locale
import shutil
# date and welcome
locale.setlocale(locale.LC_ALL, '')
today = datetime.today().strftime('%d-%m-%B').split('-')
print(f'Boas vindas ao terminal todo! Hoje é dia {today[0]} de {today[2]}!')
## useful functions
# convert tuple to string
def toString(tuple):
if type(tuple) == str:
return tuple.replace('(', '').replace(')', '').replace(',', '').replace("'", "")
else:
return str(tuple).replace('(', '').replace(')', '').replace(',', '').replace("'", "")
# convert date words to numbers
def toDate(date):
td = [['today', 'hoje', 'hj'], ['amanha', 'amanhã', 'amnh', 'tomorrow'], ['semana que vem']]
if date in td[0]:
date = today[0]
elif date in td[1]:
date = str(int(today[0]) + 1)
elif date in td[2]:
date = str(int(today[0]) + 7)
return date
def toNotDate(date):
if date == today[0]: return 'hoje'
elif date == str(int(today[0]) + 1): return 'amanhã'
elif date == str(int(today[0]) + 7): return 'semana que vem'
else: return date
# database creation
cur = os.path.abspath(os.getcwd())
if not os.path.isfile(f'{cur}/db.sqlite'):
open(f'{cur}/db.sqlite', 'x')
else: pass
# database setup
database = sqlite3.connect('db.sqlite')
db = database.cursor()
db.execute("""CREATE TABLE IF NOT EXISTS dates (
dates_id INTEGER PRIMARY KEY AUTOINCREMENT,
date TEXT
);""")
db.execute("""CREATE TABLE IF NOT EXISTS items (
dates_id INTEGER,
item TEXT
);""")
# database for done items
if not os.path.isfile(f'{cur}/itemsdone.sqlite'):
open(f'{cur}/itemsdone.sqlite', 'x')
else: pass
_database = sqlite3.connect('itemsdone.sqlite')
_db = _database.cursor()
_db.execute('CREATE TABLE IF NOT EXISTS done (date TEXT, item TEXT)')
# function for adding items do done db
def doneDatabase(date, item):
if date and item:
_db.execute('INSERT INTO done VALUES (?, ?)', (date.replace("'", ''), str(item),))
_database.commit()
else: return 0
# init function
def init():
    # delete todos from dates before today
    db.execute("SELECT dates_id FROM dates WHERE date < ?", (today[0],))
    for (old_id,) in db.fetchall():  # fetchone() would clear at most one old date's items
        db.execute("DELETE FROM items WHERE dates_id = ?", (old_id,))
    db.execute('DELETE FROM dates WHERE date < ?', (today[0],))
database.commit()
# print all todos for today
db.execute(f"SELECT dates_id FROM dates WHERE date = ?", (today[0],))
date2_id = toString(db.fetchone())
db.execute('SELECT item FROM items WHERE dates_id = ?', (date2_id,))
i = db.fetchall()
if i:
print('Items para hoje: ')
for row in range(len(i)):
sla = toString(i[row])
print(f"- {sla}")
else: print('Sem itens para hoje!')
init()
## functions
# add item to database
def addItem(dater, item):
date = toDate(dater)
db.execute(f'SELECT * FROM dates WHERE date = ?', (date,))
data = db.fetchall()
if len(data) == 0:
print('Data não encontrada, criando uma nova...')
db.execute(f"INSERT INTO dates VALUES (NULL, ?)", (date,))
db.execute(f"SELECT dates_id FROM dates WHERE date = ?", (date,))
date_id = db.fetchall()[0][0]
db.execute(f"INSERT INTO items VALUES (?, ?)", (str(date_id), str(item),))
print(f"Item adicionado, criada data {date}")
else:
db.execute(f"SELECT dates_id FROM dates WHERE date = ?", (date,))
date_id = db.fetchall()[0][0]
db.execute(f"INSERT INTO items VALUES (?, ?)", (str(date_id), str(item),))
print(f"Adicionado item a data {date}")
database.commit()
# done item - basically remove item from database
def doneItem(item):
    db.execute('SELECT * FROM items WHERE item = ?', (item,))
    data = db.fetchall()  # indexing [0] here would raise IndexError when the item is missing
    if data:
print(f'Item encontrado! Excluindo item...')
db.execute('SELECT dates_id FROM items WHERE item = ?', (item,))
date_id = db.fetchone()[0]
db.execute('SELECT date FROM dates WHERE dates_id = ?', (date_id,))
dat = toString(db.fetchone())
doneDatabase(dat, item)
db.execute('DELETE FROM items WHERE item = ?', (item,))
print('Item excluído!')
else:
print('Item não encontrado...')
database.commit()
# show all todos from specific date
def showItems(dater):
date = toDate(dater)
db.execute('SELECT dates_id FROM dates WHERE date = ?', (date,))
row = db.fetchall()
if row:
print(f'Itens para {toNotDate(date)}:\n')
list = toString(row[0])
db.execute('SELECT item FROM items WHERE dates_id = ?', (list,))
item = db.fetchall()
for i in range(len(item)):
_item = toString(item[i])
print(f"- {_item}")
else: print('Data não existente')
# show all todos and their date
def showAll():
    db.execute('SELECT dates_id FROM dates')
    d = db.fetchall()
    if d:
        for i in range(len(d)):
            date_id = d[i][0]
            # parameters must be a tuple; str(x,) was just str(x) and passed a bare string
            db.execute('SELECT date FROM dates WHERE dates_id = ?', (date_id,))
            date = toString(db.fetchone())
            print(f"\nTodos do dia {toNotDate(date)}:")
            db.execute('SELECT item FROM items WHERE dates_id = ?', (date_id,))
            it = db.fetchall()
            for row in range(len(it)):
                item_text = toString(it[row])
                print(f"- {item_text}")
    else:
        print('Ainda não há nenhum item!')
# show all done todos
def showDone():
_db.execute('SELECT date FROM done')
daate = _db.fetchall()
def parseDuplicated():
lista = []
for i in range(len(daate)):
lista.append(toString(daate[i]))
return list(dict.fromkeys(lista))
_date = parseDuplicated()
for i in range(len(_date)):
_db.execute("SELECT item FROM done WHERE date = ?", (str(_date[i]),))
_item = _db.fetchall()
print(f'\nItems feitos de {toNotDate(_date[i])}')
for row in range(len(_item)):
itt = toString(_item[row])
print(f"- {itt}")
# reset entire aplication to default
def resetDefault():
os.remove(f'{cur}/db.sqlite')
os.remove(f'{cur}/itemsdone.sqlite')
shutil.rmtree(f'{cur}/__pycache__', ignore_errors=True)
```
#### File: terminal-todo/src/main.py
```python
from db import addItem, doneItem, showItems, showAll, showDone, resetDefault
# main function
def main():
yes = ['sim', 'si', 'sisi', 'yes', 'yeah', 'yep', 'aye']
def anyMore():
answer = input('\nAlgo mais? \n> ')
if answer in yes: main()
else: exit()
command = input('\nO que deseja fazer? Para saber dos comandos, use help:\n> ').split()
# help command
if command[0] == 'help':
print('''
use "todo" + data da sua atividade + item a fazer!\n
use "done" + item que você finalizou!\n
"show" + data para ver TODOs de tal data!\n
"showall" para ver todos os TODOs!\n
"showdone" para ver todos os TODOs finalizados!\n
"reset" para resetar a aplicação!
''')
main()
# add todo command
elif command[0] == 'todo':
item = ''
for i in range(2, len(command)):
item += f"{command[i]} "
addItem(command[1], item)
anyMore()
# done todo command
elif command[0] == 'done':
item = ''
for i in range(1, len(command)):
item += f"{command[i]} "
doneItem(item)
anyMore()
# show todo from specific date command
elif command[0] == 'show':
showItems(command[1])
anyMore()
# show all todos command
elif command[0] == 'showall':
showAll()
anyMore()
# show all done todos
elif command[0] == 'showdone':
showDone()
anyMore()
# reset to default command
elif command[0] == 'reset':
st = 'Tem certeza? Digite "sim eu tenho" para excluir as databases: \n> '
if input(st) == 'sim eu tenho':
resetDefault()
print('Aplicação resetada!')
exit()
else:
print('Ok, não excluirei!\n')
anyMore()
# command not recognized
else:
if input('Não entendi. Quer tentar novamente?\n> ') in yes: main()
else: exit()
main()
```
|
{
"source": "jetuk/cython-sundials",
"score": 3
}
|
#### File: examples/cvode/testCvAdvDiff_bnd.py
```python
from pySundials.cvode import Cvode
from pySundials.sundials import NvectorNdarrayFloat64
import numpy as np
# Problem Constants
XMAX =2.0 # domain boundaries
YMAX =1.0
MX =10 # mesh dimensions
MY =5
NEQ =MX*MY # number of equations
ATOL =1.0e-5 # scalar absolute tolerance
T0 =0.0 # initial time
T1 =0.1 # first output time
DTOUT =0.1 # output time increment
NOUT =10 # number of output times
ZERO =0.0
HALF =0.5
ONE =1.0
TWO =2.0
FIVE =5.0
# User-defined vector access macro IJth
"""
IJth is defined in order to isolate the translation from the
mathematical 2-dimensional structure of the dependent variable vector
to the underlying 1-dimensional storage.
IJth(vdata,i,j) references the element in the vdata array for
u at mesh point (i,j), where 1 <= i <= MX, 1 <= j <= MY.
The vdata array is obtained via the macro call vdata = NV_DATA_S(v),
where v is an N_Vector.
The variables are ordered by the y index j, then by the x index i.
"""
class AdvDiff(Cvode):
"""
UserData (contains grid constants)
"""
def __init__(self, dx, dy, hdcoef, hacoef, vdcoef, **kwds):
Cvode.__init__(self, **kwds)
self.dx = dx
self.dy = dy
self.hdcoef = hdcoef
self.hacoef = hacoef
self.vdcoef = vdcoef
def RhsFn(self, t, u, udot):
"""
f routine. Compute f(t,u).
"""
# Extract needed constants from data
hordc = self.hdcoef
horac = self.hacoef
verdc = self.vdcoef
# Loop over all grid points.
for j in range(MY):
for i in range(MX):
# Extract u at x_i, y_j and four neighboring points
uij = u.data[i,j]
udn = ZERO if j == 0 else u.data[i,j-1]
uup = ZERO if j == MY-1 else u.data[i,j+1]
ult = ZERO if i == 0 else u.data[i-1,j]
urt = ZERO if i == MX-1 else u.data[i+1,j]
# Set diffusion and advection terms and load into udot
hdiff = hordc*(ult - TWO*uij + urt)
hadv = horac*(urt - ult)
vdiff = verdc*(uup - TWO*uij + udn)
udot.data[i,j] = hdiff + hadv + vdiff
return 0
def RootFn(self, t, y, gout):
"""
g routine. Compute functions g_i(t,y) for i = 0,1.
"""
y1 = y.data[0]; y3 = y.data[2]
gout[0] = y1 - 0.0001
gout[1] = y3 - 0.01
return 0
def DlsBandJacFn(self, N, mupper, mlower, t, y, fy, J, tmp1, tmp2, tmp3):
"""
Jacobian routine. Compute J(t,u).
The components of f = udot that depend on u(i,j) are
f(i,j), f(i-1,j), f(i+1,j), f(i,j-1), f(i,j+1), with
df(i,j)/du(i,j) = -2 (1/dx^2 + 1/dy^2)
df(i-1,j)/du(i,j) = 1/dx^2 + .25/dx (if i > 1)
df(i+1,j)/du(i,j) = 1/dx^2 - .25/dx (if i < MX)
df(i,j-1)/du(i,j) = 1/dy^2 (if j > 1)
df(i,j+1)/du(i,j) = 1/dy^2 (if j < MY)
"""
hordc = self.hdcoef
horac = self.hacoef
verdc = self.vdcoef
for j in range(MY):
for i in range(MX):
k = j + i*MY
#kthCol = BAND_COL(J,k);
# set the kth column of J
#BAND_COL_ELEM(kthCol,k,k) = -TWO*(verdc+hordc);
#if (i != 1) BAND_COL_ELEM(kthCol,k-MY,k) = hordc + horac;
#if (i != MX) BAND_COL_ELEM(kthCol,k+MY,k) = hordc - horac;
#if (j != 1) BAND_COL_ELEM(kthCol,k-1,k) = verdc;
#if (j != MY) BAND_COL_ELEM(kthCol,k+1,k) = verdc;:
                J[k,k] = -TWO*(verdc+hordc)  # matches the stencil above: df(i,j)/du(i,j) = -2(1/dx^2 + 1/dy^2)
if i != 0: J[k-MY,k] = hordc + horac
if i != MX-1: J[k+MY,k] = hordc - horac
if j != 0: J[k-1,k] = verdc
if j != MY-1: J[k+1,k] = verdc
return 0
def SetIC(self, u):
"""
Load initial profile into u vector
"""
for j in range(MY):
y = (j+1)*self.dy
for i in range(MX):
x = (i+1)*self.dx
u.data[i,j] = x*(XMAX - x)*y*(YMAX - y)*np.exp(FIVE*x*y)
def PrintOutput(self, t, umax):
"""
Print current value
"""
nst = self.numSteps
        print("At t = %4.2f max.norm(u) =%14.6e nst = %4ld" % (t, umax, nst))
def PrintFinalStats(self, ):
"""
Get and print some final statistics
"""
        print("\nFinal Statistics:")
        print("nst = %-6ld nfe = %-6ld nsetups = %-6ld nfeLS = %-6ld nje = %ld" % (
            self.numSteps, self.numRhsEvals, self.numLinSolvSetups, self.numRhsEvals, self.dlsNumJacEvals))
        print("nni = %-6ld ncfn = %-6ld netf = %ld\n " % (
            self.numNonlinSolvIters, self.numNonlinSolvConvFails, self.numErrTestFails))
def PrintHeader(reltol, abstol, umax):
"""
Print first lines of output (problem description)
"""
    print("\n2-D Advection-Diffusion Equation")
    print("Mesh dimensions = %d X %d" % (MX, MY))
    print("Total system size = %d" % NEQ)
    print("Tolerance parameters: reltol = %g abstol = %g\n" % (reltol, abstol))
    print("At t = %g max.norm(u) =%14.6e " % (T0, umax))
if __name__ == '__main__':
# Create a serial vector
u = NvectorNdarrayFloat64( (MX,MY) )
reltol = ZERO # Set the tolerances
abstol = ATOL
dx = XMAX/(MX+1); # Set grid coefficients in data
dy = YMAX/(MY+1);
# Call CVodeCreate to create the solver memory and specify the
# Backward Differentiation Formula and the use of a Newton iteration
cvode_mem = AdvDiff(dx, dy, ONE/(dx*dx), HALF/(TWO*dx), ONE/(dy*dy),
multistep='bdf', iteration='newton')
cvode_mem.SetIC(u) # Initialize u vector
# Call CVodeInit to initialize the integrator memory and specify the
# user's right hand side function in u'=f(t,u), the inital time T0, and
# the initial dependent variable vector u.
cvode_mem.initSolver( T0, u )
# Call CVodeSStolerances to specify the scalar relative tolerance
# and scalar absolute tolerance
cvode_mem.setTolerances(reltol, abstol)
# Call CVBand to specify the CVBAND band linear solver
# Set the user-supplied Jacobian routine Jac
cvode_mem.setupBandLinearSolver( NEQ, MY, MY, user_jac=True)
# In loop over output points: call CVode, print results, test for errors
umax = u.MaxNorm()
PrintHeader(reltol, abstol, umax);
tout = T1
for iout in range(NOUT):
flag, t = cvode_mem.Solve(tout, u)
umax = u.MaxNorm()
cvode_mem.PrintOutput(t, umax,)
tout += DTOUT
cvode_mem.PrintFinalStats() # Print some final statistics
```
|
{
"source": "jetuk/pypet",
"score": 2
}
|
#### File: jetuk/pypet/setup.py
```python
import numpy
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('pypet', parent_package, top_path)
config.add_extension('_sol_ufunc', ['pypet/_sol_ufunc.c'])
config.add_extension('_pet_ufunc', ['pypet/_pet_ufunc.c'])
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(
name='pypet',
version='0.1dev',
description='Python Potential Evapotranspiration functions.',
author='<NAME>',
author_email='<EMAIL>',
packages=['pypet', ],
configuration=configuration
)
```
|
{
"source": "jet-universe/particle_transformer",
"score": 3
}
|
#### File: particle_transformer/networks/example_PCNN.py
```python
import torch
import torch.nn as nn
class ResNetUnit(nn.Module):
r"""Parameters
----------
in_channels : int
Number of channels in the input vectors.
out_channels : int
Number of channels in the output vectors.
strides: tuple
Strides of the two convolutional layers, in the form of (stride0, stride1)
"""
def __init__(self, in_channels, out_channels, strides=(1, 1), **kwargs):
super(ResNetUnit, self).__init__(**kwargs)
self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=strides[0], padding=1)
self.bn1 = nn.BatchNorm1d(out_channels)
self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=3, stride=strides[1], padding=1)
self.bn2 = nn.BatchNorm1d(out_channels)
self.relu = nn.ReLU()
self.dim_match = True
if not in_channels == out_channels or not strides == (1, 1): # dimensions not match
self.dim_match = False
self.conv_sc = nn.Conv1d(in_channels, out_channels, kernel_size=1,
stride=strides[0] * strides[1], bias=False)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
# print('resnet unit', identity.shape, x.shape, self.dim_match)
if self.dim_match:
return identity + x
else:
return self.conv_sc(identity) + x
class ResNet(nn.Module):
r"""Parameters
----------
features_dims : int
Input feature dimensions.
num_classes : int
Number of output classes.
conv_params : list
List of the convolution layer parameters.
The first element is a tuple of size 1, defining the transformed feature size for the initial feature convolution layer.
The following are tuples of feature size for multiple stages of the ResNet units. Each number defines an individual ResNet unit.
fc_params: list
List of fully connected layer parameters after all EdgeConv blocks, each element in the format of
(n_feat, drop_rate)
"""
def __init__(self, features_dims, num_classes,
conv_params=[(32,), (64, 64), (64, 64), (128, 128)],
fc_params=[(512, 0.2)],
for_inference=False,
**kwargs):
super(ResNet, self).__init__(**kwargs)
self.conv_params = conv_params
self.num_stages = len(conv_params) - 1
self.fts_conv = nn.Sequential(
nn.BatchNorm1d(features_dims),
nn.Conv1d(
in_channels=features_dims, out_channels=conv_params[0][0],
kernel_size=3, stride=1, padding=1),
nn.BatchNorm1d(conv_params[0][0]),
nn.ReLU())
# define ResNet units for each stage. Each unit is composed of a sequence of ResNetUnit block
self.resnet_units = nn.ModuleDict()
for i in range(self.num_stages):
# stack units[i] layers in this stage
unit_layers = []
for j in range(len(conv_params[i + 1])):
in_channels, out_channels = (conv_params[i][-1], conv_params[i + 1][0]) if j == 0 \
else (conv_params[i + 1][j - 1], conv_params[i + 1][j])
strides = (2, 1) if (j == 0 and i > 0) else (1, 1)
unit_layers.append(ResNetUnit(in_channels, out_channels, strides))
self.resnet_units.add_module('resnet_unit_%d' % i, nn.Sequential(*unit_layers))
# define fully connected layers
fcs = []
for idx, layer_param in enumerate(fc_params):
channels, drop_rate = layer_param
in_chn = conv_params[-1][-1] if idx == 0 else fc_params[idx - 1][0]
fcs.append(nn.Sequential(nn.Linear(in_chn, channels), nn.ReLU(), nn.Dropout(drop_rate)))
fcs.append(nn.Linear(fc_params[-1][0], num_classes))
if for_inference:
fcs.append(nn.Softmax(dim=1))
self.fc = nn.Sequential(*fcs)
def forward(self, points, features, lorentz_vectors, mask):
# x: the feature vector, (N, C, P)
if mask is not None:
features = features * mask
x = self.fts_conv(features)
for i in range(self.num_stages):
            x = self.resnet_units['resnet_unit_%d' % i](x)  # (N, C', P'), P'<P due to kernel_size>1 or stride>1
# global average pooling
x = x.mean(dim=-1) # (N, C')
# fully connected
x = self.fc(x) # (N, out_chn)
return x
def get_model(data_config, **kwargs):
conv_params = [(32,), (64, 64), (64, 64), (128, 128)]
fc_params = [(512, 0.2)]
pf_features_dims = len(data_config.input_dicts['pf_features'])
num_classes = len(data_config.label_value)
model = ResNet(pf_features_dims, num_classes,
conv_params=conv_params,
fc_params=fc_params)
model_info = {
'input_names': list(data_config.input_names),
'input_shapes': {k: ((1,) + s[1:]) for k, s in data_config.input_shapes.items()},
'output_names': ['softmax'],
'dynamic_axes': {**{k: {0: 'N', 2: 'n_' + k.split('_')[0]} for k in data_config.input_names}, **{'softmax': {0: 'N'}}},
}
return model, model_info
def get_loss(data_config, **kwargs):
return torch.nn.CrossEntropyLoss()
```
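A quick smoke test for the network above; the shapes are illustrative (a batch of 4 jets, 7 features, 50 particles), and `points`/`lorentz_vectors` are passed as `None` since this model only consumes `features` and `mask`:

```python
import torch

model = ResNet(features_dims=7, num_classes=5)
features = torch.randn(4, 7, 50)             # (N, C, P)
mask = (torch.rand(4, 1, 50) > 0.1).float()  # zero out ~10% of particles
logits = model(points=None, features=features, lorentz_vectors=None, mask=mask)
print(logits.shape)  # torch.Size([4, 5])
```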
|
{
"source": "jetwang/pygame-scratch",
"score": 3
}
|
#### File: src/pygamescratch/sprite.py
```python
import random
from pygamescratch.pygs import *
class Sprite(object):
def __init__(self, sprite_name, center_x=0, center_y=0):
"""
定义一个角色对象
:param sprite_name: 角色名称,该名称也对应default_sprite_image_folder定义的文件夹下面的角色图片所在的文件夹
:param center_x:
:param center_y:
"""
pygs._sprites_max_id = pygs._sprites_max_id + 1
self.id = sprite_name + str(pygs._sprites_max_id)
self.sprite_name = sprite_name
self.size = 100
self.direction = 0
self.timer_start = time.perf_counter()
self.event_watcher = {}
self.costume = {}
self.text = None
self.text_end_time = None
self.showing = True
sprite_image_name = sprite_name
if not os.path.exists(sprite_image_name):
sprite_image_name = pygs.default_sprite_image_folder + sprite_image_name
for file_name in os.listdir(sprite_image_name):
file_name_key = os.path.splitext(file_name)[0]
self.costume[file_name_key] = os.path.join(sprite_image_name, file_name) # open(os.path.join(name,file_name), 'r')
current_costume = list(self.costume.items())[0]
self.current_costume_key = current_costume[0]
self.current_costume_value = current_costume[1]
self.image = pygame.image.load(self.current_costume_value).convert_alpha()
self.rect = self.image.get_rect() # rect(1,2,3,4) # self.sprite.get_rect()
width = self.rect.width
height = self.rect.height
self.rect.x = center_x - width / 2
self.rect.y = center_y - height / 2
        self.center_x = center_x  # stored as floats: pygame coordinates are ints, so moves smaller than 1 pixel would otherwise be lost
        self.center_y = center_y  # also, rect coordinates are the top-left corner while angles are computed from the center, so caching these simplifies the math
self.rotate_angle = 0
pygs.sprites_in_game[self.id] = self
self.event(EVENT_SPRITE_CREATED, self)
def move(self, steps):
"""
根据角色的direction(这是一个角度)移动,会根据direction计算出x和y分别移动的像素值
:param steps:
:return:
"""
direction_pi = math.pi * (self.direction / 180) # to π
steps_x = steps * math.cos(direction_pi)
steps_y = steps * math.sin(direction_pi)
self.go_to(self.center_x + steps_x, self.center_y + steps_y)
def turn_right(self, degrees):
"""
向右旋转
:param degrees:
:return:
"""
self.turn(-degrees)
def turn_left(self, degrees):
"""
向左旋转
:param degrees:
:return:
"""
self.turn(degrees)
def go_to(self, new_x, new_y):
"""
移到新的坐标
:param new_x:
:param new_y:
:return:
"""
self.set_x_to(new_x)
self.set_y_to(new_y)
def go_to_random_position(self):
"""
移到窗口内随机位置
:return:
"""
random_x = random.randint(0, pygs.max_x)
random_y = random.randint(0, pygs.max_y)
self.go_to(random_x, random_y)
def go_to_mouse_pointer(self):
"""
移到鼠标所在位置
:return:
"""
self.go_to(pygs.mouse_position[0], pygs.mouse_position[1])
def point(self, direction):
"""
指向特定角度,正右为0度,按照顺时针累加,正上为-90度,正下90度,正左为180度或-180度。
:param direction:
:return:
"""
self.direction = direction
def point_to(self, center_x, center_y):
"""
指向特定坐标
:param center_x:
:param center_y:
:return:
"""
direction_pi = math.atan2(center_y - self.center_y, center_x - self.center_x)
self.direction = (direction_pi * 180) / math.pi
def point_to_sprite(self, target_sprite):
"""
指定特定角色
:param target_sprite:
:return:
"""
self.point_to(target_sprite.center_x, target_sprite.center_y)
def point_towards_mouse_pointer(self):
"""
指向鼠标所在位置
:return:
"""
mouse_x = pygs.mouse_position[0]
mouse_y = pygs.mouse_position[1]
self.point_to(mouse_x, mouse_y)
def change_x_by(self, change_x):
"""
调整x坐标
:param change_x: 要调整的值
:return:
"""
self.center_x = self.center_x + change_x
self._adjust_position()
def set_x_to(self, new_x):
"""
设置x坐标
:param new_x: 要设置的新值
:return:
"""
self.center_x = new_x
self._adjust_position()
def change_y_by(self, change_y):
"""
调整y坐标
:param change_y: 要调整的值
:return:
"""
self.center_y = self.center_y + change_y
self._adjust_position()
def set_y_to(self, new_y):
"""
设置y坐标
:param new_y: 要设置的新值
:return:
"""
self.center_y = new_y
self._adjust_position()
def touching_edge(self):
"""
判断是否在边缘
:return:
"""
if self.rect.x >= pygs.max_x - self.rect.width or self.rect.x <= 0 or self.rect.y >= pygs.max_y - self.rect.height or self.rect.y <= 0:
return True
return False
def bounce_if_on_edge(self):
"""
如果碰到边缘就反弹
:return:
"""
if self.rect.x >= pygs.max_x - self.rect.width:
self.direction = 180 - self.direction
elif self.rect.x <= 0:
self.direction = 180 - self.direction
elif self.rect.y >= pygs.max_y - self.rect.height:
self.direction = - self.direction
elif self.rect.y <= 0:
self.direction = - self.direction
def _adjust_position(self):
max_center_x = pygs.max_x - self.rect.width / 2
max_center_y = pygs.max_y - self.rect.height / 2
if self.center_x > max_center_x:
self.center_x = max_center_x
if self.center_x < self.rect.width / 2:
self.center_x = self.rect.width / 2
if self.center_y > max_center_y:
self.center_y = max_center_y
if self.center_y < self.rect.height / 2:
self.center_y = self.rect.height / 2
self.rect.x = self.center_x - self.rect.width / 2
self.rect.y = self.center_y - self.rect.height / 2
def flip(self):
"""
翻转
:return:
"""
self.sprite = pygame.transform.flip(self.sprite, True, False)
def turn(self, degrees):
self.rotate_angle += degrees
self.direction = self.direction + degrees
# Looks
def say(self, text_str, size=20, color=(128, 128, 128), bg_color=None):
"""
角色标注,可以在角色旁边显示一段文字
:param text_str: 文字内容
:param size: 字体大小
:param color: 字体颜色
:param bg_color: 字体背景颜色
:return:
"""
self.say_for_seconds(text_str, None, size, color, bg_color)
def say_for_seconds(self, text_str, secs=2, size=20, color=(128, 128, 128), bg_color=None):
"""
角色标注,可以在角色旁边显示一段文字, 若干秒后会消失
:param text_str: 文字内容
:param secs: 存在秒数
:param size: 字体大小
:param color: 字体颜色
:param bg_color: 字体背景颜色
:return:
"""
font = pygame.font.Font(pygs.default_font_name, size)
text_image = font.render(str(text_str), True, color) # ,(128,128,128)
self.text = {"text": text_str, "size": size, "text_image": text_image, "bg_color": bg_color}
if secs is not None:
self.text_end_time = time.perf_counter() + secs
else:
self.text_end_time = None
def switch_costume_to(self, name):
"""
切换造型
:param name: 造型名称(也就是图片去掉扩展名的名称)
:return:
"""
if name != self.current_costume_key:
self.current_costume_key = name
self.current_costume_value = self.costume.get(name)
new_sprite = pygame.image.load(self.current_costume_value).convert_alpha()
self.image = new_sprite
self.set_size_to(self.size)
def next_costume(self):
"""
下一个造型
:return:
"""
keys = list(self.costume.keys())
size = len(keys)
index = keys.index(self.current_costume_key)
if index >= size - 1:
index = 0
else:
index = index + 1
self.switch_costume_to(keys[index])
def set_size_to(self, num):
"""
修改大小
:param num: 新的大小,100就是100%,1就是缩放为1%
:return:
"""
proto_rect = self.image.get_rect()
width = proto_rect.width
height = proto_rect.height
new_width = int(width * (num / 100))
new_height = int(height * (num / 100))
self.image = pygame.transform.smoothscale(self.image, (new_width, new_height))
self.rect.width = new_width
self.rect.height = new_height
self.rect.x = self.center_x - new_width / 2
self.rect.y = self.center_y - new_height / 2
self.size = num
def change_size_by(self, size_by):
"""
调整大小
:param size_by: 调整的数量
:return:
"""
new_size = self.size + size_by
if new_size > 0:
self.set_size_to(new_size)
def show(self):
"""
显示
:return:
"""
self.showing = True
def hide(self):
"""
隐藏
:return:
"""
self.showing = False
def action(self):
"""
角色在每帧的活动情况,比如如果希望角色不断移动1步,就可以重载这个方法,里面加入self.move(1)的代码
:return:
"""
pass
def goto_front_layer(self):
"""
显示在前面
:return:
"""
s = pygs.sprites_in_game[self.id]
del pygs.sprites_in_game[self.id]
pygs.sprites_in_game[self.id] = s
def goto_back_layer(self):
"""
显示在后面
:return:
"""
s = pygs.sprites_in_game[self.id]
del pygs.sprites_in_game[self.id]
new_dict = OrderedDict()
new_dict[self.id] = s
for k, v in list(pygs.sprites_in_game.items()):
new_dict[k] = v
sprites_in_game = new_dict
# Events
def regist_event(self, event_name, func):
"""
监听事件
:param event_name: 事件名称
:param func: 事件发生时,调用的函数
:return:
"""
if event_name in self.event_watcher:
functions = self.event_watcher.get(event_name)
functions.append(func)
else:
self.event_watcher[event_name] = [func]
def when_start(self, func):
"""
监听游戏启动事件
:param func:
:return:
"""
self.regist_event(EVENT_START, func)
def when_key_pressed(self, key_name, func):
"""
监听键盘按住事件
:param key_name: 键名
:param func:
:return:
"""
self.regist_event(pygs._get_key_down_event_name(key_name), func)
def when_key_up(self, key_name, func):
"""
监听键盘松开事件
:param key_name: 键名
:param func:
:return:
"""
self.regist_event(pygs._get_key_up_event_name(key_name), func)
def when_created(self, func):
"""
监听角色创建事件
:param func:
:return:
"""
self.regist_event(EVENT_SPRITE_CREATED, func)
def broadcast(self, event_name):
"""
广播事件
:param event_name:
:return:
"""
pygs.global_event(event_name)
# Sensing
def get_touching_sprite(self, sprite_name=None):
"""
获取接触到的角色
:param sprite_name: 接触的角色名称
:return:
"""
sprites = []
for sprite in list(pygs.sprites_in_game.values()):
if sprite.id != self.id:
if sprite_name is None or sprite.sprite_name == sprite_name:
if pygame.Rect.colliderect(self.rect, sprite.rect) and pygame.sprite.collide_mask(self, sprite):
sprites.append(sprite)
return sprites
def get_closest_sprite_by_name(self, sprite_name):
"""
获取最近的特定角色
:param sprite_name: 角色名称
:return:
"""
sprites = pygs.get_sprites_by_name(sprite_name)
return self.get_closest_sprite(sprites)
def get_closest_sprite(self, sprites):
"""
从角色列表中找出离自己最近的
:param sprites: 角色列表
:return:
"""
min_distance = 9999
closest_sprite = None
self_point = (self.center_x, self.center_y)
for sprite in sprites:
distance = pygs.get_distance(self_point, (sprite.center_x, sprite.center_y))
if min_distance > distance:
min_distance = distance
closest_sprite = sprite
return closest_sprite
def reset_timer(self):
"""
重置定时器
:return:
"""
self.timer_start = time.perf_counter()
def timer(self):
"""
上次定时后到目前的秒数
:return:
"""
return time.perf_counter() - self.timer_start
def event(self, event_name, *args, **kwargs):
"""
触发事件
:param event_name:
:param args:
:param kwargs:
:return:
"""
if event_name in self.event_watcher:
functions = self.event_watcher.get(event_name)
for func in functions:
func(*args, **kwargs)
def delete(self):
"""
删除自己
:return:
"""
self.hide()
if self.id in pygs.sprites_in_game.keys():
del pygs.sprites_in_game[self.id]
pygs.delete_delay_function_by_object(self)
```
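A minimal sketch of how a game might subclass `Sprite`; the image folder name (`cat`) and the surrounding pygs setup (window creation and main loop) are assumptions here, since that part of the framework is not shown in this entry:

```python
# hypothetical usage sketch -- assumes a 'cat' image folder exists and that
# pygs drives the main loop which calls each sprite's action() every frame
from pygamescratch.sprite import Sprite

class Cat(Sprite):
    def __init__(self):
        Sprite.__init__(self, "cat", center_x=100, center_y=100)
        self.point(0)  # face right

    def action(self):  # called once per frame
        self.move(2)
        self.bounce_if_on_edge()
```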
#### File: src/sample-planefight/friend_plane.py
```python
from global_var import *
from hero_bullet import HeroBullet
from pygamescratch import *
from pygamescratch.sprite import Sprite
class FriendPlane(Sprite):
def __init__(self, center_x, center_y):
Sprite.__init__(self, "friendplane", center_x, center_y)
self.hp = 10
self.set_size_to(60)
self.switch_costume_to("a")
self.immune = False
self.move_speed = 3
pygs.play_sound("callfriend.wav")
pygs.schedule(g.friend_change_direction_wait, self.change_direction, None)
pygs.schedule(g.friend_fire_wait, self.friend_fire, None)
def friend_fire(self):
if g.hero is None or g.hero.hp <= 0:
return
if self.hp <= 0:
return
if self.showing:
enemy_planes = g.get_enemies()
if len(enemy_planes) > 0:
target_position = enemy_planes[0].rect.center
pygs.play_sound("hero_fire.wav")
self.single_fire(self.rect.midtop, target_position, enemy_planes[0])
pygs.schedule(g.friend_fire_wait, self.friend_fire, None)
def single_fire(self, start_position, target_position, target_enemy):
hero_bullet = HeroBullet(start_position[0], start_position[1], BULLET_TYPE_FRIEND, target_sprite=target_enemy)
hero_bullet.point_to(target_position[0], target_position[1])
def friend_down(self):
pygs.play_sound("boom.wav")
self.switch_costume_to("down")
pygs.schedule(0.2, self.delete, None)
def got_hit(self):
self.immune = True
pygs.play_sound("hit.wav")
self.hp = self.hp - 1
if self.hp <= 0:
pygs.schedule(0.2, self.friend_down, None)
self.switch_costume_to("hp0")
else:
pygs.schedule(1, self.recover, None)
def recover(self):
self.immune = False
def action(self):
if self.hp <= 0:
return
self.move(self.move_speed)
self.bounce_if_on_edge()
enemy_bullets = self.get_touching_sprite("enemybullet")
for enemy_bullet in enemy_bullets:
if enemy_bullet and not enemy_bullet.hit() and not self.immune:
enemy_bullet.hit_plane()
self.got_hit()
self.say(str(self.hp), 11, (51, 173, 255))
if self.immune:
self.showing = not self.showing
else:
self.show()
def change_direction(self):
if self.hp <= 0:
return
closest_enemy_bullet = self.get_closest_sprite_by_name("enemybullet")
if closest_enemy_bullet:
distance = pygs.get_distance(closest_enemy_bullet.rect.center, (self.center_x, self.center_y))
if distance <= 100:
                if closest_enemy_bullet.center_x >= self.center_x:  # the bullet is to the right
                    if self.rect.x > pygs.max_x / 4:  # there is still room on the friendly plane's left
                        self.point(180)
                    else:
                        self.point(0)
                elif closest_enemy_bullet.center_x <= self.center_x:  # the bullet is to the left
                    if self.rect.x < pygs.max_x / 4:  # there is still room on the friendly plane's right
self.point(0)
else:
self.point(180)
pygs.schedule(g.friend_change_direction_wait, self.change_direction, None)
```
|
{
"source": "jetyang2005/elastalert",
"score": 2
}
|
#### File: elastalert/elastalert/db_method.py
```python
from db_sqlconn import Mysql
import config
import yaml
import json
import util_switch
def get_rules_from_db(conf, args):
    # acquire resources
    mysql = Mysql(conf)
rules = []
sql_rules = "select * from link_rules"
result = mysql.getAll(sql_rules)
if result:
for row in result:
# query_ruletypesdictvalue_sql = "select dict_name as rule_type from link_dict_entry " \
# "where dict_type_code = 'alertRuletypes' and dict_code = %s"
# query_ruletypesdictvalue_param = [row["rule_type"]]
# rule_type = mysql.getOne(query_ruletypesdictvalue_sql, query_ruletypesdictvalue_param)
# query_alertdictvalue_sql = "select dict_name as rule_type from link_dict_entry " \
# "where dict_type_code = 'alertRuletypes' and dict_code = %s"
# query_dictvalue_param = [row["rule_type"]]
# rule_type = mysql.getOne(query_alertdictvalue_sql, query_dictvalue_param)
query_alertperson_sql = "select * from ( " \
" select u.id as user_id,u.user_name as user_name ,u.email as user_email " \
"from LINK_RULE_RECEIVER a ,link_role r,link_user_role ur,link_user u " \
"where a.RULE_RECEIVER_TYPE='0' and a.RULE_RECEIVER_ID = r.id and r.id = ur.role_id " \
" and ur.user_id=u.id and a.rule_id = %s " \
"union " \
"select a.RULE_RECEIVER_ID as user_id,c.user_name as user_name,c.email as user_email " \
"from LINK_RULE_RECEIVER a join link_user c on a.RULE_RECEIVER_ID=c.id " \
"where a.RULE_RECEIVER_TYPE=1 and a.rule_id = %s " \
") alertpersons "
query_alertperson_param = [row['id'], row['id']]
            alertpersons = mysql.getAll(query_alertperson_sql, query_alertperson_param)  # receivers looked up from the database
            emails = []
            for alertemail in alertpersons:
                emails.append(alertemail['user_email'])
            # print("emails:", emails)
"""if row['rule_type'] == 'frequency':
rule = frequency_rule(conf, row) #frequency规则模板填充数据"""
row['user_email']= emails
for case in util_switch.switch(row['rule_type']):
if case('frequency'):
rule = frequency_rule(conf, row)
config.load_options(rule, conf, '', args)
config.load_modules(rule, args)
rule['alertpersons'] = alertpersons
rule['rule_id'] = row['id']
rule['rule_type'] = row['rule_type']
rules.append(rule)
break
if case('spike'):
rule = spike_rule(conf, row)
config.load_options(rule, conf, '', args)
config.load_modules(rule, args)
rule['alertpersons'] = alertpersons
rule['rule_id'] = row['id']
rule['rule_type'] = row['rule_type']
rules.append(rule)
break
                if case():  # default branch
                    print("switch something else!")
""" config.load_options(rule, conf, '', args)
config.load_modules(rule, args)
rule['alertpersons'] = alertpersons
rule['rule_id'] = row['id']
rule['rule_type'] =row['rule_type']
rules.append(rule)"""
    # release resources
    mysql.dispose()
return rules
def frequency_rule(conf, row):
rule = {'index': '',
'name': '',
'realert': {'minutes': 1},
'from_addr': conf['from_addr'],
'smtp_host': conf['smtp_host'],
'smtp_port': conf['smtp_port'],
'smtp_auth_file': '',
'num_events': 1,
'alert': ['db','email'],
'filter': [{'query': {'query_string': {'query': 'field6:Exception'}}}],
'email_reply_to': '',
'rule_file': '',
'timeframe': {'minutes': 1},
'type': 'frequency',
'email': ['<EMAIL>']}
    # rule_filter_str = row["rule_filter"].replace("\n", "").strip()
    rule_extend_str = row["rule_extend"].replace("\n", "").strip()
    # rule_filter = json.loads(rule_filter_str)
rule_extend = json.loads(rule_extend_str)
alert_way = row['rule_alerts']
rule['alert'] = alert_way.split(',')
rule['alert_way'] = row['rule_alerts']
rule['name'] = row["rule_name"]
rule['index'] = row["rule_index"]
rule['realert'] = rule_extend['realert']
rule['num_events'] = rule_extend['num_events']
rule['timeframe'] = rule_extend['timeframe']
rule['type'] = 'frequency'
rule['filter'] = rule_extend['filter']
rule['email'] = row['user_email']
    print("frequency=========%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (
        row["id"], row["rule_name"], row["rule_type"], row["rule_index"], row["rule_desc"], row["rule_alerts"],
        row["rule_filter"], row["rule_extend"], row["rule_seq"]))
return rule
def spike_rule(conf, row):
rule = {'index': '',
'name': '',
'realert': {'minutes': 1},
'from_addr': conf['from_addr'],
'smtp_host': conf['smtp_host'],
'smtp_port': conf['smtp_port'],
'smtp_auth_file': '',
'num_events': 1,
'alert': ['db','email'],
'filter': [{'query': {'query_string': {'query': '_id:AVwGceqjqjtyRJ_Calx5'}},'type':{'value':'spike'}}],
'email_reply_to': '',
'rule_file': '',
'timeframe': {'minutes':1},
'type': 'spike',
'spike_height': 1,
'threshold_cur':1,
'spike_type': 'up',
'email': ['<EMAIL>']}
rule_extend_str = row["rule_extend"].replace("\n", "").strip()
rule_extend = json.loads(rule_extend_str)
rule['filter'] = rule_extend['filter']
rule['name'] = row["rule_name"]
rule['index'] = row["rule_index"]
alert_way = row['rule_alerts']
rule['alert'] = alert_way.split(',')
rule['alert_way'] = row['rule_alerts']
rule['email']=row['user_email']
rule['spike_height'] = rule_extend['spike_height']
rule['threshold_cur'] = rule_extend['threshold_cur']
rule['spike_type'] = rule_extend['spike_type']
rule['timeframe'] = rule_extend['timeframe']
    print("spike=============%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (
        row["id"], row["rule_name"], row["rule_type"], row["rule_index"], row["rule_desc"], row["rule_alerts"],
        row["rule_filter"], row["rule_extend"], row["rule_seq"]))
return rule
def cardinality_rule(conf, row):
rule = {'index': '',
'name': '',
'realert': {'minutes': 1},
'from_addr': conf['from_addr'],
'smtp_host': conf['smtp_host'],
'smtp_port': conf['smtp_port'],
'smtp_auth_file': '',
'num_events': 1,
'alert': ['db'],
'filter': [{'query': {'query_string': {'query': 'field6:Exception'}}}],
'email_reply_to': '',
'rule_file': 'example_rules/example_frequency.yaml',
'timeframe': {'minutes': 1},
'type': 'frequency',
'email': ['<EMAIL>']}
return rule
def change_rule(conf, row):
rule = {'index': '',
'name': '',
'realert': {'minutes': 1},
'from_addr': conf['from_addr'],
'smtp_host': conf['smtp_host'],
'smtp_port': conf['smtp_port'],
'smtp_auth_file': '',
'num_events': 1,
'alert': ['db'],
'filter': [{'query': {'query_string': {'query': 'field6:Exception'}}}],
'email_reply_to': '',
'rule_file': 'example_rules/example_frequency.yaml',
'timeframe': {'minutes': 1},
'type': 'frequency',
'email': ['<EMAIL>']}
return rule
def new_term_rule(conf, row):
rule = {'index': '',
'name': '',
'realert': {'minutes': 1},
'from_addr': conf['from_addr'],
'smtp_host': conf['smtp_host'],
'smtp_port': conf['smtp_port'],
'smtp_auth_file': '',
'num_events': 1,
'alert': ['db'],
'filter': [{'query': {'query_string': {'query': 'field6:Exception'}}}],
'email_reply_to': '',
'rule_file': 'example_rules/example_frequency.yaml',
'timeframe': {'minutes': 1},
'type': 'frequency',
'email': ['<EMAIL>']}
return rule
def opsgenie_frequency_rule(conf, row):
rule = {'index': '',
'name': '',
'realert': {'minutes': 1},
'from_addr': conf['from_addr'],
'smtp_host': conf['smtp_host'],
'smtp_port': conf['smtp_port'],
'smtp_auth_file': '',
'num_events': 1,
'alert': ['db'],
'filter': [{'query': {'query_string': {'query': 'field6:Exception'}}}],
'email_reply_to': '',
'rule_file': 'example_rules/example_frequency.yaml',
'timeframe': {'minutes': 1},
'type': 'frequency',
'email': ['<EMAIL>']}
return rule
def percentage_match_rule(conf, row):
rule = {'index': '',
'name': '',
'realert': {'minutes': 1},
'from_addr': conf['from_addr'],
'smtp_host': conf['smtp_host'],
'smtp_port': conf['smtp_port'],
'smtp_auth_file': '',
'num_events': 1,
'alert': ['db'],
'filter': [{'query': {'query_string': {'query': 'field6:Exception'}}}],
'email_reply_to': '',
'rule_file': 'example_rules/example_frequency.yaml',
'timeframe': {'minutes': 1},
'type': 'frequency',
'email': ['<EMAIL>']}
return rule
def single_metric_rule(conf, row):
rule = {'index': '',
'name': '',
'realert': {'minutes': 1},
'from_addr': conf['from_addr'],
'smtp_host': conf['smtp_host'],
'smtp_port': conf['smtp_port'],
'smtp_auth_file': '',
'num_events': 1,
'alert': ['db'],
'filter': [{'query': {'query_string': {'query': 'field6:Exception'}}}],
'email_reply_to': '',
'rule_file': 'example_rules/example_frequency.yaml',
'timeframe': {'minutes': 1},
'type': 'frequency',
'email': ['<EMAIL>']}
return rule
```
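For reference, `frequency_rule` above expects `link_rules.rule_extend` to hold a JSON document with `realert`, `num_events`, `timeframe` and `filter` keys; a plausible example row value (the field and query used are illustrative, not from the project) would be:

```python
# illustrative rule_extend value for a 'frequency' rule row
rule_extend_example = """
{
  "realert":    {"minutes": 5},
  "num_events": 10,
  "timeframe":  {"minutes": 1},
  "filter": [{"query": {"query_string": {"query": "field6:Exception"}}}]
}
"""
```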
#### File: elastalert/samples/ImportDataFromSqlserver.py
```python
import pymssql
from Python_Elasticsearch import ElasticsearchOperate
from Process_Stat import Process_Stat
import logging
import sys
import time
# the Python 2 reload(sys)/setdefaultencoding hack is unnecessary on Python 3
class MSSQL:
"""
对pymssql的简单封装
pymssql库,该库到这里下载:http://www.lfd.uci.edu/~gohlke/pythonlibs/#pymssql
使用该库时,需要在Sql Server Configuration Manager里面将TCP/IP协议开启
用法:
"""
def __init__(self, host, user, pwd, db):
        # basic logging config: write the log to sqlserver_import_logger.log in the current directory
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='sqlserver_import_logger.log',
filemode='w')
self.sqlserver_import_logger = logging.getLogger('MSSQL')
self.host = host
self.user = user
self.pwd = <PASSWORD>
self.db = db
def __GetConnect(self):
"""
得到连接信息
返回: conn.cursor()
"""
if not self.db:
raise(NameError,"没有设置数据库信息")
self.conn = pymssql.connect(host=self.host,user=self.user,password=<PASSWORD>,database=self.db,charset="utf8")
cur = self.conn.cursor()
        if not cur:
            raise NameError("连接数据库失败")
else:
return cur
def ExecQuery(self, sql):
"""
执行查询语句
返回的是一个包含tuple的list,list的元素是记录行,tuple的元素是每行记录的字段
调用示例:
ms = MSSQL(host="localhost",user="sa",pwd="<PASSWORD>",db="PythonWeiboStatistics")
resList = ms.ExecQuery("SELECT id,NickName FROM WeiBoUser")
for (id,NickName) in resList:
print str(id),NickName
"""
cur = self.__GetConnect()
cur.execute(sql)
resList = cur.fetchall()
# the connection must be closed once the query completes
self.conn.close()
return resList
def ExecNonQuery(self, sql):
"""
执行非查询语句
调用示例:
cur = self.__GetConnect()
cur.execute(sql)
self.conn.commit()
self.conn.close()
"""
cur = self.__GetConnect()
cur.execute(sql)
self.conn.commit()
self.conn.close()
def main():
# ms = MSSQL(host="localhost",user="sa",pwd="<PASSWORD>",db="PythonWeiboStatistics")
# # Returns a list of tuples; each list element is a record row and each tuple element is a field of that row
# ms.ExecNonQuery("insert into WeiBoUser values('2','3')")
ms = MSSQL(host="10.0.0.29", user="sa", pwd="<PASSWORD>", db="ZHONGZI")
# process definition table
processdefineList = ms.ExecQuery(
"select processdefname, processchname, versionsign,createtime from link_processdefine")
processdefines = []
for (processdefname, processchname, versionsign, createtime) in processdefineList:
esdata = {"_index": "index_stat_processdefine",
"_type": "type_stat_processdefine",
"_source": {
"processdefname": processdefname,
"processchname": processchname,
"versionsign": versionsign,
"createtime": str(createtime),
}
}
processdefines.append(esdata)
# process instance table
process_processinstList = ms.ExecQuery("select processinstid, processinstname, processdefname, processchname, versionsign,currentstate, createtime, endtime, subtime from link_processinst")
process_processinsts = []
for (processinstid, processinstname, processdefname, processchname, versionsign, currentstate, createtime, endtime, subtime) in process_processinstList:
esdata = {"_index": "index_stat_process_processinst",
"_type": "type_stat_process_processinst",
"_source": {
"processinstid": processinstid,
"processinstname": processinstname,
"processdefname": processdefname,
"processchname": processchname,
"versionsign": versionsign,
"currentstate": currentstate,
"createtime": str(createtime),
"endtime": str(endtime),
"subtime": subtime
}
}
process_processinsts.append(esdata)
# activity instance table
activityinstList = ms.ExecQuery("select processinstid, processinstname, processdefname, processchname, versionsign,activityinstid,activityinstname, currentstate, createtime, endtime, subtime from link_activityinst")
activityinsts = []
for (processinstid, processinstname, processdefname, processchname, versionsign,activityinstid,activityinstname, currentstate, createtime, endtime, subtime) in activityinstList:
esdata = {"_index": "index_stat_process_activityinst",
"_type": "type_stat_process_activityinst",
"_source": {
"processinstid": processinstid,
"processinstname": processinstname,
"processdefname": processdefname,
"processchname": processchname,
"versionsign": versionsign,
"activityinstid": activityinstid,
"activityinstname": activityinstname,
"currentstate": currentstate,
"createtime": str(createtime),
"endtime": str(endtime),
"subtime": subtime
}
}
activityinsts.append(esdata)
# work item table
workitemList = ms.ExecQuery("select processinstid, processinstname, processdefname, processchname,workitemid,workitemname,istimeout,timeoutnum,activityinstid,activityinstname, currentstate, createtime, endtime, userid,orgid,orgname from link_workitem")
workitems = []
for (processinstid, processinstname, processdefname, processchname,workitemid,workitemname,istimeout,timeoutnum,activityinstid,activityinstname, currentstate, createtime, endtime, userid,orgid,orgname) in workitemList:
esdata = {"_index": "index_stat_process_workitem",
"_type": "type_stat_process_workitem",
"_source": {
"processinstid": processinstid,
"processinstname": processinstname,
"processdefname": processdefname,
"processchname": processchname,
"workitmeid": workitemid,
"workitemname": workitemname,
"istimeout": istimeout,
"timeoutnum": timeoutnum,
"currentstate": currentstate,
"activityinstid": activityinstid,
"activityinstname": activityinstname,
"createtime": str(createtime),
"endtime": str(endtime),
"userid": userid,
"orgid": orgid,
"orgname": orgname
}
}
workitems.append(esdata)
process_stat = Process_Stat()
process_stat.create_index()
process_stat.create_data(processdefines)
process_stat.create_data(process_processinsts)
process_stat.create_data(activityinsts)
process_stat.create_data(workitems)
process_stat.query_count("index_stat_process_activityinst", "type_stat_process_activityinst")
process_stat.query_count("index_stat_process_processinst", "type_stat_process_processinst")
process_stat.query_count("index_stat_processdefine", "type_stat_processdefine")
process_stat.query_count("index_stat_process_workitem", "type_stat_process_workitem")
if __name__ == '__main__':
main()
```
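`Process_Stat` is imported above but its module is not shown. Judging from the calls in `main()`, `create_data` forwards the prepared action dicts to `elasticsearch.helpers.bulk`; the sketch below is an assumption about that wrapper, not the original implementation (`create_index` is omitted):
```python
from elasticsearch import Elasticsearch
from elasticsearch import helpers

class Process_Stat(object):
    def __init__(self):
        self.es = Elasticsearch([{'host': 'localhost', 'port': 9200}])

    def create_data(self, esdatas):
        # Bulk-index the prepared actions; each dict already carries
        # _index, _type and _source, so no extra mapping is needed here.
        helpers.bulk(self.es, esdatas)

    def query_count(self, index, doc_type):
        # Report how many documents landed in the given index/type.
        return self.es.count(index=index, doc_type=doc_type)
```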
#### File: elastalert/samples/Python_Elasticsearch.py
```python
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import logging
import xlrd
import datetime
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
class ElasticsearchOperate():
def __init__(self):
# Basic logging config: write the log to elastticsearch_sample.log in the current directory
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='elastticsearch_sample.log',
filemode='w')
# also echo the log to the console
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
self.elastalert_logger = logging.getLogger('elasticsearch_test')
# connect to Elasticsearch
self.es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
def queryData(self, query):
# extra_args = {'_source_include': ['@timestamp', '*']}
#
# query = {'sort': [{'@timestamp': {'order': 'asc'}}],
# 'query':
# {'bool':
# {'filter':
# {'bool':
# {'must':
# [{'range':
# {'@timestamp': {'gt': '2017-04-06T14:30:00.860000+08:00',
# 'lte': '2017-04-06T14:31:00.860000+08:00'}}},
# {u'query_string': {u'query': u'field6:Exception'}
#
# }
# ]
# }
# }
# }
# }
# }
res = self.es.search(scroll='30s', index='index-student', size=10000, body=query, ignore_unavailable=True)
self.elastalert_logger.info(str(res).decode("unicode_escape").encode("utf8"))
def create_index(self):
if self.es.indices.exists(index='index-student') is not True:
test_index_mapping = {"mappings": {
"type_student": {
"properties": {
"studentNo": {
"type": "string",
"index": "not_analyzed"
},
"name": {
"type": "string",
"index": "analyzed",
"analyzer": "ik"
},
"male": {
"type": "string",
"index": "not_analyzed"
},
"age": {
"type": "integer"
},
"birthday": {
"type": "date",
"format": "yyyy-MM-dd"
},
"address": {
"type": "string",
"index": "analyzed",
"analyzer": "ik"
},
"classNo": {
"type": "string",
"index": "not_analyzed "
},
"isLeader": {
"type": "boolean"
}
}
}
}
}
self.es.indices.create(index="index-student", ignore=400, body=test_index_mapping)
def create_data(self, esdatas):
self.elastalert_logger.info(str(esdatas).decode("unicode_escape").encode("utf8"))
helpers.bulk(self.es, esdatas)
def init_data(self):
data = xlrd.open_workbook('/Users/yangwm/log/elasticsearch_data.xls') # open the xls file
table = data.sheets()[0] # open the first sheet
esdatas = []
nrows = table.nrows # number of rows in the sheet
for i in range(nrows): # iterate over the rows
if i == 0: # skip the header row
continue
rows = table.row_values(i)
esdata = {
"_index": "index-student",
"_type": "type_student",
"_id": i,
"_source": {
"studentNo": str(rows[0]),
"name": str(rows[1]),
"male": str(rows[2]),
"age": int(rows[3]),
"birthday": xlrd.xldate.xldate_as_datetime(rows[4], 1),
"classNo": str(rows[5]),
"address": str(rows[6]),
"isLeader": rows[7]
}
}
esdatas.append(esdata)
#print str(esdatas)
helpers.bulk(self.es, esdatas)
esclient = ElasticsearchOperate()
esclient.create_index()
#esclient.init_data()
# query_str = {
# "query": {
# "match_all": {}
# }
# }
#
# esclient.queryData(query_str)
# query_str_withpage = {
# "query": {
# "match_all": {}
# },
# "from": 2,
# "size": 4,
# "sort": {
# "studentNo": {
# "order": "asc"
# }
# }
# }
#
# esclient.queryData(query_str_withpage)
# term_str_query = {
# "query": {
# "term": {
# "name": "关羽"
# }
# }
# }
#
# esclient.queryData(term_str_query)
bool_str_query = {"query": {
"bool": {
"must": [
{
"term": {
"classNo": "2.0"
}
},
{
"term": {
"isLeader": "1"
}
}
]
}
}
}
esclient.queryData(bool_str_query)
```
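`queryData` opens a scroll context (`scroll='30s'`) but only consumes the first page of hits. A sketch of draining the scroll cursor, assuming the same old-style `elasticsearch-py` client used above:
```python
def query_all(es, query, index='index-student'):
    # Walk the scroll cursor until a page comes back empty.
    page = es.search(scroll='30s', index=index, size=1000, body=query,
                     ignore_unavailable=True)
    hits = page['hits']['hits']
    sid = page['_scroll_id']
    while True:
        page = es.scroll(scroll_id=sid, scroll='30s')
        if not page['hits']['hits']:
            break
        sid = page['_scroll_id']
        hits.extend(page['hits']['hits'])
    return hits
```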
|
{
"source": "jetyey/M2-Tweet_Sentiment_extraction",
"score": 4
}
|
#### File: data/dataset/normalize_dataset.py
```python
import sys
import pandas as pd
def usage():
return "Usage: python normalize_dataset.py dataset.csv\nWe assume label column name being \"sentiment\" "
#Verifying number of arguments
if(len(sys.argv)!=2):
sys.exit(usage())
#Normalizing x between -1 and 1, given the maximum and the minimum value of our sentiment labels
def normalize(x,xmin,xmax):
moy = float(xmin+xmax)/2
r = float(xmax - xmin) / 2
normalized = (x - moy) / r
return normalized
#Loading csv data - Add the following parameter for "stanford-sentiment-treebank.train.csv" ==> encoding="ISO-8859-1"
dataset = pd.read_csv(sys.argv[1],sep =";")
"""
print("Before normalization: ")
print(dataset)
"""
#Getting maximum/min value on the sentiment column
minval = dataset.sentiment.min()
maxval = dataset.sentiment.max()
#Normalizing the dataset between -1 and 1
dataset.sentiment = normalize(dataset.sentiment, minval, maxval)
"""
print("\nAfter normalization:")
print(dataset)
"""
newfile = "normalized_"+sys.argv[1]
dataset.to_csv(newfile, sep=';', encoding='utf-8')
print("Saved as: "+newfile)
```
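A quick worked example of the mapping: with labels in [0, 4] the midpoint is 2 and the half-range is 2, so `normalize` sends the extremes to -1 and 1 and the midpoint to 0:
```python
>>> normalize(0, 0, 4)
-1.0
>>> normalize(2, 0, 4)
0.0
>>> normalize(4, 0, 4)
1.0
```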
#### File: M2-Tweet_Sentiment_extraction/Prediction/tweet_classification.py
```python
from Prediction import model2
from model.processing import Process
import tensorflow as tf
#model , architecture , pre_process , model_inputs = tweet_classification.get_model_instance (model_name , embed_model)
def get_model_instance(model_name, embedding_model, keep_prob=1.0,
label_size=2, n_word=40, n_char=33 ):
"""
Get model instance, preprocessing, and input format
:param model_name:
:param embedding_model:
:param float learning_rate: learning rate
:param float gradient_clip: (option) max norm for gradient clipping
:param float keep_prob: (option) keep probability for dropout.
:param float batch_norm: (option) decaying parameter for batch normalization
:param int label_size: output label size
:param int n_word: word size (word sequence is pad by this value)
:param int n_char: character size (character sequence is pad by this value)
:return:
"""
# set up pre processing
_pre_process = Process("embed", {"length_word": n_word, "dim": embedding_model.vector_size, "model": embedding_model , "path": "./data/random_dict.json"})
if model_name == "cnn_char":
_model = model2.CharCNN
_pre_process = [Process("onehot", {"length_word": n_word, "length_char": n_char}), _pre_process]
_net = {"input_char": [n_word, n_char, _pre_process[0].char_dict_size],
"input_word": [n_word, embedding_model.vector_size],
"label_size": label_size,
"char_embed_dim": 5, "char_cnn_unit": 10, "char_cnn_kernel": 3, # character embedding
"word_embed_dim": 30, "cnn_unit": 300, "cnn_kernel": 5, "hidden_unit": 300}
_model_inputs = model2.InputFormat.char_word
elif model_name == "lstm_char":
_model = model2.CharLSTM
_pre_process = [Process("onehot", {"length_word": n_word, "length_char": n_char}), _pre_process]
_net = {"input_char": [n_word, n_char, _pre_process[0].char_dict_size],
"input_word": [n_word, embedding_model.vector_size],
"label_size": label_size,
"char_embed_dim": 5, "char_cnn_unit": 10, "char_cnn_kernel": 3, # character embedding
"n_hidden_1": 64, "n_hidden_2": 128, "n_hidden_3": 256}
_model_inputs = model2.InputFormat.char_word
elif model_name == "cnn_gap":
_net = {"input_word": [n_word, embedding_model.vector_size, 1], "label_size": label_size}
_model = model2.GapCNN
_model_inputs = model2.InputFormat.word_3d
elif model_name == "lstm":
_net = {"input_word": [n_word, embedding_model.vector_size], "label_size": label_size,
"n_hidden_1": 64, "n_hidden_2": 128, "n_hidden_3": 256}
_model = model2.LSTM
_model_inputs = model2.InputFormat.basic
else:
raise ValueError("unknown model!")
#model_instance = _model(network_architecture=_net , keep_prob=keep_prob)
return _net, _pre_process, _model_inputs
```
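A minimal sketch of calling `get_model_instance`. The stub embedding object is hypothetical; any gensim-style model exposing `vector_size` (and usable by `Process` for lookups) would do:
```python
class StubEmbedding(object):
    vector_size = 100  # embedding dimensionality, feeds the network shapes above

embed_model = StubEmbedding()
net, pre_process, model_inputs = get_model_instance("lstm", embed_model,
                                                    label_size=2, n_word=40)
# net["input_word"] == [40, 100]; model_inputs is model2.InputFormat.basic
```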
|
{
"source": "jetyun/zergling",
"score": 2
}
|
#### File: zergling/spiders/jetyun.py
```python
import copy
import six
import json
import time
import logging
import base64
from urlparse import urlparse
from lxml import etree
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from scrapy.http import Request, HtmlResponse
from scrapy_redis.spiders import RedisCrawlSpider
from scrapy_redis.utils import bytes_to_str
from goose import Goose
from goose.text import StopWordsChinese
from ..items import ZerglingItem
logger = logging.getLogger(__name__)
class JetyunSpider(RedisCrawlSpider):
name = 'jetyun'
def __init__(self, *a, **kw):
pass
def _build_request(self, rule, link, config_json):
r = Request(url=link.url, callback=self._response_downloaded)
r.meta.update(rule=rule, link_text=link.text, config_json=config_json)
return r
def _requests_to_follow(self, response):
if not isinstance(response, HtmlResponse):
return
seen = set()
config_json = response.meta.get('config_json', None)
if not config_json:
logger.info("response.meta['config_json'] is null =====_requests_to_follow====== %s" % response.meta)
return
else:
m_rules = self._compile_rules(config_json)
for n, rule in enumerate(m_rules):
links = [lnk for lnk in rule.link_extractor.extract_links(response)
if lnk not in seen]
if links and rule.process_links:
links = rule.process_links(links)
for link in links:
seen.add(link)
r = self._build_request(n, link, config_json)
yield rule.process_request(r)
def _response_downloaded(self, response):
config_json = response.meta.get('config_json', None)
if config_json:
m_rules = self._compile_rules(config_json)
rule = m_rules[response.meta['rule']]
else:
logger.info("self._rules: %s============response meta not find rules ,use self._rules=============response.meta: %s" % (self._rules, response.meta))
return
return self._parse_response(response, rule.callback, rule.cb_kwargs, rule.follow)
def make_request_from_data(self, data):
config_json = json.loads(bytes_to_str(data, self.redis_encoding))
url = config_json["start_url"]
r = self.make_requests_from_url(url)
r.meta.update(config_json=config_json)
return r
def parse_item(self, response):
g = Goose({'stopwords_class': StopWordsChinese})
article = g.extract(raw_html=response.body)
item = ZerglingItem()
infos = article.infos
config_json = response.meta.get('config_json', None)
for k in config_json:
if k != "collections" and k != "start_url" and k != "extracts" and k != "follows":
item[k] = config_json[k]
item["platform"] = 1
item["url"] = response.url.encode('utf-8')
collections = config_json["collections"]
for c in collections:
name = str(c["name"])
value = None
if c.has_key("xpath") and c["xpath"]:
value = response.xpath(c["xpath"]).extract()
if c.has_key("css") and c["css"]:
value = response.css(c["css"]).extract()
if c.has_key("callback") and c["callback"]:
try:
exec c["callback"]
ret = callback(value, item, infos)
if ret :
value = ret
except Exception as e:
value = None
logger.error("callback exec error: %s" % e)
# raise e
else:
if value and len(value) >= 1 :
value = value[0]
infos[name] = value
try:
infos['raw_html'] = base64.b64encode(article.raw_html)
except Exception as e:
logger.error("raw_html can't base64 encode, url: %s . error: %s" % (item["url"], e))
item["datas"] = infos
item["crawled_at"] = int(time.time())
logger.info("%s============parse_item=============%s" % (config_json, item["url"]))
yield item
def _compile_rules(self, config_json):
extract_links = config_json["extracts"]
follow_links = config_json["follows"]
rules = ()
if extract_links:
if follow_links:
rules +=(
Rule(LinkExtractor(allow=( follow_links ))),
)
rules += (
Rule(LinkExtractor(allow=( extract_links )), callback='parse_item'),
)
def get_method(method):
if callable(method):
return method
elif isinstance(method, six.string_types):
return getattr(self, method, None)
_rules = [copy.copy(r) for r in rules]
for rule in _rules:
rule.callback = get_method(rule.callback)
rule.process_links = get_method(rule.process_links)
rule.process_request = get_method(rule.process_request)
return _rules
```
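The spider is seeded through Redis: `make_request_from_data` expects each queue entry to be a JSON document. Judging from `_compile_rules` and `parse_item`, a seed might look like the sketch below (all field values are illustrative):
```python
import json

config_json = {
    "start_url": "http://example.com/news",
    "follows": ["/news/page/\\d+"],       # links followed but not parsed
    "extracts": ["/news/article/\\d+"],   # links handed to parse_item
    "collections": [
        {"name": "title", "xpath": "//h1/text()"},
        {"name": "body", "css": "div.content ::text",
         "callback": "def callback(value, item, infos):\n    return ' '.join(value)"},
    ],
    # any other top-level key is copied onto the item verbatim
}
# pushing json.dumps(config_json) onto the spider's redis key starts a crawl
```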
|
{
"source": "jetz/sqlmapcli",
"score": 3
}
|
#### File: sqlmapcli/sqlmapcli/logs.py
```python
import sys
import logging
def get_logger(name):
""" config logger
Args:
name (str): logger name
Returns:
object: logger
"""
# suppress requests library's log
logging.getLogger("requests").setLevel(logging.WARNING)
log_fmt = '[%(asctime)s @%(filename)s:%(lineno)d <%(levelname)s>] %(message)s' # noqa
logging.basicConfig(
level=logging.INFO,
format=log_fmt,
datefmt='%Y-%m-%d %H:%M:%S',
stream=sys.stdout
)
return logging.getLogger(name)
```
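Usage mirrors `task.py` below: obtain a named logger once at import time and log through it.
```python
logger = get_logger('sqlmapcli')
logger.info('client started')
```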
#### File: sqlmapcli/sqlmapcli/task.py
```python
import time
from urllib.parse import urljoin
import requests
from .logs import get_logger
from .exceptions import TaskStatusError, TaskResultError, TaskLogError
logger = get_logger('sqlmapcli')
class TaskStatus(object):
""" Task status constant """
READY = 'not running'
RUNNING = 'running'
FINISHED = 'terminated'
class Task(object):
def __init__(self, id, options, addr):
""" Create a task object.
Args:
id (str): task id from remote sqlmapapi server.
options (dict): options used to run task, see
`curl http://<host>:<port>/option/<taskid>/list`.
addr (str): remote sqlmapapi server address.
"""
self.id = id
self.addr = addr
self.options = options or {}
# always store url target in task object
self.url = self.options.get('url', None)
def __str__(self):
return '<Task#%s>' % self.id
def __repr__(self):
return str(self)
def _request(self, path, method='GET'):
""" Used to request remote sqlmapapi server.
Args:
path (str): url path for request.
method (str): GET or POST for different request.
Returns:
dict if successful, None otherwise.
"""
try:
url, method = urljoin(self.addr, path), method.upper()
if method == 'GET':
r = requests.get(url).json()
elif method == 'POST':
r = requests.post(url, json=self.options).json()
except requests.RequestException as e:
logger.error('Fail to %s %s: %s' % (method, path, e))
return None
if r.get('success'):
return r
else:
logger.error('Fail to %s %s: %s' % (method, path, r.get('message'))) # noqa
return None
def set_option(self, key, value):
""" Set option for task.
Options can be set when client create task, or call `set_option`
after task is created but not start.
Args:
key (str): option name.
value (str): option value.
Returns:
Task: for chained call, eg.
`task.set_option(key, value).set_option(key, value)`.
"""
self.options[key] = value
if key == 'url':
self.url = value
return self
def get_option(self, key):
""" Get task option.
Args:
key (str): option name.
Returns:
str: option value.
"""
return self.options.get(key)
def update_options(self, options):
""" Update some options at same time.
Args:
options (dict): options that to update.
"""
self.options.update(options)
if 'url' in options:
self.url = options.get('url')
def list_options(self):
""" Get options that manually set.
Returns:
dict: options that user set.
"""
return self.options
def start(self, url=None, options=None):
""" Task start to run.
Args:
url (str): target url to scan by sqlmap; this is a shorthand
for setting the option with key `url`
options (Optional[dict]): shorthand for setting task options, an
alternative to `set_option`, `update_options`, or passing options
when the task is created.
Returns:
str: engineid, maybe useful in future.
"""
if options:
self.update_options(options)
if url:
self.url = url
self.set_option('url', url)
r = self._request('/scan/%s/start' % self.id, 'POST')
self.engineid = r.get("engineid") if r else None
return self.engineid
def stop(self):
""" Stop running task.
Returns:
bool: True if stop successfully, False otherwise.
"""
r = self._request('/scan/%s/stop' % self.id)
return bool(r)
def kill(self):
""" Kill running task unconditionally.
Returns:
bool: True if Kill successfully, False otherwise.
"""
r = self._request('/scan/%s/kill' % self.id)
return bool(r)
def status(self):
Task current status: ready, running or finished.
Returns:
dict: include status and retcode.
Raises:
TaskStatusError: status exception.
"""
r = self._request('/scan/%s/status' % self.id)
if r:
status, retcode = r.get('status'), r.get('returncode')
return {'status': status, 'retcode': retcode}
else:
raise TaskStatusError("Can't get status")
@property
def ready(self):
""" shorthand for task status.
Returns:
bool: True if the task is created but not started, False otherwise.
"""
try:
r = self.status()
return r.get('status') == TaskStatus.READY
except TaskStatusError as e:
logger.error('Fail to GET task<%s> status: %s', self.id, e)
return False
@property
def running(self):
""" shorthand for task status.
Returns:
bool: True if the task has started but not finished, False otherwise.
"""
try:
r = self.status()
return r.get('status') == TaskStatus.RUNNING
except TaskStatusError as e:
logger.error('Fail to GET task<%s> status: %s', self.id, e)
return False
@property
def finished(self):
""" shorthand for task status.
Returns:
bool: True if task is finished, False otherwise.
"""
try:
r = self.status()
return r.get('status') == TaskStatus.FINISHED
except TaskStatusError as e:
logger.error('Fail to GET task<%s> status: %s', self.id, e)
return False
def get_result(self):
""" Get task result.
Returns:
dict: task data.
Raises:
TaskResultError: task result exception.
"""
r = self._request('/scan/%s/data' % self.id)
if r:
return r.get('data')
else:
raise TaskResultError("Can't get result")
def get_log(self, start=None, end=None):
""" Get task log.
Args:
start (int): start index of log list.
end (int): end index of log list.
Returns:
dict: task log data.
Raises:
TaskLogError: task log exception.
"""
if start and end:
r = self._request('/scan/%s/log/%s/%s' % (self.id, start, end))
else:
r = self._request('/scan/%s/log' % self.id)
if r:
return r.get('log')
else:
raise TaskLogError("Can't get log")
def run(self, url=None, options=None, interval=5):
""" Shorthand for call `start`, `status` and `get_result`
Args:
url (str): target url to scan by sqlmap; this is a shorthand
for setting the option with key `url`
options (Optional[dict]): shorthand for setting task options, an
alternative to `set_option`, `update_options`, or passing options
when the task is created.
interval (int): interval in seconds between task status polls.
Returns:
dict if successfully, None otherwise.
"""
self.start(url, options)
while self.running:
time.sleep(interval)
try:
r = self.get_result()
except TaskResultError as e:
logger.error('Fail to GET task<%s> result: %s', self.id, e)
return None
return r
```
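A sketch of the intended flow. The client that mints task IDs is not part of this file; the `/task/new` endpoint below is standard sqlmapapi behaviour, but relative to this module it is an assumption:
```python
import requests

addr = "http://127.0.0.1:8775"
# Ask the sqlmapapi server for a fresh task id (assumed endpoint).
taskid = requests.get(addr + "/task/new").json()["taskid"]

task = Task(taskid, options=None, addr=addr)
result = task.run("http://testphp.example.com/listproducts.php?cat=1",
                  options={"level": 1}, interval=5)
if result is not None:
    print(result)
```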
|
{
"source": "jeubanks/vera-life360",
"score": 3
}
|
#### File: jeubanks/vera-life360/vera.py
```python
import urllib.request, urllib.parse, urllib.error, json
import sys
import hashlib
#import sha
import base64
import http.client
import time
import requests
class Time(object):
"""
Time object represents a time value in a 24-hour day i.e. a value
we might normally represent HH:MM:SS.
"""
def __init__(self, h=0, m=0, s=0, after_sunrise=False, after_sunset=False):
"""
Constructs a new Time object.
:param h: Hour value
:param m: Minute value
:param s: Seconds value
:param after_sunrise: True if value is relative to sunrise
:param after_sunset: True if the time value is relative to sunset
"""
assert (after_sunrise and after_sunset) == False, \
"Must not specify both after_sunrise and after_sunset"
self.time = (h, m, s)
self.after_sunrise = after_sunrise
self.after_sunset = after_sunset
def output(self):
"""
Formats the time value in a format suitable for LUUP comms.
"""
if self.after_sunrise:
return "%02d:%02d:%02dR" % self.time
if self.after_sunset:
return "%02d:%02d:%02dT" % self.time
return "%02d:%02d:%02d" % self.time
def parse(s):
"""
Converts LUUP time values to a Time object.
:param s: Value from LUUP comms.
"""
rise = False
set = False
if s[-1:] == "R":
rise = True
s = s[:-1]
elif s[-1:] == "T":
set = True
s = s[:-1]
x = s.split(":")
if len(x) == 1:
x.append("0")
if len(x) == 2:
x.append("0")
return Time(int(x[0]), int(x[1]), int(x[2]), after_sunrise=rise,
after_sunset=set)
parse = staticmethod(parse)
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
return self.__dict__ == obj.__dict__ and type(self) == type(obj)
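# Example round-trip (illustrative): Time(6, 30, after_sunrise=True).output()
# yields "06:30:00R", and Time.parse("06:30:00R") reconstructs an equal object.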
class Timer(object):
"""
Base class for a timer value. There are four types of timer
implemented in the four subclasses.
"""
def parse(s):
"""
Converts LUUP timer values to a Timer object.
:param s: Value from LUUP comms.
"""
if s["type"] == 1:
return IntervalTimer.parse(s)
if s["type"] == 2:
return DayOfWeekTimer.parse(s)
if s["type"] == 3:
return DayOfMonthTimer.parse(s)
if s["type"] == 4:
return AbsoluteTimer.parse(s)
raise RuntimeError("Parsing timer not implemented.")
parse = staticmethod(parse)
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
return self.__dict__ == obj.__dict__
class DayOfWeekTimer(Timer):
"""
Represents a daily timer which fires at a specific point in a
24-hour day. The timer can be restricted so that it only operates on
certain days in the week.
"""
def __init__(self, id=None, name=None, days=None, time=None):
"""
Creates a DayOfWeekTimer object.
:param id: Integer identifier for the timer.
:param name: A human-readable name.
:param days: A string containing comma-separated digits representing
the days of the week, 1=Monday etc.
:param time: A Time object representing the time.
"""
self.id = id
self.name = name
self.days = days
self.time = time
def output(self):
"""
Formats the time value in a format suitable for LUUP comms.
"""
return {
"id": self.id,
"name": self.name,
"type": 2,
"enabled": 1,
"days_of_week": self.days,
"time": self.time.output()
}
def parse(s):
"""
Converts LUUP day-of-week timer values to a DayOfWeekTimer object.
:param s: Value from LUUP comms.
"""
t = DayOfWeekTimer()
t.id = s.get("id", None)
t.name = s.get("name", None)
t.days = s.get("days_of_week", None)
if "time" in s:
t.time = Time.parse(s["time"])
else:
t.time = None
return t
parse = staticmethod(parse)
class DayOfMonthTimer(Timer):
"""
Represents a daily timer which fires at a specific point in a
24-hour day. The timer can be restricted so that it only operates on
certain days in the month.
"""
def __init__(self, id=None, name=None, days=None, time=None):
"""
Creates a new DayOfMonthTimer.
:param id: Integer identifier for the timer.
:param name: A human-readable name.
:param days: A string containing comma-separated digits representing
the days of the month, 1=1st etc.
:param time: A Time object representing the time.
"""
self.id = id
self.name = name
self.days = days
self.time = time
def output(self):
"""
Formats the time value in a format suitable for LUUP comms.
"""
return {
"id": self.id,
"name": self.name,
"type": 3,
"enabled": 1,
"days_of_month": self.days,
"time": self.time.output()
}
def parse(s):
"""
Converts LUUP day-of-month timer values to a DayOfMonthTimer object.
:param s: Value from LUUP comms.
"""
t = DayOfMonthTimer()
t.id = s.get("id", None)
t.name = s.get("name", None)
t.days = s.get("days_of_month", None)
if "time" in s:
t.time = Time.parse(s["time"])
else:
t.time = None
return t
parse = staticmethod(parse)
class IntervalTimer(Timer):
"""
A timer describing a regularly occurring event, whose period can be
described in terms of a number of seconds, minutes, hours or days.
"""
def __init__(self, id=None, name=None, seconds=0, minutes=0, hours=0,
days=0):
"""
Create a new IntervalTimer object.
:param id: Integer identified for timer.
:param name: Human-readable name for the timer.
:param seconds: Interval value in seconds.
:param minutes: Interval value in minutes.
:param hours: Interval value in hours.
:param days: Interval value in days.
"""
specd = 0
if seconds != 0: specd = specd + 1
if minutes != 0: specd = specd + 1
if hours != 0: specd = specd + 1
if days != 0: specd = specd + 1
assert specd < 2, \
"Should specify only one of seconds, minutes, hours and days"
self.id = id
self.name = name
(self.seconds, self.minutes, self.hours, self.days) = \
(seconds, minutes, hours, days)
def output(self):
"""
Formats the value in a format suitable for LUUP comms.
"""
if self.days > 0:
interval = "%dd" % self.days
elif self.hours > 0:
interval = "%dh" % self.hours
elif self.minutes > 0:
interval = "%dm" % self.minutes
else:
interval = "%ds" % self.seconds
return {
"id": self.id,
"name": self.name,
"type": 1,
"enabled": 1,
"interval": interval
}
def parse(s):
"""
Converts LUUP interval timer values to a IntervalTimer object.
:param s: Value from LUUP comms.
"""
t = IntervalTimer()
t.id = s.get("id", None)
t.name = s.get("name", None)
if "interval" in s:
ival = s["interval"]
if ival[-1:] == "s":
t.seconds = int(ival[:-1])
if ival[-1:] == "m":
t.minutes = int(ival[:-1])
if ival[-1:] == "h":
t.hours = int(ival[:-1])
if ival[-1:] == "d":
t.days = int(ival[:-1])
return t
parse = staticmethod(parse)
class AbsoluteTimer(Timer):
"""
Describes a timer which fires only once, at a described point in time.
"""
def __init__(self, id=None, name=None, year=None, month=None, date=None,
hours=0, minutes=0, seconds=0):
"""
Creates an AbsoluteTimer object.
:param id: Identifier for the timer.
:param name: Human-readable name for the timer.
:param year: Absolute year value, 4-digit.
:param month: Absolute month value, 1-12.
:param date: Absolute date value, 1-31.
:param hours: Hour value.
:param minutes: Minute value.
:param seconds: Seconds value.
"""
self.id, self.name = id, name
(self.year, self.month, self.date) = (year, month, date)
(self.hours, self.minutes, self.seconds) = (hours, minutes, seconds)
def output(self):
"""
Formats the timer value in a format suitable for LUUP comms.
"""
time = "%04d-%02d-%02d %02d:%02d:%02d" % (self.year, self.month, \
self.date, self.hours, self.minutes, self.seconds)
return {
"id": self.id,
"name": self.name,
"type": 4,
"enabled": 1,
"abstime": time
}
def parse(s):
"""
Converts LUUP absolute timer values to an AbsoluteTimer object.
:param s: Value from LUUP comms.
"""
t = AbsoluteTimer()
t.id = s.get("id", None)
t.name = s.get("name", None)
if "abstime" in s:
parts = s["abstime"].split(" ")
if len(parts) != 2:
raise RuntimeError("Invalid date format")
dateparts = parts[0].split("-")
timeparts = parts[1].split(":")
if len(dateparts) != 3:
raise RuntimeError("Invalid date format")
if len(timeparts) != 3:
raise RuntimeError("Invalid date format")
t.year = int(dateparts[0])
t.month = int(dateparts[1])
t.date = int(dateparts[2])
t.hours = int(timeparts[0])
t.minutes = int(timeparts[1])
t.seconds = int(timeparts[2])
return t
parse = staticmethod(parse)
class Trigger(Timer):
"""
Describes a device-initiated event for triggering a scene.
"""
def __init__(self, id=None, name=None, device=None, template=None,
args=None, start=None, stop=None, days_of_week=None):
"""
Creates a Trigger object.
:param id: Trigger identifier.
:param name: Human-readable name.
:param device: Device object identifying the device which is to
initiate the scene
:param template: is the template function for the device
:param args: is a sequence of arguments
:param start: a Time object specifying the start time for the period
for which this trigger is valid.
:param stop: a Time object specifying the end time for which this trigger
is valid
:param days_of_week: days for which this trigger applies.
"""
self.id, self.name = id, name
self.device = device
self.template = template
if args == None:
self.args = []
else:
self.args = args
self.start, self.stop = start, stop
self.days_of_week = days_of_week
def output(self):
"""
Formats the value in a format suitable for LUUP comms.
"""
args = []
for i in range(0, len(self.args)):
args.append({"id": i + 1, "value": self.args[i]})
val = {
"id": self.id,
"device": self.device.id,
"enabled": 1,
"name": self.name,
"template": self.template,
"arguments": args
}
if self.start != None and self.stop != None:
val["start"] = self.start.output()
val["stop"] = self.stop.output()
if self.days_of_week != None:
val["days_of_week"] = self.days_of_week
return val
def parse(vera, s):
"""
Converts LUUP trigger values to a Trigger object.
:param vera: Vera object.
:param s: Value from LUUP comms.
"""
t = Trigger()
t.id = s.get("id", None)
t.template = s.get("template", None)
t.name = s.get("name", None)
t.days_of_week = s.get("days_of_week", None)
if "arguments" in s:
for i in s["arguments"]:
if 'value' in i:
t.args.append(i["value"])
if "device" in s:
t.device = vera.get_device_by_id(s["device"])
else:
t.device = None
if "start" in s:
t.start = Time.parse(s["start"])
else:
t.start = None
if "stop" in s:
t.stop = Time.parse(s["stop"])
else:
t.stop = None
return t
parse = staticmethod(parse)
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
return self.__dict__ == obj.__dict__ and type(self) == type(obj)
class Job(object):
"""
Represents a job, typically used to implement an action.
"""
def __init__(self):
"""
Creates a Job object.
"""
pass
def get_status(self):
"""
Gets the job status, returns a tuple. First element is job code,
second element is human-readable meaning of the job code.
"""
url = "data_request?id=jobstatus&job=%d&plugin=zwave" % self.id
return self.vera.get(url)
def is_complete(self):
"""
Tests whether the job has completed.
:return: True if the job is complete, False otherwise.
"""
status = self.get_status()
return status["status"] == 4
def is_pending(self):
"""
Tests whether the job is still pending completion.
:return: True if the job is still pending completion, False otherwise.
"""
status = self.get_status()
return status["status"] == 3
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
if type(self) != type(obj): return False
return self.__dict__ == obj.__dict__
class Action(object):
"""
Base class describing an action.
"""
def invoke(self):
"""
Invoke an action
:return: a Job object, describing the job implementing the action.
"""
raise RuntimeError("Not implemented")
def parse(vera, s):
"""
Converts LUUP action values to an Action object.
:param vera: A Vera object.
:param s: Value from LUUP comms.
"""
if s["service"] == "urn:upnp-org:serviceId:TemperatureSetpoint1":
return SetpointAction.parse(vera, s)
if s["service"] == "urn:upnp-org:serviceId:Dimming1":
return DimmerAction.parse(vera, s)
if s["service"] == "urn:upnp-org:serviceId:SwitchPower1":
return SwitchAction.parse(vera, s)
if s["service"] == "urn:upnp-org:serviceId:HVAC_UserOperatingMode1":
return HeatingAction.parse(vera, s)
if s["service"] == "urn:upnp-org:serviceId:RGBController1":
return RGBAction.parse(vera, s)
raise RuntimeError("Don't know how to handle service %s" % \
s["service"])
parse = staticmethod(parse)
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
return self.__dict__ == obj.__dict__ and type(self) == type(obj)
class SetpointAction(Action):
"""
An action, which changes the 'set point' of a thermostat.
Note that when this action is applied to a battery-powered device, the
result may not be applied until the device does a rendezvous with the
controller.
"""
def __init__(self, device=None, value=None):
"""
Creates a SetpointAction object.
:param device: Device object specifying the device
:param value: set-point value, a float
"""
self.device = device
self.value = value
def output(self):
"""
Formats the value in a format suitable for LUUP comms.
"""
return {
"device": self.device.id,
"action": "SetCurrentSetpoint",
"arguments": [
{
"name": "NewCurrentSetpoint",
"value": self.value
}
],
"service": "urn:upnp-org:serviceId:TemperatureSetpoint1"
}
def invoke(self):
"""
Immediately invoke the action
:return: a Job object, describing the job implementing the action.
"""
base="data_request?id=action"
action = "SetCurrentSetpoint"
svc = "urn:upnp-org:serviceId:TemperatureSetpoint1"
path = "%s&DeviceNum=%d&serviceId=%s&action=%s&NewCurrentSetpoint=%f&output_format=json" \
% (base, self.device.id, svc, action, self.value)
status = self.device.vera.get(path)
job = Job()
job.id = int(status["u:SetCurrentSetpointResponse"]["JobID"])
job.vera = self.device.vera
return job
def parse(vera, s):
"""
Converts LUUP SetCurrentSetpoint action values to a SetpointAction
object.
:param s: Value from LUUP comms.
"""
ha = SetpointAction()
ha.device = vera.get_device_by_id(s["device"])
ha.value = s["arguments"][0]["value"]
return ha
parse = staticmethod(parse)
class SwitchAction(Action):
"""
Action which operates against a standard power switch which has on/off
semantics.
"""
def __init__(self, device=None, value=None):
"""
Creates a SwitchAction object.
:param device: Device object describing the device to apply
:param value: boolean value for switch
"""
self.device = device
self.value = value
def output(self):
"""
Formats the value in a format suitable for LUUP comms.
"""
return {
"device": self.device.id,
"action": "SetTarget",
"arguments": [
{
"name": "newTargetValue",
"value": self.value
}
],
"service": "urn:upnp-org:serviceId:SwitchPower1"
}
def invoke(self):
"""
Implements the defined action.
:return: a Job object, describing the job implementing the action.
"""
if self.value:
value = 1
else:
value = 0
base="data_request?id=action"
action = "SetTarget"
svc = "urn:upnp-org:serviceId:SwitchPower1"
path = "%s&DeviceNum=%d&serviceId=%s&action=%s&newTargetValue=%d&output_format=json" \
% (base, self.device.id, svc, action, value)
status = self.device.vera.get(path)
job = Job()
job.id = int(status["u:SetTargetResponse"]["JobID"])
job.vera = self.device.vera
return job
def parse(vera, s):
"""
Converts LUUP SetTarget values to a SwitchAction object.
:param s: Value from LUUP comms.
"""
ha = SwitchAction()
ha.device = vera.get_device_by_id(s["device"])
ha.value = s["arguments"][0]["value"]
return ha
parse = staticmethod(parse)
class VSwitchAction(Action):
"""
Action which operates against a standard power switch which has on/off
semantics.
"""
def __init__(self, device=None, value=None):
"""
Creates a VSwitchAction object.
:param device: Device object describing the device to apply
:param value: boolean value for switch
"""
self.device = device
self.value = value
def output(self):
"""
Formats the value in a format suitable for LUUP comms.
"""
return {
"device": self.device.id,
"action": "SetTarget",
"arguments": [
{
"name": "newTargetValue",
"value": self.value
}
],
"service": "urn:upnp-org:serviceId:VSwitch1"
}
def invoke(self):
"""
Implements the defined action.
:return: a Job object, describing the job implementing the action.
"""
if self.value:
value = 1
else:
value = 0
base="data_request?id=action"
action = "SetTarget"
svc = "urn:upnp-org:serviceId:VSwitch1"
path = "%s&DeviceNum=%d&serviceId=%s&action=%s&newTargetValue=%d&output_format=json" \
% (base, self.device.id, svc, action, value)
status = self.device.vera.get(path)
job = Job()
job.id = status["u:SetTargetResponse"]["OK"]
job.vera = self.device.vera
return job
def parse(vera, s):
"""
Converts LUUP SetTarget values to a VSwitchAction object.
:param s: Value from LUUP comms.
"""
ha = VSwitchAction()
ha.device = vera.get_device_by_id(s["device"])
ha.value = s["arguments"][0]["value"]
return ha
parse = staticmethod(parse)
class DimmerAction(Action):
"""
Action which changes the dim level of a dimmer device
"""
def __init__(self, device=None, value=None):
"""
Creates a DimmerAction object.
:param device: a Device object specifying the device to be affected
:param value: Dim value, integer 0-100
"""
self.device = device
self.value = value
def output(self):
"""
Formats the value in a format suitable for LUUP comms.
"""
return {
"device": self.device.id,
"action": "SetLoadLevelTarget",
"arguments": [
{
"name": "newTargetValue",
"value": self.value
}
],
"service": "urn:upnp-org:serviceId:Dimming1"
}
def invoke(self):
"""
Invokes the action, affecting the specified device.
:return: a Job object, describing the job implementing the action.
"""
base="data_request?id=action"
action = "SetLoadLevelTarget"
svc = "urn:upnp-org:serviceId:Dimming1"
path = "%s&DeviceNum=%d&serviceId=%s&action=%s&newTargetValue=%d&output_format=json" \
% (base, self.device.id, svc, action, self.value)
status = self.device.vera.get(path)
job = Job()
job.id = int(status["u:SetLoadLevelTargetResponse"]["JobID"])
job.vera = self.device.vera
return job
def parse(vera, s):
"""
Converts LUUP values to a DimmerAction object.
:param s: Value from LUUP comms.
"""
ha = DimmerAction()
ha.device = vera.get_device_by_id(s["device"])
ha.value = s["arguments"][0]["value"]
return ha
parse = staticmethod(parse)
class HeatingAction(Action):
"""
Action which changes the operational mode of a heating device
"""
def __init__(self, device=None, value=None):
"""
Creates a HeatingAction device.
:param device: a Device object specifying the device to be affected
:param value: string, one of: Off, HeatOn
"""
self.device = device
self.value = value
def output(self):
"""
Formats the time value in a format suitable for LUUP comms.
"""
return {
"device": self.device.id,
"action": "SetModeTarget",
"arguments": [
{
"name": "NewModeTarget",
"value": self.value
}
],
"service": "urn:upnp-org:serviceId:HVAC_UserOperatingMode1"
}
def invoke(self):
"""
Invokes the action, affecting the specified device.
:return: a Job object, describing the job implementing the action.
"""
base="data_request?id=action"
action = "SetModeTarget"
svc = "urn:upnp-org:serviceId:HVAC_UserOperatingMode1"
path = "%s&DeviceNum=%d&serviceId=%s&action=%s&NewModeTarget=%s&output_format=json" \
% (base, self.device.id, svc, action, self.value)
status = self.device.vera.get(path)
job = Job()
job.id = int(status["u:SetModeTargetResponse"]["JobID"])
job.vera = self.device.vera
return job
def parse(vera, s):
"""
Converts LUUP values to a HeatingAction object.
:param s: Value from LUUP comms.
"""
ha = HeatingAction()
ha.device = vera.get_device_by_id(s["device"])
ha.value = s["arguments"][0]["value"]
return ha
parse = staticmethod(parse)
class RGBAction(Action):
"""
Action which operates against a colour controller which has 5 channels
"""
def __init__(self, device=None, value=None):
"""
Creates an RGBAction object.
:param device: Device object describing the device to apply
:param value: value for color
"""
self.device = device
self.value = value
def output(self):
"""
Formats the value in a format suitable for LUUP comms.
"""
return {
"device": self.device.id,
"action": "SetColor",
"arguments": [
{
"name": "newColorTargetValue",
"value": self.value
}
],
"service": "urn:upnp-org:serviceId:RGBController1"
}
def invoke(self):
"""
Implements the defined action.
:return: a Job object, describing the job implementing the action.
"""
base="data_request?id=action"
action = "SetColorTarget"
svc = "urn:upnp-org:serviceId:RGBController1"
path = "%s&DeviceNum=%d&serviceId=%s&action=%s&newColorTargetValue=%s&transitionDuration=0&transitionNbSteps=10&output_format=json" \
% (base, self.device.id, svc, action, self.value)
status = self.device.vera.get(path)
job = Job()
job.id = int(status["u:SetColorTargetResponse"]["JobID"])
job.vera = self.device.vera
return job
def parse(vera, s):
"""
Converts LUUP SetColorTarget values to an RGBAction object.
:param s: Value from LUUP comms.
"""
sa = RGBAction()
sa.device = vera.get_device_by_id(s["device"])
sa.value = s["arguments"][0]["value"]
return sa
parse = staticmethod(parse)
class SceneAction(Action):
"""
Action which runs a scene
"""
def __init__(self, vera=None, id=None):
"""
Creates a SceneAction object.
:param vera: Vera object used to invoke the scene
:param id: scene number to run
"""
self.vera = vera
self.id = id
def output(self):
"""
Formats the value in a format suitable for LUUP comms.
"""
return {
"action": "RunScene",
"arguments": [
{
"name": "SceneNum",
"value": self.id
}
],
"service": "urn:micasaverde-com:serviceId:HomeAutomationGateway1"
}
def invoke(self):
"""
Implements the defined action.
:return: a Job object, describing the job implementing the action.
"""
base="data_request?id=action"
action = "RunScene"
svc = "urn:micasaverde-com:serviceId:HomeAutomationGateway1"
path = "%s&serviceId=%s&action=%s&SceneNum=%d&output_format=json" \
% (base, svc, action, self.id)
status = self.vera.get(path)
if status["u:RunSceneResponse"]["OK"] != "OK":
return False
return True
def parse(vera, s):
"""
Converts LUUP RunScene values to a SceneAction object.
:param s: Value from LUUP comms.
"""
sa = SceneAction()
sa.vera = vera
sa.id = s["arguments"][0]["value"]
return sa
parse = staticmethod(parse)
class Group(object):
"""
A list of Action objects plus a delay time for when the actions are applied.
"""
def __init__(self, delay=None, actions=None):
"""
Creates a Group object.
:param delay: delay in seconds
:param actions: sequence of Action objects
"""
self.delay = delay
self.actions = actions
if self.actions == None: self.actions = []
def output(self):
"""
Formats the time value in a format suitable for LUUP comms.
"""
acts = []
for i in self.actions:
acts.append(i.output())
return {
"delay": self.delay,
"actions": acts
}
def parse(vera, s):
"""
Converts LUUP group value to an Group object.
:param s: Value from LUUP comms.
"""
aset = Group()
aset.delay = s.get("delay", 0)
if "actions" in s:
for i in s["actions"]:
aset.actions.append(Action.parse(vera, i))
return aset
parse = staticmethod(parse)
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
return self.__dict__ == obj.__dict__ and type(self) == type(obj)
class SceneDefinition(object):
def __init__(self, name=None, triggers=None, modes=None, timers=None,
actions=None, room=None):
self.name = name
if triggers != None:
self.triggers = triggers
else:
self.triggers = []
self.modes = modes
if timers != None:
self.timers = timers
else:
self.timers = []
if actions != None:
self.actions = actions
else:
self.actions = []
self.room = room
def output(self):
"""
Formats the time value in a format suitable for LUUP comms.
"""
triggers = []
for i in self.triggers:
triggers.append(i.output())
timers = []
for i in self.timers:
timers.append(i.output())
actions = []
for i in self.actions:
actions.append(i.output())
val = {
"name": self.name,
"triggers": triggers,
"triggers_operator": "OR",
"timers": timers,
"groups": actions,
"users": "",
}
if self.modes != None:
val["modeStatus"] = self.modes.output()
if self.room != None:
val["room"] = self.room.id
return val
def parse(vera, s):
"""
Converts LUUP scene to a SceneDefinition object.
:param s: Value from LUUP comms.
"""
sd = SceneDefinition()
sd.name = s["name"]
for i in s["triggers"]:
sd.triggers.append(Trigger.parse(vera, i))
if "timers" in s:
for i in s["timers"]:
sd.timers.append(Timer.parse(i))
if "groups" in s:
for i in s["groups"]:
sd.actions.append(Group.parse(vera, i))
if "room" in s:
if s["room"] == 0:
sd.room = None
else:
sd.room = vera.get_room_by_id(s["room"])
if "modeStatus" in s:
sd.modes = Modes.parse(vera, s["modeStatus"])
return sd
parse = staticmethod(parse)
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
return self.__dict__ == obj.__dict__ and type(self) == type(obj)
class Modes(object):
"""
Describes the set of modes for which a scene will be valid
"""
def __init__(self, home=False, away=False, night=False, vacation=False):
"""
Creates a Modes object
:param home: True for scene to be valid in home mode
:param away: True for scene to be valid in away mode
:param night: True for scene to be valid in night mode
:param vacation: True for scene to be valid in vacation mode
"""
self.home, self.away, self.night = home, away, night
self.vacation = vacation
def output(self):
"""
Formats the value in a format suitable for LUUP comms.
"""
val = ""
if self.home:
val = "1"
if self.away:
if val != "": val = val + ","
val = val + "2"
if self.night:
if val != "": val = val + ","
val = val + "3"
if self.vacation:
if val != "": val = val + ","
val = val + "4"
return val
def parse(vera, s):
"""
Converts LUUP modeStatus values to a Modes object.
:param vera: A vera object.
:param s: Value from LUUP comms.
"""
present = set(s.split(","))
m = Modes()
m.home = "1" in present
m.away = "2" in present
m.night = "3" in present
m.vacation = "4" in present
return m
parse = staticmethod(parse)
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
return self.__dict__ == obj.__dict__ and type(self) == type(obj)
class Device(object):
"""
Describes a LUUP device
"""
def __init__(self):
"""
Creates a Device object
"""
pass
def get_variable(self, svc, var):
"""
Queries the LUUP engine for the value of a variable.
:param svc: LUUP service
:param var: Variable name.
"""
action = "variableget"
path = "data_request?id=%s&DeviceNum=%d&serviceId=%s&Variable=%s" \
% (action, self.id, svc, var)
return self.vera.get(path)
def get_switch(self):
"""
Get the current state of a power switch device. Returns a boolean
value.
"""
svc = "urn:upnp-org:serviceId:SwitchPower1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
status = self.get_variable(svc, "Status")
return status == 1
def get_vswitch(self):
"""
Get the current state of a virtual power switch device. Returns a boolean
value.
"""
svc = "urn:upnp-org:serviceId:VSwitch1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
status = self.get_variable(svc, "Status")
return status == 1
def get_rgb(self):
"""
Get the current state of an RGB device. Returns a string.
"""
svc = "urn:upnp-org:serviceId:RGBController1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
# Strip off hash.
return self.get_variable(svc, "Color")[1:]
def get_dimmer(self):
"""
Get the current state of a dimmer device. Returns an integer in
the range 0-100.
"""
svc = "urn:upnp-org:serviceId:Dimming1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
return self.get_variable(svc, "LoadLevelStatus")
def get_temperature(self):
"""
Get the current value of a temperature sensor device. Returns a
floating point value. The temperature scale in use is dependent on
device configuration.
"""
svc = "urn:upnp-org:serviceId:TemperatureSensor1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
return self.get_variable(svc, "CurrentTemperature")
def get_humidity(self):
"""
Get the current value of a humidity sensor device. Returns a
integer value representing % relative humidity.
"""
svc = "urn:micasaverde-com:serviceId:HumiditySensor1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
return self.get_variable(svc, "CurrentLevel")
def get_lux(self):
"""
Get the current value of a light sensor device. Returns a
integer value representing % of lighting.
"""
svc = "urn:micasaverde-com:serviceId:LightSensor1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
return self.get_variable(svc, "CurrentLevel")
def get_kwh(self):
"""
Get the current value of the energy meter in kwh. Returns a
integer value with current kilowatts being used.
"""
svc = "urn:micasaverde-com:serviceId:EnergyMetering1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
return self.get_variable(svc, "KWH")
def get_kwh_reading(self):
"""
Get the current reading of the energy meter in kwh. Returns a
integer value with to date kilowatts being used.
"""
svc = "urn:micasaverde-com:serviceId:EnergyMetering1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
return self.get_variable(svc, "KWHReading")
def get_watt(self):
"""
Get the current value of the energy meter in watts. Returns a
integer value with current wattage being used.
"""
svc = "urn:micasaverde-com:serviceId:EnergyMetering1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
return self.get_variable(svc, "Watts")
def get_setpoint(self):
"""
Get the 'set point' of a thermostat device. This is the temperature
at which the device is configured to turn on heating.
"""
svc = "urn:upnp-org:serviceId:TemperatureSetpoint1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
return self.get_variable(svc, "CurrentSetpoint")
def get_heating(self):
"""
Get the operating mode of a heating device. Valid values are:
Off, HeatOn.
"""
svc = "urn:upnp-org:serviceId:HVAC_UserOperatingMode1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
return self.get_variable(svc, "ModeStatus")
def get_battery(self):
"""
Get the battery capacity of a battery-powered device. Is a %
value, 0-100.
"""
svc = "urn:micasaverde-com:serviceId:HaDevice1"
if not svc in self.services:
raise RuntimeError("Device doesn't support the service")
return self.get_variable(svc, "BatteryLevel")
def set_switch(self, value):
"""
Changes the setting of a switch device.
:param value: new value, boolean
"""
act = SwitchAction(self, value)
return act.invoke()
def set_vswitch(self, value):
"""
Changes the setting of a switch device.
:param value: new value, boolean
"""
act = VSwitchAction(self, value)
return act.invoke()
def set_rgb(self, value):
"""
Changes the setting of an RGB device.
:param value: new value, string in form #aabbccddee
"""
act = RGBAction(self, value)
return act.invoke()
def set_dimmer(self, value):
"""
Changes the setting of a dimmer device.
:param value: new value, 0-100.
"""
act = DimmerAction(self, value)
return act.invoke()
def set_setpoint(self, value):
"""
Changes the set point of a thermostat device.
:param value: new value, float.
"""
act = SetpointAction(self, value)
return act.invoke()
def set_heating(self, value):
"""
Changes the operating mode of a heating device.
:param value: new value, string, valid values are: Off HeatOn.
"""
act = HeatingAction(self, value)
return act.invoke()
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
return self.__dict__ == obj.__dict__ and type(self) == type(obj)
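# Illustrative use (names hypothetical): with a concrete Vera subclass that
# implements get(),
#   dev = vera.get_device("Lamp")   # look up a device by name
#   dev.set_switch(True)            # returns a Job tracking the LUUP action
#   dev.get_switch()                # reads SwitchPower1 Status back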
class Scene(object):
"""
Represents a scene, once it is configured into the LUUP engine.
"""
def __init__(self):
"""
Creates a Scene object.
"""
pass
def delete(self):
"""
Calls the LUUP engine to delete this scene.
"""
self.vera.delete_scene(self)
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
return self.__dict__ == obj.__dict__ and type(self) == type(obj)
def run(self):
self.vera.run_scene(self.id)
class Room(object):
"""
Represents a room, configured into the LUUP engine.
"""
def __init__(self):
pass
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
if type(self) != type(obj): return False
return self.__dict__ == obj.__dict__
class Vera(object):
"""
Vera represents a connection to a Vera device's LUUP engine.
"""
def __init__(self):
"""
Creates a new Vera object.
"""
self.update_state()
def update_state(self):
"""
Queries the LUUP engine for user_data state, and updates the local
copy.
"""
ud = self.get('data_request?id=user_data&output_format=json')
self.user_data = ud
self.rooms = {}
for i in self.user_data["rooms"]:
s = Room()
s.vera = self
s.id = int(i["id"])
s.name = i["name"]
self.rooms[s.id] = s
self.devices = {}
for i in self.user_data["devices"]:
d = Device()
d.vera = self
d.id = int(i["id"])
d.name = i["name"]
if "manufacturer" in i:
d.manufacturer = i["manufacturer"]
else:
d.manufacturer = None
if "model" in i:
d.model = i["model"]
else:
d.model = None
d.services = set()
for st in i["states"]:
d.services.add(st["service"])
d.device_type = i["device_type"]
if "device_file" in i:
d.device_file = i["device_file"]
if "device_json" in i:
d.device_json = i["device_json"]
if "invisible" in i and int(i["invisible"]) > 0:
d.invisible = True
else:
d.invisible = False
if "room" in i and int(i["room"]) in self.rooms:
d.room = self.rooms[int(i["room"])]
else:
d.room = None
self.devices[d.id] = d
self.scenes = {}
for i in self.user_data["scenes"]:
s = Scene()
s.vera = self
s.id = int(i["id"])
s.name = i["name"]
if 'room' in i and int(i["room"]) in self.rooms:
s.room = self.rooms[int(i["room"])]
else:
s.room = None
s.definition = SceneDefinition.parse(self, i)
self.scenes[s.id] = s
def get_room_by_id(self, id):
"""
Return a Room object if one exists matching the provided ID.
:param id: Room ID, an integer
:return: A Room object
"""
if not isinstance(id, int):
id = int(id)
if id in self.rooms:
return self.rooms[id]
raise RuntimeError("Room not known")
def get_room(self, name):
"""
Return a Room object if one exists with the provided room name.
:param name: Room name.
:return: A Room object
"""
for i in self.rooms:
if self.rooms[i].name == name:
return self.rooms[i]
raise RuntimeError("Room '%s' not known" % name)
def get_device(self, name, room=None):
"""
Return a Device object if one exists with the provided device name.
Optionally, a room object can be specified to restrict the search for
the device to that room.
:param name: Device name.
:param room: Optional Room object.
:return: A Device object
"""
for i in self.devices:
if self.devices[i].name == name:
if room == None or self.devices[i].room == room:
return self.devices[i]
raise RuntimeError("Device '%s' not known" % name)
def get_device_by_id(self, id):
"""
Return a Device object if one exists matching the provided ID.
:param id: Device ID, an integer
:return: A Device object
"""
if not isinstance(id, int):
id = int(id)
for i in self.devices:
if self.devices[i].id == id:
return self.devices[i]
raise RuntimeError("Device not found")
def get_devices(self):
"""
Return a sequence of devices.
:return: A sequence of Device objects.
"""
devices = []
for i in self.devices:
devices.append(self.devices[i])
return devices
def get_scenes(self):
"""
Returns a list of scenes.
:return: A sequence of Scene objects.
"""
scenes = []
for i in self.scenes:
scenes.append(self.scenes[i])
return scenes
def get_rooms(self):
"""
Returns a list of rooms.
:return: A sequence of Room objects.
"""
rooms = []
for i in self.rooms:
rooms.append(self.rooms[i])
return rooms
def get(self, path):
"""
Performs an HTTP/S 'GET' for a LUUP resource, which is returned.
:param path: Relative path for the resource e.g. data_request?id=alive
:return: The resource. If the underlying resource is JSON, this is
converted to Python dict.
"""
raise RuntimeError("Not implemented")
def get_user_data(self):
"""
Returns the user_data. Doesn't fetch, this is a local copy.
:return: user_data as a Python dict.
"""
return self.user_data
def get_sdata(self):
"""
Fetches the sdata from the LUUP engine.
:return: sdata as a dict.
"""
payload = self.get('data_request?id=sdata&output_format=json')
return payload
def get_file(self, path):
"""
Fetches a file from the Vera device.
:param path: filename.
:return: file contents
"""
file = self.get('data_request?id=file&parameters=%s' % path)
return file
def get_status(self):
"""
Gets Vera status.
:return: status value.
"""
payload = self.get('data_request?id=status&output_format=json')
return payload
def get_scene(self, id):
"""
Fetches a scene from the LUUP device. You probably want to use
get_scenes, to access Scene objects.
:param id: scene ID.
:return: scene as Python dict, not a Scene object.
"""
if not isinstance(id, int):
id = int(id)
payload = self.get('data_request?id=scene&action=list&scene=%d&output_format=json' % id)
return payload
def delete_scene(self, s):
"""
Deletes a Scene from Vera.
:param s: A Scene object
"""
return self.get('data_request?id=scene&action=delete&scene=%s' % s.id)
def create_scene(self, s):
"""
Creates a Scene object from a SceneDefinition object.
:param s: SceneDefinition object.
"""
s = json.dumps(s.output())
# URL-encoding. Vera not happy with Python's standard
# URL-encoding.
s = Vera.urlencode(s)
payload = self.get('data_request?id=scene&action=create&json=%s' % s)
return payload
def run_scene(self, id):
"""
Run a scene by ID.
:param id: Scene number.
"""
act = SceneAction(self, id)
return act.invoke()
def __str__(self):
return str(self.__dict__)
def __eq__(self, obj):
return self.__dict__ == obj.__dict__ and type(self) == type(obj)
@staticmethod
def urlencode(s):
# URL-encoding. Vera not happy with Python's standard
# URL-encoding.
s = s.replace("%", "%25")
s = s.replace(":", "%3a")
s = s.replace("+", "%2b")
s = s.replace("&", "%26")
s = s.replace("{", "%7b")
s = s.replace("}", "%7d")
s = s.replace("'", "%27")
s = s.replace('"', "%22")
s = s.replace("?", "%3f")
s = s.replace(" ", "%20")
s = s.replace("/", "%2f")
return s
def get_weather(self):
"""
Gets the weather status for the current location.
:returns: a tuple of two items, first item is a floating point
local temperature, second is a human-readable outlook.
"""
city = self.user_data["weatherSettings"]["weatherCity"]
country = self.user_data["weatherSettings"]["weatherCountry"]
host = "weather.mios.com"
temp_scale = "C"
url = "http://%s/?tempFormat=%s&cityWeather=%s&countryWeather=%s" % \
(host, temp_scale, Vera.urlencode(city), Vera.urlencode(country))
weather = self.proxy_get(url)
return (float(weather["temp"]), weather["text"])
def all_switches(self, value):
"""
Implement a state change to all switches.
:param value: The value to apply to all switches, a boolean.
:return: a Job object, describing the job implementing the action.
"""
if value:
value = 1
else:
value = 0
base="data_request?id=action"
action = "SetTarget"
svc = "urn:upnp-org:serviceId:SwitchPower1"
path = "%s&Category=%d&serviceId=%s&action=%s&newTargetValue=%d&output_format=json" \
% (base, 3, svc, action, value)
status = self.get(path)
job = Job()
job.id = int(status["u:SetTargetResponse"]["JobID"])
job.vera = self
return job
def all_dimmers(self, value):
"""
Implement a state change to all dimmer devices.
:param value: The value to apply to all devices, an integer 0-100.
:return: a Job object, describing the job implementing the action.
"""
base="data_request?id=action"
action = "SetLoadLevelTarget"
svc = "urn:upnp-org:serviceId:Dimming1"
path = "%s&Category=%d&serviceId=%s&action=%s&newTargetValue=%d&output_format=json" \
% (base, 2, svc, action, value)
status = self.get(path)
job = Job()
job.id = int(status["u:SetLoadLevelTargetResponse"]["JobID"])  # response key mirrors the SetLoadLevelTarget action
job.vera = self
return job
def all_lights(self, value):
"""
Implement a state change to all light devices.
:param value: The value to apply to all lights, a boolean (on/off).
:return: a Job object, describing the job implementing the action.
"""
if value:
value = 1
else:
value = 0
base="data_request?id=action"
action = "SetTarget"
svc = "urn:upnp-org:serviceId:SwitchPower1"
path = "%s&Category=%d&serviceId=%s&action=%s&newTargetValue=%d&output_format=json" \
% (base, 999, svc, action, value)
status = self.get(path)
job = Job()
job.id = int(status["u:SetTargetResponse"]["JobID"])
job.vera = self
return job
def all_heating(self, value):
"""
Implement a state change to all heating devices.
:param value: The value to apply to all switches, a string.
:return: a Job object, describing the job implementing the action.
"""
base="data_request?id=action"
action = "SetModeTarget"
svc = "urn:upnp-org:serviceId:HVAC_UserOperatingMode1"
path = "%s&Category=%d&serviceId=%s&action=%s&NewModeTarget=%s&output_format=json" \
% (base, 5, svc, action, value)
status = self.get(path)
job = Job()
job.id = int(status["u:SetModeTargetResponse"]["JobID"])
job.vera = self
return job
class VeraLocal(Vera):
"""
Represents a connection to a local Vera device.
"""
def __init__(self, host, port = 3480):
"""
Connects to a local Vera device on the local network.
:param host: string containing hostname or IP address.
:param port: port number.
"""
self.host = host
self.port = port
Vera.__init__(self)
def get(self, path):
"""
Performs an HTTP/S 'GET' for a LUUP resource, which is returned.
:param path: Relative path for the resource e.g. data_request?id=alive
:return: The resource. If the underlying resource is JSON, this is
converted to Python dict.
"""
base = 'http://%s:%d' % (self.host, self.port)
url = '%s/%s' % (base, path)
conn = urllib.request.urlopen(url)
payload = conn.read()
try:
payload = json.loads(payload)
except:
pass
conn.close()
return payload
def proxy_get(self, url):
url = Vera.urlencode(url)
url = "http://%s/cgi-bin/cmh/proxy.sh?url=%s" % \
(self.host, url)
response = requests.get(url)
try:
return response.json()
except:
pass
return response.text
class VeraRemote(Vera):
def get_session_token(self, server):
"""
Get a session token for subsequent operations on a server. You
shouldn't need to call this.
:param server: the server.
:return: session token string.
"""
headers = {"MMSAuth": self.auth_token, "MMSAuthSig": self.auth_sig}
url = "https://%s/info/session/token" % server
session_token = self.session.get(url, headers=headers).text
return session_token
def __init__(self, user, password, device):
"""
Connects to a remote Vera device through the relay servers.
:param user: username.
:param password: password.
:param device: device ID
"""
self.user = user
self.password = password
self.device = device
self.session = requests.session()
# Hard-coded auth seed
seed = "oZ7QE6LcLJp6fiWzdqZc"
# Get auth tokens (hashlib.sha1 replaces the removed Python 2 'sha' module)
import hashlib
sha1p = hashlib.sha1((user.lower() + password + seed).encode("utf-8"))
sha1p = sha1p.hexdigest()
auth_server = "vera-us-oem-autha11.mios.com"
url = "https://%s/autha/auth/username/%s?SHA1Password=%s&PK_Oem=1" % \
(auth_server, user.lower(), sha1p)
response = self.session.get(url).json()
self.server_account = response["Server_Account"]
self.auth_token = response["Identity"]
self.auth_sig = response["IdentitySignature"]
# Get account number
account_info = json.loads(base64.b64decode(self.auth_token))
pk_account = account_info["PK_Account"]
sys.stderr.write("Account number: %s\n" % pk_account)
# Get session token for server account
session_token = self.get_session_token(self.server_account)
# Get devices
headers = { "MMSSession": session_token }
url = "https://%s/account/account/account/%s/devices" % \
(self.server_account, str(pk_account))
devices = self.session.get(url, headers=headers).json()
# Work out server device
server_device = None
for i in devices["Devices"]:
if i["PK_Device"] == device:
server_device = i["Server_Device"]
if server_device is None:
raise RuntimeError("Device %s not known.\n" % device)
sys.stderr.write("Server device: %s\n" % server_device)
# Get session token on server_device
session_token = self.get_session_token(server_device)
# Get server_relay
headers = { "MMSSession": session_token }
url = "https://" + server_device + "/device/device/device/" + \
str(device)
relay_info = self.session.get(url, headers=headers).json()
self.relay = relay_info["Server_Relay"]
sys.stderr.write("Server relay: %s\n" % self.relay)
# Get session token on server_relay
self.session_token = self.get_session_token(self.relay)
Vera.__init__(self)
sys.stderr.write("Connected to remote device.\n")
def get(self, path):
"""
Performs an HTTP/S 'GET' for a LUUP resource, which is returned.
:param path: Relative path for the resource e.g. data_request?id=alive
:return: The resource. If the underlying resource is JSON, this is
converted to Python dict.
"""
headers = { "MMSSession": self.session_token }
url = "https://%s/relay/relay/relay/device/%s/port_3480/%s" % \
(self.relay, str(self.device), path)
response = requests.get(url, headers=headers)
try:
return response.json()
except:
pass
return response.text
def proxy_get(self, url):
url = Vera.urlencode(url)
headers = { "MMSSession": self.session_token }
url = "https://%s/relay/relay/proxy?url=%s" % (self.relay, url)
response = requests.get(url, headers=headers)
try:
return response.json()
except:
pass
return response.text
#http://control/cgi-bin/cmh/proxy.sh?url=http%3A%2F%2Fweather.mios.com%2F%3FtempFormat%3DC%26cityWeather%3DCheltenham%2520England%26countryWeather%3DUnited%2520Kingdom
def connect(config="LUUP-AUTH.json"):
"""
Gets Vera connection information from a file, and connects to a Vera.
:param config: filename
:return: a Vera object on successful connection.
"""
config = json.loads(open(config, "r").read())
if "local" in config:
return VeraLocal(config["local"]["address"])
else:
user = config["remote"]["user"]
password = config["remote"]["password"]
device = config["remote"]["device"]
return VeraRemote(user, password, device)
```
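For orientation, a minimal usage sketch of the classes above; the IP address and scene name are placeholders, not values from the original project.
```python
# hypothetical session against a local Vera box
vera = VeraLocal("192.168.1.10")
for device in vera.get_devices():
    room = device.room.name if device.room else "-"
    print(device.id, device.name, room)
# run a configured scene by name ("Evening" is made up)
evening = [s for s in vera.get_scenes() if s.name == "Evening"][0]
evening.run()
```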
|
{
"source": "jeugregg/newspaper-crawler",
"score": 3
}
|
#### File: newspaper_crawler/crawling_jobs/base_crawling_job.py
```python
import feedparser
from scrapy.crawler import CrawlerRunner
from twisted.internet import reactor
from ..database import Database
class BaseCrawlingJob():
"""Base class for newspaper crawling process.
Attributes:
has_database (boolean): whether or not the crawler has a database.
db (Database object, optional): allows to interact with the crawler
database (cf. database module).
newspaper (string): name of the scraped newspaper.
NewspaperSpider (Class inheriting from scrapy.Spider): defines how to
scrape the newspaper.
rss_feeds (list): rss urls where to find newspaper new articles.
"""
def __init__(self, has_database):
self.has_database = has_database
if self.has_database:
self.db = Database(db_name="newspaper_db")
else:
self.db = None
def crawl(self, delay=3600):
"""Crawls newspaper articles not crawled yet every `x` seconds.
Args:
delay (int): delay in seconds before next crawling job.
Defaults to 1 hour.
"""
urls_to_crawl = self.get_urls_to_crawl()
if len(urls_to_crawl) > 0:
runner = CrawlerRunner()
runner.crawl(self.NewspaperSpider, start_urls=urls_to_crawl,
db=self.db)
reactor.callLater(delay, self.crawl)
def get_urls_to_crawl(self):
"""Returns a list of the newspaper urls not crawled yet."""
urls = []
for rss_feed in self.rss_feeds:
parsed_rss_feed = feedparser.parse(rss_feed)
for post in parsed_rss_feed.entries:
url = post.link
if url.split(".")[1] == self.newspaper:
with open("urls_met.txt", "r") as f:
urls_met = f.read().split("\n")
if url not in urls_met:
entry = "\n{}".format(url)
with open("urls_met.txt", "a", encoding="utf-8") as f:
f.write(entry)
urls.append(url)
return urls
```
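A hypothetical concrete job built on this base class, to show which attributes a subclass is expected to supply; the spider, newspaper name, and RSS URL are placeholders, and `BaseSpider` is assumed importable from the spiders module shown further below.
```python
class ExampleSpider(BaseSpider):
    dirname = "example"

    def parse(self, response):
        pass  # a real spider would populate a loader and call self.save()

class ExampleCrawlingJob(BaseCrawlingJob):
    def __init__(self, has_database):
        super().__init__(has_database)
        self.newspaper = "example"
        self.NewspaperSpider = ExampleSpider
        self.rss_feeds = ["https://www.example.com/rss/une.xml"]

job = ExampleCrawlingJob(has_database=False)
job.crawl(delay=3600)  # re-schedules itself on the Twisted reactor
```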
#### File: newspaper_crawler/loaders/lemonde_loader.py
```python
from scrapy.loader.processors import Compose
from scrapy.loader.processors import Join
from .base_loader import BaseLoader
class LeMondeLoader(BaseLoader):
def clean_date(rawdata):
return rawdata[: 10]
def clean_author(rawdata):
return rawdata.replace(" Par ", "")
def clean_description(rawdata):
return rawdata[2: -1]
date_in = Compose(lambda v: v[0], clean_date)
author_out = Compose(Join(", "), clean_author)
description_out = Compose(Join(", "), clean_description)
```
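A standalone illustration of how these loader processors chain: `Compose` applies its functions left to right, and `Join` first concatenates the list of scraped values.
```python
from scrapy.loader.processors import Compose, Join

date_in = Compose(lambda v: v[0], lambda raw: raw[:10])
print(date_in(["2020-05-17T08:30:00+02:00"]))  # 2020-05-17

author_out = Compose(Join(", "), lambda raw: raw.replace(" Par ", ""))
print(author_out([" Par Jane Doe"]))  # Jane Doe
```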
#### File: newspaper_crawler/loaders/liberation_loader.py
```python
import re
from scrapy.loader.processors import Compose
from scrapy.loader.processors import Join
from .base_loader import BaseLoader
class LiberationLoader(BaseLoader):
def clean_date(rawdata):
date = re.sub("[^0-9-]", "", rawdata)
date = date[0:len(date)-6]
date = date.replace("-", "/")
date = date.split("/")
date[2], date[0] = date[0], date[2]
date = "/".join(date)
return date
def clean_title(rawdata):
return rawdata[0].replace(" - Libération", "")
def clean_author(rawdata):
return rawdata.replace(" LIBERATION", "")
date_in = Compose(lambda v: v[0], clean_date)
title_in = Compose(clean_title)
author_out = Compose(Join(", "), clean_author)
```
#### File: newspaper_crawler/spiders/base_spider.py
```python
import unicodedata
import scrapy
import json
import abc
from datetime import datetime
class BaseSpider(scrapy.Spider, abc.ABC):
"""Base spider, defines how to scrape a newspaper website.
See Scrapy documentation for more information:
'https://docs.scrapy.org/en/latest/topics/spiders.html'
Attributes:
dirname (string): directory name where the articles (jsons) are stored.
newspaper (string): name of the scraped newspaper.
db: (Database object, optional): allows to interact with the crawler
database (cf. database module).
start_urls (list): urls to crawl.
custom_settings (dict): configuration when running the spider.
article_count (int): number of articles scraped by the instance.
"""
custom_settings = {"DOWNLOAD_DELAY": 2}
article_count = 0
@abc.abstractmethod
def parse(self, response):
"""Parses an article before processing and saving it.
Args:
response (scrapy Response object): the http response of the scraped
web page.
"""
def load_item(self, loader, response):
"""Extracts and processes informations from an article.
Args:
loader (scrapy Loader object): able to scrape data from a response.
response (scrapy Response object): the http response of the scraped
web page.
Returns:
item (scrapy Item object): populated with scraped and processed
data.
"""
loader.add_value("title", response.css("title::text").extract_first())
loader.add_value("theme", response.url)
loader.add_value("url", response.url)
item = loader.load_item()
return item
def save(self, newspaper, item):
"""Saves scraped data into json files and in database if asked.
Args:
newspaper (string): name of the newspaper.
item (scrapy Item object): populated with scraped and processed
data.
"""
missing_keys = list(set(item.fields.keys()) - set(item.keys()))
for missing_key in missing_keys:
item[missing_key] = ""
if len(item["body"]) > 2100:
if item["date"] == "":
time = datetime.now()
item["date"] = "{}/{}/{}".format(time.day, time.month,
time.year)
self.article_count += 1
filename = "{} #{}.json".format(newspaper, self.article_count)
filepath = "pressarticles/{}/{}".format(self.dirname, filename)
with open(filepath, "w", encoding="utf-8") as f:
json.dump(dict(item), f, indent=4)
self.log("Saved file {}".format(filename))
if self.db is not None:
self.db.insert_article(
url=item["url"],
source=newspaper,
author=item["author"],
title=item["title"],
theme=item["theme"],
description=item["description"],
date_published=item["date"],
body=item['body'],
)
```
|
{
"source": "jeui123/pyrelay",
"score": 3
}
|
#### File: pyrelay/Data/SlotObjectData.py
```python
class SlotObjectData:
def __init__(self, objectId=0, slotId=0, objectType=0):
self.objectId = objectId
self.slotId = slotId
self.objectType = objectType
def read(self, reader):
self.objectId = reader.readInt32()
self.slotId = reader.readInt32()
self.objectType = reader.readInt32()
def write(self, writer):
writer.writeInt32(self.objectId)
writer.writeInt32(self.slotId)
writer.writeInt32(self.objectType)
def __str__(self):
return "{} {} {}".format(self.objectId, self.slotId, self.objectType)
def clone(self):
return SlotObjectData(self.objectId, self.slotId, self.objectType)
```
#### File: Networking/Packets/PacketTypes.py
```python
import Networking.Packets.Incoming as Incoming
import Networking.Packets.Outgoing as Outgoing
class PacketTypes:
def __init__(self):
for file in dir(Incoming):
if "Packet" in file:
file = file.replace("Packet", "")
self.__dict__[file.upper()] = file.upper()
for file in dir(Outgoing):
if "Packet" in file:
file = file.replace("Packet", "")
self.__dict__[file.upper()] = file.upper()
```
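The class above builds its attributes by introspecting the packet modules. The same `dir()`-based pattern on a stand-in namespace, for illustration:
```python
import types

Incoming = types.SimpleNamespace(HelloPacket=object, UpdatePacket=object, util=None)
found = {}
for name in dir(Incoming):
    if "Packet" in name:
        name = name.replace("Packet", "")
        found[name.upper()] = name.upper()
print(found)  # {'HELLO': 'HELLO', 'UPDATE': 'UPDATE'}
```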
|
{
"source": "jeury301/algo-stanford",
"score": 4
}
|
#### File: algo-stanford/scratch-code/n_square_sorts.py
```python
def selection_sort(arr):
"""Refresher implementation of selection sort - in-place & stable.
:param arr: List of integers to sort
:return: Sorted list
"""
for i in range(len(arr)):
min = i
# selecting index of smallest number
for j in range(i, len(arr)):
if arr[j] < arr[min]:
min = j
# swapping current index with smallest number
arr[i], arr[min] = arr[min], arr[i]
return arr
def insertion_sort(arr):
"""Refresher implementation of insertion sort - in-place & stable.
:param arr: List to be sorted.
:return: Sorted list.
"""
for i in range(1, len(arr)):
tmp = arr[i]
j = i
# shift larger elements one position to the right
while j > 0 and arr[j - 1] > tmp:
arr[j] = arr[j - 1]
j -= 1
# drop the saved element into its sorted slot
arr[j] = tmp
return arr
def bubble_sort(arr):
"""Refresher implementation of buble-sort - in-place & stable.
:param arr: List to be sorted.
:return: Sorted list.
"""
for i in range(len(arr)):
for j in range(len(arr) - 1):
# check if elements are in relative out-of-order
if arr[j] > arr[j + 1]:
# swapping adjacent elements
arr[j], arr[j + 1] = arr[j + 1], arr[j]
return arr
if __name__ == "__main__":
arr = [23, 12, 3, 9, 7, 1, 13, 10]
print("original", arr)
print("selection-sort", selection_sort(arr))
print("insertion-sort", insertion_sort(arr))
print("bubble-sort", bubble_sort(arr))
```
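Since all three sorts mutate their input in place, a quick sanity check against the built-in `sorted()` (run on copies, assuming the functions above are in scope):
```python
import random

data = [random.randint(0, 99) for _ in range(100)]
assert selection_sort(data[:]) == sorted(data)
assert insertion_sort(data[:]) == sorted(data)
assert bubble_sort(data[:]) == sorted(data)
```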
|
{
"source": "jeury301/code-fights",
"score": 3
}
|
#### File: intro/a. 4. adjacent-elements-product/adjacent_elements_product.py
```python
def adjacentElementsProduct(inputArray):
first, second = 0, 1
lp = inputArray[first]*inputArray[second]
for index in range(2, len(inputArray)):
first = second
second = index
new_lp = inputArray[first]*inputArray[second]
if new_lp > lp:
lp = new_lp
return lp
```
#### File: intro/b. 15. add-border/add_border.py
```python
def addBorder(picture):
rows, cols = len(picture), len(picture[0])
final = ["*"*(cols+2)] * (rows + 2)
for i, x in enumerate(picture):
final[i+1] = "*" + x + "*"
return final
```
#### File: intro/c. 20. array-maximal-adjacent-difference/array_maximal_adjacent_difference.py
```python
def arrayMaximalAdjacentDifference(inputArray):
k = inputArray[:]
return max([abs(k[i]-k[i - 1]) for i in range(1, len(k))])
```
#### File: intro/d. 30. circle-of-numbers/circle_of_numbers.py
```python
def circleOfNumbers(n, firstNumber):
return firstNumber + n//2 if firstNumber + n//2 < n else firstNumber - n//2
```
#### File: intro/d. 31. deposit-profit/deposit_profit.py
```python
def depositProfit(deposit, rate, threshold):
d, y = deposit, 0
while(d < threshold):
d, y = d + (d*rate)/100, y+1
return y
```
#### File: intro/d. 39. knapsack-light/knapsack_light.py
```python
def knapsackLight(value1, weight1, value2, weight2, maxW):
# both items fit together
if weight1 + weight2 <= maxW:
return value1 + value2
# otherwise take the most valuable single item that fits
fits = [v for v, w in ((value1, weight1), (value2, weight2)) if w <= maxW]
return max(fits, default=0)
```
#### File: intro/e. 42. bishop-and-pawn/bishop_and_pawn.py
```python
def bishopAndPawn(bishop, pawn):
trans = {chr(ord("a")+i):i for i in range(8)}
b_pos, p_pos = (trans[bishop[0]]*8 + int(bishop[1]),
trans[pawn[0]]*8 + int(pawn[1]))
if p_pos > b_pos:
b_pos = b_pos+8*abs(int(pawn[1])-int(bishop[1]))+int(pawn[1])-int(bishop[1])
else:
b_pos = b_pos-8*abs(int(pawn[1])-int(bishop[1]))+int(pawn[1])-int(bishop[1])
return b_pos == p_pos
```
#### File: intro/e. 44. find-email-domain/find_email_domain.py
```python
def findEmailDomain(address):
tokens = address.split("@")
return "" if len(tokens) < 2 else tokens[len(tokens)-1]
```
#### File: intro/e. 45. build-palindrome/build_palindrome.py
```python
def buildPalindrome(st):
k = [i for i in st]
to_append = []
while(checkPalin(k) is False):
to_append.append(k[0])
k = k[1:]
to_append = "".join(to_append[::-1])
return st + to_append
def checkPalin(t):
for i in range(len(t)):
if t[i] != t[len(t) - (i + 1)]:
return False
return True
```
#### File: intro/f. 52. longest-word/longest_word.py
```python
def longestWord(text):
tokens = "".join([i if i.isalnum() else " " for i in text]).split(" ")
clean_tokens = ["".join([i for i in x if i.isalnum()]) for x in tokens]
return sorted(clean_tokens, key=len)[::-1][0]
```
#### File: intro/f. 53. valid-time/valid_time.py
```python
def validTime(time):
tokens = time.split(":")
hours, mins = tokens[0], tokens[1]
if int(hours) < 0 or int(hours) > 23:
return False
if int(mins) < 0 or int(mins) > 59:
return False
return True
```
#### File: intro/f. 57. file-naming/file_naming.py
```python
def fileNaming(names):
seen = set()
times = {}
output = []
for x in names:
if x not in seen:
output.append(x)
seen.add(x)
times[x] = 1
else:
to_add = x + "("+str(times[x])+")"
times[x] += 1
while to_add in seen:
to_add = x + "("+str(times[x])+")"
times[x] += 1
output.append(to_add)
seen.add(to_add)
times[to_add] = 1
return output
```
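Expected behaviour of `fileNaming` on a small input, traced by hand (not output from the repo):
```python
print(fileNaming(["doc", "doc", "image", "doc(1)"]))
# ['doc', 'doc(1)', 'image', 'doc(1)(1)']
```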
|
{
"source": "jeury301/ml-coursera",
"score": 4
}
|
#### File: practice/python-ml/matrix_ops.py
```python
import numpy as np
def transpose(M):
rows, cols = len(M), len(M[0])
N = build_matrix(cols,rows)
for i in range(cols):
for j in range(rows):
N[i][j] = M[j][i]
return N
def build_matrix(rows, cols):
return [[0 for j in range(cols)] for i in range(rows)]
if __name__ == "__main__":
A = [[1, 2, 0],[3, 5, 9]]
print("A: ")
print(np.matrix(A))
B = transpose(A)
print("B: ")
print(np.matrix(B))
```
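For rectangular matrices, the same transpose can be written with `zip()` instead of index arithmetic; a small equivalent sketch:
```python
def transpose_zip(M):
    return [list(row) for row in zip(*M)]

assert transpose_zip([[1, 2, 0], [3, 5, 9]]) == [[1, 3], [2, 5], [0, 9]]
```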
|
{
"source": "jeury301/njit-advising-system",
"score": 2
}
|
#### File: app/controllers/majors.py
```python
from ferris import Controller, scaffold, route, localize
from app.models.major import Major
from ferris.components.flash_messages import FlashMessages
from datetime import datetime, timedelta
from google.appengine.ext import deferred
from app.services import advising_services
import httplib2, logging, json, time, math
class Majors(Controller):
class Meta:
prefixes = ('cron',)
components = (scaffold.Scaffolding,FlashMessages,)
@route
def list(self):
self.context['majors'] = Major.get_majors()
@route
def view_major(self, major_key):
major = self.util.decode_key(major_key).get()
#self.meta.change_view('JSON')
self.context['major_description'] = json.loads(major.major_description)
self.context['discipline'] = major.discipline
@route
def cron_update_majors(self):
deferred.defer(advising_services.update_majors)
return 200
```
#### File: app/models/major.py
```python
from ferris import BasicModel
from google.appengine.ext import ndb
class Major(BasicModel):
college = ndb.StringProperty()
department = ndb.StringProperty()
link = ndb.StringProperty()
degree_level = ndb.StringProperty()
discipline = ndb.StringProperty()
major_description = ndb.JsonProperty() # The full metadata of the major in JSON format
parsed = ndb.BooleanProperty()
@classmethod
def get_majors(cls):
"""
Retrieves all majors, ordered by discipline
"""
return cls.query().order(cls.discipline)
@classmethod
def get_major_for_discipline(cls, discipline):
return cls.query(cls.discipline==discipline)
@classmethod
def add_new(cls, form_data):
new_major = Major(
college = form_data['college'],
department = form_data['department'],
link = form_data['link'],
degree_level = form_data['degree_level'],
discipline = form_data['discipline'],
major_description = form_data['major_description'],
parsed = form_data['parsed']
)
new_major.put()
return new_major
```
|
{
"source": "jeury301/Python-Game",
"score": 3
}
|
#### File: jeury301/Python-Game/game.py
```python
import pygame
import math
import random
from pygame.locals import *
def main():
"""Main game execution
"""
game_init() # initializing game
load_resources() # loading game resources
game_loop() # looping through game
def game_init():
"""Initializing game
"""
# initializing global variables
global screen, width, height, keys, playerpos, accuracy, arrows
global badtimer,badtimer1, badguys, healthvalue
# initializing game and game-related variables
pygame.init()
width, height = 640, 480 # screen width and height
keys = [False, False, False, False] # game keys (WASD)
playerpos=[100,100] # player position
accuracy =[0,0] # player's accuracy
arrows = [] # arrows
badtimer=100 # timer to decrease for bad guys to appear
badtimer1=0 # timer to increase for bad guys to appear/disappear
badguys=[[640,100]] # bad guys' initial position
healthvalue=194 # health value
screen = pygame.display.set_mode((width, height))
def load_resources():
"""Loading game resources
"""
# initializing global variables
global player, grass, castle, arrow, gameover
global badguyimg, badguyimg1, healthbar, health, youwin
global shoot, hit, enemy
# initializing mixer
pygame.mixer.init()
# loading resources
player = pygame.image.load("resources/images/dude.png")
grass = pygame.image.load("resources/images/grass.png")
castle = pygame.image.load("resources/images/castle.png")
arrow = pygame.image.load("resources/images/bullet.png")
healthbar = pygame.image.load("resources/images/healthbar.png")
health = pygame.image.load("resources/images/health.png")
badguyimg1 = pygame.image.load("resources/images/badguy.png")
gameover = pygame.image.load("resources/images/gameover.png")
youwin = pygame.image.load("resources/images/youwin.png")
hit = pygame.mixer.Sound("resources/audio/explode.wav")
enemy = pygame.mixer.Sound("resources/audio/enemy.wav")
shoot = pygame.mixer.Sound("resources/audio/shoot.wav")
badguyimg = badguyimg1
# setting up music
hit.set_volume(0.05)
enemy.set_volume(0.05)
shoot.set_volume(0.05)
pygame.mixer.music.load('resources/audio/moonlight.wav')
pygame.mixer.music.play(-1, 0.0)
pygame.mixer.music.set_volume(0.25)
def draw_grass():
"""Drawing grass to the screen
"""
# referencing global variables
global width, height, grass, screen
# iterating over width // grass_width (integer division keeps range() valid on Python 3)
for x in range(width // grass.get_width() + 1):
# iterating over height // grass_height
for y in range(height // grass.get_height() + 1):
# drawing grass on screen
screen.blit(grass,(x*100,y*100))
def draw_castle():
"""Drawing castle
"""
# referencing global variable(s)
global castle, screen
y_castle = 30
# drawing castle(s) on the screen
for x in range(4):
screen.blit(castle, (0,y_castle))
y_castle += 105
def draw_player():
"""Drawing player with z rotation
"""
# referencing global variables
global player, playerpos, playerpos1
# calculating z rotation value
position = pygame.mouse.get_pos() # getting mouse position
# calculating angle between mouse and player tan(angle) = (y2-y1)/(x2-x1)
# angle = arctan((y2-y1)/(x2-x1))
# angle is in radians
angle = math.atan2(
position[1]-(playerpos[1]+32),
position[0]-(playerpos[0]+26)
)
angle_degrees = 360-angle*57.29
# player rotation
playerrot = pygame.transform.rotate(player, angle_degrees)
# player new position
playerpos1 = (
playerpos[0]-playerrot.get_rect().width/2,
playerpos[1]-playerrot.get_rect().height/2)
# drawing player on the screen
screen.blit(playerrot, playerpos1)
def draw_arrows():
"""Drawing the arrows fired by the player
"""
# referencing global variables
global arrow, arrows
# updating arrows position with velocity components
for bullet in arrows:
index=0
# velocity vector components:
# x-component: cos(angle) * acceleration
# y-component: sin(angle) * acceleration
velx=math.cos(bullet[0])*10 # x-component of the velocity vector
vely=math.sin(bullet[0])*10 # y-component of the velocity vector
# adding velocities to the arrows position components
bullet[1]+=velx
bullet[2]+=vely
# removing arrow from screen
if bullet[1]<-64 or bullet[1]>640 or bullet[2]<-64 or bullet[2]>480:
arrows.pop(index)
index+=1
# drawing arrows on screen
for projectile in arrows:
arrow1 = pygame.transform.rotate(arrow, 360-projectile[0]*57.29)
screen.blit(arrow1, (projectile[1], projectile[2]))
def draw_bad_guys():
"""Drawing bad guys
"""
# referencing global variables
global badtimer, badtimer1, badguys, badguyimg
global healthvalue, accuracy, arrows, hit, enemy
# check if its time to add a new bad guy to the screen
if badtimer == 0:
# OK, it's time to add a new bad guy
# adding a bad guy from any y-coordinate from the right of the screen
# with boundaries
badguys.append([640, random.randint(50,430)])
# reduce time for bad guys to appear
badtimer=100-(badtimer1*2)
# check for another timer
if badtimer1>=35:
badtimer1=35
else:
badtimer1+=5
index=0
for badguy in badguys:
# remove bad guys if they went off-screen
if badguy[0]<-64:
badguys.pop(index)
# reduce bad guys x-position (move to the left)
badguy[0]-=5 # use this variable to modify bad guys speed
# blowing up castle
badrect=pygame.Rect(badguyimg.get_rect())
badrect.top=badguy[1]
badrect.left=badguy[0]
if badrect.left<64:
# hit castle sound
hit.play()
healthvalue -= random.randint(5,20)
badguys.pop(index)
# keeping track of current arrow
index1=0
# checking for collision between bad guys and arrows
for bullet in arrows:
bullrect=pygame.Rect(arrow.get_rect()) # arrow rect
bullrect.left=bullet[1] # arrow x position
bullrect.top=bullet[2] # arrow y position
# checking for collision between arrow and badguy
if badrect.colliderect(bullrect):
# enemy sound
enemy.play()
# a collision happened, increase hit count
accuracy[0]+=1
# removing bad guy and arrow from screen
badguys.pop(index)
arrows.pop(index1)
index1+=1
# keeping track of current bad guy
index+=1
# drawing bad guys
for badguy in badguys:
screen.blit(badguyimg, badguy)
def draw_clock():
"""Drawing a timer
"""
# creating a font with size
font = pygame.font.Font(None, 24)
# rendering a text containing the current time
survivedtext = font.render(
(str((90000-pygame.time.get_ticks())//60000)+
":"+str((90000-pygame.time.get_ticks())//1000%60).zfill(2)),
True,(0,0,0))
# retrieving rect for text
textRect = survivedtext.get_rect()
# positioning text on top right corner
textRect.topright=[635,5]
# drawing text onto the screen
screen.blit(survivedtext, textRect)
def draw_health():
"""Drawing health bar
"""
# referencing global variables
global healthbar, health, healthvalue
# drawing health bar
screen.blit(healthbar, (5,5))
for health1 in range(healthvalue):
# according to how much value left, draw health
screen.blit(health, (health1+8,8))
def check_for_end():
"""Checking for the end of game
"""
# referencing global variables
global running, exitcode, accuracy, gameover, accuracy_str
# check if game needs to end
if pygame.time.get_ticks()>=90000:
# time has elapsed
running=0
exitcode=1
if healthvalue<=0:
# player health is gone
running=0
exitcode=0
if accuracy[1]!=0:
accuracy_str=accuracy[0]*1.0/accuracy[1]*100
else:
accuracy_str=0
def end_game():
"""Ending game
"""
# referencing global variables
global accuracy_str, gameover, youwin
# check if player won/lost
if exitcode==0:
# player lost
pygame.font.init()
font = pygame.font.Font(None, 24) # creating font
# rendering text
text = font.render("Accuracy: "+str(accuracy_str)+"%", True, (255,0,0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
screen.blit(gameover, (0,0))
screen.blit(text, textRect) # adding text to screen
else:
# player won
pygame.font.init()
font = pygame.font.Font(None, 24) # creating font
# rendering text
text = font.render("Accuracy: "+str(accuracy_str)+"%", True, (0,255,0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
screen.blit(youwin, (0,0))
screen.blit(text, textRect) # adding text to screen
pygame.display.flip()
# giving user the ability to quit game
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
def game_events():
"""Checking for game events
"""
# referencing global variables
global keys, playerpos, accuracy, arrows, playerpos1, shoot
# loop through events
for event in pygame.event.get():
# check if the event is the X button
if event.type == pygame.QUIT:
# if it is, quit the game
pygame.quit()
exit(0)
# checking for key down keyboard events
if event.type == pygame.KEYDOWN:
if event.key == K_w: # 'w' key was pressed down
keys[0] = True
if event.key == K_a: # 'a' key was pressed down
keys[1] = True
if event.key == K_s: # 's' key was pressed down
keys[2] = True
if event.key == K_d: # 'd' key was pressed down
keys[3] = True
# checking for key up keyboard events
if event.type == pygame.KEYUP:
if event.key == K_w: # 'w' key was pressed up
keys[0] = False
if event.key == K_a: # 'a' key was pressed up
keys[1] = False
if event.key == K_s: # 's' key was pressed up
keys[2] = False
if event.key == K_d: # 'd' key was pressed up
keys[3] = False
# checking if mouse was clicked AKA an arrow was fired!
if event.type == pygame.MOUSEBUTTONDOWN:
# shoot sound
shoot.play()
position = pygame.mouse.get_pos() # mouse position
accuracy[1]+=1 # increase shots-fired count
# calculating the arrow rotation based on the rotated player
# position and the cursor position.
# This rotation value is stored in the arrows array.
# arrow = (angle, x, y)
arrows.append(
[math.atan2(
position[1]-(playerpos1[1]+32),
position[0]-(playerpos1[0]+26)),
playerpos1[0]+32,playerpos1[1]+32])
# updating player position based on which key was pressed
# AKA moving player
if keys[0]:
playerpos[1]-=5
elif keys[2]:
playerpos[1]+=5
if keys[1]:
playerpos[0]-=5
elif keys[3]:
playerpos[0]+=5
def game_loop():
"""Infinite game loop
"""
# referencing global variables
global screen, badtimer
# initializing global variables
global running, exitcode
running = 1 # used to determine if player wins or loses
exitcode = 0 # used to determine if game should be finished
# keep looping through the game
while running:
# clear screen before drawing it again
screen.fill(0)
draw_grass() # drawing grass
draw_castle() # drawing castle(s)
draw_player() # drawing player
draw_arrows() # drawing arrows
draw_bad_guys() # drawing bad guys
draw_clock() # drawing a clock
draw_health() # drawing health!
pygame.display.flip() # update the screen
game_events() # loading game events
# updating timer for bad guys to appear
badtimer-=1
# checking for end game
check_for_end()
# ending game
end_game()
if __name__ == "__main__":
main()
```
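The aiming math from `draw_player` in isolation, with made-up positions; 57.29 is degrees per radian, as in the game code:
```python
import math

playerpos = (100, 100)  # player sprite top-left
cursor = (220, 40)      # mouse position
angle = math.atan2(cursor[1] - (playerpos[1] + 32),
                   cursor[0] - (playerpos[0] + 26))
print(360 - angle * 57.29)  # rotation passed to pygame.transform.rotate
```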
|
{
"source": "jeury301/python-morsels",
"score": 4
}
|
#### File: python-morsels/14. lstrip/lstrip_solutions.py
```python
def v0_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning."""
stripped = []
is_beginning = True
for item in iterable:
if is_beginning:
if item != strip_value:
is_beginning = False
else:
continue
stripped.append(item)
return stripped
def v1_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning."""
stripped = []
is_beginning = True
for item in iterable:
if is_beginning and item == strip_value:
continue
is_beginning = False
stripped.append(item)
return stripped
def v2_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning."""
stripped = []
iterator = iter(iterable)
try:
item = next(iterator)
while item == strip_value:
item = next(iterator)
stripped.append(item)
except StopIteration:
pass
else:
for item in iterator:
stripped.append(item)
return stripped
def v3_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning."""
stripped = []
iterator = iter(iterable)
for item in iterator:
if not item == strip_value:
stripped.append(item)
break
for item in iterator:
stripped.append(item)
return stripped
from itertools import dropwhile
def v4_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning."""
stripped = []
def is_strip_value(value): return value == strip_value
for item in dropwhile(is_strip_value, iterable):
stripped.append(item)
return stripped
def v5_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning."""
def is_strip_value(value): return value == strip_value
return dropwhile(is_strip_value, iterable)
def v6_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning.
Bonus 1: We're supposed to make lstrip return an iterator.
"""
iterator = iter(iterable)
for item in iterator:
if item != strip_value:
yield item
break
for item in iterator:
yield item
def v6b_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning.
Same as v6, but hands the tail off with `yield from`.
"""
iterator = iter(iterable)
for item in iterator:
if item != strip_value:
yield item
break
yield from iterator
def v7_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning.
Bonus 2: we're supposed to optionally accept a function as our strip value
and call that function to determine whether values should be removed.
"""
iterator = iter(iterable)
for item in iterator:
if (callable(strip_value) and not strip_value(item)
or not callable(strip_value) and item != strip_value):
yield item
break
yield from iterator
def v8_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning."""
iterator = iter(iterable)
if callable(strip_value):
predicate = strip_value
else:
def predicate(value): return value == strip_value
for item in iterator:
if not predicate(item):
yield item
break
yield from iterator
def v9_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning."""
if callable(strip_value):
predicate = strip_value
else:
def predicate(value): return value == strip_value
return dropwhile(predicate, iterable)
```
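Behaviour of the final version, with outputs traced by hand:
```python
print(list(v9_lstrip([0, 0, 1, 0, 2, 0], 0)))             # [1, 0, 2, 0]
print(list(v9_lstrip([-4, -2, 3, -5], lambda n: n < 0)))  # [3, -5]
```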
#### File: python-morsels/17. deep_add/deep_add.py
```python
def deep_add(to_add, start=0):
"""Add up all numbers in a (possibly nested) iterable of numbers.
The running total is threaded through the recursion, so types that only
support addition with their own kind (e.g. datetime.timedelta) work as
long as a matching `start` value is supplied.
"""
total = start
for item in to_add:
if hasattr(item, '__iter__'):
total = deep_add(item, total)
else:
total = total + item
return total
if __name__ == '__main__':
print(deep_add([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
print(deep_add([1, [2, [3, 4], [], [[5, 6], [7, 8]]]]))
print(deep_add([[], []]))
```
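Threading the running total through the recursion is what makes non-numeric addable types work; a small sketch assuming the `deep_add` above:
```python
from datetime import timedelta

print(deep_add([timedelta(days=1), [timedelta(days=2)]], timedelta(0)))
# 3 days, 0:00:00
```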
#### File: python-morsels/17. deep_add/test_deep_add.py
```python
from decimal import Decimal
from datetime import timedelta
from fractions import Fraction
import unittest
from deep_add import deep_add
class DeepAddTests(unittest.TestCase):
"""Tests for deep_add."""
def test_shallow(self):
self.assertEqual(deep_add([1, 2, 3, 4]), 10)
def test_with_empty_lists(self):
self.assertEqual(deep_add([1, [2, 3, []], [], 4]), 10)
self.assertEqual(deep_add([]), 0)
def test_deeply_nested_iterables(self):
self.assertEqual(deep_add([[1, 2], [3, [4, [[[5]], 6]]]]), 21)
def test_non_numeric_types(self):
with self.assertRaises(TypeError):
deep_add([1, [2, None]])
def test_other_numeric_types(self):
self.assertEqual(deep_add([1.0, [3, 1.5]]), 5.5)
self.assertEqual(deep_add([1.0, [3j]]), 1+3j)
self.assertEqual(deep_add([Decimal('5.6'), 2]), Decimal('7.6'))
self.assertEqual(deep_add([[Fraction(1)], Fraction(2)]), Fraction(3))
# To test the Bonus part of this exercise, comment out the following line
@unittest.expectedFailure
def test_other_iterables(self):
numbers = [1, 2, 3, 4]
cubes_and_squares = ((n, (n**3, n**2)) for n in numbers)
self.assertEqual(deep_add(cubes_and_squares), 140)
self.assertEqual(deep_add([(1, 2), [3, {4, 5}]]), 15)
# To test the Bonus part of this exercise, comment out the following line
@unittest.expectedFailure
def test_start_value(self):
numbers = [1, 2, 3, 4]
self.assertEqual(deep_add(numbers, 0), 10)
self.assertEqual(deep_add(numbers, 1), 11)
self.assertEqual(deep_add(numbers, start=1), 11)
self.assertEqual(deep_add([[], []], start=-10), -10)
# To test the Bonus part of this exercise, comment out the following line
@unittest.expectedFailure
def test_pseudonumeric_types(self):
self.assertEqual(deep_add([timedelta(1)], timedelta(0)), timedelta(1))
class Num:
def __init__(self, val=0):
self.val = val
def __add__(self, other):
if isinstance(other, Num):
return Num(self.val + other.val)
else:
return Num(self.val + other)
__radd__ = __add__
def __eq__(self, other):
return self.val == other.val
self.assertEqual(deep_add([[Num(1)], Num(2)]), Num(3))
if __name__ == "__main__":
unittest.main()
```
#### File: python-morsels/1. numeric_range/test_numeric_range.py
```python
import unittest
from numeric_range import numeric_range
class NumericRangeTests(unittest.TestCase):
"""Tests for numeric_range."""
def test_ordered_numbers(self):
self.assertEqual(numeric_range([0, 1, 2, 3, 4]), 4)
def test_with_out_of_order_numbers(self):
self.assertEqual(numeric_range([10, 8, 7, 5.0, 3, 6, 2]), 8)
def test_single_item(self):
self.assertEqual(numeric_range([10]), 0)
def test_same_item_multiple_times(self):
self.assertEqual(numeric_range([8, 8, 8]), 0)
self.assertEqual(numeric_range([7, 5, 6, 5, 7]), 2)
def test_negative_numbers(self):
self.assertEqual(numeric_range([-10, -8, -7, -5, -3]), 7)
def test_mixed_types(self):
with self.assertRaises(TypeError):
numeric_range(['a', 2])
def test_very_large_numbers(self):
self.assertEqual(numeric_range([2**1000, -2**1000]), 2**1001)
# To test the Bonus part of this exercise, comment out the following line
@unittest.expectedFailure
def test_returns_zero_for_empty_list(self):
self.assertEqual(numeric_range([]), 0)
# To test the Bonus part of this exercise, comment out the following line
@unittest.expectedFailure
def test_with_non_lists(self):
self.assertEqual(numeric_range((89, 17, 70, 9)), 80)
self.assertEqual(numeric_range({8, 7, 5, 3, 9, 6, 2}), 7)
self.assertEqual(numeric_range(n**2 for n in range(1, 4)), 8)
self.assertEqual(numeric_range(n for n in []), 0)
if __name__ == "__main__":
unittest.main()
```
#### File: python-morsels/4. compact/compact.py
```python
def compact_0(iterable):
"""Return new iterable with adjacent duplicate values removed."""
deduped = []
previous = object()
for item in iterable:
if item != previous:
deduped.append(item)
previous = item
return deduped
def compact(iterable):
from itertools import groupby
return (
item
for item, group in groupby(iterable)
)
print(compact([None, None, 3, 4]))
```
#### File: python-morsels/6. matrix_from_string/matrix.py
```python
def matrix_from_string(mtx_str):
"""Turning a string into it's matrix representation.
This function accepts a string representation of a matrix, and turns it
into an actual matrix. This function uses \n as a delimiter for the rows,
and ignores any extra spaces.
Turned into a one-liner comprehension, it's equivalent was commented out.
Args:
mtx_str: A string representation of a matrix
Returns:
An actual matrix for the string
"""
# mtx_rows = mtx_str.split("\n")
# final_matrix = []
#
# for row in mtx_rows:
# if row.replace(" ", "") != "":
# mtx_row = [float(col) for col in row.strip().split(" ") if col != ""]
# final_matrix.append(mtx_row)
# return final_matrix
return ([[float(col) for col in row.strip().split(" ") if col != ""]
for row in mtx_str.split("\n") if row.replace(" ", "") != ""])
```
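Example input and output (the whitespace is deliberately messy):
```python
print(matrix_from_string("1 2 \n  3   4\n"))  # [[1.0, 2.0], [3.0, 4.0]]
```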
#### File: python-morsels/9. is_anagram/anagram_solutions.py
```python
def v0_is_anagram(word1, word2):
"""Return True if the given words are anagrams.
That won't work for words that have the same letters but they occur
a different number of times.
"""
return set(word1) == set(word2)
def count_letters(word):
letters = {}
for char in word:
letters.setdefault(char, 0)
letters[char] += 1
return letters
def v1_is_anagram(word1, word2):
"""Return True if the given words are anagrams.
Instead of set, we could make a function that accepts a string and
returns a dictionary of character counts for the string.
"""
return count_letters(word1) == count_letters(word2)
from collections import Counter
def v2_is_anagram(word1, word2):
"""Return True if the given words are anagrams.
Using Counter which does exactly the same as count_letters.
"""
return Counter(word1) == Counter(word2)
def v3_is_anagram(word1, word2):
"""Return True if the given words are anagrams.
Using the built-in python function - sorted.
"""
return sorted(word1) == sorted(word2)
def v4_is_anagram(word1, word2):
"""Return True if the given words are anagrams.
Normalizing the words by lower-casing them.
"""
word1, word2 = word1.lower(), word2.lower()
return sorted(word1) == sorted(word2)
def v5_is_anagram(word1, word2):
"""Return True if the given words are anagrams.
Solving the first bonus: make sure we ignore spaces.
"""
word1, word2 = word1.lower(), word2.lower()
return Counter(word1.replace(' ', '')) == Counter(word2.replace(' ', ''))
def v6_is_anagram(word1, word2):
"""Return True if the given words are anagrams.
Solving the second bonus: ignoring extra stuff.
"""
word1, word2 = word1.lower(), word2.lower()
alphabet = 'abcdefghijklmnopqrstuvwxyz'
letters1 = sorted(c for c in word1 if c in alphabet)
letters2 = sorted(c for c in word2 if c in alphabet)
return letters1 == letters2
def v7_is_anagram(word1, word2):
"""Return True if the given words are anagrams.
Replacing the alphabet.
"""
word1, word2 = word1.lower(), word2.lower()
letters1 = sorted(c for c in word1 if c.isalpha())
letters2 = sorted(c for c in word2 if c.isalpha())
return letters1 == letters2
def letters_in(string):
"""Return sorted list of letters in given string."""
return sorted(
char
for char in string.lower()
if char.isalpha()
)
def v8_is_anagram(word1, word2):
"""Return True if the given words are anagrams.
Moving sorted(...) into a function - for clean up purposes
"""
return letters_in(word1) == letters_in(word2)
def count_letters(string):
"""Return sorted list of letters in given string."""
return Counter(
char
for char in string.lower()
if char.isalpha()
)
def v9_is_anagram(word1, word2):
"""Return True if the given words are anagrams.
Using Counter instead of sorted.
"""
return count_letters(word1) == count_letters(word2)
import unicodedata
def remove_accents(string):
"""Return decomposed form of the given string."""
return unicodedata.normalize('NFKD', string)
def letters_in(string):
"""Return sorted list of letters in given string."""
string = remove_accents(string.lower())
return sorted(
char
for char in string
if char.isalpha()
)
def v10_is_anagram(word1, word2):
"""Return True if the given words are anagrams.
Last bonus: removing accents.
"""
return letters_in(word1) == letters_in(word2)
```
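The NFKD step separates base letters from combining accents, and `isalpha()` then drops the accents, so accented and plain spellings compare equal:
```python
print(v10_is_anagram("Déjà vu", "jade uv"))  # True
print(v10_is_anagram("tea", "eats"))         # False
```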
|
{
"source": "jeury301/text-classifier",
"score": 3
}
|
#### File: classifiers/samples/loader.py
```python
import csv
def load_data(path):
"""Loads data from csv file.
:param path: Path to file.
:returns: List of tuples containing the training data
"""
data = []
with open(path, 'r') as dt:
d_csv = csv.reader(dt, delimiter=',')
for row in d_csv:
data.append((row[1],row[0]))
return data[1:]
```
#### File: feature-engineering/samples/statistical_features.py
```python
from sklearn.feature_extraction.text import TfidfVectorizer
def compute_tf_idf(corpus):
"""Computing term frequency (tf) - inverse document frequency (idf).
:param corpus: List of documents.
:returns: tf-idf of corpus.
"""
return TfidfVectorizer().fit_transform(corpus)
if __name__ == '__main__':
sample_corpus = [
'This is sample document.',
'another random document.',
'third sample document text'
]
print(compute_tf_idf(sample_corpus))
```
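A short sketch of inspecting the fitted vectorizer, reusing the sample corpus above; `vocabulary_` is scikit-learn's learned term-to-index mapping:
```python
vec = TfidfVectorizer()
X = vec.fit_transform(sample_corpus)
print(X.shape)                  # (3, number of unique terms)
print(sorted(vec.vocabulary_))  # the learned terms
```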
#### File: nlp_problems/samples/text_matching.py
```python
import fuzzy
import math
from collections import Counter
def get_cosine(vec1, vec2):
"""Computing cosine similarities between 2 vectors.
Code from: https://www.analyticsvidhya.com/blog/2017/01/ultimate-guide-to-understand-implement-natural-language-processing-codes-in-python/
:param vec1: Vector representation of first string.
:param vec2: Vector representation of second string.
:returns: Cosine similarity between the two vectors.
"""
common = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in common])
sum1 = sum([vec1[x]**2 for x in vec1.keys()])
sum2 = sum([vec2[x]**2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(text):
words = text.split()
return Counter(words)
def phonetic_matching(s1, s2):
"""Computing the phonetic sound of 2 strings and using LD to compute its
similarity.
:param s1: First string.
:param s2: A second string.
:returns: The LD between the 2 string phonetic representations.
"""
soundex = fuzzy.Soundex(4)
return levenshtein_distance(soundex(s1),soundex(s2))
def levenshtein_distance(s1, s2):
"""Computing the Levenshtein Distance between two strings.
The Levenshtein Distance consists of the minimum number of edits needed to
transform one string into the other.
Code from: https://www.analyticsvidhya.com/blog/2017/01/ultimate-guide-to-understand-implement-natural-language-processing-codes-in-python/
:param s1: First string.
:param s2: A second string.
:returns: The LD between the 2 strings.
"""
if len(s1) > len(s2):
s1,s2 = s2,s1
distances = range(len(s1) + 1)
for index2,char2 in enumerate(s2):
newDistances = [index2+1]
for index1,char1 in enumerate(s1):
if char1 == char2:
newDistances.append(distances[index1])
else:
newDistances.append(1 + min((distances[index1],
distances[index1+1], newDistances[-1])))
distances = newDistances
return distances[-1]
if __name__ == '__main__':
while True:
s1 = input("> Enter string 1: ")
s2 = input("> Enter string 2: ")
print("LD Phonetic Distance", phonetic_matching(s1, s2))
print("Cosine similarity", get_cosine(text_to_vector(s1), text_to_vector(s2)))
```
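Small checks traced by hand:
```python
print(levenshtein_distance("analyze", "analyse"))  # 1 (one substitution)
v1 = text_to_vector("this is a foo bar")
v2 = text_to_vector("foo bar baz")
print(round(get_cosine(v1, v2), 3))  # 0.516
```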
#### File: preprocessing/samples/noise_removal.py
```python
def noise_removal_w_list(input_text):
"""Removing noise from text using a list of noise words.
:param input_text: Noisy text.
:return: Clean text.
"""
# noise to be removed
noise_words = ["is", "a", "this", "..."]
# converting sentence into words
words = input_text.split()
# noise-free words
noise_free_words = [word for word in words if word not in noise_words]
# building noise-free text
return " ".join(noise_free_words)
def noise_removal_w_regex(input_text):
"""Removing noise from using using regular expression.
:param input_text: Noisy text.
:return: Clean text.
"""
import re
# pattern to remove hashtagged words, e.g. #this
r_pattern = "#[\w]*"
# matches iterator
matches = re.finditer(r_pattern, input_text)
for i in matches:
# removing each match from original input text
input_text = re.sub(i.group().strip(),'', input_text)
# removing inner extra spaces
return re.sub(' +',' ',input_text)
if __name__ == '__main__':
while True:
text = input("> ")
print("noise-removal-with-list", noise_removal_w_list(text))
print("noise-remove-with-regex", noise_removal_w_regex(text))
```
#### File: preprocessing/samples/object_standarization.py
```python
def look_up_words(input_text):
"""Replacing social media slangs with more standarized words.
:param input_text: Slanged social media text.
:return: Standarized text.
"""
# look-up dictionary
social_media_look_up = {
'rt':'Retweet',
'dm':'direct message',
"awsm" : "awesome",
"luv" :"love"
}
# words in sentence
words = input_text.split()
# standardize each word using the look-up table
return " ".join([social_media_look_up.get(word.lower(),word) for word in words])
if __name__ == '__main__':
while True:
text = input("> ")
print("standarized-text",look_up_words(text))
```
|
{
"source": "jeusgao/jobot_factory_nlp_simple",
"score": 2
}
|
#### File: jobot_factory_nlp_simple/gui/embeded_configuration.py
```python
import os
from .components import get_default
def embed_conf(c2=None, _dic=None, _embeded_model_params=None, _base_model_params=None, DIC_Models=None, _key=None):
_embeded_model_value_params = _embeded_model_params.get(_key) if _embeded_model_params else None
_dic_value = {}
_options, _default = get_default(_embeded_model_value_params, DIC_Models, 'model_type')
_dic_value['model_type'] = c2.selectbox('model type', _options, _options.index(_default))
_options, _default = get_default(_embeded_model_value_params, _base_model_params, 'base')
_dic_value['base'] = c2.selectbox('base model', _options, _options.index(_default))
_dic[_key] = _dic_value
_dic = {k: v for k, v in sorted(_dic.items(), key=lambda x: x[0])}
return _dic
def pretrained_conf(c2=None, _key=None, _dic=None, _pretrained_model_params=None, DIC_Bases=None):
_pretrained_model_value_params = _pretrained_model_params.get(_key) if _pretrained_model_params else None
_dic_value = {}
_options, _default = get_default(_pretrained_model_value_params, DIC_Bases, 'base_code')
_dic_value['base_code'] = c2.selectbox('base type', _options, _options.index(_default))
_options = tuple(os.walk('hub/bases'))[0][1]
_default = _options[0]
if _pretrained_model_value_params:
_default = _pretrained_model_value_params.get('base_params').get('fn_config').split('/')[-2]
_base_path = c2.selectbox('base model', _options, _options.index(_default))
_dic_value['base_params'] = {
'fn_config': f'hub/bases/{_base_path}/bert_config.json',
'fn_base_model': f'hub/bases/{_base_path}/bert_model.ckpt',
'training': c2.radio('is base training', [True, False], index=1),
'trainable': c2.radio('is base trainable', [True, False], index=0),
}
_dic[_key] = _dic_value
_dic = {k: v for k, v in sorted(_dic.items(), key=lambda x: x[0])}
return _dic
```
#### File: jobot_factory_nlp_simple/gui/layer_configuration.py
```python
import os
from .components import get_default
def inputs_conf(c2, st, _default, _inputs, _dic_inputs_types):
_num_inputs = c2.number_input('number of inputs:', min_value=0, max_value=20, value=_default, step=1)
ls = []
for i in range(_num_inputs):
with c2.beta_expander(f"Input No. {i}"):
_options = ['Pretrained', 'Embeded', 'Layer']
_default = _inputs[i].get('inputs_type', 'Pretrained') if i < len(
_inputs) and _inputs[i] else _options[0]
_inputs_type = st.selectbox(
'select inputs source',
_options,
_options.index(_default),
key=f'inputs source_{i}'
)
_dic_inputs_type = _dic_inputs_types.get(_inputs_type)
_options = list(_dic_inputs_type.keys()) if _dic_inputs_type else ['']
_cur = _inputs[i].get('inputs') if i < len(_inputs) else None
_default = _options.index(_cur) if i < len(_inputs) and _cur and _cur in _options else 0
_layer_inputs = st.selectbox(
'select inputs',
_options,
_default,
key=f'inputs_{i}',
)
if len(_layer_inputs.strip()):
ls.append({'inputs_type': _inputs_type, 'inputs': _layer_inputs})
return ls
def layer_conf(c2, st, _dic, _dic_inputs_types, _key, _model_layer_params, DIC_Layers, _options=None):
_dic_value = {}
_model_layer_value_params = _model_layer_params.get(_key) if _model_layer_params else None
_options, _default = get_default(_model_layer_value_params, DIC_Layers, 'layer')
_dic_value['layer'] = c2.selectbox('model layer', _options, _options.index(_default))
_default = _model_layer_value_params.get('params', '') if _model_layer_value_params else None
_layer_params = c2.text_input('layer params', _default)
if not len(_layer_params.strip()) or _layer_params == 'None':
_layer_params = None
if _layer_params:
if '{' in _layer_params or '[' in _layer_params:
try:
_dic_value['params'] = eval(_layer_params)
except Exception as err:
c2.error(f'{err}, Check your input please...')
else:
_dic_value['params'] = _layer_params
_inputs = _model_layer_value_params.get('layer_inputs', []) if _model_layer_value_params else []
_default = len(_inputs) if _inputs else 0
ls = inputs_conf(c2, st, _default, _inputs, _dic_inputs_types)
_dic_value['layer_inputs'] = ls
_dic[_key] = _dic_value
_dic = {k: v for k, v in sorted(_dic.items(), key=lambda x: x[0])}
return _dic
def add_layer(c2, st, _dic_inputs_types, _model_layer_params, DIC_Layers):
_key = c2.text_input('Input a new name:', '').strip()
if not len(_key):
return False, 'Input a name please.'
if _model_layer_params and _key in _model_layer_params:
        return False, f'Duplicated name - [{_key}].'
_dic = {k: v for k, v in _model_layer_params.items()} if _model_layer_params else {}
_dic = layer_conf(c2, st, _dic, _dic_inputs_types, _key, _model_layer_params, DIC_Layers)
return True, _dic
```
#### File: jeusgao/jobot_factory_nlp_simple/init_params.py
```python
import os
import json
from utils import (
DIC_DataLoaders,
DIC_Resolvers,
)
from modules import (
DIC_Funcs,
DIC_Inits,
DIC_Losses,
DIC_Metrics,
DIC_Layers,
DIC_Bases,
DIC_Models,
DIC_Optimizers,
DIC_Tokenizers,
DIC_Generators_for_train,
DIC_Generators_for_pred,
)
def _get_dic(_dic):
_d = {}
for k, v in _dic.items():
_d[k] = {}
_d[k]['func'] = ''
if isinstance(v, dict) and v.get('params'):
_d[k]['params'] = v.get('params')
return _d
_dics = {
'DIC_Funcs': _get_dic(DIC_Funcs),
'DIC_Inits': _get_dic(DIC_Inits),
'DIC_Losses': _get_dic(DIC_Losses),
'DIC_Metrics': _get_dic(DIC_Metrics),
'DIC_Layers': _get_dic(DIC_Layers),
'DIC_Bases': _get_dic(DIC_Bases),
'DIC_Optimizers': _get_dic(DIC_Optimizers),
'DIC_Tokenizers': _get_dic(DIC_Tokenizers),
'DIC_DataLoaders': _get_dic(DIC_DataLoaders),
'DIC_Generators_for_train': _get_dic(DIC_Generators_for_train),
'DIC_Generators_for_pred': _get_dic(DIC_Generators_for_pred),
'DIC_Resolvers': _get_dic(DIC_Resolvers),
'DIC_Models': _get_dic(DIC_Models),
}
def env_init():
with open('params_templates.json', 'w') as f:
json.dump(_dics, f, ensure_ascii=False, indent=2)
if not os.path.exists('hub/bases'):
        os.makedirs('hub/bases')
if not os.path.exists('hub/models'):
os.makedirs('hub/models')
if not os.path.exists('data'):
os.mkdir('data')
if __name__ == '__main__':
env_init()
print('System initialized.')
```
#### File: jobot_factory_nlp_simple/modules/embedding.py
```python
import os
import codecs
import numpy as np
from keras_bert import load_trained_model_from_checkpoint
from backend import keras
from modules import tokenizer_zh
class EmbedModel(object):
def __init__(self, dict_path, config_path, checkpoint_path, maxlen=32):
self.maxlen = maxlen
self.token_dict, self.tokenizer = tokenizer_zh(fn_vocab=dict_path)
base = load_trained_model_from_checkpoint(
config_path, checkpoint_path,
seq_len=maxlen,
training=False,
trainable=False,
)
self.model = keras.models.Model(base.input, base.output)
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
def get_embed(self, texts):
X, X_seg = [], []
for t in texts:
x, x_seg = self.tokenizer.encode(t[:self.maxlen - 2])
X.append(x)
X_seg.append(x_seg)
X = keras.preprocessing.sequence.pad_sequences(X, value=0, padding='post', maxlen=self.maxlen)
X_seg = keras.preprocessing.sequence.pad_sequences(X_seg, value=0, padding='post', maxlen=self.maxlen)
embs = self.model.predict([X, X_seg])
return embs
```
#### File: jeusgao/jobot_factory_nlp_simple/predictor.py
```python
import os
import pickle
from builders import model_builder
from modules import DIC_Generators_for_pred
from utils import DIC_Resolvers, task_init
from backend import V_TF
def _get_params(
model_bases_params=None,
model_common_params=None,
model_embeded_params=None,
model_inputs_params=None,
model_layer_params=None,
model_outputs_params=None,
model_optimizer_params=None,
params_data=None,
params_train=None,
params_pred=None,
):
return model_bases_params, model_common_params, model_embeded_params, model_inputs_params, model_layer_params, model_outputs_params, model_optimizer_params, params_data, params_train, params_pred
class Predictor(object):
def __init__(self, api_name):
task_path = f'hub/models/{api_name}'
fn_model, dic_task_params = task_init(task_path, is_train=False)
model_bases_params, model_common_params, model_embeded_params, model_inputs_params, model_layer_params, model_outputs_params, model_optimizer_params, params_data, params_train, params_pred = _get_params(
**dic_task_params)
if model_common_params.TF_KERAS == 1:
os.environ["TF_KERAS"] = '1'
self.labeler = None
self.maxlen = model_common_params.maxlen
self.ML = model_common_params.ML
self.is_pair = model_common_params.is_pair
self.activation = params_data.activation
self.is_sequence = params_data.is_sequence
fn_labeler = params_data.fn_labeler
if fn_labeler:
fn_labeler = f'{task_path}/{fn_labeler}'
if os.path.exists(fn_labeler):
self.labeler = pickle.load(open(fn_labeler, 'rb'))
self.data_generator = DIC_Generators_for_pred.get(params_data.data_generator_for_pred).get('func')
self.resolver = DIC_Resolvers.get(params_pred.resolver).get('func')
params_model = {}
params_model['maxlen'] = self.maxlen
params_model['ML'] = self.ML
params_model['tokenizer_code'] = 'tokenizer_zh'
params_model['tokenizer_params'] = {'fn_vocab': 'hub/bases/rbtl3/vocab.txt'}
params_model['obj_common'] = model_common_params
params_model['dic_bases'] = model_bases_params
params_model['dic_embeds'] = model_embeded_params
params_model['list_inputs'] = model_inputs_params
params_model['dic_layers'] = model_layer_params
params_model['dic_outputs'] = model_outputs_params
params_model['obj_optimizer'] = model_optimizer_params
self.tokenizer, self.token_dict, self.model = model_builder(is_predict=True, **params_model)
# self.model.summary()
self.model.load_weights(fn_model)
def predict(self, inputs, from_api=True):
if self.is_pair:
if len(inputs) < 2:
return {'result': 'Not enough inputs.'}
elif len(inputs) > 1:
inputs = ['.'.join(inputs)]
if len(inputs) < 1:
return {'result': 'Inputs invalid.'}
data_input = self.data_generator(
data=inputs,
tokenizer=self.tokenizer,
token_dict=self.token_dict,
maxlen=self.maxlen,
ML=self.ML,
is_sequence=self.is_sequence,
)
pred = self.model.predict(data_input)
rst = self.resolver(pred, inputs, from_api=from_api, activation=self.activation,
labeler=self.labeler, is_sequence=self.is_sequence)
return rst
models = tuple(os.walk('hub/models'))[0][1]
DIC_Predictors = {
k: Predictor(k) for k in models if os.path.exists(f'hub/models/{k}/model.h5')
}
def main(api_name, input1, input2=None, from_api=True):
if not len(input1.strip()):
return {'result': 'Empty input(s).'}
inputs = [input1]
if input2 and len(input2.strip()):
inputs.append(input2)
predictor = DIC_Predictors.get(api_name)
rst = predictor.predict(inputs, from_api=from_api)
return rst
```
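A hedged usage sketch of the module above. Note that importing `predictor` eagerly builds `DIC_Predictors` from whatever trained models exist under `hub/models`; the api name below is hypothetical:
```python
from predictor import main

# input2 is optional; it is only consumed by paired (e.g. sentence-pair) models
result = main('my_api', 'some input text')
print(result)
```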
#### File: jobot_factory_nlp_simple/utils/resolvers.py
```python
import os
import numpy as np
def _get_output(word, kw_type, pos_start):
return {
'oriFrag': word,
'type': kw_type,
'beginPos': pos_start,
'endPos': pos_start + len(word)
}
def _get_label_sequence(id2label, preds, text):
output = []
    word, kw_type, pos_start = '', '', 0
for i, p in enumerate(preds):
l = id2label.get(p, 'O')
if len(l) > 1:
if 'B-' in l:
if word:
output.append(_get_output(word, kw_type, pos_start))
word = ''
pos_start = i
kw_type = l[2:]
word = text[i]
if 'I-' in l:
kw_type = l[2:]
word += text[i]
else:
if word:
output.append(_get_output(word, kw_type, pos_start))
word = ''
if len(word):
output.append(_get_output(word, kw_type, pos_start))
return output
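# Illustrative sketch (not part of the original module) of the BIO decoding
# implemented above, with hypothetical label ids:
#   id2label = {1: 'B-LOC', 2: 'I-LOC'}
#   _get_label_sequence(id2label, [1, 2, 0], '北京x')
#   -> [{'oriFrag': '北京', 'type': 'LOC', 'beginPos': 0, 'endPos': 2}]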
def _resolve_sequence(
pred,
text,
id2label=None,
):
text = text[0]
print(pred[0].argmax(axis=-1).tolist()[:len(text)])
_max_ner = pred[0].argmax(axis=-1).tolist()[:len(text)]
rst_ner = _get_label_sequence(id2label, _max_ner, text)
return rst_ner
def resolve(pred, text, from_api=True, activation='sigmoid', labeler=None, is_sequence=False, threshold=0.7):
rst = None
score = None
if is_sequence:
id2label = None
if labeler:
id2label = {v: k for k, v in labeler.items()}
else:
return {'result': 'Labeler not found.'}
rst = _resolve_sequence(
pred,
text,
id2label=id2label,
)
else:
if activation == 'sigmoid':
pred = np.asarray(pred).reshape(-1)
rst = 0 if pred[0] < threshold else 1
score = float(pred[0])
else:
rst = int(pred.argmax(-1)[0])
score = float(np.asarray(pred).reshape(-1)[rst])
if labeler:
rst = labeler.get(rst, 0)
return {'result': rst, 'score': score}
def resolve_spo(pred, text, from_api=True, **params):
text = text[0]
pred_words = pred[0][0].argmax(axis=-1).tolist()[:len(text)]
words, word, pos = {}, '', 0
for i, p in enumerate(pred_words):
if p > 0:
if p == 1:
pos = i
if len(word):
words[i - len(word)] = word
word = text[i]
else:
word += text[i]
elif len(word):
words[pos] = word
word = ''
if len(word) > 0:
words[pos] = word
pred_rels = pred[1][0].argmax(axis=-1).tolist()
rels = []
for i, (_max, scores) in enumerate(zip(pred_rels[:len(text)], pred[1][0][:len(text)])):
for j, m in enumerate(_max):
if m > 0:
if not j == i and words.get(i) and words.get(j):
_score = scores[j, m].tolist()
rels.append({
'from_word': words.get(j),
'from_pos': j,
'to_word': words.get(i),
'to_pos': i,
'score': _score,
'tensors': {} if from_api else {
# 'object': np.mean(pred[2][0][j:j + len(words.get(j))], axis=0).tolist(),
# 'subject': np.mean(pred[2][0][i:i + len(words.get(i))], axis=0).tolist(),
'object': pred[2][0][j:j + len(words.get(j))].tolist(),
'subject': pred[2][0][i:i + len(words.get(i))].tolist(),
}
})
return {'text': text, 'words': words, 'rels': rels}
```
|
{
"source": "jeuvreyl/viewer-django",
"score": 2
}
|
#### File: viewer/contents/views.py
```python
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, UpdateView
from .models import UploadedFile
from braces.views import LoginRequiredMixin
class UploadedFileDetail(LoginRequiredMixin, DetailView):
model = UploadedFile
slug_field = 'slug'
slug_url_kwarg = 'slug'
class UploadedFileUpdate(LoginRequiredMixin, UpdateView):
model = UploadedFile
fields = ['file_name', ]
# send the user back to the detail page after a successful update
def get_success_url(self):
return reverse("contents:detail",
kwargs={"slug": self.object.slug})
class UploadedFileList(LoginRequiredMixin, ListView):
model = UploadedFile
slug_field = 'slug'
slug_url_kwarg = 'slug'
```
#### File: users/tests/test_models.py
```python
from test_plus.test import TestCase
class TestUser(TestCase):
def setUp(self):
user = self.make_user()
user.first_name = 'first_name'
user.last_name = 'last_name'
self.user = user
def test__str__(self):
self.assertEqual(
self.user.__str__(),
"testuser" # This is the default username for self.make_user()
)
def test_get_absolute_url(self):
self.assertEqual(
self.user.get_absolute_url(),
'/users/testuser/'
)
def test_get_short_name(self):
self.assertEqual(
self.user.get_short_name(),
'last_name'
)
def test_get_full_name(self):
self.assertEqual(
self.user.get_full_name(),
'first_name last_name'
)
```
|
{
"source": "jeuxcing/j5e",
"score": 3
}
|
#### File: j5e/game/DummyGame.py
```python
from j5e.hardware.led_strip import Grid, GridDims
from threading import Thread
import time
class Game(Thread):
def __init__(self, grid):
super().__init__()
self.grid = grid
self.ended = False
def run(self):
while not self.ended:
for line_idx in range(Grid.SIZE):
for seg_idx in range(Grid.SIZE-1):
for led_idx in range(24):
if self.ended:
break
self.grid.set_color(GridDims.ROW, line_idx, seg_idx, led_idx, (0, 40, 0))
self.grid.set_color(GridDims.COL, line_idx, seg_idx, led_idx, (0, 40, 0))
self.grid.set_color(GridDims.RING, line_idx, seg_idx, led_idx//2, (0, 40, 0))
time.sleep(0.3)
self.grid.set_color(GridDims.ROW, line_idx, seg_idx, led_idx, (0, 0, 0))
self.grid.set_color(GridDims.COL, line_idx, seg_idx, led_idx, (0, 0, 0))
self.grid.set_color(GridDims.RING, line_idx, seg_idx, led_idx//2, (0, 0, 0))
def stop(self):
self.ended = True
```
#### File: j5e/game/Timer.py
```python
from threading import Thread
from multiprocessing import Event
from j5e.game.Agents import Lemming
class Timer(Thread):
def __init__(self):
Thread.__init__(self)
self.stopped = Event()
        self.elements = []
    def add(self, el):
        self.elements.append(el)
    def run(self):
        i = 0
        while not self.stopped.wait(0.2):
            print(i, ' : ')
            # iterate over a copy so that elements can be removed mid-loop
            for element in list(self.elements):
                element.go()
                if isinstance(element, Lemming) and not element.active:
                    self.elements.remove(element)
            # stop once no Lemming agents remain
            if not any(isinstance(ele, Lemming) for ele in self.elements):
                self.stopped.set()
            i += 1
```
|
{
"source": "jevad/pythonMonteCarloPi",
"score": 4
}
|
#### File: jevad/pythonMonteCarloPi/pi_parallel.py
```python
from random import uniform
import sys
import multiprocessing as mp
import timeit
"""
Recall from elementary geometry that the area of the unit circle (the
circle of radius 1) is pi. We calculate pi by using Ulam's Monte Carlo
method to calculate the area of the unit circle. For more information on
the Monte Carlo method, the Wikipedia article, as of this date, is good:
https://en.wikipedia.org/wiki/Monte_Carlo_method .
Our algorithm uses the Python standard library random number
generator, random. Python random uses a Mersenne Twister algorithm
to generate pseudorandom numbers. The Mersenne Twister is a good
algorithm for our purposes.
The number of iterations run is user-adjustable. The more iterations
run, the more accurate the result will be.
The number of processes to be used is also user adjustable. Using more
or fewer processes will not affect the result, but will change
performance characteristics.
"""
def count_is_in_circle(iteration_count):
"""
We count the number of random points from the unit square, running
from -1 to 1 (inclusive, horizontally and vertically), that also fall
in the unit circle. That value is returned.
iteration_count is the number of iterations that are run.
Our algorithm uses the Python standard library random number
generator, random. See the module docs.
"""
# We are using the map-reduce pattern here. Per usual Python style,
# we use a generator (lazy list comprehension) instead of map(), and
# sum() instead of a reduce() on addition. Thus, we create a
# generator with the result (1 or 0) for each iteration, then sum up
# the results contained in the generator.
#
# Our algorithm: For each of a given number of iterations, determine
# if a random point in the [-1, 1] square is in the unit circle or
# not. If it is in the circle, it contributes 1 to a sum, the total
# number of randompoints in the unit circle.
def one_if_in_circle(x, y):
"""
Given a coordinate, x, y, we return 1 if the coordinate is in the
unit circle; otherwise, we return 0. Points on the circle are
considered in the circle.
"""
return 1 if (x*x + y*y) <= 1 else 0
return (sum(
(one_if_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))
for i in range(iteration_count)) ))
def calc_pi(iteration_count, cores_usage):
"""
We calculate pi using Ulam's Monte Carlo method. See the module
documentation. The calculated value of pi is returned.
We use a process pool to offer the option of spreading the
calculation across more then one core.
iteration_count is the number of iterations that are run.
cores_usage is the number of processes to use.
"""
# We're using a multiprocessing pool here, to take advantage of
# multi-core CPUs.
# Calculate stuff for the pool.
pool_size = cores_usage
iterations_per_process = iteration_count // pool_size
work_list = [iterations_per_process] * pool_size
work_list[0] += iteration_count % pool_size
# Set up the pool.
calc_pool = mp.Pool(pool_size)
# Use the pool to obtain random points in the unit circle.
# We'll let the system determine the chunk size.
in_circle_total = sum(calc_pool.map(
        count_is_in_circle,
work_list))
# Finish the calculation. in_circle_total, divided by the total
# number of iterations, is the area of the unit circle
# relative to the [-1, 1] square. Multiply by 4, which is the area
# of the [-1, 1] square, to get the area of the unit circle.
# .NOTE. If you modify this program to run in Python 2.7, remember
# to modify this calculation to use floating point division (or
# import division from future).
return 4 * in_circle_total / iteration_count
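# Hedged usage sketch (not part of the original module). The __main__ guard
# matters because multiprocessing may re-import this module in worker processes:
#   if __name__ == "__main__":
#       print(calc_pi(1_000_000, 2))  # ~3.14; accuracy grows with iterations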
def calc_pi_and_time_it_too(count, cores_usage):
"""
This is like calling calc_pi, but it also returns the elapsed time
in a tuple:
(calculated value of pi, elapsed time).
iteration_count is the number of iterations that are run.
cores_usage is the number of processes to use.
"""
# Q. Why is this so complicated?
# A. The way Python handles time is inconsistent. Rather than fixing
# that problem, the powers that be in the Python world created
# a new standard library class, Timeit, to workaround the problem
# that they should have fixed. However, Timeit is poorly designed,
# so we have to jump through some hoops to get it to do what we
# want it to do which is time a function call that returns a result
# without also timing I/O.
result = 0
def wrapper():
nonlocal result
result = calc_pi(count, cores_usage)
etim = timeit.timeit(wrapper, number=1)
return (result, etim)
if __name__ == "__main__":
"""
The first argument is the number of iterations to run -- make it a
positive integer. If no first argument is given a default of 1000000
is used.
The second argument is the number of CPU cores to use, which should
be a positive integer. If no argument is given, then the value
returned by max(1, multiprocessing.cpu_count() - 1) is used.
"""
count = int(sys.argv[1]) if (sys.argv and 2 <= len(sys.argv)) else 1000000
cores_usage = (int(sys.argv[2]) if (sys.argv and 3 <= len(sys.argv))
else max(1, mp.cpu_count() - 1))
(result, etim) = calc_pi_and_time_it_too(count, cores_usage)
print("PI ({}): {}".format(count, result))
print(" elapsed ({}): {}".format(cores_usage, etim));
```
|
{
"source": "jevancc/ece276a-color-segmentation",
"score": 3
}
|
#### File: ece276a-color-segmentation/tools/labeltool.py
```python
import os
import cv2
import argparse
import numpy as np
from matplotlib import pyplot as plt
from roipoly import MultiRoi
parser = argparse.ArgumentParser(description='Label stop sign image')
parser.add_argument('-i',
nargs=1,
help='input image path',
dest='input',
required=True)
args = parser.parse_args()
IMG_FILE = args.input[0]
img = cv2.imread(IMG_FILE)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
COLORS = [('COLOR_STOP_SIGN_RED', 'red'), ('COLOR_OTHER_RED', 'orangered'),
('COLOR_BROWN', 'brown'), ('COLOR_ORANGE', 'orange'),
('COLOR_BLUE', 'cyan'), ('COLOR_OTHER', 'black')]
rois = []
color_pixels = {}
all_color_mask = np.zeros(img.shape[:-1])
def prompt_is_ok(msg):
print(msg, end='')
answer = input()
return answer == 'y'
for color, roi_color in COLORS:
is_ok = False
if not prompt_is_ok(f'Do you want to label color {color}? [y/n]: '):
color_pixels[color] = np.array([])
continue
while not is_ok:
print(f'Labeling color {color} ...')
fig = plt.figure()
plt.imshow(img, interpolation='nearest', cmap='Greys')
plt.title(f'Add ROI for Color:{color}')
multiroi = MultiRoi(color_cycle=(roi_color,))
tmask = np.zeros(img.shape[:-1])
for name, roi in multiroi.rois.items():
mask = roi.get_mask(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY))
tmask += mask
masked_img = img.copy()
masked_img[tmask == 0, :] = 0
plt.imshow(masked_img)
plt.show()
is_ok = prompt_is_ok(f'Is color {color} labeled correctly? [y/n]: ')
if not is_ok:
print(f'Please label color {color} again.')
else:
rois.extend(multiroi.rois.values())
pixels = img[tmask != 0, :].reshape(-1, 3)
color_pixels[color] = pixels
data = {**color_pixels}
if prompt_is_ok(f'Do you want to label stop signs region? [y/n]: '):
is_ok = False
while not is_ok:
print(f'Labeling stop signs ...')
fig = plt.figure()
plt.imshow(img, interpolation='nearest', cmap='Greys')
plt.title(f'Add ROI for stop signs')
multiroi = MultiRoi(color_cycle=('g',))
tmask = np.zeros(img.shape[:-1])
for name, roi in multiroi.rois.items():
mask = roi.get_mask(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY))
tmask += mask
masked_img = img.copy()
masked_img[tmask == 0, :] = 0
plt.imshow(masked_img)
plt.show()
is_ok = prompt_is_ok(f'Are stop signs labeled correctly? [y/n]: ')
if not is_ok:
print(f'Please label stop signs again.')
else:
rois.extend(multiroi.rois.values())
tmask[tmask != 0] = 1
stop_sign_mask = tmask
stop_sign_roi = multiroi
data['MASK_STOP_SIGN'] = stop_sign_mask
else:
data['MASK_STOP_SIGN'] = np.zeros(img.shape[:-1])
img_name = os.path.splitext(IMG_FILE)[0]
plt.figure()
plt.imshow(img)
for roi in rois:
roi.display_roi()
plt.axis('off')
plt.savefig(f'{img_name}-roi.png', bbox_inches='tight')
plt.title('Labeling Result')
plt.show()
np.savez(f'{img_name}.npz', **data)
```
|
{
"source": "jevancc/ece276a-visual-inertial-slam",
"score": 2
}
|
#### File: ece276a-visual-inertial-slam/src/mapping.py
```python
import itertools
import numpy as np
import scipy.linalg
from .utils import *
from .robot import *
class EKFLandmarkMapping:
def __init__(self,
n_landmarks,
robot_cam_T_imu,
robot_cam_intrinsic_calib,
robot_cam_baseline,
process_noise_covariance=None,
observation_noise_covariance=None,
prior_pose_covariance=None,
prior_landmark_covariance=None):
if prior_landmark_covariance is None:
prior_landmark_covariance = 5e-3 * np.eye(3)
if prior_pose_covariance is None:
prior_pose_covariance = 1e-3 * np.eye(6)
if observation_noise_covariance is None:
observation_noise_covariance = 100 * np.eye(4)
if process_noise_covariance is None:
process_noise_covariance = 1e-3 * np.eye(6)
self.xU = np.eye(4)
self.W = process_noise_covariance
self.n_landmarks = n_landmarks
self._n_initialized = 0
self._initialized_maxid = 0
self._initialized_mask = np.zeros((n_landmarks), dtype=bool)
self.xm = np.zeros((n_landmarks, 3))
self.P = np.kron(np.eye(n_landmarks), prior_landmark_covariance)
self.V = observation_noise_covariance
self.oTi = robot_cam_T_imu
K = robot_cam_intrinsic_calib
self.b = robot_cam_baseline
self.M = np.block([[K[:2, :], np.array([[0, 0]]).T], [K[:2, :], np.array([[-K[0, 0] * self.b, 0]]).T]])
@property
def initialized_maxid(self):
return self._initialized_maxid
@property
def n_initialized(self):
return self._n_initialized
@property
def xUp(self):
return self.xU[:3, 3].reshape(-1, 1)
@property
def oTw(self):
return self.oTi @ self.xU
def predict(self, u, tau):
F = scipy.linalg.expm(-tau * wedge(u))
self.xU = F @ self.xU
def _make_zmap(self, z):
assert z.ndim == 2 and z.shape[0] == 4
return np.array(np.where(z.sum(axis=0) > -4), dtype=np.int32).reshape(-1)
def _init_landmark(self, z, zmap):
mask = np.invert(self._initialized_mask[zmap])
zmap = zmap[mask]
if zmap.size > 0:
wTo = np.linalg.inv(self.oTw)
self._initialized_mask[zmap] = True
z = z[:, zmap]
M = self.M
b = self.b
wcoord = np.ones((4, zmap.size))
wcoord[0, :] = (z[0, :] - M[0, 2]) * b / (z[0, :] - z[2, :])
wcoord[1, :] = (z[1, :] - M[1, 2]) * (-M[2, 3]) / (M[1, 1] * (z[0, :] - z[2, :]))
wcoord[2, :] = -M[2, 3] / (z[0, :] - z[2, :])
wcoord = wTo @ wcoord
self.xm[zmap, :] = wcoord[:3, :].T
self._n_initialized = np.sum(self._initialized_mask)
self._initialized_maxid = max(zmap.max() + 1, self._initialized_maxid)
def _make_H(self, z, zmap):
n_observations = zmap.size
n_updates = self._initialized_maxid
P = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]], dtype=np.float64)
xm = np.hstack([self.xm[zmap, :], np.ones((n_observations, 1))])
H = np.zeros((n_observations * 4, n_updates * 3))
for i in range(n_observations):
obi = zmap[i]
H[i * 4:(i + 1) * 4,
obi * 3:(obi + 1) * 3] = self.M @ dpidq(self.oTw @ xm[i, :].reshape(-1, 1)) @ self.oTw @ P.T
return H
def _make_xm_P(self, z, zmap):
n_observations = zmap.size
n_updates = self._initialized_maxid
xm = self.xm[:n_updates, :]
P = self.P[:n_updates * 3, :n_updates * 3]
return xm, P
def _make_z(self, z, zmap):
return z[:, zmap].reshape(-1, 1, order='F')
def _make_predicted_z(self, z, zmap):
n_observations = zmap.size
xm = np.hstack([self.xm[zmap, :], np.ones((n_observations, 1))])
zp = self.M @ pi(self.oTw @ xm.T)
return zp.reshape(-1, 1, order='F')
def _update_value_xm_P(self, xm, P, zmap):
n_observations = zmap.size
n_updates = self._initialized_maxid
self.xm[:n_updates, :] = xm
self.P[:n_updates * 3, :n_updates * 3] = P
def update(self, z):
zmap = self._make_zmap(z)
if zmap.size > 0:
n_observations = zmap.size
self._init_landmark(z, zmap)
H = self._make_H(z, zmap)
xm, P = self._make_xm_P(z, zmap)
zp = self._make_predicted_z(z, zmap)
z = self._make_z(z, zmap)
V = np.kron(np.eye(n_observations), self.V)
PHT = P @ H.T
K = np.linalg.solve((H @ PHT + V).T, PHT.T).T
xm += (K @ (z - zp)).reshape(-1, 3)
P = (np.eye(K.shape[0]) - K @ H) @ P
self._update_value_xm_P(xm, P, zmap)
```
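A sketch of the correction step that `update` implements, in the notation of the code (`P` is the landmark covariance; the stacked observation noise is built as a Kronecker product, `np.kron(np.eye(n), self.V)`):
```latex
K = P H^\top \left( H P H^\top + I_n \otimes V \right)^{-1}, \qquad
\mu \leftarrow \mu + K \, (z - \hat{z}), \qquad
P \leftarrow (I - K H) \, P
```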
#### File: ece276a-visual-inertial-slam/src/robot.py
```python
import numpy as np
def wedge(x):
assert x.size in [3, 6]
x = x.reshape(-1)
if x.size == 3:
return np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
elif x.size == 6:
return np.block([[wedge(x[3:]), x[:3].reshape(-1, 1)], [np.array([0, 0, 0, 0])]])
def cwedge(x):
assert x.size == 6
x = x.reshape(-1)
return np.block([[wedge(x[3:]), wedge(x[:3])], [np.zeros((3, 3)), wedge(x[3:])]])
def cdot(x):
x = x.reshape(-1)
assert x.size == 4 and x[-1] == 1
return np.block([[np.eye(3), -wedge(x[:3])], [np.zeros((1, 6))]])
def pi(q):
assert q.ndim == 2 and q.shape[0] == 4
return q / q[2, :]
def dpidq(q):
assert q.size == 4
q = q.reshape(-1)
return (1 / q[2]) * np.array([[1, 0, -q[0] / q[2], 0], [0, 1, -q[1] / q[2], 0], [0, 0, 0, 0],
[0, 0, -q[3] / q[2], 1]])
```
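A sketch of the hat maps implemented by `wedge` above: for $x \in \mathbb{R}^3$ and a twist $\xi = (v, \omega) \in \mathbb{R}^6$ (translation first, as in the code),
```latex
\hat{x} = \begin{bmatrix} 0 & -x_3 & x_2 \\ x_3 & 0 & -x_1 \\ -x_2 & x_1 & 0 \end{bmatrix},
\qquad
\hat{\xi} = \begin{bmatrix} \hat{\omega} & v \\ 0 & 0 \end{bmatrix}
```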
|
{
"source": "jevandezande/chem_translate",
"score": 4
}
|
#### File: chem_translate/chem_translate/unicode_convertor.py
```python
from .convertor import Convertor
class UnicodeConvertor(Convertor):
"""
Converts a string to unicode
>>> UnicodeConvertor.convert("H2O + NH3 -> OH- + NH4+")
'H₂O + NH₃ -> OH⁻ + NH₄⁺'
"""
@classmethod
def subscript_number(cls, number: str) -> str:
"""
Converts a number to subscript
>>> UnicodeConvertor.subscript_number("123")
'₁₂₃'
"""
return str.translate(number, UNICODE_SUBSCRIPT_TRANSLATION)
@classmethod
def superscript_number_charge(cls, number: str, charge: str) -> str:
"""
Converts a number and charge to superscript
>>> UnicodeConvertor.superscript_number_charge("123", "-")
'¹²³⁻'
>>> UnicodeConvertor.superscript_number_charge("", "+")
'⁺'
"""
out = str.translate(number, UNICODE_SUPERSCRIPT_TRANSLATION) if number else ""
return out + str.translate(charge, UNICODE_CHARGES_TRANSLATION)
UNICODE_SUPERSCRIPT_TRANSLATION = {ord(str(i)): v for i, v in enumerate("⁰¹²³⁴⁵⁶⁷⁸⁹")}
UNICODE_SUBSCRIPT_TRANSLATION = {ord(str(i)): v for i, v in enumerate("₀₁₂₃₄₅₆₇₈₉")}
UNICODE_CHARGES_TRANSLATION = {ord("+"): "⁺", ord("-"): "⁻"}
```
|
{
"source": "jevandezande/dsc",
"score": 3
}
|
#### File: dsc/calorimeter/plot.py
```python
import numpy as np
from itertools import cycle
import matplotlib
import matplotlib.pyplot as plt
from .tools import y_at_x
def plotter(
scans,
title=None, style=None,
baseline_subtracted=False, set_zero=False, normalized=False, smoothed=False, peaks=None,
derivative=None,
plot=None,
xlim=None, xticks=None, xticks_minor=True, xlabel=None,
ylim=None, yticks=None, yticks_minor=True, ylabel=None,
colors=None, markers=None, linestyles=None,
derivative_colors=None, derivative_markers=None, derivative_linestyles=None,
legend=True,
savefig=None
):
"""
Plot a list of Scans.
:param scans: list of Scans to plot
:param title: title of the plot
:param style: plot-style (currently does nothing)
:param baseline_subtracted: amount to subtract, if True, use the lowest value from each Scan
:param set_zero: set x-value (or range of values) at which y (or y average) is set to 0
:param normalized: normalize all of the curves at given point (or highest if True)
:param smoothed: number of points with which to smooth
:param peaks: dictionary of peak picking parameters
:param derivative: whether to plot scan derivatives (True or `only`)
:param plot: (figure, axis) on which to plot, generates new figure if None
:param x*: x-axis setup parameters
:param y*: y-axis setup parameters
:param colors: colors to plot the Scans
:param markers: markers to plot the Scans
:param linestyles: linestyles to plot the Scans
:param derivative_colors: colors to plot the derivatives
:param derivative_markers: markers to plot the derivatives
:param derivative_linestyles: linestyles to plot the derivatives
:param legend: boolean to plot legend
:param savefig: where to save the figure
:return: figure and axes
"""
assert all(isinstance(s, type(scans[0])) for s in scans[1:])
assert not (baseline_subtracted and set_zero)
if baseline_subtracted:
scans = [s.baseline_subtracted(baseline_subtracted) for s in scans]
elif set_zero:
try:
x, x2 = set_zero
except (TypeError, ValueError):
x, x2 = set_zero, None
scans = [s.set_zero(x, x2) for s in scans]
if normalized is True:
scans = [s / max(s.temps) for s in scans]
elif normalized is not False:
scans = [s / y_at_x(normalized, s.time, s.temps) for s in scans]
if smoothed:
scans = [s.smoothed(smoothed) for s in scans]
if plot is None:
fig, ax = plt.subplots()
else:
fig, ax = plot
setup_axis(ax, style, title, xlim, xticks, xticks_minor, xlabel, ylim, yticks, yticks_minor, ylabel)
    if derivative == 'only':
scans = [scan.derivative() for scan in scans]
plot_scans(scans, style, ax, markers=markers, linestyles=linestyles, colors=colors, peaks=peaks)
if derivative is True:
if derivative_colors is None:
if colors is not None:
derivative_colors = colors
else:
plt.gca().set_prop_cycle(None)
if derivative_linestyles is None:
derivative_linestyles = '--'
plot_scans(
[scan.derivative() for scan in scans],
style, ax, markers=markers, linestyles=derivative_linestyles, colors=derivative_colors, peaks=peaks
)
if legend:
ax.legend()
if savefig:
fig.savefig(savefig)
return fig, ax
def plot_scans(scans, style, ax, colors=None, markers=None, linestyles=None, peaks=None):
"""
Plot Scans on an axis.
:param scans: the Scans to be plotted
:param ax: the axis on which to plot
:param style: plot-style (currently does nothing)
:param colors: the colors to use
:param markers: the markers to use at each point on the plot
:param linestyles: the styles of line to use
:param peaks: peak highlighting parameters
"""
colors = cycle_values(colors)
markers = cycle_values(markers)
linestyles = cycle_values(linestyles)
for scan, color, marker, linestyle in zip(scans, colors, markers, linestyles):
plot_scan(scan, style, ax, color=color, marker=marker, linestyle=linestyle, peaks=peaks)
def plot_scan(scan, style, ax, color=None, marker=None, linestyle=None, peaks=None):
"""
Plot a Scan on an axis.
:param scan: the Scan to be plotted
:param ax: the axis on which to plot
:param style: plot-style (currently does nothing)
:param color: the color to use
:param marker: the marker to use at each point on the plot
:param linestyle: the style of line to use
:param peaks: peak highlighting parameters
"""
ax.plot(
scan.temps, scan.heat_flows,
label=scan.name,
color=color, marker=marker, linestyle=linestyle,
)
if peaks:
        peak_defaults = {
            'format': '4.1f',
            'labels': True,
            'marks': 'x',
            'print': True,
            'prominence': None,  # passed through to scipy.signal.find_peaks
        }
        peaks = peak_defaults if peaks is True else {**peak_defaults, **peaks}
        peak_indices, _ = scan.peaks(True, prominence=peaks['prominence'])
        peak_xs, peak_ys = scan.temps[peak_indices], scan.heat_flows[peak_indices]
if peaks['marks']:
ax.scatter(peak_xs, peak_ys, color=color, marker=peaks['marks'])
if peaks['labels']:
for x, y in zip(peak_xs, peak_ys):
ax.text(x, y, f'{{:{peaks["format"]}}}'.format(x), verticalalignment='bottom')
if peaks['print']:
print(' X Y')
for x, y in zip(peak_xs, peak_ys):
print(f'{x:>9.3f} {y:>9.3f}')
def setup_axis(
ax, style=None, title=None,
xlim=None, xticks=None, xticks_minor=True, xlabel=None,
ylim=None, yticks=None, yticks_minor=True, ylabel=None,
):
"""
Setup the axis labels and limits.
Autogenerates based on style for any variable set to None.
:param ax: axis to setup
:param style: plot-style (currently does nothing)
:param title: title of the axis
:param `*lim`: limits for `*-axis` values
:param `*ticks`: `*-axis` ticks
:param `*ticks_minor`: `*-axis` minor ticks
:param `*label`: label for the `*-axis`
"""
# update values that are None
up = lambda v, d: d if v is None else v
# make ticks multiples of the tick width
make_ticks = lambda start, end, tw: np.arange(int(start/tw)*tw, int(end/tw + 1)*tw, tw)
backwards = False
if xlim is not None:
xticks = up(xticks, make_ticks(*xlim, 10))
xlabel = up(xlabel, 'Temperature (°C)')
ylabel = up(ylabel, 'Heat Flow (mW)')
ax.set_title(title)
if xticks is not None:
ax.set_xticks(xticks)
if xticks_minor is True:
ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
elif xticks_minor is not None:
xticks_minor *= 1 if not backwards else -1
ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator(xticks_minor))
if yticks is not None:
ax.set_yticks(yticks)
if yticks_minor is True:
ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
elif yticks_minor is not None:
ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator(yticks_minor))
if xlim is not None:
ax.set_xlim(*xlim)
if ylim is not None:
ax.set_ylim(*ylim)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
def cycle_values(values):
"""
Make a cycle iterator of values.
:param values: a value or list of values to be cycled.
:return: iterator of cycled values
"""
if not isinstance(values, list):
values = [values]
return cycle(values)
```
#### File: dsc/calorimeter/tools.py
```python
import csv
import itertools
import numpy as np
from glob import glob
def read_csv(inp, header=True):
"""
Reads a csv file.
:param inp: input file
:param header: inp contains a header
:return:
:titles: titles of the columns
:xs: x-values (1- or 2-dim np.array)
:ys: y-values (1- or 2-dim np.array, matches x)
"""
titles = None
try:
with open(inp) as f:
reader = csv.reader(f)
if header:
titles = next(reader)
xs, ys = [], []
for x, *y in reader:
xs.append(float(x))
ys.append([float(y_val) for y_val in y])
except ValueError as e:
raise ValueError(f'Error reading value in {inp}.') from e
ys = np.array(ys).T
xs = np.array(xs)
if titles is None:
titles = [''] * len(xs)
return titles, xs, ys
def read_csvs(inps, header=True):
"""
Read an iterable of CSVs (or only one if a string).
:param inps: input file(s) to read
:param header: inp contains a header
:return: titles, xs, ys
"""
titles = []
if isinstance(inps, str):
titles, xs_list, ys_list = read_csv(inps, header)
titles = titles[1:]
xs_list = (np.ones(ys_list.shape) * xs_list)
else:
xs_list, ys_list = [], []
for inp in inps:
ts, xs, ys = read_csv(inp, header)
xs = (np.ones(ys.shape) * xs)
titles.extend(ts[1:])
if ys.shape[1] == 1:
xs_list.append(xs)
ys_list.append(ys)
else:
for x_vals, y_vals in zip(xs, ys):
xs_list.append(x_vals)
ys_list.append(y_vals)
xs = np.array(xs_list)
ys = np.array(ys_list)
# Sanity checks
assert len(xs) == len(ys)
assert len(ys) == len(titles)
return titles, xs, ys
def glob_read_csvs(inps, header=True):
"""
Use glob to find CSVs and then reads them.
:param inps: a string or list of strings that can be read by glob
:param header: inp contains a header
:return: titles, xs, ys, file_names
"""
if isinstance(inps, str):
inps = [inps]
file_names = list(itertools.chain(*(glob(inp) for inp in inps)))
titles, xs, ys = read_csvs(file_names)
return titles, np.array(xs), np.array(ys), file_names
def y_at_x(x_point, xs, ys):
"""
Determine the y-value at a specified x. If in between xs, choose the first
past it. Assumes xs are ordered.
:param x_point: x-value for which the y-value is desired
:param xs: x-values
:param ys: y-values
:return: desired y-value
"""
if len(xs) != len(ys):
raise ValueError(f'xs and ys must be of the same length, got: {len(xs)} and {len(ys)}')
return ys[index_of_x(x_point, xs)]
def index_of_x(x_point, xs):
"""
Determine the index of value(s) in an ordered list. If in between xs,
choose the first past it (larger). Assumes xs are ordered.
:param x_point: value(s) to find
:param xs: list to search in
:return: index of the nearest x_point
"""
# If in reverse order
revd = False
if xs[0] > xs[-1]:
xs = xs[::-1]
revd = True
try:
x_iter = iter(x_point)
except TypeError:
x_iter = [x_point]
for x in x_iter:
if x < xs[0] or x > xs[-1]:
raise IndexError(f'x_point not in xs, x_point: {x}, xs: ({xs[0]}→{xs[-1]})')
if revd:
return len(xs) - np.searchsorted(xs, x_point) - 1
return np.searchsorted(xs, x_point)
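# Illustrative behavior (not part of the original module):
#   index_of_x(2.5, [0, 1, 2, 3, 4]) -> 3   (first index at or past 2.5)
#   y_at_x(2.5, [0, 1, 2, 3, 4], [0, 10, 20, 30, 40]) -> 30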
def integrate(xs, ys, x_range=None):
"""
Integrate a set of ys on the xs.
Note: if x_range does not fall exactly on values in x, it finds the next largest x value.
:param xs: x-values
:param ys: y-values
:param x_range: range of x_values to integrate over
:return: integration
"""
if len(xs) != len(ys):
raise ValueError(f'xs and ys must be of the same length, got: {len(xs)} and {len(ys)}')
if x_range is not None:
begin, end = x_range
if begin < xs[0]:
            raise IndexError(f'x_range starts before the first value in xs ({begin} < {xs[0]})')
start = index_of_x(begin, xs)
finish = index_of_x(end, xs)
xs = xs[start:finish + 1]
ys = ys[start:finish + 1]
return np.trapz(ys, xs)
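# e.g. (not part of the original module):
#   integrate(np.array([0, 1, 2]), np.array([0, 1, 2])) -> 2.0 (trapezoid rule)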
def smooth_curve(ys, box_pts=True):
"""
Smooth a curve.
Assumes that the ys are uniformly distributed. Returns output of length
`max(ys, box_pts)`, boundary effects are visible.
Note: ys must be > box_pts
:param ys: points to smooth
:param box_pts: number of data points to convolve, if True, use 3
:return: smoothed points
"""
if box_pts is True:
box_pts = 3
box = np.ones(box_pts) / box_pts
return np.convolve(ys, box, mode='same')
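# Quick illustrative check (not part of the original module): a 3-point
# moving average spreads an impulse across its neighbors:
#   smooth_curve(np.array([0., 0., 3., 0., 0.]), 3) -> array([0., 1., 1., 1., 0.])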
def cull(vals, n):
"""
Cull `vals` to have `n` "evenly" spaced values.
If not evenly divisible, spread them out as evenly as possible.
:var vals: the values to cull
:var n: number of values to keep
:yield: culled values
"""
yield from (vals[i] for i in np.linspace(0.5, len(vals) - 0.5, n, dtype=int))
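# e.g. (not part of the original module):
#   list(cull(list(range(10)), 3)) -> [0, 5, 9]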
```
#### File: dsc/tests/test_plot.py
```python
import sys
import matplotlib.pyplot as plt
sys.path.insert(0, '..')
from calorimeter.plot import cycle_values, plotter, setup_axis
from calorimeter.scan import scans_from_csvs
def setup():
pass
def teardown():
pass
def test_setup_axis():
fig, ax = plt.subplots()
setup_axis(ax)
setup_axis(ax, None, xticks=range(100), xlim=(0, 100))
def test_cycle_values():
assert next(cycle_values(None)) is None
assert next(cycle_values(1)) == 1
it = cycle_values([0, 1, 2])
assert next(it) == 0
assert next(it) == 1
assert next(it) == 2
assert next(it) == 0
def test_plotter(tmp_path):
scans = scans_from_csvs('tests/files/6HAW158.csv', 'tests/files/7HAW008-2.csv')
fig, ax = plt.subplots()
plotter(
scans,
title='Hello World', style=None,
baseline_subtracted=True, set_zero=False, normalized=False, smoothed=False, peaks=None,
plot=(fig, ax), xlim=(0, 50), xticks_minor=True, yticks_minor=2,
legend=True, colors=None, markers=None, linestyles=None,
savefig=f'{tmp_path}/my_scans_figure.svg',
)
```
|
{
"source": "jevandezande/elongation",
"score": 3
}
|
#### File: elongation/elongation/elongation.py
```python
import itertools
import numpy as np
from datetime import datetime
from scipy import signal
from .tools import (MyIter, compare_dictionaries, read_key_value, smooth_curve,
try_to_num)
class Elongation:
def __init__(self, xs, ys, gauge_length, sample_width, sample_thickness, name=None):
"""
Container for elongation data.
:param xs: elongation (in units of strain)
:param ys: force (in Newtons)
:param gauge_length: length of sample (in meters)
:param sample width: width of sample (in meters)
:param sample_thickness: thickness of sample (in meters)
:param name: optional name for the Elongation
"""
assert len(xs) == len(ys)
self.xs = np.array(xs)
self.ys = np.array(ys)
self.gauge_length = gauge_length # m
self.sample_width = sample_width # m
self.sample_thickness = sample_thickness # m
self.name = name
def __eq__(self, other):
"""
Check if two Elongation objects are equivalent.
:param other: other Elongation object to compare with
"""
return isinstance(other, Elongation)\
and len(self.xs) == len(other.xs)\
and all(self.xs == other.xs) and all(self.ys == other.ys)\
and self.gauge_length == other.gauge_length\
and self.sample_width == other.sample_width\
and self.sample_thickness == other.sample_thickness\
and self.name == other.name
def copy(self):
"""
Make a copy of the Elongation object.
"""
return self.__class__(
self.xs.copy(), self.ys.copy(),
self.gauge_length,
self.sample_width,
self.sample_thickness,
self.name
)
def write(self, file_name, style=None):
"""
Write Elongation object to file.
:param file_name: file to write to.
:param style: format to write to (guesses based on file extension if None)
"""
write_elongation(self, file_name, style=style)
@property
def max(self):
"""
        Determine the strain at the maximum force and the maximum force.
        :return: strain at max force, max force
"""
max_i = np.nanargmax(self.ys)
return self.xs[max_i], self.ys[max_i]
@property
def cross_section(self):
"""
Cross sectional area of the material.
:return: cross_section in m²
"""
return self.sample_thickness*self.sample_width # m x m = m²
def smoothed(self, box_pts=True):
"""
Generate a smoothed version of the Elongation.
:param box_pts: number of data points to convolve, if True, use default
:return: smoothed Elongation
"""
elong = self.copy()
elong.ys = smooth_curve(self.ys, box_pts)
return elong
def cropped(self, start=None, end=None, shifted=True):
"""
Crop the Elongation by x-value.
:param start: x-value at which to start
:param end: x-value at which to end
:return: cropped Elongation object
"""
start_i, end_i, i = None, None, 0
if start is not None:
for i, val in enumerate(self.xs):
if val > start:
start_i = i
break
if end is not None:
for i, val in enumerate(self.xs[i:], start=i):
if val > end:
end_i = i + 1
break
return self.cropped_index(start_i, end_i, shifted)
def cropped_index(self, start_i=None, end_i=None, shifted=True):
"""
Crop the Elongation by index.
:param start_i: index at which to start
:param end_i: index at which to end
:param shifted: shift the x-values so that they start at 0
"""
xs = self.xs[start_i:end_i]
ys = self.ys[start_i:end_i]
if shifted:
xs = xs - xs[0]
return self.__class__(xs, ys, self.gauge_length, self.sample_width, self.sample_thickness, self.name)
def cleaned(self, start_threshold=0.01, end_threshold=0.25, shifted=True):
"""
Remove the slack at the beginning and post-break at the end.
:param start_threshold: threshold of max for starting
:param end_threshold: threshold of max for break
"""
start_i, end_i = None, None
max_i = np.nanargmax(self.ys)
max_y = self.ys[max_i]
if start_threshold is not None:
# includes the value before threshold is met
for i, y in enumerate(self.ys[1:]):
if y > max_y*start_threshold:
start_i = i
break
if end_threshold is not None:
for i, y in enumerate(self.ys[max_i:], start=max_i):
if y < max_y*end_threshold:
end_i = i
break
return self.cropped_index(start_i, end_i, shifted)
@property
def youngs_modulus(self, x_limit=None):
"""
Determine the Young's modulus of the Elongation.
Modulus is calculated as the peak of the derivative of the stress strain curve.
:return: Young's modulus (units of Pa)
"""
if x_limit is not None:
raise NotImplementedError('Limits on x not yet implemented, see youngs_modulus_array().')
return max(self.youngs_modulus_array)
@property
def youngs_modulus_array(self):
"""
Determine the Young's modulus at all points on the Elongation.
:return: Young's modulus array (units of Pa)
"""
return self.derivative()/self.cross_section # N/ΔL · L₀/A
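    # In symbols (illustrative, not from the original module):
    #   E = max_i (dF/dε)_i / A, with force F in N, strain ε dimensionless,
    #   and A = width * thickness in m², so E has units of Pa.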
def derivative(self):
"""
:return: derivative
"""
return np.diff(self.ys)/np.diff(self.xs) # ΔN/ΔL · L₀
def peaks(self, **kwargs):
"""
Finds the location of peaks in the Elongation.
Utilizes scipy.signal.find_peaks and the parameters therein.
:param **kwargs: kwargs for scipy.signal.find_peaks
:return: peak x-values, properties
"""
peaks, properties = self.peak_indices(**kwargs)
return self.xs[peaks], properties
def peak_indices(self, **kwargs):
"""
Finds the location of peaks in the Elongation.
Utilizes scipy.signal.find_peaks and the parameters therein.
:param **kwargs: kwargs for scipy.signal.find_peaks
:return: peak indices, properties
"""
kwarg_defaults = {
'width': 5, # ensure small spikes are ignored
}
kwarg_defaults.update(kwargs)
return signal.find_peaks(self.ys, **kwarg_defaults)
def break_index(self, **kwargs):
"""
Determine the strain index of break.
Break is defined herein as the last peak in the stress/strain curve.
:param **kwargs: see peaks()
:return: index of break
"""
return self.peak_indices(**kwargs)[0][-1]
def break_elongation(self, **kwargs):
return self.xs[self.break_index(**kwargs)]
def break_load(self, **kwargs):
return self.ys[self.break_index(**kwargs)]
def break_strength(self, **kwargs):
return self.break_load(**kwargs)/self.cross_section
def yield_index(self, **kwargs):
"""
Determine the location and force at yield.
Yield is defined herein as the first peak in the stress/strain curve.
:param **kwargs: see peaks()
:return: index of yield
"""
return self.peak_indices(**kwargs)[0][0]
def yield_elongation(self, **kwargs):
return self.xs[self.yield_index(**kwargs)]
def yield_load(self, **kwargs):
return self.ys[self.yield_index(**kwargs)]
def yield_strength(self, **kwargs):
return self.yield_load(**kwargs)/self.cross_section
def write_elongation(elongation, file_name, style=None):
"""
Write Elongation object to file.
:param: Elongation object
:param file_name: name of the file to be written to
:param style: format to write to (guesses based on file extension if None)
"""
style = file_name.split('.')[-1] if style is None else style
if style == 'csv':
write_csv(elongation, file_name)
elif style == 'prn':
raise NotImplementedError()
else:
raise NotImplementedError()
def write_csv(elongation, file_name):
"""
Write Elongation object to a csv file.
:param: Elongation object
:param file_name: name of the file to be written to
"""
e = elongation
with open(file_name, 'w') as f:
f.write(f"""\
Break Load, {e.break_load()}
Break Strength, {e.break_strength()}
Break Elongation, {e.break_elongation()}
Yield Load, {e.yield_load()}
Yield Strength, {e.yield_strength()}
Yield Elongation, {e.yield_elongation()}
Gauge Length, {e.gauge_length}
Sample Width, {e.sample_width}
Sample Thickness, {e.sample_thickness}
Points
%, N""")
for x, y in zip(e.xs, e.ys):
f.write(f'\n{x:>8.4f}, {y:>8.4f}')
def read_elongations(file_names):
"""
Read an iterable of elongation files.
:param file_names: name of elongation files
:return: list of Elongation objects.
"""
return list(itertools.chain(*(read_elongation(f) for f in file_names)))
def read_elongation(file_name):
"""
Read an elongation file.
:param file_name: name of the file
:return: list of Elongation objects
"""
extension = file_name.split('.')[-1]
if extension == 'prn':
return read_prn(file_name)
elif extension == 'csv':
return read_csv(file_name)
else:
raise NotImplementedError(f'Reading {extension} files is not yet implemented.')
def read_prn(file_name):
"""
Read a prn file.
:param file_name: name of the file
:return: list of Elongation objects
```
prn:13|
subtype = MT2500
Doc={MT2500:14|
Film={12.1|
Test_Mode = tensile
Setup_Name = -
Unit_System = SI
Graph_Mode = stress/strain
Sample_Length = 60.00
CrossheadVlcty = 540
VelocityUnitId = 1
CrossheadSpeed = 21.2598
Loadcell_Mode = Tension
Loadcell_Type = "SM Series"
Start_Threshold = 0.10
Stop_Threshold = 0.10
Auto_Stop = True
Auto_Return = True
ExtnsnResetOnStart = False
Yield_Type = 0
COF_Sled_Load = 200.00
}
Test_Info={2|
Color = e
Order_Id = d
Technician = a
Test_Method = b
Sample_Conditioning = f
Test_Conditions = g
Product_Name = c
Test_Direction = up
}
Test_Data=(
{6|
Crosshead_speed = 0.787
X_unit = Secs.
Y_unit = Newtons
Sample_Thkness = 1.000
Sample_Width = 1.000
Grip_Separation = 6.000
Start_Threshhold = 0.100
Stop_Threshhold = 0.100
Number_Of_Points = 2344
Points = [
0.1800, 0.0000
...
6.1130, -1.2009
]
},
{6|
...
}
)
Test_Results=(
{6|
TestDate = 21 Aug, 2019
Length_Cnvrsn = 0.333333
Force_Cnvrsn = 1.000000
LoadCell_Capacity = 100
LoadCell_CpctyUnit = 1
LoadCell_BitsOfReso = 14
Analysis={ATensile:1|
Slack_time = 0.000000
SampleThickness = 1.0000
BreakLoad = 16.9010
BreakStrength = 16900.9524
BreakElongation = 77.8917
BreakPctElongation = 1298.1944
YieldStrength1 = 5199.9639
YieldLoad1 = 5.2000
}
},
{6|
...
}
)
}
```
"""
with open(file_name) as f:
f = MyIter(f)
try:
assert next(f).strip() == 'prn:13|'
assert next(f).strip() == 'subtype = MT2500'
assert next(f).strip() == 'Doc={MT2500:14|'
assert next(f).strip() == 'Film={12.1|'
film_data = {}
for line in f:
if '}' in line:
break
key, value = read_key_value(line)
film_data[key] = value
assert next(f).strip() == 'Test_Info={2|'
test_info = {}
for line in f:
if '}' in line:
break
key, value = read_key_value(line)
test_info[key] = value
assert next(f).strip() == 'Test_Data=('
test_data = []
for i, line in enumerate(f):
if line.strip() != '{6|':
break
test_data.append({})
for line in f:
if '[' in line:
break
key, value = read_key_value(line)
test_data[i][key] = try_to_num(value)
xs, ys = [], []
for line in f:
if ']' in line:
break
x, y = line.split(',')
xs.append(x)
ys.append(y)
test_data[i]['xs'] = np.array(xs, dtype='float')
test_data[i]['ys'] = np.array(ys, dtype='float')
assert int(test_data[i]['Number_Of_Points']) == len(xs)
assert next(f).strip()[0] == '}' # may have a comma
assert 'Test_Results=(' == next(f).strip()
test_results = []
for i, line in enumerate(f):
if line.strip() != '{6|':
break
test_results.append({})
for line in f:
if '}' in line:
break
key, value = read_key_value(line)
test_results[i][key] = try_to_num(value)
assert next(f).strip()[0] == '}' # may include comma
except AssertionError as e:
print(f._index, f._line)
raise
data_remove = ['Number_Of_Points']
results_swaps = [
('TestDate', 'date'),
('Length_Cnvrsn', 'length_conversion'),
('Force_Cnvrsn', 'force_conversion'),
('LoadCell_Capacity', 'loadcell_capacity'),
('LoadCell_CpctyUnit', 'loadcell_capacity_unit'),
('LoadCell_BitsOfReso', 'loadcell_bits_of_resolution'),
('Slack_time', 'slack_time'),
('BreakStrength', 'break_strength'),
('BreakElongation', 'break_elongation'),
('BreakPctElongation', 'break_percent_elongation'),
('YieldStrength1', 'yield_strength'),
('YieldLoad1', 'yield_load'),
('SampleThickness', 'thickness'),
('BreakLoad', 'break_load'),
]
results_remove = ['Analysis']
data_swaps = [
('X_unit', 'x_units'),
('Y_unit', 'y_units'),
('Crosshead_speed', 'crosshead_speed'),
('Sample_Thkness', 'sample_thickness'),
('Sample_Width', 'sample_width'),
('Grip_Separation', 'gauge_length'),
('Start_Threshhold', 'start_threshhold'),
('Stop_Threshhold', 'stop_threshhold'),
]
elongations = []
assert len(test_data) == len(test_results)
for data, results in zip(test_data, test_results):
for original, to in data_swaps:
data[to] = data.pop(original)
for original, to in results_swaps:
results[to] = results.pop(original)
for key in data_remove:
data.pop(key)
for key in results_remove:
results.pop(key)
if data['x_units'] == 'Secs.':
data['x_units'] = 's'
if data['y_units'] == 'Newtons':
data['y_units'] = 'N'
if results['date']:
results['date'] = datetime.strptime(results['date'], '%d %b, %Y')
xs = data['xs']*float(data['crosshead_speed'])
elongations.append(
Elongation(
xs, data['ys'],
float(data['gauge_length']) / 1e3, # mm → m
float(data['sample_width']) / 1e3, # mm → m
float(data['sample_thickness']) / 1e3, # mm → m
None
)
)
return elongations
def read_csv(file_name):
"""
Read a csv file.
:param file_name: name of the file
:return: list of Elongation objects (currently only a single item in list).
"""
data = {}
with open(file_name) as f:
f = MyIter(f)
try:
for line in f:
if not line.strip():
continue
if line == 'Points\n':
break
key, val = read_key_value(line, separator=',')
key = key.lower().replace(' ', '_')
data[key] = val
x_units, y_units = next(f).split(',')
data['x_units'], data['y_units'] = x_units.strip(), y_units.strip()
xs, ys = [], []
for line in f:
x, y = line.split(',')
xs.append(float(x.strip()))
ys.append(float(y.strip()))
except Exception as e:
print(f'Error on line {f._index}')
print(f._line)
raise e
elong = Elongation(
np.array(xs), np.array(ys),
float(data['gauge_length']),
float(data['sample_width']),
float(data['sample_thickness'])
)
return [elong]
if __name__ == "__main__":
elongs = read_prn('../test/test_files/test1.prn')
elongs = read_elongation('../test/test_files/test1.prn')
elong = elongs[0]
elong.write('a.csv')
open('a.out', 'w').write(str(elong.__dict__))
```
#### File: elongation/elongation/tools.py
```python
import numpy as np
import more_itertools as mit
class MyIter(mit.peekable):
"""
Simple class for making it easier to debug the parsing of files.
Extends more-itertools.peekable:
_index: index of currently read line
_line: currently read line
"""
def __init__(self, iterable):
self._index = 0
self._line = None
super().__init__(iterable)
def __next__(self):
self._line = super().__next__()
self._index += 1
return self._line
def read_key_value(line, separator='='):
"""
Read a key and value from a line.
Only splits on first instance of separator.
:param line: string of text to read.
:param separator: key-value separator
"""
key, value = line.split(separator, 1)
return key.strip(), value.strip()
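# e.g. (not part of the original module):
#   read_key_value('Sample_Width = 1.000') -> ('Sample_Width', '1.000')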
def try_to_num(in_str):
"""
Convert string to a number if possible.
:param: in_str
:return: int, float, or original string
"""
for f in (int, float):
try:
return f(in_str)
except ValueError as e:
pass
return in_str
def compare_dictionaries(dict1, dict2):
"""
Recursively compare two dictionaries.
:return: True if exactly the same, False if not
"""
if len(dict1) != len(dict2):
return False
for key in dict1:
try:
val1, val2 = dict1[key], dict2[key]
if val1 != val2: # different values
return False
except KeyError: # different keys
return False
except ValueError: # non-comparable types
if isinstance(val1, dict):
if not compare_dictionaries(val1, val2):
return False
elif any(val1 != val2): # Compare all values
return False
return True
def smooth_curve(ys, box_pts=True):
"""
Smooth a curve.
Assumes that the ys are uniformly distributed. Returns output of length
`max(ys, box_pts)`, boundary effects are visible.
:param ys: points to smooth
:param box_pts: number of data points to convolve, if True, use 3
:return: smoothed points
"""
if box_pts is True:
box_pts = 3
box = np.ones(box_pts) / box_pts
return np.convolve(ys, box, mode='same')
```
|
{
"source": "jevandezande/mo_diagrams",
"score": 3
}
|
#### File: mo_diagrams/mo_diagrams/orbitals.py
```python
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterator, Optional
@dataclass
class _OrbitalBase:
# default argument rearrangement with dataclasses requires separate class
energy: float
@dataclass
class _MOBase:
energy: float
atomic_orbitals: list[AtomicOrbital]
coefficients: list[float]
@dataclass(order=True)
class Orbital(_OrbitalBase):
label: str = ""
def __repr__(self) -> str:
label = f" {self.label}".rstrip()
return f"<{type(self).__name__}:{label} {self.energy}>"
def copy(self) -> Orbital:
return Orbital(
self.energy,
self.label,
)
@dataclass(order=True)
class AtomicOrbital(Orbital):
atom: Optional[str | int] = None
def __repr__(self) -> str:
label = f" {self.atom} {self.label}".rstrip()
return f"<{type(self).__name__}:{label} {self.energy}>"
def copy(self) -> AtomicOrbital:
return AtomicOrbital(
self.energy,
self.label,
self.atom,
)
@dataclass(order=True)
class MolecularOrbital(Orbital, _MOBase):
"""
A combination of atomic orbitals
"""
label: str = ""
def __str__(self) -> str:
return (
type(self).__name__
+ ":\n"
+ f"{self.energy}"
+ "\n"
+ (self.label + "\n" if self.label else "")
+ "\n".join(f" {c:>6.3f} {ao!r}" for ao, c in self)
)
def __iter__(self) -> Iterator[tuple[AtomicOrbital, float]]:
yield from zip(self.atomic_orbitals, self.coefficients)
def copy(self) -> MolecularOrbital:
return MolecularOrbital(
self.energy,
self.atomic_orbitals,
self.coefficients,
self.label,
)
@property
def norm(self) -> float:
"""
Frobenius norm of the coefficients
"""
return sum(c ** 2 for _, c in self) ** 0.5
@property
def normalized_coefficients(self) -> list[float]:
return [c / self.norm for _, c in self]
def normalized(self) -> MolecularOrbital:
"""
Generates a MolecularOrbital with normalized atomic orbital coefficients
"""
copy = self.copy()
copy.coefficients = self.normalized_coefficients
return copy
if __name__ == "__main__":
orb1 = Orbital(1)
orb2 = Orbital(2.0)
print(repr(orb1))
print(repr(orb2))
print()
ao1 = AtomicOrbital(1)
ao2 = AtomicOrbital(2, "2s")
print(repr(ao1))
print(repr(ao2))
print()
mo1 = MolecularOrbital(1, [ao1], [1])
mo2 = MolecularOrbital(2.0, [ao1, ao2], [1, 2.0], "HF")
print(str(mo1))
print(str(mo2))
print()
```
|
{
"source": "jevandezande/orbital_diagrams",
"score": 3
}
|
#### File: orbital_diagrams/orbitals/combo_orbital.py
```python
from dataclasses import dataclass, field
from typing import Iterator
from ._base_orbital import BaseOrbital
@dataclass
class ComboOrbital(BaseOrbital):
orbs: list[BaseOrbital]
weights: list[float] = field(default_factory=list)
    def __post_init__(self):
        orbs, weights = self.orbs, self.weights
        if not weights:
            # default to an equal, normalized contribution from every orbital
            self.weights = [1 / len(orbs) ** 0.5] * len(orbs)
        else:
            if len(orbs) != len(weights):
                raise ValueError(
                    f"Received mismatched orbitals and weights: {len(orbs)=} != {len(weights)=}"
                )
            norm = sum(w ** 2 for w in weights) ** 0.5
            if norm <= 0:
                raise ValueError("Weights cannot all be 0.")
            self.weights = [w / norm for w in weights]
def __iter__(self) -> Iterator[tuple[BaseOrbital, float]]:
yield from zip(self.orbs, self.weights)
def __str__(self) -> str:
return "<ComboOrbital " + " ".join(f"{w:+5.2f} {orb}" for orb, w in self) + ">"
```
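A small usage sketch; `BaseOrbital()` is instantiable as shown in the test suite below, and the import path follows the file layout:
```python
from orbital_diagrams.orbitals._base_orbital import BaseOrbital
from orbital_diagrams.orbitals.combo_orbital import ComboOrbital

combo = ComboOrbital([BaseOrbital(), BaseOrbital()], [3.0, 4.0])
print(combo.weights)  # [0.6, 0.8] -- rescaled so the squared weights sum to 1
print(combo)
```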
#### File: orbital_diagrams/orbitals/orbital_group.py
```python
from dataclasses import dataclass
from typing import Iterator
from ._base_orbital import BaseOrbital
@dataclass
class OrbitalGroup(BaseOrbital):
orbs: list[BaseOrbital]
def __len__(self) -> int:
return len(self.orbs)
def __getitem__(self, index):
return self.orbs[index]
def __iter__(self) -> Iterator[BaseOrbital]:
yield from self.orbs
def __str__(self) -> str:
return f"<{type(self).__name__} {self.orbs}>"
```
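`OrbitalGroup` is a thin sequence wrapper; a minimal sketch (same import assumptions as above):
```python
from orbital_diagrams.orbitals._base_orbital import BaseOrbital
from orbital_diagrams.orbitals.orbital_group import OrbitalGroup

group = OrbitalGroup([BaseOrbital(), BaseOrbital()])
assert len(group) == 2
for orb in group:  # iterates the wrapped orbitals
    print(orb)
```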
#### File: orbital_diagrams/tests/test_plot.py
```python
import matplotlib.pyplot as plt
from pytest import mark, raises
from orbital_diagrams.api import EnergyOrbital
from orbital_diagrams.orbitals._base_orbital import BaseOrbital
from orbital_diagrams.plot import cycle_values, plotter, setup_axis, subplots
def test_setup_axis():
fig, ax = plt.subplots()
setup_axis(ax, None, xticks=range(100), xlim=(0, 100))
setup_axis(ax, "BaseOrbital")
setup_axis(ax, "EnergyOrbital")
setup_axis(ax, "ComboOrbital")
setup_axis(ax, "ComboEnergyOrbital")
setup_axis(ax, "ComboOrbitalGroup")
setup_axis(ax, "ComboEnergyOrbitalGroup")
with raises(NotImplementedError):
setup_axis(ax, "None")
def test_subplots():
assert len(subplots("BaseOrbital")) == 2
assert len(subplots("EnergyOrbital")[1]) == 1
assert subplots("ComboOrbital", 1, 4)[1].shape == (1, 4)
assert subplots("ComboEnergyOrbital", 3, 5)[1].shape == (3, 5)
@mark.xfail
def test_plotter(tmp_path):
fig, ((ax,),) = subplots("BaseOrbital")
plotter(
[BaseOrbital(), BaseOrbital(), BaseOrbital()],
title="Hello World",
style="BaseOrbital",
plot=(fig, ax),
xlim=(0, 2),
xticks_minor=True,
yticks_minor=2,
legend=True,
colors=None,
markers=None,
linestyles=None,
savefig=f"{tmp_path}/BaseOrbitals.png",
)
plotter(
[EnergyOrbital(-1), EnergyOrbital(-2), EnergyOrbital(-3)],
title="World",
style="EnergyOrbital",
plot=None,
xlim=None,
xticks=None,
xticks_minor=1,
yticks_minor=True,
legend=False,
alphas=[0.9, 0.1],
colors=["b", "k"],
markers="x",
linestyles=["-", ":"],
savefig=f"{tmp_path}/",
)
plotter(
[],
title="Hello",
style=type(BaseOrbital()).__name__,
plot=None,
xlim=None,
xticks=None,
legend=True,
colors=None,
markers=None,
linestyles=None,
savefig=f"{tmp_path}/",
)
plotter(
[],
title="Hello",
style=None,
plot=None,
xlim=(0, 10),
xticks=None,
legend=False,
colors=None,
markers="+",
linestyles="--",
savefig=f"{tmp_path}/",
)
plotter(
[],
title="Hello",
style=None,
plot=None,
xlim=(0, 10),
xticks=None,
ylim=(0, 10),
yticks=(0, 5, 10),
yticks_minor=True,
legend=False,
colors=None,
alphas=0.5,
markers="+",
linestyles="--",
savefig=f"{tmp_path}/",
)
with raises(NotImplementedError):
plotter([], style="QWERTY")
def test_cycle_values():
assert next(cycle_values(None)) is None
assert next(cycle_values(1)) == 1
it = cycle_values([0, 1, 2])
assert next(it) == 0
assert next(it) == 1
assert next(it) == 2
assert next(it) == 0
```
|
{
"source": "jevandezande/scripts",
"score": 3
}
|
#### File: scripts/qgrep/convergence.py
```python
import numpy as np
from collections import OrderedDict
class Step:
"""
An object that stores a geometry convergence step
:param params: convergence parameters
:param criteria: criteria for convergence
"""
def __init__(self, params, criteria):
self.__dict__.update(params)
self.params = params
self.criteria = criteria
    def __str__(self):
        out = ''
        for (key, value), criterion in zip(self.params.items(), self.criteria):
            if key in ['scf_steps']:
                # integers
                out += f'|{value:> 7d}'
            else:
                out += f'{value:> 9.2e}'
                out += '*' if abs(value) < criterion else ' '
        return out
class Convergence:
def __init__(self, steps, criteria, program='orca'):
"""
Stores multiple geometry convergence steps
:param steps: list of Steps
:param criteria: criteria for convergence
:param program: the program the results are from
"""
self.steps = steps
self.criteria = criteria
self.program = program
def __iter__(self):
yield from self.steps
def __str__(self):
if self.program == 'orca':
header = " Δ energy RMS grad MAX grad RMS step MAX Step | SCF Steps\n"
else:
raise NotImplementedError('Convergence currently only implemented for ORCA')
line = '-'*66 + '\n'
out = header + line
for i, step in enumerate(self):
out += f'{i:>3}: '
for (key, value), criterion in zip(step.params.items(), step.criteria):
# integers
if key in ['scf_steps']:
pass
else:
star = ' '
if abs(value) < criterion and not (i == 0 and key == 'delta_e'):
star = '*'
out += f'{value:> 9.2e}{star}'
out += f'|{step.scf_steps:> 7d}\n'
return out + line + ' ' + (' {:> 9.2e}'*len(self.criteria)).format(*self.criteria)
def plot(self, show=True):
"""
Generate a plot of the convergence
:param show: show the plot
"""
from matplotlib import pyplot as plt
f, (ax0, ax1) = plt.subplots(1, 2, sharex='col')
f.suptitle('Convergence', fontsize=16)
plt.xlabel('Step')
x = range(len(self.steps))
        ax0.set_yscale('symlog', linthresh=1e-5)
ax0.set_title(r'$\Delta$ Energy')
ax0.plot(x, self.delta_e, label='Energy')
ax0.legend()
ax1.set_ylim(0, 1)
        ax1.set_yscale('symlog', linthresh=1e-4)
ax1.set_title('Convergence Parameters')
ax1.plot(x, self.rms_grad, 'b-', label='RMS Grad')
ax1.plot(x, self.max_grad, 'b--', label='Max Grad')
ax1.plot(x, self.rms_step, 'r-', label='RMS Step')
ax1.plot(x, self.max_step, 'r--', label='Max Step')
# TODO: generalize for more than ORCA
ax1.plot(x, [self.criteria[1]]*len(self.steps), 'k-')
ax1.plot(x, [self.criteria[1]]*len(self.steps), 'b*')
ax1.plot(x, [self.criteria[2]]*len(self.steps), 'k--')
ax1.plot(x, [self.criteria[2]]*len(self.steps), 'b*')
ax1.plot(x, [self.criteria[3]]*len(self.steps), 'k-')
ax1.plot(x, [self.criteria[3]]*len(self.steps), 'r*')
ax1.plot(x, [self.criteria[4]]*len(self.steps), 'k--')
ax1.plot(x, [self.criteria[4]]*len(self.steps), 'r*')
ax1.legend()
if show:
plt.show()
@property
def delta_e(self):
return np.array([step.delta_e for step in self.steps])
@property
def rms_e(self):
return np.array([step.rms_e for step in self.steps])
@property
def max_grad(self):
return np.array([step.max_grad for step in self.steps])
@property
def rms_grad(self):
return np.array([step.rms_grad for step in self.steps])
@property
def max_step(self):
return np.array([step.max_step for step in self.steps])
@property
def rms_step(self):
return np.array([step.rms_step for step in self.steps])
```
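A usage sketch with invented values; the criteria follow the column order in the header (Δ energy, RMS grad, MAX grad, RMS step, MAX step), and `scf_steps` must be the last key so the `zip` with the criteria skips it:
```python
from qgrep.convergence import Step, Convergence

criteria = [5e-6, 1e-4, 3e-4, 2e-3, 4e-3]
params = {
    'delta_e': -1.2e-5, 'rms_grad': 3.1e-5, 'max_grad': 9.5e-5,
    'rms_step': 1.7e-3, 'max_step': 5.2e-3, 'scf_steps': 12,
}
conv = Convergence([Step(params, criteria)], criteria)
print(conv)            # table with '*' marking converged parameters
conv.plot(show=False)  # builds the matplotlib figure; show=True to display
```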
#### File: qgrep/population/nbo.py
```python
import re
import numpy as np
from itertools import zip_longest
from collections import defaultdict
from more_itertools import peekable
class NAOs:
"""Natural Atomic Orbitals"""
def __init__(self, lines):
"""
"""
self.vals = NAOs.read(lines)
def __len__(self):
return len(self.vals)
def __iter__(self):
for ao_vals in self.vals:
yield ao_vals
def __sub__(self, other):
"""
Difference of two NAO objects
"""
raise NotImplementedError
@staticmethod
def read(lines):
""" Reads the lines of a NAO output
NAO Atom No lang Type(AO) Occupancy Energy
---------------------------------------------------------
1 C 1 s Cor( 1s) 1.99916 -10.15626
2 C 1 s Val( 2s) 0.94193 -0.30622
"""
start = -1
for i, line in enumerate(lines):
if line.strip() == 'NAO Atom No lang Type(AO) Occupancy Energy':
start = i + 2
if start == -1:
raise Exception('Cannot find the start of NAO')
vals = []
atom = ''
for line in lines[start:]:
if not line.strip():
continue
try:
idx, atom, num, orb, orb_type, *shell, occupancy, energy = line.split()
except ValueError as e:
break
if not shell:
orb_type, shell = orb_type.split('(')
else:
shell = shell[0]
orb_type = orb_type.strip('(')
shell = shell.strip(')')
vals.append([atom, int(num), orb, orb_type, shell, float(occupancy), float(energy)])
return vals
class NPA:
"""Natural Population Analysis class"""
def __init__(self, atoms=None, charges=None, lines=''):
"""
:param atoms: a list of atoms
:param charges: numpy array of charges per atom (charge, core, valence, rydberg, total)
:param lines: line of an output file to read
"""
if atoms is None and charges is None and lines:
self.atoms, self.charges = self.read(lines)
else:
self.atoms = atoms
self.charges = charges
def __eq__(self, other):
"""
Uses np.allclose
"""
return self.atoms == other.atoms and np.allclose(self.charges, other.charges)
def __iter__(self):
"""
Iterate over atoms and charges, each time returning an array of a single atom with charges
"""
for atom, atom_charges in zip(self.atoms, self.charges):
yield atom, atom_charges
def __len__(self):
"""
Number of atoms
"""
return len(self.atoms)
def __getitem__(self, index):
"""
Get the atom and charges corresponding to the index
"""
return [self.atoms[index]] + self.charges[index]
def __setitem__(self, index, value):
"""
Set the atom and charges corresponding to the index
"""
if len(value) != 6:
raise SyntaxError('Invalid number of charges')
self.atoms[index] = value[0]
self.charges[index] = value[1:]
def __str__(self):
"""
Return a string resmbling the NPA output
"""
ret = 'Idx Atom Charge Core Valence Rydberg Total\n'
line_form = '{:>3} {:<5}' + ' {: >8.5f}' * 5 + '\n'
for i, (atom, charge) in enumerate(zip(self.atoms, self.charges)):
ret += line_form.format(i, atom, *charge)
return ret
def __sub__(self, other):
"""
Subtract two NPA objects of different sizes
"""
return self._combine(other, '-')
def __add__(self, other):
"""
Add two NPA objects
"""
return self._combine(other, '+')
def _combine(self, other, form):
"""
Add or subtract two NPA objects
"""
if form not in ['+', '-']:
raise ValueError("form must be '+' or '-'")
atoms = []
charges = []
# Allow combination even if the dimensions don't match
for (atom1, charges1), (atom2, charges2) in zip_longest(self, other, fillvalue=['', np.zeros(5)]):
atoms.append(f'{atom1:>2}{form}{atom2:<2}')
if form == '-':
charges.append(charges1 - charges2)
else:
charges.append(charges1 + charges2)
if form == '-':
return NPA_Diff(atoms, charges)
return NPA_Sum(atoms, charges)
    def append(self, atom, *vals):
        """
        Append an atom and charges to the population analysis
        """
        if not len(vals) == 5:
            raise SyntaxError('Invalid number of charges')
        self.atoms.append(atom)
        # list.append returns None, so build the list first and then convert
        charges = list(self.charges)
        charges.append(list(vals))
        self.charges = np.array(charges)
@staticmethod
def read(lines):
"""Read the natural population analysis from an output file
Summary of Natural Population Analysis:
Natural Population
Natural ---------------------------------------------
Atom No Charge Core Valence Rydberg Total
--------------------------------------------------------------------
Fe 1 -0.57877 17.97641 8.54310 0.05926 26.57877
C 2 0.60637 1.99951 3.34225 0.05187 5.39363
O 3 -0.42097 1.99976 6.38932 0.03189 8.42097
...
"""
# Find the NPA Section
start = -1
for i, line in enumerate(lines):
if line == ' Atom No Charge Core Valence Rydberg Total\n':
start = i + 2
break
if start == -1:
raise Exception('Unable to find the start of NPA analysis')
npa = []
atoms = []
# Interpret the NPA
for line in lines[start:]:
if line[:50] == " " + "=" * 49:
break
atom, num, *others = line.split()
atoms.append(atom)
num = int(num)
charge, core, valence, rydberg, total, *ns = map(float, others)
npa.append([charge, core, valence, rydberg, total])
return atoms, np.array(npa)
class NPA_Diff(NPA):
"""
NPA class without restrictions on population
Currently exactly the same
"""
class NPA_Sum(NPA):
"""
NPA class without restrictions on population
Currently exactly the same
"""
pass
class Orbital:
"""
Base class for all orbitals
"""
def __init__(self, occupation, atom, atom_n):
"""
:param occupation: orbital occupation
:param atom: the primary atom of the orbital (more atoms allowed in subclasses)
:param atom_n: index of the primary atom
"""
self.occupation = occupation
self.atom = atom
self.atom_n = int(atom_n)
self.type = self.__class__.__name__
def __repr__(self):
return f'<{self.type} {self.atom}{self.atom_n}>'
def __str__(self):
return f'{self.type:4} {self.occupation:>7.5f} {self.atom_n:>3} {self.atom:2}'
class LP(Orbital):
""" Lone Pair """
pass
class CR(Orbital):
"""Core Orbital"""
pass
class LV(Orbital):
""" TODO: Figure out what type of orbital this is """
pass
class RY(Orbital):
"""Rydberg Orbital"""
pass
class RYs(Orbital):
"""Rydberg* Orbital"""
    def __init__(self, occupation, atom, atom_n):
        super().__init__(occupation, atom, atom_n)
        self.type = "RY*"
class NBO(Orbital):
"""
Natural Bond Orbital
"""
def __init__(self, occupation, atom1, atom1_n, atom2, atom2_n, hybrids, densities):
"""
:param occupation: orbital occupation
:param atom1: the first atom in the NBO
:param atom1_n: index of the first atom
:param atom2: the second atom in the NBO
:param atom2_n: index of the second atom
:param hybrids: hybridization of both atoms
:param densities: densities of both atoms
"""
super().__init__(occupation, atom1, atom1_n)
self.atom2 = atom2
self.atom2_n = int(atom2_n)
self.hybrids = hybrids
self.densities = densities
    def __repr__(self):
        return f'<NBO {self.atom}{self.atom_n}--{self.atom2}{self.atom2_n}>'
    def __str__(self):
        return f'{self.type:4} {self.occupation:>7.5f} {self.atom_n:>3} {self.atom:2}--{self.atom2_n:>3} {self.atom2:2}'
class NBOs(NBO):
    """Antibonding Natural Bond Orbital (BD*)"""
    def __init__(self, occupation, atom1, atom1_n, atom2, atom2_n, hybrids, densities):
        super().__init__(occupation, atom1, atom1_n, atom2, atom2_n, hybrids, densities)
        self.type = "NBO*"
class NBOSet:
"""Natural Bond Orbital class"""
def __init__(self, file_iter):
"""
:param file_iter: file to be read from an output file
"""
self.orbitals = self.read(file_iter)
def __str__(self):
out = 'Index Type Occup atom Other info \n'
for i, nbo in enumerate(self.orbitals):
out += f'{i:>5} {nbo}\n'
return out
def bond_orders(self):
"""
Determines the bond orders of all bonds based on (occ x BD - occ x BD*)/2
"""
bos = defaultdict(int)
for orbital in self.orbitals:
if isinstance(orbital, NBOs):
bos[(orbital.atom_n, orbital.atom2_n)] -= orbital.occupation/2
elif isinstance(orbital, NBO):
bos[(orbital.atom_n, orbital.atom2_n)] += orbital.occupation/2
return bos
@staticmethod
def read(file_iter):
"""
Read the Natural Bond Orbital Analysis
1. (1.92050) BD ( 1)Fe 1- C 2
( 32.17%) 0.5672*Fe 1 s( 33.10%)p 0.00( 0.02%)d 2.02( 66.88%)
f 0.00( 0.00%)
0.0000 0.0000 -0.0044 0.5753 0.0022
...
36. (2.00000) CR ( 1)Fe 1 s(100.00%)
1.0000 -0.0000 0.0000 -0.0000 0.0000
...
72. (1.79971) LP ( 1)Fe 1 s( 0.00%)p 0.00( 0.00%)d 1.00(100.00%)
f 0.00( 0.00%)
0.0000 0.0000 -0.0000 0.0001 -0.0001
...
90. (0.01241) RY*( 1)Fe 1 s( 0.00%)p 1.00( 3.05%)d31.77( 96.89%)
f 0.02( 0.06%)
0.0000 0.0000 0.0000 0.0000 0.0005
...
618. (0.67669) BD*( 1)Fe 1- C 2
( 67.83%) 0.8236*Fe 1 s( 33.10%)p 0.00( 0.02%)d 2.02( 66.88%)
f 0.00( 0.00%)
...
"""
file_iter = peekable(iter(file_iter))
for line in file_iter:
if line == ' (Occupancy) Bond orbital / Coefficients / Hybrids\n':
break
try:
file_iter.peek()
except StopIteration:
raise Exception('Could not find NBO section.')
single_center = {'CR': CR, 'LP': LP, 'LV': LV, 'RY': RY, 'RY*': RYs}
orbitals = []
while line:
            # Start of a new block for parsing
            if re.search(r'\s*\d+\. \(', line):
                idx, occup, nbo_type, *other = line.split()
                idx = int(idx[:-1])
                occup = float(occup[1:-1])
                nbo_type = nbo_type.strip('(')
                atom_re = r' ?(\w{1,2})\s+(\d+)'
                hybridicity_regex = r'(\w)\s*(\d+\.\d+)\(\s*(\d+\.\d+)'
if nbo_type in ['BD', 'BD*']:
"""
4. (1.99999) BD ( 1) H 1- O 2
( 41.34%) 0.6429* H 1 s(100.00%)
1.0000
( 58.66%) 0.7659* O 2 s( 12.16%)p 7.22( 87.84%)
0.0000 0.3487 0.0000 -0.0650 -0.9350
"""
regex = fr'\( ?(\d+)\){atom_re}-{atom_re}'
number, atom1, atom1_n, atom2, atom2_n = re.search(regex, line).groups()
regex = fr'\(\s*(\d+\.\d+)%\)\s+(-?\d\.\d+)\*{atom_re}\s+(\w)\(\s*(\d+\.\d+)'
# For each atom block within the BD/BD*
# while not re.search('\s*\d+\.\s', file_iter.peek()):
hybrids = []
densities = [] # TODO: Figure out what this actually is
# TODO: Adapt for three atom bonds?
for i in range(2):
line = next(file_iter)
percent, val, atom, atom_n, orbital1, percent = re.search(regex, line).groups()
assert (atom in [atom1, atom2]) and (atom_n in [atom1_n, atom2_n])
hybrids.append([(orbital1, 1.0, float(percent))])
finder = lambda l: re.findall(hybridicity_regex, l)
matches = finder(line)
while matches:
for orbital, hybridicity, percent in matches:
hybrids[i].append((orbital, float(hybridicity), float(percent)))
matches = finder(file_iter.peek())
if not matches:
break
next(file_iter)
densities.append([])
line = file_iter.peek()
while line[:40] == ' '*40:
                            densities[i] += map(float, re.findall(r'-?\d\.\d+', line))
next(file_iter)
line = file_iter.peek()
orbitals.append(NBO(occup, atom1, atom1_n, atom2, atom2_n, hybrids, densities)) # [idx, occup, nbo_type, int(number), atom1, int(atom1_n), atom2, int(atom2_n), hybrids, densities])
elif nbo_type in single_center:
"""90. (0.01241) RY*( 1)Fe 1 s( 0.00%)p 1.00( 3.05%)d31.77( 96.89%)"""
regex = fr'\( ?(\d+)\){atom_re}\s+(\w)\(\s*(\d+\.\d+)'
try:
number, atom, atom_n, orbital1, percent = re.search(regex, line).groups()
hybrids = [(orbital1, 1.0, float(percent))]
finder = lambda l: re.findall(hybridicity_regex, l)
matches = finder(line) + finder(file_iter.peek())
for orbital, hybridicity, percent in matches:
hybrids.append((orbital, float(hybridicity), float(percent)))
except Exception as e:
raise ValueError from e
orbitals.append(single_center[nbo_type](occup, atom, atom_n))
else:
raise Exception(f'Cannot parse nbo type {nbo_type}')
line = next(file_iter).strip()
return orbitals
```
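A minimal sketch of parsing an NBO section and deriving bond orders (the output file name is hypothetical):
```python
from qgrep.population.nbo import NBOSet

with open('calc.out') as f:
    nbos = NBOSet(f.readlines())

print(nbos)  # indexed table of the parsed orbitals
for (atom1_n, atom2_n), order in sorted(nbos.bond_orders().items()):
    print(f'{atom1_n:>3}--{atom2_n:<3} {order:5.2f}')
```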
#### File: qgrep/population/orbital_pop.py
```python
import re
import warnings
import numpy as np
from re import search
from copy import deepcopy
am_types = 'spdfghi'
class OrbitalPopulation:
"""Löwdin Orbital Population class (OP for short)"""
    def __init__(self, file_name='', orb_list=None, method='lowdin'):
        if file_name:
            self.orb_list = self.read(file_name, method=method)
elif orb_list is not None:
self.orb_list = orb_list
else:
self.orb_list = []
def __eq__(self, other):
return self.orb_list == other.orb_list
def __iter__(self):
for orb in self.orb_list:
yield orb
def __len__(self):
return len(self.orb_list)
def __getitem__(self, index):
return self.orb_list[index]
def __setitem__(self, index, value):
if not isinstance(value, Orbital):
raise SyntaxError(f'Must be an Orbital, got: {type(value)}')
self.orb_list[index] = value
def __str__(self):
return '\n\n'.join([f'{orb}' for orb in self.orb_list])
    def __sub__(self, other):
        # TODO: Fix MOrbital indexing problem
        if len(self) != len(other):
            warnings.warn(f'Differing number of orbitals ({len(self)} != {len(other)}), '
                          'output will be truncated')
        min_len = min(len(self), len(other))
        orb_list = [s - o for s, o in zip(self[:min_len], other[:min_len])]
        return OrbitalPopulation(orb_list=orb_list)
def csv(self):
return '\n\n'.join([orb.csv() for orb in self.orb_list])
def write(self, file_name, format='str'):
"""
Write to a file
"""
if format == 'str':
out = f'{self}'
elif format == 'csv':
out = self.csv()
elif format == 'latex':
out = '\n\n'.join([orb.latex() for orb in self.orb_list])
else:
raise SyntaxError(f'Invalid write format: {format}')
if file_name == 'stdout':
print(out)
else:
with open(file_name, 'w') as f:
f.write(out)
def sorted(self, key='contribution'):
"""
Generates a sorted ROP
"""
if key == 'index':
sort_key = lambda x: x.index
elif key == 'atom':
sort_key = lambda x: x.atom
elif key == 'ao':
sort_key = lambda x: x.ao
elif key == 'contribution':
sort_key = lambda x: x.val
elif key == 'spin':
sort_key = lambda x: x.spin
else:
raise SyntaxError(f'Invalid key given to sorted: {key}')
orb_list = []
for mo in self.orb_list:
contribs = sorted(mo.contributions, key=sort_key, reverse=True)
orb_list.append(MOrbital(mo.index, mo.spin, mo.energy, mo.occupation, contribs))
return OrbitalPopulation(orb_list=orb_list)
@property
def homo(self):
"""
Returns the index of the HOMO
Does not work for UHF (the HOMO is not well defined)
WARNING: 0-indexed
"""
for i, orb in enumerate(self):
if orb.occupation < 2:
return i - 1
return None
@property
def lumo(self):
"""
Returns the index of the LUMO
WARNING: 0-indexed
"""
for i, orb in enumerate(self):
if orb.occupation == 0:
return i
return None
@property
def somo(self):
"""
Returns the indices of the SOMO's
WARNING: 0-indexed
"""
somos = []
for i, orb in enumerate(self):
if orb.occupation == 1:
somos.append(i)
return somos
def append(self, orbital):
"""
Append another orbital
"""
if not isinstance(orbital, MOrbital):
raise SyntaxError(f'You may only append Orbitals, got: type{orbital}')
self.orb_list.append(orbital)
def atom_contract(self):
"""
Contracts all atom AO_Contributions together (i.e. adds)
"""
return OrbitalPopulation(orb_list=[orb.atom_contract() for orb in self])
def am_contract(self):
"""
Contracts all AO_Contributions of the same am together (i.e. adds)
"""
return OrbitalPopulation(orb_list=[orb.am_contract() for orb in self])
def crop(self, max_num=5, min_num=2, cutoff=5):
"""
Make an ROP that removes the smallest contributors
"""
orb_list = []
for mo in self.orb_list:
contribs = []
for contrib in sorted(mo.contributions, key=lambda x: x.val, reverse=True):
if contrib.val < cutoff and len(contribs) >= min_num:
break
contribs.append(contrib)
if len(contribs) == max_num:
break
orb_list.append(MOrbital(mo.index, mo.spin, mo.energy, mo.occupation, contribs))
return OrbitalPopulation(orb_list=orb_list)
def range(self, low, high):
"""
Make an OP with a restricted range of orbitals
"""
return OrbitalPopulation(orb_list=self.orb_list[low:high])
@staticmethod
def read(file_name, method='lowdin'):
"""Read the orbital populations"""
if file_name.split('.')[-1] == 'csv':
return OrbitalPopulation._read_csv(file_name, method=method)
else:
return OrbitalPopulation._read_orca(file_name, method=method)
@staticmethod
def _read_orca(file_name, method='lowdin'):
"""Löwdin
------------------------------------------
LOEWDIN REDUCED ORBITAL POPULATIONS PER MO
-------------------------------------------
THRESHOLD FOR PRINTING IS 0.1%
SPIN UP
0 1 2 3 4 5
-482.78209 -235.47558 -62.42906 -56.24347 -56.24238 -56.24235
1.00000 1.00000 1.00000 1.00000 1.00000 1.00000
-------- -------- -------- -------- -------- --------
0 Mn s 0.0 100.0 0.0 0.0 0.0 0.0
25 Br s 100.0 0.0 100.0 0.0 0.0 0.0
25 Br pz 0.0 0.0 0.0 0.0 100.0 0.0
25 Br px 0.0 0.0 0.0 1.5 0.0 98.5
25 Br py 0.0 0.0 0.0 98.5 0.0 1.5
...
Three blank lines
"""
orb_list = []
spin = None
lowdin_re = r'''LOEWDIN REDUCED ORBITAL POPULATIONS PER MO.*?\n\n\n'''
if method == 'lowdin':
orb_pop_re = lowdin_re
else:
raise NotImplementedError('Only Löwdin Reduced Orbital Population Analysis is implemented')
with open(file_name) as f:
output = f.read()
matches = re.findall(orb_pop_re, output, re.MULTILINE + re.DOTALL)
first = True
if matches:
match = matches[-1].strip()
blocks = match.split('\n\n')
for block in blocks:
lines = block.splitlines()
# Remove excess from the first block
if first:
lines = lines[3:]
first = False
# If open shell, an extra line is printed
if lines[0] == 'SPIN UP':
spin = 1
lines = lines[1:]
elif lines[0] == 'SPIN DOWN':
spin = -1
lines = lines[1:]
# Parse out the header
indexes = map(int, lines[0].split())
orb_es = map(float, lines[1].split())
occs = map(float, lines[2].split())
# Generate orbitals (occupations added in next section)
orbs = [MOrbital(idx, spin, orb_e, occ) for idx, orb_e, occ in zip(indexes, orb_es, occs)]
# Parse out the orbital contributions
for line in lines[4:]:
index, atom, ao, *vals = line.split()
index = int(index)
vals = map(float, vals)
for i, val in enumerate(vals):
# If there is occupation, this deletes all the appearances of 0.0
# Due to rounding, the occupations will not add up to 100
if val > 0:
orbs[i].contributions.append(AO_Contrib(index, atom, ao, val, spin))
orb_list += orbs
else:
raise Exception('Unable to find the start of Reduced Orbital Population analysis')
return orb_list
@staticmethod
def _read_csv(file_name, method='lowdin'):
"""Read the CSV output by the OrbitalPopulation class"""
with open(file_name) as f:
csv = f.read()
orb_list = []
for block in csv.split('\n\n'):
lines = block.strip().splitlines()
mo_index, spin, orb_e, occ = lines[0].split(',')
mo_index, orb_e, occ = int(mo_index), float(orb_e), round(float(occ))
aocs = []
for line in lines[1:]:
index, atom, ao, val = line.split(',')
aocs.append(AO_Contrib(int(index), atom.strip(), ao.strip(), float(val), spin))
orb_list.append(MOrbital(mo_index, spin, orb_e, occ, aocs))
return orb_list
class MOrbital:
"""
Simple orbital class that holds the contributions from AOs as well as a
little more necessary information
"""
def __init__(self, index=0, spin=None, energy=0, occupation=0, contributions=None):
self.index = index
self.spin = spin
self.energy = energy
self.occupation = occupation
self.contributions = contributions if contributions is not None else []
def __eq__(self, other):
"""
Checks if all values are equal
"""
if self.index == other.index \
and np.isclose(self.energy, other.energy) \
and np.isclose(self.occupation, other.occupation) \
and len(self) == len(other):
for s, o in zip(self.contributions, other.contributions):
if not s == o:
return False
return True
return False
def __len__(self):
return len(self.contributions)
def __repr__(self):
return f'<MO {self.index}{self.gspin} {self.energy: >8.5f} [{self.occupation}]>'
def __str__(self):
contrib_str = '\n'.join([f'{contrib}' for contrib in self.contributions])
return f'{self.index: >2d}{self.gspin} {self.energy: >8.5f} {self.occupation:>3.2f}\n{contrib_str}'
def __sub__(self, other):
        if len(self) != len(other):
            warnings.warn('The MOrbitals are of different lengths.')
# raise ValueError('The MOrbitals are of different lengths.')
min_len = min(len(self), len(other))
index = self.index if self.index == other.index else 0
energy = self.energy - other.energy
occupation = self.occupation - other.occupation
contributions = []
for s, o in zip(self.contributions[:min_len], other.contributions[:min_len]):
contributions.append(s - o)
# Only appends from one list (the other is maxed out and returns an empty list)
contributions += self.contributions[min_len:] + other.contributions[min_len:]
spin = self.spin if self.spin == other.spin else None
return MOrbital(index, spin, energy, occupation, contributions)
@property
def gspin(self):
if self.spin == 1:
return 'α'
elif self.spin == -1:
return 'β'
return ''
def csv(self):
ao_contrib_str = '\n'.join([ao_contrib.csv() for ao_contrib in self.contributions])
return f'{self.index: >2d}, {self.gspin}, {self.energy: > 7.5f}, {self.occupation:>3.2f}\n{ao_contrib_str}'
def latex(self):
"""
Make into a latex tabular
"""
aoc_latex = f'{self.index:>2d} {self.energy: > 6.4f} {self.occupation:>3.2f}\n'
aoc_latex += '\\begin{tabular}{r l r r}\n Index & Atom & AO & val \\\\ \\hline\n'
for aoc in self.contributions:
aoc_latex += f'{aoc.index:>6d} & {aoc.atom:<4s} & {aoc.ao:<3s} & {aoc.val:>4.1f} \\\\\n'
aoc_latex += '\\end{tabular}'
return aoc_latex
def atom_contract(self):
"""
Contracts all atom AO_Contributions together (i.e. adds)
"""
# Dictionary of contracted ao_contributions
atoms = {}
for aoc in self.contributions:
index, val = aoc.index, aoc.val
if index in atoms:
atoms[index].val += val
else:
aoc = deepcopy(aoc)
aoc.ao = ''
atoms[index] = aoc
# Sort by atom index
contribs = sorted(atoms.values(), key=lambda x: x.index)
return MOrbital(self.index, None, self.energy, self.occupation, contribs)
def am_contract(self):
"""
Contracts all AO_Contributions of the same am together (i.e. adds)
"""
# Dictionary of contracted ao_contributions
atoms = {}
for aoc in self.contributions:
# First non-number corresponds to the am
index, am, val = aoc.index, aoc.ao[0], aoc.val
if (index, am) in atoms:
atoms[(index, am)].val += val
else:
aoc = deepcopy(aoc)
aoc.ao = am
atoms[(index, am)] = aoc
# Sort by atom index and then am_type
key = lambda x: (x.index, am_types.index(x.ao[0]))
contribs = sorted(atoms.values(), key=key)
return MOrbital(self.index, None, self.energy, self.occupation, contribs)
def atom_sum(self, atom):
"""
Sum over all the contributions from an atom
"""
val = 0
if isinstance(atom, str):
for ao_contrib in self.contributions:
if ao_contrib.atom == atom:
val += ao_contrib.val
elif isinstance(atom, int):
for ao_contrib in self.contributions:
if ao_contrib.index == atom:
val += ao_contrib.val
else:
raise SyntaxError(f'Atom specifier must be either an int or str, got: {type(atom)}')
return val
def orbital_type_sum(self, atom, am_type):
"""
Sum over all the contributions from am_type on the specified atom
"""
if am_type not in am_types:
raise Exception(f'Invalid am_type, got: {am_type}, expected {am_types}')
val = 0
if isinstance(atom, str):
for ao_contrib in self.contributions:
if ao_contrib.atom == atom and ao_contrib.ao[0] == am_type:
val += ao_contrib.val
elif isinstance(atom, int):
for ao_contrib in self.contributions:
if ao_contrib.index == atom and ao_contrib.ao[0] == am_type:
val += ao_contrib.val
else:
raise SyntaxError('Atom specifier must be either an int or str.')
return val
class AO_Contrib:
"""
Simple class containing an AO and its contribution to an MOrbital
"""
def __init__(self, index, atom, ao, val, spin=None):
self.index = index
self.atom = atom
self.ao = ao
self.val = val
self.spin = spin
def __repr__(self):
return f'<AOC {self.index}{self.gspin} {self.atom} {self.ao} [{self.val}]>'
def __eq__(self, other):
"""
Use np.allclose or almost_equal???
"""
if not isinstance(other, AO_Contrib):
return False
if self.index == other.index \
and self.atom == other.atom \
and self.ao == other.ao \
and self.val == other.val:
return True
return False
def __str__(self):
return f'{self.index:>2d} {self.atom:<2s} {self.ao:<4s}: {self.val:>4.1f}'
@property
def am(self):
return search('[a-z]', self.ao).group()
@property
def gspin(self):
if self.spin == 1:
return 'α'
elif self.spin == -1:
return 'β'
return ''
def __sub__(self, other):
index = self.index if self.index == other.index else 0
atom = self.atom if self.atom == other.atom else ''
ao = self.ao if self.ao == other.ao else ''
val = self.val - other.val
return AO_Contrib(index, atom, ao, val)
def csv(self):
return f'{self.index:>2d}, {self.atom:<2s}, {self.ao:<4s}, {self.val:>4.1f}'
class Group_Contrib:
"""
Simple class containing a group of atoms and their contribution to a MOrbital
"""
def __init__(self, index, group, val):
self.index = index
self.group = group
self.val = val
def __eq__(self, other):
"""
Use np.allclose or almost_equal???
"""
if not isinstance(other, Group_Contrib):
return False
if self.index == other.index \
and self.group == other.group \
and self.val == other.val:
return True
return False
def __str__(self):
return f'{self.index:>2d} {self.group:<10}: {self.val:>4.1f}'
def __sub__(self, other):
index = self.index if self.index == other.index else 0
group = self.group if self.group == other.group else ''
val = self.val - other.val
        return Group_Contrib(index, group, val)
```
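A sketch of a typical workflow (the output file name is hypothetical):
```python
from qgrep.population.orbital_pop import OrbitalPopulation

op = OrbitalPopulation('calc.out')            # parses the last Löwdin block
print(op.homo, op.lumo)                       # 0-indexed frontier orbitals
cropped = op.atom_contract().crop(max_num=4)  # per-atom, largest contributors only
cropped.write('orbitals.csv', format='csv')
```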
#### File: scripts/qgrep/queues.py
```python
import re
import getpass
import subprocess
import os.path
from collections import OrderedDict, defaultdict
from xml.etree import ElementTree
from .helper import colors
from itertools import zip_longest
from configparser import ConfigParser
config_file = os.path.join(os.path.expanduser("~"), '.qgrepconfig')
config = ConfigParser()
config.read(config_file)
BAR = colors.purple + '│' + colors.normal
JOB_ID_LENGTH = 7
NAME_LENGTH = 22
SMALL_QUEUE = 3
if 'queues' in config:
JOB_ID_LENGTH = max(config['queues'].getint('job_id_length', 7), 4)
NAME_LENGTH = max(config['queues'].getint('name_length', 22), 8)
SMALL_QUEUE = max(config['queues'].getint('small_queue', 3), 1)
COLUMN_WIDTH = 11 + JOB_ID_LENGTH + NAME_LENGTH
class Queues:
def __init__(self, omit=None):
self.omit = omit if omit else []
self.queues = {}
self.grid_engine, self.tree = self.qxml()
self.find_sizes(omit=self.omit)
self.parse_tree(omit=self.omit)
def __str__(self):
"""
Make the tree into a printable form
"""
return self.print()
def __eq__(self, other):
"""
Check if queues are equivalent
"""
if len(self.queues) != len(other.queues):
return False
for my_queue, other_queue in zip(self.queues.values(), other.queues.values()):
if my_queue != other_queue:
return False
return True
def __ne__(self, other):
return not self == other
# noinspection PyPep8
def print(self, numjobs=50, person=None):
"""
Print the queues in a nice table
"""
# Form header (without small queues)
large_num = sum([size > SMALL_QUEUE for size in self.sizes.values()])
# Horizontal line (uses box drawing characters)
top_line = '\033[95m' + '┌' + '┬'.join(['─'*(COLUMN_WIDTH - 1)]*large_num) + '┐' + '\033[0m\n'
mid_line = '\033[95m' + '├' + '┼'.join(['─'*(COLUMN_WIDTH - 1)]*large_num) + '┤' + '\033[0m\n'
bot_line = '\033[95m' + '└' + '┴'.join(['─'*(COLUMN_WIDTH - 1)]*large_num) + '┘' + '\033[0m\n'
out = top_line
name_form = '{} ({:2d}/{:2d}/{:2d})'
# Print a nice header
for name, queue in sorted(self.queues.items()):
# Print small queues near the end
if queue.size <= SMALL_QUEUE:
continue
out += BAR + ('{:^' + f'{COLUMN_WIDTH-1}' + '}').format(name_form.format(name, queue.used, queue.avail, queue.queued))
out += BAR + '\n' + mid_line
header = BAR + 'ID'.center(JOB_ID_LENGTH) + ' USER ' + 'Job Name'.center(NAME_LENGTH) + ' ST'
out += header*large_num + BAR + '\n' + mid_line
if person is True:
person = getpass.getuser()
# Remove small queues for later use
job_list = []
small_queues = []
for name, queue in sorted(self.queues.items()):
if queue.size <= SMALL_QUEUE:
if queue.size > 0:
small_queues.append(queue)
continue
job_list.append(queue.person_jobs(person).values())
blank = BAR + ' '*(COLUMN_WIDTH-1)
for i, job_row in enumerate(zip_longest(*job_list)):
if i >= numjobs:
# Add how many more jobs are running in each queue
for queue in job_list:
if len(queue) > numjobs:
out += BAR + ('{:^' + f'{COLUMN_WIDTH + 7}' + '}').format(f'\033[1m{len(queue) - numjobs: >+5} jobs\033[0m')
else:
out += blank
out += BAR + '\n'
break
for job in job_row:
out += BAR + f'{job}' if job else blank
out += BAR + '\n'
out += mid_line if small_queues else bot_line
# Display small queues below other queues
for i, queue in enumerate(small_queues):
out += queue.print_inline(len(self.sizes) - large_num, None, person) + '\n'
out += mid_line if i < len(small_queues) - 1 else bot_line
# Remove newline character
out = out[:-1]
return out
@staticmethod
def qxml():
"""
Produce an xml ElementTree object containing all the queued jobs
Sample output from SGE:
<?xml version='1.0'?>
<job_info xmlns:xsd="http://gridengine.sunsource.net/source/browse/*checkout*/gridengine/source/dist/util/resources/schemas/qstat/qstat.xsd?revision=1.11">
<queue_info>
<Queue-List>
<name><EMAIL></name>
...
</Queue-List>
<Queue-List>
<name><EMAIL></name>
...
<job_list state="running">
<JB_job_number>113254</JB_job_number>
<JB_name>optg</JB_name>
<JB_owner>mullinax</JB_owner>
<state>r</state>
<JAT_start_time>2015-05-11T15:52:49</JAT_start_time>
<hard_req_queue>large.q<hard_req_queue>
...
</job_list>
</Queue-List>
...
</queue_info>
<job_info>
<job_list state="pending">
<JB_job_number>112742</JB_job_number>
<JB_name>CH3ONO2</JB_name>
<JB_owner>meghaanand</JB_owner>
<state>qw</state>
<JB_submission_time>2015-05-08T16:30:25</JB_submission_time>
<hard_req_queue>large.q<hard_req_queue>
...
</job_list>
</job_info>
...
</job_info>
Sample output from PBS:
<Data>
<Job>
<Job_Id>77816.icqc</Job_Id>
<Job_Name>e7_cas2_ddci3_tighter</Job_Name>
<Job_Owner>sivalingam@icmaster1</Job_Owner>
<resources_used>
<cput>21002:04:52</cput>
<energy_used>0</energy_used>
<mem>60978424kb</mem>
<vmem>73997480kb</vmem>
<walltime>2630:02:36</walltime>
</resources_used>
<job_state>R</job_state>
<queue>batch</queue>
<server>control</server>
<Checkpoint>u</Checkpoint>
<ctime>1488149683</ctime>
<Error_Path>zeusln1:/home/sivalingam/s4/e7_cas2_ddci3_tighter.err</Error_Path>
<exec_host>izeusbn13/8-11+izeusbn12/11-12+izeusbn11/12-13</exec_host>
<Hold_Types>n</Hold_Types>
<Join_Path>oe</Join_Path>
<Keep_Files>n</Keep_Files>
<Mail_Points>a</Mail_Points>
<mtime>1488149684</mtime>
<Output_Path>zeus1:/home/sivalingam/s4/e7_cas2_ddci3_tighter.o77816</Output_Path>
<Priority>0</Priority>
<qtime>1488149683</qtime>
<Rerunable>False</Rerunable>
<Resource_List>
<nodect>8</nodect>
<nodes>8</nodes>
<walltime>8760:00:00</walltime>
</Resource_List>
<session_id>3716</session_id>
<Shell_Path_List>/bin/zsh</Shell_Path_List>
<euser>sivalingam</euser>
<egroup>gl-ag orca</egroup>
<queue_type>E</queue_type>
<etime>1488149683</etime>
<submit_args>-j oe -e /home/sivalingam/s4/e7_cas2_ddci3_tighter.err -N e7_cas2_ddci3_tighter -r n e7_cas2_ddci3_tighter.job</submit_args>
<start_time>1488149684</start_time>
<Walltime>
<Remaining>22067782</Remaining>
</Walltime>
<start_count>1</start_count>
<fault_tolerant>False</fault_tolerant>
<job_radix>0</job_radix>
<submit_host>zeus1</submit_host>
</Job>
...
</Data>
"""
cmds = [('sge', 'qstat -u "*" -r -f -xml'), ('pbs', 'qstat -x -t')]
for grid_engine, cmd in cmds:
try:
xml = subprocess.check_output(cmd, shell=True, stderr=subprocess.DEVNULL)
return grid_engine, ElementTree.fromstring(xml)
except FileNotFoundError as e:
raise Exception("Could not find qstat")
except subprocess.CalledProcessError as e:
pass
raise Exception('Could not generate XML, only PBS and SGE currently supported.')
def parse_tree(self, omit=None):
"""
Parse the xml tree from qxml
"""
omit = omit if omit else []
if self.grid_engine == 'sge':
self.queues = OrderedDict()
for child in self.tree:
# Running jobs are arranged by node/queue
if child.tag == 'queue_info':
for node in child:
# <Queue-List>
# <name><EMAIL></name>
name = node.find('name').text.split('@')[0]
# If we don't want to display the queue
if name in omit:
continue
if name not in self.queues:
self.queues[name] = Queue(self.sizes[name], name)
for job_xml in node.iterfind('job_list'):
job = Job(job_xml, self.grid_engine)
self.queues[name].running[job.id] = job
# Queued jobs
elif child.tag == 'job_info':
for job_xml in child:
job = Job(job_xml, self.grid_engine)
name = job.queue.split('@')[0]
if name in omit:
continue
if name not in self.queues:
self.queues[name] = Queue(self.sizes[name], name)
self.queues[name].queueing[job.id] = job
elif self.grid_engine == 'pbs':
self.queues = OrderedDict()
for job_xml in self.tree:
job = Job(job_xml, self.grid_engine)
queue = job.queue
if job.state == 'c' or queue in omit:
continue
if queue not in self.queues:
self.queues[queue] = Queue(self.sizes[queue], queue)
if job.state == 'r':
self.queues[queue].running[job.id] = job
else:
try:
self.queues[queue].queueing[job.id] = job
except:
print(job)
raise
else:
raise Exception('Could not read XML, only PBS and SGE currently supported.')
def find_sizes(self, omit=None):
"""
Find the sizes of the queues
"""
omit = omit if omit else []
self.sizes = {}
if self.grid_engine == 'sge':
"""Sample output from 'qstat -g c':
CLUSTER QUEUE CQLOAD USED RES AVAIL TOTAL aoACDS cdsuE
--------------------------------------------------------------------------------
all.q -NA- 0 0 0 0 0 0
gen3.q 0.00 0 0 0 16 0 16
gen4.q 0.26 31 0 13 48 0 4
gen5.q 0.50 4 0 0 4 0 0
gen6.q 0.39 19 0 0 19 0 1
"""
qstat_queues_cmd = "qstat -g c"
out = subprocess.check_output(qstat_queues_cmd, shell=True)
for line in out.splitlines()[2:]:
line = line.decode('UTF-8')
if 'all.q' == line[:5]:
continue
queue, cqload, used, res, avail, total, aoacds, cdsue = line.split()
if queue not in omit:
self.sizes[queue] = int(used) + int(avail)
elif self.grid_engine == 'pbs':
"""sample output from pbsnodes:
izeussn153
state = job-exclusive
power_state = Running
np = 16
properties = small
ntype = cluster
jobs = 0-15/86886.icqc
status = rectime=1498123346,macaddr=40:f2:e9:c6:22:60,cpuclock=Fixed,varattr=,jobs=86886.icqc(cput=65375153,energy_used=0,mem=118685472kb,vmem=133127908kb,walltime=4154720,session_id=3357),state=free,netload=75804699166624,gres=,loadave=16.00,ncpus=16,physmem=131338172kb,availmem=176492292kb,totmem=265555896kb,idletime=10974874,nusers=1,nsessions=1,sessions=3357,uname=Linux zeussn153 3.10.0-229.el7.x86_64 #1 SMP Fri Mar 6 11:36:42 UTC 2015 x86_64,opsys=linux
mom_service_port = 15002
mom_manager_port = 15003
"""
out = subprocess.check_output('pbsnodes', shell=True).decode('utf-8').strip()
for job in out.split('\n\n'):
try:
queue = re.search('properties = (.*)', job).group(1)
except AttributeError as e:
queue = 'batch'
if queue == 'big':
queue = 'batch'
if queue not in omit:
if queue in self.sizes:
self.sizes[queue] += 1
else:
self.sizes[queue] = 1
else:
raise Exception('Could not read queue sizes, only PBS and SGE currently supported.')
class Queue:
"""
A class that contains Jobs that are running and queued
"""
def __init__(self, size, name='', running=None, queueing=None):
"""
Initialize a queue with its jobs
:param running: an OrderedDict of Jobs that are running
:param queueing: an OrderedDict of Jobs that are queueing
"""
self.size = size
self.name = name
if running is None:
self.running = OrderedDict()
else:
self.running = running
if queueing is None:
self.queueing = OrderedDict()
else:
self.queueing = queueing
def __eq__(self, other):
if len(self) != len(other):
return False
for s, o in zip(self.jobs.values(), other.jobs.values()):
if s != o:
return False
return True
def __ne__(self, other):
return not self == other
def __len__(self):
return len(self.running) + len(self.queueing)
def __list__(self):
"""Make a list of all the Jobs in the queue"""
return list(self.running.values()) + list(self.queueing.values())
def __str__(self):
"""Make a string with each job on a new line"""
return self.print()
def print(self, numlines=50, person=False):
if person:
jobs = self.person_jobs(person)
else:
jobs = self.jobs
out = '\n'.join(list(map(str, jobs.values()))[:numlines])
if numlines < len(self):
out += f'\n+{len(self) - numlines} jobs'
return out
def print_inline(self, width, max_num=None, person=False):
"""Print jobs inline"""
if person:
jobs = self.person_jobs(person)
else:
jobs = self.jobs
used_avail_queued = f'{self.name} ({self.used:2d}/{self.avail:2d}/{self.queued:2d})'
out = BAR + ('{:^' + f'{COLUMN_WIDTH-1}' + '}').format(used_avail_queued) + BAR
for i, job in enumerate(jobs.values()):
if not (max_num is None) and i >= max_num:
break
if not (i + 1) % width:
out += f'\n {BAR}'
out += f'{job} {BAR}'
# Add blank spots to fill out to end
if (len(jobs) + 1) % width:
out += (' '*COLUMN_WIDTH*(width - (len(jobs) + 1) % width))[:-1] + BAR
return out
def set(self, job_id, job, position):
"""
Set a job in the specified position (running or queueing)
"""
if position == 'running':
self.running[job_id] = job
elif position == 'queueing':
self.queueing[job_id] = job
else:
raise Exception("Invalid position, must be either running or"
"queueing.")
@property
def used(self):
return len(self.running)
@property
def avail(self):
return self.size - self.used
@property
def queued(self):
return len(self.queueing)
@property
def jobs(self):
"""
Makes an OrderedDict of all the running and queueing Jobs
"""
ret = OrderedDict()
# OrderedDicts cannot be readily combined
for k, v in sorted(self.running.items()):
ret[k] = v
for k, v in sorted(self.queueing.items()):
ret[k] = v
return ret
def person_jobs(self, person):
"""Return an OrderedDict of Jobs with the specified owner"""
if not person:
return self.jobs
ret = OrderedDict()
for job in self.jobs.values():
if job.owner == person:
ret[job.id] = job
return ret
class Job:
"""
A simple class that contains important information about a job and prints it
nicely
"""
def __init__(self, job_xml, grid_engine):
self.id, self.name, self.state, self.owner, self.queue, self.workdir = Job.read_job_xml(job_xml, grid_engine)
# self.id, self.name, self.state, self.owner, self.queue, self.workdir, (self.nodect, self.nodes) = Job.read_job_xml(job_xml, grid_engine)
def __eq__(self, other):
if self.id == other.id and \
self.name == other.name and \
self.state == other.state and \
self.owner == other.owner and \
self.queue == other.queue:
return True
return False
def __ne__(self, other):
return not self == other
def __str__(self):
"""Print a short description of the job, with color"""
job_form = '{:>' + f'{JOB_ID_LENGTH}' + 'd} {:<5s} {:<' + f'{NAME_LENGTH}' + 's} {}{:2s}' + colors.normal
# Color queue status by type, use red if unrecognized
job_colors = defaultdict(lambda: colors.red, {'r': colors.green, 'qw': colors.blue})
# Bold the person's jobs
if self.owner == getpass.getuser():
owner = colors.bold + f'{self.owner:5.5s}' + colors.normal
else:
owner = f'{self.owner:5.5s}'
return job_form.format(int(self.id), owner, self.name[:NAME_LENGTH],
job_colors[self.state], self.state[:2]) # + str(self.nodes) + ', ' + str(self.nodect)
@staticmethod
def read_job_xml(job_xml, grid_engine):
"""
Read the xml of qstat and find the necessary variables
"""
if grid_engine == 'sge':
jid = int(job_xml.find('JB_job_number').text)
tasks = job_xml.find('tasks')
# If there are multiple tasks with the same id, make the id a float
# with the task number being the decimal
if tasks is not None:
# If it is a range of jobs, e.g. 17-78:1, just take the first
task = tasks.text.split('-')[0] # If not a range, this does nothing
# SGE is being cute and comma separates two numbers if sequential
task = task.split(',')[0]
jid += int(task) / 10 ** len(task)
name = job_xml.find('JB_name').text
state = job_xml.get('state').lower()
owner = job_xml.find('JB_owner').text
state2 = job_xml.find('state').text
try:
queue = job_xml.find('hard_req_queue').text
except AttributeError as e:
queue = 'debug.q'
if (state == 'running' and state2 != 'r') or \
(state == 'pending' and state2 != 'qw'):
pass
            # SGE provides no workdir; pad to match the 6-tuple unpacked in __init__
            return jid, name, state2, owner, queue, None
elif grid_engine == 'pbs':
jid = job_xml.find('Job_Id').text.split('.')[0]
try:
jid = int(jid)
except ValueError as e:
# Must be part of a job_array
jid, task_id = jid[:-1].split('[')
if task_id:
jid = float(jid + '.' + task_id)
else:
# -t must not be supported
jid = int(jid)
name = job_xml.find('Job_Name').text
state = job_xml.find('job_state').text.lower()
owner = job_xml.find('Job_Owner').text.split('@')[0]
queue = job_xml.find('queue').text
resource_list = job_xml.find('Resource_List')
nodect, nodes = resource_list.find('nodect').text, resource_list.find('nodes').text
workdir = None
try:
variables = job_xml.find('Variable_List').text.split(',')
variables = dict(kv.split('=') for kv in variables)
workdir = variables['PBS_O_WORKDIR']
except AttributeError:
pass
return jid, name, state, owner, queue, workdir
# return jid, name, state, owner, queue, workdir, (nodect, nodes)
else:
raise Exception('Could not read XML, only PBS and SGE currently supported.')
```
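Typical interactive use on a machine where `qstat` (SGE or PBS) is available; the omitted queue name is an example:
```python
from qgrep.queues import Queues

queues = Queues(omit=['debug.q'])
print(queues.print(numjobs=25, person=True))  # person=True shows only the current user's jobs
```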
#### File: scripts/qgrep/redundant.py
```python
import re
from more_itertools import collapse
class RedundantInternals:
"""Redundant Internal Coordinates"""
def __init__(self, lines):
        self.bond_vals, self.angle_vals, self.linear_vals, self.dihedral_vals = RedundantInternals.read(lines)
def print(self, bonds=True, angles=False, dihedrals=False):
""" Print values """
out = ''
if bonds:
for *atoms, final in self.bond_vals:
f = '-'.join(['{:>3} {:<2}']*int(len(atoms))) + f' = {final:> 5.4f}\n'
out += f.format(*collapse(atoms))
if angles:
for val in self.angle_vals + self.linear_vals:
pass
if dihedrals:
for val in self.dihedral_vals:
pass
return out
def diffs(self, other, thresh=(3, 2, 2)):
"""
Difference between two sets of redundant internals
Warning, must be exactly the same numbering of the geometry
:param thresh: list of threshold values for comparison printing
if thresh[i] = None, no cutting will be done
"""
def check(vals1, vals2, thresh=3):
mismatch = 0
for (*atoms1, final1), (*atoms2, final2) in zip(vals1, vals2):
if atoms1 != atoms2:
mismatch += 1
continue
if thresh is None or abs(final2 - final1) > 10**-thresh:
f = '-'.join(['{:>3} {:<2}']*int(len(atoms1))) + (' = {:> 5.' + f'{thresh}' + 'f}').format(final2 - final1)
print(f.format(*collapse(atoms1)))
return mismatch
print('Bonds')
mismatch = check(self.bond_vals, other.bond_vals, thresh=thresh[0])
print(f'Bond Mismatch: {mismatch}')
#print('\nAngles')
        #mismatch = check(self.angle_vals, other.angle_vals, thresh=thresh[1])
#print(f'Angle Mismatch: {mismatch}')
def diff_metric(self, other, metric='sad'):
mismatch = 0
diff = 0
for (*atoms1, final1), (*atoms2, final2) in zip(self.bond_vals, other.bond_vals):
if atoms1 != atoms2:
mismatch += 1
continue
# Sum of absolute deviation
if metric == 'sad':
diff += abs(final2 - final1)
print(diff)
print(f'Bond Mismatch: {mismatch}')
    @staticmethod
    def read(lines, sort=True):
"""
Parse the Redundant Internal Coordinates from an Orca output file
TODO: Make work for Dihedrals
"""
"""
---------------------------------------------------------------------------
Redundant Internal Coordinates
--- Optimized Parameters ---
(Angstroem and degrees)
Definition OldVal dE/dq Step FinalVal
----------------------------------------------------------------------------
1. B(O 1,C 0) 1.4658 -0.000017 0.0000 1.4658
37. A(C 4,C 0,C 17) 113.60 -0.000000 -0.00 113.60
92. L(C 21,C 22,N 23,C 24, 2) 179.99 -0.000002 0.00 179.99
97. D(C 2,O 1,C 0,C 4) -0.02 -0.000001 0.00 -0.02
"""
start = -1
for i, line in enumerate(reversed(lines)):
if line.strip() == '--- Optimized Parameters ---':
start = len(lines) - i + 4
break
if start == -1:
raise Exception('Cannot find the start of the redundant internals')
regex = r'\d+\.\s+(\w)\((\w+)\s+(\d+),(\w+)\s+(\d+)(,\w+\s+\d+)?(,\w+\s+\d+)?(,\s*\d+)?\)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(\d+\.\d+)'
bond_vals = []
angle_vals = []
linear_vals = []
dihedral_vals = []
for line in lines[start:]:
line = line.strip()
if line[0] == '-':
break
try:
t, atom1, idx1, atom2, idx2, atom_idx3, atom_idx4, other, old, slope, step, final = re.search(regex, line).groups()
            except AttributeError as e:
                # Line doesn't match a coordinate definition; skip it rather
                # than reusing values from the previous iteration
                continue
if atom_idx3:
atom3, idx3 = atom_idx3[1:].split()
if atom_idx4:
atom4, idx4 = atom_idx4[1:].split()
if other:
other = other[1:].strip()
if t == 'B':
pair1, pair2 = sorted(((int(idx1), atom1), (int(idx2), atom2)))
bond_vals.append([pair1, pair2, float(final)])
elif t == 'A':
pair1, pair2, pair3 = sorted(((int(idx1), atom1), (int(idx2), atom2), (int(idx3), atom3)))
angle_vals.append([pair1, pair2, pair3, float(final)])
elif t == 'L':
pair1, pair2, pair3, pair4 = sorted(((int(idx1), atom1), (int(idx2), atom2), (int(idx3), atom3), (int(idx4), atom4)))
linear_vals.append([pair1, pair2, pair3, pair4, other, float(final)])
elif t == 'D':
pair1, pair2, pair3, pair4 = sorted(((int(idx1), atom1), (int(idx2), atom2), (int(idx3), atom3), (int(idx4), atom4)))
dihedral_vals.append([pair1, pair2, pair3, pair4, float(final)])
else:
raise ValueError('Cannot identify coordinate type')
#bond_vals = sorted(bond_vals)
#angle_vals = sorted(angle_vals)
#linear_vals = sorted(linear_vals)
#dihedral_vals = sorted(dihedral_vals)
return bond_vals, angle_vals, linear_vals, dihedral_vals
```
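Comparing the internals of two optimizations (file names are hypothetical; both outputs must share the same atom numbering):
```python
from qgrep.redundant import RedundantInternals

with open('opt1.out') as f1, open('opt2.out') as f2:
    ri1 = RedundantInternals(f1.readlines())
    ri2 = RedundantInternals(f2.readlines())

print(ri1.print(bonds=True))      # formatted bond lengths
ri1.diffs(ri2, thresh=(3, 2, 2))  # prints bonds differing by more than 1e-3
ri1.diff_metric(ri2)              # sum of absolute bond-length deviations
```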
#### File: tests/cfour/cfour_unittest.py
```python
import unittest
from sys import path
path.insert(0, '../..')
from qgrep import cfour
class TestCFour(unittest.TestCase):
"""Tests the cfour class"""
def setUp(self):
"""Read in the necessary files"""
files = ['h2o.out', 'h2o.xyz']
self.files = {}
for file in files:
with open(file, 'r') as f:
self.files[file] = f.readlines()
def test_get_geom(self):
"""Testing get_geom"""
h2o_xyz = cfour.get_geom(self.files['h2o.out'], geom_type='xyz')
self.assertEqual(self.files['h2o.xyz'], h2o_xyz)
def test_check_convergence(self):
"""Testing check_convergence"""
raise NotImplementedError
def test_get_energy(self):
"""Testing get_energy"""
raise NotImplementedError
def test_get_freqs(self):
"""Testing get_freqs"""
raise NotImplementedError
def test_plot(self):
"""Testing plot"""
raise NotImplementedError
def test_convert_zmatrix(self):
"""Test convert_zmatrix"""
raise NotImplementedError
def test_get_molecule(self):
"""Testing get_molecule"""
raise NotImplementedError
if __name__ == '__main__':
unittest.main()
    # Note: unittest.main() calls sys.exit(), so the lines below never run
    # h2o_xyz = cfour.get_geom(open('h2o.out').readlines(), geom_type='xyz')
    # print(h2o_xyz)
```
#### File: scripts/tests/molecule_unittest.py
```python
import os
import unittest
import numpy as np
from sys import path
from numpy.testing import assert_almost_equal
path.insert(0, '..')
from qgrep.molecule import Molecule
class TestMolecule(unittest.TestCase):
"""Tests the Molecule class"""
def setUp(self):
"""Set up for every test"""
self.water_geom = [['H', [0, 0, 0]],
['O', [0, 0, 1]],
['H', [0, 1, 1]]]
self.water = Molecule(self.water_geom)
def test_len(self):
"""Testing __len__"""
self.assertEqual(len(self.water), 3)
def test_getsetdeleqinsert(self):
"""Test getting, setting and deleting atoms"""
self.assertEqual(self.water[0][0], 'H')
assert_almost_equal(self.water[0][1], np.array([0, 0, 0]))
del self.water[1]
self.assertEqual(self.water[1][0], 'H')
assert_almost_equal(self.water[1][1], np.array([0, 1, 1]))
self.water[0] = ['H', [0, 0, 0]]
self.water[1] = ['H', [0, -1, 1]]
self.assertEqual(self.water[1][0], 'H')
assert_almost_equal(self.water[1][1], np.array([0, -1, 1]))
self.water.insert(1, 'O', [0, 0, 1])
self.assertEqual(self.water[1][0], 'O')
assert_almost_equal(self.water[1][1], np.array([0, 0, 1]))
self.assertEqual(self.water[2][0], 'H')
assert_almost_equal(self.water[2][1], np.array([0, -1, 1]))
new_water = Molecule([['H', [0, 0, 0]], ['O', [0, 0, 1]], ['H', [0, -1, 1]]])
self.assertEqual(self.water, new_water)
def test_str(self):
"""Testing __str__"""
water_string = """\
H 0.00000000 0.00000000 0.00000000
O 0.00000000 0.00000000 1.00000000
H 0.00000000 1.00000000 1.00000000"""
self.assertEqual(str(self.water), water_string)
def test_check_atom(self):
"""Test check_atom throws errors correctly"""
atom, xyz = ['H', [0, 1, 2]]
self.assertTrue(Molecule.check_atom(atom, xyz))
self.assertRaises(SyntaxError, Molecule.check_atom, [[]], 'a')
self.assertRaises(TypeError, Molecule.check_atom, [[1, 2, 3]])
self.assertRaises(SyntaxError, Molecule.check_atom, [[0, 1, 2, 3]], 'a')
self.assertRaises(SyntaxError, Molecule.check_atom, ['H', [1]], ['a', [3]])
def test_check_geom(self):
"""Test check_geom throws errors correctly"""
self.assertTrue(Molecule.check_geom(self.water_geom))
# Zero-length geometries are valid
self.assertTrue(Molecule.check_geom([]))
self.assertRaises(ValueError, Molecule.check_geom, [[[1, 2, 3]]])
self.assertRaises(TypeError, Molecule.check_geom, ['H'], [[[0, 1, 2, 3]]])
def test_read_write_geometry(self):
"""Testing read and write"""
geom_file = 'geom.xyz.tmp'
self.water.write(geom_file, True)
mol = Molecule.read_from(geom_file)
mol.name = 'H2O'
self.assertEqual(mol.geom, self.water.geom)
mol.write(geom_file, style='latex')
latex_geom = '''\
\\begin{verbatim}
H 0.000000 0.000000 0.000000
O 0.000000 0.000000 1.000000
H 0.000000 1.000000 1.000000
\\end{verbatim}'''
with open(geom_file) as f:
out_tex = f.read()
self.assertEqual(latex_geom, out_tex)
os.remove(geom_file)
def test_com(self):
""" Test the center of mass """
water_com = np.array([0, 0.05595744, 0.94404256])
assert_almost_equal(self.water.center_of_mass(), water_com)
# Translations should shift the center of mass the same amount
translation = [7, 8, 9]
self.water.xyz += translation
assert_almost_equal(self.water.center_of_mass(), water_com + translation)
def test_moi_tensor(self):
""" Test the moment of inertia tensor """
water_moi_tensor = np.array([
[ 1.9028595, 0 , 0 ],
[ 0 , 0.9514297, -0.0563953],
[ 0 , -0.0563953, 0.9514297]
])
assert_almost_equal(self.water.moment_of_inertia_tensor(), water_moi_tensor)
# Translations should not change moi tensor
self.water.xyz += [7, 8, 9]
assert_almost_equal(self.water.moment_of_inertia_tensor(), water_moi_tensor)
def test_reorder(self):
""" Test the reordering of atoms """
w1 = self.water
w2 = w1.reorder([1, 0, 2])
assert w1[0][0] == w2[1][0]
assert all(w1[0][1] == w2[1][1])
assert w1[1][0] == w2[0][0]
assert all(w1[1][1] == w2[0][1])
assert w1[2][0] == w2[2][0]
assert all(w1[2][1] == w2[2][1])
if __name__ == '__main__':
unittest.main()
```
#### File: population/orb_pop/orb_pop_unittest.py
```python
import os
import unittest
import numpy as np
from sys import path
from numpy.testing import assert_almost_equal
path.insert(0, '../../../')
from qgrep.population.orbital_pop import (AO_Contrib, Group_Contrib, MOrbital,
OrbitalPopulation as OP)
class TestOrbPop(unittest.TestCase):
def test_read_write(self):
op = OP('H2O.dat')
ao_contrib = AO_Contrib(1, 'O', 'px', 100.0)
self.assertEqual(op.orb_list[4].contributions[0], ao_contrib)
c1 = AO_Contrib(0, 'H', 's', 19.6)
c2 = AO_Contrib(1, 'O', 'pz', 30.4)
c3 = AO_Contrib(1, 'O', 'py', 30.4)
c4 = AO_Contrib(2, 'H', 's', 19.6)
contributions = [c1, c2, c3, c4]
mo = MOrbital(2, None, -0.49905, 2, contributions)
self.assertEqual(op[2], mo)
op.write('tmp.csv', 'csv')
op_dup = OP('tmp.csv')
self.assertEqual(op, op_dup)
os.remove('tmp.csv')
# Test UKS open shell
op = OP('H2O+.dat')
ao_contrib = AO_Contrib(1, 'O', 'px', 100.0, 1)
self.assertEqual(op.orb_list[3].contributions[0], ao_contrib)
def test_homo_lumo_somo(self):
op = OP('H2O.dat')
self.assertEqual(op.homo, 4)
self.assertEqual(op.lumo, 5)
self.assertEqual(op.somo, [])
def test_atom_contract(self):
op = OP('H2O.dat')
atom_contract = op.atom_contract()
self.assertEqual(atom_contract[5].contributions[0].val, 34.1)
self.assertEqual(atom_contract[8].contributions[1].val, 11.5)
def test_am_contract(self):
op = OP('H2O.dat')
am_contract = op.am_contract()
self.assertEqual(am_contract[5].contributions[0].val, 34.1)
self.assertEqual(am_contract[8].contributions[2].val, 9.8)
def test_sub(self):
# Blank - Blank == Blank
self.assertEqual(OP() - OP(), OP())
# A - Blank == A
h2o = OP('H2O.dat')
# Uses blank list of properly indexed orbitals
blank = OP(orb_list=[MOrbital(i, None, 0, 0, []) for i in range(13)])
self.assertEqual(h2o - blank, h2o)
# A - B
h2s = OP('H2S.dat')
# TODO: fix indexing problem for subtraction
#(h2o.atom_contract() - h2s.atom_contract()).write('h2o_h2s.csv')
class TestOrbital(unittest.TestCase):
def setUp(self):
self.orb1 = MOrbital()
self.orb1_dup = MOrbital()
self.aoc1 = AO_Contrib(1, 'Mn', 'px', 0.3)
self.aoc1_dup = AO_Contrib(1, 'Mn', 'px', 0.3)
self.aoc2 = AO_Contrib(2, 'O', 'f-1', 0.5)
self.aoc3 = AO_Contrib(2, 'O', 'f+2', 0.2)
self.orb2 = MOrbital(1, None, -0.34, 1.00, [self.aoc1, self.aoc2, self.aoc3])
self.orb3 = MOrbital(1, None, -0.14, 0.50, [self.aoc2, self.aoc1, self.aoc3])
def test_init(self):
self.assertTrue(self.orb1 == self.orb1_dup)
self.assertFalse(self.orb1 == self.orb2)
self.assertEqual(self.orb2.atom_sum(2), 0.7)
self.assertEqual(self.orb2.atom_sum('F'), 0.0)
self.assertEqual(self.orb2.orbital_type_sum(1, 'd'), 0)
self.assertEqual(self.orb2.orbital_type_sum(9, 'p'), 0)
self.assertEqual(self.orb2.orbital_type_sum(2, 'f'), 0.7)
def test_sub(self):
# Blank - Blank == Blank
self.assertEqual(self.orb1 - self.orb1_dup, self.orb1)
# A - Blank == A
self.assertEqual((self.orb2 - self.orb1).contributions, self.orb2.contributions)
# A - B
aoc_1_2 = self.aoc1 - self.aoc2
aoc_2_1 = self.aoc2 - self.aoc1
aoc_3_3 = self.aoc3 - self.aoc3
orb_2_3 = MOrbital(1, None, -0.20, 0.50, [aoc_1_2, aoc_2_1, aoc_3_3])
self.assertEqual((self.orb2 - self.orb3).contributions, orb_2_3.contributions)
class TestAO_Contrib(unittest.TestCase):
def setUp(self):
self.aoc1 = AO_Contrib(1, 'Mn', 'px', 0.3)
self.aoc1_dup = AO_Contrib(1, 'Mn', 'px', 0.3)
self.aoc2 = AO_Contrib(2, 'O', 'f-1', 0.5)
self.aoc3 = AO_Contrib(2, 'O', 'f+2', 0.2)
def test_init(self):
self.assertTrue(self.aoc1 == self.aoc1_dup)
self.assertFalse(self.aoc1 == self.aoc2)
def test_sub(self):
# A - A = O contribution
aoc0 = AO_Contrib(1, 'Mn', 'px', 0.0)
self.assertEqual(self.aoc1 - self.aoc1_dup, aoc0)
aoc_1_2 = AO_Contrib(0, '', '', -0.2)
self.assertEqual(self.aoc1 - self.aoc2, aoc_1_2)
class TestGroup_Contrib(unittest.TestCase):
def test_Group_Contrib(self):
group_contrib1 = Group_Contrib(1, ['Mn', 'Br'], 0.3)
group_contrib1_dup = Group_Contrib(1, ['Mn', 'Br'], 0.3)
group_contrib2 = Group_Contrib(2, [], 0.5)
group_contrib3 = Group_Contrib(2, ['Mn', 'Cs', 'Mo'], 0.2)
self.assertTrue(group_contrib1 == group_contrib1_dup)
self.assertFalse(group_contrib1 == group_contrib2)
if __name__ == '__main__':
unittest.main()
```
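For orientation, the classes exercised above compose into a short pipeline; a sketch using the same fixture files and the import shown in the test header (treating the fixtures as quantum-chemistry population-analysis outputs is an assumption):
```python
from qgrep.population.orbital_pop import OrbitalPopulation as OP
op = OP('H2O.dat')            # parse a population-analysis output (test fixture)
print(op.homo, op.lumo)       # 4 and 5 for closed-shell water, per the tests
by_atom = op.atom_contract()  # collapse AO contributions onto atoms
by_am = op.am_contract()      # collapse onto angular momentum channels
op.write('H2O.csv', 'csv')    # CSV output re-reads losslessly (test_read_write)
```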
|
{
"source": "jevandezande/spectra",
"score": 2
}
|
#### File: spectra/spectra/reaction_kinetics.py
```python
from __future__ import annotations
from datetime import datetime
from glob import glob
from itertools import zip_longest
from pathlib import Path
from typing import Iterable, Optional, Sequence
import matplotlib.pyplot as plt
import numpy as np
from .conv_spectrum import ConvSpectrum
from .plot import plotter, subplots
from .progress import plot_spectra_progress
from .tools import cull
def plot_reaction_kinetics( # noqa: C901
reactions: Sequence[str],
folder: str,
names: Optional[Sequence] = None,
title: str = "",
verbose: bool = False,
rounds: Iterable[int] | str = "all",
colors: Optional[Sequence[str]] = None,
linestyles: Optional[Iterable] = None,
combo_plot: str | bool = True,
spectra_norms: Optional[Iterable] = None,
spectra_smooth: int | bool = False,
spectra_plot: bool = True,
spectra_cull_number: int = 8,
spectra_style: str = "IR",
spectra_xlim: Optional[tuple[float, float]] = None,
spectra_xticks: Optional[tuple[float, float]] = None,
spectra_xlabel: Optional[str] = None,
spectra_ylabel: Optional[str] = None,
kinetics_norms: Iterable | str | bool = True,
kinetics_smooth: int | bool = False,
kinetics_xmax: float = 60,
kinetics_x_units: str = "minutes",
kinetics_ylim: Optional[tuple[float, float]] = None,
kinetics_dot_colors: Optional[str] = None,
baseline_region: tuple[float, float] = (2500, 2600),
integration_x_points: tuple[float, float] = (2100, 2400),
savefig: Optional[str] = None,
):
"""
Plot a graph of the reaction kinetics for multiple reactions.
Note: the returned axes object is not squeezed.
:param reactions: Names of the reactions (correspond to the folder)
:param folder: location of the reaction folders
:param names: Names for the reactions, if `None`, defaults to `reactions`
:param title: title of the plot
:param verbose: print the reactions name and dots for each round of the reaction.
:param rounds: list of rounds to display, or 'all'
:param colors: colors for the reactions
:param linestyles: linestyles for the rounds
:param combo_plot: plot all on a final plot, {True, False, 'only'}
:param spectra_norms: arguments for spectra.normed() to run on all spectra
    :param spectra_*: parameters for the spectra plot
:param kinetics_norms: if true, normalize start to 1. If list, normalize by values.
:param kinetics_*: parameters for kinetics plot
:param baseline_region: region for baseline correction of spectra
:param integration_x_points: x_points to integrate over for reaction kinetics
:param savefig: (where to) save the figure
:return: fig, axes
"""
if names is None:
names = reactions
if colors is None:
colors = [f"C{i}" for i in range(len(reactions))]
elif len(colors) != len(reactions):
raise ValueError(f"len(colors)={len(colors)} != len(reactions)={len(reactions)}")
if linestyles is None:
linestyles = ("-", "--", ":", "-.", (0, (4, 1, 1, 1, 1, 1)))
if not isinstance(rounds, str):
ls_iter = iter(linestyles)
linestyles = [next(ls_iter) if i + 1 in rounds else None for i in range(max(rounds))]
# Setup figures
height = len(reactions) + int(combo_plot) if combo_plot != "only" else 1
width = 1 + int(spectra_plot)
fig, axes = subplots(spectra_style, height, width, figsize=(10, 6))
axes1, axes2 = axes.T if spectra_plot else ([None] * len(axes), axes.T[0])
time_divisor = {
"seconds": 1,
"minutes": 60,
"hours": 60 * 60,
"days": 60 * 60 * 24,
}[kinetics_x_units]
if not isinstance(kinetics_norms, Iterable):
kinetics_norms = [kinetics_norms] * len(reactions)
reaction_iterator = zip_longest(
reactions,
names,
kinetics_norms,
colors,
axes1[: len(reactions)],
axes2[: len(reactions)],
)
for reaction, name, kinetics_norm, color, ax1, ax2 in reaction_iterator:
if verbose:
print(reaction, end=" ")
half_lives = []
if combo_plot != "only":
plt.text(
0.5,
0.5,
name,
horizontalalignment="center",
verticalalignment="center",
transform=ax2.transAxes,
)
for i, linestyle in enumerate(linestyles, start=1):
if rounds != "all" and i not in rounds:
continue
# Read in spectra
inputs = tuple(glob(f"{folder}/{reaction}/Round {i}/*.CSV"))
if not inputs:
break
if verbose:
print(".", end="")
# Get times and names from timestamps in input name
# e.g. 'Mon Sep 09 10-26-50 2019 (GMT-04-00).CSV'
strp = lambda x: datetime.strptime(x, "%a %b %d %H-%M-%S %Y")
times_iter = (strp(inp.split("/")[-1].split(" (")[0]) for inp in inputs)
# Sort the inputs by the timestamps
timestamps, inputs = zip(*sorted(zip(times_iter, inputs)))
times = [(time - timestamps[0]).total_seconds() / time_divisor for time in timestamps]
spectra: list[ConvSpectrum] = []
for time, inp in zip(times, inputs):
s, *others = ConvSpectrum.from_csvs(inp)
assert isinstance(s, ConvSpectrum)
if len(others) != 0:
raise ValueError(f"Multiple spectra in a CSV is not supported. File={inp}")
if spectra_smooth:
s = s.smoothed(spectra_smooth)
if baseline_region:
s = s.set_zero(*baseline_region)
if spectra_norms:
s = s.normed(*spectra_norms)
s.name = f"{name} {time:.1f} {kinetics_x_units}"
s.time = time
assert isinstance(s, ConvSpectrum)
spectra.append(s)
if i == 1 and spectra_plot:
# Only plot a subset of the spectra to avoid cluttering the figure
if len(spectra) < spectra_cull_number:
to_plot = spectra
else:
to_plot = list(cull(spectra, spectra_cull_number))
if combo_plot != "only":
plotter(
to_plot,
baseline_subtracted=False,
normalized=False,
title=None,
plot=(fig, ax1),
legend=False,
smoothed=False,
style=spectra_style,
xlim=None,
xticks=None,
colors=None,
markers=None,
)
# Plot result on last graph
if combo_plot:
plotter(
[spectra[-1]],
baseline_subtracted=False,
normalized=False,
title=None,
plot=(fig, axes[-1][0]),
legend=False,
smoothed=False,
style=spectra_style,
xlim=None,
xticks=None,
colors=None,
markers=None,
)
# Plot progress
half_life = None
if combo_plot != "only":
_, half_life, _, _ = plot_spectra_progress(
spectra,
times,
integration_x_points,
x_units=kinetics_x_units,
plot=(fig, ax2),
label=f"{i}",
color=color,
linestyle=linestyle,
smooth=kinetics_smooth,
norm=kinetics_norm,
dot_colors=kinetics_dot_colors,
)
if combo_plot:
_, half_life, _, _ = plot_spectra_progress(
spectra,
times,
integration_x_points,
x_units=kinetics_x_units,
plot=(fig, axes[-1][-1]),
label=f"{name} - {i}",
color=color,
linestyle=linestyle,
smooth=kinetics_smooth,
norm=kinetics_norm,
dot_colors=kinetics_dot_colors,
)
if half_life is not None:
half_lives.append(half_life)
# TODO: Perhaps 1/(Σ 1/half_life) ???
half_life = float(np.average(half_lives))
if half_life > 0 and combo_plot != "only":
plt.text(
0.5,
0.8,
f"$t_{{1/2}} = {int(round(half_life))}$ {kinetics_x_units}",
horizontalalignment="center",
verticalalignment="center",
transform=ax2.transAxes,
)
if verbose:
print(f" t_1/2={half_life:6.2f} {kinetics_x_units}")
# Setup axes
for i, (ax1, ax2) in enumerate(zip(axes1, axes2)):
ax2.legend()
if spectra_plot:
ax2.yaxis.set_label_position("right")
ax2.yaxis.set_ticks_position("right")
ax2.set_xlim(0, kinetics_xmax)
        if kinetics_ylim:
            ax2.set_ylim(*kinetics_ylim)
        else:
            ax2.set_ylim(bottom=0)
if combo_plot:
ax2.legend([plt.Line2D([0, 1], [0, 0], color=color) for color in colors], names)
if title:
fig.suptitle(title)
if savefig is True:
Path("plots").mkdir(exist_ok=True)
fig.savefig(f"plots/{title}.svg")
elif savefig:
fig.savefig(savefig)
return fig, axes
```
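For context, a minimal invocation of the function above might look like the following. The folder layout it expects is `<folder>/<reaction>/Round <i>/*.CSV` with the timestamped file names the `strptime` pattern parses; all names here are placeholders:
```python
# Hedged sketch: reaction names, folder, and title are placeholders.
from spectra.reaction_kinetics import plot_reaction_kinetics
fig, axes = plot_reaction_kinetics(
    reactions=["rxn_A", "rxn_B"],
    folder="data/kinetics",
    title="demo",
    rounds=[1, 2, 3],
    kinetics_x_units="minutes",
    integration_x_points=(2100, 2400),  # integrate the band of interest
    savefig=True,                       # writes plots/demo.svg
)
```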
#### File: spectra/spectra/tools.py
```python
from __future__ import annotations
import csv
import itertools
from glob import glob
from typing import TYPE_CHECKING, Generator, Iterable, Optional, Sequence
import numpy as np
from numpy.typing import ArrayLike
from scipy import constants
if TYPE_CHECKING:
from ._abc_spectrum import Spectrum
def read_csv(inp: str, header: bool = True) -> tuple[list[str], np.ndarray, np.ndarray]:
"""
Reads a CSV file.
:param inp: input file
:param header: inp contains a header
:return:
:titles: titles of the columns
:xs: x-values (1- or 2-dim np.ndarray)
:ys: y-values (1- or 2-dim np.ndarray, matches x)
"""
try:
with open(inp) as f:
reader = csv.reader(f)
titles = next(reader) if header else None
xs, ys = [], []
for x, *y in reader:
xs.append(float(x))
ys.append([float(y_val) for y_val in y])
except ValueError as e:
raise ValueError(f"Error reading value in {inp}.") from e
xs_array = np.array(xs)
ys_array = np.array(ys).T
if titles is None:
titles = [""] * len(xs)
return titles, xs_array, ys_array
def read_csvs(inps: Iterable[str] | str, header: bool = True) -> tuple[list[str], np.ndarray, np.ndarray]:
"""
Read CSV(s)
:param inps: input file(s) to read
:param header: inp contains a header
:return: titles, xs, ys
"""
titles: list[str] = []
if isinstance(inps, str):
titles, xs, ys = read_csv(inps, header)
titles = titles[1:]
xs = np.ones(ys.shape) * xs
else:
xs_list, ys_list = [], []
for inp in inps:
ts, xs, ys = read_csv(inp, header)
xs = np.ones(ys.shape) * xs
titles.extend(ts[1:])
if ys.shape[1] == 1:
xs_list.append(xs)
ys_list.append(ys)
else:
for x_vals, y_vals in zip(xs, ys):
xs_list.append(x_vals)
ys_list.append(y_vals)
xs = np.array(xs_list)
ys = np.array(ys_list)
# Sanity checks
assert len(xs) == len(ys)
assert len(ys) == len(titles)
return titles, xs, ys
def glob_read_csvs(
inps: Iterable[str] | str, header: bool = True
) -> tuple[list[str], np.ndarray, np.ndarray, list[str]]:
"""
Use glob to find CSV(s) and then reads them.
:param inps: a string or list of strings that can be read by glob
:param header: inp contains a header
:return: titles, xs, ys, file_names
"""
if isinstance(inps, str):
inps = [inps]
file_names = list(itertools.chain(*(glob(inp) for inp in inps)))
titles, xs, ys = read_csvs(file_names)
return titles, np.array(xs), np.array(ys), file_names
def y_at_x(x_points: Iterable[float] | float, xs: ArrayLike, ys: ArrayLike) -> np.ndarray | float:
"""
Determine the y-value at a specified x. If in between xs, choose the first
past it. Assumes xs are ordered.
:param x_points: x-value(s) for which the y-value is desired
:param xs: x-values
:param ys: y-values
:return: desired y-value
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if len(xs) != len(ys):
raise ValueError(f"Mismatched lengths: {len(xs)=} and {len(ys)=}")
return ys[index_of_x(x_points, xs)]
def index_of_x(x_points: Iterable[float] | float, xs: np.ndarray) -> np.ndarray | int:
"""
Determine the index of value(s) in an ordered list. If in between xs,
choose the first past it (larger). Assumes xs are ordered.
:param x_points: value(s) to find
:param xs: list to search in
:return: index of the nearest x_point
"""
# If in reverse order
revd = xs[0] > xs[-1]
if revd:
xs = xs[::-1]
x_iter = x_points if isinstance(x_points, Iterable) else [x_points]
for x in x_iter:
if x < xs[0] or x > xs[-1]:
raise IndexError(f"x_points not in xs, x_points: {x}, xs: ({xs[0]}→{xs[-1]})")
return np.searchsorted(xs, x_points) if not revd else len(xs) - np.searchsorted(xs, x_points) - 1 # type: ignore
def integrate(xs: np.ndarray, ys: np.ndarray, x_range: Optional[tuple[float, float]] = None) -> float:
"""
Integrate a set of ys on the xs.
Note: if x_range does not fall exactly on values in x, it finds the next largest x value.
:param xs: x-values
:param ys: y-values
:param x_range: range of x_values to integrate over
:return: integration
"""
if len(xs) != len(ys):
raise ValueError(f"xs and ys must be of the same length, got: {len(xs)} and {len(ys)}")
if x_range is not None:
begin, end = x_range
if begin < xs[0]:
raise IndexError(f"x_range starts before first value in xs ({begin} > {xs[0]}")
start = index_of_x(begin, xs)
finish = index_of_x(end, xs)
if TYPE_CHECKING:
assert isinstance(start, int)
assert isinstance(finish, int)
xs = xs[start : finish + 1]
ys = ys[start : finish + 1]
return np.trapz(ys, xs)
def smooth_curve(ys: Sequence[float] | np.ndarray, box_pts: int | bool = True) -> np.ndarray:
"""
Smooth a curve.
    Assumes that the ys are uniformly distributed. Returns output of length
    `max(len(ys), box_pts)`; boundary effects are visible.
    Note: len(ys) must be > box_pts
:param ys: points to smooth
:param box_pts: number of data points to convolve, if True, use 3
:return: smoothed points
"""
if box_pts is True:
box_pts = 3
box = np.ones(box_pts) / box_pts
return np.convolve(ys, box, mode="same")
def cull(vals: Sequence, n: int) -> Generator:
"""
Cull `vals` to have `n` "evenly" spaced values.
If not evenly divisible, spread them out as evenly as possible.
:var vals: the values to cull
:var n: number of values to keep
:yield: culled values
"""
yield from (vals[i] for i in np.linspace(0.5, len(vals) - 0.5, n, dtype=int))
def boltzmann_factors(energies: Sequence[float], T: float = 300) -> np.ndarray:
"""
Compute the Boltzmann factors.
:param energies: energies in Hartree with which to generate weights
:param T: temperature, defaults to 300
"""
if T <= 0:
raise ZeroDivisionError(f"T must be greater than 0, got: {T=}")
kBT = constants.k * T / constants.physical_constants["Hartree energy"][0]
zeroed_energies = np.asarray(energies) - min(energies)
factors = np.exp(-zeroed_energies / kBT)
return factors / factors.sum()
def boltzmann_weighted(
spectra: Sequence[Spectrum], energies: Sequence[float], T: float = 300, rename: bool | str = False
) -> Spectrum:
"""
Combine spectra via Boltzmann weighting.
:param spectra: spectra to combine
:param energies: energies of the spectra
:param T: temperature for weighting, defaults to room temperature
:param rename: rename the resulting spectrum
"""
assert len(spectra) > 0
assert len(spectra) == len(energies)
spectrum = sum(s * f for s, f in zip(spectra, boltzmann_factors(energies, T)))
if TYPE_CHECKING:
assert isinstance(spectrum, Spectrum)
if rename:
spectrum.name = "Boltzmann Spectrum" if isinstance(rename, bool) else rename
return spectrum
```
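A short sketch tying the helpers above together; the CSV path and x-range are hypothetical, and the import path follows this file's location (`spectra/tools.py`):
```python
# Hedged sketch: "spectrum.csv" is a placeholder file with one x column
# followed by one or more y columns.
from spectra.tools import boltzmann_factors, integrate, read_csv
titles, xs, ys = read_csv("spectrum.csv")          # ys: one row per y column
area = integrate(xs, ys[0], x_range=(2100, 2400))  # trapezoidal, end-inclusive
weights = boltzmann_factors([0.0, 0.001, 0.002])   # energies in Hartree, T=300 K
print(area, weights.sum())                         # weights sum to 1 by construction
```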
#### File: spectra/tests/test_sticks_spectrum.py
```python
import numpy as np
import pytest
from numpy.testing import assert_almost_equal as aae
from spectra import SticksSpectrum
def setup():
pass
def teardown():
pass
def test_init():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities, units="ms", style="IR", y_shift=-5, time=9)
aae(s1.energies, energies)
aae(s1.intensities, intensities)
assert s1.units == "ms"
assert s1.style == "IR"
assert s1.y_shift == -5
assert s1.time == 9
def test_iter():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
assert all(e == i for e, i in s1)
def test_eq():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("S1", energies, intensities)
s2 = SticksSpectrum("S1", energies, intensities)
s3 = SticksSpectrum("S1", energies, intensities, style="MS")
s4 = SticksSpectrum("S4", energies, intensities)
s5 = SticksSpectrum("S5", energies, intensities, y_shift=6)
assert s1 == s2
assert s1 != s3
assert s1 != s4
assert s1 != s5
def test_len():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("S1", energies, intensities)
s2 = SticksSpectrum("S1", energies, intensities)
assert len(s1) == len(energies)
assert len(s2) == len(energies)
def test_str():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
assert str(s1) == "<SticksSpectrum: Hello World>"
def test_add_sub():
energies1, intensities1 = np.arange(10), np.arange(10)
energies2, intensities2 = np.arange(20), np.arange(20)
s1 = SticksSpectrum("Hello World", energies1, intensities1)
s1 + s1
s2 = 1 + s1
s3 = s2 - 1
s4 = 1 - s3
s5 = s1 - s1
s6 = s1 - s2
s7 = SticksSpectrum("Hello Big World", energies2, intensities2)
s1 + s7
s1 - s7
s = s1.copy()
s.energies += 1
s + s1
s - s1
assert s1.name == "Hello World"
assert s2.name == "Hello World + 1"
assert s3.name == "Hello World + 1 – 1"
assert s4.name == "1 – Hello World + 1 – 1"
assert s5.name == "Hello World – Hello World"
assert s6.name == "Hello World – Hello World + 1"
aae(s1.energies, s2.energies)
aae(s1.energies, s3.energies)
aae(s1.energies, s4.energies)
aae(s3.intensities, s1.intensities)
def test_abs():
energies, intensities1, intensities2 = np.arange(10), np.arange(10), np.arange(10)
intensities2[5:] = -intensities2[5:]
s1 = SticksSpectrum("S1", energies, intensities1)
s2 = SticksSpectrum("S2", energies, intensities2)
assert s1 != s2
assert any(s1.intensities != s2.intensities)
aae(s1.intensities, abs(s2).intensities)
def test_mul():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("S1", energies, intensities)
s1 * s1
def test_div():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("S1", energies, intensities)
div = s1 / s1
aae(div.energies, range(10))
aae(div.intensities, [np.nan] + [1] * 9)
def test_copy():
energies, intensities = np.arange(1, 11), np.arange(1, 11)
s1 = SticksSpectrum("Hello World", energies, intensities)
s2 = s1.copy()
assert s1 == s2
assert id(s1) != id(s2)
def test_domain():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
assert s1.domain == (0, 9)
@pytest.mark.xfail(raises=NotImplementedError)
def test_smoothed():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.smoothed()
def test_baseline_subtracted():
energies, intensities = np.arange(1, 11), np.arange(1, 11)
s1 = SticksSpectrum("Hello World", energies, intensities)
s2 = s1.baseline_subtracted()
s3 = s1.baseline_subtracted(9)
aae(s1.intensities - 1, s2.intensities)
aae(s1.intensities - 9, s3.intensities)
@pytest.mark.xfail(raises=NotImplementedError)
def test_set_zero():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.set_zero(99)
def test_sliced():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.sliced()
def test_from_csvs(tmp_path):
test_csv = f"{tmp_path}/test.csv"
with open(test_csv, "w") as f:
f.write("x,A,B\n0,2,4\n1,3,5")
SticksSpectrum.from_csvs(test_csv)
SticksSpectrum.from_csvs("tests/files/xrd.csv")
@pytest.mark.xfail(raises=NotImplementedError)
def test_norm():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.norm()
def test_normed():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.normed()
@pytest.mark.xfail(raises=NotImplementedError)
def test_peaks():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.peaks()
def test_min_max():
s1 = SticksSpectrum.from_csvs("tests/files/spectrum1.csv")[0]
assert min(s1) == (5, 0)
assert max(s1) == (25, 0)
assert s1.min == (16, -10)
assert s1.max == (13, 21)
@pytest.mark.xfail(raises=NotImplementedError)
def test_correlation():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.correlation(s1)
def test_convert():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.convert(2, npoints=100)
s1.convert(2, npoints=100, energy_lim=(-5, 50))
```
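The arithmetic and naming semantics tested above suggest usage along these lines; treating `convert`'s first positional argument as a broadening parameter is an assumption based on the call signature alone:
```python
import numpy as np
from spectra import SticksSpectrum
sticks = SticksSpectrum("demo", np.arange(10), np.arange(10))
shifted = sticks + 1                        # name becomes "demo + 1" (test_add_sub)
broadened = sticks.convert(2, npoints=100)  # sticks -> continuous spectrum
```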
|
{
"source": "jevenzh/NeMo",
"score": 2
}
|
#### File: examples/asr/jasper_eval.py
```python
import argparse
import copy
import os
import pickle
import numpy as np
from ruamel.yaml import YAML
import nemo
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.helpers import post_process_predictions, post_process_transcripts, word_error_rate
from nemo.utils import logging
def main():
parser = argparse.ArgumentParser(description='Jasper')
# model params
parser.add_argument("--model_config", type=str, required=True)
parser.add_argument("--eval_datasets", type=str, required=True)
parser.add_argument("--load_dir", type=str, required=True)
# run params
parser.add_argument("--local_rank", default=None, type=int)
parser.add_argument("--batch_size", default=64, type=int)
parser.add_argument("--amp_opt_level", default="O1", type=str)
# store results
parser.add_argument("--save_logprob", default=None, type=str)
# lm inference parameters
parser.add_argument("--lm_path", default=None, type=str)
parser.add_argument('--alpha', default=2.0, type=float, help='value of LM weight', required=False)
parser.add_argument(
'--alpha_max',
type=float,
help='maximum value of LM weight (for a grid search in \'eval\' mode)',
required=False,
)
parser.add_argument(
'--alpha_step', type=float, help='step for LM weight\'s tuning in \'eval\' mode', required=False, default=0.1
)
parser.add_argument('--beta', default=1.5, type=float, help='value of word count weight', required=False)
parser.add_argument(
'--beta_max',
type=float,
        help='maximum value of word count weight (for a grid search in \'eval\' mode)',
required=False,
)
parser.add_argument(
'--beta_step',
type=float,
help='step for word count weight\'s tuning in \'eval\' mode',
required=False,
default=0.1,
)
parser.add_argument("--beam_width", default=128, type=int)
args = parser.parse_args()
batch_size = args.batch_size
load_dir = args.load_dir
if args.local_rank is not None:
if args.lm_path:
raise NotImplementedError(
"Beam search decoder with LM does not currently support evaluation on multi-gpu."
)
device = nemo.core.DeviceType.AllGpu
else:
device = nemo.core.DeviceType.GPU
# Instantiate Neural Factory with supported backend
neural_factory = nemo.core.NeuralModuleFactory(
backend=nemo.core.Backend.PyTorch,
local_rank=args.local_rank,
optimization_level=args.amp_opt_level,
placement=device,
)
if args.local_rank is not None:
logging.info('Doing ALL GPU')
yaml = YAML(typ="safe")
with open(args.model_config) as f:
jasper_params = yaml.load(f)
vocab = jasper_params['labels']
sample_rate = jasper_params['sample_rate']
eval_datasets = args.eval_datasets
eval_dl_params = copy.deepcopy(jasper_params["AudioToTextDataLayer"])
eval_dl_params.update(jasper_params["AudioToTextDataLayer"]["eval"])
del eval_dl_params["train"]
del eval_dl_params["eval"]
data_layer = nemo_asr.AudioToTextDataLayer(
manifest_filepath=eval_datasets,
sample_rate=sample_rate,
labels=vocab,
batch_size=batch_size,
**eval_dl_params,
)
N = len(data_layer)
logging.info('Evaluating {0} examples'.format(N))
data_preprocessor = nemo_asr.AudioToMelSpectrogramPreprocessor(
sample_rate=sample_rate, **jasper_params["AudioToMelSpectrogramPreprocessor"]
)
jasper_encoder = nemo_asr.JasperEncoder(
feat_in=jasper_params["AudioToMelSpectrogramPreprocessor"]["features"], **jasper_params["JasperEncoder"]
)
jasper_decoder = nemo_asr.JasperDecoderForCTC(
feat_in=jasper_params["JasperEncoder"]["jasper"][-1]["filters"], num_classes=len(vocab)
)
greedy_decoder = nemo_asr.GreedyCTCDecoder()
logging.info('================================')
logging.info(f"Number of parameters in encoder: {jasper_encoder.num_weights}")
logging.info(f"Number of parameters in decoder: {jasper_decoder.num_weights}")
logging.info(f"Total number of parameters in model: " f"{jasper_decoder.num_weights + jasper_encoder.num_weights}")
logging.info('================================')
# Define inference DAG
audio_signal_e1, a_sig_length_e1, transcript_e1, transcript_len_e1 = data_layer()
processed_signal_e1, p_length_e1 = data_preprocessor(input_signal=audio_signal_e1, length=a_sig_length_e1)
encoded_e1, encoded_len_e1 = jasper_encoder(audio_signal=processed_signal_e1, length=p_length_e1)
log_probs_e1 = jasper_decoder(encoder_output=encoded_e1)
predictions_e1 = greedy_decoder(log_probs=log_probs_e1)
eval_tensors = [log_probs_e1, predictions_e1, transcript_e1, transcript_len_e1, encoded_len_e1]
# inference
evaluated_tensors = neural_factory.infer(tensors=eval_tensors, checkpoint_dir=load_dir)
greedy_hypotheses = post_process_predictions(evaluated_tensors[1], vocab)
references = post_process_transcripts(evaluated_tensors[2], evaluated_tensors[3], vocab)
wer = word_error_rate(hypotheses=greedy_hypotheses, references=references)
logging.info("Greedy WER {:.2f}%".format(wer * 100))
# Convert logits to list of numpy arrays
logprob = []
for i, batch in enumerate(evaluated_tensors[0]):
for j in range(batch.shape[0]):
logprob.append(batch[j][: evaluated_tensors[4][i][j], :].cpu().numpy())
if args.save_logprob:
with open(args.save_logprob, 'wb') as f:
pickle.dump(logprob, f, protocol=pickle.HIGHEST_PROTOCOL)
# language model
if args.lm_path:
if args.alpha_max is None:
args.alpha_max = args.alpha
# include alpha_max in tuning range
args.alpha_max += args.alpha_step / 10.0
if args.beta_max is None:
args.beta_max = args.beta
# include beta_max in tuning range
args.beta_max += args.beta_step / 10.0
beam_wers = []
logprobexp = [np.exp(p) for p in logprob]
for alpha in np.arange(args.alpha, args.alpha_max, args.alpha_step):
for beta in np.arange(args.beta, args.beta_max, args.beta_step):
logging.info('================================')
                logging.info(f'Inferring with (alpha, beta): ({alpha}, {beta})')
beam_search_with_lm = nemo_asr.BeamSearchDecoderWithLM(
vocab=vocab,
beam_width=args.beam_width,
alpha=alpha,
beta=beta,
lm_path=args.lm_path,
                    num_cpus=max(os.cpu_count() or 1, 1),
input_tensor=False,
)
beam_predictions = beam_search_with_lm(log_probs=logprobexp, log_probs_length=None, force_pt=True)
beam_predictions = [b[0][1] for b in beam_predictions[0]]
lm_wer = word_error_rate(hypotheses=beam_predictions, references=references)
logging.info("Beam WER {:.2f}%".format(lm_wer * 100))
beam_wers.append(((alpha, beta), lm_wer * 100))
logging.info('Beam WER for (alpha, beta)')
logging.info('================================')
logging.info('\n' + '\n'.join([str(e) for e in beam_wers]))
logging.info('================================')
best_beam_wer = min(beam_wers, key=lambda x: x[1])
logging.info('Best (alpha, beta): ' f'{best_beam_wer[0]}, ' f'WER: {best_beam_wer[1]:.2f}%')
if __name__ == "__main__":
main()
```
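One detail worth calling out: the `args.alpha_max += args.alpha_step / 10.0` adjustment above exists because `numpy.arange` excludes its stop value, so nudging the stop just past the intended maximum makes the grid search include it:
```python
import numpy as np
alpha, alpha_max, step = 2.0, 3.0, 0.5
print(np.arange(alpha, alpha_max, step))                # [2.  2.5]      -- 3.0 excluded
print(np.arange(alpha, alpha_max + step / 10.0, step))  # [2.  2.5  3. ] -- 3.0 included
```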
#### File: nemo/core/neural_modules.py
```python
__all__ = ['WeightShareTransform', 'NeuralModule', 'ModuleType']
import uuid
from abc import abstractmethod
from collections import namedtuple
from enum import Enum
from inspect import getargvalues, getfullargspec, stack
from os import path
from typing import Any, Dict, List, Optional, Set, Tuple
from ruamel.yaml import YAML
from nemo.core import NeuralModuleFactory, OperationMode
from nemo.core.neural_interface import NeuralInterface
from nemo.core.neural_types import NeuralPortNameMismatchError, NeuralType, NmTensor
from nemo.package_info import __version__ as nemo_version
from nemo.utils import logging
from nemo.utils.decorators.deprecated import deprecated
from nemo.utils.neural_graph.connection import StepModulePort
YAML = YAML(typ='safe')
class ModuleType(Enum):
""" Back-end independent module types """
module = 0
datalayer = 1
trainable = 2
loss = 3
nontrainable = 4
class WeightShareTransform(Enum):
"""When sharing parameters, what kind of transform to apply."""
SAME = 0
TRANSPOSE = 1
PretrainedModelInfo = namedtuple(
"PretrainedModleInfo", ("pretrained_model_name", "description", "parameters", "location"),
)
class NeuralModule(NeuralInterface):
"""
Abstract class that every Neural Module must inherit from.
"""
def __init__(self, name=None):
        # Initialize the interface.
super().__init__()
# Retrieve dictionary of parameters (keys, values) passed to init.
self._init_params = self.__extract_init_params()
# Get object UUID.
self._uuid = str(uuid.uuid4())
# Register module and store the generated name.
self._name = self._app_state.register_module(self, name)
# Set "module" type as default.
self._type = ModuleType.module
# Set "both" as default operation mode.
self._operation_mode = OperationMode.both
# Get default factory.
self._factory = NeuralModuleFactory.get_default_factory()
# Set module properties from factory else use defaults
self._placement = self._factory.placement
        # If one needs to change that, override it manually.
# Optimization level.
self._opt_level = self._factory.optim_level
@property
def init_params(self) -> Dict[str, Any]:
"""
Property returning parameters used to instantiate the module.
Returns:
Dictionary containing parameters used to instantiate the module.
"""
return self._init_params
def __extract_init_params(self) -> Dict[str, Any]:
"""
        Retrieves the dictionary of parameters (keys, values) passed to the constructor of a class derived
(also indirectly) from the Neural Module class.
Returns:
Dictionary containing parameters passed to init().
"""
# Get names of arguments of the original module init method.
to_set_params = getfullargspec(type(self).__init__).args
to_set_params.remove("self")
# Create empty list of init params.
init_params = {}
# Get the frame "call context".
for frame in stack()[1:]:
# Get the call arguments.
localvars = getargvalues(frame[0])
# Fill the parameters with call_args.
for key in to_set_params:
if key in localvars.args:
init_params[key] = localvars.locals[key]
# Remove all set keys.
for key in init_params.keys():
if key in to_set_params:
to_set_params.remove(key)
# Check if we have set everything.
if len(to_set_params) == 0:
break
# Make sure that we collected ALL (and ONLY) the signature params - if not, then there is a BUG!
if len(to_set_params) != 0:
raise ValueError(
"Could not collect all the signature params! "
F"Please file a bug on GitHub with the current stacktrace so that it can be resolved."
)
# print("! init_params of {}: {}\n".format(type(self).__name__, init_params))
# Return parameters.
return init_params
def __validate_params(self, params: Dict[str, Any]) -> bool:
"""
        Checks whether the dictionary contains parameters of primitive types (string, int, float, etc.)
        or (possibly nested) lists/dicts of primitive types.
Args:
params: dictionary of parameters.
Returns:
True if all parameters were ok, False otherwise.
"""
ok = True
# Iterate over parameters and check them one by one.
for key, variable in params.items():
if not self.__is_of_allowed_type(variable):
logging.warning(
"Parameter '{}' contains a variable '{}' of type '{}' which is not allowed.".format(
key, variable, type(variable)
)
)
ok = False
# Return the result.
return ok
def __is_of_allowed_type(self, var) -> bool:
"""
A recursive function that checks if a given variable is of allowed type.
Args:
            var: the variable to be checked.
Returns:
True if all parameters were ok, False otherwise.
"""
# Special case: None is also allowed.
if var is None:
return True
var_type = type(var)
# If this is list - check its elements.
if var_type == list:
for list_var in var:
if not self.__is_of_allowed_type(list_var):
return False
# If this is dict - check its elements.
elif var_type == dict:
for _, dict_var in var.items():
if not self.__is_of_allowed_type(dict_var):
return False
elif var_type not in (str, int, float, bool):
return False
# Well, seems that everything is ok.
return True
def export_to_config(self, config_file: str):
"""
A function that exports module "configuration" (i.e. init parameters) to a YAML file.
Args:
config_file: path (absolute or relative) and name of the config file (YML)
Raises:
            ValueError: An error occurred and parameters couldn't be exported.
"""
        # Create an absolute path.
abs_path_file = path.expanduser(config_file)
# Serialize the module.
to_export = self.serialize()
# All parameters are ok, let's export.
with open(abs_path_file, 'w') as outfile:
YAML.dump(to_export, outfile)
logging.info(
"Configuration of module `{}` ({}) exported to {}".format(self.name, type(self).__name__, abs_path_file)
)
def serialize(self) -> Dict[str, Any]:
"""
A method serializing the whole Neural module (into a dictionary).
Returns:
Dictionary containing a "serialized" module.
"""
# Create a dictionary representing the serialized object.
serialized_module = {}
# Add "header" with module "specification".
serialized_module["header"] = self.__serialize_header()
# Add init parameters.
serialized_module["init_params"] = self._serialize_configuration()
# Return the dictionary.
return serialized_module
def __serialize_header(self) -> Dict[str, Any]:
"""
A protected method that creates a header stored later in the configuration file.
Returns:
Dictionary containing a header with module specification.
"""
# Get module "full specification".
module_full_spec = str(self.__module__) + "." + str(self.__class__.__qualname__)
module_class_name = type(self).__name__
# print(module_full_spec)
# Check whether module belongs to a collection.
spec_list = module_full_spec.split(".")
# Do not check Neural Modules from unit tests.
if spec_list[0] == "tests":
# Set collection variables.
collection_type = "tests"
collection_version = None
else:
# Check if component belongs to any collection
if len(spec_list) < 3 or (spec_list[0] != "nemo" and spec_list[1] != "collection"):
logging.warning(
"Module `{}` does not belong to any collection. This won't be allowed in the next release.".format(
module_class_name
)
)
collection_type = "unknown"
collection_version = None
else:
# Ok, set collection.
collection_type = spec_list[2]
collection_version = None
# TODO: to be SET!
# print(getattr("nemo.collections.nlp", __version__))
# Create a "header" with module "specification".
header = {
"nemo_core_version": nemo_version,
"collection_type": collection_type,
"collection_version": collection_version,
# "class": module_class_name, # Operating only on full_spec now.
"full_spec": module_full_spec,
}
return header
def _serialize_configuration(self) -> Dict[str, Any]:
"""
        A function that serializes the module "configuration" (i.e. init parameters) to a dictionary.
        .. note::
            This function should be overloaded when writing a custom module import/export.
Returns:
A "serialized" dictionary with module configuration.
Raises:
            A ValueError exception in case the parameters couldn't be exported.
"""
# Check if generic export will work.
if not self.__validate_params(self._init_params):
raise ValueError(
"Generic configuration export enables to use of parameters of primitive types (string, int, float) "
F"or (lists of/dicts of) primitive types. Please implement your own custom `export_to_config()` and "
F"`import_from_config()` methods for your custom Module class."
)
# In this case configuration = init parameters.
return self._init_params
@classmethod
def import_from_config(
cls, config_file: str, section_name: str = None, name: str = None, overwrite_params: Dict = {}
) -> 'NeuralModule':
"""
Class method importing the configuration file.
Raises an ImportError exception when config file is invalid or
incompatible (when called from a particular class).
Args:
config_file: path (absolute or relative) and name of the config file (YML)
section_name: section in the configuration file storing module configuration (optional, DEFAULT: None)
name: name of the module that will overwrite the name in the `init_params` (optional, DEFAULT: None)
overwrite_params: Dictionary containing parameters that will be added to or overwrite (!)
the default init parameters loaded from the configuration file (the module "init_params" section).
Returns:
Instance of the created NeuralModule object.
"""
logging.info("Loading configuration of a new Neural Module from the `{}` file".format(config_file))
# Validate the content of the configuration file (its header).
loaded_config = cls.__validate_config_file(config_file, section_name)
# "Deserialize" the module.
obj = cls.deserialize(loaded_config, name, overwrite_params)
# Return the new module.
return obj
@classmethod
def __validate_config_file(cls, config_file: str, section_name: str = None) -> Dict[str, Any]:
"""
        Class method validating whether the config file has proper content (sections, specification etc.).
Raises an ImportError exception when config file is invalid or
incompatible (when called from a particular class).
Args:
config_file: path (absolute or relative) and name of the config file (YML)
section_name: section in the configuration file storing module configuration (optional, DEFAULT: None)
Returns:
A loaded configuration file (dictionary).
"""
        # Create an absolute path.
abs_path_file = path.expanduser(config_file)
# Open the config file.
with open(abs_path_file, 'r') as stream:
loaded_config = YAML.load(stream)
# Check section.
if section_name is not None:
if section_name not in loaded_config:
raise ImportError(
"The loaded config `{}` doesn't contain the indicated `{}` section".format(
config_file, section_name
)
)
# Section exists - use only it for configuration.
loaded_config = loaded_config[section_name]
# Make sure that the config is valid.
if "header" not in loaded_config:
raise ImportError("The loaded config `{}` doesn't contain the `header` section".format(config_file))
if "init_params" not in loaded_config:
raise ImportError("The loaded config `{}` doesn't contain the `init_params` section".format(config_file))
# Parse the "full specification".
spec_list = loaded_config["header"]["full_spec"].split(".")
# Check if config contains data of a compatible class.
if cls.__name__ != "NeuralModule" and spec_list[-1] != cls.__name__:
txt = "The loaded file `{}` contains configuration of ".format(config_file)
txt = txt + "`{}` thus cannot be used for instantiation of an object of type `{}`".format(
spec_list[-1], cls.__name__
)
raise ImportError(txt)
# Success - return configuration.
return loaded_config
@classmethod
def deserialize(
cls, configuration: Dict[str, Any], name: str = None, overwrite_params: Dict[str, Any] = {}
) -> 'NeuralModule':
"""
        Class method instantiating the neural module object based on the configuration (dictionary).
Args:
configuration: Dictionary containing proper "header" and "init_params" sections.
name: name of the module that will overwrite the name in the `init_params` (optional, DEFAULT: None)
overwrite_params: Dictionary containing parameters that will be added to or overwrite (!)
the default init parameters loaded from the configuration file (the module "init_params" section).
Returns:
Instance of the created NeuralModule object.
"""
# Deserialize header - get object class.
module_class = cls.__deserialize_header(configuration["header"])
# Update parameters with additional ones.
configuration["init_params"].update(overwrite_params)
# Override module name in init_params using the logic:
# * section_name if not none overrides init_params.name first (skipped for now, TOTHINK!)
        # * name (if not None) overrides init_params.name
if name is not None:
configuration["init_params"]["name"] = name
# Get init parameters.
init_params = cls._deserialize_configuration(configuration["init_params"])
# Create the module instance.
new_module = module_class(**init_params)
logging.info(
"Instantiated a new Neural Module named `{}` of type `{}`".format(
new_module.name, type(new_module).__name__
)
)
# Return the module instance.
return new_module
@classmethod
def __deserialize_header(cls, serialized_header: Dict[str, Any]):
"""
Method deserializes the header and extracts the module class.
Args:
serialized_header: Dictionary containing module header.
Returns:
Class of the module to be created.
"""
# Parse the "full specification".
spec_list = serialized_header["full_spec"].split(".")
# Get module class from the "full specification".
mod_obj = __import__(spec_list[0])
for spec in spec_list[1:]:
mod_obj = getattr(mod_obj, spec)
# Return "class".
return mod_obj
@classmethod
def _deserialize_configuration(cls, serialized_init_params: Dict[str, Any]):
"""
        A function that deserializes the module "configuration" (i.e. init parameters).
        .. note::
            This function should be overloaded when writing a custom module import/export.
Args:
            serialized_init_params: Dictionary of init parameters loaded from the file.
        Returns:
            A "deserialized" dictionary of init parameters.
"""
# In this case configuration = init parameters.
return serialized_init_params
@property
@abstractmethod
def input_ports(self) -> Dict[str, NeuralType]:
"""
Returns definitions of module input ports
Returns:
A dictionary containing module's input ports (names, NeuralTypes) mapping.
"""
@property
@abstractmethod
def output_ports(self) -> Dict[str, NeuralType]:
"""
Returns definitions of module output ports
Returns:
A dictionary containing module's output ports (names, NeuralTypes) mapping.
"""
@property
def _disabled_deployment_input_ports(self) -> Set[str]:
"""Returns names of input ports that will not be included in an export
Returns:
A (set) of module's input port names that are not exportable
"""
return set([])
@property
def _disabled_deployment_output_ports(self) -> Set[str]:
"""Returns names of output ports that will not be included in an export
Returns:
A (set) of module's output port names that are not exportable
"""
return set([])
def _prepare_for_deployment(self) -> None:
"""Patch the module if required to prepare for deployment
"""
return
@property
def operation_mode(self):
""" Returns the operation mode. """
return self._operation_mode
@property
def type(self):
""" Returns the type of module. """
return self._type
@operation_mode.setter
def operation_mode(self, operation_mode: OperationMode):
""" Sets the operation mode. """
self._operation_mode = operation_mode
@staticmethod
def pretrained_storage():
return ''
def __call__(self, **kwargs):
"""This method allows objects to be called with their port names
Args:
kwargs: Input ports and their values. For example:
...
mymodule1 = Subclass1_of_NeuralModule(...)
mymodule2 = Subclass2_of_NeuralModule(...)
...
out_port1, out_port2 = mymodule1(input_port1=value1,
input_port2=value2,
input_port3=value3)
out_port11 = mymodule2(input_port1=out_port2)
...
Returns:
NmTensor object or tuple of NmTensor objects
"""
# print(" Neural Module:__call__")
# Set the operation mode of the outer graph.
self.operation_mode = self._app_state.active_graph.operation_mode
# The input and output ports definitions can potentially depend on the operation mode!
# Record the operation (i.e. add a single module).
step_number = self._app_state.active_graph.record_step(self)
###### PROCESS INPUTS. ######
# Iterate through all passed parameters.
for port_name, port_content in kwargs.items():
# Make sure that passed arguments corresponds to one of the input port names.
if port_name not in self.input_ports.keys():
raise NeuralPortNameMismatchError(port_name)
# At that point the input can be one of three types:
# * NeuralGraph -> bind port using the default name and type.
# * GraphInput -> check definition, if ok bind port.
# * NmTensor -> check definition, add self as a "consumer" of a tensor (produced by other module).
# Check what was actually passed.
if type(port_content).__name__ == "NeuralGraph":
# Make sure that port_content is the currently active graph!
if port_content is not self._app_state.active_graph:
raise ConnectionError("Ports can be bound only by passing the active graph object!")
# Create an alias so the logic will be more clear.
active_graph = port_content
# This case: we are nesting one graph into another and must bind input port of one graph in another!
                # So generally we must "copy" the port of this module to the graph (the inverted logic!).
                # Copy the port "definition" (i.e. its NeuralType) using the same port name.
active_graph.inputs[port_name] = self.input_ports[port_name]
# Bind the neural graph input port, i.e. remember that a given graph port should pass data
# to THIS module-port (when it finally will be connected).
active_graph.inputs[port_name].bind(StepModulePort(step_number, self.name, port_name))
# Please note that there are no "consumers" here - this is a "pure binding".
elif type(port_content).__name__ == "GraphInput":
# Check if GraphInput belongs to the active graph !
own_port = False
for gcontent in self._app_state.active_graph.inputs.values():
if gcontent is port_content:
own_port = True
break
if not own_port:
raise NeuralPortNameMismatchError(port_name)
# Compare input port definition with the received definition.
self.input_ports[port_name].compare_and_raise_error(
self.__class__.__name__, port_name, port_content.ntype
)
# Bind the neural graph input port, i.e. remember that a given graph port should pass data
# to THIS module-port (when it finally will be connected).
port_content.bind(StepModulePort(step_number, self.name, port_name))
# Please note that there are no "consumers" here - this is a "pure binding".
elif type(port_content) is NmTensor:
# Compare input port definition with the received definition.
self.input_ports[port_name].compare_and_raise_error(self.__class__.__name__, port_name, port_content)
# Ok, the goal here is to actually "connect": add self (module) as "consumer" to the input tensor.
port_content.add_consumer(StepModulePort(step_number, self.name, port_name))
else:
raise TypeError(
"Input '{}' must be of one of three types: NeuralGraph, GraphInput or NmTensor".format(port_name)
)
###### PRODUCE OUTPUTS. ######
output_port_defs = self.output_ports
# Create output tensors.
if len(output_port_defs) == 1:
# Get port name and type.
out_name = list(output_port_defs)[0]
out_type = output_port_defs[out_name]
# Create a single returned tensor.
results = NmTensor(producer=self, producer_args=kwargs, output_port_name=out_name, ntype=out_type,)
# Bind the "default" output ports.
self._app_state.active_graph.bind_outputs(results)
else:
# Create output tensors.
output_tensors = []
for out_name, out_type in output_port_defs.items():
output_tensors.append(
NmTensor(producer=self, producer_args=kwargs, output_port_name=out_name, ntype=out_type,)
)
# Create a named tuple type enabling to access outputs by attributes (e.g. out.x).
output_class_name = f'{self.__class__.__name__}Output'
result_type = namedtuple(typename=output_class_name, field_names=output_port_defs.keys())
# Create the returned tuple object.
results = result_type(*output_tensors)
# Bind the output tensors.
self._app_state.active_graph.bind_outputs(output_tensors)
# Return the results.
return results
def __str__(self):
return self.__class__.__name__
@abstractmethod
def get_weights(self) -> Optional[Dict[(str, bool)]]:
"""Returns NeuralModule's weights copy.
Returns:
Dictionary of name -> (weights, trainable)"""
pass
@abstractmethod
def set_weights(
self,
name2weight: Dict[(str, Tuple[str, bool])],
name2name_and_transform: Dict[(str, Tuple[str, WeightShareTransform])] = None,
):
"""Sets weight from given values. For every named weight in
name2weight,
if weight with the same name is found in the model, it will be set to
found value.
WARNING: This will NOT tie weights. It will copy values.
If ``name2name_and_transform`` is provided then if will set weights
using
name mapping and transform. For example, suppose ``objec1.X = 3x5
weight``.
Then, if ``name2name_and_transform['X']=('Y',
WeightShareTransform.TRANSPOSE)``
and ``Y`` is 5x3 weight and ``name2weight['Y']=Y. Then:
``object1.set_weights(name2weight, name2name_and_transform)`` will
set object1.X=transpose(Y).
Args:
name2weight (dict): dictionary of name to (weight, trainable).
Typically this is output of get_weights method.
name2name_and_transform: mapping from name -> (name, transform)
"""
pass
@staticmethod
def list_pretrained_models() -> Optional[List[PretrainedModelInfo]]:
"""List all available pre-trained models (e.g. weights) for this NM.
Returns:
A list of PretrainedModelInfo tuples.
The pretrained_model_name field of the tuple can be used to
retrieve pre-trained model's weights (pass it as
pretrained_model_name argument to the module's constructor)
"""
return None
def get_config_dict_and_checkpoint(self, pretrained_model_name):
"""WARNING: This part is work in progress"""
return None
@abstractmethod
def tie_weights_with(
self,
module,
weight_names=List[str],
name2name_and_transform: Dict[(str, Tuple[str, WeightShareTransform])] = None,
):
"""Ties weights between self and module. For every weight name in
weight_names, if weight with the same name is found in self, it will
be tied
with a same weight from ``module``.
WARNING: Once weights are tied, updates to one weights's weights
will affect
other module's weights.
If ``name2name_and_transform`` is provided then if will set weights
using
name mapping and transform. For example, suppose ``objec1.X = 3x5
weights``
and ``object2.Y = 5x3 weights``. Then these weights can be tied like
this:
.. code-block:: python
            object1.tie_weights_with(
                object2,
                weight_names=['X'],
                name2name_and_transform={'X': ('Y', WeightShareTransform.TRANSPOSE)},
            )
Args:
module: with which module to tie weights
weight_names (List[str]): list of self weights' names
name2name_and_transform: mapping from name -> (name, transform)
"""
pass
def is_trainable(self) -> bool:
"""
Checks if NeuralModule is trainable.
A NeuralModule is trainable IFF it contains at least one trainable
weight
Returns:
True if module has trainable weights, False otherwise
"""
weights = self.get_weights()
if weights is None:
return False
for name, w in weights.items():
if w[1]:
return True
return False
@abstractmethod
def save_to(self, path: str):
"""Save module state to file.
Args:
            path (string): path to the file where to save.
"""
pass
@abstractmethod
def restore_from(self, path: str):
"""Restore module's state from file.
Args:
path (string): path to where to restore from.
"""
pass
@abstractmethod
def freeze(self, weights: Set[str] = None):
"""Freeze weights
Args:
weights (set): set of weight names to freeze
            If None, all weights are frozen.
"""
pass
@abstractmethod
def unfreeze(self, weights: Set[str] = None):
"""Unfreeze weights
Args:
weights (set): set of weight names to unfreeze
            If None, all weights are unfrozen.
"""
pass
@property
def placement(self):
"""Module's placement. Currently CPU or GPU.
DataParallel and ModelParallel will come later.
Returns:
(DeviceType) Device where NM's weights are located
"""
return self._placement
@property
@deprecated(version=0.11)
def local_parameters(self) -> Optional[Dict]:
"""Get module's parameters
Returns:
module's parameters
"""
return self._init_params
# return self._local_parameters
@property
def unique_instance_id(self):
"""A unique instance id for this object
Returns:
            A unique UUID which can be used to identify this object
"""
return self._uuid
@property
def factory(self):
""" Neural module factory which created this module
Returns: NeuralModuleFactory instance or None
"""
return self._factory
@property
@abstractmethod
def num_weights(self):
"""Number of module's weights
"""
pass
```
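Putting `export_to_config` and `import_from_config` together, the intended round trip looks like the sketch below. `MyModule` is a placeholder for any concrete `NeuralModule` subclass whose `__init__` takes only primitive-typed parameters (the validation above enforces exactly that):
```python
# Hedged sketch: MyModule and the file name are placeholders.
module = MyModule(dim=4, name="fx")
module.export_to_config("fx.yml")  # writes the "header" and "init_params" sections
clone = MyModule.import_from_config(
    "fx.yml", name="fx_copy", overwrite_params={"dim": 8}
)
```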
|
{
"source": "Jeverett3000/codeapp",
"score": 2
}
|
#### File: site-packages/setuptools_rust/test.py
```python
import os
import sys
import subprocess
from distutils.cmd import Command
from distutils.errors import CompileError, DistutilsFileError, DistutilsExecError
import semantic_version
from .extension import RustExtension
from .utils import rust_features, get_rust_version
MIN_VERSION = semantic_version.Spec(">=1.15")
class test_rust(Command):
"""Run cargo test"""
description = "test Rust extensions"
user_options = []
def run_for_extension(self, ext: RustExtension):
# Make sure that if pythonXX-sys is used, it builds against the current
# executing python interpreter.
bindir = os.path.dirname(sys.executable)
env = os.environ.copy()
env.update(
{
# disables rust's pkg-config seeking for specified packages,
# which causes pythonXX-sys to fall back to detecting the
# interpreter from the path.
"PYTHON_2.7_NO_PKG_CONFIG": "1",
"PATH": bindir + os.pathsep + os.environ.get("PATH", ""),
}
)
if not os.path.exists(ext.path):
raise DistutilsFileError(
f"can't find Rust extension project file: {ext.path}"
)
features = set(ext.features)
features.update(rust_features(ext=False, binding=ext.binding))
# test cargo command
feature_args = ["--features", " ".join(features)] if features else []
args = (
["cargo", "test", "--manifest-path", ext.path]
+ feature_args
+ list(ext.args or [])
)
# Execute cargo command
print(" ".join(args))
try:
subprocess.check_output(args, env=env)
except subprocess.CalledProcessError as e:
raise CompileError(
"cargo failed with code: %d\n%s"
% (e.returncode, e.output.decode("utf-8"))
)
except OSError:
raise DistutilsExecError(
"Unable to execute 'cargo' - this package "
"requires Rust to be installed and "
"cargo to be on the PATH"
)
else:
print(f"test completed for '{ext.name}' extension")
```
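For reference, this command is typically wired into a project's `setup.py` next to the extension declaration; a hedged sketch (the package and extension names are placeholders):
```python
from setuptools import setup
from setuptools_rust import RustExtension
from setuptools_rust.test import test_rust
setup(
    name="my-package",  # placeholder
    rust_extensions=[RustExtension("my_package._native", "Cargo.toml")],
    cmdclass={"test_rust": test_rust},  # enables `python setup.py test_rust`
)
```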
|
{
"source": "jeverling/flask-mongoalchemy",
"score": 3
}
|
#### File: books_collection/collection/documents.py
```python
from collection import db
from flaskext.mongoalchemy import BaseQuery
import re
class BookQuery(BaseQuery):
def starting_with(self, letter):
regex = r'^' + letter
return self.filter({'title' : re.compile(regex, re.IGNORECASE)})
class Book(db.Document):
query_class = BookQuery
title = db.StringField()
year = db.IntField()
```
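With `query_class` set, the custom filter hangs directly off `Book.query`; a hedged sketch of how a view might use it (the route is illustrative, not part of this app's actual views module):
```python
from collection import app
from collection.documents import Book
@app.route('/books/<letter>')
def books_starting_with(letter):
    # Query objects are iterable; starting_with() comes from BookQuery above.
    books = list(Book.query.starting_with(letter))
    return ', '.join(book.title for book in books)
```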
#### File: books_collection/collection/__init__.py
```python
from flask import Flask
from flaskext.mongoalchemy import MongoAlchemy
import string
app = Flask(__name__)
app.config['MONGOALCHEMY_DATABASE'] = 'books_collection'
app.config['SECRET_KEY'] = 'very secret, do you believe?'
app.config['DEBUG'] = True
db = MongoAlchemy(app)
@app.context_processor
def put_letters_on_request():
return { 'letters' : string.ascii_uppercase }
from views import *
```
#### File: flask-mongoalchemy/tests/test_mongodb_uri.py
```python
from tests import BaseTestCase
from flask import Flask
from nose.tools import assert_equals
class MongoDBURITestCase(BaseTestCase):
"MongoDB URI generation"
def setup(self):
self.app = Flask(__name__)
self.app.config['MONGOALCHEMY_DATABASE'] = 'test'
def should_use_localhost_for_server_and_27017_for_port_when_only_the_database_name_was_specified(self):
from flaskext.mongoalchemy import _get_mongo_uri
assert_equals(_get_mongo_uri(self.app), 'mongodb://localhost:27017')
def should_be_able_to_generate_an_uri_using_only_the_username_without_password(self):
self.app.config['MONGOALCHEMY_USER'] = 'luke'
from flaskext.mongoalchemy import _get_mongo_uri
assert_equals(_get_mongo_uri(self.app), 'mongodb://luke@localhost:27017')
def should_be_able_to_generate_an_uri_using_an_username_and_a_password(self):
self.app.config['MONGOALCHEMY_USER'] = 'luke'
self.app.config['MONGOALCHEMY_PASSWORD'] = '<PASSWORD>'
from flaskext.mongoalchemy import _get_mongo_uri
assert_equals(_get_mongo_uri(self.app), 'mongodb://luke:father@localhost:27017')
def should_be_able_to_use_not_only_localhost_for_server_and_27017_for_port(self):
self.app.config['MONGOALCHEMY_SERVER'] = 'database.lukehome.com'
self.app.config['MONGOALCHEMY_PORT'] = '42'
from flaskext.mongoalchemy import _get_mongo_uri
assert_equals(_get_mongo_uri(self.app), 'mongodb://database.lukehome.com:42')
def should_be_able_to_generate_an_uri_with_options(self):
self.app.config['MONGOALCHEMY_SERVER'] = 'database.lukehome.com'
self.app.config['MONGOALCHEMY_OPTIONS'] = 'safe=true'
from flaskext.mongoalchemy import _get_mongo_uri
assert_equals(_get_mongo_uri(self.app), 'mongodb://database.lukehome.com:27017/?safe=true')
```
|
{
"source": "jeverling/pdm",
"score": 2
}
|
#### File: pdm/cli/options.py
```python
from __future__ import annotations
import argparse
import os
from typing import Any, Callable, Sequence
from click import secho
from pdm._types import Protocol
class ActionCallback(Protocol):
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None,
) -> None:
...
class Option:
"""A reusable option object which delegates all arguments
to parser.add_argument().
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.args = args
self.kwargs = kwargs
def add_to_parser(self, parser: argparse._ActionsContainer) -> None:
parser.add_argument(*self.args, **self.kwargs)
def add_to_group(self, group: argparse._ArgumentGroup) -> None:
group.add_argument(*self.args, **self.kwargs)
class CallbackAction(argparse.Action):
def __init__(self, *args: Any, callback: ActionCallback, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.callback = callback
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None,
) -> None:
return self.callback(parser, namespace, values, option_string=option_string)
class ArgumentGroup(Option):
"""A reusable argument group object which can call `add_argument()`
to add more arguments. And itself will be registered to the parser later.
"""
    def __init__(
        self,
        name: str | None = None,
        is_mutually_exclusive: bool = False,
        required: bool | None = None,
    ) -> None:

self.name = name
self.options: list[Option] = []
self.required = required
self.is_mutually_exclusive = is_mutually_exclusive
def add_argument(self, *args: Any, **kwargs: Any) -> None:
if args and isinstance(args[0], Option):
self.options.append(args[0])
else:
self.options.append(Option(*args, **kwargs))
def add_to_parser(self, parser: argparse._ActionsContainer) -> None:
group: argparse._ArgumentGroup
if self.is_mutually_exclusive:
group = parser.add_mutually_exclusive_group(required=self.required)
else:
group = parser.add_argument_group(self.name)
for option in self.options:
option.add_to_group(group)
def add_to_group(self, group: argparse._ArgumentGroup) -> None:
self.add_to_parser(group)
def deprecated(message: str, type_: type = str) -> Callable[[Any], Any]:
"""Prints deprecation message for the argument"""
def wrapped_type(obj: Any) -> Any:
secho(f"DEPRECATED: {message}", fg="red", err=True)
return type_(obj)
return wrapped_type
verbose_option = Option(
"-v",
"--verbose",
action="count",
default=0,
help="-v for detailed output and -vv for more detailed",
)
dry_run_option = Option(
"--dry-run",
action="store_true",
default=False,
help="Show the difference only and don't perform any action",
)
pep582_option = Option(
"--pep582",
const="AUTO",
metavar="SHELL",
nargs="?",
help="Print the command line to be eval'd by the shell",
)
install_group = ArgumentGroup("Install options")
install_group.add_argument(
"--no-editable",
action="store_true",
dest="no_editable",
help="Install non-editable versions for all packages",
)
install_group.add_argument(
"--no-self",
action="store_true",
dest="no_self",
help="Don't install the project itself",
)
def no_isolation_callback(
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None,
) -> None:
os.environ["PDM_BUILD_ISOLATION"] = "no"
no_isolation_option = Option(
"--no-isolation",
dest="build_isolation",
action=CallbackAction,
nargs=0,
help="Do not isolate the build in a clean environment",
callback=no_isolation_callback,
)
install_group.options.append(no_isolation_option)
groups_group = ArgumentGroup("Dependencies selection")
groups_group.add_argument(
"-s",
"--section",
dest="groups",
metavar="GROUP",
action="append",
help="(DEPRECATED) alias of `--group`",
default=[],
type=deprecated(
"`-s/--section` is deprecated in favor of `-G/--group` "
"and will be removed in the next minor release."
),
)
groups_group.add_argument(
"-G",
"--group",
dest="groups",
metavar="GROUP",
action="append",
help="Select group of optional-dependencies "
"or dev-dependencies(with -d). Can be supplied multiple times, "
'use ":all" to include all groups under the same species.',
default=[],
)
groups_group.add_argument(
"--no-default",
dest="default",
action="store_false",
default=True,
help="Don't include dependencies from the default group",
)
dev_group = ArgumentGroup("dev", is_mutually_exclusive=True)
dev_group.add_argument(
"-d",
"--dev",
default=True,
dest="dev",
action="store_true",
help="Select dev dependencies",
)
dev_group.add_argument(
"--prod",
"--production",
dest="dev",
action="store_false",
help="Unselect dev dependencies",
)
groups_group.options.append(dev_group)
save_strategy_group = ArgumentGroup("save_strategy", is_mutually_exclusive=True)
save_strategy_group.add_argument(
"--save-compatible",
action="store_const",
dest="save_strategy",
const="compatible",
help="Save compatible version specifiers",
)
save_strategy_group.add_argument(
"--save-wildcard",
action="store_const",
dest="save_strategy",
const="wildcard",
help="Save wildcard version specifiers",
)
save_strategy_group.add_argument(
"--save-exact",
action="store_const",
dest="save_strategy",
const="exact",
help="Save exact version specifiers",
)
update_strategy_group = ArgumentGroup("update_strategy", is_mutually_exclusive=True)
update_strategy_group.add_argument(
"--update-reuse",
action="store_const",
dest="update_strategy",
const="reuse",
help="Reuse pinned versions already present in lock file if possible",
)
update_strategy_group.add_argument(
"--update-eager",
action="store_const",
dest="update_strategy",
const="eager",
help="Try to update the packages and their dependencies recursively",
)
project_option = Option(
"-p",
"--project",
dest="project_path",
help="Specify another path as the project root, "
"which changes the base of pyproject.toml and __pypackages__",
)
global_option = Option(
"-g",
"--global",
dest="global_project",
action="store_true",
help="Use the global project, supply the project root with `-p` option",
)
clean_group = ArgumentGroup("clean", is_mutually_exclusive=True)
clean_group.add_argument(
"--clean", action="store_true", dest="clean", help="clean unused packages"
)
clean_group.add_argument(
"--no-clean", action="store_false", dest="clean", help="don't clean unused packages"
)
sync_group = ArgumentGroup("sync", is_mutually_exclusive=True)
sync_group.add_argument(
"--sync", action="store_true", dest="sync", help="sync packages"
)
sync_group.add_argument(
"--no-sync", action="store_false", dest="sync", help="don't sync packages"
)
packages_group = ArgumentGroup("Package Arguments")
packages_group.add_argument(
"-e",
"--editable",
dest="editables",
action="append",
help="Specify editable packages",
default=[],
)
packages_group.add_argument("packages", nargs="*", help="Specify packages")
ignore_python_option = Option(
"-I",
"--ignore-python",
action="store_true",
help="Ignore the Python path saved in the pdm.toml config",
)
```
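A minimal sketch (not part of pdm itself) of how these reusable objects compose onto a plain `argparse` parser; the program name and arguments are illustrative:
```python
import argparse

parser = argparse.ArgumentParser(prog="demo")
verbose_option.add_to_parser(parser)
dry_run_option.add_to_parser(parser)
groups_group.add_to_parser(parser)         # -G/--group, --no-default, -d/--prod
save_strategy_group.add_to_parser(parser)  # mutually exclusive --save-* flags

ns = parser.parse_args(["-vv", "--dry-run", "-G", "docs", "--save-exact"])
assert ns.verbose == 2 and ns.dry_run
assert ns.groups == ["docs"] and ns.save_strategy == "exact"
```
Because `ArgumentGroup` can nest another `ArgumentGroup` (as `groups_group` nests `dev_group`), one registration call installs a whole option family, including its mutual-exclusion constraints.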
#### File: pdm/tests/test_integration.py
```python
import pytest
from pdm.utils import cd
@pytest.mark.integration
@pytest.mark.parametrize("python_version", ["2.7", "3.6", "3.7", "3.8", "3.9"])
def test_basic_integration(python_version, project_no_init, invoke):
"""An e2e test case to ensure PDM works on all supported Python versions"""
project = project_no_init
project.root.joinpath("foo.py").write_text("import django\n")
additional_args = ["--no-self"] if python_version == "2.7" else []
invoke(["init"], input="\ny\n\n\n\n\n\n>=2.7\n", obj=project, strict=True)
invoke(["use", "-f", python_version], obj=project, strict=True)
project._environment = None
invoke(["add", "django"] + additional_args, obj=project, strict=True)
with cd(project.root):
invoke(["run", "python", "foo.py"], obj=project, strict=True)
if python_version != "2.7":
invoke(["build", "-v"], obj=project, strict=True)
invoke(["remove", "-v", "django"] + additional_args, obj=project, strict=True)
result = invoke(["list"], obj=project, strict=True)
assert not any(
line.strip().lower().startswith("django") for line in result.output.splitlines()
)
@pytest.mark.integration
def test_actual_list_freeze(project, invoke):
project.meta["requires-python"] = ">=3.6"
invoke(["add", "click==7.1.2"], obj=project, strict=True)
r = invoke(["list", "--freeze"], obj=project)
assert "click==7.1.2" in r.output
```
|
{
"source": "JevexEndo/mw4-banners",
"score": 3
}
|
#### File: mw4-banners/generate/mcuuid.py
```python
import random
import struct
from uuid import UUID
class MCUUID(UUID):
rng = random.Random(x="Mapwreck 4")
def __init__(self) -> None:
super().__init__(int=self.rng.getrandbits(128), version=4)
@property
def nbt(self) -> str:
return "[I;{},{},{},{}]".format(
struct.unpack(">i", bytes.fromhex(self.hex[0:8]))[0],
struct.unpack(">i", bytes.fromhex(self.hex[8:16]))[0],
struct.unpack(">i", bytes.fromhex(self.hex[16:24]))[0],
struct.unpack(">i", bytes.fromhex(self.hex[24:32]))[0],
)
```
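Because `rng` is a class-level `random.Random` seeded with the string `"Mapwreck 4"`, every run of the generator yields the same UUID sequence, which keeps generated banner identifiers stable across rebuilds. A quick illustrative use:
```python
u = MCUUID()
print(u)      # deterministic version-4 UUID, identical on every run
print(u.nbt)  # e.g. "[I;-123,456,...]", the four signed 32-bit ints NBT expects
```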
|
{
"source": "JevgenijsGalaktionovs/AntBot",
"score": 3
}
|
#### File: dns_main/src/demonstration.py
```python
import time
from math import radians
from service_router import *
from locomotion import *
from math import asin, pi, atan2
# positionN, velocityAll, accelerationAll, positionAll and readFSR are
# provided by the star imports above
from kinematics import Kinematics
K=Kinematics()
threshold = 30
stepSize = 50
riser = 163
thread = 266
def terminate():
    ee_xyz, servopos = K.doFkine(readPos())
    if (abs(ee_xyz[2] - ee_xyz[5]) < 20 and
            abs(ee_xyz[8] - ee_xyz[11]) < 20 and
            abs(ee_xyz[8] - ee_xyz[2]) < 50):
        print("yeay im on top of stairs")
        return True
    return False
def standUpForStairs():
standup_pos = [2048, 2048, 1296, 2048, 2048, 1296,
2048, 2048, 1296, 2048, 2048, 1296,
2048, 2048, 1296, 2048, 2048, 1296]
front_standup = list_combine(leg[1] + leg[2], standup_pos)
rear_standup = list_combine(leg[5] + leg[6], standup_pos)
middle_standup = list_combine(leg[3] + leg[4], standup_pos)
positionN(front_standup)
time.sleep(1)
positionN(rear_standup)
time.sleep(1)
positionN(middle_standup)
time.sleep(1)
def correctMiddleLegs(z):
Up = [0, 0, z]
LiftUp = calc_motion(Up)
pos = list()
pos.extend(LiftUp[12:18])
pos.extend(LiftUp[18:24])
positionN(pos)
leg_case = [3,4]
check_position_error_legs(80, 20, pos, leg_case)
ServoCentering=[7,2048,10,2048]
positionN(ServoCentering)
time.sleep(1)
Down = [0, 0, -z]
LiftDown = calc_motion(Down)
pos1 = list()
pos1.extend(LiftDown[12:18])
pos1.extend(LiftDown[18:24])
positionN(pos1)
leg_case = [3,4]
check_position_error_legs(80, 20, pos1, leg_case)
def initialDistance(distance):
all_pos = readPos()
ee_xyz, servopos = K.doFkine(all_pos)
dist2FirstStep_1 = distance
dist2FirstStep_2 = distance
dist2FirstStep_3 = distance + ee_xyz[1] - ee_xyz[7]
dist2FirstStep_4 = distance + ee_xyz[1] - ee_xyz[10]
dist2FirstStep_5 = distance + ee_xyz[1] - ee_xyz[13]
dist2FirstStep_6 = distance + ee_xyz[1] - ee_xyz[16]
dist2FirstStep = dist2FirstStep_1, dist2FirstStep_2, dist2FirstStep_3, dist2FirstStep_4, dist2FirstStep_5, dist2FirstStep_6
    print(dist2FirstStep)
return dist2FirstStep
def initConfig_legs(depth):
maxy = 344.74638441867046
r = 392.55798277243395 - 141.33 #maximumy - y_offset of leg one
miny = 181.0804846109524
phai = asin((depth-miny)/r) * 2048/pi # change of coxa in steps
#print(int(phai))
if depth < maxy:
standup_pos = [ 1536 + int(phai), 2048, 1296, 2560 - int(phai), 2048, 1296,
2048 , 2048, 1296, 2048 , 2048, 1296,
2560 - int(phai), 2048, 1296, 1536 + int(phai), 2048, 1296]
lift_up = [2048, 2448,1296,2048,2448,1296,
2048, 2448,1296,2048,2448,1296,
2048, 2448,1296,2048,2448,1296]
print(standup_pos)
front_liftup = list_combine(leg[2] + leg[5],lift_up)
positionN(front_liftup)
time.sleep(2)
front_standup = list_combine(leg[2] + leg[5], standup_pos)
positionN(front_standup)
time.sleep(1)
rear_liftup = list_combine(leg[1] + leg[6],lift_up)
positionN(rear_liftup)
time.sleep(1)
rear_standup = list_combine(leg[1] + leg[6], standup_pos)
positionN(rear_standup)
time.sleep(1)
rear_standup = list_combine(leg[5] + leg[6], standup_pos)
positionN(rear_standup)
time.sleep(1)
ee_xyz, servopos = K.doFkine(readPos())
return maxy - ee_xyz[1]
def correctRotation(depth,riser):
slope = atan2(riser,depth)*180/pi
gamma, beta = K.get_orientation([1,5,6])
new_gamma = slope - gamma
parallelGait(0,0,int(new_gamma-4),0,0,0)
time.sleep(3)
print("Slope is:", new_gamma)
def moveForward(x, y, z, alpha, beta, gamma, distance):
Forward = [x, y, z]
Up = [0, 0, z]
Down = [x, y, 0]
Push = [0, 0, 0]
HalfForward = [0.5*x, 0.5*y, z]
HalfUp = [ 0, 0, z]
HalfDown = [0.5*x, 0.5*y, 0]
PushBackwards = calc_motion(Push)
LiftUp = calc_motion(Up)
LiftDown = calc_motion(Down)
PutForward = calc_motion(Forward)
HalfLiftUp = calc_motion(HalfUp)
HalfLiftDown = calc_motion(HalfDown)
HalfPutForward = calc_motion(HalfForward)
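    # Tripod-gait note (inferred from the slicing below): legs 2, 3 and 6
    # swing while legs 1, 4 and 5 stay planted, then the two groups swap
    # roles. calc_motion() appears to return six values per leg, so the
    # slice [i*6 : i*6+6] addresses leg i+1.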
while distance > 0.75 * stepSize:
if distance > 1.5 * stepSize:
pos = list()
pos.extend(LiftUp[6:12])
pos.extend(LiftUp[12:18])
pos.extend(LiftUp[30:36])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos, leg_case)
pos1 = list()
pos1.extend(PutForward[6:12])
pos1.extend(PutForward[12:18])
pos1.extend(PutForward[30:36])
positionN(pos1)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos1, leg_case)
pos2 = list()
pos2.extend(LiftDown[6:12])
pos2.extend(LiftDown[12:18])
pos2.extend(LiftDown[30:36])
positionN(pos2)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos2, leg_case)
pos3 = list()
pos3.extend(LiftUp[0:6])
pos3.extend(PushBackwards[6:12])
pos3.extend(PushBackwards[12:18])
pos3.extend(LiftUp[18:24])
pos3.extend(LiftUp[24:30])
pos3.extend(PushBackwards[30:36])
positionN(pos3)
check_position_error(40, 50, pos3)
pos4 = list()
pos4.extend(PushBackwards[0:6])
pos4.extend(PushBackwards[18:24])
pos4.extend(PushBackwards[24:30])
positionN(pos4)
leg_case = [1,4,5]
check_position_error_legs(20, 30, pos4, leg_case)
distance = distance - stepSize
else:
pos = list()
pos.extend(HalfLiftUp[6:12])
pos.extend(HalfLiftUp[12:18])
pos.extend(HalfLiftUp[30:36])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos, leg_case)
pos1 = list()
pos1.extend(HalfPutForward[6:12])
pos1.extend(HalfPutForward[12:18])
pos1.extend(HalfPutForward[30:36])
positionN(pos1)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos1, leg_case)
pos2 = list()
pos2.extend(HalfLiftDown[6:12])
pos2.extend(HalfLiftDown[12:18])
pos2.extend(HalfLiftDown[30:36])
positionN(pos2)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos2, leg_case)
pos3 = list()
pos3.extend(HalfLiftUp[0:6])
pos3.extend(PushBackwards[6:12])
pos3.extend(PushBackwards[12:18])
pos3.extend(HalfLiftUp[18:24])
pos3.extend(HalfLiftUp[24:30])
pos3.extend(PushBackwards[30:36])
positionN(pos3)
check_position_error(80, 50, pos3)
pos4 = list()
pos4.extend(PushBackwards[0:6])
pos4.extend(PushBackwards[18:24])
pos4.extend(PushBackwards[24:30])
positionN(pos4)
leg_case = [1,4,5]
check_position_error_legs(20, 30, pos4, leg_case)
distance = distance - (0.5 *stepSize)
time.sleep(0.5)
return distance
def walkUp(distanceToStair, x, stepSize, threshold, riser, alpha, beta, gamma):
orientation = [alpha,beta,gamma]
Forward = [x, stepSize, threshold]
Up = [0, 0, threshold]
Down = [x, stepSize, 0]
Push = [0, 0, 0]
UpForward = [x, stepSize, threshold+riser]
StepUp = [0, 0, threshold+riser]
StepDownFirst = [x, stepSize, threshold/2+riser]
StepDownSecond = [x, 0, threshold/2+riser]
PushBackwards = calc_motion(Push,orientation)
LiftUp = calc_motion(Up,orientation)
LiftDown = calc_motion(Down,orientation)
PutForward = calc_motion(Forward,orientation)
StepUpForward = calc_motion(UpForward,orientation)
StepUpUp = calc_motion(StepUp,orientation)
StepDownDownFirst = calc_motion(StepDownFirst,orientation)
StepDownDownSecond = calc_motion(StepDownSecond,orientation)
pos = []
##Lift_Up_First_Leg
for i in range(len(distanceToStair)):
if i == 0 or i == 3 or i == 4:
if distanceToStair[i] < stepSize:
pos.extend(StepUpUp[i*6 : i*6+6])
else:
pos.extend(LiftUp[i*6 : i*6+6])
positionN(pos)
leg_case = [1,4,5]
check_position_error_legs(140, 30, pos, leg_case)
pos = []
##Put_Forward_First_Leg
for i in range(len(distanceToStair)):
if i == 0 or i == 3 or i == 4:
if distanceToStair[i] < stepSize:
pos.extend(StepUpForward[i*6 : i*6+6])
else:
pos.extend(PutForward[i*6 : i*6+6])
positionN(pos)
leg_case = [1,4,5]
check_position_error_legs(140, 30, pos, leg_case)
pos = []
##Step_Down_First_leg
for i in range(len(distanceToStair)):
if i == 0 or i == 3 or i == 4:
if distanceToStair[i] < stepSize:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
else:
pos.extend(LiftDown[i*6 : i*6+6])
positionN(pos)
leg_case = [1,4,5]
check_position_error_legs(140, 30, pos, leg_case)
pos = []
check_contact()
#########################################################################################################
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
UpPushBackwards = [0, -stepSize, 0]
StepUpPushBackwards = calc_motion(UpPushBackwards)
##Lift_Up_Second_Leg
for i in range(len(distanceToStair)):
if i == 1 or i == 2 or i == 5:
if distanceToStair[i] < stepSize:
pos.extend(StepUpUp[i*6 : i*6+6])
else:
pos.extend(LiftUp[i*6 : i*6+6])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(120, 30, pos, leg_case)
pos = []
##Put_Forward_Second_Leg
for i in range(len(distanceToStair)):
if i == 0 or i == 3 or i == 4:
if distanceToStair[i] < stepSize:
pos.extend(StepUpPushBackwards[i*6 : i*6+6])
else:
pos.extend(StepUpPushBackwards[i*6 : i*6+6])
positionN(pos)
leg_case = [1,4,5]
check_position_error_legs(120, 30, pos, leg_case)
pos = []
##Step_Down_Second_leg
for i in range(len(distanceToStair)):
if i == 1 or i == 2 or i == 5:
if distanceToStair[i] < stepSize:
pos.extend(StepDownDownSecond[i*6 : i*6+6])
else:
pos.extend(PushBackwards[i*6 : i*6+6])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(120, 30, pos, leg_case)
pos = []
check_contact()
distanceToStair = [i - stepSize for i in distanceToStair]
def updateDistance(distanceToStair, stepSize):
distanceToStair = [i - stepSize for i in distanceToStair]
for i in range(len(distanceToStair)):
if distanceToStair[i] < 0:
distanceToStair[i] = distanceToStair[i] + thread
    print(distanceToStair)
return distanceToStair
def rotateAndTranslate(riser,climbed_stairs_front, climbed_stairs_rear):
gamma, beta = K.get_orientation([1, 5, 6])
parallelGait(0, -beta, -gamma, 0, 0, 0)
time.sleep(2)
a = K.calc_translationStairs(riser,climbed_stairs_front, climbed_stairs_rear)
parallelGait(0, 0, 0, 0, a[1], a[0])
time.sleep(2)
return beta , gamma
def moveForwardOnStair(x, y, z, alpha, beta, gamma, distance):
initialDistance = distance
orientation = [alpha, beta, gamma]
Forward = [x, y, z]
Up = [0, 0, z]
Down = [x, y, 0]
Push = [0, 0, 0]
HalfForward = [0.5*x, 0.5*y, z]
HalfUp = [0, 0, z]
HalfDown = [0.5*x, 0.5*y, 0]
PushBackwards = calc_motion(Push, orientation)
LiftUp = calc_motion(Up, orientation)
LiftDown = calc_motion(Down, orientation)
PutForward = calc_motion(Forward, orientation)
HalfLiftUp = calc_motion(HalfUp, orientation)
HalfLiftDown = calc_motion(HalfDown, orientation)
HalfPutForward = calc_motion(HalfForward, orientation)
    print(distance)
while distance > 0.75 * stepSize:
if distance > 1.5 * stepSize:
pos = list()
pos.extend(LiftUp[6:12])
pos.extend(LiftUp[12:18])
pos.extend(LiftUp[30:36])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos, leg_case)
pos1 = list()
pos1.extend(PutForward[6:12])
pos1.extend(PutForward[12:18])
pos1.extend(PutForward[30:36])
positionN(pos1)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos1, leg_case)
pos2 = list()
pos2.extend(LiftDown[6:12])
pos2.extend(LiftDown[12:18])
pos2.extend(LiftDown[30:36])
positionN(pos2)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos2, leg_case)
pos3 = list()
pos3.extend(LiftUp[0:6])
pos3.extend(PushBackwards[6:12])
pos3.extend(PushBackwards[12:18])
pos3.extend(LiftUp[18:24])
pos3.extend(LiftUp[24:30])
pos3.extend(PushBackwards[30:36])
positionN(pos3)
check_position_error(20, 50, pos3)
pos4 = list()
pos4.extend(PushBackwards[0:6])
pos4.extend(PushBackwards[18:24])
pos4.extend(PushBackwards[24:30])
positionN(pos4)
leg_case = [1,4,5]
check_position_error_legs(20, 30, pos4, leg_case)
distance = distance - stepSize
else:
pos = list()
pos.extend(HalfLiftUp[6:12])
pos.extend(HalfLiftUp[12:18])
pos.extend(HalfLiftUp[30:36])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos, leg_case)
pos1 = list()
pos1.extend(HalfPutForward[6:12])
pos1.extend(HalfPutForward[12:18])
pos1.extend(HalfPutForward[30:36])
positionN(pos1)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos1, leg_case)
pos2 = list()
pos2.extend(HalfLiftDown[6:12])
pos2.extend(HalfLiftDown[12:18])
pos2.extend(HalfLiftDown[30:36])
positionN(pos2)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos2, leg_case)
pos3 = list()
pos3.extend(HalfLiftUp[0:6])
pos3.extend(PushBackwards[6:12])
pos3.extend(PushBackwards[12:18])
pos3.extend(HalfLiftUp[18:24])
pos3.extend(HalfLiftUp[24:30])
pos3.extend(PushBackwards[30:36])
positionN(pos3)
check_position_error(20, 50, pos3)
pos4 = list()
pos4.extend(PushBackwards[0:6])
pos4.extend(PushBackwards[18:24])
pos4.extend(PushBackwards[24:30])
positionN(pos4)
leg_case = [1,4,5]
check_position_error_legs(20, 30, pos4, leg_case)
distance = distance - (0.5 *stepSize)
time.sleep(0.5)
return distance
def walkUpAllLegs(distanceToStair, x, stepSize, threshold, riser, alpha, beta, gamma):
orientation = [alpha,beta,gamma]
Forward = [x, stepSize, threshold]
Up = [0, 0, threshold]
Down = [x, stepSize, 0]
Push = [0, 0, 0]
UpForward = [x, stepSize, threshold+riser]
StepUp = [0, 0, threshold+riser]
StepDownFirst = [x, stepSize, threshold/2+riser]
StepDownSecond = [x, 0, threshold/2+riser]
PushBackwards = calc_motion(Push,orientation)
LiftUp = calc_motion(Up,orientation)
LiftDown = calc_motion(Down,orientation)
PutForward = calc_motion(Forward,orientation)
StepUpForward = calc_motion(UpForward,orientation)
StepUpUp = calc_motion(StepUp,orientation)
StepDownDownFirst = calc_motion(StepDownFirst,orientation)
StepDownDownSecond = calc_motion(StepDownSecond,orientation)
pos = []
##Lift_Up_First_Leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_First_Leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_First_leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
#########################################################################################################
##Lift_Up_Second_Leg
for i in range(len(distanceToStair)):
if i == 5:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_Second_Leg
for i in range(len(distanceToStair)):
if i == 5:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_Second_leg
for i in range(len(distanceToStair)):
if i ==5:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
distanceToStair = [i - stepSize for i in distanceToStair]
parallelGait(0,0,0,0,0,riser/2)
time.sleep(2)
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
Forward = [x, stepSize, threshold]
Up = [0, 0, threshold]
Down = [x, stepSize, 0]
Push = [0, 0, 0]
UpForward = [x, stepSize, threshold+riser]
StepUp = [0, 0, threshold+riser]
StepDownFirst = [x, stepSize, threshold/2+riser]
StepDownSecond = [x, 0, threshold/2+riser]
PushBackwards = calc_motion(Push,orientation)
LiftUp = calc_motion(Up,orientation)
LiftDown = calc_motion(Down,orientation)
PutForward = calc_motion(Forward,orientation)
StepUpForward = calc_motion(UpForward,orientation)
StepUpUp = calc_motion(StepUp,orientation)
StepDownDownFirst = calc_motion(StepDownFirst,orientation)
StepDownDownSecond = calc_motion(StepDownSecond,orientation)
###########################################################################
##Lift_Up_First_Leg
for i in range(len(distanceToStair)):
if i == 2:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [3]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_First_Leg
for i in range(len(distanceToStair)):
if i == 2:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [3]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_First_leg
for i in range(len(distanceToStair)):
if i == 2:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [3]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
#########################################################################################################
##Lift_Up_Second_Leg
for i in range(len(distanceToStair)):
if i == 3:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [4]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_Second_Leg
for i in range(len(distanceToStair)):
if i == 3:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [4]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_Second_leg
for i in range(len(distanceToStair)):
if i ==3:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [4]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
distanceToStair = [i - stepSize for i in distanceToStair]
parallelGait(0,0,0,0,0,riser/2)
time.sleep(2)
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
Forward = [x, stepSize, threshold]
Up = [0, 0, threshold]
Down = [x, stepSize, 0]
Push = [0, 0, 0]
UpForward = [x, stepSize, threshold+riser]
StepUp = [0, 0, threshold+riser]
StepDownFirst = [x, stepSize, threshold/2+riser]
StepDownSecond = [x, 0, threshold/2+riser]
PushBackwards = calc_motion(Push,orientation)
LiftUp = calc_motion(Up,orientation)
LiftDown = calc_motion(Down,orientation)
PutForward = calc_motion(Forward,orientation)
StepUpForward = calc_motion(UpForward,orientation)
StepUpUp = calc_motion(StepUp,orientation)
StepDownDownFirst = calc_motion(StepDownFirst,orientation)
StepDownDownSecond = calc_motion(StepDownSecond,orientation)
###########################################################################
##Lift_Up_First_Leg
for i in range(len(distanceToStair)):
if i == 0:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [1]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_First_Leg
for i in range(len(distanceToStair)):
if i == 0:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [1]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_First_leg
for i in range(len(distanceToStair)):
if i == 0:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [1]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
#########################################################################################################
##Lift_Up_Second_Leg
for i in range(len(distanceToStair)):
if i == 1:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [2]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_Second_Leg
for i in range(len(distanceToStair)):
if i == 1:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [2]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_Second_leg
for i in range(len(distanceToStair)):
if i ==1:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [2]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
distanceToStair = [i - stepSize for i in distanceToStair]
    timeToTerminate = terminate()
    return timeToTerminate
def moveUpOnlyLastLegs(distanceToStair, x, stepSize, threshold, riser, alpha, beta, gamma):
orientation = [alpha,beta,gamma]
Forward = [x, stepSize, threshold]
Up = [0, 0, threshold]
Down = [x, stepSize, 0]
Push = [0, 0, 0]
UpForward = [x, stepSize, threshold+riser]
StepUp = [0, 0, threshold+riser]
StepDownFirst = [x, stepSize, threshold/2+riser]
StepDownSecond = [x, 0, threshold/2+riser]
PushBackwards = calc_motion(Push,orientation)
LiftUp = calc_motion(Up,orientation)
LiftDown = calc_motion(Down,orientation)
PutForward = calc_motion(Forward,orientation)
StepUpForward = calc_motion(UpForward,orientation)
StepUpUp = calc_motion(StepUp,orientation)
StepDownDownFirst = calc_motion(StepDownFirst,orientation)
StepDownDownSecond = calc_motion(StepDownSecond,orientation)
pos = []
##Lift_Up_First_Leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(80, 20, pos, leg_case)
pos = []
##Put_Forward_First_Leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(80, 20, pos, leg_case)
pos = []
##Step_Down_First_leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(80, 20, pos, leg_case)
pos = []
check_contact()
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
#########################################################################################################
##Lift_Up_Second_Leg
for i in range(len(distanceToStair)):
if i == 5:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(80, 20, pos, leg_case)
pos = []
##Put_Forward_Second_Leg
for i in range(len(distanceToStair)):
if i == 5:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(80, 20, pos, leg_case)
pos = []
##Step_Down_Second_leg
for i in range(len(distanceToStair)):
if i ==5:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(80, 20, pos, leg_case)
pos = []
check_contact()
distanceToStair = [i - stepSize for i in distanceToStair]
parallelGait(0,0,0,0,0,riser/2)
time.sleep(2)
def translateAboveRiser():
ee_xyz, servopos = K.doFkine(readPos())
legZPlancement = abs(ee_xyz[8]+ee_xyz[11])/2
print("first leg placement in z is" , legZPlancement)
if legZPlancement-riser < 70:
translationZ = riser + 70 - legZPlancement
parallelGait(0,0,0,0,0,translationZ)
time.sleep(3)
def rememberRemember():
ee_xyz, servopos = K.doFkine(readPos())
return servopos
def StairClimbingDemo():
torque(0)
pwm_list = [800]*18
pwmAll(pwm_list)
scaler_acc = [20] * 18
scaler_vel = [20] * 18
velocityAll(scaler_vel)
accelerationAll(scaler_acc)
torque(1)
standUpForStairs()
print(stepSize)
## Move forward to the first step on the stair. 700 = mm. Assuming the robot is placed at this distance
raw_input("Press something to move forward")
distanceToStair = initialDistance(moveForward(0, stepSize, threshold, 0, 0, 0, 250))
#distanceToStair = 25.0, 25.0, 376.80185049724594, 376.02627441364115, 723.417427577023, 722.8063562905638
raw_input("Rotate coxas to get by thread distance")
initConfig_legs(thread)
time.sleep(3)
#####Testing new stuff
raw_input("Translate by half of the riser")
parallelGait(0,0,0,0,0,riser/2+20)
time.sleep(2)
raw_input("Start climbing up")
distanceToStair = 25.0, 25.0, 305.0, 305.0, 629.9592456137348, 629.7755305462598
walkUp(distanceToStair,0, stepSize*2, threshold, riser, 0,0,0)
raw_input("Move Forward on stairs")
distance = moveForwardOnStair(0, stepSize, threshold, 0, 0, 0, thread/2+stepSize)
raw_input("Translate by half of the riser")
parallelGait(0,0,0,0,0,riser/2)
time.sleep(2)
distanceToStair = 25.0, 25.0, 25.0, 25.0, 629.9592456137348, 629.7755305462598
raw_input("Again climb up")
walkUp(distanceToStair,0, stepSize*2, threshold, riser, 0,0,0)
raw_input("If needed translate above the riser and adjust the rotation")
translateAboveRiser()
stairs = True
#rememberState = True
#storedPosition = []
while stairs is True:
correctRotation(thread,riser)
time.sleep(2)
raw_input("Correcting middle legs")
correctMiddleLegs(20)
raw_input("Move forwards on stairs")
distance = moveForwardOnStair(0, stepSize, threshold, 0, 0, 0, riser+stepSize)
distanceToStair = 25.0, 25.0, 25.0, 25.0, 25.9592456137348, 25.7755305462598
raw_input("Walk up all legs")
checkForTermination = walkUpAllLegs(distanceToStair,0, stepSize*2, threshold, riser, 0,0,0)
correctRotation(thread,riser)
time.sleep(1)
parallelGait(0,0,0,0,50,0)
time.sleep(2)
parallelGait(0,0,0,0,50,0)
time.sleep(2)
translateAboveRiser()
raw_input("Exit program")
if checkForTermination == True:
break
time.sleep(2)
parallelGait(0,0,0,0,50,0)
time.sleep(2)
parallelGait(0,0,0,0,50,0)
time.sleep(2)
moveForwardOnStair(0, stepSize, threshold, 0, 0, 0, riser+stepSize)
distanceToStair = 25.0, 25.0, 25.0, 25.0, 25.0, 25.0
moveUpOnlyLastLegs(distanceToStair,0,stepSize,threshold,riser,0,0,0)
moveForward(0, stepSize, threshold, 0, 0, 0, 550)
StairClimbingDemo()
```
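The stair constants at the top of this file imply a fixed slope that `correctRotation()` works against; a quick standalone check of that geometry, using the same values:
```python
from math import atan2, pi

riser, thread = 163, 266            # mm, as defined in demonstration.py
slope_deg = atan2(riser, thread) * 180 / pi
print(round(slope_deg, 1))          # ~31.5 degrees of body pitch to match
```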
#### File: dns_main/src/keyboard_interface.py
```python
import time
from math import radians
from service_router import *
from locomotion import *
from demonstration import *
class _Getch:
"""Gets a single character from standard input. Does not echo to the screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
getch = _Getch()
def Stand_up():
torque(0)
pwm_list = [800] * 18 # Setting PWM to "high" max is 885
pwmAll(pwm_list)
scaler_acc = [20] * 18 # Setting Acceleration to "low"
scaler_vel = [20] * 18 # Setting Velocity to "low"
velocityAll(scaler_vel)
accelerationAll(scaler_acc)
torque(1)
standup_pos = [2048, 2048, 1296, 2048, 2048, 1296,
2048, 2048, 1296, 2048, 2048, 1296,
2048, 2048, 1296, 2048, 2048, 1296]
front_standup = list_combine(leg[1] + leg[2], standup_pos)
rear_standup = list_combine(leg[5] + leg[6], standup_pos)
middle_standup = list_combine(leg[3] + leg[4], standup_pos)
positionN(front_standup)
time.sleep(1)
positionN(rear_standup)
time.sleep(1)
positionN(middle_standup)
time.sleep(1)
def move(PushBackwards, LiftUp, LiftDown, PutForward):
pos = list()
pos.extend(LiftUp[6:12])
pos.extend(LiftUp[12:18])
pos.extend(LiftUp[30:36])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(20, 60, pos, leg_case)
pos1 = list()
pos1.extend(PutForward[6:12])
pos1.extend(PutForward[12:18])
pos1.extend(PutForward[30:36])
positionN(pos1)
leg_case = [2,3,6]
check_position_error_legs(20, 60, pos1, leg_case)
pos2 = list()
pos2.extend(LiftDown[6:12])
pos2.extend(LiftDown[12:18])
pos2.extend(LiftDown[30:36])
positionN(pos2)
leg_case = [2,3,6]
check_position_error_legs(20, 60, pos2, leg_case)
pos3 = list()
pos3.extend(LiftUp[0:6])
pos3.extend(PushBackwards[6:12])
pos3.extend(PushBackwards[12:18])
pos3.extend(LiftUp[18:24])
pos3.extend(LiftUp[24:30])
pos3.extend(PushBackwards[30:36])
positionN(pos3)
check_position_error(40, 80, pos3)
pos4 = list()
pos4.extend(PushBackwards[0:6])
pos4.extend(PushBackwards[18:24])
pos4.extend(PushBackwards[24:30])
positionN(pos4)
leg_case = [1,4,5]
check_position_error_legs(20, 60, pos4, leg_case)
def TactileCheck(LiftTactile):
pos5 = list()
pos5.extend(LiftTactile[0:6])
positionN(pos5)
leg_case = [1]
check_position_error_legs(40, 20, pos5, leg_case)
checkContactWithoutControlSystem()
def ChangeVelocity(x):
torque(0)
scaler_acc = [20+x] * 18 #Setting Acceleration to "low"
scaler_vel = [20+x] * 18 #Setting Velocity to "low"
velocityAll(scaler_vel)
accelerationAll(scaler_acc)
torque(1)
print("Acc is", scaler_acc[0])
standup_pos = [2048, 2048, 1296, 2048, 2048, 1296,
2048, 2048, 1296, 2048, 2048, 1296,
2048, 2048, 1296, 2048, 2048, 1296]
positionAll(standup_pos)
time.sleep(0.10)
return x
def Demo():
stairs = True
x = 20
PushF, LiftF, DownF, ForwardF, LiftBW, DownBW, ForwardBW, LiftLE, DownLE, ForwardLE, LiftRI, DownRI, ForwardRI, LiftTactile = CalculationMotions()
while stairs is True:
KeyboardControl()
getch = _Getch()
print ("Please enter something: ")
choice = getch()
if choice == "i":
print("Increase speed")
x += 10
x = ChangeVelocity(x)
elif choice == "k":
print("Decrease speed")
x -= 10
x = ChangeVelocity(x)
elif choice == "w":
move(PushF, LiftF, DownF, ForwardF)
print("Move Forward")
elif choice == "a":
print("Move Left")
move(PushF, LiftLE, DownLE, ForwardLE)
elif choice == "s":
print("Move backwards")
move(PushF, LiftBW, DownBW, ForwardBW)
elif choice == "d":
print("Move right")
move(PushF, LiftRI, DownRI, ForwardRI)
elif choice == "t":
print("Rotate Clockwise")
yawRotation(10)
elif choice == "y":
print("Rotate CounterClockwise")
yawRotation(-10)
elif choice == "v":
print("Dance")
DanceStar()
elif choice == "b":
print("Tactile")
TactileCheck(LiftTactile)
elif choice == "c":
print("CameraDemo")
CameraDemo()
elif choice == "m":
print("StairClimbingDemo")
StairClimbingDemo()
elif choice == "z":
print("Torque on")
torque(1)
elif choice == "x":
print("Torque off")
torque(0)
else:
print("")
print(" Enter number from 0 to 4!")
print(" You entered: %s" % choice)
print(" Try again in 3 seconds.")
time.sleep(3)
def CalculationMotions():
Forward = [0, 50, 40]
Up = [0, 0, 40]
Down = [0, 50, 0]
Push = [0, 0, 0]
PushF = calc_motion(Push)
LiftF = calc_motion(Up)
DownF = calc_motion(Down)
ForwardF = calc_motion(Forward)
ForwardB = [0, -50, 40]
UpB = [0, 0, 40]
DownB = [0, -50, 0]
LiftBW = calc_motion(UpB)
DownBW = calc_motion(DownB)
ForwardBW = calc_motion(ForwardB)
ForwardL = [-50, 0, 40]
UpL = [0, 0, 40]
DownL = [-50, 0, 0]
LiftLE = calc_motion(UpL)
DownLE = calc_motion(DownL)
ForwardLE = calc_motion(ForwardL)
ForwardR = [50, 0, 40]
UpR = [0, 0, 40]
DownR = [50, 0, 0]
LiftRI = calc_motion(UpR)
DownRI = calc_motion(DownR)
ForwardRI = calc_motion(ForwardR)
UpTac = [0, 0, 120]
LiftTactile = calc_motion(UpTac)
return PushF, LiftF, DownF, ForwardF, LiftBW, DownBW, ForwardBW, LiftLE, DownLE, ForwardLE, LiftRI, DownRI, ForwardRI, LiftTactile
def KeyboardControl():
print(" _________________________________________________ ")
print(" | Keyboard Control: |")
print(" | For Movement: |")
print(" | |")
print(" | w |")
print(" | a s d |")
print(" | |")
print(" | Increse/decrease speed |")
print(" | |")
print(" | i/k |")
print(" | |")
print(" | Rotate clockwise/counterclockwise |")
print(" | |")
print(" | t/y |")
print(" | Torque(z/x) |")
print(" | Dance(v) |")
print(" | CameraDemo(c) |")
print(" | Tactile(b) |")
print(" | StairClimbingDemo(m) |")
print(" |_________________________________________________|")
def parallelGaitCalc(alpha, beta, gamma, dist_x, dist_y, dist_z):
alpha_rad = radians(alpha)
beta_rad = radians(beta)
gamma_rad = radians(gamma)
current_pos = readPos()
next_pos = K.doIkineRotationEuler(current_pos, alpha_rad, beta_rad, gamma_rad, dist_x, dist_y, dist_z)
return next_pos
def CameraDemo():
clear_view_stairs()
time.sleep(1)
stair_dimensions=getAllStairsInfo()
print(stair_dimensions)
def DanceStar():
t = 0.5
##<NAME>
A1=parallelGaitCalc(0, 5, 0, 0, 0, 0)
A2=parallelGaitCalc(0, 10, 0, 0, 0, 0)
A3=parallelGaitCalc(0, 10, 5, 0, 0, 0)
A4=parallelGaitCalc(0, 10, 10, 0, 0, 0)
A5=parallelGaitCalc(0, 5, 10, 0, 0, 0)
A6=parallelGaitCalc(0, 0, 10, 0, 0, 0)
A7=parallelGaitCalc(0, 0, 5, 0, 0, 0)
A8=parallelGaitCalc(0, 0, 0, 0, 0, 0)
U0=parallelGaitCalc(0, 0, 0, 0, 0, 0)
U1=parallelGaitCalc(0, 0, 0, 0, 0, 50)
U2=parallelGaitCalc(0, 0, 0, 0, 0, 100)
    L1 = parallelGaitCalc(0, 0, 0, -60, 0, 100)
L2=parallelGaitCalc(0, 0, 0, -60, -30, 100)
L3=parallelGaitCalc(0, 0, 0, -60, -60, 100)
L4=parallelGaitCalc(0, 0, 0, -30, -60, 100)
L5=parallelGaitCalc(0, 0, 0, 0, -60, 100)
L6=parallelGaitCalc(0, 0, 0, 0, -30, 100)
R1= parallelGaitCalc(-15, 0, 0, 0, 0, 0)
R2= parallelGaitCalc(15, 0, 0, 0, 0, 0)
R3= parallelGaitCalc(0, 0, 0, 0, 0, 100)
R4= parallelGaitCalc(20, 0, 0, 0, 0, 0)
positionAll(A1)
time.sleep(t)
positionAll(A2)
time.sleep(t)
positionAll(A3)
time.sleep(t)
positionAll(A4)
time.sleep(t)
positionAll(A5)
time.sleep(t)
positionAll(A6)
time.sleep(t)
positionAll(A7)
time.sleep(t)
positionAll(A8)
time.sleep(t)
positionAll(U1)
time.sleep(2*t)
positionAll(U2)
time.sleep(2*t)
positionAll(L1)
time.sleep(2*t)
positionAll(L2)
time.sleep(2*t)
positionAll(L3)
time.sleep(2*t)
positionAll(L4)
time.sleep(2*t)
positionAll(L5)
time.sleep(2*t)
positionAll(L6)
time.sleep(2*t)
positionAll(U2)
time.sleep(2*t)
positionAll(U1)
time.sleep(2*t)
positionAll(U0)
time.sleep(2*t)
positionAll(R1)
time.sleep(4*t)
positionAll(R2)
time.sleep(4*t)
positionAll(R3)
time.sleep(4*t)
positionAll(R4)
time.sleep(4*t)
positionAll(U0)
torque(0)
pwm_list = [800]*18
pwmAll(pwm_list)
scaler_acc = [3] * 18
scaler_vel = [3] * 18
velocityAll(scaler_vel)
accelerationAll(scaler_acc)
#time.sleep(1)
torque(1)
standUpForStairs()
time.sleep(4)
Demo()
```
#### File: dns_main/src/kinematics.py
```python
from math import pi, cos, sin, atan2, acos, sqrt, pow, radians, asin
from math_calc import *
from service_router import readPos
class LegConsts(object):
''' Class object to store characteristics of each leg '''
def __init__(self, x_off, y_off, z_off, ang_off, leg_nr):
self.x_off = x_off # X offset from body origin to first servo (mm)
self.y_off = y_off # Y offset from body origin to first servo (mm)
self.z_off = z_off # Z offset from body origin to first servo (mm)
self.ang_off = ang_off # Angular offset from body origin to first servo (mm)
self.f_ang_off = radians(13.33) # Angular offset of Femur
self.t_ang_off = radians(-25.90) # Angular offset of Tibia
self.c_len = 66.50 # Link length of Coxa (mm)
self.f_len = 144.40 # Link length of Femur (mm)
self.t_len = 287 # Link length of Tibia (mm)
self.leg_nr = leg_nr # Leg Number
class Kinematics(object):
''' Class object to compute various types of kinematics data for AntBot '''
# Origin to coxa: x_off, y_off, z_off, ang_off, name
leg1 = LegConsts(70.5, 122.225, -14.9, - pi / 3, "Leg 1")
leg2 = LegConsts(-70.5, 122.225, -14.9, -2 * pi / 3, "Leg 2")
leg3 = LegConsts(141.33, 0, -14.9, 0, "Leg 3")
leg4 = LegConsts(-141.33, 0, -14.9, pi, "Leg 4")
leg5 = LegConsts(70.5, -122.225, -14.9, pi / 3, "Leg 5")
leg6 = LegConsts(-70.5, -122.225, -14.9, 2 * pi / 3, "Leg 6")
leg_list = [leg1, leg2, leg3, leg4, leg5, leg6]
################
# Public methods
################
def doFkine(self, all_positions):
''' Function: computes forward kinematics
Parameter: all_positions: list with 18 values of servo positions in steps from ID1 to ID18
Return: ee_xyz: list of x,y,z coordinates for all 6 legs
servoPos: servo positions in radians
'''
servoPos = self.step_to_rad(all_positions)
ee_xyz = []
j = 0
for i in xrange(0, 16, 3):
ee_xyz.extend(self.calc_fkine(servoPos[i:i + 3], self.leg_list[j]))
j += 1
return ee_xyz, servoPos
def doIkine(self, all_positions, x, y, z, body_orient=None, leg=None, auto=None):
''' Function: computes inverse kinematics
Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
x,y,z: desired change in x,y,z coordinates (same for all legs)
body_orient: list of 3 integers meaning alpha,beta,gamma rotation in degrees
leg: list with integers meaning leg numbers to compute inverse for them only
Return: list of 18 integers with servo steps
'''
ee_xyz, servoPos = self.doFkine(all_positions)
thetas = []
j = 0
if isinstance(leg, int):
leg = [leg]
elif isinstance(leg, tuple):
leg = list(leg)
elif isinstance(body_orient, tuple):
body_orient = list(body_orient)
if body_orient:
# Optional parameter. Compute inverse with body orientation
body_orient = [radians(d) for d in body_orient]
alpha_rad, beta_rad, gama_rad = body_orient[0], body_orient[1], body_orient[2]
x = (cos(gama_rad) * sin(beta_rad) * z + sin(gama_rad) * sin(beta_rad) * y + x * cos(beta_rad)) \
* cos(alpha_rad) - sin(alpha_rad) * (cos(gama_rad) * y - sin(gama_rad) * z)
y = (cos(gama_rad) * sin(beta_rad) * z + sin(gama_rad) * sin(beta_rad) * y + x * cos(beta_rad)) \
* sin(alpha_rad) + cos(alpha_rad) * (cos(gama_rad) * y - sin(gama_rad) * z)
z = -sin(beta_rad) * x + cos(beta_rad) * sin(gama_rad) * y + cos(beta_rad) * cos(gama_rad) * z
if leg:
# Optional parameter. Compute inverse for a specific leg/s.
for i in range(len(leg)):
j = leg[i] - 1
thetas.extend(self.calc_ikine(x, y, z, ee_xyz[j * 3:j * 3 + 3], self.leg_list[j]))
else:
# Compute inverse for all legs if not leg specified.
for i in xrange(0, 16, 3):
thetas.extend(self.calc_ikine(x, y, z, ee_xyz[i:i + 3], self.leg_list[j]))
j += 1
result = [int(each_theta) for each_theta in self.rad_to_step(thetas)]
return result
def doIkineRotationEuler(self, all_positions, alpha_rad, beta_rad, gama_rad, dist_x, dist_y, dist_z):
        ''' Function: computes inverse kinematics and body rotation (Parallel kinematics)
            Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
                        alpha_rad, beta_rad, gama_rad: body rotation about the x, y and z axes, in radians
                        dist_x, dist_y, dist_z: body translation along x, y and z, in mm
            Return: list of 18 integers with servo steps
        '''
final_eexyz, ee_xyz = self.calc_rot_matrix(all_positions, alpha_rad, beta_rad, gama_rad)
thetas = []
j = 0
for i in xrange(0, 16, 3):
thetas.extend(self.calc_ikine(final_eexyz[i] - dist_x, final_eexyz[i + 1] - dist_y, final_eexyz[i + 2] - dist_z, ee_xyz[i:i + 3], self.leg_list[j]))
j += 1
result = [int(each_theta) for each_theta in self.rad_to_step(thetas)]
return result
def printForward(self, all_positions):
''' Function: Prints x,y,z coordinates of each leg
Parameters: all_positions: list with 18 values of servo positions in steps from ID1 to ID18;
'''
ee_list, theta_list = self.doFkine(all_positions)
RoundedCoords = ['%.4f' % elem for elem in ee_list]
print ""
print "X,Y,Z coordinates of Leg end-points: "
print " " + str(["X ", " Y ", " Z "])
print "Leg 1: " + str(RoundedCoords[0:3])
print "Leg 2: " + str(RoundedCoords[3:6])
print "Leg 3: " + str(RoundedCoords[6:9])
print "Leg 4: " + str(RoundedCoords[9:12])
print "Leg 5: " + str(RoundedCoords[12:15])
print "Leg 6: " + str(RoundedCoords[15:18])
print ""
def printInverse(self, all_positions, x, y, z):
''' Function: Prints servo positions, in radians, needed to reach the position
Parameters: theta_list: 18 servo positions in radians.
'''
theta_list = self.doIkine(all_positions, x, y, z)
RoundedThetas = ['%.4f' % elem for elem in theta_list]
print ""
print "Theta angles of each servo:"
print " " + str(["Coxa ", "Femur ", "Tibia"])
print "Leg 1: " + str(RoundedThetas[0:3])
print "Leg 2: " + str(RoundedThetas[3:6])
print "Leg 3: " + str(RoundedThetas[6:9])
print "Leg 4: " + str(RoundedThetas[9:12])
print "Leg 5: " + str(RoundedThetas[12:15])
print "Leg 6: " + str(RoundedThetas[15:18])
print ""
def printKinematics(self, all_positions, x, y, z):
self.printForward(all_positions)
self.printInverse(all_positions, x, y, z)
#################
# Private methods
#################
def calc_fkine(self, servoPos, leg):
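        # Standard 3-link leg chain: theta1 (coxa) yaws the leg in the body
        # XY plane; theta2 (femur) and theta3 (tibia) pitch it in the leg's
        # vertical plane. The c/f/t link lengths project through the joint
        # angles, and the *_off constants shift the result into the body frame.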
theta1 = servoPos[0] - leg.ang_off
theta2 = servoPos[1] + leg.f_ang_off
theta3 = servoPos[2] + leg.t_ang_off
ee_z = leg.f_len * sin(theta2) + leg.t_len * sin(theta3 + theta2) + leg.z_off
ee_x = leg.x_off + cos(theta1) * (leg.c_len + leg.f_len * cos(theta2) + leg.t_len * cos(theta3 + theta2))
ee_y = leg.y_off + sin(theta1) * (leg.c_len + leg.f_len * cos(theta2) + leg.t_len * cos(theta3 + theta2))
return [ee_x, ee_y, ee_z]
def calc_ikine(self, x, y, z, ee_xyz, leg, auto=None):
init_X = ee_xyz[0]
init_Y = ee_xyz[1]
init_Z = ee_xyz[2]
X = init_X + (x) - leg.x_off
Y = init_Y + (y) - leg.y_off
Z = init_Z + (z) - leg.z_off
theta1 = atan2(Y, X) + leg.ang_off
if theta1 < -pi:
theta1 += 2 * pi
if theta1 > pi:
theta1 -= 2 * pi
new_x = cos(leg.ang_off) * X - sin(leg.ang_off) * Y
new_y = sin(leg.ang_off) * X + cos(leg.ang_off) * Y
final_x = cos(theta1) * new_x + sin(theta1) * new_y - leg.c_len
s = sqrt(pow(final_x, 2) + pow(Z, 2))
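        # Law of cosines on the femur-tibia triangle: s is the distance from
        # the femur joint to the foot in the leg's vertical plane, so
        # cos(pi - t3) = (f_len**2 + t_len**2 - s**2) / (2 * f_len * t_len),
        # which is the t3_term computed below. Values outside [-1, 1] mean
        # the target is out of reach and are clamped in the except branch.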
try:
t3_term = (-pow(s, 2) + pow(leg.f_len, 2) + pow(leg.t_len, 2)) / (2 * leg.f_len * leg.t_len)
t3 = pi - acos(t3_term)
except ValueError:
print "Cannot compute acos(", t3_term, ") for ", leg.leg_nr
if auto is None:
if t3_term < 0:
t3 = pi - acos(-0.99)
else:
t3 = pi - acos(0.99)
else:
return -1
theta3 = -t3 - leg.t_ang_off
theta2 = -(-atan2(Z, final_x) - atan2(leg.t_len * sin(t3), leg.f_len + leg.t_len * cos(t3)) + leg.f_ang_off)
if auto is not None:
if (theta2 > 1.8 or theta2 < -1.8) or (theta3 < -2.2 or theta3 > 2.2):
return -1
return [theta1, theta2, theta3]
def calc_rot_displacement(self, alpha_rad, beta_rad, gama_rad, ee_xyz):
pre_x = ee_xyz[0]
pre_y = ee_xyz[1]
pre_z = ee_xyz[2]
r_term1 = (cos(gama_rad) * sin(beta_rad) * pre_z + sin(gama_rad) * sin(beta_rad) * pre_y + pre_x * cos(beta_rad))
r_term2 = (cos(gama_rad) * pre_y - sin(gama_rad) * pre_z)
r_x = r_term1 * cos(alpha_rad) - r_term2 * sin(alpha_rad) - pre_x
r_y = r_term1 * sin(alpha_rad) + r_term2 * cos(alpha_rad) - pre_y
r_z = - sin(beta_rad) * pre_x + cos(beta_rad) * sin(gama_rad) * pre_y + cos(beta_rad) * cos(gama_rad) * pre_z - pre_z
return [r_x, r_y, r_z]
def calc_rot_matrix(self, all_positions, alpha_rad, beta_rad, gama_rad):
ee_xyz, servoPos = self.doFkine(all_positions)
rot_val_list = []
for i in xrange(0, 16, 3):
rot_val_list.extend(self.calc_rot_displacement(alpha_rad, beta_rad, gama_rad, ee_xyz[i:i + 3]))
return rot_val_list, ee_xyz
def rad_to_step(self, pos_rads):
return [i / pi * 2048 + 2048 for i in pos_rads]
def step_to_rad(self, pos_steps):
return [(((x / 2047.5) - 1) * pi) for x in pos_steps]
def make_poligonCorners(self, all_positions, leg_list):
        if isinstance(leg_list, int):
            leg_list = [leg_list]
xyz_polygon = []
ee_xyz, servoPos = self.doFkine(all_positions)
newEe_xyz = [ee_xyz[0], ee_xyz[1], ee_xyz[2], ee_xyz[3], ee_xyz[4], ee_xyz[5],
ee_xyz[9], ee_xyz[10], ee_xyz[11], ee_xyz[15], ee_xyz[16], ee_xyz[17],
ee_xyz[12], ee_xyz[13], ee_xyz[14], ee_xyz[6], ee_xyz[7], ee_xyz[8]]
for i in range(len(leg_list)):
j = leg_list[i] - 1
xyz_polygon.extend((newEe_xyz[j * 3:j * 3 + 3]))
return xyz_polygon
def make_polygonLines(self, leg_list, ee_xyz):
print("leglistLins", leg_list)
line = []
for i in range(len(ee_xyz / 3)):
j = i - 1
line.extend = [ee_xyz[3 * j + 3] - ee_xyz[3 * j],
ee_xyz[3 * j + 4] - ee_xyz[3 * j + 1],
ee_xyz[3 * j + 5] - ee_xyz[3 * j + 2]]
return line
def check_stabilty(self, t_poly=None):
ee_xyz, servoPos = self.doFkine(readPos())
tac = [False, True, False, True, True, False]
leg_list = []
for i in range(len(tac)):
if tac[i] is True:
leg_list.extend([i + 1])
        poly_lines = self.make_polygonLines(leg_list, ee_xyz)
        print("lines", poly_lines)
        if tac[1] is True and tac[2] is True and tac[5] is True:
# gamma, beta = 10,20 #self.get_orientation(tac)
# n = [0,-sin(beta),cos(beta)]
print("im not here")
P1 = [ee_xyz[3], ee_xyz[4], 1]
P2 = [ee_xyz[6], ee_xyz[7], 1]
P3 = [ee_xyz[15], ee_xyz[16], 1]
print(P1, P2, P3)
elif tac[0] is True and tac[3] is True and tac[4] is True:
print("im here")
P1 = [ee_xyz[0], ee_xyz[1], 1]
P3 = [ee_xyz[9], ee_xyz[10], 1]
P2 = [ee_xyz[12], ee_xyz[13], 1]
print(P1, P2, P3)
k = 1 # dotProduct(n,P1)
x = 0
y = 1
z = 2
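        # Barycentric containment test: solve G = l1*P1 + l2*P2 + l3*P3 (the
        # homogeneous z row enforces l1 + l2 + l3 = k) by Cramer's rule; each
        # numerator is a 2x2 minor and the shared denominator is
        # det([P1 P2 P3]). The projected centre of mass lies inside the
        # support triangle when every lambda is in (0, 1).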
lambda_1 = ((P2[x] * P3[y] - P2[y] * P3[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
lambda_2 = -((P1[x] * P3[y] - P1[y] * P3[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
lambda_3 = ((P1[x] * P2[y] - P1[y] * P2[x]) * k) / (P1[x] * P2[y] * P3[z] - P1[x] * P2[z] * P3[y] - P1[y] * P2[x] * P3[z] + P1[y] * P2[z] * P3[x] + P1[z] * P2[x] * P3[y] - P1[z] * P2[y] * P3[x])
        inside = False
        if 0.1 < lambda_1 < 0.9 and 0.1 < lambda_2 < 0.9 and 0.1 < lambda_3 < 0.9:
            if abs(lambda_1 + lambda_2 + lambda_3 - 1.0) < 1e-6:
                inside = True
side1 = subtract(P1, P2)
side2 = subtract(P3, P2)
side3 = subtract(P1, P3)
G = [0, 0, 1]
P2_G = subtract(G, P2)
P3_G = subtract(G, P3)
margin_s1 = sqrt(pow(dotProduct(P2_G, unit_vec(side1)), 2) + dotProduct(P2_G, P2_G))
margin_s2 = sqrt(pow(dotProduct(P2_G, unit_vec(side2)), 2) + dotProduct(P2_G, P2_G))
margin_s3 = sqrt(pow(dotProduct(P3_G, unit_vec(side3)), 2) + dotProduct(P3_G, P3_G))
stability_margin = min(margin_s1, margin_s2, margin_s3)
print(stability_margin, inside)
return stability_margin, inside
def get_orientation(self, leg_list):
ee_xyz, servoPos = self.doFkine(readPos())
p1 = ee_xyz[3 * (leg_list[0] - 1):3 * (leg_list[0] - 1) + 3]
p2 = ee_xyz[3 * (leg_list[1] - 1):3 * (leg_list[1] - 1) + 3]
p3 = ee_xyz[3 * (leg_list[2] - 1):3 * (leg_list[2] - 1) + 3]
p21 = subtract(p2, p1)
p23 = subtract(p2, p3)
normz = crossProduct(p21, p23)
beta = atan2(normz[0], normz[2]) * 180 / pi
gamma = -atan2(normz[1], normz[2]) * 180 / pi
return gamma, beta
def calc_translationStairs(self, riser, climbed_stairs_front, climbed_stairs_rear):
# gamma, beta = self.get_orientation([1,5,6])
ee_xyz, servopos = self.doFkine(readPos())
dist_y = abs(ee_xyz[1] - ee_xyz[13])
riser_diff = (climbed_stairs_front - climbed_stairs_rear) * riser
omega = asin(riser_diff / dist_y) * 180 / pi
AB = -ee_xyz[14] + 30
AC = AB / cos(omega * pi / 180)
BC = AC * sin(omega * pi / 180)
BE = sqrt(pow(ee_xyz[12], 2) + pow(ee_xyz[11], 2)) - 141.33
CE = BE - BC
CD = BC * CE / AC
if AC + CD <= riser_diff:
trans_z_g = riser_diff - AC - CD + 10
translation_z = trans_z_g * cos(omega * pi / 180)
translation_y = trans_z_g * sin(omega * pi / 180)
else:
translation_z = 0
translation_y = 0
return [translation_z, translation_y]
```
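The step/radian helpers encode the usual Dynamixel convention: roughly 2048 steps is the zero pose (the code centres on 2047.5) and the 0-4095 range spans a full turn. A short sketch of the round trip, assuming the module's imports resolve (`service_router` talks to the robot, but the conversions themselves touch no hardware):
```python
K = Kinematics()
steps = [2048, 1296, 3072]
rads = K.step_to_rad(steps)                  # approx [0.0, -1.15, 1.57]
back = [int(s) for s in K.rad_to_step(rads)]
print(back)                                  # recovers [2048, 1296, 3072]
```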
#### File: dns_main/src/stair_climbing_algorithm.py
```python
import time
from math import radians
from service_router import *
from locomotion import *
from math import asin, pi, atan2
from kinematics import Kinematics
from math_calc import vector_length
K = Kinematics()
def terminate():
    ee_xyz, servopos = K.doFkine(readPos())
    if (abs(ee_xyz[2] - ee_xyz[5]) < 50 and
            abs(ee_xyz[8] - ee_xyz[11]) < 50 and
            abs(ee_xyz[8] - ee_xyz[2]) < riser / 2):
        print("yeay im on top of stairs")
        return True
    return False
def standUpForStairs():
standup_pos = [2048, 2048, 1296, 2048, 2048, 1296,
2048, 2048, 1296, 2048, 2048, 1296,
2048, 2048, 1296, 2048, 2048, 1296]
front_standup = list_combine(leg[1] + leg[2], standup_pos)
rear_standup = list_combine(leg[5] + leg[6], standup_pos)
middle_standup = list_combine(leg[3] + leg[4], standup_pos)
positionN(front_standup)
time.sleep(1)
positionN(rear_standup)
time.sleep(1)
positionN(middle_standup)
time.sleep(1)
def correctMiddleLegs(z):
Up = [0, 0, z]
LiftUp = calc_motion(Up)
pos = list()
pos.extend(LiftUp[12:18])
pos.extend(LiftUp[18:24])
positionN(pos)
leg_case = [3,4]
check_position_error_legs(80, 20, pos, leg_case)
ServoCentering=[7,2048,10,2048]
positionN(ServoCentering)
time.sleep(1)
Down = [0, 0, -z]
LiftDown = calc_motion(Down)
pos1 = list()
pos1.extend(LiftDown[12:18])
pos1.extend(LiftDown[18:24])
positionN(pos1)
leg_case = [3,4]
check_position_error_legs(80, 20, pos1, leg_case)
def initialDistance(distance):
all_pos = readPos()
ee_xyz, servopos = K.doFkine(all_pos)
dist2FirstStep_1 = distance
dist2FirstStep_2 = distance
dist2FirstStep_3 = distance + ee_xyz[1] - ee_xyz[7]
dist2FirstStep_4 = distance + ee_xyz[1] - ee_xyz[10]
dist2FirstStep_5 = distance + ee_xyz[1] - ee_xyz[13]
dist2FirstStep_6 = distance + ee_xyz[1] - ee_xyz[16]
dist2FirstStep = dist2FirstStep_1, dist2FirstStep_2, dist2FirstStep_3, dist2FirstStep_4, dist2FirstStep_5, dist2FirstStep_6
    print(dist2FirstStep)
return dist2FirstStep
def initConfig_legs(depth):
maxy = 344.74638441867046
r = 392.55798277243395 - 141.33 #maximumy - y_offset of leg one
miny = 181.0804846109524
phai = asin((depth-miny)/r) * 2048/pi # change of coxa in steps
#print(int(phai))
if depth < maxy:
standup_pos = [ 1536 + int(phai), 2048, 1296, 2560 - int(phai), 2048, 1296,
2048 , 2048, 1296, 2048 , 2048, 1296,
2560 - int(phai), 2048, 1296, 1536 + int(phai), 2048, 1296]
# lift_up = [2048, 2448,1296,2048,2448,1296,
# 2048, 2448,1296,2048,2448,1296,
# 2048, 2448,1296,2048,2448,1296]
#print(standup_pos)
# front_liftup = list_combine(leg[2] + leg[5],lift_up)
# positionN(front_liftup)
# time.sleep(2)
front_standup = list_combine(leg[1] + leg[2], standup_pos)
positionN(front_standup)
time.sleep(1)
# rear_liftup = list_combine(leg[1] + leg[6],lift_up)
# positionN(rear_liftup)
# time.sleep(1)
rear_standup = list_combine(leg[3] + leg[4], standup_pos)
positionN(rear_standup)
time.sleep(1)
rear_standup = list_combine(leg[5] + leg[6], standup_pos)
positionN(rear_standup)
time.sleep(1)
ee_xyz, servopos = K.doFkine(readPos())
return maxy - ee_xyz[1]
def correctRotation(depth, riser):
slope = atan2(riser,depth)*180/pi
gamma, beta = K.get_orientation([1,5,6])
new_gamma = slope - gamma
parallelGait(0,0,int(new_gamma-3),0,0,0) #-3 should be 0 but seems to help to keep the robot parallel
time.sleep(3)
print("Slope is:", new_gamma)
def moveForward(x, y, z, alpha, beta, gamma, distance):
Forward = [x, y, z]
Up = [0, 0, z]
Down = [x, y, 0]
Push = [0, 0, 0]
HalfForward = [0.5*x, 0.5*y, z]
HalfUp = [ 0, 0, z]
HalfDown = [0.5*x, 0.5*y, 0]
PushBackwards = calc_motion(Push)
LiftUp = calc_motion(Up)
LiftDown = calc_motion(Down)
PutForward = calc_motion(Forward)
HalfLiftUp = calc_motion(HalfUp)
HalfLiftDown = calc_motion(HalfDown)
HalfPutForward = calc_motion(HalfForward)
while distance > 0.75 * stepSize:
if distance > 1.5 * stepSize:
pos = list()
pos.extend(LiftUp[6:12])
pos.extend(LiftUp[12:18])
pos.extend(LiftUp[30:36])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos, leg_case)
pos1 = list()
pos1.extend(PutForward[6:12])
pos1.extend(PutForward[12:18])
pos1.extend(PutForward[30:36])
positionN(pos1)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos1, leg_case)
pos2 = list()
pos2.extend(LiftDown[6:12])
pos2.extend(LiftDown[12:18])
pos2.extend(LiftDown[30:36])
positionN(pos2)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos2, leg_case)
pos3 = list()
pos3.extend(LiftUp[0:6])
pos3.extend(PushBackwards[6:12])
pos3.extend(PushBackwards[12:18])
pos3.extend(LiftUp[18:24])
pos3.extend(LiftUp[24:30])
pos3.extend(PushBackwards[30:36])
positionN(pos3)
check_position_error(40, 50, pos3)
pos4 = list()
pos4.extend(PushBackwards[0:6])
pos4.extend(PushBackwards[18:24])
pos4.extend(PushBackwards[24:30])
positionN(pos4)
leg_case = [1,4,5]
check_position_error_legs(20, 30, pos4, leg_case)
distance = distance - stepSize
else:
pos = list()
pos.extend(HalfLiftUp[6:12])
pos.extend(HalfLiftUp[12:18])
pos.extend(HalfLiftUp[30:36])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos, leg_case)
pos1 = list()
pos1.extend(HalfPutForward[6:12])
pos1.extend(HalfPutForward[12:18])
pos1.extend(HalfPutForward[30:36])
positionN(pos1)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos1, leg_case)
pos2 = list()
pos2.extend(HalfLiftDown[6:12])
pos2.extend(HalfLiftDown[12:18])
pos2.extend(HalfLiftDown[30:36])
positionN(pos2)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos2, leg_case)
pos3 = list()
pos3.extend(HalfLiftUp[0:6])
pos3.extend(PushBackwards[6:12])
pos3.extend(PushBackwards[12:18])
pos3.extend(HalfLiftUp[18:24])
pos3.extend(HalfLiftUp[24:30])
pos3.extend(PushBackwards[30:36])
positionN(pos3)
check_position_error(80, 50, pos3)
pos4 = list()
pos4.extend(PushBackwards[0:6])
pos4.extend(PushBackwards[18:24])
pos4.extend(PushBackwards[24:30])
positionN(pos4)
leg_case = [1,4,5]
check_position_error_legs(20, 30, pos4, leg_case)
distance = distance - (0.5 *stepSize)
time.sleep(0.5)
return distance
def walkUp(distanceToStair, x, stepSize, threshold, riser, alpha, beta, gamma):
orientation = [alpha,beta,gamma]
Forward = [x, stepSize, threshold]
Up = [0, 0, threshold]
Down = [x, stepSize, 0]
Push = [0, 0, 0]
UpForward = [x, stepSize, threshold+riser]
StepUp = [0, 0, threshold+riser]
StepDownFirst = [x, stepSize, threshold/2+riser]
StepDownSecond = [x, 0, threshold/2+riser]
PushBackwards = calc_motion(Push,orientation)
LiftUp = calc_motion(Up,orientation)
LiftDown = calc_motion(Down,orientation)
PutForward = calc_motion(Forward,orientation)
StepUpForward = calc_motion(UpForward,orientation)
StepUpUp = calc_motion(StepUp,orientation)
StepDownDownFirst = calc_motion(StepDownFirst,orientation)
StepDownDownSecond = calc_motion(StepDownSecond,orientation)
pos = []
##Lift_Up_First_Leg_group
for i in range(len(distanceToStair)):
if i == 0 or i == 3 or i == 4:
if distanceToStair[i] < stepSize:
pos.extend(StepUpUp[i*6 : i*6+6])
else:
pos.extend(LiftUp[i*6 : i*6+6])
positionN(pos)
leg_case = [1,4,5]
check_position_error_legs(120, 30, pos, leg_case)
pos = []
##Put_Forward_First_Leg_group
for i in range(len(distanceToStair)):
if i == 0 or i == 3 or i == 4:
if distanceToStair[i] < stepSize:
pos.extend(StepUpForward[i*6 : i*6+6])
else:
pos.extend(PutForward[i*6 : i*6+6])
positionN(pos)
leg_case = [1,4,5]
check_position_error_legs(120, 30, pos, leg_case)
pos = []
##Step_Down_First_leg_group
for i in range(len(distanceToStair)):
if i == 0 or i == 3 or i == 4:
if distanceToStair[i] < stepSize:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
else:
pos.extend(LiftDown[i*6 : i*6+6])
positionN(pos)
leg_case = [1,4,5]
check_position_error_legs(120, 30, pos, leg_case)
pos = []
check_contact()
#########################################################################################################
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
UpPushBackwards = [0, -stepSize, 0]
StepUpPushBackwards = calc_motion(UpPushBackwards)
##Lift_Up_Second_Leg_group
for i in range(len(distanceToStair)):
if i == 1 or i == 2 or i == 5:
if distanceToStair[i] < stepSize:
pos.extend(StepUpUp[i*6 : i*6+6])
else:
pos.extend(LiftUp[i*6 : i*6+6])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(120, 30, pos, leg_case)
pos = []
    ##Push_Backwards_First_Leg_group (legs 1, 4, 5) while the second group steps
    for i in range(len(distanceToStair)):
        if i == 0 or i == 3 or i == 4:
            pos.extend(StepUpPushBackwards[i*6 : i*6+6])
positionN(pos)
leg_case = [1,4,5]
check_position_error_legs(120, 30, pos, leg_case)
pos = []
##Step_Down_Second_leg_group
for i in range(len(distanceToStair)):
if i == 1 or i == 2 or i == 5:
if distanceToStair[i] < stepSize:
pos.extend(StepDownDownSecond[i*6 : i*6+6])
else:
pos.extend(PushBackwards[i*6 : i*6+6])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(120, 30, pos, leg_case)
pos = []
check_contact()
distanceToStair = [i - stepSize for i in distanceToStair]
def updateDistance(distanceToStair, stepSize):
distanceToStair = [i - stepSize for i in distanceToStair]
for i in range(len(distanceToStair)):
if distanceToStair[i] < 0:
distanceToStair[i] = distanceToStair[i] + thread
    print(distanceToStair)
return distanceToStair
def rotateAndTranslate(riser,climbed_stairs_front, climbed_stairs_rear):
gamma, beta = K.get_orientation([1, 5, 6])
parallelGait(0, -beta, -gamma, 0, 0, 0)
time.sleep(2)
a = K.calc_translationStairs(riser,climbed_stairs_front, climbed_stairs_rear)
parallelGait(0, 0, 0, 0, a[1], a[0])
time.sleep(2)
return beta , gamma
def moveForwardOnStair(x, y, z, alpha, beta, gamma, distance):
initialDistance = distance
orientation = [alpha, beta, gamma]
Forward = [x, y, z]
Up = [0, 0, z]
Down = [x, y, 0]
Push = [0, 0, 0]
HalfForward = [0.5*x, 0.5*y, z]
HalfUp = [0, 0, z]
HalfDown = [0.5*x, 0.5*y, 0]
PushBackwards = calc_motion(Push, orientation)
LiftUp = calc_motion(Up, orientation)
LiftDown = calc_motion(Down, orientation)
PutForward = calc_motion(Forward, orientation)
HalfLiftUp = calc_motion(HalfUp, orientation)
HalfLiftDown = calc_motion(HalfDown, orientation)
HalfPutForward = calc_motion(HalfForward, orientation)
print (distance)
while distance > 0.75 * stepSize:
if distance > 1.5 * stepSize:
pos = list()
pos.extend(LiftUp[6:12])
pos.extend(LiftUp[12:18])
pos.extend(LiftUp[30:36])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos, leg_case)
pos1 = list()
pos1.extend(PutForward[6:12])
pos1.extend(PutForward[12:18])
pos1.extend(PutForward[30:36])
positionN(pos1)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos1, leg_case)
pos2 = list()
pos2.extend(LiftDown[6:12])
pos2.extend(LiftDown[12:18])
pos2.extend(LiftDown[30:36])
positionN(pos2)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos2, leg_case)
pos3 = list()
pos3.extend(LiftUp[0:6])
pos3.extend(PushBackwards[6:12])
pos3.extend(PushBackwards[12:18])
pos3.extend(LiftUp[18:24])
pos3.extend(LiftUp[24:30])
pos3.extend(PushBackwards[30:36])
positionN(pos3)
check_position_error(20, 50, pos3)
pos4 = list()
pos4.extend(PushBackwards[0:6])
pos4.extend(PushBackwards[18:24])
pos4.extend(PushBackwards[24:30])
positionN(pos4)
leg_case = [1,4,5]
check_position_error_legs(20, 30, pos4, leg_case)
distance = distance - stepSize
else:
pos = list()
pos.extend(HalfLiftUp[6:12])
pos.extend(HalfLiftUp[12:18])
pos.extend(HalfLiftUp[30:36])
positionN(pos)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos, leg_case)
pos1 = list()
pos1.extend(HalfPutForward[6:12])
pos1.extend(HalfPutForward[12:18])
pos1.extend(HalfPutForward[30:36])
positionN(pos1)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos1, leg_case)
pos2 = list()
pos2.extend(HalfLiftDown[6:12])
pos2.extend(HalfLiftDown[12:18])
pos2.extend(HalfLiftDown[30:36])
positionN(pos2)
leg_case = [2,3,6]
check_position_error_legs(20, 30, pos2, leg_case)
pos3 = list()
pos3.extend(HalfLiftUp[0:6])
pos3.extend(PushBackwards[6:12])
pos3.extend(PushBackwards[12:18])
pos3.extend(HalfLiftUp[18:24])
pos3.extend(HalfLiftUp[24:30])
pos3.extend(PushBackwards[30:36])
positionN(pos3)
check_position_error(20, 50, pos3)
pos4 = list()
pos4.extend(PushBackwards[0:6])
pos4.extend(PushBackwards[18:24])
pos4.extend(PushBackwards[24:30])
positionN(pos4)
leg_case = [1,4,5]
check_position_error_legs(20, 30, pos4, leg_case)
distance = distance - (0.5 *stepSize)
time.sleep(0.5)
return distance
def walkUpAllLegs(distanceToStair, x, stepSize, threshold, riser, alpha, beta, gamma):
orientation = [alpha,beta,gamma]
Forward = [x, stepSize, threshold]
Up = [0, 0, threshold]
Down = [x, stepSize, 0]
Push = [0, 0, 0]
UpForward = [x, stepSize, threshold+riser]
StepUp = [0, 0, threshold+riser]
StepDownFirst = [x, stepSize, threshold/2+riser]
StepDownSecond = [x, 0, threshold/2+riser]
PushBackwards = calc_motion(Push,orientation)
LiftUp = calc_motion(Up,orientation)
LiftDown = calc_motion(Down,orientation)
PutForward = calc_motion(Forward,orientation)
StepUpForward = calc_motion(UpForward,orientation)
StepUpUp = calc_motion(StepUp,orientation)
StepDownDownFirst = calc_motion(StepDownFirst,orientation)
StepDownDownSecond = calc_motion(StepDownSecond,orientation)
pos = []
##Lift_Up_Fifth_Leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_Fifth_Leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_Fifth_leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
#########################################################################################################
##Lift_Up_Sixth_Leg
for i in range(len(distanceToStair)):
if i == 5:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_Sixth_Leg
for i in range(len(distanceToStair)):
if i == 5:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_Sixth_leg
for i in range(len(distanceToStair)):
if i ==5:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
distanceToStair = [i - stepSize for i in distanceToStair]
parallelGait(0,0,0,0,0,riser/2)
time.sleep(2)
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
Forward = [x, stepSize, threshold]
Up = [0, 0, threshold]
Down = [x, stepSize, 0]
Push = [0, 0, 0]
UpForward = [x, stepSize, threshold+riser]
StepUp = [0, 0, threshold+riser]
StepDownFirst = [x, stepSize, threshold/2+riser]
StepDownSecond = [x, 0, threshold/2+riser]
PushBackwards = calc_motion(Push,orientation)
LiftUp = calc_motion(Up,orientation)
LiftDown = calc_motion(Down,orientation)
PutForward = calc_motion(Forward,orientation)
StepUpForward = calc_motion(UpForward,orientation)
StepUpUp = calc_motion(StepUp,orientation)
StepDownDownFirst = calc_motion(StepDownFirst,orientation)
StepDownDownSecond = calc_motion(StepDownSecond,orientation)
###########################################################################
##Lift_Up_Third_Leg
for i in range(len(distanceToStair)):
if i == 2:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [3]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_Third_Leg
for i in range(len(distanceToStair)):
if i == 2:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [3]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_Third_leg
for i in range(len(distanceToStair)):
if i == 2:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [3]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
#########################################################################################################
##Lift_Up_Forth_Leg
for i in range(len(distanceToStair)):
if i == 3:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [4]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_Forth_Leg
for i in range(len(distanceToStair)):
if i == 3:
pos.extend(StepUpForward[i*6 : i*6+6])
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
positionN(pos)
leg_case = [4]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_Forth_leg
for i in range(len(distanceToStair)):
if i ==3:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [4]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
distanceToStair = [i - stepSize for i in distanceToStair]
parallelGait(0,0,0,0,0,riser/2)
time.sleep(2)
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
Forward = [x, stepSize, threshold]
Up = [0, 0, threshold]
Down = [x, stepSize, 0]
Push = [0, 0, 0]
UpForward = [x, stepSize, threshold+riser]
StepUp = [0, 0, threshold+riser]
StepDownFirst = [x, stepSize, threshold/2+riser]
StepDownSecond = [x, 0, threshold/2+riser]
PushBackwards = calc_motion(Push,orientation)
LiftUp = calc_motion(Up,orientation)
LiftDown = calc_motion(Down,orientation)
PutForward = calc_motion(Forward,orientation)
StepUpForward = calc_motion(UpForward,orientation)
StepUpUp = calc_motion(StepUp,orientation)
StepDownDownFirst = calc_motion(StepDownFirst,orientation)
StepDownDownSecond = calc_motion(StepDownSecond,orientation)
###########################################################################
##Lift_Up_First_Leg
for i in range(len(distanceToStair)):
if i == 0:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [1]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_First_Leg
for i in range(len(distanceToStair)):
if i == 0:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [1]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_First_leg
for i in range(len(distanceToStair)):
if i == 0:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [1]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
#########################################################################################################
##Lift_Up_Second_Leg
for i in range(len(distanceToStair)):
if i == 1:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [2]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_Second_Leg
for i in range(len(distanceToStair)):
if i == 1:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [2]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_Second_leg
for i in range(len(distanceToStair)):
if i ==1:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [2]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
distanceToStair = [i - stepSize for i in distanceToStair]
    # True only when terminate() reports that the robot is on top of the stairs
    return terminate()
def moveUpOnlyLastLegs(distanceToStair, x, stepSize, threshold, riser, alpha, beta, gamma):
orientation = [alpha,beta,gamma]
Forward = [x, stepSize, threshold]
Up = [0, 0, threshold]
Down = [x, stepSize, 0]
Push = [0, 0, 0]
UpForward = [x, stepSize, threshold+riser]
StepUp = [0, 0, threshold+riser]
StepDownFirst = [x, stepSize, threshold/2+riser]
StepDownSecond = [x, 0, threshold/2+riser]
PushBackwards = calc_motion(Push,orientation)
LiftUp = calc_motion(Up,orientation)
LiftDown = calc_motion(Down,orientation)
PutForward = calc_motion(Forward,orientation)
StepUpForward = calc_motion(UpForward,orientation)
StepUpUp = calc_motion(StepUp,orientation)
StepDownDownFirst = calc_motion(StepDownFirst,orientation)
StepDownDownSecond = calc_motion(StepDownSecond,orientation)
pos = []
##Lift_Up_Fifth_Leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_Fifth_Leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_Fifth_leg
for i in range(len(distanceToStair)):
if i == 4:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [5]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
UpNothing = [0, 0, 0]
UpUpNothing = calc_motion(UpNothing)
#########################################################################################################
##Lift_Up_Sixth_Leg
for i in range(len(distanceToStair)):
if i == 5:
pos.extend(StepUpUp[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Put_Forward_Sixth_Leg
for i in range(len(distanceToStair)):
if i == 5:
pos.extend(StepUpForward[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
##Step_Down_Sixth_leg
for i in range(len(distanceToStair)):
if i ==5:
pos.extend(StepDownDownFirst[i*6 : i*6+6])
positionN(pos)
leg_case = [6]
check_position_error_legs(120, 20, pos, leg_case)
pos = []
check_contact()
distanceToStair = [i - stepSize for i in distanceToStair]
parallelGait(0,0,0,0,0,riser/2)
time.sleep(2)
def translateAboveRiser():
ee_xyz, servopos = K.doFkine(readPos())
    legZPlacement = abs(ee_xyz[8] + ee_xyz[11]) / 2
    print("middle-leg z placement is", legZPlacement)
    if legZPlacement - riser < 70:
        translationZ = riser + 70 - legZPlacement
parallelGait(0,0,0,0,0,translationZ)
time.sleep(3)
#####################STAIR CLIMBING ALGORITHM STARTS HERE#########################
torque(0)
pwm_list = [800]*18 #Setting PWM to "high" max is 885
pwmAll(pwm_list)
scaler_acc = [20] * 18 #Setting Acceleration to "low"
scaler_vel = [20] * 18 #Setting Velocity to "low"
velocityAll(scaler_vel)
accelerationAll(scaler_acc)
torque(1)
threshold = 50 # Set z threshold (step up)
stepSize = 50 # Set Step Size
riser = 163     # Enter your riser height (mm) or obtain it from the camera
thread = 284    # Enter your tread depth (mm); note it is named 'thread' throughout this script
initConfig_legs(thread) # Rotates the coxa servos to cause the displacement of a thread depth between the leg pairs
time.sleep(3)
## Move forward to the first step of the stairs (distance in mm; 550 in the call below), assuming the robot starts at that distance
#distanceToStairInitial = initialDistance(moveForward(0, stepSize, threshold, 0, 0, 0, 550))
##For testing put the first legs next to the riser with the distance of approx 25mm
distance = 25
distanceToStairInitial = initialDistance(distance) # Gets the y value of each legs with respect to the first leg
parallelGait(0,0,0,0,0,riser/2) # Translate the body by half of the riser
time.sleep(2)
walkUp(distanceToStairInitial,0, stepSize*2, threshold, riser, 0,0,0) # Steps the legs within one step size of the riser (legs 1, 2) up, using the tripod gait
moveForwardOnStair(0, stepSize, threshold, 0, 0, 0, thread) # Moves forward on the stairs by one tread depth
parallelGait(0,0,0,0,0,riser/2)
time.sleep(2)
# Update distances by subtracting the tread depth for the 2nd and 3rd leg pairs
distanceToStairs = distanceToStairInitial[0], distanceToStairInitial[1], distanceToStairInitial[2]-thread, distanceToStairInitial[3]-thread, distanceToStairInitial[4]-thread,distanceToStairInitial[5]-thread
walkUp(distanceToStairs,0, stepSize*2, threshold, riser, 0,0,0) # Steps the legs within one step size of the riser (legs 1, 2, 3, 4) up, using the tripod gait
translateAboveRiser() # Body is translated above the riser, unless the body is already above
stairs = True
# Update distances by subtracting the tread depth for the 3rd leg pair
distanceToStairs = distanceToStairs[0], distanceToStairs[1], distanceToStairs[2], distanceToStairs[3], distanceToStairs[4]-thread, distanceToStairs[5]-thread
while stairs is True:
stepSize = 50
correctRotation(thread,riser) # Correcting the rotation by the negative slope of the stairs to keep the body horizontal
time.sleep(2)
correctMiddleLegs(20) # Corrects possible errors that tend to affect the coxa joint
moveForwardOnStair(0, stepSize, threshold, 0, 0, 0, riser+stepSize)
stability_margin, stability_bool = check_stabilty() # Checking for stability
if stability_margin > stepSize:
        checkForTermination = walkUpAllLegs(distanceToStairs,0, stepSize*2, threshold, riser, 0,0,0) # Walks up with all legs using the wave gait and checks whether the robot is on top of the stairs
        if checkForTermination:
            break # The robot is on top of the stairs; exit the while loop
correctRotation(thread,riser)
time.sleep(1)
parallelGait(0,0,0,0,50,0) #translate the body forward
time.sleep(2)
parallelGait(0,0,0,0,50,0)
time.sleep(2)
translateAboveRiser()
else:
stepSize = stability_margin
        checkForTermination = walkUpAllLegs(distanceToStairs,0, stepSize*2, threshold, riser, 0,0,0)
        if checkForTermination:
            break
correctRotation(thread,riser)
time.sleep(1)
parallelGait(0,0,0,0,50,0)
time.sleep(2)
parallelGait(0,0,0,0,50,0)
time.sleep(2)
translateAboveRiser()
# Executed after the loop ends, i.e. once the front legs stop changing level when trying to reach the next step
time.sleep(2)
parallelGait(0,0,0,0,50,0)
time.sleep(2)
parallelGait(0,0,0,0,50,0)
time.sleep(2)
moveForwardOnStair(0, stepSize, threshold, 0, 0, 0, riser)
moveUpOnlyLastLegs(distanceToStairs,0,stepSize,threshold,riser,0,0,0) # Lifts only the rear legs above the last riser using the wave gait
moveForward(0, stepSize, threshold, 0, 0, 0, 550) # Move forward
```
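The per-leg bookkeeping in `updateDistance` above advances every leg and wraps a distance forward by one tread depth once it goes negative (the leg has crossed a step edge). A standalone restatement with no robot dependencies:

```python
# Standalone restatement of the updateDistance bookkeeping (no robot dependencies).
def update_distance(distances, step_size, tread):
    """Advance each leg by step_size; a negative result means the leg crossed
    a step edge, so wrap it forward by one tread depth."""
    moved = [d - step_size for d in distances]
    return [d + tread if d < 0 else d for d in moved]

print(update_distance([25, 25, 150, 150, 270, 270], 100, 284))
# -> [209, 209, 50, 50, 170, 170]
```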
|
{
"source": "jevgienij/pypokertools",
"score": 2
}
|
#### File: pypokertools/tests/test_properties.py
```python
from pypokertools.pokertools import flop, hand, holecards
from pypokertools.properties.flop import (
has_2flush,
has_3straight,
has_gutshot,
has_pair,
has_threeofakind,
is_monotone,
is_rainbow,
)
from pypokertools.properties.hand import (
is_flush,
is_fourofakind,
is_fullhouse,
is_nopair,
is_onepair,
is_pair_or_better,
is_straight,
is_straightflush,
is_threeofakind,
is_twopair,
is_twopair_or_better,
)
from pypokertools.properties.holecards import (
has_one_gap,
has_two_gap,
is_connected,
is_pair,
is_suited,
)
#------------------------------------------------------------------------------
# Hand properties
def test_is_straightflush():
assert is_straightflush(hand('4h 5h 6h 7h 8h'))
assert is_straightflush(hand('As Ks Qs Js Ts'))
assert not is_straightflush(hand('4c 5h 6h 7h 8h'))
assert not is_straightflush(hand('Ad Ks Qs Js Ts'))
def test_is_fourofakind():
assert is_fourofakind(hand('4h 4d 4s 4c Ac'))
assert is_fourofakind(hand('Ac As Ah Ad 2d'))
assert not is_fourofakind(hand('4c 5h 6h 7h 8h'))
assert not is_fourofakind(hand('Ad Ks Qs Js Ts'))
def test_is_fullhouse():
assert is_fullhouse(hand('4h 4d 4s 5h 5d'))
assert is_fullhouse(hand('Ac As Ah 2c 2s'))
assert not is_fullhouse(hand('4h 4d 4s 7h 8h'))
assert not is_fullhouse(hand('Ad As Ah 2c Kd'))
def test_is_flush():
assert is_flush(hand('4d 5d 8d Jd Qd'))
assert is_flush(hand('Ac 8c 7c 5c 2c'))
assert not is_flush(hand('4h 4s 5s 7s 8s'))
assert not is_flush(hand('Ad 7d 6d 3d 2h'))
def test_is_straight():
assert is_straight(hand('4h 5d 6s 7h 8d'))
assert is_straight(hand('Ac 2s 3h 4c 5s'))
assert is_straight(hand('Tc Js Qh Kc As'))
assert not is_straight(hand('4h 5d 6s 7h 9h'))
assert not is_straight(hand('Ad 2s 3h 4c 6d'))
def test_is_threeofakind():
assert is_threeofakind(hand('4h 4d 4s 5h 6d'))
assert is_threeofakind(hand('Ac As Ah 2c Ks'))
assert not is_threeofakind(hand('4h 4d 7s 7h 8h'))
assert not is_threeofakind(hand('Ad 2s 3s 4s 5s'))
def test_is_twopair():
assert is_twopair(hand('4h 4d 5s 5h 6d'))
assert is_twopair(hand('Ac As 2h 2c Ks'))
assert not is_twopair(hand('4h 4d 5s 7h 8h'))
assert not is_twopair(hand('Ad As 2h Qc Kd'))
def test_is_onepair():
assert is_onepair(hand('4h 4d 5s 6h 7d'))
assert is_onepair(hand('Ac As 2h Qc Ks'))
assert not is_onepair(hand('4h 4d 4s 7h 8h'))
assert not is_onepair(hand('Ad 2s 3h 4c 6d'))
def test_is_nopair():
assert is_nopair(hand('4h 7d 9s Jh Ad'))
assert is_nopair(hand('Ac 2s 3h 4c 6s'))
assert not is_nopair(hand('4h 4d 4s 7h 8h'))
assert not is_nopair(hand('As Ks Qs Js Ts'))
def test_is_pair_or_better():
assert is_pair_or_better(hand('Ad Ac 7s 6s 5s'))
assert is_pair_or_better(hand('Ad Ac Kd Kc 2h'))
assert is_pair_or_better(hand('Ad Ac As 6c 5c'))
assert is_pair_or_better(hand('Ad Kh Qs Jc Td'))
assert is_pair_or_better(hand('Ad 8d 7d 6d 5d'))
assert is_pair_or_better(hand('Ad Ac As 5d 5c'))
assert is_pair_or_better(hand('Ad Ac As Ah 5c'))
assert is_pair_or_better(hand('Ad Kd Qd Jd Td'))
assert not is_pair_or_better(hand('Ad Kd Qd Jd 3c'))
assert not is_pair_or_better(hand('Ts 9s 7c 6s 5s'))
def test_is_twopair_or_better():
assert is_twopair_or_better(hand('Ad Ac Kd Kc 2h'))
assert is_twopair_or_better(hand('Ad Ac As 6c 5c'))
assert is_twopair_or_better(hand('Ad Kh Qs Jc Td'))
assert is_twopair_or_better(hand('Ad 8d 7d 6d 5d'))
assert is_twopair_or_better(hand('Ad Ac As 5d 5c'))
assert is_twopair_or_better(hand('Ad Ac As Ah 5c'))
assert is_twopair_or_better(hand('Ad Kd Qd Jd Td'))
assert not is_twopair_or_better(hand('Ad Ac 7s 6s 5s'))
assert not is_twopair_or_better(hand('Ad Kd Qd Jd 3c'))
assert not is_twopair_or_better(hand('Ts 9s 7c 6s 5s'))
#------------------------------------------------------------------------------
# Flop properties
def test_is_rainbow():
assert is_rainbow(flop('Ah Td 3c'))
assert is_rainbow(flop('Ah Ks 3c'))
assert is_rainbow(flop('Ah Td 3s'))
assert not is_rainbow(flop('Kd 5d 2d'))
assert not is_rainbow(flop('Kd 8d 6s'))
assert not is_rainbow(flop('Ks Qh 2s'))
def test_is_monotone():
assert is_monotone(flop('Ah Th 3h'))
assert is_monotone(flop('Ac Tc 3c'))
assert is_monotone(flop('Ks Qs 2s'))
assert not is_monotone(flop('Ah Td 3c'))
assert not is_monotone(flop('As Ts 3d'))
assert not is_monotone(flop('Ks Qd 2s'))
def test_has_pair():
assert has_pair(flop('Ah Ad 3c'))
assert has_pair(flop('Ah 3d 3c'))
assert has_pair(flop('Th Td 3c'))
assert not has_pair(flop('Ah Td 3c'))
assert not has_pair(flop('As Ts 3s'))
assert not has_pair(flop('Ks Qd 2s'))
def test_has_threeofakind():
assert has_threeofakind(flop('Ah Ad Ac'))
assert has_threeofakind(flop('Th Td Tc'))
assert has_threeofakind(flop('3h 3d 3c'))
assert not has_threeofakind(flop('Ah Td 3c'))
assert not has_threeofakind(flop('As Ts 3s'))
assert not has_threeofakind(flop('Ks Qd 2s'))
def test_has_3straight():
assert has_3straight(flop('Ah 2d 3c'))
assert has_3straight(flop('Jh Td 9c'))
assert has_3straight(flop('Ks Qd Js'))
assert not has_3straight(flop('Ah Td 3c'))
assert not has_3straight(flop('As Ts 3s'))
assert not has_3straight(flop('Ts 9s 7c'))
def test_has_gutshot():
assert has_gutshot(flop('Ts 9s 7c'))
assert has_gutshot(flop('Ah 2d 4c'))
assert has_gutshot(flop('Ah 3d 4c'))
assert not has_gutshot(flop('Ah Td 3c'))
assert not has_gutshot(flop('As Ts 3s'))
assert not has_gutshot(flop('Ts 9s 8c'))
def test_has_2flush():
assert has_2flush(flop('Ah Th 3c'))
assert has_2flush(flop('Ah Td 3d'))
assert has_2flush(flop('Ks Qd 2s'))
assert not has_2flush(flop('Ah Th 3h'))
assert not has_2flush(flop('Ah Td 3c'))
assert not has_2flush(flop('Ks Td Th'))
#------------------------------------------------------------------------------
# Holecards properties
def test_is_pair():
assert is_pair(holecards('Ad Ac'))
assert is_pair(holecards('Ts Td'))
assert is_pair(holecards('2c 2d'))
assert not is_pair(holecards('Ks Qs'))
assert not is_pair(holecards('Ac 2d'))
assert not is_pair(holecards('Td 9d'))
def test_is_suited():
assert is_suited(holecards('Ad Kd'))
assert is_suited(holecards('Ad 2d'))
assert is_suited(holecards('Ts 9s'))
assert not is_suited(holecards('Ad Kc'))
assert not is_suited(holecards('Ah 2s'))
assert not is_suited(holecards('Tc 9h'))
def test_is_connected():
assert is_connected(holecards('Ad 2c'))
assert is_connected(holecards('Ad Kc'))
assert is_connected(holecards('Jh Th'))
assert not is_connected(holecards('Ad Qc'))
assert not is_connected(holecards('4h 2h'))
assert not is_connected(holecards('Jc 9c'))
def test_has_one_gap():
assert has_one_gap(holecards('Ad Qd'))
assert has_one_gap(holecards('Ad 3c'))
assert has_one_gap(holecards('Ts 8h'))
assert not has_one_gap(holecards('Ad Kd'))
assert not has_one_gap(holecards('Ad 2c'))
assert not has_one_gap(holecards('Ts 9h'))
def test_has_two_gap():
assert has_two_gap(holecards('Ad Jd'))
assert has_two_gap(holecards('Ad 4c'))
assert has_two_gap(holecards('Ts 7h'))
assert not has_two_gap(holecards('Ad Qd'))
assert not has_two_gap(holecards('Ad 3c'))
assert not has_two_gap(holecards('Ts 8h'))
```
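The predicates exercised above compose naturally into filters. A short usage sketch built on the same public helpers the tests import:

```python
from pypokertools.pokertools import hand
from pypokertools.properties.hand import is_twopair_or_better

hands = ['Ad Ac Kd Kc 2h', 'Ts 9s 7c 6s 5s', 'Ad Kd Qd Jd Td']
strong = [h for h in hands if is_twopair_or_better(hand(h))]
print(strong)  # ['Ad Ac Kd Kc 2h', 'Ad Kd Qd Jd Td']
```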
|
{
"source": "jevgienij/pytessy",
"score": 2
}
|
#### File: pytessy/src/pytessy.py
```python
import ctypes
import ctypes.util
from os import chdir, environ, getcwd
from os.path import abspath, dirname, isabs, isdir, isfile, join
from sys import platform
from distutils.spawn import find_executable
import numpy as np
class PyTessyError(Exception):
"""
PyTessyError class
------------------
Empty subclass of Exception to throw module-specific errors.
"""
pass
class TesseractHandler(object):
"""
TesseractHandler class
----------------------
Handles raw Tesseract-OCR calls with limited functionality only.
"""
_lib = None
_api = None
class TessBaseAPI(ctypes._Pointer):
"""
TessBaseAPI
-----------
Empty ctypes._Pointer subclass to serve as TessBaseAPI handler pointer.
"""
_type_ = type('_TessBaseAPI', (ctypes.Structure,), {})
def __init__(self, lib_path=None, data_path=None, language='eng'):
"""
Initializes Tesseract-OCR api handler object instance
-----------------------------------------------------
@Params: lib_path (string) [optional] Path to Tesseract-OCR library.
data_path (string) [optional] Path to Tesseract-OCR data files.
language (string) [optional] Language code to work with.
"""
if self._lib is None:
self.setup_lib(lib_path)
self._api = self._lib.TessBaseAPICreate()
if self._lib.TessBaseAPIInit3(self._api, data_path.encode('ascii'),
language.encode('ascii')):
raise PyTessyError('Failed to initialize Tesseract-OCR library.')
def get_text(self):
"""
Gets text as utf-8 decoded string
---------------------------------
@Return: (string) Text read by Tesseract-OCR as utf-8 string.
"""
self._check_setup()
result = self._lib.TessBaseAPIGetUTF8Text(self._api)
if result:
return result.decode('utf-8')
def get_text_raw(self):
"""
Gets text as raw bytes data
---------------------------
@Return: (bytes) Text read by Tesseract-OCR as raw bytes.
"""
self._check_setup()
return self._lib.TessBaseAPIGetUTF8Text(self._api)
def set_image(self, imagedata, width, height, bytes_per_pixel, bytes_per_line,
resolution):
"""
Sets image to read
------------------
@Params: imagedata (ctypes.int array) Raw imagedata to read.
width (int) Width of the image.
height (int) Height of the image.
bytes_per_pixel (int) Number of bytes that
represents a pixel.
bytes_per_line (int) Number of bytes in a line.
resolution (int) Resolution of the image
in dpi.
"""
self._check_setup()
self._lib.TessBaseAPISetImage(self._api,
imagedata, width, height,
bytes_per_pixel, bytes_per_line)
self._lib.TessBaseAPISetSourceResolution(self._api, resolution)
def set_psm(self, psm):
"""
Sets Page Segmentation Mode, as per TessPageSegMode enum:
------------------
@Params: psm (int) Page Segmentation Mode, as per TessPageSegMode enum:
typedef enum TessPageSegMode {
PSM_OSD_ONLY,
PSM_AUTO_OSD,
PSM_AUTO_ONLY,
PSM_AUTO,
PSM_SINGLE_COLUMN,
PSM_SINGLE_BLOCK_VERT_TEXT,
PSM_SINGLE_BLOCK,
PSM_SINGLE_LINE,
PSM_SINGLE_WORD,
PSM_CIRCLE_WORD,
PSM_SINGLE_CHAR,
PSM_SPARSE_TEXT,
PSM_SPARSE_TEXT_OSD,
PSM_RAW_LINE,
PSM_COUNT
} TessPageSegMode;
https://github.com/UB-Mannheim/tesseract/blob/master/include/tesseract/capi.h
"""
self._check_setup()
self._lib.TessBaseAPISetPageSegMode(self._api, psm)
def get_psm(self):
"""
Gets Page Segmentation Mode, as per TessPageSegMode enum:
------------------
@Return: psm (int) Page Segmentation Mode, as per TessPageSegMode enum:
typedef enum TessPageSegMode {
PSM_OSD_ONLY,
PSM_AUTO_OSD,
PSM_AUTO_ONLY,
PSM_AUTO,
PSM_SINGLE_COLUMN,
PSM_SINGLE_BLOCK_VERT_TEXT,
PSM_SINGLE_BLOCK,
PSM_SINGLE_LINE,
PSM_SINGLE_WORD,
PSM_CIRCLE_WORD,
PSM_SINGLE_CHAR,
PSM_SPARSE_TEXT,
PSM_SPARSE_TEXT_OSD,
PSM_RAW_LINE,
PSM_COUNT
} TessPageSegMode;
https://github.com/UB-Mannheim/tesseract/blob/master/include/tesseract/capi.h
"""
self._check_setup()
return self._lib.TessBaseAPIGetPageSegMode(self._api)
def set_variable(self, key, val):
"""
Set the value of an internal parameter.
Supply the name of the parameter and the value as a string, just as
you would in a config file.
Eg set_variable("tessedit_char_blacklist", "xyz"); to ignore x, y and z.
Or set_variable("classify_bln_numeric_mode", "1"); to set numeric-only mode.
SetVariable may be used before Init, but settings will revert to
defaults on End().
@Params: key (str) Variable name
val (str) Variable value
@Return: (bool) ``False`` if the name lookup failed.
"""
self._check_setup()
return self._lib.TessBaseAPISetVariable(self._api, key.encode('ascii'), val.encode('ascii'))
def mean_text_conf(self):
"""
@Return: (int) average confidence value between 0 and 100.
"""
self._check_setup()
return self._lib.TessBaseAPIMeanTextConf(self._api)
@classmethod
def setup_lib(cls, lib_path=None):
"""
Binds Tesseract-OCR library to the handler
------------------------------------------
@Params: (string) [optional] Path to Tesseract-OCR library.
@Raises: PyTessyError If ctypes cannot find Tesseract-OCR library.
"""
if cls._lib is not None:
return
lib_path = ctypes.util.find_library(lib_path)
if lib_path is None:
raise PyTessyError('Ctypes couldn\'t find Tesseract-OCR library')
cls._lib = lib = ctypes.CDLL(lib_path)
lib.TessBaseAPICreate.restype = cls.TessBaseAPI # handle
lib.TessBaseAPIDelete.restype = None # void
lib.TessBaseAPIDelete.argtypes = (cls.TessBaseAPI,) # handle
lib.TessBaseAPIInit3.argtypes = (cls.TessBaseAPI, # handle
ctypes.c_char_p, # datapath
ctypes.c_char_p) # language
lib.TessBaseAPISetImage.restype = None # void
lib.TessBaseAPISetImage.argtypes = (cls.TessBaseAPI, # handle
ctypes.c_void_p, # imagedata
ctypes.c_int, # width
ctypes.c_int, # height
ctypes.c_int, # bytes_per_pixel
ctypes.c_int) # bytes_per_line
lib.TessBaseAPIGetUTF8Text.restype = ctypes.c_char_p # text
lib.TessBaseAPIGetUTF8Text.argtypes = (cls.TessBaseAPI, ) # handle
lib.TessBaseAPISetSourceResolution.restype = None # void
lib.TessBaseAPISetSourceResolution.argtypes = (cls.TessBaseAPI, # handle
ctypes.c_int) # ppi
lib.TessBaseAPISetPageSegMode.restype = None
lib.TessBaseAPISetPageSegMode.argtypes = (cls.TessBaseAPI, # handle
ctypes.c_int) # mode
lib.TessBaseAPIGetPageSegMode.restype = ctypes.c_int # mode
lib.TessBaseAPIGetPageSegMode.argtypes = (cls.TessBaseAPI,) # handle
lib.TessBaseAPISetVariable.restype = ctypes.c_bool # bool
lib.TessBaseAPISetVariable.argtypes = (cls.TessBaseAPI, # handle
ctypes.c_char_p, # name
ctypes.c_char_p) # value
lib.TessBaseAPIMeanTextConf.restype = ctypes.c_int # int
lib.TessBaseAPIMeanTextConf.argtypes = (cls.TessBaseAPI,) # handle
def _check_setup(self):
"""
Checks whether Tesseract-OCR is set up or not
---------------------------------------------
@Raises: PyTessyError If library handler not yet configured.
PyTessyError If api handler not yet configured.
"""
if not self._lib:
raise PyTessyError('Tesseract handler library not configured.')
if not self._api:
raise PyTessyError('Tesseract handler api not created.')
def __del__(self):
"""
Disconnects TessBaseAPI when instance is deleted
------------------------------------------------
"""
if not self._lib or not self._api:
return
if not getattr(self, 'closed', False):
self._lib.TessBaseAPIDelete(self._api)
self.closed = True
class PyTessy(object):
"""
PyTessy
-------
Provides user-friendly and fast Tesseract-OCR interface.
"""
DEFAULT_HORIZONTAL_DPI = 96
TESSDATA_DIRNAME = 'tessdata'
TESSERACT_DIRNAME = 'Tesseract-OCR'
TESSERACT_DEFAULT_HORIZONTAL_DPI = 70
VERSION = '0.0.1'
def __init__(self, tesseract_path=None, api_version=None, lib_path=None,
data_path=None, language='eng', verbose_search=False):
"""
Initializes PyTessy instance
----------------------------
@Params: tesseract_path (string) [optional] Path (directory's name)
to Tesseract-OCR library.
api_version (string) [optional] Api version suffix string
(should be compatible with
Tesseract-OCR 3).
lib_path (string) [optional] Exact path to the
Tesseract-OCR library.
data_path (string) [optional] Path (directory's name)
to data directory (usually "tessdata").
language (string) [optional] Language code to use.
verbose_search (boolean) [optional] Whether to display
library searching process or not.
@Raises: NotImplementedError If the operating system is not
implemented yet (linux, macOS).
You can avoid this error by giving
exact path of Tesseract-OCR library.
NotImplementedError If the operating system will be
never implemented.
You can avoid this error by giving
exact path of Tesseract-OCR library.
FileNotFoundError If the given exact library path
doesn't point to existing file.
FileNotFoundError If failed to found library with
search process.
FileNotFoundError If cannot found "tessdata" directory.
"""
run_path = dirname(abspath(getcwd()))
no_lib = True
if lib_path is not None:
if isfile(lib_path):
no_lib = False
else:
raise FileNotFoundError('PyTessy: lib_path: "{}" doesn\'t exist.'
.format(lib_path))
if no_lib:
if verbose_search:
verbose = lambda *pa, **pk: print(*pa, **pk)
else:
verbose = lambda *pa, **pk: None
if platform.startswith('win'):
verbose('PyTessy v{} on {} searching for Tesseract-OCR library...'
.format(PyTessy.VERSION, platform))
if api_version is None:
lib_name = 'libtesseract-5'
else:
lib_name = 'libtesseract{}'.format(api_version)
verbose('--- Target library name: {}'.format(lib_name))
if tesseract_path is not None:
dirs = [tesseract_path, run_path, join(run_path, PyTessy.TESSERACT_DIRNAME)]
else:
dirs = [run_path, join(run_path, PyTessy.TESSERACT_DIRNAME)]
if 'PROGRAMFILES' in environ:
dirs.append(join(environ['PROGRAMFILES'], PyTessy.TESSERACT_DIRNAME))
if 'PROGRAMFILES(X86)' in environ:
dirs.append(join(environ['PROGRAMFILES(X86)'], PyTessy.TESSERACT_DIRNAME))
for dir_ in dirs:
test = join(dir_, '{}.dll'.format(lib_name))
if isfile(test):
lib_path = test
verbose(' {} SUCCESS.'.format(test))
break
else:
verbose(' {} FAILED.'.format(test))
if lib_path is None:
raise FileNotFoundError('Cannot locate Tesseract-OCR library.')
elif platform.startswith('linux'):
find_program = find_executable('tesseract')
                if not find_program:  # find_executable returns None when tesseract is absent
raise FileNotFoundError('Cannot locate Tesseract-OCR library.')
else:
lib_path = 'tesseract'
data_path = "/usr/share/tessdata/"
elif platform.startswith('darwin'):
raise NotImplementedError('PyTessy: Library search on MacOS is not implemented yet.')
else:
raise NotImplementedError('PyTessy: Library search on this system is not implemented.')
tess_path = dirname(abspath(lib_path))
no_tessdata = True
if data_path is not None:
if isdir(data_path):
no_tessdata = False
if no_tessdata:
for test_path in [run_path, join(run_path, PyTessy.TESSERACT_DIRNAME), tess_path]:
test_path = join(test_path, PyTessy.TESSDATA_DIRNAME)
if isdir(test_path):
data_path = test_path
break
if data_path is None:
raise FileNotFoundError('PyTessy: Couldn\'t find "tessdata" directory.')
orig_path = getcwd()
chdir(tess_path)
self._tess = TesseractHandler(lib_path=lib_path, data_path=data_path,
language=language)
chdir(orig_path)
def justread(self, raw_image_ctypes, width, height, bytes_per_pixel,
bytes_per_line, resolution=96, psm=None):
"""
Reads text as utf-8 string from raw image data without any check
----------------------------------------------------------------
@Params: raw_image_ctypes (ctypes int array) Raw image data.
width (int) Image width.
height (int) Image height.
bytes_per_pixel (int) Number of bytes per pixel.
bytes_per_line (int) Number of bytes per line.
resolution (int) [optional] Resolution in
dpi. Default: 96.
psm (int) [optional] Page Segmentation
Mode as per TessPageSegMode
enum (see set_psm method)
@Return: (string) Text read by Tesseract-OCR
as utf-8 string.
"""
if psm is None:
psm = self.get_psm()
self._tess.set_psm(psm)
self._tess.set_image(raw_image_ctypes, width, height, bytes_per_pixel,
bytes_per_line, resolution)
return self._tess.get_text()
def justread_raw(self, raw_image_ctypes, width, height, bytes_per_pixel,
bytes_per_line, resolution=96, psm=None):
"""
Reads text as raw bytes data from raw image data without any check
------------------------------------------------------------------
@Params: raw_image_ctypes (ctypes int array) Raw image data.
width (int) Image width.
height (int) Image height.
bytes_per_pixel (int) Number of bytes per pixel.
bytes_per_line (int) Number of bytes per line.
resolution (int) [optional] Resolution in
dpi. Default: 96.
psm (int) [optional] Page Segmentation
Mode as per TessPageSegMode
enum (see set_psm method)
@Return: (bytes) Text read by Tesseract-OCR
as raw bytes data.
"""
if psm is None:
psm = self.get_psm()
self._tess.set_psm(psm)
self._tess.set_image(raw_image_ctypes, width, height, bytes_per_pixel,
bytes_per_line, resolution)
return self._tess.get_text_raw()
def read(self, imagedata, width, height, bytes_per_pixel, resolution=96,
raw=False, psm=None):
"""
Reads text from image data
--------------------------
@Params: imagedata (ctypes int array) Raw image data.
width (int) Image width.
height (int) Image height.
bytes_per_pixel (int) Number of bytes per pixel.
resolution (int) [optional] Resolution in
dpi. Default: 96.
raw (boolean) [optional] Whether to read
in raw or utf-8 mode.
psm (int) [optional] Page Segmentation
Mode as per TessPageSegMode
enum (see set_psm method)
@Return: (bytes) or (string) Text read by Tesseract-OCR
"""
if psm is None:
psm = self.get_psm()
bytes_per_line = width * bytes_per_pixel
if raw:
return self.justread_raw(imagedata, width, height, bytes_per_pixel,
bytes_per_line, resolution, psm)
else:
return self.justread(imagedata, width, height, bytes_per_pixel,
bytes_per_line, resolution, psm)
def readnp(self, imagedata: np.ndarray, resolution=96, raw=False, psm=None):
"""
Reads text from image data contained in a numpy ndarray
--------------------------
@Params: imagedata (np.ndarray) Raw image data in a numpy ndarray.
resolution (int) [optional] Resolution in dpi.
Default: 96.
raw (boolean) [optional] Whether to read
in raw or utf-8 mode.
psm (int) [optional] Page Segmentation
Mode as per TessPageSegMode
enum (see set_psm method)
@Return: (bytes) or (string) Text read by Tesseract-OCR
"""
if len(imagedata.shape) == 2: # greyscale picture
height, width = imagedata.shape
bytes_per_pixel = 1
elif len(imagedata.shape) == 3: # 24 or 32 bits color picture
height, width, bytes_per_pixel = imagedata.shape
else:
raise PyTessyError('imagedata should be 3- or 2- dimensional numpy ndarray')
if psm is None:
psm = self.get_psm()
return self.read(imagedata.ctypes, width, height, bytes_per_pixel, resolution, raw, psm)
def set_psm(self, psm):
"""
Sets Page Segmentation Mode, as per TessPageSegMode enum:
------------------
@Params: psm (int) Page Segmentation Mode, as per TessPageSegMode enum
"""
self._tess.set_psm(psm)
def get_psm(self):
"""
Gets Page Segmentation Mode, as per TessPageSegMode enum:
------------------
@Return: psm (int) Page Segmentation Mode, as per TessPageSegMode enum
"""
return self._tess.get_psm()
def set_variable(self, key, val):
"""
Set the value of an internal parameter.
Supply the name of the parameter and the value as a string, just as
you would in a config file.
Eg set_variable("tessedit_char_blacklist", "xyz"); to ignore x, y and z.
Or set_variable("classify_bln_numeric_mode", "1"); to set numeric-only mode.
SetVariable may be used before Init, but settings will revert to
defaults on End().
@Params: key (str) Variable name
val (str) Variable value
@Return: (bool) ``False`` if the name lookup failed.
"""
return self._tess.set_variable(key, val)
def mean_text_conf(self):
"""
@Return: (int) average confidence value between 0 and 100.
"""
return self._tess.mean_text_conf()
if __name__ == '__main__':
print('This is a module not a script.')
```
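A minimal end-to-end sketch of the class above, assuming this file is importable as the `pytessy` module and a Tesseract-OCR installation is discoverable; the image is a blank placeholder, so the recognized text is not meaningful:

```python
# Usage sketch; assumes a discoverable Tesseract-OCR install and that this
# file is importable as the 'pytessy' module.
import numpy as np
from pytessy import PyTessy

ocr = PyTessy()                                   # locates library and tessdata
image = np.full((40, 160), 255, dtype=np.uint8)   # blank greyscale placeholder
text = ocr.readnp(image, resolution=96, psm=7)    # 7 = PSM_SINGLE_LINE
print(repr(text), ocr.mean_text_conf())
```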
|
{
"source": "JevinJ/Bittrex-Notify",
"score": 3
}
|
#### File: Bittrex-Notify/src/fasttick.py
```python
import config
import misc
def heartbeat():
"""
Processes data from Bittrex into a simpler dictionary,
calls the save function on it, deletes the oldest
    saved dictionary (if it's out of the lookback range), and finally
creates a list of the best coins to be used in tkinter listboxes.
:return: A list containing triples of (coin name, increase rate, volume)
"""
data = misc.retrieve_data()
# Processing for saving latest data from Bittrex API
latest_data = {}
for d in data.get('result', {}):
name = d.get('Market', {}).get('MarketCurrencyLong', '')
last_price = d.get('Summary', {}).get('Last', 0.0)
last_vol = d.get('Summary', {}).get('BaseVolume', 0.0)
base_currency = d.get('Market', {}).get('BaseCurrency', '')
if base_currency == 'BTC' and last_price >= \
config.FASTTICK_MIN_PRICE and last_vol >= config.FASTTICK_MIN_VOL:
latest_data[name] = {'Summary': d['Summary']}
# Processing all data within 9 ticks + latest and returning
# rate for output in GUI
prev_data = list(misc.open_pickles('fasttick_history', config.FASTTICK_LOOKBACK))
prev_data.append(latest_data)
ticker_data = []
if prev_data:
for name in latest_data:
prev_changes = []
for i in range(len(prev_data)-1):
old_price = float(prev_data[i].get(name, {}).get('Summary', {}).get('Last', 0.0))
new_price = float(prev_data[i+1].get(name, {}).get('Summary', {}).get('Last', 0.0))
if old_price != 0:
change = (((new_price - old_price) / old_price) * 100)
prev_changes.append(change)
if prev_changes:
volume = float(latest_data.get(name, {}).get('Summary', {}).get('BaseVolume', 0.0))
average_rate = float((sum(prev_changes) / len(prev_changes)))
if average_rate >= config.FASTTICK_MIN_RATE:
ticker_data.append((name, average_rate, volume))
misc.save_pickle(latest_data, 'fasttick_history')
misc.delete_ancient_pickles('fasttick_history', config.FASTTICK_LOOKBACK)
return ticker_data
```
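The core of `heartbeat` is an average of tick-to-tick percentage changes over the lookback window. Distilled to a standalone function with no Bittrex dependency:

```python
# Distilled version of the rate computation inside heartbeat.
def average_percent_change(prices):
    changes = [100.0 * (new - old) / old
               for old, new in zip(prices, prices[1:]) if old != 0]
    return sum(changes) / len(changes) if changes else 0.0

print(average_percent_change([0.010, 0.011, 0.0121]))  # ~10.0 (two +10% ticks)
```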
#### File: Bittrex-Notify/src/GUIfasttick.py
```python
import tkinter as tk
from time import sleep
from playsound import playsound
import config
import fasttick
from helpmessage import fasttick_help_message
import misc
from tickerwindow import TickerWindow
class GUIfasttick(TickerWindow):
def __init__(self, app):
super().__init__(app)
misc.delete_ancient_pickles('fasttick_history')
self.draw_labels()
self.draw_buttons()
self.draw_lists()
self.draw_timer()
self.timer_update()
def draw_labels(self):
self.labelName.grid(row=3, column=0, sticky='NSWE')
self.labelChange.config(text='Rate')
self.labelChange.grid(row=3, column=1, sticky='NSWE')
self.labelVol.grid(row=3, column=2, sticky='NSWE')
self.labelBuf.grid(row=3, rowspan=2, column=3, columnspan=2, sticky='NSWE')
def draw_buttons(self):
self.sortByName.grid(row=4, column=0, sticky='NSWE')
self.sortByChange.grid(row=4, column=1, sticky='NSWE')
self.sortByVol.grid(row=4, column=2, sticky='NSWE')
self.notifyBell.grid(row=4, column=3, sticky='NSWE')
self.help.grid(row=3, column=4, sticky='E')
def on_click_help(self):
helpWindow = tk.Toplevel()
helpWindow.title('Help')
frameBuf = tk.Frame(helpWindow, width=192, bg=config.MAIN_BG)
frameBuf.grid(row=0, rowspan=4, column=0, columnspan=3)
message = tk.Message(frameBuf, bg=config.MAIN_BG, fg=config.TEXT_COLOR,
width=192, text=fasttick_help_message)
message.grid(row=0, columnspan=3)
dismissButton = tk.Button(frameBuf, text='Dismiss', command=helpWindow.destroy)
dismissButton.grid(row=1, column=1)
def draw_lists(self):
self.yScroll.grid(row=5, column=3, sticky='NSWE')
self.listName.grid(row=5, column=0, sticky='NSWE')
self.listChange.grid(row=5, column=1, sticky='NSWE')
self.listVol.grid(row=5, column=2, sticky='NSWE')
def draw_timer(self):
self.timerLabel.grid(row=5, column=4, ipadx=8)
self.timerFrame.grid(row=5, column=4, columnspan=3)
self.timerDisp.grid(row=5, column=4)
self.timerValue = config.FASTTICK_RATE
def timer_update(self):
if self.timerValue == 3:
            # 'async' is a reserved keyword in Python 3.7+, hence async_result.
            self.async_result = self.pool.apply_async(fasttick.heartbeat)
        if self.timerValue == 0:
            while not self.async_result.ready():
                for i in range(1, 4):
                    if self.async_result.ready():
                        break
                    self.timerDisp.config(text=f'{"." * i}', font=('', 20))
                    self.app.update()
                    sleep(1)
            self.ticker_data = self.async_result.get()
self.sort_ticker()
if self.notifyIsActive and self.ticker_data:
playsound('media/notification_sound.mp3')
self.timerValue = config.FASTTICK_RATE
        minutes, seconds = divmod(self.timerValue, 60)
self.timerDisp.config(text=f'{minutes}:{seconds:0>2}', font=('', 20))
self.timerValue -= 1
self.app.after(1000, self.timer_update)
```
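Note that `timer_update` above blocks the Tk mainloop with `sleep()` while the worker finishes. A sketch of a fully non-blocking alternative that re-polls through Tk's `after()` — `poll_result` is illustrative, not part of this codebase:

```python
# Illustrative non-blocking poll via Tk's after(); not part of this codebase.
def poll_result(self, dots=1):
    if self.async_result.ready():
        self.ticker_data = self.async_result.get()
        self.sort_ticker()
        self.timerValue = config.FASTTICK_RATE
    else:
        self.timerDisp.config(text='.' * dots, font=('', 20))
        self.app.after(1000, lambda: self.poll_result(dots % 3 + 1))
```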
#### File: Bittrex-Notify/src/tickerwindow.py
```python
from multiprocessing import pool
import tkinter as tk
import config
class TickerWindow:
"""
Template class for a ticker window. All/most of the code here
is "backend" and widgets are not drawn via tk.grid(). This is intended
to be inherited in the GUI classes where objects will be drawn and
functions/widgets overridden if needed.
Args:
app(the main tkinter object)
Required Overrides(inside child object):
on_click_help(needs custom dialogue in help windows)
timer_update(needs different values/behavior for countdown timers)
timerValue(set to maximum timer rate value)
Required widget draw(grid()) calls(inside child object):
labelName
labelChange
labelVol
labelBuf
sortByName
sortByChange
sortByVol
notifyBell
help
yScroll
listName
listChange
listVol
timerLabel
timerFrame
timerDisp
"""
def __init__(self, app):
self.app = app
self.pool = pool.ThreadPool(processes=1)
        self.async_result = pool.AsyncResult  # placeholder type; 'async' is reserved in Python 3.7+
self.ticker_data = []
self.create_labels()
self.create_buttons()
self.create_lists()
self.create_timer()
def create_labels(self):
color_options = {'bg': config.MAIN_BG,
'fg': config.TEXT_COLOR}
self.labelName = tk.Label(text='Market Name', **color_options)
self.labelChange = tk.Label(text='Change', **color_options)
self.labelVol = tk.Label(text='Volume', **color_options)
self.labelBuf = tk.Frame(width=120, height=42, bg=config.MAIN_BG)
def create_buttons(self):
color_options = {'bg': config.MAIN_BG,
'activebackground': config.CLICKED_BG}
self.sortByName = tk.Button(relief='raised', image=self.app.noArrow,
command=lambda: self.on_click_sort('sortByName'),
**color_options)
self.sortByChange = tk.Button(relief='raised', image=self.app.downArrow,
command=lambda: self.on_click_sort('sortByChange'),
**color_options)
self.sortByVol = tk.Button(relief='raised', image=self.app.noArrow,
command=lambda: self.on_click_sort('sortByVol'),
**color_options)
self.buttons = {'sortByName': ['none', 0],
'sortByChange': ['desc', 1],
'sortByVol': ['none', 2]}
self.notifyBell = tk.Button(relief='raised', image=self.app.notifyBell,
command=lambda: self.on_click_notif(),
**color_options)
self.notifyIsActive = False
self.help = tk.Button(relief='flat', image=self.app.questionMark,
command=lambda: self.on_click_help(),
**color_options)
def on_click_sort(self, pressed_name):
for b_name in self.buttons:
if b_name == pressed_name:
sort_direction = self.buttons[b_name][0]
if sort_direction == 'desc':
self.buttons[b_name][0] = 'asc'
getattr(self, b_name).config(image=self.app.upArrow)
self.ticker_data.sort(key=lambda x: x[self.buttons[b_name][1]])
if sort_direction == 'asc' or sort_direction == 'none':
self.buttons[b_name][0] = 'desc'
getattr(self, b_name).config(image=self.app.downArrow)
self.ticker_data.sort(key=lambda x: x[self.buttons[b_name][1]],
reverse=True)
else:
self.buttons[b_name][0] = 'none'
getattr(self, b_name).config(image=self.app.noArrow)
self.display_ticker()
def on_click_notif(self):
if self.notifyBell.cget('relief') == 'raised':
self.notifyBell.config(relief='sunken')
self.notifyIsActive = True
else:
self.notifyBell.config(relief='raised')
self.notifyIsActive = False
def on_click_help(self):
pass
def create_lists(self):
self.yScroll = tk.Scrollbar(orient=tk.VERTICAL, command=self.on_vsb)
color_options = {'bg': config.LIGHT_BG,
'fg': config.TEXT_COLOR,
'selectbackground': config.LIGHT_BG,
'selectforeground': config.TEXT_COLOR,
'highlightcolor': config.LIGHT_BG,
'highlightbackground': config.LIGHT_BG}
self.listName = tk.Listbox(activestyle='none', relief='sunken',
yscrollcommand=self.yScroll.set,
width=40, height=6, **color_options)
self.listName.bind('<MouseWheel>', self.on_mouse_wheel)
self.listChange = tk.Listbox(activestyle='none', relief='sunken',
yscrollcommand=self.yScroll.set,
width=8, height=6, **color_options)
self.listChange.bind('<MouseWheel>', self.on_mouse_wheel)
self.listVol = tk.Listbox(activestyle='none', relief='sunken',
yscrollcommand=self.yScroll.set,
width=8, height=6, **color_options)
self.listVol.bind('<MouseWheel>', self.on_mouse_wheel)
def on_vsb(self, *args):
self.listName.yview(*args)
self.listChange.yview(*args)
self.listVol.yview(*args)
def on_mouse_wheel(self, event):
if event.delta < 0:
self.listName.yview('scroll', 1, 'units')
self.listChange.yview('scroll', 1, 'units')
self.listVol.yview('scroll', 1, 'units')
if event.delta > 0:
self.listName.yview('scroll', -1, 'units')
self.listChange.yview('scroll', -1, 'units')
self.listVol.yview('scroll', -1, 'units')
return 'break'
def create_timer(self):
self.timerLabel = tk.Label(text='Time until update:', bg=config.MAIN_BG, fg=config.TEXT_COLOR)
self.timerFrame = tk.LabelFrame(width=120, height=120, bg=config.MAIN_BG)
self.timerDisp = tk.Label(font=('', 20), bg=config.MAIN_BG, fg=config.TEXT_COLOR)
self.timerValue = 0
def timer_update(self):
pass
def sort_ticker(self):
if self.ticker_data:
for b_name in self.buttons:
if self.buttons[b_name][0] == 'desc':
self.ticker_data.sort(key=lambda x: x[self.buttons[b_name][1]],
reverse=True)
if self.buttons[b_name][0] == 'asc':
self.ticker_data.sort(key=lambda x: x[self.buttons[b_name][1]])
self.display_ticker()
def display_ticker(self):
self.listName.delete(0, tk.END)
self.listChange.delete(0, tk.END)
self.listVol.delete(0, tk.END)
for i in self.ticker_data:
self.listName.insert(tk.END, f'{i[0]}')
self.listChange.insert(tk.END, f'+{i[1]:.02f}%')
self.listVol.insert(tk.END, f'{i[2]:.02f}')
self.app.update()
```
|
{
"source": "jevinskie/litedram",
"score": 2
}
|
#### File: litedram/frontend/fifo.py
```python
import math
from migen import *
from litex.soc.interconnect import stream
from litedram.common import LiteDRAMNativePort
from litedram.frontend import dma
# Helpers ------------------------------------------------------------------------------------------
def _inc(signal, modulo):
if modulo == 2**len(signal):
return signal.eq(signal + 1)
else:
return If(signal == (modulo - 1),
signal.eq(0)
).Else(
signal.eq(signal + 1)
)
# LiteDRAMFIFOCtrl ---------------------------------------------------------------------------------
class _LiteDRAMFIFOCtrl(Module):
def __init__(self, base, depth):
self.base = base
self.depth = depth
self.level = Signal(max=depth+1)
# # #
# To write buffer
self.writable = Signal()
self.write_address = Signal(max=depth)
# From write buffer
self.write = Signal()
# To read buffer
self.readable = Signal()
self.read_address = Signal(max=depth)
# From read buffer
self.read = Signal()
# # #
produce = self.write_address
consume = self.read_address
self.sync += [
If(self.write,
_inc(produce, depth)
),
If(self.read,
_inc(consume, depth)
),
If(self.write & ~self.read,
self.level.eq(self.level + 1),
).Elif(self.read & ~self.write,
self.level.eq(self.level - 1)
)
]
self.comb += [
self.writable.eq(self.level < depth),
self.readable.eq(self.level > 0)
]
# LiteDRAMFIFOWriter -------------------------------------------------------------------------------
class _LiteDRAMFIFOWriter(Module):
def __init__(self, data_width, port, ctrl, fifo_depth=32):
self.sink = sink = stream.Endpoint([("data", data_width)])
# # #
self.submodules.writer = writer = dma.LiteDRAMDMAWriter(port, fifo_depth=fifo_depth)
self.comb += [
writer.sink.valid.eq(sink.valid & ctrl.writable),
writer.sink.address.eq(ctrl.base + ctrl.write_address),
writer.sink.data.eq(sink.data),
If(writer.sink.valid & writer.sink.ready,
sink.ready.eq(1)
),
If(port.wdata.valid & port.wdata.ready,
ctrl.write.eq(1)
),
]
# LiteDRAMFIFOReader -------------------------------------------------------------------------------
class _LiteDRAMFIFOReader(Module):
def __init__(self, data_width, port, ctrl, fifo_depth=32):
self.source = source = stream.Endpoint([("data", data_width)])
# # #
self.submodules.reader = reader = dma.LiteDRAMDMAReader(port, fifo_depth=fifo_depth)
self.comb += [
reader.sink.valid.eq(ctrl.readable),
reader.sink.address.eq(ctrl.base + ctrl.read_address),
If(reader.sink.valid & reader.sink.ready,
ctrl.read.eq(1)
)
]
self.comb += reader.source.connect(source)
# _LiteDRAMFIFO ------------------------------------------------------------------------------------
class _LiteDRAMFIFO(Module):
"""LiteDRAM frontend that allows to use DRAM as a FIFO"""
def __init__(self, data_width, base, depth, write_port, read_port,
writer_fifo_depth = 32,
reader_fifo_depth = 32):
assert isinstance(write_port, LiteDRAMNativePort)
assert isinstance(read_port, LiteDRAMNativePort)
self.sink = stream.Endpoint([("data", data_width)])
self.source = stream.Endpoint([("data", data_width)])
# # #
self.submodules.ctrl = _LiteDRAMFIFOCtrl(base, depth)
self.submodules.writer = _LiteDRAMFIFOWriter(data_width, write_port, self.ctrl, writer_fifo_depth)
self.submodules.reader = _LiteDRAMFIFOReader(data_width, read_port, self.ctrl, reader_fifo_depth)
self.comb += [
self.sink.connect(self.writer.sink),
self.reader.source.connect(self.source)
]
# LiteDRAMFIFO -------------------------------------------------------------------------------------
class LiteDRAMFIFO(Module):
"""LiteDRAM FIFO with optional/automatic Bypass.
Description
-----------
┌──────────┐ ┌──────────┐
Sink │ Pre- │ Bypass │ Post- │ Source
─────────► FIFO ├────────► FIFO ├───────►
└────┬─────┘ └─────▲────┘
│ │
┌────▼─────┐ ┌─────┴────┐
│ Pre- │ │ Post- │
│Converter │ │Converter │
└────┬─────┘ └─────▲────┘
│ │
│ ┌─────────────┐ │
│ │ DRAM │ │
└──► FIFO ├───┘
└──────┬──────┘
│
▼
DRAM
The DRAM FIFO allows the creation of very large FIFOs with storage in DRAM. The data-width of the
input/output streams is automatically adapted to the DRAM's data-width with the Pre/Post
converters and the module switches seamlessly between 2 modes:
- 1) Bypass mode.
- 2) DRAM mode.
1) The module is initialized in Bypass mode, connecting its Sink to its Source.
Backpressure from the Source is propagated through the Post-FIFO and Pre-FIFO to
the Sink.
┌──────────┐ ┌──────────┐
Sink │ Pre- │ Bypass │ Post- │ Source
─────────► FIFO ├────────► FIFO ├───────►
└──────────┘ └──────────┘
Backpressure
◄─────────────────────
Once the Post-FIFO is full and the Pre-FIFO has enough data to form a DRAM Word, the module
switches to DRAM mode.
2) In DRAM mode, the Bypass connection is disabled and the Pre-FIFO's Source is redirected to
the Pre-Converter's Sink. Once the Pre-Converter has a full DRAM word, the word can be written
to the DRAM FIFO's Sink.
┌──────────┐ ┌──────────┐
Sink │ Pre- │ │ Post- │ Source
─────────► FIFO │ │ FIFO ├───────►
└────┬─────┘ └─────▲────┘
│ │
┌────▼─────┐ ┌─────┴────┐
│ Pre- │ │ Post- │
│Converter │ │Converter │
└────┬─────┘ └─────▲────┘
│ │
│ ┌─────────────┐ │
│ │ DRAM │ │
└──► FIFO ├───┘
└──────┬──────┘
│
▼
DRAM
The data from the DRAM FIFO is then presented on the DRAM FIFO's Source and fed to the
Post-Converter to re-generate the stream with the correct data-width. The data is then
produced on the module's Source.
Once there is no longer data in the Pre-Converter/DRAM FIFO/Post-Converter path and the Pre-FIFO's
level is below the threshold, the module switches back to Bypass mode.
"""
def __init__(self, data_width, base, depth, write_port, read_port, with_bypass=False,
writer_fifo_depth = 32,
reader_fifo_depth = 32):
assert isinstance(write_port, LiteDRAMNativePort)
assert isinstance(read_port, LiteDRAMNativePort)
self.sink = stream.Endpoint([("data", data_width)])
self.source = stream.Endpoint([("data", data_width)])
# # #
# Parameters.
# -----------
assert write_port.data_width == read_port.data_width
port_data_width = write_port.data_width
assert data_width <= port_data_width
data_width_ratio = port_data_width//data_width
if not with_bypass:
assert data_width_ratio == 1
pre_fifo_depth = 2*data_width_ratio # FIXME: Adjust.
post_fifo_depth = 2*data_width_ratio # FIXME: Adjust.
# Submodules.
# -----------
# Pre-FIFO.
self.submodules.pre_fifo = pre_fifo = stream.SyncFIFO([("data", data_width)], pre_fifo_depth)
# Pre-Converter.
self.submodules.pre_converter = pre_converter = stream.Converter(data_width, port_data_width)
# DRAM-FIFO.
self.submodules.dram_fifo = dram_fifo = _LiteDRAMFIFO(
data_width = port_data_width,
base = base,
depth = depth,
write_port = write_port,
read_port = read_port,
writer_fifo_depth = writer_fifo_depth,
reader_fifo_depth = reader_fifo_depth,
)
# Post-Converter.
self.submodules.post_converter = post_converter = stream.Converter(port_data_width, data_width)
# Post-FIFO.
self.submodules.post_fifo = post_fifo = stream.SyncFIFO([("data", data_width)], post_fifo_depth)
# Data-Flow.
# ----------
bypass = Signal()
store = Signal()
count = Signal(8)
self.comb += [
# Sink --> Pre-FIFO.
self.sink.connect(pre_fifo.sink),
# Bypass / DRAM.
If(with_bypass & bypass,
# Pre-FIFO --> Post-FIFO.
pre_fifo.source.connect(post_fifo.sink),
).Else(
# Pre-FIFO --> Pre-Converter.
If(store | (not with_bypass),
pre_fifo.source.connect(pre_converter.sink),
),
# Post-Converter --> Post-FIFO.
post_converter.source.connect(post_fifo.sink)
),
# Pre-Converter --> DRAM-FIFO.
pre_converter.source.connect(dram_fifo.sink),
# DRAM-FIFO --> Post-Converter.
dram_fifo.source.connect(post_converter.sink),
# Post-FIFO --> Source.
post_fifo.source.connect(self.source)
]
# FSM.
# ----
if with_bypass:
can_store = Signal()
self.comb += can_store.eq(pre_fifo.level > data_width_ratio)
self.submodules.fsm = fsm = FSM(reset_state="BYPASS")
fsm.act("BYPASS",
bypass.eq(1),
# Switch to DRAM mode when enough data to store a DRAM word.
If(can_store,
NextValue(store, 1),
NextValue(count, 0),
NextState("DRAM")
)
)
data_inc = Signal()
data_dec = Signal()
data_cnt = Signal(int(math.log2(depth + writer_fifo_depth + reader_fifo_depth) + 1))
self.sync += data_cnt.eq(data_cnt + data_inc - data_dec)
fsm.act("DRAM",
# Increment DRAM Data Count on Pre-Converter's Sink cycle.
data_inc.eq(pre_converter.sink.valid & pre_converter.sink.ready),
# Decrement DRAM Data Count on Post-Converter's Source cycle.
data_dec.eq(post_converter.source.valid & post_converter.source.ready),
# Update store.
If(data_inc,
NextValue(count, count + 1),
If(count == (data_width_ratio - 1),
NextValue(count, 0),
NextValue(store, can_store),
)
),
# Maintain DRAM Data Count.
NextValue(data_cnt, data_cnt + data_inc - data_dec),
# Switch back to Bypass mode when the DRAM Data Count reaches 0 and no new DRAM word can be stored.
If((can_store == 0) & (data_cnt == 0),
NextState("BYPASS")
)
)
```
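A standalone elaboration sketch for the FIFO above; the port widths and FIFO geometry are illustrative assumptions (in a real SoC the native ports would come from LiteDRAM's crossbar):
```python
from litedram.common import LiteDRAMNativePort

# Sketch: instantiate the FIFO with two hand-made native ports (assumed widths).
write_port = LiteDRAMNativePort("write", address_width=32, data_width=128)
read_port  = LiteDRAMNativePort("read",  address_width=32, data_width=128)
fifo = LiteDRAMFIFO(
    data_width  = 32,      # 32-bit stream; Pre/Post-Converters adapt to the 128-bit ports
    base        = 0x0000,  # first DRAM word reserved for the FIFO
    depth       = 1024,    # FIFO capacity in DRAM words
    write_port  = write_port,
    read_port   = read_port,
    with_bypass = True,    # enable the Bypass/DRAM mode switching described above
)
```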
|
{
"source": "jevinskie/litex",
"score": 2
}
|
#### File: build/sim/cocotb.py
```python
from pathlib import Path
import os
import sys
import subprocess
from shutil import which
from migen.fhdl.structure import _Fragment
from litex import get_data_mod
from litex.build import tools
from litex.build.generic_platform import *
import rpyc
from rpyc.core.service import ClassicService
from rpyc.utils.server import ThreadedServer, ThreadPoolServer
import cocotb
pydev_host = os.environ.get('PYDEV_HOST', None)
pydev_port = os.environ.get('PYDEV_PORT', None)
if pydev_port is not None:
import pydevd
pydevd.settrace(pydev_host, port=int(pydev_port), suspend=False)
class SimService(rpyc.Service):
exposed_platform = None
exposed_soc = None
exposed_ns = None
def exposed_call_on_server(self, func):
res = None
res = func(self.exposed_platform, self.exposed_soc, self.exposed_ns)
return res
class SimServer:
def __init__(self, socket_path: str):
self.socket_path = socket_path
try:
os.remove(socket_path)
except FileNotFoundError:
pass
self.srv = ThreadPoolServer(SimService, socket_path=socket_path, protocol_config={"allow_all_attrs": True})
rpyc.lib.spawn(lambda: self.srv.start())
def __del__(self):
self.srv.close()
os.remove(self.socket_path)
def start_sim_server(socket_path=None):
if cocotb.top is None and socket_path is None:
return
elif socket_path is not None:
server = SimServer(socket_path)
return server
elif cocotb.top is not None and socket_path is None:
socket_path = f'{os.environ["MODULE"]}.pipe'
return rpyc.utils.factory.unix_connect(socket_path)
else:
raise RuntimeError
def _generate_sim_makefile(build_dir: str, build_name: str, sources: list[tuple[str, str]], module, sim_top = None):
assert all(src[1] == "verilog" for src in sources)
toplevel = build_name
if sim_top:
toplevel = sim_top.stem
sources.append((str(sim_top), "verilog"))
module_dir = Path(module.__file__).parent
makefile_contents = f"""
SIM = icarus
TOPLEVEL_LANG = verilog
VERILOG_SOURCES += {' '.join(map(lambda src: src[0], sources))}
# TOPLEVEL is the name of the toplevel module in your Verilog or VHDL file
TOPLEVEL = {toplevel}
# MODULE is the basename of the Python test file
MODULE = {build_name}
export PYTHONPATH := {module_dir}:$(PYTHONPATH):{':'.join(sys.path)}
DUMP_VCD = 1
# include cocotb's make rules to take care of the simulator setup
include $(shell cocotb-config --makefiles)/Makefile.sim
"""
tools.write_to_file("Makefile", makefile_contents, force_unix=True)
def _run_sim(build_name: str, platform, soc, namespace):
socket_path = f'{build_name}.pipe'
local_sim_server = start_sim_server(socket_path)
local_sim_server.srv.service.exposed_platform = platform
local_sim_server.srv.service.exposed_soc = soc
local_sim_server.srv.service.exposed_ns = namespace
try:
import pydevd
pydevd_setup = pydevd.SetupHolder.setup
if pydevd_setup is not None:
host, port = pydevd.dispatch()
os.environ['PYDEV_HOST'] = host
os.environ['PYDEV_PORT'] = str(port)
print(f'set environ to host: {host} port: {port}')
except ImportError:
pass
try:
r = subprocess.call(["make"])
if r != 0:
raise OSError("Subprocess failed")
except:
pass
# stop_sim_server(local_sim_server)
class SimCocotbToolchain:
def build(self, platform, fragment,
build_dir = "build",
build_name = "cocotb",
build = True,
run = False,
threads = 1,
verbose = True,
sim_config = None,
coverage = False,
opt_level = "O0",
trace = False,
trace_fst = False,
trace_start = 0,
trace_end = -1,
trace_exit = False,
sim_end = -1,
sim_top = None,
regular_comb = False,
module = None,
soc = None):
if sim_top:
sim_top = Path(sim_top)
sim_top = sim_top.resolve()
# Create build directory
os.makedirs(build_dir, exist_ok=True)
cwd = os.getcwd()
os.chdir(build_dir)
# Finalize design
if not isinstance(fragment, _Fragment):
fragment = fragment.get_fragment()
platform.finalize(fragment)
# Generate verilog
v_output = platform.get_verilog(fragment,
name = build_name,
dummy_signal = True,
regular_comb = False,
blocking_assign = True)
named_sc, named_pc = platform.resolve_signals(v_output.ns)
v_file = build_name + ".v"
if build:
v_output.write(v_file)
platform.add_source(v_file)
# Generate cocotb makefile
_generate_sim_makefile(build_dir, build_name, platform.sources, module, sim_top)
# Run
if run:
_run_sim(build_name, platform, soc, v_output.ns)
os.chdir(cwd)
if build:
return v_output.ns
```
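A sketch of the test-module side of this RPC setup: when running under cocotb (`cocotb.top` is set), `start_sim_server()` with no argument connects back to the rpyc server that `_run_sim()` started in the build process. The test body itself is an assumption, not part of the source:
```python
import cocotb
from cocotb.triggers import Timer

@cocotb.test()
async def smoke_test(dut):
    conn = start_sim_server()   # no socket_path: acts as an rpyc client
    soc  = conn.root.soc        # objects published by _run_sim()
    ns   = conn.root.ns
    await Timer(1, units="us")  # let the DUT run for a bit
```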
#### File: soc/cores/altera_adc.py
```python
from migen import *
from migen.genlib.cdc import PulseSynchronizer
from litex.soc.interconnect.csr import *
# ------------------------------------------------------------------
# -- fiftyfivenm_adcblock parameterized megafunction component declaration
# -- Generated with 'mega_defn_creator' loader - do not edit
# ------------------------------------------------------------------
# component fiftyfivenm_adcblock
# generic (
# analog_input_pin_mask : natural := 0;
# clkdiv : natural := 1;
# device_partname_fivechar_prefix : string := "none";
# enable_usr_sim : natural := 0;
# is_this_first_or_second_adc : natural := 1;
# lpm_hint : string := "UNUSED";
# lpm_type : string := "fiftyfivenm_adcblock";
# prescalar : natural := 0;
# pwd : natural := 0;
# reference_voltage_sim : natural := 65536;
# refsel : natural := 0;
# reserve_block : string := "false";
# simfilename_ch0 : string := "simfilename_ch0";
# simfilename_ch1 : string := "simfilename_ch1";
# simfilename_ch10 : string := "simfilename_ch10";
# simfilename_ch11 : string := "simfilename_ch11";
# simfilename_ch12 : string := "simfilename_ch12";
# simfilename_ch13 : string := "simfilename_ch13";
# simfilename_ch14 : string := "simfilename_ch14";
# simfilename_ch15 : string := "simfilename_ch15";
# simfilename_ch16 : string := "simfilename_ch16";
# simfilename_ch2 : string := "simfilename_ch2";
# simfilename_ch3 : string := "simfilename_ch3";
# simfilename_ch4 : string := "simfilename_ch4";
# simfilename_ch5 : string := "simfilename_ch5";
# simfilename_ch6 : string := "simfilename_ch6";
# simfilename_ch7 : string := "simfilename_ch7";
# simfilename_ch8 : string := "simfilename_ch8";
# simfilename_ch9 : string := "simfilename_ch9";
# testbits : natural := 66;
# tsclkdiv : natural := 1;
# tsclksel : natural := 0 );
# port(
# chsel : in std_logic_vector(4 downto 0) := (others => '0');
# clk_dft : out std_logic;
# clkin_from_pll_c0 : in std_logic := '0';
# dout : out std_logic_vector(11 downto 0);
# eoc : out std_logic;
# soc : in std_logic := '0';
# tsen : in std_logic := '0';
# usr_pwd : in std_logic := '0'
# );
# end component;
class Max10ADC(Module, AutoCSR):
def __init__(self, adc_num: int):
self.adc_num = adc_num
self.chsel = CSRStorage(5)
self.dout = CSRStatus(12)
self.dout_sig = Signal(12)
self.clk_dft = Signal()
self.soc = CSRStorage()
self.soc_adc = Signal()
self.eoc = CSRStatus()
self.eoc_sig = Signal()
self.tsen = CSRStorage()
self.user_pwd = CSRStorage()
self.submodules.soc_ps = PulseSynchronizer("sys", "adc")
self.submodules.eoc_ps = PulseSynchronizer("adc", "sys")
self.comb += [
# self.soc_adc.eq(self.soc_ps.o),
# self.soc_adc.eq(self.soc.storage),
self.eoc_ps.i.eq(self.eoc_sig),
self.eoc.status.eq(self.eoc_sig),
]
self.adc_clk_cnt = Signal(8)
self.sync.adc += self.adc_clk_cnt.eq(self.adc_clk_cnt + 1)
self.submodules.ctrl_fsm = ResetInserter()(FSM(name="ctrl_fsm"))
self.ctrl_fsm.act("IDLE",
If(self.soc.storage,
NextState("WAIT_FOR_EOC"),
)
)
self.idle_flag = self.ctrl_fsm.ongoing("IDLE")
# self.ctrl_fsm.act("START",
# self.soc_ps.i.eq(1),
# NextState("WAIT_FOR_EOC"),
# )
# self.start_flag = self.ctrl_fsm.ongoing("START")
self.ctrl_fsm.act("WAIT_FOR_EOC",
self.soc_adc.eq(1),
If(self.eoc_sig,
NextValue(self.dout.status, self.dout_sig),
NextState("WAIT_FOR_SOC_LOW"),
),
)
self.wait_for_eoc_flag = self.ctrl_fsm.ongoing("WAIT_FOR_EOC")
self.ctrl_fsm.act("WAIT_FOR_SOC_LOW",
If(self.soc.storage == 0,
NextState("IDLE"),
),
)
self.wait_for_soc_low_flag = self.ctrl_fsm.ongoing("WAIT_FOR_SOC_LOW")
adcblock = Instance("fiftyfivenm_adcblock",
name = f"adcblock{adc_num}",
i_chsel = self.chsel.storage,
i_soc = self.soc_adc,
i_clkin_from_pll_c0 = ClockSignal("adc"),
i_tsen = self.tsen.storage,
i_usr_pwd = self.user_pwd.storage,
o_dout = self.dout_sig,
o_clk_dft = self.clk_dft,
o_eoc = self.eoc_sig,
p_clkdiv = 0,
)
self.specials += adcblock
for item in adcblock.items:
if isinstance(item, Instance.Parameter):
item.value.print_plain = True
print('wow')
```
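A host-side sketch of one conversion through the CSRs above, via litex_server's `RemoteClient`. The `adc0` register prefix assumes the core was added as `self.submodules.adc0`; note also that `eoc` is a pulse, so a production driver would need a sticky flag rather than this simple poll:
```python
from litex import RemoteClient

bus = RemoteClient()
bus.open()
bus.regs.adc0_chsel.write(1)          # select ADC channel 1
bus.regs.adc0_soc.write(1)            # request a conversion (FSM: IDLE -> WAIT_FOR_EOC)
while bus.regs.adc0_eoc.read() == 0:  # racy: eoc is a pulse, sketch only
    pass
print(bus.regs.adc0_dout.read())      # 12-bit result, latched on eoc
bus.regs.adc0_soc.write(0)            # FSM: WAIT_FOR_SOC_LOW -> IDLE
bus.close()
```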
#### File: soc/cores/identifier.py
```python
from migen import *
# Identifier ---------------------------------------------------------------------------------------
class Identifier(Module):
def __init__(self, ident):
contents = list(ident.encode())
l = len(contents)
if l > 255:
raise ValueError("Identifier string must be 255 characters or less")
contents.append(0)
def next_pow2(x):
return 1 << (x - 1).bit_length()
self.mem = Memory(8, next_pow2(len(contents)), init=contents)
def get_memories(self):
return [(True, self.mem)]
```
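A small sketch of what the `Identifier` core stores; the string is NUL-terminated and padded to a power-of-two depth so software can read it back byte by byte:
```python
ident = Identifier("My SoC 2022-01-01")  # 17 characters + NUL terminator = 18 bytes
assert ident.mem.width == 8
assert ident.mem.depth == 32             # next power of two above 18
```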
#### File: soc/cores/spi.py
```python
import math
from migen import *
from migen.genlib.cdc import MultiReg
from litex.soc.interconnect.csr import *
from litex.soc.cores.uart import RS232PHYModel
from litex.soc.interconnect import stream
# SPI Master ---------------------------------------------------------------------------------------
class SPIMaster(Module, AutoCSR):
"""4-wire SPI Master
Provides a simple and minimal hardware SPI Master with CPOL=0, CPHA=0 and build-time
configurable data_width and frequency.
"""
pads_layout = [("clk", 1), ("cs_n", 1), ("mosi", 1), ("miso", 1)]
def __init__(self, pads, data_width, sys_clk_freq, spi_clk_freq, with_csr=True, mode="raw"):
assert mode in ["raw", "aligned"]
if pads is None:
pads = Record(self.pads_layout)
if not hasattr(pads, "cs_n"):
pads.cs_n = Signal()
assert len(pads.cs_n) <= 16
self.pads = pads
self.data_width = data_width
self.start = Signal()
self.length = Signal(8)
self.done = Signal()
self.irq = Signal()
self.mosi = Signal(data_width)
self.miso = Signal(data_width)
self.cs = Signal(len(pads.cs_n), reset=1)
self.cs_mode = Signal()
self.loopback = Signal()
self.clk_divider = Signal(16, reset=math.ceil(sys_clk_freq/spi_clk_freq))
if with_csr:
self.add_csr()
# # #
clk_enable = Signal()
xfer_enable = Signal()
count = Signal(max=data_width)
mosi_latch = Signal()
miso_latch = Signal()
# Clock generation -------------------------------------------------------------------------
clk_divider = Signal(16)
clk_rise = Signal()
clk_fall = Signal()
self.comb += clk_rise.eq(clk_divider == (self.clk_divider[1:] - 1))
self.comb += clk_fall.eq(clk_divider == (self.clk_divider - 1))
self.sync += [
clk_divider.eq(clk_divider + 1),
If(clk_rise,
pads.clk.eq(clk_enable),
).Elif(clk_fall,
clk_divider.eq(0),
pads.clk.eq(0),
)
]
# Control FSM ------------------------------------------------------------------------------
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
self.done.eq(1),
If(self.start,
self.done.eq(0),
mosi_latch.eq(1),
NextState("START")
)
)
fsm.act("START",
NextValue(count, 0),
If(clk_fall,
xfer_enable.eq(1),
NextState("RUN")
)
)
fsm.act("RUN",
clk_enable.eq(1),
xfer_enable.eq(1),
If(clk_fall,
NextValue(count, count + 1),
If(count == (self.length - 1),
NextState("STOP")
)
)
)
fsm.act("STOP",
xfer_enable.eq(1),
If(clk_rise,
miso_latch.eq(1),
self.irq.eq(1),
NextState("IDLE")
)
)
# Chip Select generation -------------------------------------------------------------------
if hasattr(pads, "cs_n"):
for i in range(len(pads.cs_n)):
# CS set when enabled and (Xfer enabled or Manual CS mode selected).
cs = (self.cs[i] & (xfer_enable | (self.cs_mode == 1)))
# CS Output/Invert.
self.sync += pads.cs_n[i].eq(~cs)
# Master Out Slave In (MOSI) generation (generated on spi_clk falling edge) ----------------
mosi_data = Signal(data_width)
mosi_array = Array(mosi_data[i] for i in range(data_width))
mosi_sel = Signal(max=data_width)
self.sync += [
If(mosi_latch,
mosi_data.eq(self.mosi),
mosi_sel.eq((self.length-1) if mode == "aligned" else (data_width-1)),
).Elif(clk_fall,
If(xfer_enable, pads.mosi.eq(mosi_array[mosi_sel])),
mosi_sel.eq(mosi_sel - 1)
),
]
# Master In Slave Out (MISO) capture (captured on spi_clk rising edge) --------------------
miso = Signal()
miso_data = Signal(data_width)
self.sync += [
If(clk_rise,
If(self.loopback,
miso_data.eq(Cat(pads.mosi, miso_data))
).Else(
miso_data.eq(Cat(pads.miso, miso_data))
)
)
]
self.sync += If(miso_latch, self.miso.eq(miso_data))
def add_csr(self, with_cs=True, with_loopback=True):
# Control / Status.
self._control = CSRStorage(description="SPI Control.", fields=[
CSRField("start", size=1, offset=0, pulse=True, description="SPI Xfer Start (Write ``1`` to start Xfer)."),
CSRField("length", size=8, offset=8, description="SPI Xfer Length (in bits).")
])
self._status = CSRStatus(description="SPI Status.", fields=[
CSRField("done", size=1, offset=0, description="SPI Xfer Done (when read as ``1``).")
])
self.comb += [
self.start.eq(self._control.fields.start),
self.length.eq(self._control.fields.length),
self._status.fields.done.eq(self.done),
]
# MOSI/MISO.
self._mosi = CSRStorage(self.data_width, reset_less=True, description="SPI MOSI data (MSB-first serialization).")
self._miso = CSRStatus(self.data_width, description="SPI MISO data (MSB-first de-serialization).")
self.comb += [
self.mosi.eq(self._mosi.storage),
self._miso.status.eq(self.miso),
]
# Chip Select.
if with_cs:
self._cs = CSRStorage(description="SPI CS Chip-Select and Mode.", fields=[
CSRField("sel", size=len(self.cs), offset=0, reset=1, values=[
("``0b0..001``", "Chip ``0`` selected for SPI Xfer."),
("``0b1..000``", "Chip ``N`` selected for SPI Xfer.")
]),
CSRField("mode", size=1, offset=16, reset=0, values=[
("``0b0``", "Normal operation (CS handled by Core)."),
("``0b1``", "Manual operation (CS handled by User, direct recopy of ``sel``), useful for Bulk transfers.")
]),
])
self.comb += [
self.cs.eq(self._cs.fields.sel),
self.cs_mode.eq(self._cs.fields.mode)
]
# Loopback.
if with_loopback:
self._loopback = CSRStorage(description="SPI Loopback Mode.", fields=[
CSRField("mode", size=1, values=[
("``0b0``", "Normal operation."),
("``0b1``", "Loopback operation (MOSI to MISO).")
])
])
self.comb += self.loopback.eq(self._loopback.fields.mode)
def add_clk_divider(self):
self._clk_divider = CSRStorage(16, description="SPI Clk Divider.", reset=self.clk_divider.reset)
self.comb += self.clk_divider.eq(self._clk_divider.storage)
# SPI Slave ----------------------------------------------------------------------------------------
class SPISlave(Module):
"""4-wire SPI Slave
Provides a simple and minimal hardware SPI Slave with CPOL=0, CPHA=0 and build-time configurable
data_width.
"""
pads_layout = [("clk", 1), ("cs_n", 1), ("mosi", 1), ("miso", 1)]
def __init__(self, pads, data_width):
if pads is None:
pads = Record(self.pads_layout)
if not hasattr(pads, "cs_n"):
pads.cs_n = Signal()
self.pads = pads
self.data_width = data_width
self.start = Signal() # o, Signal a start of SPI Xfer.
self.length = Signal(8) # o, Signal the length of the SPI Xfer (in bits).
self.done = Signal() # o, Signal that SPI Xfer is done/inactive.
self.irq = Signal() # o, Signal the end of a SPI Xfer.
self.mosi = Signal(data_width) # o, Data received on SPI MOSI.
self.miso = Signal(data_width) # i, Data to send on SPI MISO.
self.loopback = Signal() # i, Loopback enable.
# # #
clk = Signal()
cs = Signal()
mosi = Signal()
miso = Signal()
# IOs <--> Internal (input resynchronization) ----------------------------------------------
self.specials += [
MultiReg(pads.clk, clk),
MultiReg(~pads.cs_n, cs),
MultiReg(pads.mosi, mosi),
]
self.comb += pads.miso.eq(miso)
# Clock detection --------------------------------------------------------------------------
clk_d = Signal()
clk_rise = Signal()
clk_fall = Signal()
self.sync += clk_d.eq(clk)
self.comb += clk_rise.eq(clk & ~clk_d)
self.comb += clk_fall.eq(~clk & clk_d)
# Control FSM ------------------------------------------------------------------------------
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
If(cs,
self.start.eq(1),
NextValue(self.length, 0),
NextState("XFER")
).Else(
self.done.eq(1)
)
)
fsm.act("XFER",
If(~cs,
self.irq.eq(1),
NextState("IDLE")
),
NextValue(self.length, self.length + clk_rise)
)
# Master In Slave Out (MISO) generation (generated on spi_clk falling edge) ----------------
miso_data = Signal(data_width)
self.sync += [
If(self.start,
miso_data.eq(self.miso)
).Elif(cs & clk_fall,
miso_data.eq(Cat(Signal(), miso_data[:-1]))
)
]
self.comb += [
If(self.loopback,
miso.eq(mosi)
).Else(
miso.eq(miso_data[-1]),
)
]
# Master Out Slave In (MOSI) capture (captured on spi_clk rising edge) ---------------------
self.sync += [
If(cs & clk_rise,
self.mosi.eq(Cat(mosi, self.mosi[:-1]))
)
]
# Simulation SPI Master ----------------------------------------------------------------------------
class SPIMasterStreamer(Module):
def __init__(self, pads, sys_clk_freq: int, spi_clk_freq: int):
self.submodules.master = SPIMaster(pads, 8, sys_clk_freq, spi_clk_freq, with_csr=False)
self.sink = stream.Endpoint([("data", 8)]) # module input
self.source = stream.Endpoint([("data", 8)]) # Module output
self.idle_flag = Signal()
self.xfer_from_master_flag = Signal()
self.xfer_to_stream_flag = Signal()
self.cs_sw = Signal()
self.first_byte = Signal()
self.last_byte = Signal()
self.comb += [
self.master.cs.eq(self.cs_sw),
self.master.cs_mode.eq(1),
# self.master.loopback.eq(1),
]
# Control FSM ------------------------------------------------------------------------------
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
self.idle_flag.eq(1),
self.sink.ready.eq(1),
If(self.sink.first,
NextValue(self.cs_sw, 1),
NextValue(self.first_byte, 1),
),
If(self.sink.last,
NextValue(self.last_byte, 1),
),
If(self.sink.valid,
self.master.start.eq(1),
NextState("XFER_FROM_MASTER")
),
)
fsm.act("XFER_FROM_MASTER",
self.xfer_from_master_flag.eq(1),
If(self.master.irq,
NextState("XFER_TO_STREAM"),
),
)
fsm.act("XFER_TO_STREAM",
self.xfer_to_stream_flag.eq(1),
self.source.valid.eq(1),
self.source.first.eq(self.first_byte),
self.source.last.eq(self.last_byte),
If(self.source.ready,
NextState("IDLE"),
If(self.first_byte,
NextValue(self.first_byte, 0),
),
If(self.last_byte,
NextValue(self.cs_sw, 0),
NextValue(self.last_byte, 0),
),
),
)
self.comb += [
self.master.length.eq(8),
self.master.mosi.eq(self.sink.payload.data),
self.source.payload.data.eq(self.master.miso),
]
class SimSPIMaster(Module):
def __init__(self, phy: RS232PHYModel, pads, sys_clk_freq: int, spi_clk_freq: int):
self.phy = phy
self.submodules.spi_streamer = SPIMasterStreamer(pads, sys_clk_freq, spi_clk_freq)
self.comb += self.spi_streamer.source.connect(self.phy.sink)
self.comb += self.phy.source.connect(self.spi_streamer.sink)
```
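A host-side sketch of one 8-bit transfer using the CSR fields defined in `add_csr()`; the `spi_master` register prefix and the litex_server transport are assumptions:
```python
from litex import RemoteClient

bus = RemoteClient()
bus.open()
bus.regs.spi_master_mosi.write(0xA5)                # data to shift out (MSB first)
bus.regs.spi_master_control.write((8 << 8) | 1)     # length=8 (bits 15:8), start=1 (bit 0)
while not (bus.regs.spi_master_status.read() & 1):  # poll done (bit 0)
    pass
print(hex(bus.regs.spi_master_miso.read()))         # data shifted in
bus.close()
```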
|
{
"source": "jevinskie/pcie-mitm",
"score": 2
}
|
#### File: test/avalon_mm_gpio/sim.py
```python
import argparse
from migen import *
from litex.gen.fhdl.utils import get_signals
from litex.build.generic_platform import *
from litex.build.sim import SimPlatform
from litex.build.sim.config import SimConfig
from litex.build.sim.verilator import verilator_build_args, verilator_build_argdict
from litex.soc.interconnect import avalon
from litex.soc.interconnect import wishbone
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from liteeth.phy.model import LiteEthPHYModel
from litescope import LiteScopeAnalyzer
from pcie_mitm.ip.gpio import AvalonMMGPIO
# IOs ----------------------------------------------------------------------------------------------
_io = [
("sys_clk", 0, Pins(1)),
("sys_rst", 0, Pins(1)),
("serial", 0,
Subsignal("source_valid", Pins(1)),
Subsignal("source_ready", Pins(1)),
Subsignal("source_data", Pins(8)),
Subsignal("sink_valid", Pins(1)),
Subsignal("sink_ready", Pins(1)),
Subsignal("sink_data", Pins(8)),
),
("eth_clocks", 0,
Subsignal("tx", Pins(1)),
Subsignal("rx", Pins(1)),
),
("eth", 0,
Subsignal("source_valid", Pins(1)),
Subsignal("source_ready", Pins(1)),
Subsignal("source_data", Pins(8)),
Subsignal("sink_valid", Pins(1)),
Subsignal("sink_ready", Pins(1)),
Subsignal("sink_data", Pins(8)),
),
# Leds.
("user_led", 0, Pins(1)),
("user_led", 1, Pins(1)),
("user_led", 2, Pins(1)),
("user_led", 3, Pins(1)),
("user_led", 4, Pins(1)),
("user_led", 5, Pins(1)),
("user_led", 6, Pins(1)),
("user_led", 7, Pins(1)),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(SimPlatform):
def __init__(self):
SimPlatform.__init__(self, "SIM", _io)
# Bench SoC ----------------------------------------------------------------------------------------
class SimSoC(SoCCore):
def __init__(self, sys_clk_freq = None, trace=False, **kwargs):
platform = Platform()
sys_clk_freq = int(sys_clk_freq)
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, clk_freq=sys_clk_freq,
ident = "Avalon-MM GPIO WB bridge test simulation",
**kwargs)
self.add_constant("CONFIG_DISABLE_DELAYS", 1)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = CRG(platform.request("sys_clk"))
# Trace ------------------------------------------------------------------------------------
self.platform.add_debug(self, reset=0)
# Etherbone --------------------------------------------------------------------------------
self.submodules.ethphy = LiteEthPHYModel(self.platform.request("eth"))
self.add_etherbone(phy=self.ethphy, ip_address = "192.168.42.50", buffer_depth=16*4096-1)
# Leds -------------------------------------------------------------------------------------
led_pads = platform.request_all("user_led")
if False:
self.submodules.leds = LedChaser(
pads = led_pads,
sys_clk_freq = sys_clk_freq)
else:
self.submodules.led_gpio = AvalonMMGPIO(self.platform)
for src, sink in zip(self.led_gpio.out_port, led_pads):
self.comb += sink.eq(src)
self.led_gpio_wb = wishbone.Interface(adr_width=2)
self.add_memory_region("gpio", 0x9000_0000, length=4*4, type="io")
self.add_wb_slave(0x9000_0000, self.led_gpio_wb)
self.submodules.led_gpio_avmm2wb = avalon.AvalonMM2Wishbone(self.led_gpio.avmm, self.led_gpio_wb)
if True:
analyzer_signals = set([
*get_signals(led_pads),
*get_signals(self.led_gpio),
*get_signals(self.led_gpio_wb),
*get_signals(self.led_gpio_avmm2wb),
])
analyzer_signals_denylist = set([
])
analyzer_signals -= analyzer_signals_denylist
analyzer_signals = list(analyzer_signals)
self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals,
depth = 64*1,
register = True,
clock_domain = "sys",
samplerate = sys_clk_freq,
csr_csv = "analyzer.csv")
# Main ---------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteEth Bench Simulation")
parser.add_argument("--sys-clk-freq", default=200e6, help="System clock frequency (default: 200MHz)")
parser.add_argument("--debug-soc-gen", action="store_true", help="Don't run simulation")
builder_args(parser)
soc_core_args(parser)
verilator_build_args(parser)
args = parser.parse_args()
sim_config = SimConfig()
sim_config.add_clocker("sys_clk", freq_hz=args.sys_clk_freq)
sim_config.add_module("serial2console", "serial")
sim_config.add_module("ethernet", "eth", args={"interface": "tap0", "ip": "192.168.42.100"})
soc_kwargs = soc_core_argdict(args)
builder_kwargs = builder_argdict(args)
verilator_build_kwargs = verilator_build_argdict(args)
soc_kwargs['sys_clk_freq'] = int(args.sys_clk_freq)
soc_kwargs['uart_name'] = 'sim'
# soc_kwargs['cpu_type'] = 'None'
soc_kwargs['cpu_type'] = 'femtorv' # slow
soc_kwargs['cpu_variant'] = 'quark'
builder_kwargs['csr_csv'] = 'csr.csv'
soc = SimSoC(
trace=args.trace,
trace_reset_on=int(float(args.trace_start)) > 0 or int(float(args.trace_end)) > 0,
**soc_kwargs)
if not args.debug_soc_gen:
builder = Builder(soc, **builder_kwargs)
for i in range(2):
build = (i == 0)
run = (i == 1)
builder.build(
build=build,
run=run,
sim_config=sim_config,
**verilator_build_kwargs,
)
if __name__ == "__main__":
main()
```
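Once the simulation is running, the Etherbone bridge at 192.168.42.50 can be driven from the host through `litex_server --udp`. A sketch, assuming the Avalon-MM GPIO exposes its data register at offset 0 of the 0x9000_0000 region:
```python
from litex import RemoteClient

bus = RemoteClient()
bus.open()
bus.write(0x9000_0000, 0xAA)       # drive the eight user_led outputs
print(hex(bus.read(0x9000_0000)))
bus.close()
```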
#### File: test/avalon_mm_gpio/test_smart_base_literals.py
```python
import math
def _ent_dec(buf):
"""Get the entropy of a decimal byte-string [0-9]"""
freqs = [0] * 10
norm_buf = []
for b in buf:
if ord('0') <= b <= ord('9'):
norm_buf.append(b - ord('0'))
else:
raise ValueError(f"not a decimal digit: '{chr(b)}'")
for b in norm_buf:
freqs[b] += 1
num_bytes = len(norm_buf)
freqs = map(lambda cnt: cnt / num_bytes, freqs)
ent = 0
nsyms = 0
for freq in freqs:
if freq == 0:
continue
nsyms += 1
ent += freq * math.log2(freq)
if ent: # avoid -0.0
ent = -ent
return ent, nsyms
def _ent_hex(buf):
"""Get the entropy of a decimal byte-string [0-9a-z]"""
freqs = [0] * 16
norm_buf = []
if buf.startswith(b'0x'):
buf = buf[2:]
for b in buf:
if ord('0') <= b <= ord('9'):
norm_buf.append(b - ord('0'))
elif ord('a') <= b <= ord('f'):
norm_buf.append(b - ord('a') + 10)
else:
raise ValueError(f"not a hex digit: '{chr(b)}'")
for b in norm_buf:
freqs[b] += 1
num_bytes = len(norm_buf)
freqs = map(lambda cnt: cnt / num_bytes, freqs)
ent = 0
nsyms = 0
for freq in freqs:
if freq == 0:
continue
nsyms += 1
ent += freq * math.log2(freq)
if ent: # avoid -0.0
ent = -ent
return ent, nsyms
def _is_pretty_base_hex(n):
if n < 0:
n = -n
if n == 0:
return False
if math.log10(n) == int(math.log10(n)):
return False
if math.log2(n) == int(math.log2(n)) and n >= 16:
return True
d = str(n).encode('utf8')
h = hex(n).encode('utf8')[2:]
dl, hl = len(d), len(h)
ed, nsymsd = _ent_dec(d)
eh, nsymsh = _ent_hex(h)
frac_unique_d = nsymsd / dl
frac_unique_h = nsymsh / hl
if frac_unique_d < frac_unique_h:
return False
elif frac_unique_h < frac_unique_d:
return True
if n < 1000:
return False
if ed < eh:
return False
elif eh < ed:
return True
return False
for n in (1, 4, 9, 10, 11, 12, 13, 14, 15, 16, 42, 99, 100, 101, 243, 256, 1000, 1024, 4000, 4096, 1223334444, 603979776):
is_hex = _is_pretty_base_hex(n)
base_fmt = ":x" if is_hex else ":d"
fmt = "n: {} n hex: {} smrt_ltrl: {}{" + base_fmt + "}"
print(fmt.format(n, hex(n), "0x" if is_hex else "", n))
```
|
{
"source": "jevinskie/poetry-core",
"score": 3
}
|
#### File: core/version/parser.py
```python
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Optional
if TYPE_CHECKING:
from lark import Lark
from lark import Tree
class Parser:
def __init__(
self, grammar: Path, parser: str = "lalr", debug: bool = False
) -> None:
self._grammar = grammar
self._parser = parser
self._debug = debug
self._lark: Optional["Lark"] = None
def parse(self, text: str, **kwargs: Any) -> "Tree":
from lark import Lark
if self._lark is None:
self._lark = Lark.open(
grammar_filename=self._grammar, parser=self._parser, debug=self._debug
)
return self._lark.parse(text=text, **kwargs)
```
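A hypothetical usage sketch; the grammar path and marker string are assumptions (poetry-core ships `.lark` grammars that this `Parser` loads lazily on the first `parse()` call):
```python
parser = Parser(Path("src/poetry/core/version/grammars/markers.lark"))
tree = parser.parse('python_version >= "3.6"')
print(tree.pretty())
```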
#### File: tests/json/test_poetry_schema.py
```python
from typing import Dict
import pytest
from poetry.core.json import validate_object
@pytest.fixture
def base_object() -> Dict:
return {
"name": "myapp",
"version": "1.0.0",
"description": "Some description.",
"dependencies": {"python": "^3.6"},
"dev-dependencies": {},
}
@pytest.fixture
def multi_url_object() -> Dict:
return {
"name": "myapp",
"version": "1.0.0",
"description": "Some description.",
"dependencies": {
"python": [
{
"url": "https://download.pytorch.org/whl/cpu/torch-1.4.0%2Bcpu-cp37-cp37m-linux_x86_64.whl",
"platform": "linux",
},
{"path": "../foo", "platform": "darwin"},
]
},
"dev-dependencies": {},
}
def test_path_dependencies(base_object: Dict):
base_object["dependencies"].update({"foo": {"path": "../foo"}})
base_object["dev-dependencies"].update({"foo": {"path": "../foo"}})
assert len(validate_object(base_object, "poetry-schema")) == 0
def test_multi_url_dependencies(multi_url_object: Dict):
assert len(validate_object(multi_url_object, "poetry-schema")) == 0
def test_multiline_description(base_object: Dict):
bad_description = "Some multi-\nline string"
base_object["description"] = bad_description
errors = validate_object(base_object, "poetry-schema")
assert len(errors) == 1
assert errors[0] == f"[description] {bad_description!r} does not match '^[^\\n]*$'"
```
|
{
"source": "jevinskie/ps3mfw-ng",
"score": 3
}
|
#### File: ps3mfw/tools/ps3mfw.py
```python
import argparse
def real_main(args):
print(f"args: {args}")
def main():
parser = argparse.ArgumentParser(description="ps3mfw")
parser.add_argument(
"--in-pup", type=str, help="Input PUP FW file", metavar="IN_PUP"
)
parser.add_argument(
"--out-pup", type=str, help="Output PUP FW file", metavar="OUT_PUP"
)
parser.add_argument(
"--out-dir", type=str, help="Output directory", metavar="OUT_DIR"
)
real_main(parser.parse_args())
return 0
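# Entry-point guard (assumed addition, mirroring the other tools in this document):
if __name__ == "__main__":
    main()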
```
#### File: tests/ps3mfw/test_certfile.py
```python
import importlib.resources
from contextlib import nullcontext
from pathlib import Path
import pytest
from ps3mfw.certfile import CertFile
from ps3mfw.io_extras import HTTPFile
from ps3mfw.pup import PUPFS
from .http_ranges_server import http_server
CWD = Path(__file__).parent
def test_certfile_ps3swu():
pytest.skip()
with http_server(directory=CWD):
# with nullcontext():
# url = "https://archive.org/download/ps3updat-cex-3.55/ps3updat-cex-3.55.pup"
url = "http://localhost:38080/ps3updat-cex-3.55.pup"
pupfh = HTTPFile(url)
# pup_path = importlib.resources.files(__package__) / "ps3updat-cex-3.55.pup"
# FIXME: missing refcnt incr? PUPFS(open()) doesn't work, closed file error
# pupfh = open(pup_path, 'rb')
pupfs = PUPFS(pupfh)
slf_fh = pupfs.open("/ps3swu.self", "rb")
slf = CertFile.parse_stream(slf_fh)
# print(slf)
def test_certfile_default_spp():
spp_path = importlib.resources.files(__package__) / "default.spp"
# FIXME: missing refcnt incr? PUPFS(open()) doesn't work, closed file error
sppfh = open(spp_path, "rb")
spp = CertFile.parse_stream(sppfh)
print(spp)
```
|
{
"source": "jevinskie/pycuse",
"score": 2
}
|
#### File: pycuse/pycuse/cuse.py
```python
from typing import Final, Sequence
from ._cuse import ffi, lib
CUSE_UNRESTRICTED_IOCTL: Final[int] = 1 << 0
def cstr_array(strs: Sequence[str]):
cstrs = [ffi.new("char[]", s.encode("utf-8")) for s in strs]
res = ffi.new(f"char*[{len(strs)}]", cstrs)
return res
class CUSEDev:
def __init__(self, name: str, fg: bool = True, debug: bool = True):
args = [f"pycuse-{name}"]
if fg:
args.append("-f")
if debug:
args.append("-d")
ci = ffi.new("struct cuse_info *")
print(ci)
cargs = cstr_array(args)
print(cargs)
ci.flags = CUSE_UNRESTRICTED_IOCTL
ci.dev_info_argc = 1
ci.dev_info_argv = cstr_array([f"DEVNAME={name}"])
ops = ffi.new("struct cuse_lowlevel_ops *")
lib.cuse_lowlevel_main(len(args), cargs, ci, ops, ffi.NULL)
```
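A minimal sketch of registering a CUSE character device with the wrapper above; the device name is an assumption, root privileges and `/dev/cuse` are required, and `cuse_lowlevel_main()` blocks until the device is torn down:
```python
if __name__ == "__main__":
    CUSEDev("pycuse-example", fg=True, debug=True)  # creates /dev/pycuse-example
```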
|
{
"source": "jevinskie/pypci",
"score": 2
}
|
#### File: pypci/pypci/pci.py
```python
from ._native import lib, ffi
from .device import PciDevice, PciClass
from .filter import PciFilter
from typing import MutableMapping, Iterator, Iterable, Tuple, Optional
import enum
class PciParameters(MutableMapping[str, str]):
def __init__(self, pci: 'Pci'):
self._pci = pci
def __iter__(self) -> Iterator[str]:
pacc = self._pci._pacc
if pacc is None:
return
ret = []
param = lib.pci_walk_params(pacc, ffi.NULL)
while param != ffi.NULL:
ret.append(ffi.string(param.param).decode('utf-8'))
param = lib.pci_walk_params(pacc, param)
for p in ret:
yield p
def items(self) -> Iterable[Tuple[str, str]]:
def gen():
pacc = self._pci._pacc
if pacc is None:
return
ret = []
param = lib.pci_walk_params(pacc, ffi.NULL)
while param != ffi.NULL:
k, v, h = ffi.string(param.param).decode('utf-8'),\
ffi.string(param.value).decode('utf-8'),\
ffi.string(param.help).decode('utf-8')
cls = type(k, (tuple,), dict(__doc__=h))
ret.append(cls((k, v)))
param = lib.pci_walk_params(pacc, param)
for p in ret:
yield p
return list(gen())
def __len__(self):
return sum(1 for _ in self)
def __getitem__(self, key: str) -> str:
pacc = self._pci._pacc
if pacc is None:
raise KeyError(key)
key_ = key.encode('utf-8') + b'\0'
ret = lib.pci_get_param(pacc, ffi.from_buffer(key_))
if ret == ffi.NULL:
raise KeyError(key)
return ffi.string(ret).decode('utf-8')
def __setitem__(self, key: str, val: str):
pacc = self._pci._pacc
if pacc is None:
raise KeyError(key)
key_ = key.encode('utf-8') + b'\0'
val_ = val.encode('utf-8') + b'\0'
if lib.pci_set_param(pacc, ffi.from_buffer(key_), ffi.from_buffer(val_)) != 0:
raise KeyError(key)
def __delitem__(self, key: str):
raise KeyError(key)
def __repr__(self):
return repr(dict(self.items()))
PciAccessType = enum.IntFlag('PciAccessType', dict(
(''.join(x.capitalize() for x in k.split('_')[2:]), getattr(lib, k))
for k in dir(lib) if k.startswith('PCI_ACCESS_')))
PciLookupMode = enum.IntFlag('PciLookupMode', dict(
(''.join(x.capitalize() for x in k.split('_')[2:]), getattr(lib, k))
for k in dir(lib) if k.startswith('PCI_LOOKUP_')))
class Pci:
def __init__(self):
self._pacc = lib.pci_alloc()
lib.pci_init(self._pacc)
def close(self):
if self._pacc is not None:
self._pacc, pacc = None, self._pacc
lib.pci_cleanup(pacc)
def __del__(self):
self.close()
def scan_bus(self):
lib.pci_scan_bus(self._pacc)
def get_dev(self, domain: int, bus: int, dev: int, func: int) -> PciDevice:
return PciDevice(self, lib.pci_get_dev(self._pacc, domain, bus, dev, func))
@staticmethod
def lookup_method(name: str) -> int:
name_ = name.encode('utf-8') + b'\0'
return lib.pci_lookup_method(ffi.from_buffer(name_))
@staticmethod
def get_method_name(index: int) -> str:
return ffi.string(lib.pci_get_method_name(index))
@property
def parameters(self) -> PciParameters:
return PciParameters(self)
def filter(self) -> PciFilter:
filt = ffi.new('struct pci_filter *')
lib.pci_filter_init(self._pacc, filt)
return PciFilter(filt)
def lookup_name(self, flags: PciLookupMode, *args: int) -> Optional[str]:
buf = ffi.new('char[512]')
ret = lib.pci_lookup_name(self._pacc, buf, 512, flags.value, *iter(ffi.cast('int', arg) for arg in args))
if ret == ffi.NULL:
return None
else:
return ffi.string(ret).decode('utf-8')
def lookup(self, vendor_id: Optional[int] = None, device_id: Optional[int] = None,
subvendor_id: Optional[int] = None, subdev_id: Optional[int] = None,
class_id: Optional[PciClass] = None, progif: Optional[int] = None,
flags: PciLookupMode = PciLookupMode(0)):
return PciLookupName(self, vendor_id, device_id, subvendor_id, subdev_id,
class_id, progif, flags)
@property
def method(self) -> PciAccessType:
return PciAccessType(self._pacc.method)
@method.setter
def method(self, val: PciAccessType):
self._pacc.method = val.value
@property
def writeable(self) -> bool:
return self._pacc.writeable != 0
@writeable.setter
def writeable(self, val: bool):
self._pacc.writeable = 1 if val else 0
@property
def buscentric(self) -> bool:
return self._pacc.buscentric != 0
@buscentric.setter
def buscentric(self, val: bool):
self._pacc.buscentric = 1 if val else 0
@property
def id_file_name(self) -> str:
return ffi.string(self._pacc.id_file_name).decode('utf-8')
@id_file_name.setter
def id_file_name(self, val: str):
self._id_file_name = val.encode('utf-8') + b'\0'
self._pacc.id_file_name = ffi.from_buffer(self._id_file_name)
self._pacc.free_id_name = 0
@property
def numeric_ids(self) -> bool:
return self._pacc.numeric_ids != 0
@numeric_ids.setter
def numeric_ids(self, val: bool):
self._pacc.numeric_ids = 1 if val else 0
@property
def id_lookup_mode(self) -> PciLookupMode:
return PciLookupMode(self._pacc.id_lookup_mode)
@id_lookup_mode.setter
def id_lookup_mode(self, val: PciLookupMode):
self._pacc.id_lookup_mode = val.value
@property
def debugging(self) -> bool:
return self._pacc.debugging != 0
@debugging.setter
def debugging(self, val: bool):
self._pacc.debugging = 1 if val else 0
@property
def devices(self) -> Iterable[PciDevice]:
dev = self._pacc.devices
while dev != ffi.NULL:
yield self.get_dev(dev.domain, dev.bus, dev.dev, dev.func)
dev = dev.next
class PciLookupName:
def __init__(self, pci: Pci, vendor_id: Optional[int] = None, device_id: Optional[int] = None,
subvendor_id: Optional[int] = None, subdev_id: Optional[int] = None,
class_id: Optional[PciClass] = None, progif: Optional[int] = None,
flags: PciLookupMode = PciLookupMode(0)):
self._pci = pci
self.vendor_id = vendor_id
self.device_id = device_id
self.subvendor_id = subvendor_id
self.subdev_id = subdev_id
self.class_id = class_id
self.progif = progif
self.flags = flags
@property
def vendor(self) -> str:
"(vendorID) -> vendor"
if self.vendor_id is None:
raise ValueError("vendor_id is not specified")
return self._pci.lookup_name(self.flags & ~0xffff | PciLookupMode.Vendor, self.vendor_id)
@property
def device(self) -> str:
"(vendorID, deviceID) -> device"
if self.vendor_id is None:
raise ValueError("vendor_id is not specified")
if self.device_id is None:
raise ValueError("device_id is not specified")
return self._pci.lookup_name(self.flags & ~0xffff | PciLookupMode.Device, self.vendor_id, self.device_id)
@property
def vendor_device(self) -> str:
"(vendorID, deviceID) -> combined vendor and device"
if self.vendor_id is None:
raise ValueError("vendor_id is not specified")
if self.device_id is None:
raise ValueError("device_id is not specified")
return self._pci.lookup_name(self.flags & ~0xffff | PciLookupMode.Vendor | PciLookupMode.Device,
self.vendor_id, self.device_id)
@property
def subsystem_vendor(self) -> str:
"(subvendorID) -> subsystem vendor"
if self.subvendor_id is None:
raise ValueError("subvendor_id is not specified")
return self._pci.lookup_name(self.flags & ~0xffff | PciLookupMode.Subsystem | PciLookupMode.Vendor,
self.subvendor_id)
@property
def subsystem_device(self) -> str:
"(vendorID, deviceID, subvendorID, subdevID) -> subsystem device"
if self.vendor_id is None:
raise ValueError("subvendor_id is not specified")
if self.device_id is None:
raise ValueError("device_id is not specified")
if self.subvendor_id is None:
raise ValueError("subvendor_id is not specified")
if self.subdev_id is None:
raise ValueError("subdev_id is not specified")
return self._pci.lookup_name(self.flags & ~0xffff | PciLookupMode.Subsystem | PciLookupMode.Device,
self.vendor_id, self.device_id, self.subvendor_id, self.subdev_id)
@property
def subsystem_vendor_device(self) -> str:
"(vendorID, deviceID, subvendorID, subdevID) -> combined subsystem v+d"
if self.vendor_id is None:
raise ValueError("subvendor_id is not specified")
if self.device_id is None:
raise ValueError("device_id is not specified")
if self.subvendor_id is None:
raise ValueError("subvendor_id is not specified")
if self.subdev_id is None:
raise ValueError("subdev_id is not specified")
return self._pci.lookup_name(self.flags & ~0xffff |
PciLookupMode.Subsystem | PciLookupMode.Vendor | PciLookupMode.Device,
self.vendor_id, self.device_id, self.subvendor_id, self.subdev_id)
@property
def generic_subsystem(self) -> str:
"(subvendorID, subdevID) -> generic subsystem"
if self.subvendor_id is None:
raise ValueError("subvendor_id is not specified")
if self.subdev_id is None:
raise ValueError("subdev_id is not specified")
return self._pci.lookup_name(self.flags & ~0xffff |
PciLookupMode.Subsystem | PciLookupMode.Vendor | PciLookupMode.Device,
-1, -1, self.subvendor_id, self.subdev_id)
@property
def pci_class(self) -> str:
"(classID) -> class"
if self.class_id is None:
raise ValueError("class_id is not specified")
return self._pci.lookup_name(self.flags & ~0xffff | PciLookupMode.Class, self.class_id)
@property
def programming_interface(self) -> str:
"(classID, progif) -> programming interface"
if self.class_id is None:
raise ValueError("class_id is not specified")
if self.progif is None:
raise ValueError("progif is not specified")
return self._pci.lookup_name(self.flags & ~0xffff | PciLookupMode.Class | PciLookupMode.Progif,
self.class_id, self.progif)
```
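An enumeration sketch built on the wrappers above; the `PciDevice` attribute names (`vendor_id`, `device_id`, `bus`, `dev`, `func`) mirror libpci's `struct pci_dev` but are assumptions about the `device.py` module, which is not shown here:
```python
pci = Pci()
pci.scan_bus()
for dev in pci.devices:
    desc = pci.lookup(vendor_id=dev.vendor_id, device_id=dev.device_id).vendor_device
    print(f"{dev.bus:02x}:{dev.dev:02x}.{dev.func}: {desc}")
pci.close()
```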
|
{
"source": "jevinskie/pypcode-emu",
"score": 2
}
|
#### File: pypcode-emu/pypcode_emu/ntypes.py
```python
import operator
from typing import Type
import nativetypes as nt
from bidict import bidict
uint1 = nt.nint_type("uint1", 1, False)
int1 = nt.nint_type("int1", 1, True)
size2uintN = bidict({0: uint1, 1: nt.uint8, 2: nt.uint16, 4: nt.uint32, 8: nt.uint64})
size2intN = bidict({1: int1, 2: nt.int16, 4: nt.int32, 8: nt.int64})
def uintN(nbytes: int) -> Type[nt.nint]:
return size2uintN[nbytes]
def intN(nbytes: int) -> Type[nt.nint]:
return size2intN[nbytes]
def as_u(self: nt.nint):
if self.v < 0:
return nt.nint((1 << self.b) + self.v, self.b, False)
return nt.nint(self.v, self.b, False)
nt.nint.as_u = property(as_u)
def as_s(self: nt.nint):
if self.s:
return self
return nt.nint(self.v, self.b, True)
nt.nint.as_s = property(as_s)
def sext(self: nt.nint, nbits: int):
return nt.nint(self.as_s.v, nbits, True)
nt.nint.sext = sext
def zext(self: nt.nint, nbits: int):
return nt.nint(self.as_u.v, nbits, False)
nt.nint.zext = zext
def asr(self: nt.nint, nbits: int):
return nt.nint((self.as_s >> nbits).v, self.b, True)
nt.nint.asr = asr
nt.nint.CMP_MAP = {
">": "gt",
"<": "lt",
"==": "eq",
"!=": "ne",
">=": "ge",
"<=": "le",
}
def cmp(self: nt.nint, cmp: str, other: nt.nint) -> nt.uint8:
signed = cmp.startswith("s")
if signed:
a, b = self.as_s, other.as_s
else:
a, b = self.as_u, other.as_u
cmp = cmp.lstrip("s")
py_op_name = f"__{nt.nint.CMP_MAP[cmp]}__"
op_func = getattr(operator, py_op_name)
return nt.uint8(1 if op_func(a, b) else 0)
nt.nint.cmp = cmp
def strict_eq(self: nt.nint, other: nt.nint) -> bool:
return self.v == other.v and self.b == other.b and self.s == other.s
nt.nint.strict_eq = strict_eq
def comp_time_eq(self: nt.nint, other: nt.nint) -> bool:
return self.strict_eq(other)
nt.nint.comp_time_eq = comp_time_eq
def nint_hash(self: nt.nint) -> int:
return hash((self.v, self.b, self.s))
nt.nint.__hash__ = nint_hash
exported_attrs_names = list(
filter(lambda n: not n.startswith("__") and not n.endswith("__"), dir(nt))
)
exported_attrs = [getattr(nt, n) for n in exported_attrs_names]
exported_attrs = [*exported_attrs, uint1, int1]
exported_attrs_names = [*exported_attrs_names, "uint1", "int1"]
for n, a in zip(exported_attrs_names, exported_attrs):
globals()[n] = a
nint = nt.nint
uint8, int8 = nt.uint8, nt.int8
uint16, int16 = nt.uint16, nt.int16
uint32, int32 = nt.uint32, nt.int32
uint64, int64 = nt.uint64, nt.int64
__all__ = tuple(exported_attrs_names)
```
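A quick demonstration of the monkey-patched helpers defined above:
```python
a = int8(-1)
assert a.as_u.v == 0xFF                    # unsigned reinterpretation of the bit pattern
assert a.sext(16).v == -1                  # sign-extension keeps the value
assert a.zext(16).v == 0xFF                # zero-extension keeps the unsigned pattern
assert int8(-1).cmp("s<", int8(0)).v == 1  # signed compare helper returns uint8(1)
```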
#### File: pypcode_emu/tools/pypcode_emu_llvm_tool.py
```python
import argparse
from pypcode_emu.llvm import LLVMELFLifter
def real_main(args):
lifter = LLVMELFLifter(
args.elf,
args.exe,
entry=args.entry,
bb_override=args.bb_addr,
asan=args.asan,
msan=args.msan,
opt=args.O,
trace=args.trace,
arg0=args.arg0,
inline=args.inline,
assertions=args.assertions,
)
lifter.lift()
# try:
# lifter.lift()
# except Exception as e:
# print("IR Module:")
# print(str(lifter.m))
# print(f"lifting error: {e}")
def main() -> int:
parser = argparse.ArgumentParser(description="pypcode-emu")
parser.add_argument("elf", help="Input ELF file", metavar="ELF")
parser.add_argument("exe", help="Output executable", metavar="EXE")
parser.add_argument("-e", "--entry", help="Entry point", metavar="ENTRY")
parser.add_argument(
"-0",
"--arg0",
type=lambda n: int(n, 0),
default=0,
help="Argument 0",
metavar="ARG0",
)
parser.add_argument("-t", "--trace", help="Enable tracing", action="store_true")
parser.add_argument("-i", "--inline", help="Enable inlining", action="store_true")
parser.add_argument(
"-A",
"--no-assert",
dest="assertions",
help="Disable Assertions",
action="store_false",
)
parser.add_argument("--asan", help="Enable Address Sanitizer", action="store_true")
parser.add_argument("--msan", help="Enable Memory Sanitizer", action="store_true")
parser.add_argument(
"-O", default="z", help="Optimization level", metavar="OPT_LEVEL"
)
parser.add_argument(
"-b",
"--bb-addr",
help="Basic block address override",
type=lambda n: int(n, 0),
action="append",
metavar="BB",
)
args = parser.parse_args()
real_main(args)
return 0
if __name__ == "__main__":
main()
```
#### File: pypcode-emu/pypcode_emu/utils.py
```python
import shutil
import subprocess
import sys
from typing import Callable
from bidict import bidict
def first(iterable, default=None):
for item in iterable:
return item
return default
def first_where(iterable, pred, default=None):
return first((x for x in iterable if pred(x)), default=default)
def first_where_key_is(iterable, key, val, default=None):
return first_where(iterable, lambda x: x[key] == val, default=default)
def first_where_attr_is(iterable, key, val, default=None):
return first_where(iterable, lambda x: getattr(x, key) == val, default=default)
def run_cmd(*args, log: bool = True):
args = (*args,)
if log:
print(f"run_cmd args: {' '.join(map(str, args))}", file=sys.stderr)
r = subprocess.run(list(map(str, args)), capture_output=True)
if r.returncode != 0:
sys.stderr.buffer.write(r.stdout)
sys.stderr.buffer.write(r.stderr)
raise subprocess.CalledProcessError(r.returncode, args, r.stdout, r.stderr)
try:
r.out = r.stdout.decode()
except UnicodeDecodeError:
pass
return r
def gen_cmd(bin_name: str) -> Callable:
bin_path = shutil.which(bin_name)
assert bin_path is not None
return lambda *args, **kwargs: run_cmd(bin_path, *args, **kwargs)
class UniqueBiDict(bidict):
"""bidict with additional requirement that keys must be disjoint from items"""
def __getitem__(self, item):
a = self._fwdm.get(item, None)
b = self._invm.get(item, None)
if a is None and b is None:
raise KeyError(item)
assert (a is None) ^ (b is None)
return a if b is None else b
```
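A usage sketch for the helpers above, assuming `git` is on `PATH`:
```python
git = gen_cmd("git")                     # resolve the binary once, get a callable
r = git("rev-parse", "--short", "HEAD")  # raises CalledProcessError on failure
print(r.out.strip())
print(first_where([1, 4, 9], lambda x: x > 3))  # -> 4
```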
#### File: pypcode-emu/scripts/inh_test.py
```python
from __future__ import annotations
from typing import Optional
from icecream import ic
from wrapt import ObjectProxy
class Const:
constant: str
def __init__(self, val):
self.constant = str(val)
def __add__(self, other: Const) -> Const:
assert isinstance(other, Const)
return type(self)(f"{self} C+ {other}")
def __str__(self) -> str:
return f"C:{self.constant}"
def __repr__(self) -> str:
return f"Const({self.constant})"
def __int__(self) -> int:
return int(self.constant)
class VarVal:
name: str
def __init__(self, name: str):
self.name = name
def __str__(self) -> str:
return f"V:{self.name}"
def __repr__(self) -> str:
return f"VarVal({self.name})"
def __add__(self, other) -> VarVal:
return type(self)(f"{self} VV+ {other}")
class IntVal(ObjectProxy):
_self_concrete: Optional[int]
def __init__(self, v):
if isinstance(v, IntVal) and isinstance(v, ObjectProxy):
v = v.__wrapped__
super().__init__(v)
try:
self._self_concrete = int(v)
except (ValueError, TypeError):
self._self_concrete = None
def __repr__(self) -> str:
return f"IntVal({self})"
@property
def concrete(self) -> Optional[int]:
return self._self_concrete
@property
def is_const(self):
return isinstance(self, Const)
def __add__(self, other: IntVal):
if self.is_const and other.is_const:
return type(self)(self.__wrapped__ + other)
return type(self)(f"{self} IV+ {other}")
c42 = Const(42)
ic(c42)
c1 = Const(1)
ic(c1)
c43 = c1 + c42
ic(c43)
vva = VarVal("a")
ic(vva)
va = IntVal(vva)
ic(va)
ic(va.is_const)
try:
int(va)
assert False
except:
pass
vunk = va + c42
ic(vunk)
ic(vunk.is_const)
v42 = IntVal(c42)
ic(v42)
ic(int(v42))
ic(v42.is_const)
v1 = IntVal(c1)
ic(v1)
ic(int(v1))
ic(v1.is_const)
v43 = v1 + v42
ic(v43)
try:
ic(int(v43))
assert False
except ValueError:
pass
ic(v43.is_const)
v42_2 = IntVal(v42)
ic(v42_2)
```
#### File: pypcode-emu/scripts/z3_sandbox.py
```python
import nativetypes as ntypes
import z3
def scarry_ntypes(in1, in2):
res = in1 + in2
a = (in1 >> (in1.b - 1)) & 1
b = (in2 >> (in2.b - 1)) & 1
r = (res >> (res.b - 1)) & 1
r ^= a
a ^= b
a ^= 1
r &= a
return r
def test_ntypes():
nt_127 = ntypes.int8(127)
nt_1 = ntypes.int8(1)
nt_n1 = ntypes.int8(-1)
nt_n128 = ntypes.int8(-128)
r = scarry_ntypes(nt_127, nt_1)
print(r)
r = scarry_ntypes(nt_n128, nt_n1)
print(r)
r = scarry_ntypes(nt_n128, nt_1)
print(r)
def scarry_z3(in1, in2):
res = in1 + in2
a = (in1 >> (in1.size() - 1)) & 1
b = (in2 >> (in2.size() - 1)) & 1
r = (res >> (res.size() - 1)) & 1
r ^= a
a ^= b
a ^= 1
r &= a
return r
def test_z3():
nt_127 = z3.BitVecVal(127, 8)
nt_1 = z3.BitVecVal(1, 8)
nt_n1 = z3.BitVecVal(-1, 8)
nt_n128 = z3.BitVecVal(-128, 8)
r = scarry_z3(nt_127, nt_1)
print(r)
print()
print(z3.simplify(r))
print("\n\n")
r = scarry_z3(nt_n128, nt_n1)
print(r)
print()
print(z3.simplify(r))
print("\n\n")
r = scarry_z3(nt_n128, nt_1)
print(r)
print()
print(z3.simplify(r))
print("\n\n")
in1 = z3.BitVec("in1", 8)
in2 = z3.BitVec("in2", 8)
r = scarry_z3(in1, in2)
print(r)
print()
print(z3.simplify(r))
print("\n\n")
test_ntypes()
print("\n\n\n====================\n\n\n")
test_z3()
```
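Both `scarry` variants encode the same rule: signed addition overflows exactly when the operands share a sign bit and the result's sign differs. A minimal sketch on plain Python ints (the 8-bit width matches the tests above; everything else is an assumption):
```python
def scarry_int8(x: int, y: int) -> int:
    res = (x + y) & 0xFF
    a, b, r = (x >> 7) & 1, (y >> 7) & 1, (res >> 7) & 1
    # overflow iff in1 and in2 have the same sign and res flips it
    return (r ^ a) & ((a ^ b) ^ 1)

assert scarry_int8(127, 1) == 1      # 127 + 1 overflows int8
assert scarry_int8(0x80, 0xFF) == 1  # -128 + (-1) overflows
assert scarry_int8(0x80, 0x01) == 0  # -128 + 1 does not
```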
|
{
"source": "jevonearth/b2bua",
"score": 2
}
|
#### File: b2bua/sippy/Cli_session.py
```python
from twisted.internet.protocol import Protocol
import sys, traceback
class Cli_session(Protocol):
command_cb = None
rbuffer = None
wbuffer = None
cb_busy = False
expect_lf = True
raddr = None
def __init__(self):
self.rbuffer = ''
self.wbuffer = ''
#def connectionMade(self):
# print self.transport.getPeer()
# self.transport.loseConnection()
def dataReceived(self, data):
#print 'Cli_session::dataReceived', self, data
if len(data) == 0:
return
self.rbuffer += data
self.pump_rxdata()
def pump_rxdata(self):
while self.rbuffer != None and len(self.rbuffer) > 0:
if self.cb_busy:
return
if self.rbuffer.find('\n') == -1 and self.expect_lf:
return
parts = self.rbuffer.split('\n', 1)
if len(parts) == 1:
parts = (parts[0], '')
cmd, self.rbuffer = parts
cmd = cmd.strip()
if len(cmd) > 0:
try:
self.cb_busy = self.command_cb(self, cmd)
except:
print 'Cli_session: unhandled exception when processing incoming data'
print '-' * 70
traceback.print_exc(file = sys.stdout)
print '-' * 70
def done(self):
self.cb_busy = False
self.pump_rxdata()
def send(self, data):
if isinstance(data, unicode):
data = data.encode('ascii')
return self.transport.write(data)
def close(self):
return self.transport.loseConnection()
```
#### File: b2bua/sippy/Rtp_proxy_client_local.py
```python
from Rtp_proxy_client_stream import Rtp_proxy_client_stream
import socket
class Rtp_proxy_client_local(Rtp_proxy_client_stream):
is_local = True
def __init__(self, global_config, address = '/var/run/rtpproxy.sock', \
bind_address = None, nworkers = 1):
Rtp_proxy_client_stream.__init__(self, global_config = global_config, \
address = address, bind_address = bind_address, nworkers = nworkers, \
        family = socket.AF_UNIX)
if __name__ == '__main__':
from twisted.internet import reactor
def display(*args):
print args
reactor.crash()
r = Rtp_proxy_client_local({'_sip_address':'1.2.3.4'})
r.send_command('VF 123456', display, 'abcd')
reactor.run(installSignalHandlers = 1)
r.shutdown()
```
#### File: b2bua/sippy/SdpMedia.py
```python
class MTAudio(object):
pass
class MTOther(object):
pass
class SdpMedia(object):
type = None
stype = None
port = None
transport = None
formats = None
def __init__(self, body = None, cself = None):
if body != None:
params = body.split()
self.stype = params[0]
if self.stype.lower() == 'audio':
self.type = MTAudio
else:
self.type = MTOther
self.port = int(params[1])
self.transport = params[2]
if self.type == MTAudio:
self.formats = [int(x) for x in params[3:]]
else:
self.formats = params[3:]
else:
self.type = cself.type
self.stype = cself.stype
self.port = cself.port
self.transport = cself.transport
self.formats = cself.formats[:]
def __str__(self):
rval = '%s %d %s' % (self.stype, self.port, self.transport)
if self.type == MTAudio:
for format in self.formats:
rval += ' %d' % format
else:
for format in self.formats:
rval += ' %s' % format
return rval
def localStr(self, local_addr = None, local_port = None):
return str(self)
def getCopy(self):
return SdpMedia(cself = self)
```
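A round-trip sketch of `SdpMedia` parsing and re-serializing an audio m-line (the SDP line is made up):
```python
m = SdpMedia('audio 49170 RTP/AVP 0 8 97')
assert m.type == MTAudio
assert m.formats == [0, 8, 97]
assert str(m) == 'audio 49170 RTP/AVP 0 8 97'
assert m.getCopy().formats == [0, 8, 97]
```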
#### File: b2bua/sippy/SipWWWAuthenticate.py
```python
from random import random
from hashlib import md5
from time import time
from SipGenericHF import SipGenericHF
from SipConf import SipConf
class SipWWWAuthenticate(SipGenericHF):
hf_names = ('www-authenticate',)
realm = None
nonce = None
def __init__(self, body = None, realm = None, nonce = None):
SipGenericHF.__init__(self, body)
if body != None:
return
self.parsed = True
if nonce == None:
ctime = time()
nonce = md5(str((random() * 1000000000L) + ctime)).hexdigest() + hex(int(ctime))[2:]
if realm == None:
realm = SipConf.my_address
self.realm = realm
self.nonce = nonce
def parse(self):
parts = self.body.split(' ', 1)[1].strip().split('"')
if len(parts) % 2 != 0 and len(parts[-1]) == 0:
parts.pop()
while len(parts) > 0:
parts1 = [x.strip().split('=', 1) for x in parts.pop(0).strip(' ,=').split(',')]
if len(parts) > 0:
parts1[-1].append(parts.pop(0))
for name, value in parts1:
if name == 'realm':
self.realm = value
elif name == 'nonce':
self.nonce = value
self.parsed = True
def __str__(self):
return self.localStr()
def localStr(self, local_addr = None, local_port = None):
if not self.parsed:
return self.body
if local_addr != None and 'my' in dir(self.realm):
return 'Digest realm="%s",nonce="%s"' % (local_addr, self.nonce)
return 'Digest realm="%s",nonce="%s"' % (self.realm, self.nonce)
def getCopy(self):
if not self.parsed:
return self.__class__(self.body)
return self.__class__(realm = self.realm, nonce = self.nonce)
def getCanName(self, name, compact = False):
return 'WWW-Authenticate'
def getRealm(self):
return self.realm
def getNonce(self):
return self.nonce
```
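`parse` walks the quoted sections of a Digest challenge; a round-trip sketch (the header value is made up; Python 2, like the rest of this repo):
```python
h = SipWWWAuthenticate(body = 'Digest realm="example.org",nonce="abc123"')
h.parse()
assert h.getRealm() == 'example.org'
assert h.getNonce() == 'abc123'
assert h.localStr() == 'Digest realm="example.org",nonce="abc123"'
```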
#### File: b2bua/tests/MyConfigParser_tests.py
```python
import unittest
from sippy.MyConfigParser import MyConfigParser
class TestMyConfigParser(unittest.TestCase):
    def testParamHandling(self):
m = MyConfigParser()
m['_foo'] = 'bar'
m['b2bua_socket'] = 'bar1'
m['acct_enable'] = True
m['auth_enable'] = 'False'
self.assertTrue(m.has_key('_foo'))
self.assertEqual(m['_foo'], 'bar')
self.assertEqual(m['b2bua_socket'], 'bar1')
self.assertEqual(m.get('_foo'), 'bar')
self.assertEqual(m.get('b2bua_socket'), 'bar1')
self.assertEqual(m.get('general', 'b2bua_socket'), 'bar1')
self.assertTrue(m.get('acct_enable'))
self.assertFalse(m.get('auth_enable'))
m.check_and_set('keepalive_ans', '15')
self.assertEqual(m['keepalive_ans'], 15)
self.assertIsInstance(m.get('keepalive_ans'), int)
m.check_and_set('pass_header', 'a')
m.check_and_set('pass_header', 'b')
self.assertEqual(m['pass_headers'], 'a,b')
self.assertEqual(m['_pass_headers'][0], 'a')
self.assertEqual(m['_pass_headers'][1], 'b')
m.check_and_set('accept_ips', '1.2.3.4, 5.6.7.8')
self.assertEqual(m['_accept_ips'][0], '1.2.3.4')
self.assertEqual(m['_accept_ips'][1], '5.6.7.8')
self.assertEqual(m.check_and_set('hrtb_ival', "1"), None)
self.assertEqual(m['hrtb_ival'], 1)
self.assertEqual(m.get('hrtb_ival'), 1)
# Test that get method returns correct type
self.assertIsInstance(m.get('hrtb_ival'), float)
# Test that access by index returns correct type
self.assertIsInstance(m['hrtb_ival'], float)
with self.assertRaises(KeyError):
m.check_and_set('non_existant_key', "1")
    def testSipPortValidation(self):
m = MyConfigParser()
with self.assertRaises(ValueError):
m.check_and_set('sip_port', "-1")
with self.assertRaises(ValueError):
m.check_and_set('sip_port', "0")
with self.assertRaises(ValueError):
m.check_and_set('sip_port', "65536")
        self.assertEqual(m.check_and_set('sip_port', "1"), None)
        self.assertEqual(m.check_and_set('sip_port', "65535"), None)
    def testMaxCreditTime(self):
m = MyConfigParser()
with self.assertRaises(ValueError):
m.check_and_set('max_credit_time', "-1")
    def testMaxKeepAlive(self):
m = MyConfigParser()
with self.assertRaises(ValueError):
m.check_and_set('keepalive_ans', "-1")
```
|
{
"source": "jevonxu/CSDI",
"score": 2
}
|
#### File: jevonxu/CSDI/diff_models.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
def get_torch_trans(heads=8, layers=1, channels=64):
encoder_layer = nn.TransformerEncoderLayer(
d_model=channels, nhead=heads, dim_feedforward=64, activation="gelu"
)
return nn.TransformerEncoder(encoder_layer, num_layers=layers)
def Conv1d_with_init(in_channels, out_channels, kernel_size):
layer = nn.Conv1d(in_channels, out_channels, kernel_size)
nn.init.kaiming_normal_(layer.weight)
return layer
class DiffusionEmbedding(nn.Module):
def __init__(self, num_steps, embedding_dim=128, projection_dim=None):
super().__init__()
if projection_dim is None:
projection_dim = embedding_dim
self.register_buffer(
"embedding",
            self._build_embedding(num_steps, embedding_dim // 2),
persistent=False,
)
self.projection1 = nn.Linear(embedding_dim, projection_dim)
self.projection2 = nn.Linear(projection_dim, projection_dim)
def forward(self, diffusion_step):
x = self.embedding[diffusion_step]
x = self.projection1(x)
x = F.silu(x)
x = self.projection2(x)
x = F.silu(x)
return x
def _build_embedding(self, num_steps, dim=64):
steps = torch.arange(num_steps).unsqueeze(1) # (T,1)
frequencies = 10.0 ** (torch.arange(dim) / (dim - 1) * 4.0).unsqueeze(0) # (1,dim)
table = steps * frequencies # (T,dim)
table = torch.cat([torch.sin(table), torch.cos(table)], dim=1) # (T,dim*2)
return table
class diff_CSDI(nn.Module):
def __init__(self, config, inputdim=2):
super().__init__()
self.channels = config["channels"]
self.diffusion_embedding = DiffusionEmbedding(
num_steps=config["num_steps"],
embedding_dim=config["diffusion_embedding_dim"],
)
self.input_projection = Conv1d_with_init(inputdim, self.channels, 1)
self.output_projection1 = Conv1d_with_init(self.channels, self.channels, 1)
self.output_projection2 = Conv1d_with_init(self.channels, 1, 1)
nn.init.zeros_(self.output_projection2.weight)
self.residual_layers = nn.ModuleList(
[
ResidualBlock(
side_dim=config["side_dim"],
channels=self.channels,
diffusion_embedding_dim=config["diffusion_embedding_dim"],
nheads=config["nheads"],
)
for _ in range(config["layers"])
]
)
def forward(self, x, cond_info, diffusion_step):
B, inputdim, K, L = x.shape
x = x.reshape(B, inputdim, K * L)
x = self.input_projection(x)
x = F.relu(x)
x = x.reshape(B, self.channels, K, L)
diffusion_emb = self.diffusion_embedding(diffusion_step)
skip = []
for layer in self.residual_layers:
x, skip_connection = layer(x, cond_info, diffusion_emb)
skip.append(skip_connection)
x = torch.sum(torch.stack(skip), dim=0) / math.sqrt(len(self.residual_layers))
x = x.reshape(B, self.channels, K * L)
x = self.output_projection1(x) # (B,channel,K*L)
x = F.relu(x)
x = self.output_projection2(x) # (B,1,K*L)
x = x.reshape(B, K, L)
return x
class ResidualBlock(nn.Module):
def __init__(self, side_dim, channels, diffusion_embedding_dim, nheads):
super().__init__()
self.diffusion_projection = nn.Linear(diffusion_embedding_dim, channels)
self.cond_projection = Conv1d_with_init(side_dim, 2 * channels, 1)
self.mid_projection = Conv1d_with_init(channels, 2 * channels, 1)
self.output_projection = Conv1d_with_init(channels, 2 * channels, 1)
self.time_layer = get_torch_trans(heads=nheads, layers=1, channels=channels)
self.feature_layer = get_torch_trans(heads=nheads, layers=1, channels=channels)
def forward_time(self, y, base_shape):
B, channel, K, L = base_shape
if L == 1:
return y
y = y.reshape(B, channel, K, L).permute(0, 2, 1, 3).reshape(B * K, channel, L)
y = self.time_layer(y.permute(2, 0, 1)).permute(1, 2, 0)
y = y.reshape(B, K, channel, L).permute(0, 2, 1, 3).reshape(B, channel, K * L)
return y
def forward_feature(self, y, base_shape):
B, channel, K, L = base_shape
if K == 1:
return y
y = y.reshape(B, channel, K, L).permute(0, 3, 1, 2).reshape(B * L, channel, K)
y = self.feature_layer(y.permute(2, 0, 1)).permute(1, 2, 0)
y = y.reshape(B, L, channel, K).permute(0, 2, 3, 1).reshape(B, channel, K * L)
return y
def forward(self, x, cond_info, diffusion_emb):
B, channel, K, L = x.shape
base_shape = x.shape
x = x.reshape(B, channel, K * L)
diffusion_emb = self.diffusion_projection(diffusion_emb).unsqueeze(-1) # (B,channel,1)
y = x + diffusion_emb
y = self.forward_time(y, base_shape)
y = self.forward_feature(y, base_shape) # (B,channel,K*L)
y = self.mid_projection(y) # (B,2*channel,K*L)
_, cond_dim, _, _ = cond_info.shape
cond_info = cond_info.reshape(B, cond_dim, K * L)
cond_info = self.cond_projection(cond_info) # (B,2*channel,K*L)
y = y + cond_info
gate, filter = torch.chunk(y, 2, dim=1)
y = torch.sigmoid(gate) * torch.tanh(filter) # (B,channel,K*L)
y = self.output_projection(y)
residual, skip = torch.chunk(y, 2, dim=1)
x = x.reshape(base_shape)
residual = residual.reshape(base_shape)
skip = skip.reshape(base_shape)
return (x + residual) / math.sqrt(2.0), skip
```
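A shape smoke test for `diff_CSDI`; every config value below is an illustrative assumption, not taken from the repo's config files:
```python
import torch

config = {"channels": 64, "num_steps": 50, "diffusion_embedding_dim": 128,
          "side_dim": 145, "nheads": 8, "layers": 4}
model = diff_CSDI(config, inputdim=2)
x = torch.randn(2, 2, 36, 48)            # (B, inputdim, K, L)
side_info = torch.randn(2, 145, 36, 48)  # (B, side_dim, K, L)
t = torch.tensor([3, 7])                 # one diffusion step per batch element
assert model(x, side_info, t).shape == (2, 36, 48)
```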
#### File: jevonxu/CSDI/main_model.py
```python
import numpy as np
import torch
import torch.nn as nn
from diff_models import diff_CSDI
class CSDI_base(nn.Module):
def __init__(self, target_dim, config, device):
super().__init__()
self.device = device
self.target_dim = target_dim
self.emb_time_dim = config["model"]["timeemb"]
self.emb_feature_dim = config["model"]["featureemb"]
self.is_unconditional = config["model"]["is_unconditional"]
self.target_strategy = config["model"]["target_strategy"]
self.emb_total_dim = self.emb_time_dim + self.emb_feature_dim
if self.is_unconditional == False:
self.emb_total_dim += 1 # for conditional mask
self.embed_layer = nn.Embedding(
num_embeddings=self.target_dim, embedding_dim=self.emb_feature_dim
)
config_diff = config["diffusion"]
config_diff["side_dim"] = self.emb_total_dim
input_dim = 1 if self.is_unconditional == True else 2
self.diffmodel = diff_CSDI(config_diff, input_dim)
# parameters for diffusion models
self.num_steps = config_diff["num_steps"]
if config_diff["schedule"] == "quad":
self.beta = np.linspace(
config_diff["beta_start"] ** 0.5, config_diff["beta_end"] ** 0.5, self.num_steps
) ** 2
elif config_diff["schedule"] == "linear":
self.beta = np.linspace(
config_diff["beta_start"], config_diff["beta_end"], self.num_steps
)
self.alpha_hat = 1 - self.beta
self.alpha = np.cumprod(self.alpha_hat)
self.alpha_torch = torch.tensor(self.alpha).float().to(self.device).unsqueeze(1).unsqueeze(1)
def time_embedding(self, pos, d_model=128):
pe = torch.zeros(pos.shape[0], pos.shape[1], d_model).to(self.device)
position = pos.unsqueeze(2)
div_term = 1 / torch.pow(
10000.0, torch.arange(0, d_model, 2).to(self.device) / d_model
)
pe[:, :, 0::2] = torch.sin(position * div_term)
pe[:, :, 1::2] = torch.cos(position * div_term)
return pe
def get_randmask(self, observed_mask):
rand_for_mask = torch.rand_like(observed_mask) * observed_mask
rand_for_mask = rand_for_mask.reshape(len(rand_for_mask), -1)
for i in range(len(observed_mask)):
sample_ratio = np.random.rand() # missing ratio
num_observed = observed_mask[i].sum().item()
num_masked = round(num_observed * sample_ratio)
rand_for_mask[i][rand_for_mask[i].topk(num_masked).indices] = -1
cond_mask = (rand_for_mask > 0).reshape(observed_mask.shape).float()
return cond_mask
def get_hist_mask(self, observed_mask, for_pattern_mask=None):
if for_pattern_mask is None:
for_pattern_mask = observed_mask
if self.target_strategy == "mix":
rand_mask = self.get_randmask(observed_mask)
cond_mask = observed_mask.clone()
for i in range(len(cond_mask)):
mask_choice = np.random.rand()
if self.target_strategy == "mix" and mask_choice > 0.5:
cond_mask[i] = rand_mask[i]
else: # draw another sample for histmask (i-1 corresponds to another sample)
cond_mask[i] = cond_mask[i] * for_pattern_mask[i - 1]
return cond_mask
def get_side_info(self, observed_tp, cond_mask):
B, K, L = cond_mask.shape
time_embed = self.time_embedding(observed_tp, self.emb_time_dim) # (B,L,emb)
time_embed = time_embed.unsqueeze(2).expand(-1, -1, K, -1)
feature_embed = self.embed_layer(
torch.arange(self.target_dim).to(self.device)
) # (K,emb)
feature_embed = feature_embed.unsqueeze(0).unsqueeze(0).expand(B, L, -1, -1)
side_info = torch.cat([time_embed, feature_embed], dim=-1) # (B,L,K,*)
side_info = side_info.permute(0, 3, 2, 1) # (B,*,K,L)
if self.is_unconditional == False:
side_mask = cond_mask.unsqueeze(1) # (B,1,K,L)
side_info = torch.cat([side_info, side_mask], dim=1)
return side_info
def calc_loss_valid(
self, observed_data, cond_mask, observed_mask, side_info, is_train
):
loss_sum = 0
for t in range(self.num_steps): # calculate loss for all t
loss = self.calc_loss(
observed_data, cond_mask, observed_mask, side_info, is_train, set_t=t
)
loss_sum += loss.detach()
return loss_sum / self.num_steps
def calc_loss(
self, observed_data, cond_mask, observed_mask, side_info, is_train, set_t=-1
):
B, K, L = observed_data.shape
if is_train != 1: # for validation
t = (torch.ones(B) * set_t).long().to(self.device)
else:
t = torch.randint(0, self.num_steps, [B]).to(self.device)
current_alpha = self.alpha_torch[t] # (B,1,1)
noise = torch.randn_like(observed_data)
noisy_data = (current_alpha ** 0.5) * observed_data + (1.0 - current_alpha) ** 0.5 * noise
total_input = self.set_input_to_diffmodel(noisy_data, observed_data, cond_mask)
predicted = self.diffmodel(total_input, side_info, t) # (B,K,L)
target_mask = observed_mask - cond_mask
residual = (noise - predicted) * target_mask
num_eval = target_mask.sum()
loss = (residual ** 2).sum() / (num_eval if num_eval > 0 else 1)
return loss
def set_input_to_diffmodel(self, noisy_data, observed_data, cond_mask):
if self.is_unconditional == True:
total_input = noisy_data.unsqueeze(1) # (B,1,K,L)
else:
cond_obs = (cond_mask * observed_data).unsqueeze(1)
noisy_target = ((1 - cond_mask) * noisy_data).unsqueeze(1)
total_input = torch.cat([cond_obs, noisy_target], dim=1) # (B,2,K,L)
return total_input
def impute(self, observed_data, cond_mask, side_info, n_samples):
B, K, L = observed_data.shape
imputed_samples = torch.zeros(B, n_samples, K, L).to(self.device)
for i in range(n_samples):
# generate noisy observation for unconditional model
if self.is_unconditional == True:
noisy_obs = observed_data
noisy_cond_history = []
for t in range(self.num_steps):
noise = torch.randn_like(noisy_obs)
noisy_obs = (self.alpha_hat[t] ** 0.5) * noisy_obs + self.beta[t] ** 0.5 * noise
noisy_cond_history.append(noisy_obs * cond_mask)
current_sample = torch.randn_like(observed_data)
for t in range(self.num_steps - 1, -1, -1):
if self.is_unconditional == True:
diff_input = cond_mask * noisy_cond_history[t] + (1.0 - cond_mask) * current_sample
diff_input = diff_input.unsqueeze(1) # (B,1,K,L)
else:
cond_obs = (cond_mask * observed_data).unsqueeze(1)
noisy_target = ((1 - cond_mask) * current_sample).unsqueeze(1)
diff_input = torch.cat([cond_obs, noisy_target], dim=1) # (B,2,K,L)
predicted = self.diffmodel(diff_input, side_info, torch.tensor([t]).to(self.device))
coeff1 = 1 / self.alpha_hat[t] ** 0.5
coeff2 = (1 - self.alpha_hat[t]) / (1 - self.alpha[t]) ** 0.5
current_sample = coeff1 * (current_sample - coeff2 * predicted)
if t > 0:
noise = torch.randn_like(current_sample)
sigma = (
(1.0 - self.alpha[t - 1]) / (1.0 - self.alpha[t]) * self.beta[t]
) ** 0.5
current_sample += sigma * noise
imputed_samples[:, i] = current_sample.detach()
return imputed_samples
def forward(self, batch, is_train=1):
(
observed_data,
observed_mask,
observed_tp,
gt_mask,
for_pattern_mask,
_,
) = self.process_data(batch)
if is_train == 0:
cond_mask = gt_mask
elif self.target_strategy != "random":
cond_mask = self.get_hist_mask(
observed_mask, for_pattern_mask=for_pattern_mask
)
else:
cond_mask = self.get_randmask(observed_mask)
side_info = self.get_side_info(observed_tp, cond_mask)
loss_func = self.calc_loss if is_train == 1 else self.calc_loss_valid
return loss_func(observed_data, cond_mask, observed_mask, side_info, is_train)
def evaluate(self, batch, n_samples):
(
observed_data,
observed_mask,
observed_tp,
gt_mask,
_,
cut_length,
) = self.process_data(batch)
with torch.no_grad():
cond_mask = gt_mask
target_mask = observed_mask - cond_mask
side_info = self.get_side_info(observed_tp, cond_mask)
samples = self.impute(observed_data, cond_mask, side_info, n_samples)
for i in range(len(cut_length)): # to avoid double evaluation
target_mask[i, ..., 0 : cut_length[i].item()] = 0
return samples, observed_data, target_mask, observed_mask, observed_tp
class CSDI_PM25(CSDI_base):
def __init__(self, config, device, target_dim=36):
super(CSDI_PM25, self).__init__(target_dim, config, device)
def process_data(self, batch):
observed_data = batch["observed_data"].to(self.device).float()
observed_mask = batch["observed_mask"].to(self.device).float()
observed_tp = batch["timepoints"].to(self.device).float()
gt_mask = batch["gt_mask"].to(self.device).float()
cut_length = batch["cut_length"].to(self.device).long()
for_pattern_mask = batch["hist_mask"].to(self.device).float()
observed_data = observed_data.permute(0, 2, 1)
observed_mask = observed_mask.permute(0, 2, 1)
gt_mask = gt_mask.permute(0, 2, 1)
for_pattern_mask = for_pattern_mask.permute(0, 2, 1)
return (
observed_data,
observed_mask,
observed_tp,
gt_mask,
for_pattern_mask,
cut_length,
)
class CSDI_Physio(CSDI_base):
def __init__(self, config, device, target_dim=35):
super(CSDI_Physio, self).__init__(target_dim, config, device)
def process_data(self, batch):
observed_data = batch["observed_data"].to(self.device).float()
observed_mask = batch["observed_mask"].to(self.device).float()
observed_tp = batch["timepoints"].to(self.device).float()
gt_mask = batch["gt_mask"].to(self.device).float()
observed_data = observed_data.permute(0, 2, 1)
observed_mask = observed_mask.permute(0, 2, 1)
gt_mask = gt_mask.permute(0, 2, 1)
cut_length = torch.zeros(len(observed_data)).long().to(self.device)
for_pattern_mask = observed_mask
return (
observed_data,
observed_mask,
observed_tp,
gt_mask,
for_pattern_mask,
cut_length,
)
```
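`calc_loss` corrupts data with the usual DDPM closed form x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise; a standalone sketch of the "quad" schedule (beta bounds and step count are assumed values):
```python
import numpy as np

num_steps, beta_start, beta_end = 50, 1e-4, 0.5
beta = np.linspace(beta_start ** 0.5, beta_end ** 0.5, num_steps) ** 2  # "quad"
alpha = np.cumprod(1.0 - beta)                                          # alpha-bar
x0, noise, t = np.random.randn(36, 48), np.random.randn(36, 48), 10
x_t = (alpha[t] ** 0.5) * x0 + ((1.0 - alpha[t]) ** 0.5) * noise
```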
|
{
"source": "JevonYang/stock-technical-analysis",
"score": 2
}
|
#### File: stock-technical-analysis/Statistics/financial_analysis.py
```python
import akshare as ak
import pandas as pd
import matplotlib.pyplot as plt
from pandas.tseries.offsets import YearEnd
import threading
import os
from utils.config import project_dir
CACHE_DIR = "../tmp/finance_reports"
def cache_dir() -> str:
return os.path.join(os.path.join(project_dir(), 'tmp', 'finance_reports'))
def error_file_dir() -> str:
return os.path.join(os.path.join(project_dir(), 'tmp', 'errors', 'finance.txt'))
def convert_float(value) -> float:
try:
return float(value)
except Exception:
return 0
def finance_info(stock_code) -> pd.DataFrame:
data_file = os.path.join(cache_dir(), stock_code + '.csv')
try:
stock_financial_analysis_indicator_df = pd.read_csv(data_file, index_col=0)
except FileNotFoundError:
stock_financial_analysis_indicator_df = ak.stock_financial_analysis_indicator(stock=stock_code)
stock_financial_analysis_indicator_df.to_csv(data_file)
stock_financial_analysis_indicator_df.index = pd.to_datetime(stock_financial_analysis_indicator_df.index)
stock_financial_analysis_indicator_df['净资产报酬率(%)'] = stock_financial_analysis_indicator_df['净资产报酬率(%)'].apply(
convert_float)
stock_financial_analysis_indicator_df['资产报酬率(%)'] = stock_financial_analysis_indicator_df['资产报酬率(%)'].apply(
convert_float)
stock_financial_analysis_indicator_df['总资产净利润率(%)'] = stock_financial_analysis_indicator_df['总资产净利润率(%)'].apply(
convert_float)
stock_financial_analysis_indicator_df['主营业务利润率(%)'] = stock_financial_analysis_indicator_df['主营业务利润率(%)'].apply(
convert_float)
stock_financial_analysis_indicator_df['净利润增长率(%)'] = stock_financial_analysis_indicator_df['净利润增长率(%)'].apply(
convert_float)
stock_financial_analysis_indicator_df['净资产增长率(%)'] = stock_financial_analysis_indicator_df['净资产增长率(%)'].apply(
convert_float)
stock_financial_analysis_indicator_df['总资产增长率(%)'] = stock_financial_analysis_indicator_df['总资产增长率(%)'].apply(
convert_float)
return stock_financial_analysis_indicator_df
def financial_info_plot(stock_code, y=None):
if y is None:
y = ['净资产报酬率(%)', '资产报酬率(%)', '净资产增长率(%)', '总资产增长率(%)']
finance_info(stock_code=stock_code)[lambda x: x.index.month == 12].head(5).plot.bar(use_index=True, y=y)
plt.show()
def save_finance_reports(classify_index_code):
sw_index_df = ak.sw_index_cons(index_code=classify_index_code)
for index, stock in sw_index_df.iterrows():
try:
finance_info(stock['stock_code'])
except Exception:
            with open(error_file_dir(), 'a') as f:
                f.write(stock['stock_code'] + '\n')
"""
指数代码 指数名称
0 801010 农林牧渔
1 801020 采掘
2 801030 化工
3 801040 钢铁
4 801050 有色金属
5 801080 电子
6 801110 家用电器
7 801120 食品饮料
8 801130 纺织服装
9 801140 轻工制造
10 801150 医药生物
11 801160 公用事业
12 801170 交通运输
13 801180 房地产
14 801200 商业贸易
15 801210 休闲服务
16 801230 综合
17 801710 建筑材料
18 801720 建筑装饰
19 801730 电气设备
20 801740 国防军工
21 801750 计算机
22 801760 传媒
23 801770 通信
24 801780 银行
25 801790 非银金融
26 801880 汽车
27 801890 机械设备
"""
if __name__ == '__main__':
df = finance_info(stock_code="600760")
df['date'] = df.index
# print(df[lambda x: x.index.month == 12].head(5)['净资产收益率(%)'])
# print(df[lambda x: x.index.month == 12].head(5)['资产报酬率(%)'])
# print(df[lambda x: x.index.month == 12].head(5)['净资产增长率(%)'])
# print(df[lambda x: x.index.month == 12].head(5)['总资产增长率(%)'])
# df = df[['净资产报酬率(%)']]
# print(df)
# df[lambda x: x.index.month == 12].head(5).plot.bar(use_index=True)
# plt.show()
import seaborn as sns
df = df[lambda x: x.index.month == 12].head(5)[['date', '净资产报酬率(%)', '资产报酬率(%)', '净资产增长率(%)', '总资产增长率(%)']]
df.reset_index(drop=True, inplace=True)
print(df)
df.plot.bar(use_index=False, x="date", y=['净资产报酬率(%)', '资产报酬率(%)', '净资产增长率(%)', '总资产增长率(%)'])
# ax = sns.lineplot(x="date", y="净资产报酬率(%)", data=df)
plt.show()
# ax.show()
# sw_index_spot_df = ak.sw_index_spot()
#
# for index, row in sw_index_spot_df.iterrows():
# save_finance_reports(classify_index_code=row['指数代码'])
```
#### File: stock-technical-analysis/utils/config.py
```python
import configparser
import os
cf = configparser.ConfigParser()
cf.read("../config.ini")
def tushare_token():
return cf.get("tushare", "token")
def databases():
db_type = cf.get("database", "type")
host = cf.get("database", "host")
port = cf.get("database", "port")
user = cf.get("database", "user")
password = cf.get("database", "password")
db = cf.get("database", "db")
charset = cf.get("database", "charset")
return '%s://%s:%s@%s:%s/%s?charset=%s' % (db_type, user, password, host, port, db, charset)
def project_dir(project_name=None):
"""
获取当前项目根路径
:param project_name:
:return: 根路径
"""
PROJECT_NAME = 'stock-technical-analysis' if project_name is None else project_name
project_path = os.path.abspath(os.path.dirname(__file__))
root_path = project_path[:project_path.find("{}\\".format(PROJECT_NAME)) + len("{}\\".format(PROJECT_NAME))]
return root_path
if __name__ == '__main__':
print(project_dir())
```
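For illustration, `databases()` assembles a SQLAlchemy URL from the `[database]` section; a self-contained sketch with placeholder values:
```python
import configparser

sample = """
[database]
type = mysql+pymysql
host = localhost
port = 3306
user = stock
password = secret
db = stocks
charset = utf8
"""
cf = configparser.ConfigParser()
cf.read_string(sample)
url = '%s://%s:%s@%s:%s/%s?charset=%s' % tuple(
    cf.get("database", k) for k in
    ("type", "user", "password", "host", "port", "db", "charset"))
assert url == 'mysql+pymysql://stock:secret@localhost:3306/stocks?charset=utf8'
```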
#### File: stock-technical-analysis/utils/save_db.py
```python
from sqlalchemy import create_engine
from utils import config
engine = create_engine(config.databases(), encoding='utf8')
def save_to_db(dataframe, table_name, index=False):
dataframe.to_sql(table_name, con=engine, if_exists='append', index=index)
```
|
{
"source": "JevyanJ/awstriggers",
"score": 2
}
|
#### File: awstriggers/awstriggers/exceptions.py
```python
class WrongInputException(Exception):
msg = 'Wrong trigger. This is not a {}.'
def __init__(self, trigger_type):
super(WrongInputException, self).__init__(
self.msg.format(trigger_type)
)
```
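Usage sketch (the trigger-type string is arbitrary):
```python
try:
    raise WrongInputException('SQSTrigger')
except WrongInputException as e:
    assert str(e) == 'Wrong trigger. This is not a SQSTrigger.'
```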
|
{
"source": "JevyanJ/Streamlabs-Chatbox-Currency-Ranking",
"score": 2
}
|
#### File: JevyanJ/Streamlabs-Chatbox-Currency-Ranking/Currency-Ranking_StreamlabsSystem.py
```python
import os
import re
import codecs
import json
from System.Collections.Generic import List
# ---------------------------
# [Required] Script Information
# ---------------------------
ScriptName = "Currerncy Ranking"
Website = "twitch.tv/jevyanj"
Description = "Currency Ranking"
Creator = "JevyanJ"
Version = "1.0.0"
# ---------------------------
# Define Global Variables
# ---------------------------
settings = {}
configFile = "config.json"
command = "!test"
output = "web/index.html"
templates = {
"index": "templates/index.template",
"category": "templates/category.template"
}
def Init():
global settings
# Load settings
path = os.path.dirname(__file__)
try:
with codecs.open(
os.path.join(path, configFile),
encoding='utf-8-sig',
mode='r') as file:
settings = json.load(file, encoding='utf-8-sig')
except Exception:
        settings = {
            "black_list": "",
            "black_list_regex": "",
            "min_time": 0
        }
process()
return
def Execute(data):
# Only uncomment to launch updating with a command
# if data.GetParam(0) == '!currency-ranking-test':
# process()
# Parent.SendStreamMessage('Ranking file updated')
return
def ReloadSettings(jsonData):
Init()
return
def OpenOutput():
path_out = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"web").replace("\\", "/")
location = os.path.join(os.path.dirname(__file__), "output.txt")
with open(location, 'w') as f:
f.write(path_out)
os.startfile(location)
return
def Tick():
process()
return
###############################################################################
def log(message):
Parent.Log(command, str(message))
return
def process():
"""Main process
"""
users = getStreamlabsUsers()
ranking = prepareRanking(users)
order = getOrderRanking(users)
writeRanking(ranking, order)
def equalRanking(ranking, other):
"""Compare two rankings
Args:
ranking (List<CurrencyUsers>)
other (List<CurrencyUsers>)
Return:
Boolean. True if both ranking are equals.
"""
if not other or not ranking:
return False
if len(ranking) != len(other):
return False
for user in ranking:
user_other = next(
(item for item in other if item.UserId == user.UserId), None)
if not user_other or user_other.TimeWatched != user.TimeWatched:
return False
return True
def getStreamlabsUsers():
"""Return all users with streamlabs format
Return:
List<CurrencyUsers>
CurrencyUsers:
string UserId
string UserName
long Points
long TimeWatched (In Minutes)
string Rank
"""
top = Parent.GetTopHours(-1)
users = top.keys()
mylist = List[str](users)
users = Parent.GetCurrencyUsers(mylist)
# Filter list
black_list = settings["black_list"].split(",")
regex = settings["black_list_regex"]
if not regex:
regex = "-"
black_list_regex = re.compile(regex)
black_list = list(map(str.strip, black_list))
users = [
u for u in users if (
u.UserName not in black_list and
not bool(black_list_regex.match(u.UserName)) and
u.TimeWatched >= int(settings["min_time"])
)]
return users
def prepareRanking(users):
"""Prepare ranking with a CurrencyUsers list
Args:
users (List<CurrencyUsers>): List of streamlabs users
Return:
Dict:
{
'rank1': [user1, user2,...],
'rank2': [...]
}
"""
output = {}
for user in users:
if user.Rank in output.keys():
output[user.Rank].append("{}".format(user.UserName))
else:
output[user.Rank] = [user.UserName]
return output
def getOrderRanking(users):
"""Return a ranking ordered list.
Args:
users (List<CurrencyUsers>): List of streamlabs users
Return:
List<str>
"""
usersSorted = sorted(
users, key=lambda d: d.TimeWatched, reverse=True)
ranking = []
for user in usersSorted:
if user.Rank not in ranking:
ranking.append(user.Rank)
return ranking
def writeRanking(rankings, order):
"""Write ranking on a file
Args:
ranking (Dict)
"""
location = os.path.join(os.path.dirname(__file__), output)
category_file = os.path.join(
os.path.dirname(__file__), templates["category"])
file = open(category_file, mode='r')
category_template = file.read()
file.close()
rankings_txt = ""
for ranking in order:
users = rankings[ranking]
ranking_users = ""
for user in users:
ranking_users += "<p>{}</p>\n".format(user)
rankings_txt += category_template.format(
NAME="{}".format(str.upper(ranking)), ELEMENTS=ranking_users)
index_file = os.path.join(
os.path.dirname(__file__), templates["index"])
file = open(index_file, mode='r')
index_template = file.read()
file.close()
with open(location, 'w') as f:
f.write(index_template.format(RANKING=rankings_txt))
```
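`prepareRanking` and `getOrderRanking` only touch `UserName`, `TimeWatched` and `Rank`, so they can be exercised with stand-in objects (names and ranks below are made up; `CurrencyUsers` is the real Streamlabs type):
```python
from collections import namedtuple

U = namedtuple("U", "UserId UserName Points TimeWatched Rank")
users = [U(1, "alice", 10, 300, "gold"),
         U(2, "bob", 5, 120, "silver"),
         U(3, "carol", 8, 250, "gold")]
assert prepareRanking(users) == {"gold": ["alice", "carol"], "silver": ["bob"]}
assert getOrderRanking(users) == ["gold", "silver"]
```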
|
{
"source": "jevyzhu/pieterraform",
"score": 2
}
|
#### File: pieterraform/pieterraform/level1.py
```python
from typing import List
import logging
from .runner_base import CmdRunnerBase
from .terraform import Terraform
from .argument_base import ArgumentBase
from .positional_base import PositionalBase
from .common_args import TfCommonArgs, TfVarArgs
from .option_base import OptionBase
class TfCmdRunner(CmdRunnerBase):
def run(self) -> Terraform:
return super().run()
class TfVersion(TfCmdRunner):
def __init__(self, parent_cmder: Terraform, logger: logging = None):
TfCmdRunner.__init__(self, parent_cmder, "version", logger)
class TfInit(TfCmdRunner, ArgumentBase, TfCommonArgs):
def __init__(self, parent_cmder: Terraform, logger: logging = None):
TfCmdRunner.__init__(self, parent_cmder, "init", logger)
ArgumentBase.__init__(self)
TfCommonArgs.__init__(self)
@OptionBase.option("-upgrade=false")
def no_upgrade(self):
pass
@OptionBase.option("--reconfigure=true")
def reconfigure(self):
pass
@ArgumentBase.param("-backend-config")
def backend_config(self, value: str):
return value
class TfPlan(TfCmdRunner, TfVarArgs):
def __init__(self, parent_cmder: Terraform, logger: logging = None):
TfCmdRunner.__init__(self, parent_cmder, "plan", logger)
TfVarArgs.__init__(self)
@OptionBase.option("-destroy")
def destroy(self):
pass
@ArgumentBase.param("-out")
def out(self, value: str):
return value
@ArgumentBase.param("-state")
def statefile(self, value: str):
return value
class TfApply(TfCmdRunner, TfVarArgs):
def __init__(self, parent_cmder: Terraform, logger: logging = None):
TfCmdRunner.__init__(self, parent_cmder, "apply", logger)
TfVarArgs.__init__(self)
@PositionalBase.positional
def use_plan(self, value: str):
return value
@ArgumentBase.param("-state")
def statefile(self, value: str):
return value
class TfDestroy(TfCmdRunner, TfVarArgs):
def __init__(self, parent_cmder: Terraform, logger: logging = None):
TfCmdRunner.__init__(self, parent_cmder, "destroy", logger)
TfVarArgs.__init__(self)
@OptionBase.option("-auto-approve")
def auto_approve(self):
pass
@ArgumentBase.param("-state")
def statefile(self, value: str):
return value
class TfOutput(TfCmdRunner, TfCommonArgs):
def __init__(self, parent_cmder: Terraform, logger: logging = None):
TfCmdRunner.__init__(self, parent_cmder, "output", logger)
TfCommonArgs.__init__(self)
@ArgumentBase.param("-state")
def statefile(self, value: str):
return value
@OptionBase.option("-json=True")
def json(self):
pass
```
#### File: pieterraform/pieterraform/tf_state.py
```python
import json
import pathlib
from argparse import Namespace
class TfState:
def __init__(self, state_file_path: str):
self.output = Namespace()
with open(state_file_path, "r") as f:
outputs = json.load(f)["outputs"]
for key in outputs:
value = outputs[key]["value"]
setattr(self.output, key, value)
```
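`TfState` lifts the `outputs` map of a state file onto an attribute namespace; sketch (file name and output key are assumptions):
```python
# Given a terraform.tfstate containing {"outputs": {"vpc_id": {"value": "vpc-123"}}}:
state = TfState("terraform.tfstate")
assert state.output.vpc_id == "vpc-123"
```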
|
{
"source": "jevzirwin/redirectDetective",
"score": 3
}
|
#### File: redirectangular/src/app.py
```python
from flask import Flask, render_template, request
import requests
#Create instance of Flask App
app = Flask(__name__)
#Define Route and Content of that page
@app.route("/")
def home():
return render_template("index.html")
@app.route("/detect")
def detect_redirects():
url = request.args.get('url')
r = requests.get(url)
str_response = ''
for i, response in enumerate(r.history, 1):
print(i, response.url)
str_response += str(i) + ' ' + response.url + '<br/>'
# print ("redirects to {} with a status code of {} ".format(r.url,r.status_code))
str_response += ("redirects to {} with a status code of {} ".format(r.url,r.status_code))
return str_response
#Define route 2 and content
#Define route 3 and content
#Running and Controlling the script
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
```
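`requests` follows redirects by default and records each intermediate hop in `r.history`, which is exactly what `/detect` walks:
```python
import requests

r = requests.get("http://github.com")
for i, hop in enumerate(r.history, 1):
    print(i, hop.status_code, hop.url)  # e.g. 1 301 http://github.com/
print(r.status_code, r.url)             # e.g. 200 https://github.com/
```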
|
{
"source": "jewang/gesture-tutorial",
"score": 3
}
|
#### File: jewang/gesture-tutorial/gesture_detector.py
```python
import busio
import time
import datetime
import pandas as pd
from Adafruit_BNO055 import BNO055
import utils
import os
import collections
from sklearn.externals import joblib
import subprocess
import shlex
model = joblib.load('/home/pi/dev/gesture/models/167pt_model.joblib')
SOUNDS = {
'flippendo': 'sounds/flippendo.ogg',
'wingardium': 'sounds/wingardium.mp3'
}
def play(gesture):
    # Popen returns immediately; the original trailing '&' was passed to omxplayer
    # as a literal argument (shell=False), so it never backgrounded anything.
    subprocess.Popen(shlex.split(
        'omxplayer /home/pi/dev/gesture/{} -o alsa:hw:1,0'.format(SOUNDS[gesture])))
def read_sensors(bno):
vector = bno._read_vector(BNO055.BNO055_ACCEL_DATA_X_LSB_ADDR, 22)
accel = [s / 100. for s in vector[:3]]
mag = [s / 16. for s in vector[3:6]]
gyro = [s / 900. for s in vector[6:9]]
euler = [s / 16. for s in vector[9:12]]
quaternion = [s / QUATERNION_SCALE for s in vector[12:16]]
lin_accel = [s / 100. for s in vector[16:19]]
gravity = [s / 100. for s in vector[19:22]]
return accel + mag + gyro + euler + quaternion + lin_accel + gravity
SAMPLE_RATE_HZ = 100
QUATERNION_SCALE = (1.0 / (1 << 14))
CHECK_TIME_INCREMENT_MS = 200
SAMPLE_SIZE_MS = 1500
bno = BNO055.BNO055(serial_port='/dev/serial0', rst=18)
# Initialize the BNO055 and stop if something went wrong.
if not bno.begin():
raise RuntimeError('Failed to initialize BNO055! Is the sensor connected?')
# Print system status and self test result.
status, self_test, error = bno.get_system_status()
print('System status: {0}'.format(status))
print('Self test result (0x0F is normal): 0x{0:02X}'.format(self_test))
# Print out an error if system status is in error mode.
if status == 0x01:
print('System error: {0}'.format(error))
print('See datasheet section 4.3.59 for the meaning.')
i = 0
header = ["time_ms"] + utils.get_sensor_headers()
data = collections.deque(maxlen=int(SAMPLE_SIZE_MS / 10))  # one sample every ~10 ms, i.e. ~100 Hz
print('Starting operation')
start = datetime.datetime.now()
elapsed_ms = 0
last_classified = 0
last_classification = "negative_trim"
while True:
row = [elapsed_ms] + read_sensors(bno)
data.append(row)
if elapsed_ms - last_classified >= CHECK_TIME_INCREMENT_MS and len(data) == data.maxlen:
df = pd.DataFrame(list(data), columns=header)
features = utils.get_model_features(df)
prediction = model.predict([features])[0]
#print(int(elapsed_ms), prediction)
if prediction != 'negative_trim':# and last_classification != prediction:
print("========================>", prediction)
play(prediction)
data.clear()
last_classified = elapsed_ms
last_classification = prediction
elapsed_ms = (datetime.datetime.now() - start).total_seconds() * 1000
#if elapsed_ms > 10000:
# break
```
|
{
"source": "jewdev/Skillz-ColdeMaster-2022",
"score": 3
}
|
#### File: jewdev/Skillz-ColdeMaster-2022/MyBot.py
```python
from penguin_game import *
def do_turn(game):
my_icebergs_count = len(game.get_my_icebergs())
for iceberg in game.get_my_icebergs():
if iceberg.can_upgrade() and iceberg.upgrade_cost + 10 < iceberg.penguin_amount:
iceberg.upgrade()
else:
if my_icebergs_count < 3:
for neutral_iceberg in game.get_neutral_icebergs():
if not already_sent(game, neutral_iceberg):
if iceberg.can_send_penguins(neutral_iceberg, neutral_iceberg.penguin_amount + 1):
iceberg.send_penguins(
neutral_iceberg, neutral_iceberg.penguin_amount + 1)
else:
for enemyIceberg in game.get_enemy_icebergs():
send_enemy(iceberg, enemyIceberg)
for enemy_group in game.get_enemy_penguin_groups():
if enemy_group.destination in game.get_my_icebergs():
counter_attack(enemy_group.destination, enemy_group.source,
enemy_group.penguin_amount, game.get_my_icebergs())
def already_sent(game, dest):
for group in game.get_my_penguin_groups():
if group.destination == dest:
return True
return False
def send_enemy(source, dest):
amount = dest.penguins_per_turn * \
source.get_turns_till_arrival(dest) + dest.penguin_amount + 1
if source.can_send_penguins(dest, amount):
source.send_penguins(dest, amount)
return True
return False
def counter_attack(my_iceberg, source, enemy_amount, icebergs):
penguin_amount = my_iceberg.penguin_amount
if enemy_amount < my_iceberg.penguin_amount:
neighbor = find_closest(icebergs, my_iceberg)
if my_iceberg.can_send_penguins(source, my_iceberg.penguin_amount - 1):
my_iceberg.send_penguins(source, my_iceberg.penguin_amount - 1)
if neighbor.can_send_penguins(my_iceberg, enemy_amount-penguin_amount):
neighbor.send_penguins(my_iceberg, enemy_amount-penguin_amount)
else:
neighbor = find_closest(icebergs, my_iceberg)
my_amount = source.get_turns_till_arrival(
my_iceberg) * my_iceberg.penguins_per_turn + my_iceberg.penguin_amount
if neighbor.can_send_penguins(my_iceberg, enemy_amount-my_amount + 1):
neighbor.send_penguins(my_iceberg, enemy_amount-my_amount + 1)
def find_closest(my_icebergs, iceberg):
min = 100000000
for my_iceberg in my_icebergs:
if my_iceberg.get_turns_till_arrival(iceberg) < min and not iceberg == my_iceberg:
min = my_iceberg.get_turns_till_arrival(iceberg)
minIceberg = my_iceberg
return minIceberg
```
|
{
"source": "jewee/ops-cli",
"score": 2
}
|
#### File: ops/cli/helmfile.py
```python
import logging
import os
import sys
from ops.cli.parser import SubParserConfig
from ops.hierarchical.composition_config_generator import CompositionConfigGenerator
logger = logging.getLogger(__name__)
class HelmfileParserConfig(SubParserConfig):
def get_name(self):
return 'helmfile'
def get_help(self):
return 'Wrap common helmfile tasks using hierarchical configuration support'
def configure(self, parser):
parser.add_argument(
'--helmfile-path',
type=str,
default=None,
help='Dir to where helmfile.yaml is located')
return parser
def get_epilog(self):
return '''
Examples:
# Run helmfile sync
ops data/env=dev/region=va6/project=ee/cluster=experiments/composition=helmfiles helmfile sync
# Run helmfile sync for a single chart
ops data/env=dev/region=va6/project=ee/cluster=experiments/composition=helmfiles helmfile --selector chart=nginx-controller sync
# Run helmfile sync with concurrency flag
ops data/env=dev/region=va6/project=ee/cluster=experiments/composition=helmfiles helmfile --selector chart=nginx-controller sync --concurrency=1
'''
class HelmfileRunner(CompositionConfigGenerator, object):
def __init__(self, ops_config, cluster_config_path, execute):
super(HelmfileRunner, self).__init__(["helmfiles"])
logging.basicConfig(level=logging.INFO)
self.ops_config = ops_config
self.cluster_config_path = cluster_config_path
self.execute = execute
def run(self, args, extra_args):
config_path_prefix = os.path.join(self.cluster_config_path, '')
default_helmfiles = '../ee-k8s-infra/compositions/helmfiles'
args.helmfile_path = default_helmfiles if args.helmfile_path is None else os.path.join(
args.helmfile_path, '')
compositions = self.get_sorted_compositions(config_path_prefix)
if len(compositions) == 0 or compositions[0] != "helmfiles":
raise Exception(
"Please provide the full path to composition=helmfiles")
composition = compositions[0]
conf_path = self.get_config_path_for_composition(
config_path_prefix, composition)
data = self.generate_helmfile_config(conf_path, args)
self.setup_kube_config(data)
command = self.get_helmfile_command(args, extra_args)
return dict(command=command)
def setup_kube_config(self, data):
if data['helm']['global']['clusterType'] == 'eks':
cluster_name = data['helm']['global']['fqdn']
aws_profile = data['helm']['global']['aws']['profile']
region = data['helm']['global']['region']['location']
file_location = self.generate_eks_kube_config(
cluster_name, aws_profile, region)
os.environ['KUBECONFIG'] = file_location
else:
logger.warning('currently only eks type clusters supported')
sys.exit(1)
def generate_eks_kube_config(self, cluster_name, aws_profile, region):
file_location = self.get_tmp_file()
cmd = "aws eks update-kubeconfig --name {} --profile {} --region {} --kubeconfig {}".format(cluster_name,
aws_profile,
region,
file_location)
return_code = self.execute(dict(command=cmd))
if return_code != 0:
raise Exception(
"Unable to generate EKS kube config. Exit code was {}".format(return_code))
return file_location
@staticmethod
def get_tmp_file():
import tempfile
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
return tmp_file.name
def generate_helmfile_config(self, path, args):
output_file = args.helmfile_path + "/hiera-generated.yaml"
logger.info('Generating helmfiles config %s', output_file)
try:
excluded_keys = self.ops_config["compositions"]["excluded_config_keys"]["helmfile"]
except KeyError:
excluded_keys = []
try:
filtered_keys = self.ops_config["compositions"]["filtered_output_keys"]["helmfile"]
except KeyError:
filtered_keys = []
return self.config_generator.generate_config(config_path=path,
filters=filtered_keys,
exclude_keys=excluded_keys,
output_format="yaml",
output_file=output_file,
print_data=True)
def get_helmfile_command(self, args, extra_args):
helmfile_args = ' '.join(extra_args)
return "cd {helmfile_path} && helmfile {helmfile_args}".format(
helmfile_path=args.helmfile_path,
helmfile_args=helmfile_args)
```
#### File: ops/cli/packer.py
```python
import logging
from ops.cli.parser import SubParserConfig
from . import aws
logger = logging.getLogger(__name__)
class PackerParserConfig(SubParserConfig):
def get_name(self):
return 'packer'
def get_help(self):
return 'Wrap common packer tasks and inject variables from a cluster file'
def configure(self, parser):
parser.add_argument('subcommand', help='build | validate', type=str)
return parser
def get_epilog(self):
return '''
Examples:
# Validate a packer file
ops clusters/centos7.yaml packer validate
# Build a packer file
ops clusters/centos7.yaml packer build
'''
class PackerRunner(object):
def __init__(self, root_dir, cluster_config):
self.cluster_config = cluster_config
self.root_dir = root_dir
def run(self, args, extra_args):
logger.info("Found extra_args %s", extra_args)
config_all = self.cluster_config.all()
packer_variables = config_all['packer']['variables']
if config_all['packer']['clouds'] is not None:
if 'aws' in config_all['packer']['clouds']:
profile_name = config_all['packer']['clouds']['aws']['boto_profile']
packer_variables['aws_access_key'] = aws.acess_key(
profile_name)
packer_variables['aws_secret_key'] = aws.secret_key(
profile_name)
else:
# add other cloud logic here
pass
variables = ''
for key, value in packer_variables.items():
variables += " -var '%s=%s' " % (key, value)
if args.subcommand == 'build':
command = 'packer build %s %s' % (
variables, config_all['packer']['template'])
if args.subcommand == 'validate':
command = 'packer validate %s %s' % (
variables, config_all['packer']['template'])
return dict(
command=command
)
```
#### File: ops/cli/terraform.py
```python
import os
import hashlib
import logging
from ops.cli.parser import SubParserConfig
from ops.terraform.terraform_cmd_generator import TerraformCommandGenerator
from ops.hierarchical.composition_config_generator import TerraformConfigGenerator
from distutils.version import StrictVersion
from ops import validate_ops_version
import pkg_resources
logger = logging.getLogger(__name__)
class TerraformParserConfig(SubParserConfig):
def get_name(self):
return 'terraform'
def get_help(self):
return 'Wrap common terraform tasks with full templated configuration support'
def configure(self, parser):
parser.add_argument('subcommand',
help='apply | console | destroy | import | output | plan | '
'refresh | show | taint | template | untaint | validate',
type=str)
parser.add_argument(
'--var',
help='the output var to show',
type=str,
default='')
parser.add_argument('--module',
help='for use with "taint", "untaint" and "import". '
'The module to use. e.g.: vpc', type=str)
parser.add_argument('--resource',
help='for use with "taint", "untaint" and "import".'
'The resource to target. e.g.: aws_instance.nat',
type=str)
parser.add_argument('--name',
help='for use with "import". The name or ID of the imported resource. '
'e.g.: i-abcd1234',
type=str)
parser.add_argument('--plan', help='for use with "show", '
'show the plan instead of the statefile',
action='store_true')
parser.add_argument('--state-location', help='control how the remote states are used',
choices=['local', 'remote', 'any'], default='any', type=str)
parser.add_argument('--force-copy',
help='for use with "plan" to do force state change '
'automatically during init phase',
action='store_true')
parser.add_argument('--template-location',
help='for use with "template". The folder where to save the tf files, '
'without showing',
type=str)
parser.add_argument('--skip-refresh', help='for use with "plan". Skip refresh of statefile',
action='store_false', dest='do_refresh')
parser.set_defaults(do_refresh=True)
parser.add_argument('--raw-output',
help='for use with "plan". Show raw plan output without piping through '
'terraform landscape - https://github.com/coinbase/terraform-landscape '
'(if terraform landscape is not enabled in opsconfig.yaml '
'this will have no impact)', action='store_true',
dest='raw_plan_output')
parser.set_defaults(raw_plan_output=False)
parser.add_argument('--path-name',
help='in case multiple terraform paths are defined, '
'this allows to specify which one to use when running terraform',
type=str)
parser.add_argument(
'--terraform-path',
type=str,
default=None,
help='Path to terraform files')
parser.add_argument('--skip-plan',
help='for use with "apply"; runs terraform apply without running a plan first',
action='store_true')
parser.add_argument('--auto-approve',
help='for use with "apply". Proceeds with the apply without'
'waiting for user confirmation.',
action='store_true')
parser.add_argument(
'terraform_args',
type=str,
nargs='*',
help='Extra terraform args')
return parser
def get_epilog(self):
return '''
Examples:
# Create/update a new cluster with Terraform
ops clusters/qe1.yaml terraform plan
ops clusters/qe1.yaml terraform apply
# Run Terraform apply without running a plan first
ops clusters/qe1.yaml terraform apply --skip-plan
# Get rid of a cluster and all of its components
ops clusters/qe1.yaml terraform destroy
# Retrieve all output from a previously created Terraform cluster
ops clusters/qe1.yaml terraform output
# Retrieve a specific output from a previously created Terraform cluster
ops clusters/qe1.yaml terraform output --var nat_public_ip
# Refresh a statefile (no longer part of plan)
ops clusters/qe1.yaml terraform refresh
# Taint a resource- forces a destroy, then recreate on next plan/apply
ops clusters/qe1.yaml terraform taint --module vpc --resource aws_instance.nat
# Untaint a resource
ops clusters/qe1.yaml terraform untaint --module vpc --resource aws_instance.nat
# Show the statefile in human-readable form
ops clusters/qe1.yaml terraform show
# Show the plan in human-readable form
ops clusters/qe1.yaml terraform show --plan
# View parsed jinja on the terminal
ops clusters/qe1.yaml terraform template
# Import an unmanaged existing resource to a statefile
ops clusters/qe1.yaml terraform import --module vpc --resource aws_instance.nat --name i-abcd1234
# Use the Terraform Console on a cluster
ops clusters/qe1.yaml terraform console
# Validate the syntax of Terraform files
ops clusters/qe1.yaml terraform validate
# Specify which terraform path to use
ops clusters/qe1.yaml terraform plan --path-name terraformFolder1
# Run terraform v2 integration
ops data/env=dev/region=va6/project=ee/cluster=experiments terraform plan
'''
class TerraformRunner(object):
def __init__(self, root_dir, cluster_config_path, cluster_config, inventory_generator,
ops_config, template, execute):
self.cluster_config_path = cluster_config_path
self.cluster_config = cluster_config
self.root_dir = root_dir
self.inventory_generator = inventory_generator
self.ops_config = ops_config
self.template = template
self.execute = execute
def check_ops_version(self):
# Check if the cluster_config has a strict requirement of OPS version
# But only if 'ops_min_version' is specified. Not all clusters configs
# enforce this
if "terraform" in self.cluster_config.conf:
if "ops_min_version" in self.cluster_config.conf["terraform"]:
ops_min_version = str(
self.cluster_config.conf["terraform"]["ops_min_version"])
validate_ops_version(ops_min_version)
def run(self, args, extra_args):
logger.info("Found extra_args %s", extra_args)
self.check_ops_version()
terraform_config_path = os.environ.get(
"TF_CLI_CONFIG_FILE",
self.ops_config.terraform_config_path
)
os.environ["TF_CLI_CONFIG_FILE"] = terraform_config_path
logger.info("Set TF_CLI_CONFIG_FILE=%s", terraform_config_path)
if os.path.isdir(self.cluster_config_path):
return self.run_v2_integration(args)
else:
return self.run_v1_integration(args)
def run_v1_integration(self, args):
return self.run_composition(args, self.cluster_config)
def run_composition(self, args, config):
generator = TerraformCommandGenerator(self.root_dir,
config,
self.inventory_generator,
self.ops_config,
self.template)
return generator.generate(args)
def run_v2_integration(self, args):
logging.basicConfig(level=logging.INFO)
config_path = os.path.join(self.cluster_config_path, '')
terraform_path = '../ee-k8s-infra/' if args.terraform_path is None else os.path.join(
args.terraform_path, '')
terraform_path = '{}compositions/terraform/'.format(terraform_path)
ops_config = self.cluster_config.ops_config.config
composition_order = ops_config["compositions"]["order"]["terraform"]
excluded_config_keys = ops_config["compositions"]["excluded_config_keys"]
tf_config_generator = TerraformConfigGenerator(
composition_order, excluded_config_keys)
reverse_order = "destroy" == args.subcommand
compositions = tf_config_generator.get_sorted_compositions(
config_path, reverse=reverse_order)
if len(compositions) == 0:
raise Exception(
"No terraform compositions were detected in {}.".format(config_path))
return self.run_v2_compositions(
args, config_path, tf_config_generator, terraform_path, compositions)
def run_v2_compositions(self, args, config_path,
tf_config_generator, terraform_path, compositions):
should_finish = False
return_code = 0
for composition in compositions:
if should_finish:
logger.info(
"Skipping 'terraform %s' for composition '%s' because of previous failure.",
args.subcommand,
composition)
continue
logger.info("Running composition: %s", composition)
tf_config_generator.generate_files(
config_path, terraform_path, composition)
command = self.run_v2_composition(
args, terraform_path, composition)
return_code = self.execute(command)
if return_code != 0:
should_finish = True
logger.error(
"Command finished with nonzero exit code for composition '%s'."
"Will skip remaining compositions.", composition
)
return return_code
def run_v2_composition(self, args, terraform_path, composition):
config = self.cluster_config
config['terraform'] = {}
config['terraform']["path"] = "{}{}".format(
terraform_path, composition)
config['terraform']["variables_file"] = "variables.tfvars.json"
cluster_id = hashlib.md5(
self.cluster_config_path.encode('utf-8')).hexdigest()[:6]
config['cluster'] = "auto_generated_{}".format(cluster_id)
return self.run_composition(args, config)
```
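`run_v2_composition` derives a short, deterministic cluster id from the config path; for instance:
```python
import hashlib

path = "data/env=dev/region=va6/project=ee/cluster=experiments"
cluster_id = hashlib.md5(path.encode("utf-8")).hexdigest()[:6]
print("auto_generated_{}".format(cluster_id))  # stable 6-hex-char id per path
```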
|
{
"source": "jewelben/IC-Project",
"score": 4
}
|
#### File: IC-Project/Problem-1/huffman_encode.py
```python
class heapnode:
def __init__(self,frequency,symbols,left=None,right=None):
self.frequency=frequency
self.symbols=symbols
self.left=left
self.right=right
self.bits=''
# Builds the Huffman tree from our priority queue of leaf nodes and returns the root node.
# We first sort the queue by frequency and create a parent node whose frequency is the sum of
# the frequencies of the first two nodes of the sorted queue. We then pop off those two nodes,
# push the new node into the queue, and sort again, repeating until only one node remains.
def huffmantree(leafnodes):
while len(leafnodes) > 1:
# sorting our queue
leafnodes.sort(key=lambda x:x.frequency)
left=leafnodes[0]
right=leafnodes[1]
        # Label the edges to the two children: left = 0, right = 1
left.bits=0
right.bits=1
# defining our new parent node of our huffman tree
newHeapNode=heapnode(left.frequency+right.frequency,None,left,right)
# popping the first two elements and pushing our parent node
leafnodes.remove(left)
leafnodes.remove(right)
leafnodes.append(newHeapNode)
# returning our final root node
return leafnodes[0]
# Builds a dictionary mapping each symbol (key) to its Huffman codeword (value)
def EncodeTable(encode,Treenode,bitstring=''):
# codeword variable stores our code word, where as we move down the tree the corresponding bit gets concatenated
codeword=bitstring+str(Treenode.bits)
    # if the left child isn't None, we recursively traverse the tree until we can't go any further,
    # then check the right child on the way back (this is essentially a DFS of the tree)
if(Treenode.left!=None):
EncodeTable(encode,Treenode.left,codeword)
if(Treenode.right!=None):
EncodeTable(encode,Treenode.right,codeword)
# if we both left and right children are NULL, that means we have reached the leafnode and we
# have found its corresponding codeword
else:
encode[Treenode.symbols]=codeword
return encode
```
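A usage sketch with made-up frequencies; the exact codewords depend on tie-breaking in the sort, but higher-frequency symbols always receive shorter codes:
```python
nodes = [heapnode(5, 'a'), heapnode(9, 'b'), heapnode(12, 'c'),
         heapnode(13, 'd'), heapnode(16, 'e'), heapnode(45, 'f')]
root = huffmantree(nodes)
codes = EncodeTable({}, root)
print(codes)  # e.g. {'f': '0', 'c': '100', 'd': '101', ...} -- prefix-free codes
```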
|
{
"source": "JewelBoyz/dfk",
"score": 3
}
|
#### File: quest/utils/utils.py
```python
FAIL_ON_NOT_FOUND = False
types = {
1: "attemptBased",
#2: "timeBased",
#3: "well",
}
def parse_type(id):
value = types.get(id, None)
if FAIL_ON_NOT_FOUND and value is None:
raise Exception("Quest type not found")
return value
def parse_quests(raw_quests):
if raw_quests is None:
return None
quests = []
for rq in raw_quests:
quests.append(parse_quest(rq))
return quests
def parse_quest(raw_quest):
if raw_quest is None:
return None
quest = {}
quest['id'] = raw_quest[0]
quest['address'] = raw_quest[1]
quest['heroes'] = raw_quest[2]
quest['player'] = raw_quest[3]
quest['startTime'] = raw_quest[4]
quest['startBlock'] = raw_quest[5]
quest['completeAtTime'] = raw_quest[6]
quest['attempts'] = raw_quest[7]
quest['type'] = parse_type(raw_quest[8])
return quest
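# --- Hedged usage sketch (not in the original file): parses one raw quest tuple.
# --- The field values below are made up for illustration; only the positional
# --- layout matches what parse_quest above expects.
if __name__ == '__main__':
    raw = (
        42,                                            # id
        '0x0000000000000000000000000000000000000000',  # quest contract address
        [1001, 1002],                                  # hero ids
        '0x0000000000000000000000000000000000000001',  # player address
        1650000000,                                    # startTime
        12345678,                                      # startBlock
        1650003600,                                    # completeAtTime
        5,                                             # attempts
        1,                                             # type -> "attemptBased"
    )
    print(parse_quest(raw))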
```
|
{
"source": "jewelc/sslcommerz",
"score": 2
}
|
#### File: templates/pages/order_details.py
```python
import frappe
from frappe import _
from erpnext.e_commerce.doctype.e_commerce_settings.e_commerce_settings import show_attachments
def get_context(context):
context.no_cache = 1
context.show_sidebar = True
context.doc = frappe.get_doc("Sales Order", frappe.form_dict.name)
if hasattr(context.doc, "set_indicator"):
context.doc.set_indicator()
if show_attachments():
context.attachments = get_attachments(frappe.form_dict.doctype, frappe.form_dict.name)
context.parents = frappe.form_dict.parents
context.title = frappe.form_dict.name
context.payment_ref = frappe.db.get_value("Payment Request",
{"reference_name": frappe.form_dict.name}, "name")
context.enabled_checkout = frappe.get_doc("E Commerce Settings").enable_checkout
default_print_format = frappe.db.get_value('Property Setter', dict(property='default_print_format', doc_type=frappe.form_dict.doctype), "value")
if default_print_format:
context.print_format = default_print_format
else:
context.print_format = "Standard"
if not frappe.has_website_permission(context.doc):
frappe.throw(_("Not Permitted"), frappe.PermissionError)
# check for the loyalty program of the customer
customer_loyalty_program = frappe.db.get_value("Customer", context.doc.customer, "loyalty_program")
if customer_loyalty_program:
from erpnext.accounts.doctype.loyalty_program.loyalty_program import (
get_loyalty_program_details_with_points,
)
loyalty_program_details = get_loyalty_program_details_with_points(context.doc.customer, customer_loyalty_program)
context.available_loyalty_points = int(loyalty_program_details.get("loyalty_points"))
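	# Derive a coarse fulfillment status for the order: "Placed" by default,
	# "In Progress" once every item is on a submitted pick list, "Delivery In
	# Progress" once every item is on a submitted delivery note, and "Delivered"
	# once all the matching delivery notes are closed.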
context.flow_status = "Placed"
	pick_list = frappe.db.get_all("Pick List Item", filters={"sales_order": frappe.form_dict.name, "docstatus": 1})
	if pick_list:
		if len(pick_list) == len(context.doc.items):
			context.flow_status = "In Progress"
	delivery_notes = frappe.db.get_all("Delivery Note Item", filters={"against_sales_order": frappe.form_dict.name, "docstatus": 1})
	frappe.log_error(delivery_notes, "delivery_notes")
	if len(delivery_notes) == len(context.doc.items):
		context.flow_status = "Delivery In Progress"
closed_delivery_notes = frappe.db.sql("""SELECT DI.name FROM `tabDelivery Note Item` DI INNER JOIN
`tabDelivery Note` D ON DI.parent = D.name
WHERE D.status ='Closed' AND DI.against_sales_order = %(order_id)s AND D.docstatus = 1
""",{"order_id":frappe.form_dict.name},as_dict=1)
if len(closed_delivery_notes) == len(context.doc.items):
context.flow_status = "Delivered"
def get_attachments(dt, dn):
return frappe.get_all("File",
fields=["name", "file_name", "file_url", "is_private"],
filters = {"attached_to_name": dn, "attached_to_doctype": dt, "is_private":0})
```
|
{
"source": "Jewel-Hong/SC-projects",
"score": 4
}
|
#### File: SC101Assignment/SC101_Assignment0/class_reviews.py
```python
def main():
"""
TODO: Get the maximum, minimum and average score of class SC001 and SC101 respectively.
"""
cl = input('Which class? ').upper()
if cl == '-1':
print('No class scores were entered')
else:
score = int(input('Score: '))
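        # Initialize per-class statistics, then seed them with this first entry;
        # the while loop below folds in every later entry.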
n0 = 0
n1 = 0
max001 = float('NaN')
min001 = float('NaN')
avg001 = float('NaN')
max101 = float('NaN')
min101 = float('NaN')
avg101 = float('NaN')
if cl == 'SC001':
n0 += 1
max001 = score
min001 = score
avg001 = float(score)
elif cl == 'SC101':
n1 += 1
max101 = score
min101 = score
avg101 = float(score)
while True:
cl = input('Which class? ').upper()
if cl == '-1':
break
else:
score = int(input('Score: '))
if cl == 'SC001':
if n0 == 0:
n0 += 1
max001 = score
min001 = score
avg001 = float(score)
else:
n0 += 1
max001 = maximum(score, max001)
min001 = minimum(score, min001)
avg001 = avg(score, avg001, n0)
elif cl == 'SC101':
if n1 == 0:
n1 += 1
max101 = score
min101 = score
avg101 = float(score)
else:
n1 += 1
max101 = maximum(score, max101)
min101 = minimum(score, min101)
avg101 = avg(score, avg101, n1)
result(max001, min001, avg001, max101, min101, avg101, n0, n1)
def result(max001, min001, avg001, max101, min101, avg101, n0, n1):
"""
:param max001: int, the maximum among scores of SC001
:param min001: int, the minimum among scores of SC001
:param avg001: float, the average among scores of SC001
:param max101: int, the maximum among scores of SC101
:param min101: int, the minimum among scores of SC101
:param avg101: float, the average among scores of SC101
:param n0: int, the number of the scores of SC001
:param n1: int, the number of the scores of SC101
TODO: print the result
"""
equal()
print('SC001', end='')
equal()
if n0 == 0:
print('\nNo score for SC001')
else:
print('\nMax (001): ' + str(max001))
print('Min (001): ' + str(min001))
print('Avg (001): ' + str(avg001))
equal()
print('SC101', end='')
equal()
if n1 == 0:
print('\nNo score for SC101')
else:
print('\nMax (101): ' + str(max101))
print('Min (101): ' + str(min101))
print('Avg (101): ' + str(avg101))
def equal():
"""
TODO: print 13 equals continuously
"""
for i in range(13):
print('=', end='')
def maximum(a, b):
"""
:param a: int, any number
:param b: int, any number
:return: the bigger one
"""
if a >= b:
return a
else:
return b
def minimum(a, b):
"""
:param a: int, any number
:param b: int, any number
:return: the smaller one
"""
if a <= b:
return a
else:
return b
def avg(new, avg, times):
"""
:param new: int, the new score
:param avg: float, the average of score of the class
:param times: int, numbers of the scores of the class
:return: float, the new average
"""
new_avg = (avg * (times - 1) + new) / times
return new_avg
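    # Example: avg(95, 80.0, 3) -> (80.0 * (3 - 1) + 95) / 3 = 85.0; the previous
    # total is reconstructed from the old average before the new score is added.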
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
if __name__ == '__main__':
main()
```
#### File: SC101Assignment/SC101_Assignment0/coin_flip_runs.py
```python
import random as r
def main():
"""
TODO: Print the result of coin-flipping that fits the number of runs.
"""
print('Let\'s flip a coin!')
num_run = int(input('Number of runs: '))
ht = ('H', 'T')
result = r.choice(ht)
while True:
result += r.choice(ht)
if finish(result, num_run):
break
print(result[0: len(result)-1])
def finish(result, num_run):
"""
:param result: str, the string that composed randomly by H and T
:param num_run: int, the number of runs that the player decided
    :return: bool, True once the result contains num_run runs
"""
n = 0
for i in range(len(result)-2):
if result[i] == result[i+1] and result[i+1] != result[i+2]:
n += 1
if n == num_run:
return True
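    # Example: finish('HHTHHT', 2) counts one run ending at index 1 (HH followed
    # by T) and another ending at index 4 (HH followed by T), so it returns True.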
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == "__main__":
main()
```
#### File: SC101_Assignment1/SC101_Assignment1/my_drawing.py
```python
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.graphics.gwindow import GWindow
def main():
"""
TODO: Literally busy to death...
"""
bg = GWindow(width=500, height=400)
face = GOval(300, 250, x=100, y=100)
face.filled = True
face.fill_color = 'beige'
face.color = 'grey'
reye1 = GOval(50, 50, x=150, y=220)
reye1.filled = True
reye1.fill_color = 'grey'
reye1.color = 'grey'
reye2 = GOval(30, 30, x=155, y=225)
reye2.filled = True
reye2.fill_color = 'white'
reye2.color = 'grey'
reye3 = GOval(10, 10, x=160, y=230)
reye3.filled = True
reye3.fill_color = 'grey'
reye3.color = 'grey'
leye1 = GOval(50, 50, x=300, y=220)
leye1.filled = True
leye1.fill_color = 'grey'
leye1.color = 'grey'
leye2 = GOval(30, 30, x=305, y=225)
leye2.filled = True
leye2.fill_color = 'white'
leye2.color = 'grey'
leye3 = GOval(10, 10, x=310, y=230)
leye3.filled = True
leye3.fill_color = 'grey'
leye3.color = 'grey'
mouth = GRect(120, 20, x=200, y=280)
mouth.filled = True
mouth.fill_color = 'grey'
mouth.color = 'grey'
label = GLabel('KILL ME...')
label.font = '-30'
label.color = 'grey'
bg.add(face)
bg.add(reye1)
bg.add(reye2)
bg.add(reye3)
bg.add(leye1)
bg.add(leye2)
bg.add(leye3)
bg.add(mouth)
bg.add(label, x=500-label.width, y=400)
if __name__ == '__main__':
main()
```
#### File: SC101Assignment/SC101_Assignment5/anagram.py
```python
FILE = 'dictionary.txt' # This is the filename of an English dictionary
EXIT = '-1' # Controls when to stop the loop
# Global variable
dict_lst = []
anagrams = []
def main():
read_dictionary()
print(f'Welcome to stanCode "Anagram Generator" (or {EXIT} to quit)')
while True:
s = input('Find anagrams for: ')
if s == EXIT:
break
else:
print('Searching...')
find_anagrams(s)
print(f'{len(anagrams)} anagrams: {anagrams}')
anagrams.clear()
def read_dictionary():
with open(FILE, 'r') as f:
        for word in f:
            # mutating the module-level list needs no 'global' declaration
            dict_lst.append(word.strip())
def find_anagrams(s):
"""
:param s (str): the word to find the anagrams
"""
find_anagrams_helper(s, [])
def find_anagrams_helper(s, index_lst):
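    """
    Recursively extends index_lst with unused character positions of s,
    pruning any branch whose current prefix matches no dictionary word.
    :param s (str): the word to find the anagrams of
    :param index_lst (list): indices of s already used in the current permutation
    """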
cur_s = ''
for i in index_lst:
cur_s += s[i]
if has_prefix(cur_s):
if len(cur_s) == len(s):
if cur_s in dict_lst and cur_s not in anagrams:
print(f'Found: {cur_s}')
anagrams.append(cur_s)
print('Searching...')
else:
for i in range(len(s)):
if i not in index_lst:
index_lst.append(i)
find_anagrams_helper(s, index_lst)
index_lst.pop()
def has_prefix(sub_s):
"""
:param sub_s (str): the sub-string to be checked whether there is a word started with it
:return (bool): True/ False
"""
for word in dict_lst:
if word.startswith(sub_s):
return True
return False
if __name__ == '__main__':
main()
```
#### File: SC101Lecture_code/SC101_week1/whack_a_mole.py
```python
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GLabel
from campy.graphics.gimage import GImage
from campy.gui.events.mouse import onmouseclicked
from campy.gui.events.timer import pause
import random
# Constants control the diameter of the window
WINDOW_WIDTH = 900
WINDOW_HEIGHT = 600
# Constant controls the pause time of the animation
DELAY = 550
# Global variables
window = GWindow(width=WINDOW_WIDTH, height=WINDOW_HEIGHT)
score = 0
score_label = GLabel('Score: ' + str(score))
# TODO:
def main():
onmouseclicked(remove_mole)
score_label.font = '-50'
window.add(score_label, x=0, y=score_label.height)
while True:
mole = GImage('mole.png')
mole.x = random.randint(0, window.width - mole.width)
mole.y = random.randint(0, window.height - mole.height)
window.add(mole)
pause(DELAY)
def remove_mole(m):
global score
maybe_mole = window.get_object_at(m.x, m.y)
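    # get_object_at presumably returns the topmost object under the click, or
    # None when nothing is there; both cases are guarded below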
if maybe_mole is not None and maybe_mole is not score_label:
window.remove(maybe_mole)
score += 1
score_label.text = 'Score: ' + str(score)
if __name__ == '__main__':
main()
```
#### File: SC101Lecture_code/SC101_week2/pypal_starter.py
```python
from pypal import Pypal
def main():
bank = Pypal('Jewel', money=10000, withdraw_limit=6000)
bank.withdraw(7000)
bank.withdraw(5000)
bank.withdraw(6000)
name = bank.set_name('Guagua')
bank.withdraw(3000)
money = bank.get_money()
print(name, money)
if __name__ == '__main__':
main()
```
#### File: SC101Lecture_code/SC101_week4/draw_basic.py
```python
import tkinter as tk
# provided function, this code is complete
def make_canvas(width, height):
"""
Creates and returns a drawing canvas
of the given int size, ready for drawing.
"""
top = tk.Tk()
top.minsize(width=width + 10, height=height + 10)
canvas = tk.Canvas(top, width=width, height=height)
canvas.pack()
canvas.xview_scroll(6, "units") # hack so (0, 0) works correctly
canvas.yview_scroll(6, "units")
return canvas
def main():
w = make_canvas(1000, 500)
w.create_line(0, 0, 1000, 500, width=5, fill='red')
w.create_text(0, 0, text='SC101', anchor=tk.NW, font='times 80')
    tk.mainloop()  # keep the window open until the user closes it
if __name__ == '__main__':
main()
```
#### File: SC101Lecture_code/SC101_week4/factorial.py
```python
def main():
# print(factorial(0)) # 1
# print(factorial(1)) # 1
# print(factorial(5)) # 120
# print(factorial(10)) # 3628800
print(m(37, 12)+m(14, 10))
def m(a, b):
if a < b:
return a
else:
return m(a-b, b)
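# m(a, b) computes a % b for non-negative a and positive b by repeated
# subtraction, e.g. m(37, 12) -> 1 and m(14, 10) -> 4, so main prints 5.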
# def factorial(n):
# if n == 0:
# return 1
# else:
# ans = n * factorial(n-1)
# return ans
if __name__ == '__main__':
main()
```
#### File: SC101Lecture_code/SC101_week4/web_crawler_directors.py
```python
import requests
from bs4 import BeautifulSoup
def main():
url = 'http://www.imdb.com/chart/top'
response = requests.get(url)
html = response.text
    soup = BeautifulSoup(html, 'html.parser')  # name the parser explicitly to avoid a warning
#########################
items = soup.find_all('td', {'class': 'titleColumn'})
d = {}
for item in items:
        director = item.a['title'].split(',')[0]
        # strip the trailing "(dir.)" suffix
        i = director.find('(')
        if i != -1:
            director = director[:i]
        if director in d:
            d[director] += 1
        else:
            d[director] = 1
    for director, times in sorted(d.items(), key=lambda t: t[1]):
        print(f'{director} -> {times}')
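    # Note: sorted ascending by count; pass reverse=True to list the most
    # prolific directors first.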
#########################
if __name__ == '__main__':
main()
```
#### File: SC101Lecture_code/SC101_week5/cantor_set_drawing.py
```python
from campy.graphics.gobjects import GLine
from campy.graphics.gwindow import GWindow
from campy.gui.events.timer import pause
# Constants
SPACE = 40 # The space between levels
START_X = 50 # The starting x of the line
START_Y = 50 # The starting y of the line
START_WIDTH = 729       # The length of cantor set level 1
LEVEL = 5               # The level of the cantor set
DELAY = 100             # The pause time in milliseconds
# Global Variables
window = GWindow(width=1000, height=700) # The canvas to draw lines on
def main():
cantor_set(START_X, START_Y, START_WIDTH, LEVEL)
def cantor_set(x, y, width, level):
if level == 0:
pass
else:
line = GLine(x, y, x + width, y)
window.add(line)
cantor_set(x, y + SPACE, width / 3, level - 1)
cantor_set(x + (2 / 3) * width, y + SPACE, width / 3, level - 1)
pause(DELAY)
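# Each recursive call draws the left and right thirds of the current segment one
# level down, so with START_WIDTH = 729 the segment widths per level are
# 729, 243, 81, 27, 9 until level reaches 0.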
if __name__ == '__main__':
main()
```
#### File: SC101Lecture_code/SC101_week6/linked_list.py
```python
class ListNode:
def __init__(self, data, pointer):
        # 'value' and 'next' are the conventional attribute names for list nodes
self.value = data
self.next = pointer
def main():
# Way 1
node1 = ListNode(('A', 3), None)
node2 = ListNode(('B', 5), None)
node1.next = node2
node3 = ListNode(('C', 7), None)
node2.next = node3
# Way 2
node3 = ListNode(('C', 7), None)
node2 = ListNode(('B', 5), node3)
node1 = ListNode(('A', 3), node2)
linked_list = node1
traversal(linked_list)
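    # expected output, one tuple per line: ('A', 3), ('B', 5), ('C', 7)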
def traversal(linked_list):
cur = linked_list
while cur is not None:
print(cur.value)
cur = cur.next
if __name__ == '__main__':
main()
```
|