metadata | text
---|---|
{
"source": "Jesse989/home-price-predictions",
"score": 3
}
|
#### File: home-price-predictions/notebooks/SchoolCrawler.py
```python
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from opencage.geocoder import OpenCageGeocode
from opencage.geocoder import InvalidInputError, RateLimitExceededError, UnknownError
import dotenv
import os
project_dir = os.path.join(os.path.dirname(__file__), os.pardir)
dotenv_path = os.path.join(project_dir, '.env')
dotenv.load_dotenv(dotenv_path)
import time
class SchoolCrawler:
"""Take list of cities urls and create pandas DataFrame.
Can save as CSV or store in Database
"""
    def __init__(self, cities=None):
        """Create instance
        Args:
            cities (List): list of city names to scrape
        """
self.search_url = 'https://www.greatschools.org/search/search.page'
self.base_url = 'https://www.greatschools.org'
        self.cities = cities if cities is not None else []
self.cities_soup = self.search_all_cities()
self.schools = self.parse_all_cities()
self.df = pd.DataFrame(self.schools)
def to_csv(self, path='./data/raw/school_ratings.csv'):
self.df.to_csv(path)
def parse_soup(self, school_soup):
"""Turn BS4 into dictionary
Args:
school_soup (BS4): tree to parse
Returns:
parsed_school (dict): results
"""
parsed_school = {
'name': '',
'rating': 0,
'street': '',
'city': '',
'zip': 0,
'lat': 0,
'long': 0,
}
        parsed_school['name'] = school_soup.find('a', class_='name').get_text()
        rating_tag = school_soup.find('div', class_='circle-rating--small')
        parsed_school['rating'] = int(rating_tag.get_text().split('/')[0]) if rating_tag else 0
        # Only keep highly rated schools (9/10 or better).
        if parsed_school['rating'] < 9:
            return None
address = school_soup.find('div', class_='address').get_text()
parsed_school['lat'], parsed_school['long'] = self.lat_long(address)
address = address.split(',')
parsed_school['street'] = address[0]
parsed_school['city'] = address[1]
parsed_school['state'] = address[2]
parsed_school['zip'] = address[3]
return parsed_school
def parse_all_cities(self):
results = []
for soup in self.cities_soup:
for li in soup:
result = self.parse_soup(li)
if result:
results.append(result)
print(result)
return results
def get_schools_html(self, city):
"""Get first 25 'li' elements by city
"""
city = city.replace(' ', '%20')
search_string = f'{self.search_url}?q={city}&sort=rating'
try:
with webdriver.Safari() as browser:
browser.get(search_string)
WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "school-list")))
search_results_html = BeautifulSoup(browser.page_source, features="lxml")
schools = search_results_html.find_all('li', class_='unsaved')
return schools
        except Exception:
            print(f'error fetching: {search_string}')
            return []
def search_all_cities(self):
results = []
for city in self.cities:
soup = self.get_schools_html(city)
results.append(soup)
return results
    def lat_long(self, address):
        """Geocode an address, returning (lat, long) or (0, 0) on failure."""
        key = os.getenv("GEO_CODE")
        geocoder = OpenCageGeocode(key)
        try:
            results = geocoder.geocode(address)
            lat = results[0]['geometry']['lat']
            long = results[0]['geometry']['lng']
            time.sleep(1)
            return (lat, long)
        except RateLimitExceededError as ex:
            print(ex)
            return (0, 0)
cities = ['Seattle', 'Kenmore', 'Sammamish', 'Federal Way', 'Maple Valley',
'Bellevue', 'Duvall', 'Auburn', 'Kent', 'Redmond', 'Issaquah',
'Renton', 'Kirkland', 'Mercer Island', 'Snoqualmie', 'Enumclaw',
'Bothell', 'Fall City', 'North Bend', 'Vashon', 'Woodinville',
'Carnation', 'Black Diamond', 'Medina']
sc = SchoolCrawler(cities)
print(len(sc.schools))
sc.to_csv()
```
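The crawler loads its OpenCage API key from a `.env` file at the project root and reads it as `GEO_CODE` inside `lat_long`. A minimal sketch of checking that wiring before a run (the key value is a placeholder and the check itself is not part of the original script):

```python
# Assumes a .env file at the project root containing a line such as:
#   GEO_CODE=your-opencage-api-key
import os
import dotenv

project_dir = os.path.join(os.path.dirname(__file__), os.pardir)
dotenv.load_dotenv(os.path.join(project_dir, '.env'))

if os.getenv("GEO_CODE") is None:
    raise SystemExit("GEO_CODE is not set; SchoolCrawler.lat_long would geocode with key=None.")
```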
|
{
"source": "Jesse989/tanzania",
"score": 3
}
|
#### File: tanzania/notebooks/preprocessing.py
```python
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
import pandas as pd
import math
class GPSHeightImputer(BaseEstimator, TransformerMixin):
    def __init__(self, init_radius=0.1, increment_radius=0.3, method='custom'):
self.column_names = []
self.init_radius = init_radius
self.increment_radius = increment_radius
self.method = method
def __get_subset_records(self, latitude, longitude, df, radius):
latitude_from = latitude - radius
latitude_to = latitude + radius
longitude_from = longitude - radius
longitude_to = longitude + radius
df_temp = df[(df['latitude'] >= latitude_from) & (df['latitude'] <= latitude_to) &
(df['longitude'] >= longitude_from) & (df['longitude'] <= longitude_to)]
return df_temp
def fit(self, X, y=None):
if self.method == 'custom':
X['gps_height'] = X['gps_height'].astype(float)
X['latitude'] = X['latitude'].astype(float)
X['longitude'] = X['longitude'].astype(float)
self.df = X[X['gps_height'] != 0]
elif self.method == 'median':
X['gps_height'] = X['gps_height'].astype(float)
#X['gps_height'] = X['gps_height'].fillna(0)
self.median = np.median(list(X[X['gps_height'] != 0]['gps_height']))
if math.isnan(self.median):
self.median = 0
elif self.method == 'mean':
X['gps_height'] = X['gps_height'].astype(float)
#X['gps_height'] = X['gps_height'].fillna(0)
self.mean = np.mean(list(X[X['gps_height'] != 0]['gps_height']))
if math.isnan(self.mean):
self.mean = 0
self.column_names = X.columns
return self
def transform(self,X):
if self.method == 'custom':
X['gps_height'] = X['gps_height'].astype(float)
X['latitude'] = X['latitude'].astype(float)
X['longitude'] = X['longitude'].astype(float)
gps_height_transformed = []
for latitude, longitude, gps_height in \
zip(X['latitude'],X['longitude'],X['gps_height']):
radius = self.init_radius
if gps_height == 0:
gps_height_temp = gps_height
while gps_height_temp == 0 and radius <= 2:
df_temp = self.__get_subset_records\
(latitude,longitude,self.df,radius)
gps_height_temp = np.mean(df_temp[df_temp['gps_height']!=0]\
['gps_height'])
if math.isnan(gps_height_temp):
gps_height_temp = 0
radius = self.increment_radius + radius
else:
gps_height_temp = gps_height
gps_height_transformed.append(gps_height_temp)
X['gps_height'] = gps_height_transformed
#self.column_names = list(X.columns)
elif self.method == 'median':
gps_height = np.array(list(X['gps_height']))
gps_height[gps_height == 0] = self.median
#self.column_names = list(X.columns)
#return X[['latitude','longitude','gps_height']]
X['gps_height'] = gps_height
elif self.method == 'mean':
gps_height = np.array(list(X['gps_height']))
gps_height[gps_height == 0] = self.mean
#self.column_names = list(X.columns)
#return X[['latitude','longitude','gps_height']]
X['gps_height'] = gps_height
self.column_names = X.columns
X['gps_height'] = X['gps_height'].astype(float)
X['gps_height'] = X['gps_height'].fillna(0)
return X
# Strategy for handling gps_height (implemented by GPSHeightImputer above):
# In fit(), save the input rows that have gps_height > 0, together with their
# latitude and longitude.
# In transform(), whenever gps_height == 0, start with a search radius of 0.1
# degrees and look for nearby rows with non-zero gps_height. If any are found,
# use their average; otherwise grow the radius by 0.3 degrees (0.1 degree is
# roughly 11 km) and try again. If nothing is found by the time the radius
# reaches 2 degrees, leave the value as 0.
# (A small toy-DataFrame illustration follows after this module.)
class LatLongImputer(BaseEstimator, TransformerMixin):
def __init__(self, method='custom'):
self.column_names = []
self.method = method
self.df = pd.DataFrame()
self.long_mean_map = {}
self.lat_mean_map = {}
pass
def __generate_mean_maps(self):
temp_df = self.df[(self.df['latitude'] != -2e-08) & (self.df['longitude'] != 0)]
for geo in ['ward', 'region', 'basin']:
self.long_mean_map[geo] = dict(zip(temp_df.groupby(geo)['longitude'].mean(
).keys(), temp_df.groupby(geo)['longitude'].mean().values))
self.lat_mean_map[geo] = dict(zip(temp_df.groupby(geo)['latitude'].mean(
).keys(), temp_df.groupby(geo)['latitude'].mean().values))
def fit(self, X, y=None):
if self.method == 'mean':
# find mean of all non-zero values
self.mean_lat = np.mean(X[X['latitude'] != -2e-08]['latitude'])
self.mean_long = np.mean(X[X['longitude'] != 0]['longitude'])
elif self.method == 'median':
# find median of all non-zero values
self.median_lat = np.median(X[X['latitude'] != -2e-08]['latitude'])
self.median_long = np.median(X[X['longitude'] != 0]['longitude'])
elif self.method == 'custom':
self.df = X
self.__generate_mean_maps()
self.column_names = ['latitude', 'longitude', 'gps_height']
return self
def transform(self, X):
if self.method == 'mean':
X['latitude'].replace(-2e-08, self.mean_lat, inplace=True)
X['longitude'].replace(0, self.mean_long, inplace=True)
elif self.method == 'median':
X['latitude'].replace(-2e-08, self.median_lat, inplace=True)
X['longitude'].replace(0, self.median_long, inplace=True)
elif self.method == 'custom':
            # Fill placeholder coordinates with the ward-level means learned in fit();
            # wards unseen during fit are left as NaN.
            missing_lat = X['latitude'] == -2e-08
            missing_long = X['longitude'] == 0
            X.loc[missing_lat, 'latitude'] = X.loc[missing_lat, 'ward'].map(self.lat_mean_map['ward'])
            X.loc[missing_long, 'longitude'] = X.loc[missing_long, 'ward'].map(self.long_mean_map['ward'])
self.column_names = X.columns
return X
# will work the same way as the gps imputer.
class PopulationImputer(BaseEstimator, TransformerMixin):
def __init__(self, method='custom'):
        self.column_names = []
self.method = method
self.df = pd.DataFrame()
def fit(self, X, y=None):
if self.method == 'mean':
self.mean = np.mean(X[X['population'] > 0]['population'])
elif self.method == 'median':
self.median = np.median(X[X['population'] > 0]['population'])
elif self.method == 'custom':
self.df['population'] = X[X['population'] > 0]['population']
self.column_names = ['latitude', 'longitude', 'population']
return self
def transform(self, X):
X.fillna(0, inplace=True)
if self.method == 'mean':
X['population'].replace(0, self.mean, inplace=True)
elif self.method == 'median':
X['population'].replace(0, self.median, inplace=True)
elif self.method == 'custom':
pass
self.column_names = ['latitude', 'longitude', 'population']
return X[['latitude', 'longitude', 'population']]
class ConstructionYearTransformer(BaseEstimator, TransformerMixin):
def __init__(self,method = 'custom'):
self.column_names = []
#self.init_radius = init_radius
#self.increment_radius = increment_radius
self.method = method
pass ##Nothing else to do
def fit(self, X, y=None):
X['construction_year'] = X['construction_year'].astype(float)
if self.method == 'custom':
year_recorded = X[X['construction_year'] > 0]\
['date_recorded'].\
apply(lambda x: int(x.split("-")[0]))
year_constructed = X[X['construction_year'] > 0]['construction_year']
self.median_age = np.median(year_recorded - year_constructed)
self.column_names = ['age']
return self
if self.method == 'median':
X['construction_year'] = X['construction_year'].astype(float)
#X['gps_height'] = X['gps_height'].fillna(0)
self.median = \
np.median(list(X[X['construction_year'] != 0]['construction_year']))
if math.isnan(self.median):
self.median = 0
self.column_names = ['construction_year']
return self
if self.method == 'mean':
X['construction_year'] = X['construction_year'].astype(float)
#X['gps_height'] = X['gps_height'].fillna(0)
self.mean = np.mean(list(X[X['construction_year'] != 0]['construction_year']))
if math.isnan(self.mean):
self.mean = 0
self.column_names = ['construction_year']
return self
if self.method == 'ignore':
self.column_names = ['construction_year']
return self
def transform(self,X):
if self.method == 'custom':
year_recorded = list(X['date_recorded'].apply(lambda x: int(x.split("-")[0])))
year_constructed = list(X['construction_year'])
age = []
for i,j in enumerate(year_constructed):
if j == 0:
age.append(self.median_age)
else:
temp_age = year_recorded[i] - year_constructed[i]
if temp_age < 0:
temp_age = self.median_age
age.append(temp_age)
X['age'] = age
self.column_names = ['age']
#self.column_names = X.columns
return X[['age']]
if self.method == 'median':
X['construction_year'] = X['construction_year'].astype(float)
X['construction_year'] = X['construction_year'].fillna(0)
construction_year = np.array(list(X['construction_year']))
construction_year[construction_year == 0] = self.median
self.column_names = ['construction_year']
X['construction_year'] = construction_year
return X[['construction_year']]
if self.method == 'mean':
X['construction_year'] = X['construction_year'].astype(float)
X['construction_year'] = X['construction_year'].fillna(0)
construction_year = np.array(list(X['construction_year']))
construction_year[construction_year == 0] = self.mean
self.column_names = ['construction_year']
X['construction_year'] = construction_year
return X[['construction_year']]
if self.method == 'ignore':
X['construction_year'] = X['construction_year'].astype(float)
X['construction_year'] = X['construction_year'].fillna(0)
self.column_names = ['construction_year']
return X[['construction_year']]
# take columns and turn them into 3 numerical columns representing percent of target
class HighCardTransformer(BaseEstimator, TransformerMixin):
def __init__(self):
self.column_names = []
self.df = pd.DataFrame()
self.mean_map = {}
pass
def fit(self, X, y=None):
X = pd.concat([X, y], axis=1)
self.target_col = y.name
dummies_status = pd.get_dummies(X[self.target_col])
dummies_status.columns = [ f"{self.target_col}_{column_name}" for column_name in dummies_status.columns.tolist()]
self.dummies = dummies_status.columns
X = X.join(dummies_status)
for target in self.dummies:
self.mean_map[target] = {}
for col in X.columns:
for val in X[col].unique():
mean = np.mean(X[X[col] == val][target])
if (np.isnan(mean)) or (val in ['0','-']):
mean = 0
                    self.mean_map[target][val] = self.mean_map[target].get(val, mean)
self.column_names = X.columns
return self
def transform(self, X):
for feature_name in X.columns:
for target_col in self.dummies:
X[feature_name+'_'+target_col[-1:]] = X[feature_name].map(self.mean_map[target_col])
X = X.drop(feature_name, axis=1)
self.column_names = X.columns
return X
# Takes 'region' and 'district_code' and combines them together
class DistrictCodeMerge(BaseEstimator, TransformerMixin):
def __init__(self):
self.column_names = []
pass
def fit(self, X, y=None):
self.column_names = ['district']
return self
def transform(self, X):
X['district'] = X['region'] + ', ' + X['district_code'].astype(str)
self.column_names = ['district']
return X[['district']]
# Take in 'extraction_type', 'extraction_type_group', 'extraction_type_class' and flatten them
class ExtractionMerge(BaseEstimator, TransformerMixin):
def __init__(self):
self.column_names = []
pass
def __unique(self, sequence):
seen = set()
return [x for x in sequence if not (x in seen or seen.add(x))]
def __merge(self, x):
x = x.split(',')
x = self.__unique(x)
return ":".join(x)
def fit(self, X, y=None):
self.column_names = ['extraction']
return self
def transform(self, X):
X['extraction'] = X['extraction_type_class'] + ',' + X['extraction_type_group'] + ',' + X['extraction_type']
X['extraction'] = X['extraction'].apply(self.__merge)
self.column_names = ['extraction']
return X[self.column_names]
# will work the same way as the gps imputer.
class MeanPumpTransformer(BaseEstimator, TransformerMixin):
def __init__(self, init_radius=0.1, increment_radius=0.3):
self.column_names = []
self.init_radius = init_radius
self.increment_radius = increment_radius
self.df = pd.DataFrame()
def __get_subset_records(self, latitude, longitude, df, radius):
latitude_from = latitude - radius
latitude_to = latitude + radius
longitude_from = longitude - radius
longitude_to = longitude + radius
df_temp = df[(df['latitude'] >= latitude_from) & (df['latitude'] <= latitude_to) & \
(df['longitude'] >= longitude_from) & (df['longitude'] <= longitude_to)]
return df_temp
def fit(self, X, y=None):
self.df['status_code'] = y.astype('category').cat.codes
self.df['latitude'] = X[X['latitude'] != -2e-08]['latitude']
self.df['longitude'] = X[X['longitude'] != 0]['longitude']
self.df.sort_values(['latitude', 'longitude'], inplace=True)
self.column_names = ['latitude', 'longitude']
return self
def transform(self, X):
mean_pump_functionality = []
for latitude, longitude in zip(X['latitude'],X['longitude']):
radius = self.init_radius
mean_pump_temp = 0
            while mean_pump_temp == 0 and radius <= 2:
                df_temp = self.__get_subset_records(latitude, longitude, self.df, radius)
                mean_pump_temp = np.mean(df_temp['status_code'])
                # An empty neighbourhood yields nan; treat it as "not found" and widen the search.
                if math.isnan(mean_pump_temp):
                    mean_pump_temp = 0
                radius = self.increment_radius + radius
mean_pump_functionality.append(mean_pump_temp)
X['mean_pump_functionality'] = mean_pump_functionality
self.column_names = X.columns
return X[['mean_pump_functionality']]
```
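A minimal sketch of how the radius-based `GPSHeightImputer` above behaves on a toy DataFrame (all values below are made up; with `method='custom'` a zero `gps_height` is filled from nearby non-zero rows):

```python
import pandas as pd

# Assumes GPSHeightImputer from the module above is importable.
df = pd.DataFrame({
    'latitude':   [-6.80, -6.81, -6.79, -3.30],
    'longitude':  [39.20, 39.21, 39.19, 36.60],
    'gps_height': [120.0, 140.0, 0.0, 500.0],
})

imputer = GPSHeightImputer(init_radius=0.1, increment_radius=0.3, method='custom')
imputed = imputer.fit(df.copy()).transform(df.copy())

# The third row falls within 0.1 degrees of the first two, so its gps_height
# becomes their mean (130.0); the distant fourth row already has a value.
print(imputed['gps_height'].tolist())  # [120.0, 140.0, 130.0, 500.0]
```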
|
{
"source": "jesse9/dlib2tensorflow",
"score": 3
}
|
#### File: dlib2tensorflow/converter/weights.py
```python
import numpy as np
from .xml_analyzer import parse_xml, get_conv_weights, get_affine_weights, get_fc_weights
def load_weights_in_affine_layers(model, reversed_affine_layers_info):
# Load weights in the affine layers
for af in reversed_affine_layers_info:
g, b = np.split(af['weights'], 2)
model.get_layer(af['name']).set_weights([g, b])
def load_weights_in_conv_layers(model, reversed_conv_layers_info):
# Load weights in the convolution layers
for cv in reversed_conv_layers_info:
# we need to first separate bias from W
weights = cv['weights']
num_filters = cv['num_filters']
w = weights[:-num_filters]
b = weights[-num_filters:]
mw = model.get_layer(cv['name']).get_weights()
assert len(mw) == 2
# we need to manually reshape and then transpose it
assert cv['nc'] == cv['nr']
filter_size = cv['nc']
depth = int(len(w)/(num_filters * filter_size * filter_size))
reshaped_w = np.reshape(w, [num_filters, depth, filter_size, filter_size])
transposed_w = np.transpose(reshaped_w, [2,3,1,0])
model.get_layer(cv['name']).set_weights([transposed_w, b])
def load_weights_in_fc_layer(model, fc_weights):
# Load weights in the fully connected layer
fcw = model.get_layer("embedding_layer").get_weights()
reshaped_fcw = np.reshape(fc_weights, fcw[0].shape)
model.get_layer("embedding_layer").set_weights([reshaped_fcw])
def load_weights(model, xml_weights):
xdict = parse_xml(xml_weights)
conv_layers_info = get_conv_weights(xdict)
reversed_conv_layers_info = conv_layers_info[::-1]
affine_layers_info = get_affine_weights(xdict)
reversed_affine_layers_info = affine_layers_info[::-1]
fc_weights = get_fc_weights(xdict)
load_weights_in_affine_layers(model, reversed_affine_layers_info)
load_weights_in_conv_layers(model, reversed_conv_layers_info)
load_weights_in_fc_layer(model, fc_weights)
```
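The convolution loader above assumes dlib serializes each conv layer as a flat vector of filter weights laid out `[num_filters, depth, rows, cols]` followed by `num_filters` biases, while a Keras `Conv2D` kernel is `[rows, cols, in_channels, out_channels]`; hence the reshape followed by the `[2, 3, 1, 0]` transpose. A small shape-only check of that conversion (the layer sizes are made up):

```python
import numpy as np

num_filters, depth, filter_size = 32, 16, 3
flat = np.zeros(num_filters * depth * filter_size * filter_size + num_filters, dtype=np.float32)

# Split off the trailing biases, then reorder the filter weights.
w, b = flat[:-num_filters], flat[-num_filters:]
reshaped_w = np.reshape(w, [num_filters, depth, filter_size, filter_size])
transposed_w = np.transpose(reshaped_w, [2, 3, 1, 0])

# Keras Conv2D expects kernels shaped (rows, cols, in_channels, out_channels).
assert transposed_w.shape == (filter_size, filter_size, depth, num_filters)
assert b.shape == (num_filters,)
```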
|
{
"source": "JesseAbram/arbitrum",
"score": 2
}
|
#### File: arb-compiler-evm/arbitrum/ast.py
```python
from . import instructions
from . import value
PUSH_WEIGHT = 5
IF_ELSE_STATEMENT = 1
IF_STATEMENT = 2
WHILE_STATEMENT = 3
BLOCK_STATEMENT = 4
CALL_STATEMENT = 5
IMMEDIATE_OP = 6
INDIRECT_PUSH_STATEMENT = 7
BASIC_OP = 8
AVM_LABEL = 9
AVM_UNIQUE_LABEL = 10
FUNC_DEFINITION = 11
CAST_STATEMENT = 12
SET_ERROR_HANDLER_STATEMENT = 13
AVM_LABELED_POS = 14
AVM_LABELED_CODE_POINT = 15
class ASTNode:
def __init__(self, asttype, path):
if path is None:
path = []
self.path = path
self.asttype = asttype
def add_node(self, typ):
self.path.append(typ)
def add_label_to_ast(node, label):
node.add_node(label)
return node
# def impl(op):
# op.add_node(label)
# return op
# return node.modify_ast(impl)
class BlockStatement(ASTNode):
def __init__(self, code, path=None):
super(BlockStatement, self).__init__(BLOCK_STATEMENT, path)
assert isinstance(code, list)
self.code = code
self.stack_mod_saved = None
def clone(self):
return BlockStatement([op.clone() for op in self.code], list(self.path))
def __repr__(self):
res = "["
for i, op in enumerate(self.code):
res += str(op)
if i < len(self.code) - 1:
res += ", "
res += "]"
return res
def __len__(self):
return sum(len(op) for op in self.code)
def stack_mod(self):
if self.stack_mod_saved is None:
expectations = []
net_stacks = [0]
net = 0
for op in self.code:
mods, expect = op.stack_mod()
expectations += expect
net -= mods["pop"]
net_stacks.append(net)
net += mods["push"]
pop_count = -min(net_stacks)
push_count = max(net + pop_count, 0)
self.stack_mod_saved = (
{"pop": pop_count, "push": push_count},
expectations,
)
return self.stack_mod_saved
def typecheck(self, stack):
for op in self.code:
op.typecheck(stack)
def traverse_ast(self, func):
func(self)
for op in self.code:
op.traverse_ast(func)
def modify_ast(self, func):
self.code = [op.modify_ast(func) for op in self.code]
return func(self)
class IfElseStatement(ASTNode):
def __init__(self, true_code, false_code, path=None):
super(IfElseStatement, self).__init__(IF_ELSE_STATEMENT, path)
self.true_code = true_code
self.false_code = false_code
def clone(self):
return IfElseStatement(
self.true_code.clone(), self.false_code.clone(), list(self.path)
)
def __repr__(self):
return "IfElse({}, {})".format(self.true_code, self.false_code)
def __len__(self):
return len(self.true_code) + len(self.false_code) + 2 + 2 * PUSH_WEIGHT
def stack_mod(self):
true_mods, true_expectations = self.true_code.stack_mod()
false_mods, false_expectations = self.false_code.stack_mod()
expectations = true_expectations + false_expectations
expectations.append(
(
"eq",
true_mods["push"] - true_mods["pop"],
false_mods["push"] - false_mods["pop"],
)
)
return (
{
"pop": max(true_mods["pop"], false_mods["pop"]) + 1,
"push": max(true_mods["push"], false_mods["push"]),
},
expectations,
)
def typecheck(self, stack):
stack.pop(value.IntType())
temp = stack.clone()
self.true_code.typecheck(stack)
self.false_code.typecheck(temp)
stack.merge(temp)
def traverse_ast(self, func):
func(self)
self.true_code.traverse_ast(func)
self.false_code.traverse_ast(func)
def modify_ast(self, func):
self.true_code = self.true_code.modify_ast(func)
self.false_code = self.false_code.modify_ast(func)
return func(self)
class CastStatement(ASTNode):
def __init__(self, typ, path=None):
super(CastStatement, self).__init__(CAST_STATEMENT, path)
self.typ = typ
def clone(self):
return CastStatement(self.typ, list(self.path))
def __repr__(self):
return "CastStatement({})".format(self.typ)
def __len__(self):
return 0
def stack_mod(self):
return {"pop": 1, "push": 1}, []
def typecheck(self, stack):
stack.pop()
stack.push(self.typ)
def traverse_ast(self, func):
pass
def modify_ast(self, func):
return func(self)
class IfStatement(ASTNode):
def __init__(self, true_code, path=None):
super(IfStatement, self).__init__(IF_STATEMENT, path)
self.true_code = true_code
def clone(self):
return IfStatement(self.true_code.clone(), list(self.path))
def __repr__(self):
return "If({})".format(self.true_code)
def __len__(self):
return len(self.true_code) + 2 + PUSH_WEIGHT
def stack_mod(self):
true_mods, true_expectations = self.true_code.stack_mod()
expectations = list(true_expectations)
expectations.append(("eq", true_mods["push"] - true_mods["pop"], 0))
return {"pop": true_mods["pop"] + 1, "push": true_mods["push"]}, expectations
def typecheck(self, stack):
stack.pop(value.IntType())
temp = stack.clone()
self.true_code.typecheck(stack)
stack.merge(temp)
def traverse_ast(self, func):
func(self)
self.true_code.traverse_ast(func)
def modify_ast(self, func):
self.true_code = self.true_code.modify_ast(func)
return func(self)
class WhileStatement(ASTNode):
def __init__(self, cond_code, body_code, path=None):
super(WhileStatement, self).__init__(WHILE_STATEMENT, path)
self.cond_code = cond_code
self.body_code = body_code
def clone(self):
return WhileStatement(
self.cond_code.clone(), self.body_code.clone(), list(self.path)
)
def __repr__(self):
return "WhileStatement({}, {})".format(self.cond_code, self.body_code)
def __len__(self):
return len(self.cond_code) + len(self.body_code) + 3 + 2 * PUSH_WEIGHT
def stack_mod(self):
cmod, cond_expectations = self.cond_code.stack_mod()
bmod, body_expectation = self.body_code.stack_mod()
expectations = cond_expectations + body_expectation
expectations.append(
(
"eq",
cmod["push"] - cmod["pop"] - 1 + bmod["push"] - bmod["pop"],
0,
"while_loop({}, {}, {}, {})".format(
cmod["pop"], cmod["push"], bmod["pop"], bmod["push"]
),
)
)
pop_count = max(cmod["pop"], bmod["pop"] + 1 - cmod["push"] + cmod["pop"])
mods = {"push": pop_count + cmod["push"] - cmod["pop"] - 1, "pop": pop_count}
return mods, expectations
def typecheck(self, stack):
temp = stack.clone()
self.cond_code.typecheck(stack)
stack.pop(value.IntType())
self.body_code.typecheck(stack)
temp.merge(stack)
def traverse_ast(self, func):
func(self)
self.cond_code.traverse_ast(func)
self.body_code.traverse_ast(func)
def modify_ast(self, func):
self.cond_code = self.cond_code.modify_ast(func)
self.body_code = self.body_code.modify_ast(func)
return func(self)
class FuncDefinition(ASTNode):
def __init__(self, name, func, code, is_callable, path=None):
super(FuncDefinition, self).__init__(FUNC_DEFINITION, path)
self.name = name
self.func = func
self.code = code
self.is_callable = is_callable
if not isinstance(code, ASTNode):
raise Exception("Func definition expects ASTNode for code")
def clone(self):
raise Exception("Func definitions aren't clonable")
def __repr__(self):
return "FuncDefinition({})".format(self.code)
def __len__(self):
return len(self.code)
def can_typecheck(self):
return (
hasattr(self.func, "pops")
and hasattr(self.func, "pushes")
and self.func.typecheck
)
def typecheck(self):
stack = value.TypeStack()
for typ in self.func.pops[::-1]:
stack.push(typ)
try:
self.code.typecheck(stack)
except Exception as err:
raise Exception("Error typechecking {} body: {}".format(self.name, err))
try:
for typ in self.func.pushes:
stack.pop(typ)
except Exception as err:
raise Exception(
"Error typechecking {} return vals: {}".format(self.name, err)
)
def traverse_ast(self, func):
func(self)
self.code.traverse_ast(func)
def modify_ast(self, func):
self.code = self.code.modify_ast(func)
return func(self)
class CallStatement(ASTNode):
def __init__(self, func, path=None):
super(CallStatement, self).__init__(CALL_STATEMENT, path)
self.func = func
self.func_name = "{}.{}".format(func.__module__, func.__name__)
self.is_callable = True
if hasattr(self.func, "uncountable"):
self.mods = {"pop": 0, "push": 0}, [("invalid",)]
elif not hasattr(self.func, "pushes") or not hasattr(self.func, "pops"):
raise Exception("Call {} has unknown stack mods".format(self.func_name))
else:
self.mods = {"pop": len(self.func.pops), "push": len(self.func.pushes)}, []
self.pops = self.func.pops
self.pushes = self.func.pushes
def clone(self):
return CallStatement(self.func, list(self.path))
def __repr__(self):
return "Call({})".format(self.func_name)
def __len__(self):
# Approximation
return 12
def stack_mod(self):
return self.mods[0], self.mods[1]
def typecheck(self, stack):
try:
for typ in self.func.pops:
stack.pop(typ)
for typ in self.func.pushes[::-1]:
stack.push(typ)
except Exception as err:
raise Exception(
"Type error calling func {}: {}".format(self.func_name, err)
)
def traverse_ast(self, func):
func(self)
def modify_ast(self, func):
return func(self)
class SetErrorHandlerFunctionStatement(ASTNode):
def __init__(self, func, path=None):
super(SetErrorHandlerFunctionStatement, self).__init__(
SET_ERROR_HANDLER_STATEMENT, path
)
self.func = func
self.func_name = "{}.{}".format(func.__module__, func.__name__)
self.is_callable = False
def clone(self):
return SetErrorHandlerFunctionStatement(self.func, list(self.path))
def __repr__(self):
return "SetErrorHandlerFunction({})".format(self.func_name)
def __len__(self):
# Approximation
return 1
def stack_mod(self):
return {"pop": 0, "push": 0}, []
def typecheck(self, stack):
pass
def traverse_ast(self, func):
func(self)
def modify_ast(self, func):
return func(self)
class IndirectPushStatement(ASTNode):
def __init__(self, val, path=None):
super(IndirectPushStatement, self).__init__(INDIRECT_PUSH_STATEMENT, path)
self.val = val
def clone(self):
return IndirectPushStatement(self.val, list(self.path))
def __len__(self):
# Approximation
return 6
def stack_mod(self):
return {"pop": 0, "push": 1}, []
def typecheck(self, stack):
if isinstance(self.val, (AVMLabel,)):
typ = value.CodePointType()
else:
typ = self.val
stack.push(typ)
def traverse_ast(self, func):
func(self)
def modify_ast(self, func):
return func(self)
def __repr__(self):
return "Push({})".format(self.val)
class AVMLabel(ASTNode):
def __init__(self, name, path=None):
super(AVMLabel, self).__init__(AVM_LABEL, path)
self.name = name
# print("Label", name)
def clone(self):
raise Exception("You can't clone a label '{}'".format(self.name))
def __len__(self):
return 0
def __lt__(self, other):
return self.name < other.name
def stack_mod(self):
return {"pop": 0, "push": 0}, []
def typecheck(self, stack):
pass
def traverse_ast(self, func):
func(self)
def modify_ast(self, func):
return func(self)
def __repr__(self):
return "AVMLabel({})".format(self.name)
def __eq__(self, other):
if not isinstance(other, AVMLabel):
return False
return self.name == other.name
def __ne__(self, other):
return self.name != other.name
def __hash__(self):
assert isinstance(self.name, str)
return self.name.__hash__()
class AVMUniqueLabel(ASTNode):
def __init__(self, name, path=None):
super(AVMUniqueLabel, self).__init__(AVM_UNIQUE_LABEL, path)
self.name = name
def clone(self):
raise Exception("You can't clone a label '{}'".format(self.name))
def __len__(self):
return 0
def stack_mod(self):
return {"pop": 0, "push": 0}, []
def typecheck(self, stack):
pass
def traverse_ast(self, func):
func(self)
def modify_ast(self, func):
return func(self)
def __repr__(self):
return "AVMUniqueLabel({})".format(self.name)
def __eq__(self, other):
if not isinstance(other, AVMUniqueLabel):
return False
return self.name == other.name
def __ne__(self, other):
return self.name != other.name
def __hash__(self):
assert isinstance(self.name, str)
return self.name.__hash__()
class AVMLabeledPos(ASTNode):
def __init__(self, name, pc, path=None):
super(AVMLabeledPos, self).__init__(AVM_LABELED_POS, path)
self.name = name
self.pc = pc
def clone(self):
return AVMLabeledPos(self.name, self.pc, list(self.path))
def __len__(self):
return 0
def stack_mod(self):
return {"pop": 0, "push": 0}, []
def typecheck(self, stack):
pass
def traverse_ast(self, func):
func(self)
def modify_ast(self, func):
return func(self)
def __repr__(self):
return "AVMLabeledPos({}, {})".format(self.name, self.pc)
def __eq__(self, other):
if not isinstance(other, AVMLabeledPos):
return False
return self.pc == other.pc
class AVMLabeledCodePoint(ASTNode):
def __init__(self, name, pc, path=None):
super(AVMLabeledCodePoint, self).__init__(AVM_LABELED_CODE_POINT, path)
self.name = name
self.pc = pc
def clone(self):
return AVMLabeledCodePoint(self.name, self.pc, list(self.path))
def __len__(self):
return 0
def stack_mod(self):
return {"pop": 0, "push": 0}, []
def typecheck(self, stack):
pass
def traverse_ast(self, func):
func(self)
def modify_ast(self, func):
return func(self)
def __repr__(self):
return "AVMLabeledCodePoint({}, {})".format(self.name, self.pc)
def __eq__(self, other):
if not isinstance(other, AVMLabeledCodePoint):
return False
return self.pc == other.pc
def typecheck_tget(stack):
index = stack.pop(value.IntType())
tup = stack.pop(value.TupleType())
if isinstance(index, int) and not tup.has_member_at_index(index):
raise Exception("Tried to get index {} from tuple {}".format(index, tup))
stack.push(tup.get_tup(index))
OP_HANDLER = {
"auxpush": lambda stack: stack.push_aux(stack.pop()),
"auxpop": lambda stack: stack.push(stack.pop_aux()),
"dup0": instructions.dup0,
"dup1": instructions.dup1,
"dup2": instructions.dup2,
"swap1": instructions.swap1,
"swap2": instructions.swap2,
"tget": typecheck_tget,
"tset": instructions.tset,
}
class BasicOp(ASTNode):
def __init__(self, op_code, path=None):
super(BasicOp, self).__init__(BASIC_OP, path)
self.op_code = op_code
def clone(self):
return BasicOp(self.op_code, list(self.path))
def __len__(self):
return 1
def __repr__(self):
if self.op_code in instructions.OP_NAMES:
return instructions.OP_NAMES[self.op_code]
else:
return "Unhandled OpCode"
def get_op(self):
return self.op_code
def stack_mod(self):
info = instructions.OF_INFO[self.op_code]
mod = {"pop": len(info["pop"]), "push": len(info["push"])}
if (
instructions.OP_NAMES[self.op_code] == "jump"
or instructions.OP_NAMES[self.op_code] == "cjump"
):
return mod, [("invalid",)]
if instructions.OP_NAMES[self.op_code] == "halt":
return mod, [("invalid",)]
return mod, []
def typecheck(self, stack):
try:
name = instructions.OP_NAMES[self.op_code]
if name in OP_HANDLER:
OP_HANDLER[name](stack)
else:
info = instructions.OF_INFO[self.op_code]
for pop in info["pop"]:
stack.pop(pop)
for push in info["push"]:
stack.push(push)
except Exception as err:
raise Exception(
"Exception typechecking {}: {}".format(
instructions.OP_NAMES[self.op_code], err
)
)
def __eq__(self, other):
if not isinstance(other, BasicOp):
return False
return self.op_code == other.op_code
def traverse_ast(self, func):
func(self)
def modify_ast(self, func):
return func(self)
class ImmediateOp(ASTNode):
def __init__(self, op, val, path=None):
super(ImmediateOp, self).__init__(IMMEDIATE_OP, path)
self.op = op
self.val = val
def clone(self):
return ImmediateOp(self.op, self.val, list(self.path))
def __len__(self):
# Approximation
return 1
def __repr__(self):
return "Immediate({}, {})".format(self.op, self.val)
def get_op(self):
return self.op.get_op()
def stack_mod(self):
op_mod, constraints = self.op.stack_mod()
if op_mod["pop"] > 0:
op_mod["pop"] -= 1
else:
op_mod["push"] += 1
return op_mod, constraints
def typecheck(self, stack):
if isinstance(self.val, (AVMLabel,)):
typ = value.CodePointType()
else:
typ = self.val
stack.push(typ)
self.op.typecheck(stack)
def traverse_ast(self, func):
func(self)
def modify_ast(self, func):
return func(self)
```
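The bookkeeping in `BlockStatement.stack_mod` above computes how deeply a sequence of operations can dig into the caller's stack (`pop`) and how many values it leaves behind (`push`). A standalone sketch of the same accounting on plain `(pop, push)` pairs instead of AST nodes (the example op list is invented):

```python
def block_stack_mod(ops):
    """ops is a list of (pop, push) pairs; returns the whole block's net effect."""
    net_stacks = [0]
    net = 0
    for pops, pushes in ops:
        net -= pops            # values consumed from whatever is on the stack now
        net_stacks.append(net)
        net += pushes          # values produced for the following ops
    pop_count = -min(net_stacks)           # deepest dip below the starting stack level
    push_count = max(net + pop_count, 0)   # what is left above that borrowed depth
    return {"pop": pop_count, "push": push_count}

# An op that pushes one value followed by an op that pops two and pushes one:
# the block as a whole consumes one caller value and produces one.
assert block_stack_mod([(0, 1), (2, 1)]) == {"pop": 1, "push": 1}
```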
#### File: arbitrum/evm/contract_templates.py
```python
import eth_utils
from importlib_resources import read_text
import json
from ..annotation import modifies_stack
from .. import value
from ..ast import AVMLabel
ERC20_ADDRESS_STRING = "0xfffffffffffffffffffffffffffffffffffffffe"
ERC721_ADDRESS_STRING = "0xfffffffffffffffffffffffffffffffffffffffd"
ARBSYS_ADDRESS_STRING = "0x0000000000000000000000000000000000000064"
ARBINFO_ADDRESS_STRING = "0x0000000000000000000000000000000000000065"
ERC20_ADDRESS = eth_utils.to_int(hexstr=ERC20_ADDRESS_STRING)
ERC721_ADDRESS = eth_utils.to_int(hexstr=ERC721_ADDRESS_STRING)
def get_templates():
raw_contract_templates_data = read_text("arbitrum.evm", "contract-templates.json")
raw_contract_templates = json.loads(raw_contract_templates_data)
token_templates = {}
for raw_contract in raw_contract_templates:
token_templates[raw_contract["name"]] = raw_contract
return token_templates
def get_arbsys():
arbsys_data = read_text("arbitrum.evm", "ArbSys.json")
arbsys = json.loads(arbsys_data)
arbsys["address"] = ARBSYS_ADDRESS_STRING
arbsys["code"] = "0x"
arbsys["name"] = "ArbSys"
return arbsys
def get_arbinfo():
arbinfo_data = read_text("arbitrum.evm", "ArbInfo.json")
arbinfo = json.loads(arbinfo_data)
arbinfo["address"] = ARBINFO_ADDRESS_STRING
arbinfo["code"] = "0x100000"
arbinfo["name"] = "ArbInfo"
return arbinfo
def get_erc20_contract():
erc20 = get_templates()["ArbERC20"]
erc20["address"] = ERC20_ADDRESS_STRING
return erc20
def get_erc721_contract():
erc721 = get_templates()["ArbERC721"]
erc721["address"] = ERC721_ADDRESS_STRING
return erc721
def get_info_contract():
info = get_templates()["ArbInfo"]
info["address"] = ARBINFO_ADDRESS_STRING
return info
@modifies_stack([], [value.CodePointType()])
def erc20_codepoint(vm):
vm.push(AVMLabel("contract_entry_" + str(ERC20_ADDRESS)))
@modifies_stack([], [value.CodePointType()])
def erc721_codepoint(vm):
vm.push(AVMLabel("contract_entry_" + str(ERC721_ADDRESS)))
```
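`erc20_codepoint` and `erc721_codepoint` above look up a contract's entry point by a label built from the decimal form of its address. A short standalone illustration of that naming convention, using only `eth_utils`:

```python
import eth_utils

ERC20_ADDRESS_STRING = "0xfffffffffffffffffffffffffffffffffffffffe"
ERC20_ADDRESS = eth_utils.to_int(hexstr=ERC20_ADDRESS_STRING)

# The AVMLabel pushed by erc20_codepoint is named after the decimal address.
label_name = "contract_entry_" + str(ERC20_ADDRESS)
print(label_name)  # contract_entry_1461501637330902918203684832716283019655932542974
```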
#### File: arbitrum/std/array.py
```python
from .. import value
from ..annotation import modifies_stack
def build_array(arr):
arr_length = len(arr)
if len(arr) > 8:
base_chunk_size = arr_length // 8
ret = []
offset = 0
for chunk in range(8):
size = base_chunk_size
if (8 - chunk) <= arr_length % 8:
size += 1
ret.append(build_array(arr[offset : offset + size]))
offset += size
return value.Tuple(ret)
if arr_length == 1:
return arr[0]
return value.Tuple(arr)
def build_array_type(arr):
arr_length = len(arr)
if len(arr) > 8:
base_chunk_size = arr_length // 8
ret = []
offset = 0
for chunk in range(8):
size = base_chunk_size
if (8 - chunk) <= arr_length % 8:
size += 1
ret.append(build_array_type(arr[offset : offset + size]))
offset += size
return value.TupleType(ret)
if arr_length == 1:
return arr[0]
return value.TupleType(arr)
def array_path(arr_size, index):
if arr_size > 8:
base_chunk_size = arr_size // 8
offset = 0
for chunk in range(8):
size = base_chunk_size
if (8 - chunk) <= arr_size % 8:
size += 1
if offset <= index < offset + size:
return [chunk] + array_path(size, index - offset)
offset += size
assert False
return []
if arr_size == 1:
return []
return [index]
class Array:
def __init__(self, types):
if isinstance(types, int):
types = [value.ValueType()] * types
self.typ = build_array_type(types)
self.types = types
self.length = len(types)
def make(self):
return self.typ.empty_val()
def update_type(self, index, typ):
self.types[index] = typ
self.typ = build_array_type(self.types)
@staticmethod
def from_list(vals):
return build_array(vals)
def new(self, vm):
vm.push(self.make())
def build(self, vm):
vm.push(self.make())
for i in range(len(self.types)):
self.set_val(i)(vm)
def get(self, i):
if self.length == 1:
@modifies_stack([], [], "{}_0".format(self.length))
def get(vm):
pass
return get
def binder(index):
@modifies_stack(
[self.typ], [self.types[index]], "{}_{}".format(self.length, index)
)
def get(vm):
path = array_path(self.length, index)
for i in path:
vm.tgetn(i)
return get
return binder(i)
def set_val(self, i):
if self.length == 1:
@modifies_stack([], [], "{}_0".format(self.length))
def set_val(vm):
pass
return set_val
def binder(index):
@modifies_stack(
[self.typ, self.types[index]],
[self.typ],
"{}_{}".format(self.length, index),
)
def set_val(vm):
# [array, val]
path = array_path(self.length, index)
if len(path) == 1:
vm.tsetn(path[0])
elif len(path) == 2:
vm.swap1()
# [val, array]
vm.dup1()
vm.tgetn(path[0])
vm.tsetn(path[1])
vm.swap1()
vm.tsetn(path[0])
else:
vm.swap1()
vm.auxpush()
# [array]
for i in path[:-1]:
vm.dup0()
vm.tgetn(i)
vm.auxpop()
for i in path[::-1]:
vm.swap1()
vm.tsetn(i)
return set_val
return binder(i)
```
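The helpers above pack a flat list into nested tuples of width at most 8 (the AVM's native tuple width), and `array_path` recovers the index path into that nesting. A small illustration of both, assuming the module above is importable (e.g. `from arbitrum.std.array import build_array, array_path`) and that `value.Tuple` supports integer indexing as used in `bignum.to_python_int`:

```python
vals = list(range(20))          # 20 > 8, so the list is split into 8 chunks
packed = build_array(vals)

# Chunk sizes are 20 // 8 = 2, with the last 20 % 8 = 4 chunks one element larger:
# [2, 2, 2, 2, 3, 3, 3, 3]. Element 10 therefore sits in chunk 4 at offset 2.
assert array_path(20, 10) == [4, 2]
assert packed[4][2] == 10
```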
#### File: arbitrum/std/bignum.py
```python
from . import bigtuple
from . import random
from . import tup
from .locals import Locals
from .struct import Struct
from ..annotation import modifies_stack
from ..vm import VM
from .. import value
bignum = Struct("bignum", ["val", "size", "ispositive"])
# invariant: for all i >= size, chunk[i] returns 0
# slots in the array hold values mod 2^126
_CHUNK_BITS = 126
_CHUNK_MOD = 2 ** _CHUNK_BITS
def make_zero():
return value.Tuple([value.Tuple([]), 0, 1])
def make_from_int(pint):
if pint < 0:
return negate_static(make_from_int(-pint))
val = make_zero()
i = 0
while pint > 0:
val = setchunk_static(val, i, pint % _CHUNK_MOD)
i += 1
pint //= _CHUNK_MOD
return val
def negate_static(val):
return val.set_tup_val(2, int(not val[2]))
def setchunk_static(bignum_val, chunk_num, val):
if chunk_num + 1 > bignum_val[1]:
bignum_val = bignum_val.set_tup_val(1, chunk_num + 1)
return bignum_val.set_tup_val(0, bigtuple.set_static(bignum_val[0], chunk_num, val))
def to_python_int(big):
acc = 0
val = big[0]
size = big[1]
ispositive = big[2]
for i in range(size):
sub_val = bigtuple.get_static(val, i)
if isinstance(sub_val, value.Tuple):
sub_val = 0
acc += sub_val * (_CHUNK_MOD ** i)
if not ispositive:
acc *= -1
return acc
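# Illustrative example (not part of the original module): with _CHUNK_BITS = 126,
# make_from_int(5 + 3 * 2**126) stores chunk 0 = 5 and chunk 1 = 3 with size = 2
# and ispositive = 1, and to_python_int() reverses that encoding:
#   assert to_python_int(make_from_int(5 + 3 * 2**126)) == 5 + 3 * 2**126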
@modifies_stack(0, 1)
def zero(vm):
vm.push(make_zero())
@modifies_stack(1, 1)
def fromint(vm):
vm.push(1)
vm.swap1()
vm.push(1)
vm.swap1()
# val 1 1
vm.push(0)
bigtuple.new(vm)
bigtuple.set_val(vm)
# bigtuple 1 1
tup.make(3)(vm)
def fromPythonInt(vm, pint):
if pint < 0:
fromPythonInt(vm, -pint)
negate(vm)
else:
zero(vm)
i = 0
while pint > 0:
# bignum
vm.push(pint % _CHUNK_MOD)
vm.swap1()
vm.push(i)
vm.swap1()
setchunk(vm)
i = i + 1
pint = pint // _CHUNK_MOD
def toPythonInt(vm):
if isinstance(vm, VM): # this requires that we're in the python emulator
acc = 0
size = vm.stack[0][1]
ispositive = vm.stack[0][2]
vm.tgetn(0)
# bigtuple
for i in range(size):
vm.push(i)
vm.dup1()
bigtuple.get(vm)
if isinstance(vm.stack[0], value.Tuple):
vm.pop()
vm.push(0)
acc = acc + vm.stack[0] * (_CHUNK_MOD ** i)
vm.pop()
vm.pop()
if ispositive == 1:
return acc
else:
return -acc
else:
vm.pop()
return 0
@modifies_stack(2, 1)
def getchunk(vm):
# bignum index
vm.swap1()
vm.dup1()
# bignum index bignum
bignum.get("size")(vm)
# size index bignum
vm.dup1()
# index size index bignum
vm.lt()
# indexValid index bignum
vm.ifelse(
lambda vm: [
# index bignum
vm.swap1(),
bignum.get("val")(vm),
bigtuple.get(vm),
vm.dup0(),
vm.tnewn(0),
vm.eq(),
vm.ifelse(lambda vm: [vm.pop(), vm.push(0)]),
],
lambda vm: [
# index bignum
vm.pop(),
vm.pop(),
vm.push(0),
],
)
# result
vm.dup0()
tup.make(0)(vm)
vm.eq()
# result==[] result
vm.ifelse(lambda vm: [vm.pop(), vm.push(0)])
@modifies_stack(3, 1) # bn chunkNum val -> updatedBn
def setchunk(vm):
# update size if needed
vm.dup0()
bignum.get("size")(vm)
# bnsize bn chunkNum val
vm.dup2()
vm.push(1)
vm.add()
# chunkNum+1 bnsize bn chunkNum val
vm.gt()
vm.ifelse(
lambda vm: [
# bn chunkNum val
vm.dup1(),
vm.push(1),
vm.add(),
vm.swap1(),
bignum.set_val("size")(vm),
]
)
# update the chunk
# bn chunkNum val
vm.swap2()
vm.dup2()
# bn val chunkNum bn
vm.swap2()
vm.swap1()
vm.swap2()
# bn chunkNum val bn
bignum.get("val")(vm)
bigtuple.set_val(vm)
# updatedVal bn
vm.swap1()
bignum.set_val("val")(vm)
@modifies_stack(1, 1)
def trim(vm):
local_vars = Locals(vm, ["i", "bn"])
# bn
vm.push(1)
vm.dup1()
bignum.get("size")(vm)
vm.sub()
local_vars.make()
vm.while_loop(
lambda vm: [
local_vars.get(["bn", "i", "i"]),
getchunk(vm),
vm.iszero(),
# chunk==0 i
vm.swap1(),
vm.push(-1),
vm.slt(),
vm.bitwise_and(),
],
lambda vm: [
local_vars.get("i"),
vm.push(-1),
vm.add(),
local_vars.set_val("i"),
],
)
vm.auxpop()
tup.tbreak(2)(vm)
# i bn
vm.push(1)
vm.add()
vm.swap1()
bignum.set_val("size")(vm)
@modifies_stack(1, 1) # bignum -> lengthinbits
def bitlength(vm):
trim(vm)
vm.dup0()
# bignum bignum
bignum.get("size")(vm)
# bnsizechunks bignum
vm.dup0()
vm.ifelse(
lambda vm: [
# bnsizechunks bignum
vm.push(-1),
vm.add(),
vm.dup0(),
vm.push(_CHUNK_BITS),
vm.mul(),
# bnFullChunkBits bnchunks-1 bignum
vm.swap2(),
# bignum bnchunks-1 bnFullChunkBits
getchunk(vm),
# chunk[0] bnFullChunkBits
bitlength_chunk(vm),
vm.add(),
],
lambda vm: [
# bnsizechunks(=0) bignum
vm.pop(),
vm.pop(),
vm.push(0),
],
)
@modifies_stack(1, 1) # chunk -> lengthinbits
def bitlength_chunk(vm):
vm.push(0)
vm.swap1()
bitlength_chunk2(vm, 128)
def bitlength_chunk2(vm, size): # size must be power of 2
# chunk soFar
if size == 1:
vm.ifelse(lambda vm: [vm.push(1), vm.add()])
else:
vm.dup0()
vm.push((1 << (size // 2)) - 1)
vm.lt()
# ((1<<(size//2))-1)<chunk chunk soFar
vm.ifelse(
lambda vm: [
vm.swap1(),
vm.push(size // 2),
vm.add(),
vm.swap1(),
# chunk soFar'
vm.push(1 << (size // 2)),
vm.swap1(),
vm.div(),
]
)
# chunk soFar
bitlength_chunk2(vm, size // 2)
@modifies_stack(2, 1) # val numChunks -> val[0..(numChunks-1)]
def loworderwords(vm):
local_vars = Locals(vm, ["result", "i", "num", "limit"])
vm.push(0)
zero(vm)
local_vars.make()
vm.while_loop(
lambda vm: [local_vars.get(["i", "limit"]), vm.lt()],
lambda vm: [
local_vars.get(["num", "i", "i", "result", "i"]),
getchunk(vm),
vm.swap2(),
setchunk(vm),
local_vars.set_val("result"),
vm.push(1),
vm.add(),
local_vars.set_val("i"),
],
)
local_vars.discard("result")
@modifies_stack(2, 1) # bignum shiftBits
def shiftleft(vm):
local_vars = Locals(vm, ["res", "i", "bnsize", "bn", "blockCount"])
vm.swap1()
vm.dup0()
vm.push(_CHUNK_BITS)
vm.swap1()
vm.mod()
# bitsCount shiftBits bignum
vm.swap1()
vm.push(_CHUNK_BITS)
vm.swap1()
vm.div()
# chunksCount bitsCount bignum
vm.swap2()
vm.swap1()
# bitscount bignum blocksCount
vm.push(2)
vm.exp()
intmultiply(vm)
# bignum' blocksCount
vm.dup1()
vm.ifelse(
lambda vm: [
vm.dup0(),
bignum.get("size")(vm),
vm.push(0),
zero(vm),
local_vars.make(),
vm.while_loop(
lambda vm: [local_vars.get(["i", "bnsize"]), vm.lt()],
lambda vm: [
local_vars.get(["bn", "i", "i", "blockCount", "res", "i"]),
getchunk(vm),
# bn[i] i blockCount res i
vm.swap2(),
vm.add(),
# i+blockCount bn[i] res i
vm.swap1(),
vm.swap2(),
# res i+blockCount bn[i] i
setchunk(vm),
# res i
local_vars.set_val("res"),
# i
vm.push(1),
vm.add(),
local_vars.set_val("i"),
],
),
local_vars.discard("res"),
],
lambda vm: [
# bignum' blockcount
vm.swap1(),
vm.pop(),
],
)
trim(vm)
@modifies_stack(2, 1) # bignum shiftBits -> shiftedBignum
def shiftright(vm):
vm.swap1()
vm.dup0()
# shiftBits shiftBits bignum
vm.push(_CHUNK_BITS)
vm.swap1()
vm.mod()
# sb%chunkBits shiftBits bignum
vm.dup0()
vm.ifelse(
lambda vm: [
# shiftbits%chunkbits shiftbits bignum
vm.push(_CHUNK_BITS),
vm.sub(),
# reverseshiftbits shiftbits bignum
vm.dup0(),
vm.swap2(),
# shiftbits reverseshiftbits rsb bignum
vm.add(),
vm.swap2(),
# bignum reverseshiftbits modshiftbits
vm.swap1(),
vm.push(2),
vm.exp(),
intmultiply(vm),
vm.swap1(),
],
lambda vm: [
vm.pop(),
# shiftbits bignum
],
)
vm.push(_CHUNK_BITS)
vm.swap1()
vm.div()
# shiftchunks bignum
vm.swap1()
vm.dup0()
bignum.get("size")(vm)
# bnsize bignum shiftchunks
vm.dup2()
vm.swap1()
vm.sub()
# limit bignum shiftchunks
zero(vm)
vm.push(0)
local_vars = Locals(vm, ["i", "result", "limit", "bn", "shiftchunks"])
local_vars.make()
vm.while_loop(
lambda vm: [local_vars.get(["i", "limit"]), vm.slt()],
lambda vm: [
local_vars.get(["i", "shiftchunks", "bn"]),
vm.add(),
vm.swap1(),
getchunk(vm),
# chunk
local_vars.get(["result", "i"]),
# result i chunk
setchunk(vm),
local_vars.set_val("result"),
local_vars.get("i"),
vm.push(1),
vm.add(),
local_vars.set_val("i"),
],
)
local_vars.discard("result")
@modifies_stack(2, 1)
def sizeoflarger(vm):
# bn1 bn2
bignum.get("size")(vm)
vm.swap1()
bignum.get("size")(vm)
_max2(vm)
@modifies_stack(2, 1)
def _max2(vm):
# v1 v2
vm.dup1()
vm.dup1()
vm.lt()
# v1<v2 v1 v2
vm.ifelse(
lambda vm: [
# v1 v2
vm.pop()
],
lambda vm: [vm.swap1(), vm.pop()],
)
@modifies_stack(1, 1) # bignum -> -bignum
def negate(vm):
vm.dup0()
bignum.get("ispositive")(vm)
# ispositive bignum
vm.iszero()
vm.swap1()
bignum.set_val("ispositive")(vm)
@modifies_stack(2, 1) # bn1 bn2 -> bn1+bn2 (assume both bn1, bn2 >= 0)
def add_bothpositive(vm):
local_vars = Locals(vm, ["i", "result", "carry", "limit", "bn1", "bn2"])
# bn1 bn2
vm.dup1()
vm.dup1()
sizeoflarger(vm)
# size bn1 bn2
vm.push(0)
zero(vm)
# result 0 size bn1 bn2
vm.push(0)
# 0 result 0 size bn1 bn2
local_vars.make()
vm.while_loop(
lambda vm: [local_vars.get(["i", "limit"]), vm.lt()],
lambda vm: [
local_vars.get(["bn1", "i", "i", "bn2", "carry"]),
getchunk(vm),
# val1 i bn2 carry
vm.swap2(),
getchunk(vm),
# val2 val1 carry
vm.add(),
vm.add(),
# newval
vm.dup0(),
vm.push(_CHUNK_MOD),
vm.swap1(),
vm.div(),
# newval//chunkMod newval
local_vars.set_val("carry"),
# newval
vm.push(_CHUNK_MOD),
vm.swap1(),
vm.mod(),
# truncatedNewval
local_vars.get(["result", "i"]),
setchunk(vm),
local_vars.set_val("result"),
local_vars.get("i"),
vm.push(1),
vm.add(),
local_vars.set_val("i"),
],
)
local_vars.get("carry")
vm.push(0)
vm.dup1()
# carry 0 carry
vm.gt()
vm.ifelse(
lambda vm: [
local_vars.get(["result", "i"]),
setchunk(vm),
# result
],
lambda vm: [vm.pop(), local_vars.get("result")],
)
local_vars.discard()
@modifies_stack(2, 1) # bn1 bn2 -> difference
def subtract_bothpositive(vm):
vm.dup1()
vm.dup1()
ltbothpositive(vm)
# bn1<bn2 bn1 bn2
vm.ifelse(
lambda vm: [vm.swap1(), subtract_allpositive(vm), negate(vm)],
lambda vm: [subtract_allpositive(vm)],
)
@modifies_stack(2, 1) # bn1 bn2 -> difference
def subtract_allpositive(vm): # bn1 >= bn2 >= 0
# set up local_vars
local_vars = Locals(vm, ["i", "borrow", "limit", "bn1", "bn2"])
vm.dup0()
bignum.get("size")(vm)
vm.push(0)
vm.push(0)
local_vars.make()
vm.while_loop(
lambda vm: [local_vars.get(["i", "limit"]), vm.lt()],
lambda vm: [
local_vars.get(["bn2", "i", "borrow"]),
getchunk(vm),
# bn2[i] borrow
local_vars.get(["bn1", "i"]),
getchunk(vm),
# bn1[i] bn2[i] borrow
vm.sub(),
vm.sub(),
# diff
vm.dup0(),
vm.push(0),
vm.sgt(),
# 0>diff diff
vm.ifelse(
lambda vm: [
vm.push(1),
local_vars.set_val("borrow"),
vm.push(_CHUNK_MOD),
vm.add(),
],
lambda vm: [vm.push(0), local_vars.set_val("borrow")],
),
# diff
local_vars.get(["bn1", "i"]),
# bn1 i diff
setchunk(vm),
local_vars.set_val("bn1"),
local_vars.get("i"),
vm.push(1),
vm.add(),
local_vars.set_val("i"),
],
)
local_vars.discard("bn1")
# bn1 bn2 modulus -> (bn1+bn2)%modulus (assume bn1,bn2>=0; modulus>0)
@modifies_stack(3, 1)
def modadd(vm):
add(vm)
modallpositive(vm)
@modifies_stack(2, 1) # bn1 bn2 -> sum
def add(vm):
vm.dup1()
bignum.get("ispositive")(vm)
vm.dup1()
bignum.get("ispositive")(vm)
# ispositive1 ispositive2 bn1 bn2
# ispositive1 ispositive2 bn1 bn2
vm.ifelse(
lambda vm: [
# ispositive2 bn1 bn2
vm.ifelse(
lambda vm: [
add_bothpositive(vm),
vm.push(1),
vm.swap1(),
bignum.set_val("ispositive")(vm),
],
lambda vm: [
vm.swap1(),
negate(vm),
vm.swap1(),
subtract_bothpositive(vm),
],
)
],
lambda vm: [
vm.push(1),
vm.eq(),
# ispositive2 bn1 bn2
vm.ifelse(
lambda vm: [
# bn1 bn2
negate(vm),
vm.swap1(),
subtract_bothpositive(vm),
],
lambda vm: [
negate(vm),
vm.swap1(),
negate(vm),
add_bothpositive(vm),
negate(vm),
],
),
],
)
@modifies_stack(2, 1) # bn1 bn2 -> difference
def subtract(vm):
vm.swap1()
negate(vm)
add(vm)
# bn1 bn2 modulus -> (bn1*bn2)%modulus (assume bn1*bn2>=0, modulus>0)
@modifies_stack(3, 1)
def modmul(vm):
# TODO: make this more efficient by working with smaller intermediate values
multiply(vm)
modallpositive(vm)
@modifies_stack(2, 1) # bn1 bn2 -> bn1*bn2
def multiply(vm):
vm.dup1()
bignum.get("ispositive")(vm)
vm.dup1()
bignum.get("ispositive")(vm)
vm.eq()
# samesign bn1 bn2
vm.swap2()
multiplyignoringsign(vm)
# product samesign
vm.swap1()
vm.iszero()
vm.ifelse(lambda vm: [negate(vm)])
@modifies_stack(2, 1) # bn1 bn2 -> bn1*bn2 (assume bn1, bn2 both >= 0)
def multiplyignoringsign(vm):
local_vars = Locals(
vm, ["result", "scratch", "i", "j", "size1", "size2", "bn1", "bn2"]
)
vm.dup1()
bignum.get("size")(vm)
vm.dup1()
bignum.get("size")(vm)
vm.push(0)
vm.push(0)
zero(vm)
zero(vm)
local_vars.make()
vm.while_loop(
lambda vm: [local_vars.get(["i", "size1"]), vm.lt()],
lambda vm: [
vm.push(0),
local_vars.set_val("j"),
zero(vm),
local_vars.set_val("scratch"),
vm.while_loop(
lambda vm: [local_vars.get(["j", "size2"]), vm.lt()],
lambda vm: [
local_vars.get(["bn1", "i"]),
getchunk(vm),
local_vars.get(["bn2", "j"]),
getchunk(vm),
vm.mul(),
local_vars.get(["i", "j", "scratch"]),
# i j scratch bn1[i]*bn2[j]
vm.add(),
vm.swap1(),
setchunk(vm),
local_vars.set_val("scratch"),
local_vars.get("j"),
vm.push(1),
vm.add(),
local_vars.set_val("j"),
],
),
local_vars.get(["result", "scratch"]),
add_bothpositive(vm),
local_vars.set_val("result"),
local_vars.get("i"),
vm.push(1),
vm.add(),
local_vars.set_val("i"),
],
)
local_vars.discard("result")
@modifies_stack(2, 1) # int bignum -> int*bignum
def intmultiply(vm):
vm.dup1()
bignum.get("ispositive")(vm)
vm.dup1()
vm.push(-1)
# -1 int ispos(bignum) int bignum
vm.slt()
# ispos(int) ispos(bignum) int bignum
vm.dup0()
vm.iszero()
# isneg(int) ispos(int) ispos(bignum) int bignum
vm.ifelse(lambda vm: [vm.swap2(), vm.push(0), vm.sub(), vm.swap2()])
vm.eq()
# samesign abs(int) bignum
vm.swap2()
# bignum abs(int) samesign
intmultiplyignoringsign(vm)
# product samesign
bignum.set_val("ispositive")(vm)
@modifies_stack(2, 1) # bignum int -> bignum*int (assume int>=0)
def intmultiplyignoringsign(vm):
local_vars = Locals(vm, ["carry", "i", "limit", "bn", "int"])
# bignum int
vm.dup0()
bignum.get("size")(vm)
vm.push(0)
vm.push(0)
local_vars.make()
vm.while_loop(
lambda vm: [local_vars.get(["i", "limit"]), vm.lt()],
lambda vm: [
local_vars.get(["bn", "i", "int", "carry"]),
getchunk(vm),
vm.mul(),
vm.add(),
# prodWithCarry
vm.push(_CHUNK_MOD),
vm.dup1(),
vm.div(),
local_vars.set_val("carry"),
# prodWithCarry
vm.push(_CHUNK_MOD),
vm.swap1(),
vm.mod(),
local_vars.get(["bn", "i"]),
setchunk(vm),
local_vars.set_val("bn"),
local_vars.get("i"),
vm.push(1),
vm.add(),
local_vars.set_val("i"),
],
)
local_vars.get("carry")
vm.ifelse(
lambda vm: [
local_vars.get(["bn", "i", "carry"]),
setchunk(vm),
local_vars.set_val("bn"),
]
)
local_vars.discard("bn")
@modifies_stack(3, 1) # x y m -> (x^y)%m (assume x,y >= 0; m>0)
def modpow(vm):
# x y m
vm.dup2()
vm.swap1()
modallpositive(vm)
modpow2(vm)
@modifies_stack(3, 1) # x y m -> (x^y)%m (assume x,y >= 0; m>0, x<m)
def modpow2(vm):
vm.dup1()
# y x y m
zero(vm)
lt(vm)
vm.ifelse(
lambda vm: [
# x y m
vm.dup2(),
vm.dup2(),
# y m x y m
vm.push(1),
vm.swap1(),
shiftright(vm),
# y//2 m x y m
vm.dup2(),
modpow2(vm),
# x^(y//2)%m x y m
vm.swap2(),
# y x x^(y//2)%m m
vm.push(0),
vm.swap1(),
getchunk(vm),
# y[0] x x^(y//2)%m m
vm.push(1),
vm.bitwise_and(),
# y[0]%2 x x^(y//2)%m m
vm.ifelse(
lambda vm: [
# x x^(y//2)%m m
vm.dup2(),
# m x x^(y//2)%m m
vm.swap1(),
vm.swap2(),
# x^(y//2)%m m x m
vm.dup0(),
modmul(vm),
# x^(2*(y//2))%m x m
modmul(vm),
],
lambda vm: [vm.pop(), vm.dup0(), modmul(vm)],
),
],
lambda vm: [
# x 0 m
vm.pop(),
vm.pop(),
vm.pop(),
vm.push(1),
fromint(vm),
],
)
@modifies_stack(2, 1) # x y -> x%y (assume y>0)
def mod(vm):
mod_modpositive(vm)
@modifies_stack(2, 1) # x y -> x%y (assume y>0)
def mod_modpositive(vm):
vm.dup0()
zero(vm)
leq(vm)
vm.ifelse(
lambda vm: [
# x y
modallpositive(vm)
],
lambda vm: [
vm.dup1(),
vm.swap1(),
negate(vm),
modallpositive(vm),
# (-x)%y y
vm.dup0(),
zero(vm),
vm.eq(),
vm.ifelse(
lambda vm: [
vm.swap1(),
vm.pop(),
# bignum(0)
],
lambda vm: [vm.swap1(), subtract(vm)],
),
],
)
@modifies_stack(2, 1) # x y -> x%y (assume x>=0, y>0)
def modallpositive(vm):
divmodallpositive(vm)
vm.pop()
@modifies_stack(2, 2) # x y -> x//y x%y (assume x>=0, y>0)
def divmodallpositive(vm):
trim(vm)
vm.swap1()
trim(vm)
# denom num
vm.dup0()
div_initscale(vm)
vm.dup0()
vm.auxpush() # push initscale onto the auxstack
# initscale denom num
vm.dup0()
vm.swap2()
# denom initscale initscale num
shiftleft(vm)
# scaledDenom initscale num
vm.swap2()
# num initscale scaledDenom
shiftleft(vm)
# scaledNum scaledDenom
divmod2(vm)
# q r'
vm.swap1()
# r' q
vm.auxpop()
# initscale r' q
vm.swap1()
shiftright(vm)
trim(vm)
# r q
vm.swap1()
@modifies_stack(2, 2) # num denom -> quotient remainder
def divmod2(vm):
local_vars = Locals(vm, ["qp", "rp", "shiftbits", "shiftwords", "num", "denom"])
trim(vm)
vm.swap1()
trim(vm)
vm.swap1()
vm.dup1()
bignum.get("size")(vm)
vm.dup1()
bignum.get("size")(vm)
vm.dup1()
vm.dup1()
# numsize denomsize numsize denomsize num denom
vm.lt()
vm.ifelse(
lambda vm: [
vm.pop(),
vm.pop(),
vm.swap1(),
vm.pop(),
# num
zero(vm),
# 0 num
],
lambda vm: [
# numsize denomsize num denom
vm.eq(),
vm.ifelse(
lambda vm: [
# num denom
vm.dup1(),
vm.dup1(),
ltbothpositive(vm),
vm.ifelse(
lambda vm: [
# num denom
vm.swap1(),
vm.pop(),
zero(vm),
# 0 num
],
lambda vm: [
# num denom
subtract(vm),
vm.push(1),
fromint(vm),
# 1 num-denom
],
),
],
lambda vm: [
# num denom
vm.dup1(),
bignum.get("size")(vm),
vm.dup1(),
bignum.get("size")(vm),
# numsize denomsize num denom
vm.dup1(),
vm.push(1),
vm.add(),
vm.dup1(),
vm.eq(),
# numsize==denomsize+1 numsize denomsize num denom
vm.ifelse(
lambda vm: [vm.pop(), vm.pop(), divmod3(vm)],
lambda vm: [
vm.sub(),
vm.push(-1),
vm.add(),
# shiftwords num denom
vm.dup0(),
vm.push(_CHUNK_BITS),
vm.mul(),
# shiftbits shiftwords num denom
vm.push(0),
vm.dup0(),
local_vars.make(),
local_vars.get(["num", "shiftbits", "denom"]),
shiftright(vm),
# num' denom
divmod3(vm),
# q' r'
local_vars.set_val(["qp", "rp"]),
local_vars.get(["num", "shiftwords", "denom"]),
loworderwords(vm),
# s denom
local_vars.get(["rp", "shiftbits"]),
shiftleft(vm),
add(vm),
divmod2(vm),
# q r
local_vars.get(["qp", "shiftbits"]),
shiftleft(vm),
add(vm),
# quot rem
local_vars.discard(),
],
),
],
),
],
)
vm.swap1()
trim(vm)
vm.swap1()
@modifies_stack(2, 2) # num denom -> quotient remainder
def divmod3(vm):
local_vars = Locals(vm, ["t", "q", "num", "denom"])
vm.dup1()
vm.push(_CHUNK_MOD)
# _chunkMod denom num denom
intmultiply(vm)
vm.dup0()
vm.dup2()
# num denom*_chunkMod denom*_chunkMod num denom
geq(vm)
vm.ifelse(
lambda vm: [
# denom*_chunkMod num denom
vm.swap1(),
subtract(vm),
# num-(denom<<_chunkBits) denom
divmod3(vm),
# q r
vm.push(_CHUNK_BITS),
vm.push(1),
fromint(vm),
shiftleft(vm),
add(vm),
],
lambda vm: [
vm.pop(),
# num denom
vm.dup1(),
vm.dup1(),
divmod_approxquotient(vm),
# q num denom
vm.push(_CHUNK_MOD - 1),
vm.dup1(),
# q _chunkMod-1 q num denom
vm.gt(),
vm.ifelse(
lambda vm: [
# q num denom
vm.pop(),
vm.push(_CHUNK_MOD - 1),
]
),
# q num denom
vm.push(0),
# local_vars: ['t', 'q', 'num', 'denom']
local_vars.make(),
local_vars.get(["q", "denom"]),
intmultiply(vm),
local_vars.set_val("t"),
local_vars.get(["t", "num"]),
gt(vm),
vm.ifelse(
lambda vm: [
local_vars.get(["q", "t", "denom"]),
vm.push(-1),
vm.add(),
local_vars.set_val("q"),
# t denom
subtract(vm),
local_vars.set_val("t"),
]
),
local_vars.get(["t", "num"]),
gt(vm),
vm.ifelse(
lambda vm: [
local_vars.get(["q", "t", "denom"]),
vm.push(-1),
vm.add(),
local_vars.set_val("q"),
# t denom
subtract(vm),
local_vars.set_val("t"),
]
),
local_vars.get(["num", "t", "q"]),
subtract(vm),
trim(vm),
vm.swap1(),
fromint(vm),
local_vars.discard()
# quotient remainder
],
)
@modifies_stack(2, 1) # num denom -> approxquot
def divmod_approxquotient(vm):
vm.swap1()
vm.dup0()
bignum.get("size")(vm)
vm.push(-1)
vm.add()
# size(denom)-1 denom num
vm.swap1()
getchunk(vm)
vm.swap1()
# num approxdenom
vm.dup0()
bignum.get("size")(vm)
# size(num) num approxdenom
vm.dup1()
vm.dup1()
# size(num) num size(num) num approxdenom
vm.push(-1)
vm.add()
vm.swap1()
getchunk(vm)
vm.push(_CHUNK_MOD)
vm.mul()
# _chunkMod*num[-1] size(num) num approxdenom
vm.swap2()
vm.swap1()
# size(num) num _chunkmod*num[-1] approxdenom
vm.push(-2)
vm.add()
vm.swap1()
getchunk(vm)
vm.add()
vm.div()
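# A minimal plain-Python sketch of the estimate above (illustrative only):
# the quotient chunk is guessed from the two most significant chunks of the
# numerator and the most significant chunk of the denominator, as in classic
# multi-precision long division.  `radix` stands for the chunk modulus
# (assumed to be _CHUNK_MOD).
def _approx_quotient_py(num_chunks, denom_chunks, radix):
    # chunks are little-endian, so index -1 is the most significant chunk
    top_two = num_chunks[-1] * radix + num_chunks[-2]
    return top_two // denom_chunks[-1]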
@modifies_stack(1, 1) # denom -> bitsToShift
def div_initscale(vm):
vm.dup0()
bignum.get("size")(vm)
vm.push(-1)
vm.add()
vm.swap1()
# denom size(denom)-1
getchunk(vm)
# topchunk
vm.push(0)
# i topchunk
vm.while_loop(
lambda vm: [vm.dup1(), vm.push(2 ** 125), vm.bitwise_and(), vm.iszero()],
lambda vm: [
# i topchunk
vm.push(1),
vm.add(),
vm.swap1(),
vm.push(2),
vm.mul(),
vm.swap1(),
],
)
# i topchunk
vm.swap1()
vm.pop()
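# A minimal plain-Python sketch of the loop above (illustrative only): count
# how many doublings are needed before the top chunk's high bit is set, which
# normalises the divisor before chunk-wise division.  A chunk width of 126
# bits is assumed from the 2**125 mask used above.
def _initscale_py(topchunk, chunk_bits=126):
    shift = 0
    while not (topchunk & (1 << (chunk_bits - 1))):
        shift += 1
        topchunk *= 2
    return shift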
@modifies_stack(2, 2) # x y -> x//y x%y. (assume x>=0, y>0)
def divmodallpositive_save(vm):
local_vars = Locals(vm, ["m", "partial", "x", "y"])
zero(vm)
vm.dup0()
local_vars.make()
local_vars.get(["x", "y"])
lt(vm)
vm.ifelse(
lambda vm: [
local_vars.get("x"),
zero(vm),
# 0 x
],
lambda vm: [
local_vars.get(["x", "y"]),
quotlowerbound(vm),
vm.dup0(),
vm.push(1),
fromint(vm),
geq(vm),
# 1>=m m
vm.ifelse(
lambda vm: [
vm.pop(),
local_vars.get(["x", "y", "y"]),
subtract(vm),
divmodallpositive(vm),
vm.push(1),
fromint(vm),
add(vm),
],
lambda vm: [
local_vars.set_val("m"),
local_vars.get(["m", "y", "x"]),
multiply(vm),
vm.swap1(),
# x m*y
divmodallpositive(vm),
# q' r'
local_vars.set_val("partial"),
# r'
local_vars.get("y"),
vm.swap1(),
# r' y
divmodallpositive(vm),
# q'' r
local_vars.get(["partial", "m"]),
multiply(vm),
add(vm),
# q r
],
),
],
)
local_vars.discard()
@modifies_stack(2, 1) # x y -> lowerbound(x/y). (assume x>=y)
def quotlowerbound(vm):
local_vars = Locals(vm, ["lb", "prevlb", "x", "y"])
vm.push(1)
fromint(vm)
vm.push(2)
fromint(vm)
local_vars.make()
vm.while_loop(
lambda vm: [local_vars.get(["lb", "y", "x"]), multiply(vm), leq(vm)],
lambda vm: [
local_vars.get("lb"),
vm.dup0(),
local_vars.set_val("prevlb"),
vm.dup0(),
multiply(vm),
local_vars.set_val("lb"),
],
)
local_vars.discard("prevlb")
@modifies_stack(2, 1)  # bn1 bn2 -> bn1==bn2
def eq(vm):
local_vars = Locals(vm, ["eqsofar", "i", "bn1", "bn2"])
vm.dup1()
vm.dup1()
sizeoflarger(vm)
# size bn1 bn2
vm.push(-1)
vm.add()
vm.push(1)
local_vars.make()
vm.while_loop(
lambda vm: [
local_vars.get(["i", "eqsofar"]),
vm.push(-1),
vm.slt(),
vm.bitwise_and(),
],
lambda vm: [
local_vars.get(["bn1", "i", "i", "bn2"]),
getchunk(vm),
vm.swap2(),
getchunk(vm),
vm.eq(),
vm.iszero(),
vm.ifelse(lambda vm: [vm.push(0), local_vars.set_val("eqsofar")]),
local_vars.get("i"),
vm.push(-1),
vm.add(),
local_vars.set_val("i"),
],
)
local_vars.discard("eqsofar")
@modifies_stack(2, 1) # bn1 bn2 -> bn1<bn2
def lt(vm):
vm.dup1()
bignum.get("ispositive")(vm)
vm.dup1()
bignum.get("ispositive")(vm)
# ispos(bn1) ispos(bn2) bn1 bn2
vm.ifelse(
lambda vm: [
vm.ifelse(
lambda vm: [ltbothpositive(vm)],
lambda vm: [vm.pop(), vm.pop(), vm.push(0)],
)
],
lambda vm: [
vm.ifelse(
lambda vm: [vm.pop(), vm.pop(), vm.push(1)],
lambda vm: [negate(vm), vm.swap1(), negate(vm), ltbothpositive(vm)],
)
],
)
@modifies_stack(2, 1)  # bn1 bn2 -> bn1>bn2
def gt(vm):
vm.swap1()
lt(vm)
@modifies_stack(2, 1) # bn1 bn2 -> bn1>=bn2
def geq(vm):
lt(vm)
vm.iszero()
@modifies_stack(2, 1) # bn1 bn2 -> bn1<=bn2
def leq(vm):
vm.swap1()
geq(vm)
# bn1 bn2 -> bn1<bn2. (assume both bn1,bn2 non-negative)
@modifies_stack(2, 1)
def ltbothpositive(vm):
local_vars = Locals(vm, ["undecided", "islt", "i", "bn1", "bn2"])
vm.dup1()
vm.dup1()
sizeoflarger(vm)
# size bn1 bn2
vm.push(-1)
vm.add()
vm.push(0)
vm.push(1)
local_vars.make()
vm.while_loop(
lambda vm: [
local_vars.get("i"),
vm.push(-1),
vm.slt(),
local_vars.get("undecided"),
vm.bitwise_and(),
],
lambda vm: [
local_vars.get(["bn2", "i", "i", "bn1"]),
getchunk(vm),
vm.swap2(),
getchunk(vm),
# bn1[i] bn2[i]
vm.dup1(),
vm.dup1(),
vm.lt(),
vm.ifelse(
lambda vm: [
vm.push(1),
vm.push(0),
local_vars.set_val(["undecided", "islt"]),
]
),
vm.gt(),
vm.ifelse(lambda vm: [vm.push(0), local_vars.set_val("undecided")]),
local_vars.get("i"),
vm.push(-1),
vm.add(),
local_vars.set_val("i"),
],
)
local_vars.discard("islt")
@modifies_stack(2, 3) # a b -> gcd x y
def egcd(vm):
local_vars = Locals(vm, ["g", "x", "y", "a", "b"])
vm.dup0()
zero(vm)
vm.eq()
vm.ifelse(
lambda vm: [
vm.pop(),
zero(vm),
vm.push(1),
fromint(vm),
vm.swap2(),
# b 0 1
],
lambda vm: [
# a b
vm.push(0),
vm.dup0(),
vm.dup0(),
local_vars.make(),
local_vars.get(["b", "a", "a"]),
modallpositive(vm),
egcd(vm),
local_vars.set_val(["g", "y", "x"]),
local_vars.get(["b", "a", "y", "x", "y"]),
divmodallpositive(vm),
vm.swap1(),
vm.pop(),
multiply(vm),
vm.swap1(),
subtract(vm),
local_vars.discard("g"),
# g x-(b//a)*y y
],
)
# a m -> b such that (a*b)%m==1, or Error if none exists
@modifies_stack(2, 1)
def modinv(vm):
vm.dup1()
vm.swap1()
# a m m
egcd(vm)
# g x y m
vm.swap2()
vm.pop()
vm.swap1()
# g x m
vm.push(1)
fromint(vm)
vm.eq()
# g==1 x m
vm.ifelse(lambda vm: [mod_modpositive(vm)], lambda vm: [deliberate_error(vm)])
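# A plain-Python sketch of the two routines above (illustrative only): the
# textbook recursive extended Euclid, with the inverse failing when
# gcd(a, m) != 1, just as modinv falls through to deliberate_error.
def _egcd_py(a, b):
    if a == 0:
        return b, 0, 1
    g, y, x = _egcd_py(b % a, a)
    return g, x - (b // a) * y, y


def _modinv_py(a, m):
    g, x, _ = _egcd_py(a, m)
    if g != 1:
        raise ZeroDivisionError("no modular inverse")
    return x % m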
@modifies_stack(2, 2) # bn rand -> randbn rand
def randomgen_pos_lessthan(vm):
vm.push(1)
fromint(vm)
vm.swap1()
subtract(vm)
randomgen_lessthan(vm)
vm.push(1)
fromint(vm)
add(vm)
@modifies_stack(2, 2) # bn rand -> randbn rand
def randomgen_lessthan(vm):
vm.swap1()
vm.dup1()
# bn rand bn
bitlength(vm)
randomgen(vm)
# randbn rand bn
vm.while_loop(
lambda vm: [
vm.dup2(),
vm.dup1(),
# randbn bn randbn rand bn
geq(vm),
],
lambda vm: [
# randbn rand bn
vm.pop(),
vm.dup1(),
bitlength(vm),
randomgen(vm),
],
)
# randbn rand bn
vm.swap2()
vm.pop()
# rand randbn
vm.swap1()
@modifies_stack(2, 2) # nbits rand -> bignum rand
def randomgen(vm):
local_vars = Locals(vm, ["ret", "i", "bitsleft", "rand"])
vm.push(0)
zero(vm)
local_vars.make()
vm.while_loop(
lambda vm: [vm.push(0), local_vars.get("bitsleft"), vm.sgt()],
lambda vm: [
vm.push(_CHUNK_BITS),
local_vars.get("bitsleft"),
vm.slt(),
vm.ifelse(
lambda vm: [
# add the final, partial chunk
local_vars.get(["bitsleft", "rand"]),
vm.push(2),
vm.exp(),
# 2**bitsleft rand
vm.swap1(),
random.getmodn(vm),
# val rand'
local_vars.get(["ret", "i"]),
setchunk(vm),
# ret' rand'
vm.push(0),
local_vars.set_val(["bitsleft", "ret", "rand"]),
],
lambda vm: [
# add a full chunk
vm.push(_CHUNK_MOD),
local_vars.get("rand"),
random.getmodn(vm),
# chunk gen'
local_vars.get(["ret", "i"]),
setchunk(vm),
# ret' gen'
local_vars.get(["i", "bitsleft"]),
vm.push(1),
vm.add(),
vm.swap1(),
vm.push(-_CHUNK_BITS),
vm.add(),
# bitsleft' i' ret' gen'
local_vars.set_val(["bitsleft", "i", "ret", "rand"]),
],
),
],
)
local_vars.get(["ret", "rand"])
local_vars.discard()
@modifies_stack(2, 2) # nbits rand -> bignum rand
def randomgen_odd(vm):
randomgen(vm)
# bignum rand
vm.push(0)
vm.dup1()
getchunk(vm)
# chunk[0] bignum rand
vm.push(1)
vm.bitwise_or()
vm.swap1()
vm.push(0)
vm.swap1()
setchunk(vm)
@modifies_stack(1, 1) # bignum -> mrctx
def _millerrabin_makectx(vm):
local_vars = Locals(vm, ["a", "n", "r", "d", "one", "mone", "looksprime"])
local_vars.new()
vm.push(-1)
vm.push(0)
vm.push(1)
fromint(vm)
# bn(1) 0 -1 bignum
local_vars.set_val(["one", "looksprime", "r", "n"])
local_vars.get(["n", "one"])
subtract(vm)
vm.dup0()
local_vars.set_val(["mone", "d"])
vm.while_loop(
lambda vm: [
vm.push(0),
local_vars.get("d"),
getchunk(vm),
# d[0]
vm.push(1),
vm.bitwise_and(),
vm.iszero(),
],
lambda vm: [
local_vars.get(["r", "d"]),
vm.push(1),
vm.add(),
# r' d
vm.swap1(),
vm.push(1),
vm.swap1(),
shiftright(vm),
# d' r'
local_vars.set_val(["d", "r"]),
],
)
# nonstandard move here--return our local_vars
vm.auxpop()
@modifies_stack(2, 2) # mrctx rand -> looksprime rand
def _millerrabin_step(vm):
local_vars = Locals(vm, ["a", "n", "r", "d", "one", "mone", "looksprime"])
vm.auxpush() # nonstandard move -- take our local_vars as arg
local_vars.get("n")
randomgen_pos_lessthan(vm)
local_vars.set_val("a")
local_vars.get(["a", "d", "n", "one", "mone"])
modpow(vm)
vm.dup0()
local_vars.set_val("a")
# a' bn(1) bn(n-1)
vm.swap1()
vm.dup1()
# a' bn(1) a' bn(n-1)
eq(vm)
vm.swap2()
eq(vm)
vm.bitwise_or()
vm.ifelse(
lambda vm: [
# looks prime
vm.push(1),
local_vars.set_val("looksprime"),
],
lambda vm: [
# still looks composite
vm.while_loop(
lambda vm: [local_vars.get("r"), vm.push(0), vm.slt()],
lambda vm: [
local_vars.get(["a", "n", "mone"]),
vm.dup0(),
modmul(vm),
# a' bn(-1)
vm.dup0(),
# a' a' bn(-1)
vm.swap2(),
# bn(-1) a' a'
eq(vm),
vm.ifelse(
lambda vm: [
# looks prime
# a'
vm.pop(),
vm.push(-1),
vm.push(1),
local_vars.set_val(["looksprime", "r"]),
],
lambda vm: [
# still looks composite
# a'
local_vars.get("r"),
vm.push(-1),
vm.add(),
# r-1 a'
local_vars.set_val(["r", "a"]),
],
),
],
)
],
)
local_vars.discard("looksprime")
@modifies_stack(3, 1) # bignum rand bitsOfConfidence -> isprime
def isprime(vm):
local_vars = Locals(vm, ["looksprime", "mrctx", "rand", "confNeeded"])
_millerrabin_makectx(vm)
vm.push(1)
local_vars.make()
vm.while_loop(
lambda vm: [vm.push(0), local_vars.get("confNeeded"), vm.sgt()],
lambda vm: [
local_vars.get(["mrctx", "rand"]),
_millerrabin_step(vm),
vm.ifelse(
lambda vm: [
# mr says looks prime
# rand
local_vars.get("confNeeded"),
vm.push(-2),
vm.add(),
local_vars.set_val(["confNeeded", "rand"]),
],
lambda vm: [
# mr says composite
# rand
vm.push(0),
vm.dup0(),
local_vars.set_val(["looksprime", "confNeeded", "rand"]),
],
),
],
)
local_vars.discard("looksprime")
@modifies_stack(0, 0)
def deliberate_error(vm):
vm.push(0)
vm.push(1)
vm.div()
```
#### File: arbitrum/std/keyvalue.py
```python
import eth_utils
from . import tup
from ..annotation import modifies_stack
from .. import value
from .struct import Struct
def make_keyvalue_type(key_type, value_type, default_val=None):
keyvalue_type = Struct("keyvalue[{}][{}]".format(key_type, value_type), [])
if default_val is None:
default_val = value.Tuple([])
class KeyValue:
@staticmethod
def make():
return value.Tuple([])
@staticmethod
@modifies_stack(0, [keyvalue_type.typ], default_val)
def new(vm):
vm.push(KeyValue.make())
vm.cast(keyvalue_type.typ)
@staticmethod
@modifies_stack([keyvalue_type.typ, key_type], [value_type], default_val)
def get(vm):
# kvs key
vm.swap1()
vm.hash()
vm.swap1()
# kvs keyhash
vm.cast(value.TupleType())
vm.while_loop(
lambda vm: [vm.dup0(), vm.tlen(), vm.push(8), vm.eq()],
lambda vm: [
vm.cast(value.TupleType(8)),
# kvs keyhash
vm.push(8),
vm.dup2(),
vm.mod(),
# keyhash%8 kvs keyhash
vm.tget(),
# subkvs keyhash
vm.swap1(),
vm.push(8),
vm.swap1(),
vm.div(),
vm.swap1(),
],
)
vm.dup0()
vm.tnewn(0)
vm.eq()
vm.ifelse(
lambda vm: [vm.pop(), vm.pop(), vm.push(default_val)],
lambda vm: [
# kvs keyhash
vm.cast(value.TupleType(3)),
vm.swap1(),
vm.dup1(),
vm.tgetn(0),
# tupkeyhash keyhash kvs
vm.eq(),
vm.ifelse(
lambda vm: [
# kvs
vm.tgetn(2)
],
lambda vm: [
# kvs
vm.pop(),
vm.push(default_val),
],
),
],
)
vm.cast(value_type)
@staticmethod
@modifies_stack(
[keyvalue_type.typ, key_type, value_type], [keyvalue_type.typ], default_val
)
def set_val(vm):
# kvs key value
vm.cast(value.TupleType())
vm.dup2()
vm.dup2()
# key value kvs key value
vm.dup0()
vm.hash()
# hash(key) key value kvs key value
tup.make(3)(vm)
# [hash(key) key value] kvs key value
vm.swap1()
KeyValue._set_impl(vm)
vm.cast(keyvalue_type.typ)
# newkvs key value
vm.swap2()
vm.pop()
vm.pop()
# newkvs
@staticmethod
def get_static(kvs, key):
return KeyValue._get_static_impl(
kvs, eth_utils.big_endian_to_int(value.value_hash(key))
)
@staticmethod
def set_static(kvs, key, val):
update = value.Tuple(
[eth_utils.big_endian_to_int(value.value_hash(key)), key, val]
)
return KeyValue._set_impl_static(kvs, update)
@staticmethod
def _get_static_impl(kvs, key):
while len(kvs) == 8:
kvs = kvs[key % 8]
key //= 8
if not kvs:
return default_val
if kvs[0] == key:
return kvs[2]
return default_val
@staticmethod
@modifies_stack(
[
value.TupleType(),
value.TupleType(
[value.IntType(), value.ValueType(), value.ValueType()]
),
],
[value.TupleType()],
default_val,
)
def _set_impl(vm):
# kvs [hash(key) key value]
vm.dup0()
vm.tlen()
# len(kvs) kvs [...]
vm.dup0()
vm.ifelse(
lambda vm: [
# len(kvs) kvs [...]
vm.push(3),
vm.eq(),
# len(kvs)==3 kvs [...]
vm.ifelse(
lambda vm: [
vm.cast(
value.TupleType(
[
value.IntType(),
value.ValueType(),
value.ValueType(),
]
)
),
# [oldhash oldkey oldval] [newhash newkey newval]
vm.dup1(),
vm.tgetn(1),
# newkey [oldhash oldkey oldval] [newhash newkey newval]
vm.dup1(),
vm.tgetn(1),
vm.eq(),
# oldkey==newkey [old...] [new...]
vm.ifelse(
lambda vm: [
vm.pop(),
# [newhash newkey newval]
],
lambda vm: [
vm.tnewn(8),
# empty8tuple [old...] [new...]
KeyValue._set_impl(vm),
KeyValue._set_impl(vm),
],
),
],
lambda vm: [
# kvs is full 8-tuple
# kvstuple [newhash newkey newval]
vm.cast(value.TupleType(8)),
vm.swap1(),
vm.dup0(),
vm.tgetn(0),
# newhash [newhash newkey newval] kvstuple
vm.push(8),
vm.dup1(),
vm.div(),
# newhash/8 newhash [new...] kvstuple
vm.swap1(),
vm.push(8),
vm.swap1(),
vm.mod(),
# newhash%8 newhash/8 [new...] kvstuple
vm.swap2(),
# [new...] newhash/8 newhash%8 kvstuple
vm.tsetn(0),
# subtriple newhash%8 kvstuple
vm.dup2(),
vm.dup2(),
# newhash%8 kvstuple subtriple newhash%8 kvstuple
vm.tget(),
vm.cast(
value.TupleType(
[
value.IntType(),
value.ValueType(),
value.ValueType(),
]
)
),
# subkvs subtriple newhash%8 kvstuple
KeyValue._set_impl(vm),
# newsubkvs newhash%8 kvstuple
vm.swap2(),
vm.swap1(),
# newhash%8 kvstuple newsubkvs
vm.tset(),
],
),
],
lambda vm: [
# len(kvs) None [hash(key) key value]
vm.pop(),
vm.pop(),
],
)
@staticmethod
def _set_impl_static(kvs, update):
if kvs == value.Tuple([]):
return update
if len(kvs) == 3:
if kvs[1] == update[1]:
return update
new_kvs = value.Tuple([value.Tuple([])] * 8)
new_kvs = KeyValue._set_impl_static(new_kvs, kvs)
return KeyValue._set_impl_static(new_kvs, update)
return kvs.set_tup_val(
update[0] % 8,
KeyValue._set_impl_static(
kvs[update[0] % 8], update.set_tup_val(0, update[0] // 8)
),
)
KeyValue.typ = keyvalue_type.typ
return KeyValue
keyvalue = make_keyvalue_type(value.ValueType(), value.ValueType())
keyvalue_int_int = make_keyvalue_type(value.IntType(), value.IntType(), 0)
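# Illustrative usage of the *_static helpers (assumed, not taken from the
# library's own examples):
#
#   kv = keyvalue_int_int.make()
#   kv = keyvalue_int_int.set_static(kv, 7, 42)
#   keyvalue_int_int.get_static(kv, 7)   # expected to return 42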
```
#### File: arbitrum/std/queue.py
```python
from ..annotation import modifies_stack
from . import make_boundedq_type
from .. import value
from .struct import Struct
# a queue is just a boundedq; when the boundedq gets full
# we generate a new, bigger one
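# A plain-Python sketch of that growth strategy (illustrative only; BoundedQ
# and its methods are hypothetical names standing in for the boundedq type):
#
#   def _put_growing(bq, item, BoundedQ):
#       if bq.is_full():
#           bigger = BoundedQ(2 * bq.capacity)
#           while not bq.is_empty():
#               bigger.put(bq.get())
#           bq = bigger
#       bq.put(item)
#       return bq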
def make_queue_type(typ):
boundedq = make_boundedq_type(typ)
queue_type = Struct("queue[{}]".format(typ), [("boundedq", boundedq.typ)])
# stack_type.fields[]
class Queue:
@staticmethod
def make():
return boundedq.make(8)
@staticmethod
@modifies_stack([], [queue_type.typ])
def new(vm):
vm.push(Queue.make())
vm.cast(queue_type.typ)
@staticmethod
@modifies_stack([queue_type.typ], [value.IntType()])
def isempty(vm):
# q -> isempty
queue_type.get("boundedq")(vm)
boundedq.isempty(vm)
@staticmethod
@modifies_stack([queue_type.typ, typ], [queue_type.typ])
def put(vm):
# q item -> updatedq
queue_type.get("boundedq")(vm)
vm.dup0()
boundedq.isfull(vm)
# bqisfull bq item
vm.ifelse(
lambda vm: [
# bq item
vm.dup0(),
boundedq.struct.get("capacity")(vm),
vm.push(2),
vm.mul(),
boundedq.new(vm),
vm.swap1(),
# oldbq newbq item
vm.while_loop(
lambda vm: [
vm.dup0(),
boundedq.isempty(vm),
vm.push(0),
vm.eq()
# (oldbq is nonempty) oldbq newbq item
],
lambda vm: [
# oldbq newbq item
boundedq.get(vm),
# moveitem oldbq newbq item
vm.swap1(),
vm.swap2(),
# newbq moveitem oldbq item
boundedq.put(vm),
# newbq oldbq item
vm.swap1(),
],
),
# oldbq newbq item
vm.pop(),
]
)
# bq item
boundedq.put(vm)
queue_type.set_val("boundedq")(vm)
@staticmethod
@modifies_stack([queue_type.typ], [typ, queue_type.typ])
def get(vm):
# assume queue is non-empty
# q -> item q
queue_type.get("boundedq")(vm)
boundedq.get(vm)
vm.swap1()
queue_type.set_val("boundedq")(vm)
vm.swap1()
Queue.typ = queue_type.typ
return Queue
queue = make_queue_type(value.ValueType())
queue_tup = make_queue_type(value.TupleType())
```
#### File: arbitrum/std/random.py
```python
from . import tup
from ..annotation import modifies_stack
from .locals import Locals
@modifies_stack(1, 1) # seed -> generator
def new(vm):
vm.hash()
@modifies_stack(1, 2) # gen -> value gen
def getint(vm):
vm.push(1)
vm.dup1()
tup.make(2)(vm)
vm.hash()
# newgen oldgen
vm.swap1()
vm.push(0)
vm.swap1()
tup.make(2)(vm)
vm.hash()
@modifies_stack(2, 2) # gen n -> value gen
def getmodn(vm): # get a random int, 0<=result<n
local_vars = Locals(vm, ["cutoff", "dummy"])
# gen n
vm.dup1()
vm.push((1 << 256) - 1)
vm.div()
# ff//n gen n
vm.dup2()
vm.mul()
vm.dup0()
# cutoff dummy gen n
local_vars.make()
# gen n
getint(vm)
# val gen n
vm.while_loop(
lambda vm: [
vm.dup0(),
local_vars.get("cutoff"),
# cutoff val val gen n
vm.gt(),
vm.iszero(),
],
lambda vm: [
# val gen n
vm.pop(),
getint(vm),
],
)
# can now discard cutoff from auxstack
local_vars.discard()
# val gen n
vm.swap1()
vm.swap2()
# n val gen
vm.swap1()
vm.mod()
# value gen
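# A plain-Python sketch of the rejection sampling above (illustrative only):
# draws at or above the largest multiple of n that fits in 256 bits would be
# biased, so they are discarded and redrawn before reducing mod n.
def _getmodn_py(draw_uint256, n):
    cutoff = ((1 << 256) - 1) // n * n
    val = draw_uint256()
    while val >= cutoff:
        val = draw_uint256()
    return val % n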
```
#### File: arbitrum/std/sized_byterange.py
```python
import eth_utils
from . import byterange, sized_common, bytestack, tup, stack_int, bigtuple_int
from ..annotation import modifies_stack
from ..vm import VM
from .. import value
from .struct import Struct
sized_byterange = Struct(
"sized_byterange", [("data", byterange.typ), ("size", value.IntType())]
)
typ = sized_byterange.typ
def make():
return sized_common.make(byterange.make)
# [] -> [tuple]
@modifies_stack(0, [typ])
def new(vm):
sized_common.new(vm, byterange.new)
vm.cast(typ)
@modifies_stack([typ], [value.IntType()])
def length(vm):
sized_byterange.get("size")(vm)
# [tuple, index, value] -> [tuple]
@modifies_stack([typ, value.IntType(), value.IntType()], [typ])
def set_val(vm):
sized_common.set_val(vm, sized_byterange, byterange.set_val, 32)
@modifies_stack([typ, value.IntType(), value.IntType()], [typ])
def set_val8(vm):
sized_common.set_val(vm, sized_byterange, byterange.set_val8, 1)
# [tuple, index] -> [value]
@modifies_stack([typ, value.IntType()], [value.IntType()])
def get(vm):
sized_common.get(vm, sized_byterange, byterange.get)
# [bytestack] -> [sized_byterange]
@modifies_stack([bytestack.typ], [typ])
def from_bytestack(vm):
vm.dup0()
bytestack.get("size")(vm)
vm.swap1()
bytestack.get("stack")(vm)
vm.dup1()
vm.push(31)
vm.add()
vm.push(32)
vm.swap1()
vm.div()
vm.push(1)
vm.swap1()
vm.sub()
# index stack size
bigtuple_int.new(vm)
# bigtuple index stack size
vm.swap1()
vm.swap2()
# stack bigtuple index size
vm.while_loop(
lambda vm: [vm.dup0(), stack_int.isempty(vm), vm.iszero()],
lambda vm: [
stack_int.pop(vm),
vm.swap1(),
vm.auxpush(),
# next_val bigtuple index
vm.swap1(),
vm.dup2(),
vm.swap1(),
# bigtuple index next_val index
bigtuple_int.set_val(vm),
# bigtuple index
vm.swap1(),
vm.push(1),
vm.swap1(),
vm.sub(),
vm.swap1(),
vm.auxpop()
# stack bigtuple index size
],
)
vm.pop()
vm.swap1()
vm.pop()
vm.cast(byterange.typ)
new(vm)
sized_byterange.set_val("data")(vm)
sized_byterange.set_val("size")(vm)
# [sized_byterange] -> [bytestack]
@modifies_stack([typ], [bytestack.typ])
def to_bytestack(vm):
vm.dup0()
sized_byterange.get("size")(vm)
vm.swap1()
vm.push(0)
vm.swap1()
sized_byterange.get("data")(vm)
stack_int.new(vm)
tup.make(4)(vm)
# [stack data index size]
vm.while_loop(
lambda vm: [vm.dup0(), vm.tgetn(3), vm.dup1(), vm.tgetn(2), vm.lt()],
lambda vm: [
# [stack data index size]
vm.dup0(),
vm.tgetn(2),
vm.dup1(),
vm.tgetn(1),
byterange.get(vm),
# cell [stack data index size]
vm.dup1(),
vm.tgetn(0),
stack_int.push(vm),
vm.swap1(),
vm.tsetn(0),
vm.dup0(),
vm.tgetn(2),
vm.push(32),
vm.add(),
vm.swap1(),
vm.tsetn(2),
],
)
vm.dup0()
vm.tgetn(3)
vm.swap1()
vm.tgetn(0)
vm.tnewn(2)
vm.cast(bytestack.typ)
bytestack.set_val("stack")(vm)
bytestack.set_val("size")(vm)
def get_static(val, index):
return sized_common.get_static(val, index, byterange.get_static)
def set_static(byterange_val, index, val):
return sized_common.set_static(byterange_val, index, val, byterange.set_static, 32)
def frombytes(data):
return value.Tuple([byterange.frombytes(data), len(data)])
def tohex(byterange_val):
tot = ""
for i in range(0, byterange_val[1], 32):
segment = eth_utils.to_hex(byterange.get_static(byterange_val[0], i))[2:]
segment = (64 - len(segment)) * "0" + segment
tot += segment
return "0x" + tot[: byterange_val[1] * 2]
def create_sized_bytearray(data):
vm = VM()
new(vm)
for item in data:
vm.push(item[0])
vm.push(item[1])
vm.swap2()
        set_val(vm)
return vm.stack[0]
```
#### File: arbitrum/tests/test_bitwise.py
```python
from unittest import TestCase
import random
from arbitrum.std import bitwise
from arbitrum import VM
TT256 = 2 ** 256
TT256M1 = 2 ** 256 - 1
TT255 = 2 ** 255
def to_signed(i):
return i if i < TT255 else i - TT256
class TestArray(TestCase):
def test_flip_endianness(self):
hexstr = bytearray.fromhex(
"ada5013122d395ba3c54772283fb069b10426056ef8ca54750cb9bb552a59e7d"
)
littleInt = int.from_bytes(hexstr, byteorder="little")
bigInt = int.from_bytes(hexstr, byteorder="big")
vm = VM()
vm.push(littleInt)
bitwise.flip_endianness(vm)
self.assertEqual(vm.stack[0], bigInt)
def test_set_byte(self):
origstring = bytearray.fromhex(
"ada5013122d395ba3c54772283fb069b10426056ef8ca54750cb9bb552a59e7d"
)
bigInt = int.from_bytes(origstring, byteorder="big")
for i in range(32):
new_val = random.getrandbits(8)
vm = VM()
vm.push(new_val)
vm.push(i)
vm.push(bigInt)
bitwise.set_byte(vm)
finalstring = bytearray(origstring)
finalstring[i] = new_val
self.assertEqual(vm.stack[0], int.from_bytes(finalstring, byteorder="big"))
def test_arithmetic_shift_right(self):
cases = [(TT256 - 100, 2), (100, 2)]
for case in cases:
vm = VM()
vm.push(case[1])
vm.push(case[0])
bitwise.arithmetic_shift_right(vm)
self.assertEqual(to_signed(case[0]) >> case[1], to_signed(vm.stack[0]))
def test_right_shift(self):
cases = [(TT256 - 100, 2), (100, 2)]
for case in cases:
vm = VM()
vm.push(case[1])
vm.push(case[0])
bitwise.shift_right(vm)
self.assertEqual(case[0] >> case[1], vm.stack[0])
def test_left_shift(self):
cases = [(TT256 - 100, 2), (100, 2)]
for case in cases:
vm = VM()
vm.push(case[1])
vm.push(case[0])
bitwise.shift_left(vm)
self.assertEqual((case[0] << case[1]) & TT256M1, vm.stack[0])
vm2 = VM()
vm2.push(case[1])
vm2.push(case[0])
bitwise.shift_left(vm2)
self.assertEqual(
to_signed((case[0] << case[1]) & TT256M1), to_signed(vm2.stack[0])
)
```
#### File: arbitrum/tests/test_tuple.py
```python
from unittest import TestCase
from arbitrum.std import tup
from arbitrum import VM
class TestTuple(TestCase):
def test_pack(self):
for i in range(1, 100):
with self.subTest():
data = list(range(100))
vm = VM()
vm.stack.items = list(data)
tup.pack(i)(vm)
self.assertEqual(vm.stack[1:], data[i:])
def test_pack_unpack(self):
for i in range(1, 100):
with self.subTest():
data = list(range(100))
vm = VM()
vm.stack.items = list(data)
tup.pack(i)(vm)
tup.unpack(i)(vm)
self.assertEqual(vm.stack[:], data[:])
```
#### File: arbitrum/scripts/initialize_validators.py
```python
import argparse
import os
import sys
import shutil
import setup_states
# package configuration
NAME = "initialize_rollup_validators_rinkeby"
DESCRIPTION = "Manage Arbitrum dockerized deployments"
# filename constants
VALIDATOR_STATE_DIRNAME = "validator-states/validator"
### ----------------------------------------------------------------------------
### Deploy
### ----------------------------------------------------------------------------
# Compile contracts to `contract.ao` and export to Docker and run validators
def deploy(args, sudo_flag=False):
if os.path.isdir(setup_states.VALIDATOR_STATES):
shutil.rmtree(setup_states.VALIDATOR_STATES)
setup_states.setup_validator_states_folder(args.contract, args.n_validators)
config = {
"rollup_address": args.rollup_address.strip(),
"eth_url": args.eth_url,
"blocktime": 13,
}
setup_states.setup_validator_configs(config, args.n_validators)
def check_file(name):
if not os.path.isfile(name):
raise argparse.ArgumentTypeError("%s is not a valid file" % name)
return name
def main():
parser = argparse.ArgumentParser(prog=NAME, description=DESCRIPTION)
parser.add_argument(
"contract", type=check_file, help="The Arbitrum bytecode contract to deploy"
)
parser.add_argument(
"n_validators",
choices=range(1, 101),
metavar="[1-100]",
type=int,
help="The number of validators to deploy",
)
parser.add_argument(
"rollup_address", help="The address of a deployed arbitrum rollup contract"
)
parser.add_argument("eth_url", help="RPC or Websocket url for Ethereum node")
args = parser.parse_args()
deploy(args)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
sys.exit(1)
```
#### File: arbitrum/scripts/setup_local_rollup.py
```python
import argparse
import os
import sys
import json
import shutil
import setup_states
import build_validator_docker
from support.run import run
# package configuration
NAME = "arb-deploy"
DESCRIPTION = "Manage Arbitrum dockerized deployments"
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# filename constants
DOCKER_COMPOSE_FILENAME = "docker-compose.yml"
VALIDATOR_STATE_DIRNAME = "validator-states/validator"
### ----------------------------------------------------------------------------
### Deploy
### ----------------------------------------------------------------------------
# Compile contracts to `contract.ao` and export to Docker and run validators
def deploy(args, sudo_flag=False):
if not args.nobuild:
if build_validator_docker.build_validator(sudo_flag) != 0:
exit(1)
if os.path.isdir(setup_states.VALIDATOR_STATES):
shutil.rmtree(setup_states.VALIDATOR_STATES)
if args.is_parity:
image_name = "arb-bridge-eth"
ws_port = 7546
elif args.is_ganache:
image_name = "arb-bridge-eth-ganache"
ws_port = 7545
elif args.is_geth:
image_name = "arb-bridge-eth-geth"
ws_port = 7546
else:
raise Exception("Must select either parity or ganache")
setup_states.setup_validator_states_docker(
args.contract, args.n_validators, image_name, args.is_geth, sudo_flag
)
ethaddrs = "bridge_eth_addresses.json"
layer = run(
"docker create %s" % image_name, capture_stdout=True, quiet=True, sudo=sudo_flag
).strip()
if layer == "":
print("Docker image %s does not exist" % image_name)
return
run(
"docker cp %s:/home/user/bridge_eth_addresses.json %s" % (layer, ethaddrs),
sudo=sudo_flag,
)
run("docker rm %s" % layer, quiet=True, sudo=sudo_flag)
with open("bridge_eth_addresses.json") as json_file:
data = json.load(json_file)
factory_address = data["ArbFactory"]
os.remove(ethaddrs)
rollup_creation_cmd = (
"docker run -it --network=arb-network -v %s:/home/user/state arb-validator create --password <PASSWORD> state ws://%s:%s %s"
% (
os.path.abspath("validator-states/validator0"),
image_name,
ws_port,
factory_address,
)
)
rollup_address = run(rollup_creation_cmd, capture_stdout=True, quiet=False)
print("rollup_address", rollup_address)
config = {
"rollup_address": rollup_address.strip(),
"eth_url": "ws://localhost:" + str(ws_port),
"password": "<PASSWORD>",
"blocktime": 2,
}
setup_states.setup_validator_configs(config, args.n_validators)
def check_file(name):
if not os.path.isfile(name):
raise argparse.ArgumentTypeError("%s is not a valid file" % name)
return name
def check_json(name):
if not os.path.isfile(name):
raise argparse.ArgumentTypeError("%s is not a valid file" % name)
try:
with open(name, "r") as f:
json.loads(f.read())
except ValueError:
raise argparse.ArgumentTypeError("%s is not valid json" % name)
return name
def main():
parser = argparse.ArgumentParser(prog=NAME, description=DESCRIPTION)
parser.add_argument(
"contract", type=check_file, help="The Arbitrum bytecode contract to deploy"
)
parser.add_argument(
"n_validators",
choices=range(1, 101),
metavar="[1-100]",
type=int,
help="The number of validators to deploy",
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--ganache",
action="store_true",
dest="is_ganache",
help="Generate states based on arb-bridge-eth docker images",
)
group.add_argument(
"--geth",
action="store_true",
dest="is_geth",
help="Generate states based on arb-bridge-eth docker images",
)
group.add_argument(
"--parity",
action="store_true",
dest="is_parity",
help="Generate states based on arb-bridge-eth docker images",
)
parser.add_argument(
"--no-build",
action="store_true",
dest="nobuild",
help="Don't rebuild the validator docker image",
)
args = parser.parse_args()
if not args.is_parity and not args.is_ganache and not args.is_geth:
args.is_geth = True
deploy(args)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
sys.exit(1)
```
|
{
"source": "JesseAldridge/clipmon",
"score": 3
}
|
#### File: JesseAldridge/clipmon/clipmon.py
```python
import time, sys, os, subprocess, re, traceback
from datetime import datetime
import pyperclip
import conf
def clip_str_to_path_line(clip_str, path_exists=os.path.exists):
if clip_str.count('\n') > 1:
return
for f in test_replacements, test_partial_path:
result_line = f(clip_str, path_exists)
if result_line:
return result_line
def test_replacements(clip_str, path_exists):
replaced_str = clip_str
for find_regex, replace_str in conf.find_replace_map:
replaced_str = re.sub(find_regex, replace_str, replaced_str)
match = re.search(
# file extension
# path |
# | |
r'((?:~|/)[^@^:^\\^\(]+\.[a-z]{2,3}).*(?:line.*?|\()([0-9]+)', replaced_str)
if match and path_exists(os.path.expanduser(match.group(1))):
return ':'.join([match.group(1), match.group(2)])
match = re.search(
# file extension
# path |
# | |
r'((?:~|/)[^@^:^\\^\(]+\.[a-z]{2,3}):([0-9]+)', replaced_str)
if match and path_exists(os.path.expanduser(match.group(1))):
return ':'.join([match.group(1), match.group(2)])
def test_partial_path(clip_str, path_exists):
match = re.search(
r'([a-zA-Z_/\-\.0-9]+/[a-zA-Z_0-9\-]+\.[a-z]{2,3}).*?(line.*?|:)([0-9]+)', clip_str)
if match:
partial_path = match.group(1)
if partial_path.startswith('./'):
partial_path = partial_path.replace('./', '')
for proj_dir in conf.curr_proj_dirs:
full_path = os.path.join(proj_dir, partial_path)
if path_exists(os.path.expanduser(full_path)):
return ':'.join([full_path, match.group(3)])
if __name__ == '__main__':
try:
clip_str = None
is_first_run = True
while True:
prev_value = clip_str
try:
if not is_first_run:
time.sleep(1)
is_first_run = False
clip_str = pyperclip.paste()
# (the value that was initially on clipboard before running script)
if prev_value is None:
prev_value = clip_str
except UnicodeDecodeError:
pass
else:
if clip_str == prev_value:
continue
print('new value:', clip_str)
path_line = clip_str_to_path_line(clip_str)
if path_line:
subprocess.Popen([conf.editor_cmd, path_line])
except Exception as e:
import Tkinter
import tkMessageBox
window = Tkinter.Tk()
window.wm_withdraw()
exception_str = traceback.format_exc()
print('exception_str:', exception_str)
tkMessageBox.showinfo(title="Error", message="{}\n{}".format(
str(e), exception_str))
sys.stderr.write(str(datetime.now()) + '\n')
raise
```
#### File: JesseAldridge/clipmon/test.py
```python
import os, shutil
import clipmon
import conf
test_paths = {os.path.expanduser(path) for path in (
'~/Dropbox/CardBrew/03_move/tests/move_test.js',
'~/Dropbox/CardBrew/01_chars/chars.js',
'~/Dropbox/CardBrew/src/02_commands_server/command_listener.js',
'~/Dropbox/CardBrew/src/01_server/tests/test_server.js',
'~/Dropbox/CardBrew/src/00_game/00_game.js',
'~/gigwalk/apps/gigwalk_apps_platform_api/tests/api_app/templates/test_controller.py',
(
'~/gigwalk/apps/gigwalk_apps_platform_api/gigwalk_api_app/organization_location_lists/'
'resources.py'
),
'~/gigwalk/apps/gigwalk_apps_platform_api/back/lib/python2.7/site-packages/_pytest/config.py',
'~/gigwalk/apps/gigwalk_apps_platform_api/gigwalk_api_app/lib/gigwalk_api.py',
'~/gigwalk/apps/gigwalk_apps_platform_api/back/lib/python2.7/site-packages/_pytest/core.py',
'~/gigwalk/apps/api-tests/test/index.js',
'~/airlab/repos/rookery/app/mailers/homes_collections_mailer.rb',
'/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/subprocess.py',
)}
def path_exists(path):
return path in test_paths
with open('test_cases.txt') as f:
lines = f.read().splitlines()
for i in range(0, len(lines), 3):
test_line, expected = lines[i:i+2]
if expected == 'None':
expected = None
actual = clipmon.clip_str_to_path_line(test_line, path_exists)
print 'line: ', test_line
print 'expected:', expected
print 'actual: ', actual
print
assert actual == expected
print 'ohhh yeahhh'
```
|
{
"source": "JesseAldridge/modulecounter",
"score": 3
}
|
#### File: JesseAldridge/modulecounter/pull.py
```python
import re, json, time, traceback, sys, os
from datetime import datetime
import requests
import config
def pull_all(testing):
name_to_count = {}
tracker_dicts = config.config_dict['trackers']
if testing:
tracker_dicts = tracker_dicts[:3]
for d in tracker_dicts:
try:
pull_count(name_to_count, **d)
except Exception as e:
print (u'exception: {}; {}'.format(type(e).__name__, e.message)).encode('utf8')
traceback.print_exc()
continue
out_dict = {'date': datetime.utcnow(), 'name_to_count': name_to_count}
def dt_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
out_str = json.dumps(out_dict, default=dt_handler, indent=2) + ',\n'
with open(os.path.expanduser(config.config_dict['out_path']), 'a') as f:
f.write(out_str)
def pull_count(name_to_count, name=None, url=None, regex=None, key=None, is_full_list=None):
print 'getting url:', url
resp = requests.get(url, timeout=10)
if is_full_list:
count = len(json.loads(resp.content))
elif regex:
count = re.search(regex, resp.content).group(1)
else:
count = json.loads(resp.content).get(key)
if isinstance(count, basestring):
count = count.replace(',', '')
count = re.sub("&#?\w+;", '', count)
name_to_count[name] = int(count)
print 'count:', count
if __name__ == '__main__':
while True:
testing = (sys.platform == "darwin")
pull_all(testing)
time.sleep(1 if testing else 60 * 60 * 24)
```
|
{
"source": "JesseAldridge/sane_logger",
"score": 3
}
|
#### File: sane_logger/sane_logger/sane_logger.py
```python
import logging, sys
def sane_logger(log_level=logging.INFO):
logger = logging.getLogger()
logger.setLevel(log_level)
sh = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)s [%(filename)s.%(funcName)s:%(lineno)d] %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S %Z'
)
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
if __name__ == '__main__':
logger = sane_logger(logging.DEBUG)
logger.info('test log')
```
|
{
"source": "JesseAllardice/Action-Recognition",
"score": 3
}
|
#### File: Action-Recognition/predictors/actionpredictor.py
```python
import tensorflow as tf
import numpy as np
import time
import pickle
import marshal
import types
from collections import deque
from sklearn.linear_model import LogisticRegression
# unique modules
from predictors.predictor import Predictor
class ActionPredictor(Predictor):
"""
Instantisation methods
"""
def __init__(self, N_samples, recording_fps):
# number of samples to expect
self.n_samples = int(N_samples)
# recording/sampling rate
self.recording_fps = int(recording_fps)
# pose specification
self.num_keypoints = None
self.dimension = None
# inputs
self.input_data = None
self.kinetic = None
# preprocessing function
self.preprocess = None
# action prediction model
self.model = None
# classes list
self.classes_list = None
# prep and model file
#self.prep_and_model_file = "models\\actionnet_flat_linear_preprocess_and_model_10_fps.pickle"
self.prep_and_model_file = "models\\actionnet_CNN_masked_preprocess_and_model_10_fps.pickle"
# load the prep and model
self.load_preprocessing_and_model(self.prep_and_model_file)
# outputs
self.action = None
"""
Inheriteted abstract methods
"""
def predict(self, data: deque) -> np.ndarray:
# set the num_keypoints and dimension
self.set_pose_specification(data[0])
# convert to a np.ndarray
self.kinetic = np.array([data])
# preprocess the data to the required form for the model
X = self.preprocess(self.kinetic, image_size=np.array([480, 640])) # TODO: get image_size from person object
self.input_data = X
# predict using the model
if self.classes_list is None:
y_hat = self.model.predict(X)
self.action = y_hat[0]
return np.array([y_hat[0]])
else:
y_hat = np.argmax(self.model.predict(X))
self.action = self.classes_list[y_hat]
return np.array([self.action])
# def transform(self): pass
# def fit(self): pass
# def fit_transform(self): pass
"""
Methods
"""
def load_preprocessing_and_model(self, model_path):
with open(model_path, "rb") as f:
data = pickle.load(f)
function_code = marshal.loads(data["preprocess_function"])
self.preprocess = types.FunctionType(function_code, globals(), 'preprocess')
if "model" in data.keys():
self.model = data["model"]
elif "model_path" in data.keys():
self.model = tf.keras.models.load_model(data["model_path"])
self.classes_list = data["classes_list"]
else:
raise Exception("No model specified for ActionPredictor.")
def set_pose_specification(self, pose_example: np.ndarray):
self.num_keypoints, self.dimension = pose_example.shape
"""
Set Methods
"""
def set_recording_fps(self, recording_fps):
self.recording_fps = int(recording_fps)
def set_N_samples(self, N_samples):
self.n_samples = int(N_samples)
"""
Get Methods
"""
def get_action(self):
return self.action
def get_kinetic(self):
return self.kinetic
def get_input(self):
return self.input_data
"""
Static Methods
"""
def main():
pass
if __name__ == "__main__":
main()
```
|
{
"source": "jessearodriguez/LJ-Audio-dataset-generator",
"score": 3
}
|
#### File: jessearodriguez/LJ-Audio-dataset-generator/dataset generator.py
```python
from youtube_transcript_api import YouTubeTranscriptApi
import pydub
import youtube_dl
import os
import subprocess
import num2words
import random
def textnum2str(text): # converts a sentence containing numbers to a sentence with the numbers spelled out as words
strarr = text.split()
for i in range(len(strarr)):
if is_number(strarr[i]):
strarr[i] = num2words.num2words(strarr[i])
formatted = ""
for str in strarr:
formatted = formatted + str + " "
return formatted
def is_number(s): # determines if input is a number or not
try:
float(s)
return True
except ValueError:
return False
def randomConcat(transcript): # randomly concatenates the transcript and returns it in a similar format
newtranscript = []
newitem = {}
i = 0
while i < len(transcript):
concated = False
        if len(transcript[i]['text']) > 15: #check if the sentence meets minimum length requirements, useful to filter out "[music]" type subtitles
if i == len(transcript) - 1: #is the last item in the list
newitem = transcript[i]
i += 1
else: # there's probably a cleaner way to do this
try: # to catch the last few index out of bounds errors from trying to access i+x approaching towards the end of the list
if random.random() < .8: #1 random chance concatination
if random.random() < 0.75:#2 random chance concationation
if random.random() < 0.7:#3 random concatination
j = 0
newtext = ""
start =transcript[i]['start']
duration = transcript[i]['duration']
while j < 3:
if transcript[i]['text'] != '[Music]':
if len(transcript[i]['text']) > 15:
newtext += transcript[i]['text'] + " "
j += 1
i += 1
else:
i += 1
continue
newitem = {
"text": newtext,
"start": start,
"duration": duration,
}
concated = True
if not concated:
j = 0
newtext = ""
start = transcript[i]['start']
duration = transcript[i]['duration']
while j < 2:
if transcript[i]['text'] != "[Music]":
if len(transcript[i]['text']) > 15:
newtext += transcript[i]['text'] + " "
j += 1
i += 1
else:
i += 1
continue
newitem = {
"text": newtext,
"start": start,
"duration": duration,
}
concated = True
if not concated:
j = 0
newtext = ""
start = transcript[i]['start']
duration = transcript[i]['duration']
while j < 1:
if transcript[i]['text'] != "[Music]":
if len(transcript[i]['text']) > 15:
newtext += transcript[i]['text'] + " "
j += 1
i += 1
else:
i += 1
continue
newitem = {
"text": newtext,
"start": start,
"duration": duration,
}
concated = True
else:
newitem = transcript[i]
i += 1
except IndexError:
try:
newitem = transcript[i]
i += 1
except:
pass
newtranscript.append(newitem)
else:
i += 1
return newtranscript
url = "https://www.youtube.com/watch?v="
ids = []
text = ""
while text != "done": # loop to get all video ids
text = input("enter video id; type \"done\" to move on")
if text != "done":
ids.append(text)
print("%d videos loaded" % len(ids))
for id in ids:
print(id + "\n")
f = open("dataset/" + "metadata" + ".csv", "w", encoding="utf-8") #create the metadata cvs file for the dataset
videonum = 1
textid = 1
for id in ids:
transcript = YouTubeTranscriptApi.get_transcript(id) #get the video transcript
transcript = randomConcat(transcript)
ydl_opts = {'noplaylist' : True,
'format' : 'bestaudio/best',
'outtmpl': 'tempaudio/%(id)s.%(ext)s'}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
#info = ydl.extract_info(url+id, download=False)
#ydl.list_formats(info)
ydl.download([url + id]) #downloads video audio as a webm
filename = "tempaudio/" + os.listdir("tempaudio/")[0]
#filename = "tempaudio/" + id + ".*"
newfile = "tempaudio/" + id + ".wav"
subprocess.run( #converts the webm to wav using ffmpeg
(['ffmpeg', '-y', '-i', filename, newfile]))
os.remove(filename)
audio = pydub.AudioSegment.from_wav(newfile)
    audio = audio.set_frame_rate(22050) #sample rate used in the LJ dataset
    #audio = audio.set_channels(1) #stereo to mono conversion
    #uncomment the above line if you are not planning on doing further audio processing
lastitem = False
for i in range(len(transcript)): #generates audio splices based off of transcript start times
if i > len(transcript)-3: #stops the inclusion of the last 3 lines (these tend to be broken from youtube transcripts)
continue
elif '[Music]' in transcript[i]['text']: #prevents long clips of speech containing long music pauses
continue
item = transcript[i]
futureitem = item
if i != len(transcript)-1:
futureitem = transcript[i+1]
else:
lastitem = True
id_tag = "video-" + str(videonum) + "-" + str(textid)
if len(item['text']) > 20:
start = int(item['start']*1000+ 150) #converting to milliseconds
            #the 150 ms and 300 ms offsets help align YouTube's transcript timestamps more closely with the actual speech
if lastitem:
end = start + int(futureitem['duration'] * 1000 )
else:
end = int(futureitem['start'] * 1000 + 300)
audioselection = audio[start:end]
if audioselection.duration_seconds > 30: #stops audio files larger than 30 seconds from appearing. these are mostly created when there is a long pause between the current and next transcription item.
continue
audioselection.export("dataset/wavs/"+ id_tag+".wav", format="wav")
formattedtext = textnum2str(item['text'])
linewrite = id_tag + "|" + item['text'] + "|" + formattedtext + "\n"
f.write(linewrite)
textid += 1
os.remove(newfile)
videonum += 1
textid = 1
f.close()
```
|
{
"source": "Jesse-Back/ImageModels",
"score": 3
}
|
#### File: Jesse-Back/ImageModels/Preprocess.py
```python
import os
import numpy as np
from keras.utils import np_utils
from keras.preprocessing.image import load_img
def load_cat_dog(HEIGHT, WIDTH, train_frac, test_frac):
img_dir = './Datasets/cat_dog/train/'
files = os.listdir(img_dir)
N_CATEGORY = 2
N_ALL = len(files)
N_TRAIN = int(train_frac * N_ALL)
x_train = np.empty((N_TRAIN, HEIGHT, WIDTH, 3), np.uint8)
y_train = np.empty((N_TRAIN,), np.int8)
for i in range(N_TRAIN):
filename = files[i]
img = load_img(img_dir + filename)
img = img.resize((HEIGHT,WIDTH))
x_train[i,] = img
entry = filename.split('.')
y_train[i] = 1 if entry[0] == 'dog' else 0
N_TEST = int(test_frac * N_ALL)
x_test = np.empty((N_TEST, HEIGHT, WIDTH, 3), np.uint8)
y_test = np.empty((N_TEST,), np.int8)
for i in range(N_TEST):
filename = files[i + N_TRAIN]
img = load_img(img_dir + filename)
img = img.resize((HEIGHT,WIDTH))
x_test[i,] = img
entry = filename.split('.')
y_test[i] = 1 if entry[0] == 'dog' else 0
return x_train, y_train, x_test, y_test, N_CATEGORY
def load_cifar101(HEIGHT, WIDTH, train_frac, test_frac):
img_dir = './Datasets/101_ObjectCategories/'
categories = os.listdir(img_dir)
N_CATEGORY = len(categories)
cat_to_ind = dict()
for ind, cat in enumerate(categories):
cat_to_ind[cat] = ind
all_img, all_label = [], []
for category in categories:
files = [ f for f in os.listdir(img_dir + category)]
for file in files:
filename = img_dir + category + '/' + file
img = load_img(filename)
img = img.resize((HEIGHT,WIDTH))
all_img.append(img)
all_label.append(cat_to_ind[category])
N_ALL = len(all_img)
N_CATEGORY = len(np.unique(all_label))
x_all = np.empty((N_ALL, HEIGHT, WIDTH, 3), np.uint8)
y_all = np_utils.to_categorical(all_label, N_CATEGORY)
for i in range(N_ALL):
x_all[i,:,:,:] = all_img[i]
# shuffle data
ind_list = [i for i in range(N_ALL)]
np.random.seed(1234)
np.random.shuffle(ind_list)
x_all = x_all[ind_list, :,:,:]
y_all = y_all[ind_list,]
N_TRAIN = int(train_frac * N_ALL)
N_TEST = int(test_frac * N_ALL)
x_train = x_all[:N_TRAIN, :,:,:]
y_train = y_all[:N_TRAIN,]
x_test = x_all[N_TRAIN:N_TRAIN + N_TEST, :,:,:]
y_test = y_all[N_TRAIN:N_TRAIN + N_TEST, ]
return x_train, y_train, x_test, y_test, N_CATEGORY
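# Illustrative usage (assumes the ./Datasets directory layout referenced above):
#
#   x_train, y_train, x_test, y_test, n_cat = load_cat_dog(224, 224, 0.8, 0.2)
#   x_train, y_train, x_test, y_test, n_cat = load_cifar101(224, 224, 0.8, 0.2)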
```
|
{
"source": "Jesse-Back/pynetdicom",
"score": 3
}
|
#### File: apps/storescp/storescp.py
```python
import argparse
import os
import sys
from pydicom.dataset import Dataset
from pydicom.uid import (
ExplicitVRLittleEndian, ImplicitVRLittleEndian, ExplicitVRBigEndian
)
from pynetdicom import (
AE, evt,
AllStoragePresentationContexts,
VerificationPresentationContexts,
)
from pynetdicom.apps.common import setup_logging, handle_store
from pynetdicom._globals import ALL_TRANSFER_SYNTAXES, DEFAULT_MAX_LENGTH
__version__ = '0.6.0'
def _setup_argparser():
"""Setup the command line arguments"""
# Description
parser = argparse.ArgumentParser(
description=(
"The storescp application implements a Service Class "
"Provider (SCP) for the Storage and Verification SOP Classes. It "
"listens for a DICOM C-STORE message from a Service Class User "
"(SCU) and stores the resulting DICOM dataset."
),
usage="storescp [options] port"
)
# Parameters
req_opts = parser.add_argument_group('Parameters')
req_opts.add_argument(
"port",
help="TCP/IP port number to listen on",
type=int
)
# General Options
gen_opts = parser.add_argument_group('General Options')
gen_opts.add_argument(
"--version",
help="print version information and exit",
action="store_true"
)
output = gen_opts.add_mutually_exclusive_group()
output.add_argument(
"-q", "--quiet",
help="quiet mode, print no warnings and errors",
action="store_const",
dest='log_type', const='q'
)
output.add_argument(
"-v", "--verbose",
help="verbose mode, print processing details",
action="store_const",
dest='log_type', const='v'
)
output.add_argument(
"-d", "--debug",
help="debug mode, print debug information",
action="store_const",
dest='log_type', const='d'
)
gen_opts.add_argument(
"-ll", "--log-level", metavar='[l]',
help=(
"use level l for the logger (critical, error, warn, info, debug)"
),
type=str,
choices=['critical', 'error', 'warn', 'info', 'debug']
)
# Network Options
net_opts = parser.add_argument_group('Network Options')
net_opts.add_argument(
"-aet", "--ae-title", metavar='[a]etitle',
help="set my AE title (default: STORESCP)",
type=str,
default='STORESCP'
)
net_opts.add_argument(
"-ta", "--acse-timeout", metavar='[s]econds',
help="timeout for ACSE messages (default: 30 s)",
type=float,
default=30
)
net_opts.add_argument(
"-td", "--dimse-timeout", metavar='[s]econds',
help="timeout for DIMSE messages (default: 30 s)",
type=float,
default=30
)
net_opts.add_argument(
"-tn", "--network-timeout", metavar='[s]econds',
help="timeout for the network (default: 30 s)",
type=float,
default=30
)
net_opts.add_argument(
"-pdu", "--max-pdu", metavar='[n]umber of bytes',
help=(
f"set max receive pdu to n bytes (0 for unlimited, "
f"default: {DEFAULT_MAX_LENGTH})"
),
type=int,
default=DEFAULT_MAX_LENGTH
)
net_opts.add_argument(
"-ba", "--bind-address", metavar="[a]ddress",
help=(
"The address of the network interface to "
"listen on. If unset, listen on all interfaces."
),
default=''
)
# Transfer Syntaxes
ts_opts = parser.add_argument_group('Preferred Transfer Syntaxes')
ts = ts_opts.add_mutually_exclusive_group()
ts.add_argument(
"-x=", "--prefer-uncompr",
help="prefer explicit VR local byte order",
action="store_true"
)
ts.add_argument(
"-xe", "--prefer-little",
help="prefer explicit VR little endian TS",
action="store_true"
)
ts.add_argument(
"-xb", "--prefer-big",
help="prefer explicit VR big endian TS",
action="store_true"
)
ts.add_argument(
"-xi", "--implicit",
help="accept implicit VR little endian TS only",
action="store_true"
)
# Output Options
out_opts = parser.add_argument_group('Output Options')
out_opts.add_argument(
'-od', "--output-directory", metavar="[d]irectory",
help="write received objects to directory d",
type=str
)
out_opts.add_argument(
'--ignore',
help="receive data but don't store it",
action="store_true"
)
# Miscellaneous Options
misc_opts = parser.add_argument_group('Miscellaneous Options')
misc_opts.add_argument(
"--no-echo",
help="don't act as a verification SCP",
action="store_true"
)
return parser.parse_args()
def main(args=None):
"""Run the application."""
if args is not None:
sys.argv = args
args = _setup_argparser()
if args.version:
print(f'storescp.py v{__version__}')
sys.exit()
APP_LOGGER = setup_logging(args, 'storescp')
APP_LOGGER.debug(f'storescp.py v{__version__}')
APP_LOGGER.debug('')
# Set Transfer Syntax options
transfer_syntax = ALL_TRANSFER_SYNTAXES[:]
if args.prefer_uncompr:
transfer_syntax.remove(ImplicitVRLittleEndian)
transfer_syntax.append(ImplicitVRLittleEndian)
elif args.prefer_little:
transfer_syntax.remove(ExplicitVRLittleEndian)
transfer_syntax.insert(0, ExplicitVRLittleEndian)
elif args.prefer_big:
transfer_syntax.remove(ExplicitVRBigEndian)
transfer_syntax.insert(0, ExplicitVRBigEndian)
elif args.implicit:
transfer_syntax = [ImplicitVRLittleEndian]
handlers = [(evt.EVT_C_STORE, handle_store, [args, APP_LOGGER])]
# Create application entity
ae = AE(ae_title=args.ae_title)
# Add presentation contexts with specified transfer syntaxes
for context in AllStoragePresentationContexts:
ae.add_supported_context(context.abstract_syntax, transfer_syntax)
if not args.no_echo:
for context in VerificationPresentationContexts:
ae.add_supported_context(context.abstract_syntax, transfer_syntax)
ae.maximum_pdu_size = args.max_pdu
# Set timeouts
ae.network_timeout = args.network_timeout
ae.acse_timeout = args.acse_timeout
ae.dimse_timeout = args.dimse_timeout
ae.start_server((args.bind_address, args.port), evt_handlers=handlers)
if __name__ == "__main__":
main()
```
#### File: pynetdicom/benchmarks/bench_presentation.py
```python
from copy import deepcopy
from pydicom._uid_dict import UID_dictionary
from pydicom.uid import UID
from pynetdicom import StoragePresentationContexts, build_context
from pynetdicom.presentation import (
PresentationContext,
negotiate_as_acceptor,
negotiate_as_requestor
)
class TimePresentationContext:
def setup(self):
self.contexts = []
for x in range(500):
cx = PresentationContext()
cx.context_id = 1
cx.abstract_syntax = '1.2.840.10008.5.1.4.1.1.2'
cx.transfer_syntax = ['1.2.840.10008.1.2',
'1.2.840.10008.1.2.1',
'1.2.840.10008.1.2.2']
self.contexts.append(cx)
def time_create_single_transfer_syntax(self):
"""Time creating contexts with a single transfer syntax"""
for x in range(500):
cx = PresentationContext()
cx.context_id = 1
cx.abstract_syntax = '1.2.840.10008.5.1.4.1.1.2'
cx.transfer_syntax = ['1.2.840.10008.1.2']
def time_create_double_transfer_syntax(self):
"""Time creating context with two transfer syntaxes."""
for x in range(500):
cx = PresentationContext()
cx.context_id = 1
cx.abstract_syntax = '1.2.840.10008.5.1.4.1.1.2'
cx.transfer_syntax = ['1.2.840.10008.1.2', '1.2.840.10008.1.2.1']
def time_create_triple_transfer_syntax(self):
"""Time creating context with three transfer syntaxes."""
for x in range(500):
cx = PresentationContext()
cx.context_id = 1
cx.abstract_syntax = '1.2.840.10008.5.1.4.1.1.2'
cx.transfer_syntax = ['1.2.840.10008.1.2',
'1.2.840.10008.1.2.1',
'1.2.840.10008.1.2.2']
def time_create_from_sop(self):
"""Test the time taken to create a PresentationContext from every
available standard DICOM UID.
"""
for uid in UID_dictionary:
cx = PresentationContext()
cx.context_id = 1
cx.abstract_syntax = uid
cx.transfer_syntax = ['1.2.840.10008.1.2',
'1.2.840.10008.1.2.1',
'1.2.840.10008.1.2.2']
class TimePresentationAcceptorRoleNegotiation:
"""Time presentation context negotiation as acceptor with SCP/SCU Role
Selection
"""
def setup(self):
# Requestor presentation contexts - max 126
self.requestor_contexts = []
for ii, cx in enumerate(StoragePresentationContexts):
cx.context_id = ii * 2 + 1
cx.scp_role = True
cx.scu_role = True
self.requestor_contexts.append(cx)
# Acceptor presentation contexts - no max
self.acceptor_contexts = []
for uid in UID_dictionary:
cx = PresentationContext()
cx.abstract_syntax = uid
cx.transfer_syntax = ['1.2.840.10008.1.2',
'1.2.840.10008.1.2.1',
'1.2.840.10008.1.2.2']
self.acceptor_contexts.append(cx)
self.ac_roles = {uid : (True, False) for uid in UID_dictionary}
def time_ps_ac_role(self):
"""Time a presentation service with SCP/SCU role negotiation."""
for ii in range(100):
negotiate_as_requestor(
self.requestor_contexts,
self.acceptor_contexts,
self.ac_roles
)
class TimePresentationRequestorRoleNegotiation:
"""Time presentation context negotiation as requestor with SCP/SCU Role
Selection
"""
def setup(self):
# Requestor presentation contexts - max 126
self.requestor_contexts = []
for ii, cx in enumerate(StoragePresentationContexts):
cx.context_id = ii * 2 + 1
cx.SCP = True
cx.SCU = True
self.requestor_contexts.append(cx)
# Acceptor presentation contexts - no max
self.acceptor_contexts = []
for uid in UID_dictionary:
context = PresentationContext()
context.context_id = 1
context.abstract_syntax = uid
context.transfer_syntax = ['1.2.840.10008.1.2']
context.Result = 0x00
context.SCP = True
context.SCU = True
self.acceptor_contexts.append(context)
def time_ps_rq_role(self):
"""Time a presentation service with SCP/SCU role negotiation."""
for ii in range(100):
negotiate_as_requestor(
self.requestor_contexts,
self.acceptor_contexts
)
class TimePresentationAcceptor:
"""Time presentation context negotiation as acceptor"""
def setup(self):
# Requestor presentation contexts - max 128
self.requestor_contexts = []
for ii, cx in enumerate(StoragePresentationContexts):
cx.context_id = ii * 2 + 1
self.requestor_contexts.append(cx)
# Acceptor presentation contexts - no max
self.acceptor_contexts = []
for uid in UID_dictionary:
cx = PresentationContext()
cx.abstract_syntax = uid
cx.transfer_syntax = ['1.2.840.10008.1.2',
'1.2.840.10008.1.2.1',
'1.2.840.10008.1.2.2']
self.acceptor_contexts.append(cx)
def time_ps_ac_basic(self):
"""Time a basic presentation service negotiation"""
for ii in range(100):
negotiate_as_acceptor(
self.requestor_contexts,
self.acceptor_contexts
)
class TimePresentationRequestor:
"""Time presentation context negotiation as requestor"""
def setup(self):
# Requestor presentation contexts - max 126
self.requestor_contexts = []
for ii, cx in enumerate(StoragePresentationContexts):
cx.context_id = ii * 2 + 1
self.requestor_contexts.append(cx)
# Acceptor presentation contexts - no max
self.acceptor_contexts = []
for ii, cx in enumerate(StoragePresentationContexts):
cx = deepcopy(cx)
cx.context_id = ii * 2 + 1
cx.transfer_syntax = ['1.2.840.10008.1.2']
cx.result = 0x00
self.acceptor_contexts.append(cx)
def time_ps_rq_basic(self):
"""Time a basic presentation service negotiation."""
for ii in range(100):
negotiate_as_requestor(
self.requestor_contexts,
self.acceptor_contexts
)
```
#### File: pynetdicom/pynetdicom/sop_class.py
```python
import inspect
import logging
import sys
from typing import Optional, Type, Any, cast, Dict
from pydicom.uid import UID
from pynetdicom.service_class import (
BasicWorklistManagementServiceClass,
ColorPaletteQueryRetrieveServiceClass,
DefinedProcedureProtocolQueryRetrieveServiceClass,
HangingProtocolQueryRetrieveServiceClass,
ImplantTemplateQueryRetrieveServiceClass,
NonPatientObjectStorageServiceClass,
ProtocolApprovalQueryRetrieveServiceClass,
QueryRetrieveServiceClass,
RelevantPatientInformationQueryServiceClass,
ServiceClass,
StorageServiceClass,
SubstanceAdministrationQueryServiceClass,
VerificationServiceClass,
)
from pynetdicom.service_class_n import (
ApplicationEventLoggingServiceClass,
DisplaySystemManagementServiceClass,
InstanceAvailabilityNotificationServiceClass,
MediaCreationManagementServiceClass,
PrintManagementServiceClass,
ProcedureStepServiceClass,
RTMachineVerificationServiceClass,
StorageCommitmentServiceClass,
UnifiedProcedureStepServiceClass,
)
LOGGER = logging.getLogger('pynetdicom.sop')
def uid_to_service_class(uid: str) -> Type[ServiceClass]:
"""Return the :class:`~pynetdicom.service_class.ServiceClass` object
corresponding to `uid`.
Parameters
----------
uid : pydicom.uid.UID
The SOP or Service Class UID to use to find the corresponding Service
Class.
Returns
-------
subclass of service_class.ServiceClass
The Service Class corresponding to the SOP Class UID or the base class
if support for the SOP Class isn't implemented.
"""
if uid in _VERIFICATION_CLASSES.values():
return VerificationServiceClass
elif uid in _QR_CLASSES.values():
return QueryRetrieveServiceClass
elif uid in _STORAGE_CLASSES.values():
return StorageServiceClass
elif uid in _SERVICE_CLASSES:
return _SERVICE_CLASSES[uid]
elif uid in _APPLICATION_EVENT_CLASSES.values():
return ApplicationEventLoggingServiceClass
elif uid in _BASIC_WORKLIST_CLASSES.values():
return BasicWorklistManagementServiceClass
elif uid in _COLOR_PALETTE_CLASSES.values():
return ColorPaletteQueryRetrieveServiceClass
elif uid in _DEFINED_PROCEDURE_CLASSES.values():
return DefinedProcedureProtocolQueryRetrieveServiceClass
elif uid in _DISPLAY_SYSTEM_CLASSES.values():
return DisplaySystemManagementServiceClass
elif uid in _HANGING_PROTOCOL_CLASSES.values():
return HangingProtocolQueryRetrieveServiceClass
elif uid in _IMPLANT_TEMPLATE_CLASSES.values():
return ImplantTemplateQueryRetrieveServiceClass
elif uid in _INSTANCE_AVAILABILITY_CLASSES.values():
return InstanceAvailabilityNotificationServiceClass
elif uid in _MEDIA_CREATION_CLASSES.values():
return MediaCreationManagementServiceClass
elif uid in _MEDIA_STORAGE_CLASSES.values():
return ServiceClass # Not yet implemented
elif uid in _NON_PATIENT_OBJECT_CLASSES.values():
return NonPatientObjectStorageServiceClass
elif uid in _PRINT_MANAGEMENT_CLASSES.values():
return PrintManagementServiceClass
elif uid in _PROCEDURE_STEP_CLASSES.values():
return ProcedureStepServiceClass
elif uid in _PROTOCOL_APPROVAL_CLASSES.values():
return ProtocolApprovalQueryRetrieveServiceClass
elif uid in _RELEVANT_PATIENT_QUERY_CLASSES.values():
return RelevantPatientInformationQueryServiceClass
elif uid in _RT_MACHINE_VERIFICATION_CLASSES.values():
return RTMachineVerificationServiceClass
elif uid in _STORAGE_COMMITMENT_CLASSES.values():
return StorageCommitmentServiceClass
elif uid in _SUBSTANCE_ADMINISTRATION_CLASSES.values():
return SubstanceAdministrationQueryServiceClass
elif uid in _UNIFIED_PROCEDURE_STEP_CLASSES.values():
return UnifiedProcedureStepServiceClass
# No SCP implemented
return ServiceClass
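# Illustrative examples of the mapping above (values implied by the tables
# defined later in this module):
#   uid_to_service_class('1.2.840.10008.1.1')         -> VerificationServiceClass
#   uid_to_service_class('1.2.840.10008.5.1.4.1.1.2') -> StorageServiceClass (CT Image Storage)
#   uid_to_service_class('1.2.3.4')                   -> ServiceClass (unknown, no SCP implemented)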
class SOPClass(UID):
"""Extend :class:`~pydicom.uid.UID` to include the corresponding Service
Class.
"""
_service_class: Optional[Type[ServiceClass]] = None
def __new__(cls: Type["SOPClass"], val: str) -> "SOPClass":
if isinstance(val, SOPClass):
return val
return super(SOPClass, cls).__new__(cls, val)
def __getattribute__(self, name: str) -> Any:
return super(SOPClass, self).__getattribute__(name)
@property
def service_class(self) -> ServiceClass:
"""Return the corresponding Service Class implementation."""
return cast(ServiceClass, self._service_class)
def _generate_sop_classes(sop_class_dict: Dict[str, str]) -> None:
"""Generate the SOP Classes."""
for name in sop_class_dict:
uid = sop_class_dict[name]
sop_class: SOPClass = SOPClass(uid)
sop_class._service_class = uid_to_service_class(uid)
docstring = f"``{uid}``"
# if uid in _x:
# docstring += "\n\n.. versionadded:: 1.4"
sop_class.__doc__ = docstring
globals()[name] = sop_class
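# For example, _generate_sop_classes({'CTImageStorage': '1.2.840.10008.5.1.4.1.1.2'})
# would add a module-level ``CTImageStorage`` constant: a SOPClass (and so also a
# UID/str) equal to '1.2.840.10008.5.1.4.1.1.2' whose ``service_class`` is
# StorageServiceClass, as resolved by uid_to_service_class() above.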
# Table of service classes with assigned UIDs
_SERVICE_CLASSES = {
'1.2.840.10008.4.2': StorageServiceClass,
'1.2.840.10008.5.1.4.34.6': UnifiedProcedureStepServiceClass,
}
# Generate the various SOP classes
# pylint: disable=line-too-long
_APPLICATION_EVENT_CLASSES = {
'ProceduralEventLogging': '1.2.840.10008.1.40',
'SubstanceAdministrationLogging': '1.2.840.10008.1.42',
}
_BASIC_WORKLIST_CLASSES = {
'ModalityWorklistInformationFind': '1.2.840.10008.5.1.4.31',
}
_COLOR_PALETTE_CLASSES = {
'ColorPaletteInformationModelFind': '1.2.840.10008.5.1.4.39.2',
'ColorPaletteInformationModelMove': '1.2.840.10008.5.1.4.39.3',
'ColorPaletteInformationModelGet': '1.2.840.10008.5.1.4.39.4',
}
_DEFINED_PROCEDURE_CLASSES = {
'DefinedProcedureProtocolInformationModelFind': '1.2.840.10008.5.1.4.20.1',
'DefinedProcedureProtocolInformationModelMove': '1.2.840.10008.5.1.4.20.2',
'DefinedProcedureProtocolInformationModelGet': '1.2.840.10008.5.1.4.20.3',
}
_DISPLAY_SYSTEM_CLASSES = {
'DisplaySystem': '1.2.840.10008.5.1.1.40',
}
_HANGING_PROTOCOL_CLASSES = {
'HangingProtocolInformationModelFind': '1.2.840.10008.5.1.4.38.2',
'HangingProtocolInformationModelMove': '1.2.840.10008.5.1.4.38.3',
'HangingProtocolInformationModelGet': '1.2.840.10008.5.1.4.38.4',
}
_IMPLANT_TEMPLATE_CLASSES = {
'GenericImplantTemplateInformationModelFind': '1.2.840.10008.5.1.4.43.2',
'GenericImplantTemplateInformationModelMove': '1.2.840.10008.5.1.4.43.3',
'GenericImplantTemplateInformationModelGet': '1.2.840.10008.5.1.4.43.4',
'ImplantAssemblyTemplateInformationModelFind': '1.2.840.10008.5.1.4.44.2',
'ImplantAssemblyTemplateInformationModelMove': '1.2.840.10008.5.1.4.44.3',
'ImplantAssemblyTemplateInformationModelGet': '1.2.840.10008.5.1.4.44.4',
'ImplantTemplateGroupInformationModelFind': '1.2.840.10008.5.1.4.45.2',
'ImplantTemplateGroupInformationModelMove': '1.2.840.10008.5.1.4.45.3',
'ImplantTemplateGroupInformationModelGet': '1.2.840.10008.5.1.4.45.4',
}
_INSTANCE_AVAILABILITY_CLASSES = {
'InstanceAvailabilityNotification': '1.2.840.10008.5.1.4.33',
}
_MEDIA_CREATION_CLASSES = {
'MediaCreationManagement': '1.2.840.10008.5.1.1.33',
}
_MEDIA_STORAGE_CLASSES = {
'MediaStorageDirectoryStorage': '1.2.840.10008.1.3.10',
}
_NON_PATIENT_OBJECT_CLASSES = {
'HangingProtocolStorage': '1.2.840.10008.5.1.4.38.1',
'ColorPaletteStorage': '1.2.840.10008.5.1.4.39.1',
'GenericImplantTemplateStorage': '1.2.840.10008.5.1.4.43.1',
'ImplantAssemblyTemplateStorage': '1.2.840.10008.5.1.4.44.1',
'ImplantTemplateGroupStorage': '1.2.840.10008.5.1.4.45.1',
'CTDefinedProcedureProtocolStorage': '1.2.840.10008.5.1.4.1.1.200.1',
'ProtocolApprovalStorage': '1.2.840.10008.5.1.4.1.1.200.3',
}
_PRINT_MANAGEMENT_CLASSES = {
'BasicFilmSession': '1.2.840.10008.5.1.1.1',
'BasicFilmBox': '1.2.840.10008.5.1.1.2',
'BasicGrayscaleImageBox': '1.2.840.10008.5.1.1.4',
'BasicColorImageBox': '1.2.840.10008.5.1.1.4.1',
'PrintJob': '1.2.840.10008.5.1.1.14',
'BasicAnnotationBox': '1.2.840.10008.5.1.1.15',
'Printer': '1.2.840.10008.5.1.1.16',
'PrinterConfigurationRetrieval': '1.2.840.10008.5.1.1.16.376',
'PresentationLUT': '1.2.840.10008.5.1.1.23',
# Print Management Meta SOP Classes
# Basic Film Session, Basic Film Box, Basic Grayscale, Printer
'BasicGrayscalePrintManagementMeta': '1.2.840.10008.5.1.1.9',
# Basic Film Session, Basic Film Box, Basic Color, Printer
'BasicColorPrintManagementMeta': '1.2.840.10008.5.1.1.18',
}
_PROCEDURE_STEP_CLASSES = {
'ModalityPerformedProcedureStep': '1.2.840.10008.3.1.2.3.3',
'ModalityPerformedProcedureStepRetrieve': '1.2.840.10008.3.1.2.3.4',
'ModalityPerformedProcedureStepNotification': '1.2.840.10008.3.1.2.3.5',
}
_PROTOCOL_APPROVAL_CLASSES = {
'ProtocolApprovalInformationModelFind': '1.2.840.10008.5.1.4.1.1.200.4',
'ProtocolApprovalInformationModelMove': '1.2.840.10008.5.1.4.1.1.200.5',
'ProtocolApprovalInformationModelGet': '1.2.840.10008.5.1.4.1.1.200.6'
}
_QR_CLASSES = {
'PatientRootQueryRetrieveInformationModelFind': '1.2.840.10008.5.1.4.1.2.1.1',
'PatientRootQueryRetrieveInformationModelMove': '1.2.840.10008.5.1.4.1.2.1.2',
'PatientRootQueryRetrieveInformationModelGet': '1.2.840.10008.5.1.4.1.2.1.3',
'StudyRootQueryRetrieveInformationModelFind': '1.2.840.10008.5.1.4.1.2.2.1',
'StudyRootQueryRetrieveInformationModelMove': '1.2.840.10008.5.1.4.1.2.2.2',
'StudyRootQueryRetrieveInformationModelGet': '1.2.840.10008.5.1.4.1.2.2.3',
'PatientStudyOnlyQueryRetrieveInformationModelFind': '1.2.840.10008.5.1.4.1.2.3.1',
'PatientStudyOnlyQueryRetrieveInformationModelMove': '1.2.840.10008.5.1.4.1.2.3.2',
'PatientStudyOnlyQueryRetrieveInformationModelGet': '1.2.840.10008.5.1.4.1.2.3.3',
'CompositeInstanceRootRetrieveMove': '1.2.840.10008.5.1.4.1.2.4.2',
'CompositeInstanceRootRetrieveGet': '1.2.840.10008.5.1.4.1.2.4.3',
'CompositeInstanceRetrieveWithoutBulkDataGet': '1.2.840.10008.5.1.4.1.2.5.3',
}
_RELEVANT_PATIENT_QUERY_CLASSES = {
'GeneralRelevantPatientInformationQuery': '1.2.840.10008.5.1.4.37.1',
'BreastImagingRelevantPatientInformationQuery': '1.2.840.10008.5.1.4.37.2',
'CardiacRelevantPatientInformationQuery': '1.2.840.10008.5.1.4.37.3',
}
_RT_MACHINE_VERIFICATION_CLASSES = {
'RTConventionalMachineVerification': '1.2.840.10008.5.1.4.34.8',
'RTIonMachineVerification': '1.2.840.10008.5.1.4.34.9',
}
_STORAGE_CLASSES = {
'ComputedRadiographyImageStorage': '1.2.840.10008.5.1.4.1.1.1', # A.2
'DigitalXRayImageStorageForPresentation': '1.2.840.10008.5.1.4.1.1.1.1', # A.26
'DigitalXRayImageStorageForProcessing': '1.2.840.10008.5.1.4.1.1.1.1.1', # A.26
'DigitalMammographyXRayImageStorageForPresentation': '1.2.840.10008.5.1.4.1.1.1.2', # A.27
'DigitalMammographyXRayImageStorageForProcessing': '1.2.840.10008.5.1.4.1.1.1.2.1', # A.27
'DigitalIntraOralXRayImageStorageForPresentation': '1.2.840.10008.5.1.4.1.1.1.3', # A.28
'DigitalIntraOralXRayImageStorageForProcessing': '1.2.840.10008.5.1.4.1.1.1.3.1', # A.28
'CTImageStorage': '1.2.840.10008.5.1.4.1.1.2', # A.3
'EnhancedCTImageStorage': '1.2.840.10008.5.1.4.1.1.2.1', # A.38
'LegacyConvertedEnhancedCTImageStorage': '1.2.840.10008.5.1.4.1.1.2.2', # A.70
'UltrasoundMultiFrameImageStorage': '1.2.840.10008.5.1.4.1.1.3.1', # A.7
'MRImageStorage': '1.2.840.10008.5.1.4.1.1.4', # A.4
'EnhancedMRImageStorage': '1.2.840.10008.5.1.4.1.1.4.1', # A.36.2
'MRSpectroscopyStorage': '1.2.840.10008.5.1.4.1.1.4.2', # A.36.3
'EnhancedMRColorImageStorage': '1.2.840.10008.5.1.4.1.1.4.3', # A.36.4
'LegacyConvertedEnhancedMRImageStorage': '1.2.840.10008.5.1.4.1.1.4.4', # A.71
'UltrasoundImageStorage': '1.2.840.10008.5.1.4.1.1.6.1', # A.6
'EnhancedUSVolumeStorage': '1.2.840.10008.5.1.4.1.1.6.2', # A.59
'SecondaryCaptureImageStorage': '1.2.840.10008.5.1.4.1.1.7', # A.8.1
'MultiFrameSingleBitSecondaryCaptureImageStorage': '1.2.840.10008.5.1.4.1.1.7.1', # A.8.2
'MultiFrameGrayscaleByteSecondaryCaptureImageStorage': '1.2.840.10008.5.1.4.1.1.7.2', # A.8.3
'MultiFrameGrayscaleWordSecondaryCaptureImageStorage': '1.2.840.10008.5.1.4.1.1.7.3', # A.8.4
'MultiFrameTrueColorSecondaryCaptureImageStorage': '1.2.840.10008.5.1.4.1.1.7.4', # A.8.5
'TwelveLeadECGWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.1.1', # A.34.3
'GeneralECGWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.1.2', # A.34.4
'AmbulatoryECGWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.1.3', # A.34.5
'HemodynamicWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.2.1', # A.34.6
'CardiacElectrophysiologyWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.3.1', # A.34.7
'BasicVoiceAudioWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.4.1', # A.34.2
'GeneralAudioWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.4.2', # A.34.10
'ArterialPulseWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.5.1', # A.34.8
'RespiratoryWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.6.1', # A.34.9
'MultichannelRespiratoryWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.6.2', # A.34.16
'RoutineScalpElectroencephalogramWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.7.1', # A.34.12
'ElectromyogramWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.7.2', # A.34.13
'ElectrooculogramWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.7.3', # A.34.14
'SleepElectroencephalogramWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.7.4', # A.34.15
'BodyPositionWaveformStorage': '1.2.840.10008.5.1.4.1.1.9.8.1', # A.34.17
'GrayscaleSoftcopyPresentationStateStorage': '1.2.840.10008.5.1.4.1.1.11.1', # A.33.1
'ColorSoftcopyPresentationStateStorage': '1.2.840.10008.5.1.4.1.1.11.2', # A.33.2
'PseudoColorSoftcopyPresentationStageStorage': '1.2.840.10008.5.1.4.1.1.11.3', # A.33.3
'BlendingSoftcopyPresentationStateStorage': '1.2.840.10008.5.1.4.1.1.11.4', # A.33.4
'XAXRFGrayscaleSoftcopyPresentationStateStorage': '1.2.840.10008.5.1.4.1.1.11.5', # A.33.6
'GrayscalePlanarMPRVolumetricPresentationStateStorage': '1.2.840.10008.5.1.4.1.1.11.6', # A.80.1
'CompositingPlanarMPRVolumetricPresentationStateStorage': '1.2.840.10008.5.1.4.1.1.11.7', # A.80.1
'AdvancedBlendingPresentationStateStorage': '1.2.840.10008.5.1.4.1.1.11.8', # A.33.7
'VolumeRenderingVolumetricPresentationStateStorage': '1.2.840.10008.5.1.4.1.1.11.9', # A.80.2
'SegmentedVolumeRenderingVolumetricPresentationStateStorage': '1.2.840.10008.5.1.4.1.1.11.10', # A.80.2
'MultipleVolumeRenderingVolumetricPresentationStateStorage': '1.2.840.10008.5.1.4.1.1.11.11', # A.80.2
'XRayAngiographicImageStorage': '1.2.840.10008.5.1.4.1.1.12.1', # A.14
'EnhancedXAImageStorage': '1.2.840.10008.5.1.4.1.1.12.1.1', # A.47
'XRayRadiofluoroscopicImageStorage': '1.2.840.10008.5.1.4.1.1.12.2', # A.16
'EnhancedXRFImageStorage': '1.2.840.10008.5.1.4.1.1.12.2.1', # A.48
'XRay3DAngiographicImageStorage': '1.2.840.10008.5.1.4.1.1.13.1.1', # A.53
'XRay3DCraniofacialImageStorage': '1.2.840.10008.5.1.4.1.1.13.1.2', # A.54
'BreastTomosynthesisImageStorage': '1.2.840.10008.5.1.4.1.1.13.1.3', # A.55
'BreastProjectionXRayImageStorageForPresentation': '1.2.840.10008.5.1.4.1.1.13.1.4', # A.74
'BreastProjectionXRayImageStorageForProcessing': '1.2.840.10008.5.1.4.1.1.13.1.5', # A.74
'IntravascularOpticalCoherenceTomographyImageStorageForPresentation': '1.2.840.10008.5.1.4.1.1.14.1', # A.66
'IntravascularOpticalCoherenceTomographyImageStorageForProcessing': '1.2.840.10008.5.1.4.1.1.14.2', # A.66
'NuclearMedicineImageStorage': '1.2.840.10008.5.1.4.1.1.20', # A.5
'ParametricMapStorage': '1.2.840.10008.5.1.4.1.1.30', # A.75
'RawDataStorage': '1.2.840.10008.5.1.4.1.1.66', # A.37
'SpatialRegistrationStorage': '1.2.840.10008.5.1.4.1.1.66.1', # A.39.1
'SpatialFiducialsStorage': '1.2.840.10008.5.1.4.1.1.66.2', # A.40
'DeformableSpatialRegistrationStorage': '1.2.840.10008.5.1.4.1.1.66.3', # A.39.2
'SegmentationStorage': '1.2.840.10008.5.1.4.1.1.66.4', # A.51
'SurfaceSegmentationStorage': '1.2.840.10008.5.1.4.1.1.66.5', # A.57
'TractographyResultsStorage': '1.2.840.10008.5.1.4.1.1.66.6', # A.78
'RealWorldValueMappingStorage': '1.2.840.10008.5.1.4.1.1.67', # A.46
'SurfaceScanMeshStorage': '1.2.840.10008.5.1.4.1.1.68.1', # A.68
'SurfaceScanPointCloudStorage': '1.2.840.10008.5.1.4.1.1.68.2', # A.69
'VLEndoscopicImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.1', # A.32.1
'VideoEndoscopicImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.1.1', # A.32.5
'VLMicroscopicImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.2', # A.32.2
'VideoMicroscopicImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.2.1', # A.32.6
'VLSlideCoordinatesMicroscopicImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.3', # A.32.3
'VLPhotographicImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.4', # A.32.4
'VideoPhotographicImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.4.1', # A.32.7
'OphthalmicPhotography8BitImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.5.1', # A.41
'OphthalmicPhotography16BitImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.5.2', # A.42
'StereometricRelationshipStorage': '1.2.840.10008.5.1.4.1.1.77.1.5.3', # A.43
'OphthalmicTomographyImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.5.4', # A.52
'WideFieldOphthalmicPhotographyStereographicProjectionImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.5.5', # A.76
'WideFieldOphthalmicPhotography3DCoordinatesImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.5.6', # A.77
'OphthalmicOpticalCoherenceTomographyEnFaceImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.5.7', # A.83
'OphthlamicOpticalCoherenceTomographyBscanVolumeAnalysisStorage': '1.2.840.10008.5.1.4.1.1.77.1.5.8', # A.84
'VLWholeSlideMicroscopyImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.6', # A.32.8
'DermoscopicPhotographyImageStorage': '1.2.840.10008.5.1.4.1.1.77.1.7', # A.32.11
'LensometryMeasurementsStorage': '1.2.840.10008.5.1.4.1.1.78.1', # A.60.1
'AutorefractionMeasurementsStorage': '1.2.840.10008.5.1.4.1.1.78.2', # A.60.2
'KeratometryMeasurementsStorage': '1.2.840.10008.5.1.4.1.1.78.3', # A.60.3
'SubjectiveRefractionMeasurementsStorage': '1.2.840.10008.5.1.4.1.1.78.4', # A.60.4
'VisualAcuityMeasurementsStorage': '1.2.840.10008.5.1.4.1.1.78.5', # A.60.5
'SpectaclePrescriptionReportStorage': '1.2.840.10008.5.1.4.1.1.78.6', # A.35.9
'OphthalmicAxialMeasurementsStorage': '1.2.840.10008.5.1.4.1.1.78.7', # A.60.6
'IntraocularLensCalculationsStorage': '1.2.840.10008.5.1.4.1.1.78.8', # A.60.7
'MacularGridThicknessAndVolumeReportStorage': '1.2.840.10008.5.1.4.1.1.79.1', # A.35.11
'OphthalmicVisualFieldStaticPerimetryMeasurementsStorage': '1.2.840.10008.5.1.4.1.1.80.1', # A.65
'OphthalmicThicknessMapStorage': '1.2.840.10008.5.1.4.1.1.81.1', # A.67
'CornealTopographyMapStorage': '1.2.840.10008.5.1.4.1.1.82.1', # A.73
'BasicTextSRStorage': '1.2.840.10008.5.1.4.1.1.88.11', # A.35.1
'EnhancedSRStorage': '1.2.840.10008.5.1.4.1.1.88.22', # A.35.2
'ComprehensiveSRStorage': '1.2.840.10008.5.1.4.1.1.88.33', # A.35.3
'Comprehensive3DSRStorage': '1.2.840.10008.5.1.4.1.1.88.34', # A.35.13
'ExtensibleSRStorage': '1.2.840.10008.5.1.4.1.1.88.35', # A.35.15
'ProcedureLogStorage': '1.2.840.10008.5.1.4.1.1.88.40', # A.35.7
'MammographyCADSRStorage': '1.2.840.10008.5.1.4.1.1.88.50', # A.35.5
'KeyObjectSelectionDocumentStorage': '1.2.840.10008.5.1.4.1.1.88.59', # A.35.4
'ChestCADSRStorage': '1.2.840.10008.5.1.4.1.1.88.65', # A.35.6
'XRayRadiationDoseSRStorage': '1.2.840.10008.5.1.4.1.1.88.67', # A.35.8
'RadiopharmaceuticalRadiationDoseSRStorage': '1.2.840.10008.5.1.4.1.1.88.68', # A.35.14
'ColonCADSRStorage': '1.2.840.10008.5.1.4.1.1.88.69', # A.35.10
'ImplantationPlanSRStorage': '1.2.840.10008.5.1.4.1.1.88.70', # A.35.12
'AcquisitionContextSRStorage': '1.2.840.10008.5.1.4.1.1.88.71', # A.35.16
'SimplifiedAdultEchoSRStorage': '1.2.840.10008.5.1.4.1.1.88.72', # A.35.17
'PatientRadiationDoseSRStorage': '1.2.840.10008.5.1.4.1.1.88.73', # A.35.18
'PlannedImagingAgentAdministrationSRStorage': '1.2.840.10008.5.1.4.1.1.88.74', # A.35.19
'PerformedImagingAgentAdministrationSRStorage': '1.2.840.10008.5.1.4.1.1.88.75', # A.35.20
'EnhancedXRayRadiationDoseSRStorage': '1.2.840.10008.5.1.4.1.1.88.76', # A.35.
'ContentAssessmentResultsStorage': '1.2.840.10008.5.1.4.1.1.90.1', # A.81
'EncapsulatedPDFStorage': '1.2.840.10008.5.1.4.1.1.104.1', # A.45.1
'EncapsulatedCDAStorage': '1.2.840.10008.5.1.4.1.1.104.2', # A.45.2
'EncapsulatedSTLStorage': '1.2.840.10008.5.1.4.1.1.104.3', # A.85.1
'EncapsulatedOBJStorage': '1.2.840.10008.5.1.4.1.1.104.4', # A.85.2
'EncapsulatedMTLStorage': '1.2.840.10008.5.1.4.1.1.104.5', # A.85.3
'PositronEmissionTomographyImageStorage': '1.2.840.10008.5.1.4.1.1.128', # A.21
'LegacyConvertedEnhancedPETImageStorage': '1.2.840.10008.5.1.4.1.1.128.1', # A.72
# 128 presentation context limit here
'EnhancedPETImageStorage': '1.2.840.10008.5.1.4.1.1.130', # A.56
'BasicStructuredDisplayStorage': '1.2.840.10008.5.1.4.1.1.131', # A.33.5
'CTPerformedProcedureProtocolStorage': '1.2.840.10008.5.1.4.1.1.200.2', # A.82.1
'RTImageStorage': '1.2.840.10008.5.1.4.1.1.481.1', # A.17
'RTDoseStorage': '1.2.840.10008.5.1.4.1.1.481.2', # A.18
'RTStructureSetStorage': '1.2.840.10008.5.1.4.1.1.481.3', # A.19
'RTBeamsTreatmentRecordStorage': '1.2.840.10008.5.1.4.1.1.481.4', # A.29
'RTPlanStorage': '1.2.840.10008.5.1.4.1.1.481.5', # A.20
'RTBrachyTreatmentRecordStorage': '1.2.840.10008.5.1.4.1.1.481.6', # A.30
'RTTreatmentSummaryRecordStorage': '1.2.840.10008.5.1.4.1.1.481.7', # A.31
'RTIonPlanStorage': '1.2.840.10008.5.1.4.1.1.481.8', # A.49
'RTIonBeamsTreatmentRecordStorage': '1.2.840.10008.5.1.4.1.1.481.9', # A.50
'RTPhysicianIntentStorage': '1.2.840.10008.5.1.4.1.1.481.10', # A.86.1.2
'RTSegmentAnnotationStorage': '1.2.840.10008.5.1.4.1.1.481.11', # A.86.1.3
'RTRadiationSetStorage': '1.2.840.10008.5.1.4.1.1.481.12', # A.86.1.4
'CArmPhotonElectronRadiationStorage': '1.2.840.10008.5.1.4.1.1.481.13', # A.86.1.5
'TomotherapeuticRadiationStorage': '1.2.840.10008.5.1.4.1.1.481.14', # A.86.1.6
'RoboticArmRadiationStorage': '1.2.840.10008.5.1.4.1.1.481.15', # A.86.1.7
'RTRadiationRecordSetStorage': '1.2.840.10008.5.1.4.1.1.481.16', # A.86.1.8
'RTRadiationSalvageRecordStorage': '1.2.840.10008.5.1.4.1.1.481.17', # A.86.1.9
'TomotherapeuticRadiationRecordStorage': '1.2.840.10008.5.1.4.1.1.481.18', # A.86.1.10
'CArmPhotonElectronRadiationRecordStorage': '1.2.840.10008.5.1.4.1.1.481.19', # A.86.1.11
'RoboticArmRadiationRecordStorage': '1.2.840.10008.5.1.4.1.1.481.20', # A.86.1.12
'RTBeamsDeliveryInstructionStorage': '1.2.840.10008.5.1.4.34.7', # A.64
'RTBrachyApplicationSetupDeliveryInstructionsStorage': '1.2.840.10008.5.1.4.34.10', # A.79
}
_STORAGE_COMMITMENT_CLASSES = {
'StorageCommitmentPushModel': '1.2.840.10008.1.20.1',
}
_SUBSTANCE_ADMINISTRATION_CLASSES = {
'ProductCharacteristicsQuery': '1.2.840.10008.5.1.4.41',
'SubstanceApprovalQuery': '1.2.840.10008.5.1.4.42'
}
_UNIFIED_PROCEDURE_STEP_CLASSES = {
'UnifiedProcedureStepPush': '1.2.840.10008.5.1.4.34.6.1',
'UnifiedProcedureStepWatch': '1.2.840.10008.5.1.4.34.6.2',
'UnifiedProcedureStepPull': '1.2.840.10008.5.1.4.34.6.3',
'UnifiedProcedureStepEvent': '1.2.840.10008.5.1.4.34.6.4',
'UnifiedProcedureStepQuery': '1.2.840.10008.5.1.4.34.6.5',
}
_VERIFICATION_CLASSES = {
'Verification': '1.2.840.10008.1.1',
}
# pylint: enable=line-too-long
_generate_sop_classes(_APPLICATION_EVENT_CLASSES)
_generate_sop_classes(_BASIC_WORKLIST_CLASSES)
_generate_sop_classes(_COLOR_PALETTE_CLASSES)
_generate_sop_classes(_DEFINED_PROCEDURE_CLASSES)
_generate_sop_classes(_DISPLAY_SYSTEM_CLASSES)
_generate_sop_classes(_HANGING_PROTOCOL_CLASSES)
_generate_sop_classes(_IMPLANT_TEMPLATE_CLASSES)
_generate_sop_classes(_INSTANCE_AVAILABILITY_CLASSES)
_generate_sop_classes(_MEDIA_CREATION_CLASSES)
_generate_sop_classes(_MEDIA_STORAGE_CLASSES)
_generate_sop_classes(_NON_PATIENT_OBJECT_CLASSES)
_generate_sop_classes(_PRINT_MANAGEMENT_CLASSES)
_generate_sop_classes(_PROCEDURE_STEP_CLASSES)
_generate_sop_classes(_PROTOCOL_APPROVAL_CLASSES)
_generate_sop_classes(_QR_CLASSES)
_generate_sop_classes(_RELEVANT_PATIENT_QUERY_CLASSES)
_generate_sop_classes(_RT_MACHINE_VERIFICATION_CLASSES)
_generate_sop_classes(_STORAGE_CLASSES)
_generate_sop_classes(_STORAGE_COMMITMENT_CLASSES)
_generate_sop_classes(_SUBSTANCE_ADMINISTRATION_CLASSES)
_generate_sop_classes(_UNIFIED_PROCEDURE_STEP_CLASSES)
_generate_sop_classes(_VERIFICATION_CLASSES)
def uid_to_sop_class(uid: str) -> SOPClass:
"""Return the :class:`SOPClass` object corresponding to `uid`.
Parameters
----------
uid : pydicom.uid.UID
Return the corresponding object for this UID.
Returns
-------
sop_class.SOPClass subclass
The SOP class corresponding to `uid`.
Raises
------
NotImplementedError
If the SOP Class corresponding to the given UID has not been
implemented.
"""
# Get a list of all the class members of the current module
members = inspect.getmembers(
sys.modules[__name__],
lambda mbr: isinstance(mbr, str)
)
for obj in members:
if hasattr(obj[1], 'service_class') and obj[1] == uid:
return cast(SOPClass, obj[1])
sop_class = SOPClass(uid)
sop_class._service_class = ServiceClass
return sop_class
# Well-known SOP Instance UIDs for the supported Service Classes
DisplaySystemInstance = UID('1.2.840.10008.5.1.1.40.1')
"""``1.2.840.10008.5.1.1.40.1``
.. versionadded:: 1.5
"""
PrinterConfigurationRetrievalInstance = UID('1.2.840.10008.5.1.1.17.376')
"""``1.2.840.10008.5.1.1.17.376``
.. versionadded:: 1.5
"""
PrinterInstance = UID('1.2.840.10008.5.1.1.17')
"""``1.2.840.10008.5.1.1.17``
.. versionadded:: 1.5
"""
ProceduralEventLoggingInstance = UID('1.2.840.10008.1.40.1')
"""``1.2.840.10008.1.40.1``
.. versionadded:: 1.5
"""
StorageCommitmentPushModelInstance = UID('1.2.840.10008.1.20.1.1')
"""``1.2.840.10008.1.20.1.1``
.. versionadded:: 1.5
"""
SubstanceAdministrationLoggingInstance = UID('1.2.840.10008.1.42.1')
"""``1.2.840.10008.1.42.1``
.. versionadded:: 1.5
"""
UPSFilteredGlobalSubscriptionInstance = UID('1.2.840.10008.5.1.4.34.5.1')
"""``1.2.840.10008.5.1.4.34.5.1``
.. versionadded:: 1.5
"""
UPSGlobalSubscriptionInstance = UID('1.2.840.10008.5.1.4.34.5')
"""``1.2.840.10008.5.1.4.34.5``
.. versionadded:: 1.5
"""
```
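A short usage sketch for the module above, relying only on names defined in it; the comments show the expected outcomes rather than captured output:
```python
# Hypothetical usage of the generated SOP Class constants and the lookup helpers.
from pynetdicom.sop_class import (
    CTImageStorage,
    Verification,
    uid_to_service_class,
    uid_to_sop_class,
)

# Generated constants are SOPClass (a UID/str subclass), so they compare equal
# to the plain UID string and expose their Service Class implementation.
assert CTImageStorage == '1.2.840.10008.5.1.4.1.1.2'
print(CTImageStorage.service_class)   # StorageServiceClass
print(Verification.service_class)     # VerificationServiceClass

# uid_to_sop_class() returns the matching constant; for unknown UIDs it builds
# a generic SOPClass backed by the base ServiceClass.
echo = uid_to_sop_class('1.2.840.10008.1.1')
print(echo == Verification)            # True
print(uid_to_service_class('1.2.3.4')) # ServiceClass (no SCP implemented)
```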
#### File: pynetdicom/tests/test_ae.py
```python
import logging
import os
import signal
import threading
import time
import pytest
from pydicom import read_file
from pydicom.dataset import Dataset
from pydicom.uid import UID, ImplicitVRLittleEndian
from pynetdicom import (
AE, evt, debug_logger, build_context,
DEFAULT_TRANSFER_SYNTAXES,
StoragePresentationContexts,
VerificationPresentationContexts,
PYNETDICOM_IMPLEMENTATION_UID,
PYNETDICOM_IMPLEMENTATION_VERSION
)
from pynetdicom.presentation import build_context
from pynetdicom.sop_class import RTImageStorage, Verification
from pynetdicom.transport import AssociationServer, RequestHandler
# debug_logger()
TEST_DS_DIR = os.path.join(os.path.dirname(__file__), 'dicom_files')
DATASET = read_file(os.path.join(TEST_DS_DIR, 'RTImageStorage.dcm'))
COMP_DATASET = read_file(os.path.join(TEST_DS_DIR, 'MRImageStorage_JPG2000_Lossless.dcm'))
def test_blocking_handler():
"""Test binding events to the blocking AssociationServer."""
ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
def handle_echo(event):
return 0x0000
handlers = [(evt.EVT_C_ECHO, handle_echo)]
thread = threading.Thread(
target=ae.start_server,
args=(('', 11112), ),
kwargs={'evt_handlers' : handlers}
)
thread.daemon = True
thread.start()
time.sleep(0.1)
ae.shutdown()
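# Note on the pattern above: with the default block=True, start_server() does not
# return until the server stops, so the blocking test runs it in a daemon thread
# and tears it down via ae.shutdown(); the non-blocking tests below pass
# block=False, keep the returned server object and call server.shutdown() directly.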
class TestMakeServer:
"""Tests for AE.make_server()"""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_default_arguments(self):
self.ae = ae = AE()
ae.add_supported_context(Verification)
server = ae.make_server(('', 11112))
assert isinstance(server, AssociationServer)
def test_custom_request_handler(self):
class MyRequestHandler(RequestHandler):
pass
self.ae = ae = AE()
ae.add_supported_context(Verification)
server = ae.make_server(('', 11112), request_handler=MyRequestHandler)
assert server.RequestHandlerClass is MyRequestHandler
def test_aet_bytes_deprecation(self):
"""Test warning if using bytes to set an AE title."""
self.ae = ae = AE()
ae.add_supported_context(Verification)
msg = (
r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
r"str instead"
)
with pytest.warns(DeprecationWarning, match=msg):
server = ae.start_server(('', 11112), block=False, ae_title=b'BADAE2')
assert server.ae_title == 'BADAE2'
server.shutdown()
class TestStartServer:
"""Tests for AE.start_server()"""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_ae_title(self):
"""Test the `ae_title` keyword parameter."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.ae_title = 'TESTAET'
assert ae.ae_title == 'TESTAET'
ae.add_supported_context(Verification)
server = ae.start_server(('', 11112), block=False)
assert server.ae_title == ae.ae_title
server.shutdown()
server = ae.start_server(('', 11112), block=False, ae_title='MYAE')
assert server.ae_title == 'MYAE'
ae.require_called_aet = True
ae.add_requested_context(Verification)
assoc = ae.associate('localhost', 11112, ae_title='MYAE')
assert assoc.is_established
assoc.release()
assert assoc.is_released
server.shutdown()
def test_contexts(self):
"""Test the `contexts` keyword parameter."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.ae_title = 'TESTAET'
assert ae.ae_title == 'TESTAET'
cx = build_context(Verification)
server = ae.start_server(('', 11112), block=False, contexts=[cx])
ae.add_requested_context(Verification)
assoc = ae.associate('localhost', 11112, ae_title='MYAE')
assert assoc.is_established
assert (
assoc.accepted_contexts[0].abstract_syntax == Verification
)
assoc.release()
assert assoc.is_released
server.shutdown()
class TestAEVerificationSCP:
"""Check verification SCP"""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
@pytest.mark.skipif(os.name == "nt", reason="Kills pytest on windows")
def test_start_server_keyboard_interrupt(self):
"""Test stopping the SCP with keyboard"""
pid = os.getpid()
def trigger_signal():
time.sleep(0.1)
os.kill(pid, signal.SIGINT)
self.ae = ae = AE()
ae.add_supported_context('1.2.3')
thread = threading.Thread(target=trigger_signal)
thread.daemon = True
thread.start()
ae.start_server(('', 11112))
ae.shutdown()
def test_no_supported_contexts(self):
"""Test starting with no contexts raises"""
ae = AE()
with pytest.raises(ValueError, match=r"No supported Presentation"):
ae.start_server(('', 11112))
def test_new_scu_scp_warning(self):
"""Test that a warning is given if scu_role and scp_role bad."""
ae = AE()
ae.add_supported_context('1.2.3.4', scp_role=False)
msg = r"The following presentation contexts have "
with pytest.raises(ValueError, match=msg):
ae.start_server(('', 11112))
def test_str_empty(self):
"""Test str output for default AE"""
ae = AE()
ae.__str__()
class TestAEPresentationSCU:
"""Tests for AE presentation contexts when running as an SCU"""
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_associate_context(self):
"""Test that AE.associate doesn't modify the supplied contexts"""
# Test AE.requested_contexts
self.ae = ae = AE()
ae.add_supported_context(Verification)
scp = ae.start_server(('', 11112), block=False)
ae.requested_contexts = VerificationPresentationContexts
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert ae.requested_contexts[0].context_id is None
assert len(assoc.requestor.requested_contexts) == 1
assert assoc.requestor.requested_contexts[0].abstract_syntax == (
'1.2.840.10008.1.1'
)
assert assoc.requestor.requested_contexts[0].context_id == 1
assoc.release()
assert not assoc.is_established
assert assoc.is_released
# Test associate(contexts=...)
ae.requested_contexts = []
assoc = ae.associate('localhost', 11112,
contexts=VerificationPresentationContexts)
assert assoc.is_established
assert VerificationPresentationContexts[0].context_id is None
assert len(assoc.requestor.requested_contexts) == 1
assert assoc.requestor.requested_contexts[0].abstract_syntax == (
'1.2.840.10008.1.1'
)
assert assoc.requestor.requested_contexts[0].context_id == 1
assoc.release()
assert not assoc.is_established
assert assoc.is_released
scp.shutdown()
def test_associate_context_raises(self):
"""Test that AE.associate raises exception if no requested contexts"""
self.ae = ae = AE()
with pytest.raises(RuntimeError):
assoc = ae.associate('localhost', 11112)
class TestAEGoodTimeoutSetters:
def test_acse_timeout(self):
""" Check AE ACSE timeout change produces good value """
ae = AE()
assert ae.acse_timeout == 30
ae.acse_timeout = None
assert ae.acse_timeout is None
ae.acse_timeout = -100
assert ae.acse_timeout == 30
ae.acse_timeout = 'a'
assert ae.acse_timeout == 30
ae.acse_timeout = 0
assert ae.acse_timeout == 0
ae.acse_timeout = 30
assert ae.acse_timeout == 30
def test_dimse_timeout(self):
""" Check AE DIMSE timeout change produces good value """
ae = AE()
assert ae.dimse_timeout == 30
ae.dimse_timeout = None
assert ae.dimse_timeout is None
ae.dimse_timeout = -100
assert ae.dimse_timeout == 30
ae.dimse_timeout = 'a'
assert ae.dimse_timeout == 30
ae.dimse_timeout = 0
assert ae.dimse_timeout == 0
ae.dimse_timeout = 30
assert ae.dimse_timeout == 30
def test_network_timeout(self):
""" Check AE network timeout change produces good value """
ae = AE()
assert ae.network_timeout == 60
ae.network_timeout = None
assert ae.network_timeout is None
ae.network_timeout = -100
assert ae.network_timeout == 60
ae.network_timeout = 'a'
assert ae.network_timeout == 60
ae.network_timeout = 0
assert ae.network_timeout == 0
ae.network_timeout = 30
assert ae.network_timeout == 30
def test_connection_timeout(self):
""" Check AE connection timeout change produces good value """
ae = AE()
assert ae.connection_timeout is None
ae.connection_timeout = None
assert ae.connection_timeout is None
ae.connection_timeout = -100
assert ae.connection_timeout is None
ae.connection_timeout = 'a'
assert ae.connection_timeout is None
ae.connection_timeout = 0
assert ae.connection_timeout is None
ae.connection_timeout = 30
assert ae.connection_timeout == 30
def test_active_acse(self):
"""Test changing acse_timeout with active associations."""
ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.acse_timeout == 30
ae.acse_timeout = 5
assert assoc.acse_timeout == 5
assoc.release()
scp.shutdown()
ae.shutdown()
def test_active_dimse(self):
"""Test changing dimse_timeout with active associations."""
ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.dimse_timeout == 30
ae.dimse_timeout = 5
assert assoc.dimse_timeout == 5
assoc.release()
scp.shutdown()
def test_active_network(self):
"""Test changing network_timeout with active associations."""
ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.network_timeout == 60
ae.network_timeout = 5
assert assoc.network_timeout == 5
assoc.release()
scp.shutdown()
def test_active_connection(self):
"""Test changing connection_timeout with active associations."""
ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.connection_timeout is None
ae.connection_timeout = 5
assert assoc.connection_timeout == 5
assoc.release()
scp.shutdown()
class TestAEGoodAssociation:
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_associate_establish_release(self):
""" Check SCU Association with SCP """
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(Verification)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
assert not assoc.is_established
assert assoc.is_released
scp.shutdown()
def test_associate_max_pdu(self):
""" Check Association has correct max PDUs on either end """
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.maximum_pdu_size = 54321
ae.add_supported_context(Verification)
scp = ae.start_server(('', 11112), block=False)
scu_ae = AE()
scu_ae.acse_timeout = 5
scu_ae.dimse_timeout = 5
scu_ae.network_timeout = 5
scu_ae.add_requested_context(Verification)
assoc = scu_ae.associate('localhost', 11112, max_pdu=12345)
assert assoc.is_established
assert scp.active_associations[0].acceptor.maximum_length == (
54321
)
assert scp.active_associations[0].requestor.maximum_length == (
12345
)
assert assoc.requestor.maximum_length == 12345
assert assoc.acceptor.maximum_length == 54321
assoc.release()
time.sleep(0.1)
assert scp.active_associations == []
# Check 0 max pdu value - max PDU value maps to 0x10000 internally
assoc = scu_ae.associate('localhost', 11112, max_pdu=0)
assert assoc.requestor.maximum_length == 0
assert scp.active_associations[0].requestor.maximum_length == 0
assoc.release()
scp.shutdown()
def test_association_timeouts(self):
""" Check that the Association timeouts are being set correctly and
work """
acse_delay = None
dimse_delay = None
def handle_echo(event):
if dimse_delay:
time.sleep(dimse_delay)
return 0x0000
def handle_acse_recv(event):
if acse_delay:
time.sleep(acse_delay)
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 0.5
ae.add_supported_context(Verification)
scp = ae.start_server(
('', 11112), block=False, evt_handlers=[(evt.EVT_ACSE_RECV, handle_acse_recv), (evt.EVT_C_ECHO, handle_echo)]
)
scu_ae = AE()
scu_ae.acse_timeout = 30
scu_ae.dimse_timeout = 30
scu_ae.network_timeout = 30
scu_ae.add_requested_context(Verification)
assoc = scu_ae.associate('localhost', 11112)
assert assoc.is_established
# Hit the network timeout
time.sleep(1.0)
assert assoc.is_aborted
assert len(scp.active_associations) == 0
ae.acse_timeout = None
ae.dimse_timeout = None
ae.network_timeout = None
scu_ae.acse_timeout = 30
scu_ae.dimse_timeout = 0
dimse_delay = 1
assoc = scu_ae.associate('localhost', 11112)
assert assoc.is_established
status = assoc.send_c_echo()
time.sleep(1.5)
assert assoc.is_aborted
assert len(scp.active_associations) == 0
# FIXME: If this is `0` we can process an ABORT primitive where
# we expect an ASSOCIATION primitive.
scu_ae.acse_timeout = 0.5
scu_ae.dimse_timeout = 30
acse_delay = 1
assoc = scu_ae.associate('localhost', 11112)
assert not assoc.is_established
assert assoc.is_aborted
time.sleep(1.5)
assert len(scp.active_associations) == 0
scu_ae.acse_timeout = 30
# `0` is an invalid value
scu_ae.connection_timeout = 0.5
scu_ae.dimse_timeout = 30
# The host exists and is routable, but there is a middlebox ignoring
# the initial TCP SYN.
assoc = scu_ae.associate('example.com', 11112)
assert not assoc.is_established
assert assoc.is_aborted
assert len(scp.active_associations) == 0
ae.acse_timeout = 21
ae.dimse_timeout = 22
scu_ae.acse_timeout = 31
scu_ae.connection_timeout = None
scu_ae.dimse_timeout = 32
assoc = scu_ae.associate('localhost', 11112)
assert assoc.is_established
assert scp.active_associations[0].acse_timeout == 21
assert scp.active_associations[0].dimse_timeout == 22
assert assoc.acse_timeout == 31
assert assoc.dimse_timeout == 32
assoc.release()
scp.shutdown()
def test_connection_timeout(self, caplog):
# * ACSE timeout does not start until connection timeout completes
# * Logs indicate that we hit the timeout case
scu_ae = AE()
scu_ae.acse_timeout = 1
scu_ae.connection_timeout = 2
scu_ae.add_requested_context(Verification)
with caplog.at_level(logging.ERROR, logger='pynetdicom'):
assoc = scu_ae.associate('example.com', 11112)
assert not assoc.is_established
assert assoc.is_aborted
msgs = [
"TCP Initialisation Error: timed out",
"TCP Initialisation Error: [Errno -2] Name or service "
"not known"
]
assert len([m for m in msgs if m in caplog.text]) == 1
def test_select_timeout_okay(self):
"""Test that using start works OK with timeout."""
# Multiple release/association in a sort time causes an OSError as
# the port is still in use due to the use of select.select() with
# a timeout. Fixed by using socket.shutdown in stop()
for ii in range(3):
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(Verification)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
assert assoc.is_released
assert not assoc.is_established
scp.shutdown()
def test_aet_bytes_deprecation(self):
"""Test warning if using bytes to set an AE title."""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
server = ae.start_server(('', 11112), block=False)
ae.add_requested_context(Verification)
msg = (
r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
r"str instead"
)
with pytest.warns(DeprecationWarning, match=msg):
assoc = ae.associate('', 11112, ae_title=b'BADAE2')
assert assoc.acceptor.ae_title == 'BADAE2'
assert assoc.requestor.ae_title == 'PYNETDICOM'
server.shutdown()
class TestAEBadAssociation:
def test_raise(self):
"""Test bad associate call"""
ae = AE()
ae.add_requested_context(Verification)
with pytest.raises(TypeError):
ae.associate(1112, 11112)
with pytest.raises(TypeError):
ae.associate('localhost', '1.2.3.4')
def test_invalid_ae_title(self):
"""Test invalid AE.ae_title"""
ae = AE()
ae.add_requested_context(Verification)
msg = r"Invalid 'ae_title' value - must not consist entirely of spaces"
with pytest.raises(ValueError, match=msg):
ae.associate('localhost', 11112, ae_title=' ')
msg = (
r"Invalid 'ae_title' value '\u200b5' "
r"- must only contain ASCII characters"
)
with pytest.raises(ValueError, match=msg):
aet = b"\xe2\x80\x8b\x35".decode('utf8')
ae.associate('localhost', 11112, ae_title=aet)
msg = (
r"Invalid 'ae_title' value '1234567890ABCDEFG' "
r"- must not exceed 16 characters"
)
with pytest.raises(ValueError, match=msg):
ae.associate('localhost', 11112, ae_title='1234567890ABCDEFG')
msg = r"Invalid 'ae_title' value - must not be an empty str"
with pytest.raises(ValueError, match=msg):
ae.associate('localhost', 11112, ae_title='')
msg = (
r"Invalid 'ae_title' value 'TEST\\ME' - must not contain control "
r"characters or backslashes"
)
with pytest.raises(ValueError, match=msg):
ae.associate('localhost', 11112, ae_title='TEST\\ME')
msg = r"'ae_title' must be str, not 'int'"
with pytest.raises(TypeError, match=msg):
ae.associate('localhost', 11112, ae_title=12345)
class TestAEGoodMiscSetters:
def setup(self):
self.ae = None
def teardown(self):
if self.ae:
self.ae.shutdown()
def test_ae_title_good(self):
""" Check AE title change produces good value """
ae = AE()
ae.ae_title = ' TEST '
assert ae.ae_title == ' TEST '
ae.ae_title = ' TEST'
assert ae.ae_title == ' TEST'
ae.ae_title = 'a TES'
assert ae.ae_title == 'a TES'
ae.ae_title = 'a TEST'
assert ae.ae_title == 'a TEST'
def test_aet_bytes_deprecation(self):
"""Test warning if using bytes to set an AE title."""
msg = (
r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
r"str instead"
)
with pytest.warns(DeprecationWarning, match=msg):
ae = AE(b'BADAE')
assert ae.ae_title == 'BADAE'
def test_implementation(self):
"""Check the implementation version name and class UID setters"""
ae = AE()
ae.implementation_version_name = None
assert ae.implementation_version_name is None
ae.implementation_class_uid = '1.2.3'
assert ae.implementation_class_uid == '1.2.3'
def test_max_assoc_good(self):
""" Check AE maximum association change produces good value """
ae = AE()
ae.maximum_associations = -10
assert ae.maximum_associations == 1
ae.maximum_associations = ['a']
assert ae.maximum_associations == 1
ae.maximum_associations = '10'
assert ae.maximum_associations == 1
ae.maximum_associations = 0
assert ae.maximum_associations == 1
ae.maximum_associations = 5
assert ae.maximum_associations == 5
def test_max_pdu_good(self):
""" Check AE maximum pdu size change produces good value """
ae = AE()
ae.maximum_pdu_size = -10
assert ae.maximum_pdu_size == 16382
ae.maximum_pdu_size = 0
assert ae.maximum_pdu_size == 0
ae.maximum_pdu_size = 5000
assert ae.maximum_pdu_size == 5000
def test_require_calling_aet(self):
"""Test AE.require_calling_aet"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(Verification)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
assert assoc.is_released
assert not assoc.is_established
ae.require_calling_aet = ['MYAE']
assert ae.require_calling_aet == ['MYAE']
assoc = ae.associate('localhost', 11112)
assert assoc.is_rejected
ae.require_calling_aet = ['PYNETDICOM']
assert ae.require_calling_aet == ['PYNETDICOM']
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
msg = r"Invalid 'require_calling_aet' value - must not be an empty str"
with pytest.raises(ValueError, match=msg):
ae.require_calling_aet = ['']
assert ae.require_calling_aet == ['PYNETDICOM']
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
scp.shutdown()
def test_aec_bytes_deprecation(self):
"""Test warning if using bytes to set an AE title."""
ae = AE()
msg = (
r"The use of a list of bytes with 'require_calling_aet' is "
r"deprecated, use a list of ASCII str instead"
)
with pytest.warns(DeprecationWarning, match=msg):
ae.require_calling_aet = [b'BADAE', 'GOODAE']
assert ae.require_calling_aet == ['BADAE', 'GOODAE']
def test_require_called_aet(self):
"""Test AE.require_called_aet"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(Verification)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
assert assoc.is_released
assert not assoc.is_established
ae.require_called_aet = True
assert ae.require_called_aet is True
assoc = ae.associate('localhost', 11112)
assert assoc.is_rejected
assoc = ae.associate('localhost', 11112, ae_title='PYNETDICOM')
assert assoc.is_established
assoc.release()
scp.shutdown()
def test_req_calling_aet(self):
""" Check AE require calling aet change produces good value """
ae = AE()
ae.require_calling_aet = ['10', 'asdf']
assert ae.require_calling_aet == ['10', 'asdf']
def test_req_called_aet(self):
""" Check AE require called aet change produces good value """
ae = AE()
assert ae.require_called_aet is False
ae.require_called_aet = True
assert ae.require_called_aet is True
ae.require_called_aet = False
assert ae.require_called_aet is False
def test_string_output(self):
"""Test string output"""
ae = AE()
ae.add_requested_context(Verification)
ae.require_calling_aet = ['something']
ae.require_called_aet = True
assert 'Explicit VR' in ae.__str__()
assert 'Verification' in ae.__str__()
assert '0/10' in ae.__str__()
assert 'something' in ae.__str__()
assert 'Require called AE title: True' in ae.__str__()
ae.supported_contexts = StoragePresentationContexts
assert 'CT Image' in ae.__str__()
ae = AE()
ae.add_requested_context(Verification)
assert 'None' in ae.__str__()
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(Verification)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.is_established
assert 'Explicit VR' in ae.__str__()
assert 'Peer' in ae.__str__()
assoc.release()
assert assoc.is_released
assert not assoc.is_established
scp.shutdown()
def test_init_implementation_class(self):
"""Test the default implementation class uid"""
ae = AE()
assert ae.implementation_class_uid == PYNETDICOM_IMPLEMENTATION_UID
def test_init_implementation_version(self):
"""Test the default implementation version name"""
ae = AE()
assert ae.implementation_version_name == PYNETDICOM_IMPLEMENTATION_VERSION
def test_implementation_version(self):
"""Test implementation_version_name"""
ae = AE()
ae.implementation_version_name = None
assert ae.implementation_version_name is None
ae.implementation_version_name = " "
assert ae.implementation_version_name == " "
msg = "'implementation_version_name' must be str or None, not 'int'"
with pytest.raises(TypeError, match=msg):
ae.implementation_version_name = 1234
msg = (
"Invalid 'implementation_version_name' value - must not be an "
"empty str"
)
with pytest.raises(ValueError, match=msg):
ae.implementation_version_name = ""
assert ae.implementation_version_name == " "
def test_implementation_class(self):
"""Test implementation_class_uid"""
ae = AE()
ae.implementation_class_uid = '12.3.4'
assert isinstance(ae.implementation_class_uid, UID)
assert ae.implementation_class_uid == UID('12.3.4')
msg = (
r"'implementation_class_uid' must be str, bytes or UID, not "
r"'NoneType'"
)
with pytest.raises(TypeError, match=msg):
ae.implementation_class_uid = None
assert ae.implementation_class_uid == UID('12.3.4')
msg = (
r"Invalid 'implementation_class_uid' value - must not be an "
r"empty str"
)
with pytest.raises(ValueError, match=msg):
ae.implementation_class_uid = ''
msg = r"Invalid 'implementation_class_uid' value '1.2.04'"
with pytest.raises(ValueError, match=msg):
ae.implementation_class_uid = '1.2.04'
assert ae.implementation_class_uid == UID('12.3.4')
class TestAEBadInitialisation:
def test_invalid_ae_title(self):
"""Test invalid AE.ae_title"""
msg = r"Invalid 'ae_title' value - must not consist entirely of spaces"
with pytest.raises(ValueError, match=msg):
AE(ae_title=' ')
msg = (
r"Invalid 'ae_title' value '\u200b5' "
r"- must only contain ASCII characters"
)
with pytest.raises(ValueError, match=msg):
AE(ae_title=b"\xe2\x80\x8b\x35".decode('utf8'))
msg = (
r"Invalid 'ae_title' value '1234567890ABCDEFG' "
r"- must not exceed 16 characters"
)
with pytest.raises(ValueError, match=msg):
AE(ae_title='1234567890ABCDEFG')
msg = r"Invalid 'ae_title' value - must not be an empty str"
with pytest.raises(ValueError, match=msg):
AE(ae_title='')
msg = (
r"Invalid 'ae_title' value 'TEST\\ME' - must not contain control "
r"characters or backslashes"
)
with pytest.raises(ValueError, match=msg):
AE(ae_title='TEST\\ME')
msg = r"'ae_title' must be str, not 'NoneType'"
with pytest.raises(TypeError, match=msg):
AE(ae_title=None)
class TestAE_GoodExit:
def setup(self):
"""Run prior to each test"""
self.ae = None
def teardown(self):
"""Clear any active threads"""
if self.ae:
self.ae.shutdown()
def test_ae_release_assoc(self):
""" Association releases OK """
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(Verification)
# Test N associate/release cycles
for ii in range(5):
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
assert not assoc.is_established
assert not assoc.is_aborted
assert assoc.is_released
assert not assoc.is_rejected
scp.shutdown()
def test_ae_aborts_assoc(self):
""" Association aborts OK """
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_supported_context(Verification)
scp = ae.start_server(('', 11112), block=False)
ae.add_requested_context(Verification)
# Test N associate/abort cycles
for ii in range(5):
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.abort()
assert not assoc.is_established
assert assoc.is_aborted
assert not assoc.is_released
assert not assoc.is_rejected
scp.shutdown()
class TestAESupportedPresentationContexts:
"""Tests for AE's presentation contexts when acting as an SCP"""
def setup(self):
self.ae = AE()
def test_add_supported_context_str(self):
"""Tests for AE.add_supported_context using str."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.context_id is None
def test_add_supported_context_sop_class(self):
"""Tests for AE.add_supported_context using SOPClass."""
self.ae.add_supported_context(RTImageStorage)
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_uid(self):
"""Tests for AE.add_supported_context using UID."""
self.ae.add_supported_context(UID('1.2.840.10008.1.1'))
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_duplicate(self):
"""Tests for AE.add_supported_context using a duplicate UID."""
self.ae.add_supported_context(UID('1.2.840.10008.1.1'))
self.ae.add_supported_context(UID('1.2.840.10008.1.1'))
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_transfer_single(self):
"""Test adding a single transfer syntax without a list"""
self.ae.add_supported_context('1.2', '1.3')
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2'
assert contexts[0].transfer_syntax == ['1.3']
self.ae.add_supported_context('1.2', UID('1.4'))
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2'
assert contexts[0].transfer_syntax == ['1.3', '1.4']
def test_add_supported_context_duplicate_transfer(self):
"""Test adding duplicate transfer syntaxes."""
self.ae.add_supported_context('1.2', ['1.3', '1.3'])
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2'
assert contexts[0].transfer_syntax == ['1.3']
self.ae.supported_contexts = []
self.ae.add_supported_context('1.2.840.10008.1.1')
self.ae.add_supported_context('1.2.840.10008.1.1')
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.supported_contexts = []
self.ae.add_supported_context('1.2.840.10008.1.1')
self.ae.add_supported_context('1.2.840.10008.1.1', [DEFAULT_TRANSFER_SYNTAXES[0]])
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_duplicate_multi(self):
"""Tests for AE.add_supported_context using a duplicate UID."""
self.ae.add_supported_context('1.2.840.10008.1.1',
[DEFAULT_TRANSFER_SYNTAXES[0]])
self.ae.add_supported_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES[1:])
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_private_abs(self):
"""Test AE.add_supported_context with a private abstract syntax"""
self.ae.add_supported_context('1.2.3.4')
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.3.4'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_private_tran(self):
"""Test AE.add_supported_context with a private transfer syntax"""
self.ae.add_supported_context('1.2.3.4',
['1.2.3', '1.2.840.10008.1.1'])
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.3.4'
assert contexts[0].transfer_syntax == ['1.2.3', '1.2.840.10008.1.1']
def test_add_supported_context_more_128(self):
"""Test adding more than 128 presentation contexts"""
for ii in range(300):
self.ae.add_supported_context(str(ii))
contexts = self.ae.supported_contexts
assert len(contexts) == 300
def test_supported_contexts_setter(self):
"""Test the AE.supported_contexts property setter."""
context = build_context('1.2.840.10008.1.1')
self.ae.supported_contexts = [context]
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.context_id is None
def test_supported_contexts_empty(self):
"""Test the setting supported_contexts to an empty list."""
context = build_context('1.2.840.10008.1.1')
self.ae.supported_contexts = [context]
assert len(self.ae.supported_contexts) == 1
self.ae.supported_contexts = []
assert len(self.ae.supported_contexts) == 0
def test_supported_contexts_setter_raises(self):
"""Test the AE.supported_contexts property raises if not context."""
with pytest.raises(ValueError):
self.ae.supported_contexts = ['1.2.3']
def test_supported_contexts_sorted(self):
"""Test that the supported_contexts returns contexts in order."""
self.ae.add_supported_context('1.2.3.4')
self.ae.add_supported_context('1.2.3.5')
asyntaxes = [
cntx.abstract_syntax for cntx in self.ae.supported_contexts
]
assert asyntaxes == ['1.2.3.4', '1.2.3.5']
self.ae.add_supported_context('0.1.2.3')
self.ae.add_supported_context('2.1.2.3')
asyntaxes = [
cntx.abstract_syntax for cntx in self.ae.supported_contexts
]
assert asyntaxes == ['0.1.2.3', '1.2.3.4', '1.2.3.5', '2.1.2.3']
def test_supported_contexts_more_128(self):
"""Test setting supported_contexts with more than 128 contexts."""
contexts = []
for ii in range(300):
contexts.append(build_context(str(ii)))
self.ae.supported_contexts = contexts
assert len(self.ae.supported_contexts) == 300
def test_remove_supported_context_str(self):
"""Tests for AE.remove_supported_context using str."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_supported_context('1.2.840.10008.1.1')
assert len(self.ae.supported_contexts) == 0
# Test multiple
self.ae.add_supported_context('1.2.840.10008.1.1')
self.ae.add_supported_context('1.2.840.10008.1.4', ['1.2.3.4'])
assert len(self.ae.supported_contexts) == 2
self.ae.remove_supported_context('1.2.840.10008.1.1')
assert len(self.ae.supported_contexts) == 1
for context in self.ae.supported_contexts:
assert context.abstract_syntax != '1.2.840.10008.1.1'
def test_remove_supported_context_uid(self):
"""Tests for AE.remove_supported_context using UID."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_supported_context(UID('1.2.840.10008.1.1'))
assert len(self.ae.supported_contexts) == 0
def test_remove_supported_context_sop_class(self):
"""Tests for AE.remove_supported_context using SOPClass."""
self.ae.add_supported_context(RTImageStorage)
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_supported_context(RTImageStorage)
assert len(self.ae.supported_contexts) == 0
def test_remove_supported_context_default(self):
"""Tests for AE.remove_supported_context with default transfers."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context('1.2.840.10008.1.1')
assert len(self.ae.supported_contexts) == 0
def test_remove_supported_context_single_transfer(self):
"""Tests for AE.remove_supported_context with single transfer."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context('1.2.840.10008.1.1', DEFAULT_TRANSFER_SYNTAXES[0])
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
def test_remove_supported_context_partial(self):
"""Tests for AE.remove_supported_context with partial transfers."""
# Test singular
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context('1.2.840.10008.1.1',
['1.2.840.10008.1.2'])
assert len(self.ae.supported_contexts) == 1
context = self.ae.supported_contexts[0]
assert len(context.transfer_syntax) == 3
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
assert context.abstract_syntax == '1.2.840.10008.1.1'
# Test multiple
self.ae.add_supported_context('1.2.840.10008.1.1')
self.ae.add_supported_context(RTImageStorage)
self.ae.remove_supported_context('1.2.840.10008.1.1',
['1.2.840.10008.1.2'])
assert len(self.ae.supported_contexts) == 2
context = self.ae.supported_contexts[0]
assert len(context.transfer_syntax) == 3
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert self.ae.supported_contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_remove_supported_context_all(self):
"""Tests for AE.remove_supported_context with all transfers."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
# Test singular
self.ae.remove_supported_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES)
assert len(self.ae.supported_contexts) == 0
# Test multiple
self.ae.add_supported_context('1.2.840.10008.1.1')
self.ae.add_supported_context(RTImageStorage)
self.ae.remove_supported_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES)
context = self.ae.supported_contexts[0]
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
def test_remove_supported_context_all_plus(self):
"""Test remove_supported_context with extra transfers"""
tsyntax = DEFAULT_TRANSFER_SYNTAXES[:]
tsyntax.append('1.2.3')
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context('1.2.840.10008.1.1', tsyntax)
assert len(self.ae.supported_contexts) == 0
def test_scu_role(self):
"""Test add_supported_context with scu_role parameter."""
self.ae.add_supported_context('1.2.3')
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_contexts = []
self.ae.add_supported_context('1.2.3', scu_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_contexts = []
self.ae.add_supported_context('1.2.3', scu_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is True
assert context.scp_role is None
self.ae.supported_contexts = []
self.ae.add_supported_context('1.2.3', scu_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is False
assert context.scp_role is None
def test_scu_role_update(self):
"""Test updating add_supported_context with scu_role parameter."""
self.ae.add_supported_context('1.2.3')
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context('1.2.3', scu_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context('1.2.3', scu_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is True
assert context.scp_role is None
self.ae.add_supported_context('1.2.3', scu_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is False
assert context.scp_role is None
def test_scu_role_raises(self):
"""Test add_supported_context raises if scu_role wrong type."""
with pytest.raises(TypeError, match=""):
self.ae.add_supported_context('1.2.3', scu_role='abc')
assert self.ae.supported_contexts == []
def test_scp_role(self):
"""Test add_supported_context with scu_role parameter."""
self.ae.add_supported_context('1.2.3')
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_contexts = []
self.ae.add_supported_context('1.2.3', scp_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_contexts = []
self.ae.add_supported_context('1.2.3', scp_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is True
self.ae.supported_contexts = []
self.ae.add_supported_context('1.2.3', scp_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is False
def test_scp_role_update(self):
"""Test updating add_supported_context with scp_role parameter."""
self.ae.add_supported_context('1.2.3')
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context('1.2.3', scp_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context('1.2.3', scp_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is True
self.ae.add_supported_context('1.2.3', scp_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is False
def test_scp_role_raises(self):
"""Test add_supported_context raises if scp_role wrong type."""
with pytest.raises(TypeError, match=""):
self.ae.add_supported_context('1.2.3', scp_role='abc')
assert self.ae.supported_contexts == []
class TestAERequestedPresentationContexts:
"""Tests for AE's presentation contexts when acting as an SCU"""
def setup(self):
self.ae = AE()
def test_add_requested_context_str(self):
"""Tests for AE.add_requested_context using str."""
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.context_id is None
def test_add_requested_context_sop_class(self):
"""Tests for AE.add_requested_context using SOPClass."""
self.ae.add_requested_context(RTImageStorage)
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_uid(self):
"""Tests for AE.add_requested_context using UID."""
self.ae.add_requested_context(UID('1.2.840.10008.1.1'))
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_duplicate(self):
"""Test AE.add_requested_context using a duplicate UID."""
self.ae.add_requested_context(UID('1.2.840.10008.1.1'))
self.ae.add_requested_context(UID('1.2.840.10008.1.1'))
contexts = self.ae.requested_contexts
assert len(contexts) == 2
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert contexts[1].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_duplicate_multi(self):
"""Tests for AE.add_requested_context using a duplicate UID."""
self.ae.add_requested_context('1.2.840.10008.1.1',
[DEFAULT_TRANSFER_SYNTAXES[0]])
self.ae.add_requested_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES[1:])
contexts = self.ae.requested_contexts
assert len(contexts) == 2
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == [DEFAULT_TRANSFER_SYNTAXES[0]]
assert contexts[1].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
def test_add_requested_context_transfer_single(self):
"""Test adding a single transfer syntax without a list"""
self.ae.add_requested_context('1.2', '1.3')
contexts = self.ae.requested_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2'
assert contexts[0].transfer_syntax == ['1.3']
self.ae.add_requested_context('1.2', UID('1.4'))
contexts = self.ae.requested_contexts
assert len(contexts) == 2
assert contexts[1].abstract_syntax == '1.2'
assert contexts[1].transfer_syntax == ['1.4']
def test_add_requested_context_duplicate_transfer(self):
"""Test add_requested_context using duplicate transfer syntaxes"""
self.ae.add_requested_context('1.2', ['1.3', '1.3'])
contexts = self.ae.requested_contexts
assert contexts[0].transfer_syntax == ['1.3']
def test_add_requested_context_private_abs(self):
"""Test AE.add_requested_context with a private abstract syntax"""
self.ae.add_requested_context('1.2.3.4')
contexts = self.ae.requested_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.3.4'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_private_tran(self):
"""Test AE.add_requested_context with a private transfer syntax"""
self.ae.add_requested_context('1.2.3.4',
['1.2.3', '1.2.840.10008.1.1'])
contexts = self.ae.requested_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.3.4'
assert contexts[0].transfer_syntax == ['1.2.3', '1.2.840.10008.1.1']
def test_add_requested_context_more_128_raises(self):
"""Test adding more than 128 presentation contexts"""
for ii in range(128):
self.ae.add_requested_context(str(ii))
assert len(self.ae.requested_contexts) == 128
with pytest.raises(ValueError):
self.ae.add_requested_context('129')
assert len(self.ae.requested_contexts) == 128
def test_requested_contexts_setter(self):
"""Test the AE.requested_contexts property setter."""
context = build_context('1.2.840.10008.1.1')
self.ae.requested_contexts = [context]
contexts = self.ae.requested_contexts
assert len(contexts) == 1
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.context_id is None
def test_requested_contexts_empty(self):
"""Test the setting requested_contexts to an empty list."""
context = build_context('1.2.840.10008.1.1')
self.ae.requested_contexts = [context]
assert len(self.ae.requested_contexts) == 1
self.ae.requested_contexts = []
assert len(self.ae.requested_contexts) == 0
def test_requested_contexts_setter_raises(self):
"""Test the AE.requested_contexts property raises if not context."""
with pytest.raises(ValueError):
self.ae.requested_contexts = ['1.2.3']
def test_requested_contexts_not_sorted(self):
"""Test that requested_contexts returns contexts in supplied order."""
self.ae.add_requested_context('1.2.3.4')
self.ae.add_requested_context('1.2.3.5')
asyntaxes = [
cntx.abstract_syntax for cntx in self.ae.requested_contexts
]
assert asyntaxes == ['1.2.3.4', '1.2.3.5']
self.ae.add_requested_context('0.1.2.3')
self.ae.add_requested_context('2.1.2.3')
asyntaxes = [
cntx.abstract_syntax for cntx in self.ae.requested_contexts
]
assert asyntaxes == ['1.2.3.4', '1.2.3.5', '0.1.2.3', '2.1.2.3']
def test_requested_contexts_more_128(self):
"""Test setting requested_contexts with more than 128 contexts."""
contexts = []
for ii in range(128):
contexts.append(build_context(str(ii)))
self.ae.requested_contexts = contexts
assert len(self.ae.requested_contexts) == 128
contexts.append(build_context('129'))
with pytest.raises(ValueError):
self.ae.requested_contexts = contexts
def test_remove_requested_context_str(self):
"""Tests for AE.remove_requested_context using str."""
# Test singular
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_requested_context('1.2.840.10008.1.1')
assert len(self.ae.requested_contexts) == 0
# Test multiple
self.ae.add_requested_context('1.2.840.10008.1.1')
self.ae.add_requested_context('1.2.840.10008.1.1', ['1.2.3.4'])
self.ae.add_requested_context('1.2.840.10008.1.4', ['1.2.3.4'])
assert len(self.ae.requested_contexts) == 3
self.ae.remove_requested_context('1.2.840.10008.1.1')
assert len(self.ae.requested_contexts) == 1
for context in self.ae.requested_contexts:
assert context.abstract_syntax != '1.2.840.10008.1.1'
def test_remove_requested_context_uid(self):
"""Tests for AE.remove_requested_context using UID."""
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_requested_context(UID('1.2.840.10008.1.1'))
assert len(self.ae.requested_contexts) == 0
def test_remove_requested_context_sop_class(self):
"""Tests for AE.remove_requested_context using SOPClass."""
self.ae.add_requested_context(RTImageStorage)
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_requested_context(RTImageStorage)
assert len(self.ae.requested_contexts) == 0
def test_remove_requested_context_default(self):
"""Tests for AE.remove_requested_context with default transfers."""
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_requested_context('1.2.840.10008.1.1')
assert len(self.ae.requested_contexts) == 0
def test_remove_requested_context_single(self):
"""Tests for AE.remove_requested_context with single transfer."""
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_requested_context('1.2.840.10008.1.1', DEFAULT_TRANSFER_SYNTAXES[0])
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
def test_remove_requested_context_partial(self):
"""Tests for AE.remove_supported_context with partial transfers."""
# Test singular
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_requested_context('1.2.840.10008.1.1',
['1.2.840.10008.1.2'])
assert len(self.ae.requested_contexts) == 1
context = self.ae.requested_contexts[0]
assert len(context.transfer_syntax) == 3
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
assert context.abstract_syntax == '1.2.840.10008.1.1'
self.ae.remove_requested_context('1.2.840.10008.1.1')
assert len(self.ae.requested_contexts) == 0
# Test multiple
self.ae.add_requested_context('1.2.840.10008.1.1')
self.ae.add_requested_context(RTImageStorage)
self.ae.add_requested_context('1.2.840.10008.1.1', ['1.2.3.4'])
self.ae.remove_requested_context('1.2.840.10008.1.1',
['1.2.840.10008.1.2'])
assert len(self.ae.requested_contexts) == 3
context = self.ae.requested_contexts[0]
assert len(context.transfer_syntax) == 3
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert self.ae.requested_contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert self.ae.requested_contexts[2].transfer_syntax == ['1.2.3.4']
assert self.ae.requested_contexts[2].abstract_syntax == '1.2.840.10008.1.1'
self.ae.remove_requested_context('1.2.840.10008.1.1')
assert len(self.ae.requested_contexts) == 1
assert self.ae.requested_contexts[0].abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
def test_remove_requested_context_all(self):
"""Tests for AE.remove_requested_context with all transfers."""
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
# Test singular
self.ae.remove_requested_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES)
assert len(self.ae.requested_contexts) == 0
# Test multiple
self.ae.add_requested_context('1.2.840.10008.1.1',
[DEFAULT_TRANSFER_SYNTAXES[0]])
self.ae.add_requested_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES[1:])
self.ae.add_requested_context(RTImageStorage)
self.ae.remove_requested_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES)
assert len(self.ae.requested_contexts) == 1
context = self.ae.requested_contexts[0]
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
def test_remove_requested_context_all_plus(self):
"""Test remove_requested_context with extra transfers"""
tsyntax = DEFAULT_TRANSFER_SYNTAXES[:]
tsyntax.append('1.2.3')
# Test singular
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_requested_context('1.2.840.10008.1.1', tsyntax)
assert len(self.ae.requested_contexts) == 0
# Test multiple
self.ae.add_requested_context('1.2.840.10008.1.1',
[DEFAULT_TRANSFER_SYNTAXES[0]])
self.ae.add_requested_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES[1:])
self.ae.add_requested_context(RTImageStorage)
self.ae.remove_requested_context('1.2.840.10008.1.1', tsyntax)
assert len(self.ae.requested_contexts) == 1
context = self.ae.requested_contexts[0]
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
```
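A minimal usage sketch of the presentation-context API exercised by the tests above (assumes pynetdicom is installed; the private UIDs are arbitrary examples):
```python
from pynetdicom import AE
from pynetdicom.presentation import build_context

ae = AE()

# Requested contexts (SCU side) keep duplicates and preserve insertion order
ae.add_requested_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.3.4', ['1.2.840.10008.1.2'])

# Supported contexts (SCP side) are merged per abstract syntax and kept sorted
ae.add_supported_context('1.2.840.10008.1.1', scu_role=True, scp_role=True)

# The property setter replaces the supported context list wholesale
ae.supported_contexts = [build_context('1.2.3.4')]

for cx in ae.supported_contexts:
    print(cx.abstract_syntax, cx.transfer_syntax)
```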
#### File: pynetdicom/tests/test_dul.py
```python
import logging
import socket
import threading
import time
import pytest
from pynetdicom import AE, debug_logger, evt
from pynetdicom.dul import DULServiceProvider
from pynetdicom.pdu import (
A_ASSOCIATE_RQ, A_ASSOCIATE_AC, A_ASSOCIATE_RJ,
A_RELEASE_RQ, A_RELEASE_RP, P_DATA_TF, A_ABORT_RQ
)
from pynetdicom.pdu_primitives import A_ASSOCIATE, A_RELEASE, A_ABORT, P_DATA
from .encoded_pdu_items import a_associate_ac, a_release_rq
from .parrot import start_server, ThreadedParrot, ParrotRequest
from .utils import sleep
#debug_logger()
class DummyACSE:
"""Dummy ACSE class"""
@staticmethod
def debug_receive_associate_rq(): pass
@staticmethod
def debug_receive_associate_ac(): pass
@staticmethod
def debug_receive_associate_rj(): pass
@staticmethod
def debug_receive_data_tf(): pass
@staticmethod
def debug_receive_release_rq(): pass
@staticmethod
def debug_receive_release_rp(): pass
@staticmethod
def debug_receive_abort(): pass
class DummyAssociation:
"""Dummy Association class"""
acse = DummyACSE()
class TestDUL:
"""Run tests on DUL service provider."""
def setup(self):
self.scp = None
def teardown(self):
if self.scp:
self.scp.commands = [('exit', None)]
self.scp.step()
self.scp.commands = []
self.scp.shutdown()
for thread in threading.enumerate():
if isinstance(thread, ThreadedParrot):
thread.shutdown()
def test_primitive_to_event(self):
"""Test that parameter returns expected results"""
dul = DULServiceProvider(DummyAssociation())
p2e = dul._primitive_to_event
primitive = A_ASSOCIATE()
primitive.result = None
assert p2e(primitive) == 'Evt1'
primitive.result = 0
assert p2e(primitive) == 'Evt7'
primitive.result = 1
assert p2e(primitive) == 'Evt8'
primitive = A_RELEASE()
primitive.result = None
assert p2e(primitive) == 'Evt11'
primitive.result = 'affirmative'
assert p2e(primitive) == 'Evt14'
primitive = A_ABORT()
assert p2e(primitive) == 'Evt15'
primitive = P_DATA()
assert p2e(primitive) == 'Evt9'
with pytest.raises(ValueError):
p2e('TEST')
def test_recv_failure_aborts(self):
"""Test connection close during PDU recv causes abort."""
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('send', b"\x07\x00\x00\x00\x00\x04"),
('exit', None)
]
self.scp = scp = start_server(commands)
def handle(event):
scp.step()
scp.step()
hh = [(evt.EVT_REQUESTED, handle)]
ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 0.2
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112, evt_handlers=hh)
assert assoc.is_established
scp.step() # send short pdu
scp.step() # close connection
scp.shutdown()
# Need to wait for network timeout to expire
timeout = 0
while not assoc.is_aborted and timeout < 1:
time.sleep(0.05)
timeout += 0.05
assert assoc.is_aborted
def test_recv_short_aborts(self):
"""Test receiving short PDU causes abort."""
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('send', b"\x07\x00\x00\x00\x00\x04\x00\x00"), # Send short PDU
('exit', None)
]
self.scp = scp = start_server(commands)
def handle(event):
scp.step()
scp.step()
hh = [(evt.EVT_REQUESTED, handle)]
ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 0.2
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112, evt_handlers=hh)
assert assoc.is_established
scp.step() # send short pdu
# Need to wait for network timeout to expire
timeout = 0
while not assoc.is_aborted and timeout < 1:
time.sleep(0.05)
timeout += 0.05
assert assoc.is_aborted
scp.step()
scp.shutdown()
def test_recv_missing_data(self):
"""Test missing data when receiving."""
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('send', b"\x07\x00\x00\x00\x00\x02\x00"), # Send short PDU
('exit', None)
]
self.scp = scp = start_server(commands)
def handle(event):
scp.step()
scp.step()
hh = [(evt.EVT_REQUESTED, handle)]
def recv(nr_bytes):
return assoc.dul.socket.socket.recv(6)
ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 0.5
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112, evt_handlers=hh)
assert assoc.is_established
assoc.dul.socket.recv = recv
scp.step() # send short pdu
# Need to wait for network timeout to expire
timeout = 0
while not assoc.is_aborted and timeout < 1:
time.sleep(0.05)
timeout += 0.05
assert assoc.is_aborted
scp.step()
scp.shutdown()
def test_recv_bad_pdu_aborts(self):
"""Test receiving undecodable PDU causes abort."""
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('send', b"\x07\x00\x00\x00\x00\x02\x00\x00"),
('recv', None),
('exit', None)
]
self.scp = scp = start_server(commands)
def handle(event):
scp.step()
scp.step()
hh = [(evt.EVT_REQUESTED, handle)]
ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112, evt_handlers=hh)
assert assoc.is_established
scp.step() # send bad PDU
scp.step() # receive abort
assert assoc.is_aborted
scp.step()
scp.shutdown()
def test_exception_in_reactor(self):
"""Test that an exception being raised in the DUL reactor kills the
DUL and aborts the association.
"""
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('send', a_release_rq), # Trigger the exception
('recv', None), # recv a-abort
('exit', None),
]
self.scp = scp = start_server(commands)
def handle(event):
scp.step()
scp.step()
hh = [(evt.EVT_REQUESTED, handle)]
ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout = 5
ae.network_timeout = 5
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112, evt_handlers=hh)
assert assoc.is_established
def patch_read_pdu():
raise NotImplementedError
assoc.dul._read_pdu_data = patch_read_pdu
scp.step()
scp.step()
assert assoc.is_aborted
scp.step()
scp.shutdown()
```
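The primitive-to-event mapping covered by `TestDUL.test_primitive_to_event` can also be exercised directly; a minimal sketch that reuses the `DummyAssociation` helper defined in this test module (a private test stub, not a public API):
```python
from pynetdicom.dul import DULServiceProvider
from pynetdicom.pdu_primitives import A_ASSOCIATE, A_RELEASE, P_DATA

# DummyAssociation is the stub class defined in test_dul.py above
dul = DULServiceProvider(DummyAssociation())

rq = A_ASSOCIATE()
rq.result = None
assert dul._primitive_to_event(rq) == 'Evt1'    # A-ASSOCIATE request primitive

rsp = A_RELEASE()
rsp.result = 'affirmative'
assert dul._primitive_to_event(rsp) == 'Evt14'  # A-RELEASE response primitive

assert dul._primitive_to_event(P_DATA()) == 'Evt9'  # P-DATA request primitive
```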
|
{
"source": "JesseBanning/pyna",
"score": 3
}
|
#### File: pyna/pyfy/readingjson02.py
```python
import json
def main():
with open("ciscoex.json", "r") as myfile:
myjson = json.load(myfile)
with open("ciscoex.text", "w") as myfile:
myfile.write(str(myjson["time"]) + " " + myjson["host"] + " " + myjson["type"])
main()
```
#### File: JesseBanning/pyna/readyaml03.py
```python
import yaml
def main():
"""runtime code"""
## Open a blob of YAML data
with open("myYAML.yml", "r") as yf:
## convert YAML into Python data structures (lists and dictionaries)
pyyammy = yaml.safe_load(yf)
# display our new Python data
print(pyyammy)
if __name__ == "__main__":
main()
```
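A quick illustration of the YAML-to-Python mapping that readyaml03.py relies on; the document below is inline purely for demonstration, while the script above reads the same kind of data from myYAML.yml:
```python
import yaml

doc = """
hostname: sw-core-01
interfaces:
  - GigabitEthernet0/1
  - GigabitEthernet0/2
"""

data = yaml.safe_load(doc)    # returns nested dicts and lists
print(data["hostname"])       # sw-core-01
print(data["interfaces"][0])  # GigabitEthernet0/1
```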
#### File: pyna/sewing/topcoat.py
```python
import threading
## py standard library
import time
def groundcontrol(x):
for i in range(x, -1, -1):
print(i)
time.sleep(1)
def orion():
print("I forgot my socks.")
time.sleep(1)
print("Can we stop this ride?")
time.sleep(2)
print("No? Alright. Ugh. I forgot to close the garage too.")
time.sleep(1)
print("To infinity, and beyond!")
print("Orion, you are primed for launch. Count down begins...")
countdown = 10
## Create a thread object (target is the function to call)
mythread = threading.Thread(target=groundcontrol, args=(countdown, ))
astrothread = threading.Thread(target=orion)
## begin the threads
mythread.start()
astrothread.start()
# Wait until the threads finish before moving on.
mythread.join()
astrothread.join()
## Ask the user to press any key to exit.
input("Press Enter to exit.")
exit()
```
|
{
"source": "JesseBausell/Hydrolight_MFile_reader_py",
"score": 3
}
|
#### File: JesseBausell/Hydrolight_MFile_reader_py/Hydrolight_MFile_reader.py
```python
import h5py
import numpy as np
from tkinter import filedialog as fd
import os
### 2. Define functions used for the script.
def createFolder(directory):
""" createFolder searches for a dirctory specified by the user. If there is none, it creates one"""
try:
if not os.path.exists(directory): # If the folder doesn't exist
os.makedirs(directory) # Create a folder
except OSError: # If there is an error other than a non-existant folder
print ('Error: Creating directory. ' + directory) # report error and shut down createFolder
def hdf5_fileWRITER(filE_NAME,HE53_dict):
"""Takes the python dictionary that was generated by ascii_MFILE_compiler and writes them
into a hdf5 (.h5) file. Data within hdf5 file is formatted the same as the aforementioned
python dictionary. User should note however that "bb/b ratio" will be changed to "bb fraction"
in all headers.
Inputs:
filE_NAME - name of future hdf5 file that will contain dictionary data
HE53_dict - dictionary formatted by ascii_MFILE_compiler
Outputs:
filE_NAME.h5 - hdf5 file containing python dictionary data"""
filE_NAME = filE_NAME[:-4]
with h5py.File(filE_NAME + '.h5','w') as hf: # Create an open hdf5 file for writing.
for k in HE53_dict:
# for-loop disects the m-file dictionary and writes data and dictionary elements
# into a hdf5 file.
k1 = k.replace('/','-') # replace the forward slash with a hyphen
hf.create_group(k1) # Create a key/element in hdf5 file based on nested python dictionary
for l in HE53_dict[k]:
# Within the python dictionary, take all elements (keys and data) and incorporate them
# into hdf5 file
hf[k1][l] = HE53_dict[k][l] # Create new nested element with data in hdf5 file
def ASCII_to_hdf5(fileNAME_mfile):
"""ASCII_to_hdf5 takes an ascii file produced by Hydrolight (m-file) and puts the data into
a python dictionary.
Input:
fileNAME_mfile - name of the ascii file
Output:
Hydro_OUTPUT - python dictionary containing data from ascii file"""
with open(fileNAME_mfile) as FID_mFILE: # Open a Hydrolight m-file that is prescribed by the user
Hydro_OUTPUT = {} # Create an empty dictionary to store all of the data extracted from the
# Before any data is collected and stored, process the first four lines of the m-file
for n in range(4):
# for-loop discards the first four lines of mfile text because they are worthless~
tLINE = FID_mFILE.readline() # Grab a line from the mfile, but don't save it
#print(n,tLINE)
if n == 1:
# if the script is examining the second header line of the entire m-file (sometimes called ascii file)
tLINE = tLINE.split() # Assign the second header line to a variable and split it into a list
wv_NUM = int(tLINE[0]) # Take the first list element (number of wavelengths). Set it equal to wv_NUM
keY = 0 # Set Key equal to 0. This variable will determine when to break the subsequent while loop
# every time the subequent while loop doesn't complete itself from start to finish,
while 1:
# while loop will cycle through the entire m-file until it reaches the end. It will allow
# all data to be examined, filtered, and stored in the Hydro_OUTPUT dictionary
#######################################################################################
if keY > 1:
# if script is unable to run twice
break # break the while loop!
### The code below places ascii data into a dictionary.
#######################################################################################
try: # attempt to run the following code for each while-loop iteration
### 1. For each section of the ascii file, prepare the first three header lines
temP_DICT = {} # Create an empty dictionary with each new while loop iteration
temP_DICT['linE'] = FID_mFILE.readline()[:-1].split('"') # Grab one line of the m-file
temP_DICT['linE2'] = FID_mFILE.readline()[:-1].split('"') # take the second line of the m-file and (again) split it by "
temP_DICT['linE3'] = FID_mFILE.readline()[:-1].split('"') # take the third line of the m-file and (again) split it by "
#print(temP_DICT['linE3'])
for t in temP_DICT:
# for-loop cycles through the temporary dictionary (temP_DICT), which contains
# ascii data headers, and eliminates empty list elements.
for i in np.flip(np.arange(len(temP_DICT[t])),0):
# nested for-loop removes empty elements from each dictionary key (list)
if not temP_DICT[t][i].strip():
# if the list element is empty...
temP_DICT[t].pop(i) # excise the element from the list completely
if temP_DICT['linE'] == []:
# If the first line of the ascii header is empty
temP_DICT['linE'] = temP_DICT['linE2'] # make the first ascii header the second
temP_DICT['linE2'] = [] # make the second ascii header the first
################################################################################################################################
### 2. Now that the first three header lines have been fixed, try and determine the
### dimensions of the data below the three-line header
try:
# If the last element of the line1 list contains number of rows and columns, create variables
roW,coL = np.asarray(temP_DICT['linE'][-1].split(),dtype=int) # take the last list element (matrix dimensions) and split it
temP_DICT['linE'].pop(-1) # remove last element from the line 1 list
except:
# If there are no row and column values listed in line 1 list
coL = np.nan # set column number equal to nan
Hydro_OUTPUT[temP_DICT['linE'][0]] = {} # Create a dictionary within a dictionary
Hydro_OUTPUT[temP_DICT['linE'][0]]['Meta'] = temP_DICT['linE'][-1] # Include a metadata description of each nested dictionary
################################################################################################################################
### 3. m-file sections have several different formats. Some are matrices, others are headered columns
### It is therefore important to distinguish between each type of m-file section and preceed accordingly
if coL == len(temP_DICT['linE3']):
# If the number of column headers, as indicated in the first line of the header, is the same as the
# number of column headers listed in the third line of the header. These AOPs are typically modeled
# according to wavelength, but NOT according to depth.
for r in range(roW):
# for-loop sorts through data row-by-row and sorts them into the appropriate dictionary lists.
# the for-loop will run for as many iterations as there are subsequent rows of data.
linE4 = FID_mFILE.readline()[:-1] # Grab a new line of data and remove end-of-line character
if "in air" in linE4:
# If the words, "in air" appear in the row of data...
INDr = linE4.rfind('"') # Find index the end of the "in air" statement
linE4 = '-1 ' + linE4[INDr+1:] # replace "in air" with "-1" within the string
linE4 = np.asarray(linE4.split(),dtype=float) # linE4 string and make it into a numpy array
for c,k3 in enumerate(temP_DICT['linE3']):
# nested for-loop distributes linE4 into appropriate dictionary elements via indexing
try:
# if nested Hydro_OUTPUT dictionary key (and element) already exist
Hydro_OUTPUT[temP_DICT['linE'][0]][k3] = np.append(Hydro_OUTPUT[temP_DICT['linE'][0]][k3],linE4[c]) #append
except:
# if nested Hydro_OUTPUT dictionary key (and element) do not yet exist
Hydro_OUTPUT[temP_DICT['linE'][0]][k3] = np.array(linE4[c]) #create a new one
else:
# If the number of columns headers, as indicated in the first line of the header, is NOT the same as
# the number of column headers listed in the third line of the header. These AOPs are typically structured
# as a 2D matrix, with one matrix dimension representing depth bins and the other dimension representing
# wavelengths
### Set up the appropriate dictionary keys/elements using the ascii header
temP_DICT['linE3'].pop(0) # remove the first element of the third header line (now a list)
try:
# Attempt to convert the rest of the third header line (now a list) into a numpy array
Hydro_OUTPUT[temP_DICT['linE'][0]]['depth'] = np.asarray(temP_DICT['linE3'][0].split(),dtype=float)
except:
# If the list to numpy array conversion (see above) was unsuccessful, it means that the first list element is a string
temP_DICT['linE3'][0] = -1 # replace the first list element with "-1"
deptH = [temP_DICT['linE3'][0]] + temP_DICT['linE3'][1].split() # Re-create a list with the third header
Hydro_OUTPUT[temP_DICT['linE'][0]]['depth'] = np.asarray(deptH,dtype=float) # Convert list of depths into numpy array
### Set up the row and column numbers, as well as a nan-matrix in which to place data
coL = len(Hydro_OUTPUT[temP_DICT['linE'][0]]['depth']) + 1 # calculate the number of columns based on depth bins
roW = wv_NUM # re-assign the number of rows based on the number of wavelengths in the m-file
TEMP_MATRIX = np.ones([roW,coL])*np.nan # Create a nan matrix in which to place AOP and wavelenth data
### Fill TEMP_MATRIX with data from m-file
for r in range(roW):
# for-loop goes through the m-file row by row and fills the nan-matrix (TEMP_MATRIX)
linE4 = np.asarray(FID_mFILE.readline()[:-1].split(),dtype=float) # grab line from m-file. Convert to numpy array
TEMP_MATRIX[r,:] = linE4 # Fill row of TEMP_MATRIX
Hydro_OUTPUT[temP_DICT['linE'][0]]['data'] = TEMP_MATRIX[:,1:] # Assign all columns (except for the first) of TEMP_MATRIX as "data"
Hydro_OUTPUT[temP_DICT['linE'][0]]['wvl'] = TEMP_MATRIX[:,0] # Assign first column of TEMP_MATRIX as "wvl"
keY = 0 # upon successful completion of "try" script, reset keY to zero
except:
keY += 1 # if "try" script fails to run for ANY REASON, increase keY by one
pass # skip to the next ascii header section
return(Hydro_OUTPUT) # returns full ascii file in a dictionary
### 3. Create a script that converts Hydrolight m-files one at a time
### 3a. Create new folder in which to place newly-created HDF5 files
template_dir = fd.askdirectory() # Select directory containing m-files
if '/' in template_dir:
# If files are on a mac
dasH = '/' # Folder separator for directory pathway
else:
# If files are on a pc
dasH = '\\' # Folder separator for directory pathway
dasH_IND = template_dir.rfind(dasH) # Find the last nested directory
repository_dir = template_dir[:dasH_IND]+dasH+'HDF5'+dasH # Create a new pathway for HDF5 files
createFolder(repository_dir) # Create a new folder adjacent to m-file folder
matLISt = os.listdir(template_dir) # list all files in m-file directory
### 3b. Covert m-files into HDF5
for i,mFILE in enumerate(matLISt):
# This for-loop cyles through m-files in user-selected folder. Data in each m-file (ascii)
# is re-formatted into a hdf5 file (.h5) which is placed into a folder named "hdf5"
# adjacent to the user-selected m-file folder.
try: # If mFILE is a Hydroligth m-file
HE53_dict = ASCII_to_hdf5(template_dir+dasH+mFILE) # Puts m-file data into dictionary
hdf5_fileWRITER(repository_dir+mFILE,HE53_dict) # Converts dictionary into hdf5 file
except: # If mFILE is NOT a Hydrolight m-file
pass # Ignore it!
```
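A sketch of reading back one of the HDF5 files produced by hdf5_fileWRITER above; the file name is hypothetical, group names mirror the m-file section headers (with '/' replaced by '-'), and each group holds keys such as 'Meta', 'wvl', 'depth' and 'data' depending on the section type:
```python
import h5py

# 'example_mfile.h5' is a placeholder name for a file written by hdf5_fileWRITER
with h5py.File('example_mfile.h5', 'r') as hf:
    for group_name in hf:
        group = hf[group_name]
        print(group_name, list(group.keys()))
        if 'data' in group:
            aop = group['data'][:]   # 2D array: wavelengths x depth bins
            wvl = group['wvl'][:]    # wavelength vector
            print(aop.shape, wvl.shape)
```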
|
{
"source": "JesseBerdowski/seq2seq",
"score": 2
}
|
#### File: JesseBerdowski/seq2seq/lib_translator_model.py
```python
import tensorflow as tf
import numpy as np
from lib_data import create_inference_array, rev_dec_sorted_dict
from lib_layers import Encoder, Decoder
from lib_util import load_pickle
from lib_evaluate import prediction
_defaults = dict(
latent_dim=256,
embedding_dim=256,
batch_size=1,
epochs=1,
max_seq_len=None,
optimizer='adam',
learning_rate=0.001,
loss='categorical_crossentropy',
metrics=['accuracy'],
data_path='C:\\Users\\<NAME>\\Downloads\\fra.txt',
save_dir='static\\',
num_samples=1,
training=False
)
class Translator:
def __init__(self):
enc_weights, self.dec_weights = load_pickle('C:\\Users\\<NAME>\\weights.pickle', 'weights')
self.enc_dict, self.dec_dict, \
self.enc_max_seq_len, self.dec_max_seq_len = load_pickle('C:\\Users\\<NAME>\\dicts.pickle', 'dicts')
self.rev_dec_dict = rev_dec_sorted_dict(self.dec_dict)
self.encoder = Encoder(feats=len(self.enc_dict), embed_dim=256, units=32)
self.decoder = Decoder(feats=len(self.dec_dict), units=32)
_, __ = self.encoder(tf.keras.layers.Input(shape=(None,)))
self.encoder.set_weights(enc_weights)
self.decoder(tf.keras.layers.Input(shape=(None,)), _, __, training=False)
self.decoder.set_weights(self.dec_weights)
def __call__(self, lst_inputs):
assert isinstance(lst_inputs, list)
self.lst_inputs = lst_inputs
def translate(self):
predicted_sentences = []
for sentence in self.lst_inputs:
in_arr = create_inference_array([sentence], self.enc_dict, False)
enc_out, state_h = self.encoder(in_arr)
start = np.zeros(shape=(1, self.dec_max_seq_len))
start[0, self.dec_dict['<start>']] = 1
predict_sentence = ''
for _ in range(self.dec_max_seq_len):
out, state_h = self.decoder(start, enc_out, state_h, False)
x = prediction(out, self.rev_dec_dict)
if x == '<stop>' or _ == self.dec_max_seq_len - 1:
    predicted_sentences.append(predict_sentence.replace('<start>', '').replace('<stop>', ''))
    break  # stop decoding once the <stop> token or the length limit is reached
predict_sentence += x
return predicted_sentences
```
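A hypothetical driver for the Translator class above; the pickle paths for the weights and dictionaries are hard-coded in __init__, so this assumes those files are present:
```python
# Sketch only: Translator() loads its weights/dicts from the hard-coded paths above
from lib_translator_model import Translator

if __name__ == "__main__":
    translator = Translator()
    translator(["how are you today"])   # __call__ stores the list of input sentences
    for line in translator.translate():
        print(line)
```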
#### File: JesseBerdowski/seq2seq/lib_util.py
```python
import pickle
import tensorflow.keras.backend as K
import json
import os
def load_from_bat(lst_argv):
assert len(lst_argv) == 3
hparams = json.loads(lst_argv[1])
num_samples = lst_argv[2]
return hparams, int(num_samples)
def save_dicts(enc_dic, dec_dic, enc_max_seq_len, dec_max_seq_len):
path = get_dir('static/saved/dicts.pickle')
with open(path, 'wb') as f:
dikt = dict(enc_dict=enc_dic,
dec_dict=dec_dic,
enc_max_seq_len=enc_max_seq_len,
dec_max_seq_len=dec_max_seq_len)
pickle.dump(dikt, f)
def save_model_weights(enc_w, dec_w):
path = get_dir('static/saved/weights.pickle')
with open(path, 'wb') as f:
lst = []
for item in enc_w:
lst.append(K.eval(item))
lst_ = []
for item in dec_w:
lst_.append(K.eval(item))
dikt = dict(enc_weights=lst,
dec_weights=lst_)
pickle.dump(dikt, f)
def load_pickle(path, sort):
with open(path, 'rb') as f:
dikt = pickle.load(f)
if sort == 'dicts':
return dikt['enc_dict'], dikt['dec_dict'], dikt['enc_max_seq_len'], dikt['dec_max_seq_len'],
elif sort == 'weights':
return dikt['enc_weights'], dikt['dec_weights']
def get_dir(path):
try:
return os.path.join(os.path.dirname(__file__), path)
except:
return 'It looks like you moved the dirs and files, please dont change the git\'s file structure'
```
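A small round-trip sketch for the helpers in lib_util.py; it assumes the static/saved/ directory exists so get_dir can resolve the pickle path:
```python
from lib_util import save_dicts, load_pickle, get_dir

# Toy token dictionaries, for illustration only
enc_dict = {'<start>': 0, 'hello': 1, '<stop>': 2}
dec_dict = {'<start>': 0, 'bonjour': 1, '<stop>': 2}

save_dicts(enc_dict, dec_dict, enc_max_seq_len=10, dec_max_seq_len=12)

enc_d, dec_d, enc_len, dec_len = load_pickle(
    get_dir('static/saved/dicts.pickle'), 'dicts')
print(enc_len, dec_len)  # 10 12
```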
|
{
"source": "jessebluestein/steel_beam_analysis",
"score": 2
}
|
#### File: steel_beam_analysis/steel_beam_analysis/beam.py
```python
import numpy as np
import itertools
from sortedcontainers import SortedSet, SortedList
import warnings
import sys
import operator
import jinja2
import os
from jinja2 import Template
from pdflatex import PDFLaTeX
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib
import json
# -------------------------- CUSTOM IMPORTED PACKAGES --------------------------
from steel_beam_analysis import units, vGetLoc, vGetBaseUnits, vGetMag
from steel_beam_analysis.load import AreaLoad, LineLoad, LoadCombo, PointLoad
from steel_beam_analysis.element import Element
from steel_beam_analysis.node import Node
from steel_beam_analysis.span import Span
from steel_beam_analysis.unbracedSpan import UnbracedSpan
import steel_beam_analysis.stringFixer as sf
# config jinja templating environment
latex_jinja_env = jinja2.Environment(block_start_string =
'\BLOCK{', block_end_string = '}', variable_start_string = '\VAR{', variable_end_string = '}',
comment_start_string = '\#{', comment_end_string = '}', line_statement_prefix = '%%',
line_comment_prefix = '%#', trim_blocks = True, autoescape = False,
loader = jinja2.FileSystemLoader(os.path.abspath('.')))
latex_jinja_env.globals['len'] = len
latex_jinja_env.globals['str'] = str
# config matplotlib settings
matplotlib.use('pgf')
matplotlib.rcParams.update({'pgf.texsystem': 'pdflatex',
'font.family': 'serif', 'text.usetex': True, 'pgf.rcfonts': False})
class Beam:
"""Model a beam composed of nodes, elements, material props and loads."""
def __init__(self, nodes, **kwargs):
# a
self.A = None
self.ASCE7version = kwargs.get('ASCE7version', 16)
self.avgNodeSpacing = None
# b
self.bendingAxis = kwargs.get('bendingAxis', 'strong')
self.bendingCheck = None
# c
self.considerSelfWeight = kwargs.get('considerSelfWeight', True)
# d
self.deflChecks = []
self.deflCombos = set()
self.deflLimGlass = kwargs.get('deflLimGlass', 0.25 * units.inch)
self.deflRatios = kwargs.get('deflRatios', {'TL': 240, 'LL': 360})
self.depthClass = kwargs.get('depthClass', 10)
# e
self.elements = SortedSet([])
self.eleSpacing = kwargs.get('eleSpacing', 1 * units.inch)
# f
self.F = {}
self.F0 = {}
self.F0Body = {}
self.F0Node = {}
self.FF = {}
self.freeDOFs = []
# g
self.glassEverywhere = kwargs.get('glassEverywhere', False)
# i
self.I = None
# k
self.K = None
self.KFF = None
# l
self.Lcombos = set()
self.len = None
self.loadTypes = SortedSet([])
self.loadTypesSub = None
# m
self.M = {}
self.maxMomentNode = None
self.maxShearNode = None
self.maxDeflNodes = {}
# n
self._nodes = SortedSet([])
# o
self.omega0 = kwargs.get('omega0', 2.5)
self.outUnit = kwargs.get('outUnit', {'M': 'kft', 'V': 'kip', 'defl': 'inch', 'loc': 'ft'})
self.outputPDF = kwargs.get('outputPDF', False)
self.overallCheck = None
# p
self.patternLoads = kwargs.get('patternLoads', ['L', 'Lr'])
self.pointLoads = []
self.projectInfo_memberName = kwargs.get('name', 'demo')
self.projectInfo_project = kwargs.get('project', '123 Maple Street, San Francisco CA')
self.projectInfo_level = kwargs.get('level', 2)
self.projectInfo_firm = kwargs.get('firm', 'ABC Company')
self.projectInfo_engineer = kwargs.get('engineer', 'Jesse')
self.projectInfo_checker = kwargs.get('checker', 'Joey')
# r
self._rawDistLoads = kwargs.get('rawDistLoads', [])
self.realNodes = SortedSet([])
self.restrainDOFs = []
self.rho = kwargs.get('rho', 1.3)
# s
self.S = None
self.SDS = kwargs.get('SDS', 1.0)
self.seismicFactors = {'omega0': self.omega0, 'rho': self.rho}
self.seismicFactorUse = kwargs.get('seismicFactorUse', 'omega0')
self.seismicLfactor = kwargs.get('seismicLfactor', 0.5)
self.shape = None
self.shearCheck = None
self.spans = SortedList()
self.strengthCombos = set()
self.supports = None
# u
self.U = {}
self.UF = {}
self.unbracedSpanBoundaryPts = SortedSet([])
self.unbracedSpans = []
# v
self.V = {}
# w
self.weight = None
# convert runtime warnings to errors to get traceback and address them
warnings.simplefilter('error', RuntimeWarning)
@property
def rawDistLoads(self):
return self._rawDistLoads
@rawDistLoads.setter
def rawDistLoads(self, vals):
for distLoad in vals:
if not isinstance(distLoad, LineLoad):
raise TypeError
self._rawDistLoads = vals
def plotEnvelope(self, attr, units, combos, title, close, maxNodes, maxVals, labelNames, maxCombos):
"""Plot envelope of values at a node."""
if close:
locs = [0]
else:
locs = []
for node in self.nodes:
locs.append(node.loc.to('ft').magnitude)
if close:
locs.append(self.nodes[-1].loc.to('ft').magnitude)
fig = plt.figure(figsize = (8, 3))
ax = fig.add_subplot(1, 1, 1)
for lc in combos:
if close:
vals = [0]
else:
vals = []
for node in self.nodes:
plotVal = getattr(node, attr)
vals.append(plotVal[str(lc)].to(units).magnitude)
if close:
vals.append(0)
ax.plot(locs, vals, linewidth = 1.5)
for idx, maxNode in enumerate(maxNodes):
xCoord = maxNode.loc.to('ft').magnitude
yCoord = maxVals[idx].to(units).magnitude
maxVal = round(maxVals[idx].to(units).magnitude, 1)
textCoord = 100 if xCoord <= self.len.to('ft').magnitude / 2 else -100 # vary label loc
textAlign = 'right' if xCoord <= self.len.to('ft').magnitude / 2 else 'left' # vary label alignment
ax.annotate(f'${labelNames[idx]} =$ {round(maxVals[idx].to(units), 1)}', xy=(xCoord, yCoord), xycoords='data', xytext=(textCoord, 0), textcoords='offset points', size=10, bbox=dict(boxstyle="round4,pad=.5", fc="0.8"), arrowprops=dict(arrowstyle='-'), ha=textAlign, va='center')
plt.text(0, 0.15, f'Max combo: {maxCombos[idx]}', ha='left', va='top', transform = ax.transAxes, size = 10, bbox=dict(boxstyle="round4,pad=.5", fc="0.8"))
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='on')
plt.grid(b=True, which='major', color='k', linestyle='dotted')
plt.savefig(f'{self.outputPath}/{self.projectInfo_memberName}_{title}.pgf')
def plotLoadDiagram(self):
"""Plot a diagram of the applied loads."""
fig, axs = plt.subplots(2, gridspec_kw={'hspace': -0.09, 'height_ratios': [3, 1]}, figsize = (8, 3), sharex = True)
prop_cycle = plt.rcParams['axes.prop_cycle']
cycle_colors = prop_cycle.by_key()['color']
# create dictionaries from dist loads to store offset magnitudes
distLoads = []
for load in sorted(self.rawDistLoads, reverse = True):
distLoads.append({'load': load, 'offset': 0})
# assign vertical offsets to overlapping loads
plotLoads = []
for load in distLoads:
for plotLoad in plotLoads[::-1]:
if plotLoad['load'].iLoc <= load['load'].iLoc <= plotLoad['load'].jLoc:
load['offset'] += max(plotLoad['load'].iLineLoad, plotLoad['load'].jLineLoad).to('plf').magnitude + plotLoad['offset']
break
plotLoads.append(load)
# plot distributed loads
for idx, load in enumerate(plotLoads):
iMag = load['load'].iLineLoad.to('plf').magnitude
jMag = load['load'].jLineLoad.to('plf').magnitude
iLoc = load['load'].iLoc.to('ft').magnitude
jLoc = load['load'].jLoc.to('ft').magnitude
points = [[iLoc, load['offset']], [iLoc, iMag + load['offset']], [jLoc, jMag + load['offset']], [jLoc, load['offset']]]
polygon = plt.Polygon(points, fill = True, alpha = 0.4, color = cycle_colors[idx])
axs[0].add_patch(polygon)
axs[0].text((iLoc + jLoc) / 2, (jMag) / 2 + load['offset'], f'w\\textsubscript{{{idx+1}}}', bbox = dict(boxstyle = "round4,pad=.5", fc = "0.8"), ha = 'center', va = 'center')
# plot beam flanges
d = self.d.to('in').magnitude
tf = d / 12 # flange width set to constant for aesthetic reasons
locs = [0, self.len.to('ft').magnitude]
topTopFlange = [0, 0]
botTopFlange = [0 - tf, 0 - tf]
topBotFlange = [-d, -d]
botBotFlange = [-d + tf, -d + tf]
flanges = [topTopFlange, botTopFlange, topBotFlange, botBotFlange]
for flange in flanges:
axs[1].plot(locs, flange, color = 'black', linewidth = 1)
axs[1].text(self.len.to('ft').magnitude / 2, -self.d.to('in').magnitude / 2, self.shape, bbox=dict(boxstyle="round4,pad=.5", fc="0.8"), ha='center', va='center')
# plot vertical lines at ends of beam
leftEndX = [0, 0]
leftEndY = [-d, 0]
rightEndX = [self.len.to('ft').magnitude, self.len.to('ft').magnitude]
rightEndY = leftEndY
axs[1].plot(leftEndX, leftEndY, color = 'black', linewidth = 1)
axs[1].plot(rightEndX, rightEndY, color = 'black', linewidth = 1)
# plot gravity support locations
pins = [support for support in self.supports if support.condition == 'pin']
fixes = [support for support in self.supports if support.condition == 'fix']
pinX = [pin.loc.to('ft').magnitude for pin in pins]
pinY = [-d - 3 for pin in pins]
fixX = [fix.loc.to('ft').magnitude for fix in fixes]
fixY = [-d - 3 for fix in fixes]
axs[1].scatter(pinX, pinY, marker = '^', s = 200, c = 'red')
axs[1].scatter(fixX, fixY, marker = 's', s = 200, c = 'blue')
# plot dimensions between supports
spanPts = [span.iNode.loc.to('ft').magnitude for span in self.spans]
spanPts.append(self.len.to('ft').magnitude)
for idx, support in enumerate(spanPts):
if idx != len(spanPts) - 1:
dist = spanPts[idx + 1] - support
# plot dimension line (no text)
axs[1].annotate(f'', xy=(support, -d-5), xycoords='data', xytext=(support + dist, -d-5), textcoords='data', arrowprops=dict(arrowstyle='<->, head_width=0.5', color = '#33ADFF'), ha='center')
# plot text in center of dimension line
axs[1].text(support + dist/2, -d-5, f'Span {idx} = {dist} ft', bbox=dict(boxstyle="round4, pad=0.5", fc="0.8"), size = 10, ha='center', va='center')
# plot applied point loads
pointLoadLocs = []
pointLoadNodes = [node for node in self.nodes if node.pointLoads]
for node in pointLoadNodes:
for pointLoad in node.pointLoads:
pointLoadLocs.append(node.loc.to('ft').magnitude)
pointLoadLocs = set(pointLoadLocs)
for loc in pointLoadLocs:
axs[0].annotate(f'$P_x$', xy=(loc, 0), xycoords='data', xytext=(0, 100), textcoords='offset points', size=12, bbox=dict(boxstyle="round4,pad=.5", fc="0.8"), arrowprops=dict(arrowstyle='->, head_width=0.5'), ha='center')
# plot settings and save
fig.patch.set_visible(False)
axs[0].axis('off')
axs[1].axis('off')
axs[1].autoscale()
axs[0].autoscale()
plt.savefig(f'{self.outputPath}/{self.projectInfo_memberName}_loadDiagram.pgf', dpi=90, pad_inches=0.5)
def runAnalysis(self):
"""Run analysis on the beam system."""
# ---------------------- FORM ELEMENT LOAD ARRAYS ----------------------
for element in self.elements:
element.formf0e()
# ------------------------ CALC DEMAND USING FEA -----------------------
# demands from applied distributed loads on elements
for type in self.loadTypesSub:
self.F0Body[type] = np.zeros((len(self.nodes) * 2, 1))
for idx, element in enumerate(self.elements):
f0e = element.f0e.get(type, np.array(([0],[0],[0],[0])))
self.F0Body[type][2*idx: 2*idx+4] = np.add(self.F0Body[type][2*idx: 2*idx+4], f0e)
# form F0Node
for type in self.loadTypesSub:
self.F0Node[type] = np.zeros((len(self.nodes) * 2, 1))
for idx, node in enumerate(self.nodes):
self.F0Node[type][2*idx, 0] = node.rawVapply.get(type,
0 * units.lbf).to(units.lbf).magnitude
self.F0Node[type][2*idx+1, 0] = node.rawMapply.get(type,
0 * units.lbin).to(units.lbin).magnitude
# form point loads list used for plotting and output table
idx = 1 # starts at 1 because used in output
for node in self.nodes:
for pointLoad in node.pointLoads:
loc = sf.fixUnits(node.loc, type = 'text')
shear = sf.fixUnits(-pointLoad.shear, type = 'text')
self.pointLoads.append({'id': f'P\\textsubscript{{{idx}}}', 'loc': loc, 'shear': shear, 'type': pointLoad.type, 'desc': pointLoad.desc})
idx +=1
# combination of demands from elements and nodes
for type in self.loadTypesSub:
self.F0[type] = np.add(self.F0Body[type], self.F0Node[type])
# global applied forces at free DOFs
for type in self.loadTypesSub:
self.FF[type] = self.F0[type][np.ix_(self.freeDOFs)]
# global stiffness array
self.K = np.zeros((len(self.nodes) * 2, len(self.nodes) * 2))
for idx, element in enumerate(self.elements):
self.K[2*idx: 2*idx+4, 2*idx: 2*idx+4] = np.add(self.K[2*idx: 2*idx+4, 2*idx: 2*idx+4], element.kE)
# global stiffness at free DOFs
self.KFF = self.K[np.ix_(self.freeDOFs, self.freeDOFs)]
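# NOTE: the restrained DOFs are partitioned out of the global system so the
# reduced equations U_F = inv(K_FF) * F_F are solved for the free DOFs only;
# support reactions are recovered later in the CALC REACTIONS step below.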
# displacements at free DOFs
for type in self.loadTypesSub:
self.UF[type] = np.matmul(np.linalg.inv(self.KFF), self.FF[type])
# pass displacements at free DOFs to nodes
for type in self.loadTypesSub:
for idx, dof in enumerate(self.freeDOFs):
if dof % 2 == 0:
self.nodes[int(dof/2)].rawDefls[type] = self.UF[type][idx, 0]
else:
self.nodes[int((dof-1)/2)].rawRotations[type] = self.UF[type][idx, 0]
# assemble displacement array (translational + rotational) for global system
for type in self.loadTypesSub:
self.U[type] = np.zeros((len(self.nodes) * 2, 1))
for idx, node in enumerate(self.nodes):
self.U[type][2*idx, 0] = node.rawDefls.get(type, 0)
self.U[type][2*idx+1, 0] = node.rawRotations.get(type, 0)
# set units for raw displacements at nodes
for node in self.nodes:
node.setRawDispUnits()
# forces (moments + shears) in global system
for type in self.loadTypesSub:
F = np.zeros((len(self.nodes) * 2, 1))
fEvals = []
for idx, elem in enumerate(self.elements):
f0e = elem.f0e.get(type, np.array(([0],[0],[0],[0])))
fEvals.append(np.add(np.matmul(elem.kE,
self.U[type][2*idx:2*idx+4]), -f0e))
for idx, elem in enumerate(self.elements):
F[2*idx:2*idx+2] = fEvals[idx][0:2]
F[-2:] = -fEvals[len(self.elements) - 1][2:4]
self.F[type] = F
# extract bending and shear demands from global array, set nodal values
for type in self.loadTypesSub:
self.M[type] = - self.F[type][1::2] # sign flipped for convention
self.V[type] = self.F[type][0::2]
for idx, node in enumerate(self.nodes):
node.rawM[type] = self.M[type][idx, 0] * units.lbin
node.rawV[type] = self.V[type][idx, 0] * units.lbf
# calc factored values and set values at each node
# IDEA: any "shorthand" way to do all these nested for loops?
# https://stackoverflow.com/questions/5462047/pythonic-shortcut-for-doubly-nested-for-loops
for node in self.nodes:
for lc in self.strengthCombos:
node.ultV[str(lc)] = sum([node.rawV.get(load['type'], 0) * load['factor'] for load in lc.loads])
node.ultVapply[str(lc)] = sum([node.rawVapply.get(load['type'], 0) * load['factor'] for load in lc.loads])
node.ultM[str(lc)] = sum([node.rawM.get(load['type'], 0) * load['factor'] for load in lc.loads])
node.ultMapply[str(lc)] = sum([node.rawMapply.get(load['type'], 0) * load['factor'] for load in lc.loads])
for lc in self.deflCombos:
node.ultDefls[str(lc)] = sum([node.rawDefls.get(load['type'], 0 * units.inch) * load['factor'] for load in lc.loads])
for lc in self.Lcombos:
node.ultDeflsL[str(lc)] = sum([node.rawDefls.get(load['type'], 0 * units.inch) * load['factor'] for load in lc.loads])
node.deflMaxAbs['combo'] = max(node.ultDefls, key = lambda y: abs(node.ultDefls[y]))
node.deflMaxAbs['val'] = node.ultDefls[node.deflMaxAbs['combo']]
node.MuMaxAbs['combo'] = max(node.ultM, key = lambda y: abs(node.ultM[y]))
node.MuMaxAbs['val'] = node.ultM[node.MuMaxAbs['combo']]
node.VuMaxAbs['combo'] = max(node.ultV, key = lambda y: abs(node.ultV[y]))
node.VuMaxAbs['val'] = node.ultV[node.VuMaxAbs['combo']]
if 'L' in self.loadTypes:
node.deflMaxAbsL['combo'] = max(node.ultDeflsL, key = lambda y: abs(node.ultDeflsL[y]))
node.deflMaxAbsL['val'] = node.ultDeflsL[node.deflMaxAbsL['combo']]
# get max demand nodes
self.maxMomentNode = max(self.nodes, key = lambda node: abs(node.MuMaxAbs['val']))
self.maxShearNode = max(self.nodes, key = lambda node: abs(node.VuMaxAbs['val']))
self.maxDeflNodes['TL'] = max(self.nodes, key = lambda node: abs(node.deflMaxAbs['val']))
if 'L' in self.loadTypes:
self.maxDeflNodes['LL'] = max(self.nodes, key = lambda node: abs(node.deflMaxAbsL['val']))
# set max total load & live load deflection nodes in each span
for span in self.spans:
span.setMaxTLdeflNode()
if 'L' in self.loadTypes:
span.setMaxLLdeflNode()
# -------------------------- SET UNBRACED SPANS ------------------------
# always include both ends of the beam (even if cantilevered ends)
self.unbracedSpanBoundaryPts.add(self.nodes[0])
self.unbracedSpanBoundaryPts.add(self.nodes[-1])
# add in top/bottom brace points based on sign of moment demand at node
for node in self.nodes:
if (node.addUnbracedBoundaryPt()):
self.unbracedSpanBoundaryPts.add(node)
for idx, node in enumerate(self.unbracedSpanBoundaryPts):
if idx != 0:
self.unbracedSpans.append(UnbracedSpan(self, self.unbracedSpanBoundaryPts[idx-1], node))
spanIter = 0
for node in self.nodes:
if node.loc == self.unbracedSpans[spanIter].jNode.loc:
self.unbracedSpans[spanIter].nodes.add(node)
node.assignUnbracedSpan(self.unbracedSpans[spanIter])
spanIter += 1
if spanIter < len(self.unbracedSpans):
self.unbracedSpans[spanIter].nodes.add(node)
node.assignUnbracedSpan(self.unbracedSpans[spanIter])
# set max moment in unbraced spans
for span in self.unbracedSpans:
span.setMaxMomentNode()
# ------------- CALC CAPACITY, DCRs AND CHECK BENDING/SHEAR ------------
self.calcCapacity()
self.calcDCRs()
self.checkBending()
self.checkShear()
# --------------------------- CALC REACTIONS ---------------------------
for idx, node in enumerate(self.nodes):
if node in self.supports:
if node.loc == 0 * units.ft:
node.calcReaction(type = 'leftEnd')
elif node.loc == self.len:
node.calcReaction(type = 'rightEnd')
else:
node.calcReaction(leftNode = self.nodes[idx-1], rightNode = self.nodes[idx+1])
# ------------------------- CHECK DEFLECTIONS --------------------------
for span in self.spans:
if span.maxDeflNodes['TL'].deflMaxAbs['val'] != 0 * units.inch:
span.setTLdeflRatios(self.deflRatios['TL'])
span.checkDeflections('TL')
if 'L' in self.loadTypes:
if span.maxDeflNodes['LL'].deflMaxAbsL['val'] != 0 * units.inch:
span.setLLdeflRatios(self.deflRatios['LL'])
span.checkDeflections('LL')
for span in self.spans:
self.deflChecks.append(span.isDeflectionOK())
# deflection check w/ glass at discrete points
for node in self.nodes:
if node.condition == 'glass':
node.checkGlassDeflection(self.deflLimGlass)
# set beam deflection check at glass based on nodal deflections
self.deflChecks.append('NG' if any(n.glassDeflCheck == 'NG' for n in self.nodes) else 'OK')
# if glass everywhere, check that glass deflection passes everywhere
if self.glassEverywhere:
self.deflChecks.append('OK' if all(abs(n.deflMaxAbs['val']) < self.deflLimGlass for n in self.nodes) else 'NG')
# --------------------------- CHECK OVERALL ----------------------------
checks = []
checks.append(self.bendingCheck)
checks.append(self.shearCheck)
for check in self.deflChecks:
checks.append(check)
self.overallCheck = 'OK' if all(c == 'OK' for c in checks) else 'NG'
# ----------------------------- OUTPUT PDF ----------------------------
maxDeflNodes = [self.maxDeflNodes['TL']]
maxDeflVals = [self.maxDeflNodes['TL'].deflMaxAbs['val']]
        maxDeflLabels = ['\\Delta']
maxDeflAnnos = [self.maxDeflNodes['TL'].deflMaxAbs['combo']]
if 'L' in self.loadTypes:
maxDeflNodes.append(self.maxDeflNodes['LL'])
maxDeflVals.append(self.maxDeflNodes['LL'].deflMaxAbsL['val'])
            maxDeflLabels.append('\\Delta')
maxDeflAnnos.append(self.maxDeflNodes['LL'].deflMaxAbsL['combo'])
if self.outputPDF:
self.plotLoadDiagram()
self.plotEnvelope('ultV', self.outUnit['V'],
self.strengthCombos, 'shear', True, [self.maxShearNode],
[self.maxShearNode.VuMaxAbs['val']], ['V_u'],
[self.maxShearNode.VuMaxAbs['combo']])
self.plotEnvelope('ultM', self.outUnit['M'],
self.strengthCombos, 'moment', True, [self.maxMomentNode],
[self.maxMomentNode.MuMaxAbs['val']], ['M_u'],
[self.maxMomentNode.MuMaxAbs['combo']])
self.plotEnvelope('ultDefls', self.outUnit['defl'],
self.deflCombos, 'defl', False, maxDeflNodes, maxDeflVals,
maxDeflLabels, maxDeflAnnos)
self.plotMaterialSpecificFigures()
self.outputPDFreport()
@property
def nodes(self):
return self._nodes
@nodes.setter
def nodes(self, vals):
for node in vals:
if (not isinstance(node, Node)):
raise TypeError
self._nodes = vals
def setBeamSystem(self, nodes):
"""Set generic beam system parameters."""
if len(set(nodes)) < len(nodes):
sys.exit('ERROR: Multiple nodes cannot have the same location.')
for node in nodes:
self.nodes.add(node)
self.setShape()
self.setRefDesignValues()
self.len = self.nodes[-1].loc - self.nodes[0].loc
# ----------------- SET SPAN GEOMETRY (i AND j NODES) ------------------
for idx, node in enumerate(self.nodes):
if idx == 0:
self.realNodes.add(node) # 'real' nodes define spans
elif node.condition == 'pin' or node.condition == 'fix':
self.realNodes.add(node)
elif idx == (len(self.nodes) - 1):
self.realNodes.add(node)
for idx, iNode in enumerate(self.realNodes[:-1]):
self.spans.add(Span(iNode, self.realNodes[idx + 1]))
supportTypes = ['pin', 'fix']
self.supports = [node for node in self.realNodes if node.condition in supportTypes]
# --------------------------- SET LOAD TYPES ---------------------------
        # NOTE: if load types were set after the distributed loads, the separate
        # lines that add 'D' when self weight is considered could be removed
# include self weight in load types if self weight is considered
        if self.considerSelfWeight:
self.loadTypes.add('D')
# include load types for point loads
for node in self.nodes:
for pointLoad in node.pointLoads:
if (not isinstance(pointLoad, PointLoad)):
raise TypeError
self.loadTypes.add(pointLoad.type)
# include load types for distributed loads
for distLoad in self.rawDistLoads:
self.loadTypes.add(distLoad.type)
# remove pattern load types that aren't actually on beam
self.patternLoads = [load for load in self.patternLoads if load in self.loadTypes]
        # set subdivided load types list (non-pattern load types + pattern load types w/ indices)
tempList0 = [load for load in self.loadTypes if load not in self.patternLoads]
tempList1 = []
for patLoad in self.patternLoads:
for idx, span in enumerate(self.spans):
tempList1.append(f'{patLoad}{idx}')
self.loadTypesSub = tempList0 + tempList1
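        # Illustrative example (hypothetical loads): with loadTypes = {'D', 'L'},
        # patternLoads = ['L'] and two spans, loadTypesSub would contain 'D', 'L0'
        # and 'L1', i.e. each pattern load is split per span so combos can pattern it.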
self.setMaterialSpecificSystem()
        # ---------- CREATE NODES THAT ARE PROGRAMMATICALLY REQUIRED -----------
# create nodes where dist loads start and end if no node already exists
if self.rawDistLoads:
for distLoad in self.rawDistLoads:
self.nodes.add(Node(distLoad.iLoc))
self.nodes.add(Node(distLoad.jLoc))
# add mesh nodes at element spacing interval
meshNodes = []
for idx in range(len(self.nodes)-1):
# skip the nodes it pertains to
for location in np.arange(self.nodes[idx].loc + self.eleSpacing, self.nodes[idx + 1].loc - self.eleSpacing, self.eleSpacing, dtype = 'object'):
meshNodes.append(Node(location))
for meshNode in meshNodes:
self.nodes.add(meshNode)
# ------------------------ ASSIGN NODES TO SPANS -----------------------
spanIter = 0
for node in self.nodes:
if node.loc == self.spans[spanIter].jNode.loc:
self.spans[spanIter].nodes.add(node)
spanIter += 1
if spanIter < len(self.spans):
self.spans[spanIter].nodes.add(node)
node.span = self.spans[spanIter]
# ----------------------- CREATE ELEMENT GEOMETRY ----------------------
for idx, iNode in enumerate(self.nodes[:-1]):
self.elements.add(Element(self, iNode, self.nodes[idx+1]))
# ----------------------- ASSIGN ELEMENTS TO SPANS ---------------------
spanIter = 0
for element in self.elements:
if element.jNode.loc == self.spans[spanIter].jNode.loc:
self.spans[spanIter].elements.add(element)
spanIter += 1
if spanIter < len(self.spans):
self.spans[spanIter].elements.add(element)
# ----------- CALC SELF WEIGHT & INCLUDE AS DISTRIBUTED LOAD -----------
self.weight = round((self.unitWt * self.A).to(units.plf), 1)
if self.considerSelfWeight:
self.rawDistLoads.append(LineLoad(iLoc = 0 * units.ft, jLoc = self.len, iLineLoad = self.weight, jLineLoad = self.weight, desc = 'Self weight'))
# ----------------- ASSIGN SUPERIMPOSED LOADS TO ELEMENTS --------------
if self.rawDistLoads:
for dl in self.rawDistLoads:
rangeElems = [elem for elem in self.elements if elem.iNode.loc >= dl.iLoc and elem.jNode.loc <= dl.jLoc]
for elem in rangeElems:
iDist = elem.iNode.loc - dl.iLoc
jDist = elem.jNode.loc - dl.iLoc
iMag = dl.iLineLoad + dl.slope * (iDist)
jMag = dl.iLineLoad + dl.slope * (jDist)
elem.iDistLoads[dl.type] = elem.iDistLoads.get(dl.type, 0) + iMag.to(units.pli).magnitude
elem.jDistLoads[dl.type] = elem.jDistLoads.get(dl.type, 0) + jMag.to(units.pli).magnitude
# ------------------- ASSIGN SUPERIMPOSED LOADS TO NODES ---------------
for node in self.nodes:
for type in [pointLoad.type for pointLoad in node.pointLoads]:
node.rawVapply[type] = sum([ptLd.shear for ptLd in node.pointLoads if ptLd.type == type])
node.rawMapply[type] = sum([ptLd.moment for ptLd in node.pointLoads if ptLd.type == type])
# ------------------------ BREAK OUT PATTERN LOADS ---------------------
# for applied distributed loads
for idx, span in enumerate(self.spans):
for elem in span.elements:
for patLoad in self.patternLoads:
elem.iDistLoads[f'{patLoad}{idx}'] = elem.iDistLoads.pop(patLoad, 0)
elem.jDistLoads[f'{patLoad}{idx}'] = elem.jDistLoads.pop(patLoad, 0)
# for applied point loads
for idx, span in enumerate(self.spans):
for node in span.nodes:
for patLoad in self.patternLoads:
node.rawVapply[f'{patLoad}{idx}'] = node.rawVapply.pop(patLoad, 0 * units.kip)
node.rawMapply[f'{patLoad}{idx}'] = node.rawMapply.pop(patLoad, 0 * units.kft)
# ------- SET SYSTEM PARAMETERS AND CHECK THAT ANALYSIS CAN RUN --------
for idx,node in enumerate(self.nodes):
if node.trans:
self.restrainDOFs.append(2*idx)
else:
self.freeDOFs.append(2*idx)
if node.rotate:
self.restrainDOFs.append(2*idx + 1)
else:
self.freeDOFs.append(2*idx + 1)
self.avgNodeSpacing = self.len / len(self.nodes)
if len(self.restrainDOFs) <= 1:
sys.exit('ERROR: Insufficient supports provided. Beam needs (2) pinned nodes or (1) fixed node to be stable.')
for idx, node in enumerate(self.nodes):
if node.condition == 'fix':
if idx == 0 or idx == len(self.nodes) - 1:
continue
else:
sys.exit('ERROR: Fixed nodes can only be at beginning or end.')
def setLoadCombos(self, collection, targetAttr):
"""Set load combinations for strength and deflection checks given a
collection of load combinations to pull from and a target attribute to
set with the list of load combinations."""
# determine which collection to look for load combos
if collection == 'LRFD':
            db_path = '../steel_beam_analysis/db/lrfd_combos.json'
        elif collection == 'ASD':
            db_path = '../steel_beam_analysis/db/asd_combos.json'
        elif collection == 'L':
            db_path = '../steel_beam_analysis/db/L_combos.json'
else:
sys.exit('bad collection option for load combos!')
# read load combo data from json db
with open(db_path) as f:
raw_combos = json.load(f)
# filter raw load combo data
filtered_combos = []
for combo in raw_combos:
filtered_combo = {}
for k in combo.keys():
                if combo[k] is not None and k in self.loadTypes:
filtered_combo[k] = combo[k]
if k == 'ref':
filtered_combo[k] = combo[k]
if len(filtered_combo) > 1:
filtered_combos.append(filtered_combo) # don't append 0 length combos
# build load combo objects
for combo in filtered_combos:
comboNoRef = {k: v for k, v in combo.items() if k != 'ref'}
patLoads = list(set(list(comboNoRef.keys())) & set(self.patternLoads))
nonPatLoads = [load for load in comboNoRef if load not in patLoads]
if patLoads:
tfPerms = list(itertools.product([True, False], repeat = len(self.spans)))
spanIdxsCombos = []
for perm in tfPerms:
spanIdxsCombos.append([i for i, v in enumerate(perm) if v])
spanIdxsCombos.remove([])
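                # Example with two spans: tfPerms = [(True, True), (True, False),
                # (False, True), (False, False)], so after dropping the empty case
                # spanIdxsCombos = [[0, 1], [0], [1]], i.e. every non-empty set of
                # spans that can carry the pattern load simultaneously.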
for perm in spanIdxsCombos:
lcLoads = []
for load in nonPatLoads:
lcLoads.append({'type': load, 'factor': eval(str(combo[load]))})
for spanIdx in perm:
for load in patLoads:
lcLoads.append({'type': f'{load}{spanIdx}', 'factor': eval(str(combo[load]))})
if lcLoads:
targetAttr.add(LoadCombo(self, lcLoads, combo['ref']))
else:
lcLoads = []
for load in nonPatLoads:
lcLoads.append({'type': load, 'factor': eval(str(combo[load]))})
if lcLoads:
targetAttr.add(LoadCombo(self, lcLoads, combo['ref']))
def __str__ (self):
bendingDCR = round(self.maxMomentNode.bendingDCR, 3)
shearDCR = round(self.maxShearNode.shearDCR, 3)
string = f'Bending DCR... \t{bendingDCR}\n'
string += f'Shear DCR... \t{shearDCR}\n'
string += f'Analysis ran successfully!'
return string
```
#### File: steel_beam_analysis/steel_beam_analysis/element.py
```python
import sys
from collections import defaultdict
import numpy as np
# -------------------------- CUSTOM IMPORTED PACKAGES --------------------------
from steel_beam_analysis import units
from steel_beam_analysis.node import Node
class Element:
"""Model an element as a portion of a beam between two nodes."""
def __init__(self, beam, iNode, jNode):
if ((type(iNode) != Node) or (type(jNode) != Node)):
raise TypeError
self.beam = beam
self.f0e = {}
self._iDistLoads = {}
self.iNode = iNode
self._jDistLoads = {}
self.jNode = jNode
self.kE = None
self.L = abs(self.jNode.loc-self.iNode.loc)
self.formKE()
@property
def iDistLoads(self):
return self._iDistLoads
@iDistLoads.setter
def iDistLoads(self, val):
        if not isinstance(val, dict):
sys.exit(f'ERROR: i-End Distributed Loads is a {type(val)} and it must be a dict.')
self._iDistLoads = val
@property
def jDistLoads(self):
return self._jDistLoads
@jDistLoads.setter
def jDistLoads(self, val):
        if not isinstance(val, dict):
sys.exit(f'ERROR: j-End Distributed Loads is a {type(val)} and it must be a dict.')
self._jDistLoads = val
def formf0e(self):
"""Form f0e-element body force array for trapezoidal load."""
L = self.L.to(units.inch).magnitude
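        # Sanity check on the closed-form terms below (signs follow this module's
        # own convention): for a uniform load w0 == w1 == w, each end shear term
        # has magnitude w*L/2 (half the total load) and each end moment term has
        # magnitude w*L**2/12, the classical fixed-end values.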
for type in self.iDistLoads:
w0 = self.iDistLoads[type]
w1 = self.jDistLoads[type]
# load array for uniform distributed load
if w0 == w1:
val0 = (-1/2)*w0*L
val1 = (-1/12)*w0*L**2
val2 = (-1/2)*w0*L
val3 = (-1/12)*w0*L**2
# load array for ascending trapezoidal load
elif w0 < w1:
val0 = (-1/2)*w0*L-(3/20)*(w1-w0)*L
val1 = (-1/12)*w0*L**2-(1/30)*(w1-w0)*L**2
val2 = (-1/2)*w0*L-(7/20)*(w1-w0)*L
val3 = (-1/12)*w0*L**2-(1/20)*(w1-w0)*L**2
# load array for descending trapezoidal load
elif w0 > w1:
val0 = (-1/2)*w1*L-(7/20)*(w0-w1)*L
val1 = (-1/12)*w1*L**2-(1/20)*(w0-w1)*L**2
val2 = (-1/2)*w1*L-(3/20)*(w0-w1)*L
val3 = (-1/12)*w1*L**2-(1/30)*(w0-w1)*L**2
f0e = np.array(([val0],[val1],[val2],[val3]))
self.f0e[type] = f0e
def formKE(self):
"""Form element stiffness array."""
E = self.beam.E.to(units.psi).magnitude
I = self.beam.I.to(units.inch**4).magnitude
L = self.L.to(units.inch).magnitude
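        # kE is the standard two-node beam stiffness for DOFs [v_i, theta_i, v_j, theta_j].
        # When shear deformations are considered, b = 12*E*I/(G*A_s*L**2) is the usual
        # shear-flexibility parameter; setting b = 0 recovers the Euler-Bernoulli matrix
        # built in the else branch below.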
if self.beam.considerShearDeformations:
G = self.beam.G.to(units.psi).magnitude
if hasattr(self.beam, 'Aw'):
# for steel beams, shear area = web area
shearArea = self.beam.Aw.to(units.inch**2).magnitude
else:
# for wood beams, shear area = 5/6*cross-sectional area
shearArea = 5/6*self.beam.A.to(units.inch**2).magnitude
b = (12*E*I)/(G*shearArea*L**2)
self.kE = np.matrix([[12*E*I/((1+b)*L**3), 6*E*I/((1+b)*L**2)\
,-12*E*I/((1+b)*L**3), 6*E*I/((1+b)*L**2)],\
[6*E*I/((1+b)*L**2), 4*E*I*(1+b/4)/((1+b)*L)\
,-6*E*I/((1+b)*L**2), 2*E*I*(1-b/2)/((1+b)*L)],\
[-12*E*I/((1+b)*L**3), -6*E*I/((1+b)*L**2)\
,12*E*I/((1+b)*L**3), -6*E*I/((1+b)*L**2)],\
[6*E*I/((1+b)*L**2), 2*E*I*(1-b/2)/((1+b)*L)\
,-6*E*I/((1+b)*L**2), 4*E*I*(1+b/4)/((1+b)*L)]])
else:
self.kE = np.matrix([[12*E*I/L**3 ,6*E*I/L**2 ,-12*E*I/L**3 ,6*E*I/L**2],\
[6*E*I/L**2 ,4*E*I/L ,-6*E*I/L**2 ,2*E*I/L],\
[-12*E*I/L**3 ,-6*E*I/L**2 ,12*E*I/L**3 ,-6*E*I/L**2],\
[6*E*I/L**2 ,2*E*I/L ,-6*E*I/L**2 ,4*E*I/L]])
def __lt__(self, other):
return self.iNode.loc < other.iNode.loc
def __gt__(self, other):
return self.iNode.loc > other.iNode.loc
def __eq__(self, other):
return self.iNode.loc == other.iNode.loc
def __hash__(self):
return hash(self.iNode.loc)
```
#### File: steel_beam_analysis/steel_beam_analysis/span.py
```python
from operator import attrgetter
from sortedcontainers import SortedSet
# -------------------------- CUSTOM IMPORTED PACKAGES --------------------------
from steel_beam_analysis import units
import steel_beam_analysis.stringFixer as sf
from steel_beam_analysis.node import Node
from steel_beam_analysis.element import Element
class Span:
"""Model a simple span where the i node is the left support and the j node
is the right support. Or, model a cantilever span where the i node or j node
is a free node, and the other node is a support."""
def __init__ (self, iNode, jNode):
if ((type(iNode) != Node ) or (type(jNode) != Node)):
raise TypeError
self.cantilever = None
self.deflChecks = {}
self.deflLimits = None
self.deflRatioLimits = {}
self.deflRatios = {}
self._elements = SortedSet([])
self.iNode = iNode
self.jNode = jNode
self.L = self.jNode.loc - self.iNode.loc
self.maxDeflNodes = {}
self._nodes = None
self.nodes = SortedSet([])
self.ultDefls = None
self.checkCantilever()
@property
def elements(self):
return self._elements
@elements.setter
    def elements(self, vals):
        if not isinstance(vals, SortedSet) and not isinstance(vals, list):
            raise TypeError
        if any(not isinstance(element, Element) for element in vals):
            raise TypeError
        self._elements = vals
@property
def nodes(self):
return self._nodes
@nodes.setter
def nodes(self, vals):
if not isinstance(vals, SortedSet) and not isinstance(vals, list):
raise TypeError
if any(not isinstance(node, Node) for node in vals):
raise TypeError
self._nodes = vals
def checkDeflections(self, type):
"""Check that deflections in the span are within limits."""
ratio = self.deflRatios[type].to_reduced_units().magnitude
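        # Illustrative numbers (hypothetical): with a limit of L/240 and a computed
        # max deflection of L/600, ratio = 600 >= 240, so the check reads 'OK'.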
if ratio:
if ratio >= self.deflRatioLimits[type]:
self.comparison = '<'
result = 'OK'
else:
self.comparison = '>' # sign flipped because comparison done with reciprocal
result = 'NG'
else:
self.comparison = 'N/A'
            result = 'OK'  # ratio could not be calculated; treat the check as passing
self.deflChecks[type] = result
if type == 'LL':
maxDefl = round(self.maxDeflNodes['LL'].deflMaxAbsL['val'], 2)
elif type == 'TL':
maxDefl = round(self.maxDeflNodes['TL'].deflMaxAbs['val'], 2)
equation = f'\\Delta_{{max}} = {maxDefl} = \\cfrac{{L}}{{{int(ratio)}}} {self.comparison} \\cfrac{{L}}{{{self.deflRatioLimits[type]}}} _sp_ _bf_{{({result})}}'
return f'{type.title()} Deflection Check: \r${sf.fixUnits(equation)}$'
def isDeflectionOK(self):
return 'NG' if 'NG' in self.deflChecks.values() else 'OK'
def setTLdeflRatios(self, ratioLimit):
"""Set the total load deflection ratio and limiting ratio in the span."""
self.deflRatios['TL'] = abs(self.L / self.maxDeflNodes['TL'].deflMaxAbs['val'])
        # cantilever spans use half the allowable ratio limit
        self.deflRatioLimits['TL'] = ratioLimit / 2 if self.cantilever else ratioLimit
def setLLdeflRatios(self, ratioLimit):
"""Set the live load deflection ratio and limiting ratio in the span."""
self.deflRatios['LL'] = abs(self.L / self.maxDeflNodes['LL'].deflMaxAbsL['val'])
        # cantilever spans use half the allowable ratio limit
        self.deflRatioLimits['LL'] = ratioLimit / 2 if self.cantilever else ratioLimit
def setMaxTLdeflNode(self):
"""Set the node with the max total load deflection in the span."""
self.maxDeflNodes['TL'] = max(self.nodes, key = lambda node: abs(node.deflMaxAbs['val']))
def setMaxLLdeflNode(self):
"""Set the node with the max live load deflection in the span."""
self.maxDeflNodes['LL'] = max(self.nodes, key = lambda node: abs(node.deflMaxAbsL['val']))
def checkCantilever(self):
"""If either the i node or the j node of the span is free, report that
the span is a cantilever."""
if self.iNode.condition == 'free' or self.jNode.condition == 'free':
self.cantilever = True
else:
self.cantilever = False
    def __cmp__(self, other):
        # Python 3 never calls __cmp__ and has no cmp() builtin; kept for reference
        return (self.iNode.loc > other.iNode.loc) - (self.iNode.loc < other.iNode.loc)
def __lt__(self, other):
return self.iNode.loc < other.iNode.loc
def __gt__(self, other):
return self.iNode.loc > other.iNode.loc
def __eq__(self, other):
return self.iNode.loc == other.iNode.loc
def __hash__(self):
return hash(self.iNode.loc)
def __str__(self):
string = f'span from {self.iNode.loc} to {self.jNode.loc}'
return sf.fixUnits(string)
```
|
{
"source": "jessebmurray/landfills",
"score": 3
}
|
#### File: jessebmurray/landfills/landfill_2.py
```python
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
types = ['paper', 'food', 'yard trimmings', 'wood', 'steel', 'rubber and leather', 'textiles', 'aluminum', 'plastic',
'glass']
years = [1960, 1970, 1980, 1990, 2000, 2005, 2010, 2015, 2016, 2017]
years1 = list(range(1960, 2020, 5)) + [2016, 2017]
amounts = [[24910, 37390, 42560, 43570, 40450, 35080, 22000, 18280, 17660, 18350],
[12200, 12750, 12740, 19800, 24200, 26370, 28620, 30250, 30680, 30630],
[20000, 23110, 26950, 25560, 11900, 9990, 11690, 10800, 9640, 8650],
[3030, 3710, 6860, 10000, 9910, 10690, 11120, 11070, 12250, 12140],
[10250, 12150, 12000, 8720, 7860, 8550, 9310, 9970, 10310, 10430],
[1510, 2710, 4000, 4590, 3880, 4130, 4400, 4490, 4790, 4950],
[1710, 1970, 2320, 4270, 6280, 7570, 8900, 10540, 11130, 11150],
[340, 790, 1390, 1500, 1940, 2230, 2390, 2490, 2640, 2650],
[390, 2900, 6670, 13780, 19950, 23270, 24370, 26030, 26290, 26820],
[6620, 12520, 14080, 8660, 8100, 8290, 7030, 6840, 6880, 6870]]
total_amounts = [73.4456976, 99.79035, 117.54396045, 127.41413325000002, 121.98916695, 123.53138145000001, 117.77982855000002, 118.62351059999999, 119.99335995000001, 120.32901840000001]
per_capita = [2.68, 2.96, 3.25, 3.25, 3.66, 3.83, 4.57, 4.52, 4.74, 4.69, 4.45, 4.48, 4.53, 4.51]
graphs_location = '/Users/jessemurray/Documents is here/Documents/Odd Projects/Landfill Volume/Graphs/'
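# The per_capita figures appear to be lbs/person/day; the loop below converts them to
# metric tons/person/year (lb -> kg via 0.453592, day -> year via 365, kg -> t via 0.001).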
for i in range(len(per_capita)):
per_capita[i] = per_capita[i] * 0.453592 * 365 * 0.001
print(len(per_capita))
print(len(years1))
tick_size = 14
label_size = 15
legend_size = 14
# PLOTTING PER CAPITA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
plt.style.use('seaborn')
# plt.tick_params(labelsize=tick_size)
# plt.xlabel('Year', fontsize=label_size)
plt.xlabel('Year')
# plt.ylabel('Metric Tons/Person/Year', fontsize=label_size)
plt.ylabel('Metric Tons/Person/Year')
plt.plot(years1, per_capita, linestyle='--', marker='o', label='Per Capita MSW Generation')
# plt.legend(fontsize=legend_size)
plt.legend()
file_location0 = graphs_location + 'epa_per_capita1.png'
plt.savefig(file_location0, dpi=300)
# PLOTTING TOTAL AMOUNTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# plt.style.use('seaborn')
# # plt.style.use('seaborn-talk')
#
#
# # plt.tick_params(labelsize=tick_size)
# # plt.xlabel('Year', fontsize=label_size)
# plt.xlabel('Year')
# # plt.ylabel('Million Metric Tons', fontsize=label_size)
# plt.ylabel('Million Metric Tons')
#
# # plt.xlabel('Year')
# # plt.ylabel('Million Metric Tons')
#
# plt.plot(years, total_amounts, linestyle='--', marker='o', label='Total MSW Landfilled')
# plt.legend()
# # plt.legend(fontsize=legend_size)
#
# file_location0 = graphs_location + 'epa_previous_data.png'
#
# plt.show()
# # plt.savefig(file_location0, dpi=300)
#
# in_image = Image.open(file_location0)
# n = 0.2
# width, height = in_image.size
# out_image = in_image.resize((int(width * n), int(height * n)))
# out_image.save(file_location0)
# print(width * n)
# print(height * n)
# plt.show()
def main():
extended_amounts = generate_new_amount_list(amounts)
converted_extended_amounts = convert_extended_amounts(extended_amounts)
print(converted_extended_amounts)
print(converted_extended_amounts[-1][-1])
print(len(converted_extended_amounts[0]))
def convert_extended_amounts(extended_amounts):
converted_extended_amounts = []
for amount_list in extended_amounts:
converted_amount_list = []
for value in amount_list:
# convert from thousands of tons to millions of tons
# then convert to millions of metric tons
new_value = value * 0.001 * 0.907185
converted_amount_list.append(new_value)
converted_extended_amounts.append(converted_amount_list)
return converted_extended_amounts
def generate_new_amount_list(all_amount_list):
new_amount_list = []
    for amount_list in all_amount_list:
new_amount_list.append(generate_new_amounts(years, amount_list))
return new_amount_list
def new_amount_index(year_list, amount_list, index):
year_difference = year_list[index] - year_list[index - 1]
this_years_amount = amount_list[index]
previous_years_amount = amount_list[index - 1]
increment = (this_years_amount - previous_years_amount) / year_difference
return [previous_years_amount + (increment * y) for y in range(year_difference)]
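# Example: for years[0:2] = [1960, 1970], new_amount_index(years, amount_list, 1)
# returns 10 linearly interpolated values covering 1960 through 1969; the final
# survey-year value is appended separately in generate_new_amounts.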
def generate_new_amounts(year_list, amount_list):
new_amounts = []
    for index in range(1, len(year_list)):
new_amounts += new_amount_index(year_list, amount_list, index)
new_amounts += [amount_list[-1]]
return new_amounts
# main()
# total_amounts = [0] * len(amounts[0])
#
# for year_list in amounts:
# for year_num in range(len(year_list)):
# total_amounts[year_num] += year_list[year_num]
#
# for total_year_num in range(len(total_amounts)):
# total_amounts[total_year_num] = total_amounts[total_year_num] * 0.001 * 0.907185
```
|
{
"source": "jessebmurray/polygenic",
"score": 4
}
|
#### File: polygenic/archive/tree_problem_0.py
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
# from scipy.special import erf
# Global variables that were just used in main
number_of_iterations = 100
z_range = 8
r = 0.9
r_s = 0.9
mean_gen = 0
sd_gen = 1
k_val = -2
percent_step = 0.33
# Global variables that are used in here (the module) not just in main
ROUND_NUMBER = 6
# FUNDAMENTAL STRUCTURE FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def f_norm(x, mean, sd):
return (1 / (sd * ((2 * np.pi) ** 0.5))) * np.exp(-1 * ((((x - mean) / sd) ** 2) / 2))
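# f_norm is the standard normal-density formula; it matches
# scipy.stats.norm.pdf(x, loc=mean, scale=sd) (scipy.stats is already imported as st).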
def normal_distribution(number_of_steps, z_score_range, mean=0, sd=1, above_k_value=None, below_k_value=None):
# This creates a normal distribution with a certain number of steps. The motivation for using number of steps is
# that it is related to the number of operations. It can make all y values 0, for x below k_value
two_d_distribution = []
bound = z_score_range * sd
number = number_of_steps
increment = bound / number
if above_k_value is not None:
above_n = ((0.5 * z_score_range * sd) + above_k_value - mean) / increment
else:
above_n = 0
if below_k_value is not None:
below_n = ((0.5 * z_score_range * sd) + below_k_value - mean) / increment
else:
below_n = 0
x_start = mean - (bound / 2)
x = round(x_start, ROUND_NUMBER)
for num in range(number + 1):
x_y_pair = [0, 0]
go_ahead = True
if above_k_value is not None:
if num < above_n:
x_y_pair = [x, 0]
go_ahead = False
if below_k_value is not None:
if num > below_n:
x_y_pair = [x, 0]
go_ahead = False
if go_ahead:
x_y_pair = [x, f_norm(x, mean, sd)]
two_d_distribution.append(x_y_pair)
x = round(x_start + (increment * (num + 1)), ROUND_NUMBER)
two_d_distribution[0] += [['increment', increment], ['number', number], ['bound', bound], ['mean', mean],
['sd', sd]]
return two_d_distribution
def one_offspring_distribution(par_distribution, index_num, reg_coefficient, sd_reg_coefficient, above_k_value=None,
below_k_value=None):
# we need a function that takes a value from the parent distribution and multiplies every value in the offspring
# distribution by that value, essentially scaling it by that value.
# Also the x values in the offspring distribution need to be shifted by r * z_p
parent_mean = par_distribution[0][5][1]
shift = reg_coefficient * (par_distribution[index_num][0] - parent_mean)
offspring_mean = parent_mean + shift
parent_sd = par_distribution[0][6][1]
offspring_sd = sd_reg_coefficient * parent_sd
scale_factor = par_distribution[index_num][1]
number = par_distribution[0][3][1]
z_score_range = par_distribution[0][4][1] / offspring_sd
offspring_distribution = normal_distribution(number, z_score_range, offspring_mean, offspring_sd, above_k_value,
below_k_value)
for row in offspring_distribution:
row[1] *= scale_factor
offspring_distribution[0] += [['parent mean', parent_mean]]
return offspring_distribution
def offspring_distributions(par_distribution, reg_coefficient, sd_reg_coefficient, above_k_v_p=None, below_k_v_p=None,
above_k_v_o=None, below_k_v_o=None):
parent_increment = par_distribution[0][2][1]
parent_bound = par_distribution[0][4][1]
parent_mean = par_distribution[0][5][1]
if above_k_v_p is not None:
above_num = (above_k_v_p - parent_mean + (parent_bound * 0.5)) / parent_increment
else:
above_num = 0
if below_k_v_p is not None:
below_num = (below_k_v_p - parent_mean + (parent_bound * 0.5)) / parent_increment
else:
below_num = 0
all_offspring_distributions = []
for index in range(len(par_distribution)):
go_ahead = True
if above_k_v_p is not None:
if index < above_num:
go_ahead = False
if below_k_v_p is not None:
if index > below_num:
go_ahead = False
if go_ahead:
all_offspring_distributions.append(one_offspring_distribution(par_distribution, index, reg_coefficient,
sd_reg_coefficient, above_k_v_o, below_k_v_o))
# add the parent area to the top of offspring distributions
parent_area = area_under_one_distribution(par_distribution)
all_offspring_distributions[0][0] += [['parent area', parent_area]]
return all_offspring_distributions
# SUPERIMPOSED DISTRIBUTION FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def superimposed_offspring_distribution(distributions):
set_of_x = set()
for distribution in distributions:
set_of_x.update([row[0] for row in distribution])
list_of_x = sorted(list(set_of_x))
superimposed_distribution = [[x, 0] for x in list_of_x]
for superimposed_row in superimposed_distribution:
value = superimposed_row[0]
for distribution in distributions:
for row in distribution:
if row[0] == value:
superimposed_row[1] += row[1]
# the below increment is wrong in a lot of cases, but it doesn't matter because we never use it
increment = round(superimposed_distribution[1][0] - superimposed_distribution[0][0], ROUND_NUMBER)
parent_increment = distributions[0][0][2][1]
parent_mean = distributions[0][0][7][1]
parent_area = distributions[0][0][8][1]
# Jan 2020
parent_number = distributions[0][0][3][1]
parent_bound = distributions[0][0][4][1]
superimposed_distribution[0] += [['increment', increment], ['parent increment', parent_increment],
['parent mean', parent_mean], ['parent area', parent_area], # Jan 2020
['parent number', parent_number], ['parent bound', parent_bound]]
return superimposed_distribution
def normalized_superimposed_distribution_to_parent_increment(superimposed_distribution):
parent_increment = superimposed_distribution[0][3][1]
parent_mean = superimposed_distribution[0][4][1]
smallest_x = superimposed_distribution[0][0]
n = int(abs(smallest_x - parent_mean) / parent_increment)
par_inc_norm_superimposed_distribution = []
for num in range((2 * n) + 1):
x_value_prev = round((num - n - 1) * parent_increment, ROUND_NUMBER)
x_value = round((num - n) * parent_increment, ROUND_NUMBER)
par_inc_norm_superimposed_distribution.append([x_value, 0])
for row in superimposed_distribution:
if x_value_prev < row[0] <= x_value:
par_inc_norm_superimposed_distribution[num][1] += row[1]
# ideally we'd like to stop this loop to stop once row[0] is greater than the x_value
par_inc_norm_superimposed_distribution[0] += [['increment', parent_increment]]
par_inc_norm_superimposed_distribution[0] += superimposed_distribution[0][3:]
return par_inc_norm_superimposed_distribution
def final_superimposed_distribution_all_not_area_adj(parent_distribution, reg_coefficient, sd_reg_coefficient):
offspring_distributions_all = offspring_distributions(parent_distribution, reg_coefficient, sd_reg_coefficient)
super_offspring_distribution_all = superimposed_offspring_distribution(offspring_distributions_all)
par_inc_super_offspring_distribution_all = \
normalized_superimposed_distribution_to_parent_increment(super_offspring_distribution_all)
return par_inc_super_offspring_distribution_all
def normalized_superimposed_distribution_to_parent_area(superimposed_distribution, area_scale_factor):
par_area_norm_superimposed_distribution = \
[[row[0], row[1] / area_scale_factor] for row in superimposed_distribution]
par_area_norm_superimposed_distribution[0] += superimposed_distribution[0][2:]
return par_area_norm_superimposed_distribution
def final_superimposed_distribution(parent_distribution, reg_coefficient, sd_reg_coefficient, above_k_v_p=None,
below_k_v_p=None, above_k_v_o=None, below_k_v_o=None):
offspring_distributions_ = offspring_distributions(parent_distribution, reg_coefficient, sd_reg_coefficient,
above_k_v_p, below_k_v_p, above_k_v_o, below_k_v_o)
super_offspring_distribution = superimposed_offspring_distribution(offspring_distributions_)
par_inc_super_offspring_distribution = \
normalized_superimposed_distribution_to_parent_increment(super_offspring_distribution)
par_inc_super_offspring_distribution_all = \
final_superimposed_distribution_all_not_area_adj(parent_distribution, reg_coefficient, sd_reg_coefficient)
parent_area_factor = area_scale_factor_entire(par_inc_super_offspring_distribution_all)
par_area_super_offspring_distribution = \
normalized_superimposed_distribution_to_parent_area(par_inc_super_offspring_distribution, parent_area_factor)
return par_area_super_offspring_distribution
def final_superimposed_distribution_all_area_adj(parent_distribution, reg_coefficient, sd_reg_coefficient):
par_inc_super_offspring_distribution_all = \
final_superimposed_distribution_all_not_area_adj(parent_distribution, reg_coefficient, sd_reg_coefficient)
parent_area_factor = area_scale_factor_entire(par_inc_super_offspring_distribution_all)
par_area_super_offspring_distribution_all = \
normalized_superimposed_distribution_to_parent_area(par_inc_super_offspring_distribution_all,
parent_area_factor)
return par_area_super_offspring_distribution_all
# AREA FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def area_scale_factor_entire(entire_superimposed_distribution):
parent_area = entire_superimposed_distribution[0][5][1]
superimposed_distribution_area = area_under_one_distribution(entire_superimposed_distribution)
return superimposed_distribution_area / parent_area
def area_under_one_distribution(one_distribution):
increment = one_distribution[0][2][1]
return increment * (sum(row[1] for row in one_distribution))
def area_under_distributions(distributions):
area = 0
for distribution in distributions:
area += area_under_one_distribution(distribution)
return area
# CONVERSION FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def percentile_to_value(percentile, distribution, distribution_sd=None):
if distribution[0][6][0] == 'sd':
return (distribution[0][6][1] * st.norm.ppf(percentile)) + distribution[0][5][1]
else:
if distribution_sd is not None:
standard_dev = distribution_sd
else:
standard_dev = st_dev_of_distribution(distribution)
return (standard_dev * st.norm.ppf(percentile)) + distribution[len(distribution) // 2][0]
def sd_to_value(sd, distribution, distribution_sd=None):
if distribution[0][6][0] == 'sd':
return (distribution[0][6][1] * sd) + distribution[0][5][1]
else:
if distribution_sd is not None:
standard_dev = distribution_sd
else:
standard_dev = st_dev_of_distribution(distribution)
return (standard_dev * sd) + distribution[len(distribution) // 2][0]
def z_score_to_index(z_score, number_of_steps, z_score_range):
z_to_index_conversion = number_of_steps / z_score_range
z_to_travel = z_score + (z_score_range / 2)
return int((z_to_travel * z_to_index_conversion) + 0.5)
# PLOTTING FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def plot_distributions(distributions, label=None):
all_x = []
all_y = []
for distribution in distributions:
x = []
y = []
for row in distribution:
x.append(row[0])
y.append(row[1])
all_x.append(x)
all_y.append(y)
for dist_num in range(len(all_x)):
plt.plot(all_x[dist_num], all_y[dist_num])
def plot_distribution(distribution, label=None):
x = [row[0] for row in distribution]
y = [row[1] for row in distribution]
    plt.plot(x, y, label=label)
def plot_generations_sd(generations):
sd_list = []
for generation in generations:
sd_list.append(st_dev_of_distribution(generation))
x_label_list = list(range(len(generations)))
x_generation_labels = [str(value + 1) for value in x_label_list]
plt.xlabel('generation')
plt.ylabel('standard deviation')
plt.xticks(x_label_list, x_generation_labels)
plt.plot(sd_list, '-o')
def bar_graph_step(step_list, step_labels=None):
num_groups = len(step_list[0][1])
if step_labels is None:
step_labels = list(range(1, num_groups + 1))
percent_group_values = []
for i in range(len(step_list[0][1])):
values_list = [row[1][i][1] for row in step_list]
percent_group_values.append(values_list)
one_or_zero = num_groups % 2
for num in range(len(percent_group_values[0]) - one_or_zero - 1, -1, -1):
extra_values = []
for row in percent_group_values:
extra_values.append(row[num])
extra_values.reverse()
for i in range(len(percent_group_values)):
percent_group_values[i].append(extra_values[i])
pal = ['xkcd:light navy blue', 'xkcd:windows blue', 'xkcd:turquoise blue', 'xkcd:carolina blue', 'xkcd:light blue']
values_sum_list = [0] * len(percent_group_values[0])
plt.ylim(0, 1)
for j in range(len(percent_group_values)):
if num_groups <= len(pal):
plt.bar(step_labels, percent_group_values[j], bottom=values_sum_list, color=pal[j], alpha=1)
else:
plt.bar(step_labels, percent_group_values[j], bottom=values_sum_list, alpha=1)
for a, b, c in zip(step_labels, values_sum_list, percent_group_values[j]):
num = (b + c / 2) - 0.02
plt.text(a, num, ' ' + "{:0.0%}".format(c), va='bottom', ha='center', color='w', size=15)
for i in range(len(values_sum_list)):
values_sum_list[i] += percent_group_values[j][i]
# PROPORTION ATTRIBUTABLE FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def proportion_attributable_value(parent_distribution, r_mean, r_sd, above_k_p=None, below_k_p=None, above_k_o=None,
below_k_o=None, area_all_distributions=None):
if area_all_distributions is None:
all_distributions = offspring_distributions(parent_distribution, r_mean, r_sd, above_k_v_o=above_k_o,
below_k_v_o=below_k_o)
area_all_distributions = area_under_distributions(all_distributions)
return select_over_all(parent_distribution, r_mean, r_sd, above_k_p, below_k_p, above_k_o, below_k_o,
area_all_distributions)
def proportion_attributable_standard_deviation(parent_distribution, r_mean, r_sd, above_k_p=None, below_k_p=None,
above_k_o=None, below_k_o=None, area_all_distributions=None, same=False):
k_list = [above_k_p, below_k_p, above_k_o, below_k_o]
for i in range(len(k_list)):
if same:
if k_list[i] is not None:
k_list[i] = sd_to_value(k_list[i], parent_distribution)
else:
return True # come back to this!
return proportion_attributable_value(parent_distribution, r_mean, r_sd, k_list[0], k_list[1], k_list[2], k_list[3],
area_all_distributions)
def proportion_attributable_percentile(parent_distribution, r_mean, r_sd, above_k_p=None, below_k_p=None,
above_k_o=None, below_k_o=None, area_all_distributions=None,
offspring_distribution=None, offspring_sd=None, same=False,):
k_list = [above_k_p, below_k_p, above_k_o, below_k_o]
if same is False:
if offspring_distribution is None:
offspring_distribution = final_superimposed_distribution_all_area_adj(parent_distribution, r_mean, r_sd)
if offspring_sd is None:
standard_dev = st_dev_of_distribution(offspring_distribution)
else:
standard_dev = offspring_sd
for i in range(len(k_list)):
if same is True:
if k_list[i] is not None:
k_list[i] = percentile_to_value(k_list[i], parent_distribution)
else:
if k_list[i] is not None:
if i <= 1:
k_list[i] = percentile_to_value(k_list[i], parent_distribution)
elif i >= 2:
k_list[i] = percentile_to_value(k_list[i], offspring_distribution, standard_dev)
return proportion_attributable_value(parent_distribution, r_mean, r_sd, k_list[0], k_list[1], k_list[2], k_list[3],
area_all_distributions)
def step_proportion_attributable_percentile(parent_distribution, reg_coefficient, sd_reg_coefficient, percentile_step):
# offspring zones are in the first column of every row and the percent of offspring attributable to each parent
# zone is in the second column of every row
stepwise_percentile_list = []
above_k_o = 1 - percentile_step
below_k_o = 1
while below_k_o > 0.5:
step_list_offspring = [[above_k_o, below_k_o]]
above_k_p = 1 - percentile_step
below_k_p = 1
step_list_offspring_parents = []
above_k_o_v = percentile_to_value(above_k_o, parent_distribution)
below_k_o_v = percentile_to_value(below_k_o, parent_distribution)
# different
all_distributions = offspring_distributions(parent_distribution, reg_coefficient, sd_reg_coefficient,
above_k_v_o=above_k_o_v, below_k_v_o=below_k_o_v)
area_all_distributions = area_under_distributions(all_distributions)
while below_k_p > 0.001:
# different
step_list_parent = [[above_k_p, below_k_p], proportion_attributable_percentile(
parent_distribution, reg_coefficient, sd_reg_coefficient, above_k_p, below_k_p, above_k_o, below_k_o,
area_all_distributions)]
step_list_offspring_parents.append(step_list_parent)
above_k_p = round(above_k_p - percentile_step, ROUND_NUMBER)
below_k_p = round(below_k_p - percentile_step, ROUND_NUMBER)
step_list_offspring.append(step_list_offspring_parents)
stepwise_percentile_list.append(step_list_offspring)
above_k_o = round(above_k_o - percentile_step, ROUND_NUMBER)
below_k_o = round(below_k_o - percentile_step, ROUND_NUMBER)
return stepwise_percentile_list
def select_over_all(parent_distribution, r_mean, r_sd, above_k_p=None, below_k_p=None, above_k_o=None, below_k_o=None,
area_all_distributions=None):
select_distributions = offspring_distributions(parent_distribution, r_mean, r_sd, above_k_p, below_k_p, above_k_o,
below_k_o)
area_select_distributions = area_under_distributions(select_distributions)
# return area_all_distributions
return area_select_distributions / area_all_distributions
def step_tree_question_z_score(parent_distribution, r_mean, r_sd, z_score_increment, z_score_bound):
z_score = - z_score_bound / 2
proportion_list = []
while z_score <= z_score_bound / 2:
proportion = proportion_attributable_standard_deviation(parent_distribution, r_mean, r_sd,
below_k_p=z_score, above_k_o=z_score)
proportion_list.append(proportion)
z_score += z_score_increment
return proportion_list
# PROPORTION DESTINED FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def proportion_destined_value(parent_distribution, r_mean, r_sd, above_k_p=None, below_k_p=None, above_k_o=None,
below_k_o=None, area_all_distributions=None):
if area_all_distributions is None:
all_distributions = offspring_distributions(parent_distribution, r_mean, r_sd, above_k_v_p=above_k_p,
below_k_v_p=below_k_p)
area_all_distributions = area_under_distributions(all_distributions)
return select_over_all(parent_distribution, r_mean, r_sd, above_k_p, below_k_p, above_k_o, below_k_o,
area_all_distributions)
def proportion_destined_percentile(parent_distribution, r_mean, r_sd, above_k_p=None, below_k_p=None,
above_k_o=None, below_k_o=None, area_all_distributions=None):
k_list = [above_k_p, below_k_p, above_k_o, below_k_o]
for i in range(len(k_list)):
if k_list[i] is not None:
k_list[i] = percentile_to_value(k_list[i], parent_distribution)
return proportion_destined_value(parent_distribution, r_mean, r_sd, k_list[0], k_list[1], k_list[2], k_list[3],
area_all_distributions)
def step_proportion_destined_percentile(parent_distribution, reg_coefficient, sd_reg_coefficient, percentile_step):
# parent zones are in the first column of every row and the percent of the parent zone's offspring that are destined
# to each offspring zone in the second column of every row
stepwise_percentile_list = []
above_k_p = 1 - percentile_step
below_k_p = 1
while below_k_p > 0.5:
step_list_parent = [[above_k_p, below_k_p]]
above_k_o = 1 - percentile_step
below_k_o = 1
step_list_parents_offspring = []
above_k_p_v = percentile_to_value(above_k_p, parent_distribution)
below_k_p_v = percentile_to_value(below_k_p, parent_distribution)
all_distributions = offspring_distributions(parent_distribution, reg_coefficient, sd_reg_coefficient,
above_k_v_p=above_k_p_v, below_k_v_p=below_k_p_v)
area_all_distributions = area_under_distributions(all_distributions)
while below_k_o > 0.001:
step_list_offspring = [[above_k_o, below_k_o], proportion_destined_percentile(
parent_distribution, reg_coefficient, sd_reg_coefficient, above_k_p, below_k_p, above_k_o, below_k_o,
area_all_distributions)]
step_list_parents_offspring.append(step_list_offspring)
above_k_o = round(above_k_o - percentile_step, ROUND_NUMBER)
below_k_o = round(below_k_o - percentile_step, ROUND_NUMBER)
step_list_parent.append(step_list_parents_offspring)
stepwise_percentile_list.append(step_list_parent)
above_k_p = round(above_k_p - percentile_step, ROUND_NUMBER)
below_k_p = round(below_k_p - percentile_step, ROUND_NUMBER)
return stepwise_percentile_list
# REPRODUCING FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def final_super_to_parent(final_super_distribution):
super_max_index = len(final_super_distribution) - 1
super_parent_max_index = final_super_distribution[0][6][1]
final = []
# If it's bigger than the parent, make it only as big as the bound
if super_max_index > super_parent_max_index:
super_start_index = (super_max_index - super_parent_max_index) // 2
super_end_index = super_start_index + super_parent_max_index + 1
# If's it's equal to or smaller than the parent, make it as big as it is already
else:
super_start_index = 0
super_end_index = len(final_super_distribution)
for row_num in range(super_start_index, super_end_index):
final_row = []
for column_num in range(2):
final_row.append(final_super_distribution[row_num][column_num])
final.append(final_row)
mid_index = (len(final) - 1) // 2
increment = final_super_distribution[0][2][1]
number = len(final) - 1
bound = final[-1][0] - final[0][0]
mean = final[mid_index][0]
st_dev = st_dev_of_distribution(final_super_distribution)
final[0] += [['increment', increment], ['number', number], ['bound', bound], ['mean', mean], ['sd', st_dev]]
return final
def st_dev_of_distribution(distribution):
mid_index = (len(distribution) - 1) // 2
mean = distribution[mid_index][0]
weights = [value[1] for value in distribution]
x = [value[0] for value in distribution]
sum_of_sq = 0
for i in range(len(weights)):
sum_of_sq += weights[i] * ((x[i] - mean) ** 2)
n = sum(weights)
st_dev = (sum_of_sq / n) ** 0.5
return st_dev
# NOT USED FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# def z_erf(num):
# return erf(num / (2 ** 0.5))
```
#### File: jessebmurray/polygenic/model_verification.py
```python
import scipy.stats as st
import numpy as np
import matplotlib.pyplot as plt
def normal_qq(data):
"""Get the Q-Q for the normal distribution.
Returns the theoretical values and the order statistics to be plotted against
each other.
For a normal distribution, we expect Phi(x_(k)) (the cdf of the kth order
statistic) to be approximately k / (n+1).
Hence, the theoretical z-score should be Phi^-1( k/(n+1) ). That is, the inverse
    cdf of k/(n+1). To convert the theoretical z-score to the theoretical (actual)
x-score, we multiply by the population standard deviation and add the population
mean.
The data argument must be a numpy array.
"""
# Get the number of data points
n = data.size
# Get the k values (for the order statistics) from 1 through n
k = np.arange(1, n + 1)
# Get the population standard deviation
sigma = data.std()
# Get the population mean
mu = data.mean()
# Calculate the theoretical data values
theor = sigma * st.norm.ppf(k / (n + 1)) + mu
# Return the theoretical values, and the order statistics
return theor, np.sort(data)
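# Minimal usage sketch (not part of the original module); the helper name and its
# defaults are illustrative only. It draws a normal sample and checks that the Q-Q
# points returned by normal_qq are tightly correlated, i.e. close to the y = x line.
def _demo_normal_qq(seed=0, n=500):
    rng = np.random.default_rng(seed)
    sample = rng.normal(loc=0.0, scale=1.0, size=n)
    theor, observed = normal_qq(sample)
    # For normally distributed data this correlation should be very close to 1.
    return np.corrcoef(theor, observed)[0, 1]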
def f_test(numer, denom):
"""Calculate the F test and the corresponding p-value for a
numerator and denominator.
The numerator and denominator arrays can be numpy arrays or lists."""
numer = np.array(numer)
denom = np.array(denom)
# Calculate F test statistic
f = np.var(numer, ddof=1) / np.var(denom, ddof=1)
# Define the degrees of freedom numerator
dfn = numer.size - 1
# Define the degrees of freedom denominator
dfd = denom.size - 1
# Get the p-value of the F test statistic
p = 1 - st.f.cdf(f, dfn, dfd)
return {'statistic': f, 'pvalue': p}
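# Example (hypothetical data): f_test([1.1, 2.3, 0.2, 1.7], [0.9, 1.0, 1.1, 1.0])
# returns {'statistic': ..., 'pvalue': ...}, where the statistic is the ratio of the
# two sample variances (ddof=1) and the p-value is the upper tail of F(3, 3).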
def check_normality(array, rn=5):
print("""
The null hypothesis for all of these tests is that
the population is drawn from a normal distribution.
Thus, the p-values should all be greater than 0.05.""", end='\n\n')
print('Skew =', np.round(st.skew(array), rn))
print(st.skewtest(array), end='\n\n')
print('Kurtosis =', np.round(st.kurtosis(array), rn))
print(st.kurtosistest(array), end='\n\n')
print('D\'Agostino and Pearson',
st.normaltest(array), sep='\n', end='\n\n')
# Plotting functions
def scatter_plot(x, y, lim=4):
"""Simple square scatter plot with light grid lines and hollow blue circular
data points. The limit (lim) argument provides the upper and lower bound of the
x and y axes for the (square) plot. """
plt.figure(figsize=(6, 6))
plt.scatter(x, y, alpha=0.5, facecolors='none', edgecolors='#1f77b4')
plt.grid(alpha=0.5)
plt.xlim([-lim, lim])
plt.ylim([-lim, lim])
plt.xlabel('Parent score')
plt.ylabel('Child score')
# Make the plot square
plt.gca().set_aspect('equal', adjustable='box')
def plot_normal_qq(data, lim=3.5):
"""Plots the theoretical values (x-axis) against the order statistics (y-axis)
to see if the points lie on an approximate straight line (with gradient
population SD and intercept population mean).
The limit (lim) argument provides the upper and lower bound of the x and y
axes for the (square) plot."""
plt.figure(figsize=(5, 5))
x_theor, x_sample = normal_qq(data)
plt.plot([-5, 5], [-5, 5], color='grey')
plt.scatter(x_theor, x_sample, alpha=0.6,
facecolors='none', edgecolors='#1f77b4')
plt.xlim([-lim, lim])
plt.ylim([-lim, lim])
plt.grid(alpha=0.3)
# Make the plot square
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel('Theoretical')
plt.ylabel('Observed')
plt.tight_layout()
def plot_residuals_by_parent(true_bins, resid_means, resid_cis):
plt.figure(figsize=(5, 4))
plt.plot(true_bins, resid_means, color='black', linewidth=2)
plt.errorbar(true_bins, resid_means, yerr=resid_cis.T,
color='grey', alpha=1, linewidth=1.4)
plt.axhline(y=0, color='grey')
plt.grid(alpha=0.3)
plt.xlabel('Parent Score')
plt.ylabel('Child Residual')
plt.tight_layout()
```
#### File: jessebmurray/polygenic/probability_kernels.py
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
# Basic model functions
def pdf(x_i, sigma_i=1, mu=0):
"""Returns the marginal (population) pdf of X_i ~ Normal(mu, sigma_i^2)."""
return st.norm.pdf(x_i, scale=sigma_i, loc=mu)
def stable_rs(r):
"""Calculates r_s from r under stable population variance, where
r^2 + r_s^2 = 1"""
return np.sqrt(1 - np.square(r))
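# Example: stable_rs(0.6) returns 0.8, since 0.6**2 + 0.8**2 == 1.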
# Conditional descendant distribution parameters
def get_mu_tilda(x_i, r, n):
"""Calculates the conditional descendant normal distribution *expectation*
for generation-gap n.
Latex equation:
tilde{\mu}_{i+n} = r^n X_i
(See the paper for the derivation.)"""
return (r**n) * x_i
def get_sigma_tilda(sigma_i, r, rs, n):
"""Calculates the conditional descendant normal distribution *standard deviation*
(SD) for generation-gap n.
Latex equation for the variance (square of the SD):
tilde{\sigma}_{i+n}^2 = [(r^2+r_s^2)^n - r^{2n}] \sigma_i^2
(See the paper for the derivation.)"""
# Get the added part of the square root
add_part = (np.square(r) + np.square(rs)) ** n
# Get the subtracted part of the square root
subtract_part = r ** (2*n)
return sigma_i * np.sqrt(add_part - subtract_part)
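# Under stable population variance (r**2 + rs**2 == 1) and |r| < 1, the expression
# above reduces to sigma_i * sqrt(1 - r**(2*n)), which approaches sigma_i as n grows.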
# Ancestor and descendant bounds
def get_percentile_bounds(m):
"""Returns an m x 2 percentile-bounds matrix of the lower and upper bounds
of the percentiles.
For example: (0, 0.2),.., (0.8, 1). Where there are m equal-size continuous
percentile sets.
See the "Percentile transition matrices" section of the paper, where the percentile
sets are denoted Q_1, Q_2,..., Q_m. Here, row j corresponds to Q_j.
See the `test_percentile_bounds()` function for an example output."""
lower_percentiles = np.linspace(start=0, stop=1, num=m, endpoint=False)
upper_percentiles = lower_percentiles + (1 / m)
return np.column_stack((lower_percentiles, upper_percentiles))
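# For m = 5, the returned matrix is (up to floating point):
# [[0.0, 0.2], [0.2, 0.4], [0.4, 0.6], [0.6, 0.8], [0.8, 1.0]]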
def get_real_bounds(m, sigma):
"""Returns an m x 2 real-bounds matrix of the lower and upper real-number values.
Obtains a percentile-bounds matrix and converts to the real line.
Uses the normal `ppf` or percent point function (inverse of cdf) for the conversion.
Assumes for simplicity a location (population mean) of zero (standardization).
Note that percentiles of 0 and 1 are converted to -infinity and +infinity,
respectively."""
percentile_bounds = get_percentile_bounds(m=m)
return st.norm.ppf(q=percentile_bounds, loc=0, scale=sigma)
def expand_real_bounds(real_bounds, x_i, sigma_i, r, rs, n):
"""Converts real bounds into z-scores of the conditional distribution(s) of x_i.
That is, it converts the score relative to the conditional descendant distribution into
z-scores relative to the conditional distributions of the ancestor score(s) x_i, which
can be a scalar or vector.
This is the same as, in the paper (D_ - mu_tilda) / sigma_tilda in the
P_n(D, x_i) equation, which is calculated for each D_min and D_max in the real_bounds.
Note that the real_bounds are generally descendant_bounds, which are real values
corresponding to percentiles of the marginal (population) descendant distribution.
That distribution is normal with 0 mean and sigma_n SD.
We input sigma_i, which is the SD of the ancestor distribution, it is needed to
calculate sigma_tilda, which is the same for each x_i.
The conditional means are shaped into three dimensions. We do this because the
real_bounds is an (m x 2) matrix. The size of x_i will be n_iters in the get_matrix
function. Then, when we subtract the conditional means from the real_bounds,
we get an (n_iters x m x 2) array. That is, each 'row' (0th dimension) is a
conditionalized real_bound.
"""
# Get the conditional mean (mu_tilda), which has the same shape as x_i
# (scalar or vector)
mu_tilda = get_mu_tilda(x_i=x_i, r=r, n=n)
# Reshape mu_tilda into three dimensions, with as many rows (0th dimension)
# as needed to fit in the size of mu_tilda (1 if scalar, more if vector)
mu_tilda = np.reshape(mu_tilda, (-1, 1, 1))
# Get the conditional SD (sigma_tilda), which is the same for all conditional
# descendant distributions. That is, sigma_tilda is a scalar.
sigma_tilda = get_sigma_tilda(sigma_i, r, rs, n)
# Return the (n_iters x m x 2) array
return (real_bounds - mu_tilda) / sigma_tilda
# State to set probability
def get_state_set(m_descendant, x_i, r, rs, n, sigma_i):
"""
    Calculates the state to set probabilities for m_descendant equally spaced
(by percentile) sets, where a set is referred to in the paper as D.
This function carries out the P_n(D, x_i) calculation from the paper for each D.
The input x_i is a vector (of size n_iters) or could even be a scalar. In the
get_matrix function, x_i is a vector of evenly spaced ancestor states over an
ancestor bound. In the paper, this is denoted by x_i \in A.
For an (m_descendant x 2) real-bound matrix, or 1 by 2 element thereof,
returns the state to set probability.
    Requires the right element of each 1 by 2 row of the real-bound matrix to be greater
    than the left element (tested elsewhere)."""
# SD of the marginal (population) descendant distribution
sigma_n = np.sqrt((np.square(r) + np.square(rs)) ** n) * sigma_i
# Calculate the real descendant bounds
descendant_bounds = get_real_bounds(m=m_descendant, sigma=sigma_n)
# Get the expanded (conditionalized) bounds according to x_i
expanded_bounds = expand_real_bounds(descendant_bounds, x_i, sigma_i, r, rs, n)
# Convert z-score to cdf
expanded_bounds = st.norm.cdf(expanded_bounds)
# Take the difference (along the last axis, which has size 2)
# This gets the area/probability between the m lower and upper bounds for each x_i
probabilities = np.diff(expanded_bounds)
# Remove the axis of length one (the last one which was collapsed when taking diff)
probabilities = np.squeeze(probabilities)
# Return the conditional probabilities scaled by the densities of the x_i
# The output is transposed so that it is a matrix of shape (m_descendant x n_iters)
return probabilities.T * pdf(x_i)
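# Illustrative shape check for get_state_set (the r, rs and x_i values below are arbitrary
# assumptions, mirroring the test values used further down, not values from the paper):
#   x_i_demo = np.linspace(-1, 1, num=7)
#   out = get_state_set(m_descendant=5, x_i=x_i_demo, r=0.5, rs=0.9, n=1, sigma_i=1)
#   out.shape  # (5, 7), i.e. (m_descendant x n_iters)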
# Percentile transition matrix
def trim_real_bounds(real_bounds, trim_score=5):
"""
Symmetrically trim the ends of a real_bounds matrix to an absolute-value trim_score.
    This is done so that numerical integration is possible for the tail bounds (numerical
    integration cannot be carried out over an infinite interval). The approximation works
    because the density at a substantial trim_score (distance) from the mean will be so low
    that integrating further adds an immaterial amount of area (probability).
    It should be noted that the trim_score is equal to the z-score if and only if the SD of
    the real_bounds is 1. This is the case for the ancestor_bounds in the get_matrix function.
"""
real_bounds[np.isneginf(real_bounds)] = -1 * trim_score
real_bounds[np.isposinf(real_bounds)] = trim_score
return real_bounds
def get_x_i_matrix(m_ancestor, trim_score, num_iters, sigma_i):
"""
    Obtain an (m_ancestor x num_iters) matrix, where each row is the vector
of x_i for each of the m_ancestor real sets (couples).
"""
# Calculate the bounds for the ancestor states
ancestor_bounds = get_real_bounds(m=m_ancestor, sigma=sigma_i)
ancestor_bounds = trim_real_bounds(ancestor_bounds, trim_score=trim_score)
# Get the line-space from the lower bound to upper bound for each
# of the m_ancestor couples.
x_i_matrix = np.linspace(ancestor_bounds[:, 0], ancestor_bounds[:, 1],
num=num_iters, axis=1)
return x_i_matrix
def get_matrix(r, rs, n=1, num_iters=100_000, trim_score=5, m_descendant=5, m_ancestor=5):
"""
Obtain an (m_descendant x m_ancestor) percentile transition matrix.
As described in the paper, m_ancestor = m_descendant. However, this function allows
for the flexibility of different m's, if desired.
There are num_iters number of iterations over the numerically calculated integrals of
each entry in the matrix. As there are m_descendant x m_ancestor entries in the matrix,
that means num_iters x m_descendant x m_ancestor total iterations.
"""
# Set sigma_i (the marginal ancestor distribution SD) to be equal to one
sigma_i = 1
x_i_matrix = get_x_i_matrix(m_ancestor=m_ancestor, trim_score=trim_score,
num_iters=num_iters, sigma_i=sigma_i)
# Initialize the percentile transition matrix
matrix = np.zeros((m_descendant, m_ancestor))
# Loop through the ancestor states, filling in the columns of the matrix one by one
for j in range(m_ancestor):
# Get the x_i vector for the jth ancestor set (out of m_ancestor)
# The resultant x_i has size num_iters
x_i = x_i_matrix[j]
# Calculate the state to set probabilities: P_n(D, x_i) in the paper
# for each of the (m_descendant) descendant states.
state_set = get_state_set(m_descendant=m_descendant, x_i=x_i, sigma_i=sigma_i,
r=r, rs=rs, n=n)
# Numerical integration of the probabilities to obtain the total probability/area
# within each element of column j of the percentile transition matrix
matrix[:, j] = np.trapz(state_set, x_i)
# End for loop
# Because we want to obtain the probability for each ancestor state (rather than
# the overall probability), we normalize to the probability of an ancestor state.
# This is the same as doing: matrix /= matrix.sum(axis=0)
ancestor_state_probability = 1 / m_ancestor
matrix /= ancestor_state_probability
return matrix
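# Example usage sketch (illustrative values; the r and rs below are assumptions, not
# results from the paper). Each column of the returned matrix should sum to ~1:
#   ptm = get_matrix(r=0.5, rs=0.9, n=1, num_iters=10_000)
#   ptm.sum(axis=0)       # approximately [1, 1, 1, 1, 1]
#   plot_matrix(ptm, n=1) # plot_matrix is defined below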
# Plotting functions
def plot_ax(ax, matrix, i=0, j=0, title=None, title_loc='left', x_label=True, child=False):
"""Plots a percentile transition matrix on an axis."""
from matplotlib.ticker import PercentFormatter
ancestors = ['Parent', 'Grandparent', 'Great-Grandparent', 'Great-Great-Grandparent',
'Great$^3$-Grandparent', 'Great$^4$-Grandparent']
# ancestors = ['Generation {}'.format(i) for i in range(1, 20)]
if title:
ax.set_title(title, fontsize=17, loc=title_loc)
if matrix.shape[1] == 5:
step_labels = ['Bottom', 'Second', 'Third', 'Fourth', 'Top']
if x_label:
ax.set_xlabel('{}\'s Quintile'.format(ancestors[i]), fontsize=15)
else:
if j >= 4:
ax.set_xlabel('{}\'s Quintile'.format(ancestors[i]), fontsize=15)
else:
ax.set_xlabel(' '.format(ancestors[i]), fontsize=15)
# ax.set_xlabel("Generation {} Quintile".format(i+1), fontsize=15)
if j % 2 == 0:
if child:
ax.set_ylabel('Cumulative Probability of Child\'s Quintile', fontsize=15)
else:
ax.set_ylabel('Cumulative Probability of Descendant\'s Quintile', fontsize=15)
else:
step_labels = list(range(1, matrix.shape[1] + 1))
pal = ['#c6dbef', '#9ecae1', '#6baed6', '#4292c6', '#2171b5'][::-1]
ax.set_ylim(0, 1)
values_sum_list = [1] * matrix.shape[1]
for j in range(len(matrix) - 1, -1, -1):
if len(matrix) <= 5:
ax.bar(step_labels, [- value for value in matrix[j]],
bottom=values_sum_list, color=pal[j])
else:
ax.bar(step_labels, [- value for value in matrix[j]],
bottom=values_sum_list)
for a, b, c in zip(step_labels, values_sum_list, [value for value in matrix[j]]):
if c >= 0.01:
num = (b - c / 2) - 0.018
color = 'w'
if j >= 2:
color = 'k'
round_str = "{:0.0%}"
if i > 3:
round_str = "{:0.1%}"
ax.text(a, num, ' ' + round_str.format(c),
va='bottom', ha='center', color=color, size=13, alpha=0.8)
for k in range(len(values_sum_list)):
values_sum_list[k] -= matrix[j][k]
ax.set_yticks(np.arange(0, 1.1, 0.1))
    ax.set_xticklabels(step_labels, fontsize=14)
ax.yaxis.set_major_formatter(PercentFormatter(1))
def plot_matrix(matrix, n=1, child=True, legend=True):
"""Plots a figure with only one percentile transition matrix."""
fig, axes = plt.subplots(1, 1, figsize=(13 * 0.95 * 0.75, 8 / 0.95 * 0.75))
plot_ax(ax=axes, matrix=matrix, i=n-1, child=child)
term = 'Descendant'
if matrix.shape[1] == 5:
if n == 1:
term = 'Child'
if legend:
legend = ['{} in the\nTop Quintile'.format(term), 'Fourth Quintile',
'Third Quintile', 'Second Quintile', 'Bottom Quintile']
fig.legend(legend, bbox_to_anchor=(1, 0.977), loc="upper left", fontsize=15)
plt.tight_layout()
def get_rv_rsv(mv):
"""Get the corresponding r vector and rs vector from a mobility vector"""
rv = 1 / np.sqrt(mv**2 + 1)
rsv = stable_rs(rv)
return rv, rsv
def report_mobility(mv, rv, rsv, i):
"""Give a 'report' of the mobility, and corresponding regression and residual
coefficients.
mv is short for mobility_vector (a vector of mobility values)"""
# If the mobility value is an integer, display it as an integer
if mv[i] % 1 == 0:
return "$m$ = {:.0f}, $r$ = {:.3f}, $s$ = {:.3f}".format(mv[i], rv[i], rsv[i])
# Otherwise, display the first decimal of the mobility value
else:
return "$m$ = {:.1f}, $r$ = {:.3f}, $s$ = {:.3f}".format(mv[i], rv[i], rsv[i])
# Functions for handling (Pearson) data
def get_percentiles(vector):
"""Convert an vector of data into percentiles"""
return st.rankdata(vector) / vector.size
def get_matrix_data(x, y, m_ancestor=5, m_descendant=5, return_raw=False):
"""Obtains the observed percentile transition matrix from data.
x is the ancestor values and y is the descendant values (typically parent-child
parallel vectors).
If return_raw = True, then the counts are returned, rather than the proportions.
To estimate the probability (and obtain an estimated percentile transition matrix)
it is necessary that return_raw = False.
"""
    # Create bins (representing percentiles) of the data
bins_ancestor = np.linspace(0, 1, m_ancestor, endpoint=False)
bins_descendant = np.linspace(0, 1, m_descendant, endpoint=False)
# Obtain the bin for each data-point based on its percentile
xb = np.digitize(get_percentiles(x), bins_ancestor)
yb = np.digitize(get_percentiles(y), bins_descendant)
# Initialize the percentile transition matrix
matrix = np.zeros((m_ancestor, m_descendant))
# Loop through the ancestor bins
for i in range(m_ancestor):
# Get the descendants of this ancestor bin
desc = xb[yb == i+1]
# Loop through the descendant bins
for j in range(m_descendant):
if return_raw:
# Get the total number of descendants in the
# ancestor bin, descendant bin pair
matrix[j, i] = np.sum(desc == j+1)
else:
# Get the proportion of descendants in the
# ancestor bin, descendant bin pair (approximates probability)
matrix[j, i] = np.mean(desc == j+1)
# End of for loop
return matrix
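# Example usage sketch with synthetic data (illustrative assumption: standardized
# parent/child scores with a correlation of 0.5):
#   rng = np.random.default_rng(0)
#   parent = rng.normal(size=5_000)
#   child = 0.5 * parent + np.sqrt(1 - 0.5**2) * rng.normal(size=5_000)
#   observed = get_matrix_data(parent, child, m_ancestor=5, m_descendant=5)
#   observed.sum(axis=0)  # each column of proportions sums to 1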
# Testing functions
def test_percentile_bounds():
expected = np.array([[0., 0.2],
[0.2, 0.4],
[0.4, 0.6],
[0.6, 0.8],
[0.8, 1.]])
assert np.allclose(get_percentile_bounds(m=5).ravel(), expected.ravel())
def test_expanded_real_bounds():
"""Test that this gives the correct shape. (Further tests can be added"""
x_i_trial = np.array([1, 2, 3])
rb = np.array([[-5., -0.84162123],
[-0.84162123, -0.2533471],
[-0.2533471, 0.2533471],
[0.2533471, 0.84162123],
[0.84162123, 5.]])
expand_real_bounds(real_bounds=rb, x_i=x_i_trial, sigma_i=1, r=0.5, rs=0.9, n=1)
assert (rb - np.reshape([1, 2, 3], (-1, 1, 1))).shape == (3, 5, 2)
def test_trim_real_bounds():
rb = get_real_bounds(m=5, sigma=1)
trim_score = 4
rb[0, 0] = -1 * trim_score
rb[-1, -1] = trim_score
assert (rb == trim_real_bounds(rb, trim_score)).all()
def test_functions():
test_percentile_bounds()
test_expanded_real_bounds()
test_trim_real_bounds()
test_functions()
print('Tests passed')
```
|
{
"source": "JesseBoise/covidconvene",
"score": 3
}
|
#### File: JesseBoise/covidconvene/main.py
```python
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib
import matplotlib.pyplot as plt
import mysql.connector as mcon
import pandas as pd
import tkinter as tk
import tkinter.ttk as ttk
connect_args = {
"host": "localhost",
"user": "root",
"passwd": "<PASSWORD>",
"database": "covid-data"
}
class Country:
ISO_Code = ""
Name = ""
def __init__(self, iso, name):
self.ISO_Code = iso
self.Name = name
def __repr__(self):
return f"{self.ISO_Code} ({self.Name})"
@staticmethod
def unpack_repr(value):
return value[:3]
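# Illustrative round trip for the repr/unpack pair above (hypothetical values):
#   c = Country("ZAF", "South Africa")
#   repr(c)                      -> "ZAF (South Africa)"
#   Country.unpack_repr(repr(c)) -> "ZAF"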
class Application(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self.title("CovidConvene")
self.create_widgets()
def create_widgets(self):
self.main_frame = tk.Frame(self, bg="white")
self.main_frame.pack(side="top", fill="both", expand=True)
self.header_frame = tk.Frame(self.main_frame)
self.header_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
if self.header_frame:
self.search_frame = tk.Frame(self.header_frame, bg="#fff")
self.search_frame.pack(side="left", fill="x", padx=5, pady=5, ipadx=12, ipady=8)
if self.search_frame:
self.iso_label = tk.Label(self.search_frame, text="Select a country to load", )
self.iso_label.pack()
iso_data = get_iso_data()
self.iso_select = ttk.Combobox(self.search_frame, values=iso_data)
self.iso_select.pack(side=tk.TOP)
self.isofilter_button = ttk.Button(self.search_frame, text="Update",
command=self.update_plot)
self.isofilter_button.pack(side=tk.TOP, pady=5)
self.data_viewtype = tk.StringVar()
self.current_viewtype = ""
self.viewtype_frame = tk.Frame(self.header_frame)
self.viewtype_frame.pack(side=tk.LEFT, padx=12, pady=12)
if self.viewtype_frame:
self.viewtype_bar_rb = ttk.Radiobutton(self.viewtype_frame, text="Bar", variable=self.data_viewtype, value="bar",
command=self.adjust_viewtype)
self.viewtype_bar_rb.pack(anchor=tk.W)
self.viewtype_line_rb = ttk.Radiobutton(self.viewtype_frame, text="Line", variable=self.data_viewtype, value="line",
command=self.adjust_viewtype)
self.viewtype_line_rb.pack(anchor=tk.W)
self.summary_tcases = tk.StringVar()
self.summary_ncases = tk.StringVar()
self.summary_tdeaths = tk.StringVar()
self.summary_ndeaths = tk.StringVar()
self.summary_frame = tk.Frame(self.header_frame)
self.summary_frame.pack(side=tk.RIGHT, ipadx=15)
if self.summary_frame:
# Total Cases Label
self.summary_totalcases_label = ttk.Label(self.summary_frame,
textvariable=self.summary_tcases)
self.summary_totalcases_label.pack(fill="x", expand=True)
# New Cases Label
self.summary_newcases_label = ttk.Label(self.summary_frame,
textvariable=self.summary_ncases)
self.summary_newcases_label.pack(fill="x", expand=True)
# Total Deaths Label
self.summary_totaldeaths_label = ttk.Label(self.summary_frame,
textvariable=self.summary_tdeaths)
self.summary_totaldeaths_label.pack(fill="x", expand=True)
# New Deaths Label
self.summary_newdeaths_label = ttk.Label(self.summary_frame,
textvariable=self.summary_ndeaths)
self.summary_newdeaths_label.pack(fill="x", expand=True)
def update_plot(self):
if self.iso_select.current() == -1:
return
iso = Country.unpack_repr(self.iso_select.get())
plotting = get_plot_data(iso)
if hasattr(self, "covid_plot"):
self.covid_plot.pack_forget()
        cols = get_columns_total(["total_cases", "new_cases", "total_deaths", "new_deaths"], iso)
self.summary_tcases.set(f"Total Cases: {cols[0]}")
self.summary_ncases.set(f"New Cases: {cols[1]}")
self.summary_tdeaths.set(f"Total Deaths: {cols[2]}")
self.summary_ndeaths.set(f"New Deaths: {int(cols[3])}")
self.covid_plot = self.create_matplotlib_plot(
plotting["dates"],
{x: plotting[x] for x in plotting if x != "dates"}
)
self.covid_plot.pack(side="top")
def adjust_viewtype(self):
if self.data_viewtype.get() != self.current_viewtype:
self.current_viewtype = self.data_viewtype.get()
self.update_plot()
def create_matplotlib_plot(self, x, y):
f = Figure(figsize=(9,7), dpi=100)
ax = f.subplots(nrows=len(y), ncols=1)
idx = 0
for key in y:
el1, el2 = zip(*y[key])
ax[idx].set_title(key.capitalize())
if self.data_viewtype.get() == "bar":
ax[idx].bar(x, el2, color="b")
ax[idx].bar(x, el1, color="r")
elif self.data_viewtype.get() == "line":
ax[idx].plot(x, el2, color="b")
ax[idx].plot(x, el1, color="r")
else:
ax[idx].plot(x, el2, color="b")
ax[idx].plot(x, el1, color="r")
idx += 1
        f.legend(labels=("Totals to date", "New occurrence on date"))
canvas = FigureCanvasTkAgg(f, self)
canvas.draw()
return canvas.get_tk_widget()
def refresh_matplotlib_plot(self, x, y):
self.covid_figure.clear()
ax = self.covid_figure.subplots(nrows=len(y), ncols=1)
idx = 0
for key in y:
el1, el2 = zip(*y[key])
if self.data_viewtype.get() == "bar":
ax[idx].bar(x, el1, color="r")
ax[idx].bar(x, el2, color="b")
elif self.data_viewtype.get() == "line":
ax[idx].plot(x, el1, color="r")
ax[idx].plot(x, el2, color="b")
idx += 1
canvas = FigureCanvasTkAgg(self.covid_figure, self)
canvas.draw()
return canvas.get_tk_widget()
def plot(x, y):
fig, ax = plt.subplots(nrows=len(y), ncols=1)
idx = 0
for key in y:
el1, el2 = zip(*y[key])
ax[idx].bar(x, el1)
ax[idx].bar(x, el2)
idx += 1
plt.show()
def get_iso_data():
con = mcon.connect(**connect_args)
cursor = con.cursor()
cursor.execute("""
SELECT DISTINCT(`ISO_CODE`), `COUNTRY_NAME`
FROM `global-tolls`
""")
data = []
for item in cursor.fetchall():
data.append(Country(item[0], item[1]))
return data
# return list(zip(*cursor.fetchall()))[0]
def get_columns_total(columns, iso):
if type(columns) is list:
con = mcon.connect(**connect_args)
cursor = con.cursor()
cols = ",".join(map(
lambda x: f"SUM({x})", columns
))
cursor.execute(f"""
SELECT {cols}
FROM `global-tolls`
WHERE `ISO_CODE` = %s
""", (iso,))
data = cursor.fetchall()
return data[0]
elif type(columns) is str:
pass
def get_plot_data(iso):
con = mcon.connect(**connect_args)
cursor = con.cursor()
cursor.execute("""
SELECT *
FROM `global-tolls`
WHERE `ISO_CODE` = %s
""", (iso,))
data_cols = ["dates", "cases", "deaths"]
plot_data = {x:[] for x in data_cols}
day = cursor.fetchone()
while day:
date = day[3]
new_cases = day[4]
total_cases = day[5]
new_deaths = day[6]
total_deaths = day[7]
plot_data["dates"].append(date)
plot_data["cases"].append((new_cases, total_cases))
plot_data["deaths"].append((new_deaths, total_deaths))
day = cursor.fetchone()
plot_df = pd.DataFrame(plot_data, columns=data_cols)
return plot_df
if __name__ == "__main__":
matplotlib.use("TkAgg")
matplotlib.style.use("ggplot")
app = Application()
app.mainloop()
```
|
{
"source": "JesseBoise/tweetypy",
"score": 2
}
|
#### File: tweetypy/miner/views.py
```python
from django.shortcuts import render, redirect, reverse
from django.http import HttpResponse, JsonResponse, Http404
from mongoengine import Q
import datetime
from .models import Miner, Status
import json
import tweetymine
def index(request):
""" Show All potential miners on startup. """
surl = request.build_absolute_uri(reverse("minersearch"))
aurl = request.build_absolute_uri(reverse("mineradd"))
murl = request.build_absolute_uri(
reverse("minemanage", kwargs={"minerid": "0"}))
uurl = request.build_absolute_uri(reverse("minerupdate"))
latest_updated = Miner.objects.aggregate([
{"$sort": {"last_updated": -1}},
{"$project": {"last_updated": 1}}
]).next()
context = {
"last_updated": latest_updated["last_updated"],
"search_url": surl,
"add_url": aurl,
"miner_url": murl,
"update_url": uurl
}
return HttpResponse(render(request, "miner/index.html", context))
def add(request):
print("POST", request.POST)
handle = request.POST.get("handle")
miner = Miner.objects(handle=handle)
if len(miner) > 0:
return HttpResponse("Miner already exists.", status=304)
miner = Miner(handle=handle).save()
miner = json.loads(miner.to_json())
tweetymine.main()
return JsonResponse(miner)
def update_mine(request):
tweetymine.main()
latest_updated = Miner.objects.aggregate([
{"$sort": {"last_updated": -1}},
{"$project": {"last_updated": 1}}
]).next()
print(latest_updated["last_updated"])
data = {
"last_updated": latest_updated["last_updated"]
}
return JsonResponse(data)
def manage(request, minerid):
miner = Miner.objects(tid=str(minerid)).first()
if miner is None:
        raise Http404("Miner with ID does not exist.")
latest_updated = Miner.objects.aggregate([
{"$sort": {"last_updated": -1}},
{"$project": {"last_updated": 1}}
]).next()
tweets = Status.objects.filter(user__tid=str(minerid))
retweet_list = []
favourite_list = []
creation_dates = []
for idx, tweet in enumerate(tweets):
retweet_list.append(tweet["retweet_count"])
favourite_list.append(tweet["favourite_count"])
d = datetime.datetime.strptime(
tweet["creationDate"], "%a %b %d %H:%M:%S %z %Y")
creation_dates.append(d)
context = {
"last_updated": latest_updated["last_updated"],
"retweet_list": retweet_list,
"favourite_list": favourite_list,
"creation_dates": creation_dates,
"miner": miner,
"tweets": tweets
}
return HttpResponse(render(request, "miner/manage.html", context))
def search(request):
query = request.GET.get("q")
ajax = bool(request.GET.get("ajax"))
if query == "":
miners = Miner.objects.all().to_json()
else:
miners = Miner.objects.filter(
Q(handle__icontains=query) | Q(name__icontains=query)
).to_json()
if ajax:
return JsonResponse(json.loads(miners), safe=False)
```
|
{
"source": "JesseBonanno/IndeterminateBeam",
"score": 2
}
|
#### File: JesseBonanno/IndeterminateBeam/app.py
```python
import base64
import json
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_table
from dash_table.Format import Format, Scheme, Sign, Symbol
from indeterminatebeam.indeterminatebeam import (
Beam, Support
)
from indeterminatebeam.loading import (
PointLoad,
TrapezoidalLoad,
PointTorque
)
from datetime import datetime
import time
from indeterminatebeam.version import __version__
from indeterminatebeam.units import IMPERIAL_UNITS, METRIC_UNITS, UNIT_KEYS, UNIT_VALUES, default_units
from indeterminatebeam.data_validation import (
assert_list_contents,
assert_contents,
)
from dash_extensions import Download
from dash.exceptions import PreventUpdate
from plotly.io import to_html
import plotly.graph_objects as go
# the style arguments for the sidebar.
SIDEBAR_STYLE = {
'position': 'fixed',
'top': 0,
'left': 0,
'bottom': 0,
'width': '20%',
'padding': '40px 40px',
'background-color': '#f8f9fa'
}
# the style arguments for the main content page.
CONTENT_STYLE = {
'margin-left': '25%',
'margin-right': '5%',
    'padding': '20px 10px'
}
TEXT_STYLE = {
'textAlign': 'center',
'color': '#191970'
}
CARD_TEXT_STYLE = {
'textAlign': 'center',
'color': '#0074D9'
}
# side bar markdown text
about = dcc.Markdown(f'''
This webpage is a graphical user interface (GUI) for the opensource \
`IndeterminateBeam` Python package created using Dash.
For more, you can view the following:
* [](https://github.com/JesseBonanno/IndeterminateBeam)
The Python package
* [](https://indeterminatebeam.readthedocs.io/en/main/?badge=main)
The package documentation
* [](https://indeterminatebeam.readthedocs.io/en/main/theory.html#sign-convention)
The sign conventions used
* [](https://colab.research.google.com/github/JesseBonanno/IndeterminateBeam/blob/main/docs/examples/simple_demo.ipynb)
The Python based Jupyter Notebook examples
* [](https://doi.org/10.21105/jose.00111)
JOSE Article
''')
# the content for the sidebar
sidebar_content = html.Div(
[
about
]
)
sidebar = html.Div(
[
html.H2('About', style=TEXT_STYLE),
html.Hr(),
sidebar_content
],
style=SIDEBAR_STYLE,
)
# Copyright content for footer
copyright_ = dbc.Row(
[
dbc.Col(
dcc.Markdown("[](https://github.com/JesseBonanno/IndeterminateBeam/blob/main/LICENSE.txt)")),
dbc.Col(
dcc.Markdown("Copyright (c) 2020, <NAME>")),
dbc.Col(
dcc.Markdown("Contact: <EMAIL>")),
])
def create_table(id_, table, init, row_deletable = True):
if init:
data = [init]
# used to initialise no data for the query table
else:
data = []
table = dash_table.DataTable(
id=id_,
columns=[{
'name': d,
'id': d,
'deletable': False,
'renamable': False,
'type': table[d]['type'],
'format': Format(
symbol=Symbol.yes,
symbol_suffix=table[d]['units'])
} for d in table.keys()],
data=data,
editable=True,
row_deletable=row_deletable,
)
return table
def create_content(instruction_name, instructions, table, button_label="", button_id="", add_button = True):
if add_button:
_ = [
table,
html.Br(),
html.Button(button_label, id=button_id, n_clicks=0),
dbc.Collapse(
dbc.Card(dbc.CardBody(instructions)),
id=instruction_name,
),
]
else:
_ = [
table,
html.Br(),
dbc.Collapse(
dbc.Card(dbc.CardBody(instructions)),
id=instruction_name,
),
]
card = dbc.Card(
dbc.CardBody(_),
className="mt-3",
)
return card
# Properties for Beam Tab
beam_table_data = {
'Length': {
'init': 5,
'units': ' m',
'type': 'numeric'
},
"Young's Modulus": {
'init': 200 * 10**9,
'units': ' Pa',
'type': 'numeric'
},
"Second Moment of Area": {
'init': 9.05 * 10**-6,
'units': ' m4',
'type': 'numeric'
},
"Cross-Sectional Area": {
'init': 0.23,
'units': ' m2',
'type': 'numeric'
},
}
beam_table_init = {k: v['init'] for k, v in beam_table_data.items()}
beam_table = create_table('beam-table', beam_table_data, beam_table_init, row_deletable=False)
beam_instructions = dcc.Markdown('''
###### **Instructions:**
1. Specify the length of the beam
2. Specify the beam sectional properties as indicated for:
* Young's Modulus (E)
* Second Moment of Area (I)
* Cross-sectional Area (A)
Note: E and I will only affect the deflection unless a spring in the y direction is specified,
in which case they will also affect the load distribution. Where a spring in the x direction
is specified, E and A will affect the load distribution for the horizontal loads only.
''')
beam_content = create_content('beam_instructions', beam_instructions, beam_table, add_button=False)
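# Sketch of how a beam-table row is consumed later in analyse_beam (the values are the
# table defaults above; positional argument order follows the row's column order):
#   beam = Beam(5.0, 200 * 10**9, 9.05 * 10**-6, 0.23)   # Length, E, I, A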
# Properties for (Advanced) Support Tab
# Just do as a table but let inputs be
# R - Restraint, F- Free, or number for spring, Spring not an option for m.
support_table_data = {
'Coordinate': {
'init': 0,
'units': ' m',
'type': 'numeric'
},
"X": {
'init': 'R',
'units': ' N/m',
'type': 'any'
},
"Y": {
'init': 'R',
'units': ' N/m',
'type': 'any'
},
"M": {
'init': 'R',
'units': ' N/m',
'type': 'any',
}
}
support_table_init = {k: v['init'] for k, v in support_table_data.items()}
support_table = create_table('support-table', support_table_data, support_table_init)
support_instructions = dcc.Markdown('''
###### **Instructions:**
1. Specify the coordinate location of the support
2. For each direction specify one of the following:
* f or F - Indicates a free support
* r or R - Indicates a rigid support
* n - Indicates a stiffness of n (default unit N/m)
(where n is (generally) a positive number)
''')
support_content = create_content('support_instructions', support_instructions, support_table, "Add Support", 'support-rows-button')
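# Sketch of how an advanced-support row is interpreted later in analyse_beam
# (the row values below are illustrative assumptions):
#   {'Coordinate': 0, 'X': 'R', 'Y': '1000', 'M': 'F'}
#   -> Support(0.0, (1, 0, 0), ky=1000.0, kx=0)   # rigid in x, 1000 N/m spring in y, free in m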
# Basic support
basic_support_table_data = {
'Coordinate': {
'init': 0,
'type': 'numeric',
'presentation': 'input',
},
"Support": {
'init': 'fixed',
'type': 'any',
'presentation': 'dropdown',
}
}
basic_support_table_init = {k: v['init']
for k, v in basic_support_table_data.items()}
basic_support_table = dash_table.DataTable(
id='basic-support-table',
columns=[{
'name': d,
'id': d,
'deletable': False,
'renamable': False,
'type': basic_support_table_data[d]['type'],
'presentation': basic_support_table_data[d]['presentation'],
} for d in basic_support_table_data.keys()],
data=[basic_support_table_init],
editable=True,
row_deletable=True,
dropdown={
"Support": {
'options': [
{'label': 'Fixed', 'value': 'fixed'},
{'label': 'Pinned', 'value': 'pinned'},
{'label': 'Roller', 'value': 'roller'},
]
}
},
    # dropdowns aren't visible unless you add the code below.
# solution taken from https://github.com/plotly/dash-table/issues/221
# - reesehopkins commented on 29 Sep 2020
css=[{"selector": ".Select-menu-outer", "rule": "display: block !important"}],
)
basic_support_instructions = dcc.Markdown('''
###### **Instructions:**
1. Specify the coordinate location of the support
2. Specify the conventional support type from the dropdown.
''')
basic_support_content = dbc.Card(
dbc.CardBody(
[
basic_support_table,
html.Br(),
html.Button(
'Add Support',
id='basic-support-rows-button',
n_clicks=0),
dbc.Collapse(
[
html.Br(),
dbc.Card(
dbc.CardBody(basic_support_instructions))],
id="basic_support_instructions",
),
]),
className="mt-3",
)
# Properties for point_load Tab
point_load_table_data = {
'Coordinate': {
'init': 0,
'units': ' m',
'type': 'numeric'
},
"Force": {
'init': 0,
'units': ' N',
'type': 'numeric'
},
"Angle (deg)": {
'init': 90,
'units': '',
'type': 'numeric'
},
}
point_load_table_init = {k: v['init']
for k, v in point_load_table_data.items()}
point_load_table = create_table('point-load-table', point_load_table_data, point_load_table_init)
point_load_instructions = dcc.Markdown('''
###### **Instructions:**
1. Specify the coordinate location of the point load.
2. Specify the force (default units N)
3. Specify the load angle where:
* A positive force with an angle of 0 points horizontally to the right.
* A positive force with an angle of 90 points vertically in the
positive y direction chosen in the options tab (default downwards).
''')
point_load_content = create_content('point_load_instructions', point_load_instructions, point_load_table, 'Add Point Load', 'point-load-rows-button')
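# Sketch of how a point-load row maps to a load object in analyse_beam (numbers are
# illustrative; d_ is +1 or -1 depending on the positive-y-direction option):
#   {'Coordinate': 2, 'Force': 1000, 'Angle (deg)': 90}
#   -> PointLoad(1000.0, 2.0, d_ * 90.0)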
# Properties for point_torque Tab
point_torque_table_data = {
'Coordinate': {
'init': 0,
'units': ' m',
'type': 'numeric'
},
"Torque": {
'init': 0,
'units': ' N.m',
'type': 'numeric'
},
}
point_torque_table_init = {k: v['init']
for k, v in point_torque_table_data.items()}
point_torque_table = create_table('point-torque-table', point_torque_table_data, point_torque_table_init)
point_torque_instructions = dcc.Markdown('''
###### **Instructions:**
1. Specify the coordinate location of the point torque.
2. Specify the moment (default units N.m)
Note: A positive moment indicates an anti-clockwise moment direction.
''')
point_torque_content = create_content('point_torque_instructions', point_torque_instructions, point_torque_table, 'Add Point Torque', 'point-torque-rows-button')
# Properties for distributed_load Tab
distributed_load_table_data = {
'Start Coordinate': {
'init': 0,
'units': ' m',
'type': 'numeric'
},
'End Coordinate': {
'init': 0,
'units': ' m',
'type': 'numeric'
},
'Start Load': {
'init': 0,
'units': ' N/m',
'type': 'numeric'
},
'End Load': {
'init': 0,
'units': ' N/m',
'type': 'numeric'
},
}
distributed_load_table_init = {k: v['init']
for k, v in distributed_load_table_data.items()}
distributed_load_table = create_table('distributed-load-table', distributed_load_table_data, distributed_load_table_init)
distributed_load_instructions = dcc.Markdown('''
###### **Instructions:**
1. Specify the start and end locations of the distributed load.
2. Specify the start and end loads (default units N/m)
Note: A positive load acts in the positive y direction chosen
in the options tab (default downwards).
''')
distributed_load_content = create_content(
'distributed_load_instructions',
distributed_load_instructions,
distributed_load_table,
'Add Distributed Load',
'distributed-load-rows-button'
)
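# Sketch of how a distributed-load row maps to a load object in analyse_beam
# (numbers are illustrative):
#   {'Start Coordinate': 0, 'End Coordinate': 5, 'Start Load': 0, 'End Load': 2000}
#   -> TrapezoidalLoad(force=(0.0, 2000.0), span=(0.0, 5.0), angle=d_ * 90)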
# Properties for query tab
query_table_init = {
'Query coordinate': 0
}
query_table_data = {
'Query coordinate': {
'init': 0,
'units': ' m',
'type': 'numeric'
},
}
query_table = create_table('query-table',query_table_data, None)
query_instructions = dcc.Markdown('''
###### **Instructions:**
1. Specify a point of interest to have values annotated on the graph.
''')
query_content = create_content('query_instructions', query_instructions, query_table, 'Add Query', 'query-rows-button')
# Properties for results section
results_columns = [
{"name": "", "id": "val"},
{"name": 'Normal Force', "id": "NF"},
{"name": 'Shear Force', "id": "SF"},
{"name": 'Bending Moment', "id": "BM"},
{"name": 'Deflection', "id": "D"},
]
results_data = [
{'val': 'Max', 'NF': 0, 'SF': 0, 'BM': 0, 'D': 0},
{'val': 'Min', 'NF': 0, 'SF': 0, 'BM': 0, 'D': 0}
]
results_table = dash_table.DataTable(
id='results-table',
columns=results_columns,
data=results_data,
merge_duplicate_headers=True,
editable=False,
row_deletable=False,
style_cell={'textAlign': 'center'},
)
results_content = dbc.Collapse(
[
dbc.Card(
dbc.CardBody(
[
results_table,
]
),
className="mt-3",
),
],
id='results-collapse'
)
# Options
#button to reset options
#
reset_setting_button = dbc.Col(
[
dbc.Button(
"Reset Options",
id="reset-options-button",
className="mb-3",
color="info",
n_clicks=0,
block=True,
),
],
width=12
)
option_instructions = dcc.Markdown('''
###### **Instructions:**
Toggle options as desired.
1. Results Table:
- Choose to show or hide the table that summarises the maximum
and minimum effects determined over the beam
2. Support Input:
- Choose mode to use for support input where:
- Basic: Provides a dropdown for conventional supports
- Advanced: Allows for custom support configurations, as well
as spring supports
3. Positive y direction:
- Choose the positive y direction.
- Note: The Python package conventionally takes `UP` as the positive
direction for forces, as indicated in the package documentation.
Due to popular request, the option to change the positive direction
for y forces to downwards has been allowed. This is achieved by
reversing the angle direction of loading behind the scenes
(multiplying by negative 1), which can be seen in the hover labels.
4. Data points:
- Number of increments used for plotting graphs; a higher number
results in longer calculation times.
''')
def create_option(label, id_, options=[],default=None,option_dict = None):
if option_dict:
options = option_dict
else:
options=[{'label':a, 'value':a.lower()} for a in options]
option = dbc.FormGroup(
[
dbc.Label(label, html_for=id_, width=3),
dbc.Col(
dbc.RadioItems(
id=id_,
options=options,
value=default,
inline=True,
),
width=8,
),
],
row=True,
)
return option
option_support_input = create_option(
'Support Mode',
"option_support_input",
['Basic', 'Advanced'],
'basic'
)
option_default_support = create_option(
'Default Support Type',
'option_default_support',
['Fixed', 'Pinned', 'Roller'],
'fixed'
)
option_positive_direction_y = create_option(
'Positive y direction',
'option_positive_direction_y',
['Up', 'Down'],
'down'
)
option_result_table = create_option(
'Result Table',
'option_result_table',
['Hide', 'Show'],
'show'
)
option_data_point = dbc.FormGroup(
[
dbc.Label("Graph Data Points", html_for="option_data_points", width=3),
dbc.Col(
dcc.Slider(
id='option_data_points',
min=50,
max=500,
value=50,
step=50,
marks={
50: {'label': '50'},
250: {'label': '250'},
500: {'label': '500'}
},
included=True,
),
width=8,
),
],
row=True,
)
# unit option implementation
def unit_option_formgroup(group='SI', label='length', units=('m',), default='m'):
"""Define formgroup for a single unit option"""
assert_contents(group, ("SI","metric","imperial"), "group")
assert_contents(label, UNIT_KEYS, "label")
assert_list_contents(units, UNIT_VALUES[label], "units")
assert_contents(default, units, "default")
id_ = group + "_" + label
options = [{'label':a, 'value':a} for a in units]
_ = dbc.FormGroup(
[
dbc.Label(label, html_for=id_, width=3),
dbc.Col(
dbc.RadioItems(
id=id_,
options=options,
value=default,
inline=True,
),
width=8,
),
],
row=True,
)
return _
# create a simplified method to write the SI_editor
SI_editor = []
group = "SI"
for label in UNIT_KEYS:
units = [a for a in METRIC_UNITS[label].keys()]
SI_editor.append(
unit_option_formgroup(group, label, [default_units[group][label]], default_units[group][label])
)
metric_editor = []
group = "metric"
for label in UNIT_KEYS:
units = [a for a in METRIC_UNITS[label].keys()]
id_ = group + "_" + label
metric_editor.append(
unit_option_formgroup(group, label, units, default_units[group][label])
)
imperial_editor = []
group = "imperial"
for label in UNIT_KEYS:
units = [a for a in IMPERIAL_UNITS[label].keys()]
id_ = group + "_" + label
imperial_editor.append(
unit_option_formgroup(group, label, units, default_units[group][label])
)
#option to change units for inputs and outputs
option_units = create_option(
'Units',
'option_units',
None,
default = 'SI',
option_dict=[
{'label': 'SI', 'value': 'SI'},
{'label': 'Metric (Custom)', 'value': 'metric'},
{'label': 'Imperial (Custom)', 'value': 'imperial'},
]
)
option_general_tab = dbc.Form([
html.Br(),
option_result_table,
option_support_input,
option_default_support,
option_positive_direction_y,
option_data_point,
])
option_unit_tab = dbc.Form([
html.Br(),
option_units,
dbc.Tab(
[
dbc.Collapse(
SI_editor,
id='SI-editor',
is_open = True,
),
dbc.Collapse(
metric_editor,
id='metric-editor',
is_open=False
),
dbc.Collapse(
imperial_editor,
id='imperial-editor',
is_open=False
),
],
),
])
option_content = dbc.Card(
dbc.CardBody(
[
dbc.Tabs([
dbc.Tab(option_general_tab, label="General Options"),
dbc.Tab(option_unit_tab, label="Unit Options"),
]),
html.Br(),
reset_setting_button,
html.Br(),
dbc.Collapse(
[
html.Br(),
dbc.Card(dbc.CardBody(option_instructions))
],
id="option_instructions",
),
]
),
className="mt-3",
)
# Assemble different input tabs
tabs = dbc.Tabs(
[
dbc.Tab(beam_content, label="Beam"),
dbc.Tab(
[
dbc.Collapse(
support_content,
id='advanced-support',
is_open = False,
),
dbc.Collapse(
basic_support_content,
id='basic-support',
is_open=True
),
],
label="Supports",
),
dbc.Tab(point_load_content, label="Point Loads"),
dbc.Tab(point_torque_content, label="Point Torques"),
dbc.Tab(distributed_load_content, label="Distributed Load"),
dbc.Tab(query_content, label="Query"),
dbc.Tab(option_content, label='Options')
]
)
# Create a submit button
submit_button = dbc.Button(
id='submit_button',
n_clicks=0,
children='Analyse',
color='primary',
block=True,
disabled=False,
)
# Create a status bar/Alert
calc_status = dbc.Alert(
children="No analysis has been run.",
id="alert-fade",
dismissable=True,
is_open=True,
color='danger',
)
report_upload_section = dbc.Row(
[
dbc.Label("Work from Report", width=3),
html.Div([
dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop or ',
html.A('Select Files'),
]),
style={
'width': '150%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '5px'
},
# Allow multiple files to be uploaded
multiple=False
),
])
]
)
# Assemble main application content
main_content = html.Div(
dbc.Row(
[
dbc.Col(
[
dbc.Row(
dbc.Col(
[
dbc.Card(
dbc.Spinner(dcc.Graph(id='graph_1')),
),
html.Br(),
tabs,
html.Br(),
],
width=12,
)
),
dbc.Row(
dbc.Col(
report_upload_section,
width=12
)
),
html.Br(),
dbc.Row(
[
dbc.Col(
dbc.Button(
"Toggle Instructions",
id="instruction-button",
className="mb-3",
color="info",
n_clicks=0,
block=True,
),
width=4
),
dbc.Col(
[
dbc.Button(
"Generate Report",
id="report-button",
className="mb-3",
color="info",
n_clicks=0,
block=True,
),
Download(id='report'),
],
width=4
),
dbc.Col(
[
dbc.Button(
"Clear Beam",
id="clear-inputs-button",
className="mb-3",
color="info",
n_clicks=0,
block=True,
),
],
width=4
),
],
),
dbc.Row(
dbc.Col(
dbc.Spinner(submit_button),
width=12
)
),
html.Br(),
],
width={"size": 5.5, "offset": 0},
style={'padding': '5px'}
),
dbc.Col(
[
dbc.Card(
dbc.Spinner(dcc.Graph(id='graph_2'))
),
results_content,
],
width={"size": 5.8, "offset": 0},
style={'padding': '5px'}
)
]
)
)
content = html.Div(
[
html.H2(
'IndeterminateBeam Calculator',
style={
'textAlign': 'center',
'color': '#191970',
'padding': '40px 0px 0px 0px',
}
),
html.Hr(),
calc_status,
dcc.Store(id='input-json', storage_type='local'),
html.Div(id='dummy-div', style=dict(display='none')),
main_content,
html.Hr(),
copyright_
],
style=CONTENT_STYLE
)
# Initialise app
app = dash.Dash(
__name__,
external_stylesheets=[dbc.themes.MINTY],
external_scripts=['https://cdn.jsdelivr.net/gh/JesseBonanno/IndeterminateBeam/assets/gtag.js']
)
server = app.server
app.layout = html.Div([sidebar, content])
# add tab title
app.title = "IndeterminateBeam"
# # update units store
# unit_callback_input = [Input('SI_'+a,'value') for a in METRIC_UNITS.keys()]
# unit_callback_input += [Input('metric_'+a,'value') for a in METRIC_UNITS.keys()]
# unit_callback_input += [Input('imperial_'+a,'value') for a in IMPERIAL_UNITS.keys()]
# @app.callback(
# Output('json-units', 'data'),
# [Input('submit_button', 'n_clicks')]+
# unit_callback_input,
# )
# def unit_options_setup(
# n,
# SI_length,
# SI_force,
# SI_moment,
# SI_distributed,
# SI_stiffness,
# SI_A,
# SI_E,
# SI_I,
# SI_deflection,
# metric_length,
# metric_force,
# metric_moment,
# metric_distributed,
# metric_stiffness,
# metric_A,
# metric_E,
# metric_I,
# metric_deflection,
# imperial_length,
# imperial_force,
# imperial_moment,
# imperial_distributed,
# imperial_stiffness,
# imperial_A,
# imperial_E,
# imperial_I,
# imperial_deflection,
# ):
# units = {}
# units['SI'] = {
# 'length':SI_length,
# 'force':SI_force,
# 'moment':SI_moment,
# 'distributed':SI_distributed,
# 'stiffness':SI_stiffness,
# 'A':SI_A,
# 'E':SI_E,
# 'I':SI_I,
# 'deflection':SI_deflection,
# }
# units['metric'] = {
# 'length':metric_length,
# 'force':metric_force,
# 'moment':metric_moment,
# 'distributed':metric_distributed,
# 'stiffness':metric_stiffness,
# 'A':metric_A,
# 'E':metric_E,
# 'I':metric_I,
# 'deflection':metric_deflection,
# }
# units['imperial'] = {
# 'length':imperial_length,
# 'force':imperial_force,
# 'moment':imperial_moment,
# 'distributed':imperial_distributed,
# 'stiffness':imperial_stiffness,
# 'A':imperial_A,
# 'E':imperial_E,
# 'I':imperial_I,
# 'deflection':imperial_deflection,
# }
# return json.dumps(units)
# ANALYSIS
unit_callback_state = [State('SI_'+a,'value') for a in METRIC_UNITS.keys()]
unit_callback_state += [State('metric_'+a,'value') for a in METRIC_UNITS.keys()]
unit_callback_state += [State('imperial_'+a,'value') for a in IMPERIAL_UNITS.keys()]
@app.callback(
[
Output('graph_1', 'figure'),
Output('graph_2', 'figure'),
Output('alert-fade', 'color'),
Output('alert-fade', 'children'),
Output('alert-fade', 'is_open'),
Output('results-table', 'data'),
Output('input-json', 'data'),
Output('submit_button', 'disabled'),
],
[
Input('submit_button', 'n_clicks'),
Input('dummy-div', 'children'),
],
[
State('beam-table', 'data'),
State('point-load-table', 'data'),
State('point-torque-table', 'data'),
State('query-table', 'data'),
State('distributed-load-table', 'data'),
State('support-table', 'data'),
State('basic-support-table', 'data'),
State('graph_1', 'figure'),
State('graph_2', 'figure'),
State('input-json', 'data'),
State('option_support_input', 'value'),
State('option_default_support','value'),
State('option_positive_direction_y', 'value'),
State('option_data_points', 'value'),
State('option_result_table', 'value'),
State('option_units', 'value'),
] + unit_callback_state
)
def analyse_beam(
click,
dummy_div,
beams,
point_loads,
point_torques,
querys,
distributed_loads,
advanced_supports,
basic_supports,
graph_1,
graph_2,
prev_input,
option_support,
option_default_support,
positive_y_direction,
data_points,
option_result_table,
option_units,
SI_length,
SI_force,
SI_moment,
SI_distributed,
SI_stiffness,
SI_A,
SI_E,
SI_I,
SI_deflection,
metric_length,
metric_force,
metric_moment,
metric_distributed,
metric_stiffness,
metric_A,
metric_E,
metric_I,
metric_deflection,
imperial_length,
imperial_force,
imperial_moment,
imperial_distributed,
imperial_stiffness,
imperial_A,
imperial_E,
imperial_I,
imperial_deflection,
):
ctx = dash.callback_context
button_id = ctx.triggered[0]['prop_id'].split('.')[0]
# if an update was raised by button, and that was by a additional row, dont run.
if dummy_div is False and button_id == 'dummy-div':
raise PreventUpdate
t1 = time.perf_counter()
units = {}
units['SI'] = {
'length':SI_length,
'force':SI_force,
'moment':SI_moment,
'distributed':SI_distributed,
'stiffness':SI_stiffness,
'A':SI_A,
'E':SI_E,
'I':SI_I,
'deflection':SI_deflection,
}
units['metric'] = {
'length':metric_length,
'force':metric_force,
'moment':metric_moment,
'distributed':metric_distributed,
'stiffness':metric_stiffness,
'A':metric_A,
'E':metric_E,
'I':metric_I,
'deflection':metric_deflection,
}
units['imperial'] = {
'length':imperial_length,
'force':imperial_force,
'moment':imperial_moment,
'distributed':imperial_distributed,
'stiffness':imperial_stiffness,
'A':imperial_A,
'E':imperial_E,
'I':imperial_I,
'deflection':imperial_deflection,
}
# jsonify all inputs
input_json = json.dumps(
{
'beam': beams,
'advanced_supports': advanced_supports,
'basic_supports': basic_supports,
'point_loads': point_loads,
'point_torques': point_torques,
'distributed_loads': distributed_loads,
'querys': querys,
'adv_sup': option_support,
'default_support':option_default_support,
'y': positive_y_direction,
'data_points': data_points,
'option_units': option_units,
'result_table': option_result_table,
'unit_dictionary': units,
}
)
for i, s in enumerate(basic_supports):
sup = s.pop('Support')
if sup == 'fixed':
s['X'] = 'R'
s['Y'] = 'R'
s['M'] = 'R'
elif sup == 'pinned':
s['X'] = 'R'
s['Y'] = 'R'
s['M'] = 'F'
elif sup == 'roller':
s['X'] = 'F'
s['Y'] = 'R'
s['M'] = 'F'
basic_supports[i] = s
if option_support == 'advanced':
supports = advanced_supports
else:
supports = basic_supports
# if all inputs the same as stored inputs then
# no need to calculate again.
# if clicks 0 then inputs are set to prev input
# hence they will be the same but will need to run the
# analysis to show the results.
if input_json == prev_input and click > 0:
raise PreventUpdate
# try:
if positive_y_direction == 'up':
d_ = 1
else:
d_ = -1
for row in beams:
beam = Beam(*(float(a) for a in row.values()))
beam._DATA_POINTS = data_points
beam.update_units('length', units[option_units]['length'])
beam.update_units('force', units[option_units]['force'])
beam.update_units('moment', units[option_units]['moment'])
beam.update_units('distributed', units[option_units]['distributed'])
beam.update_units('stiffness', units[option_units]['stiffness'])
beam.update_units('A', units[option_units]['A'])
beam.update_units('E', units[option_units]['E'])
beam.update_units('I', units[option_units]['I'])
beam.update_units('deflection', units[option_units]['deflection'])
if supports:
for row in supports:
if row['X'] in ['r', 'R']:
DOF_x = 1
kx = 0
elif row['X'] in ['f', 'F']:
DOF_x = 0
kx = 0
elif float(row['X']) > 0:
DOF_x = 0
kx = float(row['X'])
else:
raise ValueError(
'input incorrect for x restraint of support')
if row['Y'] in ['r', 'R']:
DOF_y = 1
ky = 0
elif row['Y'] in ['f', 'F']:
DOF_y = 0
ky = 0
elif float(row['Y']) > 0:
DOF_y = 0
ky = float(row['Y'])
else:
raise ValueError(
'input incorrect for y restraint of support')
if row['M'] in ['r', 'R']:
DOF_m = 1
elif row['M'] in ['f', 'F']:
DOF_m = 0
else:
raise ValueError(
'input incorrect for m restraint of support')
beam.add_supports(
Support(
float(row['Coordinate']),
(
DOF_x,
DOF_y,
DOF_m
),
ky=ky,
kx=kx,
),
)
# TO DO: add capitals
if distributed_loads:
for row in distributed_loads:
beam.add_loads(
TrapezoidalLoad(
force=(
float(row['Start Load']),
float(row['End Load'])
),
span=(
float(row['Start Coordinate']),
float(row['End Coordinate'])
),
angle=(d_ * 90)
)
)
if point_loads:
for row in point_loads:
beam.add_loads(
PointLoad(
float(row['Force']),
float(row['Coordinate']),
d_ * float(row['Angle (deg)'])
)
)
if point_torques:
for row in point_torques:
beam.add_loads(
PointTorque(
float(row['Torque']),
float(row['Coordinate']),
)
)
beam.analyse()
if querys:
for row in querys:
beam.add_query_points(
float(row['Query coordinate']),
)
graph_1 = beam.plot_beam_external()
graph_2 = beam.plot_beam_internal()
    # Results data is actually adding to the calc time significantly.
    # It might be worth finding a more efficient method; for example, getting the max,
    # min and x values all in one go would avoid generating the vectors multiple times
    # and save time.
results_data = [
{
'val': 'Max',
'NF (' + units[option_units]['force'] +')': f'{beam.get_normal_force(return_max=True):.3f}',
'SF (' + units[option_units]['force'] +')': f'{beam.get_shear_force(return_max=True):.3f}',
'BM (' + units[option_units]['moment'] +')': f'{beam.get_bending_moment(return_max=True):.3f}',
'D (' + units[option_units]['deflection'] +')': f'{beam.get_deflection(return_max=True):.3f}',
},
{
'val': 'Min',
'NF (' + units[option_units]['force'] +')': f'{beam.get_normal_force(return_min=True):.3f}',
'SF (' + units[option_units]['force'] +')': f'{beam.get_shear_force(return_min=True):.3f}',
'BM (' + units[option_units]['moment'] +')': f'{beam.get_bending_moment(return_min=True):.3f}',
'D (' + units[option_units]['deflection'] +')': f'{beam.get_deflection(return_min=True):.3f}',
},
]
if querys:
for row in querys:
x_ = row['Query coordinate']
u_ = units[option_units]['length']
results_data.append(
{
'val': f'x = {x_} {u_}',
'NF (' + units[option_units]['force'] +')': f'{beam.get_normal_force(x_):.3f}',
'SF (' + units[option_units]['force'] +')': f'{beam.get_shear_force(x_):.3f}',
'BM (' + units[option_units]['moment'] +')': f'{beam.get_bending_moment(x_):.3f}',
'D (' + units[option_units]['deflection'] +')': f'{beam.get_deflection(x_):.3f}',
},
)
t2 = time.perf_counter()
t = t2 - t1
dt = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
color = "success"
message = f"Calculation completed in {t:.2f} seconds, at {dt}"
# except BaseException:
# color = "danger"
# e = sys.exc_info()[1]
# message = f"Error with calculation. Please check inputs. \
# The following error was observed: {e}"
# results_data = [
# {'type': 'Normal Force', 'max': 0, 'min': 0},
# {'type': 'Shear Force', 'max': 0, 'min': 0},
# {'type': 'Bending Moment', 'max': 0, 'min': 0},
# {'type': 'Deflection', 'max': 0, 'min': 0},
# ]
# if click == 0 and button_id == 'dummy-div':
# color = "danger"
# message = "No analysis has been run."
return graph_1, graph_2, color, message, True, results_data, input_json, False
# ADD ROWS AND RESTORE DATA AND CLEAR DATA
# (ANYTHING TABLE RELATED)
# update units store
unit_output = [Output('SI_'+a,'value') for a in METRIC_UNITS.keys()]
unit_output += [Output('metric_'+a,'value') for a in METRIC_UNITS.keys()]
unit_output += [Output('imperial_'+a,'value') for a in IMPERIAL_UNITS.keys()]
unit_state = [State('SI_'+a,'value') for a in METRIC_UNITS.keys()]
unit_state += [State('metric_'+a,'value') for a in METRIC_UNITS.keys()]
unit_state += [State('imperial_'+a,'value') for a in IMPERIAL_UNITS.keys()]
@app.callback(
[
Output('beam-table', 'data'),
Output('support-table', 'data'),
Output('basic-support-table', 'data'),
Output('point-load-table', 'data'),
Output('point-torque-table', 'data'),
Output('distributed-load-table', 'data'),
Output('query-table', 'data'),
Output('option_support_input', 'value'),
Output('option_default_support','value'),
Output('option_positive_direction_y', 'value'),
Output('option_result_table', 'value'),
Output('option_data_points', 'value'),
Output('option_units', 'value'),
Output('dummy-div','children'),
] + unit_output,
[
Input('support-rows-button', 'n_clicks'),
Input('basic-support-rows-button', 'n_clicks'),
Input('point-load-rows-button', 'n_clicks'),
Input('point-torque-rows-button', 'n_clicks'),
Input('distributed-load-rows-button', 'n_clicks'),
Input('query-rows-button', 'n_clicks'),
Input('clear-inputs-button', 'n_clicks'),
Input('reset-options-button', 'n_clicks'),
Input('upload-data', 'contents'),
],
[
State('beam-table', 'data'),
State('support-table', 'data'),
State('basic-support-table', 'data'),
State('point-load-table', 'data'),
State('point-torque-table', 'data'),
State('distributed-load-table', 'data'),
State('query-table', 'data'),
State('option_support_input', 'value'),
State('option_default_support','value'),
State('option_positive_direction_y', 'value'),
State('option_result_table', 'value'),
State('option_data_points', 'value'),
State('option_units','value'),
State('input-json', 'data'),
] + unit_state,
)
def update_tables(
support_table_clicks,
basic_support_table_clicks,
point_load_table_clicks,
point_torque_table_clicks,
distributed_load_table_clicks,
query_table_clicks,
clear_inputs_clicks,
reset_settings_clicks,
upload_data,
beam_table_rows,
advanced_support_table_rows,
basic_support_table_rows,
point_load_table_rows,
point_torque_table_rows,
distributed_load_table_rows,
query_table_rows,
option_support_input,
option_default_support,
option_positive_direction_y,
option_result_table,
option_data_points,
option_units,
input_json_data,
SI_length,
SI_force,
SI_moment,
SI_distributed,
SI_stiffness,
SI_A,
SI_E,
SI_I,
SI_deflection,
metric_length,
metric_force,
metric_moment,
metric_distributed,
metric_stiffness,
metric_A,
metric_E,
metric_I,
metric_deflection,
imperial_length,
imperial_force,
imperial_moment,
imperial_distributed,
imperial_stiffness,
imperial_A,
imperial_E,
imperial_I,
imperial_deflection,
):
    # Solution summary:
    # In order to automatically restore tables to previously stored information, the same
    # function is used both to add table rows and to load the past information.
    # As graphs are produced by clicking Analyse, a dummy variable was created that triggers
    # an analysis run. To avoid running the model every time a row is added, the value is set
    # to False, which prevents the analysis from running (see the analysis function). As the
    # value can remain False or True while data is changed and the analysis is re-run, a check
    # on the trigger context is also needed in the analysis function.
    # Also, as data is now always restored from the previous session, it is useful to be able
    # to clear data, so a clear-inputs button and its functionality were added.
if not input_json_data:
raise PreventUpdate
units = {}
units['SI'] = {
'length':SI_length,
'force':SI_force,
'moment':SI_moment,
'distributed':SI_distributed,
'stiffness':SI_stiffness,
'A':SI_A,
'E':SI_E,
'I':SI_I,
'deflection':SI_deflection,
}
units['metric'] = {
'length':metric_length,
'force':metric_force,
'moment':metric_moment,
'distributed':metric_distributed,
'stiffness':metric_stiffness,
'A':metric_A,
'E':metric_E,
'I':metric_I,
'deflection':metric_deflection,
}
units['imperial'] = {
'length':imperial_length,
'force':imperial_force,
'moment':imperial_moment,
'distributed':imperial_distributed,
'stiffness':imperial_stiffness,
'A':imperial_A,
'E':imperial_E,
'I':imperial_I,
'deflection':imperial_deflection,
}
units_values =[]
for a in units.keys():
units_values += [a for a in units[a].values()]
ctx = dash.callback_context
dummy_div = False
# website just started or triggered by uploading report
if not ctx.triggered or ctx.triggered[0]['prop_id'].split('.')[0] == 'upload-data':
# website just started with no saved data
if not input_json_data:
return [
beam_table_rows,
advanced_support_table_rows,
basic_support_table_rows,
point_load_table_rows,
point_torque_table_rows,
distributed_load_table_rows,
query_table_rows,
option_support_input,
option_default_support,
option_positive_direction_y,
option_result_table,
option_data_points,
option_units,
dummy_div,
] + units_values
# report uploaded
elif ctx.triggered[0]['prop_id'].split('.')[0] == 'upload-data':
data = upload_data.encode("utf8").split(b";base64,")[1]
data = base64.b64decode(data)
data = data.decode('utf-8')
data = data.split('--')[1]
data.replace('null', 'True')
data.replace('None', 'True')
data = json.loads(data)
#website started with saved data
else:
data = json.loads(input_json_data)
dummy_div = True
beam_table_rows = data['beam']
basic_support_table_rows =data['basic_supports']
advanced_support_table_rows = data['advanced_supports']
point_load_table_rows = data['point_loads']
point_torque_table_rows = data['point_torques']
distributed_load_table_rows = data['distributed_loads']
query_table_rows = data['querys']
option_support_input = data['adv_sup']
option_default_support = data['default_support']
option_positive_direction_y = data['y']
option_result_table = data['result_table']
option_data_points = data['data_points']
option_units = data['option_units']
units = data['unit_dictionary']
units_values = []
for a in units.keys():
units_values += [b for b in units[a].values()]
return [
beam_table_rows,
advanced_support_table_rows,
basic_support_table_rows,
point_load_table_rows,
point_torque_table_rows,
distributed_load_table_rows,
query_table_rows,
option_support_input,
option_default_support,
option_positive_direction_y,
option_result_table,
option_data_points,
option_units,
dummy_div,
] + units_values
# triggered by adding in new row to table
else:
button_id = ctx.triggered[0]['prop_id'].split('.')[0]
if button_id == 'support-rows-button':
advanced_support_table_rows.append(support_table_init)
elif button_id == 'basic-support-rows-button':
bs_table_data = {
'Coordinate': {
'init': 0,
},
"Support": {
'init': option_default_support,
}
}
bs_table_init = {
k: v['init'] for k, v in bs_table_data.items()
}
basic_support_table_rows.append(bs_table_init)
elif button_id == 'point-load-rows-button':
point_load_table_rows.append(point_load_table_init)
elif button_id == 'point-torque-rows-button':
point_torque_table_rows.append(point_torque_table_init)
elif button_id == 'distributed-load-rows-button':
distributed_load_table_rows.append(distributed_load_table_init)
elif button_id == 'query-rows-button':
query_table_rows.append(query_table_init)
# clear inputs but save options
elif button_id == 'clear-inputs-button':
return [
[beam_table_init],
[support_table_init],
[basic_support_table_init],
[point_load_table_init],
[point_torque_table_init],
[distributed_load_table_init],
[],
option_support_input,
option_default_support,
option_positive_direction_y,
option_result_table,
option_data_points,
option_units,
True,
] + units_values
# clear options
elif button_id == 'reset-options-button':
#use default unit properties
units_values =[]
for a in default_units.keys():
units_values += [b for b in default_units[a].values()]
return [
beam_table_rows,
advanced_support_table_rows,
basic_support_table_rows,
point_load_table_rows,
point_torque_table_rows,
distributed_load_table_rows,
query_table_rows,
'basic',
'fixed',
'down',
'show',
50,
"SI",
True,
] + units_values
return [
beam_table_rows,
advanced_support_table_rows,
basic_support_table_rows,
point_load_table_rows,
point_torque_table_rows,
distributed_load_table_rows,
query_table_rows,
option_support_input,
option_default_support,
option_positive_direction_y,
option_result_table,
option_data_points,
option_units,
dummy_div
] + units_values
# options - support mode
@app.callback(
[Output('advanced-support', 'is_open'),
Output('basic-support', 'is_open')],
Input('option_support_input', 'value')
)
def support_setup(mode):
if mode == 'basic':
return False, True
return True, False
# options - units mode
@app.callback(
[
Output('SI-editor', 'is_open'),
Output('metric-editor', 'is_open'),
Output('imperial-editor', 'is_open')
],
Input('option_units', 'value')
)
def units_setup(mode):
if mode == 'SI':
return True, False, False
elif mode == 'metric':
return False, True, False
else:
return False, False, True
# option - result data (to be query data in future really)
@app.callback(
Output('results-collapse', 'is_open'),
Input('option_result_table', 'value')
)
def results_setup(mode):
if mode == 'hide':
return False
return True
# toggle the instruction collapsibles open or closed
@app.callback(
[Output("beam_instructions", "is_open"),
Output("support_instructions", "is_open"),
Output("basic_support_instructions", "is_open"),
Output("point_load_instructions", "is_open"),
Output("point_torque_instructions", "is_open"),
Output("distributed_load_instructions", "is_open"),
Output("query_instructions", "is_open"),
Output("option_instructions", "is_open")],
Input("instruction-button", "n_clicks"),
State("beam_instructions", "is_open"),
)
def toggle_collapse(n, is_open):
if n:
a = not is_open
else:
a = is_open
return a, a, a, a, a, a, a, a
# if any of the unit values change, update the table columns
unit_input = [Input('SI_'+a,'value') for a in METRIC_UNITS.keys()]
unit_input += [Input('metric_'+a,'value') for a in METRIC_UNITS.keys()]
unit_input += [Input('imperial_'+a,'value') for a in IMPERIAL_UNITS.keys()]
@app.callback(
[
Output('beam-table', 'columns'),
Output('support-table', 'columns'),
Output('basic-support-table','columns'),
Output('point-load-table', 'columns'),
Output('point-torque-table', 'columns'),
Output('distributed-load-table', 'columns'),
Output('query-table', 'columns'),
Output('results-table', 'columns'),
],
[
Input('option_units', 'value'),
] + unit_input,
State('input-json', 'data'),
)
def update_tables(
option_units,
SI_length,
SI_force,
SI_moment,
SI_distributed,
SI_stiffness,
SI_A,
SI_E,
SI_I,
SI_deflection,
metric_length,
metric_force,
metric_moment,
metric_distributed,
metric_stiffness,
metric_A,
metric_E,
metric_I,
metric_deflection,
imperial_length,
imperial_force,
imperial_moment,
imperial_distributed,
imperial_stiffness,
imperial_A,
imperial_E,
imperial_I,
imperial_deflection,
input_json_data,
):
if not input_json_data:
raise PreventUpdate
units = {}
units['SI'] = {
'length':SI_length,
'force':SI_force,
'moment':SI_moment,
'distributed':SI_distributed,
'stiffness':SI_stiffness,
'A':SI_A,
'E':SI_E,
'I':SI_I,
'deflection':SI_deflection,
}
units['metric'] = {
'length':metric_length,
'force':metric_force,
'moment':metric_moment,
'distributed':metric_distributed,
'stiffness':metric_stiffness,
'A':metric_A,
'E':metric_E,
'I':metric_I,
'deflection':metric_deflection,
}
units['imperial'] = {
'length':imperial_length,
'force':imperial_force,
'moment':imperial_moment,
'distributed':imperial_distributed,
'stiffness':imperial_stiffness,
'A':imperial_A,
'E':imperial_E,
'I':imperial_I,
'deflection':imperial_deflection,
}
# update table default properties
beam_table_data['Length']['units'] = ' '+units[option_units]['length']
beam_table_data["Young's Modulus"]['units'] = ' ' +units[option_units]['E']
beam_table_data['Second Moment of Area']['units'] = ' '+units[option_units]['I']
beam_table_data['Cross-Sectional Area']['units'] = ' '+units[option_units]['A']
beam_table_columns = [
{
'name': d,
'id': d,
'deletable': False,
'renamable': False,
'type': beam_table_data[d]['type'],
'format': Format(
symbol=Symbol.yes,
symbol_suffix=beam_table_data[d]['units'])
} for d in beam_table_data.keys()]
support_table_data['Coordinate']['units'] = ' '+units[option_units]['length']
support_table_data['X']['units'] = ' '+units[option_units]['stiffness']
support_table_data['Y']['units'] = ' '+units[option_units]['stiffness']
support_table_data['M']['units'] = ' '+units[option_units]['stiffness']
support_table_columns =[
{
'name': d,
'id': d,
'deletable': False,
'renamable': False,
'type': support_table_data[d]['type'],
'format': Format(
symbol=Symbol.yes,
symbol_suffix=support_table_data[d]['units'])
} for d in support_table_data.keys()]
basic_support_table_columns =[
{
'name': 'Coordinate',
'id': 'Coordinate',
'deletable': False,
'renamable': False,
'type': 'numeric',
'presentation': 'input',
'format': Format(
symbol=Symbol.yes,
symbol_suffix=' '+units[option_units]['length'])
},
{
'name': 'Support',
'id': 'Support',
'deletable': False,
'renamable': False,
'type': 'any',
'presentation': 'dropdown',
},
]
support_table_columns =[
{
'name': d,
'id': d,
'deletable': False,
'renamable': False,
'type': support_table_data[d]['type'],
'format': Format(
symbol=Symbol.yes,
symbol_suffix=support_table_data[d]['units'])
} for d in support_table_data.keys()
]
# Properties for point_load Tab
point_load_table_data['Coordinate']['units'] = ' '+units[option_units]['length']
point_load_table_data['Force']['units'] = ' '+units[option_units]['force']
point_load_table_columns= [
{
'name': d,
'id': d,
'deletable': False,
'renamable': False,
'type': point_load_table_data[d]['type'],
'format': Format(
symbol=Symbol.yes,
symbol_suffix=point_load_table_data[d]['units'])
} for d in point_load_table_data.keys()
]
point_torque_table_data['Coordinate']['units'] = ' '+units[option_units]['length']
point_torque_table_data['Torque']['units'] = ' '+units[option_units]['moment']
point_torque_table_columns=[{
'name': d,
'id': d,
'deletable': False,
'renamable': False,
'type': point_torque_table_data[d]['type'],
'format': Format(
symbol=Symbol.yes,
symbol_suffix=point_torque_table_data[d]['units'])
} for d in point_torque_table_data.keys()
]
# Properties for distributed_load Tab
distributed_load_table_data['Start Coordinate']['units'] = ' '+units[option_units]['length']
distributed_load_table_data['End Coordinate']['units'] = ' '+units[option_units]['length']
distributed_load_table_data['Start Load']['units'] = ' '+units[option_units]['distributed']
distributed_load_table_data['End Load']['units'] = ' '+units[option_units]['distributed']
distributed_load_table_columns=[{
'name': d,
'id': d,
'deletable': False,
'renamable': False,
'type': distributed_load_table_data[d]['type'],
'format': Format(
symbol=Symbol.yes,
symbol_suffix=distributed_load_table_data[d]['units'])
} for d in distributed_load_table_data.keys()
]
# Properties for query tab
query_table_columns= [
{
'name': i,
'id': i,
'deletable': False,
'renamable': False,
'type': 'numeric',
'format': Format(
symbol=Symbol.yes,
symbol_suffix=' '+units[option_units]['length'])
} for i in query_table_init.keys()
]
results_table_columns = [
{"name": "", "id": "val"},
{"name": f"Normal Force ({units[option_units]['force']})", "id": 'NF (' + units[option_units]['force'] +')'},
{"name": f"Shear Force ({units[option_units]['force']})", "id": 'SF (' + units[option_units]['force'] +')'},
{"name": f"Bending Moment ({units[option_units]['moment']})", "id": 'BM (' + units[option_units]['moment'] +')'},
{"name": f"Deflection ({units[option_units]['deflection']})", "id": 'D (' + units[option_units]['deflection'] +')'},
]
return [
beam_table_columns,
support_table_columns,
basic_support_table_columns,
point_load_table_columns,
point_torque_table_columns,
distributed_load_table_columns,
query_table_columns,
results_table_columns,
]
# Generate Report
@app.callback(
Output("report", "data"),
Input('report-button', 'n_clicks'),
[State("graph_1", "figure"),
State("graph_2", "figure"),
State('results-table', 'data'),
State('input-json','data')]
)
def report(n, graph_1, graph_2, results, input_json):
if not input_json or not n:
raise PreventUpdate
unit_information = json.loads(input_json)
option_units = unit_information['option_units']
units = unit_information['unit_dictionary']
date = datetime.now().strftime("%d/%m/%Y")
# if the button has been clicked.
beam_data = "<!--" + input_json + "-->"
if n > 0:
# for each row in the results table,
# write html table row
table = [f"""<tr>
<td class="tg-baqh">{a['val']}</td>
<td class="tg-baqh">{a['NF (' + units[option_units]['force'] +')']}</td>
<td class="tg-baqh">{a['SF (' + units[option_units]['force'] +')']}</td>
<td class="tg-baqh">{a['BM (' + units[option_units]['moment'] +')']}</td>
<td class="tg-baqh">{a['D (' + units[option_units]['deflection'] +')']}</td>
</tr>
""" for a in results]
# join all the strings for each table row
table = ''.join(table)
#help graph 2 fit better on the second page.
graph_2 = go.Figure(graph_2)
graph_2.update_layout(height=950)
# report to consist of graph_1, table and graph_2, and date generated tag
# can't remember why the to_html properties are set the way they are.
# table format appropriated from an online generator.
# added page-break-after:always for formatting when print to pdf
content = [
"<!DOCTYPE html><html>",
beam_data,
to_html(fig=graph_1, full_html=False, include_plotlyjs='cdn'),
"""
<style type="text/css">
.tg {border-collapse:collapse;border-spacing:0;margin:20px;page-break-after:always}
.tg td{border-color:black;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;
overflow:hidden;padding:10px 20px;word-break:normal;}
.tg th{border-color:black;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;
font-weight:normal;overflow:hidden;padding:10px 20px;word-break:normal;}
.tg .tg-5gn2{background-color:#efefef;font-family:Arial, Helvetica, sans-serif !important;font-size:12px;text-align:center;
vertical-align:middle}
.tg .tg-uqo3{background-color:#efefef;text-align:center;vertical-align:top}
.tg .tg-baqh{text-align:center;vertical-align:top}
</style>
<table class="tg">
<thead>
"""+
f"""<tr>
<th class="tg-5gn2"></th>
<th class="tg-uqo3">Normal Force {units[option_units]['force']}</th>
<th class="tg-uqo3">Shear Force {units[option_units]['force']}</th>
<th class="tg-uqo3">Bending Moment {units[option_units]['moment']}</th>
<th class="tg-uqo3">Deflection {units[option_units]['deflection']}</th>
</tr>""" + table + """</tbody>
</table>
""",
to_html(fig=graph_2, full_html=False, include_plotlyjs='cdn'),
f'<i>Report generated at https://indeterminate-beam.herokuapp.com/ {__version__} on {date}</i>',
"</html>"
]
content = "<br>".join(content)
filename = "IndeterminateBeam_Report_"+ datetime.now().strftime("%d/%m/%Y") + ".html"
return dict(content=content, filename=filename)
if __name__ == '__main__':
app.run_server()
```
|
{
"source": "JessebotX/WeatherApp",
"score": 3
}
|
#### File: WeatherApp/src/data.py
```python
import json
import requests
import os
from data_model import DataModel
class WeatherData(DataModel):
def __init__(self, location):
super().__init__()
self.begin(location)
def begin(self, location):
self.current = None
self.currentReq = None
# set default
self.area = location
# load the API key from the private config file
with open("private.json", "r") as config:
self.apikey = json.loads(config.read())
self.connect_api()
self.set_data()
def connect_api(self):
"""
Connect to openweathermap's api
"""
try:
self.currentReq = requests.get(
"https://api.openweathermap.org/data/2.5/weather?q=" + self.area + "&appid="
+ self.apikey["apitoken"], timeout=5
)
except (requests.Timeout, requests.ConnectionError):
print("No Internet Connection")
def set_data(self):
"""
Set all data to the DataModel properties
"""
# read json from the current weather request
self.current = json.loads(self.currentReq.text)
self.kelvin = self.current["main"]["temp"]
self.condition = self.current["weather"][0]["main"]
self.description = self.current["weather"][0]["description"]
self.time = self.current["dt"]
self.icon_id = self.current["weather"][0]["icon"]
self.area = self.current["name"] + ", " + self.current["sys"]["country"]
```
#### File: WeatherApp/src/main.py
```python
import tkinter as tk
import tkinter.font as tkFont
import webbrowser
import imageio
import os
from data import WeatherData
class Main(WeatherData):
"""
The application's entry point
"""
def __init__(self):
"""
The application's constructor
"""
self.window = tk.Tk()
self.height = 400
self.width = 400
self.title = "JessebotX/WeatherApp"
self.bg_color = "#121212"
self.fg_color = "#fff"
self.cityEntry = ""
self.countryEntry = ""
self.h1_font = tkFont.Font(family="Helvetica", size=48)
self.h2_font = tkFont.Font(family="Helvetica", size=36)
self.h3_font = tkFont.Font(family="Helvetica", size=24)
self.setState()
def setState(self, city="Vancouver", country="CA", first_time = True):
self.begin(city + "," + country)
if not first_time:
self.forget_view()
self.configure_view()
self.construct_view()
self.end_view()
def configure_view(self):
"""
Configure the application's window properties
"""
# if os.path.isfile("weather.ico"):
# os.remove("weather.ico")
# icon_img = imageio.imread("http://openweathermap.org/img/wn/" + self.icon_id + "@2x.png")
# imageio.imwrite("weather.ico", icon_img)
self.window.iconbitmap("weather.ico")
self.window.configure(bg="#121212")
self.window.title(self.title)
self.window.geometry(str(self.width) + "x" + str(self.height))
self.window.resizable(False, False)
def construct_view(self):
"""
Construct the ui elements
"""
self.displayAreaLabel = tk.Label(
text = self.area,
font = self.h2_font,
fg = self.fg_color,
bg = self.bg_color
)
self.displayAreaLabel.grid(row=0, column=0)
# Create line
self.horizontalLine1 = tk.Frame(self.window, width=400, height=1, bg=self.fg_color)
self.horizontalLine1.grid(row=1, column=0)
# Temperature
self.temp_frame = tk.Frame(self.window, bg=self.bg_color)
self.temp_frame.grid(row=2, column=0)
self.displayFahrenheitLabel = tk.Label(
self.temp_frame,
text=str(self.fahrenheit) + "°F",
bg=self.bg_color,
fg="#ffeb3b",
font=self.h3_font
)
self.displayFahrenheitLabel.grid(row=3, column=3, padx=5)
self.displayCelsiusLabel = tk.Label(
self.temp_frame,
text=str(self.celsius) + "°C",
bg=self.bg_color,
fg=self.fg_color,
font=self.h3_font
)
self.displayCelsiusLabel.grid(row=3, column=2, padx=5)
self.displayKelvinLabel = tk.Label(
self.temp_frame,
text=str(self.kelvin) + "°K",
bg=self.bg_color,
fg="#f44336",
font=self.h3_font
)
self.displayKelvinLabel.grid(row=3, column=1, padx=5)
# Create line
self.horizontalLine2 = tk.Frame(self.window, width=400, height=1, bg=self.fg_color)
self.horizontalLine2.grid(row=4, column=0)
# Weather condition and description
self.displayConditionLabel = tk.Label(
text=self.condition,
bg=self.bg_color,
fg=self.fg_color,
font=self.h2_font
)
self.displayConditionLabel.grid(row=5, column=0)
self.displayDescriptionLabel = tk.Label(text=self.description, bg=self.bg_color, fg=self.fg_color)
self.displayDescriptionLabel.grid(row=6, column=0)
# Create line
self.horizontalLine3 = tk.Frame(self.window, width=200, height=1, bg=self.fg_color)
self.horizontalLine3.grid(row=7, column=0)
self.displayTimeLabel = tk.Label(
text="Last updated: " + self.time + "\n from openweathermap.org",
bg=self.bg_color,
fg=self.fg_color
)
self.displayTimeLabel.grid(row=8, column=0)
self.displaySeeAlsoLabel = tk.Label(
text="\nCheck out the project on github @",
bg=self.bg_color,
fg=self.fg_color
)
self.displaySeeAlsoLabel.grid(row=9, column=0)
self.link = tk.Label(
text="https://github.com/JessebotX/WeatherApp",
bg=self.bg_color,
fg="#0080ff",
cursor="hand2"
)
self.link.grid(row=10, column=0)
self.link.bind("<Button-1>", lambda e: webbrowser.open_new("https://github.com/JessebotX/WeatherApp"))
# Create line
self.horizontalLine4 = tk.Frame(self.window, width=200, height=1, bg=self.fg_color)
self.horizontalLine4.grid(row=11, column=0)
self.enterFrame = tk.Frame(self.window, bg=self.bg_color)
self.enterFrame.grid(row=12, pady=5)
self.displayCityLabel = tk.Label(self.enterFrame, text="Enter city", bg=self.bg_color, fg=self.fg_color)
self.displayCityLabel.grid(row=1, column=1)
self.cityEntry = tk.Entry(self.enterFrame)
self.cityEntry.grid(row=1, column=2)
self.countryLabel = tk.Label(
self.enterFrame,
text="(OPTIONAL) Enter [ISO-3166] Country Code \n(i.e \"US\" = United States of America)",
bg=self.bg_color,
fg="#0080ff",
cursor="hand2"
)
self.countryLabel.grid(row=2, column=1)
self.countryLabel.bind("<Button-1>", lambda e: webbrowser.open_new("https://www.iso.org/obp/ui/#search"))
self.countryEntry = tk.Entry(self.enterFrame)
self.countryEntry.grid(row=2, column=2)
self.displaySubmitButton = tk.Button(self.enterFrame, text="Enter", command=self.submit)
self.displaySubmitButton.grid(row=3)
def submit(self):
self.setState(city=self.cityEntry.get(), country=self.countryEntry.get(), first_time=False)
def end_view(self):
"""
Complete the loop
"""
self.window.mainloop()
def forget_view(self):
self.displayAreaLabel.grid_forget()
self.temp_frame.grid_forget()
self.displayConditionLabel.grid_forget()
self.displayDescriptionLabel.grid_forget()
if __name__ == "__main__":
Main()
```
|
{
"source": "jessebraham/comicvine-search",
"score": 3
}
|
#### File: comicvine-search/comicvine_search/client.py
```python
import requests
import requests_cache
from .exceptions import (
ComicVineApiError, ComicVineUnauthorizedError, ComicVineForbiddenError
)
from .response import Response
class ComicVineClient(object):
'''
Interacts with the ``search`` resource of the ComicVine API. Requires an
account on https://comicvine.gamespot.com/ in order to obtain an API key.
'''
# All API requests made by this client will be made to this URL.
API_URL = 'https://www.comicvine.com/api/search/'
# A valid User-Agent header must be set in order for our API requests to
# be accepted, otherwise our request will be rejected with a
# **403 - Forbidden** error.
HEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:7.0) '
'Gecko/20130825 Firefox/36.0'}
# A set of valid resource types to return in results.
RESOURCE_TYPES = {
'character', 'issue', 'location', 'object', 'person', 'publisher',
'story_arc', 'team', 'volume',
}
def __init__(self, api_key, expire_after=300):
'''
Store the API key in a class variable, and install the requests cache,
configuring it using the ``expire_after`` parameter.
:param api_key: Your personal ComicVine API key.
:type api_key: str
:param expire_after: The number of seconds to retain an entry in cache.
:type expire_after: int or None
'''
self.api_key = api_key
self._install_requests_cache(expire_after)
def _install_requests_cache(self, expire_after):
'''
Monkey patch Requests to use requests_cache.CachedSession rather than
requests.Session. Responses will have the `from_cache` attribute set
to True if the value being returned is a cached value.
Responses will be held in cache for the number of seconds given by the
``expire_after`` parameter.
:param expire_after: The number of seconds to retain an entry in cache.
:type expire_after: int
'''
requests_cache.install_cache(
__name__,
backend='memory',
expire_after=expire_after
)
def search(self, query, offset=0, limit=10, resources=None,
use_cache=True):
'''
Perform a search against the API, using the provided query term. If
required, a list of resource types to filter search results to can
be included.
Take the JSON contained in the response and provide it to the custom
``Response`` object's constructor. Return the ``Response`` object.
:param query: The search query with which to make the request.
:type query: str
:param offset: The index of the first record returned.
:type offset: int or None
:param limit: How many records to return **(max 10)**
:type limit: int or None
:param resources: A list of resources to include in the search results.
:type resources: list or None
:param use_cache: Toggle the use of requests_cache.
:type use_cache: bool
:return: The response object containing the results of the search
query.
:rtype: comicvine_search.response.Response
'''
params = self._request_params(query, offset, limit, resources)
json = self._query_api(params, use_cache=use_cache)
response = Response(json)
return response
def _request_params(self, query, offset, limit, resources):
'''
Construct a dict containing the required key-value pairs of parameters
required in order to make the API request.
The documentation for the ``search`` resource can be found at
https://comicvine.gamespot.com/api/documentation#toc-0-30.
Regarding 'limit', as per the documentation:
The number of results to display per page. This value defaults to
10 and can not exceed this number.
:param query: The search query with which to make the request.
:type query: str
:param offset: The index of the first record returned.
:type offset: int
:param limit: How many records to return **(max 10)**
:type limit: int
:param resources: A list of resources to include in the search results.
:type resources: list or None
:return: A dictionary of request parameters.
:rtype: dict
'''
return {'api_key': self.api_key,
'format': 'json',
'limit': min(10, limit), # hard limit of 10
'offset': max(0, offset), # cannot provide negative offset
'query': query,
'resources': self._validate_resources(resources)}
def _validate_resources(self, resources):
'''
Provided a list of resources, first convert it to a set and perform an
intersection with the set of valid resource types, ``RESOURCE_TYPES``.
Return a comma-separated string of the remaining valid resources, or
None if the set is empty.
:param resources: A list of resources to include in the search results.
:type resources: list or None
:return: A comma-separated string of valid resources.
:rtype: str or None
'''
if not resources:
return None
valid_resources = self.RESOURCE_TYPES & set(resources)
return ','.join(valid_resources) if valid_resources else None
def _query_api(self, params, use_cache):
'''
Query the ComicVine API's ``search`` resource, providing the required
headers and parameters with the request. Optionally allow the caller
of the function to disable the request cache.
If an error occurs during the request, handle it accordingly. Upon
success, return the JSON from the response.
:param params: Parameters to include with the request.
:type params: dict
:param use_cache: Toggle the use of requests_cache.
:type use_cache: bool
:return: The JSON contained in the response.
:rtype: dict
'''
# Since we're performing the identical action regardless of whether
# or not the request cache is to be used, store the procedure in a
# local function to avoid repetition.
def __httpget():
response = requests.get(
self.API_URL, headers=self.HEADERS, params=params)
if not response.ok:
self._handle_http_error(response)
return response.json()
# To disable the use of the request cache, make the HTTP request from
# within the `requests_cache.disabled()` context.
if not use_cache:
with requests_cache.disabled():
return __httpget()
return __httpget()
def _handle_http_error(self, response):
'''
Provided a ``requests.Response`` object, if the status code is
anything other than **200**, we will treat it as an error.
Using the response's status code, determine which type of exception to
raise. Construct an exception message from the response's status code
and reason properties before raising the exception.
:param response: The requests.Response object returned by the HTTP
request.
:type response: requests.Response
:raises ComicVineUnauthorizedError: if no API key provided.
:raises ComicVineForbiddenError: if no User-Agent header provided.
:raises ComicVineApiError: if an unidentified error occurs.
'''
exception = {
401: ComicVineUnauthorizedError,
403: ComicVineForbiddenError
}.get(response.status_code, ComicVineApiError)
message = f'{response.status_code} {response.reason}'
raise exception(message)
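# A minimal usage sketch (the API key below is a placeholder, not a real key):
#
#   client = ComicVineClient('your-comicvine-api-key')
#   response = client.search('Spider-Man', limit=5, resources=['volume'])
#   # `response` is a comicvine_search.response.Response built from the JSON.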
```
|
{
"source": "jessebraham/jujutsu",
"score": 3
}
|
#### File: jujutsu/jujutsu/main.py
```python
import itertools
import os
import sys
from pathlib import Path
from tkinter import *
import keyboard
import pyautogui
import pytesseract
from PIL import Image
from pystray import Icon, Menu, MenuItem
# ----------------------------------------------------------------------------
# Tesseract-OCR Configuration
# Determine the installation location of the Tesseract-OCR executable for each
# supported operating system. If the host operating system is not listed below
# then the $TESSERACT_BIN_PATH environment variable *MUST* be set.
# FIXME: add support for platforms 'darwin', 'linux'/'linux2'
if sys.platform == "win32":
TESSERACT_BIN_PATH = "C:\\Program Files\\Tesseract-OCR\\tesseract.exe"
elif os.environ.get("TESSERACT_BIN_PATH") is None:
raise RuntimeError(
"Tesseract binary path detection not available for your operating "
"system, please set $TESSERACT_BIN_PATH and try again."
)
# Specify the path to the Tesseract-OCR executable, overriding the default
# installation path with $TESSERACT_BIN_PATH if it has been set.
pytesseract.pytesseract.tesseract_cmd = os.environ.get(
"TESSERACT_BIN_PATH", TESSERACT_BIN_PATH
)
# Use a configuration better suited to Chinese/Japanese text recognition.
#
# OCR Engine Mode: 1 Neural nets LSTM engine only.
# Page Segmentation Mode: 6 Assume a single uniform block of text.
#
# This PSM was chosen to support both horizontal and vertical Japanese text.
# The remaining configuration comes from the Tesseract-OCR documentation.
TESSERACT_CONFIG = " ".join(
itertools.chain(
("--oem", "1"),
("--psm", "6"),
("-c", "chop_enable=T"),
("-c", "use_new_state_cost=F"),
("-c", "segment_segcost_rating=F"),
("-c", "enable_new_segsearch=0"),
("-c", "language_model_ngram_on=0"),
("-c", "textord_force_make_prop_words=F"),
("-c", "edges_max_children_per_outline=40"),
)
)
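# For reference, the join above produces a single option string along the
# lines of: "--oem 1 --psm 6 -c chop_enable=T -c use_new_state_cost=F ..."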
# ----------------------------------------------------------------------------
# Screen Canvas Class
class ScreenCanvas:
def __init__(self):
self.x = 0
self.y = 0
self.start_x = None
self.start_y = None
self.cur_x = None
self.cur_y = None
self.rect = None
# Create the root window, but immediately hide it.
self.root = Tk()
self.root.withdraw()
# Build the semi-transparent full-screen overlay and the selection canvas.
self._init_top_level()
self._init_screen_canvas()
def capture(self):
self.root.mainloop()
def abort_capture(self):
self._lower_screen_canvas()
self.root.quit()
def _init_top_level(self):
self.top_level = Toplevel(self.root)
self.top_level.attributes("-alpha", 0.3)
self.top_level.attributes("-fullscreen", True)
self.top_level.attributes("-topmost", True)
self.top_level.attributes("-transparent", "blue")
self.top_level.lift()
def _init_screen_canvas(self):
picture_frame = Frame(self.top_level, background="blue")
picture_frame.pack(fill=BOTH, expand=YES)
self.screen_canvas = Canvas(picture_frame, cursor="cross", bg="grey11")
self.screen_canvas.pack(fill=BOTH, expand=YES)
self.screen_canvas.bind("<ButtonPress-1>", self._on_mouse_down)
self.screen_canvas.bind("<B1-Motion>", self._on_mouse_move)
self.screen_canvas.bind("<ButtonRelease-1>", self._on_mouse_up)
def _lower_screen_canvas(self):
if self.screen_canvas is not None:
self.screen_canvas.destroy()
self.screen_canvas = None
self.top_level.attributes("-fullscreen", False)
self.top_level.attributes("-topmost", False)
self.top_level.withdraw()
def _take_bounded_screenshot(self, x0, y0, x1, y1):
self._lower_screen_canvas()
im = pyautogui.screenshot(region=(x0, y0, x1, y1))
result = pytesseract.image_to_string(
im, lang="jpn+jpn_vert", config=TESSERACT_CONFIG
)
# FIXME: open a small window displaying the text in an editable field
print(result)
self.root.clipboard_clear()
self.root.clipboard_append(result)
self.root.update()
self.abort_capture()
def _on_mouse_down(self, event):
# Save the mouse drag start position.
self.start_x = self.screen_canvas.canvasx(event.x)
self.start_y = self.screen_canvas.canvasy(event.y)
# Create the selection rectangle.
self.rect = self.screen_canvas.create_rectangle(
self.x, self.y, 1, 1, outline="red", width=3, fill="blue"
)
def _on_mouse_move(self, event):
# Update the current mouse position.
self.cur_x = event.x
self.cur_y = event.y
# Expand the rectangle as you drag the mouse.
self.screen_canvas.coords(
self.rect, self.start_x, self.start_y, self.cur_x, self.cur_y
)
def _on_mouse_up(self, event):
if self.start_x <= self.cur_x and self.start_y <= self.cur_y:
# Moving the cursor to the right and down.
self._take_bounded_screenshot(
self.start_x,
self.start_y,
self.cur_x - self.start_x,
self.cur_y - self.start_y,
)
elif self.start_x >= self.cur_x and self.start_y <= self.cur_y:
# Moving the cursor to the left and down.
self._take_bounded_screenshot(
self.cur_x,
self.start_y,
self.start_x - self.cur_x,
self.cur_y - self.start_y,
)
elif self.start_x <= self.cur_x and self.start_y >= self.cur_y:
# Moving the cursor to the right and up.
self._take_bounded_screenshot(
self.start_x,
self.cur_y,
self.cur_x - self.start_x,
self.start_y - self.cur_y,
)
elif self.start_x >= self.cur_x and self.start_y >= self.cur_y:
# Moving the cursor to the left and up.
self._take_bounded_screenshot(
self.cur_x,
self.cur_y,
self.start_x - self.cur_x,
self.start_y - self.cur_y,
)
return event
# ----------------------------------------------------------------------------
# Application
class TrayApplication:
def __init__(self):
icon_path = Path("resources/icon.ico").absolute()
self.icon = Icon(
"呪術 (jujutsu)",
Image.open(icon_path),
menu=Menu(
MenuItem("Capture (ctrl+`)", self._capture_action),
MenuItem("Quit", self._exit_action),
),
)
self.screen_canvas = None
keyboard.add_hotkey("ctrl+`", self._capture_action)
keyboard.add_hotkey("esc", self._abort_action)
def run(self):
self.icon.run()
def _capture_action(self):
if self.screen_canvas is not None:
return
self.screen_canvas = ScreenCanvas()
self.screen_canvas.capture()
self.screen_canvas = None
def _abort_action(self):
if self.screen_canvas is None:
return
self.screen_canvas.abort_capture()
self.screen_canvas = None
def _exit_action(self):
self._abort_action()
self.icon.visible = False
self.icon.stop()
def main():
app = TrayApplication()
app.run()
if __name__ == "__main__":
main()
```
|
{
"source": "jessebrennan/azul",
"score": 3
}
|
#### File: deployments/prod/environment.py
```python
from typing import (
Mapping,
Optional,
)
def env() -> Mapping[str, Optional[str]]:
"""
Returns a dictionary that maps environment variable names to values. The
values are either None or strings. String values can contain references to
other environment variables in the form `{FOO}` where FOO is the name of an
environment variable. See
https://docs.python.org/3.8/library/string.html#format-string-syntax
for the concrete syntax. These references will be resolved *after* the
overall environment has been compiled by merging all relevant
`environment.py` and `environment.local.py` files.
Entries with a `None` value will be excluded from the environment. They
can be used to document a variable without a default value in which case
other, more specific `environment.py` or `environment.local.py` files must
provide the value.
"""
return {
# Set variables for the `prod` (short for production) deployment here.
#
# Only modify this file if you intend to commit those changes. To change the
# environment with a setting that's specific to you AND the deployment, create
# an environment.local.py right next to this file and make your changes there.
# Settings applicable to all environments but specific to you go into
# environment.local.py at the project root.
'AZUL_VERSIONED_BUCKET': 'edu-ucsc-gi-azul-dcp2-prod-config.{AWS_DEFAULT_REGION}',
'AZUL_DOMAIN_NAME': 'azul.data.humancellatlas.org',
'AZUL_DEPLOYMENT_STAGE': 'prod',
'AZUL_S3_BUCKET': 'edu-ucsc-gi-azul-dcp2-prod-storage-{AZUL_DEPLOYMENT_STAGE}',
'AZUL_CATALOGS': ','.join([
f'hca:{name}{rel}:repository/tdr:metadata/hca'
for rel in (3, 4, 1)
for name in ('dcp', 'it')
]),
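# The comprehension above yields the catalogs dcp3, it3, dcp4, it4, dcp1 and
# it1, each rendered as 'hca:<name><rel>:repository/tdr:metadata/hca', e.g.
# 'hca:dcp3:repository/tdr:metadata/hca'.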
'AZUL_TDR_SOURCES': ','.join([
'tdr:broad-datarepo-terra-prod-hca2:snapshot/hca_prod_20201118_dcp1___20201209',
]),
**{
f'AZUL_TDR_{catalog.upper()}_SOURCES': ','.join([
'tdr:broad-datarepo-terra-prod-hca2:snapshot/hca_prod_20201120_dcp2___20210315',
])
for catalog in ('dcp3', 'it3')
},
**{
f'AZUL_TDR_{catalog.upper()}_SOURCES': ','.join([
'tdr:broad-datarepo-terra-prod-hca2:snapshot/hca_prod_20201120_dcp2___20210401_dcp4',
])
for catalog in ('dcp4', 'it4')
},
'AZUL_TDR_SERVICE_URL': 'https://jade-terra.datarepo-prod.broadinstitute.org',
'AZUL_SAM_SERVICE_URL': 'https://sam.dsde-prod.broadinstitute.org',
'AZUL_URL_REDIRECT_BASE_DOMAIN_NAME': 'azul.data.humancellatlas.org',
'AZUL_URL_REDIRECT_FULL_DOMAIN_NAME': 'url.{AZUL_URL_REDIRECT_BASE_DOMAIN_NAME}',
# $0.372/h × 4 × 24h/d × 30d/mo = $1071.36/mo
'AZUL_ES_INSTANCE_TYPE': 'r5.xlarge.elasticsearch',
'AZUL_ES_INSTANCE_COUNT': '4',
'AZUL_ES_VOLUME_SIZE': '128',
'AZUL_DEBUG': '1',
'AZUL_OWNER': '<EMAIL>',
'AZUL_AWS_ACCOUNT_ID': '542754589326',
'AWS_DEFAULT_REGION': 'us-east-1',
'GOOGLE_PROJECT': 'platform-hca-prod',
}
```
#### File: lambdas/indexer/app.py
```python
import logging
from typing import (
Optional,
)
# noinspection PyPackageRequirements
import chalice
from azul import (
CatalogName,
cached_property,
config,
)
from azul.chalice import (
AzulChaliceApp,
)
from azul.health import (
HealthController,
)
from azul.indexer.index_controller import (
IndexController,
)
from azul.logging import (
configure_app_logging,
)
log = logging.getLogger(__name__)
class IndexerApp(AzulChaliceApp):
@property
def health_controller(self):
# Don't cache. Health controller is meant to be short-lived since it
# applies its own caching. If we cached the controller, we'd never
# observe any changes in health.
return HealthController(lambda_name='indexer')
@cached_property
def index_controller(self) -> IndexController:
return IndexController()
def __init__(self):
super().__init__(app_name=config.indexer_name,
# see LocalAppTestCase.setUpClass()
unit_test=globals().get('unit_test', False))
app = IndexerApp()
configure_app_logging(app, log)
@app.route('/version', methods=['GET'], cors=True)
def version():
from azul.changelog import (
compact_changes,
)
return {
'git': config.lambda_git_status,
'changes': compact_changes(limit=10)
}
@app.route('/health', methods=['GET'], cors=True)
def health():
return app.health_controller.health()
@app.route('/health/basic', methods=['GET'], cors=True)
def basic_health():
return app.health_controller.basic_health()
@app.route('/health/cached', methods=['GET'], cors=True)
def cached_health():
return app.health_controller.cached_health()
@app.route('/health/fast', methods=['GET'], cors=True)
def fast_health():
return app.health_controller.fast_health()
@app.route('/health/{keys}', methods=['GET'], cors=True)
def health_by_key(keys: Optional[str] = None):
return app.health_controller.custom_health(keys)
@app.schedule('rate(1 minute)', name='indexercachehealth')
def update_health_cache(_event: chalice.app.CloudWatchEvent):
app.health_controller.update_cache()
@app.route('/', cors=True)
def hello():
return {'Hello': 'World!'}
@app.route('/{catalog}/{action}', methods=['POST'])
def post_notification(catalog: CatalogName, action: str):
"""
Receive a notification event and queue it for indexing or deletion.
"""
return app.index_controller.handle_notification(catalog, action, app.current_request)
# Work around https://github.com/aws/chalice/issues/856
def new_handler(self, event, context):
app.lambda_context = context
return old_handler(self, event, context)
old_handler = chalice.app.EventSourceHandler.__call__
chalice.app.EventSourceHandler.__call__ = new_handler
@app.on_sqs_message(queue=config.notifications_queue_name(), batch_size=1)
def contribute(event: chalice.app.SQSEvent):
app.index_controller.contribute(event)
@app.on_sqs_message(queue=config.tallies_queue_name(),
batch_size=IndexController.document_batch_size)
def aggregate(event: chalice.app.SQSEvent):
app.index_controller.aggregate(event)
# Any messages in the tallies queue that fail to be processed will be retried
# with more RAM in the tallies_retry queue.
@app.on_sqs_message(queue=config.tallies_queue_name(retry=True),
batch_size=IndexController.document_batch_size)
def aggregate_retry(event: chalice.app.SQSEvent):
app.index_controller.aggregate(event, retry=True)
# Any messages in the notifications queue that fail to be processed will be
# retried with more RAM and a longer timeout in the notifications_retry queue.
@app.on_sqs_message(queue=config.notifications_queue_name(retry=True),
batch_size=1)
def contribute_retry(event: chalice.app.SQSEvent):
app.index_controller.contribute(event, retry=True)
```
#### File: azul/scripts/can_dss_bundle.py
```python
import argparse
import json
import logging
import os
import sys
from humancellatlas.data.metadata.api import (
Bundle,
)
from humancellatlas.data.metadata.helpers.dss import (
download_bundle_metadata,
)
from humancellatlas.data.metadata.helpers.json import (
as_json,
)
from azul import (
config,
)
import azul.dss
from azul.files import (
write_file_atomically,
)
from azul.logging import (
configure_script_logging,
)
logger = logging.getLogger(__name__)
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--dss-url', '-u',
default=config.dss_endpoint,
help='The URL of the DSS REST API endpoint from which to download the bundle to be canned '
'(default: %(default)s).')
parser.add_argument('--replica', '-r',
default='aws',
help="The replica from which to download the bundle to be canned (default: %(default)s).")
parser.add_argument('--uuid', '-b',
required=True,
help='The UUID of the bundle to can.')
parser.add_argument('--version', '-v',
help='The version of the bundle to can (default: the latest version).')
parser.add_argument('--output-dir', '-O',
default=os.path.join(config.project_root, 'test', 'indexer', 'data'),
help='The path to the output directory (default: %(default)s).')
parser.add_argument('--api-json', '-A',
default=False,
action='store_true',
help="Dump the return value of metadata-api's as_json function (default off).")
args = parser.parse_args(argv)
dss_client = azul.dss.direct_access_client(dss_endpoint=args.dss_url,
num_workers=config.num_dss_workers)
version, manifest, metadata_files = download_bundle_metadata(client=dss_client,
replica=args.replica,
uuid=args.uuid,
version=args.version,
num_workers=config.num_dss_workers)
logger.info('Downloaded bundle %s version %s from replica %s.', args.uuid, version, args.replica)
api_json = as_json(Bundle(args.uuid, version, manifest, metadata_files)) if args.api_json else None
for obj, suffix in [(manifest, ".manifest.json"),
(metadata_files, '.metadata.json'),
*([(api_json, ".api.json")] if api_json else [])]:
path = os.path.join(args.output_dir, args.uuid + suffix)
with write_file_atomically(path) as f:
json.dump(obj, f, indent=4)
logger.info("Successfully wrote %s", path)
if __name__ == '__main__':
configure_script_logging(logger)
main(sys.argv[1:])
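# Example invocation (the UUID is an illustrative placeholder):
#
#   python scripts/can_dss_bundle.py --uuid aaaaaaaa-bbbb-4ccc-8ddd-eeeeeeeeeeee --api-json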
```
#### File: azul/scripts/envhook.py
```python
from importlib.abc import (
Loader,
MetaPathFinder,
)
import importlib.util
from itertools import (
chain,
)
import os
import pathlib
import sys
from typing import (
Mapping,
MutableMapping,
Tuple,
TypeVar,
)
__all__ = ('setenv', 'main')
def main(argv):
import argparse
parser = argparse.ArgumentParser(description='Install a hook into Python that automatically sources `environment`')
parser.add_argument('action', choices=['install', 'remove'])
options = parser.parse_args(argv)
# Confirm that a virtual environment (venv or virtualenv) is active
if 'VIRTUAL_ENV' in os.environ:
import site
if hasattr(site, 'getsitepackages'):
# Both plain Python and `venv` have `getsitepackages()`
sys_prefix = Path(sys.prefix).resolve()
link_dir = next(p for p in map(Path, site.getsitepackages())
if sys_prefix.is_prefix_of(p))
else:
# virtualenv's `site` does not have getsitepackages()
link_dir = (Path(site.__file__).parent / 'site-packages').resolve()
else:
raise NoActiveVirtualenv
dst = Path(__file__).absolute()
# This is the least invasive way of looking up `sitecustomize`, AFAIK. The
# alternative is `import sitecustomize` which would propagate exceptions
# occurring in that module and trigger the side effects of loading that
# module. This approach is really only safe when that module was already
# loaded which is not the case if -S was passed or PYTHONNOUSERSITE is set.
# We really only want to know if it's us or a different module. Another
# alternative would be sys.modules.get('sitecustomize') but that would yield
# None with -S or PYTHONNOUSERSITE even when there is a sitecustomize.py,
# potentially one different from us.
sitecustomize = importlib.util.find_spec('sitecustomize')
if sitecustomize is not None:
sitecustomize = Path(sitecustomize.origin)
if sitecustomize.resolve() != dst.resolve():
raise ThirdPartySiteCustomize(sitecustomize)
link = link_dir / 'sitecustomize.py'
if link.exists():
if link.is_symlink():
cur_dst = link.follow()
else:
raise NotASymbolicLinkError(link)
else:
cur_dst = None
if options.action == 'install':
if cur_dst is None:
_print(f'Installing by creating symbolic link from {link} to {dst}.')
link.symlink_to(dst)
elif dst == cur_dst:
_print(f'Already installed. Symbolic link from {link} to {dst} exists.')
else:
raise BadSymlinkDestination(link, cur_dst, dst)
elif options.action == 'remove':
if cur_dst is None:
_print(f'Not currently installed. Symbolic link {link} does not exist.')
elif cur_dst == dst:
_print(f'Uninstalling by removing {link}.')
link.unlink()
else:
raise BadSymlinkDestination(link, cur_dst, dst)
else:
assert False
def setenv():
export_environment = _import_export_environment()
redact = getattr(export_environment, 'redact')
resolve_env = getattr(export_environment, 'resolve_env')
load_env = getattr(export_environment, 'load_env')
new = resolve_env(load_env())
old = os.environ
pycharm_hosted = bool(int(os.environ.get('PYCHARM_HOSTED', '0')))
for k, (o, n) in sorted(zip_dict(old, new).items()):
if o is None:
if pycharm_hosted:
_print(f"Setting {k} to '{redact(k, n)}'")
os.environ[k] = n
else:
_print(f"Warning: {k} is not set but should be {redact(k, n)}, "
f"you should run `source environment`")
elif n is None:
pass
elif n != o:
if k.startswith('PYTHON'):
_print(f"Ignoring change in {k} from '{redact(k, o)}' to '{redact(k, n)}'")
else:
if pycharm_hosted:
_print(f"Changing {k} from '{redact(k, o)}' to '{redact(k, n)}'")
os.environ[k] = n
else:
_print(f"Warning: {k} is '{redact(k, o)}' but should be '{redact(k, n)}', "
f"you must run `source environment`")
def _import_export_environment():
# When this module is loaded from the `sitecustomize.py` symbolic link, the
# directory containing the physical file may not be on the sys.path so we
# cannot use a normal import to load the `export_environment` module.
module_name = 'export_environment'
file_name = module_name + '.py'
parent_dir = Path(__file__).follow().parent
spec = importlib.util.spec_from_file_location(name=module_name,
location=parent_dir / file_name)
export_environment = importlib.util.module_from_spec(spec)
assert isinstance(spec.loader, Loader)
spec.loader.exec_module(export_environment)
return export_environment
K = TypeVar('K')
OV = TypeVar('OV')
NV = TypeVar('NV')
def zip_dict(old: Mapping[K, OV], new: Mapping[K, NV], missing=None) -> MutableMapping[K, Tuple[OV, NV]]:
"""
Merge two dictionaries. The resulting dictionary contains an entry for every
key in either `old` or `new`. Each entry in the result associates a key to
two values: the value from `old` for that key followed by the value from
`new` for that key. If the key is absent from either argument, the
respective tuple element will be `missing`, which defaults to None. If
either `old` or `new` could contain None values, some other value should be
passed for `missing` in order to distinguish None values from values for
absent entries.
>>> zip_dict({1:2}, {1:2})
{1: (2, 2)}
>>> zip_dict({1:2}, {3:4})
{1: (2, None), 3: (None, 4)}
>>> zip_dict({1:2}, {1:3})
{1: (2, 3)}
>>> zip_dict({1:2}, {})
{1: (2, None)}
>>> zip_dict({}, {1:2})
{1: (None, 2)}
>>> zip_dict({'deleted': 1, 'same': 2, 'changed': 3}, {'same': 2, 'changed': 4, 'added': 5}, missing=-1)
{'deleted': (1, -1), 'same': (2, 2), 'changed': (3, 4), 'added': (-1, 5)}
"""
result = ((k, (old.get(k, missing), n)) for k, n in new.items())
removed = ((k, o) for k, o in old.items() if k not in new)
removed = ((k, (o, missing)) for k, o in removed)
return dict(chain(removed, result))
def _print(msg):
print(Path(__file__).resolve().name + ':', msg, file=sys.stderr)
def _parse(env: str) -> MutableMapping[str, str]:
return {k: v for k, _, v in (line.partition('=') for line in env.splitlines())}
class SanitizingFinder(MetaPathFinder):
def __init__(self) -> None:
super().__init__()
assert __name__ == 'sitecustomize'
sitecustomize_py = Path(__file__)
assert sitecustomize_py.is_symlink()
envhook_py = sitecustomize_py.follow()
self.bad_path = str(envhook_py.parent.parent / 'src' / 'azul')
def find_spec(self, *_args, **_kwargs):
sys_path = sys.path
while True:
try:
index = sys_path.index(self.bad_path)
except ValueError:
return None
else:
_print(f"Sanitizing sys.path by removing entry {index} containing '{self.bad_path}'.")
del sys_path[index]
def sanitize_sys_path():
"""
Certain PyCharm support scripts like docrunner.py add the directory
containing a module to `sys.path`, presumably with the intent to emulate
Python behavior for scripts run from the command line:
https://docs.python.org/3.8/using/cmdline.html#cmdoption-c
This has negative consequences when the module resides in the `src/azul`
directory of this project because that directory also contains modules
whose name conflicts with that of important built-in or third-party
packages, `json.py` for example. This project relies on the fully-qualified
package path of those modules to disambiguate them from the built-in ones
but placing their containing parent directory on `sys.path` defeats that.
This method attempts to counteract that by removing the directory again.
"""
# Can't remove the entry immediately because it might not yet be present.
# Instead, install a hook into the import machinery so it will be removed
# soon after it is added.
sys.meta_path.insert(0, SanitizingFinder())
def share_aws_cli_credential_cache():
"""
By default, boto3 and botocore do not use a cache for the assume-role
provider even though the credentials cache mechanism exists in botocore.
This means that if assuming a role requires you to enter a MFA code, you
will have to enter it every time you instantiate a boto3 or botocore client,
even if your previous session would have lasted longer.
This function connects the assume-role provider with the cache used by the
AWS CLI, saving tedious code reentry. It does so only for boto3.
"""
try:
import boto3
import botocore.credentials
import botocore.session
except ImportError:
_print('Looks like boto3 is not installed. Skipping credential sharing with AWS CLI.')
else:
# Get the AssumeRole credential provider
session = botocore.session.get_session()
resolver = session.get_component('credential_provider')
assume_role_provider = resolver.get_provider('assume-role')
# Make the provider use the same cache as the AWS CLI
cli_cache = Path('~', '.aws', 'cli', 'cache').expanduser()
assume_role_provider.cache = botocore.credentials.JSONFileCache(cli_cache)
# Calls to boto3.client() and .resource() use the default session and
# therefore hit the cached credentials
boto3.setup_default_session(botocore_session=session)
class Path(pathlib.PosixPath):
# Work around https://bugs.python.org/issue30618, fixed in Python 3.9
# noinspection PyProtectedMember,PyUnresolvedReferences
def readlink(self) -> 'Path':
"""
Return the path to which the symbolic link points.
"""
path = self._accessor.readlink(self)
obj = self._from_parts((path,), init=False)
obj._init(template=self)
return obj
def follow(self) -> 'Path':
"""
This method performs one level of symbolic link resolution. For paths
representing a symbolic link with an absolute target, this method is
equivalent to readlink(). For symbolic links with relative targets, this
method returns the result of appending the target to the parent of this
path. The returned path is always absolute.
Unless you need the target of the symbolic link verbatim, you should
prefer this method over readlink().
"""
target = self.readlink()
if target.is_absolute():
return target
else:
return (self.parent / target).absolute()
# Sorely needed, added in 3.8
# noinspection PyProtectedMember,PyUnresolvedReferences
def link_to(self, target: 'Path'):
"""
Create a hard link pointing to a path named target.
"""
if self._closed:
self._raise_closed()
os.link(str(self), str(target))
def is_relative(self):
return not self.is_absolute()
def is_prefix_of(self, other: 'Path'):
"""
>>> Path('/').is_prefix_of(Path('/'))
True
>>> Path('/').is_prefix_of(Path('/a'))
True
>>> Path('/a').is_prefix_of(Path('/'))
False
>>> Path('/a').is_prefix_of(Path('/a/b'))
True
>>> Path('/a/b').is_prefix_of(Path('/a'))
False
"""
if self.is_relative():
raise ValueError('Need absolute path', self)
elif other.is_relative():
raise ValueError('Need absolute path', other)
else:
return other.parts[:len(self.parts)] == self.parts
class EnvhookError(RuntimeError):
pass
class NoActiveVirtualenv(EnvhookError):
def __init__(self) -> None:
super().__init__('Need to be run from within a virtualenv')
class NotASymbolicLinkError(EnvhookError):
def __init__(self, link: Path) -> None:
super().__init__(
f'{link} is not a symbolic link. Make a backup of that file, '
f'remove the original and try again. Note that removing the file '
f'may break other, third-party site customizations.'
)
class BadSymlinkDestination(EnvhookError):
def __init__(self, link: Path, actual: Path, expected: Path) -> None:
super().__init__(
f'Symbolic link {link} points to {actual} instead of {expected}. '
f'Try removing the symbolic link and try again.'
)
class ThirdPartySiteCustomize(EnvhookError):
def __init__(self, sitecustomize: Path) -> None:
super().__init__(
f'A different `sitecustomize` module already exists at '
f'{sitecustomize}. Make a backup of that file, remove the original '
f'and try again. Note that removing the file may break other, '
f'third-party site customizations.'
)
if __name__ == '__main__':
try:
main(sys.argv[1:])
except EnvhookError as e:
_print(e.args[0])
sys.exit(1)
elif __name__ == 'sitecustomize':
if int(os.environ.get('ENVHOOK', '1')) == 0:
_print('Currently disabled because the ENVHOOK environment variable is set to 0.')
else:
sanitize_sys_path()
setenv()
share_aws_cli_credential_cache()
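# Typical invocations from inside an activated virtualenv (a sketch):
#
#   python scripts/envhook.py install   # symlink sitecustomize.py into site-packages
#   python scripts/envhook.py remove    # undo the installation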
```
#### File: azul/scripts/log_api_gateway.py
```python
import json
import sys
# JSON_LOG_FORMAT below is serialized to a string that describes the structure of API Gateway access log entries.
# For more info see https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-logging.html
from azul.deployment import (
aws,
)
# This script should only be called by Terraform.
# Do NOT run manually.
JSON_LOG_FORMAT = {
"requestId": "$context.requestId",
"ip": "$context.identity.sourceIp",
"caller": "$context.identity.caller",
"user": "$context.identity.user",
"requestTime": "$context.requestTime",
"httpMethod": "$context.httpMethod",
"resourcePath": "$context.resourcePath",
"status": "$context.status",
"protocol": "$context.protocol",
"responseLength": "$context.responseLength"
}
def clean_arn(arn: str) -> str:
return arn[:-2] if arn.endswith(':*') else arn
def add_field(client, path: str, value: str, api_id: str, stage_name: str):
client.update_stage(
restApiId=api_id,
stageName=stage_name,
patchOperations=[
{
'op': 'add',
'path': path,
'value': value
}
]
)
def add_logging(api_id: str, stage_name: str, destination_arn: str):
client = aws.client('apigateway')
destination_arn = clean_arn(destination_arn)
for path, value in [('/accessLogSettings/destinationArn', destination_arn),
('/accessLogSettings/format', json.dumps(JSON_LOG_FORMAT))]:
add_field(client, path, value, api_id, stage_name)
if __name__ == "__main__":
add_logging(*sys.argv[1:])
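# Terraform invokes this roughly as follows (the arguments are placeholders):
#
#   python log_api_gateway.py <rest-api-id> <stage-name> <log-group-arn>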
```
#### File: azul/scripts/register_sam.py
```python
from itertools import (
chain,
)
import logging
from azul import (
config,
require,
)
from azul.logging import (
configure_script_logging,
)
from azul.terra import (
TDRClient,
TDRSourceName,
)
log = logging.getLogger(__name__)
def main():
configure_script_logging(log)
tdr = TDRClient()
tdr.register_with_sam()
tdr_catalogs = (
catalog.name
for catalog in config.catalogs.values()
if catalog.plugins['repository'] == 'tdr'
)
for source in set(chain(*map(config.tdr_sources, tdr_catalogs))):
source = TDRSourceName.parse(source)
api_project = tdr.lookup_source_project(source)
require(api_project == source.project,
'Actual Google project of TDR source differs from configured '
'one',
api_project, source)
tdr.check_api_access(source)
tdr.check_bigquery_access(source)
if __name__ == '__main__':
main()
```
#### File: src/azul/bigquery_reservation.py
```python
from typing import (
Optional,
Union,
)
from google.cloud.bigquery_reservation_v1 import (
Assignment,
CapacityCommitment,
Reservation,
ReservationServiceClient,
)
from google.cloud.bigquery_reservation_v1.services.reservation_service.pagers import (
ListAssignmentsPager,
ListCapacityCommitmentsPager,
ListReservationsPager,
)
from google.oauth2.service_account import (
Credentials,
)
from azul import (
RequirementError,
cached_property,
logging,
require,
)
from azul.deployment import (
aws,
)
log = logging.getLogger(__name__)
class SlotManager:
slots = 100
_reservation_id = 'azul-reindex'
capacity_commitment_name: Optional[str]
reservation_name: Optional[str]
assignment_name: Optional[str]
def __init__(self):
self.refresh()
def refresh(self):
for resource_type, path_suffix in [
('capacity_commitment', ''),
('reservation', ''),
('assignment', '/reservations/-')
]:
pager_method = getattr(self._client, f'list_{resource_type}s')
pager = pager_method(parent=self._reservation_parent_path + path_suffix)
setattr(self, f'{resource_type}_name', self._single_resource_name(pager))
# Verify state
self.has_active_slots()
@cached_property
def credentials(self) -> Credentials:
with aws.service_account_credentials() as file_name:
return Credentials.from_service_account_file(file_name)
@cached_property
def _client(self) -> ReservationServiceClient:
return ReservationServiceClient(credentials=self.credentials)
@property
def _project(self) -> str:
return self.credentials.project_id
@property
def _reservation_parent_path(self) -> str:
return self._client.common_location_path(project=self._project,
location='US')
def has_active_slots(self) -> bool:
resources = {
self.capacity_commitment_name,
self.reservation_name,
self.assignment_name
}
if resources == {None}:
return False
elif None not in resources:
return True
else:
raise RequirementError('BigQuery slot commitment state is '
'inconsistent: some, but not all resources '
'are missing',
resources)
def ensure_slots_active(self) -> None:
"""
Idempotently purchase flex slots.
"""
if self.has_active_slots():
log.info('Slot commitment already active')
else:
self.capacity_commitment_name = self._purchase_commitment().name
self.reservation_name = self._create_reservation().name
self.assignment_name = self._create_assignment(self.reservation_name).name
def ensure_slots_deleted(self) -> None:
"""
Idempotently delete flex slots.
"""
if self.has_active_slots():
for resource_type in ('assignment',
'reservation',
'capacity_commitment'):
attr_name = resource_type + '_name'
resource_name = getattr(self, attr_name)
delete_method = getattr(self._client, 'delete_' + resource_type)
delete_method(name=resource_name)
log.info('Deleted resource %r', f'{resource_type}:{resource_name}')
setattr(self, attr_name, None)
else:
log.info('No slot commitment active')
ResourcePager = Union[ListCapacityCommitmentsPager,
ListReservationsPager,
ListAssignmentsPager]
def _single_resource_name(self, resources: ResourcePager) -> Optional[str]:
resources = [resource.name for resource in resources]
try:
resource_name, *extras = resources
except ValueError:
return None
else:
require(not extras,
'Too many resources in path (should be 0 or 1)',
self._reservation_parent_path, resources)
return resource_name
def _purchase_commitment(self) -> CapacityCommitment:
commitment = CapacityCommitment(dict(slot_count=self.slots,
plan=CapacityCommitment.CommitmentPlan.FLEX))
commitment = self._client.create_capacity_commitment(capacity_commitment=commitment,
parent=self._reservation_parent_path)
log.info('Purchased %d BigQuery slots, commitment name: %r',
commitment.slot_count, commitment.name)
return commitment
def _create_reservation(self) -> Reservation:
reservation = Reservation(dict(slot_capacity=self.slots,
ignore_idle_slots=False))
reservation = self._client.create_reservation(reservation=reservation,
reservation_id=self._reservation_id,
parent=self._reservation_parent_path)
log.info('Reserved %d BigQuery slots, reservation name: %r',
reservation.slot_capacity, reservation.name)
return reservation
def _create_assignment(self, reservation_name: str) -> Assignment:
assignment = Assignment(dict(assignee=f'projects/{self._project}',
job_type=Assignment.JobType.QUERY))
assignment = self._client.create_assignment(parent=reservation_name,
assignment=assignment)
log.info('Assigned slots, assignment name: %r', assignment.name)
return assignment
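# A minimal usage sketch (assumes AWS and Google service-account credentials
# are available in the way azul.deployment.aws expects):
#
#   manager = SlotManager()
#   manager.ensure_slots_active()    # purchase flex slots, reservation, assignment
#   ...                              # run the BigQuery-heavy work
#   manager.ensure_slots_deleted()   # idempotently release everything again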
```
#### File: src/azul/es.py
```python
import logging
from aws_requests_auth.boto_utils import (
BotoAWSRequestsAuth,
)
from elasticsearch import (
Elasticsearch,
RequestsHttpConnection,
)
from azul import (
config,
lru_cache,
)
from azul.deployment import (
aws,
)
logger = logging.getLogger(__name__)
class CachedBotoAWSRequestsAuth(BotoAWSRequestsAuth):
def __init__(self, *args, **kwags):
super().__init__(*args, **kwags)
# We use the botocore session from Boto3 since it is pre-configured by
# envhook.py to use cached credentials for the AssumeRoleProvider. This
# avoids repeated entry of MFA tokens when running this code locally.
# noinspection PyProtectedMember
self._refreshable_credentials = aws.boto3_session.get_credentials()
class ESClientFactory:
@classmethod
def get(cls) -> Elasticsearch:
host, port = aws.es_endpoint
return cls._create_client(host, port, config.es_timeout)
@classmethod
@lru_cache(maxsize=32)
def _create_client(cls, host, port, timeout):
logger.debug(f'Creating ES client [{host}:{port}]')
# Implicit retries don't make much sense in conjunction with optimistic locking (versioning). Consider a
# write request that times out in ELB with a 504 while the upstream ES node actually finishes the request.
# Retrying that individual write request will fail with a 409. Instead of retrying just the write request,
# the entire read-modify-write transaction needs to be retried. In order to be in full control of error
# handling, we disable the implicit retries via max_retries=0.
common_params = dict(hosts=[dict(host=host, port=port)],
timeout=timeout,
max_retries=0)
if host.endswith(".amazonaws.com"):
aws_auth = CachedBotoAWSRequestsAuth(aws_host=host,
aws_region=aws.region_name,
aws_service='es')
return Elasticsearch(http_auth=aws_auth,
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection, **common_params)
else:
return Elasticsearch(**common_params)
```
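A brief usage sketch for the factory above; the index name and query are assumptions for illustration only. Because `_create_client` is wrapped in `lru_cache`, repeated calls to `ESClientFactory.get()` against the same endpoint return the same client instance.

```python
from azul.es import ESClientFactory

# Hypothetical example: 'azul_files_dev' is a made-up index name.
es = ESClientFactory.get()
response = es.search(index='azul_files_dev', body={'query': {'match_all': {}}})
print(response['hits']['total'])
```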
#### File: src/azul/remote_debug.py
```python
from contextlib import (
ContextDecorator,
)
from logging import (
getLogger,
)
import os
log = getLogger(__name__)
class RemoteDebugSession(ContextDecorator):
"""
Adapted from https://stackoverflow.com/a/60333250/7830612
According to the Stackoverflow post, since the debug session is over a
long-running TCP connection, we need to close the connection explicitly
within the lambda, otherwise the lambda will time out. A context manager
handles this conveniently.
How to use:
1. In PyCharm select ``Run > Edit Configuration``, then click the **+** to add
a new **Python Debug Server**.
Enter your IP address under **IDE host name**. If you don't have a
static IP, leave the **host name** as ``localhost`` and install
https://ngrok.com/. We will use Ngrok to tunnel a URL to the debug
server on your machine.
Enter a **Port**, such as ``8000``.
Add the following **Path mappings**:
+---------------------------------------+-----------------------------+
| local path | remote path |
+=======================================+=============================+
| ``<absolute project root>/src/azul`` | ``/var/task/azul`` |
+---------------------------------------+-----------------------------+
| ``<absolute project root>/.venv/ | ``/var/task/chalice`` |
| lib/python3.8/site-packages/chalice`` | |
+---------------------------------------+-----------------------------+
| ``<absolute project root | ``/opt/python`` |
| >/.venv/lib/python3.8/site-packages`` | |
+---------------------------------------+-----------------------------+
| ``<home directory>/ | ``/var/lang/lib/python3.8`` |
| .pyenv/versions/3.8.3/lib/python3.8`` | |
+---------------------------------------+-----------------------------+
Depending on which lambda you're debugging, add **one** of the
following:
=============================================== ====================
local path remote path
=============================================== ====================
``<absolute path>/azul/lambdas/service/app.py`` ``/var/task/app.py``
``<absolute path>/azul/lambdas/indexer/app.py`` ``/var/task/app.py``
=============================================== ====================
Copy the ``pydevd-pycharm`` version listed in the configurations.
2. Next make some changes.
Add ``pydevd-pycharm==<version from previous step>`` to
``requirements.txt``.
If using Ngrok run:
::
ngrok tcp 8000
and in your deployment's ``environment.py`` set
``AZUL_REMOTE_DEBUG_ENDPOINT`` to the Forwarding URL and port listed
by Ngrok. Otherwise set this variable to the host and port you used in
your configurations.
3. Start the debug server, by clicking the debug icon with the
configuration selected.
4. Activate the remote debugger in one of two ways:
- Decorate the route you wish to debug with
``RemoteDebugSession()``. Make sure the ``RemoteDebugSession()``
decorator is applied first:
::
@app.route(...)
@RemoteDebugSession()
def my_route(...):
...
- Use ``RemoteDebugSession()`` as a context manager around the code
you wish to debug.
5. Deploy:
::
make deploy
6. Set breakpoints, trigger the lambda, and debug!
Warnings / caveats:
~~~~~~~~~~~~~~~~~~~
- DO NOT USE IN PRODUCTION. There are some security concerns with this
process. Ngrok can potentially snoop on any data sent through its
service. Secondly, a malicious party could connect to your debugger
instead of the lambda and potentially extract data or crash your
system. Setting the wrong IP and port could also allow a
malicious party to take control of the lambda.
- If the debug server isn't running, lambdas will hang until they time
out. Make sure you redeploy untainted code when done.
- By default our lambdas time out after 30 seconds, which can make
debugging difficult. For lambdas triggered by API Gateway, this is
unavoidable. For other lambdas, you can increase the timeout manually
by increasing ``lambda_timeout`` in ``config.json.template.py``.
- If multiple lambda instances try to connect to the debug server,
latecomers will block and may eventually time out. One way to prevent
this is to manually set the lambda's ``reserved_concurrency`` to 1 in
``config.json.template.py``.
"""
def __init__(self):
try:
endpoint = os.environ['AZUL_REMOTE_DEBUG_ENDPOINT']
except KeyError:
log.error('Set AZUL_REMOTE_DEBUG_ENDPOINT to use remote debugging')
raise
else:
self.host, port = endpoint.split(':')
self.port = int(port)
try:
import pydevd
except ImportError:
log.error('Add correct version of pydevd-pycharm to requirements '
'to use remote debugging')
raise
else:
self.pydevd = pydevd
self.active = False
def __enter__(self):
self.pydevd.settrace(self.host,
port=self.port,
suspend=False,
stdoutToServer=True,
stderrToServer=True)
log.info('Starting remote debugging session')
self.active = True
def __exit__(self, exc_type, exc_val, exc_tb):
if self.active:
log.info(f'Stopping remote debugging on {self.host}:{self.port}')
self.pydevd.stoptrace()
self.active = False
return False
```
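As a complement to the decorator form shown in the docstring, here is a minimal sketch of the context-manager form; `suspect_code` is a placeholder for whatever logic you want to step through:

```python
from azul.remote_debug import RemoteDebugSession

def suspect_code(event):
    ...  # placeholder for the logic being debugged

def handler(event, context):
    # Only the code inside the block connects back to the PyCharm debug server
    with RemoteDebugSession():
        return suspect_code(event)
```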
#### File: azul/service/catalog_controller.py
```python
from azul import (
cache,
config,
)
from azul.openapi import (
schema,
)
from azul.plugins import (
Plugin,
RepositoryPlugin,
)
from azul.service import (
Controller,
)
class CatalogController(Controller):
# The custom return type annotation is an experiment. Please don't adopt
# this just yet elsewhere in the program.
def list_catalogs(self) -> schema.object(
default_catalog=str,
catalogs=schema.object(
additional_properties=schema.object(
atlas=str,
internal=bool,
plugins=schema.array(
schema.object(
name=str,
type=str,
additional_properties=True
)
)
)
)
):
return {
'default_catalog': config.default_catalog,
'catalogs': {
catalog.name: {
'internal': catalog.is_internal,
'atlas': catalog.atlas,
'plugins': [
{
'type': plugin_type,
'name': plugin,
**self._plugin_config(plugin_type, catalog.name)
}
for plugin_type, plugin in catalog.plugins.items()
]
}
for catalog in config.catalogs.values()
}
}
@cache
def _plugin_config(self, plugin_base_cls: str, catalog: str):
plugin_base_cls = Plugin.type_for_name(plugin_base_cls)
if issubclass(plugin_base_cls, RepositoryPlugin):
plugin_cls = plugin_base_cls.load(catalog)
plugin = plugin_cls.create(catalog)
return {
'sources': list(map(str, plugin.sources))
}
else:
return {}
```
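For orientation, `list_catalogs` produces a response of the following shape. The catalog, atlas, plugin and source names below are made-up examples; only the keys are dictated by the code above, and the `sources` entry is added only for repository plugins by `_plugin_config`.

```python
{
    'default_catalog': 'example',
    'catalogs': {
        'example': {
            'internal': False,
            'atlas': 'example-atlas',
            'plugins': [
                {'type': 'metadata', 'name': 'example-metadata-plugin'},
                {'type': 'repository', 'name': 'example-repository-plugin',
                 'sources': ['example-source']}
            ]
        }
    }
}
```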
#### File: azul/service/repository_controller.py
```python
import time
from typing import (
Mapping,
Optional,
Sequence,
Tuple,
)
from chalice import (
BadRequestError,
NotFoundError,
)
from azul import (
CatalogName,
cache,
cached_property,
config,
reject,
)
from azul.plugins import (
RepositoryPlugin,
)
from azul.service import (
Controller,
)
from azul.service.index_query_service import (
IndexQueryService,
)
class RepositoryController(Controller):
@cached_property
def service(self):
return IndexQueryService()
@classmethod
@cache
def repository_plugin(cls, catalog: CatalogName) -> RepositoryPlugin:
return RepositoryPlugin.load(catalog).create(catalog)
def _parse_range_request_header(self,
range_specifier: str
) -> Sequence[Tuple[Optional[int], Optional[int]]]:
"""
>>> rc = RepositoryController(lambda_context=None, file_url_func=None)
>>> rc._parse_range_request_header('bytes=100-200,300-400')
[(100, 200), (300, 400)]
>>> rc._parse_range_request_header('bytes=-100')
[(None, 100)]
>>> rc._parse_range_request_header('bytes=100-')
[(100, None)]
>>> rc._parse_range_request_header('foo=100')
[]
>>> rc._parse_range_request_header('')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Invalid range specifier ''
>>> rc._parse_range_request_header('100-200')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Invalid range specifier '100-200'
>>> rc._parse_range_request_header('bytes=')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Invalid range specifier 'bytes='
>>> rc._parse_range_request_header('bytes=100')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Invalid range specifier 'bytes=100'
>>> rc._parse_range_request_header('bytes=-')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Invalid range specifier 'bytes=-'
>>> rc._parse_range_request_header('bytes=--')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Invalid range specifier 'bytes=--'
"""
def to_int_or_none(value: str) -> Optional[int]:
return None if value == '' else int(value)
parsed_ranges = []
try:
unit, ranges = range_specifier.split('=')
if unit == 'bytes':
for range_spec in ranges.split(','):
start, end = range_spec.split('-')
reject(start == '' and end == '', 'Empty range')
parsed_ranges.append((to_int_or_none(start), to_int_or_none(end)))
else:
reject(unit == '', 'Empty range unit')
except Exception as e:
raise BadRequestError(f'Invalid range specifier {range_specifier!r}') from e
return parsed_ranges
def download_file(self,
catalog: CatalogName,
fetch: bool,
file_uuid: str,
query_params: Mapping[str, str],
headers: Mapping[str, str]):
file_version = query_params.get('version')
replica = query_params.get('replica')
file_name = query_params.get('fileName')
drs_path = query_params.get('drsPath')
wait = query_params.get('wait')
request_index = int(query_params.get('requestIndex', '0'))
token = query_params.get('token')
plugin = self.repository_plugin(catalog)
download_cls = plugin.file_download_class()
if request_index == 0:
file = self.service.get_data_file(catalog=catalog,
file_uuid=file_uuid,
file_version=file_version)
if file is None:
raise NotFoundError(f'Unable to find file {file_uuid!r}, '
f'version {file_version!r} in catalog {catalog!r}')
file_version = file['version']
drs_path = file['drs_path']
file_size = file['size']
if file_name is None:
file_name = file['name']
else:
file_size = None
assert file_version is not None
assert file_name is not None
# Due to https://github.com/curl/curl/issues/6740 causing curl to error
# when trying to resume a previously completed file download, we check
# for a range request starting at the end of the file and instead of
# returning a 416 (Range Not Satisfiable) as specified in RFC7233
# https://tools.ietf.org/html/rfc7233#section-4.4 we return a 206
# (Partial Content) with an empty body.
try:
range_specifier = headers['range']
except KeyError:
pass
else:
requested_range = self._parse_range_request_header(range_specifier)
if requested_range == [(file_size, None)]:
return {
'Status': 206,
'Content-Length': 0
}
download = download_cls(file_uuid=file_uuid,
file_name=file_name,
file_version=file_version,
drs_path=drs_path,
replica=replica,
token=token)
download.update(plugin)
if download.retry_after is not None:
retry_after = min(download.retry_after, int(1.3 ** request_index))
query_params = {
'version': download.file_version,
'fileName': download.file_name,
'requestIndex': request_index + 1
}
if download.drs_path is not None:
query_params['drsPath'] = download.drs_path
if download.token is not None:
query_params['token'] = download.token
if download.replica is not None:
query_params['replica'] = download.replica
if wait is not None:
if wait == '0':
pass
elif wait == '1':
# Sleep in the lambda but ensure that we wake up before it
# runs out of execution time (and before API Gateway times
# out) so we get a chance to return a response to the client
remaining_time = self.lambda_context.get_remaining_time_in_millis() / 1000
server_side_sleep = min(float(retry_after),
remaining_time - config.api_gateway_timeout_padding - 3)
time.sleep(server_side_sleep)
retry_after = round(retry_after - server_side_sleep)
else:
assert False, wait
query_params['wait'] = wait
return {
'Status': 301,
**({'Retry-After': retry_after} if retry_after else {}),
'Location': self.file_url_func(catalog=catalog,
file_uuid=file_uuid,
fetch=fetch,
**query_params)
}
elif download.location is not None:
return {
'Status': 302,
'Location': download.location
}
else:
assert False
```
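A quick illustration of the retry pacing in `download_file`: the suggested `Retry-After` is the smaller of the delay reported by the download class and an exponentially growing bound, `retry_after = min(download.retry_after, int(1.3 ** request_index))`.

```python
# Illustration only: growth of the exponential bound per retry.
for request_index in range(11):
    print(request_index, int(1.3 ** request_index))
# request_index: 0 1 2 3 4 5 6 7  8  9 10
# bound (secs):  1 1 1 2 2 3 4 6  8 10 13
```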
#### File: azul/service/storage_service.py
```python
from concurrent.futures import (
ThreadPoolExecutor,
as_completed,
)
from dataclasses import (
dataclass,
)
from logging import (
getLogger,
)
from threading import (
BoundedSemaphore,
)
import time
from typing import (
Mapping,
Optional,
)
from urllib.parse import (
urlencode,
)
from azul import (
config,
)
from azul.deployment import (
aws,
)
logger = getLogger(__name__)
AWS_S3_DEFAULT_MINIMUM_PART_SIZE = 5242880 # 5 MB; see https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
MULTIPART_UPLOAD_MAX_WORKERS = 4
# The number of pending tasks that can be queued for execution. A value of 0
# allows no tasks to be queued; only running tasks are allowed in the thread pool.
MULTIPART_UPLOAD_MAX_PENDING_PARTS = 4
Tagging = Mapping[str, str]
class StorageService:
def __init__(self, bucket_name=config.s3_bucket):
self.bucket_name = bucket_name
@property
def client(self):
return aws.client('s3')
def head(self, object_key: str) -> dict:
return self.client.head_object(Bucket=self.bucket_name, Key=object_key)
def get(self, object_key: str) -> bytes:
return self.client.get_object(Bucket=self.bucket_name, Key=object_key)['Body'].read()
def put(self,
object_key: str,
data: bytes,
content_type: Optional[str] = None,
tagging: Optional[Tagging] = None,
**kwargs):
self.client.put_object(Bucket=self.bucket_name,
Key=object_key,
Body=data,
**self._object_creation_kwargs(content_type=content_type, tagging=tagging),
**kwargs)
def put_multipart(self,
object_key: str,
content_type: Optional[str] = None,
tagging: Optional[Tagging] = None):
return MultipartUploadHandler(object_key,
**self._object_creation_kwargs(content_type=content_type, tagging=tagging))
def upload(self,
file_path: str,
object_key: str,
content_type: Optional[str] = None,
tagging: Optional[Tagging] = None):
self.client.upload_file(Filename=file_path,
Bucket=self.bucket_name,
Key=object_key,
ExtraArgs=self._object_creation_kwargs(content_type=content_type))
# upload_file doesn't support tags so we need to make a separate request
# https://stackoverflow.com/a/56351011/7830612
if tagging:
self.put_object_tagging(object_key, tagging)
def _object_creation_kwargs(self, *,
content_type: Optional[str] = None,
tagging: Optional[Tagging] = None):
kwargs = {}
if content_type is not None:
kwargs['ContentType'] = content_type
if tagging is not None:
kwargs['Tagging'] = urlencode(tagging)
return kwargs
def get_presigned_url(self, key: str, file_name: Optional[str] = None) -> str:
"""
Return a pre-signed URL to the given key.
:param key: the key of the S3 object whose content a request to the signed URL will return
:param file_name: the file name to be returned as part of a Content-Disposition header in the response to a
request to the signed URL. If None, no such header will be present in the response.
"""
assert file_name is None or '"' not in file_name
return self.client.generate_presigned_url(
ClientMethod=self.client.get_object.__name__,
Params={
'Bucket': self.bucket_name,
'Key': key,
**({} if file_name is None else {'ResponseContentDisposition': f'attachment;filename="{file_name}"'})
})
def create_bucket(self, bucket_name: str = None):
self.client.create_bucket(Bucket=(bucket_name or self.bucket_name))
def put_object_tagging(self, object_key: str, tagging: Tagging = None):
deadline = time.time() + 60
tagging = {'TagSet': [{'Key': k, 'Value': v} for k, v in tagging.items()]}
while True:
try:
self.client.put_object_tagging(Bucket=self.bucket_name,
Key=object_key,
Tagging=tagging)
except self.client.exceptions.NoSuchKey:
if time.time() > deadline:
logger.error('Unable to tag %s on object.', tagging)
raise
else:
logger.warning('Object key %s is not found. Retrying in 5 s.', object_key)
time.sleep(5)
else:
break
def get_object_tagging(self, object_key: str) -> Tagging:
response = self.client.get_object_tagging(Bucket=self.bucket_name, Key=object_key)
tagging = {tag['Key']: tag['Value'] for tag in response['TagSet']}
return tagging
class MultipartUploadHandler:
"""
A context manager that facilitates multipart upload to S3. It uploads parts
concurrently.
Sample usage:
.. code-block:: python
with MultipartUploadHandler('samples.txt') as handler:
handler.push(b'abc')
handler.push(b'defg')
# ...
Upon exit of the body of the with statement, all parts will have been
uploaded and the S3 object is guaranteed to exist, or an exception is raised.
When an exception is raised within the context, the upload will be aborted
automatically.
"""
bucket_name = config.s3_bucket
def __init__(self, object_key, **kwargs):
self.object_key = object_key
self.kwargs = kwargs
self.upload_id = None
self.mp_upload = None
self.next_part_number = 1
self.parts = []
self.futures = []
self.thread_pool = None
self.semaphore = None
def __enter__(self):
api_response = aws.client('s3').create_multipart_upload(Bucket=self.bucket_name,
Key=self.object_key,
**self.kwargs)
self.upload_id = api_response['UploadId']
self.mp_upload = aws.resource('s3').MultipartUpload(self.bucket_name, self.object_key, self.upload_id)
self.thread_pool = ThreadPoolExecutor(max_workers=MULTIPART_UPLOAD_MAX_WORKERS)
self.semaphore = BoundedSemaphore(MULTIPART_UPLOAD_MAX_PENDING_PARTS + MULTIPART_UPLOAD_MAX_WORKERS)
return self
def __exit__(self, etype, value, traceback):
if etype:
logger.error('Upload %s: Error detected within the MPU context.',
self.upload_id,
exc_info=(etype, value, traceback)
)
self.__abort()
else:
self.__complete()
def __complete(self):
for future in as_completed(self.futures):
exception = future.exception()
if exception is not None:
logger.error('Upload %s: Error detected while uploading a part.',
self.upload_id,
exc_info=exception)
self.__abort()
raise MultipartUploadError(self.bucket_name, self.object_key) from exception
try:
self.mp_upload.complete(MultipartUpload={"Parts": [part.to_dict() for part in self.parts]})
except self.mp_upload.meta.client.exceptions.ClientError as exception:
logger.error('Upload %s: Error detected while completing the upload.',
self.upload_id,
exc_info=exception)
self.__abort()
raise MultipartUploadError(self.bucket_name, self.object_key) from exception
self.mp_upload = None
self.thread_pool.shutdown()
def __abort(self):
logger.info('Upload %s: Aborting', self.upload_id)
# This implementation will ignore any pending/active part uploads and force the thread pool to shut down.
self.mp_upload.abort()
self.mp_upload = None
self.thread_pool.shutdown(wait=False)
logger.warning('Upload %s: Aborted', self.upload_id)
def _submit(self, fn, *args, **kwargs):
# Taken from https://www.bettercodebytes.com/theadpoolexecutor-with-a-bounded-queue-in-python/
self.semaphore.acquire()
try:
future = self.thread_pool.submit(fn, *args, **kwargs)
except Exception as e:
self.semaphore.release()
raise e
else:
future.add_done_callback(lambda _future: self.semaphore.release())
return future
def push(self, data: bytes):
part = self._create_new_part(data)
self.futures.append(self._submit(self._upload_part, part))
def _create_new_part(self, data: bytes):
part = Part(part_number=self.next_part_number, etag=None, content=data)
self.parts.append(part)
self.next_part_number += 1
return part
def _upload_part(self, part):
upload_part = self.mp_upload.Part(part.part_number)
result = upload_part.upload(Body=part.content)
part.etag = result['ETag']
part.content = None
@dataclass
class Part:
etag: Optional[str] # If ETag is defined, the content is already pushed to S3.
part_number: int
content: bytes
@property
def already_uploaded(self):
return self.etag is not None
def to_dict(self):
return dict(PartNumber=self.part_number, ETag=self.etag)
class MultipartUploadError(RuntimeError):
def __init__(self, bucket_name, object_key):
super(MultipartUploadError, self).__init__(f'{bucket_name}/{object_key}')
```
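A hedged usage sketch tying `StorageService.put_multipart` to the handler above; the object key, file path and content type are illustrative assumptions. Every pushed part except the last must be at least 5 MiB (see `AWS_S3_DEFAULT_MINIMUM_PART_SIZE`).

```python
from azul.service.storage_service import StorageService

def read_chunks(path: str, chunk_size: int = 8 * 1024 * 1024):
    # Yield fixed-size chunks; all but the last satisfy the 5 MiB minimum part size
    with open(path, 'rb') as f:
        while chunk := f.read(chunk_size):
            yield chunk

service = StorageService()
with service.put_multipart('exports/large.tsv',
                           content_type='text/tab-separated-values') as upload:
    for chunk in read_chunks('/tmp/large.tsv'):
        upload.push(chunk)
# Leaving the block normally completes the upload; an exception inside it aborts the upload.
```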
#### File: src/azul/terraform.py
```python
from itertools import (
chain,
)
import json
import logging
from pathlib import (
Path,
)
import subprocess
from typing import (
Dict,
Iterable,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import attr
from azul import (
cached_property,
config,
require,
)
from azul.template import (
emit,
)
from azul.types import (
AnyJSON,
JSON,
JSONs,
)
log = logging.getLogger(__name__)
@attr.s(auto_attribs=True, kw_only=True, frozen=True)
class TerraformSchema:
versions: Sequence[str]
document: JSON
path: Path
@classmethod
def load(cls, path: Path):
with path.open() as f:
doc = json.load(f)
return cls(versions=doc['versions'],
document=doc['schema'],
path=path)
def store(self):
with self.path.open('w') as f:
json.dump(dict(versions=self.versions,
schema=self.document), f, indent=4)
class Terraform:
def taggable_resource_types(self) -> Sequence[str]:
schema = self.schema.document
require(schema['format_version'] == '0.1')
resources = chain.from_iterable(
schema['provider_schemas'][provider]['resource_schemas'].items()
for provider in schema['provider_schemas']
)
return [
resource_type
for resource_type, resource in resources
if 'tags' in resource['block']['attributes']
]
def run(self, *args: str) -> str:
terraform_dir = Path(config.project_root) / 'terraform'
args = ['terraform', *args]
log.info('Running %r', args)
cmd = subprocess.run(args,
cwd=terraform_dir,
check=True,
stdout=subprocess.PIPE,
text=True,
shell=False)
return cmd.stdout
schema_path = Path(config.project_root) / 'terraform' / '_schema.json'
@cached_property
def schema(self):
return TerraformSchema.load(self.schema_path)
def update_schema(self):
schema = self.run('providers', 'schema', '-json')
schema = TerraformSchema(versions=self.versions,
document=json.loads(schema),
path=self.schema_path)
schema.store()
# Reset the cache
try:
# noinspection PyPropertyAccess
del self.schema
except AttributeError:
pass
@cached_property
def versions(self) -> Sequence[str]:
# `terraform -version` prints a warning if you are not running the latest
# release of Terraform. We discard that warning; otherwise we would need to update
# the tracked schema every time a new version of Terraform is released
output = self.run('-version')
log.info('Terraform output:\n%s', output)
versions, footer = output.split('\n\n')
return sorted(versions.splitlines())
terraform = Terraform()
del Terraform
def _sanitize_tf(tf_config: JSON) -> JSON:
"""
Avoid errors like
Error: Missing block label
on api_gateway.tf.json line 12:
12: "resource": []
At least one object property is required, whose name represents the resource
block's type.
"""
return {k: v for k, v in tf_config.items() if v}
def _normalize_tf(tf_config: Union[JSON, JSONs]) -> Iterable[Tuple[str, AnyJSON]]:
"""
Certain levels of a Terraform JSON structure can either be a single
dictionary or a list of dictionaries. For example, these are equivalent:
{"resource": {"resource_type": {"resource_id": {"foo": ...}}}}
{"resource": [{"resource_type": {"resource_id": {"foo": ...}}}]}
So are these:
{"resource": {"type": {"id": {"foo": ...}, "id2": {"bar": ...}}}}
{"resource": {"type": [{"id": {"foo": ...}}, {"id2": {"bar": ...}}]}}
This function normalizes input to prefer the second form of both cases to
make parsing Terraform configuration simpler. It returns an iterator of the
dictionary entries in the argument, regardless which form is used.
>>> list(_normalize_tf({'foo': 'bar'}))
[('foo', 'bar')]
>>> list(_normalize_tf([{'foo': 'bar'}]))
[('foo', 'bar')]
>>> list(_normalize_tf({"foo": "bar", "baz": "qux"}))
[('foo', 'bar'), ('baz', 'qux')]
>>> list(_normalize_tf([{"foo": "bar"}, {"baz": "qux"}]))
[('foo', 'bar'), ('baz', 'qux')]
>>> list(_normalize_tf([{"foo": "bar", "baz": "qux"}]))
[('foo', 'bar'), ('baz', 'qux')]
"""
if isinstance(tf_config, dict):
return tf_config.items()
elif isinstance(tf_config, list):
return chain.from_iterable(d.items() for d in tf_config)
else:
assert False, type(tf_config)
def populate_tags(tf_config: JSON) -> JSON:
"""
Add tags to all taggable resources and change the `name` tag to `Name`
for tagged AWS resources.
"""
taggable_resource_types = terraform.taggable_resource_types()
try:
resources = tf_config['resource']
except KeyError:
return tf_config
else:
return {
k: v if k != 'resource' else [
{
resource_type: [
{
resource_name: {
**arguments,
'tags': _adjust_name_tag(resource_type,
_tags(resource_name, **arguments.get('tags', {})))
} if resource_type in taggable_resource_types else arguments
}
for resource_name, arguments in _normalize_tf(resource)
]
}
for resource_type, resource in _normalize_tf(resources)
]
for k, v in tf_config.items()
}
def emit_tf(tf_config: Optional[JSON]):
if tf_config is None:
return emit(tf_config)
else:
return emit(_sanitize_tf(populate_tags(tf_config)))
def _tags(resource_name: str, **overrides: str) -> Dict[str, str]:
"""
Return tags named for cloud resources based on :class:`azul.Config`.
:param resource_name: The Terraform name of the resource.
:param overrides: Additional tags that override the defaults.
>>> from azul.doctests import assert_json
>>> assert_json(_tags('service')) #doctest: +ELLIPSIS
{
"project": "dcp",
"service": "azul",
"deployment": "...",
"owner": ...,
"name": "azul-service-...",
"component": "azul-service"
}
>>> from azul.doctests import assert_json
>>> assert_json(_tags('service', project='foo')) #doctest: +ELLIPSIS
{
"project": "foo",
"service": "azul",
"deployment": "...",
"owner": ...,
"name": "azul-service-...",
"component": "azul-service"
}
"""
return {
'project': 'dcp',
'service': config.resource_prefix,
'deployment': config.deployment_stage,
'owner': config.owner,
'name': config.qualified_resource_name(resource_name),
'component': f'{config.resource_prefix}-{resource_name}',
**overrides
}
def _adjust_name_tag(resource_type: str, tags: Dict[str, str]) -> Dict[str, str]:
return {
'Name' if k == 'name' and resource_type.startswith('aws_') else k: v
for k, v in tags.items()
}
U = TypeVar('U', bound=AnyJSON)
class Chalice:
def package_dir(self, lambda_name):
return Path(config.project_root) / 'lambdas' / lambda_name / '.chalice' / 'terraform'
def module_dir(self, lambda_name):
return Path(config.project_root) / 'terraform' / lambda_name
package_zip_name = 'deployment.zip'
tf_config_name = 'chalice.tf.json'
resource_name_suffix = '-event'
def resource_name_mapping(self, tf_config: JSON) -> Dict[Tuple[str, str], str]:
"""
Some Chalice-generated resources have names that are incompatible with
our convention for generating fully qualified resource names. This
method returns a dictionary that, for each affected resource in the
given configuration, maps the resource's type and current name to a name
that's compatible with the convention.
"""
mapping = {}
for resource_type, resources in tf_config['resource'].items():
for name in resources:
if name.endswith(self.resource_name_suffix):
new_name = name[:-len(self.resource_name_suffix)]
mapping[resource_type, name] = new_name
return mapping
def patch_resource_names(self, tf_config: JSON) -> JSON:
"""
Some Chalice-generated resources have names that are incompatible with
our convention for generating fully qualified resource names. This
method transforms the given Terraform configuration to use names that
are compatible with the convention.
>>> from azul.doctests import assert_json
>>> assert_json(chalice.patch_resource_names({
... "resource": {
... "aws_cloudwatch_event_rule": {
... "indexercachehealth-event": { # patch
... "name": "indexercachehealth-event" # leave
... }
... },
... "aws_cloudwatch_event_target": {
... "indexercachehealth-event": { # patch
... "rule": "${aws_cloudwatch_event_rule.indexercachehealth-event.name}", # patch
... "target_id": "indexercachehealth-event", # leave
... "arn": "${aws_lambda_function.indexercachehealth.arn}"
... }
... },
... "aws_lambda_permission": {
... "indexercachehealth-event": { # patch
... "function_name": "azul-indexer-prod-indexercachehealth",
... "source_arn": "${aws_cloudwatch_event_rule.indexercachehealth-event.arn}" # patch
... }
... },
... "aws_lambda_event_source_mapping": {
... "contribute-sqs-event-source": {
... "batch_size": 1
... }
... }
... }
... }))
{
"resource": {
"aws_cloudwatch_event_rule": {
"indexercachehealth": {
"name": "indexercachehealth-event"
}
},
"aws_cloudwatch_event_target": {
"indexercachehealth": {
"rule": "${aws_cloudwatch_event_rule.indexercachehealth.name}",
"target_id": "indexercachehealth-event",
"arn": "${aws_lambda_function.indexercachehealth.arn}"
}
},
"aws_lambda_permission": {
"indexercachehealth": {
"function_name": "azul-indexer-prod-indexercachehealth",
"source_arn": "${aws_cloudwatch_event_rule.indexercachehealth.arn}"
}
},
"aws_lambda_event_source_mapping": {
"contribute-sqs-event-source": {
"batch_size": 1
}
}
}
}
"""
mapping = self.resource_name_mapping(tf_config)
tf_config = {
block_name: {
resource_type: {
mapping.get((resource_type, name), name): resource
for name, resource in resources.items()
}
for resource_type, resources in block.items()
} if block_name == 'resource' else block
for block_name, block in tf_config.items()
}
def ref(resource_type, name):
return '${' + resource_type + '.' + name + '.'
ref_map = {
ref(resource_type, name): ref(resource_type, new_name)
for (resource_type, name), new_name in mapping.items()
}
def patch_refs(v: U) -> U:
if isinstance(v, dict):
return {k: patch_refs(v) for k, v in v.items()}
elif isinstance(v, str):
for old_ref, new_ref in ref_map.items():
if old_ref in v:
return v.replace(old_ref, new_ref)
return v
else:
return v
return patch_refs(tf_config)
chalice = Chalice()
```
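A small, deterministic illustration of the name-tag adjustment that `populate_tags` applies to AWS resources; the tag values are made-up examples:

```python
>>> _adjust_name_tag('aws_instance', {'name': 'azul-gitlab', 'owner': 'example'})
{'Name': 'azul-gitlab', 'owner': 'example'}

>>> _adjust_name_tag('google_project_iam_member', {'name': 'azul-gitlab'})
{'name': 'azul-gitlab'}
```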
#### File: terraform/gitlab/gitlab.tf.json.template.py
```python
import gzip
from itertools import (
chain,
)
import json
import os
from textwrap import (
dedent,
)
from typing import (
Iterable,
List,
Set,
Tuple,
Union,
)
from azul import (
config,
lru_cache,
)
from azul.aws_service_model import (
ServiceActionType,
)
from azul.collections import (
dict_merge,
explode_dict,
)
from azul.deployment import (
aws,
)
from azul.strings import (
departition,
)
from azul.terraform import (
emit_tf,
)
from azul.types import (
JSON,
)
# This Terraform config creates a single EC2 instance with a bunch of Docker containers running on it:
#
# ╔═══════════════════════════════════════════════════════════════════════════════════════╗
# ║ gitlab ║
# ║ ┏━━━━━━━━━━━━━━━━━━━┓ ┏━━━━━━━━━━━━━━━━━━━┓ ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ ║
# ┌─────┐ ║ ┃ gitlab ┃ ┃ gitlab-runner ┃ ┃ gitlab-dind ┃ ║
# │ ALB │ ║ ┃ ┌─────────┐ ┃ ┃ ┌───────────────┐ ┃ ┃ ┌────────────┐ ┃ ║
# ┌──▶ 443├ ─ ─ ┼──╬─╋───▶ 80│ nginx │ ┃ ┃ │ gitlab-runner ├─╋─╋──▶ 2375│ dockerd │docker.sock ◀──┐ ┃ ║
# │ └─────┘ ║ ┃ └─────────┘ ┃ ┃ └───────┬───┬───┘ ┃ ┃ └────────────┘ │ ┃ ║
# │ ┌─────┐ ║ ┃ ┌─────────┐ ┃ ┗━━━━━━━━━╋━━━━━━━━━┛ ┃ ┌────────────┐ │ ┃ ║
# │ 22├ ─ ─ ┼──╬─╋─▶ 2222│ sshd │ ┃ ▲ │ │ ┃ │ containerd │ │ ┃ ║
# │ │ │ ║ ┃ └─────────┘ ┃ │ ┃ └────────────┘ │ ┃ ║
# │ │ │ ║ ┃ ┌─────────┐ ┃ │ │ │ ┃ ┏━━━━━━━━━━━━━━━━┓ │ ┃ ║
# │ │ │ ║ ┃ │ rails │─┃─ ─ ─ ─ │ ┃ ┃ "build" ┃ │ ┃ ║
# │ │ │ ║ ┃ └─────────┘ ┃ │ │ ┃ ┃ ┌────────────┐ ┃ │ ┃ ║
# │ │ │ ║ ┃ ┌─────────┐ ┃ │ ┃ ┃ │ make │ ┃ │ ┃ ║
# │ │ NLB │ ║ ┃ │postgres │ ┃ │ └ ─ ─ ─ ╋ ─ ─ ▶┃ └────────────┘ ┃ │ ┃ ║
# │ │ │ ║ ┃ └─────────┘ ┃ │ ┃ ┃ ┌────────────┐ ┃ │ ┃ ║
# │ │ │ ║ ┗━━━━━━━━━━━━━━━━━━━┛ │ ┃ ┌────╋─┤ python ├─╋─────────────┘ ┃ ║
# │ │ │ ║ ┏━━━━━━━━━━━━━━━━━━━┓ │ ┃ │ ┃ └──────┬─────┘ ┃ ┃ ║
# │ │ │ ║ ┃ console ┃ │ ┃ │ ┗━━━━━━━━━━━━━━━━┛ ┃ ║
# │ │ │ ║ ┃ ┌─────────┐ ┃ │ ┃ │ └ ─ ─ ─ ─ ─ ─ ─ ┐ ┃ ║
# │ 2222├ ─ ─ ┼──╬─╋───▶ 22│ sshd │ ┃ │ ┃ │ ┏━━━━━━━━━━━━━━━━━━━━━━┓ ┃ ║
# │ └─────┘ ║ ┃ └─────────┘ ┃ │ ┃ │ ┃ elasticsearch ┃ │ ┃ ║
# │ ║ ┗━━━━━━━━━━━━━━━━━━━┛ │ ┃ │ ┃ ┌────────────┐ ┃ ┃ ║
# └────────────────╬─────────────────────────────────┘ ┃ ├─╋─▶ 9200│ java │ ┃◀ ─ ┤ ┃ ║
# ║ ┃ │ ┃ └────────────┘ ┃ ┃ ║
# ║ ┃ │ ┗━━━━━━━━━━━━━━━━━━━━━━┛ │ ┃ ║
# ║ ┃ │ ┏━━━━━━━━━━━━━━━━━━━━━━┓ ┃ ║
# ║ ┃ │ ┃ dynamodb-local ┃ │ ┃ ║
# ║ ┃ │ ┃ ┌────────────┐ ┃ ┃ ║
# ║ ┃ └─╋─▶ 8000│ java │ ┃◀ ─ ┘ ┃ ║
# ║ ┃ ┃ └────────────┘ ┃ ┃ ║
# ║ ┃ ┗━━━━━━━━━━━━━━━━━━━━━━┛ ┃ ║
# ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║
# ╚═══════════════════════════════════════════════════════════════════════════════════════╝
#
# ╔══════════╗ ┏━━━━━━━━━━━┓ ┌─────────┐
# ║ instance ║ ┃ container ┃ │ process │ ──────interact──────▶ ─ ─ ─ ─invoke ─ ─ ─ ▶
# ╚══════════╝ ┗━━━━━━━━━━━┛ └─────────┘
#
# The instance is fronted by two AWS load balancers:
#
# 1) an application load balancer (ALB) that terminates SSL and forwards to the Gitlab web UI
#
# 2) a network load balancer that forwards port 22 to an SSH daemon in the Gitlab container (for git+ssh://) and
# port 2222 to an SSH daemon for shell access in RancherOS' `console` container.
#
# The instance itself does not have a public IP and is only reachable from the internet through the load balancers.
#
# The NLB's public IP is bound to ssh.gitlab.{dev,prod}.singlecell.gi.ucsc.edu
# The ALB's public IP is bound to gitlab.{dev,prod}.singlecell.gi.ucsc.edu
# To log into the instance run `ssh [email protected] -p 2222`. Your SSH key
# must be mentioned in public_key or other_public_keys below.
#
# The Gitlab web UI is at https://gitlab.{dev,prod}.singlecell.gi.ucsc.edu/.
# It's safe to destroy all resources in this TF config. You can always build them up again. The only golden egg is
# the EBS volume that's attached to the instance. See below under ebs_volume_name.
# RancherOS was chosen for the AMI because it has Docker pre-installed and supports cloud-init user data.
#
# The container wiring is fairly complicated as it involves docker-in-docker. It is inspired by
#
# https://medium.com/@tonywooster/docker-in-docker-in-gitlab-runners-220caeb708ca
#
# In this setup the build container is not privileged while allowing for image layer caching between builds. The
# `elasticsearch` and `dynamodb-local` containers are included as examples of test fixtures launched during test
# setup. This aspect may evolve over time. It's worth noting that these fixture containers are siblings of the build
# container. When the tests are run locally or on Travis, the tests run on the host. The above diagram also glosses
# over the fact that there are multiple separate bridge networks involved. The `gitlab-dind` and `gitlab-runner`
# containers are attached to a separate bridge network. The `gitlab` container is on the default bridge network.
# IMPORTANT: There is a bug in the Terraform AWS provider (I think it's conflating the listeners) which causes one of
# the NLB listeners to be missing after `terraform apply`.
# The name of an EBS volume to attach to the instance. This EBS volume must exist and be formatted with ext4. We
# don't manage the volume in Terraform because that would require formatting it once after creation. That can only be
# done after attaching it to an EC2 instance but before mounting it. This turns out to be difficult and risks
# overwriting existing data on the volume. We'd also have to prevent the volume from being deleted during `terraform
# destroy`.
#
# If this EBS volume does not exist, you must create it with the desired size before running Terraform. To format
# the volume, you can either attach it to some other Linux instance and format it there or use `make terraform`
# to create the actual Gitlab instance and attach the volume. For the latter you would need to ssh into the Gitlab
# instance, format `/dev/xvdf` (`/dev/nvme1n1` on newer instance types) and reboot the instance.
#
# The EBS volume should be backed up (EBS snapshot) periodically. Not only does it contain Gitlab's data but also its
# config.
#
ebs_volume_name = "azul-gitlab"
num_zones = 2 # An ALB needs at least two availability zones
# List of port forwardings by the network load balancer (NLB). The first element in the tuple is the port on the
# external interface of the NLB, the second element is the port on the instance the NLB forwards to.
#
nlb_ports = [(22, 2222, 'git'), (2222, 22, 'ssh')]
# The Azul Gitlab instance uses one VPC. This variable specifies the IPv4 address block to be used by that VPC.
#
# Be sure to avoid the default Docker address pool:
#
# https://github.com/docker/libnetwork/blob/a79d3687931697244b8e03485bf7b2042f8ec6b6/ipamutils/utils.go#L10
#
vpc_cidr = "192.168.127.12/16"
# The name of the SSH keypair whose public key is to be deposited on the instance by AWS
#
key_name = "<EMAIL>"
# The public key of that keypair
#
public_key = (
"ssh-rsa"
" "
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
" "
"<EMAIL>"
)
other_public_keys = [
(
"ssh-ed25519"
" "
"<KEY>"
" "
"<EMAIL>"
),
(
"ssh-rsa"
" "
"AAAAB3NzaC1yc2EAAAADAQABAAACAQDLz+TFlfqDmzkTnqEq4wK/yvZVGXDeezzzxaGfesEzXdJoST2br1cxvaImg3TkB"
"NEQam9vxBlQ6ZfyydskJpXyIADMt/YTr4gqMfqC0drqaX0MVRU2mpD+n+N83ADNqq5KJdTvfBW4yGCn/duXDKpYeNde92"
"/W6AM2gmEilIbgSkRR4b++p3cJ4Gnb81cQbNl87dZ4EKVgN1QClOQR/l24xuQyuUT5AiEzSsmHH1BHc3p8fhg7TZg77R9"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>
" "
"<EMAIL>"
),
(
"ssh-rsa"
" "
"<KEY>"
"S+iR6M80WhZskivMg62xoLORnYrtj4ZDbVLCqnkphVHhhDXXl/Rdid9217+iIsRemiIk7qHfD3WG78WERGsTqKayjmzW4"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"oB17qteWRvuXV1tHo+EWJwf9nexOe+1YczMFYdFoKqqkjwG0BHLWlWOJQja+g5Q=="
" "
"<EMAIL>"
),
(
"ssh-rsa"
" "
"<KEY>zMU7JwjO7a/BvsWg0tXESgpL59i5QcycpYq6q"
"<KEY>"
"<KEY>
"<KEY>"
" "
"<EMAIL>"
),
(
"ssh-rsa"
" "
"<KEY>"
"nFUw2mJkMmWMwEAgoj39aeWEtdtvDG6o6eqQfLPTc+2Xh3E7NQBuODAnA5HY866DWmcgCzhgzepEMJD+tKUrmTU7IUQhu"
"eUjhuLebowCBGWt6mIBOhFnf8rc2KXzJVpTsRjxVYPm/m29HceCAvOqqHg1Ozk7p4wJv3lCFgdNDJaGEA8JgaiRYRkmyf"
"Ix42VFzAUM+3gkbV+d0a79Vf3NxF2vZSV10seA1vV8WnX1NOfSO6YnXJQfJ0YjtM562K+7813CNfYSxBZfhCCsa6g3eXs"
"pMYSb0KD9to+nyJ0kPbTInjdGwGAa3rMF9vfXagv+kwjaj9WQ+OqM2yKh5DjgSaJ5gDr+Ea51AzByOeM+sKqXJwU0EFVu"
"GxfBF6lpA87dgfr9Y9roT+TFu/Y184TS7GPBNGWX7zFh385LbJsPPYVRG5eCsvmcQWOl8C1vU6hLMfK8WSrj9waCzPe0x"
"SBjnfa3fOeLtJU8uSEx9M31i+ON6kWCU+6BLNTbDxrzdm2Nu3CQvUBYzy0nuOisDGYzE9MpTRyaIh0Uz+EpBhrzfZwxwZ"
"+sXCZlloWk419/JMgeSasO5KbKNU4RJYa462WuLNFvBCCZVpV+enz/8SS2TRKXQ=="
" "
"<EMAIL>"
)
]
# AWS accounts we trust enough to assume roles in
#
friend_accounts = {
861229788715: 'hca-dev',
109067257620: 'hca-prod',
122796619775: 'platform-hca-dev',
542754589326: 'platform-hca-prod'
}
ingress_egress_block = {
"cidr_blocks": None,
"ipv6_cidr_blocks": None,
"prefix_list_ids": None,
"from_port": None,
"protocol": None,
"security_groups": None,
"self": None,
"to_port": None,
"description": None,
}
@lru_cache(maxsize=1)
def iam() -> JSON:
with gzip.open(os.path.join(os.path.dirname(__file__), 'aws_service_model.json.gz'), 'rt') as f:
return json.load(f)
def aws_service_actions(service: str, types: Set[ServiceActionType] = None, is_global: bool = None) -> List[str]:
if types is None and is_global is None:
return [iam()['services'][service]['serviceName'] + ':*']
else:
actions = iam()['actions'][service]
return [name for name, action in actions.items()
if (types is None or ServiceActionType[action['type']] in types)
and (is_global is None or bool(action['resources']) == (not is_global))]
def aws_service_arns(service: str, *resource_names: str, **arn_fields: str) -> List[str]:
resources = iam()['resources'].get(service, {})
resource_names = set(resource_names)
all_names = resources.keys()
invalid_names = resource_names.difference(all_names)
assert not invalid_names, f"No such resource in {service}: {invalid_names}"
arn_fields = {
'Account': aws.account,
'Region': aws.region_name,
**arn_fields
}
arns = []
for arn_fields in explode_dict(arn_fields):
for name, arn in resources.items():
if not resource_names or name in resource_names:
arn = arn.replace('${', '{')
arn = arn.format_map(arn_fields)
arns.append(arn)
return arns
def subnet_name(public):
return 'public' if public else 'private'
def subnet_number(zone, public):
# Even numbers for private subnets, odd numbers for public subnets. The advantage of this numbering scheme is
# that it won't be perturbed by adding zones.
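# For example, with two zones: subnet_number(0, public=False) == 0, subnet_number(0, public=True) == 1,
# subnet_number(1, public=False) == 2 and subnet_number(1, public=True) == 3.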
return 2 * zone + int(public)
# If the attachment of an instance to an NLB target group is by instance ID, the NLB preserves the source IP of
# ingress packets. For that to work, the security group protecting the instance must allow ingress from everywhere
# for the port being forwarded by the NLB. This should be ok because the instance is in a private subnet.
#
# If the attachment is by IP, the source IP is rewritten to be that of the load balancer's internal interface. The
# security group can be restricted to the internal subnet but the original source IP is lost and can't be used for
# logging and the like.
#
nlb_preserve_source_ip = True
def merge(sets: Iterable[Iterable[str]]) -> Iterable[str]:
return sorted(set(chain(*sets)))
def allow_global_actions(service, types: Set[ServiceActionType] = None) -> JSON:
return {
"actions": aws_service_actions(service, types=types, is_global=True),
"resources": ["*"]
}
def allow_service(service: str,
*resource_names: str,
action_types: Set[ServiceActionType] = None,
global_action_types: Set[ServiceActionType] = None,
**arn_fields: Union[str, List[str], Set[str], Tuple[str, ...]]) -> List[JSON]:
if global_action_types is None:
global_action_types = action_types
return remove_inconsequential_statements([
allow_global_actions(service, types=global_action_types),
{
"actions": aws_service_actions(service, types=action_types),
"resources": aws_service_arns(service, *resource_names, **arn_fields)
}
])
def remove_inconsequential_statements(statements: List[JSON]) -> List[JSON]:
return [s for s in statements if s['actions'] and s['resources']]
dss_direct_access_policy_statement = {
"actions": [
"sts:AssumeRole",
],
"resources": [
f"arn:aws:iam::{account}:role/azul-*"
for account in friend_accounts.keys()
]
}
emit_tf({} if config.terraform_component != 'gitlab' else {
"data": {
"aws_availability_zones": {
"available": {}
},
"aws_ebs_volume": {
"gitlab": {
"filter": [
{
"name": "volume-type",
"values": ["gp2"]
},
{
"name": "tag:Name",
"values": [ebs_volume_name]
}
],
"most_recent": True
}
},
# This Route53 zone also has to exist.
"aws_route53_zone": {
"gitlab": {
"name": config.domain_name + ".",
"private_zone": False
}
},
"aws_ami": {
"rancheros": {
"owners": ['605812595337'],
"filter": [
{
"name": "name",
"values": ["rancheros-v1.4.2-hvm-1"]
}
]
}
},
"aws_iam_policy_document": {
# This policy is really close to the policy size limit. If you get LimitExceeded: Cannot exceed quota for
# PolicySize: 6144, you need to strip the existing policy down by essentially replacing the calls to the
# helper functions like allow_service() with a hand-curated list of actions, potentially by starting from
# a copy of the template output.
"gitlab_boundary": {
"statement": [
allow_global_actions('S3', types={ServiceActionType.read, ServiceActionType.list}),
{
"actions": aws_service_actions('S3'),
"resources": merge(
aws_service_arns('S3', BucketName=bucket_name, ObjectName='*')
for bucket_name in (
[
'edu-ucsc-gi-singlecell-azul-*',
'*.url.singlecell.gi.ucsc.edu',
'url.singlecell.gi.ucsc.edu'
] if 'singlecell' in config.domain_name else [
'edu-ucsc-gi-azul-*',
'*.azul.data.humancellatlas.org',
]
)
)
},
*allow_service('KMS',
action_types={ServiceActionType.read, ServiceActionType.list},
KeyId='*',
Alias='*'),
*allow_service('SQS',
QueueName='azul-*'),
# API Gateway ARNs refer to APIs by ID so we cannot restrict to name or prefix
*allow_service('API Gateway',
ApiGatewayResourcePath="*"),
*allow_service('Elasticsearch Service',
global_action_types={ServiceActionType.read, ServiceActionType.list},
DomainName="azul-*"),
{
'actions': ['es:ListTags'],
'resources': aws_service_arns('Elasticsearch Service', DomainName='*')
},
*allow_service('STS',
action_types={ServiceActionType.read, ServiceActionType.list},
RelativeId='*',
RoleNameWithPath='*',
UserNameWithPath='*'),
dss_direct_access_policy_statement,
*allow_service('Certificate Manager',
# ACM ARNs refer to certificates by ID so we cannot restrict to name or prefix
CertificateId='*',
# API Gateway certs must reside in us-east-1, so we'll always add that region
Region={aws.region_name, 'us-east-1'}),
*allow_service('DynamoDB',
'table',
'index',
global_action_types={ServiceActionType.list, ServiceActionType.read},
TableName='azul-*',
IndexName='*'),
# Lambda ARNs refer to event source mappings by UUID so we cannot restrict to name or prefix
*allow_service('Lambda',
LayerName="azul-*",
FunctionName='azul-*',
UUID='*',
LayerVersion='*'),
# CloudWatch does not describe any resource-level permissions
{
"actions": ["cloudwatch:*"],
"resources": ["*"]
},
*allow_service('CloudWatch Events',
global_action_types={ServiceActionType.list, ServiceActionType.read},
RuleName='azul-*'),
# Route 53 ARNs refer to resources by ID so we cannot restrict to name or prefix
# FIXME: this is obviously problematic
{
"actions": ["route53:*"],
"resources": ["*"]
},
# Secret Manager ARNs refer to secrets by UUID so we cannot restrict to name or prefix
# FIXME: this is obviously problematic
*allow_service('Secrets Manager', SecretId='*'),
{
"actions": ['ssm:GetParameter'],
"resources": aws_service_arns('Systems Manager',
'parameter',
FullyQualifiedParameterName='dcp/dss/*')
},
{
"actions": [
"states:*"
],
"resources": aws_service_arns('Step Functions',
'execution',
'statemachine',
StateMachineName='azul-*',
ExecutionId='*')
},
{
"actions": [
"states:ListStateMachines",
"states:CreateStateMachine"
],
"resources": [
"*"
]
},
# CloudFront does not define any ARNs. We need it for friendly domain names for API Gateways
{
"actions": ["cloudfront:*"],
"resources": ["*"]
},
allow_global_actions('CloudWatch Logs'),
{
"actions": aws_service_actions('CloudWatch Logs',
types={ServiceActionType.list}),
"resources": aws_service_arns('CloudWatch Logs',
LogGroupName='*',
LogStream='*',
LogStreamName='*')
},
{
"actions": aws_service_actions('CloudWatch Logs'),
"resources": merge(aws_service_arns('CloudWatch Logs',
LogGroupName=log_group_name,
LogStream='*',
LogStreamName='*')
for log_group_name in ['/aws/apigateway/azul-*',
'/aws/lambda/azul-*',
'/aws/aes/domains/azul-*'])
}
]
},
"gitlab_iam": {
"statement": [
# Let Gitlab manage roles as long as they specify the permissions boundary.
# This prevents privilege escalation.
{
"actions": [
"iam:CreateRole",
"iam:TagRole",
"iam:PutRolePolicy",
"iam:DeleteRolePolicy",
"iam:AttachRolePolicy",
"iam:DetachRolePolicy",
"iam:PutRolePermissionsBoundary"
],
"resources": aws_service_arns('IAM', 'role', RoleNameWithPath='azul-*'),
"condition": {
"test": "StringEquals",
"variable": "iam:PermissionsBoundary",
"values": [aws.permissions_boundary_arn]
}
},
dss_direct_access_policy_statement,
{
"actions": [
"iam:UpdateAssumeRolePolicy",
"iam:TagRole",
"iam:DeleteRole",
"iam:PassRole" # FIXME: consider iam:PassedToService condition
],
"resources": aws_service_arns('IAM', 'role', RoleNameWithPath='azul-*')
},
{
"actions": aws_service_actions('IAM', types={ServiceActionType.read, ServiceActionType.list}),
"resources": ["*"]
},
*(
# Permissions required to deploy Data Browser and Portal
[
{
"actions": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket",
"s3:DeleteObject",
"s3:PutObjectAcl"
],
"resources": [
"arn:aws:s3:::dev.singlecell.gi.ucsc.edu/*",
"arn:aws:s3:::dev.explore.singlecell.gi.ucsc.edu/*",
"arn:aws:s3:::dev.explore.lungmap.net/*",
"arn:aws:s3:::dev.explore.singlecell.gi.ucsc.edu",
"arn:aws:s3:::dev.singlecell.gi.ucsc.edu",
"arn:aws:s3:::dev.explore.lungmap.net"
]
},
{
"actions": [
"cloudfront:CreateInvalidation"
],
"resources": [
"arn:aws:cloudfront::122796619775:distribution/E3562WJBOLN8W8",
"arn:aws:cloudfront::122796619775:distribution/E21CJFOUWO9Q7X"
]
}
] if config.domain_name == 'dev.singlecell.gi.ucsc.edu' else [
{
"actions": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket",
"s3:DeleteObject",
"s3:PutObjectAcl"
],
"resources": [
"arn:aws:s3:::org-humancellatlas-data-portal-dcp2-prod/*",
"arn:aws:s3:::org-humancellatlas-data-browser-dcp2-prod/*",
"arn:aws:s3:::org-humancellatlas-data-browser-dcp2-prod",
"arn:aws:s3:::org-humancellatlas-data-portal-dcp2-prod"
]
},
{
"actions": [
"cloudfront:CreateInvalidation"
],
"resources": [
"arn:aws:cloudfront::122796619775:distribution/E1LYQC3LZXO7M3"
]
}
] if config.domain_name == 'azul.data.humancellatlas.org' else [
]
)
]
}
},
},
"resource": {
"aws_vpc": {
"gitlab": {
"cidr_block": vpc_cidr,
"tags": {
"Name": "azul-gitlab"
}
}
},
"aws_subnet": { # a public and a private subnet per availability zone
f"gitlab_{subnet_name(public)}_{zone}": {
"availability_zone": f"${{data.aws_availability_zones.available.names[{zone}]}}",
"cidr_block": f"${{cidrsubnet(aws_vpc.gitlab.cidr_block, 8, {subnet_number(zone, public)})}}",
"map_public_ip_on_launch": public,
"vpc_id": "${aws_vpc.gitlab.id}",
"tags": {
"Name": f"azul-gitlab-{subnet_name(public)}-{subnet_number(zone, public)}"
}
} for public in (False, True) for zone in range(num_zones)
},
"aws_internet_gateway": {
"gitlab": {
"vpc_id": "${aws_vpc.gitlab.id}",
"tags": {
"Name": "azul-gitlab"
}
}
},
"aws_route": {
"gitlab": {
"destination_cidr_block": "0.0.0.0/0",
"gateway_id": "${aws_internet_gateway.gitlab.id}",
"route_table_id": "${aws_vpc.gitlab.main_route_table_id}"
}
},
"aws_eip": {
f"gitlab_{zone}": {
"depends_on": [
"aws_internet_gateway.gitlab"
],
"vpc": True,
"tags": {
"Name": f"azul-gitlab-{zone}"
}
} for zone in range(num_zones)
},
"aws_nat_gateway": {
f"gitlab_{zone}": {
"allocation_id": f"${{aws_eip.gitlab_{zone}.id}}",
"subnet_id": f"${{aws_subnet.gitlab_public_{zone}.id}}",
"tags": {
"Name": f"azul-gitlab-{zone}"
}
} for zone in range(num_zones)
},
"aws_route_table": {
f"gitlab_{zone}": {
"route": [
{
"cidr_block": "0.0.0.0/0",
"nat_gateway_id": f"${{aws_nat_gateway.gitlab_{zone}.id}}",
"egress_only_gateway_id": None,
"gateway_id": None,
"instance_id": None,
"ipv6_cidr_block": None,
"network_interface_id": None,
"transit_gateway_id": None,
"vpc_peering_connection_id": None
}
],
"vpc_id": "${aws_vpc.gitlab.id}",
"tags": {
"Name": f"azul-gitlab-{zone}"
}
} for zone in range(num_zones)
},
"aws_route_table_association": {
f"gitlab_{zone}": {
"route_table_id": f"${{aws_route_table.gitlab_{zone}.id}}",
"subnet_id": f"${{aws_subnet.gitlab_private_{zone}.id}}"
} for zone in range(num_zones)
},
"aws_security_group": {
"gitlab_alb": {
"name": "azul-gitlab-alb",
"vpc_id": "${aws_vpc.gitlab.id}",
"egress": [
{
**ingress_egress_block,
"cidr_blocks": ["0.0.0.0/0"],
"protocol": -1,
"from_port": 0,
"to_port": 0
}
],
"ingress": [
{
**ingress_egress_block,
"cidr_blocks": ["0.0.0.0/0"],
"protocol": "tcp",
"from_port": 443,
"to_port": 443
},
*({
**ingress_egress_block,
"cidr_blocks": ["0.0.0.0/0"],
"protocol": "tcp",
"from_port": ext_port,
"to_port": ext_port
} for ext_port, int_port, name in nlb_ports)
]
},
"gitlab": {
"name": "azul-gitlab",
"vpc_id": "${aws_vpc.gitlab.id}",
"egress": [
{
**ingress_egress_block,
"cidr_blocks": ["0.0.0.0/0"],
"protocol": -1,
"from_port": 0,
"to_port": 0
}
],
"ingress": [
{
**ingress_egress_block,
"from_port": 80,
"protocol": "tcp",
"security_groups": [
"${aws_security_group.gitlab_alb.id}"
],
"to_port": 80,
},
*({
**ingress_egress_block,
"cidr_blocks": [
"0.0.0.0/0" if nlb_preserve_source_ip else "${aws_vpc.gitlab.cidr_block}"
],
"protocol": "tcp",
"from_port": int_port,
"to_port": int_port
} for ext_port, int_port, name in nlb_ports)
]
}
},
"aws_lb": {
"gitlab_nlb": {
"name": "azul-gitlab-nlb",
"load_balancer_type": "network",
"subnets": [
f"${{aws_subnet.gitlab_public_{zone}.id}}" for zone in range(num_zones)
],
"tags": {
"Name": "azul-gitlab"
}
},
"gitlab_alb": {
"name": "azul-gitlab-alb",
"load_balancer_type": "application",
"subnets": [
f"${{aws_subnet.gitlab_public_{zone}.id}}" for zone in range(num_zones)
],
"security_groups": [
"${aws_security_group.gitlab_alb.id}"
],
"tags": {
"Name": "azul-gitlab"
}
}
},
"aws_lb_listener": {
**({
"gitlab_" + name: {
"port": ext_port,
"protocol": "TCP",
"default_action": [
{
"target_group_arn": "${aws_lb_target_group.gitlab_" + name + ".id}",
"type": "forward"
}
],
"load_balancer_arn": "${aws_lb.gitlab_nlb.id}"
} for ext_port, int_port, name in nlb_ports
}),
"gitlab_http": {
"port": 443,
"protocol": "HTTPS",
"ssl_policy": "ELBSecurityPolicy-2016-08",
"certificate_arn": "${aws_acm_certificate.gitlab.arn}",
"default_action": [
{
"target_group_arn": "${aws_lb_target_group.gitlab_http.id}",
"type": "forward"
}
],
"load_balancer_arn": "${aws_lb.gitlab_alb.id}"
}
},
"aws_lb_target_group": {
**({
"gitlab_" + name: {
"name": "azul-gitlab-" + name,
"port": int_port,
"protocol": "TCP",
"target_type": "instance" if nlb_preserve_source_ip else "ip",
"stickiness": {
"type": "lb_cookie",
"enabled": False
},
"vpc_id": "${aws_vpc.gitlab.id}"
} for ext_port, int_port, name in nlb_ports
}),
"gitlab_http": {
"name": "azul-gitlab-http",
"port": 80,
"protocol": "HTTP",
"target_type": "instance",
"stickiness": {
"type": "lb_cookie",
"enabled": False
},
"vpc_id": "${aws_vpc.gitlab.id}",
"health_check": {
"protocol": "HTTP",
"path": "/",
"port": "traffic-port",
"healthy_threshold": 5,
"unhealthy_threshold": 2,
"timeout": 5,
"interval": 30,
"matcher": "302"
},
"tags": {
"Name": "azul-gitlab-http"
}
}
},
"aws_lb_target_group_attachment": {
**({
"gitlab_" + name: {
"target_group_arn": "${aws_lb_target_group.gitlab_" + name + ".arn}",
"target_id": f"${{aws_instance.gitlab.{'id' if nlb_preserve_source_ip else 'private_ip'}}}"
} for ext_port, int_port, name in nlb_ports
}),
"gitlab_http": {
"target_group_arn": "${aws_lb_target_group.gitlab_http.arn}",
"target_id": "${aws_instance.gitlab.id}"
}
},
"aws_acm_certificate": {
"gitlab": {
"domain_name": "${aws_route53_record.gitlab.name}",
"subject_alternative_names": ["${aws_route53_record.gitlab_docker.name}"],
"validation_method": "DNS",
"tags": {
"Name": "azul-gitlab"
},
"lifecycle": {
"create_before_destroy": True
}
}
},
"aws_acm_certificate_validation": {
"gitlab": {
"certificate_arn": "${aws_acm_certificate.gitlab.arn}",
"validation_record_fqdns": [
"${aws_route53_record.gitlab_validation.fqdn}",
"${aws_route53_record.gitlab_validation_docker.fqdn}"
],
}
},
"aws_route53_record": {
**dict_merge(
{
departition('gitlab_validation', '_', subdomain): {
"name": f"${{aws_acm_certificate.gitlab.domain_validation_options.{i}.resource_record_name}}",
"type": f"${{aws_acm_certificate.gitlab.domain_validation_options.{i}.resource_record_type}}",
"zone_id": "${data.aws_route53_zone.gitlab.id}",
"records": [
f"${{aws_acm_certificate.gitlab.domain_validation_options.{i}.resource_record_value}}"],
"ttl": 60
},
departition('gitlab', '_', subdomain): {
"zone_id": "${data.aws_route53_zone.gitlab.id}",
"name": departition(subdomain, '.', f"gitlab.{config.domain_name}"),
"type": "A",
"alias": {
"name": "${aws_lb.gitlab_alb.dns_name}",
"zone_id": "${aws_lb.gitlab_alb.zone_id}",
"evaluate_target_health": False
}
}
} for i, subdomain in enumerate([None, 'docker'])),
"gitlab_ssh": {
"zone_id": "${data.aws_route53_zone.gitlab.id}",
"name": f"ssh.gitlab.{config.domain_name}",
"type": "A",
"alias": {
"name": "${aws_lb.gitlab_nlb.dns_name}",
"zone_id": "${aws_lb.gitlab_nlb.zone_id}",
"evaluate_target_health": False
}
}
},
"aws_network_interface": {
"gitlab": {
"subnet_id": "${aws_subnet.gitlab_private_0.id}",
"security_groups": [
"${aws_security_group.gitlab.id}"
],
"tags": {
"Name": "azul-gitlab"
}
}
},
"aws_volume_attachment": {
"gitlab": {
"device_name": "/dev/sdf",
"volume_id": "${data.aws_ebs_volume.gitlab.id}",
"instance_id": "${aws_instance.gitlab.id}",
"provisioner": {
"local-exec": {
"when": "destroy",
"command": "aws ec2 stop-instances --instance-ids ${self.instance_id}"
" && aws ec2 wait instance-stopped --instance-ids ${self.instance_id}"
}
}
}
},
"aws_key_pair": {
"gitlab": {
"key_name": "azul-gitlab",
"public_key": public_key
}
},
"aws_iam_role": {
"gitlab": {
"name": "azul-gitlab",
"path": "/",
"assume_role_policy": json.dumps({
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
})
}
},
"aws_iam_instance_profile": {
"gitlab": {
"name": "azul-gitlab",
"role": "${aws_iam_role.gitlab.name}",
}
},
"aws_iam_policy": {
"gitlab_iam": {
"name": "azul-gitlab-iam",
"path": "/",
"policy": "${data.aws_iam_policy_document.gitlab_iam.json}"
},
"gitlab_boundary": {
"name": config.permissions_boundary_name,
"path": "/",
"policy": "${data.aws_iam_policy_document.gitlab_boundary.json}"
}
},
"aws_iam_role_policy_attachment": {
"gitlab_iam": {
"role": "${aws_iam_role.gitlab.name}",
"policy_arn": "${aws_iam_policy.gitlab_iam.arn}"
},
            # Since we are using the boundary as a policy, GitLab can explicitly
            # do everything within the boundary.
"gitlab_boundary": {
"role": "${aws_iam_role.gitlab.name}",
"policy_arn": "${aws_iam_policy.gitlab_boundary.arn}"
}
},
"google_service_account": {
"gitlab": {
"project": "${local.google_project}",
"account_id": name,
"display_name": name,
}
for name in [
"azul-gitlab"
]
},
"google_project_iam_member": {
"gitlab_" + name: {
"project": "${local.google_project}",
"role": role,
"member": "serviceAccount:${google_service_account.gitlab.email}"
}
for name, role in [
("write", "${google_project_iam_custom_role.gitlab.id}"),
("read", "roles/viewer")
]
},
"google_project_iam_custom_role": {
"gitlab": {
"role_id": "azul_gitlab",
"title": "azul_gitlab",
"permissions": [
"resourcemanager.projects.setIamPolicy",
*[
f"iam.{resource}.{operation}"
for operation in ("create", "delete", "get", "list", "update", "undelete")
for resource in ("roles", "serviceAccountKeys", "serviceAccounts")
if resource != "serviceAccountKeys" or operation not in ("update", "undelete")
]
]
}
},
"aws_instance": {
"gitlab": {
"iam_instance_profile": "${aws_iam_instance_profile.gitlab.name}",
"ami": "${data.aws_ami.rancheros.id}",
"instance_type": "t3a.xlarge",
"key_name": "${aws_key_pair.gitlab.key_name}",
"network_interface": {
"network_interface_id": "${aws_network_interface.gitlab.id}",
"device_index": 0
},
"user_data": dedent(rf"""
#cloud-config
mounts:
- ["/dev/nvme1n1", "/mnt/gitlab", "ext4", ""]
rancher:
ssh_authorized_keys: {other_public_keys if config.deployment_stage == 'dev' else []}
write_files:
- path: /etc/rc.local
permissions: "0755"
owner: root
content: |
#!/bin/bash
wait-for-docker
docker network \
create gitlab-runner-net
docker run \
--detach \
--name gitlab-dind \
--privileged \
--restart always \
--network gitlab-runner-net \
--volume /mnt/gitlab/docker:/var/lib/docker \
--volume /mnt/gitlab/runner/config:/etc/gitlab-runner \
docker:18.03.1-ce-dind
docker run \
--detach \
--name gitlab \
--hostname ${{aws_route53_record.gitlab.name}} \
--publish 80:80 \
--publish 2222:22 \
--restart always \
--volume /mnt/gitlab/config:/etc/gitlab \
--volume /mnt/gitlab/logs:/var/log/gitlab \
--volume /mnt/gitlab/data:/var/opt/gitlab \
gitlab/gitlab-ce:13.10.0-ce.0
docker run \
--detach \
--name gitlab-runner \
--restart always \
--volume /mnt/gitlab/runner/config:/etc/gitlab-runner \
--network gitlab-runner-net \
--env DOCKER_HOST=tcp://gitlab-dind:2375 \
gitlab/gitlab-runner:v13.10.0
"""[1:]), # trim newline char at the beginning as dedent() only removes indent common to all lines
"tags": {
"Name": "azul-gitlab",
"Owner": config.owner
}
}
}
}
})
```
#### File: test/service/test_async_manifest_service.py
```python
import datetime
import json
from typing import (
Optional,
)
from unittest import (
mock,
)
import unittest.result
from botocore.exceptions import (
ClientError,
)
from furl import (
furl,
)
from moto import (
mock_sts,
)
import requests
from app_test_case import (
LocalAppTestCase,
)
from azul import (
config,
)
from azul.logging import (
configure_test_logging,
)
from azul.modules import (
load_app_module,
)
from azul.service.async_manifest_service import (
AsyncManifestService,
)
from azul.service.manifest_service import (
Manifest,
ManifestFormat,
)
from azul.service.step_function_helper import (
StateMachineError,
StepFunctionHelper,
)
from azul_test_case import (
AzulUnitTestCase,
)
from retorts import (
ResponsesHelper,
)
# noinspection PyPep8Naming
def setUpModule():
configure_test_logging()
patch_step_function_helper = mock.patch('azul.service.async_manifest_service.AsyncManifestService.step_function_helper')
state_machine_name = 'foo'
class TestAsyncManifestService(AzulUnitTestCase):
def test_token_encoding_invertibility(self):
"""
Parameter encoding and decoding functions should be inverse of each other
"""
uuid = {"execution_id": "6c9dfa3f-e92e-11e8-9764-ada973595c11"}
self.assertEqual(uuid, AsyncManifestService.decode_token(AsyncManifestService.encode_token(uuid)))
def test_token_validation(self):
token = {'no': 'id'}
self.assertRaises(ValueError, AsyncManifestService.decode_token, AsyncManifestService.encode_token(token))
# @mock_sts is required for tests calling the arn helper methods in StepFunctionHelper
# because they require an account id
@mock_sts
@patch_step_function_helper
def test_manifest_status_success(self, step_function_helper):
"""
A successful manifest job should return a 302 status and a url to the manifest
"""
manifest_url = 'https://url.to.manifest'
execution_id = '5b1b4899-f48e-46db-9285-2d342f3cdaf2'
execution_success_output = {
'executionArn': StepFunctionHelper().execution_arn(state_machine_name, execution_id),
'stateMachineArn': StepFunctionHelper().state_machine_arn(state_machine_name),
'name': execution_id,
'status': 'SUCCEEDED',
'startDate': datetime.datetime(2018, 11, 15, 18, 30, 44, 896000),
'stopDate': datetime.datetime(2018, 11, 15, 18, 30, 59, 295000),
'input': '{"filters": {}}',
'output': json.dumps(
{
'location': manifest_url,
'was_cached': False,
'properties': {}
}
)
}
step_function_helper.describe_execution.return_value = execution_success_output
manifest_service = AsyncManifestService(state_machine_name)
token = manifest_service.encode_token({'execution_id': execution_id})
format_ = ManifestFormat.compact
filters = manifest_service.parse_filters('{}')
wait_time, manifest = manifest_service.start_or_inspect_manifest_generation(self_url='',
format_=format_,
catalog=self.catalog,
filters=filters,
token=token)
self.assertEqual(type(wait_time), int)
self.assertEqual(wait_time, 0)
expected_obj = Manifest(location=manifest_url,
was_cached=False,
properties={})
self.assertEqual(expected_obj, manifest)
@mock_sts
@patch_step_function_helper
def test_manifest_status_running(self, step_function_helper):
"""
A running manifest job should return a 301 status and a url to retry checking the job status
"""
execution_id = 'd4ee1bed-0bd7-4c11-9c86-372e07801536'
execution_running_output = {
'executionArn': StepFunctionHelper().execution_arn(state_machine_name, execution_id),
'stateMachineArn': StepFunctionHelper().state_machine_arn(state_machine_name),
'name': execution_id,
'status': 'RUNNING',
'startDate': datetime.datetime(2018, 11, 15, 18, 30, 44, 896000),
'input': '{"filters": {}}'
}
step_function_helper.describe_execution.return_value = execution_running_output
manifest_service = AsyncManifestService(state_machine_name)
token = manifest_service.encode_token({'execution_id': execution_id})
retry_url = config.service_endpoint() + '/manifest/files'
format_ = ManifestFormat.compact
filters = manifest_service.parse_filters('{}')
wait_time, manifest = manifest_service.start_or_inspect_manifest_generation(self_url=retry_url,
format_=format_,
catalog=self.catalog,
filters=filters,
token=token)
self.assertEqual(type(wait_time), int)
self.assertEqual(wait_time, 1)
expected_token = manifest_service.encode_token({'execution_id': execution_id, 'request_index': 1})
location = furl(retry_url, args={'token': expected_token})
expected_obj = Manifest(location=location.url,
was_cached=False,
properties={})
self.assertEqual(expected_obj, manifest)
@mock_sts
@patch_step_function_helper
def test_manifest_status_failed(self, step_function_helper):
"""
A failed manifest job should raise a StateMachineError
"""
execution_id = '068579b6-9d7b-4e19-ac4e-77626851be1c'
execution_failed_output = {
'executionArn': StepFunctionHelper().execution_arn(state_machine_name, execution_id),
'stateMachineArn': StepFunctionHelper().state_machine_arn(state_machine_name),
'name': execution_id,
'status': 'FAILED',
'startDate': datetime.datetime(2018, 11, 14, 16, 6, 53, 382000),
'stopDate': datetime.datetime(2018, 11, 14, 16, 6, 55, 860000),
'input': '{"filters": {"organ": {"is": ["lymph node"]}}}',
}
step_function_helper.describe_execution.return_value = execution_failed_output
manifest_service = AsyncManifestService(state_machine_name)
token = manifest_service.encode_token({'execution_id': execution_id})
format_ = ManifestFormat.compact
filters = manifest_service.parse_filters('{}')
self.assertRaises(StateMachineError,
manifest_service.start_or_inspect_manifest_generation, '', format_, filters, token)
class TestAsyncManifestServiceEndpoints(LocalAppTestCase):
def run(self, result: Optional[unittest.result.TestResult] = None) -> Optional[unittest.result.TestResult]:
# Suppress generate manifests functionality to prevent false assertion positives
with mock.patch('azul.service.manifest_service.ManifestService.__init__') as __init__:
__init__.return_value = None
with mock.patch('azul.service.manifest_service.ManifestService.get_cached_manifest') as get_cached_manifest:
get_cached_manifest.return_value = None, None
return super().run(result)
@classmethod
def lambda_name(cls) -> str:
return 'service'
patch_current_request = mock.patch('lambdas.service.app.app.current_request')
@mock_sts
@patch_step_function_helper
@mock.patch('uuid.uuid4')
def test_manifest_endpoint_start_execution(self, mock_uuid, step_function_helper):
"""
Calling start manifest generation without a token should start an
execution and return a response with Retry-After and Location in the
headers.
"""
with ResponsesHelper() as helper:
helper.add_passthru(self.base_url)
for fetch in True, False:
with self.subTest(fetch=fetch):
execution_name = '6c9dfa3f-e92e-11e8-9764-ada973595c11'
mock_uuid.return_value = execution_name
step_function_helper.describe_execution.return_value = {'status': 'RUNNING'}
format_ = ManifestFormat.compact.value
filters = {'organ': {'is': ['lymph node']}}
params = {
'catalog': self.catalog,
'filters': json.dumps(filters),
'format': format_
}
if fetch:
response = requests.get(self.base_url + '/fetch/manifest/files',
params=params)
response.raise_for_status()
response = response.json()
else:
response = requests.get(self.base_url + '/manifest/files',
params=params,
allow_redirects=False)
self.assertEqual(301, response['Status'] if fetch else response.status_code)
self.assertIn('Retry-After', response if fetch else response.headers)
self.assertIn('Location', response if fetch else response.headers)
service = load_app_module('service')
step_function_helper.start_execution.assert_called_once_with(
# Since this is a LocalAppTestCase, we need the actual
# state machine name
config.state_machine_name(service.generate_manifest.lambda_name),
execution_name,
execution_input=dict(catalog=self.catalog,
format=format_,
filters=filters,
object_key=None)
)
step_function_helper.describe_execution.assert_called_once()
step_function_helper.reset_mock()
@patch_step_function_helper
def test_manifest_endpoint_check_status(self, step_function_helper):
"""
Calling start manifest generation with a token should check the status
without starting an execution.
"""
params = {
            'token': '<KEY>'
}
step_function_helper.describe_execution.return_value = {'status': 'RUNNING'}
response = requests.get(self.base_url + '/fetch/manifest/files', params=params)
response.raise_for_status()
step_function_helper.start_execution.assert_not_called()
step_function_helper.describe_execution.assert_called_once()
@patch_step_function_helper
def test_manifest_endpoint_execution_not_found(self, step_function_helper):
"""
Manifest status check should raise a BadRequestError (400 status code)
if execution cannot be found.
"""
params = {
            'token': '<KEY>'
}
step_function_helper.describe_execution.side_effect = ClientError({
'Error': {
'Code': 'ExecutionDoesNotExist'
}
}, '')
response = requests.get(self.base_url + '/fetch/manifest/files', params=params)
self.assertEqual(response.status_code, 400)
@patch_step_function_helper
@patch_current_request
def test_manifest_endpoint_boto_error(self, _current_request, step_function_helper):
"""
Manifest status check should reraise any ClientError that is not caused by ExecutionDoesNotExist
"""
params = {
            'token': '<KEY>'
}
step_function_helper.describe_execution.side_effect = ClientError({
'Error': {
'Code': 'OtherError'
}
}, '')
response = requests.get(self.base_url + '/fetch/manifest/files', params=params)
self.assertEqual(response.status_code, 500)
@patch_step_function_helper
@patch_current_request
def test_manifest_endpoint_execution_error(self, _current_request, step_function_helper):
"""
Manifest status check should return a generic error (500 status code)
if the execution errored.
"""
params = {
            'token': '<KEY>'
}
step_function_helper.get_manifest_status.side_effect = StateMachineError
response = requests.get(self.base_url + '/fetch/manifest/files', params=params)
self.assertEqual(response.status_code, 500)
@patch_current_request
def test_manifest_endpoint_invalid_token(self, _current_request):
"""
Manifest endpoint should raise a BadRequestError when given a token that cannot be decoded
"""
params = {'token': 'Invalid base64'}
response = requests.get(self.base_url + '/fetch/manifest/files', params=params)
self.assertEqual(response.status_code, 400)
```
#### File: test/service/test_collection_data_access.py
```python
import json
from unittest import (
TestCase,
skipIf,
)
import responses
from azul import (
config,
)
from azul.service.collection_data_access import (
ClientError,
CollectionDataAccess,
CreationError,
RetrievalError,
ServerTimeoutError,
UnauthorizedClientAccessError,
UpdateError,
)
from retorts import (
ResponsesHelper,
)
@skipIf(config.dss_endpoint is None,
'DSS endpoint is not configured')
class CollectionDataAccessTestCase(TestCase):
def setUp(self):
fake_access_token = 'fake_access_<PASSWORD>'
self.cda = CollectionDataAccess(fake_access_token)
@responses.activate
def test_get_ok(self):
test_collection_uuid = 'abcdef123456'
test_collection_version = '1980-01-01'
fake_collection = {'hello': 'world'}
with ResponsesHelper() as helper:
helper.add(responses.Response(responses.GET,
self.cda.endpoint_url('collections', test_collection_uuid),
json=fake_collection))
collection = self.cda.get(test_collection_uuid, test_collection_version)
self.assertEqual(collection,
dict(uuid=test_collection_uuid,
version=test_collection_version,
collection=fake_collection))
@responses.activate
def test_get_raises_retrival_error(self):
test_collection_uuid = 'abcdef123456'
test_collection_version = '1980-01-01'
with ResponsesHelper() as helper:
helper.add(responses.CallbackResponse(responses.GET,
self.cda.endpoint_url('collections', test_collection_uuid),
callback=RequestCallback(567, '{}'),
content_type='application/json'))
with self.assertRaises(RetrievalError):
self.cda.get(test_collection_uuid, test_collection_version)
@responses.activate
def test_create_ok(self):
test_collection_uuid = 'abcdef123456'
test_collection_version = '1980-01-01'
expected_collection = dict(uuid=test_collection_uuid, version=test_collection_version)
with ResponsesHelper() as helper:
helper.add(responses.CallbackResponse(responses.PUT,
self.cda.endpoint_url('collections'),
callback=RequestCallback(201, json.dumps(expected_collection)),
content_type='application/json'))
collection = self.cda.create(test_collection_uuid, 'foo bar', 'bar', test_collection_version, [])
self.assertEqual(collection, expected_collection)
@responses.activate
def test_create_raises_creation_error(self):
test_collection_uuid = '<KEY>6'
test_collection_version = '1980-01-01'
fake_dss_response = {"code": "unknown"}
with ResponsesHelper() as helper:
helper.add(responses.CallbackResponse(responses.PUT,
self.cda.endpoint_url('collections'),
callback=RequestCallback(500, json.dumps(fake_dss_response)),
content_type='application/json'))
with self.assertRaises(CreationError):
self.cda.create(test_collection_uuid, 'foo bar', 'bar', test_collection_version, [])
@responses.activate
def test_append_with_no_items_successful(self):
test_collection_uuid = '<KEY>6'
test_collection_version = '1980-01-01'
expected_collection = dict(uuid=test_collection_uuid, version=test_collection_version)
with ResponsesHelper() as helper:
helper.add(responses.CallbackResponse(responses.PATCH,
self.cda.endpoint_url('collections', test_collection_uuid),
callback=RequestCallback(200, json.dumps(expected_collection)),
content_type='application/json'))
collection = self.cda.append(test_collection_uuid, test_collection_version, [])
self.assertEqual(collection, expected_collection)
@responses.activate
def test_append_with_some_items_successful(self):
test_collection_uuid = '<KEY>6'
test_collection_version = '1980-01-01'
expected_collection = dict(uuid=test_collection_uuid, version=test_collection_version)
with ResponsesHelper() as helper:
helper.add(responses.CallbackResponse(responses.PATCH,
self.cda.endpoint_url('collections', test_collection_uuid),
callback=RequestCallback(200, json.dumps(expected_collection)),
content_type='application/json'))
collection = self.cda.append(test_collection_uuid,
test_collection_version,
[dict(type='foo_1', uuid='bar_1', version='baz_1'),
dict(type='foo_2', uuid='bar_2', version='baz_2'),
dict(type='foo_n', uuid='bar_n', version='baz_n')])
self.assertEqual(collection, expected_collection)
@responses.activate
def test_append_raises_update_error(self):
test_collection_uuid = 'abcdef123456'
test_collection_version = '1980-01-01'
with ResponsesHelper() as helper:
helper.add(responses.CallbackResponse(responses.PATCH,
self.cda.endpoint_url('collections', test_collection_uuid),
callback=RequestCallback(405, '{}'),
content_type='application/json'))
with self.assertRaises(UpdateError):
self.cda.append(test_collection_uuid, test_collection_version, [])
@responses.activate
def test_send_request_successful_with_auto_retry_on_http_504_timeout(self):
test_collection_uuid = 'abcdef123456'
expected_response = {'code': 'hello_world'}
with ResponsesHelper() as helper:
url = self.cda.endpoint_url(test_collection_uuid)
helper.add(responses.CallbackResponse(responses.GET,
url,
callback=RequestCallback(200,
json.dumps(expected_response),
delay=True),
content_type='application/json'))
response = self.cda.send_request(test_collection_uuid, 'get', url, {})
self.assertEqual(response.json(), expected_response)
@responses.activate
def test_send_request_successful_with_auto_retry_on_http_502(self):
test_collection_uuid = 'abcdef123456'
expected_response = {'code': 'hello_world'}
mock_response_sequence = [
(502, {}, '{"code": "mock_error"}'),
(200, {}, json.dumps(expected_response))
]
def mock_request_handler(_request):
return mock_response_sequence.pop(0)
with ResponsesHelper() as helper:
url = self.cda.endpoint_url(test_collection_uuid)
helper.add(responses.CallbackResponse(responses.GET,
url,
callback=mock_request_handler,
content_type='application/json'))
response = self.cda.send_request(test_collection_uuid, 'get', url, {})
self.assertEqual(response.json(), expected_response)
@responses.activate
def test_send_request_fails_after_too_many_retries(self):
test_collection_uuid = 'abcdef123456'
with self.assertRaises(ServerTimeoutError):
self.cda.send_request(test_collection_uuid, 'get', 'fake_url', {}, delay=64)
@responses.activate
def test_send_request_with_unexpected_response_code_raises_client_error(self):
test_collection_uuid = 'abcdef123456'
expected_response = {'code': 'hello_world'}
with ResponsesHelper() as helper:
url = self.cda.endpoint_url(test_collection_uuid)
helper.add(responses.CallbackResponse(responses.GET,
url,
callback=RequestCallback(201, json.dumps(expected_response)),
content_type='application/json'))
with self.assertRaises(ClientError):
self.cda.send_request(test_collection_uuid, 'get', url, {}, expected_status_code=200)
@responses.activate
def test_send_request_with_unexpected_response_code_raises_unauthorized_client_access_error(self):
test_collection_uuid = 'abcdef123456'
expected_response = {'code': 'mock_error'}
with ResponsesHelper() as helper:
url = self.cda.endpoint_url(test_collection_uuid)
helper.add(responses.CallbackResponse(responses.GET,
url,
callback=RequestCallback(401, json.dumps(expected_response)),
content_type='application/json'))
with self.assertRaises(UnauthorizedClientAccessError):
self.cda.send_request(test_collection_uuid, 'get', url, {}, expected_status_code=200)
class RequestCallback:
def __init__(self, code, content, delay=False):
self.content = content
self.code = code
self.delay = delay
def __call__(self, request):
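        # With delay set, the first call simulates an HTTP 504 timeout and later
        # calls return the configured status code and content, which exercises
        # the client's retry path.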
if not self.delay:
return self.code, {}, self.content
else:
self.delay = False
return 504, {}, '{"code": "timed_out"}'
```
#### File: azul/test/test_tagging.py
```python
import json
from typing import (
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
import unittest
from unittest.mock import (
MagicMock,
patch,
)
from more_itertools import (
first,
)
from azul.json import (
AnyJSON,
)
from azul.logging import (
configure_test_logging,
)
from azul.terraform import (
populate_tags,
)
from azul_test_case import (
AzulUnitTestCase,
)
# noinspection PyPep8Naming
def setUpModule():
configure_test_logging()
class TestTerraformResourceTags(AzulUnitTestCase):
def assertDictEqualPermissive(self,
expected: AnyJSON,
actual: AnyJSON
) -> None:
path = self.permissive_compare(expected, actual)
self.assertIsNone(path, f'Discrepancy at path: {path}')
def permissive_compare(self,
expected: AnyJSON,
actual: AnyJSON,
*path: Union[str, int]
) -> Optional[Tuple[Union[int, str], ...]]:
"""
Recursive JSON comparison. A None value in `expected` matches any value
at the same position in `actual`.
        :return: None if the two arguments match, otherwise the path of the
        discrepancy as a tuple of keys and indices.
>>> t = TestTerraformResourceTags()
>>> t.permissive_compare(
... {'foo': 'bar', 'qaz': [{'qux': 123, '456': 789}]},
... {'foo': 'bar', 'qaz': [{'qux': 123, '456': 789}]}
... )
>>> t.permissive_compare(
... {'foo': 'bar', 'qaz': [{'qux': 123, '456': None}]},
... {'foo': 'bar', 'qaz': [{'qux': 123, '456': 'abc'}]}
... )
>>> t.permissive_compare(
... {'foo': 'bar', 'qaz': [{'qux': 123, '456': 'def'}]},
... {'foo': 'bar', 'qaz': [{'qux': 123, '456': 'abc'}]}
... )
...
('qaz', 0, '456')
>>> t.permissive_compare(
... {'foo': 'bar', 'qaz': [{'qux': 123, '456': 'def'}]},
... {'foo': 'bar', 'qaz': [{'qux': 123, '456': None}]}
... )
('qaz', 0, '456')
>>> t.permissive_compare(
... {'foo': 'bar', 'qaz': None, '456': 'def'},
... {'foo': 'bar', 'qaz': [{'qux': 123, '456': 'abc'}]}
... )
('456',)
>>> t.permissive_compare(
... {'foo': 'bar', 'qaz': [{'qux': {'123': 890}}]},
... {'foo': 'bar', 'qaz': [{'qux': {'123': 456}}]}
... )
('qaz', 0, 'qux', '123')
>>> t.permissive_compare(None, 123)
>>> t.permissive_compare({}, [])
()
>>> t.permissive_compare([], [1])
(0,)
>>> t.permissive_compare([1], [])
(0,)
>>> t.permissive_compare({}, {'0':1})
('0',)
>>> t.permissive_compare({'0':1}, {})
('0',)
"""
primitive_json = (str, int, float, bool)
if isinstance(actual, primitive_json) and isinstance(expected, primitive_json):
if expected != actual:
return path
elif expected is None:
pass
elif isinstance(actual, Sequence) and isinstance(expected, Sequence):
if len(actual) > len(expected):
return *path, len(expected)
else:
for i, expected_v in enumerate(expected):
try:
actual_v = actual[i]
except IndexError:
return *path, i
else:
diff = self.permissive_compare(expected_v, actual_v, *path, i)
if diff is not None:
return diff
elif isinstance(actual, Mapping) and isinstance(expected, Mapping):
if len(actual) > len(expected):
return *path, first(actual.keys() - expected.keys())
else:
for k, expected_v in expected.items():
assert isinstance(k, str)
try:
actual_v = actual[k]
except KeyError:
return *path, k
else:
diff = self.permissive_compare(expected_v, actual_v, *path, k)
if diff is not None:
return diff
else:
return path
@patch('subprocess.run', new_callable=MagicMock)
def test(self, terraform_mock):
terraform_mock.return_value.stdout = json.dumps({
'format_version': '0.1',
'provider_schemas': {
'aws': {
'resource_schemas': {
'aws_vpc': {
'block': {
'attributes': {
'tags': {}
}
}
}
}
},
'gcp': {
'resource_schemas': {
'google_compute_instance': {
'block': {
'attributes': {
'tags': {}
}
}
}
}
}
}
}).encode()
tagged_aws_resource = {
'resource': {
'aws_vpc': {
'name': {}
}
}
}
expected = {
'resource': [{
'aws_vpc': [{
'name': {
'tags': {
'project': None,
'service': None,
'deployment': None,
'owner': None,
'Name': None,
'component': None
}
}
}]
}]
}
tagged = populate_tags(tagged_aws_resource)
self.assertDictEqualPermissive(expected, tagged)
tagged_gcp_resource = {
'resource': {
'google_compute_instance': {
'name': {}
}
}
}
expected = {
'resource': [{
'google_compute_instance': [{
'name': {
'tags': {
'project': None,
'service': None,
'deployment': None,
'owner': None,
'name': None,
'component': None
}
}
}]
}]
}
tagged = populate_tags(tagged_gcp_resource)
self.assertDictEqualPermissive(expected, tagged)
untaggable_aws_resource = {
'resource': {
'aws_untaggable_resource': {'name': {}}
}
}
expected = {
'resource': [
{'aws_untaggable_resource': [{'name': {}}]}
]
}
tagged = populate_tags(untaggable_aws_resource)
self.assertDictEqualPermissive(expected, tagged)
manually_tagged_resource = {
'resource': {
'aws_vpc': {
'name': {
'tags': {
'component': 'foo'
}
}
}
}
}
expected = {
'resource': [{
'aws_vpc': [{
'name': {
'tags': {
'project': None,
'service': None,
'deployment': None,
'owner': None,
'Name': None,
'component': 'foo'
}
}
}]
}]
}
tagged = populate_tags(manually_tagged_resource)
self.assertDictEqualPermissive(expected, tagged)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JesseBropez/EscapeNonPrintablesPython",
"score": 3
}
|
#### File: JesseBropez/EscapeNonPrintablesPython/escape.py
```python
def escape(string):
return ''.join(c if ord(c) < 127 and ord(c) > 31 else '\\x%02x' % ord(c) for c in string)
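# Minimal usage sketch (my addition, not part of the original file): printable
# ASCII (codes 32-126) passes through unchanged, everything else becomes a \xNN escape.
if __name__ == '__main__':
    assert escape('a\tb\x7f') == 'a\\x09b\\x7f'
    print(escape('héllo'))  # non-ASCII characters are escaped as well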
```
|
{
"source": "JesseBuesking/clocked",
"score": 3
}
|
#### File: clocked/test/test_clocked.py
```python
from time import sleep
import unittest
# noinspection PyDocstring
from clocked.clockit import Clocked
from clocked.decorators import clocked
class TestClocked(unittest.TestCase):
def _assert(self, mini, val, maxi):
self.assertTrue(
mini <= val <= maxi,
'{} <= {} <= {} is not true'.format(
mini,
val,
maxi
)
)
def ten_ms(self):
sleep(.01)
# def template(self):
# Clocked.initialize('template')
# # time stuff
# with Clocked('a'):
# # time something
# pass
# # print report
# Profiler.print_hotspot_report()
def test_raw_simple(self):
""" Simple raw test using Clocked object. """
Clocked.initialize('test raw simple')
with Clocked('loop 1'):
for i in range(4):
with Clocked('loop 2'):
for j in range(2):
with Clocked('loop 3'):
for j in range(2):
self.ten_ms()
for j in range(2):
with Clocked('loop 4'):
for j in range(2):
self.ten_ms()
expected_total_time = 320
delta_upper_bound = 10
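        # Arithmetic behind the expectation: each 'loop 3' and 'loop 4' block sleeps
        # 2 x 10 ms = 20 ms; per outer iteration that is 2 x 20 ms + 2 x 20 ms = 80 ms,
        # and 4 outer iterations give 320 ms in total.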
Clocked.verbose_report()
Clocked.hotspot_report()
print('')
total = 0.0
for timing in Clocked.get('loop 3'):
total += timing.duration_milliseconds
d = delta_upper_bound / 2
e = expected_total_time / 2
self._assert(e - d, total, e + d)
total = 0.0
for timing in Clocked.get('loop 4'):
total += timing.duration_milliseconds
d = delta_upper_bound / 2
e = expected_total_time / 2
self._assert(e - d, total, e + d)
total = 0.0
for timing in Clocked.get('loop 2'):
total += timing.duration_milliseconds
d = delta_upper_bound
e = expected_total_time
self._assert(e - d, total, e + d)
def test_raise(self):
def raises():
with Clocked('test exception'):
raise ValueError('some value error')
self.assertRaises(ValueError, raises)
# noinspection PyDocstring
class TestDecorators(unittest.TestCase):
def _assert(self, mini, val, maxi):
self.assertTrue(
mini <= val <= maxi,
'{} <= {} <= {} is not true'.format(
mini,
val,
maxi
)
)
def test_function_decorator(self):
Clocked.initialize('test function decorator')
TestDecorators.TestFunctionObj.delay_method()
t = [i for i in Clocked.get('test_clocked.delay_method.*')]
self.assertEqual(1, len(t))
t = t[0]
self._assert(20-2, t.duration_milliseconds, 20+2)
# noinspection PyDocstring
class TestFunctionObj(object):
@classmethod
@clocked
def delay_method(cls):
sleep(.02)
def test_class_decorator(self):
Clocked.initialize('test class decorator')
TestDecorators.TestClassObj.delay_method()
t = [i for i in Clocked.get('.*delay_method.*')]
self.assertEqual(1, len(t))
t = t[0]
self._assert(20-2, t.duration_milliseconds, 20+2)
# noinspection PyDocstring
@clocked
class TestClassObj(object):
@classmethod
def delay_method(cls):
sleep(.02)
def test_function_and_class_decorators(self):
Clocked.initialize('test function and class decorators')
TestDecorators.TestFunctionAndClassObj.delay_method()
t = [i for i in Clocked.get('.*delay_method.*')]
self.assertEqual(1, len(t))
t = t[0]
self._assert(20-2, t.duration_milliseconds, 20+2)
# noinspection PyDocstring
class TestFunctionAndClassObj(object):
@classmethod
@clocked
def delay_method(cls):
sleep(.02)
def test_not_classmethods(self):
Clocked.initialize('test function and class decorators')
to = TestDecorators.TestNotClassmethods()
to.delay_method()
t = [i for i in Clocked.get('.*delay_method.*')]
self.assertEqual(1, len(t))
t = t[0]
self._assert(20-2, t.duration_milliseconds, 20+2)
# noinspection PyDocstring
@clocked
class TestNotClassmethods(object):
def delay_method(self):
sleep(.02)
def test_static_method(self):
Clocked.initialize('test function and class decorators')
TestDecorators.TestStaticMethod.delay_method()
t = [i for i in Clocked.get('.*delay_method.*')]
self.assertEqual(1, len(t))
t = t[0]
self._assert(20-2, t.duration_milliseconds, 20+2)
# noinspection PyDocstring
@clocked
class TestStaticMethod(object):
@staticmethod
def delay_method():
sleep(.02)
```
|
{
"source": "jessebuildersday/awsbuildersday",
"score": 2
}
|
#### File: config/byod/EndpointConfig.py
```python
import json
import os
def handler(event,context):
print(json.dumps(event,indent=2))
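    # Hypothetical event shape assumed by this handler (illustrative only; the field
    # names come from the lookups below, the values are made up):
    # {
    #   "params": {"hostinstancecount": 1, "hostinstancetype": "ml.m5.large", "version": "v1"},
    #   "args": {"model": {"ModelName": "my-model"}}
    # }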
return {
"ProductionVariants": [{
"InitialInstanceCount":event["params"]["hostinstancecount"],
"InstanceType":event["params"]["hostinstancetype"],
"ModelName":event["args"]["model"]["ModelName"],
"VariantName":event["params"]["version"],
}]
}
```
|
{
"source": "jessebutterfield/wwtl",
"score": 2
}
|
#### File: league/templatetags/league_helpers.py
```python
from typing import Dict, Optional
from django import template
register = template.Library()
@register.simple_tag
def tabindex(match_row: int, set_id: str, offset: int):
return (match_row-1) * 12 + int(set_id) * 4 + offset
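# Worked example for the formula above (my addition): tabindex(2, '1', 3)
# evaluates to (2-1)*12 + 1*4 + 3 == 19, i.e. the layout appears to reserve
# 12 tab stops per match row and 4 per set.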
@register.simple_tag
def get_score(set_dict: Dict[int, Dict[int, Dict[str, Optional[int]]]], match_id: int, set_id: str, score_type: str):
value = set_dict[match_id][int(set_id)].get(score_type)
return str(value) if value is not None else ""
```
|
{
"source": "jessecambon/advent_of_code_2021",
"score": 4
}
|
#### File: advent_of_code_2021/day3/part2.py
```python
import numpy as np
def round_properly(d):
if d == 0.5:
return(1)
else:
return(round(d))
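# Note (added comment): the 0.5 special case exists because Python's round()
# uses banker's rounding (round(0.5) == 0), while a tie here should count as
# 1 being the most common digit.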
def flip(d):
if d == 0:
return(1)
elif d == 1:
return(0)
# read data
with open('input.txt') as f:
data = [list(line.rstrip()) for line in f]
# convert input data to numeric list of lists
data_numeric = [[ int(digit) for digit in l] for l in data ]
def find_value(val_type):
remaining_numbers = data_numeric
# iterate through data and remove items that don't match criteria
for i in range(len(data_numeric[0])):
if len(remaining_numbers) == 1:
break
# extract ith column from remaining numbers
number_column = [x[i] for x in remaining_numbers]
# find most or least common digit
d_select = round_properly(sum(number_column)/len(number_column))
if val_type == 'scrubber':
d_select = flip(d_select)
# print('d_select:')
# print(d_select)
# only keep numbers that match criteria
remaining_numbers = [ l for l in remaining_numbers if l[i] == d_select]
    # join the digits of the remaining number into a binary string and
    # convert it from binary to decimal
binary_value = ''.join([str(d) for d in remaining_numbers[0]])
# print("binary_value: " + binary_value)
final_value = int(binary_value, 2)
return(final_value)
oxygen = find_value('oxygen')
scrubber = find_value('scrubber')
print('oxygen: %d' % oxygen)
print('scrubber: %d' % scrubber)
print('product: %d' % (oxygen * scrubber))
```
|
{
"source": "jessecantu/tensorflow",
"score": 2
}
|
#### File: python/distribute/vars_test.py
```python
import itertools
import uuid
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import checkpoint_management as ckpt_manager
from tensorflow.python.training.tracking import util as trackable_utils
_TPU_STRATEGIES = (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)
def strategy_and_run_tf_function_combinations():
# Test the combination of different strategies and whether a tf.function
  # is passed into strategy.run.
return combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph", "eager"],
experimental_run_tf_function=[True, False],
use_var_policy=[True, False]) + combinations.combine(
distribution=[
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
],
mode=["graph", "eager"],
experimental_run_tf_function=[True],
use_var_policy=[True, False])
def strategy_with_var_policy():
return combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
],
mode=["graph", "eager"],
use_var_policy=[True, False])
class OnWriteVariableSync(test.TestCase, parameterized.TestCase):
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssign(self, distribution, experimental_run_tf_function):
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", 1.), ("assign_add", 1.), ("assign_sub", -1.)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
      # Assigning in replica context with aggregation SUM does not make sense;
      # you could simply assign value * num_replicas instead. The error raised is
      # "1. is not a distributed value and is unsupported for aggregation SUM".
if (not cross_replica and aggregation ==
variables_lib.VariableAggregation.SUM):
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignOnWriteVar(self, distribution, experimental_run_tf_function):
with distribution.scope():
v_to_assign = variable_scope.variable(
2., aggregation=variables_lib.VariableAggregation.MEAN)
v_to_assign_sub = variable_scope.variable(
-2., aggregation=variables_lib.VariableAggregation.MEAN)
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", v_to_assign), ("assign_add", v_to_assign),
("assign_sub", v_to_assign_sub)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
      # Assigning in replica context with aggregation SUM does not make sense;
      # you could simply assign value * num_replicas instead. The error raised is
      # "1. is not a distributed value and is unsupported for aggregation SUM".
if aggregation == variables_lib.VariableAggregation.SUM:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(2.0, self.evaluate(component.read_value()))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignPerReplicaVal(self, distribution, experimental_run_tf_function):
if isinstance(distribution, _TPU_STRATEGIES):
self.skipTest("Assigning PerReplica values is not supported. See"
" sponge/80ba41f8-4220-4516-98ce-bbad48f9f11a.")
with distribution.scope():
per_replica_value = values.PerReplica(
[constant_op.constant(2.0),
constant_op.constant(2.0)])
per_replica_sub_value = values.PerReplica(
[constant_op.constant(-2.0),
constant_op.constant(-2.0)])
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", per_replica_value), ("assign_add", per_replica_value),
("assign_sub", per_replica_sub_value)]
    # We don't support assigning PerReplica values to vars in replica context
# with aggregation=NONE.
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
      # Assigning in replica context with aggregation SUM does not make sense;
      # you could simply assign value * num_replicas instead. The error raised is
      # "1. is not a distributed value and is unsupported for aggregation SUM".
if cross_replica:
# We don't support assigning PerReplica values to MirroredVariables in
# cross replica context
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
if aggregation == variables_lib.VariableAggregation.SUM:
expected = 4.0
else:
expected = 2.0
for component in v._values:
self.assertAllEqual(expected, self.evaluate(component.read_value()))
@combinations.generate(strategy_with_var_policy())
def testValueInReplicaContext(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
1., aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def f():
with ops.control_dependencies([v.assign_add(1.)]):
return v.value()
results = self.evaluate(
distribution.experimental_local_results(
distribution.run(f)))
for value in results:
self.assertEqual(2., value)
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
if experimental_run_tf_function:
read_var_fn = def_function.function(v.read_value)
else:
read_var_fn = v.read_value
results = self.evaluate(
distribution.experimental_local_results(
distribution.run(read_var_fn)))
for component, value in zip(v._values, results):
self.assertAllEqual(self.evaluate(component.read_value()), value)
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInCrossReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
2.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
if experimental_run_tf_function:
read_var_fn = def_function.function(v.read_value)
else:
read_var_fn = v.read_value
results = read_var_fn()
for component in v._values:
self.assertEqual(self.evaluate(component.read_value()),
self.evaluate(results))
@combinations.generate(strategy_with_var_policy())
def testAssignOutOfScope(self, distribution):
with distribution.scope():
mirrored = variables_lib.Variable(1.)
self.evaluate(mirrored.assign(3.))
self.assertEqual(self.evaluate(mirrored.read_value()), 3.)
for component in mirrored.values:
self.assertEqual(self.evaluate(component.read_value()), 3.)
@combinations.generate(strategy_with_var_policy())
def testInitializedToSameValueInsideEagerRun(self, distribution):
if not context.executing_eagerly(): self.skipTest("eager only test")
v = [None]
@def_function.function
def step():
def f():
if v[0] is None:
v[0] = variables_lib.Variable(random_ops.random_normal([]))
distribution.run(f)
context.set_global_seed(None)
step()
vals = self.evaluate(v[0].values)
self.assertAllEqual(vals[0], vals[1])
@combinations.generate(strategy_with_var_policy())
def testAggregationOnlyFirstReplica(self, distribution):
with distribution.scope():
v = variable_scope.variable(
15.,
synchronization=variables_lib.VariableSynchronization.ON_WRITE,
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def assign():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return v.assign(math_ops.cast(replica_id, dtypes.float32))
per_replica_results = self.evaluate(distribution.experimental_local_results(
distribution.run(assign)))
# The per-replica values should always match the first replicas value.
self.assertAllEqual(
array_ops.zeros(distribution.num_replicas_in_sync, dtypes.float32),
per_replica_results)
@combinations.generate(strategy_with_var_policy())
def testInitScope(self, distribution):
if not context.executing_eagerly(): self.skipTest("eager only")
class C(object):
pass
obj = C()
obj.w = None
obj.v = None
@def_function.function
def assign():
with ops.init_scope():
if obj.w is None:
obj.w = variables_lib.Variable(
0., aggregation=variables_lib.VariableAggregation.MEAN)
obj.v = variables_lib.Variable(
obj.w.read_value(),
aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
return obj.v.assign_add(2.)
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(assign)))
self.assertAllEqual([2., 2.], per_replica_results)
@combinations.generate(strategy_with_var_policy())
def testOperatorOverride(self, distribution):
with distribution.scope():
v = variable_scope.variable(
1, aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(variables_lib.global_variables_initializer())
self.assertEqual(2, self.evaluate(v + 1))
@def_function.function
def add():
return v + 1
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(add)))
self.assertAllEqual([2, 2], per_replica_results)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=["eager"],
use_var_policy=[True, False]))
def testSaveAndRestoreOnWrite(self, strategy):
aggregation = [
variable_scope.VariableAggregation.NONE,
variable_scope.VariableAggregation.ONLY_FIRST_REPLICA,
variable_scope.VariableAggregation.SUM,
variable_scope.VariableAggregation.MEAN
]
for agg in aggregation:
v_normal_restore = variables_lib.Variable(1.0)
v_normal_save = variables_lib.Variable(3.0)
with strategy.scope():
v_on_write = variables_lib.Variable(2.0, aggregation=agg)
# Save ONWRITE Restore ONWRITE
# Save
ckpt = trackable_utils.Checkpoint(var=v_on_write)
manager = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager.save()
# Restore
ckpt.restore(manager.latest_checkpoint)
self.assertEqual(2.0, self.evaluate(v_on_write._values[0]))
self.assertEqual(2.0, self.evaluate(v_on_write.read_value()))
# Save Mirrored Restore Normal
# We've already saved Mirrored, so we only need to restore normal
ckpt_normal = trackable_utils.Checkpoint(var=v_normal_restore)
ckpt_normal.restore(manager.latest_checkpoint)
self.assertEqual(2.0, self.evaluate(v_on_write._values[0]))
self.assertEqual(2.0, self.evaluate(v_normal_restore.read_value()))
# Save Normal Restore Mirrored
# Save
ckpt = trackable_utils.Checkpoint(var=v_normal_save)
manager_2 = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckptckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager_2.save()
# Restore
ckpt_on_write = trackable_utils.Checkpoint(var=v_on_write)
ckpt_on_write.restore(manager_2.latest_checkpoint)
self.assertEqual(3.0, self.evaluate(v_on_write._values[0]))
self.assertEqual(3.0, self.evaluate(v_on_write.read_value()))
ms_combination = combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"])
tpu_combination = combinations.combine(
distribution=[strategy_combinations.tpu_strategy_packed_var],
mode=["graph", "eager"])
class OnWriteVariableSyncScatterTests(test.TestCase, parameterized.TestCase):
@combinations.generate(ms_combination)
def testScatterSub(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[0., 0., 0.], aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(v.initializer)
@def_function.function
def scatter_sub():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.stack([
math_ops.cast(replica_id, dtypes.float32),
math_ops.cast(replica_id + 1, dtypes.float32)
]),
indices=array_ops.stack([replica_id, replica_id + 1]),
dense_shape=(3,))
return v.scatter_sub(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_sub)))
self.assertAllEqual([[0., -1., -1.], [0., -1., -1.]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterAdd(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(v.initializer)
@def_function.function
def scatter_add():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.stack([replica_id, replica_id + 1]),
indices=array_ops.stack([replica_id, replica_id + 1]),
dense_shape=(3,))
return v.scatter_add(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_add)))
self.assertAllEqual([[0, 2, 2], [0, 2, 2]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterDiv(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[1, 6, 1], aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(v.initializer)
@def_function.function
def scatter_div():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.reshape(replica_id + 2, [1]),
indices=array_ops.reshape(replica_id, [1]),
dense_shape=(3,))
return v.scatter_div(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_div)))
self.assertAllEqual([[0, 2, 1], [0, 2, 1]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterMul(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[2., 1., 1.], aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(v.initializer)
@def_function.function
def scatter_mul():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.reshape(
math_ops.cast(replica_id + 2, dtypes.float32), [1]),
indices=array_ops.reshape(replica_id, [1]),
dense_shape=(3,))
return v.scatter_mul(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_mul)))
self.assertAllClose([[2., 1.5, 1.], [2., 1.5, 1.]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterMin(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 2, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 2, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_min(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([1]),
indices=array_ops.identity([1]),
dense_shape=(3,))
return v.scatter_min(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_min.*"):
self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_min, args=(v1,))))
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_min, args=(v2,))))
self.assertAllClose([[0, 1, 0], [0, 1, 0]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterMax(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 0, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_max(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([1]),
indices=array_ops.identity([0]),
dense_shape=(3,))
return v.scatter_max(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_max.*"):
self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_max, args=(v1,))))
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_max, args=(v2,))))
self.assertAllClose([[1, 0, 0], [1, 0, 0]], per_replica_results)
@combinations.generate(ms_combination)
def testScatterUpdate(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 0, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_update(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([3]),
indices=array_ops.identity([1]),
dense_shape=(3,))
return v.scatter_update(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_update.*"):
self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_update, args=(v1,))))
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_update, args=(v2,))))
self.assertAllClose([[0, 3, 0], [0, 3, 0]], per_replica_results)
@combinations.generate(ms_combination + tpu_combination)
def testScatterOpsWithNoneAggregation(self, distribution):
def assert_close(v, op, delta, expect):
scatter_op = getattr(v, op)
@def_function.function
def scatter_xxx():
return scatter_op(delta)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_xxx)))
self.assertAllClose([expect, expect], per_replica_results)
with distribution.scope():
v = variables_lib.Variable(
[4.], aggregation=variables_lib.VariableAggregation.NONE)
self.evaluate(variables_lib.global_variables_initializer())
delta = indexed_slices.IndexedSlices(
values=array_ops.identity([2.]),
indices=array_ops.identity([0]),
dense_shape=(1,))
assert_close(v, "scatter_sub", delta, [2.])
assert_close(v, "scatter_add", delta, [4.])
assert_close(v, "scatter_max", delta, [4.])
assert_close(v, "scatter_min", delta, [2.])
assert_close(v, "scatter_mul", delta, [4.])
assert_close(v, "scatter_div", delta, [2.])
assert_close(v, "scatter_update", delta, [2.])
@combinations.generate(ms_combination + tpu_combination)
def testScatterOpsInCrossReplicaContext(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[1, 1, 1], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable([1, 1, 1])
self.evaluate(variables_lib.global_variables_initializer())
value = indexed_slices.IndexedSlices(
values=array_ops.identity([2]),
indices=array_ops.identity([0]),
dense_shape=(3,))
with distribution.scope():
self.evaluate(v1.scatter_add(value))
self.assertAllEqual([3, 1, 1], self.evaluate(v1.read_value()))
self.evaluate(v2.scatter_min(value))
self.assertAllEqual([1, 1, 1], self.evaluate(v2.read_value()))
class OnReadVariableSyncTest(test.TestCase, parameterized.TestCase):
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssign(self, distribution, experimental_run_tf_function):
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", 1.), ("assign_add", 1.), ("assign_sub", -1.)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
# VariableAggregation.SUM in cross-replica mode is tested below,
# VariableAggregation.NONE in cross-replica mode is not supported.
if cross_replica and aggregation in [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.NONE,
]:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignOnReadVar(self, distribution, experimental_run_tf_function):
with distribution.scope():
v_to_assign = variable_scope.variable(
2., aggregation=variables_lib.VariableAggregation.MEAN)
v_to_assign_sub = variable_scope.variable(
-2., aggregation=variables_lib.VariableAggregation.MEAN)
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", v_to_assign), ("assign_add", v_to_assign),
("assign_sub", v_to_assign_sub)]
expected_cross_replica = {
variables_lib.VariableAggregation.SUM: 1.0,
variables_lib.VariableAggregation.MEAN: 2.0,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA: 2.0
}
expected_replica = {
variables_lib.VariableAggregation.SUM: 2.0,
variables_lib.VariableAggregation.MEAN: 2.0,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA: 2.0
}
# aggregation=NONE is not supported for OnReadVariables.
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
      # Assign in replica context with SUM aggregation does not make sense,
      # since the caller could simply assign value * num_replicas instead;
      # it fails with "1. is not a distributed value and is unsupported for
      # aggregation SUM".
if aggregation == variables_lib.VariableAggregation.SUM:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
if cross_replica:
for component in v._values:
self.assertAllEqual(expected_cross_replica.get(aggregation),
self.evaluate(component.read_value()))
else:
for component in v._values:
self.assertAllEqual(expected_replica.get(aggregation),
self.evaluate(component.read_value()))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignPerReplicaVal(self, distribution, experimental_run_tf_function):
if isinstance(distribution, _TPU_STRATEGIES):
self.skipTest("Assigning PerReplica values is not supported. See"
" sponge/80ba41f8-4220-4516-98ce-bbad48f9f11a.")
self.skipTest("We don't support assiging PerReplica values in cross "
"replica context or replica context. see error in "
"sponge/2b2e54c1-eda6-4534-82e1-c73b1dcd517f.")
with distribution.scope():
per_replica_value = values.PerReplica(
[constant_op.constant(2.0),
constant_op.constant(2.0)])
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", per_replica_value)]
    # We don't support assigning PerReplica values to vars in replica context
# with aggregation=NONE.
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
      # Assign in replica context with SUM aggregation does not make sense,
      # since the caller could simply assign value * num_replicas instead;
      # it fails with "1. is not a distributed value and is unsupported for
      # aggregation SUM".
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
# with self.assertRaisesRegex(ValueError, "Attempt to convert a value "):
self.evaluate(assign(fn, v, update_value, cross_replica))
if aggregation == variables_lib.VariableAggregation.SUM:
expected = 4.0
else:
expected = 2.0
for component in v._values:
self.assertAllEqual(expected, self.evaluate(component.read_value()))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignDtypeConversion(self, distribution,
experimental_run_tf_function):
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", 1), ("assign_add", 1), ("assign_sub", -1)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
# VariableAggregation.SUM in cross-replica mode is tested below,
# VariableAggregation.NONE in cross-replica mode is not supported.
if cross_replica and aggregation in [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.NONE,
]:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
@combinations.generate(strategy_with_var_policy())
def testAssignWithAggregationSum(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(v.assign(1. * distribution.num_replicas_in_sync))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
@combinations.generate(strategy_with_var_policy())
def testAssignAddSubWithAggregationSum(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(variables_lib.global_variables_initializer())
with self.assertRaisesRegex(
ValueError, "SyncOnReadVariable does not support "):
self.evaluate(v.assign_add(1.))
with self.assertRaisesRegex(
ValueError, "SyncOnReadVariable does not support "):
self.evaluate(v.assign_sub(1.))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
if experimental_run_tf_function:
read_var_fn = def_function.function(v.read_value)
else:
read_var_fn = v.read_value
results = self.evaluate(
distribution.experimental_local_results(
distribution.run(read_var_fn)))
for component, value in zip(v._values, results):
self.assertAllEqual(self.evaluate(component.read_value()), value)
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInCrossReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
if isinstance(distribution, _TPU_STRATEGIES):
resolver = tpu_cluster_resolver.TPUClusterResolver("")
tpu_strategy_util.initialize_tpu_system(resolver)
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
def assign(v=v):
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return v.assign(math_ops.cast(replica_id, dtypes.float32))
if experimental_run_tf_function:
assign = def_function.function(assign)
self.evaluate(
distribution.experimental_local_results(distribution.run(assign)))
num_replicas = distribution.num_replicas_in_sync
sum_of_replica_values = num_replicas * (num_replicas - 1) / 2.
if aggregation == variables_lib.VariableAggregation.SUM:
expected = sum_of_replica_values
elif aggregation == variables_lib.VariableAggregation.MEAN:
expected = sum_of_replica_values / num_replicas
else:
expected = 0
self.assertEqual(expected, self.evaluate(v.read_value()), aggregation)
self.assertEqual(expected, self.evaluate(v.value()), aggregation)
self.assertEqual(expected, self.evaluate(v), aggregation)
self.assertEqual(expected, self.evaluate(array_ops.identity(v)),
aggregation)
# TODO(b/145574622): Re-enable this test once ReduceOp argument is
# respected on GPUs.
@combinations.generate(strategy_and_run_tf_function_combinations())
def disable_testAllReduce(self, distribution,
experimental_run_tf_function):
with distribution.scope():
v = variable_scope.variable(
2.,
synchronization=variables_lib.VariableSynchronization.ON_WRITE,
aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
def all_reduce():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return ctx.all_reduce("SUM", v) + math_ops.cast(replica_id,
dtypes.float32)
if experimental_run_tf_function:
all_reduce = def_function.function(all_reduce)
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(all_reduce)))
expected_result = []
for i in range(distribution.num_replicas_in_sync):
expected_result.append(2.0 * distribution.num_replicas_in_sync +
1.0 * i)
self.assertEqual(per_replica_results, tuple(expected_result))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignPerReplicaBeforeRead(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
def assign(var=v):
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return var.assign(math_ops.cast(replica_id, dtypes.float32))
if experimental_run_tf_function:
assign = def_function.function(assign)
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(assign)))
expected_result = []
for i in range(distribution.num_replicas_in_sync):
expected_result.append(1.0 * i)
self.assertEqual(per_replica_results, tuple(expected_result))
@combinations.generate(strategy_with_var_policy())
def testReadValueWithAggregationNoneInCrossReplicaContext(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.NONE)
self.evaluate(variables_lib.global_variables_initializer())
with self.assertRaisesRegex(
ValueError, "Could not convert from .* VariableAggregation\\.NONE"):
self.evaluate(v.read_value())
@combinations.generate(strategy_with_var_policy())
def testInitializedToSameValueInsideEagerRun(self, distribution):
if not context.executing_eagerly(): self.skipTest("eager only")
v = [None]
@def_function.function
def step():
def f():
if v[0] is None:
v[0] = variables_lib.Variable(
random_ops.random_normal([]),
synchronization=variables_lib.VariableSynchronization.ON_READ)
distribution.run(f)
context.set_global_seed(None)
step()
vals = self.evaluate(v[0].values)
self.assertAllEqual(vals[0], vals[1])
@combinations.generate(strategy_with_var_policy())
def testOperatorOverride(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.0,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def assign():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return v.assign(math_ops.cast(replica_id, dtypes.float32))
# Assign different replicas with different values.
self.evaluate(distribution.experimental_local_results(
distribution.run(assign)))
self.assertEqual(1.5, self.evaluate(v + 1))
@def_function.function
def add():
return v + 1
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(add)))
self.assertAllEqual([1, 2], per_replica_results)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=["eager"],
use_var_policy=[True, False]))
def testSaveAndRestoreOnRead(self, strategy):
aggregation = [variable_scope.VariableAggregation.SUM,
variable_scope.VariableAggregation.MEAN]
for agg in aggregation:
v_normal_restore = variables_lib.Variable(1.0)
v_normal_save = variables_lib.Variable(2.0)
with strategy.scope():
v_on_read = variables_lib.Variable(
1.0, synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=agg)
@def_function.function
def assign_fn():
cluster_resolver = strategy.cluster_resolver
replica_ctx = ds_context.get_replica_context()
if ((cluster_resolver and cluster_resolver.task_type == "worker") or
math_ops.equal(replica_ctx.replica_id_in_sync_group,
constant_op.constant(1))):
v_on_read.assign(3.) # pylint:disable=cell-var-from-loop
else:
v_on_read.assign(4.) # pylint:disable=cell-var-from-loop
strategy.run(assign_fn)
# Save ONREAD, restore ONREAD
# Saves v[0] + v[1] = 7 for SUM and 3.5 for MEAN.
ckpt = trackable_utils.Checkpoint(var=v_on_read)
manager = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager.save()
# Restores a value of 7/2 = 3.5 for SUM and 3.5 for MEAN.
ckpt.restore(manager.latest_checkpoint)
self.assertEqual(3.5, self.evaluate(v_on_read._values[0]))
# Save ONREAD, restore normal
ckpt_normal = trackable_utils.Checkpoint(var=v_normal_restore)
ckpt_normal.restore(manager.latest_checkpoint)
if agg == variable_scope.VariableAggregation.SUM:
self.assertEqual(7.0, self.evaluate(v_normal_restore.read_value()))
else:
self.assertEqual(3.5, self.evaluate(v_normal_restore.read_value()))
# Save normal, restore ONREAD
ckpt = trackable_utils.Checkpoint(var=v_normal_save)
manager = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager.save()
# Restores a value of 2/2 = 1.0 for SUM and 2.0 for MEAN.
ckpt_on_read = trackable_utils.Checkpoint(var=v_on_read)
ckpt_on_read.restore(manager.latest_checkpoint)
if agg == variable_scope.VariableAggregation.SUM:
self.assertEqual(1.0, self.evaluate(v_on_read._values[0]))
else:
self.assertEqual(2.0, self.evaluate(v_on_read._values[0]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
aggregation=[
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
],
mode=["graph", "eager"],
use_var_policy=[True, False]))
class SyncOnReadScatterReplicaTest(test.TestCase, parameterized.TestCase):
def testScatterSub(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[1., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[0.], [1.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_sub, args=(delta,)))
def testScatterAdd(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[1., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[0.], [1.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_add, args=(delta,)))
def testScatterDiv(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[2., 6., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[2.], [2.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[3.], [3.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_div, args=(delta,)))
def testScatterMul(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[2., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[2.], [3.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[4.], [5.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_mul, args=(delta,)))
def testScatterMin(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[3., 4., 5.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [8.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[9.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_min, args=(delta,)))
def testScatterMax(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[3., 4., 5.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [8.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[9.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_max, args=(delta,)))
def testScatterUpdate(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[0., 0., 0.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[3.], [4.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_min, args=(delta,)))
if __name__ == "__main__":
test_util.main()
```
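As a rough, hedged illustration of what these scatter tests exercise, the sketch below applies a scatter update to a variable created under a MirroredStrategy scope using an IndexedSlices delta, mirroring the scatter_sub case in testScatterOpsWithNoneAggregation. The strategy construction, device list and concrete values are assumptions for illustration, not part of the test file.
```python
# Minimal sketch (assumptions: single-CPU MirroredStrategy, illustrative values).
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(["/cpu:0"])
with strategy.scope():
  v = tf.Variable([4.0], aggregation=tf.VariableAggregation.NONE)
delta = tf.IndexedSlices(values=tf.constant([2.0]),
                         indices=tf.constant([0]),
                         dense_shape=(1,))

@tf.function
def scatter_sub_fn():
  return v.scatter_sub(delta)

# One local result per replica; with the values above each becomes [2.0],
# matching the scatter_sub assertion in testScatterOpsWithNoneAggregation.
print(strategy.experimental_local_results(strategy.run(scatter_sub_fn)))
```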
|
{
"source": "Jessecar96/youtube-dl-1",
"score": 2
}
|
#### File: youtube_dl/extractor/thisoldhouse.py
```python
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none
)
class ThisOldHouseIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?thisoldhouse\.com/(?:watch|how-to|tv-episode|(?:[^/]+/)?\d+)/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.thisoldhouse.com/how-to/how-to-build-storage-bench',
'info_dict': {
'id': '5dcdddf673c3f956ef5db202',
'ext': 'mp4',
'title': 'How to Build a Storage Bench',
'description': 'In the workshop, <NAME> and <NAME> build a storage bench for an entryway.',
'timestamp': 1442548800,
'upload_date': '20150918',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.thisoldhouse.com/watch/arlington-arts-crafts-arts-and-crafts-class-begins',
'only_matching': True,
}, {
'url': 'https://www.thisoldhouse.com/tv-episode/ask-toh-shelf-rough-electric',
'only_matching': True,
}, {
'url': 'https://www.thisoldhouse.com/furniture/21017078/how-to-build-a-storage-bench',
'only_matching': True,
}, {
'url': 'https://www.thisoldhouse.com/21113884/s41-e13-paradise-lost',
'only_matching': True,
}, {
# iframe www.thisoldhouse.com
'url': 'https://www.thisoldhouse.com/21083431/seaside-transformation-the-westerly-project',
'only_matching': True,
}]
_ZYPE_TMPL = 'https://www.thisoldhouse.com/videos/zype/%s'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
r'<iframe[^>]+src=[\'"](?:https?:)?//(?:www\.)?thisoldhouse\.(?:chorus\.build|com)/videos/zype/([0-9a-f]{24})',
webpage, 'video id')
page_title = self._html_search_regex(r'<h1 class="c-page-title">(.+)<\/h1>', webpage, 'title')
series = self._html_search_meta('author', webpage)
season_number = int_or_none(self._search_regex(
r'S(\d+)', page_title, 'season number',
default=None))
episode_number = int_or_none(self._search_regex(
r'E(\d+)', page_title, 'episode number',
default=None))
title = self._search_regex(
r': (.+)', page_title, 'episode title',
default=None)
if series:
series = series.replace(' TV', '')
test = self._request_webpage(self._ZYPE_TMPL % video_id, video_id)
zype_url = test.geturl()
return {
'_type': 'url_transparent',
'id': video_id,
'title': title,
'series': series,
'season_number': season_number,
'episode_number': episode_number,
'url': zype_url,
'ie_key': 'Zype',
}
```
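As a usage note, an extractor like ThisOldHouseIE is normally driven through youtube-dl's top-level API rather than instantiated directly. A hedged sketch follows; the URL is taken from the _TESTS list above and the options are illustrative assumptions.
```python
# Hedged sketch, not part of the extractor module itself.
import youtube_dl

opts = {'quiet': True, 'skip_download': True}  # illustrative options
with youtube_dl.YoutubeDL(opts) as ydl:
    info = ydl.extract_info(
        'https://www.thisoldhouse.com/how-to/how-to-build-storage-bench',
        download=False)
    print(info.get('id'), info.get('title'))
```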
|
{
"source": "jessecarbon/cbapi-python",
"score": 2
}
|
#### File: examples/legacy/binary_export.py
```python
import sys
import optparse
import cbapi
def build_cli_parser():
parser = optparse.OptionParser(usage="%prog [options]", description="Dump All MD5s from the binary index")
# for each supported output type, add an option
#
parser.add_option("-c", "--cburl", action="store", default=None, dest="url",
help="CB server's URL. e.g., http://127.0.0.1 ")
parser.add_option("-a", "--apitoken", action="store", default=None, dest="token",
help="API Token for Carbon Black server")
parser.add_option("-n", "--no-ssl-verify", action="store_false", default=True, dest="ssl_verify",
help="Do not verify server SSL certificate.")
parser.add_option("-p", "--pagesize", action="store", default=128, dest="pagesize",
help="Number of MD5s to retrieve during each API invocation")
parser.add_option("-f", "--file", action="store", default=None, dest="filename",
help="filename of file to write all md5s to")
return parser
def main(argv):
parser = build_cli_parser()
opts, args = parser.parse_args(argv)
if not opts.url or not opts.token or not opts.pagesize or not opts.filename:
print "Missing required param; run with --help for usage"
sys.exit(-1)
# build a cbapi object
#
cb = cbapi.CbApi(opts.url, token=opts.token, ssl_verify=opts.ssl_verify)
start = 0
md5s = []
total = 0
while True:
# perform a single binary search
#
binaries = cb.binary_search("", rows=int(opts.pagesize), start=start)
if 0 == start:
total = int(binaries['total_results'])
print "Total MD5 count is %s" % (binaries['total_results'])
# api indicates "no more" by returning an empty result set
#
if 0 == len(binaries['results']):
break
# for each result
for binary in binaries['results']:
md5s.append(binary['md5'])
print '%s of %s complete (%s%%)' % (len(md5s), total, (100 * len(md5s)) / total)
start = start + int(opts.pagesize)
f = open(opts.filename, 'w')
for md5 in md5s:
f.write("%s\n" % (md5,))
f.close()
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
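The paging pattern above (advance `start` by the page size until the API returns an empty result set) recurs in several of these legacy scripts; a hedged generator wrapper over the same binary_search call might look like the sketch below, where `cb` is assumed to be an already constructed cbapi.CbApi instance.
```python
# Hedged sketch: the empty-result-set paging loop from binary_export.py,
# wrapped as a generator. `cb` is assumed to be a cbapi.CbApi instance.
def iter_binaries(cb, query="", pagesize=128):
    start = 0
    while True:
        page = cb.binary_search(query, rows=pagesize, start=start)
        if len(page['results']) == 0:
            return  # the API signals "no more" with an empty result set
        for binary in page['results']:
            yield binary
        start += pagesize
# Usage (assumption): for binary in iter_binaries(cb): print binary['md5']
```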
#### File: examples/legacy/check_ioc.py
```python
import sys
import struct
import socket
from optparse import OptionParser
from cbapi import CbApi
# if you run this in a cron job,
# put the interval here. This uses the
# "in the last xxx minutes" format; the parser accepts
# h, m or s suffixes.
#CRON_INTERVAL = "24h"
CRON_INTERVAL = None
class CBQuery(object):
def __init__(self, url, token, ssl_verify):
self.cb = CbApi(url, token=token, ssl_verify=ssl_verify)
self.cb_url = url
def report(self, ioc, type, procs, detail=False):
for result in procs["results"]:
# print the results to stdout. you could do anything here -
# log to syslog, send a SMS, fire off a siren and strobe light, etc.
print
print "Found %s IOC for %s in:" % (type, ioc)
print
print "\tPath: %s" % result["path"]
print "\tHostname: %s" % result["hostname"]
print "\tStarted: %s" % result["start"]
print "\tLast Updated: %s" % result["last_update"]
print "\tDetails: %s/#analyze/%s/%s" % (self.cb_url, result["id"], result["segment_id"])
print
if detail:
self.report_detail(ioc, type, result)
def report_detail(self, ioc, type, result):
events = self.cb.process_events(result["id"], result["segment_id"])
proc = events["process"]
if type == "domain" and proc.has_key("netconn_complete"):
for netconn in proc["netconn_complete"]:
ts, ip, port, proto, domain, dir = netconn.split("|")
if ioc in domain:
str_ip = socket.inet_ntoa(struct.pack("!i", int(ip)))
print "%s\t%s (%s:%s)" % (ts, domain, str_ip, port)
elif type == "ipaddr" and proc.has_key("netconn_complete"):
for netconn in proc["netconn_complete"]:
ts, ip, port, proto, domain, direction = netconn.split("|")
packed_ip = struct.unpack("!i", socket.inet_aton(ioc))[0]
#import code; code.interact(local=locals())
if packed_ip == int(ip):
str_ip = socket.inet_ntoa(struct.pack("!i", int(ip)))
print "%s\t%s (%s:%s)" % (ts, domain, str_ip, port)
elif type == "md5" and proc.has_key("modload_complete"):
for modload in proc["modload_complete"]:
ts, md5, path = modload.split("|")
if ioc in md5:
print "%s\t%s %s" % (ts, md5, path)
if result["process_md5"] == ioc:
print "%s\t%s %s" % (result["start"], result["process_md5"], result["path"])
def check(self, iocs, type, detail=False):
# for each ioc, do a search for (type):(ioc)
# e.g,
# domain:bigfish.com
# md5:ce7a81ceccfa03e5e0dfd0d9a7f41466
#
# additionally, if a cron interval is specified, limit searches
# to processes updated in the last CRON_INTERVAL period
#
# note - this is a very inefficient way to do this, since you test only one
# IOC per request - you could build a large OR clause together with a few hundred
# to efficiently and quickly check 1000s of IOCs, at the cost of increased complexity
# when you discover a hit.
#
# note 2 - with a list of flat indicators, what you really want is a CB feed
# see http://github.com/carbonblack/cbfeeds
#
for ioc in iocs:
if CRON_INTERVAL:
q = "%s:%s and last_update:-%s" % (type, ioc, CRON_INTERVAL)
else:
q = "%s:%s" % (type, ioc)
print q
procs = self.cb.process_search(q)
# if there are _any_ hits, give us the details.
# then check the next ioc
if len(procs["results"]) > 0:
self.report(ioc, type, procs, detail)
else:
sys.stdout.write(".")
sys.stdout.flush()
def build_cli_parser():
parser = OptionParser(usage="%prog [options]", description="check Cb index for provided IOCs")
# for each supported output type, add an option
parser.add_option("-c", "--cburl", action="store", default=None, dest="url",
help="CB server's URL. e.g., http://127.0.0.1 ")
parser.add_option("-a", "--apitoken", action="store", default=None, dest="token",
help="API Token for Carbon Black server")
parser.add_option("-f", "--file", action="store", default=None, dest="fname",
help="Filename with CRLF-delimited list of IOCs")
parser.add_option("-t", "--type", action="store", default=None, dest="type",
help="Type of IOCs in the file. Must be one of md5, domain or ipaddr")
parser.add_option("-d", "--detail", action="store_true", default=False, dest="detail",
help="Get full detail about each IOC hit.")
parser.add_option("-n", "--no-ssl-verify", action="store_false", default=True, dest="ssl_verify",
help="Do not verify server SSL certificate.")
return parser
def main(argv):
parser = build_cli_parser()
opts, args = parser.parse_args(argv)
if not opts.url or not opts.token or not opts.fname or not opts.type:
print "Missing required param."
sys.exit(-1)
if not opts.type in ["md5", "domain", "ipaddr"]:
print "Unknown type: ", opts.type
sys.exit(-1)
# setup the CbApi object
cb = CBQuery(opts.url, opts.token, ssl_verify=opts.ssl_verify)
# get the IOCs to check; this is a list of strings, one indicator
# per line. strip off the newlines as they come in
vals = [val.strip() for val in open(opts.fname, "r")]
# check each!
cb.check(vals, opts.type, opts.detail)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
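The note inside check() points out that querying one IOC at a time is inefficient and that a large OR clause could test hundreds of indicators per request. A hedged sketch of that batching idea follows; the chunk size and query shape are assumptions layered on the same process_search call used above.
```python
# Hedged sketch of the batched-OR approach mentioned in check().
# `cb` is assumed to be a cbapi.CbApi instance; chunk_size is arbitrary.
def check_batched(cb, iocs, ioc_type, chunk_size=100):
    for i in range(0, len(iocs), chunk_size):
        chunk = iocs[i:i + chunk_size]
        q = " or ".join("%s:%s" % (ioc_type, ioc) for ioc in chunk)
        procs = cb.process_search(q)
        if len(procs["results"]) > 0:
            # A hit only says the chunk matched; working out which IOC hit
            # still requires inspecting results -- the "increased complexity"
            # the original note warns about.
            yield chunk, procs
```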
#### File: examples/legacy/feed_modify.py
```python
__author__ = 'bwolfson'
import sys
import optparse
# in the github repo, cbapi is not in the example directory
sys.path.append('../src/cbapi')
import cbapi
def build_cli_parser():
parser = optparse.OptionParser(usage="%prog [options]", description="Modify an existing feed")
# for each supported output type, add an option
#
parser.add_option("-c", "--cburl", action="store", default=None, dest="server_url",
help="CB server's URL. e.g., http://127.0.0.1 ")
parser.add_option("-a", "--apitoken", action="store", default=None, dest="token",
help="API Token for Carbon Black server")
parser.add_option("-n", "--no-ssl-verify", action="store_false", default=True, dest="ssl_verify",
help="Do not verify server SSL certificate.")
parser.add_option("-f", "--feedname", action="store", default=None, dest="feedname",
help="Feed Name")
parser.add_option("-i", "--id", action="store", default=None, dest="feedid",
help="Feed Id")
return parser
def main(argv):
parser = build_cli_parser()
opts, args = parser.parse_args(argv)
if not opts.server_url or not opts.token or (not opts.feedname and not opts.feedid):
print "Missing required param; run with --help for usage"
print "One of -f or -i must be specified"
sys.exit(-1)
# build a cbapi object
#
cb = cbapi.CbApi(opts.server_url, token=opts.token, ssl_verify=opts.ssl_verify)
if not opts.feedid:
id = cb.feed_get_id_by_name(opts.feedname)
if id is None:
print "-> No configured feed with name '%s' found!" % (opts.feedname)
return
else:
id = opts.feedid
old_feed = cb.feed_info(id)
#create a new updated feed based on user input
# create
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
#### File: examples/legacy/feed_query_validate.py
```python
import sys
import json
import urllib
import optparse
import cbapi
def build_cli_parser():
parser = optparse.OptionParser(usage="%prog [options]", description="Perform a process search")
# for each supported output type, add an option
#
parser.add_option("-c", "--cburl", action="store", default=None, dest="url",
help="CB server's URL. e.g., http://127.0.0.1 ")
parser.add_option("-a", "--apitoken", action="store", default=None, dest="token",
help="API Token for Carbon Black server")
parser.add_option("-n", "--no-ssl-verify", action="store_false", default=True, dest="ssl_verify",
help="Do not verify server SSL certificate.")
parser.add_option("-f", "--feed", action="store", default=None, dest="feed",
help="feed filename")
return parser
def search_wrapper(cb, query, index):
"""
simple search wrapper
"""
result = {}
result['Query'] = query
try:
if 'events' == index:
results = cb.process_search(query, rows=0)
elif 'modules' == index:
results = cb.binary_search(query, rows=0)
else:
raise Exception("Unrecognized index %s" % index)
result['TotalResults'] = results['total_results']
result['QTime'] = int(1000*results['elapsed'])
except Exception, e:
result['e'] = e
return result
def main(argv):
parser = build_cli_parser()
opts, args = parser.parse_args(argv)
if not opts.url or not opts.token or opts.feed is None:
print "Missing required param; run with --help for usage"
sys.exit(-1)
# build a cbapi object
#
cb = cbapi.CbApi(opts.url, token=opts.token, ssl_verify=opts.ssl_verify)
# read in the entire feed, decode as JSON, and store
#
feed = json.loads(open(opts.feed).read())
# print a legend
#
print "%-20s | %-4s | %-4s | %-7s | %s" % ("report id", "ver", "hits", "QTime", "Query")
print "%-20s | %-4s | %-4s | %-7s | %s" % ("-" * 20, "-" * 4, "-" * 4, "-" * 7, "-" * 100)
# iterate over each report
#
for report in feed['reports']:
# ensure report has an iocs element and skip over any reports without a query ioc
if not report.has_key('iocs') or not report['iocs'].has_key('query'):
continue
# ensure report has both an index_type and search_query field
q = report['iocs']['query'][0]
if not q.has_key('index_type') or not q.has_key('search_query'):
continue
# ensure that the search_query has a query ("q=") value
query = None
urlver = None
for kvpair in q['search_query'].split('&'):
if 2 != len(kvpair.split('=')):
continue
key = kvpair.split('=')[0]
val = kvpair.split('=')[1]
if key == 'q':
query = val
if key == 'cb.urlver':
urlver = val
# without a query, nothing to validate
if query is None:
continue
result = search_wrapper(cb, urllib.unquote(query), q['index_type'])
if not result.has_key('e'):
print "%-20s | %-4s | %-4s | %-7s | %s" % (report.get('id', "<none>"), str(urlver), result.get('TotalResults', 0), str(result.get('QTime', 0)) + "ms", result['Query'])
else:
print "ERROR: %s" % result['e']
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
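The manual split('&') / split('=') handling of search_query above works, but the Python 2 standard library's urlparse.parse_qs expresses the same extraction more directly; a hedged alternative is sketched below. Note that parse_qs already URL-decodes values, so the later urllib.unquote call would become unnecessary.
```python
# Hedged alternative to the manual key/value splitting above (Python 2).
import urlparse

def parse_search_query(search_query):
    params = urlparse.parse_qs(search_query)
    query = params.get('q', [None])[0]          # already URL-decoded
    urlver = params.get('cb.urlver', [None])[0]
    return query, urlver
# Usage (assumption): query, urlver = parse_search_query(q['search_query'])
```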
#### File: examples/legacy/pci-fim.py
```python
import sys
from optparse import OptionParser
from cbapi import CbApi
class CBQuery(object):
def __init__(self, url, token, ssl_verify):
self.cb = CbApi(url, token=token, ssl_verify=ssl_verify)
self.cb_url = url
def report(self, result, search_filename):
# return the events associated with this process segment
# this will include netconns, as well as modloads, filemods, etc.
#
events = self.cb.process_events(result["id"], result["segment_id"])
proc = events["process"]
# for convenience, use locals for some process metadata fields
#
host_name = result.get("hostname", "<unknown>")
process_name = result.get("process_name", "<unknown>")
user_name = result.get("username", "<unknown>")
if proc.has_key("filemod_complete"):
# examine each filemod in turn
#
for filemod in proc["filemod_complete"]:
# filemods are of the form:
# 1|2014-06-19 15:40:05.446|c:\dir\filename||
#
parts = filemod.split('|')
action, ts, filename, filemd5, filetype = parts[:5]
# the _document as a whole_ matched the query
                # that doesn't mean each _individual filemod_ within the document matched
# the user-specified filename to search for
#
# iterate over each filemod and determine if the path matches
# what was specified
#
# make sense? hope so!
#
if search_filename.lower() not in filename.lower():
continue
if "1" == action:
action = "creation"
elif "2" == action or "8" == action:
action = "modification"
elif "4" == action:
action = "deletion"
print "%s|%s|%s|%s|%s|%s" % (host_name, ts, filename, process_name, user_name, action)
def check(self, filename):
# build the query string
#
q = "filemod:%s" % (filename)
# begin with the first result - we'll perform the search in pages
        # the default page size is 10 (10 results)
#
start = 0
# loop over the entire result set
#
while True:
# get the next page of results
#
procs = self.cb.process_search(q, start=start)
# if there are no results, we are done paging
#
if len(procs["results"]) == 0:
break
# examine each result individually
# each result represents a single process segment
#
for result in procs["results"]:
self.report(result, filename)
# move forward to the next page
#
start = start + 10
def build_cli_parser():
parser = OptionParser(usage="%prog [options]", description="Output all file modifications to a set of known filenames. This can be used in support of PCI compliance for file integrity monitoring.")
parser.add_option("-c", "--cburl", action="store", default=None, dest="url",
help="CB server's URL. e.g., http://127.0.0.1 ")
parser.add_option("-a", "--apitoken", action="store", default=None, dest="token",
help="API Token for Carbon Black server")
parser.add_option("-n", "--no-ssl-verify", action="store_false", default=True, dest="ssl_verify",
help="Do not verify server SSL certificate.")
parser.add_option("-f", "--filelist", action="store", default=None, dest="filelist",
help="Filename containing list of newline-delimited filenames to check for modifications")
return parser
def main(argv):
parser = build_cli_parser()
opts, args = parser.parse_args(argv)
if not opts.url or not opts.token or not opts.filelist:
print "Missing required command line switch; use -h for usage."
sys.exit(-1)
cb = CBQuery(opts.url, opts.token, ssl_verify=opts.ssl_verify)
# open the "filelist" file and read it's contents
# this is a newline-delimited file of filenames to be monitored
# both \n and \r\n line endings are supported
#
files = open(opts.filelist).read().split("\n")
# print a legend
#
print "%s|%s|%s|%s|%s|%s" % ("hostname", "timestamp", "filename", "process name", "username", "action")
# iterate over the list of files, removing any trailing whitespace
# and searching the CB database for any file modifications to that file
#
for file in files:
file = file.strip()
if len(file) < 1:
continue
cb.check(file)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
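The pipe-delimited filemod format documented in report() ('1|2014-06-19 15:40:05.446|c:\dir\filename||') is easy to mis-slice by hand; the small hedged helper below names the fields, reusing the action codes already handled above. The field names are my own naming, not part of the Carbon Black API.
```python
# Hedged helper for one filemod record of the form documented in report():
#   action|timestamp|path|md5|filetype
def parse_filemod(filemod):
    action, ts, filename, filemd5, filetype = filemod.split('|')[:5]
    actions = {"1": "creation", "2": "modification",
               "8": "modification", "4": "deletion"}
    return {"action": actions.get(action, action), "timestamp": ts,
            "path": filename, "md5": filemd5, "filetype": filetype}
# parse_filemod("1|2014-06-19 15:40:05.446|c:\\dir\\filename||")
# -> {'action': 'creation', 'timestamp': '2014-06-19 15:40:05.446', ...}
```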
#### File: examples/legacy/watchlist_hits_enum.py
```python
import sys
import optparse
import cbapi
import urllib
import pprint
import json
def build_cli_parser():
parser = optparse.OptionParser(usage="%prog [options]", description="Dump Binary Info")
# for each supported output type, add an option
#
parser.add_option("-c", "--cburl", action="store", default=None, dest="url",
help="CB server's URL. e.g., http://127.0.0.1 ")
parser.add_option("-a", "--apitoken", action="store", default=None, dest="token",
help="API Token for Carbon Black server")
parser.add_option("-n", "--no-ssl-verify", action="store_false", default=True, dest="ssl_verify",
help="Do not verify server SSL certificate.")
parser.add_option("-f", "--full", action="store_true", default=False, dest="fulloutput",
help="Do not truncate watchlist queries in the output")
parser.add_option("-r", "--rows", action="store", default=10, type="int", dest="numrows",
help="number of rows to display")
parser.add_option("-w", "--watchlistid", action="store", default=None, type="int", dest="watchlistid",
help="Enumerate hits from this watchlist id")
return parser
def truncate(fulloutput, string, length):
if fulloutput:
return string
if len(string) + 2 > length:
return string[:length] + "..."
return string
def printWatchlistHits(serverurl, watchlistid, watchlisttype, rows):
global cb
pp = pprint.PrettyPrinter(indent=2)
print rows
getparams = {"cb.urlver": 1,
"watchlist_%d" % watchlistid : "*",
"rows": rows }
if watchlisttype == 'modules':
getparams["cb.q.server_added_timestamp"] = "-1440m"
r = cb.cbapi_get("%s/api/v1/binary?%s" % (serverurl, urllib.urlencode(getparams)))
parsedjson = json.loads(r.text)
pp.pprint(parsedjson)
elif watchlisttype == 'events':
getparams["cb.q.start"] = "-1440m"
r = cb.cbapi_get("%s/api/v1/process?%s" % (serverurl, urllib.urlencode(getparams)))
parsedjson = json.loads(r.text)
pp.pprint(parsedjson)
else:
return
print
print "Total Number of results returned: %d" % len(parsedjson['results'])
print
def main(argv):
parser = build_cli_parser()
opts, args = parser.parse_args(argv)
if not opts.url or not opts.token:
print "Missing required param; run with --help for usage"
sys.exit(-1)
watchlistIds = []
global cb
#
# build a cbapi object
#
cb = cbapi.CbApi(opts.url, token=opts.token, ssl_verify=opts.ssl_verify)
#
# enumerate all watchlists
#
watchlists = cb.watchlist()
print "%-4s | %-32s |" % ('id', 'name')
print "%-4s + %-32s +" % ('-' * 4, '-' * 32)
#
# for each result
#
for watchlist in watchlists:
print "%-4s | %-32s |" % (watchlist['id'], watchlist['name'] )
watchlistIds.append(watchlist['id'])
print "%-4s + %-32s +" % ('-' * 4, '-' * 32)
if not opts.watchlistid:
print "Missing watchlist ID parameter; run with --help for usage"
sys.exit(-1)
if opts.watchlistid not in watchlistIds:
print "Error: Watchlist ID not found"
sys.exit(-1)
print
for watchlist in watchlists:
if opts.watchlistid == watchlist['id']:
print "Printing %d results for watchlist: %s" % (opts.numrows, watchlist['name'])
printWatchlistHits(cb.server, opts.watchlistid, watchlist['index_type'], opts.numrows)
break
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
```
#### File: legacy/util/live_response_helpers.py
```python
import threading
import time
class LiveResponseHelper(threading.Thread):
"""
Threaded class that should do a keep-alive and handle the establishing of
the live response session.
"""
def __init__(self, ext_cbapi, sensor_id):
self.cb = ext_cbapi
self.sensor_id = sensor_id
self.session_id = None
self.keep_alive_time = 60
self.go = True
self.ready_event = threading.Event()
self.ready_event.clear()
threading.Thread.__init__(self)
###########################################################################
def __create_session(self):
target_session = self.cb.live_response_session_create(self.sensor_id)
self.session_id = target_session.get('id')
while target_session.get('status') == "pending":
# could make this configurable to only wait certain number
# of seconds or at least configure how many seconds at a
# time to wait.
time.sleep(5.0)
target_session = self.cb.live_response_session_status(self.session_id)
if not self.go:
break
def __post_and_wait(self, command, command_object=None):
resp = self.cb.live_response_session_command_post(self.session_id, command, command_object)
# TODO -- handle errors
command_id = resp.get('id')
return self.cb.live_response_session_command_get(self.session_id, command_id, wait=True)
###########################################################################
def run(self):
# THIS THREAD IS FOR KEEP-ALIVE
self.__create_session()
self.ready_event.set()
while self.go:
self.cb.live_response_session_keep_alive(self.session_id)
for i in xrange(self.keep_alive_time):
                # sleep one second at a time so we notice quickly
                # if we were told to stop
time.sleep(1.0)
if not self.go:
break
def stop(self, wait=True):
self.go = False
if wait:
self.join()
###########################################################################
def process_list(self):
"""
Returns list of dictionaries containing information about each running process.
"""
self.ready_event.wait()
return self.__post_and_wait("process list").get('processes', [])
def kill(self, pid):
"""
Kills pid on target
"""
self.ready_event.wait()
return self.__post_and_wait("kill", pid)
def get_file(self, filepath):
"""
Returns file data for <filepath> on sensor's host.
"""
self.ready_event.wait()
ret = self.__post_and_wait("get file", filepath)
fileid = ret["file_id"]
return self.cb.live_response_session_command_get_file(self.session_id, fileid)
def put_file(self, rfile, lfile):
"""
Uploads file data from <lfile> to <rfile> on sensor
"""
self.ready_event.wait()
fileid = {'file_id':self.cb.live_response_session_command_put_file(self.session_id, lfile)}
return self.__post_and_wait("put file", [rfile, fileid])
def del_file(self, filepath):
"""
Deletes file from target
"""
self.ready_event.wait()
return self.__post_and_wait("delete file", filepath)
def execute(self, procpath, wait_opt=None):
"""
Creates process on target
"""
self.ready_event.wait()
return self.__post_and_wait("create process", procpath)
def get_registry_value(self, registry_path):
"""
Retrieve the data from a registry value.
"""
self.ready_event.wait()
ret = self.__post_and_wait("reg query value", registry_path)
return ret.get('value', None)
```
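LiveResponseHelper is meant to be started as a background keep-alive thread and then queried; the hedged sketch below uses only the methods defined above. The CbApi URL, token and sensor id are placeholder assumptions.
```python
# Hedged usage sketch for LiveResponseHelper (placeholder URL/token/sensor id).
import cbapi

cb = cbapi.CbApi("https://cbserver", token="API_TOKEN", ssl_verify=False)
lrh = LiveResponseHelper(cb, 1)
lrh.start()  # spawns the keep-alive thread and opens the session
try:
    for proc in lrh.process_list():
        print proc.get('path')
finally:
    lrh.stop()  # stops the keep-alive loop and joins the thread
```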
|
{
"source": "jessecarvalho/BBB-Game-Boninho-SImulator",
"score": 4
}
|
#### File: data/classes/Cast.py
```python
from random import randint
from .Profile import Person
from time import sleep
# Classe Cast deverá criar e gerenciar o elenco
class Cast:
# Método de inicialização da classe
def __init__(self):
# "vida" de cada participante setado incialmente em 0
self.life = 0
# Método para criação do elenco
def newShow(self, person):
print(f'Nome: {person.name}')
print(f'Genero: {person.gender}')
print(f'Traços de personalidade: {person.personalityList}')
print(f'Profissião: {person.profession}')
print(f'Habilidades: {person.hability}')
print(f'Emoji: {person.emoji}')
print(f'Seguidores: {person.followers}')
print(f'% de apoiadores: {person.support}')
print('-----------------------------')
def totalParticipants(self):
print("Quantos participantes irão participar desta edição?")
numParticipantes = int(input("> "))
if 6 <= numParticipantes <= 22:
print("Número de participantes cadastrado.")
self.numParticipant = numParticipantes
else:
print("Por favor, o elenco terá de ter no minimo 6 e no máximo 22 participantes.")
print("--------------------------------------------------------------------------")
self.totalParticipants()
def newCastGenerator(self):
self.totalParticipants()
i = 0
self.castList = []
while i < self.numParticipant:
self.Pessoa = Person()
self.Pessoa.personGenerator()
self.newShow(self.Pessoa)
print("Para aprovar o candidato digite 's' ")
print("Para reprova-lo digite 'n'")
decision = input("> ")
if decision == "s" or decision =="S":
print('-----------------------------')
print("Candidato aprovado!")
print('-----------------------------')
self.castList.append(self.Pessoa)
i += 1
print(f"Faltam apenas {self.numParticipant - i} candidatos para fechar o elenco!")
else:
print('-----------------------------')
print("Candidato Reprovado!")
print('-----------------------------')
return self.castList
def castGenerator(self):
self.totalParticipants()
# Contador setado em 0
i = 0
# Lista vazia criada para armazenar o elenco
self.castList = []
# Gere o elenco e adiciona na lista
while i < self.numParticipant:
# Instância a classe Person
self.Pessoa = Person()
# Gera uma pessoa a partir da classe instanciada
self.Pessoa.personGenerator()
# Adiciona na lista
self.castList.append(self.Pessoa)
# Contador incrementado
i += 1
# Após fim de while retornar o elenco
return self.castList
# Método para exibir o elenco
def show(self):
# Contador setado em 1
self.count = 1
# Exibe ao player todas as infos sobre cada participante
for i in self.castList:
print(f'{self.count}º participante')
print(f'Nome: {i.name}')
print(f'Genero: {i.gender}')
print(f'Traços de personalidade: {i.personalityList}')
print(f'Profissião: {i.profession}')
print(f'Habilidades: {i.hability}')
print(f'Emoji: {i.emoji}')
print(f'Seguidores: {i.followers}')
print(f'% de apoiadores: {i.support}')
print('-----------------------------')
self.count += 1
print("Pressione ENTER para passar para o próximo")
input("> ")
```
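Cast offers two generation paths: castGenerator() builds a fully random cast after asking only for its size, while newCastGenerator() lets the player approve each candidate. A minimal hedged usage sketch follows; the import assumes the package layout used by game.py, and both calls are interactive (they read from stdin, with prompts in Portuguese).
```python
# Minimal hedged usage sketch for Cast; prompts are interactive.
from classes import Cast  # assumes the layout used in data/game.py

cast = Cast()
participants = cast.castGenerator()  # random cast, asks only for the size
cast.show()                          # page through the generated participants
```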
#### File: data/classes/Events.py
```python
import random
class Events:
def __init__(self):
self.events = open("csvFiles/eventos.csv", encoding="UTF-8")
self.listEvents = []
def readingFiles(self, file, list):
# Separar cada registro dos arquivos a partir da quebra de linha
for record in file.splitlines():
# Adicionar linha a linha numa lista
list.append(record)
def eventsAct(self, personList):
i = 0
self.readingFiles(self.events.read(), self.listEvents)
while i < random.randint(1, 5):
person = random.choice(personList)
event = random.choice(self.listEvents)
point = int(event.split("=")[1]) * random.uniform(1.1, 2.8) / random.uniform(1.1, 2.8)
event = event.split("=")[0]
gender = "ele" if person.gender == "Masculino" else "ela"
lastSupport = int(person.support)
person.support = int(person.support + point)
print('________________________________________________________')
print(f"{person.name} {event}")
print(f"Por conta disso {gender} ganhou popularidade saindo de {lastSupport}% e indo para {person.support}%") \
if point > 0 else print(f"Por conta disso ele(a) perdeu:\
popularidade e viu seu apoio cair de {lastSupport}% para {person.support}%")
i += 1
return
```
#### File: data/csvFiles/debug.py
```python
from random import randint
def sorteio():
sorteado = randint(0,15)
return sorteado
def a():
i = 0
lista = []
while i < 3:
newSorteado = sorteio()
print(newSorteado)
if newSorteado in lista:
pass
else:
lista.append(newSorteado)
i += 1
print(lista)
a()
```
#### File: BBB-Game-Boninho-SImulator/data/game.py
```python
from classes import Cast
from classes import Elimination
from classes import Events
from classes import Final
from actions import Prove
from time import sleep
from random import randint
# Criação da classe principal do jogo
class Game():
# Inicializa o objeto
def __init__(self):
# Instância a classe Cast para criação do elenco
self.Cast = Cast()
self.leaderVote = False
self.othersVote = False
self.thirdVote = False
self.immunezed = False
# Inicializa o game com um inicio padronizado
def start(self):
# Menu de navegação do player
while True:
print("-- Escolha uma opção --")
print("-- 1 -> Gerar elenco aleatóriamente --")
print("-- 2 -> Escolher participantes --")
decision = int(input("> "))
if (decision == 1):
# Chama a função que cria o elenco
self.cast = self.createcast(True)
break
elif (decision == 2):
self.cast = self.createcast(False)
break
else:
print("Digito invalido")
while True:
# Menu de navegação do player
print("-- Elenco criado com sucesso! --")
print("-- Escolha uma opção --")
print("-- 1 -> Gerar outro elenco aleatóriamente --")
print("-- 2 -> Ver elenco --")
print("-- 3 -> Avançar para primeira prova do lider --")
decision = int(input("> "))
if (decision == 1):
# Recria o elenco caso usuario deseje
self.createcast(True)
elif (decision == 2):
# Chama-se a função para exibir os participantes
self.showcast()
elif (decision == 3):
break
else:
print("Digito invalido")
def createcast(self, random):
# Cria-se o elenco
if random == True:
cast = self.Cast.castGenerator()
else:
cast = self.Cast.newCastGenerator()
return cast
def showcast(self):
# Exibe os participantes um a um ao player
self.Cast.show()
# Núcleo principal do jogo
def main(self):
# Chama a função start que traz o roteiro inicial
self.start()
print(" Bem vindo, vamos começar o jogo! ")
# Roteiro do jogo até a grande final
while len(self.cast) > 3:
self.immunezed = False
self.leaderVote = False
self.othersVote = False
self.thirdVote = False
# cria a instância da prova do lider do modelo provas
self.leaderProve = Prove(self.cast)
print("________________________________________________________")
# executa a prova do lider armazenando o lider
self.leader = self.leaderProve.leader()
# cria a instância da prova do anjo do modelo provas
sleep(3)
self.angelProve = Prove(self.cast)
# executa a prova do anjo armazenando o anjo
self.angel = self.angelProve.angel()
# Cria a instância de eliminações
self.elimination = Elimination(self.cast, self.leader, self.angel)
# Esse método irá trazer o imunizado
if len(self.cast) > 4:
while self.immunezed == False:
self.immunezed = self.elimination.immunezed()
# A seguir executa-se métodos que iram gerar os votos e emparedados
while self.leaderVote == False:
self.leaderVote = self.elimination.leadervote()
while self.othersVote == False:
self.othersVote = self.elimination.othersvote()
while self.thirdVote == False:
self.thirdVote = self.elimination.thirdperson()
# Exibição ao player dos emparedados e imunizado
print("----------------------------------------------------------")
print(f'Vamos montar o paredão dessa semana!')
print("----------------------------------------------------------")
sleep(3)
print(f"O anjo imunizou {self.immunezed.name}") if self.immunezed is not False else ""
sleep(1)
print(f"O voto do lider foi em: {self.leaderVote.name}")
sleep(1)
print(f"O grupo no confessionario votou em: {self.othersVote.name}")
sleep(1)
print(f"O terceiro a ir ao paredão é: {self.thirdVote.name}")
sleep(1)
print("----------------------------------------------------------")
# Intervenção divina?
greatWall = [self.leaderVote, self.othersVote, self.thirdVote]
self.elimination.intervention(greatWall)
# Método para eliminação
print(f"Momento de tensão no sofá")
sleep(1)
print(
f"Batimentos cardiacos: {self.leaderVote.name}: {randint(80, 140)}, {self.othersVote.name}: {randint(80, 140)}, {self.thirdVote.name}: {randint(80, 140)}")
sleep(2)
self.elimination.toeliminate()
sleep(2)
print("O tempo continua e vamos para mais uma semana")
sleep(1)
print("...")
print("Listamos para você os principais acontecimentos da semana: ")
event = Events()
event.eventsAct(self.cast)
sleep(3)
print("Na casa restam apenas mais 3.")
print("Isso significa que chegamos na grande final!")
finalists = []
for i in self.cast:
finalists.append(i)
final = Final(self.cast)
final.theFinal()
print("Obrigado por jogar nosso joguinho")
print("Desenvolvido por BlackHAts")
if __name__ == '__main__':
game = Game()
game.main()
```
|
{
"source": "jessecarvalho/Exercicios_python",
"score": 4
}
|
#### File: jessecarvalho/Exercicios_python/joKenPo.py
```python
from random import randint
def shot():
print('Suas opções: ')
print('[0] Pedra')
print('[1] Papel')
print('[2] tesoura')
print('Qual a sua jogada?')
move = int(input('>'))
movePc = randint(0,2)
plays = [move, movePc]
return plays
def tester(move):
if (move == 0 or move == 1 or move == 2):
return True;
else:
print("Jogada invalida, tente novamente")
shot();
def play(move, movePc):
if move == 0:
if movePc == 0:
print('Você jogou pedra')
print('O computador jogou pedra')
print('Deu empate!')
if movePc == 1:
print('Você jogou pedra')
print('O computador jogou papel')
print('Você perdeu!')
if movePc == 2:
print('Você jogou pedra')
print('O computador jogou tesoura')
print('Você ganhou!')
elif move == 1:
if movePc == 0:
print('Você jogou papel')
print('O computador jogou pedra')
print('Você ganhou!')
if movePc == 1:
print('Você jogou papel')
print('O computador jogou papel')
print('Deu empate!')
if movePc == 2:
print('Você jogou papel')
print('O computador jogou tesoura')
print('Você perdeu!')
elif move == 2:
if movePc == 0:
print('Você jogou tesoura')
print('O computador jogou pedra')
print('Você perdeu!')
if movePc == 1:
print('Você jogou tesoura')
print('O computador jogou papel')
print('Você ganhou!')
if movePc == 2:
print('Você jogou tesoura')
print('O computador jogou tesoura')
print('Deu empate!')
if __name__ == '__main__':
plays = shot()
tester = tester(plays[0])
play = play(plays[0], plays[1]);
```
|
{
"source": "jessecha/OPCAS",
"score": 2
}
|
#### File: drive_ros_msgs/msg/_ObstacleArray.py
```python
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import drive_ros_msgs.msg
import std_msgs.msg
class ObstacleArray(genpy.Message):
_md5sum = "1bedd6a57408423b561d03b3446fbdd8"
_type = "drive_ros_msgs/ObstacleArray"
_has_header = False #flag to mark the presence of a Header object
_full_text = """drive_ros_msgs/Obstacle[] obstacles
================================================================================
MSG: drive_ros_msgs/Obstacle
Header header
# possible obstacle types
uint8 TYPE_GENERIC = 0
uint8 TYPE_CAMERA = 1
uint8 TYPE_LIDAR = 2
uint8 obstacle_type
geometry_msgs/Polygon polygon
float32 width
float32 trust
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Polygon
#A specification of a polygon where the first and last points are assumed to be connected
Point32[] points
================================================================================
MSG: geometry_msgs/Point32
# This contains the position of a point in free space(with 32 bits of precision).
# It is recommeded to use Point wherever possible instead of Point32.
#
# This recommendation is to promote interoperability.
#
# This message is designed to take up less space when sending
# lots of points at once, as in the case of a PointCloud.
float32 x
float32 y
float32 z"""
__slots__ = ['obstacles']
_slot_types = ['drive_ros_msgs/Obstacle[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
obstacles
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ObstacleArray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.obstacles is None:
self.obstacles = []
else:
self.obstacles = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
length = len(self.obstacles)
buff.write(_struct_I.pack(length))
for val1 in self.obstacles:
_v1 = val1.header
buff.write(_get_struct_I().pack(_v1.seq))
_v2 = _v1.stamp
_x = _v2
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v1.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(val1.obstacle_type))
_v3 = val1.polygon
length = len(_v3.points)
buff.write(_struct_I.pack(length))
for val3 in _v3.points:
_x = val3
buff.write(_get_struct_3f().pack(_x.x, _x.y, _x.z))
_x = val1
buff.write(_get_struct_2f().pack(_x.width, _x.trust))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.obstacles is None:
self.obstacles = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.obstacles = []
for i in range(0, length):
val1 = drive_ros_msgs.msg.Obstacle()
_v4 = val1.header
start = end
end += 4
(_v4.seq,) = _get_struct_I().unpack(str[start:end])
_v5 = _v4.stamp
_x = _v5
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v4.frame_id = str[start:end].decode('utf-8')
else:
_v4.frame_id = str[start:end]
start = end
end += 1
(val1.obstacle_type,) = _get_struct_B().unpack(str[start:end])
_v6 = val1.polygon
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
_v6.points = []
for i in range(0, length):
val3 = geometry_msgs.msg.Point32()
_x = val3
start = end
end += 12
(_x.x, _x.y, _x.z,) = _get_struct_3f().unpack(str[start:end])
_v6.points.append(val3)
_x = val1
start = end
end += 8
(_x.width, _x.trust,) = _get_struct_2f().unpack(str[start:end])
self.obstacles.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
length = len(self.obstacles)
buff.write(_struct_I.pack(length))
for val1 in self.obstacles:
_v7 = val1.header
buff.write(_get_struct_I().pack(_v7.seq))
_v8 = _v7.stamp
_x = _v8
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v7.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(val1.obstacle_type))
_v9 = val1.polygon
length = len(_v9.points)
buff.write(_struct_I.pack(length))
for val3 in _v9.points:
_x = val3
buff.write(_get_struct_3f().pack(_x.x, _x.y, _x.z))
_x = val1
buff.write(_get_struct_2f().pack(_x.width, _x.trust))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.obstacles is None:
self.obstacles = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.obstacles = []
for i in range(0, length):
val1 = drive_ros_msgs.msg.Obstacle()
_v10 = val1.header
start = end
end += 4
(_v10.seq,) = _get_struct_I().unpack(str[start:end])
_v11 = _v10.stamp
_x = _v11
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v10.frame_id = str[start:end].decode('utf-8')
else:
_v10.frame_id = str[start:end]
start = end
end += 1
(val1.obstacle_type,) = _get_struct_B().unpack(str[start:end])
_v12 = val1.polygon
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
_v12.points = []
for i in range(0, length):
val3 = geometry_msgs.msg.Point32()
_x = val3
start = end
end += 12
(_x.x, _x.y, _x.z,) = _get_struct_3f().unpack(str[start:end])
_v12.points.append(val3)
_x = val1
start = end
end += 8
(_x.width, _x.trust,) = _get_struct_2f().unpack(str[start:end])
self.obstacles.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2f = None
def _get_struct_2f():
global _struct_2f
if _struct_2f is None:
_struct_2f = struct.Struct("<2f")
return _struct_2f
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_3f = None
def _get_struct_3f():
global _struct_3f
if _struct_3f is None:
_struct_3f = struct.Struct("<3f")
return _struct_3f
```
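The generated `serialize`/`deserialize` methods above all use the same framing: every variable-length field is written as a little-endian uint32 length prefix followed by its packed payload, and deserialization walks the buffer with start/end offsets. A minimal standalone sketch of that framing (plain `struct`, no ROS required; the field values are made up):
```python
import struct

def pack_string(s):
    # uint32 length prefix followed by UTF-8 bytes, matching the '<I%ss' pattern above
    data = s.encode('utf-8')
    return struct.pack('<I%ss' % len(data), len(data), data)

def pack_points(points):
    # uint32 element count followed by one '<3f' record per Point32
    buf = struct.pack('<I', len(points))
    for x, y, z in points:
        buf += struct.pack('<3f', x, y, z)
    return buf

frame = pack_string('base_link') + pack_points([(1.0, 2.0, 0.0), (3.5, -1.0, 0.0)])
print(len(frame))  # 4 + 9 + 4 + 2 * 12 = 41 bytes

# Reading it back mirrors the start/end offset arithmetic in deserialize()
end = 0
(slen,) = struct.unpack_from('<I', frame, end); end += 4
name = frame[end:end + slen].decode('utf-8'); end += slen
(count,) = struct.unpack_from('<I', frame, end); end += 4
pts = [struct.unpack_from('<3f', frame, end + 12 * i) for i in range(count)]
print(name, pts)
```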
#### File: src/bag_converters/bag_converter.py
```python
PKG = 'bag_converter'
import roslib; roslib.load_manifest(PKG)
import rosbag
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import Int32, String
# Reading bag filename from command line or roslaunch parameter.
import os
import sys
from PyQt4 import QtGui
import subprocess
from optparse import OptionParser
from datetime import datetime
import csv
import std_msgs.msg
import string
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# Define method to fit polynomial to binary image with lines extracted, using sliding window
def sliding_window_polyfit(img):
    # Take a column-wise histogram over the full height of the image
histogram = np.sum(img, axis=0)
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
    # Previously the left/right base was the max of the left/right half of the histogram;
    # here both bases are taken from the full histogram (the half-split version is kept below, commented out)
leftx_base = np.argmax(histogram[:])
rightx_base = np.argmax(histogram[:])
# leftx_base = np.argmax(histogram[:midpoint])
# rightx_base = np.argmax(histogram[midpoint:])
# print('base pts:', leftx_base, rightx_base)
# Choose the number of sliding windows
nwindows = 10
# Set height of windows
window_height = np.int(img.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 600
# Set minimum number of pixels found to recenter window
minpix = 40
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Rectangle data for visualization
rectangle_data = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
rectangle_data.append((win_y_low, win_y_high, win_xleft_low, win_xleft_high, win_xright_low, win_xright_high))
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
left_fit, right_fit = (None, None)
# Fit a second order polynomial to each
if len(leftx) != 0:
left_fit = np.polyfit(lefty, leftx, 2)
if len(rightx) != 0:
right_fit = np.polyfit(righty, rightx, 2)
visualization_data = (rectangle_data, histogram)
return left_fit, right_fit, left_lane_inds, right_lane_inds, visualization_data
print('...')
class ImageCreator():
# Must have __init__(self) function for a class, similar to a C++ class constructor.
def __init__(self):
# Get parameters when starting node from a launch file.
        if len(sys.argv) < 2:  # no extra command-line arguments were given
pathtwo = "/home/nvidia"
path = "/home/nvidia"
save_dir = os.path.join(pathtwo)
filename = "test.bag"
rospy.loginfo("Bag filename is = %s", filename)
# Get parameters as arguments to 'rosrun my_package bag_to_images.py <save_dir> #', where save_dir and filename exist relative to this executable file.
else:
pathtwo = "/home/nvidia"
path = "/home/nvidia/Desktop/imagefiles"
save_dir = os.path.join(pathtwo)
print (save_dir)
filename = "test.bag"
rospy.loginfo("Bag filename = %s", filename)
# Use a CvBridge to convert ROS images to OpenCV images so they can be saved.
self.bridge = CvBridge()
none = 1
ntwo = 1
# Open bag file.
pathtwo = "/home/nvidia"
os.chdir(pathtwo)
retval = os.getcwd()
print "Current working directory %s" % retval
with rosbag.Bag(filename, 'r') as bag:
for topic, msg, time in bag.read_messages():
if (cmp(topic, "image_topic") == 0):
try:
cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
cv_image = cv_image[:,:,:]
height, width, channels = cv_image.shape
cv_image = cv2.resize(cv_image,(640, 512), interpolation = cv2.INTER_AREA)
except CvBridgeError, e:
print e
timestr = str(datetime.fromtimestamp(time.to_time()).strftime('%Y/%m/%d/%H:%M:%S.%f'))
secstr = str(int(msg.header.stamp.to_sec()))
nsecstr = str(msg.header.stamp.to_sec()-int(msg.header.stamp.to_sec()))[2:]
image_name = str("time@" + timestr[11:23])+'.png'
os.chdir(pathtwo)
cv2.imwrite(os.path.join(os.path.expanduser('~'),'Desktop','imagefiles','image', image_name), cv_image)
with open('cameraimages.csv', 'a') as csvfile:
fieldnames = ['time', '.header.stamp.secs', '.header.stamp.nsecs', 'file_location', 'position']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
if str(ntwo) == "1":
writer.writeheader()
writer.writerow({'time': timestr + "\n", '.header.stamp.secs': secstr+"\n", '.header.stamp.nsecs': nsecstr+"\n", 'file_location' :'/home/nvidia/Desktop/imagefiles/image'+'/'+"time@" + timestr[11:23]+'.png'+ "\n"})
ntwo = ntwo + 1;
# Main function.
if __name__ == '__main__':
# Initialize the node and name it.
rospy.init_node(PKG)
# Go to class functions that do all the heavy lifting. Do error checking.
try:
image_creator = ImageCreator()
except rospy.ROSInterruptException: pass
```
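As a quick, self-contained illustration of the histogram-based line fitting that `sliding_window_polyfit` performs above, the sketch below builds a synthetic binary image with one curved line, finds the line base from the column histogram, and fits a second-order polynomial to the nearby nonzero pixels. It is a simplified sketch (no sliding windows, no ROS); the image and margin values are made up.
```python
import numpy as np

# Synthetic 100x200 binary image containing a gently curved "lane line"
img = np.zeros((100, 200), dtype=np.uint8)
rows = np.arange(100)
cols = (120 + 0.002 * (rows - 50) ** 2).astype(int)
img[rows, cols] = 1

# Column histogram and base position, as in sliding_window_polyfit
histogram = np.sum(img, axis=0)
base = int(np.argmax(histogram))

# Collect nonzero pixels within a fixed margin of the base and fit x = f(y)
margin = 30
nonzeroy, nonzerox = img.nonzero()
keep = np.abs(nonzerox - base) < margin
fit = np.polyfit(nonzeroy[keep], nonzerox[keep], 2)
print('base column:', base)
print('fit coefficients (a, b, c):', fit)
```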
#### File: CNN_Model/data_utils/data_processor.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
import time
import os
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.externals import joblib
import matplotlib.pyplot as plt
def load_x_dataset(n_stacked, path, n_jump=None, w=157, h=157, d=3):
print("image loading...")
if n_jump is None:
n_jump = n_stacked
assert n_jump <= n_stacked
x, fnames = [], []
for i in os.walk(path):
_, _, files_ = i
fnames.extend(files_)
fnames = sorted(fnames, key=(lambda fname: int(os.path.splitext(fname)[0])))
img_stack = []
print("image processing (resizing, cropping)")
for i, fname in tqdm(enumerate(fnames), total=len(fnames), leave=False):
img = cv2.imread(os.path.join(path, fname))
img = img[200:500, 70:570]
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_CUBIC)
if i == 100:
cv2.imshow(str(img.shape), img)
cv2.waitKey(3000)
cv2.destroyAllWindows()
img_stack.append(img.astype(np.float32))
if len(img_stack) > n_stacked:
img_stack = img_stack[-n_stacked:]
if (i+1) >= n_stacked and (i+1 - n_stacked) % n_jump == 0:
x.append(np.stack(img_stack))
return np.stack(x) # train_x, test_x
def load_y_dataset(n_stacked, path, n_jump=None):
if n_jump is None:
n_jump = n_stacked
attrs = ['steering', 'throttle']
raw_data = pd.read_csv(path, header=0, encoding='utf-8')
y = raw_data[attrs].values
y = y[n_stacked-1:: n_jump] # start:end:n_steps
return y #train_y.values, test_y.values
def load_dataset(n_stacked, img_path, out_path, w=157, h=157, d=3,
val_size=None, test_size=None, n_jump=None):
assert test_size is None or (test_size >= 0.0 and test_size <= 1.0)
assert val_size is None or (val_size >= 0.0 and val_size <= 1.0)
x = load_x_dataset(n_stacked, img_path, n_jump=n_jump, h=h, w=w, d=d)
y = load_y_dataset(n_stacked, out_path, n_jump=n_jump)
assert len(x) == len(y), "xlen{}, ylen{}".format(len(x), len(y))
train_x, train_y = x, y
val_x, val_y = test_x, test_y = None, None
if test_size is not None:
train_x, test_x, train_y, test_y = train_test_split(
x, y, test_size=test_size,
random_state=123, shuffle=True
)
if val_size is not None:
train_x, val_x, train_y, val_y = train_test_split(
train_x, train_y, test_size=val_size,
random_state=123, shuffle=True
)
return train_x, val_x, test_x, train_y, val_y, test_y
def load_shuffled_dataset(n_stacked, img_path, out_path, w=157, h=157, d=3,
val_size=None, test_size=None, n_jump=None):
assert test_size is None or (test_size >= 0.0 and test_size <= 1.0)
assert val_size is None or (val_size >= 0.0 and val_size <= 1.0)
x = load_x_dataset(n_stacked, img_path, n_jump=n_jump, h=h, w=w, d=d)
y = load_y_dataset(n_stacked, out_path, n_jump=n_jump)
assert len(x) == len(y), "xlen{}, ylen{}".format(len(x), len(y))
train_x, train_y = x, y
val_x, val_y = test_x, test_y = None, None
if test_size is not None:
train_x, test_x, train_y, test_y = train_test_split(
x, y, test_size=test_size,
random_state=123, shuffle=True
)
if val_size is not None:
train_x, val_x, train_y, val_y = train_test_split(
train_x, train_y, test_size=val_size,
random_state=123, shuffle=True
)
return train_x, val_x, test_x, train_y, val_y, test_y
```
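The condition `(i+1) >= n_stacked and (i+1 - n_stacked) % n_jump == 0` in `load_x_dataset` decides which frames start a new stack. The sketch below reproduces that rule on dummy frame indices (the function name and frame counts are mine) to show which stacks are produced for different `n_jump` values:
```python
def stacked_windows(n_frames, n_stacked, n_jump=None):
    # Mirrors the stacking logic in load_x_dataset, but on frame indices only
    if n_jump is None:
        n_jump = n_stacked
    windows, stack = [], []
    for i in range(n_frames):
        stack.append(i)
        stack = stack[-n_stacked:]
        if (i + 1) >= n_stacked and (i + 1 - n_stacked) % n_jump == 0:
            windows.append(tuple(stack))
    return windows

print(stacked_windows(8, n_stacked=2, n_jump=2))  # [(0, 1), (2, 3), (4, 5), (6, 7)]
print(stacked_windows(8, n_stacked=2, n_jump=1))  # [(0, 1), (1, 2), ..., (6, 7)]
```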
#### File: OPCAS/CNN_Model/FinalDeployCode.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from random import shuffle
from std_msgs.msg import UInt16
from std_msgs.msg import String
from sensor_msgs.msg import Joy
from optparse import OptionParser
from datetime import datetime
from PIL import Image
from sensor_msgs.msg import Image as ImageMsg
from cv_bridge import CvBridge, CvBridgeError
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.externals import joblib
from keras.models import Sequential
from keras.layers import Flatten, Conv2D, Activation, Dense, Dropout, MaxPooling3D, Conv3D
from keras import optimizers
from keras import regularizers
from keras.layers.normalization import BatchNormalization
from keras.layers.noise import AlphaDropout
from keras import callbacks
from keras.backend.tensorflow_backend import set_session
from ximea import xiapi  # xiapi.Camera / xiapi.Image are used below but this import was missing
import serial
import matplotlib.pyplot as plt
import glob
import roslib.message
import os
import csv
import sys
import time
import subprocess
import csv
import math
import rospy
import numpy as np
import cv2
import pickle
import pandas as pd
import argparse
import tensorflow as tf
from tensorflow.python.client import device_lib
from tensorflow.python.util import nest
from model.models import build_3d_cnn
config = tf.ConfigProto(allow_soft_placement=True, device_count = {'CPU' : 1, 'GPU' : 1})
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
os.environ["CUDA_VISIBLE_DEVICES"] = ""
global bridge
bridge = CvBridge()
# Frame stacking / resizing parameters, shared with the model construction in main()
length_of_stacked_images = 2
width_of_downsize = 100
height_of_downsize = 100
def deploy_dataset(cam, img):
    # Grab a stack of frames from the camera and preprocess them for the 3D CNN
    x = []
    img_stack = []
    for i in range(0, length_of_stacked_images):
        cam.get_image(img, timeout=20000)
        cv2_img = img.get_image_data_numpy()
        frame = cv2.resize(cv2_img, (640, 512), interpolation=cv2.INTER_AREA)
        frame = frame[210:500, 70:570]
        frame = cv2.resize(frame, (width_of_downsize, height_of_downsize), interpolation=cv2.INTER_CUBIC)
        img_stack.append(frame.astype(np.float32))
    x.append(np.stack(img_stack))
    return np.stack(x)
def main():
rospy.init_node('image_to_neural_net')
increment_val = 0
cam = xiapi.Camera(dev_id=0)
#cam.set_debug_level("XI_DL_TRACE")
cam.open_device()
cam.set_downsampling_type('XI_SKIPPING')
cam.set_downsampling("XI_DWN_2x2")
cam.set_exposure(int(20000))
cam.set_gain(float(5))
cam.set_limit_bandwidth(2500)
cam.set_imgdataformat('XI_RGB24')
#create instance of Image to store image data and metadata
img = xiapi.Image()
print('Starting data acquisition...')
cam.start_acquisition()
arduino = serial.Serial('/dev/ttyACM0', 115200, timeout=.1)
time.sleep(1)
with tf.device('/gpu:0'):
model = build_3d_cnn(width_of_downsize, height_of_downsize, 3, length_of_stacked_images)
saved_file_name = './3DCNN.hdf5'
model.load_weights(saved_file_name)
throttler = 0
while True:
            deploy_x = deploy_dataset(cam, img)
model_y = model.predict(deploy_x, batch_size=1, verbose=0)
attrs = ['steering', 'throttle']
steering = float(model_y[0][0])
throttle = float(model_y[0][1])
#throttleposition = (1489 + 100*(throttle))
if throttler == 1:
throttleposition = 1500
throttler = 0
if throttler == 0:
throttleposition = 1489
throttler = throttler + 1
steeringangle = (109 + 100*((steering) + 0.25))
if steeringangle < 83:
steeringangle = 83
if steeringangle >127:
steeringangle = 127
arduino.write(str(throttleposition) + str(steeringangle))
print(steeringangle)
print(throttleposition)
if __name__ == '__main__':
main()
```
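The deployment loop above converts the network's steering output to a servo command with `109 + 100 * (steering + 0.25)` and clips it to [83, 127]. A tiny sketch of just that mapping (the sample inputs are arbitrary):
```python
def steering_to_servo(steering):
    # Affine mapping followed by clipping, as in the deployment loop above
    angle = 109 + 100 * (steering + 0.25)
    return max(83, min(127, angle))

for s in (-0.5, -0.25, 0.0, 0.25):
    print(s, '->', steering_to_servo(s))
```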
#### File: CNN_Model/model_test_utils/metrics.py
```python
import numpy as np
def mean_absolute_relative_error(y_true, y_pred):
assert y_true.shape == y_pred.shape
return np.mean(np.abs((y_true - y_pred) / y_true))
def coefficient_of_determination(y_true, y_pred):
assert y_true.shape == y_pred.shape
u = ((y_true - y_pred) ** 2).sum()
v = ((y_true - np.mean(y_true)) ** 2).sum()
R2_val = 1-u/v
return R2_val
```
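A minimal usage example for the two metrics above, with the formulas inlined so it runs standalone (the prediction values are made up):
```python
import numpy as np

y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.1, 1.9, 3.3, 3.6])

# mean_absolute_relative_error
mare = np.mean(np.abs((y_true - y_pred) / y_true))
# coefficient_of_determination
u = ((y_true - y_pred) ** 2).sum()
v = ((y_true - np.mean(y_true)) ** 2).sum()
r2 = 1 - u / v

print('MARE:', mare)  # 0.0875
print('R^2:', r2)     # 0.946
```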
#### File: OPCAS/CNN_Model/salient_features_3d.py
```python
from keras.layers import Input, Dense, merge
from keras.layers.convolutional import Conv3DTranspose
from keras.models import Model
from keras.models import Sequential
from keras.layers import Convolution3D, Conv3D, MaxPooling3D, Reshape, BatchNormalization, Lambda
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.layers.advanced_activations import ELU
from keras import regularizers
from keras import optimizers
from keras import backend as K
import tensorflow as tf
import numpy as np
import cv2
import os, os.path
from matplotlib import pyplot as plt
from matplotlib import animation
from IPython.display import display, HTML
from glob import iglob
from model.models import build_3d_cnn
import matplotlib.animation as animation
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto(allow_soft_placement=True, device_count = {'CPU' : 1, 'GPU' : 1})
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
global n_stacked
n_stacked = 2
model = build_3d_cnn(w=104, h=104, d=3, s=n_stacked)
model.load_weights('3DCNN.hdf5')
img_in = Input(shape=(n_stacked, 104, 104, 3), name='img_in')
h = 104
w = 104
d = 3
s = n_stacked
x = img_in
x = Conv3D(24, (5,5,5), strides=(1,2,2), activation='elu', name='conv1', border_mode='same', data_format='channels_last', input_shape=(s, h, w, d))(x)
x = Conv3D(32, (5,5,5), strides=(1,2,2), activation='elu', name='conv2', border_mode='same', data_format='channels_last', input_shape=(s, h, w, d))(x)
x = Conv3D(64, (5,5,5), strides=(1,2,2), activation='elu', name='conv3', border_mode='same', data_format='channels_last', input_shape=(s, h, w, d))(x)
x = Conv3D(64, (3,3,3), strides=(1,1,1), activation='elu', name='conv4', border_mode='same', data_format='channels_last', input_shape=(s, h, w, d))(x)
conv_5 = Conv3D(64, (3,3,3), strides=(1,1,1), activation='elu', name='conv5', border_mode='same', data_format='channels_last', input_shape=(s, h, w, d))(x)
convolution_part = Model(inputs=[img_in], outputs=[conv_5])
for layer_num in ('1', '2', '3', '4', '5'):
convolution_part.get_layer('conv' + layer_num).set_weights(model.get_layer('conv' + layer_num).get_weights())
inp = convolution_part.input # input placeholder
outputs = [layer.output for layer in convolution_part.layers] # all layer outputs
functor = K.function([inp], outputs)
kernel_3x3x3 = tf.constant(np.array(
[
[
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]]
],
[
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]]
],
[
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]]]
]
]
), tf.float32)
kernel_5x5x5 = tf.constant(np.array(
[
[
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]]
],
[
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]]
],
[
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]]
],
[
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]]
],
[
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]],
[[[1]], [[1]], [[1]], [[1]], [[1]]]
]
]
), tf.float32)
layers_kernels = {5: kernel_3x3x3, 4: kernel_3x3x3, 3: kernel_5x5x5, 2: kernel_5x5x5, 1: kernel_5x5x5}
layers_strides = {5: [1, 1, 1, 1, 1], 4: [1, 1, 1, 1, 1], 3: [1, 1, 2, 2, 1], 2: [1, 1, 2, 2, 1], 1: [1, 1, 2, 2, 1]}
def compute_visualisation_mask(img):
activations = functor([np.array([img])])
upscaled_activation = np.ones((2,13,13))
for layer in [5, 4, 3, 2, 1]:
the_layers = np.mean(activations[layer], axis=4).squeeze(axis=0)
averaged_activation = the_layers * upscaled_activation
outputs_shape = (activations[layer - 1].shape[1], activations[layer - 1].shape[2], activations[layer - 1].shape[3])
x = np.reshape(averaged_activation, (1, averaged_activation.shape[0],averaged_activation.shape[1],averaged_activation.shape[2],1))
modeltwo = Sequential()
if layer == 5:
modeltwo.add(Conv3DTranspose(filters=1, kernel_size=(3,3,3), strides=(1,1,1),
input_shape=(2, 13, 13, 1), data_format='channels_last',
padding='same'))
if layer == 4:
modeltwo.add(Conv3DTranspose(filters=1, kernel_size=(3,3,3), strides=(1,1,1),
input_shape=(2, 13, 13, 1), data_format='channels_last',
padding='same'))
if layer == 3:
modeltwo.add(Conv3DTranspose(filters=1, kernel_size=(5,5,5), strides=(1,2,2),
input_shape=(2, 13, 13, 1), data_format='channels_last',
padding='same'))
if layer == 2:
modeltwo.add(Conv3DTranspose(filters=1, kernel_size=(5,5,5), strides=(1,2,2),
input_shape=(2, 26, 26, 1), data_format='channels_last',
padding='same'))
if layer == 1:
modeltwo.add(Conv3DTranspose(filters=1, kernel_size=(5,5,5), strides=(1,2,2),
input_shape=(2, 52, 52, 1), data_format='channels_last',
padding='same'))
result = modeltwo.predict(x)
result = result.squeeze(axis=0)
upscaled_activation = np.reshape(result, outputs_shape)
final_visualisation_mask = upscaled_activation
return (final_visualisation_mask - np.min(final_visualisation_mask))/(np.max(final_visualisation_mask) - np.min(final_visualisation_mask))
def plot_movie_mp4(image_array):
dpi = 70.0
xpixels, ypixels = image_array[0].shape[0], image_array[0].shape[1]
fig = plt.figure(figsize=(ypixels/dpi, xpixels/dpi), dpi=dpi)
im = plt.figimage(image_array[0])
def animate(i):
im.set_array(image_array[i])
return (im,)
anim = animation.FuncAnimation(fig, animate, frames=len(image_array))
display(HTML(anim.to_html5_video()))
anim.save('/home/jesse/Desktop/animation.mp4', writer='imagemagick', fps=40)
imgs = []
alpha = 0.005
beta = 1.0 - alpha
counter = 0
img_stack = []
z = []
number = 69800
numbertwo = 69800
target_image = []
path, dirs, files = next(os.walk('/home/jesse/Desktop/imagefiles/image_set/'))
length = len(files)
quarterlength = (length - (length % n_stacked))/n_stacked
for a in range(quarterlength):
display_img_stack = []
for b in range(n_stacked):
img = cv2.imread('/home/jesse/Desktop/imagefiles/image_set/'+ str(number) + '.png')
img = img[210:500, 70:570]
img = cv2.resize(img, (104, 104), interpolation=cv2.INTER_CUBIC)
img_stack.append(img.astype(np.float32))
display_img_stack.append(img.astype(np.float32))
number = number + 1
counter += 1
if counter == 1:
cv2.imshow(str(img.shape), img)
cv2.waitKey(1000)
cv2.destroyAllWindows()
if len(img_stack) > 1:
img_stack = img_stack[-n_stacked:]
z.append(np.stack(img_stack))
img = np.stack(img_stack)
salient_mask = compute_visualisation_mask(img)
for a in range(n_stacked):
temp_img = cv2.imread('/home/jesse/Desktop/imagefiles/image_set/'+ str(numbertwo) + '.png')
numbertwo = numbertwo + 1
temp_img = temp_img[210:500, 70:570]
temp_img = cv2.resize(temp_img, (104, 104), interpolation=cv2.INTER_AREA)
temp_img = cv2.cvtColor(temp_img, cv2.COLOR_BGR2RGB)
salient_masked_one = salient_mask[0,:,:]
salient_masked_two = salient_mask[1,:,:]
#salient_masked_three = salient_mask[2,:,:]
salient_mask_stacked = np.dstack((salient_masked_one,salient_masked_two))
salient_mask_stacked = np.dstack((salient_mask_stacked,salient_masked_two))
cv2.imshow(str(salient_mask_stacked.shape), salient_mask_stacked)
cv2.waitKey(100)
cv2.destroyAllWindows()
blend = cv2.addWeighted(temp_img.astype('float32'), alpha, salient_mask_stacked, beta, 0.0)
imgs.append(blend)
print(counter)
if counter >= 70800:
break
plot_movie_mp4(imgs)
```
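The overlay step at the end of the script min-max normalizes the upscaled activation mask and blends it over the frame with `cv2.addWeighted`. A compact sketch of just that step on random stand-in data (requires OpenCV, but no model or image files):
```python
import numpy as np
import cv2

mask = np.random.rand(104, 104).astype(np.float32) * 7.0             # stand-in activation map
frame = np.random.randint(0, 255, (104, 104, 3)).astype(np.float32)  # stand-in RGB frame

# Min-max normalization, as in compute_visualisation_mask()
norm = (mask - mask.min()) / (mask.max() - mask.min())

# Stack the single-channel mask to three channels and blend, as in the loop above
mask_rgb = np.dstack((norm, norm, norm))
alpha, beta = 0.005, 0.995
blend = cv2.addWeighted(frame, alpha, mask_rgb, beta, 0.0)
print(blend.shape, blend.dtype)
```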
|
{
"source": "Jesse-Clarkayx/yx_motor",
"score": 2
}
|
#### File: yx_motor/yx_motor/api.py
```python
__all__ = ['log_request', 'API', 'logger', 'default_headers']
# Cell
from types import SimpleNamespace
import functools
import requests
# TODO: Break this out into import
logger = SimpleNamespace(debug=print)
default_headers = {
"Content-Type": "application/json",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip,deflate",
}
# TODO: Wrap this one layer deeper and allow for
# passing in a custom logging function as a parameterized
# decorator.
def log_request(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
response = func(*args, **kwargs)
# TODO: Add error handling and input validation for this
request_method = response.request.method
request_url = response.url
request_headers = response.request.headers
if logger:
logger.debug(
f"{request_method} sent to: {request_url}\n"
f"with headers: {request_headers}\n"
f"Response Status: {response.status_code}"
)
return response
return wrapper
class API(object):
def __init__(self, api_url, headers=None):
self.api_url = api_url
headers = headers or default_headers
self.headers = headers.copy()
self.is_authenticated = False
self.jar = requests.cookies.RequestsCookieJar()
self.delete = log_request(self._delete)
self.get = log_request(self._get)
self.post = log_request(self._post)
self.put = log_request(self._put)
def get_path(self, url):
api_url = self.api_url if not self.api_url.endswith("/") else self.api_url[:-1]
template = "{}{}" if url.startswith("/") else "{}/{}"
return template.format(api_url, url)
def _post(
self,
url,
json=None,
params=None,
files=None,
data=None,
cookies=None,
verify=False,
non_default_headers=None,
):
if not cookies:
cookies = self.jar
path = self.get_path(url)
if non_default_headers:
headers = non_default_headers
else:
headers = self.headers
response = requests.post(
path,
json=json,
params=params,
headers=headers,
files=files,
data=data,
cookies=cookies,
verify=verify,
)
return response
def _put(self, url, json=None, params=None):
path = self.get_path(url)
logger.debug(
"PUT request sent to: {} \n\theaders: {}\n\tjson: {}\n\tparams: {}".format(
path, self.headers, json, params
)
)
response = requests.put(path, json=json, params=params, headers=self.headers)
return response
def _get(
self,
url,
json=None,
params=None,
files=None,
data=None,
cookies=None,
verify=False,
):
if not cookies:
cookies = self.jar
path = self.get_path(url)
response = requests.get(
path,
params=params,
headers=self.headers,
json=json,
files=files,
data=data,
cookies=cookies,
verify=verify,
)
return response
def _delete(self, url, json=None, params=None):
path = self.get_path(url)
response = requests.delete(path, params=params, headers=self.headers, json=json)
return response
```
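A short usage sketch for the `API` wrapper above, assuming the package is importable as `yx_motor`; the base URL is a placeholder, and only `get_path` is exercised so no network access is needed:
```python
from yx_motor.api import API

api = API(api_url="https://example.invalid/api/v1/")  # placeholder URL

# get_path joins the base URL and an endpoint without doubling slashes
print(api.get_path("/jobs"))  # https://example.invalid/api/v1/jobs
print(api.get_path("jobs"))   # https://example.invalid/api/v1/jobs

# Requests made through api.get / api.post are wrapped by log_request, which
# prints the method, URL, request headers and response status code.
```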
#### File: yx_motor/yx_motor/authenticate.py
```python
__all__ = ['Authenticate']
# Cell
import requests
from .api import API
class Authenticate:
"Class for handling authenticate API actions"
def __init__(self, api: API):
self.api = api
self.base_endpoint = "authenticate/"
def authenticate(self, login_email: str, login_pwd: str) -> requests.Response:
        payload = {"email": login_email, "password": login_pwd}
response = self.api.post(url=self.base_endpoint, json=payload)
if response.status_code == 200:
self.api.jar.update(response.cookies)
self.api.is_authenticated = True
return response
def logout(self):
logout_endpoint = f"{self.base_endpoint}logout"
response = self.api.post(url=logout_endpoint)
if response.status_code == 204:
self.api.jar.update(response.cookies)
self.api.is_authenticated = False
return response
```
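A sketch of the login flow built from the two classes above; the server URL and credentials are placeholders, so the calls only succeed against a live instance:
```python
from yx_motor.api import API
from yx_motor.authenticate import Authenticate

api = API(api_url="https://your-server.example/api/v1/")  # placeholder
auth = Authenticate(api)

response = auth.authenticate("user@example.com", "not-a-real-password")
if response.status_code == 200:
    # Session cookies are now stored in api.jar and reused by later requests
    print("authenticated:", api.is_authenticated)
    auth.logout()
```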
|
{
"source": "JesseClarkND/AWS-Route53-Monitor",
"score": 2
}
|
#### File: JesseClarkND/AWS-Route53-Monitor/main.py
```python
import sys
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
import boto3
from botocore.exceptions import ClientError
import botocore.exceptions
import simplejson as json
import os.path
from os import path
import smtplib, ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
import datetime
#Based off the work here:
#https://www.cloudconformity.com/knowledge-base/aws/Route53/dangling-dns-records.html
#REMEMBER TO SET YOUR SMTP CREDS IN credentials-email.txt
PROFILE_NAMES = ["default"]
REGIONS = ["us-east-1", "us-east-2", "us-west-1", "us-west-2"]
RECORD_TYPES = ['A', 'AAAA', 'CNAME']
ENABLE_DIFFERENCE_ALERT = True
WORK_FOLDER = "./"
VERBOSE = True
SENDER_EMAIL = "<EMAIL>" # Enter your address
RECEIVER_EMAIL = ["<EMAIL>"] # Enter receiver addresses; this is a list, so multiple recipients can be added
#Find all the Route53 hosted zones associated with the current session
def list_hosted_zones(session):
route53 = session.client('route53')
response = route53.list_hosted_zones()
return response['HostedZones']
#Find the resource record sets for a hosted zone
def list_resource_record_sets(session, hosted_zone_id):
route53 = session.client('route53')
response = route53.list_resource_record_sets(HostedZoneId=hosted_zone_id)
return response['ResourceRecordSets']
#Attempt to find the Elastic IP address associated with an EC2 instance
def describe_address(session, eip, region):
try:
ec2 = session.client('ec2', region_name=region)
response = ec2.describe_addresses(PublicIps=[eip])
return response['Addresses']
except botocore.exceptions.ClientError as error:
#print(error)
if str(error).startswith('An error occurred (InvalidAddress.NotFound) when calling the DescribeAddresses operation: Address'):
return []
def log(message):
global VERBOSE
now = datetime.datetime.now()
message = now.strftime("%d/%m/%Y %H:%M:%S") + " " + message + "\n"
    #Here you can set up a log file if you wish; otherwise the message is just printed to screen
#logpath = now.strftime("C:/logs/route53-%d-%m-%Y")+'.txt'
#mode = 'a+' if os.path.exists(logpath) else 'w+'
#f = open(logpath,mode)
#f.write(message)
#f.close()
if VERBOSE:
print(message)
#Big ugly function to generate HTML report
def create_report(report_name, dangling_pointers, regions, types):
global WORK_FOLDER
f = open(WORK_FOLDER+report_name+'.html','w')
message = "<html>\n"
message += "<center><h2>Route 53 Dangling DNS Pointers</h2>\n"
message += "Regions Checked: " + str(regions) +"<br/>"
message += "Types Checked: " + str(types) +"<br/>"
message += "</center>"
message += "<table width='100%'>\n"
message += "<tr>\n"
message += " <th>Profile</th>\n"
message += " <th>Hosted Zone</th>\n"
message += " <th>EIP</th>\n"
message += " <th>Resource Name</th>\n"
message += " <th>Type</th>\n"
message += "</tr>\n"
for pointer_key in dangling_pointers:
for pointer in dangling_pointers[pointer_key].dangling_pointers:
message += "<tr>\n"
message += "<td style='text-align: center'>\n"
message += pointer.profile_name
message += "</td>\n"
message += "<td style='text-align: center'>\n"
message += "<a target='_blank' href='https://console.aws.amazon.com/route53/v2/hostedzones#ListRecordSets/"+pointer.environment_hosted_zone+"'>"+pointer.environment_hosted_zone+"</a>"
message += "</td>\n"
message += "<td style='text-align: center'>\n"
message += pointer.eip
message += "</td>\n"
message += "<td style='text-align: center'>\n"
message += "<a target-'_blank' href='https://"+pointer.resourse_name.strip('.')+"'>"+pointer.resourse_name+"</a>"
message += "</td>\n"
message += "<td style='text-align: center'>\n"
message += pointer.type
message += "</td>\n"
message += "</tr>\n"
message += "</table>\n"
message += "</html>\n"
f.write(message)
f.close()
#Save our environment JSON to a file so we have something to compare to in the future
def create_json(environment_results):
global WORK_FOLDER
for environment_name in environment_results:
f = open(WORK_FOLDER+environment_name+'.json','w')
f.write(environment_results[environment_name].toJSON())
f.close()
def simple_get(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None.
"""
try:
#print(url)
headers = {'User-Agent': 'Mozilla/5.0 (platform; rv:geckoversion) Gecko/geckotrail Firefox/firefoxversion'}
with closing(get(url, stream=True, headers=headers, allow_redirects=True, timeout=8)) as resp:
#print(str(resp.content))
return (resp.status_code == 200)
except RequestException as e:
#log_error('Error during requests to {0} : {1}'.format(url, str(e)))
return ""
#Find differences between two lists, so we can note changes in our env
def list_diff(li1, li2):
li_dif = []
for i in li1:
boolFound = False
for x in li2:
if x.eip == i.eip and x.resourse_name == i.resourse_name and x.environment_hosted_zone == i.environment_hosted_zone:
boolFound = True
if boolFound == False:
li_dif.append(i)
return li_dif
class EnvironmentResults:
def __init__(self):
self.profile_name = ''
self.regions_tested = []
self.dangling_pointers = []
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class DanglingPointer:
def __init__(self):
self.profile_name = ''
self.environment_hosted_zone = ''
self.eip = ''
self.type = ''
self.resourse_name = ''
#self.regions_tested = []
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
def send_email(sender_email, receiver_emails, msg):
global WORK_FOLDER
global SENDER_EMAIL
email_info = []
with open(WORK_FOLDER+'credentials-email.txt', 'r') as f:
email_info = json.load(f)
context = ssl.create_default_context()
with smtplib.SMTP(email_info['server'], email_info['port']) as server:
server.ehlo() # Can be omitted
server.starttls(context=context)
server.ehlo() # Can be omitted
server.login(email_info['username'], email_info['password'])
server.sendmail(SENDER_EMAIL, receiver_emails, msg.as_string())
server.quit()
def send_difference_email():
global WORK_FOLDER
global SENDER_EMAIL
global RECEIVER_EMAIL
msg = MIMEMultipart('alternative')
msg['Subject'] = "Route 53 Difference!"
msg['From'] = SENDER_EMAIL
msg['To'] = ", ".join(RECEIVER_EMAIL)
email_body = ''
with open(WORK_FOLDER+'report_difference.html', 'r') as f:
email_body = f.read()
# Create the body of the message (a plain-text and an HTML version).
text = "Hi!\nYou should enable HTML emails"
html = email_body
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
msg.attach(part1)
msg.attach(part2)
with open('report_difference.html', "rb") as fil:
part = MIMEApplication(
fil.read(),
Name='report_difference.html'
)
# After the file is closed
part['Content-Disposition'] = 'attachment; filename="%s"' % 'report_difference.html'
msg.attach(part)
send_email(SENDER_EMAIL, RECEIVER_EMAIL, msg)
def main():
log("Starting Route53 Checker")
log(str(len(PROFILE_NAMES))+ " profiles loaded")
found_dangling_pointers = {}
environment_differences = {}
for name in PROFILE_NAMES:
temp_result = EnvironmentResults()
temp_result.profile_name = name
temp_result.regions_tested = REGIONS
found_dangling_pointers[name] = temp_result
temp_result2 = EnvironmentResults()
temp_result2.profile_name = name
temp_result2.regions_tested = REGIONS
environment_differences[name] = temp_result2
for profile_name in PROFILE_NAMES:
log("Testing Env: "+ profile_name)
session = boto3.Session(profile_name=profile_name)
environment_hosted_zones = list_hosted_zones(session)
for environment_hosted_zone in environment_hosted_zones:
eip_list = list_resource_record_sets(session, environment_hosted_zone['Id'].split('/')[2])
for eip in eip_list:
#print(eip)
                if eip['Type'] in RECORD_TYPES: #The list_resource_record_sets record isn't supported, even though it's in the boto3 documentation :(
if 'ResourceRecords' in eip.keys():
if eip['Type'] == 'CNAME':
for resource_record in eip['ResourceRecords']:
log(resource_record['Value'])
log('-------------------')
#print(resource_record['Value'])
#look for it
boolFound = simple_get(resource_record['Value'].strip('.'))
if boolFound==False:
log("No record found!")
d_pointer = DanglingPointer()
d_pointer.profile_name = profile_name
d_pointer.environment_hosted_zone = environment_hosted_zone['Id'].split('/')[2]
d_pointer.type = eip['Type']
d_pointer.eip = resource_record['Value']
d_pointer.resourse_name = eip['Name']
found_dangling_pointers[profile_name].dangling_pointers.append(d_pointer)
else:
for resource_record in eip['ResourceRecords']:
log(resource_record['Value'])
log('-------------------')
boolFound = False
for region in REGIONS:
addresses = describe_address(session, resource_record['Value'], region)
if len(addresses) != 0:
boolFound = True
break
log(region + " : " + str(len(addresses)))
if boolFound==False:
log("No record found!")
d_pointer = DanglingPointer()
d_pointer.profile_name = profile_name
d_pointer.environment_hosted_zone = environment_hosted_zone['Id'].split('/')[2]
d_pointer.type = eip['Type']
d_pointer.eip = resource_record['Value']
d_pointer.resourse_name = eip['Name']
found_dangling_pointers[profile_name].dangling_pointers.append(d_pointer)
boolSendEmail = False
for profile_name in PROFILE_NAMES:
if path.exists(WORK_FOLDER+profile_name+'.json'):
# print('here')
with open(WORK_FOLDER+profile_name+'.json', 'r') as f:
environment_difference = EnvironmentResults()
environment_difference.regions_tested = REGIONS
environment_difference.profile_name = profile_name
old_environment_result = json.load(f)
if 'dangling_pointers' in old_environment_result:
temp_pointers_json = old_environment_result['dangling_pointers']
old_environment_result['dangling_pointers'] = []
for i in temp_pointers_json:
dangler = DanglingPointer()
dangler.profile_name = i['profile_name']
dangler.environment_hosted_zone = i['environment_hosted_zone']
dangler.eip = i['eip']
dangler.type = i['type']
dangler.resourse_name = i['resourse_name']
old_environment_result['dangling_pointers'].append(dangler)
print("Length of found: " + str(len(found_dangling_pointers[profile_name].dangling_pointers)))
print("Length of old: " + str(len(old_environment_result['dangling_pointers'])))
environment_difference.dangling_pointers = list_diff(found_dangling_pointers[profile_name].dangling_pointers, old_environment_result['dangling_pointers'])
if len(environment_difference.dangling_pointers) > 0:
boolSendEmail = True
environment_differences[profile_name] = environment_difference
log("Creating report")
create_report('report', found_dangling_pointers, REGIONS, RECORD_TYPES)
log("Creating difference report")
create_report('report_difference', environment_differences, REGIONS, RECORD_TYPES)
if boolSendEmail and ENABLE_DIFFERENCE_ALERT:
log("Sending email")
send_difference_email()
log("Creating json")
create_json(found_dangling_pointers)
log("Ending Route53 Checker")
if __name__ == "__main__":
main()
```
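The difference report hinges on `list_diff`, which treats two entries as the same record only when their `eip`, `resourse_name` and `environment_hosted_zone` all match. A standalone sketch of that comparison with minimal stand-in objects (the hostnames and addresses are hypothetical):
```python
from types import SimpleNamespace

def list_diff(li1, li2):
    # Same matching rule as in main.py: eip + resourse_name + hosted zone id
    out = []
    for i in li1:
        if not any(x.eip == i.eip and x.resourse_name == i.resourse_name
                   and x.environment_hosted_zone == i.environment_hosted_zone for x in li2):
            out.append(i)
    return out

old = [SimpleNamespace(eip='192.0.2.10', resourse_name='a.example.com.', environment_hosted_zone='Z111')]
new = [SimpleNamespace(eip='192.0.2.10', resourse_name='a.example.com.', environment_hosted_zone='Z111'),
       SimpleNamespace(eip='192.0.2.99', resourse_name='b.example.com.', environment_hosted_zone='Z111')]

# Only the record that was not present in the previous run shows up
print([p.resourse_name for p in list_diff(new, old)])  # ['b.example.com.']
```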
|
{
"source": "jesseclin/veriloggen",
"score": 2
}
|
#### File: examples/fifo_rtl/test_fifo_rtl.py
```python
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import fifo_rtl
expected_verilog = """
module test;
reg CLK;
reg RST;
main
uut
(
.CLK(CLK),
.RST(RST)
);
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut, CLK, RST);
end
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#100000;
$finish;
end
endmodule
module main
(
input CLK,
input RST
);
wire myfifo_enq;
wire [32-1:0] myfifo_wdata;
wire myfifo_full;
wire myfifo_almost_full;
wire myfifo_deq;
wire [32-1:0] myfifo_rdata;
wire myfifo_empty;
wire myfifo_almost_empty;
myfifo
inst_myfifo
(
.CLK(CLK),
.RST(RST),
.myfifo_enq(myfifo_enq),
.myfifo_wdata(myfifo_wdata),
.myfifo_full(myfifo_full),
.myfifo_almost_full(myfifo_almost_full),
.myfifo_deq(myfifo_deq),
.myfifo_rdata(myfifo_rdata),
.myfifo_empty(myfifo_empty),
.myfifo_almost_empty(myfifo_almost_empty)
);
reg [8-1:0] count_myfifo;
reg [32-1:0] count;
reg [32-1:0] sum;
reg [32-1:0] fsm;
localparam fsm_init = 0;
assign myfifo_wdata = (fsm == 1)? count : 'hx;
assign myfifo_enq = (fsm == 1)? (fsm == 1) && !myfifo_almost_full : 0;
localparam _tmp_0 = 1;
wire [_tmp_0-1:0] _tmp_1;
assign _tmp_1 = !myfifo_almost_full;
reg [_tmp_0-1:0] __tmp_1_1;
assign myfifo_deq = ((fsm == 3) && !myfifo_empty)? 1 : 0;
localparam _tmp_2 = 1;
wire [_tmp_2-1:0] _tmp_3;
assign _tmp_3 = (fsm == 3) && !myfifo_empty;
reg [_tmp_2-1:0] __tmp_3_1;
reg [32-1:0] _d1_fsm;
reg _fsm_cond_3_0_1;
localparam fsm_1 = 1;
localparam fsm_2 = 2;
localparam fsm_3 = 3;
localparam fsm_4 = 4;
localparam fsm_5 = 5;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
_d1_fsm <= fsm_init;
count <= 0;
sum <= 0;
_fsm_cond_3_0_1 <= 0;
end else begin
_d1_fsm <= fsm;
case(_d1_fsm)
fsm_3: begin
if(_fsm_cond_3_0_1) begin
$display("sum=%d", sum);
end
end
endcase
case(fsm)
fsm_init: begin
count <= 0;
fsm <= fsm_1;
end
fsm_1: begin
if(!myfifo_almost_full) begin
count <= count + 1;
end
if(__tmp_1_1) begin
$display("count=%d space=%d has_space=%d", count_myfifo, (127 - count_myfifo), (count_myfifo + 1 < 127));
end
if(!myfifo_almost_full && (count == 125)) begin
fsm <= fsm_2;
end
end
fsm_2: begin
count <= 0;
fsm <= fsm_3;
end
fsm_3: begin
if(__tmp_3_1) begin
sum <= sum + myfifo_rdata;
count <= count + 1;
$write("count=%d space=%d has_space=%d ", count_myfifo, (127 - count_myfifo), (count_myfifo + 1 < 127));
end
_fsm_cond_3_0_1 <= __tmp_3_1;
if(count == 126) begin
fsm <= fsm_4;
end
end
fsm_4: begin
$display("expected_sum=%d", 7875);
fsm <= fsm_5;
end
endcase
end
end
always @(posedge CLK) begin
if(RST) begin
count_myfifo <= 0;
__tmp_1_1 <= 0;
__tmp_3_1 <= 0;
end else begin
if(myfifo_enq && !myfifo_full && (myfifo_deq && !myfifo_empty)) begin
count_myfifo <= count_myfifo;
end else if(myfifo_enq && !myfifo_full) begin
count_myfifo <= count_myfifo + 1;
end else if(myfifo_deq && !myfifo_empty) begin
count_myfifo <= count_myfifo - 1;
end
__tmp_1_1 <= _tmp_1;
__tmp_3_1 <= _tmp_3;
end
end
endmodule
module myfifo
(
input CLK,
input RST,
input myfifo_enq,
input [32-1:0] myfifo_wdata,
output myfifo_full,
output myfifo_almost_full,
input myfifo_deq,
output [32-1:0] myfifo_rdata,
output myfifo_empty,
output myfifo_almost_empty
);
reg [32-1:0] mem [0:128-1];
reg [7-1:0] head;
reg [7-1:0] tail;
wire is_empty;
wire is_almost_empty;
wire is_full;
wire is_almost_full;
assign is_empty = head == tail;
assign is_almost_empty = head == (tail + 1 & 127);
assign is_full = (head + 1 & 127) == tail;
assign is_almost_full = (head + 2 & 127) == tail;
reg [32-1:0] rdata_reg;
assign myfifo_full = is_full;
assign myfifo_almost_full = is_almost_full || is_full;
assign myfifo_empty = is_empty;
assign myfifo_almost_empty = is_almost_empty || is_empty;
assign myfifo_rdata = rdata_reg;
always @(posedge CLK) begin
if(RST) begin
head <= 0;
rdata_reg <= 0;
tail <= 0;
end else begin
if(myfifo_enq && !is_full) begin
mem[head] <= myfifo_wdata;
head <= head + 1;
end
if(myfifo_deq && !is_empty) begin
rdata_reg <= mem[tail];
tail <= tail + 1;
end
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = fifo_rtl.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
```
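The `myfifo` module in the expected Verilog derives its status flags purely from wrap-around (`& 127`) head/tail pointer arithmetic and exports `almost_full` as `is_almost_full || is_full`. The sketch below mirrors those comparisons in Python (the function and constant names are mine) to show that `almost_full` asserts one slot before `full`:
```python
PTR_MASK = 127  # 7-bit head/tail pointers over a 128-entry memory

def flags(head, tail):
    is_empty = head == tail
    is_full = ((head + 1) & PTR_MASK) == tail
    is_almost_full = ((head + 2) & PTR_MASK) == tail
    # The module exports almost_full as (is_almost_full || is_full)
    return is_empty, is_full, is_almost_full or is_full

print(flags(0, 0))    # (True, False, False) -> empty
print(flags(126, 0))  # (False, False, True) -> one free slot left
print(flags(127, 0))  # (False, True, True)  -> full
```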
#### File: examples_obsolete/dataflow_stencil/dataflow_stencil.py
```python
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import math
from functools import reduce
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
from veriloggen import *
import veriloggen.dataflow as dataflow
import veriloggen.types.fixed as fixed
import veriloggen.types.ram as ram
from veriloggen.thread import RAM
def stencil(coe, data):
data = map(lambda x, y: x * y, data, coe)
rslt = reduce(lambda x, y: x + y, data)
return rslt
def mkStencilPipeline2D(coe=None, size=3, width=32, point=16):
# size-port stream inputs
iports = [dataflow.Variable('idata%d' % i, valid='ivalid%d' % i,
width=width, point=point, signed=True)
for i in range(size)]
if coe is None:
coe = [[dataflow.Constant(1.0 / (1.0 * size * size), point=point) for i in range(size)]
for j in range(size)]
# source data
data = [[d.prev(j) for j in range(size)] for d in iports]
# from 2D list to 1D list
data_list = []
coe_list = []
for d, c in zip(data, coe):
data_list.extend(d)
coe_list.extend(c)
# computation by calling standard method
rslt = stencil(coe_list, data_list)
rslt.output('odata', valid='ovalid')
df = dataflow.Dataflow(rslt)
m = df.to_module('stencil_pipeline_2d')
# try:
# df.draw_graph()
# except:
# print('Dataflow graph could not be generated.', file=sys.stderr)
return m
def mkStencil(n=16, size=3, datawidth=32, point=16, coe_test=False):
m = Module('stencil')
addrwidth = int(math.log(n, 2))
clk = m.Input('CLK')
rst = m.Input('RST')
start = m.Input('start')
busy = m.OutputReg('busy', initval=0)
done = m.TmpReg(initval=0)
# external RAM I/F
ext_src_rams = [ram.RAMSlaveInterface(m, 'ext_src_ram%d' % i,
datawidth=datawidth, addrwidth=addrwidth)
for i in range(size)]
ext_dst_ram = ram.RAMSlaveInterface(m, 'ext_dst_ram',
datawidth=datawidth, addrwidth=addrwidth)
# RAM
addrwidth = int(math.log(n, 2)) * 2
src_rams = [RAM(m, 'src_ram%d' % i, clk, rst,
datawidth=datawidth, addrwidth=addrwidth, numports=2)
for i in range(size)]
dst_ram = RAM(m, 'dst_ram', clk, rst,
datawidth=datawidth, addrwidth=addrwidth, numports=2)
# connect RAM I/Fs
for src_ram, ext_src_ram in zip(src_rams, ext_src_rams):
src_ram[1].connect(ext_src_ram)
dst_ram[1].connect(ext_dst_ram)
# read FSM
read_fsm = FSM(m, 'read_fsm', clk, rst)
read_count = m.Reg('read_count', 32, initval=0)
read_addr = m.Reg('read_addr', 32, initval=0)
read_fsm(
read_addr(0),
read_count(0),
busy(0)
)
read_fsm.If(start)(
busy(1)
)
read_fsm.Then().goto_next()
read_fsm(
read_addr.inc(),
read_count.inc()
)
idata = []
ivalid = []
for i, src_ram in enumerate(src_rams):
src_ram.disable_write(0)
rdata, rvalid = src_ram.read_rtl(read_addr, port=0, cond=read_fsm)
idata.append(rdata)
ivalid.append(rvalid)
read_fsm.If(read_count == n - 1)(
read_addr(0),
read_count(0)
)
read_fsm.Then().goto_next()
read_fsm.If(done)(
busy(0)
)
read_fsm.Then().goto_init()
read_fsm.make_always()
# instance
odata = m.Wire('odata', datawidth)
ovalid = m.Wire('ovalid')
ports = []
ports.append(('CLK', clk))
ports.append(('RST', rst))
for i, (d, v) in enumerate(zip(idata, ivalid)):
ports.append(('idata%d' % i, d))
ports.append(('ivalid%d' % i, v))
ports.append(('odata', odata))
ports.append(('ovalid', ovalid))
coe = None
if coe_test:
coe = [[dataflow.Constant(1, point=point) for i in range(size)]
for j in range(size)]
point = 0
st = mkStencilPipeline2D(size=3, width=datawidth, point=point, coe=coe)
m.Instance(st, 'inst_stencil', ports=ports)
skip_offset = int(math.floor(size / 2))
# write FSM
write_fsm = FSM(m, 'write_fsm', clk, rst)
write_count = m.Reg('write_count', 32, initval=0)
write_addr = m.Reg('write_addr', 32, initval=skip_offset)
write_fsm(
done(0)
)
write_fsm.If(Ands(ovalid, write_count > skip_offset))(
write_addr.inc()
)
dst_ram.write_rtl(write_addr, odata, port=0, cond=write_fsm.then)
write_fsm.If(ovalid)(
write_count.inc(),
)
write_fsm.If(write_count == n)(
write_count(0),
write_addr(skip_offset),
done(1)
)
write_fsm.Then().goto_init()
write_fsm.make_always()
return m
def mkTest(n=16, size=3, datawidth=32, point=16, coe_test=False):
if coe_test:
point = 0
m = Module('test')
addrwidth = int(math.log(n, 2))
main = mkStencil(n, size, datawidth, point, coe_test)
params = m.copy_params(main)
ports = m.copy_sim_ports(main)
clk = ports['CLK']
rst = ports['RST']
start = ports['start']
busy = ports['busy']
uut = m.Instance(main, 'uut',
params=m.connect_params(main),
ports=m.connect_ports(main))
reset_done = m.Reg('reset_done', initval=0)
reset_stmt = []
reset_stmt.append(reset_done(0))
reset_stmt.append(start(0))
# src RAM
for i in range(3):
addr = ports['ext_src_ram%d_addr' % i]
rdata = ports['ext_src_ram%d_rdata' % i]
wdata = ports['ext_src_ram%d_wdata' % i]
wenable = ports['ext_src_ram%d_wenable' % i]
reset_stmt.append(addr(0))
reset_stmt.append(wdata(0))
reset_stmt.append(wenable(0))
# dst RAM
addr = ports['ext_dst_ram_addr']
rdata = ports['ext_dst_ram_rdata']
wdata = ports['ext_dst_ram_wdata']
wenable = ports['ext_dst_ram_wenable']
reset_stmt.append(addr(2))
reset_stmt.append(wdata(0))
reset_stmt.append(wenable(0))
# simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, reset_stmt, period=100)
nclk = simulation.next_clock
init.add(
Delay(1000),
reset_done(1),
nclk(clk),
Delay(100000),
Systask('finish'),
)
fsm = FSM(m, 'fsm', clk, rst)
fsm.goto_next(cond=reset_done)
for i in range(3):
addr = ports['ext_src_ram%d_addr' % i]
fsm.add(addr(-1))
fsm.goto_next()
for i in range(3):
addr = ports['ext_src_ram%d_addr' % i]
rdata = ports['ext_src_ram%d_rdata' % i]
wdata = ports['ext_src_ram%d_wdata' % i]
wenable = ports['ext_src_ram%d_wenable' % i]
next_addr = (addr + 1) % (n * n)
fsm.add(addr.inc())
fsm.add(wdata(fixed.FixedConst(90, point).raw))
fsm.add(wenable(1))
fsm.add(wenable(0), cond=AndList(wenable, addr == 2**addrwidth - 1))
fsm.goto_next(cond=AndList(
wenable, ports['ext_src_ram0_addr'] == 2**addrwidth - 1))
fsm.goto_next(cond=Not(busy))
fsm.add(start(1))
fsm.add(start(0), delay=1)
fsm.goto_next()
fsm.goto_next(cond=busy)
fsm.goto_next(cond=Not(busy))
fsm.add(Systask('finish'))
fsm.make_always()
return m
if __name__ == '__main__':
n = 16
#test = mkTest(n, coe_test=True)
test = mkTest(n)
verilog = test.to_verilog('tmp.v')
# print(verilog)
# run simulator (Icarus Verilog)
sim = simulation.Simulator(test)
rslt = sim.run()
print(rslt)
```
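The generated pipeline computes, for each output sample, a coefficient-weighted sum over a 3x3 window, with the coefficients defaulting to 1/(size*size). The same computation in plain numpy, on a small random array, for reference (boundary handling differs from the RTL, which skips an offset, so only interior points are shown):
```python
import numpy as np

size = 3
coe = np.full((size, size), 1.0 / (size * size))  # default coefficients

src = np.random.randint(0, 10, (6, 8)).astype(float)
out = np.zeros_like(src)

# Each interior output point is the weighted sum of its size x size neighbourhood
for y in range(1, src.shape[0] - 1):
    for x in range(1, src.shape[1] - 1):
        out[y, x] = np.sum(src[y - 1:y + 2, x - 1:x + 2] * coe)

print(out[1:-1, 1:-1])
```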
#### File: examples/thread_matmul_wide/thread_matmul_wide.py
```python
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import numpy as np
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
axi_datawidth = 64
datawidth = 32
matrix_size = 16
a_offset = 0
b_offset = 4096
c_offset = 4096 * 2
def mkLed():
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
addrwidth = 10
ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth)
ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth)
ram_c = vthread.RAM(m, 'ram_c', clk, rst, datawidth, addrwidth)
maxi = vthread.AXIM(m, 'maxi', clk, rst, datawidth *
(axi_datawidth // datawidth))
saxi = vthread.AXISLiteRegister(m, 'saxi', clk, rst, 32, length=8)
def matmul():
while True:
saxi.wait_flag(0, value=1, resetvalue=0)
matrix_size = saxi.read(1)
a_offset = saxi.read(2)
b_offset = saxi.read(3)
c_offset = saxi.read(4)
comp(matrix_size, a_offset, b_offset, c_offset)
saxi.write_flag(5, 1, resetvalue=0)
def comp(matrix_size, a_offset, b_offset, c_offset):
a_addr, c_addr = a_offset, c_offset
for i in range(matrix_size):
maxi.dma_read(ram_a, 0, a_addr, matrix_size)
b_addr = b_offset
for j in range(matrix_size):
maxi.dma_read(ram_b, 0, b_addr, matrix_size)
sum = 0
for k in range(matrix_size):
x = ram_a.read(k)
y = ram_b.read(k)
sum += x * y
ram_c.write(j, sum)
b_addr += matrix_size * (datawidth // 8)
maxi.dma_write(ram_c, 0, c_addr, matrix_size)
a_addr += matrix_size * (datawidth // 8)
c_addr += matrix_size * (datawidth // 8)
th = vthread.Thread(m, 'th_matmul', clk, rst, matmul)
fsm = th.start()
return m
def mkTest(memimg_name=None):
a_shape = (matrix_size, matrix_size)
b_shape = (matrix_size, matrix_size)
c_shape = (a_shape[0], b_shape[0])
n_raw_a = axi.shape_to_length(a_shape)
n_raw_b = axi.shape_to_length(b_shape)
n_a = axi.shape_to_memory_size(a_shape, datawidth)
n_b = axi.shape_to_memory_size(b_shape, datawidth)
a = np.zeros(a_shape, dtype=np.int64)
b = np.zeros(b_shape, dtype=np.int64)
value = 1
for y in range(a_shape[0]):
for x in range(a_shape[1]):
if x == y:
a[y][x] = value
value += 1
else:
a[y][x] = 0
for y in range(b_shape[0]):
for x in range(b_shape[1]):
if x == y:
b[y][x] = 2
else:
b[y][x] = 0
a_addr = a_offset
size_a = n_a * datawidth // 8
b_addr = b_offset
size_b = n_b * datawidth // 8
mem = np.zeros([1024 * 1024 * 8 // axi_datawidth], dtype=np.int64)
axi.set_memory(mem, a, axi_datawidth, datawidth, a_addr)
axi.set_memory(mem, b, axi_datawidth, datawidth, b_addr)
led = mkLed()
m = Module('test')
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
memory = axi.AxiMemoryModel(m, 'memory', clk, rst,
datawidth=axi_datawidth,
mem_datawidth=axi_datawidth,
memimg=mem, memimg_name=memimg_name)
memory.connect(ports, 'maxi')
# AXI-Slave controller
_saxi = vthread.AXIMLite(m, '_saxi', clk, rst, noio=True)
_saxi.connect(ports, 'saxi')
# Timer
counter = m.Reg('counter', 32, initval=0)
seq = Seq(m, 'seq', clk, rst)
seq(
counter.inc()
)
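    # Software-side control: write matrix_size and the buffer offsets to
    # the AXI-Lite registers, set the start bit, poll the done flag, and
    # report the elapsed cycle count.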
def ctrl():
for i in range(100):
pass
awaddr = 4
print('# matrix_size = %d' % matrix_size)
_saxi.write(awaddr, matrix_size)
awaddr = 8
print('# a_offset = %d' % a_offset)
_saxi.write(awaddr, a_offset)
awaddr = 12
print('# b_offset = %d' % b_offset)
_saxi.write(awaddr, b_offset)
awaddr = 16
print('# c_offset = %d' % c_offset)
_saxi.write(awaddr, c_offset)
awaddr = 0
start_time = counter
print('# start time = %d' % start_time)
_saxi.write(awaddr, 1)
araddr = 20
v = _saxi.read(araddr)
while v == 0:
v = _saxi.read(araddr)
end_time = counter
print('# end time = %d' % end_time)
time = end_time - start_time
print('# exec time = %d' % time)
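        # Read C back from the memory model: the diagonal should be
        # (y + 1) * 2 and every off-diagonal element should be 0.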
all_ok = True
for y in range(matrix_size):
for x in range(matrix_size):
v = memory.read(
c_offset + (y * matrix_size + x) * datawidth // 8)
if y == x and vthread.verilog.NotEql(v, (y + 1) * 2):
all_ok = False
print("NG [%d,%d] = %d" % (y, x, v))
if y != x and vthread.verilog.NotEql(v, 0):
all_ok = False
print("NG [%d,%d] = %d" % (y, x, v))
if all_ok:
print('# verify: PASSED')
else:
print('# verify: FAILED')
vthread.finish()
th = vthread.Thread(m, 'th_ctrl', clk, rst, ctrl)
fsm = th.start()
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000000),
Systask('finish'),
)
return m
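# Generate Verilog and simulate with Icarus Verilog (default) or Verilator;
# Verilator appends a trailing status line (starting with '-') that is
# stripped from the result.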
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
if outputfile is None:
outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
memimg_name = 'memimg_' + outputfile
test = mkTest(memimg_name=memimg_name)
if filename is not None:
test.to_verilog(filename)
sim = simulation.Simulator(test, sim=simtype)
rslt = sim.run(outputfile=outputfile)
lines = rslt.splitlines()
if simtype == 'verilator' and lines[-1].startswith('-'):
rslt = '\n'.join(lines[:-1])
return rslt
if __name__ == '__main__':
rslt = run(filename='tmp.v')
print(rslt)
```
#### File: examples/thread_stream_axi_stream_fifo_ipxact/test_thread_stream_axi_stream_fifo_ipxact.py
```python
from __future__ import absolute_import
from __future__ import print_function
import os
import veriloggen
import thread_stream_axi_stream_fifo_ipxact
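# Expected (golden) Verilog for the design generated by
# thread_stream_axi_stream_fifo_ipxact; the test compares the generated
# RTL against this reference.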
expected_verilog = """
module blinkled
(
input CLK,
input RST,
output reg [32-1:0] maxi_awaddr,
output reg [8-1:0] maxi_awlen,
output [3-1:0] maxi_awsize,
output [2-1:0] maxi_awburst,
output [1-1:0] maxi_awlock,
output [4-1:0] maxi_awcache,
output [3-1:0] maxi_awprot,
output [4-1:0] maxi_awqos,
output [2-1:0] maxi_awuser,
output reg maxi_awvalid,
input maxi_awready,
output reg [32-1:0] maxi_wdata,
output reg [4-1:0] maxi_wstrb,
output reg maxi_wlast,
output reg maxi_wvalid,
input maxi_wready,
input [2-1:0] maxi_bresp,
input maxi_bvalid,
output maxi_bready,
output reg [32-1:0] maxi_araddr,
output reg [8-1:0] maxi_arlen,
output [3-1:0] maxi_arsize,
output [2-1:0] maxi_arburst,
output [1-1:0] maxi_arlock,
output [4-1:0] maxi_arcache,
output [3-1:0] maxi_arprot,
output [4-1:0] maxi_arqos,
output [2-1:0] maxi_aruser,
output reg maxi_arvalid,
input maxi_arready,
input [32-1:0] maxi_rdata,
input [2-1:0] maxi_rresp,
input maxi_rlast,
input maxi_rvalid,
output maxi_rready,
input [32-1:0] saxi_awaddr,
input [4-1:0] saxi_awcache,
input [3-1:0] saxi_awprot,
input saxi_awvalid,
output saxi_awready,
input [32-1:0] saxi_wdata,
input [4-1:0] saxi_wstrb,
input saxi_wvalid,
output saxi_wready,
output [2-1:0] saxi_bresp,
output reg saxi_bvalid,
input saxi_bready,
input [32-1:0] saxi_araddr,
input [4-1:0] saxi_arcache,
input [3-1:0] saxi_arprot,
input saxi_arvalid,
output saxi_arready,
output reg [32-1:0] saxi_rdata,
output [2-1:0] saxi_rresp,
output reg saxi_rvalid,
input saxi_rready,
input [32-1:0] axi_in_tdata,
input axi_in_tvalid,
output axi_in_tready,
input axi_in_tlast,
output reg [32-1:0] axi_out_tdata,
output reg axi_out_tvalid,
input axi_out_tready,
output reg axi_out_tlast
);
assign maxi_awsize = 2;
assign maxi_awburst = 1;
assign maxi_awlock = 0;
assign maxi_awcache = 3;
assign maxi_awprot = 0;
assign maxi_awqos = 0;
assign maxi_awuser = 0;
assign maxi_bready = 1;
assign maxi_arsize = 2;
assign maxi_arburst = 1;
assign maxi_arlock = 0;
assign maxi_arcache = 3;
assign maxi_arprot = 0;
assign maxi_arqos = 0;
assign maxi_aruser = 0;
reg [3-1:0] outstanding_wcount_0;
reg _maxi_read_start;
reg [8-1:0] _maxi_read_op_sel;
reg [32-1:0] _maxi_read_local_addr;
reg [32-1:0] _maxi_read_global_addr;
reg [33-1:0] _maxi_read_size;
reg [32-1:0] _maxi_read_local_stride;
reg _maxi_read_idle;
reg _maxi_write_start;
reg [8-1:0] _maxi_write_op_sel;
reg [32-1:0] _maxi_write_local_addr;
reg [32-1:0] _maxi_write_global_addr;
reg [33-1:0] _maxi_write_size;
reg [32-1:0] _maxi_write_local_stride;
reg _maxi_write_idle;
wire _maxi_write_data_done;
assign saxi_bresp = 0;
assign saxi_rresp = 0;
reg signed [32-1:0] _saxi_register_0;
reg signed [32-1:0] _saxi_register_1;
reg signed [32-1:0] _saxi_register_2;
reg signed [32-1:0] _saxi_register_3;
reg signed [32-1:0] _saxi_register_4;
reg signed [32-1:0] _saxi_register_5;
reg signed [32-1:0] _saxi_register_6;
reg signed [32-1:0] _saxi_register_7;
reg _saxi_flag_0;
reg _saxi_flag_1;
reg _saxi_flag_2;
reg _saxi_flag_3;
reg _saxi_flag_4;
reg _saxi_flag_5;
reg _saxi_flag_6;
reg _saxi_flag_7;
reg signed [32-1:0] _saxi_resetval_0;
reg signed [32-1:0] _saxi_resetval_1;
reg signed [32-1:0] _saxi_resetval_2;
reg signed [32-1:0] _saxi_resetval_3;
reg signed [32-1:0] _saxi_resetval_4;
reg signed [32-1:0] _saxi_resetval_5;
reg signed [32-1:0] _saxi_resetval_6;
reg signed [32-1:0] _saxi_resetval_7;
localparam _saxi_maskwidth = 3;
localparam _saxi_mask = { _saxi_maskwidth{ 1'd1 } };
localparam _saxi_shift = 2;
reg [32-1:0] _saxi_register_fsm;
localparam _saxi_register_fsm_init = 0;
reg [32-1:0] addr_1;
reg writevalid_2;
reg readvalid_3;
reg prev_awvalid_4;
reg prev_arvalid_5;
assign saxi_awready = (_saxi_register_fsm == 0) && (!writevalid_2 && !readvalid_3 && !saxi_bvalid && prev_awvalid_4);
assign saxi_arready = (_saxi_register_fsm == 0) && (!readvalid_3 && !writevalid_2 && prev_arvalid_5 && !prev_awvalid_4);
reg [_saxi_maskwidth-1:0] _tmp_6;
wire signed [32-1:0] _tmp_7;
assign _tmp_7 = (_tmp_6 == 0)? _saxi_register_0 :
(_tmp_6 == 1)? _saxi_register_1 :
(_tmp_6 == 2)? _saxi_register_2 :
(_tmp_6 == 3)? _saxi_register_3 :
(_tmp_6 == 4)? _saxi_register_4 :
(_tmp_6 == 5)? _saxi_register_5 :
(_tmp_6 == 6)? _saxi_register_6 :
(_tmp_6 == 7)? _saxi_register_7 : 'hx;
wire _tmp_8;
assign _tmp_8 = (_tmp_6 == 0)? _saxi_flag_0 :
(_tmp_6 == 1)? _saxi_flag_1 :
(_tmp_6 == 2)? _saxi_flag_2 :
(_tmp_6 == 3)? _saxi_flag_3 :
(_tmp_6 == 4)? _saxi_flag_4 :
(_tmp_6 == 5)? _saxi_flag_5 :
(_tmp_6 == 6)? _saxi_flag_6 :
(_tmp_6 == 7)? _saxi_flag_7 : 'hx;
wire signed [32-1:0] _tmp_9;
assign _tmp_9 = (_tmp_6 == 0)? _saxi_resetval_0 :
(_tmp_6 == 1)? _saxi_resetval_1 :
(_tmp_6 == 2)? _saxi_resetval_2 :
(_tmp_6 == 3)? _saxi_resetval_3 :
(_tmp_6 == 4)? _saxi_resetval_4 :
(_tmp_6 == 5)? _saxi_resetval_5 :
(_tmp_6 == 6)? _saxi_resetval_6 :
(_tmp_6 == 7)? _saxi_resetval_7 : 'hx;
reg _saxi_cond_0_1;
assign saxi_wready = _saxi_register_fsm == 3;
reg _axi_in_read_start;
reg [8-1:0] _axi_in_read_op_sel;
reg [32-1:0] _axi_in_read_local_addr;
reg [33-1:0] _axi_in_read_size;
reg [32-1:0] _axi_in_read_local_stride;
reg _axi_in_read_idle;
reg _axi_out_write_start;
reg [8-1:0] _axi_out_write_op_sel;
reg [32-1:0] _axi_out_write_local_addr;
reg [33-1:0] _axi_out_write_size;
reg [32-1:0] _axi_out_write_local_stride;
reg _axi_out_write_idle;
wire fifo_a_enq;
wire [32-1:0] fifo_a_wdata;
wire fifo_a_full;
wire fifo_a_almost_full;
wire fifo_a_deq;
wire [32-1:0] fifo_a_rdata;
wire fifo_a_empty;
wire fifo_a_almost_empty;
fifo_a
inst_fifo_a
(
.CLK(CLK),
.RST(RST),
.fifo_a_enq(fifo_a_enq),
.fifo_a_wdata(fifo_a_wdata),
.fifo_a_full(fifo_a_full),
.fifo_a_almost_full(fifo_a_almost_full),
.fifo_a_deq(fifo_a_deq),
.fifo_a_rdata(fifo_a_rdata),
.fifo_a_empty(fifo_a_empty),
.fifo_a_almost_empty(fifo_a_almost_empty)
);
reg [9-1:0] count_fifo_a;
wire fifo_b_enq;
wire [32-1:0] fifo_b_wdata;
wire fifo_b_full;
wire fifo_b_almost_full;
wire fifo_b_deq;
wire [32-1:0] fifo_b_rdata;
wire fifo_b_empty;
wire fifo_b_almost_empty;
fifo_b
inst_fifo_b
(
.CLK(CLK),
.RST(RST),
.fifo_b_enq(fifo_b_enq),
.fifo_b_wdata(fifo_b_wdata),
.fifo_b_full(fifo_b_full),
.fifo_b_almost_full(fifo_b_almost_full),
.fifo_b_deq(fifo_b_deq),
.fifo_b_rdata(fifo_b_rdata),
.fifo_b_empty(fifo_b_empty),
.fifo_b_almost_empty(fifo_b_almost_empty)
);
reg [9-1:0] count_fifo_b;
wire fifo_c_enq;
wire [32-1:0] fifo_c_wdata;
wire fifo_c_full;
wire fifo_c_almost_full;
wire fifo_c_deq;
wire [32-1:0] fifo_c_rdata;
wire fifo_c_empty;
wire fifo_c_almost_empty;
fifo_c
inst_fifo_c
(
.CLK(CLK),
.RST(RST),
.fifo_c_enq(fifo_c_enq),
.fifo_c_wdata(fifo_c_wdata),
.fifo_c_full(fifo_c_full),
.fifo_c_almost_full(fifo_c_almost_full),
.fifo_c_deq(fifo_c_deq),
.fifo_c_rdata(fifo_c_rdata),
.fifo_c_empty(fifo_c_empty),
.fifo_c_almost_empty(fifo_c_almost_empty)
);
reg [9-1:0] count_fifo_c;
wire [10-1:0] ram_b_0_addr;
wire [32-1:0] ram_b_0_rdata;
wire [32-1:0] ram_b_0_wdata;
wire ram_b_0_wenable;
wire ram_b_0_enable;
ram_b
inst_ram_b
(
.CLK(CLK),
.ram_b_0_addr(ram_b_0_addr),
.ram_b_0_rdata(ram_b_0_rdata),
.ram_b_0_wdata(ram_b_0_wdata),
.ram_b_0_wenable(ram_b_0_wenable),
.ram_b_0_enable(ram_b_0_enable)
);
reg _mystream_reduce_stream_ivalid;
wire _mystream_reduce_stream_oready;
wire _mystream_reduce_stream_internal_oready;
assign _mystream_reduce_stream_internal_oready = 1;
reg [32-1:0] _mystream_reduce_fsm;
localparam _mystream_reduce_fsm_init = 0;
wire _mystream_reduce_run_flag;
reg _mystream_reduce_source_start;
wire _mystream_reduce_source_stop;
reg _mystream_reduce_source_busy;
wire _mystream_reduce_sink_start;
wire _mystream_reduce_sink_stop;
wire _mystream_reduce_sink_busy;
wire _mystream_reduce_busy;
reg _mystream_reduce_busy_reg;
wire _mystream_reduce_is_root;
assign _mystream_reduce_is_root = 1;
reg _mystream_reduce_a_idle;
reg [33-1:0] _mystream_reduce_a_source_count;
reg [5-1:0] _mystream_reduce_a_source_mode;
reg [16-1:0] _mystream_reduce_a_source_generator_id;
reg [32-1:0] _mystream_reduce_a_source_offset;
reg [33-1:0] _mystream_reduce_a_source_size;
reg [32-1:0] _mystream_reduce_a_source_stride;
reg [32-1:0] _mystream_reduce_a_source_offset_buf;
reg [33-1:0] _mystream_reduce_a_source_size_buf;
reg [32-1:0] _mystream_reduce_a_source_stride_buf;
reg [8-1:0] _mystream_reduce_a_source_sel;
reg [32-1:0] _mystream_reduce_a_source_ram_raddr;
reg _mystream_reduce_a_source_ram_renable;
wire [32-1:0] _mystream_reduce_a_source_ram_rdata;
reg _mystream_reduce_a_source_fifo_deq;
wire [32-1:0] _mystream_reduce_a_source_fifo_rdata;
reg [32-1:0] _mystream_reduce_a_source_empty_data;
reg [32-1:0] _mystream_reduce_reduce_size_next_parameter_data;
reg [33-1:0] _mystream_reduce_sum_sink_count;
reg [5-1:0] _mystream_reduce_sum_sink_mode;
reg [16-1:0] _mystream_reduce_sum_sink_generator_id;
reg [32-1:0] _mystream_reduce_sum_sink_offset;
reg [33-1:0] _mystream_reduce_sum_sink_size;
reg [32-1:0] _mystream_reduce_sum_sink_stride;
reg [32-1:0] _mystream_reduce_sum_sink_offset_buf;
reg [33-1:0] _mystream_reduce_sum_sink_size_buf;
reg [32-1:0] _mystream_reduce_sum_sink_stride_buf;
reg [8-1:0] _mystream_reduce_sum_sink_sel;
reg [32-1:0] _mystream_reduce_sum_sink_waddr;
reg _mystream_reduce_sum_sink_wenable;
reg [32-1:0] _mystream_reduce_sum_sink_wdata;
reg _mystream_reduce_sum_sink_fifo_enq;
reg [32-1:0] _mystream_reduce_sum_sink_fifo_wdata;
reg [32-1:0] _mystream_reduce_sum_sink_immediate;
reg [33-1:0] _mystream_reduce_sum_valid_sink_count;
reg [5-1:0] _mystream_reduce_sum_valid_sink_mode;
reg [16-1:0] _mystream_reduce_sum_valid_sink_generator_id;
reg [32-1:0] _mystream_reduce_sum_valid_sink_offset;
reg [33-1:0] _mystream_reduce_sum_valid_sink_size;
reg [32-1:0] _mystream_reduce_sum_valid_sink_stride;
reg [32-1:0] _mystream_reduce_sum_valid_sink_offset_buf;
reg [33-1:0] _mystream_reduce_sum_valid_sink_size_buf;
reg [32-1:0] _mystream_reduce_sum_valid_sink_stride_buf;
reg [8-1:0] _mystream_reduce_sum_valid_sink_sel;
reg [32-1:0] _mystream_reduce_sum_valid_sink_waddr;
reg _mystream_reduce_sum_valid_sink_wenable;
reg [1-1:0] _mystream_reduce_sum_valid_sink_wdata;
reg _mystream_reduce_sum_valid_sink_fifo_enq;
reg [1-1:0] _mystream_reduce_sum_valid_sink_fifo_wdata;
reg [1-1:0] _mystream_reduce_sum_valid_sink_immediate;
reg _mystream_bias_stream_ivalid;
wire _mystream_bias_stream_oready;
wire _mystream_bias_stream_internal_oready;
assign _mystream_bias_stream_internal_oready = 1;
reg [32-1:0] _mystream_bias_fsm;
localparam _mystream_bias_fsm_init = 0;
wire _mystream_bias_run_flag;
reg _mystream_bias_source_start;
wire _mystream_bias_source_stop;
reg _mystream_bias_source_busy;
wire _mystream_bias_sink_start;
wire _mystream_bias_sink_stop;
wire _mystream_bias_sink_busy;
wire _mystream_bias_busy;
reg _mystream_bias_busy_reg;
wire _mystream_bias_is_root;
assign _mystream_bias_is_root = 1;
reg _mystream_bias_x_idle;
reg [33-1:0] _mystream_bias_x_source_count;
reg [5-1:0] _mystream_bias_x_source_mode;
reg [16-1:0] _mystream_bias_x_source_generator_id;
reg [32-1:0] _mystream_bias_x_source_offset;
reg [33-1:0] _mystream_bias_x_source_size;
reg [32-1:0] _mystream_bias_x_source_stride;
reg [32-1:0] _mystream_bias_x_source_offset_buf;
reg [33-1:0] _mystream_bias_x_source_size_buf;
reg [32-1:0] _mystream_bias_x_source_stride_buf;
reg [8-1:0] _mystream_bias_x_source_sel;
reg [32-1:0] _mystream_bias_x_source_ram_raddr;
reg _mystream_bias_x_source_ram_renable;
wire [32-1:0] _mystream_bias_x_source_ram_rdata;
reg _mystream_bias_x_source_fifo_deq;
wire [32-1:0] _mystream_bias_x_source_fifo_rdata;
reg [32-1:0] _mystream_bias_x_source_empty_data;
reg _mystream_bias_y_idle;
reg [33-1:0] _mystream_bias_y_source_count;
reg [5-1:0] _mystream_bias_y_source_mode;
reg [16-1:0] _mystream_bias_y_source_generator_id;
reg [32-1:0] _mystream_bias_y_source_offset;
reg [33-1:0] _mystream_bias_y_source_size;
reg [32-1:0] _mystream_bias_y_source_stride;
reg [32-1:0] _mystream_bias_y_source_offset_buf;
reg [33-1:0] _mystream_bias_y_source_size_buf;
reg [32-1:0] _mystream_bias_y_source_stride_buf;
reg [8-1:0] _mystream_bias_y_source_sel;
reg [32-1:0] _mystream_bias_y_source_ram_raddr;
reg _mystream_bias_y_source_ram_renable;
wire [32-1:0] _mystream_bias_y_source_ram_rdata;
reg _mystream_bias_y_source_fifo_deq;
wire [32-1:0] _mystream_bias_y_source_fifo_rdata;
reg [32-1:0] _mystream_bias_y_source_empty_data;
reg [33-1:0] _mystream_bias_z_sink_count;
reg [5-1:0] _mystream_bias_z_sink_mode;
reg [16-1:0] _mystream_bias_z_sink_generator_id;
reg [32-1:0] _mystream_bias_z_sink_offset;
reg [33-1:0] _mystream_bias_z_sink_size;
reg [32-1:0] _mystream_bias_z_sink_stride;
reg [32-1:0] _mystream_bias_z_sink_offset_buf;
reg [33-1:0] _mystream_bias_z_sink_size_buf;
reg [32-1:0] _mystream_bias_z_sink_stride_buf;
reg [8-1:0] _mystream_bias_z_sink_sel;
reg [32-1:0] _mystream_bias_z_sink_waddr;
reg _mystream_bias_z_sink_wenable;
reg [32-1:0] _mystream_bias_z_sink_wdata;
reg _mystream_bias_z_sink_fifo_enq;
reg [32-1:0] _mystream_bias_z_sink_fifo_wdata;
reg [32-1:0] _mystream_bias_z_sink_immediate;
reg [32-1:0] th_comp;
localparam th_comp_init = 0;
reg signed [32-1:0] _th_comp_read_size_0;
reg signed [32-1:0] _th_comp_write_size_1;
reg signed [32-1:0] _th_comp_reduce_size_2;
reg signed [32-1:0] _th_comp_bias_addr_3;
reg axim_flag_10;
reg [32-1:0] _d1_th_comp;
reg _th_comp_cond_14_0_1;
reg _maxi_ram_b_0_read_start;
reg [8-1:0] _maxi_ram_b_0_read_op_sel;
reg [32-1:0] _maxi_ram_b_0_read_local_addr;
reg [32-1:0] _maxi_ram_b_0_read_global_addr;
reg [33-1:0] _maxi_ram_b_0_read_size;
reg [32-1:0] _maxi_ram_b_0_read_local_stride;
reg [32-1:0] _maxi_read_fsm;
localparam _maxi_read_fsm_init = 0;
reg [32-1:0] _maxi_read_cur_global_addr;
reg [33-1:0] _maxi_read_cur_size;
reg [33-1:0] _maxi_read_rest_size;
reg [32-1:0] _wdata_11;
reg _wvalid_12;
reg [33-1:0] _tmp_13;
reg _tmp_14;
wire [32-1:0] _dataflow__variable_odata_0;
wire _dataflow__variable_ovalid_0;
wire _dataflow__variable_oready_0;
assign _dataflow__variable_oready_0 = (_tmp_13 > 0) && !_tmp_14;
reg [10-1:0] _tmp_15;
reg [32-1:0] _tmp_16;
reg _tmp_17;
assign ram_b_0_wdata = (_tmp_17)? _tmp_16 : 'hx;
assign ram_b_0_wenable = (_tmp_17)? 1'd1 : 0;
reg _ram_b_cond_0_1;
reg [9-1:0] counter_18;
reg _maxi_cond_0_1;
assign maxi_rready = _maxi_read_fsm == 3;
reg [32-1:0] _d1__maxi_read_fsm;
reg __maxi_read_fsm_cond_3_0_1;
reg axim_flag_19;
reg __maxi_read_fsm_cond_4_1_1;
reg axistreamin_flag_20;
reg _th_comp_cond_19_1_1;
reg _axi_in_fifo_a_read_start;
reg [8-1:0] _axi_in_fifo_a_read_op_sel;
reg [33-1:0] _axi_in_fifo_a_read_size;
reg [32-1:0] _axi_in_read_fsm;
localparam _axi_in_read_fsm_init = 0;
reg [33-1:0] _axi_in_read_rest_size;
assign axi_in_tready = (_axi_in_read_fsm == 1) && !fifo_a_almost_full;
assign fifo_a_wdata = (axi_in_tready && axi_in_tvalid && (_axi_in_read_op_sel == 1))? axi_in_tdata : 'hx;
assign fifo_a_enq = (axi_in_tready && axi_in_tvalid && (_axi_in_read_op_sel == 1))? axi_in_tready && axi_in_tvalid && (_axi_in_read_op_sel == 1) && !fifo_a_almost_full : 0;
localparam _tmp_21 = 1;
wire [_tmp_21-1:0] _tmp_22;
assign _tmp_22 = !fifo_a_almost_full;
reg [_tmp_21-1:0] __tmp_22_1;
reg axistreamin_flag_23;
reg [32-1:0] _d1__axi_in_read_fsm;
reg __axi_in_read_fsm_cond_2_0_1;
reg axistreamout_flag_24;
reg _th_comp_cond_22_2_1;
reg _axi_out_fifo_c_write_start;
reg [8-1:0] _axi_out_fifo_c_write_op_sel;
reg [33-1:0] _axi_out_fifo_c_write_size;
reg [32-1:0] _axi_out_write_fsm;
localparam _axi_out_write_fsm_init = 0;
reg [33-1:0] _axi_out_write_counter;
reg [33-1:0] _axi_out_write_fifo_counter_25;
assign fifo_c_deq = ((_axi_out_write_fsm == 1) && (_axi_out_write_op_sel == 1) && (axi_out_tready || !axi_out_tvalid) && !fifo_c_empty && (_axi_out_write_fifo_counter_25 > 0) && !fifo_c_empty)? 1 : 0;
localparam _tmp_26 = 1;
wire [_tmp_26-1:0] _tmp_27;
assign _tmp_27 = (_axi_out_write_fsm == 1) && (_axi_out_write_op_sel == 1) && (axi_out_tready || !axi_out_tvalid) && !fifo_c_empty && (_axi_out_write_fifo_counter_25 > 0) && !fifo_c_empty;
reg [_tmp_26-1:0] __tmp_27_1;
reg rlast_28;
reg repeat_rvalid_29;
reg _axi_out_cond_0_1;
reg axistreamout_flag_30;
reg [32-1:0] _d1__axi_out_write_fsm;
reg __axi_out_write_fsm_cond_2_0_1;
wire signed [32-1:0] mystream_reduce_a_data;
wire signed [32-1:0] mystream_reduce_reduce_size_data;
wire [1-1:0] mystream_reduce__reduce_reset_data;
reg __mystream_reduce_stream_ivalid_1;
reg __mystream_reduce_stream_ivalid_2;
reg __mystream_reduce_stream_ivalid_3;
reg __mystream_reduce_stream_ivalid_4;
wire signed [64-1:0] _times_mul_odata_2;
reg signed [64-1:0] _times_mul_odata_reg_2;
wire signed [32-1:0] _times_data_2;
assign _times_data_2 = _times_mul_odata_reg_2;
wire _times_mul_update_2;
assign _times_mul_update_2 = _mystream_reduce_stream_oready;
multiplier_0
_times_mul_2
(
.CLK(CLK),
.update(_times_mul_update_2),
.a(mystream_reduce_a_data),
.b(mystream_reduce_a_data),
.c(_times_mul_odata_2)
);
reg signed [32-1:0] __delay_data_11__variable_1;
reg [1-1:0] __delay_data_14__variable_3;
reg signed [32-1:0] __delay_data_12__delay_11__variable_1;
reg [1-1:0] __delay_data_15__delay_14__variable_3;
reg signed [32-1:0] __delay_data_13__delay_12__delay_11__variable_1;
reg [1-1:0] __delay_data_16__delay_15__delay_14__variable_3;
reg signed [32-1:0] _reduceadd_data_4;
reg [33-1:0] _reduceadd_count_4;
reg _reduceadd_prev_count_max_4;
wire _reduceadd_reset_cond_4;
assign _reduceadd_reset_cond_4 = __delay_data_16__delay_15__delay_14__variable_3 || _reduceadd_prev_count_max_4;
wire [33-1:0] _reduceadd_current_count_4;
assign _reduceadd_current_count_4 = (_reduceadd_reset_cond_4)? 0 : _reduceadd_count_4;
wire signed [32-1:0] _reduceadd_current_data_4;
assign _reduceadd_current_data_4 = (_reduceadd_reset_cond_4)? 1'sd0 : _reduceadd_data_4;
reg [1-1:0] _pulse_data_6;
reg [33-1:0] _pulse_count_6;
reg _pulse_prev_count_max_6;
wire _pulse_reset_cond_6;
assign _pulse_reset_cond_6 = __delay_data_16__delay_15__delay_14__variable_3 || _pulse_prev_count_max_6;
wire [33-1:0] _pulse_current_count_6;
assign _pulse_current_count_6 = (_pulse_reset_cond_6)? 0 : _pulse_count_6;
wire [1-1:0] _pulse_current_data_6;
assign _pulse_current_data_6 = (_pulse_reset_cond_6)? 1'sd0 : _pulse_data_6;
wire signed [32-1:0] mystream_reduce_sum_data;
assign mystream_reduce_sum_data = _reduceadd_data_4;
wire [1-1:0] mystream_reduce_sum_valid_data;
assign mystream_reduce_sum_valid_data = _pulse_data_6;
wire _set_flag_31;
assign _set_flag_31 = th_comp == 25;
assign fifo_a_deq = (_mystream_reduce_stream_oready && _mystream_reduce_a_source_fifo_deq && (_mystream_reduce_a_source_sel == 1) && !fifo_a_empty)? 1 : 0;
localparam _tmp_32 = 1;
wire [_tmp_32-1:0] _tmp_33;
assign _tmp_33 = _mystream_reduce_stream_oready && _mystream_reduce_a_source_fifo_deq && (_mystream_reduce_a_source_sel == 1) && !fifo_a_empty;
reg [_tmp_32-1:0] __tmp_33_1;
assign _mystream_reduce_a_source_fifo_rdata = (_mystream_reduce_a_source_sel == 1)? fifo_a_rdata : 'hx;
reg signed [32-1:0] __variable_wdata_0;
assign mystream_reduce_a_data = __variable_wdata_0;
reg [32-1:0] _mystream_reduce_a_source_fsm_0;
localparam _mystream_reduce_a_source_fsm_0_init = 0;
wire _set_flag_34;
assign _set_flag_34 = th_comp == 26;
reg signed [32-1:0] __variable_wdata_1;
assign mystream_reduce_reduce_size_data = __variable_wdata_1;
wire _set_flag_35;
assign _set_flag_35 = th_comp == 27;
reg _tmp_36;
reg _tmp_37;
reg _tmp_38;
reg _tmp_39;
reg _tmp_40;
reg _tmp_41;
reg signed [32-1:0] _tmp_42;
reg signed [32-1:0] _tmp_43;
reg signed [32-1:0] _tmp_44;
reg signed [32-1:0] _tmp_45;
reg signed [32-1:0] _tmp_46;
reg signed [32-1:0] _tmp_47;
assign fifo_b_wdata = (_mystream_reduce_stream_oready && _mystream_reduce_sum_sink_fifo_enq && (_mystream_reduce_sum_sink_sel == 2))? _mystream_reduce_sum_sink_fifo_wdata : 'hx;
assign fifo_b_enq = (_mystream_reduce_stream_oready && _mystream_reduce_sum_sink_fifo_enq && (_mystream_reduce_sum_sink_sel == 2))? _mystream_reduce_stream_oready && _mystream_reduce_sum_sink_fifo_enq && (_mystream_reduce_sum_sink_sel == 2) && !fifo_b_almost_full : 0;
localparam _tmp_48 = 1;
wire [_tmp_48-1:0] _tmp_49;
assign _tmp_49 = !fifo_b_almost_full;
reg [_tmp_48-1:0] __tmp_49_1;
assign _mystream_reduce_stream_oready = ((_mystream_reduce_sink_busy && (_mystream_reduce_sum_sink_sel == 2))? !fifo_b_almost_full : 1) && (((_mystream_reduce_source_busy && (_mystream_reduce_a_source_sel == 1))? !fifo_a_empty || _mystream_reduce_a_idle : 1) && _mystream_reduce_stream_internal_oready);
reg [32-1:0] _mystream_reduce_sum_sink_fsm_1;
localparam _mystream_reduce_sum_sink_fsm_1_init = 0;
wire signed [32-1:0] mystream_bias_x_data;
wire signed [32-1:0] mystream_bias_y_data;
reg __mystream_bias_stream_ivalid_1;
reg signed [32-1:0] _plus_data_10;
wire signed [32-1:0] mystream_bias_z_data;
assign mystream_bias_z_data = _plus_data_10;
wire _set_flag_50;
assign _set_flag_50 = th_comp == 28;
assign fifo_b_deq = (_mystream_bias_stream_oready && _mystream_bias_x_source_fifo_deq && (_mystream_bias_x_source_sel == 1) && !fifo_b_empty)? 1 : 0;
localparam _tmp_51 = 1;
wire [_tmp_51-1:0] _tmp_52;
assign _tmp_52 = _mystream_bias_stream_oready && _mystream_bias_x_source_fifo_deq && (_mystream_bias_x_source_sel == 1) && !fifo_b_empty;
reg [_tmp_51-1:0] __tmp_52_1;
assign _mystream_bias_x_source_fifo_rdata = (_mystream_bias_x_source_sel == 1)? fifo_b_rdata : 'hx;
reg signed [32-1:0] __variable_wdata_8;
assign mystream_bias_x_data = __variable_wdata_8;
reg [32-1:0] _mystream_bias_x_source_fsm_0;
localparam _mystream_bias_x_source_fsm_0_init = 0;
wire _set_flag_53;
assign _set_flag_53 = th_comp == 29;
assign ram_b_0_addr = (_mystream_bias_stream_oready && _mystream_bias_y_source_ram_renable && (_mystream_bias_y_source_sel == 2))? _mystream_bias_y_source_ram_raddr :
(_tmp_17)? _tmp_15 : 'hx;
assign ram_b_0_enable = (_mystream_bias_stream_oready && _mystream_bias_y_source_ram_renable && (_mystream_bias_y_source_sel == 2))? 1'd1 :
(_tmp_17)? 1'd1 : 0;
localparam _tmp_54 = 1;
wire [_tmp_54-1:0] _tmp_55;
assign _tmp_55 = _mystream_bias_stream_oready && _mystream_bias_y_source_ram_renable && (_mystream_bias_y_source_sel == 2);
reg [_tmp_54-1:0] __tmp_55_1;
assign _mystream_bias_y_source_ram_rdata = (_mystream_bias_y_source_sel == 2)? ram_b_0_rdata : 'hx;
reg signed [32-1:0] __variable_wdata_9;
assign mystream_bias_y_data = __variable_wdata_9;
reg [32-1:0] _mystream_bias_y_source_fsm_1;
localparam _mystream_bias_y_source_fsm_1_init = 0;
wire _set_flag_56;
assign _set_flag_56 = th_comp == 30;
reg _tmp_57;
reg _tmp_58;
reg _tmp_59;
reg signed [32-1:0] _tmp_60;
reg signed [32-1:0] _tmp_61;
reg signed [32-1:0] _tmp_62;
assign fifo_c_wdata = (_mystream_bias_stream_oready && _mystream_bias_z_sink_fifo_enq && (_mystream_bias_z_sink_sel == 3))? _mystream_bias_z_sink_fifo_wdata : 'hx;
assign fifo_c_enq = (_mystream_bias_stream_oready && _mystream_bias_z_sink_fifo_enq && (_mystream_bias_z_sink_sel == 3))? _mystream_bias_stream_oready && _mystream_bias_z_sink_fifo_enq && (_mystream_bias_z_sink_sel == 3) && !fifo_c_almost_full : 0;
localparam _tmp_63 = 1;
wire [_tmp_63-1:0] _tmp_64;
assign _tmp_64 = !fifo_c_almost_full;
reg [_tmp_63-1:0] __tmp_64_1;
assign _mystream_bias_stream_oready = ((_mystream_bias_sink_busy && (_mystream_bias_z_sink_sel == 3))? !fifo_c_almost_full : 1) && (((_mystream_bias_source_busy && (_mystream_bias_x_source_sel == 1))? !fifo_b_empty || _mystream_bias_x_idle : 1) && _mystream_bias_stream_internal_oready);
reg [32-1:0] _mystream_bias_z_sink_fsm_2;
localparam _mystream_bias_z_sink_fsm_2_init = 0;
wire _set_flag_65;
assign _set_flag_65 = th_comp == 31;
assign _mystream_reduce_run_flag = (_set_flag_65)? 1 : 0;
reg _tmp_66;
reg _tmp_67;
reg _tmp_68;
reg _tmp_69;
reg _tmp_70;
reg _tmp_71;
reg [1-1:0] __variable_wdata_3;
assign mystream_reduce__reduce_reset_data = __variable_wdata_3;
reg _tmp_72;
reg _tmp_73;
reg _tmp_74;
reg _tmp_75;
assign _mystream_reduce_source_stop = _mystream_reduce_stream_oready && (_mystream_reduce_a_idle && (_mystream_reduce_fsm == 3));
localparam _tmp_76 = 1;
wire [_tmp_76-1:0] _tmp_77;
assign _tmp_77 = _mystream_reduce_a_idle && (_mystream_reduce_fsm == 3);
reg [_tmp_76-1:0] _tmp_78;
localparam _tmp_79 = 1;
wire [_tmp_79-1:0] _tmp_80;
assign _tmp_80 = _mystream_reduce_a_idle && (_mystream_reduce_fsm == 3);
reg [_tmp_79-1:0] _tmp_81;
reg _tmp_82;
reg _tmp_83;
reg _tmp_84;
reg _tmp_85;
reg _tmp_86;
reg _tmp_87;
assign _mystream_reduce_sink_start = _tmp_87;
reg _tmp_88;
reg _tmp_89;
reg _tmp_90;
reg _tmp_91;
reg _tmp_92;
reg _tmp_93;
assign _mystream_reduce_sink_stop = _tmp_93;
reg _tmp_94;
reg _tmp_95;
reg _tmp_96;
reg _tmp_97;
reg _tmp_98;
reg _tmp_99;
assign _mystream_reduce_sink_busy = _tmp_99;
reg _tmp_100;
assign _mystream_reduce_busy = _mystream_reduce_source_busy || _mystream_reduce_sink_busy || _mystream_reduce_busy_reg;
wire _set_flag_101;
assign _set_flag_101 = th_comp == 33;
assign _mystream_bias_run_flag = (_set_flag_101)? 1 : 0;
reg _tmp_102;
reg _tmp_103;
reg _tmp_104;
assign _mystream_bias_source_stop = _mystream_bias_stream_oready && (_mystream_bias_x_idle && _mystream_bias_y_idle && (_mystream_bias_fsm == 3));
localparam _tmp_105 = 1;
wire [_tmp_105-1:0] _tmp_106;
assign _tmp_106 = _mystream_bias_x_idle && _mystream_bias_y_idle && (_mystream_bias_fsm == 3);
reg [_tmp_105-1:0] _tmp_107;
reg _tmp_108;
reg _tmp_109;
reg _tmp_110;
assign _mystream_bias_sink_start = _tmp_110;
reg _tmp_111;
reg _tmp_112;
reg _tmp_113;
assign _mystream_bias_sink_stop = _tmp_113;
reg _tmp_114;
reg _tmp_115;
reg _tmp_116;
assign _mystream_bias_sink_busy = _tmp_116;
reg _tmp_117;
assign _mystream_bias_busy = _mystream_bias_source_busy || _mystream_bias_sink_busy || _mystream_bias_busy_reg;
always @(posedge CLK) begin
if(RST) begin
outstanding_wcount_0 <= 0;
_maxi_read_start <= 0;
_maxi_write_start <= 0;
maxi_awaddr <= 0;
maxi_awlen <= 0;
maxi_awvalid <= 0;
maxi_wdata <= 0;
maxi_wstrb <= 0;
maxi_wlast <= 0;
maxi_wvalid <= 0;
_maxi_ram_b_0_read_start <= 0;
_maxi_ram_b_0_read_op_sel <= 0;
_maxi_ram_b_0_read_local_addr <= 0;
_maxi_ram_b_0_read_global_addr <= 0;
_maxi_ram_b_0_read_size <= 0;
_maxi_ram_b_0_read_local_stride <= 0;
_maxi_read_idle <= 1;
_maxi_read_op_sel <= 0;
_maxi_read_local_addr <= 0;
_maxi_read_global_addr <= 0;
_maxi_read_size <= 0;
_maxi_read_local_stride <= 0;
maxi_araddr <= 0;
maxi_arlen <= 0;
maxi_arvalid <= 0;
counter_18 <= 0;
_maxi_cond_0_1 <= 0;
end else begin
if(_maxi_cond_0_1) begin
maxi_arvalid <= 0;
end
if(maxi_wlast && maxi_wvalid && maxi_wready && !(maxi_bvalid && maxi_bready) && (outstanding_wcount_0 < 7)) begin
outstanding_wcount_0 <= outstanding_wcount_0 + 1;
end
if(!(maxi_wlast && maxi_wvalid && maxi_wready) && (maxi_bvalid && maxi_bready) && (outstanding_wcount_0 > 0)) begin
outstanding_wcount_0 <= outstanding_wcount_0 - 1;
end
_maxi_read_start <= 0;
_maxi_write_start <= 0;
maxi_awaddr <= 0;
maxi_awlen <= 0;
maxi_awvalid <= 0;
maxi_wdata <= 0;
maxi_wstrb <= 0;
maxi_wlast <= 0;
maxi_wvalid <= 0;
_maxi_ram_b_0_read_start <= 0;
if(axim_flag_10) begin
_maxi_ram_b_0_read_start <= 1;
_maxi_ram_b_0_read_op_sel <= 1;
_maxi_ram_b_0_read_local_addr <= 0;
_maxi_ram_b_0_read_global_addr <= _th_comp_bias_addr_3;
_maxi_ram_b_0_read_size <= _th_comp_write_size_1;
_maxi_ram_b_0_read_local_stride <= 1;
end
if(_maxi_ram_b_0_read_start) begin
_maxi_read_idle <= 0;
end
if(_maxi_ram_b_0_read_start) begin
_maxi_read_start <= 1;
_maxi_read_op_sel <= _maxi_ram_b_0_read_op_sel;
_maxi_read_local_addr <= _maxi_ram_b_0_read_local_addr;
_maxi_read_global_addr <= _maxi_ram_b_0_read_global_addr;
_maxi_read_size <= _maxi_ram_b_0_read_size;
_maxi_read_local_stride <= _maxi_ram_b_0_read_local_stride;
end
if((_maxi_read_fsm == 2) && ((maxi_arready || !maxi_arvalid) && (counter_18 == 0))) begin
maxi_araddr <= _maxi_read_cur_global_addr;
maxi_arlen <= _maxi_read_cur_size - 1;
maxi_arvalid <= 1;
counter_18 <= _maxi_read_cur_size;
end
_maxi_cond_0_1 <= 1;
if(maxi_arvalid && !maxi_arready) begin
maxi_arvalid <= maxi_arvalid;
end
if(maxi_rready && maxi_rvalid && (counter_18 > 0)) begin
counter_18 <= counter_18 - 1;
end
if(axim_flag_19) begin
_maxi_read_idle <= 1;
end
end
end
assign _dataflow__variable_odata_0 = _wdata_11;
assign _dataflow__variable_ovalid_0 = _wvalid_12;
always @(posedge CLK) begin
if(RST) begin
saxi_bvalid <= 0;
prev_awvalid_4 <= 0;
prev_arvalid_5 <= 0;
writevalid_2 <= 0;
readvalid_3 <= 0;
addr_1 <= 0;
saxi_rdata <= 0;
saxi_rvalid <= 0;
_saxi_cond_0_1 <= 0;
_saxi_register_0 <= 0;
_saxi_flag_0 <= 0;
_saxi_register_1 <= 0;
_saxi_flag_1 <= 0;
_saxi_register_2 <= 0;
_saxi_flag_2 <= 0;
_saxi_register_3 <= 0;
_saxi_flag_3 <= 0;
_saxi_register_4 <= 0;
_saxi_flag_4 <= 0;
_saxi_register_5 <= 0;
_saxi_flag_5 <= 0;
_saxi_register_6 <= 0;
_saxi_flag_6 <= 0;
_saxi_register_7 <= 0;
_saxi_flag_7 <= 0;
end else begin
if(_saxi_cond_0_1) begin
saxi_rvalid <= 0;
end
if(saxi_bvalid && saxi_bready) begin
saxi_bvalid <= 0;
end
if(saxi_wvalid && saxi_wready) begin
saxi_bvalid <= 1;
end
prev_awvalid_4 <= saxi_awvalid;
prev_arvalid_5 <= saxi_arvalid;
writevalid_2 <= 0;
readvalid_3 <= 0;
if(saxi_awready && saxi_awvalid && !saxi_bvalid) begin
addr_1 <= saxi_awaddr;
writevalid_2 <= 1;
end else if(saxi_arready && saxi_arvalid) begin
addr_1 <= saxi_araddr;
readvalid_3 <= 1;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid)) begin
saxi_rdata <= _tmp_7;
saxi_rvalid <= 1;
end
_saxi_cond_0_1 <= 1;
if(saxi_rvalid && !saxi_rready) begin
saxi_rvalid <= saxi_rvalid;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_8 && (_tmp_6 == 0)) begin
_saxi_register_0 <= _tmp_9;
_saxi_flag_0 <= 0;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_8 && (_tmp_6 == 1)) begin
_saxi_register_1 <= _tmp_9;
_saxi_flag_1 <= 0;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_8 && (_tmp_6 == 2)) begin
_saxi_register_2 <= _tmp_9;
_saxi_flag_2 <= 0;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_8 && (_tmp_6 == 3)) begin
_saxi_register_3 <= _tmp_9;
_saxi_flag_3 <= 0;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_8 && (_tmp_6 == 4)) begin
_saxi_register_4 <= _tmp_9;
_saxi_flag_4 <= 0;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_8 && (_tmp_6 == 5)) begin
_saxi_register_5 <= _tmp_9;
_saxi_flag_5 <= 0;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_8 && (_tmp_6 == 6)) begin
_saxi_register_6 <= _tmp_9;
_saxi_flag_6 <= 0;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_8 && (_tmp_6 == 7)) begin
_saxi_register_7 <= _tmp_9;
_saxi_flag_7 <= 0;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_6 == 0)) begin
_saxi_register_0 <= saxi_wdata;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_6 == 1)) begin
_saxi_register_1 <= saxi_wdata;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_6 == 2)) begin
_saxi_register_2 <= saxi_wdata;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_6 == 3)) begin
_saxi_register_3 <= saxi_wdata;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_6 == 4)) begin
_saxi_register_4 <= saxi_wdata;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_6 == 5)) begin
_saxi_register_5 <= saxi_wdata;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_6 == 6)) begin
_saxi_register_6 <= saxi_wdata;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_6 == 7)) begin
_saxi_register_7 <= saxi_wdata;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 1) begin
_saxi_register_0 <= 0;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin
_saxi_register_1 <= 0;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin
_saxi_register_2 <= 0;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin
_saxi_register_3 <= 0;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin
_saxi_register_4 <= 0;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin
_saxi_register_5 <= 0;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin
_saxi_register_6 <= 0;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin
_saxi_register_7 <= 0;
end
if((th_comp == 3) && 0) begin
_saxi_register_0 <= 1;
_saxi_flag_0 <= 0;
end
if((th_comp == 3) && 1) begin
_saxi_register_1 <= 1;
_saxi_flag_1 <= 0;
end
if((th_comp == 3) && 0) begin
_saxi_register_2 <= 1;
_saxi_flag_2 <= 0;
end
if((th_comp == 3) && 0) begin
_saxi_register_3 <= 1;
_saxi_flag_3 <= 0;
end
if((th_comp == 3) && 0) begin
_saxi_register_4 <= 1;
_saxi_flag_4 <= 0;
end
if((th_comp == 3) && 0) begin
_saxi_register_5 <= 1;
_saxi_flag_5 <= 0;
end
if((th_comp == 3) && 0) begin
_saxi_register_6 <= 1;
_saxi_flag_6 <= 0;
end
if((th_comp == 3) && 0) begin
_saxi_register_7 <= 1;
_saxi_flag_7 <= 0;
end
if((th_comp == 37) && 0) begin
_saxi_register_0 <= 0;
_saxi_flag_0 <= 0;
end
if((th_comp == 37) && 1) begin
_saxi_register_1 <= 0;
_saxi_flag_1 <= 0;
end
if((th_comp == 37) && 0) begin
_saxi_register_2 <= 0;
_saxi_flag_2 <= 0;
end
if((th_comp == 37) && 0) begin
_saxi_register_3 <= 0;
_saxi_flag_3 <= 0;
end
if((th_comp == 37) && 0) begin
_saxi_register_4 <= 0;
_saxi_flag_4 <= 0;
end
if((th_comp == 37) && 0) begin
_saxi_register_5 <= 0;
_saxi_flag_5 <= 0;
end
if((th_comp == 37) && 0) begin
_saxi_register_6 <= 0;
_saxi_flag_6 <= 0;
end
if((th_comp == 37) && 0) begin
_saxi_register_7 <= 0;
_saxi_flag_7 <= 0;
end
end
end
localparam _saxi_register_fsm_1 = 1;
localparam _saxi_register_fsm_2 = 2;
localparam _saxi_register_fsm_3 = 3;
always @(posedge CLK) begin
if(RST) begin
_saxi_register_fsm <= _saxi_register_fsm_init;
end else begin
case(_saxi_register_fsm)
_saxi_register_fsm_init: begin
if(readvalid_3 || writevalid_2) begin
_tmp_6 <= (addr_1 >> _saxi_shift) & _saxi_mask;
end
if(readvalid_3) begin
_saxi_register_fsm <= _saxi_register_fsm_1;
end
if(writevalid_2) begin
_saxi_register_fsm <= _saxi_register_fsm_3;
end
end
_saxi_register_fsm_1: begin
if(saxi_rready && saxi_rvalid) begin
_saxi_register_fsm <= _saxi_register_fsm_init;
end
if((saxi_rready || !saxi_rvalid) && !(saxi_rready && saxi_rvalid)) begin
_saxi_register_fsm <= _saxi_register_fsm_2;
end
end
_saxi_register_fsm_2: begin
if(saxi_rready && saxi_rvalid) begin
_saxi_register_fsm <= _saxi_register_fsm_init;
end
end
_saxi_register_fsm_3: begin
if(saxi_wready && saxi_wvalid) begin
_saxi_register_fsm <= _saxi_register_fsm_init;
end
end
endcase
end
end
always @(posedge CLK) begin
if(RST) begin
_axi_in_read_start <= 0;
_axi_in_fifo_a_read_start <= 0;
_axi_in_fifo_a_read_op_sel <= 0;
_axi_in_fifo_a_read_size <= 0;
_axi_in_read_idle <= 1;
_axi_in_read_op_sel <= 0;
_axi_in_read_size <= 0;
end else begin
_axi_in_read_start <= 0;
_axi_in_fifo_a_read_start <= 0;
if(axistreamin_flag_20) begin
_axi_in_fifo_a_read_start <= 1;
_axi_in_fifo_a_read_op_sel <= 1;
_axi_in_fifo_a_read_size <= _th_comp_read_size_0;
end
if(_axi_in_fifo_a_read_start) begin
_axi_in_read_idle <= 0;
end
if(_axi_in_fifo_a_read_start) begin
_axi_in_read_start <= 1;
_axi_in_read_op_sel <= _axi_in_fifo_a_read_op_sel;
_axi_in_read_size <= _axi_in_fifo_a_read_size;
end
if(axistreamin_flag_23) begin
_axi_in_read_idle <= 1;
end
end
end
always @(posedge CLK) begin
if(RST) begin
_axi_out_write_start <= 0;
_axi_out_fifo_c_write_start <= 0;
_axi_out_fifo_c_write_op_sel <= 0;
_axi_out_fifo_c_write_size <= 0;
_axi_out_write_idle <= 1;
_axi_out_write_op_sel <= 0;
_axi_out_write_size <= 0;
rlast_28 <= 0;
repeat_rvalid_29 <= 0;
axi_out_tdata <= 0;
axi_out_tvalid <= 0;
axi_out_tlast <= 0;
_axi_out_cond_0_1 <= 0;
end else begin
if(_axi_out_cond_0_1) begin
axi_out_tvalid <= 0;
axi_out_tlast <= 0;
end
_axi_out_write_start <= 0;
_axi_out_fifo_c_write_start <= 0;
if(axistreamout_flag_24) begin
_axi_out_fifo_c_write_start <= 1;
_axi_out_fifo_c_write_op_sel <= 1;
_axi_out_fifo_c_write_size <= _th_comp_write_size_1;
end
if(_axi_out_fifo_c_write_start) begin
_axi_out_write_idle <= 0;
end
if(_axi_out_fifo_c_write_start) begin
_axi_out_write_start <= 1;
_axi_out_write_op_sel <= _axi_out_fifo_c_write_op_sel;
_axi_out_write_size <= _axi_out_fifo_c_write_size;
end
if((_axi_out_write_fsm == 1) && (_axi_out_write_op_sel == 1) && (axi_out_tready || !axi_out_tvalid) && !fifo_c_empty && (_axi_out_write_fifo_counter_25 > 0)) begin
rlast_28 <= _axi_out_write_fifo_counter_25 <= 1;
end
repeat_rvalid_29 <= 0;
if(__tmp_27_1 && !(axi_out_tready || !axi_out_tvalid)) begin
repeat_rvalid_29 <= 1;
end
if(repeat_rvalid_29 && !(axi_out_tready || !axi_out_tvalid)) begin
repeat_rvalid_29 <= 1;
end
if((__tmp_27_1 || repeat_rvalid_29) && (axi_out_tready || !axi_out_tvalid)) begin
axi_out_tdata <= fifo_c_rdata;
axi_out_tvalid <= 1;
axi_out_tlast <= rlast_28;
end
_axi_out_cond_0_1 <= 1;
if(axi_out_tvalid && !axi_out_tready) begin
axi_out_tvalid <= axi_out_tvalid;
axi_out_tlast <= axi_out_tlast;
end
if(axistreamout_flag_30) begin
_axi_out_write_idle <= 1;
end
end
end
always @(posedge CLK) begin
if(RST) begin
count_fifo_a <= 0;
__tmp_22_1 <= 0;
__tmp_33_1 <= 0;
end else begin
if(fifo_a_enq && !fifo_a_full && (fifo_a_deq && !fifo_a_empty)) begin
count_fifo_a <= count_fifo_a;
end else if(fifo_a_enq && !fifo_a_full) begin
count_fifo_a <= count_fifo_a + 1;
end else if(fifo_a_deq && !fifo_a_empty) begin
count_fifo_a <= count_fifo_a - 1;
end
__tmp_22_1 <= _tmp_22;
__tmp_33_1 <= _tmp_33;
end
end
always @(posedge CLK) begin
if(RST) begin
count_fifo_b <= 0;
__tmp_49_1 <= 0;
__tmp_52_1 <= 0;
end else begin
if(fifo_b_enq && !fifo_b_full && (fifo_b_deq && !fifo_b_empty)) begin
count_fifo_b <= count_fifo_b;
end else if(fifo_b_enq && !fifo_b_full) begin
count_fifo_b <= count_fifo_b + 1;
end else if(fifo_b_deq && !fifo_b_empty) begin
count_fifo_b <= count_fifo_b - 1;
end
__tmp_49_1 <= _tmp_49;
__tmp_52_1 <= _tmp_52;
end
end
always @(posedge CLK) begin
if(RST) begin
count_fifo_c <= 0;
__tmp_27_1 <= 0;
__tmp_64_1 <= 0;
end else begin
if(fifo_c_enq && !fifo_c_full && (fifo_c_deq && !fifo_c_empty)) begin
count_fifo_c <= count_fifo_c;
end else if(fifo_c_enq && !fifo_c_full) begin
count_fifo_c <= count_fifo_c + 1;
end else if(fifo_c_deq && !fifo_c_empty) begin
count_fifo_c <= count_fifo_c - 1;
end
__tmp_27_1 <= _tmp_27;
__tmp_64_1 <= _tmp_64;
end
end
always @(posedge CLK) begin
if(RST) begin
_tmp_15 <= 0;
_tmp_13 <= 0;
_tmp_16 <= 0;
_tmp_17 <= 0;
_tmp_14 <= 0;
_ram_b_cond_0_1 <= 0;
__tmp_55_1 <= 0;
end else begin
if(_ram_b_cond_0_1) begin
_tmp_17 <= 0;
_tmp_14 <= 0;
end
if(_maxi_read_start && (_maxi_read_op_sel == 1) && (_tmp_13 == 0)) begin
_tmp_15 <= _maxi_read_local_addr - _maxi_read_local_stride;
_tmp_13 <= _maxi_read_size;
end
if(_dataflow__variable_ovalid_0 && ((_tmp_13 > 0) && !_tmp_14) && (_tmp_13 > 0)) begin
_tmp_15 <= _tmp_15 + _maxi_read_local_stride;
_tmp_16 <= _dataflow__variable_odata_0;
_tmp_17 <= 1;
_tmp_13 <= _tmp_13 - 1;
end
if(_dataflow__variable_ovalid_0 && ((_tmp_13 > 0) && !_tmp_14) && (_tmp_13 == 1)) begin
_tmp_14 <= 1;
end
_ram_b_cond_0_1 <= 1;
__tmp_55_1 <= _tmp_55;
end
end
always @(posedge CLK) begin
if(RST) begin
_mystream_reduce_a_source_ram_renable <= 0;
_mystream_reduce_a_source_fifo_deq <= 0;
_mystream_reduce_a_idle <= 1;
_mystream_reduce_sum_sink_wenable <= 0;
_mystream_reduce_sum_sink_fifo_enq <= 0;
_mystream_reduce_sum_valid_sink_wenable <= 0;
_mystream_reduce_sum_valid_sink_fifo_enq <= 0;
__mystream_reduce_stream_ivalid_1 <= 0;
__mystream_reduce_stream_ivalid_2 <= 0;
__mystream_reduce_stream_ivalid_3 <= 0;
__mystream_reduce_stream_ivalid_4 <= 0;
_times_mul_odata_reg_2 <= 0;
__delay_data_11__variable_1 <= 0;
__delay_data_14__variable_3 <= 0;
__delay_data_12__delay_11__variable_1 <= 0;
__delay_data_15__delay_14__variable_3 <= 0;
__delay_data_13__delay_12__delay_11__variable_1 <= 0;
__delay_data_16__delay_15__delay_14__variable_3 <= 0;
_reduceadd_data_4 <= 1'sd0;
_reduceadd_count_4 <= 0;
_reduceadd_prev_count_max_4 <= 0;
_pulse_data_6 <= 1'sd0;
_pulse_count_6 <= 0;
_pulse_prev_count_max_6 <= 0;
_mystream_reduce_a_source_mode <= 5'b0;
_mystream_reduce_a_source_size <= 0;
_mystream_reduce_a_source_sel <= 0;
_mystream_reduce_a_source_size_buf <= 0;
__variable_wdata_0 <= 0;
_mystream_reduce_a_source_count <= 0;
_mystream_reduce_reduce_size_next_parameter_data <= 0;
__variable_wdata_1 <= 0;
_tmp_36 <= 0;
_tmp_37 <= 0;
_tmp_38 <= 0;
_tmp_39 <= 0;
_tmp_40 <= 0;
_tmp_41 <= 0;
_tmp_42 <= 0;
_tmp_43 <= 0;
_tmp_44 <= 0;
_tmp_45 <= 0;
_tmp_46 <= 0;
_tmp_47 <= 0;
_mystream_reduce_sum_sink_mode <= 5'b0;
_mystream_reduce_sum_sink_size <= 0;
_mystream_reduce_sum_sink_sel <= 0;
_mystream_reduce_sum_sink_size_buf <= 0;
_mystream_reduce_sum_sink_count <= 0;
_mystream_reduce_sum_sink_fifo_wdata <= 0;
_tmp_66 <= 0;
_tmp_67 <= 0;
_tmp_68 <= 0;
_tmp_69 <= 0;
_tmp_70 <= 0;
_tmp_71 <= 0;
__variable_wdata_3 <= 0;
_tmp_72 <= 0;
_tmp_73 <= 0;
_tmp_74 <= 0;
_tmp_75 <= 0;
_tmp_78 <= 0;
_tmp_81 <= 0;
_tmp_82 <= 0;
_tmp_83 <= 0;
_tmp_84 <= 0;
_tmp_85 <= 0;
_tmp_86 <= 0;
_tmp_87 <= 0;
_tmp_88 <= 0;
_tmp_89 <= 0;
_tmp_90 <= 0;
_tmp_91 <= 0;
_tmp_92 <= 0;
_tmp_93 <= 0;
_tmp_94 <= 0;
_tmp_95 <= 0;
_tmp_96 <= 0;
_tmp_97 <= 0;
_tmp_98 <= 0;
_tmp_99 <= 0;
_tmp_100 <= 0;
_mystream_reduce_busy_reg <= 0;
end else begin
if(_mystream_reduce_stream_oready) begin
_mystream_reduce_a_source_ram_renable <= 0;
_mystream_reduce_a_source_fifo_deq <= 0;
end
_mystream_reduce_a_idle <= _mystream_reduce_a_idle;
if(_mystream_reduce_stream_oready) begin
_mystream_reduce_sum_sink_wenable <= 0;
_mystream_reduce_sum_sink_fifo_enq <= 0;
end
if(_mystream_reduce_stream_oready) begin
_mystream_reduce_sum_valid_sink_wenable <= 0;
_mystream_reduce_sum_valid_sink_fifo_enq <= 0;
end
if(_mystream_reduce_stream_oready) begin
__mystream_reduce_stream_ivalid_1 <= _mystream_reduce_stream_ivalid;
end
if(_mystream_reduce_stream_oready) begin
__mystream_reduce_stream_ivalid_2 <= __mystream_reduce_stream_ivalid_1;
end
if(_mystream_reduce_stream_oready) begin
__mystream_reduce_stream_ivalid_3 <= __mystream_reduce_stream_ivalid_2;
end
if(_mystream_reduce_stream_oready) begin
__mystream_reduce_stream_ivalid_4 <= __mystream_reduce_stream_ivalid_3;
end
if(_mystream_reduce_stream_oready) begin
_times_mul_odata_reg_2 <= _times_mul_odata_2;
end
if(_mystream_reduce_stream_oready) begin
__delay_data_11__variable_1 <= mystream_reduce_reduce_size_data;
end
if(_mystream_reduce_stream_oready) begin
__delay_data_14__variable_3 <= mystream_reduce__reduce_reset_data;
end
if(_mystream_reduce_stream_oready) begin
__delay_data_12__delay_11__variable_1 <= __delay_data_11__variable_1;
end
if(_mystream_reduce_stream_oready) begin
__delay_data_15__delay_14__variable_3 <= __delay_data_14__variable_3;
end
if(_mystream_reduce_stream_oready) begin
__delay_data_13__delay_12__delay_11__variable_1 <= __delay_data_12__delay_11__variable_1;
end
if(_mystream_reduce_stream_oready) begin
__delay_data_16__delay_15__delay_14__variable_3 <= __delay_data_15__delay_14__variable_3;
end
if(__mystream_reduce_stream_ivalid_3 && _mystream_reduce_stream_oready && _reduceadd_reset_cond_4) begin
_reduceadd_data_4 <= 1'sd0;
end
if(__mystream_reduce_stream_ivalid_3 && _mystream_reduce_stream_oready) begin
_reduceadd_count_4 <= (_reduceadd_current_count_4 >= __delay_data_13__delay_12__delay_11__variable_1 - 1)? 0 : _reduceadd_current_count_4 + 1;
end
if(__mystream_reduce_stream_ivalid_3 && _mystream_reduce_stream_oready) begin
_reduceadd_prev_count_max_4 <= _reduceadd_current_count_4 >= __delay_data_13__delay_12__delay_11__variable_1 - 1;
end
if(__mystream_reduce_stream_ivalid_3 && _mystream_reduce_stream_oready) begin
_reduceadd_data_4 <= _reduceadd_current_data_4 + _times_data_2;
end
if(__mystream_reduce_stream_ivalid_3 && _mystream_reduce_stream_oready && _pulse_reset_cond_6) begin
_pulse_data_6 <= 1'sd0;
end
if(__mystream_reduce_stream_ivalid_3 && _mystream_reduce_stream_oready) begin
_pulse_count_6 <= (_pulse_current_count_6 >= __delay_data_13__delay_12__delay_11__variable_1 - 1)? 0 : _pulse_current_count_6 + 1;
end
if(__mystream_reduce_stream_ivalid_3 && _mystream_reduce_stream_oready) begin
_pulse_prev_count_max_6 <= _pulse_current_count_6 >= __delay_data_13__delay_12__delay_11__variable_1 - 1;
end
if(__mystream_reduce_stream_ivalid_3 && _mystream_reduce_stream_oready) begin
_pulse_data_6 <= _pulse_current_count_6 >= __delay_data_13__delay_12__delay_11__variable_1 - 1;
end
if(_set_flag_31) begin
_mystream_reduce_a_source_mode <= 5'b10000;
_mystream_reduce_a_source_size <= _th_comp_read_size_0;
end
if(_set_flag_31) begin
_mystream_reduce_a_source_sel <= 1;
end
if(_mystream_reduce_source_start && _mystream_reduce_a_source_mode & 5'b10000 && _mystream_reduce_stream_oready) begin
_mystream_reduce_a_idle <= 0;
_mystream_reduce_a_source_size_buf <= _mystream_reduce_a_source_size;
end
if(_mystream_reduce_stream_oready && _mystream_reduce_source_busy && _mystream_reduce_is_root) begin
__variable_wdata_0 <= _mystream_reduce_a_source_fifo_rdata;
end
if((_mystream_reduce_a_source_fsm_0 == 1) && _mystream_reduce_stream_oready) begin
_mystream_reduce_a_source_fifo_deq <= 1;
_mystream_reduce_a_source_count <= _mystream_reduce_a_source_size_buf;
end
if((_mystream_reduce_a_source_fsm_0 == 2) && _mystream_reduce_stream_oready) begin
_mystream_reduce_a_source_fifo_deq <= 1;
_mystream_reduce_a_source_count <= _mystream_reduce_a_source_count - 1;
end
if((_mystream_reduce_a_source_fsm_0 == 2) && (_mystream_reduce_a_source_count == 1) && _mystream_reduce_stream_oready) begin
_mystream_reduce_a_source_fifo_deq <= 0;
_mystream_reduce_a_idle <= 1;
end
if((_mystream_reduce_a_source_fsm_0 == 2) && _mystream_reduce_source_stop && _mystream_reduce_stream_oready) begin
_mystream_reduce_a_source_fifo_deq <= 0;
_mystream_reduce_a_idle <= 1;
end
if(_set_flag_34) begin
_mystream_reduce_reduce_size_next_parameter_data <= _th_comp_reduce_size_2;
end
if(_mystream_reduce_source_start) begin
__variable_wdata_1 <= _mystream_reduce_reduce_size_next_parameter_data;
end
if(_mystream_reduce_stream_oready) begin
_tmp_36 <= _set_flag_35;
end
if(_mystream_reduce_stream_oready) begin
_tmp_37 <= _tmp_36;
end
if(_mystream_reduce_stream_oready) begin
_tmp_38 <= _tmp_37;
end
if(_mystream_reduce_stream_oready) begin
_tmp_39 <= _tmp_38;
end
if(_mystream_reduce_stream_oready) begin
_tmp_40 <= _tmp_39;
end
if(_mystream_reduce_stream_oready) begin
_tmp_41 <= _tmp_40;
end
if(_mystream_reduce_stream_oready) begin
_tmp_42 <= _th_comp_write_size_1;
end
if(_mystream_reduce_stream_oready) begin
_tmp_43 <= _tmp_42;
end
if(_mystream_reduce_stream_oready) begin
_tmp_44 <= _tmp_43;
end
if(_mystream_reduce_stream_oready) begin
_tmp_45 <= _tmp_44;
end
if(_mystream_reduce_stream_oready) begin
_tmp_46 <= _tmp_45;
end
if(_mystream_reduce_stream_oready) begin
_tmp_47 <= _tmp_46;
end
if(_tmp_41) begin
_mystream_reduce_sum_sink_mode <= 5'b10000;
_mystream_reduce_sum_sink_size <= _tmp_47;
end
if(_tmp_41) begin
_mystream_reduce_sum_sink_sel <= 2;
end
if(_mystream_reduce_sink_start && _mystream_reduce_sum_sink_mode & 5'b10000 && _mystream_reduce_stream_oready) begin
_mystream_reduce_sum_sink_size_buf <= _mystream_reduce_sum_sink_size;
end
if((_mystream_reduce_sum_sink_fsm_1 == 1) && _mystream_reduce_stream_oready) begin
_mystream_reduce_sum_sink_count <= _mystream_reduce_sum_sink_size;
_mystream_reduce_sum_sink_size_buf <= _mystream_reduce_sum_sink_size;
end
if((_mystream_reduce_sum_sink_fsm_1 == 2) && mystream_reduce_sum_valid_data && _mystream_reduce_stream_oready) begin
_mystream_reduce_sum_sink_fifo_wdata <= mystream_reduce_sum_data;
_mystream_reduce_sum_sink_fifo_enq <= 1;
_mystream_reduce_sum_sink_count <= _mystream_reduce_sum_sink_count - 1;
end
if(_mystream_reduce_stream_oready) begin
_tmp_66 <= _mystream_reduce_source_start;
end
if(_mystream_reduce_stream_oready) begin
_tmp_67 <= _tmp_66;
end
if(_mystream_reduce_stream_oready) begin
_tmp_68 <= _tmp_67;
end
if(_mystream_reduce_stream_oready) begin
_tmp_69 <= _mystream_reduce_source_start;
end
if(_mystream_reduce_stream_oready) begin
_tmp_70 <= _tmp_69;
end
if(_mystream_reduce_stream_oready) begin
_tmp_71 <= _tmp_70;
end
if(_mystream_reduce_stream_oready && _tmp_71) begin
__variable_wdata_3 <= 1;
end
if(_mystream_reduce_stream_oready) begin
_tmp_72 <= _mystream_reduce_source_start;
end
if(_mystream_reduce_stream_oready) begin
_tmp_73 <= _tmp_72;
end
if(_mystream_reduce_stream_oready) begin
_tmp_74 <= _tmp_73;
end
if(_mystream_reduce_stream_oready) begin
_tmp_75 <= _tmp_74;
end
if(_mystream_reduce_stream_oready && _tmp_75) begin
__variable_wdata_3 <= 0;
end
if(_mystream_reduce_stream_oready) begin
_tmp_78 <= _tmp_77;
end
if(_mystream_reduce_stream_oready) begin
_tmp_81 <= _tmp_80;
end
if(_mystream_reduce_stream_oready && _tmp_81) begin
__variable_wdata_3 <= 1;
end
if(_mystream_reduce_stream_oready) begin
_tmp_82 <= _mystream_reduce_source_start;
end
if(_mystream_reduce_stream_oready) begin
_tmp_83 <= _tmp_82;
end
if(_mystream_reduce_stream_oready) begin
_tmp_84 <= _tmp_83;
end
if(_mystream_reduce_stream_oready) begin
_tmp_85 <= _tmp_84;
end
if(_mystream_reduce_stream_oready) begin
_tmp_86 <= _tmp_85;
end
if(_mystream_reduce_stream_oready) begin
_tmp_87 <= _tmp_86;
end
if(_mystream_reduce_stream_oready) begin
_tmp_88 <= _mystream_reduce_source_stop;
end
if(_mystream_reduce_stream_oready) begin
_tmp_89 <= _tmp_88;
end
if(_mystream_reduce_stream_oready) begin
_tmp_90 <= _tmp_89;
end
if(_mystream_reduce_stream_oready) begin
_tmp_91 <= _tmp_90;
end
if(_mystream_reduce_stream_oready) begin
_tmp_92 <= _tmp_91;
end
if(_mystream_reduce_stream_oready) begin
_tmp_93 <= _tmp_92;
end
if(_mystream_reduce_stream_oready) begin
_tmp_94 <= _mystream_reduce_source_busy;
end
if(_mystream_reduce_stream_oready) begin
_tmp_95 <= _tmp_94;
end
if(_mystream_reduce_stream_oready) begin
_tmp_96 <= _tmp_95;
end
if(_mystream_reduce_stream_oready) begin
_tmp_97 <= _tmp_96;
end
if(_mystream_reduce_stream_oready) begin
_tmp_98 <= _tmp_97;
end
if(_mystream_reduce_stream_oready) begin
_tmp_99 <= _tmp_98;
end
if(_mystream_reduce_stream_oready) begin
_tmp_100 <= _mystream_reduce_sink_busy;
end
if(!_mystream_reduce_sink_busy && _tmp_100) begin
_mystream_reduce_busy_reg <= 0;
end
if(_mystream_reduce_source_busy) begin
_mystream_reduce_busy_reg <= 1;
end
end
end
localparam _mystream_reduce_fsm_1 = 1;
localparam _mystream_reduce_fsm_2 = 2;
localparam _mystream_reduce_fsm_3 = 3;
always @(posedge CLK) begin
if(RST) begin
_mystream_reduce_fsm <= _mystream_reduce_fsm_init;
_mystream_reduce_source_start <= 0;
_mystream_reduce_source_busy <= 0;
_mystream_reduce_stream_ivalid <= 0;
end else begin
if(_mystream_reduce_stream_oready && _tmp_68) begin
_mystream_reduce_stream_ivalid <= 1;
end
if(_mystream_reduce_stream_oready && _tmp_78) begin
_mystream_reduce_stream_ivalid <= 0;
end
case(_mystream_reduce_fsm)
_mystream_reduce_fsm_init: begin
if(_mystream_reduce_run_flag) begin
_mystream_reduce_source_start <= 1;
end
if(_mystream_reduce_run_flag) begin
_mystream_reduce_fsm <= _mystream_reduce_fsm_1;
end
end
_mystream_reduce_fsm_1: begin
if(_mystream_reduce_source_start && _mystream_reduce_stream_oready) begin
_mystream_reduce_source_start <= 0;
_mystream_reduce_source_busy <= 1;
end
if(_mystream_reduce_source_start && _mystream_reduce_stream_oready) begin
_mystream_reduce_fsm <= _mystream_reduce_fsm_2;
end
end
_mystream_reduce_fsm_2: begin
if(_mystream_reduce_stream_oready) begin
_mystream_reduce_fsm <= _mystream_reduce_fsm_3;
end
end
_mystream_reduce_fsm_3: begin
if(_mystream_reduce_stream_oready && (_mystream_reduce_a_idle && (_mystream_reduce_fsm == 3))) begin
_mystream_reduce_source_busy <= 0;
end
if(_mystream_reduce_stream_oready && (_mystream_reduce_a_idle && (_mystream_reduce_fsm == 3)) && _mystream_reduce_run_flag) begin
_mystream_reduce_source_start <= 1;
end
if(_mystream_reduce_stream_oready && (_mystream_reduce_a_idle && (_mystream_reduce_fsm == 3))) begin
_mystream_reduce_fsm <= _mystream_reduce_fsm_init;
end
if(_mystream_reduce_stream_oready && (_mystream_reduce_a_idle && (_mystream_reduce_fsm == 3)) && _mystream_reduce_run_flag) begin
_mystream_reduce_fsm <= _mystream_reduce_fsm_1;
end
end
endcase
end
end
always @(posedge CLK) begin
if(RST) begin
_mystream_bias_x_source_ram_renable <= 0;
_mystream_bias_x_source_fifo_deq <= 0;
_mystream_bias_x_idle <= 1;
_mystream_bias_y_source_ram_renable <= 0;
_mystream_bias_y_source_fifo_deq <= 0;
_mystream_bias_y_idle <= 1;
_mystream_bias_z_sink_wenable <= 0;
_mystream_bias_z_sink_fifo_enq <= 0;
__mystream_bias_stream_ivalid_1 <= 0;
_plus_data_10 <= 0;
_mystream_bias_x_source_mode <= 5'b0;
_mystream_bias_x_source_size <= 0;
_mystream_bias_x_source_sel <= 0;
_mystream_bias_x_source_size_buf <= 0;
__variable_wdata_8 <= 0;
_mystream_bias_x_source_count <= 0;
_mystream_bias_y_source_mode <= 5'b0;
_mystream_bias_y_source_offset <= 0;
_mystream_bias_y_source_size <= 0;
_mystream_bias_y_source_stride <= 0;
_mystream_bias_y_source_sel <= 0;
_mystream_bias_y_source_offset_buf <= 0;
_mystream_bias_y_source_size_buf <= 0;
_mystream_bias_y_source_stride_buf <= 0;
__variable_wdata_9 <= 0;
_mystream_bias_y_source_ram_raddr <= 0;
_mystream_bias_y_source_count <= 0;
_tmp_57 <= 0;
_tmp_58 <= 0;
_tmp_59 <= 0;
_tmp_60 <= 0;
_tmp_61 <= 0;
_tmp_62 <= 0;
_mystream_bias_z_sink_mode <= 5'b0;
_mystream_bias_z_sink_size <= 0;
_mystream_bias_z_sink_sel <= 0;
_mystream_bias_z_sink_size_buf <= 0;
_mystream_bias_z_sink_count <= 0;
_mystream_bias_z_sink_fifo_wdata <= 0;
_tmp_102 <= 0;
_tmp_103 <= 0;
_tmp_104 <= 0;
_tmp_107 <= 0;
_tmp_108 <= 0;
_tmp_109 <= 0;
_tmp_110 <= 0;
_tmp_111 <= 0;
_tmp_112 <= 0;
_tmp_113 <= 0;
_tmp_114 <= 0;
_tmp_115 <= 0;
_tmp_116 <= 0;
_tmp_117 <= 0;
_mystream_bias_busy_reg <= 0;
end else begin
if(_mystream_bias_stream_oready) begin
_mystream_bias_x_source_ram_renable <= 0;
_mystream_bias_x_source_fifo_deq <= 0;
end
_mystream_bias_x_idle <= _mystream_bias_x_idle;
if(_mystream_bias_stream_oready) begin
_mystream_bias_y_source_ram_renable <= 0;
_mystream_bias_y_source_fifo_deq <= 0;
end
_mystream_bias_y_idle <= _mystream_bias_y_idle;
if(_mystream_bias_stream_oready) begin
_mystream_bias_z_sink_wenable <= 0;
_mystream_bias_z_sink_fifo_enq <= 0;
end
if(_mystream_bias_stream_oready) begin
__mystream_bias_stream_ivalid_1 <= _mystream_bias_stream_ivalid;
end
if(_mystream_bias_stream_oready) begin
_plus_data_10 <= mystream_bias_x_data + mystream_bias_y_data;
end
if(_set_flag_50) begin
_mystream_bias_x_source_mode <= 5'b10000;
_mystream_bias_x_source_size <= _th_comp_write_size_1;
end
if(_set_flag_50) begin
_mystream_bias_x_source_sel <= 1;
end
if(_mystream_bias_source_start && _mystream_bias_x_source_mode & 5'b10000 && _mystream_bias_stream_oready) begin
_mystream_bias_x_idle <= 0;
_mystream_bias_x_source_size_buf <= _mystream_bias_x_source_size;
end
if(_mystream_bias_stream_oready && _mystream_bias_source_busy && _mystream_bias_is_root) begin
__variable_wdata_8 <= _mystream_bias_x_source_fifo_rdata;
end
if((_mystream_bias_x_source_fsm_0 == 1) && _mystream_bias_stream_oready) begin
_mystream_bias_x_source_fifo_deq <= 1;
_mystream_bias_x_source_count <= _mystream_bias_x_source_size_buf;
end
if((_mystream_bias_x_source_fsm_0 == 2) && _mystream_bias_stream_oready) begin
_mystream_bias_x_source_fifo_deq <= 1;
_mystream_bias_x_source_count <= _mystream_bias_x_source_count - 1;
end
if((_mystream_bias_x_source_fsm_0 == 2) && (_mystream_bias_x_source_count == 1) && _mystream_bias_stream_oready) begin
_mystream_bias_x_source_fifo_deq <= 0;
_mystream_bias_x_idle <= 1;
end
if((_mystream_bias_x_source_fsm_0 == 2) && _mystream_bias_source_stop && _mystream_bias_stream_oready) begin
_mystream_bias_x_source_fifo_deq <= 0;
_mystream_bias_x_idle <= 1;
end
if(_set_flag_53) begin
_mystream_bias_y_source_mode <= 5'b1;
_mystream_bias_y_source_offset <= 0;
_mystream_bias_y_source_size <= _th_comp_write_size_1;
_mystream_bias_y_source_stride <= 1;
end
if(_set_flag_53) begin
_mystream_bias_y_source_sel <= 2;
end
if(_mystream_bias_source_start && _mystream_bias_y_source_mode & 5'b1 && _mystream_bias_stream_oready) begin
_mystream_bias_y_source_offset_buf <= _mystream_bias_y_source_offset;
_mystream_bias_y_source_size_buf <= _mystream_bias_y_source_size;
_mystream_bias_y_source_stride_buf <= _mystream_bias_y_source_stride;
end
if(_mystream_bias_stream_oready && _mystream_bias_source_busy && _mystream_bias_is_root) begin
__variable_wdata_9 <= _mystream_bias_y_source_ram_rdata;
end
if((_mystream_bias_y_source_fsm_1 == 1) && _mystream_bias_stream_oready) begin
_mystream_bias_y_idle <= 0;
_mystream_bias_y_source_ram_raddr <= _mystream_bias_y_source_offset_buf;
_mystream_bias_y_source_ram_renable <= 1;
_mystream_bias_y_source_count <= _mystream_bias_y_source_size_buf;
end
if((_mystream_bias_y_source_fsm_1 == 2) && _mystream_bias_stream_oready) begin
_mystream_bias_y_source_ram_raddr <= _mystream_bias_y_source_ram_raddr + _mystream_bias_y_source_stride_buf;
_mystream_bias_y_source_ram_renable <= 1;
_mystream_bias_y_source_count <= _mystream_bias_y_source_count - 1;
end
if((_mystream_bias_y_source_fsm_1 == 2) && (_mystream_bias_y_source_count == 1) && _mystream_bias_stream_oready) begin
_mystream_bias_y_source_ram_renable <= 0;
_mystream_bias_y_idle <= 1;
end
if((_mystream_bias_y_source_fsm_1 == 2) && _mystream_bias_source_stop && _mystream_bias_stream_oready) begin
_mystream_bias_y_source_ram_renable <= 0;
_mystream_bias_y_idle <= 1;
end
if(_mystream_bias_stream_oready) begin
_tmp_57 <= _set_flag_56;
end
if(_mystream_bias_stream_oready) begin
_tmp_58 <= _tmp_57;
end
if(_mystream_bias_stream_oready) begin
_tmp_59 <= _tmp_58;
end
if(_mystream_bias_stream_oready) begin
_tmp_60 <= _th_comp_write_size_1;
end
if(_mystream_bias_stream_oready) begin
_tmp_61 <= _tmp_60;
end
if(_mystream_bias_stream_oready) begin
_tmp_62 <= _tmp_61;
end
if(_tmp_59) begin
_mystream_bias_z_sink_mode <= 5'b10000;
_mystream_bias_z_sink_size <= _tmp_62;
end
if(_tmp_59) begin
_mystream_bias_z_sink_sel <= 3;
end
if(_mystream_bias_sink_start && _mystream_bias_z_sink_mode & 5'b10000 && _mystream_bias_stream_oready) begin
_mystream_bias_z_sink_size_buf <= _mystream_bias_z_sink_size;
end
if((_mystream_bias_z_sink_fsm_2 == 1) && _mystream_bias_stream_oready) begin
_mystream_bias_z_sink_count <= _mystream_bias_z_sink_size;
_mystream_bias_z_sink_size_buf <= _mystream_bias_z_sink_size;
end
if((_mystream_bias_z_sink_fsm_2 == 2) && _mystream_bias_stream_oready) begin
_mystream_bias_z_sink_fifo_wdata <= mystream_bias_z_data;
_mystream_bias_z_sink_fifo_enq <= 1;
_mystream_bias_z_sink_count <= _mystream_bias_z_sink_count - 1;
end
if(_mystream_bias_stream_oready) begin
_tmp_102 <= _mystream_bias_source_start;
end
if(_mystream_bias_stream_oready) begin
_tmp_103 <= _tmp_102;
end
if(_mystream_bias_stream_oready) begin
_tmp_104 <= _tmp_103;
end
if(_mystream_bias_stream_oready) begin
_tmp_107 <= _tmp_106;
end
if(_mystream_bias_stream_oready) begin
_tmp_108 <= _mystream_bias_source_start;
end
if(_mystream_bias_stream_oready) begin
_tmp_109 <= _tmp_108;
end
if(_mystream_bias_stream_oready) begin
_tmp_110 <= _tmp_109;
end
if(_mystream_bias_stream_oready) begin
_tmp_111 <= _mystream_bias_source_stop;
end
if(_mystream_bias_stream_oready) begin
_tmp_112 <= _tmp_111;
end
if(_mystream_bias_stream_oready) begin
_tmp_113 <= _tmp_112;
end
if(_mystream_bias_stream_oready) begin
_tmp_114 <= _mystream_bias_source_busy;
end
if(_mystream_bias_stream_oready) begin
_tmp_115 <= _tmp_114;
end
if(_mystream_bias_stream_oready) begin
_tmp_116 <= _tmp_115;
end
if(_mystream_bias_stream_oready) begin
_tmp_117 <= _mystream_bias_sink_busy;
end
if(!_mystream_bias_sink_busy && _tmp_117) begin
_mystream_bias_busy_reg <= 0;
end
if(_mystream_bias_source_busy) begin
_mystream_bias_busy_reg <= 1;
end
end
end
localparam _mystream_bias_fsm_1 = 1;
localparam _mystream_bias_fsm_2 = 2;
localparam _mystream_bias_fsm_3 = 3;
always @(posedge CLK) begin
if(RST) begin
_mystream_bias_fsm <= _mystream_bias_fsm_init;
_mystream_bias_source_start <= 0;
_mystream_bias_source_busy <= 0;
_mystream_bias_stream_ivalid <= 0;
end else begin
if(_mystream_bias_stream_oready && _tmp_104) begin
_mystream_bias_stream_ivalid <= 1;
end
if(_mystream_bias_stream_oready && _tmp_107) begin
_mystream_bias_stream_ivalid <= 0;
end
case(_mystream_bias_fsm)
_mystream_bias_fsm_init: begin
if(_mystream_bias_run_flag) begin
_mystream_bias_source_start <= 1;
end
if(_mystream_bias_run_flag) begin
_mystream_bias_fsm <= _mystream_bias_fsm_1;
end
end
_mystream_bias_fsm_1: begin
if(_mystream_bias_source_start && _mystream_bias_stream_oready) begin
_mystream_bias_source_start <= 0;
_mystream_bias_source_busy <= 1;
end
if(_mystream_bias_source_start && _mystream_bias_stream_oready) begin
_mystream_bias_fsm <= _mystream_bias_fsm_2;
end
end
_mystream_bias_fsm_2: begin
if(_mystream_bias_stream_oready) begin
_mystream_bias_fsm <= _mystream_bias_fsm_3;
end
end
_mystream_bias_fsm_3: begin
if(_mystream_bias_stream_oready && (_mystream_bias_x_idle && _mystream_bias_y_idle && (_mystream_bias_fsm == 3))) begin
_mystream_bias_source_busy <= 0;
end
if(_mystream_bias_stream_oready && (_mystream_bias_x_idle && _mystream_bias_y_idle && (_mystream_bias_fsm == 3)) && _mystream_bias_run_flag) begin
_mystream_bias_source_start <= 1;
end
if(_mystream_bias_stream_oready && (_mystream_bias_x_idle && _mystream_bias_y_idle && (_mystream_bias_fsm == 3))) begin
_mystream_bias_fsm <= _mystream_bias_fsm_init;
end
if(_mystream_bias_stream_oready && (_mystream_bias_x_idle && _mystream_bias_y_idle && (_mystream_bias_fsm == 3)) && _mystream_bias_run_flag) begin
_mystream_bias_fsm <= _mystream_bias_fsm_1;
end
end
endcase
end
end
localparam th_comp_1 = 1;
localparam th_comp_2 = 2;
localparam th_comp_3 = 3;
localparam th_comp_4 = 4;
localparam th_comp_5 = 5;
localparam th_comp_6 = 6;
localparam th_comp_7 = 7;
localparam th_comp_8 = 8;
localparam th_comp_9 = 9;
localparam th_comp_10 = 10;
localparam th_comp_11 = 11;
localparam th_comp_12 = 12;
localparam th_comp_13 = 13;
localparam th_comp_14 = 14;
localparam th_comp_15 = 15;
localparam th_comp_16 = 16;
localparam th_comp_17 = 17;
localparam th_comp_18 = 18;
localparam th_comp_19 = 19;
localparam th_comp_20 = 20;
localparam th_comp_21 = 21;
localparam th_comp_22 = 22;
localparam th_comp_23 = 23;
localparam th_comp_24 = 24;
localparam th_comp_25 = 25;
localparam th_comp_26 = 26;
localparam th_comp_27 = 27;
localparam th_comp_28 = 28;
localparam th_comp_29 = 29;
localparam th_comp_30 = 30;
localparam th_comp_31 = 31;
localparam th_comp_32 = 32;
localparam th_comp_33 = 33;
localparam th_comp_34 = 34;
localparam th_comp_35 = 35;
localparam th_comp_36 = 36;
localparam th_comp_37 = 37;
localparam th_comp_38 = 38;
localparam th_comp_39 = 39;
always @(posedge CLK) begin
if(RST) begin
th_comp <= th_comp_init;
_d1_th_comp <= th_comp_init;
_th_comp_read_size_0 <= 0;
_th_comp_write_size_1 <= 0;
_th_comp_reduce_size_2 <= 0;
_th_comp_bias_addr_3 <= 0;
axim_flag_10 <= 0;
_th_comp_cond_14_0_1 <= 0;
axistreamin_flag_20 <= 0;
_th_comp_cond_19_1_1 <= 0;
axistreamout_flag_24 <= 0;
_th_comp_cond_22_2_1 <= 0;
end else begin
_d1_th_comp <= th_comp;
case(_d1_th_comp)
th_comp_14: begin
if(_th_comp_cond_14_0_1) begin
axim_flag_10 <= 0;
end
end
th_comp_19: begin
if(_th_comp_cond_19_1_1) begin
axistreamin_flag_20 <= 0;
end
end
th_comp_22: begin
if(_th_comp_cond_22_2_1) begin
axistreamout_flag_24 <= 0;
end
end
endcase
case(th_comp)
th_comp_init: begin
th_comp <= th_comp_1;
end
th_comp_1: begin
if(1) begin
th_comp <= th_comp_2;
end else begin
th_comp <= th_comp_39;
end
end
th_comp_2: begin
if(_saxi_register_0 == 1) begin
th_comp <= th_comp_3;
end
end
th_comp_3: begin
th_comp <= th_comp_4;
end
th_comp_4: begin
_th_comp_read_size_0 <= _saxi_register_2;
th_comp <= th_comp_5;
end
th_comp_5: begin
_th_comp_write_size_1 <= _saxi_register_3;
th_comp <= th_comp_6;
end
th_comp_6: begin
_th_comp_reduce_size_2 <= _saxi_register_4;
th_comp <= th_comp_7;
end
th_comp_7: begin
_th_comp_bias_addr_3 <= _saxi_register_5;
th_comp <= th_comp_8;
end
th_comp_8: begin
if((_th_comp_read_size_0 < 0) || (_th_comp_read_size_0 == 0)) begin
th_comp <= th_comp_9;
end else begin
th_comp <= th_comp_10;
end
end
th_comp_9: begin
_th_comp_read_size_0 <= 1;
th_comp <= th_comp_10;
end
th_comp_10: begin
if((_th_comp_write_size_1 < 0) || (_th_comp_write_size_1 == 0)) begin
th_comp <= th_comp_11;
end else begin
th_comp <= th_comp_12;
end
end
th_comp_11: begin
_th_comp_write_size_1 <= 1;
th_comp <= th_comp_12;
end
th_comp_12: begin
if((_th_comp_reduce_size_2 < 0) || (_th_comp_reduce_size_2 == 0)) begin
th_comp <= th_comp_13;
end else begin
th_comp <= th_comp_14;
end
end
th_comp_13: begin
_th_comp_reduce_size_2 <= 1;
th_comp <= th_comp_14;
end
th_comp_14: begin
axim_flag_10 <= 1;
_th_comp_cond_14_0_1 <= 1;
th_comp <= th_comp_15;
end
th_comp_15: begin
th_comp <= th_comp_16;
end
th_comp_16: begin
th_comp <= th_comp_17;
end
th_comp_17: begin
if(_maxi_read_idle) begin
th_comp <= th_comp_18;
end
end
th_comp_18: begin
if(_axi_in_read_idle) begin
th_comp <= th_comp_19;
end
end
th_comp_19: begin
axistreamin_flag_20 <= 1;
_th_comp_cond_19_1_1 <= 1;
th_comp <= th_comp_20;
end
th_comp_20: begin
th_comp <= th_comp_21;
end
th_comp_21: begin
if(_axi_out_write_idle) begin
th_comp <= th_comp_22;
end
end
th_comp_22: begin
axistreamout_flag_24 <= 1;
_th_comp_cond_22_2_1 <= 1;
th_comp <= th_comp_23;
end
th_comp_23: begin
th_comp <= th_comp_24;
end
th_comp_24: begin
th_comp <= th_comp_25;
end
th_comp_25: begin
th_comp <= th_comp_26;
end
th_comp_26: begin
th_comp <= th_comp_27;
end
th_comp_27: begin
if(_mystream_reduce_stream_oready) begin
th_comp <= th_comp_28;
end
end
th_comp_28: begin
th_comp <= th_comp_29;
end
th_comp_29: begin
th_comp <= th_comp_30;
end
th_comp_30: begin
if(_mystream_bias_stream_oready) begin
th_comp <= th_comp_31;
end
end
th_comp_31: begin
th_comp <= th_comp_32;
end
th_comp_32: begin
if(_mystream_reduce_busy) begin
th_comp <= th_comp_33;
end
end
th_comp_33: begin
th_comp <= th_comp_34;
end
th_comp_34: begin
if(_mystream_bias_busy) begin
th_comp <= th_comp_35;
end
end
th_comp_35: begin
if(!_mystream_reduce_busy) begin
th_comp <= th_comp_36;
end
end
th_comp_36: begin
if(!_mystream_bias_busy) begin
th_comp <= th_comp_37;
end
end
th_comp_37: begin
th_comp <= th_comp_38;
end
th_comp_38: begin
th_comp <= th_comp_1;
end
endcase
end
end
localparam _maxi_read_fsm_1 = 1;
localparam _maxi_read_fsm_2 = 2;
localparam _maxi_read_fsm_3 = 3;
localparam _maxi_read_fsm_4 = 4;
localparam _maxi_read_fsm_5 = 5;
always @(posedge CLK) begin
if(RST) begin
_maxi_read_fsm <= _maxi_read_fsm_init;
_d1__maxi_read_fsm <= _maxi_read_fsm_init;
_maxi_read_cur_global_addr <= 0;
_maxi_read_rest_size <= 0;
_maxi_read_cur_size <= 0;
__maxi_read_fsm_cond_3_0_1 <= 0;
_wvalid_12 <= 0;
_wdata_11 <= 0;
axim_flag_19 <= 0;
__maxi_read_fsm_cond_4_1_1 <= 0;
end else begin
_d1__maxi_read_fsm <= _maxi_read_fsm;
case(_d1__maxi_read_fsm)
_maxi_read_fsm_3: begin
if(__maxi_read_fsm_cond_3_0_1) begin
_wvalid_12 <= 0;
end
end
_maxi_read_fsm_4: begin
if(__maxi_read_fsm_cond_4_1_1) begin
axim_flag_19 <= 0;
end
end
endcase
case(_maxi_read_fsm)
_maxi_read_fsm_init: begin
if(_maxi_read_start) begin
_maxi_read_cur_global_addr <= (_maxi_read_global_addr >> 2) << 2;
_maxi_read_rest_size <= _maxi_read_size;
end
if(_maxi_read_start && (_maxi_read_op_sel == 1)) begin
_maxi_read_fsm <= _maxi_read_fsm_1;
end
end
_maxi_read_fsm_1: begin
if((_maxi_read_rest_size <= 256) && ((_maxi_read_cur_global_addr & 4095) + (_maxi_read_rest_size << 2) >= 4096)) begin
_maxi_read_cur_size <= 4096 - (_maxi_read_cur_global_addr & 4095) >> 2;
_maxi_read_rest_size <= _maxi_read_rest_size - (4096 - (_maxi_read_cur_global_addr & 4095) >> 2);
end else if(_maxi_read_rest_size <= 256) begin
_maxi_read_cur_size <= _maxi_read_rest_size;
_maxi_read_rest_size <= 0;
end else if((_maxi_read_cur_global_addr & 4095) + 1024 >= 4096) begin
_maxi_read_cur_size <= 4096 - (_maxi_read_cur_global_addr & 4095) >> 2;
_maxi_read_rest_size <= _maxi_read_rest_size - (4096 - (_maxi_read_cur_global_addr & 4095) >> 2);
end else begin
_maxi_read_cur_size <= 256;
_maxi_read_rest_size <= _maxi_read_rest_size - 256;
end
_maxi_read_fsm <= _maxi_read_fsm_2;
end
_maxi_read_fsm_2: begin
if(maxi_arready || !maxi_arvalid) begin
_maxi_read_fsm <= _maxi_read_fsm_3;
end
end
_maxi_read_fsm_3: begin
__maxi_read_fsm_cond_3_0_1 <= 1;
if(maxi_rready && maxi_rvalid && (_maxi_read_op_sel == 1)) begin
_wdata_11 <= maxi_rdata;
_wvalid_12 <= 1;
end
if(maxi_rready && maxi_rvalid && maxi_rlast) begin
_maxi_read_cur_global_addr <= _maxi_read_cur_global_addr + (_maxi_read_cur_size << 2);
end
if(maxi_rready && maxi_rvalid && maxi_rlast && (_maxi_read_rest_size > 0)) begin
_maxi_read_fsm <= _maxi_read_fsm_1;
end
if(maxi_rready && maxi_rvalid && maxi_rlast && (_maxi_read_rest_size == 0)) begin
_maxi_read_fsm <= _maxi_read_fsm_4;
end
end
_maxi_read_fsm_4: begin
axim_flag_19 <= 1;
__maxi_read_fsm_cond_4_1_1 <= 1;
_maxi_read_fsm <= _maxi_read_fsm_5;
end
_maxi_read_fsm_5: begin
_maxi_read_fsm <= _maxi_read_fsm_init;
end
endcase
end
end
localparam _axi_in_read_fsm_1 = 1;
localparam _axi_in_read_fsm_2 = 2;
localparam _axi_in_read_fsm_3 = 3;
always @(posedge CLK) begin
if(RST) begin
_axi_in_read_fsm <= _axi_in_read_fsm_init;
_d1__axi_in_read_fsm <= _axi_in_read_fsm_init;
_axi_in_read_rest_size <= 0;
axistreamin_flag_23 <= 0;
__axi_in_read_fsm_cond_2_0_1 <= 0;
end else begin
_d1__axi_in_read_fsm <= _axi_in_read_fsm;
case(_d1__axi_in_read_fsm)
_axi_in_read_fsm_2: begin
if(__axi_in_read_fsm_cond_2_0_1) begin
axistreamin_flag_23 <= 0;
end
end
endcase
case(_axi_in_read_fsm)
_axi_in_read_fsm_init: begin
if(_axi_in_read_start) begin
_axi_in_read_rest_size <= _axi_in_read_size;
end
if(_axi_in_read_start && (_axi_in_read_op_sel == 1)) begin
_axi_in_read_fsm <= _axi_in_read_fsm_1;
end
end
_axi_in_read_fsm_1: begin
if(axi_in_tready && axi_in_tvalid && (_axi_in_read_op_sel == 1)) begin
_axi_in_read_rest_size <= _axi_in_read_rest_size - 1;
end
if(axi_in_tready && axi_in_tvalid && (_axi_in_read_rest_size <= 1)) begin
_axi_in_read_fsm <= _axi_in_read_fsm_2;
end
end
_axi_in_read_fsm_2: begin
axistreamin_flag_23 <= 1;
__axi_in_read_fsm_cond_2_0_1 <= 1;
_axi_in_read_fsm <= _axi_in_read_fsm_3;
end
_axi_in_read_fsm_3: begin
_axi_in_read_fsm <= _axi_in_read_fsm_init;
end
endcase
end
end
localparam _axi_out_write_fsm_1 = 1;
localparam _axi_out_write_fsm_2 = 2;
localparam _axi_out_write_fsm_3 = 3;
always @(posedge CLK) begin
if(RST) begin
_axi_out_write_fsm <= _axi_out_write_fsm_init;
_d1__axi_out_write_fsm <= _axi_out_write_fsm_init;
_axi_out_write_counter <= 0;
_axi_out_write_fifo_counter_25 <= 0;
axistreamout_flag_30 <= 0;
__axi_out_write_fsm_cond_2_0_1 <= 0;
end else begin
_d1__axi_out_write_fsm <= _axi_out_write_fsm;
case(_d1__axi_out_write_fsm)
_axi_out_write_fsm_2: begin
if(__axi_out_write_fsm_cond_2_0_1) begin
axistreamout_flag_30 <= 0;
end
end
endcase
case(_axi_out_write_fsm)
_axi_out_write_fsm_init: begin
if(_axi_out_write_start) begin
_axi_out_write_counter <= _axi_out_write_size;
end
if(_axi_out_write_start) begin
_axi_out_write_fifo_counter_25 <= _axi_out_write_size;
end
if(_axi_out_write_start && (_axi_out_write_op_sel == 1)) begin
_axi_out_write_fsm <= _axi_out_write_fsm_1;
end
end
_axi_out_write_fsm_1: begin
if((_axi_out_write_fsm == 1) && (_axi_out_write_op_sel == 1) && (axi_out_tready || !axi_out_tvalid) && !fifo_c_empty && (_axi_out_write_fifo_counter_25 > 0)) begin
_axi_out_write_fifo_counter_25 <= _axi_out_write_fifo_counter_25 - 1;
end
if(axi_out_tvalid && axi_out_tready) begin
_axi_out_write_counter <= _axi_out_write_counter - 1;
end
if(_axi_out_write_counter == 0) begin
_axi_out_write_fsm <= _axi_out_write_fsm_2;
end
end
_axi_out_write_fsm_2: begin
axistreamout_flag_30 <= 1;
__axi_out_write_fsm_cond_2_0_1 <= 1;
_axi_out_write_fsm <= _axi_out_write_fsm_3;
end
_axi_out_write_fsm_3: begin
_axi_out_write_fsm <= _axi_out_write_fsm_init;
end
endcase
end
end
localparam _mystream_reduce_a_source_fsm_0_1 = 1;
localparam _mystream_reduce_a_source_fsm_0_2 = 2;
always @(posedge CLK) begin
if(RST) begin
_mystream_reduce_a_source_fsm_0 <= _mystream_reduce_a_source_fsm_0_init;
end else begin
case(_mystream_reduce_a_source_fsm_0)
_mystream_reduce_a_source_fsm_0_init: begin
if(_mystream_reduce_source_start && _mystream_reduce_a_source_mode & 5'b10000 && _mystream_reduce_stream_oready) begin
_mystream_reduce_a_source_fsm_0 <= _mystream_reduce_a_source_fsm_0_1;
end
end
_mystream_reduce_a_source_fsm_0_1: begin
if(_mystream_reduce_stream_oready) begin
_mystream_reduce_a_source_fsm_0 <= _mystream_reduce_a_source_fsm_0_2;
end
end
_mystream_reduce_a_source_fsm_0_2: begin
if((_mystream_reduce_a_source_count == 1) && _mystream_reduce_stream_oready) begin
_mystream_reduce_a_source_fsm_0 <= _mystream_reduce_a_source_fsm_0_init;
end
if(_mystream_reduce_source_stop && _mystream_reduce_stream_oready) begin
_mystream_reduce_a_source_fsm_0 <= _mystream_reduce_a_source_fsm_0_init;
end
end
endcase
end
end
localparam _mystream_reduce_sum_sink_fsm_1_1 = 1;
localparam _mystream_reduce_sum_sink_fsm_1_2 = 2;
always @(posedge CLK) begin
if(RST) begin
_mystream_reduce_sum_sink_fsm_1 <= _mystream_reduce_sum_sink_fsm_1_init;
end else begin
case(_mystream_reduce_sum_sink_fsm_1)
_mystream_reduce_sum_sink_fsm_1_init: begin
if(_mystream_reduce_sink_start && _mystream_reduce_sum_sink_mode & 5'b10000 && _mystream_reduce_stream_oready) begin
_mystream_reduce_sum_sink_fsm_1 <= _mystream_reduce_sum_sink_fsm_1_1;
end
end
_mystream_reduce_sum_sink_fsm_1_1: begin
if(_mystream_reduce_stream_oready) begin
_mystream_reduce_sum_sink_fsm_1 <= _mystream_reduce_sum_sink_fsm_1_2;
end
end
_mystream_reduce_sum_sink_fsm_1_2: begin
if(mystream_reduce_sum_valid_data && (_mystream_reduce_sum_sink_count == 1) && _mystream_reduce_stream_oready) begin
_mystream_reduce_sum_sink_fsm_1 <= _mystream_reduce_sum_sink_fsm_1_init;
end
if(_mystream_reduce_sink_stop && _mystream_reduce_stream_oready) begin
_mystream_reduce_sum_sink_fsm_1 <= _mystream_reduce_sum_sink_fsm_1_init;
end
end
endcase
end
end
localparam _mystream_bias_x_source_fsm_0_1 = 1;
localparam _mystream_bias_x_source_fsm_0_2 = 2;
always @(posedge CLK) begin
if(RST) begin
_mystream_bias_x_source_fsm_0 <= _mystream_bias_x_source_fsm_0_init;
end else begin
case(_mystream_bias_x_source_fsm_0)
_mystream_bias_x_source_fsm_0_init: begin
if(_mystream_bias_source_start && _mystream_bias_x_source_mode & 5'b10000 && _mystream_bias_stream_oready) begin
_mystream_bias_x_source_fsm_0 <= _mystream_bias_x_source_fsm_0_1;
end
end
_mystream_bias_x_source_fsm_0_1: begin
if(_mystream_bias_stream_oready) begin
_mystream_bias_x_source_fsm_0 <= _mystream_bias_x_source_fsm_0_2;
end
end
_mystream_bias_x_source_fsm_0_2: begin
if((_mystream_bias_x_source_count == 1) && _mystream_bias_stream_oready) begin
_mystream_bias_x_source_fsm_0 <= _mystream_bias_x_source_fsm_0_init;
end
if(_mystream_bias_source_stop && _mystream_bias_stream_oready) begin
_mystream_bias_x_source_fsm_0 <= _mystream_bias_x_source_fsm_0_init;
end
end
endcase
end
end
localparam _mystream_bias_y_source_fsm_1_1 = 1;
localparam _mystream_bias_y_source_fsm_1_2 = 2;
always @(posedge CLK) begin
if(RST) begin
_mystream_bias_y_source_fsm_1 <= _mystream_bias_y_source_fsm_1_init;
end else begin
case(_mystream_bias_y_source_fsm_1)
_mystream_bias_y_source_fsm_1_init: begin
if(_mystream_bias_source_start && _mystream_bias_y_source_mode & 5'b1 && _mystream_bias_stream_oready) begin
_mystream_bias_y_source_fsm_1 <= _mystream_bias_y_source_fsm_1_1;
end
end
_mystream_bias_y_source_fsm_1_1: begin
if(_mystream_bias_stream_oready) begin
_mystream_bias_y_source_fsm_1 <= _mystream_bias_y_source_fsm_1_2;
end
end
_mystream_bias_y_source_fsm_1_2: begin
if((_mystream_bias_y_source_count == 1) && _mystream_bias_stream_oready) begin
_mystream_bias_y_source_fsm_1 <= _mystream_bias_y_source_fsm_1_init;
end
if(_mystream_bias_source_stop && _mystream_bias_stream_oready) begin
_mystream_bias_y_source_fsm_1 <= _mystream_bias_y_source_fsm_1_init;
end
end
endcase
end
end
localparam _mystream_bias_z_sink_fsm_2_1 = 1;
localparam _mystream_bias_z_sink_fsm_2_2 = 2;
always @(posedge CLK) begin
if(RST) begin
_mystream_bias_z_sink_fsm_2 <= _mystream_bias_z_sink_fsm_2_init;
end else begin
case(_mystream_bias_z_sink_fsm_2)
_mystream_bias_z_sink_fsm_2_init: begin
if(_mystream_bias_sink_start && _mystream_bias_z_sink_mode & 5'b10000 && _mystream_bias_stream_oready) begin
_mystream_bias_z_sink_fsm_2 <= _mystream_bias_z_sink_fsm_2_1;
end
end
_mystream_bias_z_sink_fsm_2_1: begin
if(_mystream_bias_stream_oready) begin
_mystream_bias_z_sink_fsm_2 <= _mystream_bias_z_sink_fsm_2_2;
end
end
_mystream_bias_z_sink_fsm_2_2: begin
if((_mystream_bias_z_sink_count == 1) && _mystream_bias_stream_oready) begin
_mystream_bias_z_sink_fsm_2 <= _mystream_bias_z_sink_fsm_2_init;
end
if(_mystream_bias_sink_stop && _mystream_bias_stream_oready) begin
_mystream_bias_z_sink_fsm_2 <= _mystream_bias_z_sink_fsm_2_init;
end
end
endcase
end
end
endmodule
module fifo_a
(
input CLK,
input RST,
input fifo_a_enq,
input [32-1:0] fifo_a_wdata,
output fifo_a_full,
output fifo_a_almost_full,
input fifo_a_deq,
output [32-1:0] fifo_a_rdata,
output fifo_a_empty,
output fifo_a_almost_empty
);
reg [32-1:0] mem [0:256-1];
reg [8-1:0] head;
reg [8-1:0] tail;
wire is_empty;
wire is_almost_empty;
wire is_full;
wire is_almost_full;
assign is_empty = head == tail;
assign is_almost_empty = head == (tail + 1 & 255);
assign is_full = (head + 1 & 255) == tail;
assign is_almost_full = (head + 2 & 255) == tail;
reg [32-1:0] rdata_reg;
assign fifo_a_full = is_full;
assign fifo_a_almost_full = is_almost_full || is_full;
assign fifo_a_empty = is_empty;
assign fifo_a_almost_empty = is_almost_empty || is_empty;
assign fifo_a_rdata = rdata_reg;
always @(posedge CLK) begin
if(RST) begin
head <= 0;
rdata_reg <= 0;
tail <= 0;
end else begin
if(fifo_a_enq && !is_full) begin
mem[head] <= fifo_a_wdata;
head <= head + 1;
end
if(fifo_a_deq && !is_empty) begin
rdata_reg <= mem[tail];
tail <= tail + 1;
end
end
end
endmodule
module fifo_b
(
input CLK,
input RST,
input fifo_b_enq,
input [32-1:0] fifo_b_wdata,
output fifo_b_full,
output fifo_b_almost_full,
input fifo_b_deq,
output [32-1:0] fifo_b_rdata,
output fifo_b_empty,
output fifo_b_almost_empty
);
reg [32-1:0] mem [0:256-1];
reg [8-1:0] head;
reg [8-1:0] tail;
wire is_empty;
wire is_almost_empty;
wire is_full;
wire is_almost_full;
assign is_empty = head == tail;
assign is_almost_empty = head == (tail + 1 & 255);
assign is_full = (head + 1 & 255) == tail;
assign is_almost_full = (head + 2 & 255) == tail;
reg [32-1:0] rdata_reg;
assign fifo_b_full = is_full;
assign fifo_b_almost_full = is_almost_full || is_full;
assign fifo_b_empty = is_empty;
assign fifo_b_almost_empty = is_almost_empty || is_empty;
assign fifo_b_rdata = rdata_reg;
always @(posedge CLK) begin
if(RST) begin
head <= 0;
rdata_reg <= 0;
tail <= 0;
end else begin
if(fifo_b_enq && !is_full) begin
mem[head] <= fifo_b_wdata;
head <= head + 1;
end
if(fifo_b_deq && !is_empty) begin
rdata_reg <= mem[tail];
tail <= tail + 1;
end
end
end
endmodule
module fifo_c
(
input CLK,
input RST,
input fifo_c_enq,
input [32-1:0] fifo_c_wdata,
output fifo_c_full,
output fifo_c_almost_full,
input fifo_c_deq,
output [32-1:0] fifo_c_rdata,
output fifo_c_empty,
output fifo_c_almost_empty
);
reg [32-1:0] mem [0:256-1];
reg [8-1:0] head;
reg [8-1:0] tail;
wire is_empty;
wire is_almost_empty;
wire is_full;
wire is_almost_full;
assign is_empty = head == tail;
assign is_almost_empty = head == (tail + 1 & 255);
assign is_full = (head + 1 & 255) == tail;
assign is_almost_full = (head + 2 & 255) == tail;
reg [32-1:0] rdata_reg;
assign fifo_c_full = is_full;
assign fifo_c_almost_full = is_almost_full || is_full;
assign fifo_c_empty = is_empty;
assign fifo_c_almost_empty = is_almost_empty || is_empty;
assign fifo_c_rdata = rdata_reg;
always @(posedge CLK) begin
if(RST) begin
head <= 0;
rdata_reg <= 0;
tail <= 0;
end else begin
if(fifo_c_enq && !is_full) begin
mem[head] <= fifo_c_wdata;
head <= head + 1;
end
if(fifo_c_deq && !is_empty) begin
rdata_reg <= mem[tail];
tail <= tail + 1;
end
end
end
endmodule
module ram_b
(
input CLK,
input [10-1:0] ram_b_0_addr,
output [32-1:0] ram_b_0_rdata,
input [32-1:0] ram_b_0_wdata,
input ram_b_0_wenable,
input ram_b_0_enable
);
reg [32-1:0] ram_b_0_rdata_out;
assign ram_b_0_rdata = ram_b_0_rdata_out;
reg [32-1:0] mem [0:1024-1];
always @(posedge CLK) begin
if(ram_b_0_enable) begin
if(ram_b_0_wenable) begin
mem[ram_b_0_addr] <= ram_b_0_wdata;
ram_b_0_rdata_out <= ram_b_0_wdata;
end else begin
ram_b_0_rdata_out <= mem[ram_b_0_addr];
end
end
end
endmodule
module multiplier_0
(
input CLK,
input update,
input [32-1:0] a,
input [32-1:0] b,
output [64-1:0] c
);
multiplier_core_0
mult
(
.CLK(CLK),
.update(update),
.a(a),
.b(b),
.c(c)
);
endmodule
module multiplier_core_0
(
input CLK,
input update,
input [32-1:0] a,
input [32-1:0] b,
output [64-1:0] c
);
reg signed [32-1:0] _a;
reg signed [32-1:0] _b;
wire signed [64-1:0] _mul;
reg signed [64-1:0] _pipe_mul0;
assign _mul = _a * _b;
assign c = _pipe_mul0;
always @(posedge CLK) begin
if(update) begin
_a <= a;
_b <= b;
_pipe_mul0 <= _mul;
end
end
endmodule
"""
def test(request):
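    # Generate RTL for the thread_stream_axi_stream_fifo_ipxact design (honoring
    # the simulator selected via the --sim option) and compare it with the golden
    # Verilog above; the expected source is round-tripped through pyverilog's
    # parser and code generator so both sides share the same formatting.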
veriloggen.reset()
simtype = request.config.getoption('--sim')
code = thread_stream_axi_stream_fifo_ipxact.run(filename=None, simtype=simtype,
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
```
#### File: fsm_/state/test_fsm_state.py
```python
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import fsm_state
expected_verilog = """
module test;
reg CLK;
reg RST;
wire valid;
blinkled
uut
(
.CLK(CLK),
.RST(RST),
.valid(valid)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#1000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST,
output reg valid
);
reg [8-1:0] counter;
reg [32-1:0] fsm;
localparam fsm_init = 0;
localparam fsm_1 = 1;
localparam fsm_2 = 2;
localparam fsm_3 = 3;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
valid <= 0;
counter <= 0;
end else begin
if(counter <= 255) begin
counter <= counter + 1;
end else begin
counter <= 0;
end
case(fsm)
fsm_init: begin
if(counter == 10) begin
valid <= 0;
end else begin
valid <= 1;
end
if(counter == 40) begin
valid <= 0;
end else begin
valid <= 1;
end
if(valid) begin
fsm <= fsm_1;
end
end
fsm_1: begin
if(counter == 20) begin
valid <= 0;
end else begin
valid <= 1;
end
if(valid) begin
fsm <= fsm_2;
end
end
fsm_2: begin
if(counter == 30) begin
valid <= 0;
end else begin
valid <= 1;
end
if(counter[0] == 0) begin
fsm <= fsm_3;
end
if(!(counter[0] == 0) && (counter[1] == 1)) begin
fsm <= fsm_1;
end
if(!(counter[0] == 0) && !(counter[1] == 1)) begin
fsm <= fsm_2;
end
end
fsm_3: begin
fsm <= fsm_init;
end
endcase
end
end
endmodule
"""
def test():
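    # Build the fsm_state test module and check its emitted Verilog against the
    # golden output above, normalized through a pyverilog parse/codegen round trip.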
veriloggen.reset()
test_module = fsm_state.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
```
#### File: seq_/merge/test_seq_merge.py
```python
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import seq_merge
expected_verilog = """
module test;
reg CLK;
reg RST;
blinkled
uut
(
.CLK(CLK),
.RST(RST)
);
reg reset_done;
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
reset_done = 0;
#100;
RST = 1;
#100;
RST = 0;
#1000;
reset_done = 1;
@(posedge CLK);
#1;
#10000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST
);
reg [32-1:0] x;
reg [32-1:0] y;
reg [32-1:0] z;
reg _seq_x_cond_0_1;
reg _seq_y_cond_0_1;
reg _seq_y_cond_0_2;
reg [32-1:0] _x_1;
reg [32-1:0] _y_1;
always @(posedge CLK) begin
if(RST) begin
x <= 0;
_seq_x_cond_0_1 <= 0;
_x_1 <= 0;
y <= 0;
_seq_y_cond_0_1 <= 0;
_seq_y_cond_0_2 <= 0;
_y_1 <= 0;
z <= 0;
end else begin
if(_seq_y_cond_0_2) begin
y <= 0;
end
if(_seq_x_cond_0_1) begin
x <= 0;
end
_seq_y_cond_0_2 <= _seq_y_cond_0_1;
if(x < 16) begin
x <= x + 1;
end
_seq_x_cond_0_1 <= x == 16;
_x_1 <= x;
if(y < 32) begin
y <= y + 1;
end
_seq_y_cond_0_1 <= y == 32;
_y_1 <= y;
z <= _x_1 + _y_1;
end
end
endmodule
"""
def test():
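    # Same check for the seq_merge design: the emitted Verilog must match the
    # golden source once the expected text is normalized by pyverilog.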
veriloggen.reset()
test_module = seq_merge.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
```
#### File: thread_/fixed_tmp_variable/test_thread_fixed_tmp_variable.py
```python
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import thread_fixed_tmp_variable
expected_verilog = """
module test;
reg CLK;
reg RST;
blinkled
uut
(
.CLK(CLK),
.RST(RST)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#10000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST
);
reg [8-1:0] LED;
reg signed [8-1:0] count;
reg [32-1:0] th_blink;
localparam th_blink_init = 0;
reg signed [32-1:0] _th_blink_times_0;
reg signed [32-1:0] _th_blink_i_1;
reg signed [33-1:0] _th_blink_next_val_2;
always @(posedge CLK) begin
if(RST) begin
count <= 0;
end else begin
count <= count + 'sd8;
end
end
localparam th_blink_1 = 1;
localparam th_blink_2 = 2;
localparam th_blink_3 = 3;
localparam th_blink_4 = 4;
localparam th_blink_5 = 5;
localparam th_blink_6 = 6;
localparam th_blink_7 = 7;
localparam th_blink_8 = 8;
always @(posedge CLK) begin
if(RST) begin
th_blink <= th_blink_init;
_th_blink_times_0 <= 0;
LED <= 0;
_th_blink_i_1 <= 0;
end else begin
case(th_blink)
th_blink_init: begin
_th_blink_times_0 <= 10;
th_blink <= th_blink_1;
end
th_blink_1: begin
LED <= 0;
th_blink <= th_blink_2;
end
th_blink_2: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_3;
end
th_blink_3: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_4;
end else begin
th_blink <= th_blink_8;
end
end
th_blink_4: begin
_th_blink_next_val_2 <= count + 'sd24;
th_blink <= th_blink_5;
end
th_blink_5: begin
LED <= _th_blink_next_val_2 >>> 3;
th_blink <= th_blink_6;
end
th_blink_6: begin
$display("led = %d", LED);
th_blink <= th_blink_7;
end
th_blink_7: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_3;
end
endcase
end
end
endmodule
"""
def test():
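    # Verify the thread_fixed_tmp_variable design against its golden Verilog,
    # using the same pyverilog round-trip normalization.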
veriloggen.reset()
test_module = thread_fixed_tmp_variable.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
```
#### File: thread_/multithread_run/test_thread_multithread_run.py
```python
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import thread_multithread_run
expected_verilog = """
module test;
reg CLK;
reg RST;
wire [8-1:0] LED;
blinkled
uut
(
.CLK(CLK),
.RST(RST),
.LED(LED)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#10000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST,
output reg [8-1:0] LED
);
reg [8-1:0] count;
reg [32-1:0] th_blink;
localparam th_blink_init = 0;
reg signed [32-1:0] _th_blink_times_0;
reg signed [32-1:0] _th_blink_inc_1;
reg [32-1:0] th_countup;
localparam th_countup_init = 0;
reg _th_countup_called;
reg signed [32-1:0] _th_countup_times_2;
reg signed [32-1:0] _th_countup_inc_3;
reg signed [32-1:0] _th_countup_i_4;
reg signed [32-1:0] _th_blink_i_5;
localparam th_blink_1 = 1;
localparam th_blink_2 = 2;
localparam th_blink_3 = 3;
localparam th_blink_4 = 4;
localparam th_blink_5 = 5;
localparam th_blink_6 = 6;
localparam th_blink_7 = 7;
localparam th_blink_8 = 8;
always @(posedge CLK) begin
if(RST) begin
th_blink <= th_blink_init;
_th_blink_times_0 <= 0;
_th_blink_inc_1 <= 0;
LED <= 0;
_th_blink_i_5 <= 0;
end else begin
case(th_blink)
th_blink_init: begin
_th_blink_times_0 <= 10;
_th_blink_inc_1 <= 1;
th_blink <= th_blink_1;
end
th_blink_1: begin
th_blink <= th_blink_2;
end
th_blink_2: begin
LED <= 0;
th_blink <= th_blink_3;
end
th_blink_3: begin
_th_blink_i_5 <= 0;
th_blink <= th_blink_4;
end
th_blink_4: begin
if(_th_blink_i_5 < _th_blink_times_0) begin
th_blink <= th_blink_5;
end else begin
th_blink <= th_blink_8;
end
end
th_blink_5: begin
LED <= LED + _th_blink_inc_1;
th_blink <= th_blink_6;
end
th_blink_6: begin
$display(" led = %d", LED);
th_blink <= th_blink_7;
end
th_blink_7: begin
_th_blink_i_5 <= _th_blink_i_5 + 1;
th_blink <= th_blink_4;
end
endcase
end
end
localparam th_countup_1 = 1;
localparam th_countup_2 = 2;
localparam th_countup_3 = 3;
localparam th_countup_4 = 4;
localparam th_countup_5 = 5;
localparam th_countup_6 = 6;
localparam th_countup_7 = 7;
localparam th_countup_8 = 8;
localparam th_countup_9 = 9;
localparam th_countup_10 = 10;
always @(posedge CLK) begin
if(RST) begin
th_countup <= th_countup_init;
_th_countup_called <= 0;
_th_countup_times_2 <= 0;
_th_countup_inc_3 <= 0;
count <= 0;
_th_countup_i_4 <= 0;
end else begin
case(th_countup)
th_countup_init: begin
if(th_blink == 1) begin
_th_countup_called <= 1;
end
if(th_blink == 1) begin
th_countup <= th_countup_1;
end
end
th_countup_1: begin
th_countup <= th_countup_2;
end
th_countup_2: begin
_th_countup_times_2 <= 20;
th_countup <= th_countup_3;
end
th_countup_3: begin
_th_countup_inc_3 <= 1;
th_countup <= th_countup_4;
end
th_countup_4: begin
count <= 0;
th_countup <= th_countup_5;
end
th_countup_5: begin
_th_countup_i_4 <= 0;
th_countup <= th_countup_6;
end
th_countup_6: begin
if(_th_countup_i_4 < _th_countup_times_2) begin
th_countup <= th_countup_7;
end else begin
th_countup <= th_countup_10;
end
end
th_countup_7: begin
count <= count + _th_countup_inc_3;
th_countup <= th_countup_8;
end
th_countup_8: begin
$display("count = %d", count);
th_countup <= th_countup_9;
end
th_countup_9: begin
_th_countup_i_4 <= _th_countup_i_4 + 1;
th_countup <= th_countup_6;
end
endcase
end
end
endmodule
"""
def test():
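    # Verify the thread_multithread_run design against its golden Verilog.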
veriloggen.reset()
test_module = thread_multithread_run.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
```
#### File: thread_/stream_ram_external_ports/test_thread_stream_ram_external_ports.py
```python
from __future__ import absolute_import
from __future__ import print_function
import os
import veriloggen
import thread_stream_ram_external_ports
expected_verilog = """
module blinkled
(
input CLK,
input RST,
input [10-1:0] ram_a_1_addr,
output [32-1:0] ram_a_1_rdata,
input [32-1:0] ram_a_1_wdata,
input ram_a_1_wenable,
input ram_a_1_enable,
input [10-1:0] ram_b_1_addr,
output [32-1:0] ram_b_1_rdata,
input [32-1:0] ram_b_1_wdata,
input ram_b_1_wenable,
input ram_b_1_enable,
input [10-1:0] ram_c_1_addr,
output [32-1:0] ram_c_1_rdata,
input [32-1:0] ram_c_1_wdata,
input ram_c_1_wenable,
input ram_c_1_enable,
input [32-1:0] saxi_awaddr,
input [4-1:0] saxi_awcache,
input [3-1:0] saxi_awprot,
input saxi_awvalid,
output saxi_awready,
input [32-1:0] saxi_wdata,
input [4-1:0] saxi_wstrb,
input saxi_wvalid,
output saxi_wready,
output [2-1:0] saxi_bresp,
output reg saxi_bvalid,
input saxi_bready,
input [32-1:0] saxi_araddr,
input [4-1:0] saxi_arcache,
input [3-1:0] saxi_arprot,
input saxi_arvalid,
output saxi_arready,
output reg [32-1:0] saxi_rdata,
output [2-1:0] saxi_rresp,
output reg saxi_rvalid,
input saxi_rready
);
wire [10-1:0] ram_a_0_addr;
wire [32-1:0] ram_a_0_rdata;
wire [32-1:0] ram_a_0_wdata;
wire ram_a_0_wenable;
wire ram_a_0_enable;
ram_a
inst_ram_a
(
.CLK(CLK),
.ram_a_0_addr(ram_a_0_addr),
.ram_a_0_rdata(ram_a_0_rdata),
.ram_a_0_wdata(ram_a_0_wdata),
.ram_a_0_wenable(ram_a_0_wenable),
.ram_a_0_enable(ram_a_0_enable),
.ram_a_1_addr(ram_a_1_addr),
.ram_a_1_rdata(ram_a_1_rdata),
.ram_a_1_wdata(ram_a_1_wdata),
.ram_a_1_wenable(ram_a_1_wenable),
.ram_a_1_enable(ram_a_1_enable)
);
wire [10-1:0] ram_b_0_addr;
wire [32-1:0] ram_b_0_rdata;
wire [32-1:0] ram_b_0_wdata;
wire ram_b_0_wenable;
wire ram_b_0_enable;
ram_b
inst_ram_b
(
.CLK(CLK),
.ram_b_0_addr(ram_b_0_addr),
.ram_b_0_rdata(ram_b_0_rdata),
.ram_b_0_wdata(ram_b_0_wdata),
.ram_b_0_wenable(ram_b_0_wenable),
.ram_b_0_enable(ram_b_0_enable),
.ram_b_1_addr(ram_b_1_addr),
.ram_b_1_rdata(ram_b_1_rdata),
.ram_b_1_wdata(ram_b_1_wdata),
.ram_b_1_wenable(ram_b_1_wenable),
.ram_b_1_enable(ram_b_1_enable)
);
wire [10-1:0] ram_c_0_addr;
wire [32-1:0] ram_c_0_rdata;
wire [32-1:0] ram_c_0_wdata;
wire ram_c_0_wenable;
wire ram_c_0_enable;
ram_c
inst_ram_c
(
.CLK(CLK),
.ram_c_0_addr(ram_c_0_addr),
.ram_c_0_rdata(ram_c_0_rdata),
.ram_c_0_wdata(ram_c_0_wdata),
.ram_c_0_wenable(ram_c_0_wenable),
.ram_c_0_enable(ram_c_0_enable),
.ram_c_1_addr(ram_c_1_addr),
.ram_c_1_rdata(ram_c_1_rdata),
.ram_c_1_wdata(ram_c_1_wdata),
.ram_c_1_wenable(ram_c_1_wenable),
.ram_c_1_enable(ram_c_1_enable)
);
assign saxi_bresp = 0;
assign saxi_rresp = 0;
reg signed [32-1:0] _saxi_register_0;
reg signed [32-1:0] _saxi_register_1;
reg signed [32-1:0] _saxi_register_2;
reg signed [32-1:0] _saxi_register_3;
reg _saxi_flag_0;
reg _saxi_flag_1;
reg _saxi_flag_2;
reg _saxi_flag_3;
reg signed [32-1:0] _saxi_resetval_0;
reg signed [32-1:0] _saxi_resetval_1;
reg signed [32-1:0] _saxi_resetval_2;
reg signed [32-1:0] _saxi_resetval_3;
localparam _saxi_maskwidth = 2;
localparam _saxi_mask = { _saxi_maskwidth{ 1'd1 } };
localparam _saxi_shift = 2;
reg [32-1:0] _saxi_register_fsm;
localparam _saxi_register_fsm_init = 0;
reg [32-1:0] addr_0;
reg writevalid_1;
reg readvalid_2;
reg prev_awvalid_3;
reg prev_arvalid_4;
assign saxi_awready = (_saxi_register_fsm == 0) && (!writevalid_1 && !readvalid_2 && !saxi_bvalid && prev_awvalid_3);
assign saxi_arready = (_saxi_register_fsm == 0) && (!readvalid_2 && !writevalid_1 && prev_arvalid_4 && !prev_awvalid_3);
reg [_saxi_maskwidth-1:0] _tmp_5;
wire signed [32-1:0] _tmp_6;
assign _tmp_6 = (_tmp_5 == 0)? _saxi_register_0 :
(_tmp_5 == 1)? _saxi_register_1 :
(_tmp_5 == 2)? _saxi_register_2 :
(_tmp_5 == 3)? _saxi_register_3 : 'hx;
wire _tmp_7;
assign _tmp_7 = (_tmp_5 == 0)? _saxi_flag_0 :
(_tmp_5 == 1)? _saxi_flag_1 :
(_tmp_5 == 2)? _saxi_flag_2 :
(_tmp_5 == 3)? _saxi_flag_3 : 'hx;
wire signed [32-1:0] _tmp_8;
assign _tmp_8 = (_tmp_5 == 0)? _saxi_resetval_0 :
(_tmp_5 == 1)? _saxi_resetval_1 :
(_tmp_5 == 2)? _saxi_resetval_2 :
(_tmp_5 == 3)? _saxi_resetval_3 : 'hx;
reg _saxi_cond_0_1;
assign saxi_wready = _saxi_register_fsm == 3;
reg _mystream_stream_ivalid;
wire _mystream_stream_oready;
wire _mystream_stream_internal_oready;
assign _mystream_stream_internal_oready = 1;
assign _mystream_stream_oready = _mystream_stream_internal_oready;
reg [32-1:0] _mystream_fsm;
localparam _mystream_fsm_init = 0;
wire _mystream_run_flag;
reg _mystream_source_start;
wire _mystream_source_stop;
reg _mystream_source_busy;
wire _mystream_sink_start;
wire _mystream_sink_stop;
wire _mystream_sink_busy;
wire _mystream_busy;
reg _mystream_busy_reg;
wire _mystream_is_root;
assign _mystream_is_root = 1;
reg _mystream_a_idle;
reg [33-1:0] _mystream_a_source_count;
reg [5-1:0] _mystream_a_source_mode;
reg [16-1:0] _mystream_a_source_generator_id;
reg [32-1:0] _mystream_a_source_offset;
reg [33-1:0] _mystream_a_source_size;
reg [32-1:0] _mystream_a_source_stride;
reg [32-1:0] _mystream_a_source_offset_buf;
reg [33-1:0] _mystream_a_source_size_buf;
reg [32-1:0] _mystream_a_source_stride_buf;
reg [8-1:0] _mystream_a_source_sel;
reg [32-1:0] _mystream_a_source_ram_raddr;
reg _mystream_a_source_ram_renable;
wire [32-1:0] _mystream_a_source_ram_rdata;
reg _mystream_a_source_fifo_deq;
wire [32-1:0] _mystream_a_source_fifo_rdata;
reg [32-1:0] _mystream_a_source_empty_data;
reg _mystream_b_idle;
reg [33-1:0] _mystream_b_source_count;
reg [5-1:0] _mystream_b_source_mode;
reg [16-1:0] _mystream_b_source_generator_id;
reg [32-1:0] _mystream_b_source_offset;
reg [33-1:0] _mystream_b_source_size;
reg [32-1:0] _mystream_b_source_stride;
reg [32-1:0] _mystream_b_source_offset_buf;
reg [33-1:0] _mystream_b_source_size_buf;
reg [32-1:0] _mystream_b_source_stride_buf;
reg [8-1:0] _mystream_b_source_sel;
reg [32-1:0] _mystream_b_source_ram_raddr;
reg _mystream_b_source_ram_renable;
wire [32-1:0] _mystream_b_source_ram_rdata;
reg _mystream_b_source_fifo_deq;
wire [32-1:0] _mystream_b_source_fifo_rdata;
reg [32-1:0] _mystream_b_source_empty_data;
reg [33-1:0] _mystream_c_sink_count;
reg [5-1:0] _mystream_c_sink_mode;
reg [16-1:0] _mystream_c_sink_generator_id;
reg [32-1:0] _mystream_c_sink_offset;
reg [33-1:0] _mystream_c_sink_size;
reg [32-1:0] _mystream_c_sink_stride;
reg [32-1:0] _mystream_c_sink_offset_buf;
reg [33-1:0] _mystream_c_sink_size_buf;
reg [32-1:0] _mystream_c_sink_stride_buf;
reg [8-1:0] _mystream_c_sink_sel;
reg [32-1:0] _mystream_c_sink_waddr;
reg _mystream_c_sink_wenable;
reg [32-1:0] _mystream_c_sink_wdata;
reg _mystream_c_sink_fifo_enq;
reg [32-1:0] _mystream_c_sink_fifo_wdata;
reg [32-1:0] _mystream_c_sink_immediate;
reg [32-1:0] th_comp;
localparam th_comp_init = 0;
reg signed [32-1:0] _th_comp_size_0;
reg signed [32-1:0] _th_comp_offset_1;
reg signed [32-1:0] _th_comp_size_2;
reg signed [32-1:0] _th_comp_offset_3;
wire signed [32-1:0] mystream_a_data;
wire signed [32-1:0] mystream_b_data;
reg __mystream_stream_ivalid_1;
reg signed [32-1:0] _plus_data_2;
wire signed [32-1:0] mystream_c_data;
assign mystream_c_data = _plus_data_2;
wire _set_flag_9;
assign _set_flag_9 = th_comp == 7;
assign ram_a_0_addr = (_mystream_stream_oready && _mystream_a_source_ram_renable && (_mystream_a_source_sel == 1))? _mystream_a_source_ram_raddr : 'hx;
assign ram_a_0_enable = (_mystream_stream_oready && _mystream_a_source_ram_renable && (_mystream_a_source_sel == 1))? 1'd1 : 0;
localparam _tmp_10 = 1;
wire [_tmp_10-1:0] _tmp_11;
assign _tmp_11 = _mystream_stream_oready && _mystream_a_source_ram_renable && (_mystream_a_source_sel == 1);
reg [_tmp_10-1:0] __tmp_11_1;
assign _mystream_a_source_ram_rdata = (_mystream_a_source_sel == 1)? ram_a_0_rdata : 'hx;
reg signed [32-1:0] __variable_wdata_0;
assign mystream_a_data = __variable_wdata_0;
reg [32-1:0] _mystream_a_source_fsm_0;
localparam _mystream_a_source_fsm_0_init = 0;
wire _set_flag_12;
assign _set_flag_12 = th_comp == 8;
assign ram_b_0_addr = (_mystream_stream_oready && _mystream_b_source_ram_renable && (_mystream_b_source_sel == 2))? _mystream_b_source_ram_raddr : 'hx;
assign ram_b_0_enable = (_mystream_stream_oready && _mystream_b_source_ram_renable && (_mystream_b_source_sel == 2))? 1'd1 : 0;
localparam _tmp_13 = 1;
wire [_tmp_13-1:0] _tmp_14;
assign _tmp_14 = _mystream_stream_oready && _mystream_b_source_ram_renable && (_mystream_b_source_sel == 2);
reg [_tmp_13-1:0] __tmp_14_1;
assign _mystream_b_source_ram_rdata = (_mystream_b_source_sel == 2)? ram_b_0_rdata : 'hx;
reg signed [32-1:0] __variable_wdata_1;
assign mystream_b_data = __variable_wdata_1;
reg [32-1:0] _mystream_b_source_fsm_1;
localparam _mystream_b_source_fsm_1_init = 0;
wire _set_flag_15;
assign _set_flag_15 = th_comp == 9;
reg _tmp_16;
reg _tmp_17;
reg _tmp_18;
reg signed [32-1:0] _tmp_19;
reg signed [32-1:0] _tmp_20;
reg signed [32-1:0] _tmp_21;
reg signed [32-1:0] _tmp_22;
reg signed [32-1:0] _tmp_23;
reg signed [32-1:0] _tmp_24;
assign ram_c_0_addr = (_mystream_stream_oready && _mystream_c_sink_wenable && (_mystream_c_sink_sel == 3))? _mystream_c_sink_waddr : 'hx;
assign ram_c_0_wdata = (_mystream_stream_oready && _mystream_c_sink_wenable && (_mystream_c_sink_sel == 3))? _mystream_c_sink_wdata : 'hx;
assign ram_c_0_wenable = (_mystream_stream_oready && _mystream_c_sink_wenable && (_mystream_c_sink_sel == 3))? 1'd1 : 0;
assign ram_c_0_enable = (_mystream_stream_oready && _mystream_c_sink_wenable && (_mystream_c_sink_sel == 3))? 1'd1 : 0;
reg [32-1:0] _mystream_c_sink_fsm_2;
localparam _mystream_c_sink_fsm_2_init = 0;
wire _set_flag_25;
assign _set_flag_25 = th_comp == 10;
assign _mystream_run_flag = (_set_flag_25)? 1 : 0;
reg _tmp_26;
reg _tmp_27;
reg _tmp_28;
assign _mystream_source_stop = _mystream_stream_oready && (_mystream_a_idle && _mystream_b_idle && (_mystream_fsm == 3));
localparam _tmp_29 = 1;
wire [_tmp_29-1:0] _tmp_30;
assign _tmp_30 = _mystream_a_idle && _mystream_b_idle && (_mystream_fsm == 3);
reg [_tmp_29-1:0] _tmp_31;
reg _tmp_32;
reg _tmp_33;
reg _tmp_34;
assign _mystream_sink_start = _tmp_34;
reg _tmp_35;
reg _tmp_36;
reg _tmp_37;
assign _mystream_sink_stop = _tmp_37;
reg _tmp_38;
reg _tmp_39;
reg _tmp_40;
assign _mystream_sink_busy = _tmp_40;
reg _tmp_41;
assign _mystream_busy = _mystream_source_busy || _mystream_sink_busy || _mystream_busy_reg;
always @(posedge CLK) begin
if(RST) begin
__tmp_11_1 <= 0;
end else begin
__tmp_11_1 <= _tmp_11;
end
end
always @(posedge CLK) begin
if(RST) begin
__tmp_14_1 <= 0;
end else begin
__tmp_14_1 <= _tmp_14;
end
end
always @(posedge CLK) begin
if(RST) begin
saxi_bvalid <= 0;
prev_awvalid_3 <= 0;
prev_arvalid_4 <= 0;
writevalid_1 <= 0;
readvalid_2 <= 0;
addr_0 <= 0;
saxi_rdata <= 0;
saxi_rvalid <= 0;
_saxi_cond_0_1 <= 0;
_saxi_register_0 <= 0;
_saxi_flag_0 <= 0;
_saxi_register_1 <= 0;
_saxi_flag_1 <= 0;
_saxi_register_2 <= 0;
_saxi_flag_2 <= 0;
_saxi_register_3 <= 0;
_saxi_flag_3 <= 0;
end else begin
if(_saxi_cond_0_1) begin
saxi_rvalid <= 0;
end
if(saxi_bvalid && saxi_bready) begin
saxi_bvalid <= 0;
end
if(saxi_wvalid && saxi_wready) begin
saxi_bvalid <= 1;
end
prev_awvalid_3 <= saxi_awvalid;
prev_arvalid_4 <= saxi_arvalid;
writevalid_1 <= 0;
readvalid_2 <= 0;
if(saxi_awready && saxi_awvalid && !saxi_bvalid) begin
addr_0 <= saxi_awaddr;
writevalid_1 <= 1;
end else if(saxi_arready && saxi_arvalid) begin
addr_0 <= saxi_araddr;
readvalid_2 <= 1;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid)) begin
saxi_rdata <= _tmp_6;
saxi_rvalid <= 1;
end
_saxi_cond_0_1 <= 1;
if(saxi_rvalid && !saxi_rready) begin
saxi_rvalid <= saxi_rvalid;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_7 && (_tmp_5 == 0)) begin
_saxi_register_0 <= _tmp_8;
_saxi_flag_0 <= 0;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_7 && (_tmp_5 == 1)) begin
_saxi_register_1 <= _tmp_8;
_saxi_flag_1 <= 0;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_7 && (_tmp_5 == 2)) begin
_saxi_register_2 <= _tmp_8;
_saxi_flag_2 <= 0;
end
if((_saxi_register_fsm == 1) && (saxi_rready || !saxi_rvalid) && _tmp_7 && (_tmp_5 == 3)) begin
_saxi_register_3 <= _tmp_8;
_saxi_flag_3 <= 0;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_5 == 0)) begin
_saxi_register_0 <= saxi_wdata;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_5 == 1)) begin
_saxi_register_1 <= saxi_wdata;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_5 == 2)) begin
_saxi_register_2 <= saxi_wdata;
end
if((_saxi_register_fsm == 3) && (saxi_wready && saxi_wvalid) && (_tmp_5 == 3)) begin
_saxi_register_3 <= saxi_wdata;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 1) begin
_saxi_register_0 <= 0;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin
_saxi_register_1 <= 0;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin
_saxi_register_2 <= 0;
end
if((_saxi_register_0 == 1) && (th_comp == 2) && 0) begin
_saxi_register_3 <= 0;
end
if((th_comp == 3) && 0) begin
_saxi_register_0 <= 1;
_saxi_flag_0 <= 0;
end
if((th_comp == 3) && 1) begin
_saxi_register_1 <= 1;
_saxi_flag_1 <= 0;
end
if((th_comp == 3) && 0) begin
_saxi_register_2 <= 1;
_saxi_flag_2 <= 0;
end
if((th_comp == 3) && 0) begin
_saxi_register_3 <= 1;
_saxi_flag_3 <= 0;
end
if((th_comp == 13) && 0) begin
_saxi_register_0 <= 0;
_saxi_flag_0 <= 0;
end
if((th_comp == 13) && 1) begin
_saxi_register_1 <= 0;
_saxi_flag_1 <= 0;
end
if((th_comp == 13) && 0) begin
_saxi_register_2 <= 0;
_saxi_flag_2 <= 0;
end
if((th_comp == 13) && 0) begin
_saxi_register_3 <= 0;
_saxi_flag_3 <= 0;
end
end
end
localparam _saxi_register_fsm_1 = 1;
localparam _saxi_register_fsm_2 = 2;
localparam _saxi_register_fsm_3 = 3;
always @(posedge CLK) begin
if(RST) begin
_saxi_register_fsm <= _saxi_register_fsm_init;
end else begin
case(_saxi_register_fsm)
_saxi_register_fsm_init: begin
if(readvalid_2 || writevalid_1) begin
_tmp_5 <= (addr_0 >> _saxi_shift) & _saxi_mask;
end
if(readvalid_2) begin
_saxi_register_fsm <= _saxi_register_fsm_1;
end
if(writevalid_1) begin
_saxi_register_fsm <= _saxi_register_fsm_3;
end
end
_saxi_register_fsm_1: begin
if(saxi_rready && saxi_rvalid) begin
_saxi_register_fsm <= _saxi_register_fsm_init;
end
if((saxi_rready || !saxi_rvalid) && !(saxi_rready && saxi_rvalid)) begin
_saxi_register_fsm <= _saxi_register_fsm_2;
end
end
_saxi_register_fsm_2: begin
if(saxi_rready && saxi_rvalid) begin
_saxi_register_fsm <= _saxi_register_fsm_init;
end
end
_saxi_register_fsm_3: begin
if(saxi_wready && saxi_wvalid) begin
_saxi_register_fsm <= _saxi_register_fsm_init;
end
end
endcase
end
end
always @(posedge CLK) begin
if(RST) begin
_mystream_a_source_ram_renable <= 0;
_mystream_a_source_fifo_deq <= 0;
_mystream_a_idle <= 1;
_mystream_b_source_ram_renable <= 0;
_mystream_b_source_fifo_deq <= 0;
_mystream_b_idle <= 1;
_mystream_c_sink_wenable <= 0;
_mystream_c_sink_fifo_enq <= 0;
__mystream_stream_ivalid_1 <= 0;
_plus_data_2 <= 0;
_mystream_a_source_mode <= 5'b0;
_mystream_a_source_offset <= 0;
_mystream_a_source_size <= 0;
_mystream_a_source_stride <= 0;
_mystream_a_source_sel <= 0;
_mystream_a_source_offset_buf <= 0;
_mystream_a_source_size_buf <= 0;
_mystream_a_source_stride_buf <= 0;
__variable_wdata_0 <= 0;
_mystream_a_source_ram_raddr <= 0;
_mystream_a_source_count <= 0;
_mystream_b_source_mode <= 5'b0;
_mystream_b_source_offset <= 0;
_mystream_b_source_size <= 0;
_mystream_b_source_stride <= 0;
_mystream_b_source_sel <= 0;
_mystream_b_source_offset_buf <= 0;
_mystream_b_source_size_buf <= 0;
_mystream_b_source_stride_buf <= 0;
__variable_wdata_1 <= 0;
_mystream_b_source_ram_raddr <= 0;
_mystream_b_source_count <= 0;
_tmp_16 <= 0;
_tmp_17 <= 0;
_tmp_18 <= 0;
_tmp_19 <= 0;
_tmp_20 <= 0;
_tmp_21 <= 0;
_tmp_22 <= 0;
_tmp_23 <= 0;
_tmp_24 <= 0;
_mystream_c_sink_mode <= 5'b0;
_mystream_c_sink_offset <= 0;
_mystream_c_sink_size <= 0;
_mystream_c_sink_stride <= 0;
_mystream_c_sink_sel <= 0;
_mystream_c_sink_offset_buf <= 0;
_mystream_c_sink_size_buf <= 0;
_mystream_c_sink_stride_buf <= 0;
_mystream_c_sink_waddr <= 0;
_mystream_c_sink_count <= 0;
_mystream_c_sink_wdata <= 0;
_tmp_26 <= 0;
_tmp_27 <= 0;
_tmp_28 <= 0;
_tmp_31 <= 0;
_tmp_32 <= 0;
_tmp_33 <= 0;
_tmp_34 <= 0;
_tmp_35 <= 0;
_tmp_36 <= 0;
_tmp_37 <= 0;
_tmp_38 <= 0;
_tmp_39 <= 0;
_tmp_40 <= 0;
_tmp_41 <= 0;
_mystream_busy_reg <= 0;
end else begin
if(_mystream_stream_oready) begin
_mystream_a_source_ram_renable <= 0;
_mystream_a_source_fifo_deq <= 0;
end
_mystream_a_idle <= _mystream_a_idle;
if(_mystream_stream_oready) begin
_mystream_b_source_ram_renable <= 0;
_mystream_b_source_fifo_deq <= 0;
end
_mystream_b_idle <= _mystream_b_idle;
if(_mystream_stream_oready) begin
_mystream_c_sink_wenable <= 0;
_mystream_c_sink_fifo_enq <= 0;
end
if(_mystream_stream_oready) begin
__mystream_stream_ivalid_1 <= _mystream_stream_ivalid;
end
if(_mystream_stream_oready) begin
_plus_data_2 <= mystream_a_data + mystream_b_data;
end
if(_set_flag_9) begin
_mystream_a_source_mode <= 5'b1;
_mystream_a_source_offset <= _th_comp_offset_3;
_mystream_a_source_size <= _th_comp_size_2;
_mystream_a_source_stride <= 1;
end
if(_set_flag_9) begin
_mystream_a_source_sel <= 1;
end
if(_mystream_source_start && _mystream_a_source_mode & 5'b1 && _mystream_stream_oready) begin
_mystream_a_source_offset_buf <= _mystream_a_source_offset;
_mystream_a_source_size_buf <= _mystream_a_source_size;
_mystream_a_source_stride_buf <= _mystream_a_source_stride;
end
if(_mystream_stream_oready && _mystream_source_busy && _mystream_is_root) begin
__variable_wdata_0 <= _mystream_a_source_ram_rdata;
end
if((_mystream_a_source_fsm_0 == 1) && _mystream_stream_oready) begin
_mystream_a_idle <= 0;
_mystream_a_source_ram_raddr <= _mystream_a_source_offset_buf;
_mystream_a_source_ram_renable <= 1;
_mystream_a_source_count <= _mystream_a_source_size_buf;
end
if((_mystream_a_source_fsm_0 == 2) && _mystream_stream_oready) begin
_mystream_a_source_ram_raddr <= _mystream_a_source_ram_raddr + _mystream_a_source_stride_buf;
_mystream_a_source_ram_renable <= 1;
_mystream_a_source_count <= _mystream_a_source_count - 1;
end
if((_mystream_a_source_fsm_0 == 2) && (_mystream_a_source_count == 1) && _mystream_stream_oready) begin
_mystream_a_source_ram_renable <= 0;
_mystream_a_idle <= 1;
end
if((_mystream_a_source_fsm_0 == 2) && _mystream_source_stop && _mystream_stream_oready) begin
_mystream_a_source_ram_renable <= 0;
_mystream_a_idle <= 1;
end
if(_set_flag_12) begin
_mystream_b_source_mode <= 5'b1;
_mystream_b_source_offset <= _th_comp_offset_3;
_mystream_b_source_size <= _th_comp_size_2;
_mystream_b_source_stride <= 1;
end
if(_set_flag_12) begin
_mystream_b_source_sel <= 2;
end
if(_mystream_source_start && _mystream_b_source_mode & 5'b1 && _mystream_stream_oready) begin
_mystream_b_source_offset_buf <= _mystream_b_source_offset;
_mystream_b_source_size_buf <= _mystream_b_source_size;
_mystream_b_source_stride_buf <= _mystream_b_source_stride;
end
if(_mystream_stream_oready && _mystream_source_busy && _mystream_is_root) begin
__variable_wdata_1 <= _mystream_b_source_ram_rdata;
end
if((_mystream_b_source_fsm_1 == 1) && _mystream_stream_oready) begin
_mystream_b_idle <= 0;
_mystream_b_source_ram_raddr <= _mystream_b_source_offset_buf;
_mystream_b_source_ram_renable <= 1;
_mystream_b_source_count <= _mystream_b_source_size_buf;
end
if((_mystream_b_source_fsm_1 == 2) && _mystream_stream_oready) begin
_mystream_b_source_ram_raddr <= _mystream_b_source_ram_raddr + _mystream_b_source_stride_buf;
_mystream_b_source_ram_renable <= 1;
_mystream_b_source_count <= _mystream_b_source_count - 1;
end
if((_mystream_b_source_fsm_1 == 2) && (_mystream_b_source_count == 1) && _mystream_stream_oready) begin
_mystream_b_source_ram_renable <= 0;
_mystream_b_idle <= 1;
end
if((_mystream_b_source_fsm_1 == 2) && _mystream_source_stop && _mystream_stream_oready) begin
_mystream_b_source_ram_renable <= 0;
_mystream_b_idle <= 1;
end
if(_mystream_stream_oready) begin
_tmp_16 <= _set_flag_15;
end
if(_mystream_stream_oready) begin
_tmp_17 <= _tmp_16;
end
if(_mystream_stream_oready) begin
_tmp_18 <= _tmp_17;
end
if(_mystream_stream_oready) begin
_tmp_19 <= _th_comp_offset_3;
end
if(_mystream_stream_oready) begin
_tmp_20 <= _tmp_19;
end
if(_mystream_stream_oready) begin
_tmp_21 <= _tmp_20;
end
if(_mystream_stream_oready) begin
_tmp_22 <= _th_comp_size_2;
end
if(_mystream_stream_oready) begin
_tmp_23 <= _tmp_22;
end
if(_mystream_stream_oready) begin
_tmp_24 <= _tmp_23;
end
if(_tmp_18) begin
_mystream_c_sink_mode <= 5'b1;
_mystream_c_sink_offset <= _tmp_21;
_mystream_c_sink_size <= _tmp_24;
_mystream_c_sink_stride <= 1;
end
if(_tmp_18) begin
_mystream_c_sink_sel <= 3;
end
if(_mystream_sink_start && _mystream_c_sink_mode & 5'b1 && _mystream_stream_oready) begin
_mystream_c_sink_offset_buf <= _mystream_c_sink_offset;
_mystream_c_sink_size_buf <= _mystream_c_sink_size;
_mystream_c_sink_stride_buf <= _mystream_c_sink_stride;
end
if((_mystream_c_sink_fsm_2 == 1) && _mystream_stream_oready) begin
_mystream_c_sink_waddr <= _mystream_c_sink_offset_buf - _mystream_c_sink_stride_buf;
_mystream_c_sink_count <= _mystream_c_sink_size_buf;
end
if((_mystream_c_sink_fsm_2 == 2) && _mystream_stream_oready) begin
_mystream_c_sink_waddr <= _mystream_c_sink_waddr + _mystream_c_sink_stride_buf;
_mystream_c_sink_wdata <= mystream_c_data;
_mystream_c_sink_wenable <= 1;
_mystream_c_sink_count <= _mystream_c_sink_count - 1;
end
if(_mystream_stream_oready) begin
_tmp_26 <= _mystream_source_start;
end
if(_mystream_stream_oready) begin
_tmp_27 <= _tmp_26;
end
if(_mystream_stream_oready) begin
_tmp_28 <= _tmp_27;
end
if(_mystream_stream_oready) begin
_tmp_31 <= _tmp_30;
end
if(_mystream_stream_oready) begin
_tmp_32 <= _mystream_source_start;
end
if(_mystream_stream_oready) begin
_tmp_33 <= _tmp_32;
end
if(_mystream_stream_oready) begin
_tmp_34 <= _tmp_33;
end
if(_mystream_stream_oready) begin
_tmp_35 <= _mystream_source_stop;
end
if(_mystream_stream_oready) begin
_tmp_36 <= _tmp_35;
end
if(_mystream_stream_oready) begin
_tmp_37 <= _tmp_36;
end
if(_mystream_stream_oready) begin
_tmp_38 <= _mystream_source_busy;
end
if(_mystream_stream_oready) begin
_tmp_39 <= _tmp_38;
end
if(_mystream_stream_oready) begin
_tmp_40 <= _tmp_39;
end
if(_mystream_stream_oready) begin
_tmp_41 <= _mystream_sink_busy;
end
if(!_mystream_sink_busy && _tmp_41) begin
_mystream_busy_reg <= 0;
end
if(_mystream_source_busy) begin
_mystream_busy_reg <= 1;
end
end
end
localparam _mystream_fsm_1 = 1;
localparam _mystream_fsm_2 = 2;
localparam _mystream_fsm_3 = 3;
always @(posedge CLK) begin
if(RST) begin
_mystream_fsm <= _mystream_fsm_init;
_mystream_source_start <= 0;
_mystream_source_busy <= 0;
_mystream_stream_ivalid <= 0;
end else begin
if(_mystream_stream_oready && _tmp_28) begin
_mystream_stream_ivalid <= 1;
end
if(_mystream_stream_oready && _tmp_31) begin
_mystream_stream_ivalid <= 0;
end
case(_mystream_fsm)
_mystream_fsm_init: begin
if(_mystream_run_flag) begin
_mystream_source_start <= 1;
end
if(_mystream_run_flag) begin
_mystream_fsm <= _mystream_fsm_1;
end
end
_mystream_fsm_1: begin
if(_mystream_source_start && _mystream_stream_oready) begin
_mystream_source_start <= 0;
_mystream_source_busy <= 1;
end
if(_mystream_source_start && _mystream_stream_oready) begin
_mystream_fsm <= _mystream_fsm_2;
end
end
_mystream_fsm_2: begin
if(_mystream_stream_oready) begin
_mystream_fsm <= _mystream_fsm_3;
end
end
_mystream_fsm_3: begin
if(_mystream_stream_oready && (_mystream_a_idle && _mystream_b_idle && (_mystream_fsm == 3))) begin
_mystream_source_busy <= 0;
end
if(_mystream_stream_oready && (_mystream_a_idle && _mystream_b_idle && (_mystream_fsm == 3)) && _mystream_run_flag) begin
_mystream_source_start <= 1;
end
if(_mystream_stream_oready && (_mystream_a_idle && _mystream_b_idle && (_mystream_fsm == 3))) begin
_mystream_fsm <= _mystream_fsm_init;
end
if(_mystream_stream_oready && (_mystream_a_idle && _mystream_b_idle && (_mystream_fsm == 3)) && _mystream_run_flag) begin
_mystream_fsm <= _mystream_fsm_1;
end
end
endcase
end
end
localparam th_comp_1 = 1;
localparam th_comp_2 = 2;
localparam th_comp_3 = 3;
localparam th_comp_4 = 4;
localparam th_comp_5 = 5;
localparam th_comp_6 = 6;
localparam th_comp_7 = 7;
localparam th_comp_8 = 8;
localparam th_comp_9 = 9;
localparam th_comp_10 = 10;
localparam th_comp_11 = 11;
localparam th_comp_12 = 12;
localparam th_comp_13 = 13;
localparam th_comp_14 = 14;
localparam th_comp_15 = 15;
localparam th_comp_16 = 16;
always @(posedge CLK) begin
if(RST) begin
th_comp <= th_comp_init;
_th_comp_size_0 <= 0;
_th_comp_offset_1 <= 0;
_th_comp_size_2 <= 0;
_th_comp_offset_3 <= 0;
end else begin
case(th_comp)
th_comp_init: begin
th_comp <= th_comp_1;
end
th_comp_1: begin
if(1) begin
th_comp <= th_comp_2;
end else begin
th_comp <= th_comp_15;
end
end
th_comp_2: begin
if(_saxi_register_0 == 1) begin
th_comp <= th_comp_3;
end
end
th_comp_3: begin
th_comp <= th_comp_4;
end
th_comp_4: begin
_th_comp_size_0 <= _saxi_register_2;
th_comp <= th_comp_5;
end
th_comp_5: begin
_th_comp_offset_1 <= _saxi_register_3;
th_comp <= th_comp_6;
end
th_comp_6: begin
_th_comp_size_2 <= _th_comp_size_0;
_th_comp_offset_3 <= _th_comp_offset_1;
th_comp <= th_comp_7;
end
th_comp_7: begin
th_comp <= th_comp_8;
end
th_comp_8: begin
th_comp <= th_comp_9;
end
th_comp_9: begin
if(_mystream_stream_oready) begin
th_comp <= th_comp_10;
end
end
th_comp_10: begin
th_comp <= th_comp_11;
end
th_comp_11: begin
if(_mystream_busy) begin
th_comp <= th_comp_12;
end
end
th_comp_12: begin
if(!_mystream_busy) begin
th_comp <= th_comp_13;
end
end
th_comp_13: begin
th_comp <= th_comp_14;
end
th_comp_14: begin
th_comp <= th_comp_1;
end
th_comp_15: begin
$finish;
th_comp <= th_comp_16;
end
endcase
end
end
localparam _mystream_a_source_fsm_0_1 = 1;
localparam _mystream_a_source_fsm_0_2 = 2;
always @(posedge CLK) begin
if(RST) begin
_mystream_a_source_fsm_0 <= _mystream_a_source_fsm_0_init;
end else begin
case(_mystream_a_source_fsm_0)
_mystream_a_source_fsm_0_init: begin
if(_mystream_source_start && _mystream_a_source_mode & 5'b1 && _mystream_stream_oready) begin
_mystream_a_source_fsm_0 <= _mystream_a_source_fsm_0_1;
end
end
_mystream_a_source_fsm_0_1: begin
if(_mystream_stream_oready) begin
_mystream_a_source_fsm_0 <= _mystream_a_source_fsm_0_2;
end
end
_mystream_a_source_fsm_0_2: begin
if((_mystream_a_source_count == 1) && _mystream_stream_oready) begin
_mystream_a_source_fsm_0 <= _mystream_a_source_fsm_0_init;
end
if(_mystream_source_stop && _mystream_stream_oready) begin
_mystream_a_source_fsm_0 <= _mystream_a_source_fsm_0_init;
end
end
endcase
end
end
localparam _mystream_b_source_fsm_1_1 = 1;
localparam _mystream_b_source_fsm_1_2 = 2;
always @(posedge CLK) begin
if(RST) begin
_mystream_b_source_fsm_1 <= _mystream_b_source_fsm_1_init;
end else begin
case(_mystream_b_source_fsm_1)
_mystream_b_source_fsm_1_init: begin
if(_mystream_source_start && _mystream_b_source_mode & 5'b1 && _mystream_stream_oready) begin
_mystream_b_source_fsm_1 <= _mystream_b_source_fsm_1_1;
end
end
_mystream_b_source_fsm_1_1: begin
if(_mystream_stream_oready) begin
_mystream_b_source_fsm_1 <= _mystream_b_source_fsm_1_2;
end
end
_mystream_b_source_fsm_1_2: begin
if((_mystream_b_source_count == 1) && _mystream_stream_oready) begin
_mystream_b_source_fsm_1 <= _mystream_b_source_fsm_1_init;
end
if(_mystream_source_stop && _mystream_stream_oready) begin
_mystream_b_source_fsm_1 <= _mystream_b_source_fsm_1_init;
end
end
endcase
end
end
localparam _mystream_c_sink_fsm_2_1 = 1;
localparam _mystream_c_sink_fsm_2_2 = 2;
always @(posedge CLK) begin
if(RST) begin
_mystream_c_sink_fsm_2 <= _mystream_c_sink_fsm_2_init;
end else begin
case(_mystream_c_sink_fsm_2)
_mystream_c_sink_fsm_2_init: begin
if(_mystream_sink_start && _mystream_c_sink_mode & 5'b1 && _mystream_stream_oready) begin
_mystream_c_sink_fsm_2 <= _mystream_c_sink_fsm_2_1;
end
end
_mystream_c_sink_fsm_2_1: begin
if(_mystream_stream_oready) begin
_mystream_c_sink_fsm_2 <= _mystream_c_sink_fsm_2_2;
end
end
_mystream_c_sink_fsm_2_2: begin
if((_mystream_c_sink_count == 1) && _mystream_stream_oready) begin
_mystream_c_sink_fsm_2 <= _mystream_c_sink_fsm_2_init;
end
if(_mystream_sink_stop && _mystream_stream_oready) begin
_mystream_c_sink_fsm_2 <= _mystream_c_sink_fsm_2_init;
end
end
endcase
end
end
endmodule
module ram_a
(
input CLK,
input [10-1:0] ram_a_0_addr,
output [32-1:0] ram_a_0_rdata,
input [32-1:0] ram_a_0_wdata,
input ram_a_0_wenable,
input ram_a_0_enable,
input [10-1:0] ram_a_1_addr,
output [32-1:0] ram_a_1_rdata,
input [32-1:0] ram_a_1_wdata,
input ram_a_1_wenable,
input ram_a_1_enable
);
reg [32-1:0] ram_a_0_rdata_out;
assign ram_a_0_rdata = ram_a_0_rdata_out;
reg [32-1:0] ram_a_1_rdata_out;
assign ram_a_1_rdata = ram_a_1_rdata_out;
reg [32-1:0] mem [0:1024-1];
always @(posedge CLK) begin
if(ram_a_0_enable) begin
if(ram_a_0_wenable) begin
mem[ram_a_0_addr] <= ram_a_0_wdata;
ram_a_0_rdata_out <= ram_a_0_wdata;
end else begin
ram_a_0_rdata_out <= mem[ram_a_0_addr];
end
end
end
always @(posedge CLK) begin
if(ram_a_1_enable) begin
if(ram_a_1_wenable) begin
mem[ram_a_1_addr] <= ram_a_1_wdata;
ram_a_1_rdata_out <= ram_a_1_wdata;
end else begin
ram_a_1_rdata_out <= mem[ram_a_1_addr];
end
end
end
endmodule
module ram_b
(
input CLK,
input [10-1:0] ram_b_0_addr,
output [32-1:0] ram_b_0_rdata,
input [32-1:0] ram_b_0_wdata,
input ram_b_0_wenable,
input ram_b_0_enable,
input [10-1:0] ram_b_1_addr,
output [32-1:0] ram_b_1_rdata,
input [32-1:0] ram_b_1_wdata,
input ram_b_1_wenable,
input ram_b_1_enable
);
reg [32-1:0] ram_b_0_rdata_out;
assign ram_b_0_rdata = ram_b_0_rdata_out;
reg [32-1:0] ram_b_1_rdata_out;
assign ram_b_1_rdata = ram_b_1_rdata_out;
reg [32-1:0] mem [0:1024-1];
always @(posedge CLK) begin
if(ram_b_0_enable) begin
if(ram_b_0_wenable) begin
mem[ram_b_0_addr] <= ram_b_0_wdata;
ram_b_0_rdata_out <= ram_b_0_wdata;
end else begin
ram_b_0_rdata_out <= mem[ram_b_0_addr];
end
end
end
always @(posedge CLK) begin
if(ram_b_1_enable) begin
if(ram_b_1_wenable) begin
mem[ram_b_1_addr] <= ram_b_1_wdata;
ram_b_1_rdata_out <= ram_b_1_wdata;
end else begin
ram_b_1_rdata_out <= mem[ram_b_1_addr];
end
end
end
endmodule
module ram_c
(
input CLK,
input [10-1:0] ram_c_0_addr,
output [32-1:0] ram_c_0_rdata,
input [32-1:0] ram_c_0_wdata,
input ram_c_0_wenable,
input ram_c_0_enable,
input [10-1:0] ram_c_1_addr,
output [32-1:0] ram_c_1_rdata,
input [32-1:0] ram_c_1_wdata,
input ram_c_1_wenable,
input ram_c_1_enable
);
reg [32-1:0] ram_c_0_rdata_out;
assign ram_c_0_rdata = ram_c_0_rdata_out;
reg [32-1:0] ram_c_1_rdata_out;
assign ram_c_1_rdata = ram_c_1_rdata_out;
reg [32-1:0] mem [0:1024-1];
always @(posedge CLK) begin
if(ram_c_0_enable) begin
if(ram_c_0_wenable) begin
mem[ram_c_0_addr] <= ram_c_0_wdata;
ram_c_0_rdata_out <= ram_c_0_wdata;
end else begin
ram_c_0_rdata_out <= mem[ram_c_0_addr];
end
end
end
always @(posedge CLK) begin
if(ram_c_1_enable) begin
if(ram_c_1_wenable) begin
mem[ram_c_1_addr] <= ram_c_1_wdata;
ram_c_1_rdata_out <= ram_c_1_wdata;
end else begin
ram_c_1_rdata_out <= mem[ram_c_1_addr];
end
end
end
endmodule
"""
def test(request):
veriloggen.reset()
simtype = request.config.getoption('--sim')
code = thread_stream_ram_external_ports.run(filename=None, simtype=simtype,
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
```
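Every test in this collection follows the same golden-output pattern: the hand-written `expected_verilog` string is parsed with Pyverilog and re-emitted through its code generator, so that the string comparison against the freshly generated design is insensitive to formatting differences. The sketch below factors that normalization step out into a helper; the helper name `normalize_verilog` is an assumption for illustration only, while the imports and calls are exactly the ones used in the tests above and below.

```python
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator


def normalize_verilog(source):
    """Parse Verilog text and re-emit it through Pyverilog's code generator."""
    parser = VerilogParser()
    codegen = ASTCodeGenerator()
    ast = parser.parse(source)
    return codegen.visit(ast)


# Usage sketch: assert normalize_verilog(expected_verilog) == generated_code
```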
#### File: axi_/slave_read_lite/test_types_axi_slave_read_lite.py
```python
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import types_axi_slave_read_lite
expected_verilog = """
module test;
reg CLK;
reg RST;
reg [32-1:0] myaxi_awaddr;
reg [4-1:0] myaxi_awcache;
reg [3-1:0] myaxi_awprot;
reg myaxi_awvalid;
wire myaxi_awready;
reg [32-1:0] myaxi_wdata;
reg [4-1:0] myaxi_wstrb;
reg myaxi_wvalid;
wire myaxi_wready;
wire [2-1:0] myaxi_bresp;
wire myaxi_bvalid;
reg myaxi_bready;
reg [32-1:0] myaxi_araddr;
reg [4-1:0] myaxi_arcache;
reg [3-1:0] myaxi_arprot;
reg myaxi_arvalid;
wire myaxi_arready;
wire [32-1:0] myaxi_rdata;
wire [2-1:0] myaxi_rresp;
wire myaxi_rvalid;
reg myaxi_rready;
reg [32-1:0] _axi_awaddr;
wire [4-1:0] _axi_awcache;
wire [3-1:0] _axi_awprot;
reg _axi_awvalid;
wire _axi_awready;
reg [32-1:0] _axi_wdata;
reg [4-1:0] _axi_wstrb;
reg _axi_wvalid;
wire _axi_wready;
wire [2-1:0] _axi_bresp;
wire _axi_bvalid;
wire _axi_bready;
reg [32-1:0] _axi_araddr;
wire [4-1:0] _axi_arcache;
wire [3-1:0] _axi_arprot;
reg _axi_arvalid;
wire _axi_arready;
wire [32-1:0] _axi_rdata;
wire [2-1:0] _axi_rresp;
wire _axi_rvalid;
wire _axi_rready;
assign _axi_awcache = 3;
assign _axi_awprot = 0;
assign _axi_bready = 1;
assign _axi_arcache = 3;
assign _axi_arprot = 0;
reg [3-1:0] outstanding_wcount_0;
wire [32-1:0] _tmp_1;
assign _tmp_1 = _axi_awaddr;
always @(*) begin
myaxi_awaddr = _tmp_1;
end
wire [4-1:0] _tmp_2;
assign _tmp_2 = _axi_awcache;
always @(*) begin
myaxi_awcache = _tmp_2;
end
wire [3-1:0] _tmp_3;
assign _tmp_3 = _axi_awprot;
always @(*) begin
myaxi_awprot = _tmp_3;
end
wire _tmp_4;
assign _tmp_4 = _axi_awvalid;
always @(*) begin
myaxi_awvalid = _tmp_4;
end
assign _axi_awready = myaxi_awready;
wire [32-1:0] _tmp_5;
assign _tmp_5 = _axi_wdata;
always @(*) begin
myaxi_wdata = _tmp_5;
end
wire [4-1:0] _tmp_6;
assign _tmp_6 = _axi_wstrb;
always @(*) begin
myaxi_wstrb = _tmp_6;
end
wire _tmp_7;
assign _tmp_7 = _axi_wvalid;
always @(*) begin
myaxi_wvalid = _tmp_7;
end
assign _axi_wready = myaxi_wready;
assign _axi_bresp = myaxi_bresp;
assign _axi_bvalid = myaxi_bvalid;
wire _tmp_8;
assign _tmp_8 = _axi_bready;
always @(*) begin
myaxi_bready = _tmp_8;
end
wire [32-1:0] _tmp_9;
assign _tmp_9 = _axi_araddr;
always @(*) begin
myaxi_araddr = _tmp_9;
end
wire [4-1:0] _tmp_10;
assign _tmp_10 = _axi_arcache;
always @(*) begin
myaxi_arcache = _tmp_10;
end
wire [3-1:0] _tmp_11;
assign _tmp_11 = _axi_arprot;
always @(*) begin
myaxi_arprot = _tmp_11;
end
wire _tmp_12;
assign _tmp_12 = _axi_arvalid;
always @(*) begin
myaxi_arvalid = _tmp_12;
end
assign _axi_arready = myaxi_arready;
assign _axi_rdata = myaxi_rdata;
assign _axi_rresp = myaxi_rresp;
assign _axi_rvalid = myaxi_rvalid;
wire _tmp_13;
assign _tmp_13 = _axi_rready;
always @(*) begin
myaxi_rready = _tmp_13;
end
reg [32-1:0] fsm;
localparam fsm_init = 0;
reg [32-1:0] sum;
reg __axi_cond_0_1;
reg __axi_cond_1_1;
assign _axi_rready = (fsm == 1) || (fsm == 3);
main
uut
(
.CLK(CLK),
.RST(RST),
.myaxi_awaddr(myaxi_awaddr),
.myaxi_awcache(myaxi_awcache),
.myaxi_awprot(myaxi_awprot),
.myaxi_awvalid(myaxi_awvalid),
.myaxi_awready(myaxi_awready),
.myaxi_wdata(myaxi_wdata),
.myaxi_wstrb(myaxi_wstrb),
.myaxi_wvalid(myaxi_wvalid),
.myaxi_wready(myaxi_wready),
.myaxi_bresp(myaxi_bresp),
.myaxi_bvalid(myaxi_bvalid),
.myaxi_bready(myaxi_bready),
.myaxi_araddr(myaxi_araddr),
.myaxi_arcache(myaxi_arcache),
.myaxi_arprot(myaxi_arprot),
.myaxi_arvalid(myaxi_arvalid),
.myaxi_arready(myaxi_arready),
.myaxi_rdata(myaxi_rdata),
.myaxi_rresp(myaxi_rresp),
.myaxi_rvalid(myaxi_rvalid),
.myaxi_rready(myaxi_rready)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
_axi_awaddr = 0;
_axi_awvalid = 0;
_axi_wdata = 0;
_axi_wstrb = 0;
_axi_wvalid = 0;
_axi_araddr = 0;
_axi_arvalid = 0;
outstanding_wcount_0 = 0;
fsm = fsm_init;
sum = 0;
__axi_cond_0_1 = 0;
__axi_cond_1_1 = 0;
#100;
RST = 1;
#100;
RST = 0;
#100000;
$finish;
end
always @(posedge CLK) begin
if(RST) begin
outstanding_wcount_0 <= 0;
_axi_awaddr <= 0;
_axi_awvalid <= 0;
_axi_wdata <= 0;
_axi_wstrb <= 0;
_axi_wvalid <= 0;
_axi_araddr <= 0;
_axi_arvalid <= 0;
__axi_cond_0_1 <= 0;
__axi_cond_1_1 <= 0;
end else begin
if(__axi_cond_0_1) begin
_axi_arvalid <= 0;
end
if(__axi_cond_1_1) begin
_axi_arvalid <= 0;
end
if(_axi_wvalid && _axi_wready && !(_axi_bvalid && _axi_bready) && (outstanding_wcount_0 < 7)) begin
outstanding_wcount_0 <= outstanding_wcount_0 + 1;
end
if(!(_axi_wvalid && _axi_wready) && (_axi_bvalid && _axi_bready) && (outstanding_wcount_0 > 0)) begin
outstanding_wcount_0 <= outstanding_wcount_0 - 1;
end
_axi_awaddr <= 0;
_axi_awvalid <= 0;
_axi_wdata <= 0;
_axi_wstrb <= 0;
_axi_wvalid <= 0;
if((fsm == 0) && (_axi_arready || !_axi_arvalid)) begin
_axi_araddr <= 1024;
_axi_arvalid <= 1;
end
__axi_cond_0_1 <= 1;
if(_axi_arvalid && !_axi_arready) begin
_axi_arvalid <= _axi_arvalid;
end
if((fsm == 2) && (_axi_arready || !_axi_arvalid)) begin
_axi_araddr <= 2048;
_axi_arvalid <= 1;
end
__axi_cond_1_1 <= 1;
if(_axi_arvalid && !_axi_arready) begin
_axi_arvalid <= _axi_arvalid;
end
end
end
localparam fsm_1 = 1;
localparam fsm_2 = 2;
localparam fsm_3 = 3;
localparam fsm_4 = 4;
localparam fsm_5 = 5;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
sum <= 0;
end else begin
case(fsm)
fsm_init: begin
if(_axi_arready || !_axi_arvalid) begin
fsm <= fsm_1;
end
end
fsm_1: begin
if(_axi_rready && _axi_rvalid) begin
sum <= sum + _axi_rdata;
end
if(_axi_rready && _axi_rvalid) begin
fsm <= fsm_2;
end
end
fsm_2: begin
if(_axi_arready || !_axi_arvalid) begin
fsm <= fsm_3;
end
end
fsm_3: begin
if(_axi_rready && _axi_rvalid) begin
sum <= sum + _axi_rdata;
end
if(_axi_rready && _axi_rvalid) begin
fsm <= fsm_4;
end
end
fsm_4: begin
$display("sum=%d expected_sum=%d", sum, 768);
fsm <= fsm_5;
end
endcase
end
end
endmodule
module main
(
input CLK,
input RST,
input [32-1:0] myaxi_awaddr,
input [4-1:0] myaxi_awcache,
input [3-1:0] myaxi_awprot,
input myaxi_awvalid,
output myaxi_awready,
input [32-1:0] myaxi_wdata,
input [4-1:0] myaxi_wstrb,
input myaxi_wvalid,
output myaxi_wready,
output [2-1:0] myaxi_bresp,
output reg myaxi_bvalid,
input myaxi_bready,
input [32-1:0] myaxi_araddr,
input [4-1:0] myaxi_arcache,
input [3-1:0] myaxi_arprot,
input myaxi_arvalid,
output myaxi_arready,
output reg [32-1:0] myaxi_rdata,
output [2-1:0] myaxi_rresp,
output reg myaxi_rvalid,
input myaxi_rready
);
assign myaxi_bresp = 0;
assign myaxi_rresp = 0;
assign myaxi_awready = 0;
assign myaxi_wready = 0;
reg [32-1:0] fsm;
localparam fsm_init = 0;
reg [32-1:0] addr_0;
reg valid_1;
reg prev_arvalid_2;
assign myaxi_arready = (fsm == 0) && !valid_1 && prev_arvalid_2;
reg [32-1:0] rdata;
reg _myaxi_cond_0_1;
always @(posedge CLK) begin
if(RST) begin
myaxi_bvalid <= 0;
prev_arvalid_2 <= 0;
addr_0 <= 0;
valid_1 <= 0;
myaxi_rdata <= 0;
myaxi_rvalid <= 0;
_myaxi_cond_0_1 <= 0;
end else begin
if(_myaxi_cond_0_1) begin
myaxi_rvalid <= 0;
end
if(myaxi_bvalid && myaxi_bready) begin
myaxi_bvalid <= 0;
end
if(myaxi_wvalid && myaxi_wready) begin
myaxi_bvalid <= 1;
end
prev_arvalid_2 <= myaxi_arvalid;
if(myaxi_arready && myaxi_arvalid) begin
addr_0 <= myaxi_araddr;
end
valid_1 <= myaxi_arready && myaxi_arvalid;
if((fsm == 1) && (myaxi_rready || !myaxi_rvalid)) begin
myaxi_rdata <= rdata;
myaxi_rvalid <= 1;
end
_myaxi_cond_0_1 <= 1;
if(myaxi_rvalid && !myaxi_rready) begin
myaxi_rvalid <= myaxi_rvalid;
end
end
end
localparam fsm_1 = 1;
localparam fsm_2 = 2;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
rdata <= 0;
end else begin
case(fsm)
fsm_init: begin
if(valid_1) begin
rdata <= addr_0 >> 2;
end
if(valid_1) begin
fsm <= fsm_1;
end
end
fsm_1: begin
if(myaxi_rready || !myaxi_rvalid) begin
rdata <= rdata + 1;
end
if(myaxi_rready || !myaxi_rvalid) begin
fsm <= fsm_2;
end
end
fsm_2: begin
fsm <= fsm_init;
end
endcase
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = types_axi_slave_read_lite.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
```
#### File: axi_/slave_readwrite_lite_simultaneous/test_types_axi_slave_readwrite_lite_simultaneous.py
```python
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import types_axi_slave_readwrite_lite_simultaneous
expected_verilog = """
module test;
reg CLK;
reg RST;
wire [32-1:0] sum;
reg [32-1:0] myaxi_awaddr;
reg [4-1:0] myaxi_awcache;
reg [3-1:0] myaxi_awprot;
reg myaxi_awvalid;
wire myaxi_awready;
reg [32-1:0] myaxi_wdata;
reg [4-1:0] myaxi_wstrb;
reg myaxi_wvalid;
wire myaxi_wready;
wire [2-1:0] myaxi_bresp;
wire myaxi_bvalid;
reg myaxi_bready;
reg [32-1:0] myaxi_araddr;
reg [4-1:0] myaxi_arcache;
reg [3-1:0] myaxi_arprot;
reg myaxi_arvalid;
wire myaxi_arready;
wire [32-1:0] myaxi_rdata;
wire [2-1:0] myaxi_rresp;
wire myaxi_rvalid;
reg myaxi_rready;
reg [32-1:0] _axi_awaddr;
wire [4-1:0] _axi_awcache;
wire [3-1:0] _axi_awprot;
reg _axi_awvalid;
wire _axi_awready;
reg [32-1:0] _axi_wdata;
reg [4-1:0] _axi_wstrb;
reg _axi_wvalid;
wire _axi_wready;
wire [2-1:0] _axi_bresp;
wire _axi_bvalid;
wire _axi_bready;
reg [32-1:0] _axi_araddr;
wire [4-1:0] _axi_arcache;
wire [3-1:0] _axi_arprot;
reg _axi_arvalid;
wire _axi_arready;
wire [32-1:0] _axi_rdata;
wire [2-1:0] _axi_rresp;
wire _axi_rvalid;
wire _axi_rready;
assign _axi_awcache = 3;
assign _axi_awprot = 0;
assign _axi_bready = 1;
assign _axi_arcache = 3;
assign _axi_arprot = 0;
reg [3-1:0] outstanding_wcount_0;
wire [32-1:0] _tmp_1;
assign _tmp_1 = _axi_awaddr;
always @(*) begin
myaxi_awaddr = _tmp_1;
end
wire [4-1:0] _tmp_2;
assign _tmp_2 = _axi_awcache;
always @(*) begin
myaxi_awcache = _tmp_2;
end
wire [3-1:0] _tmp_3;
assign _tmp_3 = _axi_awprot;
always @(*) begin
myaxi_awprot = _tmp_3;
end
wire _tmp_4;
assign _tmp_4 = _axi_awvalid;
always @(*) begin
myaxi_awvalid = _tmp_4;
end
assign _axi_awready = myaxi_awready;
wire [32-1:0] _tmp_5;
assign _tmp_5 = _axi_wdata;
always @(*) begin
myaxi_wdata = _tmp_5;
end
wire [4-1:0] _tmp_6;
assign _tmp_6 = _axi_wstrb;
always @(*) begin
myaxi_wstrb = _tmp_6;
end
wire _tmp_7;
assign _tmp_7 = _axi_wvalid;
always @(*) begin
myaxi_wvalid = _tmp_7;
end
assign _axi_wready = myaxi_wready;
assign _axi_bresp = myaxi_bresp;
assign _axi_bvalid = myaxi_bvalid;
wire _tmp_8;
assign _tmp_8 = _axi_bready;
always @(*) begin
myaxi_bready = _tmp_8;
end
wire [32-1:0] _tmp_9;
assign _tmp_9 = _axi_araddr;
always @(*) begin
myaxi_araddr = _tmp_9;
end
wire [4-1:0] _tmp_10;
assign _tmp_10 = _axi_arcache;
always @(*) begin
myaxi_arcache = _tmp_10;
end
wire [3-1:0] _tmp_11;
assign _tmp_11 = _axi_arprot;
always @(*) begin
myaxi_arprot = _tmp_11;
end
wire _tmp_12;
assign _tmp_12 = _axi_arvalid;
always @(*) begin
myaxi_arvalid = _tmp_12;
end
assign _axi_arready = myaxi_arready;
assign _axi_rdata = myaxi_rdata;
assign _axi_rresp = myaxi_rresp;
assign _axi_rvalid = myaxi_rvalid;
wire _tmp_13;
assign _tmp_13 = _axi_rready;
always @(*) begin
myaxi_rready = _tmp_13;
end
reg [32-1:0] read_fsm;
localparam read_fsm_init = 0;
reg [32-1:0] rsum;
reg __axi_cond_0_1;
reg __axi_cond_1_1;
assign _axi_rready = (read_fsm == 1) || (read_fsm == 3);
reg [32-1:0] write_fsm;
localparam write_fsm_init = 0;
reg __axi_cond_2_1;
reg [32-1:0] wdata;
reg __axi_cond_3_1;
reg __axi_cond_4_1;
reg __axi_cond_5_1;
main
uut
(
.CLK(CLK),
.RST(RST),
.sum(sum),
.myaxi_awaddr(myaxi_awaddr),
.myaxi_awcache(myaxi_awcache),
.myaxi_awprot(myaxi_awprot),
.myaxi_awvalid(myaxi_awvalid),
.myaxi_awready(myaxi_awready),
.myaxi_wdata(myaxi_wdata),
.myaxi_wstrb(myaxi_wstrb),
.myaxi_wvalid(myaxi_wvalid),
.myaxi_wready(myaxi_wready),
.myaxi_bresp(myaxi_bresp),
.myaxi_bvalid(myaxi_bvalid),
.myaxi_bready(myaxi_bready),
.myaxi_araddr(myaxi_araddr),
.myaxi_arcache(myaxi_arcache),
.myaxi_arprot(myaxi_arprot),
.myaxi_arvalid(myaxi_arvalid),
.myaxi_arready(myaxi_arready),
.myaxi_rdata(myaxi_rdata),
.myaxi_rresp(myaxi_rresp),
.myaxi_rvalid(myaxi_rvalid),
.myaxi_rready(myaxi_rready)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
_axi_awaddr = 0;
_axi_awvalid = 0;
_axi_wdata = 0;
_axi_wstrb = 0;
_axi_wvalid = 0;
_axi_araddr = 0;
_axi_arvalid = 0;
outstanding_wcount_0 = 0;
read_fsm = read_fsm_init;
rsum = 0;
__axi_cond_0_1 = 0;
__axi_cond_1_1 = 0;
write_fsm = write_fsm_init;
__axi_cond_2_1 = 0;
wdata = 100;
__axi_cond_3_1 = 0;
__axi_cond_4_1 = 0;
__axi_cond_5_1 = 0;
#100;
RST = 1;
#100;
RST = 0;
#100000;
$finish;
end
always @(posedge CLK) begin
if(RST) begin
outstanding_wcount_0 <= 0;
_axi_araddr <= 0;
_axi_arvalid <= 0;
__axi_cond_0_1 <= 0;
__axi_cond_1_1 <= 0;
_axi_awaddr <= 0;
_axi_awvalid <= 0;
__axi_cond_2_1 <= 0;
_axi_wdata <= 0;
_axi_wvalid <= 0;
_axi_wstrb <= 0;
__axi_cond_3_1 <= 0;
__axi_cond_4_1 <= 0;
__axi_cond_5_1 <= 0;
end else begin
if(__axi_cond_0_1) begin
_axi_arvalid <= 0;
end
if(__axi_cond_1_1) begin
_axi_arvalid <= 0;
end
if(__axi_cond_2_1) begin
_axi_awvalid <= 0;
end
if(__axi_cond_3_1) begin
_axi_wvalid <= 0;
end
if(__axi_cond_4_1) begin
_axi_awvalid <= 0;
end
if(__axi_cond_5_1) begin
_axi_wvalid <= 0;
end
if(_axi_wvalid && _axi_wready && !(_axi_bvalid && _axi_bready) && (outstanding_wcount_0 < 7)) begin
outstanding_wcount_0 <= outstanding_wcount_0 + 1;
end
if(!(_axi_wvalid && _axi_wready) && (_axi_bvalid && _axi_bready) && (outstanding_wcount_0 > 0)) begin
outstanding_wcount_0 <= outstanding_wcount_0 - 1;
end
if((read_fsm == 0) && (_axi_arready || !_axi_arvalid)) begin
_axi_araddr <= 1024;
_axi_arvalid <= 1;
end
__axi_cond_0_1 <= 1;
if(_axi_arvalid && !_axi_arready) begin
_axi_arvalid <= _axi_arvalid;
end
if((read_fsm == 2) && (_axi_arready || !_axi_arvalid)) begin
_axi_araddr <= 2048;
_axi_arvalid <= 1;
end
__axi_cond_1_1 <= 1;
if(_axi_arvalid && !_axi_arready) begin
_axi_arvalid <= _axi_arvalid;
end
if((write_fsm == 0) && (_axi_awready || !_axi_awvalid)) begin
_axi_awaddr <= 1024;
_axi_awvalid <= 1;
end
__axi_cond_2_1 <= 1;
if(_axi_awvalid && !_axi_awready) begin
_axi_awvalid <= _axi_awvalid;
end
if((write_fsm == 1) && ((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid))) begin
_axi_wdata <= wdata;
_axi_wvalid <= 1;
_axi_wstrb <= { 4{ 1'd1 } };
end
__axi_cond_3_1 <= 1;
if(_axi_wvalid && !_axi_wready) begin
_axi_wvalid <= _axi_wvalid;
end
if((write_fsm == 2) && (_axi_awready || !_axi_awvalid)) begin
_axi_awaddr <= 1024;
_axi_awvalid <= 1;
end
__axi_cond_4_1 <= 1;
if(_axi_awvalid && !_axi_awready) begin
_axi_awvalid <= _axi_awvalid;
end
if((write_fsm == 3) && ((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid))) begin
_axi_wdata <= wdata;
_axi_wvalid <= 1;
_axi_wstrb <= { 4{ 1'd1 } };
end
__axi_cond_5_1 <= 1;
if(_axi_wvalid && !_axi_wready) begin
_axi_wvalid <= _axi_wvalid;
end
end
end
localparam read_fsm_1 = 1;
localparam read_fsm_2 = 2;
localparam read_fsm_3 = 3;
localparam read_fsm_4 = 4;
localparam read_fsm_5 = 5;
always @(posedge CLK) begin
if(RST) begin
read_fsm <= read_fsm_init;
rsum <= 0;
end else begin
case(read_fsm)
read_fsm_init: begin
if(_axi_arready || !_axi_arvalid) begin
read_fsm <= read_fsm_1;
end
end
read_fsm_1: begin
if(_axi_rready && _axi_rvalid) begin
rsum <= rsum + _axi_rdata;
end
if(_axi_rready && _axi_rvalid) begin
read_fsm <= read_fsm_2;
end
end
read_fsm_2: begin
if(_axi_arready || !_axi_arvalid) begin
read_fsm <= read_fsm_3;
end
end
read_fsm_3: begin
if(_axi_rready && _axi_rvalid) begin
rsum <= rsum + _axi_rdata;
end
if(_axi_rready && _axi_rvalid) begin
read_fsm <= read_fsm_4;
end
end
read_fsm_4: begin
$display("rsum=%d expected_rsum=%d", rsum, 768);
read_fsm <= read_fsm_5;
end
endcase
end
end
localparam write_fsm_1 = 1;
localparam write_fsm_2 = 2;
localparam write_fsm_3 = 3;
localparam write_fsm_4 = 4;
localparam write_fsm_5 = 5;
localparam write_fsm_6 = 6;
localparam write_fsm_7 = 7;
localparam write_fsm_8 = 8;
localparam write_fsm_9 = 9;
localparam write_fsm_10 = 10;
localparam write_fsm_11 = 11;
localparam write_fsm_12 = 12;
localparam write_fsm_13 = 13;
localparam write_fsm_14 = 14;
localparam write_fsm_15 = 15;
always @(posedge CLK) begin
if(RST) begin
write_fsm <= write_fsm_init;
wdata <= 100;
end else begin
case(write_fsm)
write_fsm_init: begin
wdata <= 100;
if(_axi_awready || !_axi_awvalid) begin
write_fsm <= write_fsm_1;
end
end
write_fsm_1: begin
if((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid)) begin
write_fsm <= write_fsm_2;
end
end
write_fsm_2: begin
wdata <= 200;
if(_axi_awready || !_axi_awvalid) begin
write_fsm <= write_fsm_3;
end
end
write_fsm_3: begin
if((outstanding_wcount_0 < 6) && (_axi_wready || !_axi_wvalid)) begin
write_fsm <= write_fsm_4;
end
end
write_fsm_4: begin
write_fsm <= write_fsm_5;
end
write_fsm_5: begin
write_fsm <= write_fsm_6;
end
write_fsm_6: begin
write_fsm <= write_fsm_7;
end
write_fsm_7: begin
write_fsm <= write_fsm_8;
end
write_fsm_8: begin
write_fsm <= write_fsm_9;
end
write_fsm_9: begin
write_fsm <= write_fsm_10;
end
write_fsm_10: begin
write_fsm <= write_fsm_11;
end
write_fsm_11: begin
write_fsm <= write_fsm_12;
end
write_fsm_12: begin
write_fsm <= write_fsm_13;
end
write_fsm_13: begin
write_fsm <= write_fsm_14;
end
write_fsm_14: begin
$display("sum=%d expected_sum=%d", sum, 300);
write_fsm <= write_fsm_15;
end
endcase
end
end
endmodule
module main
(
input CLK,
input RST,
output reg [32-1:0] sum,
input [32-1:0] myaxi_awaddr,
input [4-1:0] myaxi_awcache,
input [3-1:0] myaxi_awprot,
input myaxi_awvalid,
output myaxi_awready,
input [32-1:0] myaxi_wdata,
input [4-1:0] myaxi_wstrb,
input myaxi_wvalid,
output myaxi_wready,
output [2-1:0] myaxi_bresp,
output reg myaxi_bvalid,
input myaxi_bready,
input [32-1:0] myaxi_araddr,
input [4-1:0] myaxi_arcache,
input [3-1:0] myaxi_arprot,
input myaxi_arvalid,
output myaxi_arready,
output reg [32-1:0] myaxi_rdata,
output [2-1:0] myaxi_rresp,
output reg myaxi_rvalid,
input myaxi_rready
);
assign myaxi_bresp = 0;
assign myaxi_rresp = 0;
reg [32-1:0] fsm;
localparam fsm_init = 0;
reg [32-1:0] addr_0;
reg writevalid_1;
reg readvalid_2;
reg prev_awvalid_3;
reg prev_arvalid_4;
assign myaxi_awready = (fsm == 0) && (!writevalid_1 && !readvalid_2 && !myaxi_bvalid && prev_awvalid_3);
assign myaxi_arready = (fsm == 0) && (!readvalid_2 && !writevalid_1 && prev_arvalid_4 && !prev_awvalid_3);
reg [32-1:0] rdata;
reg _myaxi_cond_0_1;
assign myaxi_wready = fsm == 100;
always @(posedge CLK) begin
if(RST) begin
myaxi_bvalid <= 0;
prev_awvalid_3 <= 0;
prev_arvalid_4 <= 0;
writevalid_1 <= 0;
readvalid_2 <= 0;
addr_0 <= 0;
myaxi_rdata <= 0;
myaxi_rvalid <= 0;
_myaxi_cond_0_1 <= 0;
end else begin
if(_myaxi_cond_0_1) begin
myaxi_rvalid <= 0;
end
if(myaxi_bvalid && myaxi_bready) begin
myaxi_bvalid <= 0;
end
if(myaxi_wvalid && myaxi_wready) begin
myaxi_bvalid <= 1;
end
prev_awvalid_3 <= myaxi_awvalid;
prev_arvalid_4 <= myaxi_arvalid;
writevalid_1 <= 0;
readvalid_2 <= 0;
if(myaxi_awready && myaxi_awvalid && !myaxi_bvalid) begin
addr_0 <= myaxi_awaddr;
writevalid_1 <= 1;
end else if(myaxi_arready && myaxi_arvalid) begin
addr_0 <= myaxi_araddr;
readvalid_2 <= 1;
end
if((fsm == 1) && (myaxi_rready || !myaxi_rvalid)) begin
myaxi_rdata <= rdata;
myaxi_rvalid <= 1;
end
_myaxi_cond_0_1 <= 1;
if(myaxi_rvalid && !myaxi_rready) begin
myaxi_rvalid <= myaxi_rvalid;
end
end
end
localparam fsm_1 = 1;
localparam fsm_2 = 2;
localparam fsm_100 = 100;
localparam fsm_101 = 101;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
rdata <= 0;
sum <= 0;
end else begin
case(fsm)
fsm_init: begin
if(readvalid_2) begin
rdata <= addr_0 >> 2;
end
if(writevalid_1) begin
fsm <= fsm_100;
end
if(readvalid_2) begin
fsm <= fsm_1;
end
end
fsm_1: begin
if(myaxi_rready || !myaxi_rvalid) begin
rdata <= rdata + 1;
end
if(myaxi_rready || !myaxi_rvalid) begin
fsm <= fsm_2;
end
end
fsm_2: begin
fsm <= fsm_init;
end
fsm_100: begin
if(myaxi_wready && myaxi_wvalid) begin
sum <= sum + myaxi_wdata;
end
if(myaxi_wready && myaxi_wvalid) begin
fsm <= fsm_101;
end
end
fsm_101: begin
fsm <= fsm_init;
end
endcase
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = types_axi_slave_readwrite_lite_simultaneous.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
```
#### File: axi_/write_dataflow_when/test_types_axi_write_dataflow_when.py
```python
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import types_axi_write_dataflow_when
expected_verilog = """
module test;
reg CLK;
reg RST;
wire [32-1:0] myaxi_awaddr;
wire [8-1:0] myaxi_awlen;
wire [3-1:0] myaxi_awsize;
wire [2-1:0] myaxi_awburst;
wire [1-1:0] myaxi_awlock;
wire [4-1:0] myaxi_awcache;
wire [3-1:0] myaxi_awprot;
wire [4-1:0] myaxi_awqos;
wire [2-1:0] myaxi_awuser;
wire myaxi_awvalid;
reg myaxi_awready;
wire [32-1:0] myaxi_wdata;
wire [4-1:0] myaxi_wstrb;
wire myaxi_wlast;
wire myaxi_wvalid;
reg myaxi_wready;
reg [2-1:0] myaxi_bresp;
reg myaxi_bvalid;
wire myaxi_bready;
wire [32-1:0] myaxi_araddr;
wire [8-1:0] myaxi_arlen;
wire [3-1:0] myaxi_arsize;
wire [2-1:0] myaxi_arburst;
wire [1-1:0] myaxi_arlock;
wire [4-1:0] myaxi_arcache;
wire [3-1:0] myaxi_arprot;
wire [4-1:0] myaxi_arqos;
wire [2-1:0] myaxi_aruser;
wire myaxi_arvalid;
reg myaxi_arready;
reg [32-1:0] myaxi_rdata;
reg [2-1:0] myaxi_rresp;
reg myaxi_rlast;
reg myaxi_rvalid;
wire myaxi_rready;
reg [32-1:0] waddr;
localparam waddr_init = 0;
reg [32-1:0] _awlen;
wire _tmp_0;
assign _tmp_0 = 0;
always @(*) begin
myaxi_arready <= _tmp_0;
end
wire _tmp_1;
assign _tmp_1 = 0;
always @(*) begin
myaxi_rvalid <= _tmp_1;
end
wire [32-1:0] _tmp_2;
assign _tmp_2 = 0;
always @(*) begin
myaxi_rdata <= _tmp_2;
end
wire _tmp_3;
assign _tmp_3 = 0;
always @(*) begin
myaxi_rlast <= _tmp_3;
end
main
uut
(
.CLK(CLK),
.RST(RST),
.myaxi_awaddr(myaxi_awaddr),
.myaxi_awlen(myaxi_awlen),
.myaxi_awsize(myaxi_awsize),
.myaxi_awburst(myaxi_awburst),
.myaxi_awlock(myaxi_awlock),
.myaxi_awcache(myaxi_awcache),
.myaxi_awprot(myaxi_awprot),
.myaxi_awqos(myaxi_awqos),
.myaxi_awuser(myaxi_awuser),
.myaxi_awvalid(myaxi_awvalid),
.myaxi_awready(myaxi_awready),
.myaxi_wdata(myaxi_wdata),
.myaxi_wstrb(myaxi_wstrb),
.myaxi_wlast(myaxi_wlast),
.myaxi_wvalid(myaxi_wvalid),
.myaxi_wready(myaxi_wready),
.myaxi_bresp(myaxi_bresp),
.myaxi_bvalid(myaxi_bvalid),
.myaxi_bready(myaxi_bready),
.myaxi_araddr(myaxi_araddr),
.myaxi_arlen(myaxi_arlen),
.myaxi_arsize(myaxi_arsize),
.myaxi_arburst(myaxi_arburst),
.myaxi_arlock(myaxi_arlock),
.myaxi_arcache(myaxi_arcache),
.myaxi_arprot(myaxi_arprot),
.myaxi_arqos(myaxi_arqos),
.myaxi_aruser(myaxi_aruser),
.myaxi_arvalid(myaxi_arvalid),
.myaxi_arready(myaxi_arready),
.myaxi_rdata(myaxi_rdata),
.myaxi_rresp(myaxi_rresp),
.myaxi_rlast(myaxi_rlast),
.myaxi_rvalid(myaxi_rvalid),
.myaxi_rready(myaxi_rready)
);
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
waddr = waddr_init;
_awlen = 0;
#100;
RST = 1;
#100;
RST = 0;
#100000;
$finish;
end
localparam waddr_1 = 1;
localparam waddr_2 = 2;
localparam waddr_3 = 3;
localparam waddr_4 = 4;
localparam waddr_5 = 5;
always @(posedge CLK) begin
if(RST) begin
waddr <= waddr_init;
_awlen <= 0;
end else begin
case(waddr)
waddr_init: begin
myaxi_awready <= 0;
myaxi_wready <= 0;
_awlen <= 0;
if(myaxi_awvalid) begin
waddr <= waddr_1;
end
end
waddr_1: begin
if(myaxi_awvalid) begin
myaxi_awready <= 1;
end
waddr <= waddr_2;
end
waddr_2: begin
myaxi_awready <= 0;
_awlen <= myaxi_awlen;
waddr <= waddr_3;
end
waddr_3: begin
myaxi_wready <= 0;
if(myaxi_wvalid) begin
waddr <= waddr_4;
end
end
waddr_4: begin
if(myaxi_wvalid) begin
myaxi_wready <= 1;
end
waddr <= waddr_5;
end
waddr_5: begin
myaxi_wready <= 0;
_awlen <= _awlen - 1;
waddr <= waddr_3;
if(_awlen == 0) begin
waddr <= waddr_init;
end
end
endcase
end
end
endmodule
module main
(
input CLK,
input RST,
output reg [32-1:0] myaxi_awaddr,
output reg [8-1:0] myaxi_awlen,
output [3-1:0] myaxi_awsize,
output [2-1:0] myaxi_awburst,
output [1-1:0] myaxi_awlock,
output [4-1:0] myaxi_awcache,
output [3-1:0] myaxi_awprot,
output [4-1:0] myaxi_awqos,
output [2-1:0] myaxi_awuser,
output reg myaxi_awvalid,
input myaxi_awready,
output reg [32-1:0] myaxi_wdata,
output reg [4-1:0] myaxi_wstrb,
output reg myaxi_wlast,
output reg myaxi_wvalid,
input myaxi_wready,
input [2-1:0] myaxi_bresp,
input myaxi_bvalid,
output myaxi_bready,
output reg [32-1:0] myaxi_araddr,
output reg [8-1:0] myaxi_arlen,
output [3-1:0] myaxi_arsize,
output [2-1:0] myaxi_arburst,
output [1-1:0] myaxi_arlock,
output [4-1:0] myaxi_arcache,
output [3-1:0] myaxi_arprot,
output [4-1:0] myaxi_arqos,
output [2-1:0] myaxi_aruser,
output reg myaxi_arvalid,
input myaxi_arready,
input [32-1:0] myaxi_rdata,
input [2-1:0] myaxi_rresp,
input myaxi_rlast,
input myaxi_rvalid,
output myaxi_rready
);
assign myaxi_awsize = 2;
assign myaxi_awburst = 1;
assign myaxi_awlock = 0;
assign myaxi_awcache = 3;
assign myaxi_awprot = 0;
assign myaxi_awqos = 0;
assign myaxi_awuser = 0;
assign myaxi_bready = 1;
assign myaxi_arsize = 2;
assign myaxi_arburst = 1;
assign myaxi_arlock = 0;
assign myaxi_arcache = 3;
assign myaxi_arprot = 0;
assign myaxi_arqos = 0;
assign myaxi_aruser = 0;
reg [3-1:0] outstanding_wcount_0;
assign myaxi_rready = 0;
reg [32-1:0] fsm;
localparam fsm_init = 0;
reg [9-1:0] counter_1;
reg _myaxi_cond_0_1;
reg last_2;
wire _dataflow_tmp_all_valid_3;
wire [32-1:0] _dataflow_counter_odata_1;
wire _dataflow_counter_ovalid_1;
wire _dataflow_counter_oready_1;
assign _dataflow_counter_oready_1 = (counter_1 > 0) && (outstanding_wcount_0 < 6) && (myaxi_wready || !myaxi_wvalid) && _dataflow_tmp_all_valid_3;
wire [1-1:0] _dataflow_eq_odata_7;
wire _dataflow_eq_ovalid_7;
wire _dataflow_eq_oready_7;
assign _dataflow_eq_oready_7 = (counter_1 > 0) && (outstanding_wcount_0 < 6) && (myaxi_wready || !myaxi_wvalid) && _dataflow_tmp_all_valid_3;
assign _dataflow_tmp_all_valid_3 = _dataflow_counter_ovalid_1 && _dataflow_eq_ovalid_7;
reg _myaxi_cond_1_1;
reg [32-1:0] sum;
reg _seq_cond_0_1;
always @(posedge CLK) begin
if(RST) begin
outstanding_wcount_0 <= 0;
myaxi_araddr <= 0;
myaxi_arlen <= 0;
myaxi_arvalid <= 0;
myaxi_awaddr <= 0;
myaxi_awlen <= 0;
myaxi_awvalid <= 0;
counter_1 <= 0;
_myaxi_cond_0_1 <= 0;
myaxi_wdata <= 0;
myaxi_wvalid <= 0;
myaxi_wlast <= 0;
myaxi_wstrb <= 0;
last_2 <= 0;
_myaxi_cond_1_1 <= 0;
end else begin
if(_myaxi_cond_0_1) begin
myaxi_awvalid <= 0;
end
if(_myaxi_cond_1_1) begin
myaxi_wvalid <= 0;
myaxi_wlast <= 0;
last_2 <= 0;
end
if(myaxi_wlast && myaxi_wvalid && myaxi_wready && !(myaxi_bvalid && myaxi_bready) && (outstanding_wcount_0 < 7)) begin
outstanding_wcount_0 <= outstanding_wcount_0 + 1;
end
if(!(myaxi_wlast && myaxi_wvalid && myaxi_wready) && (myaxi_bvalid && myaxi_bready) && (outstanding_wcount_0 > 0)) begin
outstanding_wcount_0 <= outstanding_wcount_0 - 1;
end
myaxi_araddr <= 0;
myaxi_arlen <= 0;
myaxi_arvalid <= 0;
if((fsm == 0) && ((myaxi_awready || !myaxi_awvalid) && (counter_1 == 0))) begin
myaxi_awaddr <= 1024;
myaxi_awlen <= 63;
myaxi_awvalid <= 1;
counter_1 <= 64;
end
if((fsm == 0) && ((myaxi_awready || !myaxi_awvalid) && (counter_1 == 0)) && 0) begin
myaxi_awvalid <= 0;
end
_myaxi_cond_0_1 <= 1;
if(myaxi_awvalid && !myaxi_awready) begin
myaxi_awvalid <= myaxi_awvalid;
end
if(_dataflow_eq_odata_7 && (_dataflow_counter_ovalid_1 && ((counter_1 > 0) && (outstanding_wcount_0 < 6) && (myaxi_wready || !myaxi_wvalid) && _dataflow_tmp_all_valid_3)) && ((counter_1 > 0) && (outstanding_wcount_0 < 6) && (myaxi_wready || !myaxi_wvalid) && (counter_1 > 0))) begin
myaxi_wdata <= _dataflow_counter_odata_1;
myaxi_wvalid <= 1;
myaxi_wlast <= 0;
myaxi_wstrb <= { 4{ 1'd1 } };
counter_1 <= counter_1 - 1;
end
if(_dataflow_eq_odata_7 && (_dataflow_counter_ovalid_1 && ((counter_1 > 0) && (outstanding_wcount_0 < 6) && (myaxi_wready || !myaxi_wvalid) && _dataflow_tmp_all_valid_3)) && ((counter_1 > 0) && (outstanding_wcount_0 < 6) && (myaxi_wready || !myaxi_wvalid) && (counter_1 > 0)) && (counter_1 == 1)) begin
myaxi_wlast <= 1;
last_2 <= 1;
end
_myaxi_cond_1_1 <= 1;
if(myaxi_wvalid && !myaxi_wready) begin
myaxi_wvalid <= myaxi_wvalid;
myaxi_wlast <= myaxi_wlast;
last_2 <= last_2;
end
end
end
reg [32-1:0] _dataflow_counter_data_1;
reg _dataflow_counter_valid_1;
wire _dataflow_counter_ready_1;
reg [32-1:0] _dataflow_counter_data_5;
reg _dataflow_counter_valid_5;
wire _dataflow_counter_ready_5;
reg [6-1:0] _dataflow_counter_count_5;
reg [1-1:0] _dataflow_eq_data_7;
reg _dataflow_eq_valid_7;
wire _dataflow_eq_ready_7;
assign _dataflow_counter_ready_5 = (_dataflow_eq_ready_7 || !_dataflow_eq_valid_7) && _dataflow_counter_valid_5;
reg [32-1:0] _dataflow__delay_data_9;
reg _dataflow__delay_valid_9;
wire _dataflow__delay_ready_9;
assign _dataflow_counter_ready_1 = (_dataflow__delay_ready_9 || !_dataflow__delay_valid_9) && _dataflow_counter_valid_1;
assign _dataflow_eq_odata_7 = _dataflow_eq_data_7;
assign _dataflow_eq_ovalid_7 = _dataflow_eq_valid_7;
assign _dataflow_eq_ready_7 = _dataflow_eq_oready_7;
assign _dataflow_counter_odata_1 = _dataflow__delay_data_9;
assign _dataflow_counter_ovalid_1 = _dataflow__delay_valid_9;
assign _dataflow__delay_ready_9 = _dataflow_counter_oready_1;
always @(posedge CLK) begin
if(RST) begin
_dataflow_counter_data_1 <= -2'sd1;
_dataflow_counter_valid_1 <= 0;
_dataflow_counter_data_5 <= -2'sd1;
_dataflow_counter_count_5 <= 0;
_dataflow_counter_valid_5 <= 0;
_dataflow_eq_data_7 <= 0;
_dataflow_eq_valid_7 <= 0;
_dataflow__delay_data_9 <= 0;
_dataflow__delay_valid_9 <= 0;
end else begin
if((_dataflow_counter_ready_1 || !_dataflow_counter_valid_1) && 1 && 1) begin
_dataflow_counter_data_1 <= _dataflow_counter_data_1 + 1;
end
if(_dataflow_counter_valid_1 && _dataflow_counter_ready_1) begin
_dataflow_counter_valid_1 <= 0;
end
if((_dataflow_counter_ready_1 || !_dataflow_counter_valid_1) && 1) begin
_dataflow_counter_valid_1 <= 1;
end
if((_dataflow_counter_ready_5 || !_dataflow_counter_valid_5) && 1 && 1) begin
_dataflow_counter_data_5 <= _dataflow_counter_data_5 + 1;
end
if((_dataflow_counter_ready_5 || !_dataflow_counter_valid_5) && 1 && 1) begin
_dataflow_counter_count_5 <= (_dataflow_counter_count_5 == 5'sd8 - 1)? 0 : _dataflow_counter_count_5 + 1;
end
if(_dataflow_counter_valid_5 && _dataflow_counter_ready_5) begin
_dataflow_counter_valid_5 <= 0;
end
if((_dataflow_counter_ready_5 || !_dataflow_counter_valid_5) && 1) begin
_dataflow_counter_valid_5 <= 1;
end
if((_dataflow_counter_ready_5 || !_dataflow_counter_valid_5) && 1 && 1 && (_dataflow_counter_count_5 == 0)) begin
_dataflow_counter_data_5 <= -2'sd1 + 1;
end
if((_dataflow_eq_ready_7 || !_dataflow_eq_valid_7) && _dataflow_counter_ready_5 && _dataflow_counter_valid_5) begin
_dataflow_eq_data_7 <= _dataflow_counter_data_5 == 1'sd0;
end
if(_dataflow_eq_valid_7 && _dataflow_eq_ready_7) begin
_dataflow_eq_valid_7 <= 0;
end
if((_dataflow_eq_ready_7 || !_dataflow_eq_valid_7) && _dataflow_counter_ready_5) begin
_dataflow_eq_valid_7 <= _dataflow_counter_valid_5;
end
if((_dataflow__delay_ready_9 || !_dataflow__delay_valid_9) && _dataflow_counter_ready_1 && _dataflow_counter_valid_1) begin
_dataflow__delay_data_9 <= _dataflow_counter_data_1;
end
if(_dataflow__delay_valid_9 && _dataflow__delay_ready_9) begin
_dataflow__delay_valid_9 <= 0;
end
if((_dataflow__delay_ready_9 || !_dataflow__delay_valid_9) && _dataflow_counter_ready_1) begin
_dataflow__delay_valid_9 <= _dataflow_counter_valid_1;
end
end
end
localparam fsm_1 = 1;
localparam fsm_2 = 2;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
end else begin
case(fsm)
fsm_init: begin
if(myaxi_awready || !myaxi_awvalid) begin
fsm <= fsm_1;
end
end
fsm_1: begin
if(last_2 && myaxi_wvalid && myaxi_wready) begin
fsm <= fsm_2;
end
end
endcase
end
end
always @(posedge CLK) begin
if(RST) begin
sum <= 0;
_seq_cond_0_1 <= 0;
end else begin
if(_seq_cond_0_1) begin
$display("sum=%d expected_sum=%d", sum, 16128);
end
if(myaxi_wvalid && myaxi_wready) begin
sum <= sum + myaxi_wdata;
end
_seq_cond_0_1 <= myaxi_wvalid && myaxi_wready && myaxi_wlast;
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = types_axi_write_dataflow_when.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
```
#### File: veriloggen/thread/axistreamin.py
```python
from __future__ import absolute_import
from __future__ import print_function
import math
import functools
from collections import OrderedDict
import veriloggen.core.vtypes as vtypes
import veriloggen.types.axi as axi
from veriloggen.fsm.fsm import FSM
from veriloggen.optimizer import try_optimize as optimize
from .ttypes import _MutexFunction
from .ram import RAM, MultibankRAM, to_multibank_ram
from .fifo import FIFO
class AXIStreamIn(axi.AxiStreamIn, _MutexFunction):
""" AXI Stream Interface for Input """
__intrinsics__ = ('read',
'write_ram', 'write_ram_async',
'wait_write_ram')
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
with_last=True, with_strb=False,
id_width=0, user_width=0, dest_width=0,
noio=False,
enable_async=True,
num_cmd_delay=0, num_data_delay=0,
op_sel_width=8, fsm_as_module=False):
axi.AxiStreamIn.__init__(self, m, name, clk, rst, datawidth,
with_last, with_strb,
id_width, user_width, dest_width,
noio)
self.addrwidth = addrwidth
self.enable_async = enable_async
self.num_cmd_delay = num_cmd_delay
self.num_data_delay = num_data_delay
self.op_sel_width = op_sel_width
self.fsm_as_module = fsm_as_module
self.mutex = None
self.read_start = self.m.Reg('_'.join(['', self.name, 'read_start']),
initval=0)
self.read_op_sel = self.m.Reg('_'.join(['', self.name, 'read_op_sel']),
self.op_sel_width, initval=0)
self.read_local_addr = self.m.Reg('_'.join(['', self.name, 'read_local_addr']),
self.addrwidth, initval=0)
self.read_size = self.m.Reg('_'.join(['', self.name, 'read_size']),
self.addrwidth + 1, initval=0)
self.read_local_stride = self.m.Reg('_'.join(['', self.name, 'read_local_stride']),
self.addrwidth, initval=0)
self.read_idle = self.m.Reg(
'_'.join(['', self.name, 'read_idle']), initval=1)
self.seq(
self.read_start(0)
)
self.read_op_id_map = OrderedDict()
self.read_op_id_count = 1
self.read_reqs = OrderedDict()
self.read_ops = []
self.read_fsm = None
self.read_data_wire = None
self.read_valid_wire = None
self.read_rest_size = None
self.read_narrow_fsms = OrderedDict() # key: pack_size
self.read_narrow_pack_counts = OrderedDict() # key: pack_size
self.read_narrow_data_wires = OrderedDict() # key: pack_size
self.read_narrow_valid_wires = OrderedDict() # key: pack_size
self.read_narrow_rest_size_wires = OrderedDict() # key: pack_size
self.read_wide_fsms = OrderedDict() # key: pack_size
self.read_wide_pack_counts = OrderedDict() # key: pack_size
self.read_wide_data_wires = OrderedDict() # key: pack_size
self.read_wide_valid_wires = OrderedDict() # key: pack_size
self.read_wide_rest_size_wires = OrderedDict() # key: pack_size
def read(self, fsm):
data, last, _id, user, dest, valid = self.read_data(cond=fsm)
rdata = self.m.TmpReg(self.datawidth, initval=0,
signed=True, prefix='axistreamin_rdata')
if last is not None:
rlast = self.m.TmpReg(1, initval=0,
signed=False, prefix='axistreamin_rlast')
else:
rlast = True
fsm.If(valid)(
rdata(data),
rlast(last) if last is not None else ()
)
fsm.Then().goto_next()
return rdata, rlast
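    # Hedged usage sketch (assumption, not taken from this file): from a
    # veriloggen Thread, the names listed in __intrinsics__ are called without
    # the explicit fsm argument, which the thread machinery injects, e.g.
    #     data, last = axi_in.read()
    #     axi_in.write_ram(ram, local_addr, size)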
def write_ram(self, fsm, ram, local_addr, size,
local_stride=1, port=0, ram_method=None):
if self.enable_async:
self.wait_write_ram(fsm)
self._write_ram(fsm, ram, local_addr, size,
local_stride, port, ram_method)
self.wait_write_ram(fsm)
def write_ram_async(self, fsm, ram, local_addr, size,
local_stride=1, port=0, ram_method=None):
if not self.enable_async:
raise ValueError(
"Async mode is disabled. Set 'True' to AXIM.enable_async.")
self.wait_write_ram(fsm)
self._write_ram(fsm, ram, local_addr, size,
local_stride, port, ram_method)
def wait_write_ram(self, fsm):
fsm.If(self.read_idle).goto_next()
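    # Note on the three entry points above: write_ram() blocks until the whole
    # stream-to-RAM transfer has finished (it re-checks read_idle after issuing
    # the request), write_ram_async() only waits for any previous transfer and
    # returns once the new request is issued, and wait_write_ram() simply
    # stalls the calling FSM until read_idle is asserted again.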
def _write_ram(self, fsm, ram, local_addr, size,
local_stride=1, port=0, ram_method=None):
if isinstance(ram, (tuple, list)):
ram = to_multibank_ram(ram)
if not isinstance(ram, (RAM, MultibankRAM)):
raise TypeError('RAM object is required.')
if ram_method is None:
ram_method = getattr(ram, 'write_dataflow')
start = self._set_flag(fsm)
for _ in range(self.num_cmd_delay + 1):
fsm.goto_next()
self._set_read_request(ram, port, ram_method, start,
local_addr, size, local_stride)
self._synthesize_read_fsm(ram, port, ram_method)
fsm.goto_next()
def _set_read_request(self, ram, port, ram_method, start,
local_addr, size, local_stride):
op_id = self._get_read_op_id(ram, port, ram_method)
if op_id in self.read_reqs:
(read_start, read_op_sel,
read_local_addr_in,
read_size_in, read_local_stride_in) = self.read_reqs[op_id]
self.seq.If(start)(
read_start(1),
read_op_sel(op_id),
read_local_addr_in(local_addr),
read_size_in(size),
read_local_stride_in(local_stride)
)
return
port = str(vtypes.to_int(port))
read_start = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_start']),
initval=0)
read_op_sel = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_op_sel']),
self.op_sel_width, initval=0)
read_local_addr = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_local_addr']),
self.addrwidth, initval=0)
read_size = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_size']),
self.addrwidth + 1, initval=0)
read_local_stride = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_local_stride']),
self.addrwidth, initval=0)
self.seq(
read_start(0)
)
self.seq.If(start)(
read_start(1),
read_op_sel(op_id),
read_local_addr(local_addr),
read_size(size),
read_local_stride(local_stride)
)
self.read_reqs[op_id] = (read_start, read_op_sel,
read_local_addr,
read_size, read_local_stride)
if self.num_cmd_delay > 0:
read_start = self.seq.Prev(read_start, self.num_cmd_delay)
read_op_sel = self.seq.Prev(read_op_sel, self.num_cmd_delay)
read_local_addr = self.seq.Prev(
read_local_addr, self.num_cmd_delay)
read_size = self.seq.Prev(read_size, self.num_cmd_delay)
read_local_stride = self.seq.Prev(
read_local_stride, self.num_cmd_delay)
self.seq.If(read_start)(
self.read_idle(0)
)
self.seq.If(read_start)(
self.read_start(1),
self.read_op_sel(read_op_sel),
self.read_local_addr(read_local_addr),
self.read_size(read_size),
self.read_local_stride(read_local_stride)
)
def _synthesize_read_fsm(self, ram, port, ram_method):
ram_method_name = (ram_method.func.__name__
if isinstance(ram_method, functools.partial) else
ram_method.__name__)
ram_datawidth = (ram.datawidth if ram_method is None else
ram.orig_datawidth if 'bcast' in ram_method_name else
ram.orig_datawidth if 'block' in ram_method_name else
ram.datawidth)
if not isinstance(self.datawidth, int):
raise TypeError("axi.datawidth must be int, not '%s'" %
str(type(self.datawidth)))
if not isinstance(ram_datawidth, int):
raise TypeError("ram_datawidth must be int, not '%s'" %
str(type(ram_datawidth)))
if self.datawidth == ram_datawidth:
return self._synthesize_read_fsm_same(ram, port, ram_method, ram_datawidth)
if self.datawidth < ram_datawidth:
return self._synthesize_read_fsm_narrow(ram, port, ram_method, ram_datawidth)
return self._synthesize_read_fsm_wide(ram, port, ram_method, ram_datawidth)
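    # Dispatch above: equal widths stream each AXI beat straight into the RAM,
    # a narrower AXI bus packs pack_size beats into one RAM word, and a wider
    # AXI bus unpacks each beat into pack_size RAM words.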
def _synthesize_read_fsm_same(self, ram, port, ram_method, ram_datawidth):
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.read_ops:
""" already synthesized op """
return
if self.read_fsm is not None:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_fsm
data = self.read_data_wire
valid = self.read_valid_wire
rest_size = self.read_rest_size
# state 0
fsm.set_index(0)
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(valid_cond)(
wdata(data),
wvalid(1)
)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name, 'read_fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_fsm = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name, 'read_rest_size']),
self.addrwidth + 1, initval=0)
self.read_rest_size = rest_size
# state 0
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(self.read_start)(
rest_size(self.read_size)
)
fsm.If(cond).goto_next()
# state 1
data, last, _id, user, dest, valid = self.read_data(cond=fsm)
self.read_data_wire = data
self.read_valid_wire = valid
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(valid_cond)(
wdata(data),
wvalid(1),
)
fsm.If(valid_cond)(
rest_size.dec()
)
fsm.If(valid, rest_size <= 1).goto_next()
for _ in range(self.num_data_delay):
fsm.goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
def _synthesize_read_fsm_narrow(self, ram, port, ram_method, ram_datawidth):
""" axi.datawidth < ram.datawidth """
if ram_datawidth % self.datawidth != 0:
raise ValueError(
                'ram_datawidth must be a multiple of axi.datawidth')
pack_size = ram_datawidth // self.datawidth
dma_size = (self.read_size << int(math.log(pack_size, 2))
if math.log(pack_size, 2) % 1.0 == 0.0 else
self.read_size * pack_size)
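        # pack_size AXI beats form one RAM word, so dma_size is the transfer
        # length counted in AXI beats; a shift is used instead of a multiply
        # when pack_size is a power of two.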
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.read_ops:
""" already synthesized op """
return
if pack_size in self.read_narrow_fsms:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_narrow_fsms[pack_size]
pack_count = self.read_narrow_pack_counts[pack_size]
data = self.read_narrow_data_wires[pack_size]
valid = self.read_narrow_valid_wires[pack_size]
rest_size = self.read_narrow_rest_size_wires[pack_size]
# state 0
fsm.set_index(0)
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(rest_size == 0, pack_count > 0)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(valid_cond)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(rest_size == 0, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'read_narrow', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_narrow_fsms[pack_size] = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
self.read_narrow_rest_size_wires[pack_size] = rest_size
# state 0
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(self.read_start)(
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
pack_count = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.read_narrow_pack_counts[pack_size] = pack_count
data, last, _id, user, dest, valid = self.read_data(cond=fsm)
self.read_narrow_data_wires[pack_size] = data
self.read_narrow_valid_wires[pack_size] = valid
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(rest_size == 0, pack_count > 0)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(valid_cond)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(rest_size == 0, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond)(
rest_size.dec()
)
fsm.If(wvalid, rest_size == 0).goto_next()
for _ in range(self.num_data_delay):
fsm.goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
def _synthesize_read_fsm_wide(self, ram, port, ram_method, ram_datawidth):
""" axi.datawidth > ram.datawidth """
if self.datawidth % ram_datawidth != 0:
raise ValueError(
                'axi.datawidth must be a multiple of ram_datawidth')
pack_size = self.datawidth // ram_datawidth
shamt = int(math.log(pack_size, 2))
res = vtypes.Mux(
vtypes.And(self.read_size, 2 ** shamt - 1) > 0, 1, 0)
dma_size = (self.read_size >> shamt) + res
actual_read_size = dma_size << shamt
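        # Each AXI beat is split into pack_size RAM words: dma_size is the
        # beat count rounded up (ceil(read_size / pack_size)), and
        # actual_read_size is that count re-expanded to RAM words, i.e.
        # read_size rounded up to a multiple of pack_size.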
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.read_ops:
""" already synthesized op """
return
if pack_size in self.read_wide_fsms:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_wide_fsms[pack_size]
pack_count = self.read_wide_pack_counts[pack_size]
data = self.read_wide_data_wires[pack_size]
valid = self.read_wide_valid_wires[pack_size]
rest_size = self.read_wide_rest_size_wires[pack_size]
# state 0
fsm.set_index(0)
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, actual_read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
stay_cond = self.read_op_sel == op_id
fsm.Delay(1)(
wvalid(0)
)
fsm.If(pack_count == 0, valid_cond)(
wdata(data),
wvalid(1),
pack_count.inc()
)
fsm.If(pack_count > 0, stay_cond)(
wdata(wdata >> ram_datawidth),
wvalid(1),
pack_count.inc()
)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'read_wide', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_wide_fsms[pack_size] = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
self.read_wide_rest_size_wires[pack_size] = rest_size
# state 0
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, actual_read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(self.read_start)(
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
pack_count = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.read_wide_pack_counts[pack_size] = pack_count
cond = vtypes.Ands(fsm.here, pack_count == 0)
data, last, _id, user, dest, valid = self.read_data(cond=cond)
self.read_wide_data_wires[pack_size] = data
self.read_wide_valid_wires[pack_size] = valid
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
stay_cond = self.read_op_sel == op_id
wlast = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'wlast']),
initval=0)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(pack_count == 0, valid_cond)(
wdata(data),
wvalid(1),
wlast(last),
pack_count.inc()
)
fsm.If(pack_count > 0, stay_cond)(
wdata(wdata >> ram_datawidth),
wvalid(1),
pack_count.inc()
)
fsm.If(pack_count == pack_size - 1)(
pack_count(0)
)
fsm.If(pack_count == 0, valid_cond)(
rest_size.dec()
)
fsm.If(pack_count == pack_size - 1, rest_size == 0).goto_next()
for _ in range(self.num_data_delay):
fsm.goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
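# Emit a one-cycle pulse: the returned TmpReg is set in the FSM state that
# calls this helper, cleared on the next cycle, and the FSM then advances.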
def _set_flag(self, fsm, prefix='axistreamin_flag'):
flag = self.m.TmpReg(initval=0, prefix=prefix)
fsm(
flag(1)
)
fsm.Delay(1)(
flag(0)
)
fsm.goto_next()
return flag
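# Map a (RAM id, port, ram_method name) tuple to a stable operation id,
# reusing the existing id if the same combination was registered before.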
def _get_read_op_id(self, ram, port, ram_method):
ram_id = ram._id()
port = vtypes.to_int(port)
ram_method_name = (ram_method.func.__name__
if isinstance(ram_method, functools.partial) else
ram_method.__name__)
op = (ram_id, port, ram_method_name)
if op in self.read_op_id_map:
op_id = self.read_op_id_map[op]
else:
op_id = self.read_op_id_count
self.read_op_id_count += 1
self.read_op_id_map[op] = op_id
return op_id
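# Build the write-side dataflow variable for the RAM: wdata/wvalid registers
# plus a dataflow Variable 'w', optionally delayed by num_data_delay stages.
# When the AXI bus is wider than the RAM, wdata keeps the full bus width and
# a truncated wire of ram_datawidth is handed to the dataflow.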
def _get_op_write_dataflow(self, ram_datawidth):
if self.datawidth == ram_datawidth:
wdata = self.m.TmpReg(ram_datawidth, initval=0, prefix='_wdata')
wvalid = self.m.TmpReg(initval=0, prefix='_wvalid')
w = self.df.Variable(wdata, wvalid,
width=ram_datawidth, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
w = self.df._Delay(w)
return (wdata, wvalid, w)
if self.datawidth < ram_datawidth:
wdata = self.m.TmpReg(ram_datawidth, initval=0, prefix='_wdata')
wvalid = self.m.TmpReg(initval=0, prefix='_wvalid')
w = self.df.Variable(wdata, wvalid,
width=ram_datawidth, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
w = self.df._Delay(w)
return (wdata, wvalid, w)
wdata = self.m.TmpReg(self.datawidth, initval=0, prefix='_wdata')
wdata_ram = self.m.TmpWire(ram_datawidth, prefix='_wdata_ram')
wdata_ram.assign(wdata)
wvalid = self.m.TmpReg(initval=0, prefix='_wvalid')
w = self.df.Variable(wdata_ram, wvalid,
width=ram_datawidth, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
w = self.df._Delay(w)
return (wdata, wvalid, w)
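# Variant of AXIStreamIn that forwards received stream data into a FIFO
# instead of a RAM; the RAM-oriented write_ram* / wait_write_ram methods
# are disabled and raise NotImplementedError.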
class AXIStreamInFifo(AXIStreamIn):
""" AXI Stream Interface to FIFO for Input """
__intrinsics__ = ('read',
'write_fifo',
'wait_write_fifo')
def write_ram(self, fsm, ram, local_addr, size,
local_stride=1, port=0, ram_method=None):
raise NotImplementedError('Use AXIStreamIn.')
def write_ram_async(self, fsm, ram, local_addr, size,
local_stride=1, port=0, ram_method=None):
raise NotImplementedError('Use AXIStreamIn.')
def wait_write_ram(self, fsm):
raise NotImplementedError('Use AXIStreamIn.')
def write_fifo(self, fsm, fifo, size):
if not self.enable_async:
raise ValueError(
"Async mode is disabled. Set AXIM.enable_async to True.")
self.wait_write_fifo(fsm)
self._write_fifo(fsm, fifo, size)
def wait_write_fifo(self, fsm):
fsm.If(self.read_idle).goto_next()
def _get_read_op_id_fifo(self, fifo):
fifo_id = fifo._id()
op = fifo_id
if op in self.read_op_id_map:
op_id = self.read_op_id_map[op]
else:
op_id = self.read_op_id_count
self.read_op_id_count += 1
self.read_op_id_map[op] = op_id
return op_id
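# FIFO transfer entry point: pulse a start flag, step the FSM past the
# command-delay cycles, register the read request, and synthesize the read
# FSM for this FIFO (skipped if an equivalent op was already synthesized).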
def _write_fifo(self, fsm, fifo, size):
if self.num_data_delay != 0:
raise ValueError('num_data_delay must be 0.')
if not isinstance(fifo, FIFO):
raise TypeError('FIFO object is required.')
start = self._set_flag(fsm)
for _ in range(self.num_cmd_delay + 1):
fsm.goto_next()
self._set_read_request_fifo(fifo, start, size)
self._synthesize_read_fsm_fifo(fifo)
def _set_read_request_fifo(self, fifo, start, size):
op_id = self._get_read_op_id_fifo(fifo)
if op_id in self.read_ops:
(read_start, read_op_sel, read_size_in) = self.read_reqs[op_id]
self.seq.If(start)(
read_start(1),
read_op_sel(op_id)
)
return
read_start = self.m.Reg(
'_'.join(['', self.name, fifo.name, 'read_start']),
initval=0)
read_op_sel = self.m.Reg(
'_'.join(['', self.name, fifo.name, 'read_op_sel']),
self.op_sel_width, initval=0)
read_size = self.m.Reg(
'_'.join(['', self.name, fifo.name, 'read_size']),
self.addrwidth + 1, initval=0)
self.seq(
read_start(0)
)
self.seq.If(start)(
read_start(1),
read_op_sel(op_id),
read_size(size),
)
self.read_reqs[op_id] = (read_start, read_op_sel, read_size)
if self.num_cmd_delay > 0:
read_start = self.seq.Prev(read_start, self.num_cmd_delay)
read_op_sel = self.seq.Prev(read_op_sel, self.num_cmd_delay)
read_size = self.seq.Prev(read_size, self.num_cmd_delay)
self.seq.If(read_start)(
self.read_idle(0)
)
self.seq.If(read_start)(
self.read_start(1),
self.read_op_sel(read_op_sel),
self.read_size(read_size),
)
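# Dispatch on the AXI/FIFO width ratio: same width, narrow AXI (pack several
# AXI beats into one FIFO word), or wide AXI (split one AXI beat into
# several FIFO words).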
def _synthesize_read_fsm_fifo(self, fifo):
fifo_datawidth = fifo.datawidth
if not isinstance(fifo_datawidth, int):
raise TypeError("fifo_datawidth must be int, not '%s'" %
str(type(fifo_datawidth)))
if self.datawidth == fifo_datawidth:
return self._synthesize_read_fsm_fifo_same(fifo, fifo_datawidth)
if self.datawidth < fifo_datawidth:
return self._synthesize_read_fsm_fifo_narrow(fifo, fifo_datawidth)
return self._synthesize_read_fsm_fifo_wide(fifo, fifo_datawidth)
def _synthesize_read_fsm_fifo_same(self, fifo, fifo_datawidth):
op_id = self._get_read_op_id_fifo(fifo)
if op_id in self.read_ops:
""" already synthesized op """
return
if self.read_fsm is not None:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_fsm
data = self.read_data_wire
valid = self.read_valid_wire
rest_size = self.read_rest_size
# state 0
fsm.set_index(0)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
ack, _ = fifo.enq_rtl(data, cond=valid_cond)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name, 'read_fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_fsm = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name, 'read_rest_size']),
self.addrwidth + 1, initval=0)
self.read_rest_size = rest_size
# state 0
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(self.read_start)(
rest_size(self.read_size)
)
fsm.If(cond).goto_next()
# state 1
ready = vtypes.Not(fifo.almost_full)
read_cond = vtypes.Ands(fsm.here, ready)
data, last, _id, user, dest, valid = self.read_data(cond=read_cond)
self.read_data_wire = data
self.read_valid_wire = valid
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
ack, _ = fifo.enq_rtl(data, cond=valid_cond)
fsm.If(valid_cond)(
rest_size.dec()
)
fsm.If(valid, rest_size <= 1).goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
def _synthesize_read_fsm_fifo_narrow(self, fifo, fifo_datawidth):
""" axi.datawidth < fifo.datawidth """
if fifo_datawidth % self.datawidth != 0:
raise ValueError(
'fifo_datawidth must be a multiple of axi.datawidth')
pack_size = fifo_datawidth // self.datawidth
dma_size = (self.read_size << int(math.log(pack_size, 2))
if math.log(pack_size, 2) % 1.0 == 0.0 else
self.read_size * pack_size)
op_id = self._get_read_op_id_fifo(fifo)
if op_id in self.read_ops:
""" already synthesized op """
return
if pack_size in self.read_narrow_fsms:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_narrow_fsms[pack_size]
pack_count = self.read_narrow_pack_counts[pack_size]
data = self.read_narrow_data_wires[pack_size]
valid = self.read_narrow_valid_wires[pack_size]
rest_size = self.read_narrow_rest_size_wires[pack_size]
# state 0
fsm.set_index(0)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
wdata = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'wdata']),
fifo_datawidth, initval=0)
wvalid = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'wvalid']),
initval=0)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
ack, _ = fifo.enq_rtl(wdata, cond=wvalid)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(rest_size == 0, pack_count > 0)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(valid_cond)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(rest_size == 0, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'read_narrow', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_narrow_fsms[pack_size] = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
self.read_narrow_rest_size_wires[pack_size] = rest_size
# state 0
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(self.read_start)(
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
pack_count = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.read_narrow_pack_counts[pack_size] = pack_count
ready = vtypes.Not(fifo.almost_full)
read_cond = vtypes.Ands(fsm.here, ready)
data, last, _id, user, dest, valid = self.read_data(cond=read_cond)
self.read_narrow_data_wires[pack_size] = data
self.read_narrow_valid_wires[pack_size] = valid
wdata = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'wdata']),
fifo_datawidth, initval=0)
wvalid = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'wvalid']),
initval=0)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
ack, _ = fifo.enq_rtl(wdata, cond=wvalid)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(rest_size == 0, pack_count > 0)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(valid_cond)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(rest_size == 0, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond)(
rest_size.dec()
)
fsm.If(wvalid, rest_size == 0).goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
def _synthesize_read_fsm_fifo_wide(self, fifo, fifo_datawidth):
""" axi.datawidth > fifo.datawidth """
if self.datawidth % fifo_datawidth != 0:
raise ValueError(
'axi.datawidth must be a multiple of fifo_datawidth')
pack_size = self.datawidth // fifo_datawidth
shamt = int(math.log(pack_size, 2))
res = vtypes.Mux(
vtypes.And(self.read_size, 2 ** shamt - 1) > 0, 1, 0)
dma_size = (self.read_size >> shamt) + res
actual_read_size = dma_size << shamt
op_id = self._get_read_op_id_fifo(fifo)
if op_id in self.read_ops:
""" already synthesized op """
return
if pack_size in self.read_wide_fsms:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_wide_fsms[pack_size]
pack_count = self.read_wide_pack_counts[pack_size]
data = self.read_wide_data_wires[pack_size]
valid = self.read_wide_valid_wires[pack_size]
rest_size = self.read_wide_rest_size_wires[pack_size]
# state 0
fsm.set_index(0)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
stay_cond = self.read_op_sel == op_id
fsm.Delay(1)(
wvalid(0)
)
fsm.If(pack_count == 0, valid_cond)(
wdata(data),
wvalid(1),
pack_count.inc()
)
fsm.If(pack_count > 0, stay_cond)(
wdata(wdata >> fifo_datawidth),
wvalid(1),
pack_count.inc()
)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'read_wide', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_wide_fsms[pack_size] = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
self.read_wide_rest_size_wires[pack_size] = rest_size
# state 0
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(self.read_start)(
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
pack_count = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.read_wide_pack_counts[pack_size] = pack_count
ready = vtypes.Not(fifo.almost_full)
read_cond = vtypes.Ands(fsm.here, ready)
cond = vtypes.Ands(fsm.here, pack_count == 0, read_cond)
data, last, _id, user, dest, valid = self.read_data(cond=cond)
self.read_wide_data_wires[pack_size] = data
self.read_wide_valid_wires[pack_size] = valid
wdata = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'wdata']),
self.datawidth, initval=0)
wvalid = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'wvalid']),
initval=0)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
stay_cond = self.read_op_sel == op_id
ack, _ = fifo.enq_rtl(wdata, cond=wvalid)
wlast = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'wlast']),
initval=0)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(pack_count == 0, valid_cond)(
wdata(data),
wvalid(1),
wlast(last),
pack_count.inc()
)
fsm.If(pack_count > 0, stay_cond)(
wdata(wdata >> fifo_datawidth),
wvalid(1),
pack_count.inc()
)
fsm.If(pack_count == pack_size - 1)(
pack_count(0)
)
fsm.If(pack_count == 0, valid_cond)(
rest_size.dec()
)
fsm.If(pack_count == pack_size - 1, rest_size == 0).goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
```
#### File: veriloggen/types/axi.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import functools
import math
from collections import defaultdict
import veriloggen.core.vtypes as vtypes
from veriloggen.seq.seq import Seq
from veriloggen.fsm.fsm import FSM
import veriloggen.dataflow as _df
from veriloggen.dataflow.dataflow import DataflowManager
from veriloggen.dataflow.dtypes import make_condition, read_multi
from veriloggen.dataflow.dtypes import _Numeric as df_numeric
from . import util
BURST_FIXED = 0b00
BURST_INCR = 0b01
BURST_WRAP = 0b10
AxCACHE_NONCOHERENT = 0b0011
AxCACHE_COHERENT = 0b1111
AxPROT_NONCOHERENT = 0b000
AxPROT_COHERENT = 0b010
AxUSER_NONCOHERENT = 0b00
AxUSER_COHERENT = 0b01
xUSER_DEFAULT = 0b00
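# Merge an additional ready condition into a ready signal's continuous
# assignment: create the assign if none exists, otherwise OR the new value
# into the existing right-hand side and re-append the updated assign.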
def _connect_ready(m, var, val):
prev_assign = var._get_assign()
if not prev_assign:
var.assign(val)
else:
prev_assign.overwrite_right(
vtypes.Ors(prev_assign.statement.right, val))
m.remove(prev_assign)
m.append(prev_assign)
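# Common container for AXI channel parameters: module handle, signal-name
# prefix, data/address widths, optional ID/user widths, and the port-type
# factories used to create input or output(-reg) ports.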
class AxiInterfaceBase(object):
_I = util.t_Input
_O = util.t_OutputReg
def __init__(self, m, name=None,
datawidth=32, addrwidth=32,
id_width=0, user_width=0,
itype=None, otype=None):
if itype is None:
itype = self._I
if otype is None:
otype = self._O
self.m = m
self.name = name
self.datawidth = datawidth
self.addrwidth = addrwidth
self.id_width = id_width
self.user_width = user_width
self.itype = itype
self.otype = otype
class AxiLiteInterfaceBase(AxiInterfaceBase):
_I = util.t_Input
_O = util.t_OutputReg
def __init__(self, m, name=None,
datawidth=32, addrwidth=32,
itype=None, otype=None):
AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
None, None,
itype, otype)
class AxiStreamInterfaceBase(AxiInterfaceBase):
_I = util.t_Input
_O = util.t_OutputReg
def __init__(self, m, name=None,
datawidth=32,
id_width=0, user_width=0, dest_width=0,
itype=None, otype=None):
AxiInterfaceBase.__init__(self, m, name, datawidth, None,
id_width, user_width,
itype, otype)
self.dest_width = dest_width
class AxiWriteAddress(AxiInterfaceBase):
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
id_width=0, user_width=2,
itype=None, otype=None):
AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
id_width, user_width, itype, otype)
if isinstance(id_width, int) and id_width == 0:
self.awid = None
else:
self.awid = util.make_port(
m, self.otype, name + '_awid', self.id_width, initval=0)
self.awaddr = util.make_port(
m, self.otype, name + '_awaddr', self.addrwidth, initval=0)
self.awlen = util.make_port(
m, self.otype, name + '_awlen', 8, initval=0)
self.awsize = util.make_port(
m, self.otype, name + '_awsize', 3, initval=0, no_reg=True)
self.awburst = util.make_port(
m, self.otype, name + '_awburst', 2, initval=0, no_reg=True)
self.awlock = util.make_port(
m, self.otype, name + '_awlock', 1, initval=0, no_reg=True)
self.awcache = util.make_port(
m, self.otype, name + '_awcache', 4, initval=0, no_reg=True)
self.awprot = util.make_port(
m, self.otype, name + '_awprot', 3, initval=0, no_reg=True)
self.awqos = util.make_port(
m, self.otype, name + '_awqos', 4, initval=0, no_reg=True)
if isinstance(user_width, int) and user_width == 0:
self.awuser = None
else:
self.awuser = util.make_port(
m, self.otype, name + '_awuser', self.user_width, initval=0, no_reg=True)
self.awvalid = util.make_port(
m, self.otype, name + '_awvalid', None, initval=0)
self.awready = util.make_port(
m, self.itype, name + '_awready', None, initval=0)
class AxiLiteWriteAddress(AxiLiteInterfaceBase):
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
itype=None, otype=None):
AxiLiteInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
itype, otype)
self.awaddr = util.make_port(
m, self.otype, name + '_awaddr', self.addrwidth, initval=0)
self.awcache = util.make_port(
m, self.otype, name + '_awcache', 4, initval=0, no_reg=True)
self.awprot = util.make_port(
m, self.otype, name + '_awprot', 3, initval=0, no_reg=True)
self.awvalid = util.make_port(
m, self.otype, name + '_awvalid', None, initval=0)
self.awready = util.make_port(
m, self.itype, name + '_awready', None, initval=0)
class AxiWriteData(AxiInterfaceBase):
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
id_width=0, user_width=0,
itype=None, otype=None):
AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
id_width, user_width, itype, otype)
self.wdata = util.make_port(
m, self.otype, name + '_wdata', self.datawidth, initval=0)
self.wstrb = util.make_port(
m, self.otype, name + '_wstrb', self.datawidth // 8, initval=0)
self.wlast = util.make_port(
m, self.otype, name + '_wlast', None, initval=0)
if isinstance(user_width, int) and user_width == 0:
self.wuser = None
else:
self.wuser = util.make_port(
m, self.otype, name + '_wuser', self.user_width, initval=0, no_reg=True)
self.wvalid = util.make_port(
m, self.otype, name + '_wvalid', None, initval=0)
self.wready = util.make_port(
m, self.itype, name + '_wready', None, initval=0)
class AxiLiteWriteData(AxiLiteInterfaceBase):
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
itype=None, otype=None):
AxiLiteInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
itype, otype)
self.wdata = util.make_port(
m, self.otype, name + '_wdata', self.datawidth, initval=0)
self.wstrb = util.make_port(
m, self.otype, name + '_wstrb', self.datawidth // 8, initval=0)
self.wvalid = util.make_port(
m, self.otype, name + '_wvalid', None, initval=0)
self.wready = util.make_port(
m, self.itype, name + '_wready', None, initval=0)
class AxiWriteResponse(AxiInterfaceBase):
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
id_width=0, user_width=0,
itype=None, otype=None):
AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
id_width, user_width, itype, otype)
if isinstance(id_width, int) and id_width == 0:
self.bid = None
else:
self.bid = util.make_port(
m, self.itype, name + '_bid', self.id_width, initval=0)
self.bresp = util.make_port(
m, self.itype, name + '_bresp', 2, initval=0, no_reg=True)
if isinstance(user_width, int) and user_width == 0:
self.buser = None
else:
self.buser = util.make_port(
m, self.itype, name + '_buser', self.user_width, initval=0, no_reg=True)
self.bvalid = util.make_port(
m, self.itype, name + '_bvalid', None, initval=0)
self.bready = util.make_port(
m, self.otype, name + '_bready', None, initval=0, no_reg=True)
class AxiLiteWriteResponse(AxiLiteInterfaceBase):
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
itype=None, otype=None):
AxiLiteInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
itype, otype)
self.bresp = util.make_port(
m, self.itype, name + '_bresp', 2, initval=0, no_reg=True)
self.bvalid = util.make_port(
m, self.itype, name + '_bvalid', None, initval=0)
self.bready = util.make_port(
m, self.otype, name + '_bready', None, initval=0, no_reg=True)
class AxiReadAddress(AxiInterfaceBase):
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
id_width=0, user_width=2,
itype=None, otype=None):
AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
id_width, user_width, itype, otype)
if isinstance(id_width, int) and id_width == 0:
self.arid = None
else:
self.arid = util.make_port(
m, self.otype, name + '_arid', self.id_width, initval=0)
self.araddr = util.make_port(
m, self.otype, name + '_araddr', self.addrwidth, initval=0)
self.arlen = util.make_port(
m, self.otype, name + '_arlen', 8, initval=0)
self.arsize = util.make_port(
m, self.otype, name + '_arsize', 3, initval=0, no_reg=True)
self.arburst = util.make_port(
m, self.otype, name + '_arburst', 2, initval=0, no_reg=True)
self.arlock = util.make_port(
m, self.otype, name + '_arlock', 1, initval=0, no_reg=True)
self.arcache = util.make_port(
m, self.otype, name + '_arcache', 4, initval=0, no_reg=True)
self.arprot = util.make_port(
m, self.otype, name + '_arprot', 3, initval=0, no_reg=True)
self.arqos = util.make_port(
m, self.otype, name + '_arqos', 4, initval=0, no_reg=True)
if isinstance(user_width, int) and user_width == 0:
self.aruser = None
else:
self.aruser = util.make_port(
m, self.otype, name + '_aruser', self.user_width, initval=0, no_reg=True)
self.arvalid = util.make_port(
m, self.otype, name + '_arvalid', None, initval=0)
self.arready = util.make_port(
m, self.itype, name + '_arready', None, initval=0)
class AxiLiteReadAddress(AxiLiteInterfaceBase):
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
itype=None, otype=None):
AxiLiteInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
itype, otype)
self.araddr = util.make_port(
m, self.otype, name + '_araddr', self.addrwidth, initval=0)
self.arcache = util.make_port(
m, self.otype, name + '_arcache', 4, initval=0, no_reg=True)
self.arprot = util.make_port(
m, self.otype, name + '_arprot', 3, initval=0, no_reg=True)
self.arvalid = util.make_port(
m, self.otype, name + '_arvalid', None, initval=0)
self.arready = util.make_port(
m, self.itype, name + '_arready', None, initval=0)
class AxiReadData(AxiInterfaceBase):
_O = util.t_Output
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
id_width=0, user_width=0,
itype=None, otype=None):
AxiInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
id_width, user_width, itype, otype)
if isinstance(id_width, int) and id_width == 0:
self.rid = None
else:
self.rid = util.make_port(
m, self.itype, name + '_rid', self.id_width, initval=0)
self.rdata = util.make_port(
m, self.itype, name + '_rdata', self.datawidth, initval=0)
self.rresp = util.make_port(
m, self.itype, name + '_rresp', 2, initval=0, no_reg=True)
self.rlast = util.make_port(
m, self.itype, name + '_rlast', None, initval=0)
if isinstance(user_width, int) and user_width == 0:
self.ruser = None
else:
self.ruser = util.make_port(
m, self.itype, name + '_ruser', self.user_width, initval=0, no_reg=True)
self.rvalid = util.make_port(
m, self.itype, name + '_rvalid', None, initval=0)
self.rready = util.make_port(
m, self.otype, name + '_rready', None, initval=0)
class AxiLiteReadData(AxiLiteInterfaceBase):
_O = util.t_Output
def __init__(self, m, name=None, datawidth=32, addrwidth=32,
itype=None, otype=None):
AxiLiteInterfaceBase.__init__(self, m, name, datawidth, addrwidth,
itype, otype)
self.rdata = util.make_port(
m, self.itype, name + '_rdata', self.datawidth, initval=0)
self.rresp = util.make_port(
m, self.itype, name + '_rresp', 2, initval=0, no_reg=True)
self.rvalid = util.make_port(
m, self.itype, name + '_rvalid', None, initval=0)
self.rready = util.make_port(
m, self.otype, name + '_rready', None, initval=0)
# AXI-Full Master
class AxiMasterWriteAddress(AxiWriteAddress):
pass
class AxiMasterWriteData(AxiWriteData):
pass
class AxiMasterWriteResponse(AxiWriteResponse):
pass
class AxiMasterReadAddress(AxiReadAddress):
pass
class AxiMasterReadData(AxiReadData):
pass
# AXI-Lite Master
class AxiLiteMasterWriteAddress(AxiLiteWriteAddress):
pass
class AxiLiteMasterWriteData(AxiLiteWriteData):
pass
class AxiLiteMasterWriteResponse(AxiLiteWriteResponse):
pass
class AxiLiteMasterReadAddress(AxiLiteReadAddress):
pass
class AxiLiteMasterReadData(AxiLiteReadData):
pass
# AXI-Full Slave
class AxiSlaveWriteAddress(AxiWriteAddress):
_I = util.t_Output
_O = util.t_Input
class AxiSlaveWriteData(AxiWriteData):
_I = util.t_Output
_O = util.t_Input
class AxiSlaveWriteResponse(AxiWriteResponse):
_I = util.t_OutputReg
_O = util.t_Input
class AxiSlaveReadAddress(AxiReadAddress):
_I = util.t_Output
_O = util.t_Input
class AxiSlaveReadData(AxiReadData):
_I = util.t_OutputReg
_O = util.t_Input
# AXI-Lite Slave
class AxiLiteSlaveWriteAddress(AxiLiteWriteAddress):
_I = util.t_Output
_O = util.t_Input
class AxiLiteSlaveWriteData(AxiLiteWriteData):
_I = util.t_Output
_O = util.t_Input
class AxiLiteSlaveWriteResponse(AxiLiteWriteResponse):
_I = util.t_OutputReg
_O = util.t_Input
class AxiLiteSlaveReadAddress(AxiLiteReadAddress):
_I = util.t_Output
_O = util.t_Input
class AxiLiteSlaveReadData(AxiLiteReadData):
_I = util.t_OutputReg
_O = util.t_Input
class AxiStreamInData(AxiStreamInterfaceBase):
_O = util.t_Output
def __init__(self, m, name=None, datawidth=32,
with_last=True, with_strb=False,
id_width=0, user_width=0, dest_width=0,
itype=None, otype=None):
AxiStreamInterfaceBase.__init__(self, m, name, datawidth,
id_width, user_width, dest_width,
itype, otype)
self.tdata = util.make_port(
m, self.itype, name + '_tdata', self.datawidth, initval=0)
self.tvalid = util.make_port(
m, self.itype, name + '_tvalid', None, initval=0)
self.tready = util.make_port(
m, self.otype, name + '_tready', None, initval=0)
if not with_last:
self.tlast = None
else:
self.tlast = util.make_port(
m, self.itype, name + '_tlast', initval=0)
if not with_strb:
self.tstrb = None
else:
self.tstrb = util.make_port(
m, self.itype, name + '_tstrb', self.datawidth // 8, initval=0)
if isinstance(user_width, int) and user_width == 0:
self.tuser = None
else:
self.tuser = util.make_port(
m, self.itype, name + '_tuser', self.user_width, initval=0)
if isinstance(id_width, int) and id_width == 0:
self.tid = None
else:
self.tid = util.make_port(
m, self.itype, name + '_tid', self.id_width, initval=0)
if isinstance(dest_width, int) and dest_width == 0:
self.tdest = None
else:
self.tdest = util.make_port(
m, self.itype, name + '_tdest', self.dest_width, initval=0)
class AxiStreamOutData(AxiStreamInData):
_I = util.t_OutputReg
_O = util.t_Input
# AXI-Full
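# Full AXI4 master helper: instantiates the five channels (AW/W/B/AR/R),
# assigns default channel attributes, tracks outstanding write bursts, and
# provides request/data helpers plus dataflow-based streaming transfers.
#
# Typical write sequence (sketch only; 'fsm', 'addr', and 'data' are
# caller-provided values, not defined in this file):
#   ack, counter = master.write_request_counter(addr, length=16, cond=fsm)
#   ack, last = master.write_data(data, counter, cond=fsm)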
class AxiMaster(object):
burst_size_width = 8
boundary_size = 4096
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
waddr_id_width=0, wdata_id_width=0, wresp_id_width=0,
raddr_id_width=0, rdata_id_width=0,
waddr_user_width=2, wdata_user_width=0, wresp_user_width=0,
raddr_user_width=2, rdata_user_width=0,
waddr_burst_mode=BURST_INCR, raddr_burst_mode=BURST_INCR,
waddr_cache_mode=AxCACHE_NONCOHERENT, raddr_cache_mode=AxCACHE_NONCOHERENT,
waddr_prot_mode=AxPROT_NONCOHERENT, raddr_prot_mode=AxPROT_NONCOHERENT,
waddr_user_mode=AxUSER_NONCOHERENT, wdata_user_mode=xUSER_DEFAULT,
raddr_user_mode=AxUSER_NONCOHERENT,
noio=False, nodataflow=False, outstanding_wcount_width=3):
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.addrwidth = addrwidth
self.noio = noio
if not hasattr(self.m, 'masterbus'):
self.m.masterbus = []
self.m.masterbus.append(self)
itype = util.t_Wire if noio else None
otype = util.t_Reg if noio else None
self.waddr = AxiMasterWriteAddress(m, name, datawidth, addrwidth,
waddr_id_width, waddr_user_width, itype, otype)
self.wdata = AxiMasterWriteData(m, name, datawidth, addrwidth,
wdata_id_width, wdata_user_width, itype, otype)
self.wresp = AxiMasterWriteResponse(m, name, datawidth, addrwidth,
wresp_id_width, wresp_user_width, itype, otype)
self.raddr = AxiMasterReadAddress(m, name, datawidth, addrwidth,
raddr_id_width, raddr_user_width, itype, otype)
otype = util.t_Wire if noio else None
self.rdata = AxiMasterReadData(m, name, datawidth, addrwidth,
rdata_id_width, rdata_user_width, itype, otype)
self.seq = Seq(m, name, clk, rst)
# default values
self.waddr.awsize.assign(int(math.log(self.datawidth / 8, 2)))
self.waddr.awburst.assign(waddr_burst_mode)
self.waddr.awlock.assign(0)
self.waddr.awcache.assign(waddr_cache_mode)
self.waddr.awprot.assign(waddr_prot_mode)
self.waddr.awqos.assign(0)
if self.waddr.awuser is not None:
self.waddr.awuser.assign(waddr_user_mode)
if self.wdata.wuser is not None:
self.wdata.wuser.assign(wdata_user_mode)
self.wresp.bready.assign(1)
self.raddr.arsize.assign(int(math.log(self.datawidth / 8, 2)))
self.raddr.arburst.assign(raddr_burst_mode)
self.raddr.arlock.assign(0)
self.raddr.arcache.assign(raddr_cache_mode)
self.raddr.arprot.assign(raddr_prot_mode)
self.raddr.arqos.assign(0)
if self.raddr.aruser is not None:
self.raddr.aruser.assign(raddr_user_mode)
self.write_counters = []
self.read_counters = []
# outstanding write request
if outstanding_wcount_width < 2:
raise ValueError("outstanding_wcount_width must be 2 or more.")
self.outstanding_wcount_width = outstanding_wcount_width
self.outstanding_wcount = self.m.TmpReg(self.outstanding_wcount_width, initval=0,
prefix='outstanding_wcount')
self.seq.If(vtypes.Ands(self.wdata.wlast, self.wdata.wvalid, self.wdata.wready),
vtypes.Not(vtypes.Ands(self.wresp.bvalid, self.wresp.bready)),
self.outstanding_wcount < 2 ** self.outstanding_wcount_width - 1)(
self.outstanding_wcount.inc()
)
self.seq.If(vtypes.Not(vtypes.Ands(self.wdata.wlast, self.wdata.wvalid, self.wdata.wready)),
vtypes.Ands(self.wresp.bvalid, self.wresp.bready),
self.outstanding_wcount > 0)(
self.outstanding_wcount.dec()
)
if nodataflow:
self.df = None
else:
self.df = DataflowManager(self.m, self.clk, self.rst)
self._write_disabled = False
self._read_disabled = False
def disable_write(self):
ports = [self.waddr.awaddr(0),
self.waddr.awlen(0),
self.waddr.awvalid(0),
self.wdata.wdata(0),
self.wdata.wstrb(0),
self.wdata.wlast(0),
self.wdata.wvalid(0)]
if self.waddr.awid is not None:
ports.insert(0, self.waddr.awid(0))
self.seq(
*ports
)
self._write_disabled = True
def disable_read(self):
ports = [self.raddr.araddr(0),
self.raddr.arlen(0),
self.raddr.arvalid(0)]
if self.raddr.arid is not None:
ports.insert(0, self.raddr.arid(0))
self.seq(
*ports
)
self.rdata.rready.assign(0)
self._read_disabled = True
def mask_addr(self, addr):
s = util.log2(self.datawidth // 8)
return (addr >> s) << s
def check_boundary(self, addr, length, datawidth=None, boundary_size=None):
if datawidth is None:
datawidth = self.datawidth
if boundary_size is None:
boundary_size = self.boundary_size
mask = boundary_size - 1
return ((addr & mask) + (length << util.log2(datawidth // 8))) >= boundary_size
def rest_boundary(self, addr, datawidth=None, boundary_size=None):
if datawidth is None:
datawidth = self.datawidth
if boundary_size is None:
boundary_size = self.boundary_size
mask = boundary_size - 1
return (vtypes.Int(boundary_size) - (addr & mask)) >> util.log2(datawidth // 8)
def write_acceptable(self):
return self.outstanding_wcount < 2 ** self.outstanding_wcount_width - 2
def write_request(self, addr, length=1, cond=None):
"""
@return ack
"""
if self._write_disabled:
raise TypeError('Write disabled.')
if isinstance(length, int) and length > 2 ** self.burst_size_width:
raise ValueError("length must be less than 257.")
if isinstance(length, int) and length < 1:
raise ValueError("length must be more than 0.")
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.waddr.awready, vtypes.Not(self.waddr.awvalid))
self.seq.If(ack)(
self.waddr.awid(0) if self.waddr.awid is not None else (),
self.waddr.awaddr(addr),
self.waddr.awlen(length - 1),
self.waddr.awvalid(1)
)
self.seq.Then().If(length == 0)(
self.waddr.awvalid(0)
)
# de-assert
self.seq.Delay(1)(
self.waddr.awvalid(0)
)
# retry
self.seq.If(vtypes.Ands(self.waddr.awvalid, vtypes.Not(self.waddr.awready)))(
self.waddr.awvalid(self.waddr.awvalid)
)
return ack
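# Same as write_request, but also allocates (or reuses) a burst counter that
# is loaded with 'length' when the request is accepted; write_data uses this
# counter to track the remaining beats.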
def write_request_counter(self, addr, length=1, cond=None, counter=None):
"""
@return ack, counter
"""
if self._write_disabled:
raise TypeError('Write disabled.')
if isinstance(length, int) and length > 2 ** self.burst_size_width:
raise ValueError("length must be less than 257.")
if isinstance(length, int) and length < 1:
raise ValueError("length must be more than 0.")
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.waddr.awready, vtypes.Not(self.waddr.awvalid))
if counter is None:
counter = self.m.TmpReg(self.burst_size_width + 1, initval=0,
prefix='counter')
self.write_counters.append(counter)
self.seq.If(vtypes.Ands(ack, counter == 0))(
self.waddr.awid(0) if self.waddr.awid is not None else (),
self.waddr.awaddr(addr),
self.waddr.awlen(length - 1),
self.waddr.awvalid(1),
counter(length)
)
self.seq.Then().If(length == 0)(
self.waddr.awvalid(0)
)
# de-assert
self.seq.Delay(1)(
self.waddr.awvalid(0)
)
# retry
self.seq.If(vtypes.Ands(self.waddr.awvalid, vtypes.Not(self.waddr.awready)))(
self.waddr.awvalid(self.waddr.awvalid)
)
return ack, counter
def write_data(self, data, counter=None, cond=None):
"""
@return ack, last
"""
if self._write_disabled:
raise TypeError('Write disabled.')
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.write_counters[-1]
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ands(counter > 0,
self.write_acceptable(),
vtypes.Ors(self.wdata.wready, vtypes.Not(self.wdata.wvalid)))
last = self.m.TmpReg(initval=0, prefix='last')
self.seq.If(vtypes.Ands(ack, counter > 0))(
self.wdata.wdata(data),
self.wdata.wvalid(1),
self.wdata.wlast(0),
self.wdata.wstrb(vtypes.Repeat(
vtypes.Int(1, 1), (self.wdata.datawidth // 8))),
counter.dec()
)
self.seq.Then().If(counter == 1)(
self.wdata.wlast(1),
last(1)
)
# de-assert
self.seq.Delay(1)(
self.wdata.wvalid(0),
self.wdata.wlast(0),
last(0)
)
# retry
self.seq.If(vtypes.Ands(self.wdata.wvalid, vtypes.Not(self.wdata.wready)))(
self.wdata.wvalid(self.wdata.wvalid),
self.wdata.wlast(self.wdata.wlast),
last(last)
)
return ack, last
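# Stream a dataflow variable onto the write-data channel: each accepted beat
# decrements the burst counter, wlast is asserted on the final beat, and the
# returned 'done' signal is high when that last beat is accepted.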
def write_dataflow(self, data, counter=None, cond=None, when=None):
"""
@return done
'data' and 'when' must be dataflow variables
"""
if self._write_disabled:
raise TypeError('Write disabled.')
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.write_counters[-1]
ack = vtypes.Ands(counter > 0,
self.write_acceptable(),
vtypes.Ors(self.wdata.wready, vtypes.Not(self.wdata.wvalid)))
last = self.m.TmpReg(initval=0, prefix='last')
if cond is None:
cond = ack
else:
cond = (cond, ack)
if when is None or not isinstance(when, df_numeric):
raw_data, raw_valid = data.read(cond=cond)
else:
data_list, raw_valid = read_multi(self.m, data, when, cond=cond)
raw_data = data_list[0]
when = data_list[1]
when_cond = make_condition(when, ready=cond)
if when_cond is not None:
raw_valid = vtypes.Ands(when_cond, raw_valid)
# write condition
self.seq.If(raw_valid)
self.seq.If(vtypes.Ands(ack, counter > 0))(
self.wdata.wdata(raw_data),
self.wdata.wvalid(1),
self.wdata.wlast(0),
self.wdata.wstrb(vtypes.Repeat(
vtypes.Int(1, 1), (self.wdata.datawidth // 8))),
counter.dec()
)
self.seq.Then().If(counter == 1)(
self.wdata.wlast(1),
last(1)
)
# de-assert
self.seq.Delay(1)(
self.wdata.wvalid(0),
self.wdata.wlast(0),
last(0)
)
# retry
self.seq.If(vtypes.Ands(self.wdata.wvalid, vtypes.Not(self.wdata.wready)))(
self.wdata.wvalid(self.wdata.wvalid),
self.wdata.wlast(self.wdata.wlast),
last(last)
)
done = vtypes.Ands(last, self.wdata.wvalid, self.wdata.wready)
return done
def write_completed(self):
return self.outstanding_wcount == 0
def read_request(self, addr, length=1, cond=None):
"""
@return ack
"""
if self._read_disabled:
raise TypeError('Read disabled.')
if isinstance(length, int) and length > 2 ** self.burst_size_width:
raise ValueError("length must be less than 257.")
if isinstance(length, int) and length < 1:
raise ValueError("length must be more than 0.")
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.raddr.arready, vtypes.Not(self.raddr.arvalid))
self.seq.If(ack)(
self.raddr.arid(0) if self.raddr.arid is not None else (),
self.raddr.araddr(addr),
self.raddr.arlen(length - 1),
self.raddr.arvalid(1)
)
# de-assert
self.seq.Delay(1)(
self.raddr.arvalid(0)
)
# retry
self.seq.If(vtypes.Ands(self.raddr.arvalid, vtypes.Not(self.raddr.arready)))(
self.raddr.arvalid(self.raddr.arvalid)
)
return ack
def read_request_counter(self, addr, length=1, cond=None, counter=None):
"""
@return ack, counter
"""
if self._read_disabled:
raise TypeError('Read disabled.')
if isinstance(length, int) and length > 2 ** self.burst_size_width:
raise ValueError("length must be less than 257.")
if isinstance(length, int) and length < 1:
raise ValueError("length must be more than 0.")
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.raddr.arready, vtypes.Not(self.raddr.arvalid))
if counter is None:
counter = self.m.TmpReg(self.burst_size_width + 1, initval=0, prefix='counter')
self.read_counters.append(counter)
self.seq.If(vtypes.Ands(ack, counter == 0))(
self.raddr.arid(0) if self.raddr.arid is not None else (),
self.raddr.araddr(addr),
self.raddr.arlen(length - 1),
self.raddr.arvalid(1),
counter(length)
)
# de-assert
self.seq.Delay(1)(
self.raddr.arvalid(0)
)
# retry
self.seq.If(vtypes.Ands(self.raddr.arvalid, vtypes.Not(self.raddr.arready)))(
self.raddr.arvalid(self.raddr.arvalid)
)
return ack, counter
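# Connect rready to the given condition and expose the raw read channel:
# returns (rdata, valid, rlast), decrementing the burst counter on each
# accepted beat.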
def read_data(self, counter=None, cond=None):
"""
@return data, valid, last
"""
if self._read_disabled:
raise TypeError('Read disabled.')
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.read_counters[-1]
ready = make_condition(cond)
val = 1 if ready is None else ready
_connect_ready(self.rdata.rready._get_module(), self.rdata.rready, val)
ack = vtypes.Ands(self.rdata.rready, self.rdata.rvalid)
data = self.rdata.rdata
valid = ack
last = self.rdata.rlast
self.seq.If(vtypes.Ands(ack, counter > 0))(
counter.dec()
)
return data, valid, last
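# Dataflow version of read_data: wraps rdata/rlast into dataflow variables
# with their own ready wires and returns (data, last, done).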
def read_dataflow(self, counter=None, cond=None, point=0, signed=True):
"""
@return data, last, done
"""
if self._read_disabled:
raise TypeError('Read disabled.')
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.read_counters[-1]
data_ready = self.m.TmpWire(prefix='data_ready')
last_ready = self.m.TmpWire(prefix='last_ready')
data_ready.assign(1)
last_ready.assign(1)
if cond is None:
cond = (data_ready, last_ready)
elif isinstance(cond, (tuple, list)):
cond = tuple(list(cond) + [data_ready, last_ready])
else:
cond = (cond, data_ready, last_ready)
ready = make_condition(*cond)
val = 1 if ready is None else ready
_connect_ready(self.rdata.rready._get_module(), self.rdata.rready, val)
ack = vtypes.Ands(self.rdata.rready, self.rdata.rvalid)
data = self.rdata.rdata
valid = self.rdata.rvalid
last = self.rdata.rlast
self.seq.If(vtypes.Ands(ack, counter > 0))(
counter.dec()
)
df = self.df if self.df is not None else _df
df_data = df.Variable(data, valid, data_ready,
width=self.datawidth, point=point, signed=signed)
df_last = df.Variable(last, valid, last_ready, width=1, signed=False)
done = vtypes.Ands(last, self.rdata.rvalid, self.rdata.rready)
return df_data, df_last, done
def connect(self, ports, name):
if not self.noio:
raise ValueError('I/O ports cannot be connected to others.')
if '_'.join([name, 'awid']) in ports:
awid = ports['_'.join([name, 'awid'])]
else:
awid = None
awaddr = ports['_'.join([name, 'awaddr'])]
awlen = ports['_'.join([name, 'awlen'])]
awsize = ports['_'.join([name, 'awsize'])]
awburst = ports['_'.join([name, 'awburst'])]
awlock = ports['_'.join([name, 'awlock'])]
awcache = ports['_'.join([name, 'awcache'])]
awprot = ports['_'.join([name, 'awprot'])]
awqos = ports['_'.join([name, 'awqos'])]
if '_'.join([name, 'awuser']) in ports:
awuser = ports['_'.join([name, 'awuser'])]
else:
awuser = None
awvalid = ports['_'.join([name, 'awvalid'])]
awready = ports['_'.join([name, 'awready'])]
if awid is not None:
awid.connect(self.waddr.awid if self.waddr.awid is not None else 0)
awaddr.connect(self.waddr.awaddr)
awlen.connect(self.waddr.awlen)
awsize.connect(self.waddr.awsize)
awburst.connect(self.waddr.awburst)
awlock.connect(self.waddr.awlock)
awcache.connect(self.waddr.awcache)
awprot.connect(self.waddr.awprot)
awqos.connect(self.waddr.awqos)
if awuser is not None:
awuser.connect(self.waddr.awuser if self.waddr.awuser is not None else 0)
awvalid.connect(self.waddr.awvalid)
self.waddr.awready.connect(awready)
wdata = ports['_'.join([name, 'wdata'])]
wstrb = ports['_'.join([name, 'wstrb'])]
wlast = ports['_'.join([name, 'wlast'])]
if '_'.join([name, 'wuser']) in ports:
wuser = ports['_'.join([name, 'wuser'])]
else:
wuser = None
wvalid = ports['_'.join([name, 'wvalid'])]
wready = ports['_'.join([name, 'wready'])]
wdata.connect(self.wdata.wdata)
wstrb.connect(self.wdata.wstrb)
wlast.connect(self.wdata.wlast)
if wuser is not None:
wuser.connect(self.wdata.wuser if self.wdata.wuser is not None else 0)
wvalid.connect(self.wdata.wvalid)
self.wdata.wready.connect(wready)
if '_'.join([name, 'bid']) in ports:
bid = ports['_'.join([name, 'bid'])]
else:
bid = None
bresp = ports['_'.join([name, 'bresp'])]
if '_'.join([name, 'buser']) in ports:
buser = ports['_'.join([name, 'buser'])]
else:
buser = None
bvalid = ports['_'.join([name, 'bvalid'])]
bready = ports['_'.join([name, 'bready'])]
if self.wresp.bid is not None:
self.wresp.bid.connect(bid if bid is not None else 0)
self.wresp.bresp.connect(bresp)
if self.wresp.buser is not None:
self.wresp.buser.connect(buser if buser is not None else 0)
self.wresp.bvalid.connect(bvalid)
bready.connect(self.wresp.bready)
if '_'.join([name, 'arid']) in ports:
arid = ports['_'.join([name, 'arid'])]
else:
arid = None
araddr = ports['_'.join([name, 'araddr'])]
arlen = ports['_'.join([name, 'arlen'])]
arsize = ports['_'.join([name, 'arsize'])]
arburst = ports['_'.join([name, 'arburst'])]
arlock = ports['_'.join([name, 'arlock'])]
arcache = ports['_'.join([name, 'arcache'])]
arprot = ports['_'.join([name, 'arprot'])]
arqos = ports['_'.join([name, 'arqos'])]
if '_'.join([name, 'aruser']) in ports:
aruser = ports['_'.join([name, 'aruser'])]
else:
aruser = None
arvalid = ports['_'.join([name, 'arvalid'])]
arready = ports['_'.join([name, 'arready'])]
if arid is not None:
arid.connect(self.raddr.arid if self.raddr.arid is not None else 0)
araddr.connect(self.raddr.araddr)
arlen.connect(self.raddr.arlen)
arsize.connect(self.raddr.arsize)
arburst.connect(self.raddr.arburst)
arlock.connect(self.raddr.arlock)
arcache.connect(self.raddr.arcache)
arprot.connect(self.raddr.arprot)
arqos.connect(self.raddr.arqos)
if aruser is not None:
aruser.connect(self.raddr.aruser if self.raddr.aruser is not None else 0)
arvalid.connect(self.raddr.arvalid)
self.raddr.arready.connect(arready)
if '_'.join([name, 'rid']) in ports:
rid = ports['_'.join([name, 'rid'])]
else:
rid = None
rdata = ports['_'.join([name, 'rdata'])]
rresp = ports['_'.join([name, 'rresp'])]
rlast = ports['_'.join([name, 'rlast'])]
if '_'.join([name, 'ruser']) in ports:
ruser = ports['_'.join([name, 'ruser'])]
else:
ruser = None
rvalid = ports['_'.join([name, 'rvalid'])]
rready = ports['_'.join([name, 'rready'])]
if self.rdata.rid is not None:
self.rdata.rid.connect(rid if rid is not None else 0)
self.rdata.rdata.connect(rdata)
self.rdata.rresp.connect(rresp)
self.rdata.rlast.connect(rlast)
if self.rdata.ruser is not None:
self.rdata.ruser.connect(ruser if ruser is not None else 0)
self.rdata.rvalid.connect(rvalid)
rready.connect(self.rdata.rready)
# AXI-Lite
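# AXI4-Lite master: single-beat transfers only (length must be 1), with no
# burst counters and no dataflow transfers; otherwise mirrors AxiMaster.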
class AxiLiteMaster(AxiMaster):
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
waddr_cache_mode=AxCACHE_NONCOHERENT, raddr_cache_mode=AxCACHE_NONCOHERENT,
waddr_prot_mode=AxPROT_NONCOHERENT, raddr_prot_mode=AxPROT_NONCOHERENT,
noio=False, nodataflow=False, outstanding_wcount_width=3):
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.addrwidth = addrwidth
self.noio = noio
if not hasattr(self.m, 'masterbus'):
self.m.masterbus = []
self.m.masterbus.append(self)
itype = util.t_Wire if noio else None
otype = util.t_Reg if noio else None
self.waddr = AxiLiteMasterWriteAddress(m, name, datawidth, addrwidth,
itype, otype)
self.wdata = AxiLiteMasterWriteData(m, name, datawidth, addrwidth,
itype, otype)
self.wresp = AxiLiteMasterWriteResponse(m, name, datawidth, addrwidth,
itype, otype)
self.raddr = AxiLiteMasterReadAddress(m, name, datawidth, addrwidth,
itype, otype)
otype = util.t_Wire if noio else None
self.rdata = AxiLiteMasterReadData(m, name, datawidth, addrwidth,
itype, otype)
self.seq = Seq(m, name, clk, rst)
# default values
self.waddr.awcache.assign(waddr_cache_mode)
self.waddr.awprot.assign(waddr_prot_mode)
self.wresp.bready.assign(1)
self.raddr.arcache.assign(raddr_cache_mode)
self.raddr.arprot.assign(raddr_prot_mode)
# outstanding write request
if outstanding_wcount_width < 2:
raise ValueError("outstanding_wcount_width must be 2 or more.")
self.outstanding_wcount_width = outstanding_wcount_width
self.outstanding_wcount = self.m.TmpReg(self.outstanding_wcount_width, initval=0,
prefix='outstanding_wcount')
self.seq.If(vtypes.Ands(self.wdata.wvalid, self.wdata.wready),
vtypes.Not(vtypes.Ands(self.wresp.bvalid, self.wresp.bready)),
self.outstanding_wcount < (2 ** self.outstanding_wcount_width - 1))(
self.outstanding_wcount.inc()
)
self.seq.If(vtypes.Not(vtypes.Ands(self.wdata.wvalid, self.wdata.wready)),
vtypes.Ands(self.wresp.bvalid, self.wresp.bready),
self.outstanding_wcount > 0)(
self.outstanding_wcount.dec()
)
if nodataflow:
self.df = None
else:
self.df = DataflowManager(self.m, self.clk, self.rst)
self._write_disabled = False
self._read_disabled = False
def disable_write(self):
ports = [self.waddr.awaddr(0),
self.waddr.awvalid(0),
self.wdata.wdata(0),
self.wdata.wstrb(0),
self.wdata.wvalid(0)]
self.seq(
*ports
)
self._write_disabled = True
def disable_read(self):
ports = [self.raddr.araddr(0),
self.raddr.arvalid(0)]
self.seq(
*ports
)
self.rdata.rready.assign(0)
self._read_disabled = True
def write_request(self, addr, length=1, cond=None):
"""
@return ack
"""
if self._write_disabled:
raise TypeError('Write disabled.')
if length != 1:
raise ValueError('length must be 1 for lite-interface.')
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.waddr.awready, vtypes.Not(self.waddr.awvalid))
self.seq.If(ack)(
self.waddr.awaddr(addr),
self.waddr.awvalid(1),
)
# de-assert
self.seq.Delay(1)(
self.waddr.awvalid(0)
)
# retry
self.seq.If(vtypes.Ands(self.waddr.awvalid, vtypes.Not(self.waddr.awready)))(
self.waddr.awvalid(self.waddr.awvalid)
)
return ack
def write_data(self, data, cond=None):
"""
@return ack
"""
if self._write_disabled:
raise TypeError('Write disabled.')
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ands(self.write_acceptable(),
vtypes.Ors(self.wdata.wready, vtypes.Not(self.wdata.wvalid)))
self.seq.If(ack)(
self.wdata.wdata(data),
self.wdata.wvalid(1),
self.wdata.wstrb(vtypes.Repeat(
vtypes.Int(1, 1), (self.wdata.datawidth // 8)))
)
# de-assert
self.seq.Delay(1)(
self.wdata.wvalid(0),
)
# retry
self.seq.If(vtypes.Ands(self.wdata.wvalid, vtypes.Not(self.wdata.wready)))(
self.wdata.wvalid(self.wdata.wvalid)
)
return ack
def write_dataflow(self, data, counter=None, cond=None, when=None):
"""
@return done
'data' and 'when' must be dataflow variables
"""
raise TypeError('lite interface does not support dataflow operations.')
def write_completed(self):
return self.outstanding_wcount == 0
def read_request(self, addr, length=1, cond=None):
"""
@return ack
"""
if self._read_disabled:
raise TypeError('Read disabled.')
if length != 1:
raise ValueError('length must be 1 for lite-interface.')
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.raddr.arready, vtypes.Not(self.raddr.arvalid))
self.seq.If(ack)(
self.raddr.araddr(addr),
self.raddr.arvalid(1)
)
# de-assert
self.seq.Delay(1)(
self.raddr.arvalid(0)
)
# retry
self.seq.If(vtypes.Ands(self.raddr.arvalid, vtypes.Not(self.raddr.arready)))(
self.raddr.arvalid(self.raddr.arvalid)
)
return ack
def read_data(self, cond=None):
"""
@return data, valid
"""
if self._read_disabled:
raise TypeError('Read disabled.')
ready = make_condition(cond)
val = 1 if ready is None else ready
_connect_ready(self.rdata.rready._get_module(), self.rdata.rready, val)
ack = vtypes.Ands(self.rdata.rready, self.rdata.rvalid)
data = self.rdata.rdata
valid = ack
return data, valid
def read_dataflow(self, counter=None, cond=None, point=0, signed=True):
"""
@return data, last, done
"""
raise TypeError('lite interface does not support dataflow operations.')
def connect(self, ports, name):
if not self.noio:
raise ValueError('I/O ports cannot be connected to others.')
awaddr = ports['_'.join([name, 'awaddr'])]
awcache = ports['_'.join([name, 'awcache'])]
awprot = ports['_'.join([name, 'awprot'])]
awvalid = ports['_'.join([name, 'awvalid'])]
awready = ports['_'.join([name, 'awready'])]
awaddr.connect(self.waddr.awaddr)
awcache.connect(self.waddr.awcache)
awprot.connect(self.waddr.awprot)
awvalid.connect(self.waddr.awvalid)
self.waddr.awready.connect(awready)
wdata = ports['_'.join([name, 'wdata'])]
wstrb = ports['_'.join([name, 'wstrb'])]
wvalid = ports['_'.join([name, 'wvalid'])]
wready = ports['_'.join([name, 'wready'])]
wdata.connect(self.wdata.wdata)
wstrb.connect(self.wdata.wstrb)
wvalid.connect(self.wdata.wvalid)
self.wdata.wready.connect(wready)
bresp = ports['_'.join([name, 'bresp'])]
bvalid = ports['_'.join([name, 'bvalid'])]
bready = ports['_'.join([name, 'bready'])]
self.wresp.bresp.connect(bresp)
self.wresp.bvalid.connect(bvalid)
bready.connect(self.wresp.bready)
araddr = ports['_'.join([name, 'araddr'])]
arcache = ports['_'.join([name, 'arcache'])]
arprot = ports['_'.join([name, 'arprot'])]
arvalid = ports['_'.join([name, 'arvalid'])]
arready = ports['_'.join([name, 'arready'])]
araddr.connect(self.raddr.araddr)
arcache.connect(self.raddr.arcache)
arprot.connect(self.raddr.arprot)
arvalid.connect(self.raddr.arvalid)
self.raddr.arready.connect(arready)
rdata = ports['_'.join([name, 'rdata'])]
rresp = ports['_'.join([name, 'rresp'])]
rvalid = ports['_'.join([name, 'rvalid'])]
rready = ports['_'.join([name, 'rready'])]
self.rdata.rdata.connect(rdata)
self.rdata.rresp.connect(rresp)
self.rdata.rvalid.connect(rvalid)
rready.connect(self.rdata.rready)
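# AXI4 slave helper: generates the write-response handshake automatically
# (bvalid is raised after the last write beat) and provides pull_* helpers
# that accept incoming address and data transactions.
#
# Typical write-side usage (sketch only; 'cond' is a caller-side enable):
#   addr, counter, valid = slave.pull_write_request_counter(cond=cond)
#   data, mask, valid, last = slave.pull_write_data(counter, cond=cond)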
class AxiSlave(object):
burst_size_width = 8
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
waddr_id_width=0, wdata_id_width=0, wresp_id_width=0,
raddr_id_width=0, rdata_id_width=0,
waddr_user_width=2, wdata_user_width=0, wresp_user_width=0,
raddr_user_width=2, rdata_user_width=0,
wresp_user_mode=xUSER_DEFAULT,
rdata_user_mode=xUSER_DEFAULT,
noio=False, nodataflow=False):
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.addrwidth = addrwidth
self.noio = noio
if not hasattr(self.m, 'slavebus'):
self.m.slavebus = []
self.m.slavebus.append(self)
itype = util.t_Wire if noio else None
otype = util.t_Wire if noio else None
self.waddr = AxiSlaveWriteAddress(m, name, datawidth, addrwidth,
waddr_id_width, waddr_user_width, itype, otype)
self.wdata = AxiSlaveWriteData(m, name, datawidth, addrwidth,
wdata_id_width, wdata_user_width, itype, otype)
self.wresp = AxiSlaveWriteResponse(m, name, datawidth, addrwidth,
wresp_id_width, wresp_user_width, itype, otype)
self.raddr = AxiSlaveReadAddress(m, name, datawidth, addrwidth,
raddr_id_width, raddr_user_width, itype, otype)
itype = util.t_Reg if noio else None
self.rdata = AxiSlaveReadData(m, name, datawidth, addrwidth,
rdata_id_width, rdata_user_width, itype, otype)
self.seq = Seq(m, name, clk, rst)
# default values
self.wresp.bresp.assign(0)
if self.wresp.buser is not None:
self.wresp.buser.assign(wresp_user_mode)
self.rdata.rresp.assign(0)
if self.rdata.ruser is not None:
self.rdata.ruser.assign(rdata_user_mode)
# write response
if self.wresp.bid is not None:
self.seq.If(self.waddr.awvalid, self.waddr.awready, vtypes.Not(self.wresp.bvalid))(
self.wresp.bid(self.waddr.awid if self.waddr.awid is not None else 0)
)
if self.rdata.rid is not None:
self.seq.If(self.raddr.arvalid, self.raddr.arready)(
self.rdata.rid(self.raddr.arid if self.raddr.arid is not None else 0)
)
self.seq.If(self.wresp.bvalid, self.wresp.bready)(
self.wresp.bvalid(0)
)
self.seq.If(self.wdata.wvalid, self.wdata.wready, self.wdata.wlast)(
self.wresp.bvalid(1)
)
self.write_counters = []
self.read_counters = []
if nodataflow:
self.df = None
else:
self.df = DataflowManager(self.m, self.clk, self.rst)
self._write_disabled = False
self._read_disabled = False
def disable_write(self):
self.waddr.awready.assign(0)
self.wdata.wready.assign(0)
self._write_disabled = True
def disable_read(self):
self.raddr.arready.assign(0)
ports = [self.rdata.rvalid(0),
self.rdata.rlast(0)]
self.seq(
*ports
)
self._read_disabled = True
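# Accept either a pending write or read address: capture the address and
# burst length into 'counter' and flag which kind arrived via
# writevalid/readvalid (write requests take priority over reads).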
def pull_request_counter(self, cond, counter=None):
"""
@return addr, counter, readvalid, writevalid
"""
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.m.TmpReg(self.burst_size_width + 1, initval=0, prefix='counter')
ready = make_condition(cond)
write_ack = vtypes.Ands(self.waddr.awready, self.waddr.awvalid,
vtypes.Not(self.wresp.bvalid))
read_ack = vtypes.Ands(self.raddr.arready, self.raddr.arvalid)
addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
writevalid = self.m.TmpReg(initval=0, prefix='writevalid')
readvalid = self.m.TmpReg(initval=0, prefix='readvalid')
prev_awvalid = self.m.TmpReg(initval=0, prefix='prev_awvalid')
self.seq(
prev_awvalid(self.waddr.awvalid)
)
prev_arvalid = self.m.TmpReg(initval=0, prefix='prev_arvalid')
self.seq(
prev_arvalid(self.raddr.arvalid)
)
writeval = vtypes.Ands(vtypes.Not(writevalid), vtypes.Not(readvalid),
vtypes.Not(self.wresp.bvalid),
prev_awvalid)
if ready is not None:
writeval = vtypes.Ands(ready, writeval)
readval = vtypes.Ands(vtypes.Not(readvalid), vtypes.Not(writevalid),
prev_arvalid, vtypes.Not(prev_awvalid))
if ready is not None:
readval = vtypes.Ands(ready, readval)
_connect_ready(self.waddr.awready._get_module(),
self.waddr.awready, writeval)
_connect_ready(self.raddr.arready._get_module(),
self.raddr.arready, readval)
self.seq(
writevalid(0),
readvalid(0)
)
self.seq.If(write_ack)(
addr(self.waddr.awaddr),
counter(self.waddr.awlen + 1),
writevalid(1)
).Elif(read_ack)(
addr(self.raddr.araddr),
counter(self.raddr.arlen + 1),
readvalid(1)
)
return addr, counter, readvalid, writevalid
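# Illustrative sketch (not part of the library): a user FSM can arbitrate both
# channels through pull_request_counter. 'slave' is assumed to be an AxiSlave
# instance, 'fsm' a user FSM on the same clock/reset, and WRITE_STATE /
# READ_STATE placeholder state indices.
#
#   addr, counter, readvalid, writevalid = slave.pull_request_counter(
#       cond=(fsm.state == fsm.current))
#   fsm.If(writevalid).goto(WRITE_STATE)
#   fsm.If(readvalid).goto(READ_STATE)
#
# readvalid/writevalid pulse for one cycle when a request is accepted and
# 'counter' is loaded with the burst length (awlen/arlen + 1).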
def pull_write_request_counter(self, cond=None, counter=None):
"""
@return addr, counter, valid
"""
if self._write_disabled:
raise TypeError('Write disabled.')
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.m.TmpReg(self.burst_size_width + 1, initval=0,
prefix='counter')
self.write_counters.append(counter)
ready = make_condition(cond)
ack = vtypes.Ands(self.waddr.awready, self.waddr.awvalid,
vtypes.Not(self.wresp.bvalid))
addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
valid = self.m.TmpReg(initval=0, prefix='valid')
prev_awvalid = self.m.TmpReg(initval=0, prefix='prev_awvalid')
self.seq(
prev_awvalid(self.waddr.awvalid)
)
val = (vtypes.Ands(vtypes.Not(valid),
vtypes.Not(self.wresp.bvalid),
prev_awvalid) if ready is None else
vtypes.Ands(ready, vtypes.Not(valid),
vtypes.Not(self.wresp.bvalid),
prev_awvalid))
_connect_ready(self.waddr.awready._get_module(),
self.waddr.awready, val)
self.seq.If(ack)(
addr(self.waddr.awaddr),
counter(self.waddr.awlen + 1)
)
self.seq(
valid(ack)
)
return addr, counter, valid
def pull_write_data(self, counter=None, cond=None):
"""
@return data, mask, valid, last
"""
if self._write_disabled:
raise TypeError('Write disabled.')
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.write_counters[-1]
ready = make_condition(cond)
val = 1 if ready is None else ready
_connect_ready(self.wdata.wready._get_module(), self.wdata.wready, val)
ack = vtypes.Ands(self.wdata.wready, self.wdata.wvalid)
data = self.wdata.wdata
mask = self.wdata.wstrb
valid = ack
last = self.wdata.wlast
self.seq.If(vtypes.Ands(ack, counter > 0))(
counter.dec()
)
return data, mask, valid, last
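# Illustrative sketch (not part of the library): servicing a write burst.
# 'slave' is an AxiSlave instance, 'fsm' a user FSM, and 'ram_we', 'ram_addr',
# 'ram_wdata' placeholder registers of the receiving memory (per-beat address
# increment omitted for brevity).
#
#   addr, counter, req_valid = slave.pull_write_request_counter(
#       cond=(fsm.state == fsm.current))
#   fsm.If(req_valid).goto_next()
#   data, mask, valid, last = slave.pull_write_data(counter)
#   fsm.If(valid)(
#       ram_we(1), ram_addr(addr), ram_wdata(data)
#   )
#   fsm.If(valid, last).goto_next()
#
# The returned counter is decremented automatically on every accepted beat, so
# user logic only has to observe 'valid' and 'last'.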
def pull_write_dataflow(self, counter=None, cond=None):
"""
@return data, mask, last, done
"""
if self._write_disabled:
raise TypeError('Write disabled.')
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.write_counters[-1]
data_ready = self.m.TmpWire(prefix='data_ready')
mask_ready = self.m.TmpWire(prefix='mask_ready')
last_ready = self.m.TmpWire(prefix='last_ready')
data_ready.assign(1)
mask_ready.assign(1)
last_ready.assign(1)
if cond is None:
cond = (data_ready, last_ready)
elif isinstance(cond, (tuple, list)):
cond = tuple(list(cond) + [data_ready, last_ready])
else:
cond = (cond, data_ready, last_ready)
ready = make_condition(*cond)
val = 1 if ready is None else ready
_connect_ready(self.wdata.wready._get_module(), self.wdata.wready, val)
ack = vtypes.Ands(self.wdata.wready, self.wdata.wvalid)
data = self.wdata.wdata
mask = self.wdata.wstrb
valid = self.wdata.wvalid
last = self.wdata.wlast
self.seq.If(vtypes.Ands(ack, counter > 0))(
counter.dec()
)
df_data = self.df.Variable(data, valid, data_ready,
width=self.datawidth, signed=False)
df_mask = self.df.Variable(mask, valid, mask_ready,
width=self.datawidth // 4, signed=False)
df_last = self.df.Variable(last, valid, last_ready,
width=1, signed=False)
done = vtypes.Ands(last, self.wdata.wvalid, self.wdata.wready)
return df_data, df_mask, df_last, done
def pull_read_request_counter(self, cond=None, counter=None):
"""
@return addr, counter, valid
"""
if self._read_disabled:
raise TypeError('Read disabled.')
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.m.TmpReg(self.burst_size_width + 1, initval=0, prefix='counter')
self.read_counters.append(counter)
ready = make_condition(cond)
ack = vtypes.Ands(self.raddr.arready, self.raddr.arvalid)
addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
valid = self.m.TmpReg(initval=0, prefix='valid')
prev_arvalid = self.m.TmpReg(initval=0, prefix='prev_arvalid')
self.seq(
prev_arvalid(self.raddr.arvalid)
)
val = (vtypes.Ands(vtypes.Not(valid), prev_arvalid) if ready is None else
vtypes.Ands(ready, vtypes.Not(valid), prev_arvalid))
_connect_ready(self.raddr.arready._get_module(),
self.raddr.arready, val)
self.seq.If(ack)(
addr(self.raddr.araddr),
counter(self.raddr.arlen + 1)
)
self.seq(
valid(ack)
)
return addr, counter, valid
def push_read_data(self, data, counter=None, cond=None):
"""
@return ack, valid, last
"""
if self._read_disabled:
raise TypeError('Read disabled.')
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.read_counters[-1]
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ands(counter > 0,
vtypes.Ors(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))
valid = vtypes.Ands(self.rdata.rready, self.rdata.rvalid)
last = self.rdata.rlast
self.seq.If(vtypes.Ands(ack, counter > 0))(
self.rdata.rdata(data),
self.rdata.rvalid(1),
self.rdata.rlast(0),
counter.dec()
)
self.seq.Then().If(counter == 1)(
self.rdata.rlast(1)
)
# de-assert
self.seq.Delay(1)(
self.rdata.rvalid(0),
self.rdata.rlast(0)
)
# retry
self.seq.If(vtypes.Ands(self.rdata.rvalid, vtypes.Not(self.rdata.rready)))(
self.rdata.rvalid(self.rdata.rvalid),
self.rdata.rlast(self.rdata.rlast)
)
return ack, valid, last
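# Illustrative sketch (not part of the library): answering a read burst.
# 'slave' is an AxiSlave instance, 'fsm' a user FSM and 'rdata_word' a
# placeholder signal holding the word to return for the current beat (the next
# word would be prepared whenever 'ack' is high).
#
#   addr, counter, req_valid = slave.pull_read_request_counter(
#       cond=(fsm.state == fsm.current))
#   fsm.If(req_valid).goto_next()
#   ack, valid, last = slave.push_read_data(rdata_word, counter,
#                                           cond=(fsm.state == fsm.current))
#   fsm.If(valid, last).goto_init()
#
# rlast is raised automatically when the counter reaches one, and the
# valid/last registers are held by the retry logic while rready is low.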
def push_read_dataflow(self, data, counter=None, cond=None):
"""
@return done
"""
if self._read_disabled:
raise TypeError('Read disabled.')
if counter is not None and not isinstance(counter, vtypes.Reg):
raise TypeError("counter must be Reg or None.")
if counter is None:
counter = self.read_counters[-1]
ack = vtypes.Ands(counter > 0,
vtypes.Ors(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))
if cond is None:
cond = ack
else:
cond = (cond, ack)
raw_data, raw_valid = data.read(cond=cond)
# write condition
self.seq.If(raw_valid)
self.seq.If(vtypes.Ands(ack, counter > 0))(
self.rdata.rdata(raw_data),
self.rdata.rvalid(1),
self.rdata.rlast(0),
counter.dec()
)
self.seq.Then().If(counter == 1)(
self.rdata.rlast(1)
)
# de-assert
self.seq.Delay(1)(
self.rdata.rvalid(0),
self.rdata.rlast(0)
)
# retry
self.seq.If(vtypes.Ands(self.rdata.rvalid, vtypes.Not(self.rdata.rready)))(
self.rdata.rvalid(self.rdata.rvalid),
self.rdata.rlast(self.rdata.rlast)
)
done = vtypes.Ands(self.rdata.rlast, self.rdata.rvalid, self.rdata.rready)
return done
def connect(self, ports, name):
if not self.noio:
raise ValueError('I/O ports can not be connected to others.')
ports = defaultdict(lambda: None, ports)
if '_'.join([name, 'awid']) in ports:
awid = ports['_'.join([name, 'awid'])]
else:
awid = None
awaddr = ports['_'.join([name, 'awaddr'])]
awlen = ports['_'.join([name, 'awlen'])]
awsize = ports['_'.join([name, 'awsize'])]
awburst = ports['_'.join([name, 'awburst'])]
awlock = ports['_'.join([name, 'awlock'])]
awcache = ports['_'.join([name, 'awcache'])]
awprot = ports['_'.join([name, 'awprot'])]
awqos = ports['_'.join([name, 'awqos'])]
if '_'.join([name, 'awuser']) in ports:
awuser = ports['_'.join([name, 'awuser'])]
else:
awuser = None
awvalid = ports['_'.join([name, 'awvalid'])]
awready = ports['_'.join([name, 'awready'])]
if self.waddr.awid is not None:
self.waddr.awid.connect(awid if awid is not None else 0)
self.waddr.awaddr.connect(awaddr)
self.waddr.awlen.connect(awlen if awlen is not None else 0)
self.waddr.awsize.connect(awsize if awsize is not None else
int(math.log(self.datawidth // 8)))
self.waddr.awburst.connect(awburst if awburst is not None else BURST_INCR)
self.waddr.awlock.connect(awlock if awlock is not None else 0)
self.waddr.awcache.connect(awcache)
self.waddr.awprot.connect(awprot)
self.waddr.awqos.connect(awqos if awqos is not None else 0)
if self.waddr.awuser is not None:
self.waddr.awuser.connect(awuser if awuser is not None else 0)
self.waddr.awvalid.connect(awvalid)
awready.connect(self.waddr.awready)
wdata = ports['_'.join([name, 'wdata'])]
wstrb = ports['_'.join([name, 'wstrb'])]
wlast = ports['_'.join([name, 'wlast'])]
if '_'.join([name, 'wuser']) in ports:
wuser = ports['_'.join([name, 'wuser'])]
else:
wuser = None
wvalid = ports['_'.join([name, 'wvalid'])]
wready = ports['_'.join([name, 'wready'])]
self.wdata.wdata.connect(wdata)
self.wdata.wstrb.connect(wstrb)
self.wdata.wlast.connect(wlast if wlast is not None else 1)
if self.wdata.wuser is not None:
self.wdata.wuser.connect(wuser if wuser is not None else 0)
self.wdata.wvalid.connect(wvalid)
wready.connect(self.wdata.wready)
if '_'.join([name, 'bid']) in ports:
bid = ports['_'.join([name, 'bid'])]
else:
bid = None
bresp = ports['_'.join([name, 'bresp'])]
if '_'.join([name, 'buser']) in ports:
buser = ports['_'.join([name, 'buser'])]
else:
buser = None
bvalid = ports['_'.join([name, 'bvalid'])]
bready = ports['_'.join([name, 'bready'])]
if bid is not None:
bid.connect(self.wresp.bid if self.wresp.bid is not None else 0)
bresp.connect(self.wresp.bresp)
if buser is not None:
buser.connect(self.wresp.buser if self.wresp.buser is not None else 0)
bvalid.connect(self.wresp.bvalid)
self.wresp.bready.connect(bready)
if '_'.join([name, 'arid']) in ports:
arid = ports['_'.join([name, 'arid'])]
else:
arid = None
araddr = ports['_'.join([name, 'araddr'])]
arlen = ports['_'.join([name, 'arlen'])]
arsize = ports['_'.join([name, 'arsize'])]
arburst = ports['_'.join([name, 'arburst'])]
arlock = ports['_'.join([name, 'arlock'])]
arcache = ports['_'.join([name, 'arcache'])]
arprot = ports['_'.join([name, 'arprot'])]
arqos = ports['_'.join([name, 'arqos'])]
if '_'.join([name, 'aruser']) in ports:
aruser = ports['_'.join([name, 'aruser'])]
else:
aruser = None
arvalid = ports['_'.join([name, 'arvalid'])]
arready = ports['_'.join([name, 'arready'])]
if self.raddr.arid is not None:
self.raddr.arid.connect(arid if arid is not None else 0)
self.raddr.araddr.connect(araddr)
self.raddr.arlen.connect(arlen if arlen is not None else 0)
self.raddr.arsize.connect(arsize if arsize is not None else
int(math.log(self.datawidth // 8)))
self.raddr.arburst.connect(arburst if arburst is not None else BURST_INCR)
self.raddr.arlock.connect(arlock if arlock is not None else 0)
self.raddr.arcache.connect(arcache)
self.raddr.arprot.connect(arprot)
self.raddr.arqos.connect(arqos if arqos is not None else 0)
if self.raddr.aruser is not None:
self.raddr.aruser.connect(aruser if aruser is not None else 0)
self.raddr.arvalid.connect(arvalid)
arready.connect(self.raddr.arready)
if '_'.join([name, 'rid']) in ports:
rid = ports['_'.join([name, 'rid'])]
else:
rid = None
rdata = ports['_'.join([name, 'rdata'])]
rresp = ports['_'.join([name, 'rresp'])]
rlast = ports['_'.join([name, 'rlast'])]
if '_'.join([name, 'ruser']) in ports:
ruser = ports['_'.join([name, 'ruser'])]
else:
ruser = None
rvalid = ports['_'.join([name, 'rvalid'])]
rready = ports['_'.join([name, 'rready'])]
if rid is not None:
rid.connect(self.rdata.rid if self.rdata.rid is not None else 0)
rdata.connect(self.rdata.rdata)
rresp.connect(self.rdata.rresp)
if rlast is not None:
rlast.connect(self.rdata.rlast)
if ruser is not None:
ruser.connect(self.rdata.ruser if self.rdata.ruser is not None else 0)
rvalid.connect(self.rdata.rvalid)
self.rdata.rready.connect(rready)
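# Usage note (illustrative): connect() expects 'ports' to map flat names such
# as '<name>_awaddr', '<name>_wdata' or '<name>_rready' to the opposite side's
# signals; optional channels (awid, awuser, wuser, bid, buser, arid, aruser,
# rid, ruser) may be absent and fall back to safe defaults. A hypothetical call:
#
#   slave.connect(ports, 'maxi')   # pairs e.g. ports['maxi_awaddr'] with awaddr
#
# This only works for instances created with noio=True; otherwise the slave
# already owns real I/O ports and raises ValueError.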
class AxiLiteSlave(AxiSlave):
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
noio=False, nodataflow=False):
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.addrwidth = addrwidth
self.noio = noio
if not hasattr(self.m, 'slavebus'):
self.m.slavebus = []
self.m.slavebus.append(self)
itype = util.t_Wire if noio else None
otype = util.t_Wire if noio else None
self.waddr = AxiLiteSlaveWriteAddress(m, name, datawidth, addrwidth,
itype, otype)
self.wdata = AxiLiteSlaveWriteData(m, name, datawidth, addrwidth,
itype, otype)
self.wresp = AxiLiteSlaveWriteResponse(m, name, datawidth, addrwidth,
itype, otype)
self.raddr = AxiLiteSlaveReadAddress(m, name, datawidth, addrwidth,
itype, otype)
itype = util.t_Reg if noio else None
self.rdata = AxiLiteSlaveReadData(m, name, datawidth, addrwidth,
itype, otype)
self.seq = Seq(m, name, clk, rst)
# default values
self.wresp.bresp.assign(0)
self.rdata.rresp.assign(0)
# write response
self.seq.If(self.wresp.bvalid, self.wresp.bready)(
self.wresp.bvalid(0)
)
self.seq.If(self.wdata.wvalid, self.wdata.wready)(
self.wresp.bvalid(1)
)
if nodataflow:
self.df = None
else:
self.df = DataflowManager(self.m, self.clk, self.rst)
self._write_disabled = False
self._read_disabled = False
def disable_write(self):
self.waddr.awready.assign(0)
self.wdata.wready.assign(0)
self._write_disabled = True
def disable_read(self):
self.raddr.arready.assign(0)
ports = [self.rdata.rvalid(0)]
self.seq(
*ports
)
self._read_disabled = True
def pull_request(self, cond):
"""
@return addr, readvalid, writevalid
"""
ready = make_condition(cond)
write_ack = vtypes.Ands(self.waddr.awready, self.waddr.awvalid,
vtypes.Not(self.wresp.bvalid))
read_ack = vtypes.Ands(self.raddr.arready, self.raddr.arvalid)
addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
writevalid = self.m.TmpReg(initval=0, prefix='writevalid')
readvalid = self.m.TmpReg(initval=0, prefix='readvalid')
prev_awvalid = self.m.TmpReg(initval=0, prefix='prev_awvalid')
self.seq(
prev_awvalid(self.waddr.awvalid)
)
prev_arvalid = self.m.TmpReg(initval=0, prefix='prev_arvalid')
self.seq(
prev_arvalid(self.raddr.arvalid)
)
writeval = vtypes.Ands(vtypes.Not(writevalid), vtypes.Not(readvalid),
vtypes.Not(self.wresp.bvalid),
prev_awvalid)
if ready is not None:
writeval = vtypes.Ands(ready, writeval)
readval = vtypes.Ands(vtypes.Not(readvalid), vtypes.Not(writevalid),
prev_arvalid, vtypes.Not(prev_awvalid))
if ready is not None:
readval = vtypes.Ands(ready, readval)
_connect_ready(self.waddr.awready._get_module(),
self.waddr.awready, writeval)
_connect_ready(self.raddr.arready._get_module(),
self.raddr.arready, readval)
self.seq(
writevalid(0),
readvalid(0)
)
self.seq.If(write_ack)(
addr(self.waddr.awaddr),
writevalid(1)
).Elif(read_ack)(
addr(self.raddr.araddr),
readvalid(1)
)
return addr, readvalid, writevalid
def pull_write_request(self, cond=None):
"""
@return addr, valid
"""
if self._write_disabled:
raise TypeError('Write disabled.')
ready = make_condition(cond)
ack = vtypes.Ands(self.waddr.awready, self.waddr.awvalid,
vtypes.Not(self.wresp.bvalid))
addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
valid = self.m.TmpReg(initval=0, prefix='valid')
prev_awvalid = self.m.TmpReg(initval=0, prefix='prev_awvalid')
self.seq(
prev_awvalid(self.waddr.awvalid)
)
val = (vtypes.Ands(vtypes.Not(valid),
vtypes.Not(self.wresp.bvalid),
prev_awvalid) if ready is None else
vtypes.Ands(ready, vtypes.Not(valid),
vtypes.Not(self.wresp.bvalid),
prev_awvalid))
_connect_ready(self.waddr.awready._get_module(),
self.waddr.awready, val)
self.seq.If(ack)(
addr(self.waddr.awaddr),
)
self.seq(
valid(ack)
)
return addr, valid
def pull_write_data(self, cond=None):
"""
@return data, mask, valid
"""
if self._write_disabled:
raise TypeError('Write disabled.')
ready = make_condition(cond)
val = 1 if ready is None else ready
_connect_ready(self.wdata.wready._get_module(), self.wdata.wready, val)
ack = vtypes.Ands(self.wdata.wready, self.wdata.wvalid)
data = self.wdata.wdata
mask = self.wdata.wstrb
valid = ack
return data, mask, valid
def pull_write_dataflow(self, counter=None, cond=None):
"""
@return data, mask, last, done
"""
raise TypeError('AXI-Lite interface does not support dataflow operations.')
def pull_read_request(self, cond=None):
"""
@return addr, valid
"""
if self._read_disabled:
raise TypeError('Read disabled.')
ready = make_condition(cond)
ack = vtypes.Ands(self.raddr.arready, self.raddr.arvalid)
addr = self.m.TmpReg(self.addrwidth, initval=0, prefix='addr')
valid = self.m.TmpReg(initval=0, prefix='valid')
prev_arvalid = self.m.TmpReg(initval=0, prefix='prev_arvalid')
self.seq(
prev_arvalid(self.raddr.arvalid)
)
val = (vtypes.Ands(vtypes.Not(valid), prev_arvalid) if ready is None else
vtypes.Ands(ready, vtypes.Not(valid), prev_arvalid))
_connect_ready(self.raddr.arready._get_module(),
self.raddr.arready, val)
self.seq.If(ack)(
addr(self.raddr.araddr)
)
self.seq(
valid(ack)
)
return addr, valid
def push_read_data(self, data, cond=None):
"""
@return ack, valid
"""
if self._read_disabled:
raise TypeError('Read disabled.')
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.rdata.rready, vtypes.Not(self.rdata.rvalid))
valid = vtypes.Ands(self.rdata.rready, self.rdata.rvalid)
self.seq.If(ack)(
self.rdata.rdata(data),
self.rdata.rvalid(1)
)
# de-assert
self.seq.Delay(1)(
self.rdata.rvalid(0)
)
# retry
self.seq.If(vtypes.Ands(self.rdata.rvalid, vtypes.Not(self.rdata.rready)))(
self.rdata.rvalid(self.rdata.rvalid)
)
return ack, valid
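# Illustrative sketch (not part of the library): a small register file behind
# AxiLiteSlave. 'm', 'clk', 'rst' are assumed to be the user's Module and its
# clock/reset; NUM_REGS, WRITE_STATE and READ_STATE are placeholders.
#
#   saxi = AxiLiteSlave(m, 'saxi', clk, rst, datawidth=32, addrwidth=32)
#   regs = [m.Reg('slv_reg%d' % i, 32, initval=0) for i in range(NUM_REGS)]
#   fsm = FSM(m, 'regif_fsm', clk, rst)
#   addr, readvalid, writevalid = saxi.pull_request(cond=(fsm.state == fsm.current))
#   fsm.If(writevalid).goto(WRITE_STATE)
#   fsm.If(readvalid).goto(READ_STATE)
#   # in WRITE_STATE: data, mask, valid = saxi.pull_write_data()
#   #                 update the register selected by 'addr' when valid
#   # in READ_STATE:  ack, valid = saxi.push_read_data(selected, cond=...)
#
# Decoding 'addr' into one of 'regs' is left out; real code would use an
# explicit multiplexer (e.g. vtypes.Mux) or per-register conditions.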
def push_read_dataflow(self, data, counter=None, cond=None):
"""
@return done
"""
raise TypeError('AXI-Lite interface does not support dataflow operations.')
def connect(self, ports, name):
if not self.noio:
raise ValueError('I/O ports can not be connected to others.')
awaddr = ports['_'.join([name, 'awaddr'])]
awcache = ports['_'.join([name, 'awcache'])]
awprot = ports['_'.join([name, 'awprot'])]
awvalid = ports['_'.join([name, 'awvalid'])]
awready = ports['_'.join([name, 'awready'])]
self.waddr.awaddr.connect(awaddr)
self.waddr.awcache.connect(awcache)
self.waddr.awprot.connect(awprot)
self.waddr.awvalid.connect(awvalid)
awready.connect(self.waddr.awready)
wdata = ports['_'.join([name, 'wdata'])]
wstrb = ports['_'.join([name, 'wstrb'])]
wvalid = ports['_'.join([name, 'wvalid'])]
wready = ports['_'.join([name, 'wready'])]
self.wdata.wdata.connect(wdata)
self.wdata.wstrb.connect(wstrb)
self.wdata.wvalid.connect(wvalid)
wready.connect(self.wdata.wready)
bresp = ports['_'.join([name, 'bresp'])]
bvalid = ports['_'.join([name, 'bvalid'])]
bready = ports['_'.join([name, 'bready'])]
bresp.connect(self.wresp.bresp)
bvalid.connect(self.wresp.bvalid)
self.wresp.bready.connect(bready)
araddr = ports['_'.join([name, 'araddr'])]
arcache = ports['_'.join([name, 'arcache'])]
arprot = ports['_'.join([name, 'arprot'])]
arvalid = ports['_'.join([name, 'arvalid'])]
arready = ports['_'.join([name, 'arready'])]
self.raddr.araddr.connect(araddr)
self.raddr.arcache.connect(arcache)
self.raddr.arprot.connect(arprot)
self.raddr.arvalid.connect(arvalid)
arready.connect(self.raddr.arready)
rdata = ports['_'.join([name, 'rdata'])]
rresp = ports['_'.join([name, 'rresp'])]
rvalid = ports['_'.join([name, 'rvalid'])]
rready = ports['_'.join([name, 'rready'])]
rdata.connect(self.rdata.rdata)
rresp.connect(self.rdata.rresp)
rvalid.connect(self.rdata.rvalid)
self.rdata.rready.connect(rready)
class AxiStreamIn(object):
def __init__(self, m, name, clk, rst, datawidth=32,
with_last=True, with_strb=False,
id_width=0, user_width=0, dest_width=0,
noio=False, nodataflow=False):
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.noio = noio
if not hasattr(self.m, 'streaminbus'):
self.m.streaminbus = []
self.m.streaminbus.append(self)
itype = util.t_Wire if noio else None
otype = util.t_Wire if noio else None
self.tdata = AxiStreamInData(m, name, datawidth,
with_last, with_strb,
id_width, user_width, dest_width,
itype, otype)
self.seq = Seq(m, name, clk, rst)
if nodataflow:
self.df = None
else:
self.df = DataflowManager(self.m, self.clk, self.rst)
def read_data(self, cond=None):
"""
@return data, last, _id, user, dest, valid
"""
ready = make_condition(cond)
val = 1 if ready is None else ready
_connect_ready(self.tdata.tready._get_module(), self.tdata.tready, val)
ack = vtypes.Ands(self.tdata.tready, self.tdata.tvalid)
data = self.tdata.tdata
valid = ack
last = self.tdata.tlast
_id = self.tdata.tid
user = self.tdata.tuser
dest = self.tdata.tdest
return data, last, _id, user, dest, valid
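# Illustrative sketch (not part of the library): consuming one stream beat per
# cycle while a user FSM sits in its current state. 'stream_in' is assumed to
# be an AxiStreamIn instance and 'sink' a placeholder register.
#
#   data, last, _id, user, dest, valid = stream_in.read_data(
#       cond=(fsm.state == fsm.current))
#   fsm.If(valid)(
#       sink(data)
#   )
#   fsm.If(valid, last).goto_next()
#
# 'valid' is tready & tvalid, i.e. it is asserted exactly on accepted beats.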
def read_dataflow(self, cond=None, point=0, signed=True):
"""
@return data, last, _id, user, dest, done
"""
data_ready = self.m.TmpWire(prefix='data_ready')
last_ready = self.m.TmpWire(prefix='last_ready')
id_ready = self.m.TmpWire(prefix='id_ready')
user_ready = self.m.TmpWire(prefix='user_ready')
dest_ready = self.m.TmpWire(prefix='dest_ready')
data_ready.assign(1)
id_ready.assign(1)
last_ready.assign(1)
user_ready.assign(1)
dest_ready.assign(1)
if cond is None:
cond = (data_ready, last_ready, id_ready, user_ready, dest_ready)
elif isinstance(cond, (tuple, list)):
cond = tuple(list(cond) + [data_ready, last_ready, id_ready, user_ready, dest_ready])
else:
cond = (cond, data_ready, last_ready, id_ready, user_ready, dest_ready)
ready = make_condition(*cond)
val = 1 if ready is None else ready
_connect_ready(self.tdata.tready._get_module(), self.tdata.tready, val)
ack = vtypes.Ands(self.tdata.tready, self.tdata.tvalid)
data = self.tdata.tdata
valid = self.tdata.tvalid
_id = self.tdata.tid
last = self.tdata.tlast
user = self.tdata.tuser
dest = self.tdata.tdest
df = self.df if self.df is not None else _df
df_data = df.Variable(data, valid, data_ready,
width=self.datawidth, point=point, signed=signed)
if last is not None:
df_last = df.Variable(last, valid, last_ready, width=1, signed=False)
done = vtypes.Ands(last, self.tdata.tvalid, self.tdata.tready)
else:
df_last = None
done = vtypes.Ands(self.tdata.tvalid, self.tdata.tready)
if _id is not None:
df_id = df.Variable(_id, valid, id_ready, width=_id.width, signed=False)
else:
df_id = None
if user is not None:
df_user = df.Variable(user, valid, user_ready, width=user.width, signed=False)
else:
df_user = None
if dest is not None:
df_dest = df.Variable(dest, valid, dest_ready, width=dest.width, signed=False)
else:
df_dest = None
return df_data, df_last, df_id, df_user, df_dest, done
def connect(self, ports, name):
if not self.noio:
raise ValueError('I/O ports can not be connected to others.')
tdata = ports['_'.join([name, 'tdata'])]
tvalid = ports['_'.join([name, 'tvalid'])]
tready = ports['_'.join([name, 'tready'])]
if '_'.join([name, 'tlast']) in ports:
tlast = ports['_'.join([name, 'tlast'])]
else:
tlast = None
if '_'.join([name, 'tid']) in ports:
tid = ports['_'.join([name, 'tid'])]
else:
tid = None
if '_'.join([name, 'tuser']) in ports:
tuser = ports['_'.join([name, 'tuser'])]
else:
tuser = None
if '_'.join([name, 'tdest']) in ports:
tdest = ports['_'.join([name, 'tdest'])]
else:
tdest = None
self.tdata.tdata.connect(tdata)
self.tdata.tvalid.connect(tvalid)
tready.connect(self.tdata.tready)
if self.tdata.tlast is not None:
self.tdata.tlast.connect(tlast if tlast is not None else 1)
if self.tdata.tid is not None:
self.tdata.tid.connect(tid if tid is not None else 0)
if self.tdata.tuser is not None:
self.tdata.tuser.connect(tuser if tuser is not None else 0)
if self.tdata.tdest is not None:
self.tdata.tdest.connect(tdest if tdest is not None else 0)
def connect_stream(self, stream):
if not isinstance(stream, AxiStreamOut):
raise TypeError('stream must be an instance of AxiStreamOut.')
if not self.noio:
raise ValueError('I/O ports can not be connected to others.')
tdata = stream.tdata.tdata
tvalid = stream.tdata.tvalid
tready = stream.tdata.tready
if stream.tdata.tlast is not None:
tlast = stream.tdata.tlast
else:
tlast = None
if stream.tdata.tid is not None:
tid = stream.tdata.tid
else:
tid = None
if stream.tdata.tuser is not None:
tuser = stream.tdata.tuser
else:
tuser = None
if stream.tdata.tdest is not None:
tdest = stream.tdata.tdest
else:
tdest = None
self.tdata.tdata.connect(tdata)
self.tdata.tvalid.connect(tvalid)
tready.connect(self.tdata.tready)
if self.tdata.tlast is not None:
self.tdata.tlast.connect(tlast if tlast is not None else 1)
if self.tdata.tid is not None:
self.tdata.tid.connect(tid if tid is not None else 0)
if self.tdata.tuser is not None:
self.tdata.tuser.connect(tuser if tuser is not None else 0)
if self.tdata.tdest is not None:
self.tdata.tdest.connect(tdest if tdest is not None else 0)
def connect_master_rdata(self, master):
if not isinstance(master, AxiMaster):
raise TypeError('master must be an instance of AxiMaster.')
if not self.noio:
raise ValueError('I/O ports can not be connected to others.')
tdata = master.rdata.rdata
tvalid = master.rdata.rvalid
tready = master.rdata.rready
tlast = 0
if master.rdata.rid is not None:
tid = master.rdata.rid
else:
tid = None
if master.rdata.ruser is not None:
tuser = master.rdata.ruser
else:
tuser = None
tdest = None
self.tdata.tdata.connect(tdata)
self.tdata.tvalid.connect(tvalid)
tready.connect(self.tdata.tready)
if self.tdata.tlast is not None:
self.tdata.tlast.connect(tlast if tlast is not None else 1)
if self.tdata.tid is not None:
self.tdata.tid.connect(tid if tid is not None else 0)
if self.tdata.tuser is not None:
self.tdata.tuser.connect(tuser if tuser is not None else 0)
if self.tdata.tdest is not None:
self.tdata.tdest.connect(tdest if tdest is not None else 0)
class AxiStreamOut(object):
def __init__(self, m, name, clk, rst, datawidth=32,
with_last=True, with_strb=False,
id_width=0, user_width=0, dest_width=0,
noio=False, nodataflow=False):
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.noio = noio
if not hasattr(self.m, 'streamoutbus'):
self.m.streamoutbus = []
self.m.streamoutbus.append(self)
itype = util.t_Reg if noio else None
otype = util.t_Wire if noio else None
self.tdata = AxiStreamOutData(m, name, datawidth,
with_last, with_strb,
id_width, user_width, dest_width,
itype, otype)
self.seq = Seq(m, name, clk, rst)
# default values
if self.tdata.tuser is not None:
self.tdata.tuser.assign(0)
if self.tdata.tid is not None:
self.tdata.tid.assign(0)
if nodataflow:
self.df = None
else:
self.df = DataflowManager(self.m, self.clk, self.rst)
def write_data(self, data, last=None, _id=None, user=None, dest=None, cond=None):
"""
@return ack
"""
if cond is not None:
self.seq.If(cond)
ack = vtypes.Ors(self.tdata.tready, vtypes.Not(self.tdata.tvalid))
self.seq.If(ack)(
self.tdata.tdata(data),
self.tdata.tvalid(1),
self.tdata.tlast(last) if self.tdata.tlast is not None else (),
self.tdata.tid(_id) if self.tdata.tid is not None else (),
self.tdata.tuser(user) if self.tdata.tuser is not None else (),
self.tdata.tdest(dest) if self.tdata.tdest is not None else (),
)
# de-assert
self.seq.Delay(1)(
self.tdata.tvalid(0),
self.tdata.tlast(0) if self.tdata.tlast is not None else ()
)
# retry
self.seq.If(vtypes.Ands(self.tdata.tvalid, vtypes.Not(self.tdata.tready)))(
self.tdata.tvalid(self.tdata.tvalid),
self.tdata.tlast(self.tdata.tlast) if self.tdata.tlast is not None else ()
)
return ack
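# Illustrative sketch (not part of the library): emitting a fixed-length packet
# with tlast on the final beat. 'stream_out' is an AxiStreamOut instance,
# 'count' a placeholder counter register and LENGTH a placeholder constant.
#
#   ack = stream_out.write_data(count, last=(count == LENGTH - 1),
#                               cond=(fsm.state == fsm.current))
#   fsm.If(ack)(
#       count.inc()
#   )
#   fsm.If(ack, count == LENGTH - 1).goto_next()
#
# write_data() keeps tvalid/tlast asserted by itself while tready is low, so
# the caller only advances on 'ack'.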
def write_dataflow(self, data, last=None, _id=None, user=None, dest=None, cond=None, when=None):
"""
@return ack
'data', 'last', '_id', 'user', 'dest', and 'when' must be dataflow variables
"""
ack = vtypes.Ors(self.tdata.tready, vtypes.Not(self.tdata.tvalid))
if cond is None:
cond = ack
else:
cond = (cond, ack)
args = [data]
last_index = 0
id_index = 0
user_index = 0
dest_index = 0
when_index = 0
if last is not None:
args.append(last)
last_index = len(args) - 1
if _id is not None:
args.append(_id)
id_index = len(args) - 1
if user is not None:
args.append(user)
user_index = len(args) - 1
if dest is not None:
args.append(dest)
dest_index = len(args) - 1
if when is not None:
args.append(when)
when_index = len(args) - 1
data_list, raw_valid = read_multi(self.m, *args, cond=cond)
raw_data = data_list[0]
raw_last = data_list[last_index] if last_index > 0 else None
raw_id = data_list[id_index] if id_index > 0 else None
raw_user = data_list[user_index] if user_index > 0 else None
raw_dest = data_list[dest_index] if dest_index > 0 else None
raw_when = data_list[when_index] if when_index > 0 else None
when_cond = make_condition(raw_when, ready=cond)
if when_cond is not None:
raw_valid = vtypes.Ands(when_cond, raw_valid)
# write condition
self.seq.If(raw_valid)
self.seq.If(ack)(
self.tdata.tdata(raw_data),
self.tdata.tvalid(1),
self.tdata.tlast(raw_last) if self.tdata.tlast is not None else (),
self.tdata.tid(raw_id) if self.tdata.tid is not None else (),
self.tdata.tuser(raw_user) if self.tdata.tuser is not None else (),
self.tdata.tdest(raw_dest) if self.tdata.tdest is not None else (),
)
# de-assert
self.seq.Delay(1)(
self.tdata.tvalid(0),
self.tdata.tlast(0)
)
# retry
self.seq.If(vtypes.Ands(self.tdata.tvalid, vtypes.Not(self.tdata.tready)))(
self.tdata.tvalid(self.tdata.tvalid),
self.tdata.tlast(self.tdata.tlast) if self.tdata.tlast is not None else ()
)
ack = vtypes.Ands(self.tdata.tvalid, self.tdata.tready)
return ack
def connect(self, ports, name):
if not self.noio:
raise ValueError('I/O ports can not be connected to others.')
tdata = ports['_'.join([name, 'tdata'])]
tvalid = ports['_'.join([name, 'tvalid'])]
tready = ports['_'.join([name, 'tready'])]
if '_'.join([name, 'tlast']) in ports:
tlast = ports['_'.join([name, 'tlast'])]
else:
tlast = None
if '_'.join([name, 'tid']) in ports:
tid = ports['_'.join([name, 'tid'])]
else:
tid = None
if '_'.join([name, 'tuser']) in ports:
tuser = ports['_'.join([name, 'tuser'])]
else:
tuser = None
if '_'.join([name, 'tdest']) in ports:
tdest = ports['_'.join([name, 'tdest'])]
else:
tdest = None
tdata.connect(self.tdata.tdata)
tvalid.connect(self.tdata.tvalid)
self.tdata.tready.connect(tready)
if tlast is not None:
tlast.connect(self.tdata.tlast if self.tdata.tlast is not None else 1)
if tuser is not None:
tuser.connect(self.tdata.tuser if self.tdata.tuser is not None else 0)
if tid is not None:
tid.connect(self.tdata.tid if self.tdata.tid is not None else 0)
if tdest is not None:
tdest.connect(self.tdata.tdest if self.tdata.tdest is not None else 0)
def connect_stream(self, stream):
if not isinstance(stream, AxiStreamIn):
raise TypeError('stream must be an instance of AxiStreamIn.')
if not self.noio:
raise ValueError('I/O ports can not be connected to others.')
tdata = stream.tdata.tdata
tvalid = stream.tdata.tvalid
tready = stream.tdata.tready
if stream.tdata.tlast is not None:
tlast = stream.tdata.tlast
else:
tlast = None
if stream.tdata.tid is not None:
tid = stream.tdata.tid
else:
tid = None
if stream.tdata.tuser is not None:
tuser = stream.tdata.tuser
else:
tuser = None
if stream.tdata.tdest is not None:
tdest = stream.tdata.tdest
else:
tdest = None
tdata.connect(self.tdata.tdata)
tvalid.connect(self.tdata.tvalid)
self.tdata.tready.connect(tready)
if tlast is not None:
tlast.connect(self.tdata.tlast if self.tdata.tlast is not None else 1)
if tuser is not None:
tuser.connect(self.tdata.tuser if self.tdata.tuser is not None else 0)
if tid is not None:
tid.connect(self.tdata.tid if self.tdata.tid is not None else 0)
if tdest is not None:
tdest.connect(self.tdata.tdest if self.tdata.tdest is not None else 0)
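# Usage note (illustrative): two internal stream interfaces can be paired
# directly, e.g. a hypothetical loopback inside one module:
#
#   sout = AxiStreamOut(m, 'sout', clk, rst, datawidth=32, noio=True)
#   sin = AxiStreamIn(m, 'sin', clk, rst, datawidth=32, noio=True)
#   sin.connect_stream(sout)   # wires tdata/tvalid/tready (and tlast/tid/...)
#
# connect_stream() checks noio on the callee only; in practice the peer is
# created with noio=True as well so that its handshake signals are plain
# wires/registers rather than module I/O.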
class AxiMemoryModel(AxiSlave):
__intrinsics__ = ('read', 'write',
'read_word', 'write_word')
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
mem_datawidth=32, mem_addrwidth=20,
memimg=None, memimg_name=None,
memimg_datawidth=None,
write_delay=10, read_delay=10, sleep=4, sub_sleep=4,
waddr_id_width=0, wdata_id_width=0, wresp_id_width=0,
raddr_id_width=0, rdata_id_width=0,
waddr_user_width=2, wdata_user_width=0, wresp_user_width=0,
raddr_user_width=2, rdata_user_width=0,
wresp_user_mode=xUSER_DEFAULT,
rdata_user_mode=xUSER_DEFAULT):
if mem_datawidth % 8 != 0:
raise ValueError('mem_datawidth must be a multiple of 8')
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.addrwidth = addrwidth
self.noio = True
self.mem_datawidth = mem_datawidth
self.mem_addrwidth = mem_addrwidth
itype = util.t_Reg
otype = util.t_Wire
self.waddr = AxiSlaveWriteAddress(m, name, datawidth, addrwidth,
waddr_id_width, waddr_user_width, itype, otype)
self.wdata = AxiSlaveWriteData(m, name, datawidth, addrwidth,
wdata_id_width, wdata_user_width, itype, otype)
self.wresp = AxiSlaveWriteResponse(m, name, datawidth, addrwidth,
wresp_id_width, wresp_user_width, itype, otype)
self.raddr = AxiSlaveReadAddress(m, name, datawidth, addrwidth,
raddr_id_width, raddr_user_width, itype, otype)
self.rdata = AxiSlaveReadData(m, name, datawidth, addrwidth,
rdata_id_width, rdata_user_width, itype, otype)
# default values
self.wresp.bresp.assign(0)
if self.wresp.buser is not None:
self.wresp.buser.assign(wresp_user_mode)
self.rdata.rresp.assign(0)
if self.rdata.ruser is not None:
self.rdata.ruser.assign(rdata_user_mode)
self.fsm = FSM(self.m, '_'.join(['', self.name, 'fsm']), clk, rst)
self.seq = self.fsm.seq
# write response
if self.wresp.bid is not None:
self.seq.If(self.waddr.awvalid, self.waddr.awready,
vtypes.Not(self.wresp.bvalid))(
self.wresp.bid(self.waddr.awid if self.waddr.awid is not None else 0)
)
if self.rdata.rid is not None:
self.seq.If(self.raddr.arvalid, self.raddr.arready)(
self.rdata.rid(self.raddr.arid if self.raddr.arid is not None else 0)
)
self.seq.If(self.wresp.bvalid, self.wresp.bready)(
self.wresp.bvalid(0)
)
self.seq.If(self.wdata.wvalid, self.wdata.wready, self.wdata.wlast)(
self.wresp.bvalid(1)
)
if memimg is None:
if memimg_name is None:
memimg_name = '_'.join(['', self.name, 'memimg', '.out'])
size = 2 ** self.mem_addrwidth
width = self.mem_datawidth
self._make_img(memimg_name, size, width)
elif isinstance(memimg, str):
memimg_name = memimg
num_words = sum(1 for line in open(memimg, 'r'))
# resize mem_addrwidth according to the memimg size
self.mem_addrwidth = max(self.mem_addrwidth,
int(math.ceil(math.log(num_words, 2))))
else:
if memimg_datawidth is None:
memimg_datawidth = mem_datawidth
if memimg_name is None:
memimg_name = '_'.join(['', self.name, 'memimg', '.out'])
num_words = to_memory_image(memimg_name, memimg, datawidth=memimg_datawidth)
# resize mem_addrwidth according to the memimg size
self.mem_addrwidth = max(self.mem_addrwidth,
int(math.ceil(math.log(num_words, 2))))
self.mem = self.m.Reg(
'_'.join(['', self.name, 'mem']), 8, vtypes.Int(2) ** self.mem_addrwidth)
self.m.Initial(
vtypes.Systask('readmemh', memimg_name, self.mem)
)
self._make_fsm(write_delay, read_delay, sleep, sub_sleep)
@staticmethod
def _make_img(filename, size, width, blksize=4096):
import numpy as np
wordsize = width // 8
zero = np.zeros([size // wordsize, wordsize], dtype=np.int64)
base = np.arange(size // wordsize, dtype=np.int64).reshape([-1, 1])
shamt = np.arange(wordsize, dtype=np.int64) * [8]
mask = np.full([1], 2 ** 8 - 1, dtype=np.int64)
data = (((zero + base) >> shamt) & mask).reshape([-1])
fmt = '%02x\n'
with open(filename, 'w') as f:
for i in range(0, len(data), blksize):
blk = data[i:i + blksize]
s = ''.join([fmt % d for d in blk])
f.write(s)
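# Worked example (informative): with width=32 the default image stores each
# word's own index, split into little-endian bytes, one two-digit hex byte per
# line. The generated file therefore begins
#
#   00, 00, 00, 00   (word 0 = 0x00000000)
#   01, 00, 00, 00   (word 1 = 0x00000001)
#   02, 00, 00, 00   (word 2 = 0x00000002)
#
# with each byte on its own line in the actual file.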
def _make_fsm(self, write_delay=10, read_delay=10, sleep=4, sub_sleep=4):
write_mode = 100
read_mode = 200
while read_mode <= write_mode + write_delay + 10:
read_mode += 100
self.fsm.If(self.waddr.awvalid).goto(write_mode)
self.fsm.If(self.raddr.arvalid).goto(read_mode)
write_count = self.m.Reg(
'_'.join(['', 'write_count']), self.addrwidth + 1, initval=0)
write_addr = self.m.Reg(
'_'.join(['', 'write_addr']), self.addrwidth, initval=0)
read_count = self.m.Reg(
'_'.join(['', 'read_count']), self.addrwidth + 1, initval=0)
read_addr = self.m.Reg(
'_'.join(['', 'read_addr']), self.addrwidth, initval=0)
if sleep > 0:
sleep_count = self.m.Reg(
'_'.join(['', 'sleep_count']), self.addrwidth + 1, initval=0)
if sub_sleep > 0:
sub_sleep_count = self.m.Reg(
'_'.join(['', 'sub_sleep_count']), self.addrwidth + 1, initval=0)
self.seq.If(sleep_count == sleep - 1)(
sub_sleep_count.inc()
)
self.seq.If(sleep_count == sleep - 1,
sub_sleep_count == sub_sleep - 1)(
sub_sleep_count(0)
)
cond = sub_sleep_count == sub_sleep - 1
else:
cond = None
self.seq.If(sleep_count < sleep - 1)(
sleep_count.inc()
)
self.seq.If(cond, sleep_count == sleep - 1)(
sleep_count(0)
)
# write mode
self.fsm._set_index(write_mode)
# awvalid and awready
self.fsm.If(self.waddr.awvalid, vtypes.Not(self.wresp.bvalid))(
self.waddr.awready(1),
write_addr(self.waddr.awaddr),
write_count(self.waddr.awlen + 1)
)
self.fsm.Delay(1)(
self.waddr.awready(0)
)
self.fsm.If(vtypes.Not(self.waddr.awvalid)).goto_init()
self.fsm.If(self.waddr.awvalid).goto_next()
# delay
for _ in range(write_delay):
self.fsm.goto_next()
# wready
self.fsm(
self.wdata.wready(1)
)
self.fsm.goto_next()
# wdata -> mem
for i in range(int(self.datawidth / 8)):
self.fsm.If(self.wdata.wvalid, self.wdata.wstrb[i])(
self.mem[write_addr + i](self.wdata.wdata[i * 8:i * 8 + 8])
)
self.fsm.If(self.wdata.wvalid, self.wdata.wready)(
write_addr.add(int(self.datawidth / 8)),
write_count.dec()
)
# sleep
if sleep > 0:
self.fsm.If(sleep_count == sleep - 1)(
self.wdata.wready(0)
).Else(
self.wdata.wready(1)
)
# write complete
self.fsm.If(self.wdata.wvalid, self.wdata.wready, write_count == 1)(
self.wdata.wready(0)
)
self.fsm.Then().goto_init()
# read mode
self.fsm._set_index(read_mode)
# arvalid and arready
self.fsm.If(self.raddr.arvalid)(
self.raddr.arready(1),
read_addr(self.raddr.araddr),
read_count(self.raddr.arlen + 1)
)
self.fsm.Delay(1)(
self.raddr.arready(0)
)
self.fsm.If(vtypes.Not(self.raddr.arvalid)).goto_init()
self.fsm.If(self.raddr.arvalid).goto_next()
# delay
for _ in range(read_delay):
self.fsm.goto_next()
# mem -> rdata
for i in range(int(self.datawidth / 8)):
self.fsm.If(vtypes.Or(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))(
self.rdata.rdata[i * 8:i * 8 + 8](self.mem[read_addr + i])
)
if sleep > 0:
self.fsm.If(sleep_count < sleep - 1, read_count > 0,
vtypes.Or(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))(
self.rdata.rvalid(1),
read_addr.add(int(self.datawidth / 8)),
read_count.dec()
)
self.fsm.If(sleep_count < sleep - 1, read_count == 1,
vtypes.Or(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))(
self.rdata.rlast(1)
)
else:
self.fsm.If(read_count > 0,
vtypes.Or(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))(
self.rdata.rvalid(1),
read_addr.add(int(self.datawidth / 8)),
read_count.dec()
)
self.fsm.If(read_count == 1,
vtypes.Or(self.rdata.rready, vtypes.Not(self.rdata.rvalid)))(
self.rdata.rlast(1)
)
# de-assert
self.fsm.Delay(1)(
self.rdata.rvalid(0),
self.rdata.rlast(0)
)
# retry
self.fsm.If(self.rdata.rvalid, vtypes.Not(self.rdata.rready))(
self.rdata.rvalid(self.rdata.rvalid),
self.rdata.rdata(self.rdata.rdata),
self.rdata.rlast(self.rdata.rlast)
)
# read complete
self.fsm.If(self.rdata.rvalid, self.rdata.rready,
read_count == 0).goto_init()
def read(self, fsm, addr):
""" intrinsic for thread """
cond = fsm.state == fsm.current
rdata = self.m.TmpReg(self.mem_datawidth, initval=0, signed=True, prefix='rdata')
num_bytes = self.mem_datawidth // 8
fsm.If(cond)(
rdata(vtypes.Cat(*reversed([self.mem[addr + i]
for i in range(num_bytes)])))
)
fsm.goto_next()
return rdata
def write(self, fsm, addr, wdata):
""" intrinsic for thread """
cond = fsm.state == fsm.current
num_bytes = self.mem_datawidth // 8
wdata_wire = self.m.TmpWire(self.mem_datawidth, prefix='wdata_wire')
wdata_wire.assign(wdata)
for i in range(num_bytes):
self.seq.If(cond)(
self.mem[addr + i](wdata_wire[i * 8:i * 8 + 8])
)
fsm.goto_next()
return 0
def read_word(self, fsm, word_index, byte_offset, bits=8):
""" intrinsic method word-indexed read """
cond = fsm.state == fsm.current
rdata = self.m.TmpReg(bits, initval=0, signed=True, prefix='rdata')
num_bytes = int(math.ceil(bits / 8))
addr = vtypes.Add(byte_offset,
vtypes.Div(vtypes.Mul(word_index, bits), 8))
shift = word_index * bits % 8
raw_data = vtypes.Cat(*reversed([self.mem[addr + i]
for i in range(num_bytes)]))
fsm.If(cond)(
rdata(raw_data >> shift)
)
fsm.goto_next()
return rdata
def write_word(self, fsm, word_index, byte_offset, wdata, bits=8):
""" intrinsic method word-indexed write """
cond = fsm.state == fsm.current
rdata = self.m.TmpReg(bits, initval=0, signed=True, prefix='rdata')
num_bytes = int(math.ceil(bits / 8))
addr = vtypes.Add(byte_offset,
vtypes.Div(vtypes.Mul(word_index, bits), 8))
shift = word_index * bits % 8
wdata_wire = self.m.TmpWire(bits, prefix='wdata_wire')
wdata_wire.assign(wdata)
mem_data = vtypes.Cat(*reversed([self.mem[addr + i]
for i in range(num_bytes)]))
mem_data_wire = self.m.TmpWire(8 * num_bytes, prefix='mem_data_wire')
mem_data_wire.assign(mem_data)
inv_mask = self.m.TmpWire(8 * num_bytes, prefix='inv_mask')
inv_mask.assign(vtypes.Repeat(vtypes.Int(1, 1), bits) << shift)
mask = self.m.TmpWire(8 * num_bytes, prefix='mask')
mask.assign(vtypes.Unot(inv_mask))
raw_data = vtypes.Or(wdata_wire << shift,
vtypes.And(mem_data_wire, mask))
raw_data_wire = self.m.TmpWire(8 * num_bytes, prefix='raw_data_wire')
raw_data_wire.assign(raw_data)
for i in range(num_bytes):
self.seq.If(cond)(
self.mem[addr + i](raw_data_wire[i * 8:i * 8 + 8])
)
fsm.goto_next()
return 0
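# Worked example (informative): read_word/write_word address sub-byte words as
#
#   addr  = byte_offset + (word_index * bits) // 8
#   shift = (word_index * bits) % 8
#
# e.g. bits=4, word_index=5, byte_offset=0 gives addr=2, shift=4, i.e. the
# upper nibble of the third byte. When 'bits' is a multiple of 8 the shift is
# always 0 and whole bytes are accessed.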
class AxiMultiportMemoryModel(AxiMemoryModel):
__intrinsics__ = ('read', 'write',
'read_word', 'write_word')
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32, numports=2,
mem_datawidth=32, mem_addrwidth=20,
memimg=None, memimg_name=None,
memimg_datawidth=None,
write_delay=10, read_delay=10, sleep=4, sub_sleep=4,
waddr_id_width=0, wdata_id_width=0, wresp_id_width=0,
raddr_id_width=0, rdata_id_width=0,
waddr_user_width=2, wdata_user_width=0, wresp_user_width=0,
raddr_user_width=2, rdata_user_width=0,
wresp_user_mode=xUSER_DEFAULT,
rdata_user_mode=xUSER_DEFAULT):
if mem_datawidth % 8 != 0:
raise ValueError('mem_datawidth must be a multiple of 8')
self.m = m
self.name = name
self.clk = clk
self.rst = rst
self.datawidth = datawidth
self.addrwidth = addrwidth
self.numports = numports
self.noio = True
self.mem_datawidth = mem_datawidth
self.mem_addrwidth = mem_addrwidth
itype = util.t_Reg
otype = util.t_Wire
self.waddrs = [AxiSlaveWriteAddress(m, name + '_%d' % i, datawidth, addrwidth,
waddr_id_width, waddr_user_width, itype, otype)
for i in range(numports)]
self.wdatas = [AxiSlaveWriteData(m, name + '_%d' % i, datawidth, addrwidth,
wdata_id_width, wdata_user_width, itype, otype)
for i in range(numports)]
self.wresps = [AxiSlaveWriteResponse(m, name + '_%d' % i, datawidth, addrwidth,
wresp_id_width, wresp_user_width, itype, otype)
for i in range(numports)]
self.raddrs = [AxiSlaveReadAddress(m, name + '_%d' % i, datawidth, addrwidth,
raddr_id_width, raddr_user_width, itype, otype)
for i in range(numports)]
self.rdatas = [AxiSlaveReadData(m, name + '_%d' % i, datawidth, addrwidth,
rdata_id_width, rdata_user_width, itype, otype)
for i in range(numports)]
# default values
for wresp in self.wresps:
wresp.bresp.assign(0)
if wresp.buser is not None:
wresp.buser.assign(wresp_user_mode)
for rdata in self.rdatas:
rdata.rresp.assign(0)
if rdata.ruser is not None:
rdata.ruser.assign(rdata_user_mode)
self.seq = Seq(self.m, '_'.join(['', self.name, 'seq']), clk, rst)
self.fsms = [FSM(self.m, '_'.join(['', self.name, 'fsm_%d' % i]), clk, rst)
for i in range(numports)]
# all FSMs share the same Seq instance
for fsm in self.fsms:
fsm.seq = self.seq
# write response
for wresp, waddr in zip(self.wresps, self.waddrs):
if wresp.bid is not None:
self.seq.If(waddr.awvalid, waddr.awready,
vtypes.Not(wresp.bvalid))(
wresp.bid(waddr.awid if waddr.awid is not None else 0)
)
for rdata, raddr in zip(self.rdatas, self.raddrs):
if rdata.rid is not None:
self.seq.If(raddr.arvalid, raddr.arready)(
rdata.rid(raddr.arid if raddr.arid is not None else 0)
)
for wresp, wdata in zip(self.wresps, self.wdatas):
self.seq.If(wresp.bvalid, wresp.bready)(
wresp.bvalid(0)
)
self.seq.If(wdata.wvalid, wdata.wready, wdata.wlast)(
wresp.bvalid(1)
)
if memimg is None:
if memimg_name is None:
memimg_name = '_'.join(['', self.name, 'memimg', '.out'])
size = 2 ** self.mem_addrwidth
width = self.mem_datawidth
self._make_img(memimg_name, size, width)
elif isinstance(memimg, str):
memimg_name = memimg
num_words = sum(1 for line in open(memimg, 'r'))
# resize mem_addrwidth according to the memimg size
self.mem_addrwidth = max(self.mem_addrwidth,
int(math.ceil(math.log(num_words, 2))))
else:
if memimg_datawidth is None:
memimg_datawidth = mem_datawidth
if memimg_name is None:
memimg_name = '_'.join(['', self.name, 'memimg', '.out'])
num_words = to_memory_image(memimg_name, memimg, datawidth=memimg_datawidth)
# resize mem_addrwidth according to the memimg size
self.mem_addrwidth = max(self.mem_addrwidth,
int(math.ceil(math.log(num_words, 2))))
self.mem = self.m.Reg(
'_'.join(['', self.name, 'mem']), 8, vtypes.Int(2) ** self.mem_addrwidth)
self.m.Initial(
vtypes.Systask('readmemh', memimg_name, self.mem)
)
self._make_fsms(write_delay, read_delay, sleep, sub_sleep)
def _make_fsms(self, write_delay=10, read_delay=10, sleep=4, sub_sleep=4):
for i, (fsm, waddr, wdata, wresp, raddr, rdata) in enumerate(
zip(self.fsms, self.waddrs, self.wdatas, self.wresps, self.raddrs, self.rdatas)):
write_count = self.m.Reg(
'_'.join(['', 'write_count_%d' % i]), self.addrwidth + 1, initval=0)
write_addr = self.m.Reg(
'_'.join(['', 'write_addr_%d' % i]), self.addrwidth, initval=0)
read_count = self.m.Reg(
'_'.join(['', 'read_count_%d' % i]), self.addrwidth + 1, initval=0)
read_addr = self.m.Reg(
'_'.join(['', 'read_addr_%d' % i]), self.addrwidth, initval=0)
if sleep > 0:
sleep_count = self.m.Reg(
'_'.join(['', 'sleep_count_%d' % i]), self.addrwidth + 1, initval=0)
if sub_sleep > 0:
sub_sleep_count = self.m.Reg(
'_'.join(['', 'sub_sleep_count_%d' % i]), self.addrwidth + 1, initval=0)
fsm.seq.If(sleep_count == sleep - 1)(
sub_sleep_count.inc()
)
fsm.seq.If(sleep_count == sleep - 1,
sub_sleep_count == sub_sleep - 1)(
sub_sleep_count(0)
)
cond = sub_sleep_count == sub_sleep - 1
else:
cond = None
fsm.seq.If(sleep_count < sleep - 1)(
sleep_count.inc()
)
fsm.seq.If(cond, sleep_count == sleep - 1)(
sleep_count(0)
)
write_mode = 100
read_mode = 200
while read_mode <= write_mode + write_delay + 10:
read_mode += 100
fsm.If(waddr.awvalid).goto(write_mode)
fsm.If(raddr.arvalid).goto(read_mode)
# write mode
fsm._set_index(write_mode)
# awvalid and awready
fsm.If(waddr.awvalid, vtypes.Not(wresp.bvalid))(
waddr.awready(1),
write_addr(waddr.awaddr),
write_count(waddr.awlen + 1)
)
fsm.Delay(1)(
waddr.awready(0)
)
fsm.If(vtypes.Not(waddr.awvalid)).goto_init()
fsm.If(waddr.awvalid).goto_next()
# delay
for _ in range(write_delay):
fsm.goto_next()
# wready
fsm(
wdata.wready(1)
)
fsm.goto_next()
# wdata -> mem
for i in range(int(self.datawidth / 8)):
fsm.If(wdata.wvalid, wdata.wstrb[i])(
self.mem[write_addr + i](wdata.wdata[i * 8:i * 8 + 8])
)
fsm.If(wdata.wvalid, wdata.wready)(
write_addr.add(int(self.datawidth / 8)),
write_count.dec()
)
# sleep
if sleep > 0:
fsm.If(sleep_count == sleep - 1)(
wdata.wready(0)
).Else(
wdata.wready(1)
)
# write complete
fsm.If(wdata.wvalid, wdata.wready, write_count == 1)(
wdata.wready(0)
)
fsm.Then().goto_init()
# read mode
fsm._set_index(read_mode)
# arvalid and arready
fsm.If(raddr.arvalid)(
raddr.arready(1),
read_addr(raddr.araddr),
read_count(raddr.arlen + 1)
)
fsm.Delay(1)(
raddr.arready(0)
)
fsm.If(vtypes.Not(raddr.arvalid)).goto_init()
fsm.If(raddr.arvalid).goto_next()
# delay
for _ in range(read_delay):
fsm.goto_next()
# mem -> rdata
for i in range(int(self.datawidth / 8)):
fsm.If(vtypes.Or(rdata.rready, vtypes.Not(rdata.rvalid)))(
rdata.rdata[i * 8:i * 8 + 8](self.mem[read_addr + i])
)
if sleep > 0:
fsm.If(sleep_count < sleep - 1, read_count > 0,
vtypes.Or(rdata.rready, vtypes.Not(rdata.rvalid)))(
rdata.rvalid(1),
read_addr.add(int(self.datawidth / 8)),
read_count.dec()
)
fsm.If(sleep_count < sleep - 1, read_count == 1,
vtypes.Or(rdata.rready, vtypes.Not(rdata.rvalid)))(
rdata.rlast(1)
)
else:
fsm.If(read_count > 0,
vtypes.Or(rdata.rready, vtypes.Not(rdata.rvalid)))(
rdata.rvalid(1),
read_addr.add(int(self.datawidth / 8)),
read_count.dec()
)
fsm.If(read_count == 1,
vtypes.Or(rdata.rready, vtypes.Not(rdata.rvalid)))(
rdata.rlast(1)
)
# de-assert
fsm.Delay(1)(
rdata.rvalid(0),
rdata.rlast(0)
)
# retry
fsm.If(rdata.rvalid, vtypes.Not(rdata.rready))(
rdata.rvalid(rdata.rvalid),
rdata.rdata(rdata.rdata),
rdata.rlast(rdata.rlast)
)
# read complete
fsm.If(rdata.rvalid, rdata.rready,
read_count == 0).goto_init()
def connect(self, index, ports, name):
if not self.noio:
raise ValueError('I/O ports can not be connected to others.')
ports = defaultdict(lambda: None, ports)
if '_'.join([name, 'awid']) in ports:
awid = ports['_'.join([name, 'awid'])]
else:
awid = None
awaddr = ports['_'.join([name, 'awaddr'])]
awlen = ports['_'.join([name, 'awlen'])]
awsize = ports['_'.join([name, 'awsize'])]
awburst = ports['_'.join([name, 'awburst'])]
awlock = ports['_'.join([name, 'awlock'])]
awcache = ports['_'.join([name, 'awcache'])]
awprot = ports['_'.join([name, 'awprot'])]
awqos = ports['_'.join([name, 'awqos'])]
if '_'.join([name, 'awuser']) in ports:
awuser = ports['_'.join([name, 'awuser'])]
else:
awuser = None
awvalid = ports['_'.join([name, 'awvalid'])]
awready = ports['_'.join([name, 'awready'])]
if self.waddrs[index].awid is not None:
self.waddrs[index].awid.connect(awid if awid is not None else 0)
self.waddrs[index].awaddr.connect(awaddr)
self.waddrs[index].awlen.connect(awlen if awlen is not None else 0)
self.waddrs[index].awsize.connect(awsize if awsize is not None else
int(math.log(self.datawidth // 8)))
self.waddrs[index].awburst.connect(awburst if awburst is not None else BURST_INCR)
self.waddrs[index].awlock.connect(awlock if awlock is not None else 0)
self.waddrs[index].awcache.connect(awcache)
self.waddrs[index].awprot.connect(awprot)
self.waddrs[index].awqos.connect(awqos if awqos is not None else 0)
if self.waddrs[index].awuser is not None:
self.waddrs[index].awuser.connect(awuser if awuser is not None else 0)
self.waddrs[index].awvalid.connect(awvalid)
awready.connect(self.waddrs[index].awready)
wdata = ports['_'.join([name, 'wdata'])]
wstrb = ports['_'.join([name, 'wstrb'])]
wlast = ports['_'.join([name, 'wlast'])]
if '_'.join([name, 'wuser']) in ports:
wuser = ports['_'.join([name, 'wuser'])]
else:
wuser = None
wvalid = ports['_'.join([name, 'wvalid'])]
wready = ports['_'.join([name, 'wready'])]
self.wdatas[index].wdata.connect(wdata)
self.wdatas[index].wstrb.connect(wstrb)
self.wdatas[index].wlast.connect(wlast if wlast is not None else 1)
if self.wdatas[index].wuser is not None:
self.wdatas[index].wuser.connect(wuser if wuser is not None else 0)
self.wdatas[index].wvalid.connect(wvalid)
wready.connect(self.wdatas[index].wready)
if '_'.join([name, 'bid']) in ports:
bid = ports['_'.join([name, 'bid'])]
else:
bid = None
bresp = ports['_'.join([name, 'bresp'])]
if '_'.join([name, 'buser']) in ports:
buser = ports['_'.join([name, 'buser'])]
else:
buser = None
bvalid = ports['_'.join([name, 'bvalid'])]
bready = ports['_'.join([name, 'bready'])]
if bid is not None:
bid.connect(self.wresps[index].bid if self.wresps[index].bid is not None else 0)
bresp.connect(self.wresps[index].bresp)
if buser is not None:
buser.connect(self.wresps[index].buser if self.wresps[index].buser is not None else 0)
bvalid.connect(self.wresps[index].bvalid)
self.wresps[index].bready.connect(bready)
if '_'.join([name, 'arid']) in ports:
arid = ports['_'.join([name, 'arid'])]
else:
arid = None
araddr = ports['_'.join([name, 'araddr'])]
arlen = ports['_'.join([name, 'arlen'])]
arsize = ports['_'.join([name, 'arsize'])]
arburst = ports['_'.join([name, 'arburst'])]
arlock = ports['_'.join([name, 'arlock'])]
arcache = ports['_'.join([name, 'arcache'])]
arprot = ports['_'.join([name, 'arprot'])]
arqos = ports['_'.join([name, 'arqos'])]
if '_'.join([name, 'aruser']) in ports:
aruser = ports['_'.join([name, 'aruser'])]
else:
aruser = None
arvalid = ports['_'.join([name, 'arvalid'])]
arready = ports['_'.join([name, 'arready'])]
if self.raddrs[index].arid is not None:
self.raddrs[index].arid.connect(arid if arid is not None else 0)
self.raddrs[index].araddr.connect(araddr)
self.raddrs[index].arlen.connect(arlen if arlen is not None else 0)
self.raddrs[index].arsize.connect(arsize if arsize is not None else
int(math.log(self.datawidth // 8)))
self.raddrs[index].arburst.connect(arburst if arburst is not None else BURST_INCR)
self.raddrs[index].arlock.connect(arlock if arlock is not None else 0)
self.raddrs[index].arcache.connect(arcache)
self.raddrs[index].arprot.connect(arprot)
self.raddrs[index].arqos.connect(arqos if arqos is not None else 0)
if self.raddrs[index].aruser is not None:
self.raddrs[index].aruser.connect(aruser if aruser is not None else 0)
self.raddrs[index].arvalid.connect(arvalid)
arready.connect(self.raddrs[index].arready)
if '_'.join([name, 'rid']) in ports:
rid = ports['_'.join([name, 'rid'])]
else:
rid = None
rdata = ports['_'.join([name, 'rdata'])]
rresp = ports['_'.join([name, 'rresp'])]
rlast = ports['_'.join([name, 'rlast'])]
if '_'.join([name, 'ruser']) in ports:
ruser = ports['_'.join([name, 'ruser'])]
else:
ruser = None
rvalid = ports['_'.join([name, 'rvalid'])]
rready = ports['_'.join([name, 'rready'])]
if rid is not None:
rid.connect(self.rdatas[index].rid if self.rdatas[index].rid is not None else 0)
rdata.connect(self.rdatas[index].rdata)
rresp.connect(self.rdatas[index].rresp)
if rlast is not None:
rlast.connect(self.rdatas[index].rlast)
if ruser is not None:
ruser.connect(self.rdatas[index].ruser if self.rdatas[index].ruser is not None else 0)
rvalid.connect(self.rdatas[index].rvalid)
self.rdatas[index].rready.connect(rready)
def make_memory_image(filename, length, pattern='inc', dtype=None,
datawidth=32, wordwidth=8, endian='little'):
import numpy as np
if dtype is None:
dtype = np.int64
if pattern == 'inc':
l = list(range(length))
array = np.array(l, dtype=dtype)
else:
array = np.zeros([length], dtype=dtype)
to_memory_image(filename, array,
datawidth=datawidth, wordwidth=wordwidth,
endian=endian)
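# Illustrative usage (not exercised here): write 1024 incrementing 32-bit words
# as little-endian 8-bit hex lines with the default settings:
#
#   make_memory_image('_mem_img.out', 1024)
#
# The 'pattern' argument currently only distinguishes 'inc' (0, 1, 2, ...) from
# an all-zero image.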
def to_memory_image(filename, array, length=None,
datawidth=32, wordwidth=8, endian='little', blksize=4096):
import numpy as np
if not isinstance(array, np.ndarray):
array = np.array(array)
array = np.reshape(array, [-1])
if not isinstance(array[0], (int, np.int64, np.int32)):
raise TypeError("not supported type: '%s'" %
str(type(array[0])))
if length is not None:
if len(array) > length:
array = array[:length]
elif len(array) < length:
array = np.append(array, np.zeros([length - len(array)],
                                  dtype=array.dtype))
num_hex = int(math.ceil(wordwidth / 4))
fmt = ''.join(['%0', str(num_hex), 'x\n'])
if datawidth >= wordwidth:
num = int(math.ceil(datawidth / wordwidth))
zero = np.zeros(list(array.shape) + [num], dtype=np.int64)
base = array.reshape([-1, 1])
shamt = np.arange(num, dtype=np.int64) * [wordwidth]
if endian == 'big':
shamt = shamt[::-1]
mask = np.full([1], 2 ** wordwidth - 1, dtype=np.int64)
data = (((zero + base) >> shamt) & mask).reshape([-1])
with open(filename, 'w') as f:
for i in range(0, len(data), blksize):
blk = data[i:i + blksize]
s = ''.join([fmt % d for d in blk])
f.write(s)
return len(data)
else:
num = int(math.ceil(wordwidth / datawidth))
base = array.reshape([-1, num])
shamt = np.arange(num, dtype=np.int64) * [datawidth]
if endian == 'big':
shamt = shamt[::-1]
mask = np.full([1], 2 ** datawidth - 1, dtype=np.int64)
data = (base.reshape([-1, num]) & mask) << shamt
data = np.bitwise_or.reduce(data, -1).reshape([-1])
with open(filename, 'w') as f:
for i in range(0, len(data), blksize):
blk = data[i:i + blksize]
s = ''.join([fmt % d for d in blk])
f.write(s)
return len(data)
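# Worked example (informative): with datawidth=32 and wordwidth=8 every element
# is emitted as four little-endian byte lines and the return value is the
# number of lines written:
#
#   n = to_memory_image('_img.out', [0x11223344])
#   # file contents: 44, 33, 22, 11 (one per line); n == 4
#
# When wordwidth > datawidth, several elements are packed into each output
# word instead.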
def aligned_shape(shape, datawidth, mem_datawidth):
aligned_shape = list(shape[:])
if datawidth == mem_datawidth or datawidth > mem_datawidth:
return aligned_shape
chunk = mem_datawidth // datawidth
new_size = int(math.ceil(aligned_shape[-1] / chunk)) * chunk
aligned_shape[-1] = new_size
return aligned_shape
def shape_to_length(shape):
return functools.reduce(lambda x, y: x * y, shape, 1)
def shape_to_memory_size(shape, datawidth, mem_datawidth=None, block_size=4096):
if mem_datawidth is not None:
shape = aligned_shape(shape, datawidth, mem_datawidth)
bytes = int(math.ceil(datawidth / 8))
length = shape_to_length(shape)
return ((block_size // bytes) *
int(math.ceil(length / (block_size // bytes))))
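# Worked example (informative): shape=(4, 5), datawidth=32, block_size=4096:
#
#   bytes = 4, length = 20, words per block = 4096 // 4 = 1024
#   size  = 1024 * ceil(20 / 1024) = 1024
#
# i.e. the result is the element count rounded up to a whole 4 KiB block,
# expressed in data words.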
def set_memory(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words=None):
if mem_datawidth < src_datawidth:
return _set_memory_narrow(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words)
return _set_memory_wide(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words)
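# set_memory dispatches on relative widths: the 'wide' path packs several narrow source
# words into each memory word, while the 'narrow' path splits one wide source word
# across several memory words.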
def _set_memory_wide(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words=None):
if mem_datawidth > 64:
raise ValueError('not supported')
import numpy as np
if num_align_words is not None:
src = align(src, num_align_words)
num_pack = int(math.ceil(mem_datawidth / src_datawidth))
src_mask = np.full([1], 2 ** src_datawidth - 1, dtype=np.int64)
mem_mask = np.full([1], 2 ** mem_datawidth - 1, dtype=np.int64)
offset = mem_offset // int(math.ceil(mem_datawidth / 8))
if src.shape[-1] % num_pack != 0:
pads = []
for s in src.shape[:-1]:
pads.append((0, 0))
# pad the last axis up to a multiple of num_pack (a negative pad width would raise in np.pad)
pads.append((0, num_pack - src.shape[-1] % num_pack))
src = np.pad(src, pads, 'constant')
masked_data = src.astype(np.int64) & src_mask
pack = np.arange(src.shape[-1], dtype=np.int64) % [num_pack]
shift = [src_datawidth] * pack
v = (masked_data << shift) & mem_mask
v = np.reshape(v, [-1, num_pack])
v = np.bitwise_or.reduce(v, -1)
dst_size = mem[offset:offset + v.shape[-1]].size
if v.size > dst_size:
raise ValueError("""too large source data: """
"""destination size (%d) < source size (%d)""" %
(dst_size, v.size))
mem[offset:offset + v.shape[-1]] = v
def _set_memory_narrow(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words=None):
if mem_datawidth > 64:
raise ValueError('not supported')
import numpy as np
if num_align_words is not None:
src = align(src, num_align_words)
num_pack = int(math.ceil(src_datawidth / mem_datawidth))
src_mask = np.full([1], 2 ** src_datawidth - 1, dtype=np.int64)
mem_mask = np.full([1], 2 ** mem_datawidth - 1, dtype=np.int64)
offset = mem_offset // int(math.ceil(mem_datawidth / 8))
pack = np.arange(num_pack, dtype=np.int64)
shift = [mem_datawidth] * pack
dup_src_based = np.zeros(list(src.shape) + [num_pack], dtype=np.int64)
dup_src = dup_src_based + np.reshape(src, list(src.shape) + [1])
v = dup_src >> shift
v = np.reshape(v, [-1])
v = v & mem_mask
dst_size = mem[offset:offset + v.shape[-1]].size
if v.size > dst_size:
raise ValueError("""too large source data: """
"""destination size (%d) < source size (%d)""" %
(dst_size, v.size))
mem[offset:offset + v.shape[-1]] = v
def align(src, num_align_words):
if num_align_words == 1:
return src
import numpy as np
src_aligned_shape = aligned_shape(src.shape, 1, num_align_words)
ret = np.zeros(src_aligned_shape, dtype=np.int64).reshape([-1])
offset = 0
index = 0
res = num_align_words - src.shape[-1] % num_align_words
for data in src.reshape([-1]):
ret[offset] = data
offset += 1
index += 1
if index == src.shape[-1]:
index = 0
if res < num_align_words:
offset += res
return ret
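# Example: align(np.array([[1, 2, 3]]), 4) -> array([1, 2, 3, 0]); each row is
# zero-padded up to a multiple of num_align_words and the result is flattened.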
def split_read_write(m, ports, prefix,
read_prefix='r_', write_prefix='w_'):
# Read (AR, R)
r_ports = {}
for name, port in ports.items():
r_name = read_prefix + port.name
if name.startswith(prefix + '_ar') or name.startswith(prefix + '_r'):
if isinstance(port, vtypes.Reg):
r_port = m.RegLike(port, name=r_name)
port.connect(r_port)
else:
r_port = m.WireLike(port, name=r_name)
r_port.connect(port)
else:
r_port = m.WireLike(port, name=r_name)
if isinstance(port, vtypes.Wire):
r_port.assign(0)
r_ports[r_name] = r_port
# Write (AW, W, B)
w_ports = {}
for name, port in ports.items():
w_name = write_prefix + port.name
if (name.startswith(prefix + '_aw') or
name.startswith(prefix + '_w') or name.startswith(prefix + '_b')):
if isinstance(port, vtypes.Reg):
w_port = m.RegLike(port, name=w_name)
port.connect(w_port)
else:
w_port = m.WireLike(port, name=w_name)
w_port.connect(port)
else:
w_port = m.WireLike(port, name=w_name)
if isinstance(port, vtypes.Wire):
w_port.assign(0)
w_ports[w_name] = w_port
return r_ports, w_ports
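# split_read_write mirrors an AXI port dict into read-only (AR/R) and write-only (AW/W/B)
# variants; roughly speaking, channels belonging to the other side are stubbed out, with
# wire-type ports tied to zero.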
```
#### File: veriloggen/types/componentgen.py
```python
from __future__ import absolute_import
from __future__ import print_function
import xml.dom.minidom
import datetime
import copy
import math
import veriloggen.core.vtypes as vtypes
from veriloggen.resolver import resolver
from . import axi
PORTLIST = ('AWID', 'AWADDR', 'AWLEN', 'AWSIZE', 'AWBURST', 'AWLOCK',
'AWCACHE', 'AWPROT', 'AWQOS', 'AWUSER', 'AWVALID', 'AWREADY',
'WDATA', 'WSTRB', 'WLAST', 'WUSER', 'WVALID', 'WREADY',
'BID', 'BRESP', 'BUSER', 'BVALID', 'BREADY',
'ARID', 'ARADDR', 'ARLEN', 'ARSIZE', 'ARBURST', 'ARLOCK',
'ARCACHE', 'ARPROT', 'ARQOS', 'ARUSER', 'ARVALID', 'ARREADY',
'RID', 'RDATA', 'RRESP', 'RLAST', 'RUSER', 'RVALID', 'RREADY')
PORTLITELIST = ('AWADDR', 'AWCACHE', 'AWPROT', 'AWVALID', 'AWREADY',
'WDATA', 'WSTRB', 'WVALID', 'WREADY',
'BRESP', 'BVALID', 'BREADY',
'ARADDR', 'ARCACHE', 'ARPROT', 'ARVALID', 'ARREADY',
'RDATA', 'RRESP', 'RVALID', 'RREADY')
PORTSTREAMLIST = ('TDATA', 'TVALID', 'TREADY', 'TLAST', 'TSTRB', 'TUSER', 'TID', 'TDEST')
class ComponentGen(object):
def __init__(self):
self.m = None
self.ip_name = None
self.bus_interfaces = None
self.clk_ports = None
self.rst_ports = None
self.irq_ports = None
self.ext_ports = None
self.ext_params = None
self.vendor = 'user.org'
self.library = 'user'
self.version = '1.0'
self.doc = None
self.top = None
self.dependency_consumer = set()
def generate(self, m, ip_name, bus_interfaces,
clk_ports, rst_ports, irq_ports,
ext_ports, ext_params,
vendor='user.org', library='user', version='1.0',
description='user description',
supported_families=('zynq', 'zynquplus')):
self.m = m
self.ip_name = ip_name
self.bus_interfaces = bus_interfaces
self.clk_ports = clk_ports
self.rst_ports = rst_ports
self.irq_ports = irq_ports
self.ext_ports = ext_ports
self.ext_params = ext_params
self.resolved_m = resolver.resolve(copy.deepcopy(m))
self.vendor = vendor
self.library = library
self.version = version
if not supported_families:
raise ValueError(
'supported_families must be a list or tuple with valid names.')
self.supported_families = supported_families
impl = xml.dom.minidom.getDOMImplementation()
self.doc = impl.createDocument('spirit', 'spirit:component', None)
self.top = self.doc.documentElement
self.setAttribute(self.top, 'xmlns:xilinx', "http://www.xilinx.com")
self.setAttribute(self.top, 'xmlns:spirit',
"http://www.spiritconsortium.org/XMLSchema/SPIRIT/1685-2009")
self.setAttribute(self.top, 'xmlns:xsi',
"http://www.w3.org/2001/XMLSchema-instance")
self.dependency_consumer = set()
self.top.appendChild(self.mkVendor())
self.top.appendChild(self.mkLibrary())
self.top.appendChild(self.mkName(self.ip_name))
self.top.appendChild(self.mkVersion())
self.top.appendChild(self.mkBusInterfaces())
r = self.mkAddressSpaces()
if r:
self.top.appendChild(r)
r = self.mkMemoryMaps()
if r:
self.top.appendChild(r)
self.top.appendChild(self.mkModel())
self.top.appendChild(self.mkFileSets())
self.top.appendChild(self.mkDescription(description))
self.top.appendChild(self.mkParameters())
self.top.appendChild(self.mkVendorExtensions())
return self.doc.toprettyxml(indent=' ')
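# generate() returns the IP-XACT (SPIRIT 1685-2009) component.xml text, apparently intended
# for Vivado IP packaging; each helper below builds one subtree of that XML document.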
def setAttribute(self, obj, name, text):
obj.setAttribute(name, str(text))
def setText(self, obj, text):
textobj = self.doc.createTextNode(str(text))
obj.appendChild(textobj)
def mkVendor(self):
vendor = self.doc.createElement('spirit:vendor')
self.setText(vendor, self.vendor)
return vendor
def mkLibrary(self):
library = self.doc.createElement('spirit:library')
self.setText(library, self.library)
return library
def mkVersion(self):
version = self.doc.createElement('spirit:version')
self.setText(version, str(self.version))
return version
def mkName(self, v):
name = self.doc.createElement('spirit:name')
self.setText(name, v)
return name
def mkTextNode(self, n, v):
name = self.doc.createElement(n)
self.setText(name, v)
return name
def mkBusInterfaces(self):
bus = self.doc.createElement('spirit:busInterfaces')
for bus_interface in self.bus_interfaces:
bus.appendChild(self.mkBusInterface(bus_interface))
for rst_name, rst_polarity in self.rst_ports.items():
bus.appendChild(self.mkBusInterfaceReset(rst_name, rst_polarity))
for clk_name, assoc_rsts in self.clk_ports.items():
bus.appendChild(self.mkBusInterfaceClock(clk_name, assoc_rsts))
for irq_name, sensitivity in self.irq_ports.items():
bus.appendChild(self.mkBusInterfaceInterrupt(irq_name, sensitivity))
return bus
def mkBusInterface(self, obj):
name = obj.name
datawidth = obj.datawidth
interface = self.doc.createElement('spirit:busInterface')
interface.appendChild(self.mkName(name))
if is_master(obj):
interface.appendChild(self.mkBusTypeMemory())
interface.appendChild(self.mkAbstractionTypeMemory())
interface.appendChild(self.mkMaster(name))
interface.appendChild(self.mkPortMapsMemory(obj))
elif is_slave(obj):
interface.appendChild(self.mkBusTypeMemory())
interface.appendChild(self.mkAbstractionTypeMemory())
interface.appendChild(self.mkSlave(name))
interface.appendChild(self.mkPortMapsMemory(obj))
elif is_streamin(obj):
interface.appendChild(self.mkBusTypeStream())
interface.appendChild(self.mkAbstractionTypeStream())
interface.appendChild(self.mkStreamIn(name))
interface.appendChild(self.mkPortMapsStream(obj))
elif is_streamout(obj):
interface.appendChild(self.mkBusTypeStream())
interface.appendChild(self.mkAbstractionTypeStream())
interface.appendChild(self.mkStreamOut(name))
interface.appendChild(self.mkPortMapsStream(obj))
else:
raise TypeError("Unsupported type: '%s'" % str(type(obj)))
return interface
def mkBusTypeMemory(self):
bustype = self.doc.createElement('spirit:busType')
self.setAttribute(bustype, 'spirit:vendor', "xilinx.com")
self.setAttribute(bustype, 'spirit:library', "interface")
self.setAttribute(bustype, 'spirit:name', "aximm")
self.setAttribute(bustype, 'spirit:version', "1.0")
return bustype
def mkAbstractionTypeMemory(self):
abstractiontype = self.doc.createElement('spirit:abstractionType')
self.setAttribute(abstractiontype, 'spirit:vendor', "xilinx.com")
self.setAttribute(abstractiontype, 'spirit:library', "interface")
self.setAttribute(abstractiontype, 'spirit:name', "aximm_rtl")
self.setAttribute(abstractiontype, 'spirit:version', "1.0")
return abstractiontype
def mkBusTypeStream(self):
bustype = self.doc.createElement('spirit:busType')
self.setAttribute(bustype, 'spirit:vendor', "xilinx.com")
self.setAttribute(bustype, 'spirit:library', "interface")
self.setAttribute(bustype, 'spirit:name', "axis")
self.setAttribute(bustype, 'spirit:version', "1.0")
return bustype
def mkAbstractionTypeStream(self):
abstractiontype = self.doc.createElement('spirit:abstractionType')
self.setAttribute(abstractiontype, 'spirit:vendor', "xilinx.com")
self.setAttribute(abstractiontype, 'spirit:library', "interface")
self.setAttribute(abstractiontype, 'spirit:name', "axis_rtl")
self.setAttribute(abstractiontype, 'spirit:version', "1.0")
return abstractiontype
def mkMaster(self, name):
master = self.doc.createElement('spirit:master')
addressspaceref = self.doc.createElement('spirit:addressSpaceRef')
self.setAttribute(addressspaceref, 'spirit:addressSpaceRef', name)
master.appendChild(addressspaceref)
return master
def mkSlave(self, name):
slave = self.doc.createElement('spirit:slave')
memorymapref = self.doc.createElement('spirit:memoryMapRef')
self.setAttribute(memorymapref, 'spirit:memoryMapRef', name)
slave.appendChild(memorymapref)
return slave
def mkStreamIn(self, name):
streamin = self.doc.createElement('spirit:slave')
return streamin
def mkStreamOut(self, name):
streamout = self.doc.createElement('spirit:master')
return streamout
def mkPortMapsMemory(self, obj):
lite = is_lite(obj)
portmaps = self.doc.createElement('spirit:portMaps')
portlist = list(PORTLITELIST if lite else PORTLIST)
if not lite and obj.waddr.awid is None:
portlist.remove('AWID')
if not lite and obj.waddr.awuser is None:
portlist.remove('AWUSER')
if not lite and obj.wdata.wuser is None:
portlist.remove('WUSER')
if not lite and obj.wresp.bid is None:
portlist.remove('BID')
if not lite and obj.wresp.buser is None:
portlist.remove('BUSER')
if not lite and obj.raddr.arid is None:
portlist.remove('ARID')
if not lite and obj.raddr.aruser is None:
portlist.remove('ARUSER')
if not lite and obj.rdata.rid is None:
portlist.remove('RID')
if not lite and obj.rdata.ruser is None:
portlist.remove('RUSER')
for port in portlist:
portmaps.appendChild(self.mkPortMapMemory(obj, port))
return portmaps
def mkPortMapsStream(self, obj):
portmaps = self.doc.createElement('spirit:portMaps')
portlist = list(PORTSTREAMLIST)
if obj.tdata.tlast is None:
portlist.remove('TLAST')
if obj.tdata.tstrb is None:
portlist.remove('TSTRB')
if obj.tdata.tuser is None:
portlist.remove('TUSER')
if obj.tdata.tid is None:
portlist.remove('TID')
if obj.tdata.tdest is None:
portlist.remove('TDEST')
for port in portlist:
portmaps.appendChild(self.mkPortMapStream(obj, port))
return portmaps
def mkPortMapMemory(self, obj, attr):
portmap = self.doc.createElement('spirit:portMap')
portmap.appendChild(self.mkLogicalPort(attr))
portmap.appendChild(self.mkPhysicalPortMemory(obj, attr))
return portmap
def mkPortMapStream(self, obj, attr):
portmap = self.doc.createElement('spirit:portMap')
portmap.appendChild(self.mkLogicalPort(attr))
portmap.appendChild(self.mkPhysicalPortStream(obj, attr))
return portmap
def mkLogicalPort(self, attr):
logicalport = self.doc.createElement('spirit:logicalPort')
logicalport.appendChild(self.mkName(attr))
return logicalport
def mkPhysicalPortMemory(self, obj, attr):
if hasattr(obj.waddr, attr.lower()):
name = getattr(obj.waddr, attr.lower()).name
elif hasattr(obj.wdata, attr.lower()):
name = getattr(obj.wdata, attr.lower()).name
elif hasattr(obj.wresp, attr.lower()):
name = getattr(obj.wresp, attr.lower()).name
elif hasattr(obj.raddr, attr.lower()):
name = getattr(obj.raddr, attr.lower()).name
elif hasattr(obj.rdata, attr.lower()):
name = getattr(obj.rdata, attr.lower()).name
else:
raise NameError("No such attribute '%s' in object '%s'" %
(attr.lower(), obj))
physicalport = self.doc.createElement('spirit:physicalPort')
physicalport.appendChild(self.mkName(name))
return physicalport
def mkPhysicalPortStream(self, obj, attr):
if hasattr(obj.tdata, attr.lower()):
name = getattr(obj.tdata, attr.lower()).name
else:
raise NameError("No such attribute '%s' in object '%s'" %
(attr.lower(), obj))
physicalport = self.doc.createElement('spirit:physicalPort')
physicalport.appendChild(self.mkName(name))
return physicalport
def mkBusInterfaceClock(self, name, rsts):
interface = self.doc.createElement('spirit:busInterface')
interface.appendChild(self.mkName(name))
interface.appendChild(self.mkBusTypeClock())
interface.appendChild(self.mkAbstractionTypeClock())
interface.appendChild(self.mkSlaveClock())
interface.appendChild(self.mkPortMapsClock(name))
interface.appendChild(self.mkBusParametersClock(name, rsts))
return interface
def mkBusTypeClock(self):
bustype = self.doc.createElement('spirit:busType')
self.setAttribute(bustype, 'spirit:vendor', "xilinx.com")
self.setAttribute(bustype, 'spirit:library', "signal")
self.setAttribute(bustype, 'spirit:name', "clock")
self.setAttribute(bustype, 'spirit:version', "1.0")
return bustype
def mkAbstractionTypeClock(self):
abstractiontype = self.doc.createElement('spirit:abstractionType')
self.setAttribute(abstractiontype, 'spirit:vendor', "xilinx.com")
self.setAttribute(abstractiontype, 'spirit:library', "signal")
self.setAttribute(abstractiontype, 'spirit:name', "clock_rtl")
self.setAttribute(abstractiontype, 'spirit:version', "1.0")
return abstractiontype
def mkSlaveClock(self):
slave = self.doc.createElement('spirit:slave')
return slave
def mkPortMapsClock(self, name):
portmaps = self.doc.createElement('spirit:portMaps')
portmaps.appendChild(self.mkPortMapClock(name))
return portmaps
def mkPortMapClock(self, name):
portmap = self.doc.createElement('spirit:portMap')
portmap.appendChild(self.mkLogicalPort('CLK'))
portmap.appendChild(self.mkPhysicalPortClock(name))
return portmap
def mkPhysicalPortClock(self, name):
physicalport = self.doc.createElement('spirit:physicalPort')
physicalport.appendChild(self.mkName(name))
return physicalport
def mkBusParametersClock(self, name, rsts):
parameters = self.doc.createElement('spirit:parameters')
parameters.appendChild(self.mkBusParameterAssocBusIf(name))
parameters.appendChild(self.mkBusParameterAssocReset(name, rsts))
return parameters
def mkBusParameterAssocBusIf(self, name):
parameter = self.doc.createElement('spirit:parameter')
parameter.appendChild(self.mkName('ASSOCIATED_BUSIF'))
value = self.doc.createElement('spirit:value')
bus_name_list = []
for bus_interface in self.bus_interfaces:
if (isinstance(bus_interface.clk, vtypes._Variable) and
bus_interface.clk.module.is_input(bus_interface.clk.name) and
bus_interface.clk.name == name):
bus_name_list.append(bus_interface.name)
bus_names = ':'.join(bus_name_list)
self.setAttribute(value, 'spirit:id', "BUSIFPARAM_VALUE."
+ name + ".ASSOCIATED_BUSIF")
self.setText(value, bus_names)
parameter.appendChild(value)
return parameter
def mkBusParameterAssocReset(self, name, rsts):
parameter = self.doc.createElement('spirit:parameter')
parameter.appendChild(self.mkName('ASSOCIATED_RESET'))
value = self.doc.createElement('spirit:value')
rst_names = ':'.join(rsts)
self.setAttribute(value, 'spirit:id', "BUSIFPARAM_VALUE."
+ name + ".ASSOCIATED_RESET")
self.setText(value, rst_names)
parameter.appendChild(value)
return parameter
def mkBusInterfaceReset(self, name, polarity):
interface = self.doc.createElement('spirit:busInterface')
interface.appendChild(self.mkName(name))
interface.appendChild(self.mkBusTypeReset())
interface.appendChild(self.mkAbstractionTypeReset())
interface.appendChild(self.mkSlaveReset())
interface.appendChild(self.mkPortMapsReset(name))
interface.appendChild(self.mkBusParametersReset(name, polarity))
return interface
def mkBusTypeReset(self):
bustype = self.doc.createElement('spirit:busType')
self.setAttribute(bustype, 'spirit:vendor', "xilinx.com")
self.setAttribute(bustype, 'spirit:library', "signal")
self.setAttribute(bustype, 'spirit:name', "reset")
self.setAttribute(bustype, 'spirit:version', "1.0")
return bustype
def mkAbstractionTypeReset(self):
abstractiontype = self.doc.createElement('spirit:abstractionType')
self.setAttribute(abstractiontype, 'spirit:vendor', "xilinx.com")
self.setAttribute(abstractiontype, 'spirit:library', "signal")
self.setAttribute(abstractiontype, 'spirit:name', "reset_rtl")
self.setAttribute(abstractiontype, 'spirit:version', "1.0")
return abstractiontype
def mkSlaveReset(self):
slave = self.doc.createElement('spirit:slave')
return slave
def mkPortMapsReset(self, name):
portmaps = self.doc.createElement('spirit:portMaps')
portmaps.appendChild(self.mkPortMapReset(name))
return portmaps
def mkPortMapReset(self, name):
portmap = self.doc.createElement('spirit:portMap')
portmap.appendChild(self.mkLogicalPort('RST'))
portmap.appendChild(self.mkPhysicalPortReset(name))
return portmap
def mkPhysicalPortReset(self, name):
physicalport = self.doc.createElement('spirit:physicalPort')
physicalport.appendChild(self.mkName(name))
return physicalport
def mkBusParametersReset(self, name, polarity):
parameters = self.doc.createElement('spirit:parameters')
parameters.appendChild(self.mkBusParameterPolarity(name, polarity))
return parameters
def mkBusParameterPolarity(self, name, polarity):
parameter = self.doc.createElement('spirit:parameter')
parameter.appendChild(self.mkName('POLARITY'))
value = self.doc.createElement('spirit:value')
self.setAttribute(value, 'spirit:id', "BUSIFPARAM_VALUE."
+ name + ".POLARITY")
self.setText(value, polarity)
parameter.appendChild(value)
return parameter
def mkBusInterfaceInterrupt(self, name, sensitivity):
interface = self.doc.createElement('spirit:busInterface')
interface.appendChild(self.mkName(name))
interface.appendChild(self.mkBusTypeInterrupt())
interface.appendChild(self.mkAbstractionTypeInterrupt())
interface.appendChild(self.mkMasterInterrupt())
interface.appendChild(self.mkPortMapsInterrupt(name))
interface.appendChild(self.mkBusParametersInterrupt(name, sensitivity))
return interface
def mkBusTypeInterrupt(self):
bustype = self.doc.createElement('spirit:busType')
self.setAttribute(bustype, 'spirit:vendor', "xilinx.com")
self.setAttribute(bustype, 'spirit:library', "signal")
self.setAttribute(bustype, 'spirit:name', "interrupt")
self.setAttribute(bustype, 'spirit:version', "1.0")
return bustype
def mkAbstractionTypeInterrupt(self):
abstractiontype = self.doc.createElement('spirit:abstractionType')
self.setAttribute(abstractiontype, 'spirit:vendor', "xilinx.com")
self.setAttribute(abstractiontype, 'spirit:library', "signal")
self.setAttribute(abstractiontype, 'spirit:name', "interrupt_rtl")
self.setAttribute(abstractiontype, 'spirit:version', "1.0")
return abstractiontype
def mkMasterInterrupt(self):
master = self.doc.createElement('spirit:master')
return master
def mkPortMapsInterrupt(self, name):
portmaps = self.doc.createElement('spirit:portMaps')
portmaps.appendChild(self.mkPortMapInterrupt(name))
return portmaps
def mkPortMapInterrupt(self, name):
portmap = self.doc.createElement('spirit:portMap')
portmap.appendChild(self.mkLogicalPort('INTERRUPT'))
portmap.appendChild(self.mkPhysicalPortInterrupt(name))
return portmap
def mkPhysicalPortInterrupt(self, name):
physicalport = self.doc.createElement('spirit:physicalPort')
physicalport.appendChild(self.mkName(name))
return physicalport
def mkBusParametersInterrupt(self, name, sensitivity):
parameters = self.doc.createElement('spirit:parameters')
parameters.appendChild(self.mkBusParameterSensitivity(name, sensitivity))
return parameters
def mkBusParameterSensitivity(self, name, sensitivity):
parameter = self.doc.createElement('spirit:parameter')
parameter.appendChild(self.mkName('SENSITIVITY'))
value = self.doc.createElement('spirit:value')
self.setAttribute(value, 'spirit:id', "BUSIFPARAM_VALUE."
+ name.upper() + ".SENSITIVITY")
self.setText(value, sensitivity)
parameter.appendChild(value)
return parameter
def mkAddressSpaces(self):
isempty = True
spaces = self.doc.createElement('spirit:addressSpaces')
for bus_interface in self.bus_interfaces:
if isinstance(bus_interface, (axi.AxiLiteMaster, axi.AxiMaster)):
spaces.appendChild(self.mkAddressSpace(bus_interface))
isempty = False
if isempty:
return None
return spaces
def mkAddressSpace(self, obj):
name = obj.name
space = self.doc.createElement('spirit:addressSpace')
space.appendChild(self.mkName(name))
range = self.doc.createElement('spirit:range')
self.setAttribute(range, 'spirit:format', "long")
range_value = 2 ** self.resolved_m[obj.name + '_awaddr'].width
self.setText(range, range_value)
space.appendChild(range)
width = self.doc.createElement('spirit:width')
self.setAttribute(width, 'spirit:format', "long")
width_value = self.resolved_m[obj.name + '_wdata'].width
self.setText(width, width_value)
space.appendChild(width)
return space
def mkMemoryMaps(self):
isempty = True
maps = self.doc.createElement('spirit:memoryMaps')
for bus_interface in self.bus_interfaces:
if isinstance(bus_interface, (axi.AxiLiteSlave, axi.AxiSlave)):
maps.appendChild(self.mkMemoryMap(bus_interface))
isempty = False
if isempty:
return None
return maps
def mkMemoryMap(self, obj):
name = obj.name
map = self.doc.createElement('spirit:memoryMap')
map.appendChild(self.mkName(name))
addressblock = self.doc.createElement('spirit:addressBlock')
addressblock.appendChild(self.mkName(name + '_reg'))
baseaddr = self.doc.createElement('spirit:baseAddress')
self.setAttribute(baseaddr, 'spirit:format', "long")
self.setText(baseaddr, 0)
addressblock.appendChild(baseaddr)
range = self.doc.createElement('spirit:range')
self.setAttribute(range, 'spirit:format', "long")
if hasattr(obj, 'register') and isinstance(obj.register, (tuple, list)):
map_range = 2 ** int(math.ceil(math.log(max(len(obj.register), 4096), 2)))
else:
map_range = 2 ** self.resolved_m[obj.name + '_awaddr'].width
self.setText(range, map_range)
addressblock.appendChild(range)
width = self.doc.createElement('spirit:width')
self.setAttribute(width, 'spirit:format', "long")
self.setText(width, obj.datawidth)
addressblock.appendChild(width)
usage = self.doc.createElement('spirit:usage')
self.setText(usage, 'register')
addressblock.appendChild(usage)
map.appendChild(addressblock)
return map
def mkModel(self):
model = self.doc.createElement('spirit:model')
model.appendChild(self.mkViews())
model.appendChild(self.mkPorts())
return model
def mkViews(self):
views = self.doc.createElement('spirit:views')
views.appendChild(self.mkView('xilinx_verilogsynthesis',
'Verilog Synthesis',
'verilogSource:vivado.xilinx.com:synthesis',
'verilog',
self.ip_name,
'xilinx_verilogsynthesis_view_fileset'))
views.appendChild(self.mkView('xilinx_verilogbehavioralsimulation',
'Verilog Simulation',
'verilogSource:vivado.xilinx.com:simulation',
'verilog',
self.ip_name,
'xilinx_verilogbehavioralsimulation_view_fileset'))
views.appendChild(self.mkView('xilinx_xpgui',
'UI Layout',
':vivado.xilinx.com:xgui.ui',
None,
None,
'xilinx_xpgui_view_fileset'))
return views
def mkView(self, name, displayname, envidentifier, language, modelname, localname):
view = self.doc.createElement('spirit:view')
view.appendChild(self.mkName(name))
view.appendChild(self.mkTextNode('spirit:displayName', displayname))
view.appendChild(self.mkTextNode('spirit:envIdentifier', envidentifier))
if language is not None:
view.appendChild(self.mkTextNode('spirit:language', language))
if modelname is not None:
view.appendChild(self.mkTextNode('spirit:modelName', modelname))
filesetref = self.doc.createElement('spirit:fileSetRef')
filesetref.appendChild(self.mkTextNode('spirit:localName', localname))
view.appendChild(filesetref)
return view
def mkPorts(self):
ports = self.doc.createElement('spirit:ports')
for portname, port in self.ext_ports.items():
ports.appendChild(self.mkPortSignal(port))
return ports
def mkPortSignal(self, var):
name = var.name
direction = ('in' if isinstance(var, vtypes.Input) else
'out' if isinstance(var, vtypes.Output) else
'inout')
width = self.resolved_m[name].width
h = width - 1 if width is not None else None
l = 0 if h is not None else None
return self.mkPortEntry(name, direction,
None, h, None, l)
def mkPortEntry(self, name, direction,
lvar, lvalue, rvar, rvalue,
withdriver=False,
extensionvar=None, extensionvalue='true'):
port = self.doc.createElement('spirit:port')
port.appendChild(self.mkName(name))
port.appendChild(self.mkWire(direction, lvar, lvalue, rvar, rvalue, withdriver))
if extensionvar is not None:
port.appendChild(
self.mkPortVendorExtensions(name, extensionvar, extensionvalue))
return port
def mkWire(self, direction, lvar, lvalue, rvar, rvalue, withdriver=False):
wire = self.doc.createElement('spirit:wire')
wire.appendChild(self.mkDirection(direction))
if not (lvalue is None and rvalue is None):
wire.appendChild(self.mkVector(lvar, lvalue, rvar, rvalue))
wire.appendChild(self.mkWireTypeDefs('wire'))
if withdriver:
wire.appendChild(self.mkDriver())
return wire
def mkDirection(self, direction):
return self.mkTextNode('spirit:direction', direction)
def mkVector(self, lvar, lvalue, rvar, rvalue):
vector = self.doc.createElement('spirit:vector')
left = self.doc.createElement('spirit:left')
self.setAttribute(left, 'spirit:format', "long")
if lvar is not None:
self.setAttribute(left, 'spirit:resolve', 'dependent')
self.setAttribute(left, 'spirit:dependency', lvar)
self.setText(left, lvalue)
vector.appendChild(left)
right = self.doc.createElement('spirit:right')
self.setAttribute(right, 'spirit:format', "long")
if rvar is not None:
self.setAttribute(right, 'spirit:resolve', 'dependent')
self.setAttribute(right, 'spirit:dependency', rvar)
self.setText(right, rvalue)
vector.appendChild(right)
return vector
def mkWireTypeDefs(self, wiretype):
wiretypedefs = self.doc.createElement('spirit:wireTypeDefs')
wiretypedefs.appendChild(self.mkWireTypeDef(wiretype))
return wiretypedefs
def mkWireTypeDef(self, wiretype):
wiretypedef = self.doc.createElement('spirit:wireTypeDef')
wiretypedef.appendChild(self.mkTextNode('spirit:typeName', wiretype))
wiretypedef.appendChild(
self.mkTextNode('spirit:viewNameRef', 'xilinx_verilogsynthesis'))
wiretypedef.appendChild(
self.mkTextNode('spirit:viewNameRef', 'xilinx_verilogbehavioralsimulation'))
return wiretypedef
def mkDriver(self):
driver = self.doc.createElement('spirit:driver')
driver.appendChild(self.mkTextNode('spirit:defaultValue', 0))
return driver
def mkPortVendorExtensions(self, name, var, value='true'):
extensions = self.doc.createElement('spirit:vendorExtensions')
portinfo = self.doc.createElement('xilinx:portInfo')
enablement = self.doc.createElement('xilinx:enablement')
isEnabled = self.doc.createElement('xilinx:isEnabled')
self.setAttribute(isEnabled, 'xilinx:resolve', "dependent")
self.setAttribute(isEnabled, 'xilinx:id', 'PORT_ENABLEMENT.' + name)
self.setAttribute(isEnabled, 'xilinx:dependency',
"spirit:decode(id('MODELPARAM_VALUE." + var.name + "')) >0")
self.setText(isEnabled, value)
enablement.appendChild(isEnabled)
portinfo.appendChild(enablement)
extensions.appendChild(portinfo)
return extensions
def mkFileSets(self):
filesets = self.doc.createElement('spirit:fileSets')
source = self.doc.createElement('spirit:fileSet')
source.appendChild(self.mkName("xilinx_verilogsynthesis_view_fileset"))
source.appendChild(self.mkFileSet('hdl/' + self.ip_name + '.v',
'verilogSource'))
filesets.appendChild(source)
sim = self.doc.createElement('spirit:fileSet')
sim.appendChild(self.mkName(
"xilinx_verilogbehavioralsimulation_view_fileset"))
sim.appendChild(self.mkFileSet('hdl/' + self.ip_name + '.v',
'verilogSource'))
filesets.appendChild(sim)
xguitcl = self.doc.createElement('spirit:fileSet')
xguitcl.appendChild(self.mkName("xilinx_xpgui_view_fileset"))
xguitcl.appendChild(self.mkFileSet('xgui/xgui.tcl',
'tclSource', 'XGUI_VERSION_2'))
filesets.appendChild(xguitcl)
return filesets
def mkFileSet(self, name, filetype=None, *userfiletypes):
fileset = self.doc.createElement('spirit:file')
fileset.appendChild(self.mkName(name))
if filetype is not None:
fileset.appendChild(self.mkTextNode('spirit:fileType', filetype))
for u in userfiletypes:
fileset.appendChild(self.mkTextNode('spirit:userFileType', u))
return fileset
def mkDescription(self, description):
return self.mkTextNode('spirit:description', description)
def mkParameters(self):
parameters = self.doc.createElement('spirit:parameters')
compname = self.doc.createElement('spirit:parameter')
compname.appendChild(self.mkName('Component_Name'))
value = self.doc.createElement('spirit:value')
self.setAttribute(value, 'spirit:resolve', 'user')
self.setAttribute(value, 'spirit:id',
"PARAM_VALUE." + 'Component_Name')
self.setAttribute(value, 'spirit:order', 1)
self.setText(value, self.ip_name + '_v' + self.version.replace('.', '_'))
compname.appendChild(value)
parameters.appendChild(compname)
return parameters
def mkVendorExtensions(self):
extensions = self.doc.createElement('spirit:vendorExtensions')
extensions.appendChild(self.mkCoreExtensions())
packageinfo = self.doc.createElement('xilinx:packagingInfo')
packageinfo.appendChild(self.mkTextNode(
'xilinx:xilinxVersion', '2018.3'))
extensions.appendChild(packageinfo)
return extensions
def mkCoreExtensions(self):
coreextensions = self.doc.createElement('xilinx:coreExtensions')
supported = self.doc.createElement('xilinx:supportedFamilies')
for name in self.supported_families:
family = self.doc.createElement('xilinx:family')
self.setAttribute(family, 'xilinx:lifeCycle', 'Production')
self.setText(family, name)
supported.appendChild(family)
coreextensions.appendChild(supported)
taxonomies = self.doc.createElement('xilinx:taxonomies')
taxonomies.appendChild(self.mkTextNode('xilinx:taxonomy', '/UserIP'))
coreextensions.appendChild(taxonomies)
coreextensions.appendChild(
self.mkTextNode('xilinx:displayName',
(self.ip_name + '_v' + self.version.replace('.', '_'))))
coreextensions.appendChild(self.mkTextNode('xilinx:coreRevision', 1))
now = datetime.datetime.now()
dt = now.strftime("%Y-%m-%dT%H:%M:%SZ") # '2015-03-08T02:16:15Z'
coreextensions.appendChild(self.mkTextNode('xilinx:coreCreationDateTime', dt))
return coreextensions
def is_master(obj):
return isinstance(obj, (axi.AxiMaster, axi.AxiLiteMaster))
def is_slave(obj):
return isinstance(obj, (axi.AxiSlave, axi.AxiLiteSlave))
def is_streamin(obj):
return isinstance(obj, axi.AxiStreamIn)
def is_streamout(obj):
return isinstance(obj, axi.AxiStreamOut)
def is_lite(obj):
return isinstance(obj, (axi.AxiLiteMaster, axi.AxiLiteSlave))
```
#### File: veriloggen/types/rom.py
```python
from __future__ import absolute_import
from __future__ import print_function
import math
import veriloggen.core.vtypes as vtypes
from veriloggen.core.module import Module
from . import util
def mkROMDefinition(name, values, size, datawidth, sync=False, with_enable=False):
if not sync and with_enable:
raise ValueError('Async ROM cannot have enable signals')
m = Module(name)
clk = m.Input('CLK') if sync else None
addr = m.Input('addr', size)
if with_enable:
enable = m.Input('enable')
val = m.OutputReg('val', datawidth)
if clk is not None:
alw = m.Always(vtypes.Posedge(clk))
else:
alw = m.Always()
patterns = [vtypes.When(i)(val(v, blk=not sync))
for i, v in enumerate(values)]
body = vtypes.Case(addr)(*patterns)
if with_enable:
body = vtypes.If(enable)(body)
alw(
body
)
return m
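# Illustrative sketch (module name and values are hypothetical): a synchronous 4-entry, 8-bit ROM
# rom_def = mkROMDefinition('my_rom', values=[0, 1, 2, 3], size=2, datawidth=8, sync=True)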
class _ROM_RTL(object):
def __init__(self, m, name, clk, addr, values, enable=None, datawidth=None):
self.m = m
self.name = name
self.clk = clk
size = int(math.ceil(math.log(len(values), 2)))
self.addr = self.m.Wire(name + '_addr', size)
self.m.Assign(self.addr(addr))
if datawidth is None:
datawidth = 1
for v in values:
w = vtypes.get_width(v)
if w is not None and w > datawidth:
datawidth = w
with_enable = enable is not None
if enable is not None:
self.enable = self.m.Wire(name + '_enable')
self.m.Assign(self.enable(enable))
self.rdata = self.m.Wire(name + '_val', datawidth)
sync = True if clk is not None else False
rom_def = mkROMDefinition(
name, values, size, datawidth, sync, with_enable)
ports = []
if clk is not None:
ports.append(self.clk)
ports.append(self.addr)
if enable is not None:
ports.append(self.enable)
ports.append(self.rdata)
self.m.Instance(rom_def, name, params=(), ports=ports)
class SyncROM(_ROM_RTL):
def __init__(self, m, name, clk, addr, values, enable=None, datawidth=None):
_ROM_RTL.__init__(self, m, name, clk, addr, values,
enable=enable, datawidth=datawidth)
class AsyncROM(_ROM_RTL):
def __init__(self, m, name, addr, values, enable=None, datawidth=None):
_ROM_RTL.__init__(self, m, name, None, addr, values,
enable=enable, datawidth=datawidth)
```
|
{
"source": "jessecoding/djangoproject",
"score": 2
}
|
#### File: myproject/boards/views.py
```python
from django.shortcuts import render
from boards.models import Board
def home(request):
boards = Board.objects.all()
return render(request, 'home.html', {'boards': boards})
```
|
{
"source": "JesseCorrington/CryptoHypeTrader",
"score": 3
}
|
#### File: CryptoHypeTrader/backtest/strategies.py
```python
from random import randint
import pandas as pd
from backtest.engine import Strategy, Signal
# Simple buy and hold strategy, as a control to compare other strategies against
class BuyAndHoldStrategy(Strategy):
def __init__(self, coin_ids, buy_date, sell_date):
self.coin_ids = coin_ids
self.buy_date = buy_date
self.sell_date = sell_date
self.starting_cash = None
self.hold_count = 0
def generate_signals(self, coin_id, df):
if self.coin_ids == "all" or coin_id in self.coin_ids:
buy_date = max(self.buy_date, df.index.min())
sell_date = min(self.sell_date, df.index.max())
signals = pd.DataFrame(index=df.index)
signals["signals"] = Signal.NONE
signals.loc[buy_date : sell_date, "signals"] = Signal.HOLD
signals.loc[buy_date, "signals"] = Signal.BUY
signals.loc[sell_date, "signals"] = Signal.SELL
self.hold_count += 1
return signals
def allocation(self, available_cash):
if not self.starting_cash:
self.starting_cash = available_cash
return self.starting_cash / self.hold_count
# Trading strategy that buys and sells based on how quickly a subreddit is growing relative to price
class RedditGrowthStrategy(Strategy):
def __init__(self, min_market_cap, sub_growth_threshold):
self.min_market_cap = min_market_cap
self.sub_growth_threshold = sub_growth_threshold
def generate_signals(self, coin_id, df):
signals = pd.DataFrame(index=df.index)
df_change = df.pct_change(1)
holding = False
buy_price = 1
for index, row in df_change.iterrows():
market_cap = df.loc[index, "market_cap"]
current_price = df.loc[index, "close"]
subs_added = df.loc[index, "reddit_subs"] * row["reddit_subs"]
profit_percent = (current_price - buy_price) / buy_price
if not holding and row["reddit_subs"] > self.sub_growth_threshold and \
market_cap > self.min_market_cap and subs_added > 50:
signals.loc[index, "signals"] = Signal.BUY
holding = True
buy_price = current_price
elif holding and (row["reddit_subs"] < self.sub_growth_threshold or profit_percent > 5):
signals.loc[index, "signals"] = Signal.SELL
holding = False
return signals
def allocation(self, current_cash):
size = current_cash / 4
size = min(size, 10000)
return size
# Buys and sells randomly, as a control to test other strategies against
class RandomStrategy(Strategy):
def generate_signals(self, coin_id, df):
holding = False
signals = pd.DataFrame(index=df.index)
for index, row in df.iterrows():
if not holding and randint(1, 20) == 1:
signals.loc[index, "signals"] = Signal.BUY
holding = True
elif holding and randint(1, 20) == 1:
signals.loc[index, "signals"] = Signal.SELL
holding = False
return signals
def allocation(self, current_cash):
size = current_cash / 8
size = min(size, 10000)
return size
```
#### File: CryptoHypeTrader/ingestion/analysis.py
```python
import pymongo
from datetime import datetime, timedelta
from common import database as db
# Calculates statistics on the data over various time ranges
def dict_access(d, key):
"""Access a key in a dict using . notation (key.subkey1.subkey2...subkeyn)
returns None if the key path does not exist in the object"""
current = d
for subkey in key.split("."):
try:
current = current[subkey]
except KeyError:
return None
return current
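# Example: dict_access({"a": {"b": 1}}, "a.b") -> 1; dict_access({"a": {"b": 1}}, "a.c") -> None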
def growth(records, field, from_date, to_date):
"""Calculates the growth (absolute and percent) on a field over a time range
requires that the records are sorted in descending order"""
if to_date < from_date:
raise ValueError("Invaid date range: from_date must be > to_date")
records = [x for x in records
if from_date <= x["date"] <= to_date]
if len(records) < 2:
return 0, 0
try:
end = dict_access(records[0], field)
start = dict_access(records[-1], field)
except KeyError:
return 0, 0
if end is None or start is None:
return 0, 0
growth_amount = end - start
# prevent division by zero
if start == 0:
start = 1
growth_percent = growth_amount / start
return growth_amount, growth_percent
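# Note: growth() returns (absolute_change, fractional_change); the "percent" value is a
# fraction (0.5 == +50%) measured against the oldest record in the range.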
def growth_stats(coin, stats, key, end_time):
"""Calculates growth on a field over the following time ranges"""
time_ranges = {
"h12": timedelta(hours=12),
"d1": timedelta(days=1),
"d3": timedelta(days=3),
"d5": timedelta(days=5),
"d7": timedelta(days=7)
}
entries = [x for x in stats if x["coin_id"] == coin["_id"]]
if len(entries) == 0:
return None
coin_stats = {}
for name, time_range in time_ranges.items():
from_date = end_time - time_range
amount, percent = growth(entries, key, from_date, end_time)
coin_stats[name] = amount
coin_stats[name + "_pct"] = percent
return coin_stats
def social_growth():
"""Calculates growth of social stats over various time ranges"""
now = datetime.utcnow()
oldest = now - timedelta(days=8)
all_stats = []
coins = list(db.mongo_db.coins.find())
def get_stats(collection, coin_id):
return list(db.mongo_db[collection].find({"coin_id": coin_id,
"date": {"$gte": oldest}}).sort('date', pymongo.DESCENDING))
stats_to_calc = [
("price", "prices", "price"),
("volume", "prices", "volume"),
("reddit", "reddit_stats", "subscribers"),
("twitter", "twitter_comments", "count"),
("code_points", "cryptocompare_stats", "code_repo.points"),
("facebook_points", "cryptocompare_stats", "facebook.points"),
("twitter_followers", "cryptocompare_stats", "twitter.followers")
]
for coin in coins:
cid = coin["_id"]
coin_stats = {"coin_id": cid}
all_stats.append(coin_stats)
for name, collection, key in stats_to_calc:
entries = get_stats(collection, cid)
stats = growth_stats(coin, entries, key, now)
if stats:
coin_stats[name] = stats
else:
coin_stats[name] = {}
return all_stats
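# The result is a list of per-coin dicts of the form
# {"coin_id": ..., "price": {...}, "volume": {...}, "reddit": {...}, ...},
# where each inner dict carries h12/d1/d3/d5/d7 absolute values plus *_pct fractions.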
```
#### File: CryptoHypeTrader/ingestion/datasource.py
```python
import urllib.parse
import urllib.request
import json
from xml.dom import minidom
from bs4 import BeautifulSoup
class HTTPError(Exception):
pass
class ParseError(Exception):
pass
class ValidationError(Exception):
pass
class DataSource:
"""Abstract base class for data sources to derive from
This does some of the common work of error handling and parsing the data into various formats
"""
def __init__(self, url, params={}, response_format="json"):
self.url = url
self.params = params
self.format = response_format # json | text | xml | soup
def get(self):
text = self._get(self.url, self.params)
if self.format == "text":
data = text
elif self.format == "json":
data = json.loads(text)
elif self.format == "xml":
data = minidom.parseString(text)
elif self.format == "soup":
data = BeautifulSoup(text, "lxml")
else:
raise ValueError("Unsupported data source format: {}".format(self.format))
error_msg = self.validate(data)
if error_msg is not None:
raise ValidationError(error_msg)
try:
data = self.pre_parse(data)
return self.parse(data)
except Exception as err:
raise ParseError(str(err))
def parse(self, data):
raise NotImplementedError("DataSource's must implement parse")
@staticmethod
def pre_parse(data):
return data
@staticmethod
def validate(data):
return None
@staticmethod
def _get(url, params={}):
str_params = urllib.parse.urlencode(params)
if len(str_params) > 0:
url += "?" + str_params
try:
ret = urllib.request.urlopen(url, timeout=20)
# TODO: consider adding some rate limit monitoring functionality here
# ret.info().get('x-ratelimit-remaining')
return ret.read().decode("utf-8")
except Exception as err:
raise HTTPError(str(err))
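# A minimal subclass sketch (the URL and field names are hypothetical):
#
# class ExampleTicker(DataSource):
#     def __init__(self):
#         super().__init__("https://api.example.com/ticker", response_format="json")
#
#     def parse(self, data):
#         return [{"symbol": x["symbol"], "price": float(x["price"])} for x in data]
#
# ExampleTicker().get() returns the parsed list, or raises HTTPError, ValidationError,
# or ParseError on failure.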
```
#### File: ingestion/datasources/cryptocompare.py
```python
from urllib.parse import urlparse
from ingestion import datasource as ds
from common.util import safe_assign
# Provides access to the cryptocompare.com API to get coin, price, and social data
class CryptoCompareDataSource(ds.DataSource):
"""Abstract data source that handles common error checking and parsing for cryptocompare responses"""
def validate(self, data):
if "Response" not in data:
return "invalid response format"
if "Data" not in data:
return "no response data"
if data["Response"] != "Success":
return "response not success {}".format(data["Response"])
def pre_parse(self, data):
return data["Data"]
class CoinList(CryptoCompareDataSource):
"""Used to get the full list of coins on cryptocompare.com"""
def __init__(self):
super().__init__("https://min-api.cryptocompare.com/data/all/coinlist")
def parse(self, all_coins):
ret = []
for symbol, coin in all_coins.items():
ret.append({
"cc_id": coin["Id"],
"symbol": coin["Symbol"],
"name": coin["CoinName"],
"icon": "https://cryptocompare.com" + coin["ImageUrl"] if "ImageUrl" in coin else ""
})
return ret
class CoinLinks(CryptoCompareDataSource):
"""Used to get the social links for a single coin (reddit, twitter)"""
def __init__(self, cc_id):
super().__init__("https://www.cryptocompare.com/api/data/socialstats/", {"id": cc_id})
def parse(self, stats):
links = {}
if "Reddit" in stats and "link" in stats["Reddit"] and stats["Reddit"]["link"]:
subreddit_url = stats["Reddit"]["link"]
links["subreddit"] = urlparse(subreddit_url).path.replace("/r/", "").replace("/", "")
if "Twitter" in stats and "link" in stats["Twitter"] and stats["Twitter"]["link"]:
twitter_url = stats["Twitter"]["link"]
links["twitter"] = urlparse(twitter_url).path.replace("/", "")
return links
class SocialStats(CryptoCompareDataSource):
"""Used to get the social stats for a single coin (twitter, reddit, facebook, github)"""
def __init__(self, cc_id):
super().__init__("https://www.cryptocompare.com/api/data/socialstats/", {"id": cc_id})
def parse(self, data):
stats = {
"total_points": data["General"]["Points"],
}
if "CryptoCompare" in data:
cc_stats = data["CryptoCompare"]
stats["crypto_compare"] = {}
safe_assign(stats["crypto_compare"], "comments", cc_stats, "Comments", int)
safe_assign(stats["crypto_compare"], "followers", cc_stats, "Followers", int)
safe_assign(stats["crypto_compare"], "posts", cc_stats, "Posts", int)
safe_assign(stats["crypto_compare"], "points", cc_stats, "Points", int)
safe_assign(stats["crypto_compare"], "page_views", cc_stats, "PageViews", int)
if "Twitter" in data:
twitter_stats = data["Twitter"]
stats["twitter"] = {}
safe_assign(stats["twitter"], "followers", twitter_stats, "followers", int)
safe_assign(stats["twitter"], "points", twitter_stats, "Points", int)
if "Reddit" in data:
reddit_stats = data["Reddit"]
stats["reddit"] = {}
safe_assign(stats["reddit"], "comments_per_hour", reddit_stats, "comments_per_hour", float)
safe_assign(stats["reddit"], "comments_per_day", reddit_stats, "comments_per_day", float)
safe_assign(stats["reddit"], "posts_per_hour", reddit_stats, "posts_per_hour", float)
safe_assign(stats["reddit"], "posts_per_day", reddit_stats, "posts_per_day", float)
safe_assign(stats["reddit"], "points", reddit_stats, "Points", int)
if "Facebook" in data:
fb = data["Facebook"]
stats["facebook"] = {}
safe_assign(stats["facebook"], "likes", fb, "likes", int)
safe_assign(stats["facebook"], "points", fb, "Points", int)
if "CodeRepository" in data:
stats["code_repo"] = {}
safe_assign(stats["code_repo"], "points", data["CodeRepository"], "Points", int)
return stats
```
#### File: ingestion/datasources/twitter.py
```python
import time
import datetime
import tweepy
from ingestion import config
from ingestion import comment
# Provides access to sentiment data for twitter comments via the twitter.com API
api = None
def init_api():
global api
if api is not None:
return
auth = tweepy.OAuthHandler(config.twitter["api_key"], config.twitter["api_secret"])
auth.set_access_token(config.twitter["access_token"], config.twitter["access_token_secret"])
api = tweepy.API(auth)
class CommentScanner(comment.CommentScanner):
def __init__(self, coin, hours):
super().__init__()
self.query = "$" + coin["symbol"].lower()
self.hours = hours
def find_comments(self):
global api
now = datetime.datetime.utcnow()
max_old = datetime.timedelta(hours=self.hours)
# 450 requests in a 15 minute interval.
api_wait = 60 * 15
# This is the maximum number of tweets the API will give us per page,
# meaning we can get at most 45,000 tweets every 15 minutes, assuming all pages are full
max_count = 100
c = tweepy.Cursor(api.search, q=self.query, include_entities=False,
result_type="recent", count=max_count).items()
while True:
try:
tweet = c.next()
old = now - tweet.created_at
if old > max_old:
break
# TODO: we should also factor how many followers a user has into the scoring
# The base score value is 1, so add it here
score = tweet.retweet_count + 1
self._add_comment(tweet.text, score)
except tweepy.TweepError:
print("Twitter Rate limit exceeded, waiting 15 minutes")
time.sleep(api_wait)
continue
except StopIteration:
break
```
#### File: CryptoHypeTrader/ingestion/tasks.py
```python
import concurrent.futures
import datetime
import pymongo
import urllib.request
from common import database as db, util
from ingestion import config, manager as mgr
from ingestion.datasources import reddit, twitter, cryptocompare as cc, coinmarketcap as cmc, stocktwits as st
from ingestion import analysis
def init():
db.init(config.database)
db.create_indexes()
reddit.init_api()
twitter.init_api()
class ImportCoinList(mgr.IngestionTask):
""" Task to import the list of coins from coinmarketcap.com and cryptocompare.com
This checks all coins on every run and only writes updates for new/changed items in the db.
While this is inefficient, it only needs to run a couple of times a day,
and it's better to be sure we have correct info, as all other tasks depend on it
"""
def __get_links(self, coin):
links = self._get_data(cmc.CoinLinks(coin))
if links is None:
return None
# If we have a subreddit, make sure it's valid, because some links are broken on cmc
if "subreddit" in links:
if not reddit.is_valid(links["subreddit"]):
self._warn("Invalid subreddit {}".format(links["subreddit"]))
del links["subreddit"]
missing_links = {"subreddit", "twitter", "btctalk_ann"} - set(links.keys())
if len(missing_links) > 0 and "cc_id" in coin:
cc_links = self._get_data(cc.CoinLinks(coin["cc_id"]))
if cc_links:
for missing_link in missing_links:
if missing_link in cc_links:
links[missing_link] = cc_links[missing_link]
return links
@staticmethod
def __duplicate_symbols(coins):
"""Returns a list of symbols that have duplicates in the coin list"""
symbols = set()
duplicate_symbols = set()
for coin in coins:
sym = coin["symbol"]
if sym in symbols:
duplicate_symbols.add(sym)
else:
symbols.add(sym)
return duplicate_symbols
def __merge_cc_data(self, coins, cc_coins):
"""Merges the cryptocompare ids into the coin list
this allows us to pull coin data from both sources
"""
def full_id(coin):
return "{}_{}".format(coin["symbol"], coin["name"]).lower()
# Symbols are not guaranteed to be unique, so to be sure we map a coin from
# coinmarketcap to cryptocompare we use <symbol_name> as the id
cc_lookup = {}
for coin in cc_coins:
cid = full_id(coin)
if cid in cc_lookup:
self._fatal("Duplicate cid {}".format(cid))
else:
cc_lookup[cid] = coin
for coin in coins:
cid = full_id(coin)
if cid in cc_lookup:
coin["cc_id"] = cc_lookup[cid]["cc_id"]
coin["icon"] = cc_lookup[cid]["icon"]
def _run(self):
current_coins = self._get_data(cmc.CoinList())
cc_coins = self._get_data(cc.CoinList())
if not current_coins or not cc_coins:
self._fatal("Failed to get coinlists from remotes")
stored_coins = db.get_coins()
# Find the set of ids that we don't have in the database yet
current_ids = util.list_to_set(current_coins, "cmc_id")
stored_ids = util.list_to_set(stored_coins, "cmc_id")
new_ids = current_ids - stored_ids
# map from coinmarketcap id to coins
stored_coins_map = util.list_to_dict(stored_coins, "cmc_id")
print("Total current coins (coinmarketcap.com):", len(current_ids))
print("Locally stored coins:", len(stored_ids))
print("New coins to process:", len(new_ids))
self.__merge_cc_data(current_coins, cc_coins)
processed = 0
coin_updates = 0
for coin in current_coins:
links = self.__get_links(coin)
if links is None:
continue
for name, val in links.items():
coin[name] = val
in_db = coin["cmc_id"] in stored_ids
if not in_db:
coin["_id"] = db.next_sequence_id("coins")
self._db_insert("coins", coin)
else:
stored_coin = stored_coins_map[coin["cmc_id"]]
coin["_id"] = stored_coin["_id"]
# Update only if changed
if coin != stored_coin:
# Fields are only updated when the new value is non-empty.
# This prevents wiping good data when a fetch simply failed this time.
# The drawback is that data corrected to be empty upstream won't be
# removed here, but we can deal with that manually for now.
updateable = {"cc_id", "subreddit", "twitter", "btctalk_ann", "icon"}
updates = {}
for field in updateable:
current = coin[field] if field in coin else ""
stored = stored_coin[field] if field in stored_coin else ""
if current != stored and len(current) > 0:
updates[field] = current
if len(updates) > 0:
coin_updates += 1
self._db_update_one("coins", coin["_id"], updates)
processed += 1
self._progress(processed, len(current_coins))
print("Total coins", len(current_coins))
print("Added", len(new_ids), "new coins")
print("Updated", coin_updates)
class ImportHistoricalData(mgr.IngestionTask):
"""Task to Import historical daily data from a specified DataSource"""
def __init__(self, collection, data_source, coin_filter=None):
"""
:param collection: the db collection to store the data
:param data_source: the DataSource used to get the data
:param coin_filter: optional to filter which coins to use
"""
super().__init__()
self.__collection = collection
self.__data_source = data_source
self.__coin_filter = coin_filter
self._name += "-" + collection
@staticmethod
def _outdated(coins, latest_updates):
"""Returns a list of coins with outdated data in the db"""
coins_to_update = {}
# Make a list of coins that don't have up to date historical data
for coin in coins:
coin_id = coin["_id"]
update_start = datetime.datetime(2011, 1, 1)
if coin_id in latest_updates:
most_recent = latest_updates[coin_id]["date"]
today = datetime.datetime.utcnow()
# compare full dates rather than day-of-month so month boundaries are handled
if (today - most_recent).days <= 1:
continue
update_start = most_recent + datetime.timedelta(days=1)
coins_to_update[coin_id] = update_start
return coins_to_update
def _run(self):
coins = db.get_coins(self.__coin_filter)
latest_data = db.get_latest(self.__collection)
coins_to_update = self._outdated(coins, latest_data)
print("Coins with no {} data: {}".format(self.__collection, len(coins) - len(latest_data)))
print("Coins with out of date {} data: {}".format(self.__collection, len(coins_to_update)))
processed = 0
coins = util.list_to_dict(coins, "_id")
for coin_id in coins_to_update:
coin = coins[coin_id]
update_start = coins_to_update[coin_id]
new_data = self._get_data(self.__data_source(coin, start=update_start))
if new_data:
for day in new_data:
day["coin_id"] = coin["_id"]
self._db_insert(self.__collection, new_data)
print("Added all historical", self.__collection, "data for", coin["symbol"])
else:
self._error("no historical data found for {}, starting on {}".format(coin["symbol"], update_start))
processed += 1
self._progress(processed, len(coins_to_update))
class ImportPrices(mgr.IngestionTask):
"""Task to import current prices for all coins"""
def _run(self):
data = self._get_data(cmc.Ticker())
if not data:
self._fatal("Failed to get coinmarketcap ticker")
# Need to map coinmarketcap ids back to ours
stored_coins = db.get_coins()
id_map = {}
for coin in stored_coins:
id_map[coin["cmc_id"]] = coin["_id"]
for coin in data:
cmc_id = coin["cmc_id"]
if cmc_id in id_map:
coin["coin_id"] = id_map[cmc_id]
del coin["cmc_id"]
else:
self._error("Can't add price data to unknown coin {}".format(cmc_id))
# filter out coins we haven't seen yet
# we'll pick them up after our ImportCoinList runs again
data = [x for x in data if "coin_id" in x]
self._db_insert("prices", data)
class ImportRedditStats(mgr.IngestionTask):
"""Task to import current reddit stats"""
def __init__(self, collection, get_stats):
super().__init__()
self.__collection = collection
self.__get_stats = get_stats
self._name += "-" + collection
def _run(self):
coins = db.get_coins({"subreddit": {"$exists": True}})
processed = 0
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_coin = {executor.submit(self._get_data, self.__get_stats, coin["subreddit"]): coin for coin in coins}
for future in concurrent.futures.as_completed(future_to_coin):
coin = future_to_coin[future]
try:
today = datetime.datetime.utcnow()
stats = future.result()
if stats:
stats["date"] = today
stats["coin_id"] = coin["_id"]
self._db_insert(self.__collection, stats)
else:
self._error("Failed to get reddit stats for r/{}".format(coin["subreddit"]))
except Exception as err:
self._error("Failed to get future results for r/{}, {}".format(coin["subreddit"], err))
processed += 1
self._progress(processed, len(coins))
class ImportStockTwits(mgr.IngestionTask):
"""Task to import recent StockTwits posts"""
def __init__(self, collection, num_posts=2):
super().__init__()
self.__collection = collection
self.num_posts = num_posts
def _run(self):
coins = st.CoinList()
coins = self._get_data(coins)
for coin in range(len(coins.iloc[:self.num_posts,:])):
posts = st.recentPosts(coins.loc[coin, 'symbol'] + '.X',
coins.loc[coin, 'coin_id'],
coins.loc[coin, 'name'])
posts = self._get_data(posts)
self._db_insert(self.__collection, posts)
class ImportCommentStats(mgr.IngestionTask):
"""Task to import social media comment stats"""
def __init__(self, collection, comment_scanner, coin_filter, max_workers=5):
super().__init__()
self.__comment_scanner = comment_scanner
self.__collection = collection
self.__coin_filter = coin_filter
self.__max_workers = max_workers
self._name += "-" + collection
def _run(self):
coins = db.get_coins(self.__coin_filter)
hours = 1
processed = 0
with concurrent.futures.ThreadPoolExecutor(max_workers=self.__max_workers) as executor:
future_to_coin = {}
for coin in coins:
scanner = self.__comment_scanner(coin, hours)
fut = executor.submit(scanner.find_comments)
future_to_coin[fut] = (coin, scanner)
for future in concurrent.futures.as_completed(future_to_coin):
coin, scanner = future_to_coin[future]
try:
now = datetime.datetime.utcnow()
record = {
"date": now,
"coin_id": coin["_id"],
"count": scanner.count(),
"sum_score": scanner.sum_score(),
"avg_score": scanner.avg_score(),
"avg_sentiment": scanner.avg_sentiment(),
"strong_pos": scanner.count_strong_pos(),
"strong_neg": scanner.count_strong_neg()
}
self._db_insert(self.__collection, record)
strong_pos = scanner.strong_pos()
strong_neg = scanner.strong_neg()
# Remove old comments to reduce storage requirements
now = datetime.datetime.utcnow()
max_age = now - datetime.timedelta(days=200)
db.mongo_db.recent_comments.remove({"date": {"$lt": max_age}})
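# (An alternative would be a MongoDB TTL index on "date",
#  e.g. create_index("date", expireAfterSeconds=...), which expires documents
#  automatically instead of this explicit cleanup.)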
for comment in strong_pos + strong_neg:
r = {
"date": now,
"coin_id": coin["_id"],
"text": comment.text,
"score": comment.score,
"sentiment": comment.sentiment,
"platform": self.__collection
}
self._db_insert("recent_comments", r)
except Exception as err:
self._error("Failed to get future results for r/{}, {}".format(coin["subreddit"], err))
processed += 1
self._progress(processed, len(coins))
class ImportCryptoCompareStats(mgr.IngestionTask):
"""Task to import stats from cryptocompare"""
def _run(self):
coins = db.get_coins({"cc_id": {"$exists": True}})
processed = 0
for coin in coins:
stats = self._get_data(cc.SocialStats(coin["cc_id"]))
if stats:
stats["date"] = datetime.datetime.utcnow()
stats["coin_id"] = coin["_id"]
self._db_insert("cryptocompare_stats", stats)
else:
self._warn("No stats for coin {}".format(coin["symbol"]))
processed += 1
self._progress(processed, len(coins))
class DownloadCoinIcons(mgr.IngestionTask):
"""Task to download icon image files for all coins"""
def _run(self):
coins = db.get_coins({"icon": {"$exists": True}})
processed = 0
for coin in coins:
missing = db.mongo_db.coin_icons.find_one({"coin_id": coin["_id"]}) is None
if "icon" in coin and len(coin["icon"]) > 0 and missing:
req = urllib.request.Request(coin["icon"])
# Need to fake being a browser, or we get a 403
req.add_header('user-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36')
with urllib.request.urlopen(req) as response:
data = response.read()
item = {
"coin_id": coin["_id"],
"data": data
}
self._db_insert("coin_icons", item)
processed += 1
self._progress(processed, len(coins))
class SaveDBStats(mgr.IngestionTask):
"""Task to save database statistics so we can track over time"""
def _run(self):
stats = db.mongo_db.command("dbstats")
stats["date"] = datetime.datetime.utcnow()
self._db_insert("db_stats", stats)
class CreateCoinSummaries(mgr.IngestionTask):
"""Task to analyse statistics and create summary reports for each coin
This runs periodically in the background because the analysis is slow;
the results are cached for later use.
"""
def _run(self):
coins = db.get_coins()
prices = db.mongo_db["prices"].aggregate([
{"$sort": {"date": pymongo.DESCENDING}},
{"$group": {"_id": "$coin_id", "data": {'$first': '$$ROOT'}}}
], allowDiskUse=True)
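# The pipeline above yields the most recent price document per coin:
# sort newest-first, then take $first within each coin_id group.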
prices = db.cursor_to_dict(prices)
growth = analysis.social_growth()
growth = util.list_to_dict(growth, "coin_id")
processed = 0
for coin in coins:
cid = coin["_id"]
if cid not in prices:
self._warn("No current price data for {}".format(cid))
continue
p = prices[cid]["data"]
record = {
"coin_id": coin["_id"],
"symbol": coin["symbol"],
"name": coin["name"],
"market_cap": p["market_cap"],
"price": p["price"],
"volume": p["volume"]
}
# Add in optional vals
for key in ["subreddit", "twitter", "cmc_id", "cc_id"]:
if key in coin:
record[key] = coin[key]
if cid in growth:
del growth[cid]["coin_id"]
record["growth"] = growth[cid]
db.mongo_db.coin_summaries.replace_one({"coin_id": cid}, record, upsert=True)
processed += 1
if processed % 50 == 0:
self._progress(processed, len(coins))
# Helper function for task runs
def historical_data_tasks():
return [
ImportHistoricalData("historical_prices", cmc.HistoricalPrices),
ImportHistoricalData("historical_social_stats", reddit.HistoricalStats, {"subreddit": {"$exists": True}})
]
def current_data_tasks():
return [
ImportPrices(),
ImportRedditStats("reddit_stats", reddit.get_current_stats),
ImportCommentStats("reddit_comments", reddit.CommentScanner, {"subreddit": {"$exists": True}})
]
def twitter_tasks():
# TODO: this has to be run separately because it takes much longer than the other tasks
# due to the low twitter API rate limit, which on average only lets us process
# around 90 coins every 15 minutes, so a full pass takes 3+ hours.
# look into distributing this across several server nodes with different twitter API keys
return [
ImportCommentStats("twitter_comments", twitter.CommentScanner, {"twitter": {"$exists": True}}, 1),
]
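# Sketch of one possible approach to the TODO above (an assumption, not
# existing code): split the coin list round-robin across N worker nodes,
# each node using its own twitter API key.
def partition_round_robin(coins, num_nodes):
    """Assign coins[i] to node (i % num_nodes); returns one list per node."""
    buckets = [[] for _ in range(num_nodes)]
    for i, coin in enumerate(coins):
        buckets[i % num_nodes].append(coin)
    return buckets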
def analysis_tasks():
return [CreateCoinSummaries()]
```
#### File: ingestion/test/test_reddit.py
```python
from datetime import datetime, timedelta
from unittest import TestCase
from ingestion.datasources import reddit
class TestReddit(TestCase):
def test_get_historical_stats(self):
coin = {"subreddit": "bitcoin"}
stats = reddit.HistoricalStats(coin).get()
stats.sort(key=lambda x: x["date"], reverse=True)
newest = stats[0]["date"]
oldest = stats[-1]["date"]
self.assertGreater(newest, oldest)
# test using a date range
start = datetime(2017, 1, 1)
prices = reddit.HistoricalStats(coin, start).get()
prices.sort(key=lambda x: x["date"], reverse=True)
self.assertEqual(start, prices[-1]["date"])
def test_get_current_stats(self):
stats = reddit.get_current_stats("bitcoin")
self.assertGreater(stats["subscribers"], 1)
self.assertGreater(stats["active"], 1)
def test_is_valid(self):
self.assertEqual(reddit.is_valid("bitcoin"), True)
self.assertEqual(reddit.is_valid("doesnotexistdoesnotexistdoesnotexist"), False)
def test_comment_scanner(self):
reddit.init_api()
coin = {
"subreddit": "bitcoin"
}
btc_scanner = reddit.CommentScanner(coin, 1)
btc_scanner.find_comments()
self.assertGreater(btc_scanner.count(), 100)
self.assertGreater(btc_scanner.sum_score(), 1000)
self.assertGreater(btc_scanner.avg_score(), 10)
self.assertGreater(btc_scanner.avg_sentiment(), -1)
self.assertLess(btc_scanner.avg_sentiment(), 1)
self.assertGreater(btc_scanner.count_strong_pos(), 0)
self.assertGreater(btc_scanner.count_strong_neg(), 0)
```
|
{
"source": "jessecrossen/hautmidi",
"score": 3
}
|
#### File: mvc/fonts/bdf2c.py
```python
import sys
import os
import os.path
import glob
import re
def add_font(path):
h = ''
c = ''
# get an identifier-friendly name for the font
name = re.sub(r'\W+', '', os.path.basename(path)[0:-4])
# declare the font in the header
h += '// '+os.path.basename(path)+'\n'
h += 'extern const Font %s;\n' % name
# parse the file into a data structure
charWidth = charHeight = leading = 0
asciiMin = asciiMax = charCode = None
inBitmap = False
table = dict()
with open(path, 'r') as f:
for line in f:
line = line.strip()
if (line.startswith('FONTBOUNDINGBOX')):
parts = line.split(' ')
charWidth = int(parts[1])
charHeight = int(parts[2])
leading = abs(int(parts[4]))
charHeight -= leading
elif (line.startswith('ENCODING')):
parts = line.split(' ')
charCode = int(parts[1])
if ((charCode >= 32) and (charCode <= 126)):
table[charCode] = list()
inBitmap = False
else:
charCode = None
elif (line.startswith('BITMAP')):
inBitmap = True
elif (line.startswith('ENDCHAR')):
inBitmap = False
elif ((inBitmap) and (charCode != None)):
while (len(line) < 4):
line += '0'
table[charCode].append(int(line, 16))
asciiMin = min(table.keys())
asciiMax = max(table.keys())
c += 'static const uint16_t %s_data[] = {\n' % name
for charCode in range(asciiMin, asciiMax + 1):
c += ' '
lines = table[charCode]
# strip off the leading from the top, shifting ascending characters
# like quotes down to fit in the X height
for i in range(0, leading):
if (lines[0] == 0):
lines = lines[1:]
else:
lines = lines[0:-1]
# reverse line bits so the leftmost pixel is the LSB
for line in lines:
reversedBits = 0
for b in range(0, 16):
reversedBits = (reversedBits << 1) | ((line >> b) & 0x01)
c += '0x%04x, ' % reversedBits
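# e.g. a BDF row read as 0x8000 (only the leftmost pixel set) is emitted as 0x0001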
c += '\n'
c += '};\n'
c += '\n'
c += 'const Font %s = {\n' % name
c += ' %i, // charWidth\n' % charWidth
c += ' %i, // charHeight\n' % charHeight
c += ' %i, // asciiMin\n' % asciiMin
c += ' %i, // asciiMax\n' % asciiMax
c += ' %s_data // data\n' % name
c += '};\n'
return((h, c, charHeight, name))
h = '''#ifndef _HOODWIND_fonts_h_
#define _HOODWIND_fonts_h_
#include <stdint.h>
typedef struct {
uint8_t charWidth;
uint8_t charHeight;
uint8_t asciiMin;
uint8_t asciiMax;
const uint16_t *data;
} Font;
#ifdef __cplusplus
extern "C" {
#endif
'''
c = '#include "fonts.h"\n\n'
heightAndName = list()
for path in sorted(glob.glob('./bdf/*.bdf')):
(fh, fc, height, name) = add_font(path)
h += fh + '\n'
c += fc + '\n'
heightAndName.append((height, name))
h += 'const Font *fontWithHeight(uint8_t h);\n'
h += '''
#ifdef __cplusplus
} // extern "C"
#endif
#endif
'''
c += 'const Font *fontWithHeight(uint8_t h) {\n'
for item in heightAndName:
c += ' if (h <= %i) return(&%s);\n' % item
c += ' return(&%s);\n' % heightAndName[-1][1]
c += '}\n'
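# Example of the generated selector, assuming two fonts named font8 and font16
# with heights 8 and 16 (hypothetical names):
#   const Font *fontWithHeight(uint8_t h) {
#     if (h <= 8) return(&font8);
#     if (h <= 16) return(&font16);
#     return(&font16);
#   }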
with open('../fonts.h', 'w') as f:
f.write(h)
with open('../fonts.c', 'w') as f:
f.write(c)
```
|
{
"source": "jessecureton-aurora/dulwich",
"score": 2
}
|
#### File: dulwich/tests/test_objects.py
```python
from io import BytesIO
import datetime
from itertools import (
permutations,
)
import os
import stat
import warnings
from contextlib import contextmanager
from dulwich.errors import (
ObjectFormatException,
)
from dulwich.objects import (
Blob,
Tree,
Commit,
ShaFile,
Tag,
TreeEntry,
format_timezone,
hex_to_sha,
sha_to_hex,
hex_to_filename,
check_hexsha,
check_identity,
object_class,
parse_timezone,
pretty_format_tree_entry,
parse_tree,
_parse_tree_py,
sorted_tree_items,
_sorted_tree_items_py,
MAX_TIME,
)
from dulwich.tests import (
TestCase,
)
from dulwich.tests.utils import (
make_commit,
make_object,
functest_builder,
ext_functest_builder,
)
a_sha = b"6f670c0fb53f9463760b7295fbb814e965fb20c8"
b_sha = b"2969be3e8ee1c0222396a5611407e4769f14e54b"
c_sha = b"954a536f7819d40e6f637f849ee187dd10066349"
tree_sha = b"70c190eb48fa8bbb50ddc692a17b44cb781af7f6"
tag_sha = b"71033db03a03c6a36721efcf1968dd8f8e0cf023"
class TestHexToSha(TestCase):
def test_simple(self):
self.assertEqual(b"\xab\xcd" * 10, hex_to_sha(b"abcd" * 10))
def test_reverse(self):
self.assertEqual(b"abcd" * 10, sha_to_hex(b"\xab\xcd" * 10))
class BlobReadTests(TestCase):
"""Test decompression of blobs"""
def get_sha_file(self, cls, base, sha):
dir = os.path.join(os.path.dirname(__file__), "data", base)
return cls.from_path(hex_to_filename(dir, sha))
def get_blob(self, sha):
"""Return the blob named sha from the test data dir"""
return self.get_sha_file(Blob, "blobs", sha)
def get_tree(self, sha):
return self.get_sha_file(Tree, "trees", sha)
def get_tag(self, sha):
return self.get_sha_file(Tag, "tags", sha)
def commit(self, sha):
return self.get_sha_file(Commit, "commits", sha)
def test_decompress_simple_blob(self):
b = self.get_blob(a_sha)
self.assertEqual(b.data, b"test 1\n")
self.assertEqual(b.sha().hexdigest().encode("ascii"), a_sha)
def test_hash(self):
b = self.get_blob(a_sha)
self.assertEqual(hash(b.id), hash(b))
def test_parse_empty_blob_object(self):
sha = b"e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"
b = self.get_blob(sha)
self.assertEqual(b.data, b"")
self.assertEqual(b.id, sha)
self.assertEqual(b.sha().hexdigest().encode("ascii"), sha)
def test_create_blob_from_string(self):
string = b"test 2\n"
b = Blob.from_string(string)
self.assertEqual(b.data, string)
self.assertEqual(b.sha().hexdigest().encode("ascii"), b_sha)
def test_legacy_from_file(self):
b1 = Blob.from_string(b"foo")
b_raw = b1.as_legacy_object()
b2 = b1.from_file(BytesIO(b_raw))
self.assertEqual(b1, b2)
def test_legacy_from_file_compression_level(self):
b1 = Blob.from_string(b"foo")
b_raw = b1.as_legacy_object(compression_level=6)
b2 = b1.from_file(BytesIO(b_raw))
self.assertEqual(b1, b2)
def test_chunks(self):
string = b"test 5\n"
b = Blob.from_string(string)
self.assertEqual([string], b.chunked)
def test_splitlines(self):
for case in [
[],
[b"foo\nbar\n"],
[b"bl\na", b"blie"],
[b"bl\na", b"blie", b"bloe\n"],
[b"", b"bl\na", b"blie", b"bloe\n"],
[b"", b"", b"", b"bla\n"],
[b"", b"", b"", b"bla\n", b""],
[b"bl", b"", b"a\naaa"],
[b"a\naaa", b"a"],
]:
b = Blob()
b.chunked = case
self.assertEqual(b.data.splitlines(True), b.splitlines())
def test_set_chunks(self):
b = Blob()
b.chunked = [b"te", b"st", b" 5\n"]
self.assertEqual(b"test 5\n", b.data)
b.chunked = [b"te", b"st", b" 6\n"]
self.assertEqual(b"test 6\n", b.as_raw_string())
self.assertEqual(b"test 6\n", bytes(b))
def test_parse_legacy_blob(self):
string = b"test 3\n"
b = self.get_blob(c_sha)
self.assertEqual(b.data, string)
self.assertEqual(b.sha().hexdigest().encode("ascii"), c_sha)
def test_eq(self):
blob1 = self.get_blob(a_sha)
blob2 = self.get_blob(a_sha)
self.assertEqual(blob1, blob2)
def test_read_tree_from_file(self):
t = self.get_tree(tree_sha)
self.assertEqual(t.items()[0], (b"a", 33188, a_sha))
self.assertEqual(t.items()[1], (b"b", 33188, b_sha))
def test_read_tree_from_file_parse_count(self):
old_deserialize = Tree._deserialize
def reset_deserialize():
Tree._deserialize = old_deserialize
self.addCleanup(reset_deserialize)
self.deserialize_count = 0
def counting_deserialize(*args, **kwargs):
self.deserialize_count += 1
return old_deserialize(*args, **kwargs)
Tree._deserialize = counting_deserialize
t = self.get_tree(tree_sha)
self.assertEqual(t.items()[0], (b"a", 33188, a_sha))
self.assertEqual(t.items()[1], (b"b", 33188, b_sha))
self.assertEqual(self.deserialize_count, 1)
def test_read_tag_from_file(self):
t = self.get_tag(tag_sha)
self.assertEqual(
t.object, (Commit, b"51b668fd5bf7061b7d6fa525f88803e6cfadaa51")
)
self.assertEqual(t.name, b"signed")
self.assertEqual(t.tagger, b"<NAME> <<EMAIL>>")
self.assertEqual(t.tag_time, 1231203091)
self.assertEqual(t.message, b"This is a signed tag\n")
self.assertEqual(
t.signature,
b"-----BEGIN PGP SIGNATURE-----\n"
b"Version: GnuPG v1.4.9 (GNU/Linux)\n"
b"\n"
b"iEYEABECAAYFAkliqx8ACgkQqSMmLy9u/"
b"kcx5ACfakZ9NnPl02tOyYP6pkBoEkU1\n"
b"5EcAn0UFgokaSvS371Ym/4W9iJj6vh3h\n"
b"=ql7y\n"
b"-----END PGP SIGNATURE-----\n",
)
def test_read_commit_from_file(self):
sha = b"60dacdc733de308bb77bb76ce0fb0f9b44c9769e"
c = self.commit(sha)
self.assertEqual(c.tree, tree_sha)
self.assertEqual(c.parents, [b"0d89f20333fbb1d2f3a94da77f4981373d8f4310"])
self.assertEqual(c.author, b"<NAME> <<EMAIL>>")
self.assertEqual(c.committer, b"<NAME> <<EMAIL>>")
self.assertEqual(c.commit_time, 1174759230)
self.assertEqual(c.commit_timezone, 0)
self.assertEqual(c.author_timezone, 0)
self.assertEqual(c.message, b"Test commit\n")
def test_read_commit_no_parents(self):
sha = b"0d89f20333fbb1d2f3a94da77f4981373d8f4310"
c = self.commit(sha)
self.assertEqual(c.tree, b"90182552c4a85a45ec2a835cadc3451bebdfe870")
self.assertEqual(c.parents, [])
self.assertEqual(c.author, b"<NAME> <<EMAIL>>")
self.assertEqual(c.committer, b"<NAME> <<EMAIL>>")
self.assertEqual(c.commit_time, 1174758034)
self.assertEqual(c.commit_timezone, 0)
self.assertEqual(c.author_timezone, 0)
self.assertEqual(c.message, b"Test commit\n")
def test_read_commit_two_parents(self):
sha = b"5dac377bdded4c9aeb8dff595f0faeebcc8498cc"
c = self.commit(sha)
self.assertEqual(c.tree, b"d80c186a03f423a81b39df39dc87fd269736ca86")
self.assertEqual(
c.parents,
[
b"ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd",
b"4cffe90e0a41ad3f5190079d7c8f036bde29cbe6",
],
)
self.assertEqual(c.author, b"<NAME> <<EMAIL>>")
self.assertEqual(c.committer, b"<NAME> <<EMAIL>>")
self.assertEqual(c.commit_time, 1174773719)
self.assertEqual(c.commit_timezone, 0)
self.assertEqual(c.author_timezone, 0)
self.assertEqual(c.message, b"Merge ../b\n")
def test_stub_sha(self):
sha = b"5" * 40
c = make_commit(id=sha, message=b"foo")
self.assertIsInstance(c, Commit)
self.assertEqual(sha, c.id)
self.assertNotEqual(sha, c.sha())
class ShaFileCheckTests(TestCase):
def assertCheckFails(self, cls, data):
obj = cls()
def do_check():
obj.set_raw_string(data)
obj.check()
self.assertRaises(ObjectFormatException, do_check)
def assertCheckSucceeds(self, cls, data):
obj = cls()
obj.set_raw_string(data)
self.assertEqual(None, obj.check())
small_buffer_zlib_object = (
b"\x48\x89\x15\xcc\x31\x0e\xc2\x30\x0c\x40\x51\xe6"
b"\x9c\xc2\x3b\xaa\x64\x37\xc4\xc1\x12\x42\x5c\xc5"
b"\x49\xac\x52\xd4\x92\xaa\x78\xe1\xf6\x94\xed\xeb"
b"\x0d\xdf\x75\x02\xa2\x7c\xea\xe5\x65\xd5\x81\x8b"
b"\x9a\x61\xba\xa0\xa9\x08\x36\xc9\x4c\x1a\xad\x88"
b"\x16\xba\x46\xc4\xa8\x99\x6a\x64\xe1\xe0\xdf\xcd"
b"\xa0\xf6\x75\x9d\x3d\xf8\xf1\xd0\x77\xdb\xfb\xdc"
b"\x86\xa3\x87\xf1\x2f\x93\xed\x00\xb7\xc7\xd2\xab"
b"\x2e\xcf\xfe\xf1\x3b\x50\xa4\x91\x53\x12\x24\x38"
b"\x23\x21\x86\xf0\x03\x2f\x91\x24\x52"
)
class ShaFileTests(TestCase):
def test_deflated_smaller_window_buffer(self):
# zlib on some systems uses smaller buffers,
# resulting in a different header.
# See https://github.com/libgit2/libgit2/pull/464
sf = ShaFile.from_file(BytesIO(small_buffer_zlib_object))
self.assertEqual(sf.type_name, b"tag")
self.assertEqual(sf.tagger, b" <@localhost>")
class CommitSerializationTests(TestCase):
def make_commit(self, **kwargs):
attrs = {
"tree": b"d80c186a03f423a81b39df39dc87fd269736ca86",
"parents": [
b"ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd",
b"4cffe90e0a41ad3f5190079d7c8f036bde29cbe6",
],
"author": b"<NAME> <<EMAIL>>",
"committer": b"<NAME> <<EMAIL>>",
"commit_time": 1174773719,
"author_time": 1174773719,
"commit_timezone": 0,
"author_timezone": 0,
"message": b"Merge ../b\n",
}
attrs.update(kwargs)
return make_commit(**attrs)
def test_encoding(self):
c = self.make_commit(encoding=b"iso8859-1")
self.assertIn(b"encoding iso8859-1\n", c.as_raw_string())
def test_short_timestamp(self):
c = self.make_commit(commit_time=30)
c1 = Commit()
c1.set_raw_string(c.as_raw_string())
self.assertEqual(30, c1.commit_time)
def test_full_tree(self):
c = self.make_commit(commit_time=30)
t = Tree()
t.add(b"data-x", 0o644, Blob().id)
c.tree = t
c1 = Commit()
c1.set_raw_string(c.as_raw_string())
self.assertEqual(t.id, c1.tree)
self.assertEqual(c.as_raw_string(), c1.as_raw_string())
def test_raw_length(self):
c = self.make_commit()
self.assertEqual(len(c.as_raw_string()), c.raw_length())
def test_simple(self):
c = self.make_commit()
self.assertEqual(c.id, b"5dac377bdded4c9aeb8dff595f0faeebcc8498cc")
self.assertEqual(
b"tree d80c186a03f423a81b39df39dc87fd269736ca86\n"
b"parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd\n"
b"parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6\n"
b"author <NAME> <<EMAIL>> "
b"1174773719 +0000\n"
b"committer <NAME> <<EMAIL>> "
b"1174773719 +0000\n"
b"\n"
b"Merge ../b\n",
c.as_raw_string(),
)
def test_timezone(self):
c = self.make_commit(commit_timezone=(5 * 60))
self.assertIn(b" +0005\n", c.as_raw_string())
def test_neg_timezone(self):
c = self.make_commit(commit_timezone=(-1 * 3600))
self.assertIn(b" -0100\n", c.as_raw_string())
def test_deserialize(self):
c = self.make_commit()
d = Commit()
d._deserialize(c.as_raw_chunks())
self.assertEqual(c, d)
def test_serialize_gpgsig(self):
commit = self.make_commit(
gpgsig=b"""-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1
iQIcBAABCgAGBQJULCdfAAoJEACAbyvXKaRXuKwP/RyP9PA49uAvu8tQVCC/uBa8
vi975+xvO14R8Pp8k2nps7lSxCdtCd+xVT1VRHs0wNhOZo2YCVoU1HATkPejqSeV
NScTH<KEY>
=X6RT
-----END PGP SIGNATURE-----"""
)
self.maxDiff = None
self.assertEqual(
b"""\
tree d80c186a03f423a81b39df39dc87fd269736ca86
parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd
parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6
author <NAME> <<EMAIL>> 1174773719 +0000
committer <NAME> <<EMAIL>> 1174773719 +0000
gpgsig -----BEGIN PGP SIGNATURE-----
Version: GnuPG v1
iQIcBAABCgAGBQJULCdfAAoJEACAbyvXKaRXuKwP/RyP9PA49uAvu8tQVCC/uBa8
vi975+xvO14R8Pp8k2nps7lSxCdtCd+xVT1VRHs0wNhOZo2YCVoU1HATkPejqSeV
NScTHcxnk4/+bxyfk14xvJkNp7FlQ3npmBkA+lbV0Ubr33rvtIE5jiJPyz+SgWAg
xdBG2TojV0squj00GoH/euK6aX7GgZtwdtpTv44haCQdSuPGDcI4TORqR6YSqvy3
GPE+3ZqXPFFb+KILtimkxitdwB7CpwmNse2vE3rONSwTvi8nq3ZoQYNY73CQGkUy
qoFU0pDtw87U3niFin1ZccDgH0bB6624sLViqrjcbYJeg815Htsu4rmzVaZADEVC
XhIO4MThebusdk0AcNGjgpf3HRHk0DPMDDlIjm+Oao0cqovvF6VyYmcb0C+RmhJj
dodLXMNmbqErwTk3zEkW0yZvNIYXH7m9SokPCZa4eeIM7be62X6h1mbt0/IU6Th+
v18fS0iTMP/Viug5und+05C/v04kgDo0CPphAbXwWMnkE4B6Tl9sdyUYXtvQsL7x
0+WP1gL27ANqNZiI07Kz/BhbBAQI/+2TFT7oGr0AnFPQ5jHp+3GpUf6OKuT1wT3H
ND189UFuRuubxb42vZhpcXRbqJVWnbECTKVUPsGZqat3enQUB63uM4i6/RdONDZA
fDeF1m4qYs+cUXKNUZ03
=X6RT
-----END PGP SIGNATURE-----
Merge ../b
""",
commit.as_raw_string(),
)
def test_serialize_mergetag(self):
tag = make_object(
Tag,
object=(Commit, b"a<PASSWORD>"),
object_type_name=b"commit",
name=b"v2.6.22-rc7",
tag_time=1183319674,
tag_timezone=0,
tagger=b"<NAME> <<EMAIL>>",
message=default_message,
)
commit = self.make_commit(mergetag=[tag])
self.assertEqual(
b"""tree d80c186a03f423a81b39df39dc87fd269736ca86
parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd
parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6
author <NAME> <<EMAIL>> 1174773719 +0000
committer <NAME> <<EMAIL>> 1174773719 +0000
mergetag object a38d6181ff27824c79fc7df825164a212eff6a3f
type commit
tag v2.6.22-rc7
tagger <NAME> <<EMAIL>> 1183319674 +0000
Linux 2.6.22-rc7
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql
OK2XeQOiEeXtT76rV4t2WR4=
=ivrA
-----END PGP SIGNATURE-----
Merge ../b
""",
commit.as_raw_string(),
)
def test_serialize_mergetags(self):
tag = make_object(
Tag,
object=(Commit, b"a38d6181ff27824c79fc7df825164a212eff6a3f"),
object_type_name=b"commit",
name=b"v2.6.22-rc7",
tag_time=1183319674,
tag_timezone=0,
tagger=b"Lin<NAME> <<EMAIL>>",
message=default_message,
)
commit = self.make_commit(mergetag=[tag, tag])
self.assertEqual(
b"""tree d80c186a03f423a81b39df39dc87fd269736ca86
parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd
parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6
author <NAME> <<EMAIL>> 1174773719 +0000
committer <NAME> <<EMAIL>> 1174773719 +0000
mergetag object a38d6181ff27824c79fc7df825164a212eff6a3f
type commit
tag v2.6.22-rc7
tagger <NAME> <<EMAIL>> 1183319674 +0000
Linux 2.6.22-rc7
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql
OK2XeQOiEeXtT76rV4t2WR4=
=ivrA
-----END PGP SIGNATURE-----
mergetag object a38d6181ff27824c79fc7df825164a212eff6a3f
type commit
tag v2.6.22-rc7
tagger <NAME> <<EMAIL>> 1183319674 +0000
Linux 2.6.22-rc7
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql
OK2XeQOiEeXtT76rV4t2WR4=
=ivrA
-----END PGP SIGNATURE-----
Merge ../b
""",
commit.as_raw_string(),
)
def test_deserialize_mergetag(self):
tag = make_object(
Tag,
object=(Commit, b"a38d6181ff27824c79fc7df825164a212eff6a3f"),
object_type_name=b"commit",
name=b"v2.6.22-rc7",
tag_time=1183319674,
tag_timezone=0,
tagger=b"<NAME> <<EMAIL>>",
message=default_message,
)
commit = self.make_commit(mergetag=[tag])
d = Commit()
d._deserialize(commit.as_raw_chunks())
self.assertEqual(commit, d)
def test_deserialize_mergetags(self):
tag = make_object(
Tag,
object=(Commit, b"a38d<PASSWORD>"),
object_type_name=b"commit",
name=b"v2.6.22-rc7",
tag_time=1183319674,
tag_timezone=0,
tagger=b"<NAME> <<EMAIL>>",
message=default_message,
)
commit = self.make_commit(mergetag=[tag, tag])
d = Commit()
d._deserialize(commit.as_raw_chunks())
self.assertEqual(commit, d)
default_committer = b"<NAME> <<EMAIL>> 1174773719 +0000"
class CommitParseTests(ShaFileCheckTests):
def make_commit_lines(
self,
tree=b"d80c186a03f423a81b39df39dc87fd269736ca86",
parents=[
b"ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd",
b"4cffe90e0a41ad3f5<PASSWORD>",
],
author=default_committer,
committer=default_committer,
encoding=None,
message=b"Merge ../b\n",
extra=None,
):
lines = []
if tree is not None:
lines.append(b"tree " + tree)
if parents is not None:
lines.extend(b"parent " + p for p in parents)
if author is not None:
lines.append(b"author " + author)
if committer is not None:
lines.append(b"committer " + committer)
if encoding is not None:
lines.append(b"encoding " + encoding)
if extra is not None:
for name, value in sorted(extra.items()):
lines.append(name + b" " + value)
lines.append(b"")
if message is not None:
lines.append(message)
return lines
def make_commit_text(self, **kwargs):
return b"\n".join(self.make_commit_lines(**kwargs))
def test_simple(self):
c = Commit.from_string(self.make_commit_text())
self.assertEqual(b"Merge ../b\n", c.message)
self.assertEqual(b"<NAME> <<EMAIL>>", c.author)
self.assertEqual(b"<NAME> <<EMAIL>>", c.committer)
self.assertEqual(b"d80c186a03f423a81b39df39dc87fd269736ca86", c.tree)
self.assertEqual(
[
b"ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd",
b"4cffe90e0a41ad3f5190079d7c8f036bde29cbe6",
],
c.parents,
)
expected_time = datetime.datetime(2007, 3, 24, 22, 1, 59)
self.assertEqual(
expected_time, datetime.datetime.utcfromtimestamp(c.commit_time)
)
self.assertEqual(0, c.commit_timezone)
self.assertEqual(
expected_time, datetime.datetime.utcfromtimestamp(c.author_time)
)
self.assertEqual(0, c.author_timezone)
self.assertEqual(None, c.encoding)
def test_custom(self):
c = Commit.from_string(self.make_commit_text(extra={b"extra-field": b"data"}))
self.assertEqual([(b"extra-field", b"data")], c.extra)
def test_encoding(self):
c = Commit.from_string(self.make_commit_text(encoding=b"UTF-8"))
self.assertEqual(b"UTF-8", c.encoding)
def test_check(self):
self.assertCheckSucceeds(Commit, self.make_commit_text())
self.assertCheckSucceeds(Commit, self.make_commit_text(parents=None))
self.assertCheckSucceeds(Commit, self.make_commit_text(encoding=b"UTF-8"))
self.assertCheckFails(Commit, self.make_commit_text(tree=b"xxx"))
self.assertCheckFails(Commit, self.make_commit_text(parents=[a_sha, b"xxx"]))
bad_committer = b"some guy without an email address 1174773719 +0000"
self.assertCheckFails(Commit, self.make_commit_text(committer=bad_committer))
self.assertCheckFails(Commit, self.make_commit_text(author=bad_committer))
self.assertCheckFails(Commit, self.make_commit_text(author=None))
self.assertCheckFails(Commit, self.make_commit_text(committer=None))
self.assertCheckFails(
Commit, self.make_commit_text(author=None, committer=None)
)
def test_check_duplicates(self):
# duplicate each of the header fields
for i in range(5):
lines = self.make_commit_lines(parents=[a_sha], encoding=b"UTF-8")
lines.insert(i, lines[i])
text = b"\n".join(lines)
if lines[i].startswith(b"parent"):
# duplicate parents are ok for now
self.assertCheckSucceeds(Commit, text)
else:
self.assertCheckFails(Commit, text)
def test_check_order(self):
lines = self.make_commit_lines(parents=[a_sha], encoding=b"UTF-8")
headers = lines[:5]
rest = lines[5:]
# of all possible permutations, ensure only the original succeeds
for perm in permutations(headers):
perm = list(perm)
text = b"\n".join(perm + rest)
if perm == headers:
self.assertCheckSucceeds(Commit, text)
else:
self.assertCheckFails(Commit, text)
def test_check_commit_with_unparseable_time(self):
identity_with_wrong_time = (
b"<NAME> <<EMAIL>> 18446743887488505614+42707004"
)
# Those fail at reading time
self.assertCheckFails(
Commit,
self.make_commit_text(
author=default_committer, committer=identity_with_wrong_time
),
)
self.assertCheckFails(
Commit,
self.make_commit_text(
author=identity_with_wrong_time, committer=default_committer
),
)
def test_check_commit_with_overflow_date(self):
"""Date with overflow should raise an ObjectFormatException when checked"""
identity_with_wrong_time = (
b"<NAME> <<EMAIL>> 18446743887488505614 +42707004"
)
commit0 = Commit.from_string(
self.make_commit_text(
author=identity_with_wrong_time, committer=default_committer
)
)
commit1 = Commit.from_string(
self.make_commit_text(
author=default_committer, committer=identity_with_wrong_time
)
)
# Those fail when triggering the check() method
for commit in [commit0, commit1]:
with self.assertRaises(ObjectFormatException):
commit.check()
def test_mangled_author_line(self):
"""Mangled author line should successfully parse"""
author_line = (
b'<NAME> <<EMAIL>> <"<NAME> '
b'<<EMAIL>>"> 1197475547 -0500'
)
expected_identity = (
b'<NAME> <<EMAIL>> <"<NAME> '
b'<<EMAIL>>">'
)
commit = Commit.from_string(self.make_commit_text(author=author_line))
# The commit parses properly
self.assertEqual(commit.author, expected_identity)
# But the check fails because the author identity is bogus
with self.assertRaises(ObjectFormatException):
commit.check()
def test_parse_gpgsig(self):
c = Commit.from_string(
b"""tree aaff74984cccd156a469afa7d9ab10e4777beb24
author <NAME> <<EMAIL>> 1412179807 +0200
committer <NAME> <<EMAIL>> 1412179807 +0200
gpgsig -----BEGIN PGP SIGNATURE-----
Version: GnuPG v1
iQIcBAABCgAGBQJULCdfAAoJEACAbyvXKaRXuKwP/RyP9PA49uAvu8tQVCC/uBa8
vi975+xvO14R8Pp8k2nps7lSxCdtCd+xVT1VRHs0wNhOZo2YCVoU1HATkPejqSeV
<KEY>
=X6RT
-----END PGP SIGNATURE-----
foo
"""
)
self.assertEqual(b"foo\n", c.message)
self.assertEqual([], c.extra)
self.assertEqual(
b"""-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1
iQIcBAABCgAGBQJULCdfAAoJEACAbyvXKaRXuKwP/RyP9PA49uAvu8tQVCC/uBa8
vi975+xvO14R8Pp8k2nps7lSxCdtCd+xVT1VRHs0wNhOZo2YCVoU1HATkPejqSeV
NScTHcxnk4/+bxyfk14xvJkNp7FlQ3npmBkA+lbV0Ubr33rvtIE5jiJPyz+SgWAg
xdBG2TojV0squj00GoH/euK6aX7GgZtwdtpTv44haCQdSuPGDcI4TORqR6YSqvy3
GPE+3ZqXPFFb+KILtimkxitdwB7CpwmNse2vE3rONSwTvi8nq3ZoQYNY73CQGkUy
qoFU0pDtw87U3niFin1ZccDgH0bB6624sLViqrjcbYJeg815Htsu4rmzVaZADEVC
XhIO4MThebusdk0AcNGjgpf3HRHk0DPMDDlIjm+Oao0cqovvF6VyYmcb0C+RmhJj
dodLXMNmbqErwTk3zEkW0yZvNIYXH7m9SokPCZa4eeIM7be62X6h1mbt0/IU6Th+
v18fS0iTMP/Viug5und+05C/v04kgDo0CPphAbXwWMnkE4B6Tl9sdyUYXtvQsL7x
0+WP1gL27ANqNZiI07Kz/BhbBAQI/+2TFT7oGr0AnFPQ5jHp+3GpUf6OKuT1wT3H
ND189UFuRuubxb42vZhpcXRbqJVWnbECTKVUPsGZqat3enQUB63uM4i6/RdONDZA
fDeF1m4qYs+cUXKNUZ03
=X6RT
-----END PGP SIGNATURE-----""",
c.gpgsig,
)
def test_parse_header_trailing_newline(self):
c = Commit.from_string(
b"""\
tree a7d6277f78d3ecd0230a1a5df6db00b1d9c521ac
parent c09b6dec7a73760fbdb478383a3c926b18db8bbe
author <NAME> <<EMAIL>> 1461964057 -1000
committer <NAME> <<EMAIL>> 1461964057 -1000
gpgsig -----BEGIN PGP SIGNATURE-----
wsBcBAABCAAQBQJXI80ZCRA6pcNDcVZ70gAAarcIABs72xRX3FWeox349nh6ucJK
CtwmBTusez2Zwmq895fQEbZK7jpaGO5TRO4OvjFxlRo0E08UFx3pxZHSpj6bsFeL
hHsDXnCaotphLkbgKKRdGZo7tDqM84wuEDlh4MwNe7qlFC7bYLDyysc81ZX5lpMm
2MFF1TvjLAzSvkT7H1LPkuR3hSvfCYhikbPOUNnKOo0sYjeJeAJ/JdAVQ4mdJIM0
gl3REp9+A+qBEpNQI7z94Pg5Bc5xenwuDh3SJgHvJV6zBWupWcdB3fAkVd4TPnEZ
nHxksHfeNln9RKseIDcy4b2ATjhDNIJZARHNfr6oy4u3XPW4svRqtBsLoMiIeuI=
=ms6q
-----END PGP SIGNATURE-----
3.3.0 version bump and docs
"""
)
self.assertEqual([], c.extra)
self.assertEqual(
b"""\
-----BEGIN PGP SIGNATURE-----
wsBcBAABCAAQBQJXI80ZCRA6pcNDcVZ70gAAarcIABs72xRX3FWeox349nh6ucJK
CtwmBTusez2Zwmq895fQEbZK7jpaGO5TRO4OvjFxlRo0E08UFx3pxZHSpj6bsFeL
hHsDXnCaotphLkbgKKRdGZo7tDqM84wuEDlh4MwNe7qlFC7bYLDyysc81ZX5lpMm
2MFF1TvjLAzSvkT7H1LPkuR3hSvfCYhikbPOUNnKOo0sYjeJeAJ/JdAVQ4mdJIM0
gl3REp9+A+qBEpNQI7z94Pg5Bc5xenwuDh3SJgHvJV6zBWupWcdB3fAkVd4TPnEZ
nHxksHfeNln9RKseIDcy4b2ATjhDNIJZARHNfr6oy4u3XPW4svRqtBsLoMiIeuI=
=ms6q
-----END PGP SIGNATURE-----\n""",
c.gpgsig,
)
_TREE_ITEMS = {
b"a.c": (0o100755, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
b"a": (stat.S_IFDIR, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
b"a/c": (stat.S_IFDIR, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
}
_SORTED_TREE_ITEMS = [
TreeEntry(b"a.c", 0o100755, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
TreeEntry(b"a", stat.S_IFDIR, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
TreeEntry(b"a/c", stat.S_IFDIR, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
]
class TreeTests(ShaFileCheckTests):
def test_add(self):
myhexsha = b"d80c186a03f423a81b39df39dc87fd269736ca86"
x = Tree()
x.add(b"myname", 0o100755, myhexsha)
self.assertEqual(x[b"myname"], (0o100755, myhexsha))
self.assertEqual(b"100755 myname\0" + hex_to_sha(myhexsha), x.as_raw_string())
def test_add_old_order(self):
myhexsha = b"d80c186a03f423a81b39df39dc87fd269736ca86"
x = Tree()
warnings.simplefilter("ignore", DeprecationWarning)
try:
x.add(0o100755, b"myname", myhexsha)
finally:
warnings.resetwarnings()
self.assertEqual(x[b"myname"], (0o100755, myhexsha))
self.assertEqual(b"100755 myname\0" + hex_to_sha(myhexsha), x.as_raw_string())
def test_simple(self):
myhexsha = b"d80c186a03f423a81b39df39dc87fd269736ca86"
x = Tree()
x[b"myname"] = (0o100755, myhexsha)
self.assertEqual(b"100755 myname\0" + hex_to_sha(myhexsha), x.as_raw_string())
self.assertEqual(b"100755 myname\0" + hex_to_sha(myhexsha), bytes(x))
def test_tree_update_id(self):
x = Tree()
x[b"a.c"] = (0o100755, b"d80c186a03f423a81b39df39dc87fd269736ca86")
self.assertEqual(b"0c5c6bc2c081accfbc250331b19e43b904ab9cdd", x.id)
x[b"a.b"] = (stat.S_IFDIR, b"d80c186a03f423a81b39df39dc87fd269736ca86")
self.assertEqual(b"07bfcb5f3ada15bbebdfa3bbb8fd858a363925c8", x.id)
def test_tree_iteritems_dir_sort(self):
x = Tree()
for name, item in _TREE_ITEMS.items():
x[name] = item
self.assertEqual(_SORTED_TREE_ITEMS, x.items())
def test_tree_items_dir_sort(self):
x = Tree()
for name, item in _TREE_ITEMS.items():
x[name] = item
self.assertEqual(_SORTED_TREE_ITEMS, x.items())
def _do_test_parse_tree(self, parse_tree):
dir = os.path.join(os.path.dirname(__file__), "data", "trees")
o = Tree.from_path(hex_to_filename(dir, tree_sha))
self.assertEqual(
[(b"a", 0o100644, a_sha), (b"b", 0o100644, b_sha)],
list(parse_tree(o.as_raw_string())),
)
# test a broken tree that has a leading 0 on the file mode
broken_tree = b"0100644 foo\0" + hex_to_sha(a_sha)
def eval_parse_tree(*args, **kwargs):
return list(parse_tree(*args, **kwargs))
self.assertEqual([(b"foo", 0o100644, a_sha)], eval_parse_tree(broken_tree))
self.assertRaises(
ObjectFormatException, eval_parse_tree, broken_tree, strict=True
)
test_parse_tree = functest_builder(_do_test_parse_tree, _parse_tree_py)
test_parse_tree_extension = ext_functest_builder(_do_test_parse_tree, parse_tree)
def _do_test_sorted_tree_items(self, sorted_tree_items):
def do_sort(entries):
return list(sorted_tree_items(entries, False))
actual = do_sort(_TREE_ITEMS)
self.assertEqual(_SORTED_TREE_ITEMS, actual)
self.assertIsInstance(actual[0], TreeEntry)
# C/Python implementations may differ in specific error types, but
# should all error on invalid inputs.
# For example, the C implementation has stricter type checks, so may
# raise TypeError where the Python implementation raises
# AttributeError.
errors = (TypeError, ValueError, AttributeError)
self.assertRaises(errors, do_sort, b"foo")
self.assertRaises(errors, do_sort, {b"foo": (1, 2, 3)})
myhexsha = b"d80c186a03f423a81b39df39dc87fd269736ca86"
self.assertRaises(errors, do_sort, {b"foo": (b"xxx", myhexsha)})
self.assertRaises(errors, do_sort, {b"foo": (0o100755, 12345)})
test_sorted_tree_items = functest_builder(
_do_test_sorted_tree_items, _sorted_tree_items_py
)
test_sorted_tree_items_extension = ext_functest_builder(
_do_test_sorted_tree_items, sorted_tree_items
)
def _do_test_sorted_tree_items_name_order(self, sorted_tree_items):
self.assertEqual(
[
TreeEntry(
b"a",
stat.S_IFDIR,
b"d80c186a03f423a81b39df39dc87fd269736ca86",
),
TreeEntry(
b"a.c",
0o100755,
b"d80c186a03f423a81b39df39dc87fd269736ca86",
),
TreeEntry(
b"a/c",
stat.S_IFDIR,
b"d80c186a03f423a81b39df39dc87fd269736ca86",
),
],
list(sorted_tree_items(_TREE_ITEMS, True)),
)
test_sorted_tree_items_name_order = functest_builder(
_do_test_sorted_tree_items_name_order, _sorted_tree_items_py
)
test_sorted_tree_items_name_order_extension = ext_functest_builder(
_do_test_sorted_tree_items_name_order, sorted_tree_items
)
def test_check(self):
t = Tree
sha = hex_to_sha(a_sha)
# filenames
self.assertCheckSucceeds(t, b"100644 .a\0" + sha)
self.assertCheckFails(t, b"100644 \0" + sha)
self.assertCheckFails(t, b"100644 .\0" + sha)
self.assertCheckFails(t, b"100644 a/a\0" + sha)
self.assertCheckFails(t, b"100644 ..\0" + sha)
self.assertCheckFails(t, b"100644 .git\0" + sha)
# modes
self.assertCheckSucceeds(t, b"100644 a\0" + sha)
self.assertCheckSucceeds(t, b"100755 a\0" + sha)
self.assertCheckSucceeds(t, b"160000 a\0" + sha)
# TODO more whitelisted modes
self.assertCheckFails(t, b"123456 a\0" + sha)
self.assertCheckFails(t, b"123abc a\0" + sha)
# should fail check, but parses ok
self.assertCheckFails(t, b"0100644 foo\0" + sha)
# shas
self.assertCheckFails(t, b"100644 a\0" + (b"x" * 5))
self.assertCheckFails(t, b"100644 a\0" + (b"x" * 18) + b"\0")
self.assertCheckFails(t, b"100644 a\0" + (b"x" * 21) + b"\n100644 b\0" + sha)
# ordering
sha2 = hex_to_sha(b_sha)
self.assertCheckSucceeds(t, b"100644 a\0" + sha + b"\n100644 b\0" + sha)
self.assertCheckSucceeds(t, b"100644 a\0" + sha + b"\n100644 b\0" + sha2)
self.assertCheckFails(t, b"100644 a\0" + sha + b"\n100755 a\0" + sha2)
self.assertCheckFails(t, b"100644 b\0" + sha2 + b"\n100644 a\0" + sha)
def test_iter(self):
t = Tree()
t[b"foo"] = (0o100644, a_sha)
self.assertEqual(set([b"foo"]), set(t))
class TagSerializeTests(TestCase):
def test_serialize_simple(self):
x = make_object(
Tag,
tagger=b"<NAME> <<EMAIL>>",
name=b"0.1",
message=b"Tag 0.1",
object=(Blob, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
tag_time=423423423,
tag_timezone=0,
)
self.assertEqual(
(
b"object d80c186a03f423a81b39df39dc87fd269736ca86\n"
b"type blob\n"
b"tag 0.1\n"
b"tagger <NAME> <<EMAIL>> "
b"423423423 +0000\n"
b"\n"
b"Tag 0.1"
),
x.as_raw_string(),
)
def test_serialize_none_message(self):
x = make_object(
Tag,
tagger=b"<NAME> <<EMAIL>>",
name=b"0.1",
message=None,
object=(Blob, b"d80c186a03f423a81b39df39dc87fd269736ca86"),
tag_time=423423423,
tag_timezone=0,
)
self.assertEqual(
(
b"object d80c186a03f423a81b39df39dc87fd269736ca86\n"
b"type blob\n"
b"tag 0.1\n"
b"tagger <NAME>ij <<EMAIL>> "
b"423423423 +0000\n"
),
x.as_raw_string(),
)
default_tagger = (
b"<NAME> <<EMAIL>> " b"1183319674 -0700"
)
default_message = b"""Linux 2.6.22-rc7
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql
OK2XeQOiEeXtT76rV4t2WR4=
=ivrA
-----END PGP SIGNATURE-----
"""
class TagParseTests(ShaFileCheckTests):
def make_tag_lines(
self,
object_sha=b"a38d6181ff27824c79fc7df825164a212eff6a3f",
object_type_name=b"commit",
name=b"v2.6.22-rc7",
tagger=default_tagger,
message=default_message,
):
lines = []
if object_sha is not None:
lines.append(b"object " + object_sha)
if object_type_name is not None:
lines.append(b"type " + object_type_name)
if name is not None:
lines.append(b"tag " + name)
if tagger is not None:
lines.append(b"tagger " + tagger)
if message is not None:
lines.append(b"")
lines.append(message)
return lines
def make_tag_text(self, **kwargs):
return b"\n".join(self.make_tag_lines(**kwargs))
def test_parse(self):
x = Tag()
x.set_raw_string(self.make_tag_text())
self.assertEqual(
b"<NAME> <<EMAIL>>", x.tagger
)
self.assertEqual(b"v2.6.22-rc7", x.name)
object_type, object_sha = x.object
self.assertEqual(b"a38d6181ff27824c79fc7df825164a212eff6a3f", object_sha)
self.assertEqual(Commit, object_type)
self.assertEqual(
datetime.datetime.utcfromtimestamp(x.tag_time),
datetime.datetime(2007, 7, 1, 19, 54, 34),
)
self.assertEqual(-25200, x.tag_timezone)
def test_parse_no_tagger(self):
x = Tag()
x.set_raw_string(self.make_tag_text(tagger=None))
self.assertEqual(None, x.tagger)
self.assertEqual(b"v2.6.22-rc7", x.name)
self.assertEqual(None, x.tag_time)
def test_parse_no_message(self):
x = Tag()
x.set_raw_string(self.make_tag_text(message=None))
self.assertEqual(None, x.message)
self.assertEqual(
b"<NAME> <<EMAIL>>", x.tagger
)
self.assertEqual(
datetime.datetime.utcfromtimestamp(x.tag_time),
datetime.datetime(2007, 7, 1, 19, 54, 34),
)
self.assertEqual(-25200, x.tag_timezone)
self.assertEqual(b"v2.6.22-rc7", x.name)
def test_check(self):
self.assertCheckSucceeds(Tag, self.make_tag_text())
self.assertCheckFails(Tag, self.make_tag_text(object_sha=None))
self.assertCheckFails(Tag, self.make_tag_text(object_type_name=None))
self.assertCheckFails(Tag, self.make_tag_text(name=None))
self.assertCheckFails(Tag, self.make_tag_text(name=b""))
self.assertCheckFails(Tag, self.make_tag_text(object_type_name=b"foobar"))
self.assertCheckFails(
Tag,
self.make_tag_text(
tagger=b"some guy without an email address 1183319674 -0700"
),
)
self.assertCheckFails(
Tag,
self.make_tag_text(
tagger=(
b"<NAME> <<EMAIL>> "
b"Sun 7 Jul 2007 12:54:34 +0700"
)
),
)
self.assertCheckFails(Tag, self.make_tag_text(object_sha=b"xxx"))
def test_check_tag_with_unparseable_field(self):
self.assertCheckFails(
Tag,
self.make_tag_text(
tagger=(
b"<NAME> <<EMAIL>> "
b"423423+0000"
)
),
)
def test_check_tag_with_overflow_time(self):
"""Date with overflow should raise an ObjectFormatException when checked"""
author = "<NAME> <<EMAIL>> %s +0000" % (MAX_TIME + 1,)
tag = Tag.from_string(self.make_tag_text(tagger=(author.encode())))
with self.assertRaises(ObjectFormatException):
tag.check()
def test_check_duplicates(self):
# duplicate each of the header fields
for i in range(4):
lines = self.make_tag_lines()
lines.insert(i, lines[i])
self.assertCheckFails(Tag, b"\n".join(lines))
def test_check_order(self):
lines = self.make_tag_lines()
headers = lines[:4]
rest = lines[4:]
# of all possible permutations, ensure only the original succeeds
for perm in permutations(headers):
perm = list(perm)
text = b"\n".join(perm + rest)
if perm == headers:
self.assertCheckSucceeds(Tag, text)
else:
self.assertCheckFails(Tag, text)
def test_tree_copy_after_update(self):
"""Check Tree.id is correctly updated when the tree is copied after updated."""
shas = []
tree = Tree()
shas.append(tree.id)
tree.add(b"data", 0o644, Blob().id)
copied = tree.copy()
shas.append(tree.id)
shas.append(copied.id)
self.assertNotIn(shas[0], shas[1:])
self.assertEqual(shas[1], shas[2])
class CheckTests(TestCase):
def test_check_hexsha(self):
check_hexsha(a_sha, "failed to check good sha")
self.assertRaises(
ObjectFormatException, check_hexsha, b"1" * 39, "sha too short"
)
self.assertRaises(
ObjectFormatException, check_hexsha, b"1" * 41, "sha too long"
)
self.assertRaises(
ObjectFormatException,
check_hexsha,
b"x" * 40,
"invalid characters",
)
def test_check_identity(self):
check_identity(
b"<NAME> <<EMAIL>>",
"failed to check good identity",
)
check_identity(b"<<EMAIL>>", "failed to check good identity")
self.assertRaises(
ObjectFormatException, check_identity, b"<NAME>", "no email"
)
self.assertRaises(
ObjectFormatException,
check_identity,
b"<NAME> <dborowitz",
"incomplete email",
)
self.assertRaises(
ObjectFormatException,
check_identity,
b"<EMAIL>>",
"incomplete email",
)
self.assertRaises(
ObjectFormatException,
check_identity,
b"<NAME> <<<EMAIL>>",
"typo",
)
self.assertRaises(
ObjectFormatException,
check_identity,
b"<NAME> <<EMAIL>>>",
"typo",
)
self.assertRaises(
ObjectFormatException,
check_identity,
b"<NAME> <<EMAIL>>xxx",
"trailing characters",
)
class TimezoneTests(TestCase):
def test_parse_timezone_utc(self):
self.assertEqual((0, False), parse_timezone(b"+0000"))
def test_parse_timezone_utc_negative(self):
self.assertEqual((0, True), parse_timezone(b"-0000"))
def test_generate_timezone_utc(self):
self.assertEqual(b"+0000", format_timezone(0))
def test_generate_timezone_utc_negative(self):
self.assertEqual(b"-0000", format_timezone(0, True))
def test_parse_timezone_cet(self):
self.assertEqual((60 * 60, False), parse_timezone(b"+0100"))
def test_format_timezone_cet(self):
self.assertEqual(b"+0100", format_timezone(60 * 60))
def test_format_timezone_pdt(self):
self.assertEqual(b"-0400", format_timezone(-4 * 60 * 60))
def test_parse_timezone_pdt(self):
self.assertEqual((-4 * 60 * 60, False), parse_timezone(b"-0400"))
def test_format_timezone_pdt_half(self):
self.assertEqual(b"-0440", format_timezone(int(((-4 * 60) - 40) * 60)))
def test_format_timezone_double_negative(self):
self.assertEqual(b"--700", format_timezone(int(((7 * 60)) * 60), True))
def test_parse_timezone_pdt_half(self):
self.assertEqual((((-4 * 60) - 40) * 60, False), parse_timezone(b"-0440"))
def test_parse_timezone_double_negative(self):
self.assertEqual((int(((7 * 60)) * 60), False), parse_timezone(b"+700"))
self.assertEqual((int(((7 * 60)) * 60), True), parse_timezone(b"--700"))
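# Note: parse_timezone() returns (offset_in_seconds, unnecessary_negative); the
# boolean records a "-0000"/"--700" style sign so format_timezone() can
# round-trip it, as the tests above demonstrate.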
class ShaFileCopyTests(TestCase):
def assert_copy(self, orig):
oclass = object_class(orig.type_num)
copy = orig.copy()
self.assertIsInstance(copy, oclass)
self.assertEqual(copy, orig)
self.assertIsNot(copy, orig)
def test_commit_copy(self):
attrs = {
"tree": b"d80c186a03f423a81b39df39dc87fd269736ca86",
"parents": [
b"ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd",
b"4cffe90e0a41ad3f5190079d7c8f036bde29cbe6",
],
"author": b"<NAME> <<EMAIL>>",
"committer": b"<NAME> <<EMAIL>>",
"commit_time": 1174773719,
"author_time": 1174773719,
"commit_timezone": 0,
"author_timezone": 0,
"message": b"Merge ../b\n",
}
commit = make_commit(**attrs)
self.assert_copy(commit)
def test_blob_copy(self):
blob = make_object(Blob, data=b"i am a blob")
self.assert_copy(blob)
def test_tree_copy(self):
blob = make_object(Blob, data=b"i am a blob")
tree = Tree()
tree[b"blob"] = (stat.S_IFREG, blob.id)
self.assert_copy(tree)
def test_tag_copy(self):
tag = make_object(
Tag,
name=b"tag",
message=b"",
tagger=b"Tagger <<EMAIL>>",
tag_time=12345,
tag_timezone=0,
object=(Commit, b"0" * 40),
)
self.assert_copy(tag)
class ShaFileSerializeTests(TestCase):
"""`ShaFile` objects only gets serialized once if they haven't changed."""
@contextmanager
def assert_serialization_on_change(
self, obj, needs_serialization_after_change=True
):
old_id = obj.id
self.assertFalse(obj._needs_serialization)
yield obj
if needs_serialization_after_change:
self.assertTrue(obj._needs_serialization)
else:
self.assertFalse(obj._needs_serialization)
new_id = obj.id
self.assertFalse(obj._needs_serialization)
self.assertNotEqual(old_id, new_id)
def test_commit_serialize(self):
attrs = {
"tree": b"d80c186a03f423a81b39df39dc87fd269736ca86",
"parents": [
b"ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd",
b"4cffe90e0a41ad3f5190079d7c8f036bde29cbe6",
],
"author": b"<NAME> <<EMAIL>>",
"committer": b"<NAME> <<EMAIL>>",
"commit_time": 1174773719,
"author_time": 1174773719,
"commit_timezone": 0,
"author_timezone": 0,
"message": b"Merge ../b\n",
}
commit = make_commit(**attrs)
with self.assert_serialization_on_change(commit):
commit.parents = [b"ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd"]
def test_blob_serialize(self):
blob = make_object(Blob, data=b"i am a blob")
with self.assert_serialization_on_change(
blob, needs_serialization_after_change=False
):
blob.data = b"i am another blob"
def test_tree_serialize(self):
blob = make_object(Blob, data=b"i am a blob")
tree = Tree()
tree[b"blob"] = (stat.S_IFREG, blob.id)
with self.assert_serialization_on_change(tree):
tree[b"blob2"] = (stat.S_IFREG, blob.id)
def test_tag_serialize(self):
tag = make_object(
Tag,
name=b"tag",
message=b"",
tagger=b"Tagger <<EMAIL>>",
tag_time=12345,
tag_timezone=0,
object=(Commit, b"0" * 40),
)
with self.assert_serialization_on_change(tag):
tag.message = b"new message"
def test_tag_serialize_time_error(self):
with self.assertRaises(ObjectFormatException):
tag = make_object(
Tag,
name=b"tag",
message=b"some message",
tagger=b"Tagger <<EMAIL>> 1174773719+0000",
object=(Commit, b"0" * 40),
)
tag._deserialize(tag._serialize())
class PrettyFormatTreeEntryTests(TestCase):
def test_format(self):
self.assertEqual(
"40000 tree 40820c38cfb182ce6c8b261555410d8382a5918b\tfoo\n",
pretty_format_tree_entry(
b"foo", 0o40000, b"40820c38cfb182ce6c8b261555410d8382a5918b"
),
)
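# Illustrative sketch (not part of the upstream test suite): the public calls
# exercised in this file can be combined to build a tiny object graph by hand.
# Every call below appears in the tests above; the names and values are
# hypothetical examples.
#
#   blob = Blob.from_string(b"hello\n")
#   tree = Tree()
#   tree.add(b"hello.txt", 0o100644, blob.id)
#   commit = Commit()
#   commit.tree = tree.id
#   commit.author = commit.committer = b"Example Author <author@example.com>"
#   commit.commit_time = commit.author_time = 1174773719
#   commit.commit_timezone = commit.author_timezone = 0
#   commit.message = b"Add hello.txt\n"
#   commit.id  # sha of the serialized commit object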
```
#### File: dulwich/tests/test_patch.py
```python
from io import BytesIO, StringIO
from dulwich.objects import (
Blob,
Commit,
S_IFGITLINK,
Tree,
)
from dulwich.object_store import (
MemoryObjectStore,
)
from dulwich.patch import (
get_summary,
git_am_patch_split,
write_blob_diff,
write_commit_patch,
write_object_diff,
write_tree_diff,
)
from dulwich.tests import (
SkipTest,
TestCase,
)
class WriteCommitPatchTests(TestCase):
def test_simple_bytesio(self):
f = BytesIO()
c = Commit()
c.committer = c.author = b"Jelmer <<EMAIL>>"
c.commit_time = c.author_time = 1271350201
c.commit_timezone = c.author_timezone = 0
c.message = b"This is the first line\nAnd this is the second line.\n"
c.tree = Tree().id
write_commit_patch(f, c, b"CONTENTS", (1, 1), version="custom")
f.seek(0)
lines = f.readlines()
self.assertTrue(
lines[0].startswith(b"From 0b0d34d1b5b596c928adc9a727a4b9e03d025298")
)
self.assertEqual(lines[1], b"From: Jelmer <<EMAIL>>\n")
self.assertTrue(lines[2].startswith(b"Date: "))
self.assertEqual(
[
b"Subject: [PATCH 1/1] This is the first line\n",
b"And this is the second line.\n",
b"\n",
b"\n",
b"---\n",
],
lines[3:8],
)
self.assertEqual([b"CONTENTS-- \n", b"custom\n"], lines[-2:])
if len(lines) >= 12:
# diffstat may not be present
self.assertEqual(lines[8], b" 0 files changed\n")
class ReadGitAmPatch(TestCase):
def test_extract_string(self):
text = b"""\
From ff643aae102d8870cac88e8f007e70f58f3a7363 Mon Sep 17 00:00:00 2001
From: <NAME> <<EMAIL>>
Date: Thu, 15 Apr 2010 15:40:28 +0200
Subject: [PATCH 1/2] Remove executable bit from prey.ico (triggers a warning).
---
pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
--
1.7.0.4
"""
c, diff, version = git_am_patch_split(StringIO(text.decode("utf-8")), "utf-8")
self.assertEqual(b"<NAME> <<EMAIL>>", c.committer)
self.assertEqual(b"<NAME> <<EMAIL>>", c.author)
self.assertEqual(
b"Remove executable bit from prey.ico " b"(triggers a warning).\n",
c.message,
)
self.assertEqual(
b""" pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
""",
diff,
)
self.assertEqual(b"1.7.0.4", version)
def test_extract_bytes(self):
text = b"""\
From ff643aae102d8870cac88e8f007e70f58f3a7363 Mon Sep 17 00:00:00 2001
From: <NAME> <<EMAIL>>
Date: Thu, 15 Apr 2010 15:40:28 +0200
Subject: [PATCH 1/2] Remove executable bit from prey.ico (triggers a warning).
---
pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
--
1.7.0.4
"""
c, diff, version = git_am_patch_split(BytesIO(text))
self.assertEqual(b"<NAME> <<EMAIL>>", c.committer)
self.assertEqual(b"<NAME> <<EMAIL>>", c.author)
self.assertEqual(
b"Remove executable bit from prey.ico " b"(triggers a warning).\n",
c.message,
)
self.assertEqual(
b""" pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
""",
diff,
)
self.assertEqual(b"1.7.0.4", version)
def test_extract_spaces(self):
text = b"""From ff643aae102d8870cac88e8f007e70f58f3a7363 Mon Sep 17 00:00:00 2001
From: <NAME> <<EMAIL>>
Date: Thu, 15 Apr 2010 15:40:28 +0200
Subject: [Dulwich-users] [PATCH] Added unit tests for
dulwich.object_store.tree_lookup_path.
* dulwich/tests/test_object_store.py
(TreeLookupPathTests): This test case contains a few tests that ensure the
tree_lookup_path function works as expected.
---
pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
--
1.7.0.4
"""
c, diff, version = git_am_patch_split(BytesIO(text), "utf-8")
self.assertEqual(
b"""\
Added unit tests for dulwich.object_store.tree_lookup_path.
* dulwich/tests/test_object_store.py
(TreeLookupPathTests): This test case contains a few tests that ensure the
tree_lookup_path function works as expected.
""",
c.message,
)
def test_extract_pseudo_from_header(self):
text = b"""From ff643aae102d8870cac88e8f007e70f58f3a7363 Mon Sep 17 00:00:00 2001
From: <NAME> <<EMAIL>>
Date: Thu, 15 Apr 2010 15:40:28 +0200
Subject: [Dulwich-users] [PATCH] Added unit tests for
dulwich.object_store.tree_lookup_path.
From: <NAME> <<EMAIL>>
* dulwich/tests/test_object_store.py
(TreeLookupPathTests): This test case contains a few tests that ensure the
tree_lookup_path function works as expected.
---
pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
--
1.7.0.4
"""
c, diff, version = git_am_patch_split(BytesIO(text), "utf-8")
self.assertEqual(b"<NAME> <<EMAIL>>", c.author)
self.assertEqual(
b"""\
Added unit tests for dulwich.object_store.tree_lookup_path.
* dulwich/tests/test_object_store.py
(TreeLookupPathTests): This test case contains a few tests that ensure the
tree_lookup_path function works as expected.
""",
c.message,
)
def test_extract_no_version_tail(self):
text = b"""\
From ff643aae102d8870cac88e8f007e70f58f3a7363 Mon Sep 17 00:00:00 2001
From: <NAME> <<EMAIL>>
Date: Thu, 15 Apr 2010 15:40:28 +0200
Subject: [Dulwich-users] [PATCH] Added unit tests for
dulwich.object_store.tree_lookup_path.
From: <NAME> <<EMAIL>>
---
pixmaps/prey.ico | Bin 9662 -> 9662 bytes
1 files changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 pixmaps/prey.ico
"""
c, diff, version = git_am_patch_split(BytesIO(text), "utf-8")
self.assertEqual(None, version)
def test_extract_mercurial(self):
raise SkipTest(
"git_am_patch_split doesn't handle Mercurial patches " "properly yet"
)
expected_diff = """\
diff --git a/dulwich/tests/test_patch.py b/dulwich/tests/test_patch.py
--- a/dulwich/tests/test_patch.py
+++ b/dulwich/tests/test_patch.py
@@ -158,7 +158,7 @@
'''
c, diff, version = git_am_patch_split(BytesIO(text))
- self.assertIs(None, version)
+ self.assertEqual(None, version)
class DiffTests(TestCase):
"""
text = (
"""\
From dulwich-users-bounces+<EMAIL>mer=sam<EMAIL>@lists.launchpad.net \
Mon Nov 29 00:58:18 2010
Date: Sun, 28 Nov 2010 17:57:27 -0600
From: <NAME> <<EMAIL>>
To: dulwich-users <<EMAIL>>
Subject: [Dulwich-users] [PATCH] test_patch: fix tests on Python 2.6
Content-Transfer-Encoding: 8bit
Change-Id: I5e51313d4ae3a65c3f00c665002a7489121bb0d6
%s
_______________________________________________
Mailing list: https://launchpad.net/~dulwich-users
Post to : <EMAIL>
Unsubscribe : https://launchpad.net/~dulwich-users
More help : https://help.launchpad.net/ListHelp
"""
% expected_diff
)
c, diff, version = git_am_patch_split(BytesIO(text))
self.assertEqual(expected_diff, diff)
self.assertEqual(None, version)
class DiffTests(TestCase):
"""Tests for write_blob_diff and write_tree_diff."""
def test_blob_diff(self):
f = BytesIO()
write_blob_diff(
f,
(b"foo.txt", 0o644, Blob.from_string(b"old\nsame\n")),
(b"bar.txt", 0o644, Blob.from_string(b"new\nsame\n")),
)
self.assertEqual(
[
b"diff --git a/foo.txt b/bar.txt",
b"index 3b0f961..a116b51 644",
b"--- a/foo.txt",
b"+++ b/bar.txt",
b"@@ -1,2 +1,2 @@",
b"-old",
b"+new",
b" same",
],
f.getvalue().splitlines(),
)
def test_blob_add(self):
f = BytesIO()
write_blob_diff(
f,
(None, None, None),
(b"bar.txt", 0o644, Blob.from_string(b"new\nsame\n")),
)
self.assertEqual(
[
b"diff --git a/bar.txt b/bar.txt",
b"new file mode 644",
b"index 0000000..a116b51",
b"--- /dev/null",
b"+++ b/bar.txt",
b"@@ -0,0 +1,2 @@",
b"+new",
b"+same",
],
f.getvalue().splitlines(),
)
def test_blob_remove(self):
f = BytesIO()
write_blob_diff(
f,
(b"bar.txt", 0o644, Blob.from_string(b"new\nsame\n")),
(None, None, None),
)
self.assertEqual(
[
b"diff --git a/bar.txt b/bar.txt",
b"deleted file mode 644",
b"index a116b51..0000000",
b"--- a/bar.txt",
b"+++ /dev/null",
b"@@ -1,2 +0,0 @@",
b"-new",
b"-same",
],
f.getvalue().splitlines(),
)
def test_tree_diff(self):
f = BytesIO()
store = MemoryObjectStore()
added = Blob.from_string(b"add\n")
removed = Blob.from_string(b"removed\n")
changed1 = Blob.from_string(b"unchanged\nremoved\n")
changed2 = Blob.from_string(b"unchanged\nadded\n")
unchanged = Blob.from_string(b"unchanged\n")
tree1 = Tree()
tree1.add(b"removed.txt", 0o644, removed.id)
tree1.add(b"changed.txt", 0o644, changed1.id)
tree1.add(b"unchanged.txt", 0o644, changed1.id)
tree2 = Tree()
tree2.add(b"added.txt", 0o644, added.id)
tree2.add(b"changed.txt", 0o644, changed2.id)
tree2.add(b"unchanged.txt", 0o644, changed1.id)
store.add_objects(
[
(o, None)
for o in [
tree1,
tree2,
added,
removed,
changed1,
changed2,
unchanged,
]
]
)
write_tree_diff(f, store, tree1.id, tree2.id)
self.assertEqual(
[
b"diff --git a/added.txt b/added.txt",
b"new file mode 644",
b"index 0000000..76d4bb8",
b"--- /dev/null",
b"+++ b/added.txt",
b"@@ -0,0 +1 @@",
b"+add",
b"diff --git a/changed.txt b/changed.txt",
b"index bf84e48..1be2436 644",
b"--- a/changed.txt",
b"+++ b/changed.txt",
b"@@ -1,2 +1,2 @@",
b" unchanged",
b"-removed",
b"+added",
b"diff --git a/removed.txt b/removed.txt",
b"deleted file mode 644",
b"index 2c3f0b3..0000000",
b"--- a/removed.txt",
b"+++ /dev/null",
b"@@ -1 +0,0 @@",
b"-removed",
],
f.getvalue().splitlines(),
)
def test_tree_diff_submodule(self):
f = BytesIO()
store = MemoryObjectStore()
tree1 = Tree()
tree1.add(
b"asubmodule",
S_IFGITLINK,
b"06d0bdd9e2e20377b3180e4986b14c8549b393e4",
)
tree2 = Tree()
tree2.add(
b"asubmodule",
S_IFGITLINK,
b"cc975646af69f279396d4d5e1379ac6af80ee637",
)
store.add_objects([(o, None) for o in [tree1, tree2]])
write_tree_diff(f, store, tree1.id, tree2.id)
self.assertEqual(
[
b"diff --git a/asubmodule b/asubmodule",
b"index 06d0bdd..cc97564 160000",
b"--- a/asubmodule",
b"+++ b/asubmodule",
b"@@ -1 +1 @@",
b"-Subproject commit <PASSWORD>",
b"+Subproject commit <PASSWORD>",
],
f.getvalue().splitlines(),
)
def test_object_diff_blob(self):
f = BytesIO()
b1 = Blob.from_string(b"old\nsame\n")
b2 = Blob.from_string(b"new\nsame\n")
store = MemoryObjectStore()
store.add_objects([(b1, None), (b2, None)])
write_object_diff(
f, store, (b"foo.txt", 0o644, b1.id), (b"bar.txt", 0o644, b2.id)
)
self.assertEqual(
[
b"diff --git a/foo.txt b/bar.txt",
b"index 3b0f961..a116b51 644",
b"--- a/foo.txt",
b"+++ b/bar.txt",
b"@@ -1,2 +1,2 @@",
b"-old",
b"+new",
b" same",
],
f.getvalue().splitlines(),
)
def test_object_diff_add_blob(self):
f = BytesIO()
store = MemoryObjectStore()
b2 = Blob.from_string(b"new\nsame\n")
store.add_object(b2)
write_object_diff(f, store, (None, None, None), (b"bar.txt", 0o644, b2.id))
self.assertEqual(
[
b"diff --git a/bar.txt b/bar.txt",
b"new file mode 644",
b"index 0000000..a116b51",
b"--- /dev/null",
b"+++ b/bar.txt",
b"@@ -0,0 +1,2 @@",
b"+new",
b"+same",
],
f.getvalue().splitlines(),
)
def test_object_diff_remove_blob(self):
f = BytesIO()
b1 = Blob.from_string(b"new\nsame\n")
store = MemoryObjectStore()
store.add_object(b1)
write_object_diff(f, store, (b"bar.txt", 0o644, b1.id), (None, None, None))
self.assertEqual(
[
b"diff --git a/bar.txt b/bar.txt",
b"deleted file mode 644",
b"index a116b51..0000000",
b"--- a/bar.txt",
b"+++ /dev/null",
b"@@ -1,2 +0,0 @@",
b"-new",
b"-same",
],
f.getvalue().splitlines(),
)
def test_object_diff_bin_blob_force(self):
f = BytesIO()
# Prepare two slightly different PNG headers
b1 = Blob.from_string(
b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"
b"\x00\x00\x00\x0d\x49\x48\x44\x52"
b"\x00\x00\x01\xd5\x00\x00\x00\x9f"
b"\x08\x04\x00\x00\x00\x05\x04\x8b"
)
b2 = Blob.from_string(
b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"
b"\x00\x00\x00\x0d\x49\x48\x44\x52"
b"\x00\x00\x01\xd5\x00\x00\x00\x9f"
b"\x08\x03\x00\x00\x00\x98\xd3\xb3"
)
store = MemoryObjectStore()
store.add_objects([(b1, None), (b2, None)])
write_object_diff(
f,
store,
(b"foo.png", 0o644, b1.id),
(b"bar.png", 0o644, b2.id),
diff_binary=True,
)
self.assertEqual(
[
b"diff --git a/foo.png b/bar.png",
b"index f73e47d..06364b7 644",
b"--- a/foo.png",
b"+++ b/bar.png",
b"@@ -1,4 +1,4 @@",
b" \x89PNG",
b" \x1a",
b" \x00\x00\x00",
b"-IHDR\x00\x00\x01\xd5\x00\x00\x00"
b"\x9f\x08\x04\x00\x00\x00\x05\x04\x8b",
b"\\ No newline at end of file",
b"+IHDR\x00\x00\x01\xd5\x00\x00\x00\x9f"
b"\x08\x03\x00\x00\x00\x98\xd3\xb3",
b"\\ No newline at end of file",
],
f.getvalue().splitlines(),
)
def test_object_diff_bin_blob(self):
f = BytesIO()
# Prepare two slightly different PNG headers
b1 = Blob.from_string(
b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"
b"\x00\x00\x00\x0d\x49\x48\x44\x52"
b"\x00\x00\x01\xd5\x00\x00\x00\x9f"
b"\x08\x04\x00\x00\x00\x05\x04\x8b"
)
b2 = Blob.from_string(
b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"
b"\x00\x00\x00\x0d\x49\x48\x44\x52"
b"\x00\x00\x01\xd5\x00\x00\x00\x9f"
b"\x08\x03\x00\x00\x00\x98\xd3\xb3"
)
store = MemoryObjectStore()
store.add_objects([(b1, None), (b2, None)])
write_object_diff(
f, store, (b"foo.png", 0o644, b1.id), (b"bar.png", 0o644, b2.id)
)
self.assertEqual(
[
b"diff --git a/foo.png b/bar.png",
b"index f73e47d..06364b7 644",
b"Binary files a/foo.png and b/bar.png differ",
],
f.getvalue().splitlines(),
)
def test_object_diff_add_bin_blob(self):
f = BytesIO()
b2 = Blob.from_string(
b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"
b"\x00\x00\x00\x0d\x49\x48\x44\x52"
b"\x00\x00\x01\xd5\x00\x00\x00\x9f"
b"\x08\x03\x00\x00\x00\x98\xd3\xb3"
)
store = MemoryObjectStore()
store.add_object(b2)
write_object_diff(f, store, (None, None, None), (b"bar.png", 0o644, b2.id))
self.assertEqual(
[
b"diff --git a/bar.png b/bar.png",
b"new file mode 644",
b"index 0000000..06364b7",
b"Binary files /dev/null and b/bar.png differ",
],
f.getvalue().splitlines(),
)
def test_object_diff_remove_bin_blob(self):
f = BytesIO()
b1 = Blob.from_string(
b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"
b"\x00\x00\x00\x0d\x49\x48\x44\x52"
b"\x00\x00\x01\xd5\x00\x00\x00\x9f"
b"\x08\x04\x00\x00\x00\x05\x04\x8b"
)
store = MemoryObjectStore()
store.add_object(b1)
write_object_diff(f, store, (b"foo.png", 0o644, b1.id), (None, None, None))
self.assertEqual(
[
b"diff --git a/foo.png b/foo.png",
b"deleted file mode 644",
b"index f73e47d..0000000",
b"Binary files a/foo.png and /dev/null differ",
],
f.getvalue().splitlines(),
)
def test_object_diff_kind_change(self):
f = BytesIO()
b1 = Blob.from_string(b"new\nsame\n")
store = MemoryObjectStore()
store.add_object(b1)
write_object_diff(
f,
store,
(b"bar.txt", 0o644, b1.id),
(
b"bar.txt",
0o160000,
b"06d0bdd9e2e20377b3180e4986b14c8549b393e4",
),
)
self.assertEqual(
[
b"diff --git a/bar.txt b/bar.txt",
b"old file mode 644",
b"new file mode 160000",
b"index a116b51..06d0bdd 160000",
b"--- a/bar.txt",
b"+++ b/bar.txt",
b"@@ -1,2 +1 @@",
b"-new",
b"-same",
b"+Subproject commit 06d0bdd9e2e20377b3180e4986b14c8549b393e4",
],
f.getvalue().splitlines(),
)
class GetSummaryTests(TestCase):
def test_simple(self):
c = Commit()
c.committer = c.author = b"Jelmer <<EMAIL>>"
c.commit_time = c.author_time = 1271350201
c.commit_timezone = c.author_timezone = 0
c.message = b"This is the first line\nAnd this is the second line.\n"
c.tree = Tree().id
self.assertEqual("This-is-the-first-line", get_summary(c))
```
|
{
"source": "jessecusack/cookiecutter-bare-bones-python-package",
"score": 2
}
|
#### File: cookiecutter-bare-bones-python-package/hooks/post_gen_project.py
```python
import os
import shutil
def remove(filepath):
if os.path.isfile(filepath):
os.remove(filepath)
elif os.path.isdir(filepath):
shutil.rmtree(filepath)
if not '{{cookiecutter.create_author_file}}' == 'y':
remove('AUTHORS.md')
if '{{ cookiecutter.open_source_license }}' == 'Not open source':
remove('LICENSE')
```
|
{
"source": "jessecusack/cookiecutter-research-project",
"score": 2
}
|
#### File: cookiecutter-research-project/hooks/post_gen_project.py
```python
from glob import glob
import os
import shutil
def remove(filepath):
if os.path.isfile(filepath):
os.remove(filepath)
elif os.path.isdir(filepath):
shutil.rmtree(filepath)
if not '{{cookiecutter.as_python_package}}' == 'y':
remove('{{cookiecutter.project_name.replace(' ', '_').replace('-', '_')}}')
remove('pyproject.toml')
remove('setup.cfg')
if not '{{cookiecutter.include_matlab}}' == 'y':
remove('matlab_toolboxes')
if not '{{cookiecutter.create_author_file}}' == 'y':
remove('AUTHORS.md')
if '{{ cookiecutter.open_source_license }}' == 'Not open source':
remove('LICENSE')
# Make the shell scripts executable
for sh_script in glob("*.sh"):
os.chmod(sh_script, 0o744)
```
|
{
"source": "jessecusack/DIMES_eddy_wave_interactions",
"score": 3
}
|
#### File: DIMES_eddy_wave_interactions/code/convenience_funcs.py
```python
import numpy as np
import utils
from string import ascii_lowercase
def ylabel(label, fig, ax1, ax2, dx=None):
x0 = 0.5*(ax1.get_position().x0 + ax2.get_position().x0)
y0 = 0.5*(ax1.get_position().y0 + ax2.get_position().y1)
if dx is None:
dx = -x0
x0 += dx
fig.text(x0, y0, label, rotation='vertical', verticalalignment='center')
def xlabel(label, fig, ax1, ax2, dy=None):
x0 = 0.5*(ax1.get_position().x0 + ax2.get_position().x1)
y0 = 0.5*(ax1.get_position().y0 + ax2.get_position().y0)
if dy is None:
dy = -y0
y0 += dy
fig.text(x0, y0, label, rotation='horizontal',
horizontalalignment='center')
def axes_labels(fig, axs, dx=0, dy=.01, i0=0, **kwargs):
axs = np.asarray(axs)
for i, ax in enumerate(axs.flat):
bbox = ax.get_position()
fig.text(bbox.x0+dx, bbox.y1+dy, '{})'.format(ascii_lowercase[i0+i]),
**kwargs)
def clump(x, nperseg):
"""Clump messy data."""
idxs = utils.contiguous_regions(~np.isnan(x))
ndats = np.squeeze(np.diff(idxs, axis=1))
nsegs = np.floor(1.*ndats/nperseg).astype(int)
nsegs_tot = np.sum(nsegs)
xclumped = np.empty((nsegs_tot*nperseg, ))
j = 0
for row in idxs:
i0, i1 = row
ndat = i1 - i0
if ndat < nperseg:
continue
nseg = int(np.floor(1.*ndat/nperseg)) # Convert to float and back.
xseg = x[i0:i1]
for i in range(nseg):
xclumped[j*nperseg:(j+1)*nperseg] = xseg[i*nperseg:(i+1)*nperseg]
j += 1
return xclumped
def rose_plot(ax, angles, bins=16, density=None, offset=0, lab_unit="degrees",
start_zero=False, **param_dict):
"""
Plot polar histogram of angles on ax. ax must have been created using
subplot_kw=dict(projection='polar'). Angles are expected in radians.
** This function is copied directly from user Ralph on stackoverflow
at https://stackoverflow.com/a/55067613 **
"""
# Wrap angles to [-pi, pi)
angles = (angles + np.pi) % (2*np.pi) - np.pi
# Set bins symmetrically around zero
if start_zero:
# To have a bin edge at zero use an even number of bins
if bins % 2:
bins += 1
bins = np.linspace(-np.pi, np.pi, num=bins+1)
# Bin data and record counts
count, bin = np.histogram(angles, bins=bins)
# Compute width of each bin
widths = np.diff(bin)
# By default plot density (frequency potentially misleading)
if density is None or density is True:
# Area to assign each bin
area = count / angles.size
# Calculate corresponding bin radius
radius = (area / np.pi)**.5
else:
radius = count
# Plot data on ax
ax.bar(bin[:-1], radius, zorder=1, align='edge', width=widths,
edgecolor='C0', fill=False, linewidth=1)
# Set the direction of the zero angle
ax.set_theta_offset(offset)
# Remove ylabels, they are mostly obstructive and not informative
ax.set_yticks([])
if lab_unit == "radians":
label = ['$0$', r'$\pi/4$', r'$\pi/2$', r'$3\pi/4$',
r'$\pi$', r'$5\pi/4$', r'$3\pi/2$', r'$7\pi/4$']
ax.set_xticklabels(label)
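# Minimal usage sketch for rose_plot (assumes matplotlib.pyplot is imported as
# plt elsewhere; the data below are made up for illustration):
# fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))
# rose_plot(ax, np.random.vonmises(0.0, 2.0, 1000), bins=16)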
def nangmean(arr, axis=None):
"""Geometric mean that ignores NaNs."""
arr = np.asarray(arr)
valids = np.sum(~np.isnan(arr), axis=axis)
prod = np.nanprod(arr, axis=axis)
return np.power(prod, 1. / valids)
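# Example (a sketch with made-up numbers): nangmean([1., 4., np.nan]) ignores
# the NaN and returns sqrt(1*4) = 2.0.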
```
#### File: DIMES_eddy_wave_interactions/code/post_process_data.py
```python
import datetime
import glob
import os
import gsw
import numpy as np
import numpy.ma as ma
import scipy.integrate as igr
import scipy.interpolate as itpl
import scipy.io as io
import scipy.signal as sig
import seawater
import xarray as xr
from matplotlib import path
import munch
import load_data
import moorings as moo
import utils
from oceans.sw_extras import gamma_GP_from_SP_pt
# Data directory
data_in = os.path.expanduser("../data")
data_out = data_in
def esum(ea, eb):
return np.sqrt(ea ** 2 + eb ** 2)
def emult(a, b, ea, eb):
return np.abs(a * b) * np.sqrt((ea / a) ** 2 + (eb / b) ** 2)
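# These follow the usual uncorrelated error-propagation rules (a sketch with
# made-up numbers): absolute errors add in quadrature for sums/differences and
# relative errors add in quadrature for products, e.g. esum(3., 4.) -> 5.0 and
# emult(2., 10., 0.2, 1.) -> 20*sqrt(0.1**2 + 0.1**2) ≈ 2.83.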
# %% [markdown]
# <a id="raw"></a>
# %% [markdown]
# ## Process raw data into a more convenient format
#
# Parameters for raw processing.
# %%
# Corrected levels.
# heights = [-540., -1250., -2100., -3500.]
# Filter cut off (hours)
tc_hrs = 40.0
# Start of time series (matlab datetime)
t_start = 734494.0
# Length of time series
max_len = N_data = 42048
# Data file
raw_data_file = "moorings.mat"
# Index where NaNs start in u and v data from SW mooring
sw_vel_nans = 14027
# Sampling period (minutes)
dt_min = 15.0
# Window length for wave stress quantities and mesoscale strain quantities.
nperseg = 2 ** 9
# Spectra parameters
window = "hanning"
detrend = "constant"
# Extrapolation/interpolation limit above which data will be removed.
dzlim = 100.0
# Integration of spectra parameters. These multiply N and f respectively to set
# the integration limits.
fhi = 1.0
flo = 1.0
flov = 1.0 # When integrating spectra involved in vertical fluxes, get rid of
# the near inertial portion.
# When bandpass filtering windowed data use these params multiplied by f and N
filtlo = 0.9 # times f
filthi = 1.1 # times N
# Interpolation distance that raises flag (m)
zimax = 100.0
dt_sec = dt_min * 60.0 # Sample period in seconds.
dt_day = dt_sec / 86400.0 # Sample period in days.
N_per_day = int(1.0 / dt_day) # Samples per day.
# %% ############### PROCESS RAW DATA #########################################
print("RAW DATA")
###############################################################################
# Load w data for cc mooring and chop from text files. I checked and all the
# data has the same start date and the same length
print("Loading vertical velocity data from text files.")
nortek_files = glob.glob(os.path.join(data_in, "cc_1_*.txt"))
depth = []
for file in nortek_files:
with open(file, "r") as f:
content = f.readlines()
depth.append(int(content[3].split("=")[1].split()[0]))
idxs = np.argsort(depth)
w = np.empty((42573, 12))
datenum = np.empty((42573, 12))
for i in idxs:
YY, MM, DD, hh, W = np.genfromtxt(
nortek_files[i], skip_header=12, usecols=(0, 1, 2, 3, 8), unpack=True
)
YY = YY.astype(int)
MM = MM.astype(int)
DD = DD.astype(int)
mm = (60 * (hh % 1)).astype(int)
hh = np.floor(hh).astype(int)
w[:, i] = W / 100
dates = []
for j in range(len(YY)):
dates.append(datetime.datetime(YY[j], MM[j], DD[j], hh[j], mm[j]))
dates = np.asarray(dates)
datenum[:, i] = utils.datetime_to_datenum(dates)
idx_start = np.searchsorted(datenum[:, 0], t_start)
w = w[idx_start : idx_start + max_len]
# Start prepping raw data from the mat file.
print("Loading raw data file.")
data_path = os.path.join(data_in, raw_data_file)
ds = utils.loadmat(data_path)
cc = ds.pop("c")
nw = ds.pop("nw")
ne = ds.pop("ne")
se = ds.pop("se")
sw = ds.pop("sw")
cc["id"] = "cc"
nw["id"] = "nw"
ne["id"] = "ne"
se["id"] = "se"
sw["id"] = "sw"
moorings = [cc, nw, ne, se, sw]
# Useful information
dt_min = 15.0 # Sample period in minutes.
dt_sec = dt_min * 60.0 # Sample period in seconds.
dt_day = dt_sec / 86400.0 # Sample period in days.
print("Chopping time series.")
for m in moorings:
m["idx_start"] = np.searchsorted(m["Dates"], t_start)
for m in moorings:
m["N_data"] = max_len
m["idx_end"] = m["idx_start"] + max_len
# Chop data to start and end dates.
varl = ["Dates", "Temp", "Sal", "u", "v", "Pres"]
for m in moorings:
for var in varl:
m[var] = m[var][m["idx_start"] : m["idx_end"], ...]
print("Renaming variables.")
print("Interpolating negative pressures.")
for m in moorings:
__, N_levels = m["Pres"].shape
m["N_levels"] = N_levels
# Tile time and pressure
m["t"] = np.tile(m.pop("Dates")[:, np.newaxis], (1, N_levels))
# Fix negative pressures by interpolating nearby data.
fix = m["Pres"] < 0.0
if fix.any():
levs = np.argwhere(np.any(fix, axis=0))[0]
for lev in levs:
x = m["t"][fix[:, lev], lev]
xp = m["t"][~fix[:, lev], lev]
fp = m["Pres"][~fix[:, lev], lev]
m["Pres"][fix[:, lev], lev] = np.interp(x, xp, fp)
# Rename variables
m["P"] = m.pop("Pres")
m["u"] = m["u"] / 100.0
m["v"] = m["v"] / 100.0
m["spd"] = np.sqrt(m["u"] ** 2 + m["v"] ** 2)
m["angle"] = np.angle(m["u"] + 1j * m["v"])
m["Sal"][(m["Sal"] < 33.5) | (m["Sal"] > 34.9)] = np.nan
m["S"] = m.pop("Sal")
m["Temp"][m["Temp"] < -2.0] = np.nan
m["T"] = m.pop("Temp")
# Dimensional quantities.
m["f"] = gsw.f(m["lat"])
m["ll"] = np.array([m["lon"], m["lat"]])
m["z"] = gsw.z_from_p(m["P"], m["lat"])
# Estimate thermodynamic quantities.
m["SA"] = gsw.SA_from_SP(m["S"], m["P"], m["lon"], m["lat"])
m["CT"] = gsw.CT_from_t(m["SA"], m["T"], m["P"])
# specvol_anom = gsw.specvol_anom(m['SA'], m['CT'], m['P'])
# m['sva'] = specvol_anom
cc["wr"] = w
print("Calculating thermodynamics.")
print("Excluding bad data using T-S funnel.")
# Chuck out data outside of TS funnel sensible range.
funnel = np.genfromtxt("funnel.txt")
for m in moorings:
S = m["SA"].flatten()
T = m["CT"].flatten()
p = path.Path(funnel)
in_funnel = p.contains_points(np.vstack((S, T)).T)
fix = np.reshape(~in_funnel, m["SA"].shape)
m["in_funnel"] = ~fix
varl = ["S"]
if fix.any():
levs = np.squeeze(np.argwhere(np.any(fix, axis=0)))
for lev in levs:
x = m["t"][fix[:, lev], lev]
xp = m["t"][~fix[:, lev], lev]
for var in varl:
fp = m[var][~fix[:, lev], lev]
m[var][fix[:, lev], lev] = np.interp(x, xp, fp)
# Re-estimate thermodynamic quantities.
m["SA"] = gsw.SA_from_SP(m["S"], m["P"], m["lon"], m["lat"])
m["CT"] = gsw.CT_from_t(m["SA"], m["T"], m["P"])
print("Calculating neutral density.")
# Estimate the neutral density
for m in moorings:
# Compute potential temperature using the 1983 UNESCO EOS.
m["PT0"] = seawater.ptmp(m["S"], m["T"], m["P"])
# Flatten variables for analysis.
lons = m["lon"] * np.ones_like(m["P"])
lats = m["lat"] * np.ones_like(m["P"])
S_ = m["S"].flatten()
T_ = m["PT0"].flatten()
P_ = m["P"].flatten()
LO_ = lons.flatten()
LA_ = lats.flatten()
gamman = gamma_GP_from_SP_pt(S_, T_, P_, LO_, LA_)
m["gamman"] = np.reshape(gamman, m["P"].shape) + 1000.0
print("Calculating slice gradients at C.")
# Want gradient of density/vel to be local, no large central differences.
slices = [slice(0, 4), slice(4, 6), slice(6, 10), slice(10, 12)]
cc["dgdz"] = np.empty((cc["N_data"], cc["N_levels"]))
cc["dTdz"] = np.empty((cc["N_data"], cc["N_levels"]))
cc["dudz"] = np.empty((cc["N_data"], cc["N_levels"]))
cc["dvdz"] = np.empty((cc["N_data"], cc["N_levels"]))
for sl in slices:
z = cc["z"][:, sl]
g = cc["gamman"][:, sl]
T = cc["T"][:, sl]
u = cc["u"][:, sl]
v = cc["v"][:, sl]
cc["dgdz"][:, sl] = np.gradient(g, axis=1) / np.gradient(z, axis=1)
cc["dTdz"][:, sl] = np.gradient(T, axis=1) / np.gradient(z, axis=1)
cc["dudz"][:, sl] = np.gradient(u, axis=1) / np.gradient(z, axis=1)
cc["dvdz"][:, sl] = np.gradient(v, axis=1) / np.gradient(z, axis=1)
print("Filtering data.")
# Low pass filter data.
tc = tc_hrs * 60.0 * 60.0
fc = 1.0 / tc # Cut off frequency.
normal_cutoff = fc * dt_sec * 2.0 # Nyquist frequency is half 1/dt.
b, a = sig.butter(4, normal_cutoff, btype="lowpass")
varl = [
"z",
"P",
"S",
"T",
"u",
"v",
"wr",
"SA",
"CT",
"gamman",
"dgdz",
"dTdz",
"dudz",
"dvdz",
] # sva
for m in moorings:
for var in varl:
try:
data = m[var].copy()
except KeyError:
continue
m[var + "_m"] = np.nanmean(data, axis=0)
# For the purpose of filtering, fill NaNs with 0 rather than leaving them (SW)
nans = np.isnan(data)
if nans.any():
data[nans] = 0.0
datalo = sig.filtfilt(b, a, data, axis=0)
# Then put nans back...
if nans.any():
datalo[nans] = np.nan
namelo = var + "_lo"
m[namelo] = datalo
namehi = var + "_hi"
m[namehi] = m[var] - m[namelo]
m["spd_lo"] = np.sqrt(m["u_lo"] ** 2 + m["v_lo"] ** 2)
m["angle_lo"] = ma.angle(m["u_lo"] + 1j * m["v_lo"])
m["spd_hi"] = np.sqrt(m["u_hi"] ** 2 + m["v_hi"] ** 2)
m["angle_hi"] = ma.angle(m["u_hi"] + 1j * m["v_hi"])
# %% [markdown]
# Save the raw data.
# %% ##################### SAVE RAW DATA ######################################
io.savemat(os.path.join(data_out, "C_raw.mat"), cc)
io.savemat(os.path.join(data_out, "NW_raw.mat"), nw)
io.savemat(os.path.join(data_out, "NE_raw.mat"), ne)
io.savemat(os.path.join(data_out, "SE_raw.mat"), se)
io.savemat(os.path.join(data_out, "SW_raw.mat"), sw)
# %% [markdown]
# ## Create virtual mooring 'raw'.
# %%
print("VIRTUAL MOORING")
print("Determine maximum knockdown as a function of z.")
zms = np.hstack([m["z"].max(axis=0) for m in moorings if "se" not in m["id"]])
Dzs = np.hstack(
[m["z"].min(axis=0) - m["z"].max(axis=0) for m in moorings if "se" not in m["id"]]
)
zmax_pfit = np.polyfit(zms, Dzs, 2) # Second order polynomial for max knockdown
np.save(
os.path.join(data_out, "zmax_pfit"), np.polyfit(zms, Dzs, 2), allow_pickle=False
)
# Define the knockdown model:
def zmodel(u, zmax, zmax_pfit):
return zmax + np.polyval(zmax_pfit, zmax) * u ** 3
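# A quick sanity check of the knockdown model (hypothetical numbers): with a
# normalised speed u of 0 the instrument sits at its rest depth zmax, while
# u = 1 adds the full fitted maximum knockdown, i.e.
# zmodel(0.0, -540.0, zmax_pfit) == -540.0 and
# zmodel(1.0, -540.0, zmax_pfit) == -540.0 + np.polyval(zmax_pfit, -540.0).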
print("Load model data.")
mluv = xr.load_dataset("../data/mooring_locations_uv1.nc")
mluv = mluv.isel(
t=slice(0, np.argwhere(mluv.u[:, 0, 0].data == 0)[0][0])
) # Get rid of end zeros...
mluv = mluv.assign_coords(lon=mluv.lon)
mluv = mluv.assign_coords(id=["cc", "nw", "ne", "se", "sw"])
mluv["spd"] = (mluv.u ** 2 + mluv.v ** 2) ** 0.5
print("Create virtual mooring 'raw' dataset.")
savedict = {
"cc": {"id": "cc"},
"nw": {"id": "nw"},
"ne": {"id": "ne"},
"se": {"id": "se"},
"sw": {"id": "sw"},
}
mids = ["cc", "nw", "ne", "se", "sw"]
def nearidx(a, v):
return np.argmin(np.abs(np.asarray(a) - v))
for idx, mid in enumerate(mids):
savedict[mid]["lon"] = mluv.lon[idx].data
savedict[mid]["lat"] = mluv.lat[idx].data
izs = []
for i in range(moorings[idx]["N_levels"]):
izs.append(nearidx(mluv.z, moorings[idx]["z"][:, i].max()))
spdm = mluv.spd.isel(z=izs, index=idx).mean(dim="z")
spdn = spdm / spdm.max()
zmax = mluv.z[izs]
zk = zmodel(spdn.data[:, np.newaxis], zmax.data[np.newaxis, :], zmax_pfit)
savedict[mid]["z"] = zk
savedict[mid]["t"] = np.tile(
mluv.t.data[:, np.newaxis], (1, moorings[idx]["N_levels"])
)
fu = itpl.RectBivariateSpline(mluv.t.data, -mluv.z.data, mluv.u[..., idx].data)
fv = itpl.RectBivariateSpline(mluv.t.data, -mluv.z.data, mluv.v[..., idx].data)
uk = fu(mluv.t.data[:, np.newaxis], -zk, grid=False)
vk = fv(mluv.t.data[:, np.newaxis], -zk, grid=False)
savedict[mid]["u"] = uk
savedict[mid]["v"] = vk
io.savemat("../data/virtual_mooring_raw.mat", savedict)
# %% [markdown]
# ## Create virtual mooring 'interpolated'.
# %%
# Corrected levels.
# heights = [-540., -1250., -2100., -3500.]
# Filter cut off (hours)
tc_hrs = 40.0
# Start of time series (matlab datetime)
# t_start = 734494.0
# Length of time series
# max_len = N_data = 42048
# Sampling period (minutes)
dt_min = 60.0
dt_sec = dt_min * 60.0 # Sample period in seconds.
dt_day = dt_sec / 86400.0 # Sample period in days.
N_per_day = int(1.0 / dt_day) # Samples per day.
# Window length for wave stress quantities and mesoscale strain quantities.
nperseg = 2 ** 7
# Spectra parameters
window = "hanning"
detrend = "constant"
# Extrapolation/interpolation limit above which data will be removed.
dzlim = 100.0
# Integration of spectra parameters. These multiply N and f respectively to set
# the integration limits.
fhi = 1.0
flo = 1.0
flov = 1.0 # When integrating spectra involved in vertical fluxes, get rid of
# the near inertial portion.
# %%
moorings = utils.loadmat("../data/virtual_mooring_raw.mat")
cc = moorings.pop("cc")
nw = moorings.pop("nw")
ne = moorings.pop("ne")
se = moorings.pop("se")
sw = moorings.pop("sw")
moorings = [cc, nw, ne, se, sw]
N_data = cc["t"].shape[0]
# %% [markdown]
# Polynomial fits first.
# %%
print("**Generating corrected data**")
# Generate corrected moorings
z = np.concatenate([m["z"].flatten() for m in moorings])
u = np.concatenate([m["u"].flatten() for m in moorings])
v = np.concatenate([m["v"].flatten() for m in moorings])
print("Calculating polynomial coefficients.")
pzu = np.polyfit(z, u, 2)
pzv = np.polyfit(z, v, 2)
# %%
# Additional height in m to add to interpolation height.
hoffset = [-25.0, 50.0, -50.0, 100.0]
pi2 = np.pi * 2.0
nfft = nperseg
levis = [(0, 1, 2, 3), (4, 5), (6, 7, 8, 9), (10, 11)]
Nclevels = len(levis)
spec_kwargs = {
"fs": 1.0 / dt_sec,
"window": window,
"nperseg": nperseg,
"nfft": nfft,
"detrend": detrend,
"axis": 0,
}
idx1 = np.arange(nperseg, N_data, nperseg // 2) # Window end index
idx0 = idx1 - nperseg # Window start index
N_windows = len(idx0)
# Initialise the place holder dictionaries.
c12w = {"N_levels": 12} # Dictionary for raw, windowed data from central mooring
c4w = {"N_levels": Nclevels} # Dictionary for processed, windowed data
c4 = {"N_levels": Nclevels} # Dictionary for processed data
# Dictionaries for raw, windowed data from outer moorings
nw5w, ne5w, se5w, sw5w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings5w = [nw5w, ne5w, se5w, sw5w]
# Dictionaries for processed, windowed data from outer moorings
nw4w, ne4w, se4w, sw4w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings4w = [nw4w, ne4w, se4w, sw4w]
# Initialise the arrays of windowed data
varr = ["t", "z", "u", "v"]
for var in varr:
c12w[var] = np.zeros((nperseg, N_windows, 12))
var4 = [
"t",
"z",
"u",
"v",
"dudx",
"dvdx",
"dudy",
"dvdy",
"dudz",
"dvdz",
"nstrain",
"sstrain",
"vort",
"div",
]
for var in var4:
c4w[var] = np.zeros((nperseg, N_windows, Nclevels))
for var in var4:
c4[var] = np.zeros((N_windows, Nclevels))
# Initialise the arrays of windowed data for the outer moorings
varro = ["z", "u", "v"]
for var in varro:
for m5w in moorings5w:
m5w[var] = np.zeros((nperseg, N_windows, 5))
var4o = ["z", "u", "v"]
for var in var4o:
for m4w in moorings4w:
m4w[var] = np.zeros((nperseg, N_windows, Nclevels))
# for var in var4o:
# for m4 in moorings4:
# m4[var] = np.zeros((N_windows, 4))
# Window the raw data.
for i in range(N_windows):
idx = idx0[i]
for var in varr:
c12w[var][:, i, :] = cc[var][idx : idx + nperseg, :]
for i in range(N_windows):
idx = idx0[i]
for var in varro:
for m5w, m in zip(moorings5w, moorings[1:]):
m5w[var][:, i, :] = m[var][idx : idx + nperseg, :]
print("Interpolating properties.")
# Do the interpolation
for i in range(Nclevels):
# THIS hoffset is important!!!
c4["z"][:, i] = np.mean(c12w["z"][..., levis[i]], axis=(0, -1)) + hoffset[i]
for j in range(N_windows):
zr = c12w["z"][:, j, levis[i]]
ur = c12w["u"][:, j, levis[i]]
vr = c12w["v"][:, j, levis[i]]
zi = c4["z"][j, i]
c4w["z"][:, j, i] = np.mean(zr, axis=-1)
c4w["t"][:, j, i] = c12w["t"][:, j, 0]
c4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
c4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
dudzr = np.gradient(ur, axis=-1) / np.gradient(zr, axis=-1)
dvdzr = np.gradient(vr, axis=-1) / np.gradient(zr, axis=-1)
# Instead of mean, could moo.interp1d
c4w["dudz"][:, j, i] = np.mean(dudzr, axis=-1)
c4w["dvdz"][:, j, i] = np.mean(dvdzr, axis=-1)
for m5w, m4w in zip(moorings5w, moorings4w):
zr = m5w["z"][:, j, :]
ur = m5w["u"][:, j, :]
vr = m5w["v"][:, j, :]
m4w["z"][:, j, i] = np.full((nperseg), zi)
m4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
m4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
print("Filtering windowed data.")
fcorcpd = np.abs(gsw.f(cc["lat"])) * 86400 / pi2
varl = ["u", "v"]
for var in varl:
c4w[var + "_lo"] = utils.butter_filter(
c4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
c4w[var + "_hi"] = c4w[var] - c4w[var + "_lo"]
varl = ["u", "v"]
for var in varl:
for m4w in moorings4w:
m4w[var + "_lo"] = utils.butter_filter(
m4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
m4w[var + "_hi"] = m4w[var] - m4w[var + "_lo"]
c4w["zi"] = np.ones_like(c4w["z"]) * c4["z"]
print("Calculating horizontal gradients.")
# Calculate horizontal gradients
for j in range(N_windows):
ll = np.stack(
([m["lon"] for m in moorings[1:]], [m["lat"] for m in moorings[1:]]), axis=1
)
uv = np.stack(
(
[m4w["u_lo"][:, j, :] for m4w in moorings4w],
[m4w["v_lo"][:, j, :] for m4w in moorings4w],
),
axis=1,
)
dudx, dudy, dvdx, dvdy, vort, div = moo.div_vort_4D(ll[:, 0], ll[:, 1], uv)
nstrain = dudx - dvdy
sstrain = dvdx + dudy
c4w["dudx"][:, j, :] = dudx
c4w["dudy"][:, j, :] = dudy
c4w["dvdx"][:, j, :] = dvdx
c4w["dvdy"][:, j, :] = dvdy
c4w["nstrain"][:, j, :] = nstrain
c4w["sstrain"][:, j, :] = sstrain
c4w["vort"][:, j, :] = vort
c4w["div"][:, j, :] = div
for var in var4:
if var == "z": # Keep z as modified by hoffset.
continue
c4[var] = np.mean(c4w[var], axis=0)
freq, c4w["Puu"] = sig.welch(c4w["u_hi"], **spec_kwargs)
_, c4w["Pvv"] = sig.welch(c4w["v_hi"], **spec_kwargs)
_, c4w["Cuv"] = sig.csd(c4w["u_hi"], c4w["v_hi"], **spec_kwargs)
c4w["freq"] = freq.copy()
# Get rid of annoying tiny values.
svarl = ["Puu", "Pvv", "Cuv"]
for var in svarl:
c4w[var][0, ...] = 0.0
c4[var + "_int"] = np.full((N_windows, 4), np.nan)
# Horizontal azimuth according to Jing 2018
c4w["theta"] = np.arctan2(2.0 * c4w["Cuv"].real, (c4w["Puu"] - c4w["Pvv"])) / 2
# Integration #############################################################
print("Integrating power spectra.")
for var in svarl:
c4w[var + "_cint"] = np.full_like(c4w[var], fill_value=np.nan)
fcor = np.abs(gsw.f(cc["lat"])) / pi2
N_freq = len(freq)
freq_ = np.tile(freq[:, np.newaxis, np.newaxis], (1, N_windows, Nclevels))
# ulim = fhi * np.tile(c4["N"][np.newaxis, ...], (N_freq, 1, 1)) / pi2
ulim = 1e9 # Set a huge upper limit since we don't know what N is...
llim = fcor * flo
use = (freq_ < ulim) & (freq_ > llim)
svarl = ["Puu", "Pvv", "Cuv"]
for var in svarl:
c4[var + "_int"] = igr.simps(use * c4w[var].real, freq, axis=0)
c4w[var + "_cint"] = igr.cumtrapz(use * c4w[var].real, freq, axis=0, initial=0.0)
# Change lower integration limits for vertical components...
llim = fcor * flov
use = (freq_ < ulim) & (freq_ > llim)
# Useful quantities
c4["nstress"] = c4["Puu_int"] - c4["Pvv_int"]
c4["sstress"] = -2.0 * c4["Cuv_int"]
c4["F_horiz"] = (
-0.5 * (c4["Puu_int"] - c4["Pvv_int"]) * c4["nstrain"]
- c4["Cuv_int"] * c4["sstrain"]
)
# ## Now we have to create the model 'truth'...
#
# Load the model data and estimate some gradients.
print("Estimating smoothed gradients (slow).")
mluv = xr.load_dataset("../data/mooring_locations_uv1.nc")
mluv = mluv.isel(
t=slice(0, np.argwhere(mluv.u[:, 0, 0].data == 0)[0][0])
) # Get rid of end zeros...
mluv = mluv.assign_coords(lon=mluv.lon)
mluv = mluv.assign_coords(id=["cc", "nw", "ne", "se", "sw"])
mluv["dudz"] = (["t", "z", "index"], np.gradient(mluv.u, mluv.z, axis=1))
mluv["dvdz"] = (["t", "z", "index"], np.gradient(mluv.v, mluv.z, axis=1))
uv = np.rollaxis(np.stack((mluv.u, mluv.v))[..., 1:], 3, 0)
dudx, dudy, dvdx, dvdy, vort, div = moo.div_vort_4D(mluv.lon[1:], mluv.lat[1:], uv)
nstrain = dudx - dvdy
sstrain = dvdx + dudy
mluv["dudx"] = (["t", "z"], dudx)
mluv["dudy"] = (["t", "z"], dudy)
mluv["dvdx"] = (["t", "z"], dvdx)
mluv["dvdy"] = (["t", "z"], dvdy)
mluv["nstrain"] = (["t", "z"], nstrain)
mluv["sstrain"] = (["t", "z"], sstrain)
mluv["vort"] = (["t", "z"], vort)
mluv["div"] = (["t", "z"], div)
# Smooth the model data in an equivalent way to the real mooring.
dudxs = (
mluv.dudx.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dvdxs = (
mluv.dvdx.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dudys = (
mluv.dudy.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dvdys = (
mluv.dvdy.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
sstrains = (
mluv.sstrain.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
nstrains = (
mluv.nstrain.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
divs = (
mluv.div.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
vorts = (
mluv.vort.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dudzs = (
mluv.dudz.isel(index=0)
.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dvdzs = (
mluv.dvdz.isel(index=0)
.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
# Make spline fits.
fdudx = itpl.RectBivariateSpline(dudxs.t.data, -dudxs.z.data, dudxs.data)
fdvdx = itpl.RectBivariateSpline(dvdxs.t.data, -dvdxs.z.data, dvdxs.data)
fdudy = itpl.RectBivariateSpline(dudys.t.data, -dudys.z.data, dudys.data)
fdvdy = itpl.RectBivariateSpline(dvdys.t.data, -dvdys.z.data, dvdys.data)
fsstrain = itpl.RectBivariateSpline(sstrains.t.data, -sstrains.z.data, sstrains.data)
fnstrain = itpl.RectBivariateSpline(nstrains.t.data, -nstrains.z.data, nstrains.data)
fdiv = itpl.RectBivariateSpline(divs.t.data, -divs.z.data, divs.data)
fvort = itpl.RectBivariateSpline(vorts.t.data, -vorts.z.data, vorts.data)
fdudz = itpl.RectBivariateSpline(dudzs.t.data, -dudzs.z.data, dudzs.data)
fdvdz = itpl.RectBivariateSpline(dvdzs.t.data, -dvdzs.z.data, dvdzs.data)
# Interpolate using splines.
dudxt = fdudx(c4["t"], -c4["z"], grid=False)
dvdxt = fdvdx(c4["t"], -c4["z"], grid=False)
dudyt = fdudy(c4["t"], -c4["z"], grid=False)
dvdyt = fdvdy(c4["t"], -c4["z"], grid=False)
sstraint = fsstrain(c4["t"], -c4["z"], grid=False)
nstraint = fnstrain(c4["t"], -c4["z"], grid=False)
divt = fdiv(c4["t"], -c4["z"], grid=False)
vortt = fvort(c4["t"], -c4["z"], grid=False)
dudzt = fdudz(c4["t"], -c4["z"], grid=False)
dvdzt = fdvdz(c4["t"], -c4["z"], grid=False)
c4["dudxt"] = dudxt
c4["dvdxt"] = dvdxt
c4["dudyt"] = dudyt
c4["dvdyt"] = dvdyt
c4["sstraint"] = sstraint
c4["nstraint"] = nstraint
c4["divt"] = divt
c4["vortt"] = vortt
c4["dudzt"] = dudzt
c4["dvdzt"] = dvdzt
# %%
# %% ########################## SAVE CORRECTED FILES ##########################
io.savemat("../data/virtual_mooring_interpolated.mat", c4)
io.savemat("../data/virtual_mooring_interpolated_windowed.mat", c4w)
# %% [markdown]
# Signal to noise ratios.
# %%
print("Estimating signal to noise ratios.")
M = munch.munchify(utils.loadmat('../data/virtual_mooring_interpolated.mat'))
# shear strain
dsstrain = M.sstrain - M.sstraint
SNR_sstrain = M.sstrain.var(axis=0)/dsstrain.var(axis=0)
np.save('../data/SNR_sstrain', SNR_sstrain, allow_pickle=False)
# normal strain
dnstrain = M.nstrain - M.nstraint
SNR_nstrain = M.nstrain.var(axis=0)/dnstrain.var(axis=0)
np.save('../data/SNR_nstrain', SNR_nstrain, allow_pickle=False)
# zonal shear
ddudz = M.dudz - M.dudzt
SNR_dudz = M.dudz.var(axis=0)/ddudz.var(axis=0)
np.save('../data/SNR_dudz', SNR_dudz, allow_pickle=False)
# meridional shear
ddvdz = M.dvdz - M.dvdzt
SNR_dvdz = M.dvdz.var(axis=0)/ddvdz.var(axis=0)
np.save('../data/SNR_dvdz', SNR_dvdz, allow_pickle=False)
# divergence
ddiv = M.div - M.divt
SNR_div = M.div.var(axis=0)/ddiv.var(axis=0)
np.save('../data/SNR_div', SNR_div, allow_pickle=False)
# %% [markdown]
# <a id="corrected"></a>
# %% [markdown]
# ## Generate interpolated data.
#
# Set parameters again.
# %%
# Corrected levels.
# heights = [-540., -1250., -2100., -3500.]
# Filter cut off (hours)
tc_hrs = 40.0
# Start of time series (matlab datetime)
t_start = 734494.0
# Length of time series
max_len = N_data = 42048
# Data file
raw_data_file = "moorings.mat"
# Index where NaNs start in u and v data from SW mooring
sw_vel_nans = 14027
# Sampling period (minutes)
dt_min = 15.0
# Window length for wave stress quantities and mesoscale strain quantities.
nperseg = 2 ** 9
# Spectra parameters
window = "hanning"
detrend = "constant"
# Extrapolation/interpolation limit above which data will be removed.
dzlim = 100.0
# Integration of spectra parameters. These multiply N and f respectively to set
# the integration limits.
fhi = 1.0
flo = 1.0
flov = 1.0 # When integrating spectra involved in vertical fluxes, get rid of
# the near inertial portion.
# When bandpass filtering windowed data use these params multiplied by f and N
filtlo = 0.9 # times f
filthi = 1.1 # times N
# Interpolation distance that raises flag (m)
zimax = 100.0
dt_sec = dt_min * 60.0 # Sample period in seconds.
dt_day = dt_sec / 86400.0 # Sample period in days.
N_per_day = int(1.0 / dt_day) # Samples per day.
# %% [markdown]
# Polynomial fits first.
# %%
print("REAL MOORING INTERPOLATION")
print("**Generating corrected data**")
moorings = load_data.load_my_data()
cc, nw, ne, se, sw = moorings
# Generate corrected moorings
T = np.concatenate([m["T"].flatten() for m in moorings])
S = np.concatenate([m["S"].flatten() for m in moorings])
z = np.concatenate([m["z"].flatten() for m in moorings])
u = np.concatenate([m["u"].flatten() for m in moorings])
v = np.concatenate([m["v"].flatten() for m in moorings])
g = np.concatenate([m["gamman"].flatten() for m in moorings])
# SW problems...
nans = np.isnan(u) | np.isnan(v)
print("Calculating polynomial coefficients.")
pzT = np.polyfit(z[~nans], T[~nans], 3)
pzS = np.polyfit(z[~nans], S[~nans], 3)
pzg = np.polyfit(z[~nans], g[~nans], 3)
pzu = np.polyfit(z[~nans], u[~nans], 2)
pzv = np.polyfit(z[~nans], v[~nans], 2)
# %%
# Additional height in m to add to interpolation height.
hoffset = [-25.0, 50.0, -50.0, 100.0]
pi2 = np.pi * 2.0
nfft = nperseg
levis = [(0, 1, 2, 3), (4, 5), (6, 7, 8, 9), (10, 11)]
Nclevels = len(levis)
spec_kwargs = {
"fs": 1.0 / dt_sec,
"window": window,
"nperseg": nperseg,
"nfft": nfft,
"detrend": detrend,
"axis": 0,
}
idx1 = np.arange(nperseg, N_data, nperseg // 2) # Window end index
idx0 = idx1 - nperseg # Window start index
N_windows = len(idx0)
# Initialise the place holder dictionaries.
c12w = {"N_levels": 12} # Dictionary for raw, windowed data from central mooring
c4w = {"N_levels": Nclevels} # Dictionary for processed, windowed data
c4 = {"N_levels": Nclevels} # Dictionary for processed data
# Dictionaries for raw, windowed data from outer moorings
nw5w, ne5w, se5w, sw5w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings5w = [nw5w, ne5w, se5w, sw5w]
# Dictionaries for processed, windowed data from outer moorings
nw4w, ne4w, se4w, sw4w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings4w = [nw4w, ne4w, se4w, sw4w]
# Initialise the arrays of windowed data
varr = ["t", "z", "u", "v", "gamman", "S", "T", "P"]
for var in varr:
c12w[var] = np.zeros((nperseg, N_windows, cc["N_levels"]))
var4 = [
"t",
"z",
"u",
"v",
"gamman",
"dudx",
"dvdx",
"dudy",
"dvdy",
"dudz",
"dvdz",
"dgdz",
"nstrain",
"sstrain",
"vort",
"N2",
]
for var in var4:
c4w[var] = np.zeros((nperseg, N_windows, Nclevels))
for var in var4:
c4[var] = np.zeros((N_windows, Nclevels))
# Initialise the arrays of windowed data for the outer moorings
varro = ["z", "u", "v"]
for var in varro:
for m5w in moorings5w:
m5w[var] = np.zeros((nperseg, N_windows, 5))
var4o = ["z", "u", "v"]
for var in var4o:
for m4w in moorings4w:
m4w[var] = np.zeros((nperseg, N_windows, Nclevels))
# for var in var4o:
# for m4 in moorings4:
# m4[var] = np.zeros((N_windows, 4))
# Window the raw data.
for i in range(N_windows):
idx = idx0[i]
for var in varr:
c12w[var][:, i, :] = cc[var][idx : idx + nperseg, :]
for i in range(N_windows):
idx = idx0[i]
for var in varro:
for m5w, m in zip(moorings5w, moorings[1:]):
m5w[var][:, i, :] = m[var][idx : idx + nperseg, :]
c4["interp_far_flag"] = np.full_like(c4["u"], False, dtype=bool)
print("Interpolating properties.")
# Do the interpolation
for i in range(Nclevels):
# THIS hoffset is important!!!
c4["z"][:, i] = np.mean(c12w["z"][..., levis[i]], axis=(0, -1)) + hoffset[i]
for j in range(N_windows):
zr = c12w["z"][:, j, levis[i]]
ur = c12w["u"][:, j, levis[i]]
vr = c12w["v"][:, j, levis[i]]
gr = c12w["gamman"][:, j, levis[i]]
Sr = c12w["S"][:, j, levis[i]]
Tr = c12w["T"][:, j, levis[i]]
Pr = c12w["P"][:, j, levis[i]]
zi = c4["z"][j, i]
c4["interp_far_flag"][j, i] = np.any(np.min(np.abs(zr - zi), axis=-1) > zimax)
c4w["z"][:, j, i] = np.mean(zr, axis=-1)
c4w["t"][:, j, i] = c12w["t"][:, j, 0]
c4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
c4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
c4w["gamman"][:, j, i] = moo.interp_quantity(zr, gr, zi, pzg)
dudzr = np.gradient(ur, axis=-1) / np.gradient(zr, axis=-1)
dvdzr = np.gradient(vr, axis=-1) / np.gradient(zr, axis=-1)
dgdzr = np.gradient(gr, axis=-1) / np.gradient(zr, axis=-1)
N2 = seawater.bfrq(Sr.T, Tr.T, Pr.T, cc["lat"])[0].T
# Instead of mean, could moo.interp1d
c4w["dudz"][:, j, i] = np.mean(dudzr, axis=-1)
c4w["dvdz"][:, j, i] = np.mean(dvdzr, axis=-1)
c4w["dgdz"][:, j, i] = np.mean(dgdzr, axis=-1)
c4w["N2"][:, j, i] = np.mean(N2, axis=-1)
for m5w, m4w in zip(moorings5w, moorings4w):
if (m5w["id"] == "sw") & (
idx1[j] > sw_vel_nans
): # Skip this level because of NaNs
zr = m5w["z"][:, j, (0, 1, 3, 4)]
ur = m5w["u"][:, j, (0, 1, 3, 4)]
vr = m5w["v"][:, j, (0, 1, 3, 4)]
else:
zr = m5w["z"][:, j, :]
ur = m5w["u"][:, j, :]
vr = m5w["v"][:, j, :]
m4w["z"][:, j, i] = np.full((nperseg), zi)
m4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
m4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
print("Filtering windowed data.")
fcorcpd = np.abs(cc["f"]) * 86400 / pi2
Nmean = np.sqrt(np.average(c4w["N2"], weights=sig.hann(nperseg), axis=0))
varl = ["u", "v", "gamman"]
for var in varl:
c4w[var + "_hib"] = np.zeros_like(c4w[var])
c4w[var + "_lo"] = utils.butter_filter(
c4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
c4w[var + "_hi"] = c4w[var] - c4w[var + "_lo"]
for i in range(Nclevels):
for j in range(N_windows):
Nmean_ = Nmean[j, i] * 86400 / pi2
for var in varl:
c4w[var + "_hib"][:, j, i] = utils.butter_filter(
c4w[var][:, j, i],
(filtlo * fcorcpd, filthi * Nmean_),
fs=N_per_day,
btype="band",
)
varl = ["u", "v"]
for var in varl:
for m4w in moorings4w:
m4w[var + "_lo"] = utils.butter_filter(
m4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
m4w[var + "_hi"] = m4w[var] - m4w[var + "_lo"]
c4w["zi"] = np.ones_like(c4w["z"]) * c4["z"]
print("Calculating horizontal gradients.")
# Calculate horizontal gradients
for j in range(N_windows):
ll = np.stack(
([m["lon"] for m in moorings[1:]], [m["lat"] for m in moorings[1:]]), axis=1
)
uv = np.stack(
(
[m4w["u_lo"][:, j, :] for m4w in moorings4w],
[m4w["v_lo"][:, j, :] for m4w in moorings4w],
),
axis=1,
)
dudx, dudy, dvdx, dvdy, vort, _ = moo.div_vort_4D(ll[:, 0], ll[:, 1], uv)
nstrain = dudx - dvdy
sstrain = dvdx + dudy
c4w["dudx"][:, j, :] = dudx
c4w["dudy"][:, j, :] = dudy
c4w["dvdx"][:, j, :] = dvdx
c4w["dvdy"][:, j, :] = dvdy
c4w["nstrain"][:, j, :] = nstrain
c4w["sstrain"][:, j, :] = sstrain
c4w["vort"][:, j, :] = vort
print("Calculating window averages.")
for var in var4 + ["u_lo", "v_lo", "gamman_lo"]:
if var == "z": # Keep z as modified by hoffset.
continue
c4[var] = np.average(c4w[var], weights=sig.hann(nperseg), axis=0)
print("Estimating w and b.")
om = np.fft.fftfreq(nperseg, 15 * 60)
c4w["w_hi"] = np.fft.ifft(
1j
* pi2
* om[:, np.newaxis, np.newaxis]
* np.fft.fft(-c4w["gamman_hi"] / c4["dgdz"], axis=0),
axis=0,
).real
c4w["w_hib"] = np.fft.ifft(
1j
* pi2
* om[:, np.newaxis, np.newaxis]
* np.fft.fft(-c4w["gamman_hib"] / c4["dgdz"], axis=0),
axis=0,
).real
# Estimate buoyancy variables
c4w["b_hi"] = -gsw.grav(-c4["z"], cc["lat"]) * c4w["gamman_hi"] / c4["gamman_lo"]
c4w["b_hib"] = -gsw.grav(-c4["z"], cc["lat"]) * c4w["gamman_hib"] / c4["gamman_lo"]
c4["N"] = np.sqrt(c4["N2"])
print("Estimating covariance spectra.")
freq, c4w["Puu"] = sig.welch(c4w["u_hi"], **spec_kwargs)
_, c4w["Pvv"] = sig.welch(c4w["v_hi"], **spec_kwargs)
_, c4w["Pww"] = sig.welch(c4w["w_hi"], **spec_kwargs)
_, c4w["Pwwg"] = sig.welch(c4w["gamman_hi"] / c4["dgdz"], **spec_kwargs)
c4w["Pwwg"] *= (pi2 * freq[:, np.newaxis, np.newaxis]) ** 2
_, c4w["Pbb"] = sig.welch(c4w["b_hi"], **spec_kwargs)
_, c4w["Cuv"] = sig.csd(c4w["u_hi"], c4w["v_hi"], **spec_kwargs)
_, c4w["Cuwg"] = sig.csd(c4w["u_hi"], c4w["gamman_hi"] / c4["dgdz"], **spec_kwargs)
c4w["Cuwg"] *= -1j * pi2 * freq[:, np.newaxis, np.newaxis]
_, c4w["Cvwg"] = sig.csd(c4w["v_hi"], c4w["gamman_hi"] / c4["dgdz"], **spec_kwargs)
c4w["Cvwg"] *= -1j * pi2 * freq[:, np.newaxis, np.newaxis]
_, c4w["Cub"] = sig.csd(c4w["u_hi"], c4w["b_hi"], **spec_kwargs)
_, c4w["Cvb"] = sig.csd(c4w["v_hi"], c4w["b_hi"], **spec_kwargs)
print("Estimating covariance matrices.")
def cov(x, y, axis=None):
return np.mean((x - np.mean(x, axis=axis)) * (y - np.mean(y, axis=axis)), axis=axis)
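# cov is the (biased) sample covariance of the anomalies, e.g. with made-up
# numbers cov(np.array([1., 2., 3.]), np.array([1., 2., 3.]), axis=0) ≈ 0.667.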
c4["couu"] = cov(c4w["u_hib"], c4w["u_hib"], axis=0)
c4["covv"] = cov(c4w["v_hib"], c4w["v_hib"], axis=0)
c4["coww"] = cov(c4w["w_hib"], c4w["w_hib"], axis=0)
c4["cobb"] = cov(c4w["b_hib"], c4w["b_hib"], axis=0)
c4["couv"] = cov(c4w["u_hib"], c4w["v_hib"], axis=0)
c4["couw"] = cov(c4w["u_hib"], c4w["w_hib"], axis=0)
c4["covw"] = cov(c4w["v_hib"], c4w["w_hib"], axis=0)
c4["coub"] = cov(c4w["u_hib"], c4w["b_hib"], axis=0)
c4["covb"] = cov(c4w["v_hib"], c4w["b_hib"], axis=0)
c4w["freq"] = freq.copy()
# Get rid of annoying tiny values.
svarl = ["Puu", "Pvv", "Pbb", "Cuv", "Cub", "Cvb", "Pwwg", "Cuwg", "Cvwg"]
for var in svarl:
c4w[var][0, ...] = 0.0
c4[var + "_int"] = np.full((N_windows, 4), np.nan)
# Horizontal azimuth according to Jing 2018
c4w["theta"] = np.arctan2(2.0 * c4w["Cuv"].real, (c4w["Puu"] - c4w["Pvv"])) / 2
# Integration #############################################################
print("Integrating power spectra.")
for var in svarl:
c4w[var + "_cint"] = np.full_like(c4w[var], fill_value=np.nan)
fcor = np.abs(cc["f"]) / pi2
N_freq = len(freq)
freq_ = np.tile(freq[:, np.newaxis, np.newaxis], (1, N_windows, Nclevels))
ulim = fhi * np.tile(c4["N"][np.newaxis, ...], (N_freq, 1, 1)) / pi2
llim = fcor * flo
use = (freq_ < ulim) & (freq_ > llim)
svarl = ["Puu", "Pvv", "Pbb", "Cuv", "Pwwg"]
for var in svarl:
c4[var + "_int"] = igr.simps(use * c4w[var].real, freq, axis=0)
c4w[var + "_cint"] = igr.cumtrapz(use * c4w[var].real, freq, axis=0, initial=0.0)
# Change lower integration limits for vertical components...
llim = fcor * flov
use = (freq_ < ulim) & (freq_ > llim)
svarl = ["Cub", "Cvb", "Cuwg", "Cvwg"]
for var in svarl:
c4[var + "_int"] = igr.simps(use * c4w[var].real, freq, axis=0)
c4w[var + "_cint"] = igr.cumtrapz(use * c4w[var].real, freq, axis=0, initial=0.0)
# Ruddick and Joyce effective stress
for var1, var2 in zip(["Tuwg", "Tvwg"], ["Cuwg", "Cvwg"]):
func = use * c4w[var2].real * (1 - fcor ** 2 / freq_ ** 2)
nans = np.isnan(func)
func[nans] = 0.0
c4[var1 + "_int"] = igr.simps(func, freq, axis=0)
func = use * c4w[var2].real * (1 - fcor ** 2 / freq_ ** 2)
nans = np.isnan(func)
func[nans] = 0.0
c4w[var1 + "_cint"] = igr.cumtrapz(func, freq, axis=0, initial=0.0)
# Useful quantities
c4["nstress"] = c4["Puu_int"] - c4["Pvv_int"]
c4["sstress"] = -2.0 * c4["Cuv_int"]
c4["F_horiz"] = (
-0.5 * (c4["Puu_int"] - c4["Pvv_int"]) * c4["nstrain"]
- c4["Cuv_int"] * c4["sstrain"]
)
c4["F_vert"] = (
-(c4["Cuwg_int"] - cc["f"] * c4["Cvb_int"] / c4["N"] ** 2) * c4["dudz"]
- (c4["Cvwg_int"] + cc["f"] * c4["Cub_int"] / c4["N"] ** 2) * c4["dvdz"]
)
c4["F_vert_alt"] = -c4["Tuwg_int"] * c4["dudz"] - c4["Tvwg_int"] * c4["dvdz"]
c4["F_total"] = c4["F_horiz"] + c4["F_vert"]
c4["EPu"] = c4["Cuwg_int"] - cc["f"] * c4["Cvb_int"] / c4["N"] ** 2
c4["EPv"] = c4["Cvwg_int"] + cc["f"] * c4["Cub_int"] / c4["N"] ** 2
##
c4["nstress_cov"] = c4["couu"] - c4["covv"]
c4["sstress_cov"] = -2.0 * c4["couv"]
c4["F_horiz_cov"] = (
-0.5 * (c4["couu"] - c4["covv"]) * c4["nstrain"] - c4["couv"] * c4["sstrain"]
)
c4["F_vert_cov"] = (
-(c4["couw"] - cc["f"] * c4["covb"] / c4["N"] ** 2) * c4["dudz"]
- (c4["covw"] + cc["f"] * c4["coub"] / c4["N"] ** 2) * c4["dvdz"]
)
c4["F_total_cov"] = c4["F_horiz_cov"] + c4["F_vert_cov"]
# %% [markdown]
# Estimate standard error on covariances.
# %%
bootnum = 1000
np.random.seed(12341555)
idxs = np.arange(nperseg, dtype="i2")
# def cov1(xy, axis=0):
# x = xy[..., -1]
# y = xy[..., -1]
# return np.mean((x - np.mean(x, axis=axis))*(y - np.mean(y, axis=axis)), axis=axis)
print("Estimating error on covariance using bootstrap (slow).")
euu_ = np.zeros((bootnum, N_windows, Nclevels))
evv_ = np.zeros((bootnum, N_windows, Nclevels))
eww_ = np.zeros((bootnum, N_windows, Nclevels))
ebb_ = np.zeros((bootnum, N_windows, Nclevels))
euv_ = np.zeros((bootnum, N_windows, Nclevels))
euw_ = np.zeros((bootnum, N_windows, Nclevels))
evw_ = np.zeros((bootnum, N_windows, Nclevels))
eub_ = np.zeros((bootnum, N_windows, Nclevels))
evb_ = np.zeros((bootnum, N_windows, Nclevels))
for i in range(bootnum):
idxs_ = np.random.choice(idxs, nperseg)
u_ = c4w["u_hib"][idxs_, ...]
v_ = c4w["v_hib"][idxs_, ...]
w_ = c4w["w_hib"][idxs_, ...]
b_ = c4w["b_hib"][idxs_, ...]
euu_[i, ...] = cov(u_, u_, axis=0)
evv_[i, ...] = cov(v_, v_, axis=0)
eww_[i, ...] = cov(w_, w_, axis=0)
ebb_[i, ...] = cov(b_, b_, axis=0)
euv_[i, ...] = cov(u_, v_, axis=0)
euw_[i, ...] = cov(u_, w_, axis=0)
evw_[i, ...] = cov(v_, w_, axis=0)
eub_[i, ...] = cov(u_, b_, axis=0)
evb_[i, ...] = cov(v_, b_, axis=0)
c4["euu"] = euu_.std(axis=0)
c4["evv"] = evv_.std(axis=0)
c4["eww"] = eww_.std(axis=0)
c4["ebb"] = ebb_.std(axis=0)
c4["euv"] = euv_.std(axis=0)
c4["euw"] = euw_.std(axis=0)
c4["evw"] = evw_.std(axis=0)
c4["eub"] = eub_.std(axis=0)
c4["evb"] = evb_.std(axis=0)
# %% [markdown]
# Error on gradients.
# %%
finite_diff_err = 0.06 # Assume 6 percent...
SNR_dudz = np.load("../data/SNR_dudz.npy")
SNR_dvdz = np.load("../data/SNR_dvdz.npy")
SNR_nstrain = np.load("../data/SNR_nstrain.npy")
SNR_sstrain = np.load("../data/SNR_sstrain.npy")
ones = np.ones_like(c4["euu"])
c4["edudz"] = ones * np.sqrt(c4["dudz"].var(axis=0) / SNR_dudz)
c4["edvdz"] = ones * np.sqrt(c4["dvdz"].var(axis=0) / SNR_dvdz)
c4["enstrain"] = esum(
ones * np.sqrt(c4["nstrain"].var(axis=0) / SNR_nstrain),
finite_diff_err * c4["nstrain"],
)
c4["esstrain"] = esum(
ones * np.sqrt(c4["sstrain"].var(axis=0) / SNR_sstrain),
finite_diff_err * c4["sstrain"],
)
# %% [markdown]
# Error propagation.
# %%
euumvv = 0.5 * esum(c4["euu"], c4["evv"])
c4["enstress"] = euumvv.copy()
enorm = emult(
-0.5 * (c4["Puu_int"] - c4["Pvv_int"]), c4["nstrain"], euumvv, c4["enstrain"]
)
eshear = emult(c4["Cuv_int"], c4["sstrain"], c4["euv"], c4["esstrain"])
c4["errF_horiz_norm"] = enorm.copy()
c4["errF_horiz_shear"] = eshear.copy()
c4["errF_horiz"] = esum(enorm, eshear)
euumvv = 0.5 * esum(c4["euu"], c4["evv"])
c4["enstress_cov"] = euumvv.copy()
enorm = emult(-0.5 * (c4["couu"] - c4["covv"]), c4["nstrain"], euumvv, c4["enstrain"])
eshear = emult(c4["couv"], c4["sstrain"], c4["euv"], c4["esstrain"])
c4["errF_horiz_norm_cov"] = enorm.copy()
c4["errF_horiz_shear_cov"] = eshear.copy()
c4["errF_horiz_cov"] = esum(enorm, eshear)
euwmvb = esum(c4["euw"], np.abs(cc["f"] / c4["N"] ** 2) * c4["evb"])
evwpub = esum(c4["evw"], np.abs(cc["f"] / c4["N"] ** 2) * c4["eub"])
c4["evstressu"] = euwmvb
c4["evstressv"] = evwpub
edu = emult(
-(c4["Cuwg_int"] - cc["f"] * c4["Cvb_int"] / c4["N"] ** 2),
c4["dudz"],
euwmvb,
c4["edudz"],
)
edv = emult(
-(c4["Cvwg_int"] + cc["f"] * c4["Cub_int"] / c4["N"] ** 2),
c4["dvdz"],
evwpub,
c4["edvdz"],
)
c4["errEPu"] = edu.copy()
c4["errEPv"] = edv.copy()
c4["errF_vert"] = esum(edu, edv)
c4["errEPu_alt"] = emult(-c4["Tuwg_int"], c4["dudz"], c4["euw"], c4["edudz"])
c4["errEPv_alt"] = emult(-c4["Tvwg_int"], c4["dvdz"], c4["evw"], c4["edvdz"])
c4["errF_vert_alt"] = esum(c4["errEPu_alt"], c4["errEPv_alt"])
edu = emult(
-(c4["couw"] - cc["f"] * c4["covb"] / c4["N"] ** 2), c4["dudz"], euwmvb, c4["edudz"]
)
edv = emult(
-(c4["covw"] + cc["f"] * c4["coub"] / c4["N"] ** 2), c4["dvdz"], evwpub, c4["edvdz"]
)
c4["errEPu_cov"] = edu.copy()
c4["errEPv_cov"] = edv.copy()
c4["errF_vert_cov"] = esum(edu, edv)
c4["errF_total"] = esum(c4["errF_vert"], c4["errF_horiz"])
c4["errF_total_cov"] = esum(c4["errF_vert_cov"], c4["errF_horiz_cov"])
# %% [markdown]
# Save the interpolated data.
# %% ########################## SAVE CORRECTED FILES ##########################
io.savemat(os.path.join(data_out, "C_alt.mat"), c4)
io.savemat(os.path.join(data_out, "C_altw.mat"), c4w)
# %% [markdown]
# <a id="ADCP"></a>
# %% [markdown]
# # ADCP Processing
# %% ########################## PROCESS ADCP DATA #############################
print("ADCP PROCESSING")
tf = np.array([16.0, 2.0]) # band pass filter cut off hours
tc_hrs = 40.0 # Low pass cut off (hours)
dt = 0.5 # Data sample period hr
print("Loading ADCP data from file.")
file = os.path.expanduser(os.path.join(data_in, "ladcp_data.mat"))
adcp = utils.loadmat(file)["ladcp2"]
print("Removing all NaN rows.")
varl = ["u", "v", "z"]
for var in varl: # Get rid of the all nan row.
adcp[var] = adcp.pop(var)[:-1, :]
print("Calculating vertical shear.")
z = adcp["z"]
dudz = np.diff(adcp["u"], axis=0) / np.diff(z, axis=0)
dvdz = np.diff(adcp["v"], axis=0) / np.diff(z, axis=0)
nans = np.isnan(dudz) | np.isnan(dvdz)
dudz[nans] = np.nan
dvdz[nans] = np.nan
adcp["zm"] = utils.mid(z, axis=0)
adcp["dudz"] = dudz
adcp["dvdz"] = dvdz
# Low pass filter data.
print("Low pass filtering at {:1.0f} hrs.".format(tc_hrs))
varl = ["u", "v", "dudz", "dvdz"]
for var in varl:
data = adcp[var]
nans = np.isnan(data)
adcp[var + "_m"] = np.nanmean(data, axis=0)
datalo = utils.butter_filter(
utils.interp_nans(adcp["dates"], data, axis=1), 1 / tc_hrs, 1 / dt, btype="low"
)
# Then put nans back...
if nans.any():
datalo[nans] = np.nan
namelo = var + "_lo"
adcp[namelo] = datalo
namehi = var + "_hi"
adcp[namehi] = adcp[var] - adcp[namelo]
# Band pass filter the data.
print("Band pass filtering between {:1.0f} and {:1.0f} hrs.".format(*tf))
varl = ["u", "v", "dudz", "dvdz"]
for var in varl:
data = adcp[var]
nans = np.isnan(data)
databp = utils.butter_filter(
utils.interp_nans(adcp["dates"], data, axis=1), 1 / tf, 1 / dt, btype="band"
)
# Then put nans back...
if nans.any():
databp[nans] = np.nan
namebp = var + "_bp"
adcp[namebp] = databp
io.savemat(os.path.join(data_out, "ADCP.mat"), adcp)
# %% [markdown]
# <a id="VMP"></a>
# %% [markdown]
# ## VMP data
# %%
print("VMP PROCESSING")
vmp = utils.loadmat(os.path.join(data_in, "jc054_vmp_cleaned.mat"))["d"]
box = np.array([[-58.0, -58.0, -57.7, -57.7], [-56.15, -55.9, -55.9, -56.15]]).T
p = path.Path(box)
in_box = p.contains_points(np.vstack((vmp["startlon"], vmp["startlat"])).T)
idxs = np.argwhere(in_box).squeeze()
Np = len(idxs)
print("Isolate profiles in match around mooring.")
for var in vmp:
ndim = np.ndim(vmp[var])
if ndim == 2:
vmp[var] = vmp[var][:, idxs]
if ndim == 1 and vmp[var].size == 36:
vmp[var] = vmp[var][idxs]
print("Rename variables.")
vmp["P"] = vmp.pop("press")
vmp["T"] = vmp.pop("temp")
vmp["S"] = vmp.pop("salin")
print("Deal with profiles where P[0] != 1.")
P_ = np.arange(1.0, 10000.0)
i0o = np.zeros((Np), dtype=int)
i1o = np.zeros((Np), dtype=int)
i0n = np.zeros((Np), dtype=int)
i1n = np.zeros((Np), dtype=int)
pmax = 0.0
for i in range(Np):
nans = np.isnan(vmp["eps"][:, i])
i0o[i] = i0 = np.where(~nans)[0][0]
i1o[i] = i1 = np.where(~nans)[0][-1]
P0 = vmp["P"][i0, i]
P1 = vmp["P"][i1, i]
i0n[i] = np.searchsorted(P_, P0)
i1n[i] = np.searchsorted(P_, P1)
pmax = max(P1, pmax)
P = np.tile(np.arange(1.0, pmax + 2)[:, np.newaxis], (1, len(idxs)))
eps = np.full_like(P, np.nan)
chi = np.full_like(P, np.nan)
T = np.full_like(P, np.nan)
S = np.full_like(P, np.nan)
for i in range(Np):
eps[i0n[i] : i1n[i] + 1, i] = vmp["eps"][i0o[i] : i1o[i] + 1, i]
chi[i0n[i] : i1n[i] + 1, i] = vmp["chi"][i0o[i] : i1o[i] + 1, i]
T[i0n[i] : i1n[i] + 1, i] = vmp["T"][i0o[i] : i1o[i] + 1, i]
S[i0n[i] : i1n[i] + 1, i] = vmp["S"][i0o[i] : i1o[i] + 1, i]
vmp["P"] = P
vmp["eps"] = eps
vmp["chi"] = chi
vmp["T"] = T
vmp["S"] = S
vmp["z"] = gsw.z_from_p(vmp["P"], vmp["startlat"])
print("Calculate neutral density.")
# Compute potential temperature using the 1983 UNESCO EOS.
vmp["PT0"] = seawater.ptmp(vmp["S"], vmp["T"], vmp["P"])
# Flatten variables for analysis.
lons = np.ones_like(P) * vmp["startlon"]
lats = np.ones_like(P) * vmp["startlat"]
S_ = vmp["S"].flatten()
T_ = vmp["PT0"].flatten()
P_ = vmp["P"].flatten()
LO_ = lons.flatten()
LA_ = lats.flatten()
gamman = gamma_GP_from_SP_pt(S_, T_, P_, LO_, LA_)
vmp["gamman"] = np.reshape(gamman, vmp["P"].shape) + 1000.0
io.savemat(os.path.join(data_out, "VMP.mat"), vmp)
```
|
{
"source": "jessecusack/LeConte_postprocessing",
"score": 2
}
|
#### File: LeConte_postprocessing/code/utils.py
```python
import os
from datetime import datetime, timedelta
import pytz
import numpy as np
import scipy.io as io
import utm
import yaml
from munch import Munch, munchify
from scipy.ndimage import median_filter
import scipy.signal as sig
def loadmat(filename, check_arrays=False, **kwargs):
"""
Big thanks to mergen on stackexchange for this:
http://stackoverflow.com/a/8832212
This function should be called instead of direct scipy.io.loadmat
as it cures the problem of not properly recovering python dictionaries
from mat files. It calls the function check keys to cure all entries
which are still mat-objects.
"""
kwargs["struct_as_record"] = False
kwargs["squeeze_me"] = True
data = io.loadmat(filename, **kwargs)
return _check_keys(data, check_arrays)
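# Minimal usage sketch (the file name below is hypothetical):
# data = loadmat("moorings.mat")
# # nested mat_structs come back as plain Python dicts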
def _check_keys(dict, check_arrays):
"""
Checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries.
"""
for key in dict:
if isinstance(dict[key], io.matlab.mio5_params.mat_struct):
dict[key] = _todict(dict[key])
if isinstance(dict[key], np.ndarray) and check_arrays:
shape = dict[key].shape
array = dict[key].flatten()
for i, item in enumerate(array):
if isinstance(item, io.matlab.mio5_params.mat_struct):
array[i] = _todict(item)
dict[key] = array.reshape(shape)
return dict
def _todict(matobj):
"""
A recursive function which constructs from matobjects nested dictionaries.
"""
dict = {}
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, io.matlab.mio5_params.mat_struct):
dict[strg] = _todict(elem)
else:
dict[strg] = elem
return dict
def datenum_to_datetime(datenum):
"""
Convert a MATLAB datenums into python datetimes.
Parameters
----------
datenum : array_like
MATLAB datenumber which is the number of days since 0000-01-00.
Returns
-------
dt : ndarray
Python datetime. See datetime module.
"""
def convert(datenum):
try:
return (
datetime.fromordinal(int(datenum))
+ timedelta(days=datenum % 1)
- timedelta(days=366)
)
except ValueError:
return np.nan
if np.iterable(datenum):
datenumar = np.asarray(datenum)
shape = datenumar.shape
dt = np.array([convert(el) for el in datenumar.flat])
dt = dt.reshape(shape)
else:
dt = convert(datenum)
return dt
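# Example: MATLAB datenum 719529.0 corresponds to 1970-01-01, so
# datenum_to_datetime(719529.0) returns datetime(1970, 1, 1, 0, 0).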
def POSIX_to_datetime(POSIX, tz=pytz.utc):
"""
Convert POSIX timestamps into python datetimes.
Parameters
----------
POSIX : array_like
A POSIX timestamp or array of timestamps.
tz : tzinfo class
Time zone information, the default is to assume UTC, e.g. tz=pytz.utc
Returns
-------
dt : ndarray
Python datetime. See datetime module.
"""
def convert(POSIX):
try:
return (
datetime.fromtimestamp(POSIX, tz)
)
except ValueError:
return np.nan
if np.iterable(POSIX):
par = np.asarray(POSIX)
shape = par.shape
dt = np.array([convert(el) for el in par.flat])
dt = dt.reshape(shape)
else:
dt = convert(POSIX)
return dt
def mid(x, axis=0):
"""Returns mid point values along given axis."""
ndim = np.ndim(x)
if ndim == 1:
return 0.5 * (x[1:] + x[:-1])
elif ndim > 1:
x_ = np.swapaxes(x, axis, 0)
xmid_ = 0.5 * (x_[1:, ...] + x_[:-1, ...])
return np.swapaxes(xmid_, 0, axis)
else:
raise ValueError
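# Quick illustrative check with made-up values:
# mid(np.array([0.0, 2.0, 6.0]))  # -> array([1., 4.])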
def nan_interp(x, xp, fp, left=None, right=None, axis=0, squeeze_me=True):
"""See numpy.interp documentation. This does the same thing but ignores NaN
values in the data. It can accept 2D arrays.
Parameters
----------
x : float or 1D array
The x-coordinates of the interpolated values. No NaNs please!
xp : 1D or 2D array of floats
The x-coordinates of the data points, must be increasing along the
dimension along which the interpolation is being performed.
fp : 1D or 2D array of floats or complex
The y-coordinates of the data points, same shape as `xp`.
left : optional float or complex corresponding to fp
Value to return for `x < xp[0]`, default is `fp[0]`.
right : optional float or complex corresponding to fp
Value to return for `x > xp[-1]`, default is `fp[-1]`.
axis : [-1, 0, 1] int
Default is 0. The axis along which to perform the interpolation.
squeeze_me : boolean
Default is True. Squeeze output to remove singleton dimensions.
Returns
-------
y : ndarray
The interpolated values.
"""
if axis not in [-1, 0, 1]:
raise ValueError("The axis may be only -1, 0 or 1.")
if xp.shape != fp.shape:
raise ValueError("xp and fp have different shapes.")
ndim = np.ndim(xp)
if ndim > 2:
raise ValueError("Only 1 or 2 dimensional arrays are supported.")
nans = np.isnan(xp) | np.isnan(fp)
if ndim == 1:
y = np.full_like(x, np.nan)
y = np.interp(x, xp[~nans], fp[~nans], left, right)
if ndim == 2:
nr, nc = xp.shape
if axis == 0:
if np.iterable(x):
y = np.full((len(x), nc), np.nan)
else:
y = np.full((1, nc), np.nan)
for i in range(nc):
xp_ = xp[~nans[:, i], i]
fp_ = fp[~nans[:, i], i]
y[:, i] = np.interp(x, xp_, fp_, left, right)
if axis == -1 or axis == 1:
if np.iterable(x):
y = np.full((nr, len(x)), np.nan)
else:
y = np.full((nr, 1), np.nan)
for i in range(nr):
xp_ = xp[i, ~nans[i, :]]
fp_ = fp[i, ~nans[i, :]]
y[i, :] = np.interp(x, xp_, fp_, left, right)
if squeeze_me:
return np.squeeze(y)
else:
return y
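# Minimal sketch on made-up 1D data containing a NaN:
# xp = np.array([0.0, 1.0, np.nan, 3.0])
# fp = np.array([0.0, 10.0, 20.0, 30.0])
# nan_interp(np.array([0.5, 1.5]), xp, fp)  # -> array([ 5., 15.])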
def interp_fill_valid_2D(x, xp, fp):
"""
Assumes input values fp is 2D with size N*M,
where M denotes profiles and N depths.
Parameters
----------
x : numpy array
Locations to interpolate to, 1D.
xp : numpy array
Data locations, 1D or 2D, shape (N) or (N, M).
fp : numpy array
Data values, 2D, shape (N, M).
"""
nc = fp.shape[1]
nr = x.size
f = np.full((nr, nc), np.nan)
if np.ndim(xp) == 1:
for i in range(nc):
f[:, i] = interp_fill_valid(x, xp, fp[:, i])
elif np.ndim(xp) == 2:
for i in range(nc):
f[:, i] = interp_fill_valid(x, xp[:, i], fp[:, i])
else:
raise ValueError("xp dimensions are wrong.")
return f
def interp_fill_valid(x, xp, fp):
"""Interpolate to x, and invalid regions with NaN. Region to fill is
that out of range of max(xp) and min(xp)."""
valid = np.isfinite(fp)
if any(valid):
xmax = np.max(xp[valid])
xmin = np.min(xp[valid])
f = np.interp(x, xp[valid], fp[valid])
f[(x > xmax) | (x < xmin)] = np.nan
else:
f = fp
return f
def check_files(files):
"""
Assumes that files is a dict or Munch object containing full file paths.
"""
for key in files:
# Skip check for non-string objects.
if type(files[key]) != str:
continue
if not os.path.isfile(files[key]):
raise ValueError("{} file not found: {}".format(key, files[key]))
else:
print("Found {} at '{}'.".format(key, files[key]))
def find_files(args, dataset, paths_file="file_paths.yml"):
"""
args: command line args
dataset: yaml file path parameter key e.g. "sep2018"
returns files as Munch
"""
# Grab the data file paths from the yml file.
with open(paths_file, "r") as f:
try:
all_files = yaml.safe_load(f)
except yaml.YAMLError as exc:
print(exc)
# Grab file path info for the specified dataset
file_info = munchify(all_files[dataset])
files = Munch()
# Join root directory specified by command line arguments with path
# specified in the yaml file.
for key in file_info:
files[key] = os.path.join(args[file_info[key].root], file_info[key].path)
check_files(files)
return files
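# Illustrative layout of file_paths.yml assumed by find_files (all names here
# are made up):
# sep2018:
#   vmp:
#     root: vmp_dir      # name of a command line argument holding a root directory
#     path: raw/VMP.mat  # path relative to that root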
def load_parameters(parameter_file="processing_parameters.yml"):
"""Load processing parameters into Munch."""
with open(parameter_file, "r") as f:
try:
params = yaml.safe_load(f)
except yaml.YAMLError as exc:
print(exc)
return munchify(params)
def closest_index(x, a):
"""
x: value
a: array
"""
return np.argmin(np.abs(x - a))
def regrid_profiles(time, timep, fp, time_win=60.0):
""""""
dt = time_win / (2 * 86400)
nc = time.size
idxs = []
idxps = []
for i in range(nc):
time_diff = np.abs(timep - time[i])
time_min = np.min(time_diff)
# Skip if not within time window
if time_min > dt:
continue
idx = np.argmin(time_diff)
# Skip if already found
if idx in idxs:
continue
idxs.append(i)
idxps.append(idx)
idxs = np.asarray(idxs)
idxps = np.asarray(idxps)
ndim = np.ndim(fp)
if ndim == 1:
f = np.full_like(time, np.nan)
f[idxs] = fp[idxps]
elif ndim == 2:
nr = fp.shape[0]
f = np.full((nr, nc), np.nan)
f[:, idxs] = fp[:, idxps]
return f
def apply_utm(m):
"""m is a Munch object"""
m.x, m.y, m.zone_number, m.zone_letter = utm.from_latlon(m.lat, m.lon)
return m
def rolling_window(a, size):
pad = np.ones(len(a.shape), dtype=np.int32)
pad[-1] = size - 1
pad = list(zip(pad, np.zeros(len(a.shape), dtype=np.int32)))
a = np.pad(a, pad, mode="reflect")
shape = a.shape[:-1] + (a.shape[-1] - size + 1, size)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
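# Shape sketch (hypothetical input): the reflect padding keeps the number of
# windows equal to the input length.
# rolling_window(np.arange(5.0), 3).shape  # -> (5, 3)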
def despike(x, n1=3, n2=6, size=101, xmin=-1.0, xmax=1.0, fill=True):
"""
Despike data using a median filter and 2 pass standard deviation threshold.
Parameters
----------
x : numpy array
Data, evenly spaced.
n1 : float
Pass 1 significance threshold, n standard deviations from reference data.
n2 : float
Pass 2 significance threshold, n standard deviations from reference data.
size : float, optional
Number of data points in the filter window.
xmin : float, optional
Minimum value of x, data below this value will be removed.
xmax : float, optional
Maximum value of x, data above this value will be removed.
fill : boolean, optional
Fill spikes using linear interpolation.
"""
# First get rid of gaps using linear interpolation
nans = np.isnan(x)
if any(nans):
t_ = np.arange(x.size)
x[nans] = np.interp(t_[nans], t_[~nans], x[~nans])
# Moving median and std pass 1
roll = rolling_window(x, size)
x1med = np.median(roll, axis=-1)
x1std = np.std(roll, axis=-1)
# Spikes using first threshold
dx1 = x - x1med
spikes1 = np.abs(dx1) > n1 * x1std
# Mask out spikes from first pass
xm = np.ma.masked_where(spikes1, x)
# Moving median and std pass 2
roll = rolling_window(xm, size)
x2med = np.ma.median(roll, axis=-1)
x2std = np.ma.std(roll, axis=-1)
dx2 = x - x2med
spikes = np.abs(dx2) > n2 * x2std
# Trim min and max
trim = (x > xmax) | (x < xmin)
spikes[trim] = True
if fill:
t_ = np.arange(x.size)
x[spikes] = np.interp(t_[spikes], t_[~spikes], x[~spikes])
else:
x[spikes] = np.nan
return x
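# Usage sketch (threshold values are illustrative, not tuned for any
# instrument). Note the input array is modified in place, so pass a copy if
# the raw series should be kept:
# clean = despike(raw.copy(), n1=3, n2=6, size=101, xmin=-1.0, xmax=1.0)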
def butter(cutoff, fs, btype="low", order=4):
"""Return Butterworth filter coefficients. See scipy.signal.butter for a
more thorough documentation.
Parameters
----------
cutoff : array
Cutoff frequency, e.g. roughly speaking, the frequency at which the
filter acts. Units should be the same as for the fs parameter.
fs : float
Sampling frequency of signal. Units should be same as for cutoff
parameter.
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
Default is 'low'.
order : optional, int
Default is 4. The order of the Butterworth filter.
Returns
-------
sos : numpy array
Second-order sections representation of the filter, for use with
scipy.signal.sosfiltfilt or sosfilt.
"""
cutoff = np.asarray(cutoff)
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
sos = sig.butter(order, normal_cutoff, btype=btype, analog=False, output="sos")
return sos
def butter_filter(x, cutoff, fs, btype="low", order=4, **kwargs):
"""Apply Butterworth filter to data using scipy.signal.filtfilt.
Parameters
----------
x : array
The data to be filtered. Should be evenly sampled.
cutoff : array
Cutoff frequency, e.g. roughly speaking, the frequency at which the
filter acts. Units should be the same as for the fs parameter.
fs : float
Sampling frequency of signal. Units should be same as for cutoff
parameter.
btype : optional, string
Default is 'low'. Filter type can be 'low', 'high' or 'band'.
order : optional, int
Default is 4. The order of the Butterworth filter.
Returns
-------
y : numpy array
The filtered data.
"""
sos = butter(cutoff, fs, btype=btype, order=order)
y = sig.sosfiltfilt(sos, x, **kwargs)
return y
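# Usage sketch (illustrative numbers): low-pass a 1 Hz series with a 2 minute
# cutoff.
# x_lp = butter_filter(x, cutoff=1.0/120.0, fs=1.0, btype="low", order=4)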
def nan_butter_filter(x, cutoff, fs, axis=1, btype="low", order=4, dic=20, **kwargs):
"""Apply Butterworth filter to data using scipy.signal.filtfilt for 2D array
along the given axis. Can handle some NaN values and 2D arrays.
Parameters
----------
x : array
The data to be filtered. Should be evenly sampled.
cutoff : array
Cutoff frequency, e.g. roughly speaking, the frequency at which the
filter acts. Units should be the same as for the fs parameter.
fs : float
Sampling frequency of signal. Units should be same as for cutoff
parameter.
axis : optional, int
Axis along which to perform operation, default is 1.
btype : optional, string
Default is 'low'. Filter type can be 'low', 'high' or 'band'.
order : optional, int
Default is 4. The order of the Butterworth filter.
dic : optional, int
Smallest contiguous region size, in number of data points, over which
to perform the filtering. Default is 20.
Returns
-------
y : numpy array
The filtered data.
"""
ndim = np.ndim(x)
y = np.full_like(x, np.nan)
def _filthelp(x_, cutoff, fs, btype, order, dic, **kwargs):
y_ = np.full_like(x_, np.nan)
nans = np.isnan(x_)
if nans.any():
idxs = contiguous_regions(~nans)
di = idxs[:, 1] - idxs[:, 0]
iidxs = np.argwhere(di > dic)
for j in iidxs[:, 0]:
sl = slice(*idxs[j, :])
y_[sl] = butter_filter(x_[sl], cutoff, fs, btype, order, **kwargs)
else:
y_ = butter_filter(x_, cutoff, fs, btype, order, **kwargs)
return y_
if ndim == 1:
y = _filthelp(x, cutoff, fs, btype, order, dic, **kwargs)
if ndim == 2:
nr, nc = x.shape
if axis == 0:
for i in range(nc):
y[:, i] = _filthelp(x[:, i], cutoff, fs, btype, order, dic, **kwargs)
if axis == -1 or axis == 1:
for i in range(nr):
y[i, :] = _filthelp(x[i, :], cutoff, fs, btype, order, dic, **kwargs)
return y
def nan_butter_filter_renan(x, cutoff, fs, axis=1, btype="low", order=4, dic=20, **kwargs):
# Could probably do this with decorators...
nans = np.isnan(x)
y = nan_butter_filter(x, cutoff, fs, axis, btype, order, dic, **kwargs)
y[nans] = np.nan
return y
```
#### File: LeConte_postprocessing/tests/ABLE_sentinel_processing.py
```python
import xarray as xr
import numpy as np
import utils
import matplotlib.pyplot as plt
import scipy.stats as stats
import utm
from scipy.ndimage import gaussian_filter
def mode(x, **kwargs):
mout = np.squeeze(stats.mode(x, axis=1)[0])
return mout
def interval_to_mid(intervals):
"""
Parameters
----------
intervals : 1D numpy array
An array of pandas Interval objects.
Returns
-------
mids : 1D numpy array
Midpoints of the intervals.
"""
return np.array([v.mid for v in intervals])
# %% [markdown]
# Load datasets and do some basic conversion of times and variables.
# %%
sV = xr.open_dataset("../proc/ABLE_sentinel_2018_enu.nc")
sV = sV.set_coords(["lon", "lat"])
sV["time"] = utils.POSIX_to_datetime(sV.time.values).astype(np.datetime64)
x, y, *_ = utm.from_latlon(sV.lat, sV.lon)
sV = sV.assign_coords({"x": x, "y": y})
virt = xr.open_dataset("../proc/ABLE_sentinel_RBRvirtuoso_2018.nc")
virt = virt.set_coords(["lon", "lat"])
virt["time"] = utils.POSIX_to_datetime(virt.time.values).astype(np.datetime64)
sbe = xr.open_dataset("../proc/ABLE_sentinel_SBE37_2018.nc")
sbe = sbe.set_coords(["lon", "lat"])
sbe["time"] = utils.POSIX_to_datetime(sbe.time.values).astype(np.datetime64)
# %% [markdown]
# Define some parameters and simple thresholds for processing.
# %%
pmin = 125 # Minimum pressure to keep
dpdtmax = 0.4e-9 # Maximum rate of change of pressure to keep
cut_ends = 2 # Number of points on either end to remove after applying other thresholds
dt = 10 # Bin size for time average [s]
# %% [markdown]
# Apply the thresholds to remove some data.
# %%
is_deep = sV.p > pmin
is_slow = np.fabs(sV.p.differentiate("time")) < dpdtmax
keep = is_deep & is_slow
sVp = sV.isel(time=keep).isel(time=slice(cut_ends, -cut_ends))
# %%
sVp.p.plot.line('.')
# %% [markdown]
# ## Old quality control
#
# Note [Marion's document](https://escholarship.org/content/qt6xd149s8/qt6xd149s8.pdf)
# %%
# # qc_err0 = 0.3
# # qc_err1 = 0.5
# qc_err = 0.15 # error velocity
# qc_q = 110 # correlation
# qc_uv = 2.0 # horizontal velocity
# qc_w = 1.5 # vertical velocity
# qc_a = 30 # echo intensity
# %%
# qc_u_bad = np.abs(sVp.u) > qc_uv
# qc_v_bad = np.abs(sVp.v) > qc_uv
# qc_w_bad = np.abs(sVp.w) > qc_w
# qc_vv_bad = np.abs(sVp.vv) > qc_w
# qc_err_bad = np.abs(sVp.err) > qc_err
# qc_q1_good = sVp.q1 > qc_q
# qc_q2_good = sVp.q2 > qc_q
# qc_q3_good = sVp.q3 > qc_q
# qc_q4_good = sVp.q4 > qc_q
# qc_q_bad = (qc_q1_good.astype(int) + qc_q2_good.astype(int) + qc_q3_good.astype(int) + qc_q4_good.astype(int)) <= 3
# %%
# uv_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_u_bad.astype(int) + qc_v_bad.astype(int)) > 1
# w_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_w_bad.astype(int)) > 1
# vv_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_vv_bad.astype(int)) > 1
# %%
# fig, axs = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(10, 10))
# uv_reject.plot(ax=axs[0])
# w_reject.plot(ax=axs[1])
# vv_reject.plot(ax=axs[2])
# %% [markdown]
# Remove velocity using QC.
# %%
# sVqc = sVp.copy()
# u = sVqc.u.values
# u[uv_reject] = np.nan
# sVqc["u"] = (sVqc.u.dims, u, sVqc.u.attrs)
# v = sVqc.v.values
# v[uv_reject] = np.nan
# sVqc["v"] = (sVqc.v.dims, v, sVqc.v.attrs)
# w = sVqc.w.values
# w[w_reject] = np.nan
# sVqc["w"] = (sVqc.w.dims, w, sVqc.w.attrs)
# vv = sVqc.vv.values
# vv[vv_reject] = np.nan
# sVqc["vv"] = (sVqc.vv.dims, vv, sVqc.vv.attrs)
# %% [markdown]
# ## New cut off data above surface
# %%
dthresh = 100.
sidelobe_pct = 1 - np.cos(np.deg2rad(sVp.beamAngle))
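# (For a nominal 25 degree beam angle this is 1 - cos(25 deg) ~ 0.09, i.e. roughly the
# last ~9% of the range to the surface is potentially sidelobe contaminated; the actual
# fraction comes from the instrument's beamAngle attribute.)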
var_names = ["a1", "a2", "a3", "a4", "va"]
nroll = 5
dmingood = np.full((sVp.time.size, len(var_names)), np.nan)
fig, axs = plt.subplots(len(var_names), 1, figsize=(14, 3*len(var_names)))
for i, var in enumerate(var_names):
idxmax = sVp[var].where(sVp.distance > dthresh).argmax("distance")
dmax = sVp.distance[idxmax]
dsl = (1 - sidelobe_pct)*sVp.distance[idxmax]
# dmax = dmax.where(dmax > dthresh)
dmode = dsl.rolling(time=nroll, min_periods=1, center=True).reduce(mode)
sVp[var].plot(ax=axs[i])
dmingood[:, i] = dmode
dsl.plot(ax=axs[i], color="r")
axs[i].set_title("")
for i in range(len(var_names)):
axs[i].plot(sVp.time, dmingood.min(axis=1), color="k")
# %%
good = dmingood.min(axis=1)
# Make a new dataset without surface
sVs = sVp.copy()
# Loop over the 2D datavars
mask = sVp.distance < xr.DataArray(good, dims={"time": sVp.time})
for var in sVp.data_vars:
if sVp[var].dims == ('distance', 'time'):
print(f"Masking {var}.")
sVs[var] = sVp[var].where(mask)
# Remove distances where there is no good data
sVs = sVs.isel(distance=mask.any("time"))
# %% [markdown]
# ## New quality control
# %%
errthresh = 0.2 # Blur around these errors
errthresh_high = 0.2 # Always remove these errors
maskthresh = 0.35 # Blurred mask threshold
qthresh = 300
vqthresh = 35
sigma = (2, 5)
qsum = sVs.q1 + sVs.q2 + sVs.q3 + sVs.q4
qgood = qsum > qthresh
vqgood = sVs.vq.values > vqthresh
sVqc = sVs.copy()
egood = np.abs(sVs.err) < errthresh
egood_filt = gaussian_filter(egood.values.astype(float), sigma)
ebgood = (egood_filt > maskthresh) & (np.abs(sVs.err) < errthresh_high) & qgood
vebgood = (egood_filt > maskthresh) & vqgood
var_names = ["u", "v", "w", "err"]
for var in var_names:
sVqc[var] = sVs[var].where(ebgood)
sVqc["vv"] = sVs.vv.where(vebgood)
# %% [markdown]
# ## Time binning
# %% [markdown]
# Bin average data to reduce size and errors.
#
# First make bins.
# %%
# Time bin start and end to nearest minute. This will cut off some data.
tstart = (sVqc.time[0].values + np.timedelta64(30, 's')).astype('datetime64[m]')
tend = sVqc.time[-1].values.astype('datetime64[m]')
timebins = np.arange(tstart, tend, np.timedelta64(dt, 's'))
# %% [markdown]
# Group and take mean.
# %%
gb = sVqc.groupby_bins("time", timebins)
sVa = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
sVa["time_bins"] = interval_to_mid(sVa.time_bins.values).astype("datetime64[s]")
sVa = sVa.rename({"time_bins": "time"})
# %% [markdown]
# Mean of heading should be performed using circular mean. (Technically, so should pitch and roll, but for small angles the noncircular mean is ok)
# %%
sVa["heading"] = (["time"], sVqc.heading.groupby_bins("time", timebins).reduce(stats.circmean, high=360.).values)
# %% [markdown]
# ## Old cut off data above surface
#
# Use a simple echo intensity threshold to find the maximum.
# %%
# dmin = 60. # Minimum distance above which to look for the maximum
# nroll = 120 # Number of points in rolling mode window
# fcut = 0.1 # Extra distance to remove (1 - fcut)*dcut
# %%
# sVa.va.isel(time=10000).plot.line('.')
# %% [markdown]
# Identify echo maximum in each beam, using a rolling mode to smooth out data.
# %%
# # fig, ax = plt.subplots()
# dcuts = []
# for var in ["a1", "a2", "a3", "a4", "va"]:
# am = sVa[var].where(sVa.distance > dmin)
# imax = am.argmax(dim="distance", skipna=True)
# dmax = am.distance[imax]
# ro = dmax.rolling(time=nroll, min_periods=1, center=True)
# dm = ro.reduce(mode)
# dcut = (1 - fcut)*dm
# # ax.plot(sVa.time, dmax, 'r')
# # ax.plot(sVa.time, dm, 'orange')
# # ax.plot(sVa.time, dcut, 'g')
# dcuts.append(dcut.values)
# %%
# dcuts = np.stack(dcuts, axis=1)
# # Use only the vertical beam for finding the surface.
# dcut_min = dcuts[:, 4]
# dcut_min = xr.DataArray(dcut_min, dims={"time": sVa.time})
# %% [markdown]
# Mask and remove data above distance threshold.
# %%
# sVm = sVa.where(sVa.distance < dcut_min)
# # The masking process converts some variables to 2D, change them back...
# sVm["p"] = sVa.p
# sVm["t"] = sVa.t
# sVm["pitch"] = sVa.pitch
# sVm["rol"] = sVa.rol
# sVm["heading"] = sVa.heading
# sVm = sVm.isel(distance=~np.isnan(sVm.u).all(axis=0))
# %% [markdown]
# ## Plotting time series
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-10T11:00"))
sVm_ = sVa.sel(time=timeslice)
fig, axs = plt.subplots(4, 1, figsize=(15, 10), sharex=True)
sVm_.u.plot(ax=axs[0], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.v.plot(ax=axs[1], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.w.plot(ax=axs[2], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.vv.plot(ax=axs[3], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
fig, ax = plt.subplots(figsize=(12, 3))
sVm_.p.plot(ax=ax)
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-10T11:00"))
sVm_ = sVa.sel(time=timeslice)
fig, axs = plt.subplots(8, 1, figsize=(15, 25), sharex=True)
sVm_.u.plot(ax=axs[0], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.v.plot(ax=axs[1], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.vv.plot(ax=axs[2], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.a1.plot(ax=axs[3], x="time")
sVm_.a2.plot(ax=axs[4], x="time")
sVm_.a3.plot(ax=axs[5], x="time")
sVm_.a4.plot(ax=axs[6], x="time")
sVm_.va.plot(ax=axs[7], x="time")
fig, axs = plt.subplots(3, 1, figsize=(11, 8))
sVm_.heading.plot(ax=axs[0])
sVm_.rol.plot(ax=axs[1])
sVm_.pitch.plot(ax=axs[2])
# %% [markdown]
# # Plug in other instruments to dataset
#
# Group and bin average.
# %%
gb = virt.groupby_bins("time", timebins)
virta = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
virta["time_bins"] = interval_to_mid(virta.time_bins.values).astype("datetime64[ms]")
virta = virta.rename({"time_bins": "time"})
gb = sbe.groupby_bins("time", timebins)
sbea = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
sbea["time_bins"] = interval_to_mid(sbea.time_bins.values).astype("datetime64[ms]")
sbea = sbea.rename({"time_bins": "time"})
# %% [markdown]
# Look at a couple of plots.
# %%
fig, ax = plt.subplots(figsize=(12, 3))
virta.turb.plot(ax=ax)
fig, axs = plt.subplots(3, 1, figsize=(12, 10), sharex=True)
sbea.p.plot(ax=axs[0])
sbea.t.plot(ax=axs[1])
sbea.SP.plot(ax=axs[2])
# %% [markdown]
# Assign other data to the sentinel dataset.
# %%
ds = sVa.copy()
# %%
ds["turb_RBR"] = (sVa.p.dims, virta.turb, virta.turb.attrs)
ds["SP_SBE37"] = (sVa.p.dims, sbea.SP, sbea.SP.attrs)
ds["C_SBE37"] = (sVa.p.dims, sbea.C, sbea.C.attrs)
ds["t_SBE37"] = (sVa.p.dims, sbea.t, sbea.t.attrs)
ds["p_SBE37"] = (sVa.p.dims, sbea.p, sbea.p.attrs)
# %% [markdown]
# Try a plot...
# %%
fig, ax = plt.subplots()
ds.p_SBE37.plot(ax=ax)
ds.p.plot(ax=ax, yincrease=False)
# %% [markdown]
# Estimate some more thermodynamic variables.
# %%
import gsw
# %%
ds["SA_SBE37"] = (ds.p.dims, gsw.SA_from_SP(ds.SP_SBE37, ds.p_SBE37, ds.lon, ds.lat), {"units": "g/kg", "long_name": "Absolute_salinity"})
ds["CT_SBE37"] = (ds.p.dims, gsw.CT_from_t(ds.SA_SBE37, ds.t_SBE37, ds.p_SBE37), {"units": "deg C", "long_name": "Conservative_temperature"})
ds["z_SBE37"] = (ds.p.dims, gsw.z_from_p(ds.p_SBE37, ds.lat), {"units": "m", "long_name": "height"})
ds["depth_SBE37"] = (ds.p.dims, -ds.z_SBE37, {"units": "m", "long_name": "depth"})
ds["z_ADCP"] = (ds.p.dims, gsw.z_from_p(ds.p, ds.lat), {"units": "m", "long_name": "height"})
ds["depth_ADCP"] = (ds.p.dims, -ds.z_ADCP, {"units": "m", "long_name": "depth"})
ds["z"] = (ds.distance.dims, ds.distance + ds.z_ADCP.mean(dim="time"), {"units": "m", "long_name": "height"})
ds["depth"] = (ds.distance.dims, -ds.z, {"units": "m", "long_name": "depth"})
ds = ds.set_coords(["z", "depth"])
# %% [markdown]
# Save dataset to netcdf.
# %%
ds.to_netcdf("../proc/ABLE_sentinel_mooring_2018.nc")
# %% [markdown]
# ## Examine a short segment of the dataset
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice)
fig, axs = plt.subplots(4, 1, figsize=(15, 10), sharex=True, sharey=True)
ds_.u.plot(ax=axs[0], y="depth", x="time", yincrease=False, vmin=-0.2, vmax=0.2, cmap="coolwarm")
ds_.a3.plot(ax=axs[1], y="depth", x="time", yincrease=False)
ds_.vv.plot(ax=axs[2], y="depth", x="time", yincrease=False, vmin=-0.2, vmax=0.2, cmap="coolwarm")
ds_.va.plot(ax=axs[3], y="depth", x="time", yincrease=False)
fig, axs = plt.subplots(4, 1, figsize=(11.7, 10), sharex=True)
ds_.p_SBE37.plot(ax=axs[0])
ds_.CT_SBE37.plot(ax=axs[1])
ds_.turb_RBR.plot(ax=axs[2])
ds_.pitch.plot(ax=axs[3])
# %% [markdown]
# Compare echo intensity near bottom for different beams.
# %%
dist = 5
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice).sel(distance=dist, method="nearest")
fig, ax = plt.subplots(figsize=(11, 4))
ds_.a1.plot(ax=ax, label="beam 1")
ds_.a2.plot(ax=ax, label="beam 2")
ds_.a3.plot(ax=ax, label="beam 3")
ds_.a4.plot(ax=ax, label="beam 4")
ds_.va.plot(ax=ax, label="beam v")
ax.set_ylabel("Echo intensity")
ax.legend()
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice)
fig, ax = plt.subplots(figsize=(10, 10))
for i in range(0, ds_.time.size, 50):
ds__ = ds_.isel(time=i)
ds__.va.plot(ax=ax, label=ds__.time.values.astype("datetime64[s]"))
ax.legend(loc="upper left", bbox_to_anchor=(1, 1))
# %%
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import numpy as np
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
# def cc(arg):
# return mcolors.to_rgba(arg, alpha=0.6)
xs = ds_.distance.values
verts = []
zs = []
for i in range(0, ds_.time.size, 100):
ds__ = ds_.isel(time=i)
time = (ds__.time - ds_.time[0]).astype(float)/1e9
zs.append(time)
ys = ds__.va.values
ys[0], ys[-1] = 0, 0
verts.append(list(zip(xs, ys)))
# zs = [0.0, 1.0, 2.0, 3.0]
# for z in zs:
# ys = np.random.rand(len(xs))
# ys[0], ys[-1] = 0, 0
# verts.append(list(zip(xs, ys)))
poly = PolyCollection(verts) # facecolors=[cc('r'), cc('g'), cc('b'), cc('y')]
poly.set_alpha(0.2)
ax.add_collection3d(poly, zs=zs, zdir='y')
ax.set_xlabel('Distance')
ax.set_xlim3d(0, xs.max())
ax.set_ylabel('Y')
ax.set_ylim3d(0, zs[-1])
ax.set_zlabel('Z')
ax.set_zlim3d(0, 200)
ax.view_init(elev=30., azim=30)
plt.show()
# %%
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
timeslice = slice(np.datetime64("2018-09-05T10:00"), np.datetime64("2018-09-05T10:45"))
ds_ = ds.sel(time=timeslice)
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(projection='3d')
T, D = np.meshgrid(ds_.distance.values, (ds_.time.values - ds_.time[0].values).astype(float)/1e9)
# Plot a basic wireframe.
ax.plot_wireframe(T, D, ds_.a2.values, rstride=1, cstride=1)
ax.view_init(elev=45., azim=120)
# %% [markdown]
# # New QC
# %%
tslice = slice(np.datetime64("2018-09-07T10:00"), np.datetime64("2018-09-07T11:00"))
# tslice = slice(np.datetime64("2018-09-04T10:00"), np.datetime64("2018-09-04T11:00"))
# tslice = slice(np.datetime64("2018-09-11T14:00"), np.datetime64("2018-09-11T16:00"))
# tslice = slice(np.datetime64("2018-09-10T03:00"), np.datetime64("2018-09-10T04:00"))
enu = sVp.sel(time=tslice)
# %%
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enu.u.plot(ax=axs[0], **hvel_kwargs)
enu.v.plot(ax=axs[1], **hvel_kwargs)
enu.w.plot(ax=axs[2], **vvel_kwargs)
enu.vv.plot(ax=axs[3], **vvel_kwargs)
np.abs(enu.err).plot(ax=axs[4], vmin=0, vmax=0.2)
for ax in axs:
ax.set_xlabel("")
# %%
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enu.q1.plot(ax=axs[0])
enu.q2.plot(ax=axs[1])
enu.q3.plot(ax=axs[2])
enu.q4.plot(ax=axs[3])
enu.vq.plot(ax=axs[4])
for ax in axs:
ax.set_xlabel("")
# %%
dthresh = 100.
sidelobe_pct = 1 - np.cos(np.deg2rad(enu.beamAngle))
var_names = ["a1", "a2", "a3", "a4", "va"]
nroll = 5
dmingood = np.full((enu.time.size, len(var_names)), np.nan)
fig, axs = plt.subplots(len(var_names), 1, figsize=(14, 3*len(var_names)))
for i, var in enumerate(var_names):
idxmax = enu[var].where(enu.distance > dthresh).argmax("distance")
dmax = sVp.distance[idxmax]
dsl = (1 - sidelobe_pct)*enu.distance[idxmax]
# dmax = dmax.where(dmax > dthresh)
dmode = dsl.rolling(time=nroll, min_periods=1, center=True).reduce(mode)
enu[var].plot(ax=axs[i])
dmingood[:, i] = dmode
dmode.plot(ax=axs[i], color="r")
axs[i].set_title("")
for i in range(len(var_names)):
axs[i].plot(enu.time, dmingood.min(axis=1), color="k")
# %%
fig, axs = plt.subplots(3, 1, figsize=(22, 9))
enu.heading.plot(ax=axs[0], marker='.', linestyle="")
enu.rol.plot(ax=axs[1])
enu.pitch.plot(ax=axs[2])
# %%
# Make a new dataset without surface
enus = enu.copy()
# Loop over the 2D datavars
mask = enu.distance < xr.DataArray(dmingood.min(axis=1), dims={"time": enu.time})
for var in enu.data_vars:
if enu[var].dims == ('distance', 'time'):
print(f"Masking {var}.")
enus[var] = enu[var].where(mask)
# Remove distances where there is no good data
enus = enus.isel(distance=mask.any("time"))
# %%
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enus.u.plot(ax=axs[0], **hvel_kwargs)
enus.v.plot(ax=axs[1], **hvel_kwargs)
enus.w.plot(ax=axs[2], **vvel_kwargs)
enus.vv.plot(ax=axs[3], **vvel_kwargs)
np.abs(enus.err).plot(ax=axs[4], vmin=0, vmax=0.2)
for ax in axs:
ax.set_xlabel("")
# %%
from scipy.ndimage import gaussian_filter
# %%
errthresh = 0.2 # Blur around these errors
errthresh_high = 0.2 # Always remove these errors
maskthresh = 0.35 # Blurred mask threshold
qthresh = 300
vqthresh = 35
sigma = (2, 5)
qsum = enus.q1 + enus.q2 + enus.q3 + enus.q4
qgood = qsum > qthresh
vqgood = enus.vq.values > vqthresh
enueb = enus.copy()
egood = np.abs(enus.err) < errthresh
egood_filt = gaussian_filter(egood.values.astype(float), sigma)
ebgood = (egood_filt > maskthresh) & (np.abs(enus.err) < errthresh_high) & qgood
vebgood = (egood_filt > maskthresh) & vqgood
var_names = ["u", "v", "w", "err"]
for var in var_names:
enueb[var] = enus[var].where(ebgood)
enueb["vv"] = enus.vv.where(vebgood)
# %%
fig, ax = plt.subplots(1, 1, figsize=(22, 3.5))
ax.pcolormesh(egood_filt)
ax.contour(egood_filt, [maskthresh], colors="r")
ax.contour(qgood, [0.5], colors="g")
ax.contour(vqgood, [0.5], colors="b")
# %% tags=[]
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(8, 1, sharex=True, figsize=(22, 28))
enueb.u.plot(ax=axs[0], **hvel_kwargs)
enus.u.plot(ax=axs[1], **hvel_kwargs)
enueb.v.plot(ax=axs[2], **hvel_kwargs)
enus.v.plot(ax=axs[3], **hvel_kwargs)
enueb.w.plot(ax=axs[4], **vvel_kwargs)
enus.w.plot(ax=axs[5], **vvel_kwargs)
enueb.vv.plot(ax=axs[6], **vvel_kwargs)
enus.vv.plot(ax=axs[7], **vvel_kwargs)
for ax in axs:
ax.set_xlabel("")
# %% [markdown]
# # Beam separation
# %%
z = sVp.distance[sVp.distance < 120]
angle = np.deg2rad(sVp.beamAngle)
separation_opposite = 2*z*np.tan(angle)
separation_adjacent = 2*z*np.tan(angle)*np.cos(np.pi/4)
fig, ax = plt.subplots()
ax.plot(separation_opposite, z, label="opposite")
ax.plot(separation_adjacent, z, label="adjacent")
ax.axvline(75, color="k", label="half wavelength")
ax.legend()
ax.grid()
ax.set_xlabel("Beam separation [m]")
ax.set_ylabel("Distance from ADCP (mast) [m]")
```
|
{
"source": "jessecusack/ocean_tools",
"score": 2
}
|
#### File: ocean_tools/ocean_tools/GM.py
```python
import numpy as np
import scipy as sp
from scipy.special import gamma
# Default parameter values.
N0 = 5.2e-3 # Buoyancy frequency [rad s-1].
b = 1300. # e-folding scale of N with depth [m].
E0 = 6.3e-5 # Internal wave energy parameter.
f_30 = 7.3e-5 # Coriolis frequency at 30N [rad s-1].
epsilon_0 = 8e-10 # GM energy dissipation rate (Polzin 2014).
# Garrett and Kunze 1991 set.
GM91 = {
's': 1.,
't': 2.,
'jp': 0.,
'jstar': 3.}
# Garrett and Munk 1976 set.
GM76 = {
's': 2.,
't': 2.,
'jp': 0.,
'jstar': 3.}
# Garrett and Munk 1975 set.
GM75 = {
's': 1.,
't': 2.5,
'jp': 0.,
'jstar': 6.}
IWEX = {
's': 1.,
't': 2.4,
'jp': 1.2,
'jstar': -1.}
class GM(object):
"""The GM class is a tool for diagnosing the Garrett-Munk internal wave
field for a given value of buoyancy frequency N and Coriolis parameter f.
It contains methods for estimating spectra (e.g. displacement or velocity)
as a function of wavenumber and frequency.
"""
def __init__(self, N, f, **kwargs):
self.N = N
self.f = np.abs(f)
# The default parameter values are defined at the top of module.
self.b = kwargs.pop('b', b)
self.N0 = kwargs.pop('N0', N0)
self.E0 = kwargs.pop('E0', E0)
self.Ef = kwargs.pop('Ef', 0.)
# Necessary parameters that vary between implementations. Use Garrett
# and Munk 1976 set by default.
self.s = kwargs.pop('s', 2.)
self.t = kwargs.pop('t', 2.)
self.jp = kwargs.pop('jp', 0.)
self.jstar = kwargs.pop('jstar', 3.)
self.eps = self.f/self.N
def _B(self, om):
"""The frequency part of the GM spectrum."""
return 2.*self.f/(np.pi*om*np.sqrt(om**2 - self.f**2))
def _A(self, m, rolloff):
"""The vertical wavenumber part of the GM spectrum.
m in cycles per metre!
Set Ef to a non-zero value to include the high wavenumber roll off."""
# TODO: implement trimming low and high.
# Roll off power, may need to be an argument.
rop = -3
# Normalisation factor, may need to be an argument.
I = self.s*gamma(self.t/self.s) \
/ (gamma(1/self.s)*gamma((self.t-1)/self.s))
delta = self.jp*self.N/(2.*self.N0*self.b)
mstar = self.jstar*self.N/(2.*self.N0*self.b)
A = (1/mstar)*I*(1 + ((m - delta)/mstar)**self.s)**(-self.t/self.s)
# If this is true, then roll off to m**-3 above m > 0.1 cpm.
# Why to the power -3? Not sure.
if rolloff:
if not self.Ef > 0.:
raise ValueError('For rolloff set Ef > 0.')
A10 = (1/mstar)*I*(1 + ((0.1 - delta)/mstar)**self.s)**(-self.t/self.s)
Aa = A10*(10*m)**rop
A = np.minimum(Aa, self.Ef*A)
return A
def _neg_jstar(self, jstar, om):
"""Deals with negative jstar... not exactly sure about this."""
j0 = 20.
jinf = 10.
om0 = self.f
# What on earth are these numbers?
ominf = 1.133*2.*np.pi/3600.
omm = 0.173*2.*np.pi/3600.
logs = 4.*(np.log10(om/self.f) - np.log10(omm/self.f)) \
/ np.log10(om0/ominf)
tanh = np.tanh(logs)
je = j0+0.5*(jinf - j0)*(1 - tanh)
# What is this number?
J = 2.1
return je/J
def vert_disp(self, om, m=None):
"""Vertical displacement."""
return (self.b**2)*self.N0*(om**2 - self.f**2)/(self.N*om**2)
def horiz_vel(self, om, m=None):
"""Horizontal velocity."""
return (self.b**2)*self.N0*self.N*(om**2 + self.f**2)/om**2
def vert_vel(self, om, m=None):
"""Vertical velocity."""
# Note: no factor of 2pi with om here because it is already in radian
# units.
return self.vert_disp(om)*om**2
def vert_strain(self, om, m):
"""Strain."""
return self.vert_disp(om)*(2.*np.pi*m)**2
def vert_shear(self, om, m):
"""Shear."""
return self.horiz_vel(om)*(2.*np.pi*m)**2
def Somm(self, om, m, Stype, rolloff=False):
"""Garrett-Munk spectrum as a function of frequency and vertical
wavenumber.
Parameters
----------
om: array
Frequency values. [rad s-1]
m: array
Vertical wavenumber values. [cpm]
Stype: string
Select between ['vert_disp', 'horiz_vel', 'vert_vel', 'vert_shear',
'vert_strain']. The last two are not working yet.
rolloff: boolean
If True, apply a rolloff after critical vertical wavenumber.
Default is False.
Er: float
Dimensionless energy of the internal wave field.
Returns
-------
S : array
Spectrum of size (len(m), len(om)).
"""
Nom = len(om)
Nm = len(m)
S = np.zeros((Nm, Nom))
# Choose the spectral function that gives dimensionality.
Sfunc = getattr(self, Stype)
M = np.tile(m, (Nom, 1)).T
A = self._A(M, rolloff)
B = self._B(om)
R = Sfunc(om, M)
S = self.E0*A*B*R
return S
def Skm(self, k, m, Stype, rolloff=False):
"""Garrett-Munk spectrum as a function of horizontal wavenumber and
vertical wavenumber.
Parameters
----------
k: array
Horizontal wavenumber values. [cpm]
m: array
Vertical wavenumber values. [cpm]
Stype: string
Select between ['vert_disp', 'horiz_vel', 'vert_vel', 'vert_shear',
'vert_strain']. The last two are not working yet.
rolloff: boolean
If True, apply a rolloff after critical vertical wavenumber.
Default is False.
Er: float
Dimensionless energy of the internal wave field.
Returns
-------
S : array
Spectrum of size (len(m), len(k)).
"""
# TODO: make this an input parameter.
Nz = 200
Nk = len(k)
Nm = len(m)
S = np.zeros((Nm, Nk))
# Choose the spectral function that gives dimensionality.
Sfunc = getattr(self, Stype)
Z = np.tile(np.linspace(0., 1., Nz), (Nm, 1))
M = np.tile(m, (Nz, 1)).T
A = self._A(M, rolloff)
for i, _k in enumerate(k):
# We use the scipy sqrt function here because it gives imaginary
# results for negative numbers, rather than NaN. I dont' know
# what Zmax is supposed to represent.
Zmax = Z*sp.sqrt(M**2/_k**2 - 1).real
omsq = _k**2/M**2*(Zmax**2+1)*(self.N**2-self.f**2) + self.f**2
om = np.sqrt(omsq)
B = self._B(om)
# dom/da
domda = _k*np.sqrt(Z**2+1)*(self.N**2-self.f**2)/(om*M**2)
# The displacement factor, gives the spectrum a distance unit.
R = Sfunc(om, M)
# This needs to be all the right way around. Awkward.
dz = Zmax[:, 1] - Zmax[:, 0]
dZ = np.tile(dz, (Nz, 1)).T
# Tda cancels stuff, so just do that here and save some time...
Tda = dZ/sp.sqrt(Zmax**2+1)
# I think all this is just to scale TT so that when integrating,
# the trapz function does the right thing. Could simply pass x
# values to trapz? Wouldn't that be better?
TT = B*R*A*Tda*domda
S[:, i] = np.trapz(TT)
# Some more constants. Why?
S *= 2.*self.E0/np.pi
return S
def Som(self, om, Stype, Nm=1000, rolloff=False):
"""Garrett-Munk spectrum as a function of frequency.
Parameters
----------
om: array
Frequency values. [rad s-1]
Stype: string
Select between ['vert_disp', 'horiz_vel', 'vert_vel', 'vert_shear',
'vert_strain']. The last two are not working yet.
Nm: int
Integration resolution.
rolloff: boolean
If True, apply a rolloff after critical vertical wavenumber.
Default is False.
Er: float
Dimensionless energy of the internal wave field.
Returns
-------
S : array
Spectrum of size (len(om),).
"""
m = np.logspace(-4, 1, Nm)
S = self.Somm(om, m, Stype, rolloff)
return np.trapz(S, m, axis=0)
def Sm(self, m, Stype, Nom=1000, rolloff=False):
"""Garrett-Munk spectrum as a function of vertical wavenumber.
Parameters
----------
m: array
Vertical wavenumber values. [cpm]
Stype: string
Select between ['vert_disp', 'horiz_vel', 'vert_vel', 'vert_shear',
'vert_strain']. The last two are not working yet.
Nom: int
Integration resolution.
rolloff: boolean
If True, apply a rolloff after critical vertical wavenumber.
Default is False.
Er: float
Dimensionless energy of the internal wave field.
Returns
-------
S : array
Spectrum of size (len(m),).
"""
phi = np.arange(1, Nom+1)*np.arccos(self.eps)/Nom
om = self.f/np.cos(phi)
S = self.Somm(om, m, Stype, rolloff)
return np.trapz(S, om, axis=1)
def Sk(self, k, Stype, Nm=100, rolloff=False):
"""Garrett-Munk spectrum as a function of horizontal wavenumber.
Parameters
----------
k: array
Horizontal wavenumber values. [cpm]
Stype: string
Select between ['vert_disp', 'horiz_vel', 'vert_vel', 'vert_shear',
'vert_strain']. The last two are not working yet.
Nm: int
Integration resolution.
rolloff: boolean
If True, apply a rolloff after critical vertical wavenumber.
Default is False.
Er: float
Dimensionless energy of the internal wave field.
Returns
-------
S : array
Spectrum of size (len(k),).
"""
m = np.logspace(-4, 1, Nm)
S = self.Skm(k, m, Stype, rolloff)
return np.trapz(S, m, axis=0)
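# Usage sketch (illustrative N and f; mirrors the examples in __main__ below):
# G = GM(5.2e-3, 7.3e-5, **GM76)
# m = np.logspace(-4, 0, 100)      # vertical wavenumber [cpm]
# S_shear = G.Sm(m, 'vert_shear')  # shear spectrum vs vertical wavenumber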
def diff_Pspec(f, S):
"""Differentiate power spectrum. Following Fourier theory this is
equivalent to multiplying by the frequency/wavenumber squared.
Parameters
----------
f: array
Frequency or wavenumber values in non-angular units.
S: array
Spectrum.
Returns
-------
dS : array
Differentiated spectrum.
"""
dS = S*(2.*np.pi*f)**2
return dS
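# Illustrative check with made-up numbers: for f = [1, 2] (non-angular units) a
# flat spectrum S = [1, 1] is multiplied by (2*pi*f)**2, i.e. roughly [39.5, 157.9].
# dS = diff_Pspec(np.array([1.0, 2.0]), np.array([1.0, 1.0]))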
def H(j, j_star=3., N_sum=100000):
# The number over which to sum if j_star is not 3.
if j_star == 3.:
# The factor 0.468043 comes from summing denominator from j = 1 to
# j = 1e+8 using j_star = 3.
return (j**2 + j_star**2)**(-1)/0.468043
else:
j_sum = np.arange(1, N_sum)
return (j**2 + j_star**2)**(-1)/np.sum((j_sum**2 + j_star**2)**(-1))
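# Sanity sketch: the mode weights H(j) sum to approximately 1 when summed over
# many modes (the truncation here is illustrative).
# js = np.arange(1, 1000)
# H(js).sum()  # close to 1 for j_star = 3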
def B(om, f=f_30):
"""The frequency part of the GM spectrum."""
return 2.*f/(np.pi*om*np.sqrt(om**2 - f**2))
def E(om, j):
return B(om)*H(j)*E0
def F_disp(om, N, j, f=f_30):
"""Displacement spectra."""
return b**2*N0*(om**2 - f**2)*E(om, j)/(N*om**2)
def F_vel(om, N, j, f=f_30):
"""Horizontal velocity spectra."""
return b**2*N0*N*(om**2 + f**2)*E(om, j)/om**2
def F_eng(om, N, j):
"""Energy per unit mass spectra."""
return b**2*N0*N*E(om, j)
def F_str(om, N, j, f=f_30):
pass
def F_she(om, N, j, f=f_30):
pass
# case upper('Str')
# R = (2*pi*kz).^2*(b.^2*N0/N.*(om.^2-f.^2)./om.^2);
# case upper('She')
# R = (2*pi*kz).^2*(b.^2*N0*N*(om.^2+f.^2)./om.^2);
#def m(om, N, j):
# """Convert from frequency space to vertical wavenumber space."""
# return (np.pi/b)*np.sqrt((N**2 - om**2)/(N0**2 - om**2))*j
#
#
#def k(om, N, j, f=f_30):
# """Convert from frequency space to horizontal wavenumber space."""
# return (np.pi/b)*np.sqrt((om**2 - f**2)/(N0**2 - om**2))*j
#
#
#def Emk(k, m, E_star=E0, N=N0, f=f_30, m_star=3*np.pi/b):
# """The GM spectra in k and m space as defined in Cushman-Roisin."""
# num = 3*f*N*E_star*m/m_star
# den = np.pi*(1 + m/m_star)**(2.5) * (N**2 * k**2 + f**2 * m**2)
# return num/den
def beta_star(N, j_star=3.):
return np.pi*j_star*N/(b*N0)
def E_vel_z(m, N, j_star=3.):
"""Horizontal velocity spectra as a function of vertical wavenumber. """
return 3*E0*b**3*N0**2/(2*j_star*np.pi*(1 + m/beta_star(N, j_star))**2)
def E_she_z(m, N, j_star=3.):
"""Vertical shear of horizontal velocity as a function of vertical
wavenumber. To normalise by N, divide return by N."""
return m**2 * E_vel_z(m, N, j_star)/N
def E_disp_z(m, N, j_star=3.):
"""Vertical displacement as a function of vertical wavenumber."""
num = E0*b**3*N0**2
den = 2*j_star*np.pi*N**2 * (1 + m/beta_star(N, j_star))**2
return num/den
def E_str_z(m, N, j_star=3.):
"""Vertical strain as a function of vertical wavenumber."""
return m**2 * E_disp_z(m, N, j_star)
def E_str_omk(om, k, f, N, j_star=3, rolloff=True, Er=E0):
"""Horizontal strain as a function of frequency and horizontal wavenumber.
Kunze et. al. 2015 Appendix
"""
A = (om**2 + f**2)/om**5
B = k**2/(k*N0*b + np.pi*np.sqrt(om**2 - f**2)*j_star)**2
S = np.pi*E0*N*(N0**2)*f*(b**3)*j_star*A*B
if rolloff:
m = k*N/np.sqrt(om**2 - f**2)
mc = np.pi*Er/(5.*E0)
r = mc/m
r[m < mc] = 1.
S *= r
return S
def E_str_k(k, f, N, j_star=3, rolloff=True, Er=E0):
"""Horizontal strain as a function horizontal wavenumber. It is equal to
the function E_str_omk integrated between f and N.
Kunze et. al. 2015 Appendix
"""
eps = 0.0001
om = np.logspace((1.-eps)*np.log10(f), (1.+eps)*np.log10(N), 1000)
omg, kg = np.meshgrid(om, k)
S = E_str_omk(omg, kg, f, N, j_star=j_star, rolloff=rolloff, Er=Er)
return np.trapz(S, om, axis=1)
def E_VKE(m, f, N, j_star=3, b_=b):
"""Thurnherr 2015, takes angular m presumably."""
return np.pi*E0*b_*np.abs(f)*N0*j_star/(m**2 + beta_star(N, j_star)**2)
if __name__ == '__main__':
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from scipy.integrate import cumtrapz
matplotlib.rc('font', **{'size': 8})
N = 5.2e-3
f = 7.292e-5 # f at 30 degrees.
# f = 1.031e-4 # f at 45 degrees.
# %% Example of shear and strain.
m = np.logspace(-4, 0, 100)
G = GM(N, f, **GM76)
fig, axs = plt.subplots(1, 2, figsize=(3.125, 3))
fig.tight_layout()
axs[0].loglog(m, G.Sm(m, 'vert_shear'), color='k')
axs[0].set_ylim(1e-6, 1e-3)
axs[0].set_xticks([1e-4, 1e-2, 1e0])
axs[1].loglog(m, G.Sm(m, 'vert_strain'), color='k')
axs[1].set_ylim(1e-2, 1e1)
axs[1].set_xticks([1e-4, 1e-2, 1e0])
# %% Variation in parameters.
m = np.logspace(-4, 0, 100)
GM76j6 = GM76.copy()
GM76j6['jstar'] = 6.
fig, ax = plt.subplots(1, 1, figsize=(3.125, 3))
clrs = ['k', 'r', 'g']
for i, params in enumerate([GM76, GM91, GM76j6]):
G = GM(N, f, **params)
ax.loglog(m, G.Sm(m, 'vert_shear'), color=clrs[i])
ax.set_ylim(1e-6, 1e-3)
ax.legend(['GM76 $j_* = 3$', 'GM91 $j_* = 3$', 'GM76 $j_* = 6$'], loc=0)
# %% Experiment with roll-off.
Ef = np.array([10., 3., 1.1, 0.3])
m = np.logspace(-4, 0, 100)
clrs = ['r', 'g', 'b', 'm']
fig, ax = plt.subplots(1, 1, figsize=(3.125, 3))
# No rolloff case:
G = GM(N, f, **GM76)
ax.loglog(m, G.Sm(m, 'vert_shear'), color='k')
for i in range(len(Ef)):
G = GM(N, f, Ef=Ef[i], **GM76)
Sshear = G.Sm(m, 'vert_shear', rolloff=True)
ax.loglog(m, Sshear, color=clrs[i])
ax.set_ylim(1e-6, 2e-3)
ax.legend(['No roll-off', 'Ef = 10', 'Ef = 3', 'Ef = 1.1', 'Ef = 0.3'],
loc=0)
# %% Frequency spectra
om = np.logspace(np.log10(f), np.log10(N), 100)
G = GM(N, f, **GM76)
fig, ax = plt.subplots(1, 1, figsize=(3.125, 3))
Sshear = G.Som(om, 'vert_shear')
Sshear[0] = 0. # Because value at f is very large.
ax.loglog(om, Sshear, color='k')
# %% Combined vertical frequency spectra
eps = f/N
Nom = 1000.
phi = np.arange(1, Nom+1)*np.arccos(eps)/Nom
om = f/np.cos(phi)
m = np.logspace(-4., 1., 1000)
G = GM(N, f, Ef=1., **GM76)
Somm = G.Somm(om, m, 'horiz_vel')
gs = gridspec.GridSpec(2, 2, width_ratios=[3,1], height_ratios=[2,1])
fig = plt.figure(figsize=(6.5, 4))
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax3 = plt.subplot(gs[2])
ax1.pcolormesh(om, m, np.log10(Somm))
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlim(om[0], om[-1])
ax1.set_ylim(m[0], m[-1])
plt.setp(ax1.get_xticklabels(), visible=False)
for Nom in [2, 5, 20]:
Sm = G.Sm(m, 'horiz_vel', Nom, rolloff=True)
ax2.loglog(Sm, m, color='r')
ax2.set_xlim(1e-10, 1e10)
ax2.set_ylim(m[0], m[-1])
plt.setp(ax2.get_yticklabels(), visible=False)
ax2.set_xticks([1e-10, 1e0, 1e10])
clrs = ['k', 'r', 'g', 'b']
for i, Nm in enumerate([1000, 5, 50, 500]):
Som = G.Som(om, 'horiz_vel', Nm, rolloff=True)
ax3.loglog(om, Som, color=clrs[i])
ax3.set_xlim(om[0], om[-1])
ax3.set_ylim(1e-4, 2e3)
ax3.set_yticks([1e-4, 1e-2, 1e0, 1e2])
# %% Check of the buoyancy scaling
m = np.logspace(-4., 1., 1000)
G1 = GM(N, f, Ef=1., **GM76)
G2 = GM(2.*N, f, Ef=1., **GM76)
S1 = G1.Sm(m, 'vert_shear', rolloff=True)
S2 = G2.Sm(m, 'vert_shear', rolloff=True)
fig, axs = plt.subplots(2, 1, sharex='col', figsize=(3.125, 3))
fig.tight_layout()
axs[0].loglog(m, S1, color='k')
axs[0].loglog(m, S2, color='b')
axs[0].set_ylim(1e-8, 1e-2)
axs[1].loglog(m, cumtrapz(S1, m, initial=0.)/N**2, color='k')
axs[1].loglog(m, cumtrapz(S2, m, initial=0.)/(2.*N)**2, color='b')
axs[1].set_ylim(1e-4, 1e1)
# %% Horizontal spectra
k = np.logspace(-4, -1, 100)
m = np.logspace(-4, 1, 100)
G = GM(N, f, Ef=1., **GM76)
Skm = np.log10(G.Skm(k, m, 'vert_disp', rolloff=True))
Skm = np.ma.masked_invalid(Skm)
Sk = G.Sk(k, 'vert_disp', rolloff=True)
gs = gridspec.GridSpec(2, 1, height_ratios=[2,1])
fig = plt.figure(figsize=(3.125, 4))
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax1.pcolormesh(k, m, Skm)
ax1.set_xscale('log')
ax1.set_yscale('log')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2.loglog(k, Sk*(2*np.pi*k)**2, color='k')
# ax2.loglog(k, G.Sk(k, 'vert_strain', rolloff=True), color='b')
# %% Check spectrum well formed
m = np.logspace(-4, 1, 1100)
k = np.logspace(-6, 1, 1100)
G = GM(N, f, Ef=1., **GM76)
Skm = G.Skm(k, m, 'vert_disp', rolloff=True)
Sm = G.Sm(m, 'vert_disp', rolloff=True)
fig = plt.figure(figsize=(3.125, 4))
plt.loglog(m, np.pi*0.5*diff_Pspec(m, np.trapz(Skm, k)), 'k--')
plt.loglog(m, diff_Pspec(m, Sm), 'k')
# Horizontal strain as a function of horizontal wavenumber
# k = np.logspace(-6, 0, 200)
# om = np.logspace(np.log10(f), np.log10(N), 150)
# omg, kg = np.meshgrid(om, k)
#
# mc = np.pi/5.
# kc = mc*np.sqrt((om**2 - f**2)/(N**2 - om**2))/(2.*np.pi)
#
# Somk = E_str_omk(omg, 2.*np.pi*kg, f, N, True)
# Sk = E_str_k(2.*np.pi*k, f, N, True)
# kmax = kg[np.unravel_index(Somk.argmax(), Somk.shape)]
#
# fig, axs = plt.subplots(2, 1, gridspec_kw={'height_ratios':[1, 2]})
# c = axs[1].contourf(1000.*k, 1800.*om/np.pi, 2.*np.pi*Somk.T,
# cmap=plt.get_cmap('afmhot'))
# axs[1].plot(1000.*kc, 1800.*om/np.pi, color='b')
# axs[1].vlines(1000.*kmax, *axs[1].get_ylim(), color='b')
# axs[1].set_xlim(np.min(1000.*k), np.max(1000.*k))
# axs[1].set_xscale('log')
# axs[1].set_yscale('log')
# plt.colorbar(c, orientation='horizontal')
#
# axs[1].set_ylabel('Frequency (cph)')
# axs[1].set_xlabel('Horizontal wavenumber $k$ (cpkm)')
#
# axs[0].loglog(1000.*k, 2.*np.pi*Sk)
# axs[0].set_ylabel('Horizontal strain variance (')
```
#### File: ocean_tools/ocean_tools/window.py
```python
import numpy as np
from . import utils
def chunk(x, x_range, y):
"""Chunk returns slices of arrays x and y given some range of x values.
Parameters
----------
x : array_like
Monotonically increasing values.
x_range : sequence
Should contain (min, max) value at which to slice x and y.
y : array_like
Arbitrary values.
Returns
-------
x_chunk : array_like
Values of x that fall in the range x_range.
y_chunk : array_like
values of y that fall in the range x_range.
"""
if len(x_range) != 2:
raise ValueError('x_range must be a sequence of two numbers only.')
s = slice(*np.searchsorted(x, x_range))
return x[s], y[s]
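# Illustrative use with simple made-up arrays:
# x = np.arange(10.0)
# xc, yc = chunk(x, (2.0, 5.0), x**2)
# xc  # -> array([2., 3., 4.])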
def window(x, y, width, overlap=0., x_0=None, expansion=None, cap_left=True,
cap_right=True, ret_x=True):
"""Break arrays x and y into slices.
Parameters
----------
x : array_like
Monotonically increasing numbers. If x is not monotonically increasing
then it will be flipped, beware that this may not have the desired
effect.
y : array_like
Arbitrary values, same size as x.
width : float
Window width in the same units as x.
overlap : float, optional
Overlap of windows in the same units as x. If negative, the window
steps along x values rather than binning.
x_0 : float, optional
Position in x at which to start windowing. (untested)
expansion : polynomial coefficients, optional
Describes the rate of change of window size with x. (not implemented)
The idea is that width = width*np.polyval(expansion, x). Overlap is
similarly increased.
cap_left : boolean, optional
Stop window exceeding left most (minimum) value of x. Only applies when
overlap is positive.
cap_right : boolean, optional
Stop window exceeding right most (maximum) value of x. Only applies
when overlap is positive.
Returns
-------
vals : numpy.array
Contains all the windowed chunks of x and y.
Notes
-----
The current check on monotonicity is whether more than 20% of points in
x are not monotonic. This is a sort of hack to avoid flipping for the
occasional erroneous non-monotonic point.
"""
if x.size != y.size:
raise ValueError('x and y must be of equal size.')
if overlap > width:
raise ValueError('The overlap cannot be larger than the width.')
# Incredibly bad check for monotonicity.
not_monotonic = np.sum(np.diff(x) < 0) > 0.2*len(x)
if not_monotonic:
x = utils.flip_padded(x)
y = utils.flip_padded(y)
if x_0 is not None:
idxs = ~np.isnan(x) & (x >= x_0)
else:
idxs = ~np.isnan(x)
x = x[idxs]
y = y[idxs]
if overlap < 0.:
left = x - width/2.
right = left + width
elif overlap >= 0.:
step = width - overlap
if cap_left:
xmin = x[0]
else:
xmin = x[0] - width
if cap_right:
# Take away slightly less than the full width to allow for the last
# bin to complete the full range.
xmax = x[-1] - 0.99*width
else:
xmax = x[-1]
left = np.arange(xmin, xmax, step)
right = left + width
bins = np.transpose(np.vstack((left, right)))
if ret_x:
vals = np.asarray([chunk(x, b, y) for b in bins])
else:
vals = np.asarray([chunk(x, b, y)[1] for b in bins])
if not_monotonic:
vals = np.flipud(vals)
return vals
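# Usage sketch (hypothetical profile data): split into 10 m wide windows that
# overlap by 5 m, then operate on each chunk.
# for x_w, y_w in window(depth, temperature, width=10.0, overlap=5.0):
#     ...  # e.g. fit a polynomial or take a mean over each window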
def moving_polynomial_smooth(x, y, width=25., deg=1, expansion=None):
"""Smooth y using a moving polynomial fit.
Parameters
----------
x : array_like
Monotonically increasing numbers. If x is not monotonically increasing
then it will be flipped, beware that this may not have the desired
effect.
y : array_like
Arbitrary values, same size as x.
width : float
Window width in the same units as x.
deg : int
Degree of the polynomial with which to smooth.
expansion : polynomial coefficients
Describes the rate of change of window size with x. (not implemented)
The idea is that width = width_0*np.polyval(expansion, x)
Returns
-------
y_out : numpy.array
Smoothed y.
"""
vals = window(x, y, width=width, overlap=-1, expansion=expansion)
idxs = ~np.isnan(x)
y_out = np.nan*np.zeros_like(x)
xp = x[idxs]
yp = y_out[idxs]
for i, val in enumerate(vals):
p = np.polyfit(val[0], val[1], deg)
yp[i] = np.polyval(p, xp[i])
y_out[idxs] = yp
return y_out
def moving_mean_smooth(x, y, width=25., expansion=None):
"""Smooth y using a moving mean.
Parameters
----------
x : array_like
Monotonically increasing numbers. If x is not monotonically increasing
then it will be flipped, beware that this may not have the desired
effect.
y : array_like
Arbitrary values, same size as x.
width : float
Window width in the same units as x.
expansion : polynomial coefficients
Describes the rate of change of window size with x. (not implemented)
The idea is that width = width_0*np.polyval(expansion, x)
Returns
-------
y_out : numpy.array
Smoothed y.
"""
vals = window(x, y, width=width, overlap=-1, expansion=expansion)
idxs = ~np.isnan(x)
y_out = np.nan*np.zeros_like(x)
yp = y_out[idxs]
for i, val in enumerate(vals):
yp[i] = np.mean(val[1])
y_out[idxs] = yp
return y_out
```
|
{
"source": "jessecusack/pyobjmap",
"score": 3
}
|
#### File: pyobjmap/pyobjmap/covariance.py
```python
import numpy as np
import scipy.optimize as opt
import scipy.stats as stats
from . import matrix as mat
# class Gauss(object):
# def __init__(self, A, l):
# self.A = A
# self.l = l
# def Cr(self, r)
# return self.A*np.exp(-0.5*(r/self.l)**2)
# def
def gauss(r, A, l):
"""Gaussian"""
return A * np.exp(-0.5 * (r / l) ** 2)
def gauss2d(x, y, A, lx, ly, theta=0, x0=0, y0=0):
"""2D Gaussian with rotation of axis. Rotation in degrees 0 - 360."""
thetar = np.deg2rad(theta)
a = np.cos(thetar) ** 2 / (2 * lx ** 2) + np.sin(thetar) ** 2 / (2 * ly ** 2)
b = -np.sin(2 * thetar) / (4 * lx ** 2) + np.sin(2 * thetar) / (4 * ly ** 2)
c = np.sin(thetar) ** 2 / (2 * lx ** 2) + np.cos(thetar) ** 2 / (2 * ly ** 2)
return A * np.exp(
-(a * (x - x0) ** 2 + 2 * b * (x - x0) * (y - y0) + c * (y - y0) ** 2)
)
def marko(r, A, l):
"""Exponential"""
ra = np.abs(r) / l
return A * (1 + ra) * np.exp(-ra)
def letra(r, A, l):
ra = np.abs(r) / l
rsq = ra ** 2
return A * np.exp(-ra) * (1 + ra + rsq / 6 - ra * rsq / 6)
def funccheck(func):
if callable(func):
cfunc = func
elif func == "gauss":
cfunc = gauss
elif func == "marko":
cfunc = marko
elif func == "letra":
cfunc = letra
elif func == "gauss2d":
cfunc = gauss2d
else:
raise ValueError("func = {} not supported.".format(cov_func))
return cfunc
def bincovr(x, y, z, bins=10, origin="mean"):
if origin is None:
pass
elif origin == "mean":
x = x - x.mean()
y = y - y.mean()
else:
raise ValueError("Origin can be mean only for now.")
# Construct distance matrix.
R = mat.r_distance(x, y)
itri, jtri = np.triu_indices_from(R)
# remove mean before calculating covariance
zdetrend = z - z.mean()
# Covariance matrix
C = np.outer(zdetrend, zdetrend)
Cr, rbins, _ = stats.binned_statistic(
R[itri, jtri], C[itri, jtri], statistic="mean", bins=bins
)
return rbins, Cr
def bincovxy(x, y, z, bins=10):
xdist, ydist = mat.xy_distance(x, y)
# remove mean before calculating covariance
zdetrend = z - z.mean()
# Covariance matrix
C = np.outer(zdetrend, zdetrend)
itri, jtri = np.triu_indices_from(C)
Cxy, xbins, ybins, _ = stats.binned_statistic_2d(
xdist[itri, jtri], ydist[itri, jtri], C[itri, jtri], statistic="mean", bins=bins
)
return xbins, ybins, Cxy.T
def bincovxyabs(x, y, z, bins=10):
xdist, ydist = mat.xy_distance(x, y)
# remove mean before calculating covariance
zdetrend = z - z.mean()
# Covariance matrix
C = np.outer(zdetrend, zdetrend)
itri, jtri = np.triu_indices_from(C)
Cxy, xbins, ybins, _ = stats.binned_statistic_2d(
xdist[itri, jtri], ydist[itri, jtri], C[itri, jtri], statistic="mean", bins=bins
)
return xbins, ybins, Cxy.T
def bincovxyuv(x, y, u, v, bins=10):
xdist, ydist = mat.xy_distance(x, y)
# remove mean before calculating covariance
udetrend = u - u.mean()
vdetrend = v - v.mean()
# Covariance matrix
C = np.outer(udetrend, vdetrend)
itri, jtri = np.triu_indices_from(C)
Cxy, xbins, ybins, _ = stats.binned_statistic_2d(
xdist[itri, jtri], ydist[itri, jtri], C[itri, jtri], statistic="mean", bins=bins
)
return xbins, ybins, Cxy.T
def covfit(x, y, z, bins=10, cfunc="gauss", p0=[1, 1], rfitmax=None):
cfunc = funccheck(cfunc)
rbins, Cr = bincovr(x, y, z, bins=bins)
r = 0.5 * (rbins[1:] + rbins[:-1])
if rfitmax is None:
raise ValueError("rfitmax cannot be None.")
infit = r <= rfitmax
popt, _ = opt.curve_fit(cfunc, r[infit], Cr[infit], p0=p0)
return popt
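# Usage sketch (x, y, z are hypothetical scattered observations; the length
# scales are illustrative): fit an isotropic Gaussian covariance out to 100 km.
# A_fit, l_fit = covfit(x, y, z, bins=30, cfunc="gauss", p0=[z.var(), 50e3], rfitmax=100e3)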
# Gaussian covariance functions for velocity and streamfunction
def Cpsipsi(x, y, A, l):
r = np.sqrt(x ** 2 + y ** 2)
return A * np.exp(-0.5 * r ** 2 / l ** 2)
def Cuu(x, y, A, l):
r = np.sqrt(x ** 2 + y ** 2)
return A * (l ** 2 - y ** 2) * np.exp(-0.5 * r ** 2 / l ** 2) / l ** 4
def Cvv(x, y, A, l):
r = np.sqrt(x ** 2 + y ** 2)
return A * (l ** 2 - x ** 2) * np.exp(-0.5 * r ** 2 / l ** 2) / l ** 4
def Cuv(x, y, A, l):
r = np.sqrt(x ** 2 + y ** 2)
return A * x * y * np.exp(-0.5 * r ** 2 / l ** 2) / l ** 4
def Cpsiu(x, y, A, l):
r = np.sqrt(x ** 2 + y ** 2)
return A * y * np.exp(-0.5 * r ** 2 / l ** 2) / l ** 2
def Cpsiv(x, y, A, l):
r = np.sqrt(x ** 2 + y ** 2)
return -A * x * np.exp(-0.5 * r ** 2 / l ** 2) / l ** 2
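# These Gaussian forms are consistent with velocities derived from a
# streamfunction (u, v from derivatives of psi, up to sign convention). A quick
# zero-separation sanity check with illustrative A and l:
# A, l = 1.0, 50.0e3
# Cuu(0.0, 0.0, A, l) == Cvv(0.0, 0.0, A, l) == A / l ** 2  # -> True
# Cuv(0.0, 0.0, A, l) == Cpsiu(0.0, 0.0, A, l) == Cpsiv(0.0, 0.0, A, l) == 0.0  # -> True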
```
#### File: pyobjmap/pyobjmap/matrix.py
```python
import numpy as np
from . import utils
def tile_position(x0, y0, x1=None, y1=None):
"""Need doc string..."""
if x1 is None and y1 is None:
x1 = x0
y1 = y0
if (x0.size != y0.size) or (x1.size != y1.size):
raise ValueError("x0 and y0 or x1 and y1 size do not match.")
x0g = np.tile(x0.ravel()[:, np.newaxis], (1, x1.size))
y0g = np.tile(y0.ravel()[:, np.newaxis], (1, x1.size))
x1g = np.tile(x1.ravel()[np.newaxis, :], (x0.size, 1))
y1g = np.tile(y1.ravel()[np.newaxis, :], (x0.size, 1))
return x0g, y0g, x1g, y1g
def xy_distance(x0, y0, x1=None, y1=None):
"""
Output x and y distance matrices.
If x1 and y1 are not supplied we calculate the auto-distance matrices.
"""
if x1 is None and y1 is None:
x1 = x0
y1 = y0
dx = x0.ravel()[:, np.newaxis] - x1.ravel()[np.newaxis, :]
dy = y0.ravel()[:, np.newaxis] - y1.ravel()[np.newaxis, :]
return dx, dy
def r_distance(x0, y0, x1=None, y1=None, coords="cartesian"):
"""
Distance matrix.
If x1 and y1 are not supplied we calculate the auto-distance matrix.
"""
if coords == "cartesian":
dx, dy = xy_distance(x0, y0, x1, y1)
r = np.sqrt(dx ** 2 + dy ** 2)
elif coords == "latlon":
r = utils.haversine_distance(*tile_position(x0, y0, x1, y1))
return r
```
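For reference, a small hedged example of what `xy_distance` and `r_distance` compute for Cartesian coordinates; the three points are arbitrary and plain numpy is used so the snippet runs stand-alone.
```python
# Hedged usage sketch for xy_distance()/r_distance() above, written with plain
# numpy so it runs stand-alone; the points are arbitrary.
import numpy as np

x = np.array([0.0, 1.0, 3.0])
y = np.array([0.0, 0.0, 4.0])

dx = x[:, np.newaxis] - x[np.newaxis, :]    # what xy_distance(x, y) returns as dx
dy = y[:, np.newaxis] - y[np.newaxis, :]
r = np.sqrt(dx ** 2 + dy ** 2)              # what r_distance(x, y) returns
print(r[0, 2])                              # 5.0: distance between (0, 0) and (3, 4)
```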
|
{
"source": "JesseDavids/mqtta",
"score": 3
}
|
#### File: JesseDavids/mqtta/list_plugin.py

```python
import utility as utility
import time
import json
import paho.mqtt.client as mqtt
"""
topic = workstation/list/
lists all active devices listening to the specified broker IP
"""
class Plugin:
def process(self):
utilities = utility.Utility()
ipAddress = utilities.ip()
DV = utilities.DynamicVariable()
hostname = utilities.host()
BROKER = utilities.broker()
while True:
broker = BROKER
client = mqtt.Client(ipAddress)
client.connect(broker)
client.loop_start()
client.publish("workstation/list", str(hostname + ": " + ipAddress), 2, False)
break
time.sleep(10)
client.disconnect()
quit()
```
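A hedged companion sketch, not part of mqtta: a simple subscriber that collects the `hostname: ip` strings the list plugin publishes. The broker address and client id are placeholders, and the paho-mqtt 1.x `Client` API used in the plugin above is assumed.
```python
# Hedged sketch: collect the "hostname: ip" strings published on workstation/list.
# Broker IP and client id are placeholders; assumes the paho-mqtt 1.x API used above.
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    print(msg.payload.decode("utf-8"))   # e.g. "some-host: 192.168.1.42"

client = mqtt.Client("list-collector")
client.on_message = on_message
client.connect("192.168.1.10")           # hypothetical broker IP
client.subscribe("workstation/list")
client.loop_forever()
```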
#### File: JesseDavids/mqtta/ping_plugin.py
```python
import utility as utility
import time
import json
import paho.mqtt.client as mqtt
from icmplib import ping
Ping_Help = (
"\nHOW TO USE THE PING PLUGIN"
"\n"
"\nThis plugin will ping any target PC, with count and interval or default"
"\n"
"\nTopic = workstation/hostname-or-ip/parameter/ping/ <--- plugin requires a message parameter"
"\ni.e Message = 1.1.1.1 20 0.2"
"\nIP = 1.1.1.1 | COUNT = 15 pings | INTERVAL = every 0.2 seconds "
"\n"
"\nIf IP, Count and Interval is not specified the plugin will use default values"
"\nValues = 1.1.1.1 4 0.5"
)
class Plugin:
def process(self):
f = utility.Utility()
ip = f.ip()
#set ping rate in utilities file in the ping object
DV = f.DynamicVariable()
hostname = f.host()
BROKER = f.broker()
subT = f.subtopic()
while True:
broker = BROKER
client = mqtt.Client(ip)
client.connect(broker)
client.loop_start()
ip2 = "1.1.1.1"
count2 = "4"
interval2 = "0.5"
if(subT == "help" and DV == ""):
client.publish(f"workstation/{hostname}/n/ping/help", str(Ping_Help), 2, False)
quit()
elif(subT == "" and DV == ""):
host = ping(str(f'{ip2}'), count = int(count2), interval = float(interval2), privileged=False)
IPstats = {}
IPstats = {
'IP': ip2,
'MIN': host.min_rtt,
'MAX': host.max_rtt,
'AVG': host.avg_rtt,
'Packets Sent': host.packets_sent,
'Packets Received': host.packets_received,
'Packet Loss': host.packet_loss
}
json_IPstats = json.dumps(IPstats, indent=4)
client.publish(f"workstation/{hostname}/n/ping", str(json_IPstats), 2, False)
setattr(f, "logText", str(json_IPstats))
f.log()
break
time.sleep(10)
client.disconnect()
quit()
elif(DV != ""):
ip2, count2, interval2 = DV.split(' ')
host = ping(str(f'{ip2}'), count=int(count2), interval=float(interval2), privileged=False)
IPstats = {}
IPstats = {
'IP': ip2,
'MIN': host.min_rtt,
'MAX': host.max_rtt,
'AVG': host.avg_rtt,
'Packets Sent': host.packets_sent,
'Packets Received': host.packets_received,
'Packet Loss': host.packet_loss
}
json_IPstats = json.dumps(IPstats, indent=4)
client.publish(f"workstation/{hostname}/n/ping", str(json_IPstats), 2, False)
setattr(f, "logText", str(json_IPstats))
f.log()
break
time.sleep(10)
client.disconnect()
quit()
```
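A hedged sketch of the other side of the exchange: publishing a ping request that the plugin above would act on. The hostname, broker address and topic layout follow the help text, but are placeholders here.
```python
# Hedged sketch: ask the agent above to ping 1.1.1.1 twenty times at 0.2 s
# intervals. Hostname, broker IP and topic layout are placeholders taken from
# the help text; assumes paho-mqtt 1.x.
import paho.mqtt.publish as publish

publish.single(
    topic="workstation/my-hostname/parameter/ping/",   # hypothetical hostname
    payload="1.1.1.1 20 0.2",                          # "<ip> <count> <interval>"
    qos=2,
    hostname="192.168.1.10",                           # hypothetical broker IP
)
```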
#### File: JesseDavids/mqtta/traceroute_plugin.py
```python
import subprocess
import utility as utility
import time
import paho.mqtt.client as mqtt
from io import StringIO
Traceroute_Help = (
"\nHOW TO USE THE TRACEROUTE PLUGIN"
"\n"
"\nThis plugin will make a traceroute to desired IP or website name"
"\n"
"\nTopic = workstation/hostname-or-ip/parameter/traceroute/"
"\nrequires a message parameter "
"\nMessage = 1.1.1.1 , or www.google.com "
)
class Plugin:
def process(self):
f = utility.Utility()
ip = f.ip()
hostname = f.host()
BROKER = f.broker()
subT = f.subtopic()
DV = f.DynamicVariable()
while True:
broker = BROKER
client = mqtt.Client(ip)
client.connect(broker)
client.loop_start()
if(subT == "help"):
client.publish(f"workstation/{hostname}/n/traceroute/help", str(Traceroute_Help), 2, False)
elif(subT == ""):
traceroute_result = subprocess.run(['traceroute', f'{DV}'], stdout=subprocess.PIPE)
rt = traceroute_result.stdout.decode('utf-8')
client.publish(f"workstation/{hostname}/n/traceroute", str(rt), 2, False)
setattr(f, 'logText', str(rt))
f.log()
break
time.sleep(0.5)
client.disconnect()
quit()
```
#### File: site-packages/hgdemandimport/tracing.py
```python
from __future__ import absolute_import
import contextlib
import os
_pipe = None
_checked = False
_session = 'none'
def _isactive():
global _pipe, _session, _checked
if _pipe is None:
if _checked:
return False
_checked = True
if 'HGCATAPULTSERVERPIPE' not in os.environ:
return False
_pipe = open(os.environ['HGCATAPULTSERVERPIPE'], 'w', 1)
_session = os.environ.get('HGCATAPULTSESSION', 'none')
return True
@contextlib.contextmanager
def log(whencefmt, *whenceargs):
if not _isactive():
yield
return
whence = whencefmt % whenceargs
try:
# Both writes to the pipe are wrapped in try/except to ignore
# errors, as we can see mysterious errors in here if the pager
# is active. Presumably other conditions could trigger
# problems too.
try:
_pipe.write('START %s %s\n' % (_session, whence))
except IOError:
pass
yield
finally:
try:
_pipe.write('END %s %s\n' % (_session, whence))
except IOError:
pass
def counter(label, amount, *labelargs):
if not _isactive():
return
l = label % labelargs
# See above in log() for why this is in a try/except.
try:
_pipe.write('COUNTER %s %d %s\n' % (_session, amount, l))
except IOError:
pass
```
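A hedged usage sketch for the helpers above. When `HGCATAPULTSERVERPIPE` is unset the calls are no-ops, so the snippet is safe to run; the import path assumes Mercurial is installed.
```python
# Hedged usage sketch: without HGCATAPULTSERVERPIPE set these calls are no-ops.
# The import path assumes Mercurial is installed.
from hgdemandimport import tracing

with tracing.log('loading %s', 'some-extension'):
    pass  # traced work; START/END records are written when tracing is active

tracing.counter('imports %s', 1, 'some-extension')
```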
#### File: site-packages/hgext/beautifygraph.py
```python
from __future__ import absolute_import
from mercurial.i18n import _
from mercurial import (
encoding,
extensions,
graphmod,
pycompat,
templatekw,
)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'
def prettyedge(before, edge, after):
if edge == b'~':
return b'\xE2\x95\xA7' # U+2567 ╧
if edge == b'/':
return b'\xE2\x95\xB1' # U+2571 ╱
if edge == b'-':
return b'\xE2\x94\x80' # U+2500 ─
if edge == b'|':
return b'\xE2\x94\x82' # U+2502 │
if edge == b':':
return b'\xE2\x94\x86' # U+2506 ┆
if edge == b'\\':
return b'\xE2\x95\xB2' # U+2572 ╲
if edge == b'+':
if before == b' ' and not after == b' ':
return b'\xE2\x94\x9C' # U+251C ├
if after == b' ' and not before == b' ':
return b'\xE2\x94\xA4' # U+2524 ┤
return b'\xE2\x94\xBC' # U+253C ┼
return edge
def convertedges(line):
line = b' %s ' % line
pretty = []
for idx in pycompat.xrange(len(line) - 2):
pretty.append(
prettyedge(
line[idx : idx + 1],
line[idx + 1 : idx + 2],
line[idx + 2 : idx + 3],
)
)
return b''.join(pretty)
def getprettygraphnode(orig, *args, **kwargs):
node = orig(*args, **kwargs)
if node == b'o':
return b'\xE2\x97\x8B' # U+25CB ○
if node == b'@':
return b'\xE2\x97\x89' # U+25C9 ◉
if node == b'%':
return b'\xE2\x97\x8D' # U+25CE ◎
if node == b'*':
return b'\xE2\x88\x97' # U+2217 ∗
if node == b'x':
return b'\xE2\x97\x8C' # U+25CC ◌
if node == b'_':
return b'\xE2\x95\xA4' # U+2564 ╤
return node
def outputprettygraph(orig, ui, graph, *args, **kwargs):
(edges, text) = zip(*graph)
graph = zip([convertedges(e) for e in edges], text)
return orig(ui, graph, *args, **kwargs)
def extsetup(ui):
if ui.plain(b'graph'):
return
if encoding.encoding != b'UTF-8':
ui.warn(_(b'beautifygraph: unsupported encoding, UTF-8 required\n'))
return
if 'A' in encoding._wide:
ui.warn(
_(
b'beautifygraph: unsupported terminal settings, '
b'monospace narrow text required\n'
)
)
return
extensions.wrapfunction(graphmod, b'outputgraph', outputprettygraph)
extensions.wrapfunction(templatekw, b'getgraphnode', getprettygraphnode)
```
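A self-contained, simplified illustration of the edge substitution that `prettyedge`/`convertedges` perform above; the real code also handles `b'+'` using the neighbouring characters and goes through `mercurial.pycompat`.
```python
# Hedged, simplified sketch of the edge -> box-drawing substitution above.
# The real prettyedge() also special-cases b'+' using its neighbours.
EDGE_MAP = {
    b'|': '\u2502', b'/': '\u2571', b'\\': '\u2572',
    b'-': '\u2500', b':': '\u2506', b'~': '\u2567',
}

def prettify(line: bytes) -> str:
    return ''.join(EDGE_MAP.get(bytes([c]), chr(c)) for c in line)

print(prettify(b'| | o'))   # -> '│ │ o' (node characters are mapped elsewhere)
```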
#### File: site-packages/hgext/bookflow.py
```python
from __future__ import absolute_import
from mercurial.i18n import _
from mercurial import (
bookmarks,
commands,
error,
extensions,
registrar,
)
MY_NAME = b'bookflow'
configtable = {}
configitem = registrar.configitem(configtable)
configitem(MY_NAME, b'protect', [b'@'])
configitem(MY_NAME, b'require-bookmark', True)
configitem(MY_NAME, b'enable-branches', False)
cmdtable = {}
command = registrar.command(cmdtable)
def commit_hook(ui, repo, **kwargs):
active = repo._bookmarks.active
if active:
if active in ui.configlist(MY_NAME, b'protect'):
raise error.Abort(
_(b'cannot commit, bookmark %s is protected') % active
)
if not cwd_at_bookmark(repo, active):
raise error.Abort(
_(
b'cannot commit, working directory out of sync with active bookmark'
),
hint=_(b"run 'hg up %s'") % active,
)
elif ui.configbool(MY_NAME, b'require-bookmark', True):
raise error.Abort(_(b'cannot commit without an active bookmark'))
return 0
def bookmarks_update(orig, repo, parents, node):
if len(parents) == 2:
# called during commit
return orig(repo, parents, node)
else:
# called during update
return False
def bookmarks_addbookmarks(
orig, repo, tr, names, rev=None, force=False, inactive=False
):
if not rev:
marks = repo._bookmarks
for name in names:
if name in marks:
raise error.Abort(
_(
b"bookmark %s already exists, to move use the --rev option"
)
% name
)
return orig(repo, tr, names, rev, force, inactive)
def commands_commit(orig, ui, repo, *args, **opts):
commit_hook(ui, repo)
return orig(ui, repo, *args, **opts)
def commands_pull(orig, ui, repo, *args, **opts):
rc = orig(ui, repo, *args, **opts)
active = repo._bookmarks.active
if active and not cwd_at_bookmark(repo, active):
ui.warn(
_(
b"working directory out of sync with active bookmark, run "
b"'hg up %s'"
)
% active
)
return rc
def commands_branch(orig, ui, repo, label=None, **opts):
if label and not opts.get('clean') and not opts.get('rev'):
raise error.Abort(
_(
b"creating named branches is disabled and you should use bookmarks"
),
hint=b"see 'hg help bookflow'",
)
return orig(ui, repo, label, **opts)
def cwd_at_bookmark(repo, mark):
mark_id = repo._bookmarks[mark]
cur_id = repo.lookup(b'.')
return cur_id == mark_id
def uisetup(ui):
extensions.wrapfunction(bookmarks, b'update', bookmarks_update)
extensions.wrapfunction(bookmarks, b'addbookmarks', bookmarks_addbookmarks)
extensions.wrapcommand(commands.table, b'commit', commands_commit)
extensions.wrapcommand(commands.table, b'pull', commands_pull)
if not ui.configbool(MY_NAME, b'enable-branches'):
extensions.wrapcommand(commands.table, b'branch', commands_branch)
```
#### File: site-packages/hgext/closehead.py
```python
from __future__ import absolute_import
from mercurial.i18n import _
from mercurial import (
bookmarks,
cmdutil,
context,
error,
pycompat,
registrar,
scmutil,
)
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'
commitopts = cmdutil.commitopts
commitopts2 = cmdutil.commitopts2
commitopts3 = [(b'r', b'rev', [], _(b'revision to check'), _(b'REV'))]
@command(
b'close-head|close-heads',
commitopts + commitopts2 + commitopts3,
_(b'[OPTION]... [REV]...'),
helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
inferrepo=True,
)
def close_branch(ui, repo, *revs, **opts):
"""close the given head revisions
This is equivalent to checking out each revision in a clean tree and running
``hg commit --close-branch``, except that it doesn't change the working
directory.
The commit message must be specified with -l or -m.
"""
def docommit(rev):
cctx = context.memctx(
repo,
parents=[rev, None],
text=message,
files=[],
filectxfn=None,
user=opts.get(b'user'),
date=opts.get(b'date'),
extra=extra,
)
tr = repo.transaction(b'commit')
ret = repo.commitctx(cctx, True)
bookmarks.update(repo, [rev, None], ret)
cctx.markcommitted(ret)
tr.close()
opts = pycompat.byteskwargs(opts)
revs += tuple(opts.get(b'rev', []))
revs = scmutil.revrange(repo, revs)
if not revs:
raise error.Abort(_(b'no revisions specified'))
heads = []
for branch in repo.branchmap():
heads.extend(repo.branchheads(branch))
heads = {repo[h].rev() for h in heads}
for rev in revs:
if rev not in heads:
raise error.Abort(_(b'revision is not an open head: %d') % rev)
message = cmdutil.logmessage(ui, opts)
if not message:
raise error.Abort(_(b"no commit message specified with -l or -m"))
extra = {b'close': b'1'}
with repo.wlock(), repo.lock():
for rev in revs:
r = repo[rev]
branch = r.branch()
extra[b'branch'] = branch
docommit(r)
return 0
```
#### File: hgext/convert/transport.py
```python
from __future__ import absolute_import
import svn.client
import svn.core
import svn.ra
Pool = svn.core.Pool
SubversionException = svn.core.SubversionException
from mercurial.pycompat import getattr
from mercurial import util
# Some older versions of the Python bindings need to be
# explicitly initialized. But what we want to do probably
# won't work worth a darn against those libraries anyway!
svn.ra.initialize()
svn_config = None
def _create_auth_baton(pool):
"""Create a Subversion authentication baton. """
import svn.client
# Give the client context baton a suite of authentication
    # providers.
providers = [
svn.client.get_simple_provider(pool),
svn.client.get_username_provider(pool),
svn.client.get_ssl_client_cert_file_provider(pool),
svn.client.get_ssl_client_cert_pw_file_provider(pool),
svn.client.get_ssl_server_trust_file_provider(pool),
]
# Platform-dependent authentication methods
getprovider = getattr(
svn.core, 'svn_auth_get_platform_specific_provider', None
)
if getprovider:
# Available in svn >= 1.6
for name in (b'gnome_keyring', b'keychain', b'kwallet', b'windows'):
for type in (b'simple', b'ssl_client_cert_pw', b'ssl_server_trust'):
p = getprovider(name, type, pool)
if p:
providers.append(p)
else:
if util.safehasattr(svn.client, b'get_windows_simple_provider'):
providers.append(svn.client.get_windows_simple_provider(pool))
return svn.core.svn_auth_open(providers, pool)
class NotBranchError(SubversionException):
pass
class SvnRaTransport(object):
"""
Open an ra connection to a Subversion repository.
"""
def __init__(self, url=b"", ra=None):
self.pool = Pool()
self.svn_url = url
self.username = b''
self.password = b''
# Only Subversion 1.4 has reparent()
if ra is None or not util.safehasattr(svn.ra, b'reparent'):
self.client = svn.client.create_context(self.pool)
ab = _create_auth_baton(self.pool)
self.client.auth_baton = ab
global svn_config
if svn_config is None:
svn_config = svn.core.svn_config_get_config(None)
self.client.config = svn_config
try:
self.ra = svn.client.open_ra_session(
self.svn_url, self.client, self.pool
)
except SubversionException as xxx_todo_changeme:
(inst, num) = xxx_todo_changeme.args
if num in (
svn.core.SVN_ERR_RA_ILLEGAL_URL,
svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
svn.core.SVN_ERR_BAD_URL,
):
raise NotBranchError(url)
raise
else:
self.ra = ra
svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))
class Reporter(object):
def __init__(self, reporter_data):
self._reporter, self._baton = reporter_data
def set_path(self, path, revnum, start_empty, lock_token, pool=None):
svn.ra.reporter2_invoke_set_path(
self._reporter,
self._baton,
path,
revnum,
start_empty,
lock_token,
pool,
)
def delete_path(self, path, pool=None):
svn.ra.reporter2_invoke_delete_path(
self._reporter, self._baton, path, pool
)
def link_path(
self, path, url, revision, start_empty, lock_token, pool=None
):
svn.ra.reporter2_invoke_link_path(
self._reporter,
self._baton,
path,
url,
revision,
start_empty,
lock_token,
pool,
)
def finish_report(self, pool=None):
svn.ra.reporter2_invoke_finish_report(
self._reporter, self._baton, pool
)
def abort_report(self, pool=None):
svn.ra.reporter2_invoke_abort_report(
self._reporter, self._baton, pool
)
def do_update(self, revnum, path, *args, **kwargs):
return self.Reporter(
svn.ra.do_update(self.ra, revnum, path, *args, **kwargs)
)
```
#### File: hgext/fsmonitor/watchmanclient.py
```python
from __future__ import absolute_import
import getpass
from mercurial import (
encoding,
util,
)
from mercurial.utils import (
procutil,
stringutil,
)
from . import pywatchman
class Unavailable(Exception):
def __init__(self, msg, warn=True, invalidate=False):
self.msg = msg
self.warn = warn
if self.msg == b'timed out waiting for response':
self.warn = False
self.invalidate = invalidate
def __bytes__(self):
if self.warn:
return b'warning: Watchman unavailable: %s' % self.msg
else:
return b'Watchman unavailable: %s' % self.msg
__str__ = encoding.strmethod(__bytes__)
class WatchmanNoRoot(Unavailable):
def __init__(self, root, msg):
self.root = root
super(WatchmanNoRoot, self).__init__(msg)
class client(object):
def __init__(self, ui, root, timeout=1.0):
err = None
if not self._user:
err = b"couldn't get user"
warn = True
if self._user in ui.configlist(b'fsmonitor', b'blacklistusers'):
err = b'user %s in blacklist' % self._user
warn = False
if err:
raise Unavailable(err, warn)
self._timeout = timeout
self._watchmanclient = None
self._root = root
self._ui = ui
self._firsttime = True
def settimeout(self, timeout):
self._timeout = timeout
if self._watchmanclient is not None:
self._watchmanclient.setTimeout(timeout)
def getcurrentclock(self):
result = self.command(b'clock')
if not util.safehasattr(result, 'clock'):
raise Unavailable(
b'clock result is missing clock value', invalidate=True
)
return result.clock
def clearconnection(self):
self._watchmanclient = None
def available(self):
return self._watchmanclient is not None or self._firsttime
@util.propertycache
def _user(self):
try:
return getpass.getuser()
except KeyError:
# couldn't figure out our user
return None
def _command(self, *args):
watchmanargs = (args[0], self._root) + args[1:]
try:
if self._watchmanclient is None:
self._firsttime = False
watchman_exe = self._ui.configpath(
b'fsmonitor', b'watchman_exe'
)
self._watchmanclient = pywatchman.client(
timeout=self._timeout,
useImmutableBser=True,
binpath=procutil.tonativestr(watchman_exe),
)
return self._watchmanclient.query(*watchmanargs)
except pywatchman.CommandError as ex:
if 'unable to resolve root' in ex.msg:
raise WatchmanNoRoot(
self._root, stringutil.forcebytestr(ex.msg)
)
raise Unavailable(stringutil.forcebytestr(ex.msg))
except pywatchman.WatchmanError as ex:
raise Unavailable(stringutil.forcebytestr(ex))
def command(self, *args):
try:
try:
return self._command(*args)
except WatchmanNoRoot:
# this 'watch' command can also raise a WatchmanNoRoot if
# watchman refuses to accept this root
self._command(b'watch')
return self._command(*args)
except Unavailable:
            # this is in an outer scope to catch Unavailable from any of the
# above _command calls
self._watchmanclient = None
raise
```
#### File: hgext/git/dirstate.py
```python
from __future__ import absolute_import
import contextlib
import errno
import os
from mercurial.node import nullid
from mercurial import (
error,
extensions,
match as matchmod,
pycompat,
scmutil,
util,
)
from mercurial.interfaces import (
dirstate as intdirstate,
util as interfaceutil,
)
from . import gitutil
pygit2 = gitutil.get_pygit2()
def readpatternfile(orig, filepath, warn, sourceinfo=False):
if not (b'info/exclude' in filepath or filepath.endswith(b'.gitignore')):
return orig(filepath, warn, sourceinfo=False)
result = []
warnings = []
with open(filepath, b'rb') as fp:
for l in fp:
l = l.strip()
if not l or l.startswith(b'#'):
continue
if l.startswith(b'!'):
warnings.append(b'unsupported ignore pattern %s' % l)
continue
if l.startswith(b'/'):
result.append(b'rootglob:' + l[1:])
else:
result.append(b'relglob:' + l)
return result, warnings
extensions.wrapfunction(matchmod, b'readpatternfile', readpatternfile)
_STATUS_MAP = {}
if pygit2:
_STATUS_MAP = {
pygit2.GIT_STATUS_CONFLICTED: b'm',
pygit2.GIT_STATUS_CURRENT: b'n',
pygit2.GIT_STATUS_IGNORED: b'?',
pygit2.GIT_STATUS_INDEX_DELETED: b'r',
pygit2.GIT_STATUS_INDEX_MODIFIED: b'n',
pygit2.GIT_STATUS_INDEX_NEW: b'a',
pygit2.GIT_STATUS_INDEX_RENAMED: b'a',
pygit2.GIT_STATUS_INDEX_TYPECHANGE: b'n',
pygit2.GIT_STATUS_WT_DELETED: b'r',
pygit2.GIT_STATUS_WT_MODIFIED: b'n',
pygit2.GIT_STATUS_WT_NEW: b'?',
pygit2.GIT_STATUS_WT_RENAMED: b'a',
pygit2.GIT_STATUS_WT_TYPECHANGE: b'n',
pygit2.GIT_STATUS_WT_UNREADABLE: b'?',
pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: 'm',
}
@interfaceutil.implementer(intdirstate.idirstate)
class gitdirstate(object):
def __init__(self, ui, root, gitrepo):
self._ui = ui
self._root = os.path.dirname(root)
self.git = gitrepo
self._plchangecallbacks = {}
def p1(self):
try:
return self.git.head.peel().id.raw
except pygit2.GitError:
# Typically happens when peeling HEAD fails, as in an
# empty repository.
return nullid
def p2(self):
# TODO: MERGE_HEAD? something like that, right?
return nullid
def setparents(self, p1, p2=nullid):
assert p2 == nullid, b'TODO merging support'
self.git.head.set_target(gitutil.togitnode(p1))
@util.propertycache
def identity(self):
return util.filestat.frompath(
os.path.join(self._root, b'.git', b'index')
)
def branch(self):
return b'default'
def parents(self):
# TODO how on earth do we find p2 if a merge is in flight?
return self.p1(), nullid
def __iter__(self):
return (pycompat.fsencode(f.path) for f in self.git.index)
def items(self):
for ie in self.git.index:
yield ie.path, None # value should be a dirstatetuple
# py2,3 compat forward
iteritems = items
def __getitem__(self, filename):
try:
gs = self.git.status_file(filename)
except KeyError:
return b'?'
return _STATUS_MAP[gs]
def __contains__(self, filename):
try:
gs = self.git.status_file(filename)
return _STATUS_MAP[gs] != b'?'
except KeyError:
return False
def status(self, match, subrepos, ignored, clean, unknown):
listclean = clean
# TODO handling of clean files - can we get that from git.status()?
modified, added, removed, deleted, unknown, ignored, clean = (
[],
[],
[],
[],
[],
[],
[],
)
gstatus = self.git.status()
for path, status in gstatus.items():
path = pycompat.fsencode(path)
if not match(path):
continue
if status == pygit2.GIT_STATUS_IGNORED:
if path.endswith(b'/'):
continue
ignored.append(path)
elif status in (
pygit2.GIT_STATUS_WT_MODIFIED,
pygit2.GIT_STATUS_INDEX_MODIFIED,
pygit2.GIT_STATUS_WT_MODIFIED
| pygit2.GIT_STATUS_INDEX_MODIFIED,
):
modified.append(path)
elif status == pygit2.GIT_STATUS_INDEX_NEW:
added.append(path)
elif status == pygit2.GIT_STATUS_WT_NEW:
unknown.append(path)
elif status == pygit2.GIT_STATUS_WT_DELETED:
deleted.append(path)
elif status == pygit2.GIT_STATUS_INDEX_DELETED:
removed.append(path)
else:
raise error.Abort(
b'unhandled case: status for %r is %r' % (path, status)
)
if listclean:
observed = set(
modified + added + removed + deleted + unknown + ignored
)
index = self.git.index
index.read()
for entry in index:
path = pycompat.fsencode(entry.path)
if not match(path):
continue
if path in observed:
continue # already in some other set
if path[-1] == b'/':
continue # directory
clean.append(path)
# TODO are we really always sure of status here?
return (
False,
scmutil.status(
modified, added, removed, deleted, unknown, ignored, clean
),
)
def flagfunc(self, buildfallback):
# TODO we can do better
return buildfallback()
def getcwd(self):
# TODO is this a good way to do this?
return os.path.dirname(
os.path.dirname(pycompat.fsencode(self.git.path))
)
def normalize(self, path):
normed = util.normcase(path)
assert normed == path, b"TODO handling of case folding: %s != %s" % (
normed,
path,
)
return path
@property
def _checklink(self):
return util.checklink(os.path.dirname(pycompat.fsencode(self.git.path)))
def copies(self):
# TODO support copies?
return {}
# # TODO what the heck is this
_filecache = set()
def pendingparentchange(self):
# TODO: we need to implement the context manager bits and
# correctly stage/revert index edits.
return False
def write(self, tr):
# TODO: call parent change callbacks
if tr:
def writeinner(category):
self.git.index.write()
tr.addpending(b'gitdirstate', writeinner)
else:
self.git.index.write()
def pathto(self, f, cwd=None):
if cwd is None:
cwd = self.getcwd()
# TODO core dirstate does something about slashes here
assert isinstance(f, bytes)
r = util.pathto(self._root, cwd, f)
return r
def matches(self, match):
for x in self.git.index:
p = pycompat.fsencode(x.path)
if match(p):
yield p
def normal(self, f, parentfiledata=None):
"""Mark a file normal and clean."""
# TODO: for now we just let libgit2 re-stat the file. We can
# clearly do better.
def normallookup(self, f):
"""Mark a file normal, but possibly dirty."""
# TODO: for now we just let libgit2 re-stat the file. We can
# clearly do better.
def walk(self, match, subrepos, unknown, ignored, full=True):
# TODO: we need to use .status() and not iterate the index,
# because the index doesn't force a re-walk and so `hg add` of
# a new file without an intervening call to status will
# silently do nothing.
r = {}
cwd = self.getcwd()
for path, status in self.git.status().items():
if path.startswith('.hg/'):
continue
path = pycompat.fsencode(path)
if not match(path):
continue
# TODO construct the stat info from the status object?
try:
s = os.stat(os.path.join(cwd, path))
except OSError as e:
if e.errno != errno.ENOENT:
raise
continue
r[path] = s
return r
def savebackup(self, tr, backupname):
# TODO: figure out a strategy for saving index backups.
pass
def restorebackup(self, tr, backupname):
# TODO: figure out a strategy for saving index backups.
pass
def add(self, f):
index = self.git.index
index.read()
index.add(pycompat.fsdecode(f))
index.write()
def drop(self, f):
index = self.git.index
index.read()
fs = pycompat.fsdecode(f)
if fs in index:
index.remove(fs)
index.write()
def remove(self, f):
index = self.git.index
index.read()
index.remove(pycompat.fsdecode(f))
index.write()
def copied(self, path):
# TODO: track copies?
return None
def prefetch_parents(self):
# TODO
pass
@contextlib.contextmanager
def parentchange(self):
# TODO: track this maybe?
yield
def addparentchangecallback(self, category, callback):
# TODO: should this be added to the dirstate interface?
self._plchangecallbacks[category] = callback
def clearbackup(self, tr, backupname):
# TODO
pass
def setbranch(self, branch):
raise error.Abort(
b'git repos do not support branches. try using bookmarks'
)
```
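The `readpatternfile` wrapper above boils down to a small translation of `.gitignore` lines into Mercurial pattern syntax; below is a self-contained sketch of just that mapping (negated `!` patterns are reported as unsupported).
```python
# Hedged, self-contained sketch of the .gitignore -> hg pattern mapping used by
# readpatternfile() above; '!' negations are unsupported and only warned about.
def translate_gitignore(lines):
    result, warnings = [], []
    for l in lines:
        l = l.strip()
        if not l or l.startswith(b'#'):
            continue
        if l.startswith(b'!'):
            warnings.append(b'unsupported ignore pattern %s' % l)
            continue
        if l.startswith(b'/'):
            result.append(b'rootglob:' + l[1:])   # anchored at the repo root
        else:
            result.append(b'relglob:' + l)        # matches anywhere below it
    return result, warnings

print(translate_gitignore([b'*.pyc', b'/build', b'!keep.me']))
# ([b'relglob:*.pyc', b'rootglob:build'], [b'unsupported ignore pattern !keep.me'])
```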
#### File: hgext/git/index.py
```python
from __future__ import absolute_import
import collections
import os
import sqlite3
from mercurial.i18n import _
from mercurial.node import (
nullhex,
nullid,
)
from mercurial import (
encoding,
error,
pycompat,
)
from . import gitutil
pygit2 = gitutil.get_pygit2()
_CURRENT_SCHEMA_VERSION = 1
_SCHEMA = (
"""
CREATE TABLE refs (
-- node and name are unique together. There may be more than one name for
-- a given node, and there may be no name at all for a given node (in the
-- case of an anonymous hg head).
node TEXT NOT NULL,
name TEXT
);
-- The "possible heads" of the repository, which we use to figure out
-- if we need to re-walk the changelog.
CREATE TABLE possible_heads (
node TEXT NOT NULL
);
-- The topological heads of the changelog, which hg depends on.
CREATE TABLE heads (
node TEXT NOT NULL
);
-- A total ordering of the changelog
CREATE TABLE changelog (
rev INTEGER NOT NULL PRIMARY KEY,
node TEXT NOT NULL,
p1 TEXT,
p2 TEXT
);
CREATE UNIQUE INDEX changelog_node_idx ON changelog(node);
CREATE UNIQUE INDEX changelog_node_rev_idx ON changelog(rev, node);
-- Changed files for each commit, which lets us dynamically build
-- filelogs.
CREATE TABLE changedfiles (
node TEXT NOT NULL,
filename TEXT NOT NULL,
-- 40 zeroes for deletions
filenode TEXT NOT NULL,
-- to handle filelog parentage:
p1node TEXT,
p1filenode TEXT,
p2node TEXT,
p2filenode TEXT
);
CREATE INDEX changedfiles_nodes_idx
ON changedfiles(node);
PRAGMA user_version=%d
"""
% _CURRENT_SCHEMA_VERSION
)
def _createdb(path):
# print('open db', path)
# import traceback
# traceback.print_stack()
db = sqlite3.connect(encoding.strfromlocal(path))
db.text_factory = bytes
res = db.execute('PRAGMA user_version').fetchone()[0]
# New database.
if res == 0:
for statement in _SCHEMA.split(';'):
db.execute(statement.strip())
db.commit()
elif res == _CURRENT_SCHEMA_VERSION:
pass
else:
raise error.Abort(_(b'sqlite database has unrecognized version'))
db.execute('PRAGMA journal_mode=WAL')
return db
_OUR_ORDER = ()
if pygit2:
_OUR_ORDER = (
pygit2.GIT_SORT_TOPOLOGICAL
| pygit2.GIT_SORT_TIME
| pygit2.GIT_SORT_REVERSE
)
_DIFF_FLAGS = 1 << 21 # GIT_DIFF_FORCE_BINARY, which isn't exposed by pygit2
def _find_nearest_ancestor_introducing_node(
db, gitrepo, file_path, walk_start, filenode
):
"""Find the nearest ancestor that introduces a file node.
Args:
db: a handle to our sqlite database.
gitrepo: A pygit2.Repository instance.
file_path: the path of a file in the repo
walk_start: a pygit2.Oid that is a commit where we should start walking
for our nearest ancestor.
Returns:
A hexlified SHA that is the commit ID of the next-nearest parent.
"""
assert isinstance(file_path, str), 'file_path must be str, got %r' % type(
file_path
)
assert isinstance(filenode, str), 'filenode must be str, got %r' % type(
filenode
)
parent_options = {
row[0].decode('ascii')
for row in db.execute(
'SELECT node FROM changedfiles '
'WHERE filename = ? AND filenode = ?',
(file_path, filenode),
)
}
inner_walker = gitrepo.walk(walk_start, _OUR_ORDER)
for w in inner_walker:
if w.id.hex in parent_options:
return w.id.hex
raise error.ProgrammingError(
'Unable to find introducing commit for %s node %s from %s',
(file_path, filenode, walk_start),
)
def fill_in_filelog(gitrepo, db, startcommit, path, startfilenode):
"""Given a starting commit and path, fill in a filelog's parent pointers.
Args:
gitrepo: a pygit2.Repository
db: a handle to our sqlite database
startcommit: a hexlified node id for the commit to start at
path: the path of the file whose parent pointers we should fill in.
filenode: the hexlified node id of the file at startcommit
TODO: make filenode optional
"""
assert isinstance(
startcommit, str
), 'startcommit must be str, got %r' % type(startcommit)
assert isinstance(
startfilenode, str
), 'startfilenode must be str, got %r' % type(startfilenode)
visit = collections.deque([(startcommit, startfilenode)])
while visit:
cnode, filenode = visit.popleft()
commit = gitrepo[cnode]
parents = []
for parent in commit.parents:
t = parent.tree
for comp in path.split('/'):
try:
t = gitrepo[t[comp].id]
except KeyError:
break
else:
introducer = _find_nearest_ancestor_introducing_node(
db, gitrepo, path, parent.id, t.id.hex
)
parents.append((introducer, t.id.hex))
p1node = p1fnode = p2node = p2fnode = gitutil.nullgit
for par, parfnode in parents:
found = int(
db.execute(
'SELECT COUNT(*) FROM changedfiles WHERE '
'node = ? AND filename = ? AND filenode = ? AND '
'p1node NOT NULL',
(par, path, parfnode),
).fetchone()[0]
)
if found == 0:
assert par is not None
visit.append((par, parfnode))
if parents:
p1node, p1fnode = parents[0]
if len(parents) == 2:
p2node, p2fnode = parents[1]
if len(parents) > 2:
raise error.ProgrammingError(
b"git support can't handle octopus merges"
)
db.execute(
'UPDATE changedfiles SET '
'p1node = ?, p1filenode = ?, p2node = ?, p2filenode = ? '
'WHERE node = ? AND filename = ? AND filenode = ?',
(p1node, p1fnode, p2node, p2fnode, commit.id.hex, path, filenode),
)
db.commit()
def _index_repo(
gitrepo,
db,
logfn=lambda x: None,
progress_factory=lambda *args, **kwargs: None,
):
# Identify all references so we can tell the walker to visit all of them.
all_refs = gitrepo.listall_references()
possible_heads = set()
prog = progress_factory(b'refs')
for pos, ref in enumerate(all_refs):
if prog is not None:
prog.update(pos)
if not (
ref.startswith('refs/heads/') # local branch
or ref.startswith('refs/tags/') # tag
or ref.startswith('refs/remotes/') # remote branch
or ref.startswith('refs/hg/') # from this extension
):
continue
try:
start = gitrepo.lookup_reference(ref).peel(pygit2.GIT_OBJ_COMMIT)
except ValueError:
# No commit to be found, so we don't care for hg's purposes.
continue
possible_heads.add(start.id)
# Optimization: if the list of heads hasn't changed, don't
    # reindex the changelog. This doesn't matter on small
# repositories, but on even moderately deep histories (eg cpython)
# this is a very important performance win.
#
# TODO: we should figure out how to incrementally index history
# (preferably by detecting rewinds!) so that we don't have to do a
# full changelog walk every time a new commit is created.
cache_heads = {
pycompat.sysstr(x[0])
for x in db.execute('SELECT node FROM possible_heads')
}
walker = None
cur_cache_heads = {h.hex for h in possible_heads}
if cur_cache_heads == cache_heads:
return
logfn(b'heads mismatch, rebuilding dagcache\n')
for start in possible_heads:
if walker is None:
walker = gitrepo.walk(start, _OUR_ORDER)
else:
walker.push(start)
# Empty out the existing changelog. Even for large-ish histories
# we can do the top-level "walk all the commits" dance very
# quickly as long as we don't need to figure out the changed files
# list.
db.execute('DELETE FROM changelog')
if prog is not None:
prog.complete()
prog = progress_factory(b'commits')
# This walker is sure to visit all the revisions in history, but
# only once.
for pos, commit in enumerate(walker):
if prog is not None:
prog.update(pos)
p1 = p2 = nullhex
if len(commit.parents) > 2:
raise error.ProgrammingError(
(
b"git support can't handle octopus merges, "
b"found a commit with %d parents :("
)
% len(commit.parents)
)
if commit.parents:
p1 = commit.parents[0].id.hex
if len(commit.parents) == 2:
p2 = commit.parents[1].id.hex
db.execute(
'INSERT INTO changelog (rev, node, p1, p2) VALUES(?, ?, ?, ?)',
(pos, commit.id.hex, p1, p2),
)
num_changedfiles = db.execute(
"SELECT COUNT(*) from changedfiles WHERE node = ?",
(commit.id.hex,),
).fetchone()[0]
if not num_changedfiles:
files = {}
# I *think* we only need to check p1 for changed files
# (and therefore linkrevs), because any node that would
# actually have this commit as a linkrev would be
# completely new in this rev.
p1 = commit.parents[0].id.hex if commit.parents else None
if p1 is not None:
patchgen = gitrepo.diff(p1, commit.id.hex, flags=_DIFF_FLAGS)
else:
patchgen = commit.tree.diff_to_tree(
swap=True, flags=_DIFF_FLAGS
)
new_files = (p.delta.new_file for p in patchgen)
files = {
nf.path: nf.id.hex for nf in new_files if nf.id.raw != nullid
}
for p, n in files.items():
# We intentionally set NULLs for any file parentage
# information so it'll get demand-computed later. We
# used to do it right here, and it was _very_ slow.
db.execute(
'INSERT INTO changedfiles ('
'node, filename, filenode, p1node, p1filenode, p2node, '
'p2filenode) VALUES(?, ?, ?, ?, ?, ?, ?)',
(commit.id.hex, p, n, None, None, None, None),
)
db.execute('DELETE FROM heads')
db.execute('DELETE FROM possible_heads')
for hid in possible_heads:
h = hid.hex
db.execute('INSERT INTO possible_heads (node) VALUES(?)', (h,))
haschild = db.execute(
'SELECT COUNT(*) FROM changelog WHERE p1 = ? OR p2 = ?', (h, h)
).fetchone()[0]
if not haschild:
db.execute('INSERT INTO heads (node) VALUES(?)', (h,))
db.commit()
if prog is not None:
prog.complete()
def get_index(
gitrepo, logfn=lambda x: None, progress_factory=lambda *args, **kwargs: None
):
cachepath = os.path.join(
pycompat.fsencode(gitrepo.path), b'..', b'.hg', b'cache'
)
if not os.path.exists(cachepath):
os.makedirs(cachepath)
dbpath = os.path.join(cachepath, b'git-commits.sqlite')
db = _createdb(dbpath)
# TODO check against gitrepo heads before doing a full index
# TODO thread a ui.progress call into this layer
_index_repo(gitrepo, db, logfn, progress_factory)
return db
```
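A hedged sketch of reading back the `changelog` table that `_index_repo` fills in; the cache path is a placeholder and the database is assumed to exist already.
```python
# Hedged sketch: read the changelog table created by _index_repo() above.
# The path is a placeholder; the database must already exist.
import sqlite3

db = sqlite3.connect('/path/to/repo/.hg/cache/git-commits.sqlite')  # hypothetical path
db.text_factory = bytes
for rev, node, p1, p2 in db.execute(
    'SELECT rev, node, p1, p2 FROM changelog ORDER BY rev LIMIT 5'
):
    print(rev, node, p1, p2)
```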
#### File: hgext/hooklib/reject_merge_commits.py
```python
from __future__ import absolute_import
from mercurial.i18n import _
from mercurial import (
error,
pycompat,
)
def hook(ui, repo, hooktype, node=None, **kwargs):
if hooktype != b"pretxnchangegroup":
raise error.Abort(
_(b'Unsupported hook type %r') % pycompat.bytestr(hooktype)
)
ctx = repo.unfiltered()[node]
for rev in repo.changelog.revs(start=ctx.rev()):
rev = repo[rev]
parents = rev.parents()
if len(parents) < 2:
continue
if all(repo[p].branch() == rev.branch() for p in parents):
raise error.Abort(
_(
b'%s rejected as merge on the same branch. '
b'Please consider rebase.'
)
% rev
)
```
#### File: hgext/lfs/blobstore.py
```python
from __future__ import absolute_import
import contextlib
import errno
import hashlib
import json
import os
import re
import socket
from mercurial.i18n import _
from mercurial.pycompat import getattr
from mercurial.node import hex
from mercurial import (
encoding,
error,
httpconnection as httpconnectionmod,
pathutil,
pycompat,
url as urlmod,
util,
vfs as vfsmod,
worker,
)
from mercurial.utils import stringutil
from ..largefiles import lfutil
# 64 bytes for SHA256
_lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
class lfsvfs(vfsmod.vfs):
def join(self, path):
"""split the path at first two characters, like: XX/XXXXX..."""
if not _lfsre.match(path):
raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
return super(lfsvfs, self).join(path[0:2], path[2:])
def walk(self, path=None, onerror=None):
"""Yield (dirpath, [], oids) tuple for blobs under path
Oids only exist in the root of this vfs, so dirpath is always ''.
"""
root = os.path.normpath(self.base)
# when dirpath == root, dirpath[prefixlen:] becomes empty
# because len(dirpath) < prefixlen.
prefixlen = len(pathutil.normasprefix(root))
oids = []
for dirpath, dirs, files in os.walk(
self.reljoin(self.base, path or b''), onerror=onerror
):
dirpath = dirpath[prefixlen:]
# Silently skip unexpected files and directories
if len(dirpath) == 2:
oids.extend(
[dirpath + f for f in files if _lfsre.match(dirpath + f)]
)
yield (b'', [], oids)
class nullvfs(lfsvfs):
def __init__(self):
pass
def exists(self, oid):
return False
def read(self, oid):
# store.read() calls into here if the blob doesn't exist in its
# self.vfs. Raise the same error as a normal vfs when asked to read a
# file that doesn't exist. The only difference is the full file path
# isn't available in the error.
raise IOError(
errno.ENOENT,
pycompat.sysstr(b'%s: No such file or directory' % oid),
)
def walk(self, path=None, onerror=None):
return (b'', [], [])
def write(self, oid, data):
pass
class lfsuploadfile(httpconnectionmod.httpsendfile):
"""a file-like object that supports keepalive."""
def __init__(self, ui, filename):
super(lfsuploadfile, self).__init__(ui, filename, b'rb')
self.read = self._data.read
def _makeprogress(self):
return None # progress is handled by the worker client
class local(object):
"""Local blobstore for large file contents.
This blobstore is used both as a cache and as a staging area for large blobs
to be uploaded to the remote blobstore.
"""
def __init__(self, repo):
fullpath = repo.svfs.join(b'lfs/objects')
self.vfs = lfsvfs(fullpath)
if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
self.cachevfs = nullvfs()
else:
usercache = lfutil._usercachedir(repo.ui, b'lfs')
self.cachevfs = lfsvfs(usercache)
self.ui = repo.ui
def open(self, oid):
"""Open a read-only file descriptor to the named blob, in either the
usercache or the local store."""
return open(self.path(oid), 'rb')
def path(self, oid):
"""Build the path for the given blob ``oid``.
If the blob exists locally, the path may point to either the usercache
or the local store. If it doesn't, it will point to the local store.
This is meant for situations where existing code that isn't LFS aware
needs to open a blob. Generally, prefer the ``open`` method on this
class.
"""
# The usercache is the most likely place to hold the file. Commit will
# write to both it and the local store, as will anything that downloads
# the blobs. However, things like clone without an update won't
# populate the local store. For an init + push of a local clone,
# the usercache is the only place it _could_ be. If not present, the
# missing file msg here will indicate the local repo, not the usercache.
if self.cachevfs.exists(oid):
return self.cachevfs.join(oid)
return self.vfs.join(oid)
def download(self, oid, src, content_length):
"""Read the blob from the remote source in chunks, verify the content,
and write to this local blobstore."""
sha256 = hashlib.sha256()
size = 0
with self.vfs(oid, b'wb', atomictemp=True) as fp:
for chunk in util.filechunkiter(src, size=1048576):
fp.write(chunk)
sha256.update(chunk)
size += len(chunk)
# If the server advertised a length longer than what we actually
# received, then we should expect that the server crashed while
# producing the response (but the server has no way of telling us
# that), and we really don't need to try to write the response to
# the localstore, because it's not going to match the expected.
if content_length is not None and int(content_length) != size:
msg = (
b"Response length (%s) does not match Content-Length "
b"header (%d): likely server-side crash"
)
raise LfsRemoteError(_(msg) % (size, int(content_length)))
realoid = hex(sha256.digest())
if realoid != oid:
raise LfsCorruptionError(
_(b'corrupt remote lfs object: %s') % oid
)
self._linktousercache(oid)
def write(self, oid, data):
"""Write blob to local blobstore.
This should only be called from the filelog during a commit or similar.
As such, there is no need to verify the data. Imports from a remote
store must use ``download()`` instead."""
with self.vfs(oid, b'wb', atomictemp=True) as fp:
fp.write(data)
self._linktousercache(oid)
def linkfromusercache(self, oid):
"""Link blobs found in the user cache into this store.
The server module needs to do this when it lets the client know not to
upload the blob, to ensure it is always available in this store.
Normally this is done implicitly when the client reads or writes the
blob, but that doesn't happen when the server tells the client that it
already has the blob.
"""
if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
def _linktousercache(self, oid):
# XXX: should we verify the content of the cache, and hardlink back to
# the local store on success, but truncate, write and link on failure?
if not self.cachevfs.exists(oid) and not isinstance(
self.cachevfs, nullvfs
):
self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
def read(self, oid, verify=True):
"""Read blob from local blobstore."""
if not self.vfs.exists(oid):
blob = self._read(self.cachevfs, oid, verify)
# Even if revlog will verify the content, it needs to be verified
# now before making the hardlink to avoid propagating corrupt blobs.
# Don't abort if corruption is detected, because `hg verify` will
# give more useful info about the corruption- simply don't add the
# hardlink.
if verify or hex(hashlib.sha256(blob).digest()) == oid:
self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
else:
self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
blob = self._read(self.vfs, oid, verify)
return blob
def _read(self, vfs, oid, verify):
"""Read blob (after verifying) from the given store"""
blob = vfs.read(oid)
if verify:
_verify(oid, blob)
return blob
def verify(self, oid):
"""Indicate whether or not the hash of the underlying file matches its
name."""
sha256 = hashlib.sha256()
with self.open(oid) as fp:
for chunk in util.filechunkiter(fp, size=1048576):
sha256.update(chunk)
return oid == hex(sha256.digest())
def has(self, oid):
"""Returns True if the local blobstore contains the requested blob,
False otherwise."""
return self.cachevfs.exists(oid) or self.vfs.exists(oid)
def _urlerrorreason(urlerror):
"""Create a friendly message for the given URLError to be used in an
LfsRemoteError message.
"""
inst = urlerror
if isinstance(urlerror.reason, Exception):
inst = urlerror.reason
if util.safehasattr(inst, b'reason'):
try: # usually it is in the form (errno, strerror)
reason = inst.reason.args[1]
except (AttributeError, IndexError):
# it might be anything, for example a string
reason = inst.reason
if isinstance(reason, pycompat.unicode):
# SSLError of Python 2.7.9 contains a unicode
reason = encoding.unitolocal(reason)
return reason
elif getattr(inst, "strerror", None):
return encoding.strtolocal(inst.strerror)
else:
return stringutil.forcebytestr(urlerror)
class lfsauthhandler(util.urlreq.basehandler):
handler_order = 480 # Before HTTPDigestAuthHandler (== 490)
def http_error_401(self, req, fp, code, msg, headers):
"""Enforces that any authentication performed is HTTP Basic
Authentication. No authentication is also acceptable.
"""
authreq = headers.get('www-authenticate', None)
if authreq:
scheme = authreq.split()[0]
if scheme.lower() != 'basic':
msg = _(b'the server must support Basic Authentication')
raise util.urlerr.httperror(
req.get_full_url(),
code,
encoding.strfromlocal(msg),
headers,
fp,
)
return None
class _gitlfsremote(object):
def __init__(self, repo, url):
ui = repo.ui
self.ui = ui
baseurl, authinfo = url.authinfo()
self.baseurl = baseurl.rstrip(b'/')
useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
if not useragent:
useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
self.urlopener = urlmod.opener(ui, authinfo, useragent)
self.urlopener.add_handler(lfsauthhandler())
self.retry = ui.configint(b'lfs', b'retry')
def writebatch(self, pointers, fromstore):
"""Batch upload from local to remote blobstore."""
self._batch(_deduplicate(pointers), fromstore, b'upload')
def readbatch(self, pointers, tostore):
"""Batch download from remote to local blostore."""
self._batch(_deduplicate(pointers), tostore, b'download')
def _batchrequest(self, pointers, action):
"""Get metadata about objects pointed by pointers for given action
Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
"""
objects = [
{'oid': pycompat.strurl(p.oid()), 'size': p.size()}
for p in pointers
]
requestdata = pycompat.bytesurl(
json.dumps(
{
'objects': objects,
'operation': pycompat.strurl(action),
}
)
)
url = b'%s/objects/batch' % self.baseurl
batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
try:
with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
rawjson = rsp.read()
except util.urlerr.httperror as ex:
hints = {
400: _(
b'check that lfs serving is enabled on %s and "%s" is '
b'supported'
)
% (self.baseurl, action),
404: _(b'the "lfs.url" config may be used to override %s')
% self.baseurl,
}
hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
raise LfsRemoteError(
_(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
hint=hint,
)
except util.urlerr.urlerror as ex:
hint = (
_(b'the "lfs.url" config may be used to override %s')
% self.baseurl
)
raise LfsRemoteError(
_(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
)
try:
response = pycompat.json_loads(rawjson)
except ValueError:
raise LfsRemoteError(
_(b'LFS server returns invalid JSON: %s')
% rawjson.encode("utf-8")
)
if self.ui.debugflag:
self.ui.debug(b'Status: %d\n' % rsp.status)
# lfs-test-server and hg serve return headers in different order
headers = pycompat.bytestr(rsp.info()).strip()
self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
if 'objects' in response:
response['objects'] = sorted(
response['objects'], key=lambda p: p['oid']
)
self.ui.debug(
b'%s\n'
% pycompat.bytesurl(
json.dumps(
response,
indent=2,
separators=('', ': '),
sort_keys=True,
)
)
)
def encodestr(x):
if isinstance(x, pycompat.unicode):
return x.encode('utf-8')
return x
return pycompat.rapply(encodestr, response)
def _checkforservererror(self, pointers, responses, action):
"""Scans errors from objects
Raises LfsRemoteError if any objects have an error"""
for response in responses:
# The server should return 404 when objects cannot be found. Some
            # server implementations (e.g. lfs-test-server) do not set "error"
# but just removes "download" from "actions". Treat that case
# as the same as 404 error.
if b'error' not in response:
if action == b'download' and action not in response.get(
b'actions', []
):
code = 404
else:
continue
else:
# An error dict without a code doesn't make much sense, so
# treat as a server error.
code = response.get(b'error').get(b'code', 500)
ptrmap = {p.oid(): p for p in pointers}
p = ptrmap.get(response[b'oid'], None)
if p:
filename = getattr(p, 'filename', b'unknown')
errors = {
404: b'The object does not exist',
410: b'The object was removed by the owner',
422: b'Validation error',
500: b'Internal server error',
}
msg = errors.get(code, b'status code %d' % code)
raise LfsRemoteError(
_(b'LFS server error for "%s": %s') % (filename, msg)
)
else:
raise LfsRemoteError(
_(b'LFS server error. Unsolicited response for oid %s')
% response[b'oid']
)
def _extractobjects(self, response, pointers, action):
"""extract objects from response of the batch API
response: parsed JSON object returned by batch API
return response['objects'] filtered by action
raise if any object has an error
"""
# Scan errors from objects - fail early
objects = response.get(b'objects', [])
self._checkforservererror(pointers, objects, action)
# Filter objects with given action. Practically, this skips uploading
# objects which exist in the server.
filteredobjects = [
o for o in objects if action in o.get(b'actions', [])
]
return filteredobjects
def _basictransfer(self, obj, action, localstore):
"""Download or upload a single object using basic transfer protocol
obj: dict, an object description returned by batch API
action: string, one of ['upload', 'download']
localstore: blobstore.local
See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
basic-transfers.md
"""
oid = obj[b'oid']
href = obj[b'actions'][action].get(b'href')
headers = obj[b'actions'][action].get(b'header', {}).items()
request = util.urlreq.request(pycompat.strurl(href))
if action == b'upload':
# If uploading blobs, read data from local blobstore.
if not localstore.verify(oid):
raise error.Abort(
_(b'detected corrupt lfs object: %s') % oid,
hint=_(b'run hg verify'),
)
for k, v in headers:
request.add_header(pycompat.strurl(k), pycompat.strurl(v))
try:
if action == b'upload':
request.data = lfsuploadfile(self.ui, localstore.path(oid))
request.get_method = lambda: 'PUT'
request.add_header('Content-Type', 'application/octet-stream')
request.add_header('Content-Length', request.data.length)
with contextlib.closing(self.urlopener.open(request)) as res:
contentlength = res.info().get(b"content-length")
ui = self.ui # Shorten debug lines
if self.ui.debugflag:
ui.debug(b'Status: %d\n' % res.status)
# lfs-test-server and hg serve return headers in different
# order
headers = pycompat.bytestr(res.info()).strip()
ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
if action == b'download':
# If downloading blobs, store downloaded data to local
# blobstore
localstore.download(oid, res, contentlength)
else:
blocks = []
while True:
data = res.read(1048576)
if not data:
break
blocks.append(data)
response = b"".join(blocks)
if response:
ui.debug(b'lfs %s response: %s' % (action, response))
except util.urlerr.httperror as ex:
if self.ui.debugflag:
self.ui.debug(
b'%s: %s\n' % (oid, ex.read())
) # XXX: also bytes?
raise LfsRemoteError(
_(b'LFS HTTP error: %s (oid=%s, action=%s)')
% (stringutil.forcebytestr(ex), oid, action)
)
except util.urlerr.urlerror as ex:
hint = _(b'attempted connection to %s') % pycompat.bytesurl(
util.urllibcompat.getfullurl(request)
)
raise LfsRemoteError(
_(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
)
finally:
if request.data:
request.data.close()
def _batch(self, pointers, localstore, action):
if action not in [b'upload', b'download']:
raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
response = self._batchrequest(pointers, action)
objects = self._extractobjects(response, pointers, action)
total = sum(x.get(b'size', 0) for x in objects)
sizes = {}
for obj in objects:
sizes[obj.get(b'oid')] = obj.get(b'size', 0)
topic = {
b'upload': _(b'lfs uploading'),
b'download': _(b'lfs downloading'),
}[action]
if len(objects) > 1:
self.ui.note(
_(b'lfs: need to transfer %d objects (%s)\n')
% (len(objects), util.bytecount(total))
)
def transfer(chunk):
for obj in chunk:
objsize = obj.get(b'size', 0)
if self.ui.verbose:
if action == b'download':
msg = _(b'lfs: downloading %s (%s)\n')
elif action == b'upload':
msg = _(b'lfs: uploading %s (%s)\n')
self.ui.note(
msg % (obj.get(b'oid'), util.bytecount(objsize))
)
retry = self.retry
while True:
try:
self._basictransfer(obj, action, localstore)
yield 1, obj.get(b'oid')
break
except socket.error as ex:
if retry > 0:
self.ui.note(
_(b'lfs: failed: %r (remaining retry %d)\n')
% (stringutil.forcebytestr(ex), retry)
)
retry -= 1
continue
raise
# Until https multiplexing gets sorted out
if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
oids = worker.worker(
self.ui,
0.1,
transfer,
(),
sorted(objects, key=lambda o: o.get(b'oid')),
)
else:
oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
with self.ui.makeprogress(
topic, unit=_(b"bytes"), total=total
) as progress:
progress.update(0)
processed = 0
blobs = 0
for _one, oid in oids:
processed += sizes[oid]
blobs += 1
progress.update(processed)
self.ui.note(_(b'lfs: processed: %s\n') % oid)
if blobs > 0:
if action == b'upload':
self.ui.status(
_(b'lfs: uploaded %d files (%s)\n')
% (blobs, util.bytecount(processed))
)
elif action == b'download':
self.ui.status(
_(b'lfs: downloaded %d files (%s)\n')
% (blobs, util.bytecount(processed))
)
def __del__(self):
# copied from mercurial/httppeer.py
urlopener = getattr(self, 'urlopener', None)
if urlopener:
for h in urlopener.handlers:
h.close()
getattr(h, "close_all", lambda: None)()
class _dummyremote(object):
"""Dummy store storing blobs to temp directory."""
def __init__(self, repo, url):
fullpath = repo.vfs.join(b'lfs', url.path)
self.vfs = lfsvfs(fullpath)
def writebatch(self, pointers, fromstore):
for p in _deduplicate(pointers):
content = fromstore.read(p.oid(), verify=True)
with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
fp.write(content)
def readbatch(self, pointers, tostore):
for p in _deduplicate(pointers):
with self.vfs(p.oid(), b'rb') as fp:
tostore.download(p.oid(), fp, None)
class _nullremote(object):
"""Null store storing blobs to /dev/null."""
def __init__(self, repo, url):
pass
def writebatch(self, pointers, fromstore):
pass
def readbatch(self, pointers, tostore):
pass
class _promptremote(object):
"""Prompt user to set lfs.url when accessed."""
def __init__(self, repo, url):
pass
def writebatch(self, pointers, fromstore, ui=None):
self._prompt()
def readbatch(self, pointers, tostore, ui=None):
self._prompt()
def _prompt(self):
raise error.Abort(_(b'lfs.url needs to be configured'))
_storemap = {
b'https': _gitlfsremote,
b'http': _gitlfsremote,
b'file': _dummyremote,
b'null': _nullremote,
None: _promptremote,
}
def _deduplicate(pointers):
"""Remove any duplicate oids that exist in the list"""
reduced = util.sortdict()
for p in pointers:
reduced[p.oid()] = p
return reduced.values()
def _verify(oid, content):
realoid = hex(hashlib.sha256(content).digest())
if realoid != oid:
raise LfsCorruptionError(
_(b'detected corrupt lfs object: %s') % oid,
hint=_(b'run hg verify'),
)
def remote(repo, remote=None):
"""remotestore factory. return a store in _storemap depending on config
If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
infer the endpoint, based on the remote repository using the same path
adjustments as git. As an extension, 'http' is supported as well so that
``hg serve`` works out of the box.
https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
"""
lfsurl = repo.ui.config(b'lfs', b'url')
url = util.url(lfsurl or b'')
if lfsurl is None:
if remote:
path = remote
elif util.safehasattr(repo, b'_subtoppath'):
# The pull command sets this during the optional update phase, which
# tells exactly where the pull originated, whether 'paths.default'
# or explicit.
path = repo._subtoppath
else:
# TODO: investigate 'paths.remote:lfsurl' style path customization,
# and fall back to inferring from 'paths.remote' if unspecified.
path = repo.ui.config(b'paths', b'default') or b''
defaulturl = util.url(path)
# TODO: support local paths as well.
# TODO: consider the ssh -> https transformation that git applies
if defaulturl.scheme in (b'http', b'https'):
if defaulturl.path and defaulturl.path[:-1] != b'/':
defaulturl.path += b'/'
defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
url = util.url(bytes(defaulturl))
repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
scheme = url.scheme
if scheme not in _storemap:
raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
return _storemap[scheme](repo, url)
class LfsRemoteError(error.StorageError):
pass
class LfsCorruptionError(error.Abort):
"""Raised when a corrupt blob is detected, aborting an operation
It exists to allow specialized handling on the server side."""
```
#### File: hgext/lfs/pointer.py
```python
from __future__ import absolute_import
import re
from mercurial.i18n import _
from mercurial import (
error,
pycompat,
)
from mercurial.utils import stringutil
class InvalidPointer(error.StorageError):
pass
class gitlfspointer(dict):
VERSION = b'https://git-lfs.github.com/spec/v1'
def __init__(self, *args, **kwargs):
self[b'version'] = self.VERSION
super(gitlfspointer, self).__init__(*args)
self.update(pycompat.byteskwargs(kwargs))
@classmethod
def deserialize(cls, text):
try:
return cls(l.split(b' ', 1) for l in text.splitlines()).validate()
except ValueError: # l.split returns 1 item instead of 2
raise InvalidPointer(
_(b'cannot parse git-lfs text: %s') % stringutil.pprint(text)
)
def serialize(self):
sortkeyfunc = lambda x: (x[0] != b'version', x)
items = sorted(pycompat.iteritems(self.validate()), key=sortkeyfunc)
return b''.join(b'%s %s\n' % (k, v) for k, v in items)
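    # Illustrative serialized pointer (values are hypothetical):
    #   version https://git-lfs.github.com/spec/v1
    #   oid sha256:<64 hex digits>
    #   size 12345
    # serialize() always writes 'version' first and the remaining keys sorted.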
def oid(self):
return self[b'oid'].split(b':')[-1]
def size(self):
return int(self[b'size'])
# regular expressions used by _validate
# see https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md
_keyre = re.compile(br'\A[a-z0-9.-]+\Z')
_valuere = re.compile(br'\A[^\n]*\Z')
_requiredre = {
b'size': re.compile(br'\A[0-9]+\Z'),
b'oid': re.compile(br'\Asha256:[0-9a-f]{64}\Z'),
b'version': re.compile(br'\A%s\Z' % stringutil.reescape(VERSION)),
}
def validate(self):
"""raise InvalidPointer on error. return self if there is no error"""
requiredcount = 0
for k, v in pycompat.iteritems(self):
if k in self._requiredre:
if not self._requiredre[k].match(v):
raise InvalidPointer(
_(b'unexpected lfs pointer value: %s=%s')
% (k, stringutil.pprint(v))
)
requiredcount += 1
elif not self._keyre.match(k):
raise InvalidPointer(_(b'unexpected lfs pointer key: %s') % k)
if not self._valuere.match(v):
raise InvalidPointer(
_(b'unexpected lfs pointer value: %s=%s')
% (k, stringutil.pprint(v))
)
if len(self._requiredre) != requiredcount:
miss = sorted(set(self._requiredre.keys()).difference(self.keys()))
raise InvalidPointer(
_(b'missing lfs pointer keys: %s') % b', '.join(miss)
)
return self
deserialize = gitlfspointer.deserialize
```
#### File: site-packages/hgext/mq.py
```python
from __future__ import absolute_import, print_function
import errno
import os
import re
import shutil
import sys
from mercurial.i18n import _
from mercurial.node import (
bin,
hex,
nullid,
nullrev,
short,
)
from mercurial.pycompat import (
delattr,
getattr,
open,
)
from mercurial import (
cmdutil,
commands,
dirstateguard,
encoding,
error,
extensions,
hg,
localrepo,
lock as lockmod,
logcmdutil,
patch as patchmod,
phases,
pycompat,
registrar,
revsetlang,
scmutil,
smartset,
strip,
subrepoutil,
util,
vfs as vfsmod,
)
from mercurial.utils import (
dateutil,
stringutil,
)
release = lockmod.release
seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))]
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'
configtable = {}
configitem = registrar.configitem(configtable)
configitem(
b'mq',
b'git',
default=b'auto',
)
configitem(
b'mq',
b'keepchanges',
default=False,
)
configitem(
b'mq',
b'plain',
default=False,
)
configitem(
b'mq',
b'secret',
default=False,
)
# force load strip extension formerly included in mq and import some utility
try:
extensions.find(b'strip')
except KeyError:
# note: load is lazy so we could avoid the try-except,
# but I (marmoute) prefer this explicit code.
class dummyui(object):
def debug(self, msg):
pass
def log(self, event, msgfmt, *msgargs, **opts):
pass
extensions.load(dummyui(), b'strip', b'')
strip = strip.strip
def checksubstate(repo, baserev=None):
"""return list of subrepos at a different revision than substate.
Abort if any subrepos have uncommitted changes."""
inclsubs = []
wctx = repo[None]
if baserev:
bctx = repo[baserev]
else:
bctx = wctx.p1()
for s in sorted(wctx.substate):
wctx.sub(s).bailifchanged(True)
if s not in bctx.substate or bctx.sub(s).dirty():
inclsubs.append(s)
return inclsubs
# Patch names look like unix file names.
# They must be joinable with the queue directory and result in the patch path.
normname = util.normpath
class statusentry(object):
def __init__(self, node, name):
self.node, self.name = node, name
def __bytes__(self):
return hex(self.node) + b':' + self.name
__str__ = encoding.strmethod(__bytes__)
__repr__ = encoding.strmethod(__bytes__)
# The order of the headers in 'hg export' HG patches:
HGHEADERS = [
# '# HG changeset patch',
b'# User ',
b'# Date ',
b'# ',
b'# Branch ',
b'# Node ID ',
b'# Parent ', # can occur twice for merges - but that is not relevant for mq
]
# The order of headers in plain 'mail style' patches:
PLAINHEADERS = {
b'from': 0,
b'date': 1,
b'subject': 2,
}
def inserthgheader(lines, header, value):
"""Assuming lines contains a HG patch header, add a header line with value.
>>> try: inserthgheader([], b'# Date ', b'z')
... except ValueError as inst: print("oops")
oops
>>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z')
['# HG changeset patch', '# Date z']
>>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z')
['# HG changeset patch', '# Date z', '']
>>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z')
['# HG changeset patch', '# User y', '# Date z']
>>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'],
... b'# User ', b'z')
['# HG changeset patch', '# Date x', '# User z']
>>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z')
['# HG changeset patch', '# Date z']
>>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'],
... b'# Date ', b'z')
['# HG changeset patch', '# Date z', '', '# Date y']
>>> inserthgheader([b'# HG changeset patch', b'# Parent y'],
... b'# Date ', b'z')
['# HG changeset patch', '# Date z', '# Parent y']
"""
start = lines.index(b'# HG changeset patch') + 1
newindex = HGHEADERS.index(header)
bestpos = len(lines)
for i in range(start, len(lines)):
line = lines[i]
if not line.startswith(b'# '):
bestpos = min(bestpos, i)
break
for lineindex, h in enumerate(HGHEADERS):
if line.startswith(h):
if lineindex == newindex:
lines[i] = header + value
return lines
if lineindex > newindex:
bestpos = min(bestpos, i)
break # next line
lines.insert(bestpos, header + value)
return lines
def insertplainheader(lines, header, value):
"""For lines containing a plain patch header, add a header line with value.
>>> insertplainheader([], b'Date', b'z')
['Date: z']
>>> insertplainheader([b''], b'Date', b'z')
['Date: z', '']
>>> insertplainheader([b'x'], b'Date', b'z')
['Date: z', '', 'x']
>>> insertplainheader([b'From: y', b'x'], b'Date', b'z')
['From: y', 'Date: z', '', 'x']
>>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z')
[' date : x', 'From: z', '']
>>> insertplainheader([b'', b'Date: y'], b'Date', b'z')
['Date: z', '', 'Date: y']
>>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y')
['From: y', 'foo: bar', 'DATE: z', '', 'x']
"""
newprio = PLAINHEADERS[header.lower()]
bestpos = len(lines)
for i, line in enumerate(lines):
if b':' in line:
lheader = line.split(b':', 1)[0].strip().lower()
lprio = PLAINHEADERS.get(lheader, newprio + 1)
if lprio == newprio:
lines[i] = b'%s: %s' % (header, value)
return lines
if lprio > newprio and i < bestpos:
bestpos = i
else:
if line:
lines.insert(i, b'')
if i < bestpos:
bestpos = i
break
lines.insert(bestpos, b'%s: %s' % (header, value))
return lines
class patchheader(object):
def __init__(self, pf, plainmode=False):
def eatdiff(lines):
while lines:
l = lines[-1]
if (
l.startswith(b"diff -")
or l.startswith(b"Index:")
or l.startswith(b"===========")
):
del lines[-1]
else:
break
def eatempty(lines):
while lines:
if not lines[-1].strip():
del lines[-1]
else:
break
message = []
comments = []
user = None
date = None
parent = None
format = None
subject = None
branch = None
nodeid = None
diffstart = 0
for line in open(pf, b'rb'):
line = line.rstrip()
if line.startswith(b'diff --git') or (
diffstart and line.startswith(b'+++ ')
):
diffstart = 2
break
diffstart = 0 # reset
if line.startswith(b"--- "):
diffstart = 1
continue
elif format == b"hgpatch":
# parse values when importing the result of an hg export
if line.startswith(b"# User "):
user = line[7:]
elif line.startswith(b"# Date "):
date = line[7:]
elif line.startswith(b"# Parent "):
parent = line[9:].lstrip() # handle double trailing space
elif line.startswith(b"# Branch "):
branch = line[9:]
elif line.startswith(b"# Node ID "):
nodeid = line[10:]
elif not line.startswith(b"# ") and line:
message.append(line)
format = None
elif line == b'# HG changeset patch':
message = []
format = b"hgpatch"
elif format != b"tagdone" and (
line.startswith(b"Subject: ") or line.startswith(b"subject: ")
):
subject = line[9:]
format = b"tag"
elif format != b"tagdone" and (
line.startswith(b"From: ") or line.startswith(b"from: ")
):
user = line[6:]
format = b"tag"
elif format != b"tagdone" and (
line.startswith(b"Date: ") or line.startswith(b"date: ")
):
date = line[6:]
format = b"tag"
elif format == b"tag" and line == b"":
# when looking for tags (subject: from: etc) they
# end once you find a blank line in the source
format = b"tagdone"
elif message or line:
message.append(line)
comments.append(line)
eatdiff(message)
eatdiff(comments)
# Remember the exact starting line of the patch diffs before consuming
# empty lines, for external use by TortoiseHg and others
self.diffstartline = len(comments)
eatempty(message)
eatempty(comments)
# make sure message isn't empty
if format and format.startswith(b"tag") and subject:
message.insert(0, subject)
self.message = message
self.comments = comments
self.user = user
self.date = date
self.parent = parent
# nodeid and branch are for external use by TortoiseHg and others
self.nodeid = nodeid
self.branch = branch
self.haspatch = diffstart > 1
self.plainmode = (
plainmode
or b'# HG changeset patch' not in self.comments
and any(
c.startswith(b'Date: ') or c.startswith(b'From: ')
for c in self.comments
)
)
def setuser(self, user):
try:
inserthgheader(self.comments, b'# User ', user)
except ValueError:
if self.plainmode:
insertplainheader(self.comments, b'From', user)
else:
tmp = [b'# HG changeset patch', b'# User ' + user]
self.comments = tmp + self.comments
self.user = user
def setdate(self, date):
try:
inserthgheader(self.comments, b'# Date ', date)
except ValueError:
if self.plainmode:
insertplainheader(self.comments, b'Date', date)
else:
tmp = [b'# HG changeset patch', b'# Date ' + date]
self.comments = tmp + self.comments
self.date = date
def setparent(self, parent):
try:
inserthgheader(self.comments, b'# Parent ', parent)
except ValueError:
if not self.plainmode:
tmp = [b'# HG changeset patch', b'# Parent ' + parent]
self.comments = tmp + self.comments
self.parent = parent
def setmessage(self, message):
if self.comments:
self._delmsg()
self.message = [message]
if message:
if self.plainmode and self.comments and self.comments[-1]:
self.comments.append(b'')
self.comments.append(message)
def __bytes__(self):
s = b'\n'.join(self.comments).rstrip()
if not s:
return b''
return s + b'\n\n'
__str__ = encoding.strmethod(__bytes__)
def _delmsg(self):
"""Remove existing message, keeping the rest of the comments fields.
If comments contains 'subject: ', message will prepend
the field and a blank line."""
if self.message:
subj = b'subject: ' + self.message[0].lower()
for i in pycompat.xrange(len(self.comments)):
if subj == self.comments[i].lower():
del self.comments[i]
self.message = self.message[2:]
break
ci = 0
for mi in self.message:
while mi != self.comments[ci]:
ci += 1
del self.comments[ci]
def newcommit(repo, phase, *args, **kwargs):
"""helper dedicated to ensure a commit respect mq.secret setting
It should be used instead of repo.commit inside the mq source for operation
creating new changeset.
"""
repo = repo.unfiltered()
if phase is None:
if repo.ui.configbool(b'mq', b'secret'):
phase = phases.secret
overrides = {(b'ui', b'allowemptycommit'): True}
if phase is not None:
overrides[(b'phases', b'new-commit')] = phase
with repo.ui.configoverride(overrides, b'mq'):
repo.ui.setconfig(b'ui', b'allowemptycommit', True)
return repo.commit(*args, **kwargs)
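# Illustrative call (hypothetical arguments): newcommit(repo, None, b'[mq]: x',
# user, date, match=match, force=True) creates the changeset as secret when
# mq.secret is set, and otherwise follows the phases.new-commit default.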
class AbortNoCleanup(error.Abort):
pass
class queue(object):
def __init__(self, ui, baseui, path, patchdir=None):
self.basepath = path
try:
with open(os.path.join(path, b'patches.queue'), 'rb') as fh:
cur = fh.read().rstrip()
if not cur:
curpath = os.path.join(path, b'patches')
else:
curpath = os.path.join(path, b'patches-' + cur)
except IOError:
curpath = os.path.join(path, b'patches')
self.path = patchdir or curpath
self.opener = vfsmod.vfs(self.path)
self.ui = ui
self.baseui = baseui
self.applieddirty = False
self.seriesdirty = False
self.added = []
self.seriespath = b"series"
self.statuspath = b"status"
self.guardspath = b"guards"
self.activeguards = None
self.guardsdirty = False
# Handle mq.git as a bool with extended values
gitmode = ui.config(b'mq', b'git').lower()
boolmode = stringutil.parsebool(gitmode)
if boolmode is not None:
if boolmode:
gitmode = b'yes'
else:
gitmode = b'no'
self.gitmode = gitmode
# deprecated config: mq.plain
self.plainmode = ui.configbool(b'mq', b'plain')
self.checkapplied = True
@util.propertycache
def applied(self):
def parselines(lines):
for l in lines:
entry = l.split(b':', 1)
if len(entry) > 1:
n, name = entry
yield statusentry(bin(n), name)
elif l.strip():
self.ui.warn(
                        _(b'malformed mq status line: %s\n')
% stringutil.pprint(entry)
)
# else we ignore empty lines
try:
lines = self.opener.read(self.statuspath).splitlines()
return list(parselines(lines))
except IOError as e:
if e.errno == errno.ENOENT:
return []
raise
@util.propertycache
def fullseries(self):
try:
return self.opener.read(self.seriespath).splitlines()
except IOError as e:
if e.errno == errno.ENOENT:
return []
raise
@util.propertycache
def series(self):
self.parseseries()
return self.series
@util.propertycache
def seriesguards(self):
self.parseseries()
return self.seriesguards
def invalidate(self):
for a in 'applied fullseries series seriesguards'.split():
if a in self.__dict__:
delattr(self, a)
self.applieddirty = False
self.seriesdirty = False
self.guardsdirty = False
self.activeguards = None
def diffopts(self, opts=None, patchfn=None, plain=False):
"""Return diff options tweaked for this mq use, possibly upgrading to
git format, and possibly plain and without lossy options."""
diffopts = patchmod.difffeatureopts(
self.ui,
opts,
git=True,
whitespace=not plain,
formatchanging=not plain,
)
if self.gitmode == b'auto':
diffopts.upgrade = True
elif self.gitmode == b'keep':
pass
elif self.gitmode in (b'yes', b'no'):
diffopts.git = self.gitmode == b'yes'
else:
raise error.Abort(
                _(b'mq.git option can be auto/keep/yes/no, got %s')
% self.gitmode
)
if patchfn:
diffopts = self.patchopts(diffopts, patchfn)
return diffopts
def patchopts(self, diffopts, *patches):
"""Return a copy of input diff options with git set to true if
referenced patch is a git patch and should be preserved as such.
"""
diffopts = diffopts.copy()
if not diffopts.git and self.gitmode == b'keep':
for patchfn in patches:
patchf = self.opener(patchfn, b'r')
# if the patch was a git patch, refresh it as a git patch
diffopts.git = any(
line.startswith(b'diff --git') for line in patchf
)
patchf.close()
return diffopts
def join(self, *p):
return os.path.join(self.path, *p)
def findseries(self, patch):
def matchpatch(l):
l = l.split(b'#', 1)[0]
return l.strip() == patch
for index, l in enumerate(self.fullseries):
if matchpatch(l):
return index
return None
guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
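    # e.g. (illustrative) the series line b'foo.patch #+stable #-experimental'
    # yields the guards [b'+stable', b'-experimental'] for patch b'foo.patch'.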
def parseseries(self):
self.series = []
self.seriesguards = []
for l in self.fullseries:
h = l.find(b'#')
if h == -1:
patch = l
comment = b''
elif h == 0:
continue
else:
patch = l[:h]
comment = l[h:]
patch = patch.strip()
if patch:
if patch in self.series:
raise error.Abort(
_(b'%s appears more than once in %s')
% (patch, self.join(self.seriespath))
)
self.series.append(patch)
self.seriesguards.append(self.guard_re.findall(comment))
def checkguard(self, guard):
if not guard:
return _(b'guard cannot be an empty string')
bad_chars = b'# \t\r\n\f'
first = guard[0]
if first in b'-+':
return _(b'guard %r starts with invalid character: %r') % (
guard,
first,
)
for c in bad_chars:
if c in guard:
return _(b'invalid character in guard %r: %r') % (guard, c)
def setactive(self, guards):
for guard in guards:
bad = self.checkguard(guard)
if bad:
raise error.Abort(bad)
guards = sorted(set(guards))
self.ui.debug(b'active guards: %s\n' % b' '.join(guards))
self.activeguards = guards
self.guardsdirty = True
def active(self):
if self.activeguards is None:
self.activeguards = []
try:
guards = self.opener.read(self.guardspath).split()
except IOError as err:
if err.errno != errno.ENOENT:
raise
guards = []
for i, guard in enumerate(guards):
bad = self.checkguard(guard)
if bad:
self.ui.warn(
b'%s:%d: %s\n'
% (self.join(self.guardspath), i + 1, bad)
)
else:
self.activeguards.append(guard)
return self.activeguards
def setguards(self, idx, guards):
for g in guards:
if len(g) < 2:
raise error.Abort(_(b'guard %r too short') % g)
if g[0] not in b'-+':
raise error.Abort(_(b'guard %r starts with invalid char') % g)
bad = self.checkguard(g[1:])
if bad:
raise error.Abort(bad)
drop = self.guard_re.sub(b'', self.fullseries[idx])
self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards])
self.parseseries()
self.seriesdirty = True
def pushable(self, idx):
if isinstance(idx, bytes):
idx = self.series.index(idx)
patchguards = self.seriesguards[idx]
if not patchguards:
return True, None
guards = self.active()
exactneg = [
g for g in patchguards if g.startswith(b'-') and g[1:] in guards
]
if exactneg:
return False, stringutil.pprint(exactneg[0])
pos = [g for g in patchguards if g.startswith(b'+')]
exactpos = [g for g in pos if g[1:] in guards]
if pos:
if exactpos:
return True, stringutil.pprint(exactpos[0])
return False, b' '.join([stringutil.pprint(p) for p in pos])
return True, b''
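    # Illustrative guard semantics (hypothetical guards): with active guards
    # {b'stable'}, a patch guarded '#-stable' is skipped (negative match), one
    # guarded '#+stable' is pushable, and one guarded only '#+experimental' is
    # skipped because none of its positive guards is active.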
def explainpushable(self, idx, all_patches=False):
if all_patches:
write = self.ui.write
else:
write = self.ui.warn
if all_patches or self.ui.verbose:
if isinstance(idx, bytes):
idx = self.series.index(idx)
pushable, why = self.pushable(idx)
if all_patches and pushable:
if why is None:
write(
_(b'allowing %s - no guards in effect\n')
% self.series[idx]
)
else:
if not why:
write(
_(b'allowing %s - no matching negative guards\n')
% self.series[idx]
)
else:
write(
_(b'allowing %s - guarded by %s\n')
% (self.series[idx], why)
)
if not pushable:
if why:
write(
_(b'skipping %s - guarded by %s\n')
% (self.series[idx], why)
)
else:
write(
_(b'skipping %s - no matching guards\n')
% self.series[idx]
)
def savedirty(self):
def writelist(items, path):
fp = self.opener(path, b'wb')
for i in items:
fp.write(b"%s\n" % i)
fp.close()
if self.applieddirty:
writelist(map(bytes, self.applied), self.statuspath)
self.applieddirty = False
if self.seriesdirty:
writelist(self.fullseries, self.seriespath)
self.seriesdirty = False
if self.guardsdirty:
writelist(self.activeguards, self.guardspath)
self.guardsdirty = False
if self.added:
qrepo = self.qrepo()
if qrepo:
qrepo[None].add(f for f in self.added if f not in qrepo[None])
self.added = []
def removeundo(self, repo):
undo = repo.sjoin(b'undo')
if not os.path.exists(undo):
return
try:
os.unlink(undo)
except OSError as inst:
self.ui.warn(
_(b'error removing undo: %s\n') % stringutil.forcebytestr(inst)
)
def backup(self, repo, files, copy=False):
# backup local changes in --force case
for f in sorted(files):
absf = repo.wjoin(f)
if os.path.lexists(absf):
absorig = scmutil.backuppath(self.ui, repo, f)
self.ui.note(
_(b'saving current version of %s as %s\n')
% (f, os.path.relpath(absorig))
)
if copy:
util.copyfile(absf, absorig)
else:
util.rename(absf, absorig)
def printdiff(
self,
repo,
diffopts,
node1,
node2=None,
files=None,
fp=None,
changes=None,
opts=None,
):
if opts is None:
opts = {}
stat = opts.get(b'stat')
m = scmutil.match(repo[node1], files, opts)
logcmdutil.diffordiffstat(
self.ui,
repo,
diffopts,
repo[node1],
repo[node2],
m,
changes,
stat,
fp,
)
def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
# first try just applying the patch
(err, n) = self.apply(
repo, [patch], update_status=False, strict=True, merge=rev
)
if err == 0:
return (err, n)
if n is None:
raise error.Abort(_(b"apply failed for patch %s") % patch)
self.ui.warn(_(b"patch didn't work out, merging %s\n") % patch)
# apply failed, strip away that rev and merge.
hg.clean(repo, head)
strip(self.ui, repo, [n], update=False, backup=False)
ctx = repo[rev]
ret = hg.merge(ctx, remind=False)
if ret:
raise error.Abort(_(b"update returned %d") % ret)
n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
if n is None:
raise error.Abort(_(b"repo commit failed"))
try:
ph = patchheader(mergeq.join(patch), self.plainmode)
except Exception:
raise error.Abort(_(b"unable to read %s") % patch)
diffopts = self.patchopts(diffopts, patch)
patchf = self.opener(patch, b"w")
comments = bytes(ph)
if comments:
patchf.write(comments)
self.printdiff(repo, diffopts, head, n, fp=patchf)
patchf.close()
self.removeundo(repo)
return (0, n)
def qparents(self, repo, rev=None):
"""return the mq handled parent or p1
        In some cases where mq ends up being the parent of a merge, the
        appropriate parent may be p2
        (e.g. an in-progress merge started with mq disabled).
        If no parents are managed by mq, p1 is returned.
"""
if rev is None:
(p1, p2) = repo.dirstate.parents()
if p2 == nullid:
return p1
if not self.applied:
return None
return self.applied[-1].node
p1, p2 = repo.changelog.parents(rev)
if p2 != nullid and p2 in [x.node for x in self.applied]:
return p2
return p1
def mergepatch(self, repo, mergeq, series, diffopts):
if not self.applied:
# each of the patches merged in will have two parents. This
# can confuse the qrefresh, qdiff, and strip code because it
# needs to know which parent is actually in the patch queue.
# so, we insert a merge marker with only one parent. This way
# the first patch in the queue is never a merge patch
#
pname = b".hg.patches.merge.marker"
n = newcommit(repo, None, b'[mq]: merge marker', force=True)
self.removeundo(repo)
self.applied.append(statusentry(n, pname))
self.applieddirty = True
head = self.qparents(repo)
for patch in series:
patch = mergeq.lookup(patch, strict=True)
if not patch:
self.ui.warn(_(b"patch %s does not exist\n") % patch)
return (1, None)
pushable, reason = self.pushable(patch)
if not pushable:
self.explainpushable(patch, all_patches=True)
continue
info = mergeq.isapplied(patch)
if not info:
self.ui.warn(_(b"patch %s is not applied\n") % patch)
return (1, None)
rev = info[1]
err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
if head:
self.applied.append(statusentry(head, patch))
self.applieddirty = True
if err:
return (err, head)
self.savedirty()
return (0, head)
def patch(self, repo, patchfile):
"""Apply patchfile to the working directory.
patchfile: name of patch file"""
files = set()
try:
fuzz = patchmod.patch(
self.ui, repo, patchfile, strip=1, files=files, eolmode=None
)
return (True, list(files), fuzz)
except Exception as inst:
self.ui.note(stringutil.forcebytestr(inst) + b'\n')
if not self.ui.verbose:
self.ui.warn(_(b"patch failed, unable to continue (try -v)\n"))
self.ui.traceback()
return (False, list(files), False)
def apply(
self,
repo,
series,
list=False,
update_status=True,
strict=False,
patchdir=None,
merge=None,
all_files=None,
tobackup=None,
keepchanges=False,
):
wlock = lock = tr = None
try:
wlock = repo.wlock()
lock = repo.lock()
tr = repo.transaction(b"qpush")
try:
ret = self._apply(
repo,
series,
list,
update_status,
strict,
patchdir,
merge,
all_files=all_files,
tobackup=tobackup,
keepchanges=keepchanges,
)
tr.close()
self.savedirty()
return ret
except AbortNoCleanup:
tr.close()
self.savedirty()
raise
except: # re-raises
try:
tr.abort()
finally:
self.invalidate()
raise
finally:
release(tr, lock, wlock)
self.removeundo(repo)
def _apply(
self,
repo,
series,
list=False,
update_status=True,
strict=False,
patchdir=None,
merge=None,
all_files=None,
tobackup=None,
keepchanges=False,
):
"""returns (error, hash)
error = 1 for unable to read, 2 for patch failed, 3 for patch
fuzz. tobackup is None or a set of files to backup before they
are modified by a patch.
"""
# TODO unify with commands.py
if not patchdir:
patchdir = self.path
err = 0
n = None
for patchname in series:
pushable, reason = self.pushable(patchname)
if not pushable:
self.explainpushable(patchname, all_patches=True)
continue
self.ui.status(_(b"applying %s\n") % patchname)
pf = os.path.join(patchdir, patchname)
try:
ph = patchheader(self.join(patchname), self.plainmode)
except IOError:
self.ui.warn(_(b"unable to read %s\n") % patchname)
err = 1
break
message = ph.message
if not message:
# The commit message should not be translated
message = b"imported patch %s\n" % patchname
else:
if list:
# The commit message should not be translated
message.append(b"\nimported patch %s" % patchname)
message = b'\n'.join(message)
if ph.haspatch:
if tobackup:
touched = patchmod.changedfiles(self.ui, repo, pf)
touched = set(touched) & tobackup
if touched and keepchanges:
raise AbortNoCleanup(
_(b"conflicting local changes found"),
hint=_(b"did you forget to qrefresh?"),
)
self.backup(repo, touched, copy=True)
tobackup = tobackup - touched
(patcherr, files, fuzz) = self.patch(repo, pf)
if all_files is not None:
all_files.update(files)
patcherr = not patcherr
else:
self.ui.warn(_(b"patch %s is empty\n") % patchname)
patcherr, files, fuzz = 0, [], 0
if merge and files:
# Mark as removed/merged and update dirstate parent info
removed = []
merged = []
for f in files:
if os.path.lexists(repo.wjoin(f)):
merged.append(f)
else:
removed.append(f)
with repo.dirstate.parentchange():
for f in removed:
repo.dirstate.remove(f)
for f in merged:
repo.dirstate.merge(f)
p1 = repo.dirstate.p1()
repo.setparents(p1, merge)
if all_files and b'.hgsubstate' in all_files:
wctx = repo[None]
pctx = repo[b'.']
overwrite = False
mergedsubstate = subrepoutil.submerge(
repo, pctx, wctx, wctx, overwrite
)
files += mergedsubstate.keys()
match = scmutil.matchfiles(repo, files or [])
oldtip = repo.changelog.tip()
n = newcommit(
repo, None, message, ph.user, ph.date, match=match, force=True
)
if repo.changelog.tip() == oldtip:
raise error.Abort(
_(b"qpush exactly duplicates child changeset")
)
if n is None:
raise error.Abort(_(b"repository commit failed"))
if update_status:
self.applied.append(statusentry(n, patchname))
if patcherr:
self.ui.warn(
_(b"patch failed, rejects left in working directory\n")
)
err = 2
break
if fuzz and strict:
self.ui.warn(_(b"fuzz found when applying patch, stopping\n"))
err = 3
break
return (err, n)
def _cleanup(self, patches, numrevs, keep=False):
if not keep:
r = self.qrepo()
if r:
r[None].forget(patches)
for p in patches:
try:
os.unlink(self.join(p))
except OSError as inst:
if inst.errno != errno.ENOENT:
raise
qfinished = []
if numrevs:
qfinished = self.applied[:numrevs]
del self.applied[:numrevs]
self.applieddirty = True
unknown = []
sortedseries = []
for p in patches:
idx = self.findseries(p)
if idx is None:
sortedseries.append((-1, p))
else:
sortedseries.append((idx, p))
sortedseries.sort(reverse=True)
for (i, p) in sortedseries:
if i != -1:
del self.fullseries[i]
else:
unknown.append(p)
if unknown:
if numrevs:
rev = {entry.name: entry.node for entry in qfinished}
for p in unknown:
msg = _(b'revision %s refers to unknown patches: %s\n')
self.ui.warn(msg % (short(rev[p]), p))
else:
msg = _(b'unknown patches: %s\n')
raise error.Abort(b''.join(msg % p for p in unknown))
self.parseseries()
self.seriesdirty = True
return [entry.node for entry in qfinished]
def _revpatches(self, repo, revs):
firstrev = repo[self.applied[0].node].rev()
patches = []
for i, rev in enumerate(revs):
if rev < firstrev:
raise error.Abort(_(b'revision %d is not managed') % rev)
ctx = repo[rev]
base = self.applied[i].node
if ctx.node() != base:
msg = _(b'cannot delete revision %d above applied patches')
raise error.Abort(msg % rev)
patch = self.applied[i].name
for fmt in (b'[mq]: %s', b'imported patch %s'):
if ctx.description() == fmt % patch:
msg = _(b'patch %s finalized without changeset message\n')
repo.ui.status(msg % patch)
break
patches.append(patch)
return patches
def finish(self, repo, revs):
# Manually trigger phase computation to ensure phasedefaults is
# executed before we remove the patches.
repo._phasecache
patches = self._revpatches(repo, sorted(revs))
qfinished = self._cleanup(patches, len(patches))
if qfinished and repo.ui.configbool(b'mq', b'secret'):
# only use this logic when the secret option is added
oldqbase = repo[qfinished[0]]
tphase = phases.newcommitphase(repo.ui)
if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
with repo.transaction(b'qfinish') as tr:
phases.advanceboundary(repo, tr, tphase, qfinished)
def delete(self, repo, patches, opts):
if not patches and not opts.get(b'rev'):
raise error.Abort(
_(b'qdelete requires at least one revision or patch name')
)
realpatches = []
for patch in patches:
patch = self.lookup(patch, strict=True)
info = self.isapplied(patch)
if info:
raise error.Abort(_(b"cannot delete applied patch %s") % patch)
if patch not in self.series:
raise error.Abort(_(b"patch %s not in series file") % patch)
if patch not in realpatches:
realpatches.append(patch)
numrevs = 0
if opts.get(b'rev'):
if not self.applied:
raise error.Abort(_(b'no patches applied'))
revs = scmutil.revrange(repo, opts.get(b'rev'))
revs.sort()
revpatches = self._revpatches(repo, revs)
realpatches += revpatches
numrevs = len(revpatches)
self._cleanup(realpatches, numrevs, opts.get(b'keep'))
def checktoppatch(self, repo):
'''check that working directory is at qtip'''
if self.applied:
top = self.applied[-1].node
patch = self.applied[-1].name
if repo.dirstate.p1() != top:
raise error.Abort(_(b"working directory revision is not qtip"))
return top, patch
return None, None
def putsubstate2changes(self, substatestate, changes):
if isinstance(changes, list):
mar = changes[:3]
else:
mar = (changes.modified, changes.added, changes.removed)
if any((b'.hgsubstate' in files for files in mar)):
return # already listed up
# not yet listed up
if substatestate in b'a?':
mar[1].append(b'.hgsubstate')
elif substatestate in b'r':
mar[2].append(b'.hgsubstate')
else: # modified
mar[0].append(b'.hgsubstate')
def checklocalchanges(self, repo, force=False, refresh=True):
excsuffix = b''
if refresh:
excsuffix = b', qrefresh first'
# plain versions for i18n tool to detect them
_(b"local changes found, qrefresh first")
_(b"local changed subrepos found, qrefresh first")
s = repo.status()
if not force:
cmdutil.checkunfinished(repo)
if s.modified or s.added or s.removed or s.deleted:
_(b"local changes found") # i18n tool detection
raise error.Abort(_(b"local changes found" + excsuffix))
if checksubstate(repo):
_(b"local changed subrepos found") # i18n tool detection
raise error.Abort(
_(b"local changed subrepos found" + excsuffix)
)
else:
cmdutil.checkunfinished(repo, skipmerge=True)
return s
_reserved = (b'series', b'status', b'guards', b'.', b'..')
def checkreservedname(self, name):
if name in self._reserved:
raise error.Abort(
_(b'"%s" cannot be used as the name of a patch') % name
)
if name != name.strip():
# whitespace is stripped by parseseries()
raise error.Abort(
_(b'patch name cannot begin or end with whitespace')
)
for prefix in (b'.hg', b'.mq'):
if name.startswith(prefix):
raise error.Abort(
_(b'patch name cannot begin with "%s"') % prefix
)
for c in (b'#', b':', b'\r', b'\n'):
if c in name:
raise error.Abort(
_(b'%r cannot be used in the name of a patch')
% pycompat.bytestr(c)
)
def checkpatchname(self, name, force=False):
self.checkreservedname(name)
if not force and os.path.exists(self.join(name)):
if os.path.isdir(self.join(name)):
raise error.Abort(
_(b'"%s" already exists as a directory') % name
)
else:
raise error.Abort(_(b'patch "%s" already exists') % name)
def makepatchname(self, title, fallbackname):
"""Return a suitable filename for title, adding a suffix to make
it unique in the existing list"""
namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_')
namebase = namebase[:75] # avoid too long name (issue5117)
if namebase:
try:
self.checkreservedname(namebase)
except error.Abort:
namebase = fallbackname
else:
namebase = fallbackname
name = namebase
i = 0
while True:
if name not in self.fullseries:
try:
self.checkpatchname(name)
break
except error.Abort:
pass
i += 1
name = b'%s__%d' % (namebase, i)
return name
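    # e.g. (illustrative) the title b'Fix race in blob store!' becomes
    # b'fix_race_in_blob_store'; on collision the names tried are
    # b'fix_race_in_blob_store__1', b'fix_race_in_blob_store__2', ...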
def checkkeepchanges(self, keepchanges, force):
if force and keepchanges:
raise error.Abort(_(b'cannot use both --force and --keep-changes'))
def new(self, repo, patchfn, *pats, **opts):
"""options:
msg: a string or a no-argument function returning a string
"""
opts = pycompat.byteskwargs(opts)
msg = opts.get(b'msg')
edit = opts.get(b'edit')
editform = opts.get(b'editform', b'mq.qnew')
user = opts.get(b'user')
date = opts.get(b'date')
if date:
date = dateutil.parsedate(date)
diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True)
if opts.get(b'checkname', True):
self.checkpatchname(patchfn)
inclsubs = checksubstate(repo)
if inclsubs:
substatestate = repo.dirstate[b'.hgsubstate']
if opts.get(b'include') or opts.get(b'exclude') or pats:
# detect missing files in pats
def badfn(f, msg):
if f != b'.hgsubstate': # .hgsubstate is auto-created
raise error.Abort(b'%s: %s' % (f, msg))
match = scmutil.match(repo[None], pats, opts, badfn=badfn)
changes = repo.status(match=match)
else:
changes = self.checklocalchanges(repo, force=True)
commitfiles = list(inclsubs)
commitfiles.extend(changes.modified)
commitfiles.extend(changes.added)
commitfiles.extend(changes.removed)
match = scmutil.matchfiles(repo, commitfiles)
if len(repo[None].parents()) > 1:
raise error.Abort(_(b'cannot manage merge changesets'))
self.checktoppatch(repo)
insert = self.fullseriesend()
with repo.wlock():
try:
# if patch file write fails, abort early
p = self.opener(patchfn, b"w")
except IOError as e:
raise error.Abort(
_(b'cannot write patch "%s": %s')
% (patchfn, encoding.strtolocal(e.strerror))
)
try:
defaultmsg = b"[mq]: %s" % patchfn
editor = cmdutil.getcommiteditor(editform=editform)
if edit:
def finishdesc(desc):
if desc.rstrip():
return desc
else:
return defaultmsg
# i18n: this message is shown in editor with "HG: " prefix
extramsg = _(b'Leave message empty to use default message.')
editor = cmdutil.getcommiteditor(
finishdesc=finishdesc,
extramsg=extramsg,
editform=editform,
)
commitmsg = msg
else:
commitmsg = msg or defaultmsg
n = newcommit(
repo,
None,
commitmsg,
user,
date,
match=match,
force=True,
editor=editor,
)
if n is None:
raise error.Abort(_(b"repo commit failed"))
try:
self.fullseries[insert:insert] = [patchfn]
self.applied.append(statusentry(n, patchfn))
self.parseseries()
self.seriesdirty = True
self.applieddirty = True
nctx = repo[n]
ph = patchheader(self.join(patchfn), self.plainmode)
if user:
ph.setuser(user)
if date:
ph.setdate(b'%d %d' % date)
ph.setparent(hex(nctx.p1().node()))
msg = nctx.description().strip()
if msg == defaultmsg.strip():
msg = b''
ph.setmessage(msg)
p.write(bytes(ph))
if commitfiles:
parent = self.qparents(repo, n)
if inclsubs:
self.putsubstate2changes(substatestate, changes)
chunks = patchmod.diff(
repo,
node1=parent,
node2=n,
changes=changes,
opts=diffopts,
)
for chunk in chunks:
p.write(chunk)
p.close()
r = self.qrepo()
if r:
r[None].add([patchfn])
except: # re-raises
repo.rollback()
raise
except Exception:
patchpath = self.join(patchfn)
try:
os.unlink(patchpath)
except OSError:
self.ui.warn(_(b'error unlinking %s\n') % patchpath)
raise
self.removeundo(repo)
def isapplied(self, patch):
"""returns (index, rev, patch)"""
for i, a in enumerate(self.applied):
if a.name == patch:
return (i, a.node, a.name)
return None
# if the exact patch name does not exist, we try a few
# variations. If strict is passed, we try only #1
#
# 1) a number (as string) to indicate an offset in the series file
# 2) a unique substring of the patch name was given
# 3) patchname[-+]num to indicate an offset in the series file
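    #
    # Illustrative lookups, assuming a hypothetical series
    # [b'alpha.diff', b'beta.diff', b'gamma.diff'] with none of the names
    # existing as files in the patch directory:
    #   lookup(b'1')            -> b'beta.diff'  (offset in the series file)
    #   lookup(b'bet')          -> b'beta.diff'  (unique substring)
    #   lookup(b'gamma.diff-1') -> b'beta.diff'  (name-relative offset)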
def lookup(self, patch, strict=False):
def partialname(s):
if s in self.series:
return s
matches = [x for x in self.series if s in x]
if len(matches) > 1:
self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s)
for m in matches:
self.ui.warn(b' %s\n' % m)
return None
if matches:
return matches[0]
if self.series and self.applied:
if s == b'qtip':
return self.series[self.seriesend(True) - 1]
if s == b'qbase':
return self.series[0]
return None
if patch in self.series:
return patch
if not os.path.isfile(self.join(patch)):
try:
sno = int(patch)
except (ValueError, OverflowError):
pass
else:
if -len(self.series) <= sno < len(self.series):
return self.series[sno]
if not strict:
res = partialname(patch)
if res:
return res
minus = patch.rfind(b'-')
if minus >= 0:
res = partialname(patch[:minus])
if res:
i = self.series.index(res)
try:
off = int(patch[minus + 1 :] or 1)
except (ValueError, OverflowError):
pass
else:
if i - off >= 0:
return self.series[i - off]
plus = patch.rfind(b'+')
if plus >= 0:
res = partialname(patch[:plus])
if res:
i = self.series.index(res)
try:
off = int(patch[plus + 1 :] or 1)
except (ValueError, OverflowError):
pass
else:
if i + off < len(self.series):
return self.series[i + off]
raise error.Abort(_(b"patch %s not in series") % patch)
def push(
self,
repo,
patch=None,
force=False,
list=False,
mergeq=None,
all=False,
move=False,
exact=False,
nobackup=False,
keepchanges=False,
):
self.checkkeepchanges(keepchanges, force)
diffopts = self.diffopts()
with repo.wlock():
heads = []
for hs in repo.branchmap().iterheads():
heads.extend(hs)
if not heads:
heads = [nullid]
if repo.dirstate.p1() not in heads and not exact:
self.ui.status(_(b"(working directory not at a head)\n"))
if not self.series:
self.ui.warn(_(b'no patches in series\n'))
return 0
# Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward),
            # qpush B is a NOP (no change), and qpush A is an error (can't
            # go backwards with qpush).
if patch:
patch = self.lookup(patch)
info = self.isapplied(patch)
if info and info[0] >= len(self.applied) - 1:
self.ui.warn(
_(b'qpush: %s is already at the top\n') % patch
)
return 0
pushable, reason = self.pushable(patch)
if pushable:
if self.series.index(patch) < self.seriesend():
raise error.Abort(
_(b"cannot push to a previous patch: %s") % patch
)
else:
if reason:
reason = _(b'guarded by %s') % reason
else:
reason = _(b'no matching guards')
self.ui.warn(
_(b"cannot push '%s' - %s\n") % (patch, reason)
)
return 1
elif all:
patch = self.series[-1]
if self.isapplied(patch):
self.ui.warn(_(b'all patches are currently applied\n'))
return 0
# Following the above example, starting at 'top' of B:
# qpush should be performed (pushes C), but a subsequent
# qpush without an argument is an error (nothing to
# apply). This allows a loop of "...while hg qpush..." to
# work as it detects an error when done
start = self.seriesend()
if start == len(self.series):
self.ui.warn(_(b'patch series already fully applied\n'))
return 1
if not force and not keepchanges:
self.checklocalchanges(repo, refresh=self.applied)
if exact:
if keepchanges:
raise error.Abort(
_(b"cannot use --exact and --keep-changes together")
)
if move:
raise error.Abort(
_(b'cannot use --exact and --move together')
)
if self.applied:
raise error.Abort(
_(b'cannot push --exact with applied patches')
)
root = self.series[start]
target = patchheader(self.join(root), self.plainmode).parent
if not target:
raise error.Abort(
_(b"%s does not have a parent recorded") % root
)
if not repo[target] == repo[b'.']:
hg.update(repo, target)
if move:
if not patch:
raise error.Abort(_(b"please specify the patch to move"))
for fullstart, rpn in enumerate(self.fullseries):
# strip markers for patch guards
if self.guard_re.split(rpn, 1)[0] == self.series[start]:
break
for i, rpn in enumerate(self.fullseries[fullstart:]):
# strip markers for patch guards
if self.guard_re.split(rpn, 1)[0] == patch:
break
index = fullstart + i
assert index < len(self.fullseries)
fullpatch = self.fullseries[index]
del self.fullseries[index]
self.fullseries.insert(fullstart, fullpatch)
self.parseseries()
self.seriesdirty = True
self.applieddirty = True
if start > 0:
self.checktoppatch(repo)
if not patch:
patch = self.series[start]
end = start + 1
else:
end = self.series.index(patch, start) + 1
tobackup = set()
if (not nobackup and force) or keepchanges:
status = self.checklocalchanges(repo, force=True)
if keepchanges:
tobackup.update(
status.modified
+ status.added
+ status.removed
+ status.deleted
)
else:
tobackup.update(status.modified + status.added)
s = self.series[start:end]
all_files = set()
try:
if mergeq:
ret = self.mergepatch(repo, mergeq, s, diffopts)
else:
ret = self.apply(
repo,
s,
list,
all_files=all_files,
tobackup=tobackup,
keepchanges=keepchanges,
)
except AbortNoCleanup:
raise
except: # re-raises
self.ui.warn(_(b'cleaning up working directory...\n'))
cmdutil.revert(
self.ui,
repo,
repo[b'.'],
no_backup=True,
)
# only remove unknown files that we know we touched or
# created while patching
for f in all_files:
if f not in repo.dirstate:
repo.wvfs.unlinkpath(f, ignoremissing=True)
self.ui.warn(_(b'done\n'))
raise
if not self.applied:
return ret[0]
top = self.applied[-1].name
if ret[0] and ret[0] > 1:
msg = _(b"errors during apply, please fix and qrefresh %s\n")
self.ui.write(msg % top)
else:
self.ui.write(_(b"now at: %s\n") % top)
return ret[0]
def pop(
self,
repo,
patch=None,
force=False,
update=True,
all=False,
nobackup=False,
keepchanges=False,
):
self.checkkeepchanges(keepchanges, force)
with repo.wlock():
if patch:
# index, rev, patch
info = self.isapplied(patch)
if not info:
patch = self.lookup(patch)
info = self.isapplied(patch)
if not info:
raise error.Abort(_(b"patch %s is not applied") % patch)
if not self.applied:
# Allow qpop -a to work repeatedly,
# but not qpop without an argument
self.ui.warn(_(b"no patches applied\n"))
return not all
if all:
start = 0
elif patch:
start = info[0] + 1
else:
start = len(self.applied) - 1
if start >= len(self.applied):
self.ui.warn(_(b"qpop: %s is already at the top\n") % patch)
return
if not update:
parents = repo.dirstate.parents()
rr = [x.node for x in self.applied]
for p in parents:
if p in rr:
self.ui.warn(_(b"qpop: forcing dirstate update\n"))
update = True
else:
parents = [p.node() for p in repo[None].parents()]
update = any(
entry.node in parents for entry in self.applied[start:]
)
tobackup = set()
if update:
s = self.checklocalchanges(repo, force=force or keepchanges)
if force:
if not nobackup:
tobackup.update(s.modified + s.added)
elif keepchanges:
tobackup.update(
s.modified + s.added + s.removed + s.deleted
)
self.applieddirty = True
end = len(self.applied)
rev = self.applied[start].node
try:
heads = repo.changelog.heads(rev)
except error.LookupError:
node = short(rev)
raise error.Abort(_(b'trying to pop unknown node %s') % node)
if heads != [self.applied[-1].node]:
raise error.Abort(
_(
b"popping would remove a revision not "
b"managed by this patch queue"
)
)
if not repo[self.applied[-1].node].mutable():
raise error.Abort(
_(b"popping would remove a public revision"),
hint=_(b"see 'hg help phases' for details"),
)
# we know there are no local changes, so we can make a simplified
# form of hg.update.
if update:
qp = self.qparents(repo, rev)
ctx = repo[qp]
st = repo.status(qp, b'.')
m, a, r, d = st.modified, st.added, st.removed, st.deleted
if d:
raise error.Abort(_(b"deletions found between repo revs"))
tobackup = set(a + m + r) & tobackup
if keepchanges and tobackup:
raise error.Abort(_(b"local changes found, qrefresh first"))
self.backup(repo, tobackup)
with repo.dirstate.parentchange():
for f in a:
repo.wvfs.unlinkpath(f, ignoremissing=True)
repo.dirstate.drop(f)
for f in m + r:
fctx = ctx[f]
repo.wwrite(f, fctx.data(), fctx.flags())
repo.dirstate.normal(f)
repo.setparents(qp, nullid)
for patch in reversed(self.applied[start:end]):
self.ui.status(_(b"popping %s\n") % patch.name)
del self.applied[start:end]
strip(self.ui, repo, [rev], update=False, backup=False)
for s, state in repo[b'.'].substate.items():
repo[b'.'].sub(s).get(state)
if self.applied:
self.ui.write(_(b"now at: %s\n") % self.applied[-1].name)
else:
self.ui.write(_(b"patch queue now empty\n"))
def diff(self, repo, pats, opts):
top, patch = self.checktoppatch(repo)
if not top:
self.ui.write(_(b"no patches applied\n"))
return
qp = self.qparents(repo, top)
if opts.get(b'reverse'):
node1, node2 = None, qp
else:
node1, node2 = qp, None
diffopts = self.diffopts(opts, patch)
self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
def refresh(self, repo, pats=None, **opts):
opts = pycompat.byteskwargs(opts)
if not self.applied:
self.ui.write(_(b"no patches applied\n"))
return 1
msg = opts.get(b'msg', b'').rstrip()
edit = opts.get(b'edit')
editform = opts.get(b'editform', b'mq.qrefresh')
newuser = opts.get(b'user')
newdate = opts.get(b'date')
if newdate:
newdate = b'%d %d' % dateutil.parsedate(newdate)
wlock = repo.wlock()
try:
self.checktoppatch(repo)
(top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
if repo.changelog.heads(top) != [top]:
raise error.Abort(
_(b"cannot qrefresh a revision with children")
)
if not repo[top].mutable():
raise error.Abort(
_(b"cannot qrefresh public revision"),
hint=_(b"see 'hg help phases' for details"),
)
cparents = repo.changelog.parents(top)
patchparent = self.qparents(repo, top)
inclsubs = checksubstate(repo, patchparent)
if inclsubs:
substatestate = repo.dirstate[b'.hgsubstate']
ph = patchheader(self.join(patchfn), self.plainmode)
diffopts = self.diffopts(
{b'git': opts.get(b'git')}, patchfn, plain=True
)
if newuser:
ph.setuser(newuser)
if newdate:
ph.setdate(newdate)
ph.setparent(hex(patchparent))
# only commit new patch when write is complete
patchf = self.opener(patchfn, b'w', atomictemp=True)
# update the dirstate in place, strip off the qtip commit
# and then commit.
#
# this should really read:
# st = repo.status(top, patchparent)
# but we do it backwards to take advantage of manifest/changelog
# caching against the next repo.status call
st = repo.status(patchparent, top)
mm, aa, dd = st.modified, st.added, st.removed
ctx = repo[top]
aaa = aa[:]
match1 = scmutil.match(repo[None], pats, opts)
# in short mode, we only diff the files included in the
# patch already plus specified files
if opts.get(b'short'):
# if amending a patch, we start with existing
# files plus specified files - unfiltered
match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
# filter with include/exclude options
match1 = scmutil.match(repo[None], opts=opts)
else:
match = scmutil.matchall(repo)
stb = repo.status(match=match)
m, a, r, d = stb.modified, stb.added, stb.removed, stb.deleted
mm = set(mm)
aa = set(aa)
dd = set(dd)
# we might end up with files that were added between
# qtip and the dirstate parent, but then changed in the
# local dirstate. in this case, we want them to only
# show up in the added section
for x in m:
if x not in aa:
mm.add(x)
# we might end up with files added by the local dirstate that
# were deleted by the patch. In this case, they should only
# show up in the changed section.
for x in a:
if x in dd:
dd.remove(x)
mm.add(x)
else:
aa.add(x)
# make sure any files deleted in the local dirstate
# are not in the add or change column of the patch
forget = []
for x in d + r:
if x in aa:
aa.remove(x)
forget.append(x)
continue
else:
mm.discard(x)
dd.add(x)
m = list(mm)
r = list(dd)
a = list(aa)
# create 'match' that includes the files to be recommitted.
# apply match1 via repo.status to ensure correct case handling.
st = repo.status(patchparent, match=match1)
cm, ca, cr, cd = st.modified, st.added, st.removed, st.deleted
allmatches = set(cm + ca + cr + cd)
refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
files = set(inclsubs)
for x in refreshchanges:
files.update(x)
match = scmutil.matchfiles(repo, files)
bmlist = repo[top].bookmarks()
dsguard = None
try:
dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
if diffopts.git or diffopts.upgrade:
copies = {}
for dst in a:
src = repo.dirstate.copied(dst)
# during qfold, the source file for copies may
# be removed. Treat this as a simple add.
if src is not None and src in repo.dirstate:
copies.setdefault(src, []).append(dst)
repo.dirstate.add(dst)
# remember the copies between patchparent and qtip
for dst in aaa:
src = ctx[dst].copysource()
if src:
copies.setdefault(src, []).extend(
copies.get(dst, [])
)
if dst in a:
copies[src].append(dst)
# we can't copy a file created by the patch itself
if dst in copies:
del copies[dst]
for src, dsts in pycompat.iteritems(copies):
for dst in dsts:
repo.dirstate.copy(src, dst)
else:
for dst in a:
repo.dirstate.add(dst)
# Drop useless copy information
for f in list(repo.dirstate.copies()):
repo.dirstate.copy(None, f)
for f in r:
repo.dirstate.remove(f)
# if the patch excludes a modified file, mark that
# file with mtime=0 so status can see it.
mm = []
for i in pycompat.xrange(len(m) - 1, -1, -1):
if not match1(m[i]):
mm.append(m[i])
del m[i]
for f in m:
repo.dirstate.normal(f)
for f in mm:
repo.dirstate.normallookup(f)
for f in forget:
repo.dirstate.drop(f)
user = ph.user or ctx.user()
oldphase = repo[top].phase()
# assumes strip can roll itself back if interrupted
repo.setparents(*cparents)
self.applied.pop()
self.applieddirty = True
strip(self.ui, repo, [top], update=False, backup=False)
dsguard.close()
finally:
release(dsguard)
try:
# might be nice to attempt to roll back strip after this
defaultmsg = b"[mq]: %s" % patchfn
editor = cmdutil.getcommiteditor(editform=editform)
if edit:
def finishdesc(desc):
if desc.rstrip():
ph.setmessage(desc)
return desc
return defaultmsg
# i18n: this message is shown in editor with "HG: " prefix
extramsg = _(b'Leave message empty to use default message.')
editor = cmdutil.getcommiteditor(
finishdesc=finishdesc,
extramsg=extramsg,
editform=editform,
)
message = msg or b"\n".join(ph.message)
elif not msg:
if not ph.message:
message = defaultmsg
else:
message = b"\n".join(ph.message)
else:
message = msg
ph.setmessage(msg)
                # Ensure we create a new changeset in the same phase as
                # the old one.
lock = tr = None
try:
lock = repo.lock()
tr = repo.transaction(b'mq')
n = newcommit(
repo,
oldphase,
message,
user,
ph.date,
match=match,
force=True,
editor=editor,
)
# only write patch after a successful commit
c = [list(x) for x in refreshchanges]
if inclsubs:
self.putsubstate2changes(substatestate, c)
chunks = patchmod.diff(
repo, patchparent, changes=c, opts=diffopts
)
comments = bytes(ph)
if comments:
patchf.write(comments)
for chunk in chunks:
patchf.write(chunk)
patchf.close()
marks = repo._bookmarks
marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
tr.close()
self.applied.append(statusentry(n, patchfn))
finally:
lockmod.release(tr, lock)
except: # re-raises
ctx = repo[cparents[0]]
repo.dirstate.rebuild(ctx.node(), ctx.manifest())
self.savedirty()
self.ui.warn(
_(
b'qrefresh interrupted while patch was popped! '
b'(revert --all, qpush to recover)\n'
)
)
raise
finally:
wlock.release()
self.removeundo(repo)
def init(self, repo, create=False):
if not create and os.path.isdir(self.path):
raise error.Abort(_(b"patch queue directory already exists"))
try:
os.mkdir(self.path)
except OSError as inst:
if inst.errno != errno.EEXIST or not create:
raise
if create:
return self.qrepo(create=True)
def unapplied(self, repo, patch=None):
if patch and patch not in self.series:
raise error.Abort(_(b"patch %s is not in series file") % patch)
if not patch:
start = self.seriesend()
else:
start = self.series.index(patch) + 1
unapplied = []
for i in pycompat.xrange(start, len(self.series)):
pushable, reason = self.pushable(i)
if pushable:
unapplied.append((i, self.series[i]))
self.explainpushable(i)
return unapplied
def qseries(
self,
repo,
missing=None,
start=0,
length=None,
status=None,
summary=False,
):
def displayname(pfx, patchname, state):
if pfx:
self.ui.write(pfx)
if summary:
ph = patchheader(self.join(patchname), self.plainmode)
if ph.message:
msg = ph.message[0]
else:
msg = b''
if self.ui.formatted():
width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
if width > 0:
msg = stringutil.ellipsis(msg, width)
else:
msg = b''
self.ui.write(patchname, label=b'qseries.' + state)
self.ui.write(b': ')
self.ui.write(msg, label=b'qseries.message.' + state)
else:
self.ui.write(patchname, label=b'qseries.' + state)
self.ui.write(b'\n')
applied = {p.name for p in self.applied}
if length is None:
length = len(self.series) - start
if not missing:
if self.ui.verbose:
idxwidth = len(b"%d" % (start + length - 1))
for i in pycompat.xrange(start, start + length):
patch = self.series[i]
if patch in applied:
char, state = b'A', b'applied'
elif self.pushable(i)[0]:
char, state = b'U', b'unapplied'
else:
char, state = b'G', b'guarded'
pfx = b''
if self.ui.verbose:
pfx = b'%*d %s ' % (idxwidth, i, char)
elif status and status != char:
continue
displayname(pfx, patch, state)
else:
msng_list = []
for root, dirs, files in os.walk(self.path):
d = root[len(self.path) + 1 :]
for f in files:
fl = os.path.join(d, f)
if (
fl not in self.series
and fl
not in (
self.statuspath,
self.seriespath,
self.guardspath,
)
and not fl.startswith(b'.')
):
msng_list.append(fl)
for x in sorted(msng_list):
pfx = self.ui.verbose and b'D ' or b''
displayname(pfx, x, b'missing')
def issaveline(self, l):
if l.name == b'.hg.patches.save.line':
return True
def qrepo(self, create=False):
ui = self.baseui.copy()
# copy back attributes set by ui.pager()
if self.ui.pageractive and not ui.pageractive:
ui.pageractive = self.ui.pageractive
# internal config: ui.formatted
ui.setconfig(
b'ui',
b'formatted',
self.ui.config(b'ui', b'formatted'),
b'mqpager',
)
ui.setconfig(
b'ui',
b'interactive',
self.ui.config(b'ui', b'interactive'),
b'mqpager',
)
if create or os.path.isdir(self.join(b".hg")):
return hg.repository(ui, path=self.path, create=create)
def restore(self, repo, rev, delete=None, qupdate=None):
desc = repo[rev].description().strip()
lines = desc.splitlines()
datastart = None
series = []
applied = []
qpp = None
for i, line in enumerate(lines):
if line == b'Patch Data:':
datastart = i + 1
elif line.startswith(b'Dirstate:'):
l = line.rstrip()
l = l[10:].split(b' ')
qpp = [bin(x) for x in l]
elif datastart is not None:
l = line.rstrip()
n, name = l.split(b':', 1)
if n:
applied.append(statusentry(bin(n), name))
else:
series.append(l)
if datastart is None:
self.ui.warn(_(b"no saved patch data found\n"))
return 1
self.ui.warn(_(b"restoring status: %s\n") % lines[0])
self.fullseries = series
self.applied = applied
self.parseseries()
self.seriesdirty = True
self.applieddirty = True
heads = repo.changelog.heads()
if delete:
if rev not in heads:
self.ui.warn(_(b"save entry has children, leaving it alone\n"))
else:
self.ui.warn(_(b"removing save entry %s\n") % short(rev))
pp = repo.dirstate.parents()
if rev in pp:
update = True
else:
update = False
strip(self.ui, repo, [rev], update=update, backup=False)
if qpp:
self.ui.warn(
_(b"saved queue repository parents: %s %s\n")
% (short(qpp[0]), short(qpp[1]))
)
if qupdate:
self.ui.status(_(b"updating queue directory\n"))
r = self.qrepo()
if not r:
self.ui.warn(_(b"unable to load queue repository\n"))
return 1
hg.clean(r, qpp[0])
def save(self, repo, msg=None):
if not self.applied:
self.ui.warn(_(b"save: no patches applied, exiting\n"))
return 1
if self.issaveline(self.applied[-1]):
self.ui.warn(_(b"status is already saved\n"))
return 1
if not msg:
msg = _(b"hg patches saved state")
else:
msg = b"hg patches: " + msg.rstrip(b'\r\n')
r = self.qrepo()
if r:
pp = r.dirstate.parents()
msg += b"\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
msg += b"\n\nPatch Data:\n"
msg += b''.join(b'%s\n' % x for x in self.applied)
msg += b''.join(b':%s\n' % x for x in self.fullseries)
n = repo.commit(msg, force=True)
if not n:
self.ui.warn(_(b"repo commit failed\n"))
return 1
self.applied.append(statusentry(n, b'.hg.patches.save.line'))
self.applieddirty = True
self.removeundo(repo)
def fullseriesend(self):
if self.applied:
p = self.applied[-1].name
end = self.findseries(p)
if end is None:
return len(self.fullseries)
return end + 1
return 0
def seriesend(self, all_patches=False):
"""If all_patches is False, return the index of the next pushable patch
in the series, or the series length. If all_patches is True, return the
index of the first patch past the last applied one.
"""
end = 0
def nextpatch(start):
if all_patches or start >= len(self.series):
return start
for i in pycompat.xrange(start, len(self.series)):
p, reason = self.pushable(i)
if p:
return i
self.explainpushable(i)
return len(self.series)
if self.applied:
p = self.applied[-1].name
try:
end = self.series.index(p)
except ValueError:
return 0
return nextpatch(end + 1)
return nextpatch(end)
def appliedname(self, index):
pname = self.applied[index].name
if not self.ui.verbose:
p = pname
else:
p = (b"%d" % self.series.index(pname)) + b" " + pname
return p
def qimport(
self,
repo,
files,
patchname=None,
rev=None,
existing=None,
force=None,
git=False,
):
def checkseries(patchname):
if patchname in self.series:
raise error.Abort(
_(b'patch %s is already in the series file') % patchname
)
if rev:
if files:
raise error.Abort(
_(b'option "-r" not valid when importing files')
)
rev = scmutil.revrange(repo, rev)
rev.sort(reverse=True)
elif not files:
raise error.Abort(_(b'no files or revisions specified'))
if (len(files) > 1 or len(rev) > 1) and patchname:
raise error.Abort(
_(b'option "-n" not valid when importing multiple patches')
)
imported = []
if rev:
# If mq patches are applied, we can only import revisions
# that form a linear path to qbase.
# Otherwise, they should form a linear path to a head.
heads = repo.changelog.heads(repo.changelog.node(rev.first()))
if len(heads) > 1:
raise error.Abort(
_(b'revision %d is the root of more than one branch')
% rev.last()
)
if self.applied:
base = repo.changelog.node(rev.first())
if base in [n.node for n in self.applied]:
raise error.Abort(
_(b'revision %d is already managed') % rev.first()
)
if heads != [self.applied[-1].node]:
raise error.Abort(
_(b'revision %d is not the parent of the queue')
% rev.first()
)
base = repo.changelog.rev(self.applied[0].node)
lastparent = repo.changelog.parentrevs(base)[0]
else:
if heads != [repo.changelog.node(rev.first())]:
raise error.Abort(
_(b'revision %d has unmanaged children') % rev.first()
)
lastparent = None
diffopts = self.diffopts({b'git': git})
with repo.transaction(b'qimport') as tr:
for r in rev:
if not repo[r].mutable():
raise error.Abort(
_(b'revision %d is not mutable') % r,
hint=_(b"see 'hg help phases' " b'for details'),
)
p1, p2 = repo.changelog.parentrevs(r)
n = repo.changelog.node(r)
if p2 != nullrev:
raise error.Abort(
_(b'cannot import merge revision %d') % r
)
if lastparent and lastparent != r:
raise error.Abort(
_(b'revision %d is not the parent of %d')
% (r, lastparent)
)
lastparent = p1
if not patchname:
patchname = self.makepatchname(
repo[r].description().split(b'\n', 1)[0],
b'%d.diff' % r,
)
checkseries(patchname)
self.checkpatchname(patchname, force)
self.fullseries.insert(0, patchname)
with self.opener(patchname, b"w") as fp:
cmdutil.exportfile(repo, [n], fp, opts=diffopts)
se = statusentry(n, patchname)
self.applied.insert(0, se)
self.added.append(patchname)
imported.append(patchname)
patchname = None
if rev and repo.ui.configbool(b'mq', b'secret'):
# if we added anything with --rev, move the secret root
phases.retractboundary(repo, tr, phases.secret, [n])
self.parseseries()
self.applieddirty = True
self.seriesdirty = True
for i, filename in enumerate(files):
if existing:
if filename == b'-':
raise error.Abort(
_(b'-e is incompatible with import from -')
)
filename = normname(filename)
self.checkreservedname(filename)
if util.url(filename).islocal():
originpath = self.join(filename)
if not os.path.isfile(originpath):
raise error.Abort(
_(b"patch %s does not exist") % filename
)
if patchname:
self.checkpatchname(patchname, force)
self.ui.write(
_(b'renaming %s to %s\n') % (filename, patchname)
)
util.rename(originpath, self.join(patchname))
else:
patchname = filename
else:
if filename == b'-' and not patchname:
raise error.Abort(
_(b'need --name to import a patch from -')
)
elif not patchname:
patchname = normname(
os.path.basename(filename.rstrip(b'/'))
)
self.checkpatchname(patchname, force)
try:
if filename == b'-':
text = self.ui.fin.read()
else:
fp = hg.openpath(self.ui, filename)
text = fp.read()
fp.close()
except (OSError, IOError):
raise error.Abort(_(b"unable to read file %s") % filename)
patchf = self.opener(patchname, b"w")
patchf.write(text)
patchf.close()
if not force:
checkseries(patchname)
if patchname not in self.series:
index = self.fullseriesend() + i
self.fullseries[index:index] = [patchname]
self.parseseries()
self.seriesdirty = True
self.ui.warn(_(b"adding %s to series file\n") % patchname)
self.added.append(patchname)
imported.append(patchname)
patchname = None
self.removeundo(repo)
return imported
def fixkeepchangesopts(ui, opts):
if (
not ui.configbool(b'mq', b'keepchanges')
or opts.get(b'force')
or opts.get(b'exact')
):
return opts
opts = dict(opts)
opts[b'keep_changes'] = True
return opts
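# Illustrative note, not part of upstream mq.py: a sketch of how the helper
# above behaves, assuming an hgrc containing "[mq] keepchanges = True".
#   opts = {b'force': None, b'exact': None}
#   fixkeepchangesopts(ui, opts)  ->  {..., b'keep_changes': True}
# With --force or --exact given, or with the config option unset, the opts
# dict is returned unchanged.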
@command(
b"qdelete|qremove|qrm",
[
(b'k', b'keep', None, _(b'keep patch file')),
(
b'r',
b'rev',
[],
_(b'stop managing a revision (DEPRECATED)'),
_(b'REV'),
),
],
_(b'hg qdelete [-k] [PATCH]...'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def delete(ui, repo, *patches, **opts):
"""remove patches from queue
The patches must not be applied, and at least one patch is required. Exact
patch identifiers must be given. With -k/--keep, the patch files are
preserved in the patch directory.
To stop managing a patch and move it into permanent history,
use the :hg:`qfinish` command."""
q = repo.mq
q.delete(repo, patches, pycompat.byteskwargs(opts))
q.savedirty()
return 0
@command(
b"qapplied",
[(b'1', b'last', None, _(b'show only the preceding applied patch'))]
+ seriesopts,
_(b'hg qapplied [-1] [-s] [PATCH]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def applied(ui, repo, patch=None, **opts):
"""print the patches already applied
Returns 0 on success."""
q = repo.mq
opts = pycompat.byteskwargs(opts)
if patch:
if patch not in q.series:
raise error.Abort(_(b"patch %s is not in series file") % patch)
end = q.series.index(patch) + 1
else:
end = q.seriesend(True)
if opts.get(b'last') and not end:
ui.write(_(b"no patches applied\n"))
return 1
elif opts.get(b'last') and end == 1:
ui.write(_(b"only one patch applied\n"))
return 1
elif opts.get(b'last'):
start = end - 2
end = 1
else:
start = 0
q.qseries(
repo, length=end, start=start, status=b'A', summary=opts.get(b'summary')
)
@command(
b"qunapplied",
[(b'1', b'first', None, _(b'show only the first patch'))] + seriesopts,
_(b'hg qunapplied [-1] [-s] [PATCH]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def unapplied(ui, repo, patch=None, **opts):
"""print the patches not yet applied
Returns 0 on success."""
q = repo.mq
opts = pycompat.byteskwargs(opts)
if patch:
if patch not in q.series:
raise error.Abort(_(b"patch %s is not in series file") % patch)
start = q.series.index(patch) + 1
else:
start = q.seriesend(True)
if start == len(q.series) and opts.get(b'first'):
ui.write(_(b"all patches applied\n"))
return 1
if opts.get(b'first'):
length = 1
else:
length = None
q.qseries(
repo,
start=start,
length=length,
status=b'U',
summary=opts.get(b'summary'),
)
@command(
b"qimport",
[
(b'e', b'existing', None, _(b'import file in patch directory')),
(b'n', b'name', b'', _(b'name of patch file'), _(b'NAME')),
(b'f', b'force', None, _(b'overwrite existing files')),
(
b'r',
b'rev',
[],
_(b'place existing revisions under mq control'),
_(b'REV'),
),
(b'g', b'git', None, _(b'use git extended diff format')),
(b'P', b'push', None, _(b'qpush after importing')),
],
_(b'hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
helpcategory=command.CATEGORY_IMPORT_EXPORT,
)
def qimport(ui, repo, *filename, **opts):
"""import a patch or existing changeset
The patch is inserted into the series after the last applied
patch. If no patches have been applied, qimport prepends the patch
to the series.
The patch will have the same name as its source file unless you
give it a new one with -n/--name.
You can register an existing patch inside the patch directory with
the -e/--existing flag.
With -f/--force, an existing patch of the same name will be
overwritten.
An existing changeset may be placed under mq control with -r/--rev
(e.g. qimport --rev . -n patch will place the current revision
under mq control). With -g/--git, patches imported with --rev will
use the git diff format. See the diffs help topic for information
on why this is important for preserving rename/copy information
and permission changes. Use :hg:`qfinish` to remove changesets
from mq control.
To import a patch from standard input, pass - as the patch file.
When importing from standard input, a patch name must be specified
using the --name flag.
To import an existing patch while renaming it::
hg qimport -e existing-patch -n new-name
Returns 0 if import succeeded.
"""
opts = pycompat.byteskwargs(opts)
    with repo.lock():  # because this may move phases
q = repo.mq
try:
imported = q.qimport(
repo,
filename,
patchname=opts.get(b'name'),
existing=opts.get(b'existing'),
force=opts.get(b'force'),
rev=opts.get(b'rev'),
git=opts.get(b'git'),
)
finally:
q.savedirty()
if imported and opts.get(b'push') and not opts.get(b'rev'):
return q.push(repo, imported[-1])
return 0
def qinit(ui, repo, create):
"""initialize a new queue repository
This command also creates a series file for ordering patches, and
an mq-specific .hgignore file in the queue repository, to exclude
the status and guards files (these contain mostly transient state).
Returns 0 if initialization succeeded."""
q = repo.mq
r = q.init(repo, create)
q.savedirty()
if r:
if not os.path.exists(r.wjoin(b'.hgignore')):
fp = r.wvfs(b'.hgignore', b'w')
fp.write(b'^\\.hg\n')
fp.write(b'^\\.mq\n')
fp.write(b'syntax: glob\n')
fp.write(b'status\n')
fp.write(b'guards\n')
fp.close()
if not os.path.exists(r.wjoin(b'series')):
r.wvfs(b'series', b'w').close()
r[None].add([b'.hgignore', b'series'])
commands.add(ui, r)
return 0
@command(
b"qinit",
[(b'c', b'create-repo', None, _(b'create queue repository'))],
_(b'hg qinit [-c]'),
helpcategory=command.CATEGORY_REPO_CREATION,
helpbasic=True,
)
def init(ui, repo, **opts):
"""init a new queue repository (DEPRECATED)
The queue repository is unversioned by default. If
-c/--create-repo is specified, qinit will create a separate nested
repository for patches (qinit -c may also be run later to convert
an unversioned patch repository into a versioned one). You can use
qcommit to commit changes to this queue repository.
This command is deprecated. Without -c, it's implied by other relevant
commands. With -c, use :hg:`init --mq` instead."""
return qinit(ui, repo, create=opts.get('create_repo'))
@command(
b"qclone",
[
(b'', b'pull', None, _(b'use pull protocol to copy metadata')),
(
b'U',
b'noupdate',
None,
_(b'do not update the new working directories'),
),
(
b'',
b'uncompressed',
None,
_(b'use uncompressed transfer (fast over LAN)'),
),
(
b'p',
b'patches',
b'',
_(b'location of source patch repository'),
_(b'REPO'),
),
]
+ cmdutil.remoteopts,
_(b'hg qclone [OPTION]... SOURCE [DEST]'),
helpcategory=command.CATEGORY_REPO_CREATION,
norepo=True,
)
def clone(ui, source, dest=None, **opts):
"""clone main and patch repository at same time
    If the source is local, the destination will have no patches applied. If
    the source is remote, this command cannot check whether patches are
    applied in the source, so it cannot guarantee that patches are not
    applied in the destination. If you clone a remote repository, make sure
    it has no patches applied before you clone it.
    The source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change this.
The patch directory must be a nested Mercurial repository, as
would be created by :hg:`init --mq`.
Return 0 on success.
"""
opts = pycompat.byteskwargs(opts)
def patchdir(repo):
"""compute a patch repo url from a repo object"""
url = repo.url()
if url.endswith(b'/'):
url = url[:-1]
return url + b'/.hg/patches'
# main repo (destination and sources)
if dest is None:
dest = hg.defaultdest(source)
sr = hg.peer(ui, opts, ui.expandpath(source))
# patches repo (source only)
if opts.get(b'patches'):
patchespath = ui.expandpath(opts.get(b'patches'))
else:
patchespath = patchdir(sr)
try:
hg.peer(ui, opts, patchespath)
except error.RepoError:
raise error.Abort(
_(b'versioned patch repository not found (see init --mq)')
)
qbase, destrev = None, None
if sr.local():
repo = sr.local()
if repo.mq.applied and repo[qbase].phase() != phases.secret:
qbase = repo.mq.applied[0].node
if not hg.islocal(dest):
heads = set(repo.heads())
destrev = list(heads.difference(repo.heads(qbase)))
destrev.append(repo.changelog.parents(qbase)[0])
elif sr.capable(b'lookup'):
try:
qbase = sr.lookup(b'qbase')
except error.RepoError:
pass
ui.note(_(b'cloning main repository\n'))
sr, dr = hg.clone(
ui,
opts,
sr.url(),
dest,
pull=opts.get(b'pull'),
revs=destrev,
update=False,
stream=opts.get(b'uncompressed'),
)
ui.note(_(b'cloning patch repository\n'))
hg.clone(
ui,
opts,
opts.get(b'patches') or patchdir(sr),
patchdir(dr),
pull=opts.get(b'pull'),
update=not opts.get(b'noupdate'),
stream=opts.get(b'uncompressed'),
)
if dr.local():
repo = dr.local()
if qbase:
ui.note(
_(
b'stripping applied patches from destination '
b'repository\n'
)
)
strip(ui, repo, [qbase], update=False, backup=None)
if not opts.get(b'noupdate'):
ui.note(_(b'updating destination repository\n'))
hg.update(repo, repo.changelog.tip())
@command(
b"qcommit|qci",
commands.table[b"commit|ci"][1],
_(b'hg qcommit [OPTION]... [FILE]...'),
helpcategory=command.CATEGORY_COMMITTING,
inferrepo=True,
)
def commit(ui, repo, *pats, **opts):
"""commit changes in the queue repository (DEPRECATED)
This command is deprecated; use :hg:`commit --mq` instead."""
q = repo.mq
r = q.qrepo()
if not r:
raise error.Abort(b'no queue repository')
commands.commit(r.ui, r, *pats, **opts)
@command(
b"qseries",
[
(b'm', b'missing', None, _(b'print patches not in series')),
]
+ seriesopts,
_(b'hg qseries [-ms]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def series(ui, repo, **opts):
"""print the entire series file
Returns 0 on success."""
repo.mq.qseries(
repo, missing=opts.get('missing'), summary=opts.get('summary')
)
return 0
@command(
b"qtop",
seriesopts,
_(b'hg qtop [-s]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def top(ui, repo, **opts):
"""print the name of the current patch
Returns 0 on success."""
q = repo.mq
if q.applied:
t = q.seriesend(True)
else:
t = 0
if t:
q.qseries(
repo,
start=t - 1,
length=1,
status=b'A',
summary=opts.get('summary'),
)
else:
ui.write(_(b"no patches applied\n"))
return 1
@command(
b"qnext",
seriesopts,
_(b'hg qnext [-s]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def next(ui, repo, **opts):
"""print the name of the next pushable patch
Returns 0 on success."""
q = repo.mq
end = q.seriesend()
if end == len(q.series):
ui.write(_(b"all patches applied\n"))
return 1
q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
@command(
b"qprev",
seriesopts,
_(b'hg qprev [-s]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def prev(ui, repo, **opts):
"""print the name of the preceding applied patch
Returns 0 on success."""
q = repo.mq
l = len(q.applied)
if l == 1:
ui.write(_(b"only one patch applied\n"))
return 1
if not l:
ui.write(_(b"no patches applied\n"))
return 1
idx = q.series.index(q.applied[-2].name)
q.qseries(
repo, start=idx, length=1, status=b'A', summary=opts.get('summary')
)
def setupheaderopts(ui, opts):
if not opts.get(b'user') and opts.get(b'currentuser'):
opts[b'user'] = ui.username()
if not opts.get(b'date') and opts.get(b'currentdate'):
opts[b'date'] = b"%d %d" % dateutil.makedate()
@command(
b"qnew",
[
(b'e', b'edit', None, _(b'invoke editor on commit messages')),
(b'f', b'force', None, _(b'import uncommitted changes (DEPRECATED)')),
(b'g', b'git', None, _(b'use git extended diff format')),
(b'U', b'currentuser', None, _(b'add "From: <current user>" to patch')),
(b'u', b'user', b'', _(b'add "From: <USER>" to patch'), _(b'USER')),
(b'D', b'currentdate', None, _(b'add "Date: <current date>" to patch')),
(b'd', b'date', b'', _(b'add "Date: <DATE>" to patch'), _(b'DATE')),
]
+ cmdutil.walkopts
+ cmdutil.commitopts,
_(b'hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
helpcategory=command.CATEGORY_COMMITTING,
helpbasic=True,
inferrepo=True,
)
def new(ui, repo, patch, *args, **opts):
"""create a new patch
qnew creates a new patch on top of the currently-applied patch (if
any). The patch will be initialized with any outstanding changes
in the working directory. You may also use -I/--include,
-X/--exclude, and/or a list of files after the patch name to add
only changes to matching files to the new patch, leaving the rest
as uncommitted modifications.
-u/--user and -d/--date can be used to set the (given) user and
date, respectively. -U/--currentuser and -D/--currentdate set user
to current user and date to current date.
-e/--edit, -m/--message or -l/--logfile set the patch header as
well as the commit message. If none is specified, the header is
empty and the commit message is '[mq]: PATCH'.
Use the -g/--git option to keep the patch in the git extended diff
format. Read the diffs help topic for more information on why this
is important for preserving permission changes and copy/rename
information.
Returns 0 on successful creation of a new patch.
"""
opts = pycompat.byteskwargs(opts)
msg = cmdutil.logmessage(ui, opts)
q = repo.mq
opts[b'msg'] = msg
setupheaderopts(ui, opts)
q.new(repo, patch, *args, **pycompat.strkwargs(opts))
q.savedirty()
return 0
@command(
b"qrefresh",
[
(b'e', b'edit', None, _(b'invoke editor on commit messages')),
(b'g', b'git', None, _(b'use git extended diff format')),
(
b's',
b'short',
None,
_(b'refresh only files already in the patch and specified files'),
),
(
b'U',
b'currentuser',
None,
_(b'add/update author field in patch with current user'),
),
(
b'u',
b'user',
b'',
_(b'add/update author field in patch with given user'),
_(b'USER'),
),
(
b'D',
b'currentdate',
None,
_(b'add/update date field in patch with current date'),
),
(
b'd',
b'date',
b'',
_(b'add/update date field in patch with given date'),
_(b'DATE'),
),
]
+ cmdutil.walkopts
+ cmdutil.commitopts,
_(b'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
helpcategory=command.CATEGORY_COMMITTING,
helpbasic=True,
inferrepo=True,
)
def refresh(ui, repo, *pats, **opts):
"""update the current patch
If any file patterns are provided, the refreshed patch will
contain only the modifications that match those patterns; the
remaining modifications will remain in the working directory.
If -s/--short is specified, files currently included in the patch
will be refreshed just like matched files and remain in the patch.
If -e/--edit is specified, Mercurial will start your configured editor for
you to enter a message. In case qrefresh fails, you will find a backup of
your message in ``.hg/last-message.txt``.
hg add/remove/copy/rename work as usual, though you might want to
use git-style patches (-g/--git or [diff] git=1) to track copies
and renames. See the diffs help topic for more information on the
git diff format.
Returns 0 on success.
"""
opts = pycompat.byteskwargs(opts)
q = repo.mq
message = cmdutil.logmessage(ui, opts)
setupheaderopts(ui, opts)
with repo.wlock():
ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts))
q.savedirty()
return ret
@command(
b"qdiff",
cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
_(b'hg qdiff [OPTION]... [FILE]...'),
helpcategory=command.CATEGORY_FILE_CONTENTS,
helpbasic=True,
inferrepo=True,
)
def diff(ui, repo, *pats, **opts):
"""diff of the current patch and subsequent modifications
Shows a diff which includes the current patch as well as any
changes which have been made in the working directory since the
last refresh (thus showing what the current patch would become
after a qrefresh).
Use :hg:`diff` if you only want to see the changes made since the
last qrefresh, or :hg:`export qtip` if you want to see changes
made by the current patch without including changes made since the
qrefresh.
Returns 0 on success.
"""
ui.pager(b'qdiff')
repo.mq.diff(repo, pats, pycompat.byteskwargs(opts))
return 0
@command(
b'qfold',
[
(b'e', b'edit', None, _(b'invoke editor on commit messages')),
(b'k', b'keep', None, _(b'keep folded patch files')),
]
+ cmdutil.commitopts,
_(b'hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
)
def fold(ui, repo, *files, **opts):
"""fold the named patches into the current patch
Patches must not yet be applied. Each patch will be successively
applied to the current patch in the order given. If all the
patches apply successfully, the current patch will be refreshed
with the new cumulative patch, and the folded patches will be
deleted. With -k/--keep, the folded patch files will not be
removed afterwards.
The header for each folded patch will be concatenated with the
current patch header, separated by a line of ``* * *``.
Returns 0 on success."""
opts = pycompat.byteskwargs(opts)
q = repo.mq
if not files:
raise error.Abort(_(b'qfold requires at least one patch name'))
if not q.checktoppatch(repo)[0]:
raise error.Abort(_(b'no patches applied'))
q.checklocalchanges(repo)
message = cmdutil.logmessage(ui, opts)
parent = q.lookup(b'qtip')
patches = []
messages = []
for f in files:
p = q.lookup(f)
if p in patches or p == parent:
ui.warn(_(b'skipping already folded patch %s\n') % p)
if q.isapplied(p):
raise error.Abort(
_(b'qfold cannot fold already applied patch %s') % p
)
patches.append(p)
for p in patches:
if not message:
ph = patchheader(q.join(p), q.plainmode)
if ph.message:
messages.append(ph.message)
pf = q.join(p)
(patchsuccess, files, fuzz) = q.patch(repo, pf)
if not patchsuccess:
raise error.Abort(_(b'error folding patch %s') % p)
if not message:
ph = patchheader(q.join(parent), q.plainmode)
message = ph.message
for msg in messages:
if msg:
if message:
message.append(b'* * *')
message.extend(msg)
message = b'\n'.join(message)
diffopts = q.patchopts(q.diffopts(), *patches)
with repo.wlock():
q.refresh(
repo,
msg=message,
git=diffopts.git,
edit=opts.get(b'edit'),
editform=b'mq.qfold',
)
q.delete(repo, patches, opts)
q.savedirty()
@command(
b"qgoto",
[
(
b'',
b'keep-changes',
None,
_(b'tolerate non-conflicting local changes'),
),
(b'f', b'force', None, _(b'overwrite any local changes')),
(b'', b'no-backup', None, _(b'do not save backup copies of files')),
],
_(b'hg qgoto [OPTION]... PATCH'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def goto(ui, repo, patch, **opts):
"""push or pop patches until named patch is at top of stack
Returns 0 on success."""
opts = pycompat.byteskwargs(opts)
opts = fixkeepchangesopts(ui, opts)
q = repo.mq
patch = q.lookup(patch)
nobackup = opts.get(b'no_backup')
keepchanges = opts.get(b'keep_changes')
if q.isapplied(patch):
ret = q.pop(
repo,
patch,
force=opts.get(b'force'),
nobackup=nobackup,
keepchanges=keepchanges,
)
else:
ret = q.push(
repo,
patch,
force=opts.get(b'force'),
nobackup=nobackup,
keepchanges=keepchanges,
)
q.savedirty()
return ret
@command(
b"qguard",
[
(b'l', b'list', None, _(b'list all patches and guards')),
(b'n', b'none', None, _(b'drop all guards')),
],
_(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def guard(ui, repo, *args, **opts):
"""set or print guards for a patch
Guards control whether a patch can be pushed. A patch with no
guards is always pushed. A patch with a positive guard ("+foo") is
pushed only if the :hg:`qselect` command has activated it. A patch with
a negative guard ("-foo") is never pushed if the :hg:`qselect` command
has activated it.
With no arguments, print the currently active guards.
With arguments, set guards for the named patch.
.. note::
Specifying negative guards now requires '--'.
To set guards on another patch::
hg qguard other.patch -- +2.6.17 -stable
Returns 0 on success.
"""
def status(idx):
guards = q.seriesguards[idx] or [b'unguarded']
if q.series[idx] in applied:
state = b'applied'
elif q.pushable(idx)[0]:
state = b'unapplied'
else:
state = b'guarded'
label = b'qguard.patch qguard.%s qseries.%s' % (state, state)
ui.write(b'%s: ' % ui.label(q.series[idx], label))
for i, guard in enumerate(guards):
if guard.startswith(b'+'):
ui.write(guard, label=b'qguard.positive')
elif guard.startswith(b'-'):
ui.write(guard, label=b'qguard.negative')
else:
ui.write(guard, label=b'qguard.unguarded')
if i != len(guards) - 1:
ui.write(b' ')
ui.write(b'\n')
q = repo.mq
applied = {p.name for p in q.applied}
patch = None
args = list(args)
if opts.get('list'):
if args or opts.get('none'):
raise error.Abort(
_(b'cannot mix -l/--list with options or arguments')
)
for i in pycompat.xrange(len(q.series)):
status(i)
return
if not args or args[0][0:1] in b'-+':
if not q.applied:
raise error.Abort(_(b'no patches applied'))
patch = q.applied[-1].name
if patch is None and args[0][0:1] not in b'-+':
patch = args.pop(0)
if patch is None:
raise error.Abort(_(b'no patch to work with'))
if args or opts.get('none'):
idx = q.findseries(patch)
if idx is None:
raise error.Abort(_(b'no patch named %s') % patch)
q.setguards(idx, args)
q.savedirty()
else:
status(q.series.index(q.lookup(patch)))
@command(
b"qheader",
[],
_(b'hg qheader [PATCH]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def header(ui, repo, patch=None):
"""print the header of the topmost or specified patch
Returns 0 on success."""
q = repo.mq
if patch:
patch = q.lookup(patch)
else:
if not q.applied:
ui.write(_(b'no patches applied\n'))
return 1
patch = q.lookup(b'qtip')
ph = patchheader(q.join(patch), q.plainmode)
ui.write(b'\n'.join(ph.message) + b'\n')
def lastsavename(path):
(directory, base) = os.path.split(path)
names = os.listdir(directory)
namere = re.compile(b"%s.([0-9]+)" % base)
maxindex = None
maxname = None
for f in names:
m = namere.match(f)
if m:
index = int(m.group(1))
if maxindex is None or index > maxindex:
maxindex = index
maxname = f
if maxname:
return (os.path.join(directory, maxname), maxindex)
return (None, None)
def savename(path):
(last, index) = lastsavename(path)
if last is None:
index = 0
newpath = path + b".%d" % (index + 1)
return newpath
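# Illustrative walk-through, not part of upstream mq.py: with
# path = b'.hg/patches' and saved copies b'patches.1' and b'patches.3'
# already present under b'.hg', lastsavename(path) returns
# (b'.hg/patches.3', 3) and savename(path) proposes b'.hg/patches.4'.
# When no saved copy exists yet, savename() starts numbering at b'<path>.1'.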
@command(
b"qpush",
[
(
b'',
b'keep-changes',
None,
_(b'tolerate non-conflicting local changes'),
),
(b'f', b'force', None, _(b'apply on top of local changes')),
(
b'e',
b'exact',
None,
_(b'apply the target patch to its recorded parent'),
),
(b'l', b'list', None, _(b'list patch name in commit text')),
(b'a', b'all', None, _(b'apply all patches')),
(b'm', b'merge', None, _(b'merge from another queue (DEPRECATED)')),
(b'n', b'name', b'', _(b'merge queue name (DEPRECATED)'), _(b'NAME')),
(
b'',
b'move',
None,
_(b'reorder patch series and apply only the patch'),
),
(b'', b'no-backup', None, _(b'do not save backup copies of files')),
],
_(b'hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
helpbasic=True,
)
def push(ui, repo, patch=None, **opts):
"""push the next patch onto the stack
By default, abort if the working directory contains uncommitted
changes. With --keep-changes, abort only if the uncommitted files
overlap with patched files. With -f/--force, backup and patch over
uncommitted changes.
Return 0 on success.
"""
q = repo.mq
mergeq = None
opts = pycompat.byteskwargs(opts)
opts = fixkeepchangesopts(ui, opts)
if opts.get(b'merge'):
if opts.get(b'name'):
newpath = repo.vfs.join(opts.get(b'name'))
else:
newpath, i = lastsavename(q.path)
if not newpath:
ui.warn(_(b"no saved queues found, please use -n\n"))
return 1
mergeq = queue(ui, repo.baseui, repo.path, newpath)
ui.warn(_(b"merging with queue at: %s\n") % mergeq.path)
ret = q.push(
repo,
patch,
force=opts.get(b'force'),
list=opts.get(b'list'),
mergeq=mergeq,
all=opts.get(b'all'),
move=opts.get(b'move'),
exact=opts.get(b'exact'),
nobackup=opts.get(b'no_backup'),
keepchanges=opts.get(b'keep_changes'),
)
return ret
@command(
b"qpop",
[
(b'a', b'all', None, _(b'pop all patches')),
(b'n', b'name', b'', _(b'queue name to pop (DEPRECATED)'), _(b'NAME')),
(
b'',
b'keep-changes',
None,
_(b'tolerate non-conflicting local changes'),
),
(b'f', b'force', None, _(b'forget any local changes to patched files')),
(b'', b'no-backup', None, _(b'do not save backup copies of files')),
],
_(b'hg qpop [-a] [-f] [PATCH | INDEX]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
helpbasic=True,
)
def pop(ui, repo, patch=None, **opts):
"""pop the current patch off the stack
Without argument, pops off the top of the patch stack. If given a
patch name, keeps popping off patches until the named patch is at
the top of the stack.
By default, abort if the working directory contains uncommitted
changes. With --keep-changes, abort only if the uncommitted files
overlap with patched files. With -f/--force, backup and discard
changes made to such files.
Return 0 on success.
"""
opts = pycompat.byteskwargs(opts)
opts = fixkeepchangesopts(ui, opts)
localupdate = True
if opts.get(b'name'):
q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get(b'name')))
ui.warn(_(b'using patch queue: %s\n') % q.path)
localupdate = False
else:
q = repo.mq
ret = q.pop(
repo,
patch,
force=opts.get(b'force'),
update=localupdate,
all=opts.get(b'all'),
nobackup=opts.get(b'no_backup'),
keepchanges=opts.get(b'keep_changes'),
)
q.savedirty()
return ret
@command(
b"qrename|qmv",
[],
_(b'hg qrename PATCH1 [PATCH2]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def rename(ui, repo, patch, name=None, **opts):
"""rename a patch
With one argument, renames the current patch to PATCH1.
With two arguments, renames PATCH1 to PATCH2.
Returns 0 on success."""
q = repo.mq
if not name:
name = patch
patch = None
if patch:
patch = q.lookup(patch)
else:
if not q.applied:
ui.write(_(b'no patches applied\n'))
return
patch = q.lookup(b'qtip')
absdest = q.join(name)
if os.path.isdir(absdest):
name = normname(os.path.join(name, os.path.basename(patch)))
absdest = q.join(name)
q.checkpatchname(name)
ui.note(_(b'renaming %s to %s\n') % (patch, name))
i = q.findseries(patch)
guards = q.guard_re.findall(q.fullseries[i])
q.fullseries[i] = name + b''.join([b' #' + g for g in guards])
q.parseseries()
q.seriesdirty = True
info = q.isapplied(patch)
if info:
q.applied[info[0]] = statusentry(info[1], name)
q.applieddirty = True
destdir = os.path.dirname(absdest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
util.rename(q.join(patch), absdest)
r = q.qrepo()
if r and patch in r.dirstate:
wctx = r[None]
with r.wlock():
if r.dirstate[patch] == b'a':
r.dirstate.drop(patch)
r.dirstate.add(name)
else:
wctx.copy(patch, name)
wctx.forget([patch])
q.savedirty()
@command(
b"qrestore",
[
(b'd', b'delete', None, _(b'delete save entry')),
(b'u', b'update', None, _(b'update queue working directory')),
],
_(b'hg qrestore [-d] [-u] REV'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def restore(ui, repo, rev, **opts):
"""restore the queue state saved by a revision (DEPRECATED)
This command is deprecated, use :hg:`rebase` instead."""
rev = repo.lookup(rev)
q = repo.mq
q.restore(repo, rev, delete=opts.get('delete'), qupdate=opts.get('update'))
q.savedirty()
return 0
@command(
b"qsave",
[
(b'c', b'copy', None, _(b'copy patch directory')),
(b'n', b'name', b'', _(b'copy directory name'), _(b'NAME')),
(b'e', b'empty', None, _(b'clear queue status file')),
(b'f', b'force', None, _(b'force copy')),
]
+ cmdutil.commitopts,
_(b'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def save(ui, repo, **opts):
"""save current queue state (DEPRECATED)
This command is deprecated, use :hg:`rebase` instead."""
q = repo.mq
opts = pycompat.byteskwargs(opts)
message = cmdutil.logmessage(ui, opts)
ret = q.save(repo, msg=message)
if ret:
return ret
q.savedirty() # save to .hg/patches before copying
if opts.get(b'copy'):
path = q.path
if opts.get(b'name'):
newpath = os.path.join(q.basepath, opts.get(b'name'))
if os.path.exists(newpath):
if not os.path.isdir(newpath):
raise error.Abort(
_(b'destination %s exists and is not a directory')
% newpath
)
if not opts.get(b'force'):
raise error.Abort(
_(b'destination %s exists, use -f to force') % newpath
)
else:
newpath = savename(path)
ui.warn(_(b"copy %s to %s\n") % (path, newpath))
util.copyfiles(path, newpath)
if opts.get(b'empty'):
del q.applied[:]
q.applieddirty = True
q.savedirty()
return 0
@command(
b"qselect",
[
(b'n', b'none', None, _(b'disable all guards')),
(b's', b'series', None, _(b'list all guards in series file')),
(b'', b'pop', None, _(b'pop to before first guarded applied patch')),
(b'', b'reapply', None, _(b'pop, then reapply patches')),
],
_(b'hg qselect [OPTION]... [GUARD]...'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def select(ui, repo, *args, **opts):
"""set or print guarded patches to push
Use the :hg:`qguard` command to set or print guards on patch, then use
qselect to tell mq which guards to use. A patch will be pushed if
it has no guards or any positive guards match the currently
selected guard, but will not be pushed if any negative guards
match the current guard. For example::
qguard foo.patch -- -stable (negative guard)
qguard bar.patch +stable (positive guard)
qselect stable
This activates the "stable" guard. mq will skip foo.patch (because
it has a negative match) but push bar.patch (because it has a
positive match).
With no arguments, prints the currently active guards.
With one argument, sets the active guard.
Use -n/--none to deactivate guards (no other arguments needed).
When no guards are active, patches with positive guards are
skipped and patches with negative guards are pushed.
qselect can change the guards on applied patches. It does not pop
guarded patches by default. Use --pop to pop back to the last
applied patch that is not guarded. Use --reapply (which implies
--pop) to push back to the current patch afterwards, but skip
guarded patches.
Use -s/--series to print a list of all guards in the series file
(no other arguments needed). Use -v for more information.
Returns 0 on success."""
q = repo.mq
opts = pycompat.byteskwargs(opts)
guards = q.active()
pushable = lambda i: q.pushable(q.applied[i].name)[0]
if args or opts.get(b'none'):
old_unapplied = q.unapplied(repo)
old_guarded = [
i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
]
q.setactive(args)
q.savedirty()
if not args:
ui.status(_(b'guards deactivated\n'))
if not opts.get(b'pop') and not opts.get(b'reapply'):
unapplied = q.unapplied(repo)
guarded = [
i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
]
if len(unapplied) != len(old_unapplied):
ui.status(
_(
b'number of unguarded, unapplied patches has '
b'changed from %d to %d\n'
)
% (len(old_unapplied), len(unapplied))
)
if len(guarded) != len(old_guarded):
ui.status(
_(
b'number of guarded, applied patches has changed '
b'from %d to %d\n'
)
% (len(old_guarded), len(guarded))
)
elif opts.get(b'series'):
guards = {}
noguards = 0
for gs in q.seriesguards:
if not gs:
noguards += 1
for g in gs:
guards.setdefault(g, 0)
guards[g] += 1
if ui.verbose:
guards[b'NONE'] = noguards
guards = list(guards.items())
guards.sort(key=lambda x: x[0][1:])
if guards:
ui.note(_(b'guards in series file:\n'))
for guard, count in guards:
ui.note(b'%2d ' % count)
ui.write(guard, b'\n')
else:
ui.note(_(b'no guards in series file\n'))
else:
if guards:
ui.note(_(b'active guards:\n'))
for g in guards:
ui.write(g, b'\n')
else:
ui.write(_(b'no active guards\n'))
reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
popped = False
if opts.get(b'pop') or opts.get(b'reapply'):
for i in pycompat.xrange(len(q.applied)):
if not pushable(i):
ui.status(_(b'popping guarded patches\n'))
popped = True
if i == 0:
q.pop(repo, all=True)
else:
q.pop(repo, q.applied[i - 1].name)
break
if popped:
try:
if reapply:
ui.status(_(b'reapplying unguarded patches\n'))
q.push(repo, reapply)
finally:
q.savedirty()
@command(
b"qfinish",
[(b'a', b'applied', None, _(b'finish all applied changesets'))],
_(b'hg qfinish [-a] [REV]...'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def finish(ui, repo, *revrange, **opts):
"""move applied patches into repository history
Finishes the specified revisions (corresponding to applied
patches) by moving them out of mq control into regular repository
history.
Accepts a revision range or the -a/--applied option. If --applied
is specified, all applied mq revisions are removed from mq
control. Otherwise, the given revisions must be at the base of the
stack of applied patches.
This can be especially useful if your changes have been applied to
an upstream repository, or if you are about to push your changes
to upstream.
Returns 0 on success.
"""
if not opts.get('applied') and not revrange:
raise error.Abort(_(b'no revisions specified'))
elif opts.get('applied'):
revrange = (b'qbase::qtip',) + revrange
q = repo.mq
if not q.applied:
ui.status(_(b'no patches applied\n'))
return 0
revs = scmutil.revrange(repo, revrange)
if repo[b'.'].rev() in revs and repo[None].files():
ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
    # queue.finish may change phases but leaves the responsibility to lock the
    # repo to the caller to avoid a deadlock with wlock. This command code is
    # responsible for this locking.
with repo.lock():
q.finish(repo, revs)
q.savedirty()
return 0
@command(
b"qqueue",
[
(b'l', b'list', False, _(b'list all available queues')),
(b'', b'active', False, _(b'print name of active queue')),
(b'c', b'create', False, _(b'create new queue')),
(b'', b'rename', False, _(b'rename active queue')),
(b'', b'delete', False, _(b'delete reference to queue')),
(b'', b'purge', False, _(b'delete queue, and remove patch dir')),
],
_(b'[OPTION] [QUEUE]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def qqueue(ui, repo, name=None, **opts):
"""manage multiple patch queues
Supports switching between different patch queues, as well as creating
new patch queues and deleting existing ones.
Omitting a queue name or specifying -l/--list will show you the registered
queues - by default the "normal" patches queue is registered. The currently
active queue will be marked with "(active)". Specifying --active will print
only the name of the active queue.
To create a new queue, use -c/--create. The queue is automatically made
active, except in the case where there are applied patches from the
    currently active queue in the repository; in that case the queue will
    only be created, and switching to it will fail.
To delete an existing queue, use --delete. You cannot delete the currently
active queue.
Returns 0 on success.
"""
q = repo.mq
_defaultqueue = b'patches'
_allqueues = b'patches.queues'
_activequeue = b'patches.queue'
def _getcurrent():
cur = os.path.basename(q.path)
if cur.startswith(b'patches-'):
cur = cur[8:]
return cur
def _noqueues():
try:
fh = repo.vfs(_allqueues, b'r')
fh.close()
except IOError:
return True
return False
def _getqueues():
current = _getcurrent()
try:
fh = repo.vfs(_allqueues, b'r')
queues = [queue.strip() for queue in fh if queue.strip()]
fh.close()
if current not in queues:
queues.append(current)
except IOError:
queues = [_defaultqueue]
return sorted(queues)
def _setactive(name):
if q.applied:
raise error.Abort(
_(
b'new queue created, but cannot make active '
b'as patches are applied'
)
)
_setactivenocheck(name)
def _setactivenocheck(name):
fh = repo.vfs(_activequeue, b'w')
if name != b'patches':
fh.write(name)
fh.close()
def _addqueue(name):
fh = repo.vfs(_allqueues, b'a')
fh.write(b'%s\n' % (name,))
fh.close()
def _queuedir(name):
if name == b'patches':
return repo.vfs.join(b'patches')
else:
return repo.vfs.join(b'patches-' + name)
def _validname(name):
for n in name:
if n in b':\\/.':
return False
return True
def _delete(name):
if name not in existing:
raise error.Abort(_(b'cannot delete queue that does not exist'))
current = _getcurrent()
if name == current:
raise error.Abort(_(b'cannot delete currently active queue'))
fh = repo.vfs(b'patches.queues.new', b'w')
for queue in existing:
if queue == name:
continue
fh.write(b'%s\n' % (queue,))
fh.close()
repo.vfs.rename(b'patches.queues.new', _allqueues)
opts = pycompat.byteskwargs(opts)
if not name or opts.get(b'list') or opts.get(b'active'):
current = _getcurrent()
if opts.get(b'active'):
ui.write(b'%s\n' % (current,))
return
for queue in _getqueues():
ui.write(b'%s' % (queue,))
if queue == current and not ui.quiet:
ui.write(_(b' (active)\n'))
else:
ui.write(b'\n')
return
if not _validname(name):
raise error.Abort(
_(b'invalid queue name, may not contain the characters ":\\/."')
)
with repo.wlock():
existing = _getqueues()
if opts.get(b'create'):
if name in existing:
raise error.Abort(_(b'queue "%s" already exists') % name)
if _noqueues():
_addqueue(_defaultqueue)
_addqueue(name)
_setactive(name)
elif opts.get(b'rename'):
current = _getcurrent()
if name == current:
raise error.Abort(
_(b'can\'t rename "%s" to its current name') % name
)
if name in existing:
raise error.Abort(_(b'queue "%s" already exists') % name)
olddir = _queuedir(current)
newdir = _queuedir(name)
if os.path.exists(newdir):
raise error.Abort(
_(b'non-queue directory "%s" already exists') % newdir
)
fh = repo.vfs(b'patches.queues.new', b'w')
for queue in existing:
if queue == current:
fh.write(b'%s\n' % (name,))
if os.path.exists(olddir):
util.rename(olddir, newdir)
else:
fh.write(b'%s\n' % (queue,))
fh.close()
repo.vfs.rename(b'patches.queues.new', _allqueues)
_setactivenocheck(name)
elif opts.get(b'delete'):
_delete(name)
elif opts.get(b'purge'):
if name in existing:
_delete(name)
qdir = _queuedir(name)
if os.path.exists(qdir):
shutil.rmtree(qdir)
else:
if name not in existing:
raise error.Abort(_(b'use --create to create a new queue'))
_setactive(name)
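# Illustrative summary, not part of upstream mq.py, assuming repo.vfs is
# rooted at the repository's .hg directory:
#   .hg/patches.queues   one registered queue name per line
#   .hg/patches.queue    name of the active queue (empty or missing means
#                        the default "patches" queue)
#   .hg/patches          patch directory of the default queue
#   .hg/patches-<name>   patch directory of a named queue, e.g.
#                        "hg qqueue --create testing" uses .hg/patches-testing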
def mqphasedefaults(repo, roots):
"""callback used to set mq changeset as secret when no phase data exists"""
if repo.mq.applied:
if repo.ui.configbool(b'mq', b'secret'):
mqphase = phases.secret
else:
mqphase = phases.draft
qbase = repo[repo.mq.applied[0].node]
roots[mqphase].add(qbase.node())
return roots
def reposetup(ui, repo):
class mqrepo(repo.__class__):
@localrepo.unfilteredpropertycache
def mq(self):
return queue(self.ui, self.baseui, self.path)
def invalidateall(self):
super(mqrepo, self).invalidateall()
if localrepo.hasunfilteredcache(self, 'mq'):
# recreate mq in case queue path was changed
delattr(self.unfiltered(), 'mq')
def abortifwdirpatched(self, errmsg, force=False):
if self.mq.applied and self.mq.checkapplied and not force:
parents = self.dirstate.parents()
patches = [s.node for s in self.mq.applied]
if any(p in patches for p in parents):
raise error.Abort(errmsg)
def commit(
self,
text=b"",
user=None,
date=None,
match=None,
force=False,
editor=False,
extra=None,
):
if extra is None:
extra = {}
self.abortifwdirpatched(
_(b'cannot commit over an applied mq patch'), force
)
return super(mqrepo, self).commit(
text, user, date, match, force, editor, extra
)
def checkpush(self, pushop):
if self.mq.applied and self.mq.checkapplied and not pushop.force:
outapplied = [e.node for e in self.mq.applied]
if pushop.revs:
                    # Assume applied patches have no non-patch descendants and
                    # are not on the remote already. Filter out any changeset
                    # that is not being pushed.
heads = set(pushop.revs)
for node in reversed(outapplied):
if node in heads:
break
else:
outapplied.pop()
# looking for pushed and shared changeset
for node in outapplied:
if self[node].phase() < phases.secret:
raise error.Abort(_(b'source has mq patches applied'))
# no non-secret patches pushed
super(mqrepo, self).checkpush(pushop)
def _findtags(self):
'''augment tags from base class with patch tags'''
result = super(mqrepo, self)._findtags()
q = self.mq
if not q.applied:
return result
mqtags = [(patch.node, patch.name) for patch in q.applied]
try:
# for now ignore filtering business
self.unfiltered().changelog.rev(mqtags[-1][0])
except error.LookupError:
self.ui.warn(
_(b'mq status file refers to unknown node %s\n')
% short(mqtags[-1][0])
)
return result
# do not add fake tags for filtered revisions
included = self.changelog.hasnode
mqtags = [mqt for mqt in mqtags if included(mqt[0])]
if not mqtags:
return result
mqtags.append((mqtags[-1][0], b'qtip'))
mqtags.append((mqtags[0][0], b'qbase'))
mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent'))
tags = result[0]
for patch in mqtags:
if patch[1] in tags:
self.ui.warn(
_(b'tag %s overrides mq patch of the same name\n')
% patch[1]
)
else:
tags[patch[1]] = patch[0]
return result
if repo.local():
repo.__class__ = mqrepo
repo._phasedefaults.append(mqphasedefaults)
def mqimport(orig, ui, repo, *args, **kwargs):
if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get(
'no_commit', False
):
repo.abortifwdirpatched(
_(b'cannot import over an applied patch'), kwargs.get('force')
)
return orig(ui, repo, *args, **kwargs)
def mqinit(orig, ui, *args, **kwargs):
mq = kwargs.pop('mq', None)
if not mq:
return orig(ui, *args, **kwargs)
if args:
repopath = args[0]
if not hg.islocal(repopath):
raise error.Abort(
_(b'only a local queue repository may be initialized')
)
else:
repopath = cmdutil.findrepo(encoding.getcwd())
if not repopath:
raise error.Abort(
_(b'there is no Mercurial repository here (.hg not found)')
)
repo = hg.repository(ui, repopath)
return qinit(ui, repo, True)
def mqcommand(orig, ui, repo, *args, **kwargs):
"""Add --mq option to operate on patch repository instead of main"""
# some commands do not like getting unknown options
mq = kwargs.pop('mq', None)
if not mq:
return orig(ui, repo, *args, **kwargs)
q = repo.mq
r = q.qrepo()
if not r:
raise error.Abort(_(b'no queue repository'))
return orig(r.ui, r, *args, **kwargs)
def summaryhook(ui, repo):
q = repo.mq
m = []
a, u = len(q.applied), len(q.unapplied(repo))
if a:
m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a)
if u:
m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u)
if m:
# i18n: column positioning for "hg summary"
ui.write(_(b"mq: %s\n") % b', '.join(m))
else:
# i18n: column positioning for "hg summary"
ui.note(_(b"mq: (empty queue)\n"))
revsetpredicate = registrar.revsetpredicate()
@revsetpredicate(b'mq()')
def revsetmq(repo, subset, x):
"""Changesets managed by MQ."""
revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments"))
applied = {repo[r.node].rev() for r in repo.mq.applied}
return smartset.baseset([r for r in subset if r in applied])
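# Illustrative usage, not part of upstream mq.py: the predicate above makes
# MQ-managed changesets selectable from any revset expression, for example
#   hg log -r "mq()"
#   hg log -r "mq() and draft()"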
# tell hggettext to extract docstrings from these functions:
i18nfunctions = [revsetmq]
def extsetup(ui):
# Ensure mq wrappers are called first, regardless of extension load order by
# NOT wrapping in uisetup() and instead deferring to init stage two here.
mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))]
extensions.wrapcommand(commands.table, b'import', mqimport)
cmdutil.summaryhooks.add(b'mq', summaryhook)
entry = extensions.wrapcommand(commands.table, b'init', mqinit)
entry[1].extend(mqopt)
def dotable(cmdtable):
for cmd, entry in pycompat.iteritems(cmdtable):
cmd = cmdutil.parsealiases(cmd)[0]
func = entry[0]
if func.norepo:
continue
entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
entry[1].extend(mqopt)
dotable(commands.table)
thismodule = sys.modules["hgext.mq"]
for extname, extmodule in extensions.extensions():
if extmodule != thismodule:
dotable(getattr(extmodule, 'cmdtable', {}))
colortable = {
b'qguard.negative': b'red',
b'qguard.positive': b'yellow',
b'qguard.unguarded': b'green',
b'qseries.applied': b'blue bold underline',
b'qseries.guarded': b'black bold',
b'qseries.missing': b'red bold',
b'qseries.unapplied': b'black bold',
}
```
#### File: hgext/narrow/narrowrepo.py
```python
from __future__ import absolute_import
from mercurial import wireprototypes
from . import narrowdirstate
def wraprepo(repo):
"""Enables narrow clone functionality on a single local repository."""
class narrowrepository(repo.__class__):
def _makedirstate(self):
dirstate = super(narrowrepository, self)._makedirstate()
return narrowdirstate.wrapdirstate(self, dirstate)
def peer(self):
peer = super(narrowrepository, self).peer()
peer._caps.add(wireprototypes.NARROWCAP)
peer._caps.add(wireprototypes.ELLIPSESCAP)
return peer
repo.__class__ = narrowrepository
```
#### File: site-packages/hgext/patchbomb.py
```python
from __future__ import absolute_import
import email.encoders as emailencoders
import email.mime.base as emimebase
import email.mime.multipart as emimemultipart
import email.utils as eutil
import errno
import os
import socket
from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial.node import bin
from mercurial import (
cmdutil,
commands,
encoding,
error,
formatter,
hg,
mail,
patch,
pycompat,
registrar,
scmutil,
templater,
util,
)
from mercurial.utils import dateutil
stringio = util.stringio
cmdtable = {}
command = registrar.command(cmdtable)
configtable = {}
configitem = registrar.configitem(configtable)
configitem(
b'patchbomb',
b'bundletype',
default=None,
)
configitem(
b'patchbomb',
b'bcc',
default=None,
)
configitem(
b'patchbomb',
b'cc',
default=None,
)
configitem(
b'patchbomb',
b'confirm',
default=False,
)
configitem(
b'patchbomb',
b'flagtemplate',
default=None,
)
configitem(
b'patchbomb',
b'from',
default=None,
)
configitem(
b'patchbomb',
b'intro',
default=b'auto',
)
configitem(
b'patchbomb',
b'publicurl',
default=None,
)
configitem(
b'patchbomb',
b'reply-to',
default=None,
)
configitem(
b'patchbomb',
b'to',
default=None,
)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'
def _addpullheader(seq, ctx):
"""Add a header pointing to a public URL where the changeset is available"""
repo = ctx.repo()
# experimental config: patchbomb.publicurl
    # waiting for some logic that checks that the changesets are available on
    # the destination before patchbombing anything.
publicurl = repo.ui.config(b'patchbomb', b'publicurl')
if publicurl:
return b'Available At %s\n# hg pull %s -r %s' % (
publicurl,
publicurl,
ctx,
)
return None
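# Illustrative example, not part of upstream patchbomb.py: assuming the
# hypothetical config value patchbomb.publicurl = https://hg.example.org/repo,
# the text returned above for a changeset ctx reads
#   Available At https://hg.example.org/repo
#   # hg pull https://hg.example.org/repo -r <ctx>
# With no publicurl configured, None is returned and no header is added.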
def uisetup(ui):
cmdutil.extraexport.append(b'pullurl')
cmdutil.extraexportmap[b'pullurl'] = _addpullheader
def reposetup(ui, repo):
if not repo.local():
return
repo._wlockfreeprefix.add(b'last-email.txt')
def prompt(ui, prompt, default=None, rest=b':'):
if default:
prompt += b' [%s]' % default
return ui.prompt(prompt + rest, default)
def introwanted(ui, opts, number):
'''is an introductory message apparently wanted?'''
introconfig = ui.config(b'patchbomb', b'intro')
if opts.get(b'intro') or opts.get(b'desc'):
intro = True
elif introconfig == b'always':
intro = True
elif introconfig == b'never':
intro = False
elif introconfig == b'auto':
intro = number > 1
else:
ui.write_err(
_(b'warning: invalid patchbomb.intro value "%s"\n') % introconfig
)
ui.write_err(_(b'(should be one of always, never, auto)\n'))
intro = number > 1
return intro
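# Illustrative behaviour, not part of upstream patchbomb.py: with the default
# patchbomb.intro = auto an introduction message is proposed only for series
# of more than one patch; "always" and "never" force or suppress it, and
# --intro or --desc on the command line always requests one.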
def _formatflags(ui, repo, rev, flags):
"""build flag string optionally by template"""
tmpl = ui.config(b'patchbomb', b'flagtemplate')
if not tmpl:
return b' '.join(flags)
out = util.stringio()
spec = formatter.literal_templatespec(templater.unquotestring(tmpl))
with formatter.templateformatter(ui, out, b'patchbombflag', {}, spec) as fm:
fm.startitem()
fm.context(ctx=repo[rev])
fm.write(b'flags', b'%s', fm.formatlist(flags, name=b'flag'))
return out.getvalue()
def _formatprefix(ui, repo, rev, flags, idx, total, numbered):
"""build prefix to patch subject"""
flag = _formatflags(ui, repo, rev, flags)
if flag:
flag = b' ' + flag
if not numbered:
return b'[PATCH%s]' % flag
else:
tlen = len(b"%d" % total)
return b'[PATCH %0*d of %d%s]' % (tlen, idx, total, flag)
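# Illustrative outputs, not part of upstream patchbomb.py, assuming no
# patchbomb.flagtemplate is configured so flags are simply space-joined:
#   _formatprefix(ui, repo, rev, [], 2, 10, numbered=True)       -> b'[PATCH 02 of 10]'
#   _formatprefix(ui, repo, rev, [b'RFC'], 1, 1, numbered=False) -> b'[PATCH RFC]'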
def makepatch(
ui,
repo,
rev,
patchlines,
opts,
_charsets,
idx,
total,
numbered,
patchname=None,
):
desc = []
node = None
body = b''
for line in patchlines:
if line.startswith(b'#'):
if line.startswith(b'# Node ID'):
node = line.split()[-1]
continue
if line.startswith(b'diff -r') or line.startswith(b'diff --git'):
break
desc.append(line)
if not patchname and not node:
raise ValueError
if opts.get(b'attach') and not opts.get(b'body'):
body = (
b'\n'.join(desc[1:]).strip()
or b'Patch subject is complete summary.'
)
body += b'\n\n\n'
if opts.get(b'plain'):
while patchlines and patchlines[0].startswith(b'# '):
patchlines.pop(0)
if patchlines:
patchlines.pop(0)
while patchlines and not patchlines[0].strip():
patchlines.pop(0)
ds = patch.diffstat(patchlines)
if opts.get(b'diffstat'):
body += ds + b'\n\n'
addattachment = opts.get(b'attach') or opts.get(b'inline')
if not addattachment or opts.get(b'body'):
body += b'\n'.join(patchlines)
if addattachment:
msg = emimemultipart.MIMEMultipart()
if body:
msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(b'test')))
p = mail.mimetextpatch(
b'\n'.join(patchlines), 'x-patch', opts.get(b'test')
)
binnode = bin(node)
# if node is mq patch, it will have the patch file's name as a tag
if not patchname:
patchtags = [
t
for t in repo.nodetags(binnode)
if t.endswith(b'.patch') or t.endswith(b'.diff')
]
if patchtags:
patchname = patchtags[0]
elif total > 1:
patchname = cmdutil.makefilename(
repo[node], b'%b-%n.patch', seqno=idx, total=total
)
else:
patchname = cmdutil.makefilename(repo[node], b'%b.patch')
disposition = r'inline'
if opts.get(b'attach'):
disposition = r'attachment'
p['Content-Disposition'] = (
disposition + '; filename=' + encoding.strfromlocal(patchname)
)
msg.attach(p)
else:
msg = mail.mimetextpatch(body, display=opts.get(b'test'))
prefix = _formatprefix(
ui, repo, rev, opts.get(b'flag'), idx, total, numbered
)
subj = desc[0].strip().rstrip(b'. ')
if not numbered:
subj = b' '.join([prefix, opts.get(b'subject') or subj])
else:
subj = b' '.join([prefix, subj])
msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get(b'test'))
msg['X-Mercurial-Node'] = pycompat.sysstr(node)
msg['X-Mercurial-Series-Index'] = '%i' % idx
msg['X-Mercurial-Series-Total'] = '%i' % total
return msg, subj, ds
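# Illustrative result, not part of upstream patchbomb.py: for the second patch
# of a three-patch series the message built above carries headers roughly like
#   Subject: [PATCH 2 of 3] <first line of the changeset description>
#   X-Mercurial-Node: <full changeset hash>
#   X-Mercurial-Series-Index: 2
#   X-Mercurial-Series-Total: 3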
def _getpatches(repo, revs, **opts):
"""return a list of patches for a list of revisions
Each patch in the list is itself a list of lines.
"""
ui = repo.ui
prev = repo[b'.'].rev()
for r in revs:
if r == prev and (repo[None].files() or repo[None].deleted()):
ui.warn(_(b'warning: working directory has uncommitted changes\n'))
output = stringio()
cmdutil.exportfile(
repo, [r], output, opts=patch.difffeatureopts(ui, opts, git=True)
)
yield output.getvalue().split(b'\n')
def _getbundle(repo, dest, **opts):
"""return a bundle containing changesets missing in "dest"
    The `opts` keyword-arguments are the same as the ones accepted by the
    `bundle` command.
    The bundle is returned as a single in-memory binary blob.
"""
ui = repo.ui
tmpdir = pycompat.mkdtemp(prefix=b'hg-email-bundle-')
tmpfn = os.path.join(tmpdir, b'bundle')
btype = ui.config(b'patchbomb', b'bundletype')
if btype:
opts['type'] = btype
try:
commands.bundle(ui, repo, tmpfn, dest, **opts)
return util.readfile(tmpfn)
finally:
try:
os.unlink(tmpfn)
except OSError:
pass
os.rmdir(tmpdir)
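# Illustrative note, not part of upstream patchbomb.py: when the
# patchbomb.bundletype config value is set it is forwarded as the --type
# argument of "hg bundle"; a value such as b'gzip' (an assumed example, not a
# value enforced here) selects the corresponding bundle compression.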
def _getdescription(repo, defaultbody, sender, **opts):
"""obtain the body of the introduction message and return it
    This is also used for the body of an email with an attached bundle.
The body can be obtained either from the command line option or entered by
the user through the editor.
"""
ui = repo.ui
if opts.get('desc'):
body = open(opts.get('desc')).read()
else:
ui.write(
_(b'\nWrite the introductory message for the patch series.\n\n')
)
body = ui.edit(
defaultbody, sender, repopath=repo.path, action=b'patchbombbody'
)
# Save series description in case sendmail fails
msgfile = repo.vfs(b'last-email.txt', b'wb')
msgfile.write(body)
msgfile.close()
return body
def _getbundlemsgs(repo, sender, bundle, **opts):
"""Get the full email for sending a given bundle
This function returns a list of "email" tuples (subject, content, None).
    The list always contains exactly one message in this case.
"""
ui = repo.ui
_charsets = mail._charsets(ui)
subj = opts.get('subject') or prompt(
ui, b'Subject:', b'A bundle for your repository'
)
body = _getdescription(repo, b'', sender, **opts)
msg = emimemultipart.MIMEMultipart()
if body:
msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
datapart = emimebase.MIMEBase('application', 'x-mercurial-bundle')
datapart.set_payload(bundle)
bundlename = b'%s.hg' % opts.get('bundlename', b'bundle')
datapart.add_header(
'Content-Disposition',
'attachment',
filename=encoding.strfromlocal(bundlename),
)
emailencoders.encode_base64(datapart)
msg.attach(datapart)
msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
return [(msg, subj, None)]
def _makeintro(repo, sender, revs, patches, **opts):
"""make an introduction email, asking the user for content if needed
email is returned as (subject, body, cumulative-diffstat)"""
ui = repo.ui
_charsets = mail._charsets(ui)
# use the last revision which is likely to be a bookmarked head
prefix = _formatprefix(
ui, repo, revs.last(), opts.get('flag'), 0, len(patches), numbered=True
)
subj = opts.get('subject') or prompt(
ui, b'(optional) Subject: ', rest=prefix, default=b''
)
if not subj:
return None # skip intro if the user doesn't bother
subj = prefix + b' ' + subj
body = b''
if opts.get('diffstat'):
# generate a cumulative diffstat of the whole patch series
diffstat = patch.diffstat(sum(patches, []))
body = b'\n' + diffstat
else:
diffstat = None
body = _getdescription(repo, body, sender, **opts)
msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
return (msg, subj, diffstat)
def _getpatchmsgs(repo, sender, revs, patchnames=None, **opts):
"""return a list of emails from a list of patches
This involves introduction message creation if necessary.
This function returns a list of "email" tuples (subject, content, None).
"""
bytesopts = pycompat.byteskwargs(opts)
ui = repo.ui
_charsets = mail._charsets(ui)
patches = list(_getpatches(repo, revs, **opts))
msgs = []
ui.write(_(b'this patch series consists of %d patches.\n\n') % len(patches))
# build the intro message, or skip it if the user declines
if introwanted(ui, bytesopts, len(patches)):
msg = _makeintro(repo, sender, revs, patches, **opts)
if msg:
msgs.append(msg)
# are we going to send more than one message?
numbered = len(msgs) + len(patches) > 1
# now generate the actual patch messages
name = None
assert len(revs) == len(patches)
for i, (r, p) in enumerate(zip(revs, patches)):
if patchnames:
name = patchnames[i]
msg = makepatch(
ui,
repo,
r,
p,
bytesopts,
_charsets,
i + 1,
len(patches),
numbered,
name,
)
msgs.append(msg)
return msgs
def _getoutgoing(repo, dest, revs):
'''Return the revisions present locally but not in dest'''
ui = repo.ui
url = ui.expandpath(dest or b'default-push', dest or b'default')
url = hg.parseurl(url)[0]
ui.status(_(b'comparing with %s\n') % util.hidepassword(url))
revs = [r for r in revs if r >= 0]
if not revs:
revs = [repo.changelog.tiprev()]
revs = repo.revs(b'outgoing(%s) and ::%ld', dest or b'', revs)
if not revs:
ui.status(_(b"no changes found\n"))
return revs
def _msgid(node, timestamp):
try:
hostname = encoding.strfromlocal(encoding.environ[b'HGHOSTNAME'])
except KeyError:
hostname = socket.getfqdn()
return '<%s.%d@%s>' % (node, timestamp, hostname)
emailopts = [
(b'', b'body', None, _(b'send patches as inline message text (default)')),
(b'a', b'attach', None, _(b'send patches as attachments')),
(b'i', b'inline', None, _(b'send patches as inline attachments')),
(
b'',
b'bcc',
[],
_(b'email addresses of blind carbon copy recipients'),
_(b'EMAIL'),
),
(b'c', b'cc', [], _(b'email addresses of copy recipients'), _(b'EMAIL')),
(b'', b'confirm', None, _(b'ask for confirmation before sending')),
(b'd', b'diffstat', None, _(b'add diffstat output to messages')),
(
b'',
b'date',
b'',
_(b'use the given date as the sending date'),
_(b'DATE'),
),
(
b'',
b'desc',
b'',
_(b'use the given file as the series description'),
_(b'FILE'),
),
(b'f', b'from', b'', _(b'email address of sender'), _(b'EMAIL')),
(b'n', b'test', None, _(b'print messages that would be sent')),
(
b'm',
b'mbox',
b'',
_(b'write messages to mbox file instead of sending them'),
_(b'FILE'),
),
(
b'',
b'reply-to',
[],
_(b'email addresses replies should be sent to'),
_(b'EMAIL'),
),
(
b's',
b'subject',
b'',
_(b'subject of first message (intro or single patch)'),
_(b'TEXT'),
),
(
b'',
b'in-reply-to',
b'',
_(b'message identifier to reply to'),
_(b'MSGID'),
),
(b'', b'flag', [], _(b'flags to add in subject prefixes'), _(b'FLAG')),
(b't', b'to', [], _(b'email addresses of recipients'), _(b'EMAIL')),
]
@command(
b'email',
[
(b'g', b'git', None, _(b'use git extended diff format')),
(b'', b'plain', None, _(b'omit hg patch header')),
(
b'o',
b'outgoing',
None,
_(b'send changes not found in the target repository'),
),
(
b'b',
b'bundle',
None,
_(b'send changes not in target as a binary bundle'),
),
(
b'B',
b'bookmark',
b'',
_(b'send changes only reachable by given bookmark'),
_(b'BOOKMARK'),
),
(
b'',
b'bundlename',
b'bundle',
_(b'name of the bundle attachment file'),
_(b'NAME'),
),
(b'r', b'rev', [], _(b'a revision to send'), _(b'REV')),
(
b'',
b'force',
None,
_(
b'run even when remote repository is unrelated '
b'(with -b/--bundle)'
),
),
(
b'',
b'base',
[],
_(
b'a base changeset to specify instead of a destination '
b'(with -b/--bundle)'
),
_(b'REV'),
),
(
b'',
b'intro',
None,
_(b'send an introduction email for a single patch'),
),
]
+ emailopts
+ cmdutil.remoteopts,
_(b'hg email [OPTION]... [DEST]...'),
helpcategory=command.CATEGORY_IMPORT_EXPORT,
)
def email(ui, repo, *revs, **opts):
"""send changesets by email
By default, diffs are sent in the format generated by
:hg:`export`, one per message. The series starts with a "[PATCH 0
of N]" introduction, which describes the series as a whole.
Each patch email has a Subject line of "[PATCH M of N] ...", using
the first line of the changeset description as the subject text.
The message contains two or three parts. First, the changeset
description.
With the -d/--diffstat option, if the diffstat program is
installed, the result of running diffstat on the patch is inserted.
Finally, the patch itself, as generated by :hg:`export`.
With the -d/--diffstat or --confirm options, you will be presented
with a final summary of all messages and asked for confirmation before
the messages are sent.
By default the patch is included as text in the email body for
easy reviewing. Using the -a/--attach option will instead create
an attachment for the patch. With -i/--inline an inline attachment
will be created. You can include a patch both as text in the email
body and as a regular or an inline attachment by combining the
-a/--attach or -i/--inline with the --body option.
With -B/--bookmark changesets reachable by the given bookmark are
selected.
With -o/--outgoing, emails will be generated for patches not found
in the destination repository (or only those which are ancestors
of the specified revisions if any are provided)
With -b/--bundle, changesets are selected as for --outgoing, but a
single email containing a binary Mercurial bundle as an attachment
will be sent. Use the ``patchbomb.bundletype`` config option to
control the bundle type as with :hg:`bundle --type`.
With -m/--mbox, instead of previewing each patchbomb message in a
pager or sending the messages directly, it will create a UNIX
mailbox file with the patch emails. This mailbox file can be
previewed with any mail user agent which supports UNIX mbox
files.
With -n/--test, all steps will run, but mail will not be sent.
You will be prompted for an email recipient address, a subject and
an introductory message describing the patches of your patchbomb.
Then when all is done, patchbomb messages are displayed.
In case email sending fails, you will find a backup of your series
introductory message in ``.hg/last-email.txt``.
The default behavior of this command can be customized through
configuration. (See :hg:`help patchbomb` for details)
Examples::
hg email -r 3000 # send patch 3000 only
hg email -r 3000 -r 3001 # send patches 3000 and 3001
hg email -r 3000:3005 # send patches 3000 through 3005
hg email 3000 # send patch 3000 (deprecated)
hg email -o # send all patches not in default
hg email -o DEST # send all patches not in DEST
hg email -o -r 3000 # send all ancestors of 3000 not in default
hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST
hg email -B feature # send all ancestors of feature bookmark
hg email -b # send bundle of all patches not in default
hg email -b DEST # send bundle of all patches not in DEST
hg email -b -r 3000 # bundle of all ancestors of 3000 not in default
hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST
hg email -o -m mbox && # generate an mbox file...
mutt -R -f mbox # ... and view it with mutt
hg email -o -m mbox && # generate an mbox file ...
formail -s sendmail \\ # ... and use formail to send from the mbox
-bm -t < mbox # ... using sendmail
Before using this command, you will need to enable email in your
hgrc. See the [email] section in hgrc(5) for details.
"""
opts = pycompat.byteskwargs(opts)
_charsets = mail._charsets(ui)
bundle = opts.get(b'bundle')
date = opts.get(b'date')
mbox = opts.get(b'mbox')
outgoing = opts.get(b'outgoing')
rev = opts.get(b'rev')
bookmark = opts.get(b'bookmark')
if not (opts.get(b'test') or mbox):
# really sending
mail.validateconfig(ui)
if not (revs or rev or outgoing or bundle or bookmark):
raise error.Abort(
_(b'specify at least one changeset with -B, -r or -o')
)
if outgoing and bundle:
raise error.Abort(
_(
b"--outgoing mode always on with --bundle;"
b" do not re-specify --outgoing"
)
)
cmdutil.check_at_most_one_arg(opts, b'rev', b'bookmark')
if outgoing or bundle:
if len(revs) > 1:
raise error.Abort(_(b"too many destinations"))
if revs:
dest = revs[0]
else:
dest = None
revs = []
if rev:
if revs:
raise error.Abort(_(b'use only one form to specify the revision'))
revs = rev
elif bookmark:
if bookmark not in repo._bookmarks:
raise error.Abort(_(b"bookmark '%s' not found") % bookmark)
revs = scmutil.bookmarkrevs(repo, bookmark)
revs = scmutil.revrange(repo, revs)
if outgoing:
revs = _getoutgoing(repo, dest, revs)
if bundle:
opts[b'revs'] = [b"%d" % r for r in revs]
    # check if revisions exist on the public destination
publicurl = repo.ui.config(b'patchbomb', b'publicurl')
if publicurl:
        repo.ui.debug(b'checking that revisions exist in the public repo\n')
try:
publicpeer = hg.peer(repo, {}, publicurl)
except error.RepoError:
repo.ui.write_err(
_(b'unable to access public repo: %s\n') % publicurl
)
raise
if not publicpeer.capable(b'known'):
repo.ui.debug(b'skipping existence checks: public repo too old\n')
else:
out = [repo[r] for r in revs]
known = publicpeer.known(h.node() for h in out)
missing = []
for idx, h in enumerate(out):
if not known[idx]:
missing.append(h)
if missing:
if len(missing) > 1:
msg = _(b'public "%s" is missing %s and %i others')
msg %= (publicurl, missing[0], len(missing) - 1)
else:
msg = _(b'public url %s is missing %s')
msg %= (publicurl, missing[0])
missingrevs = [ctx.rev() for ctx in missing]
revhint = b' '.join(
b'-r %s' % h for h in repo.set(b'heads(%ld)', missingrevs)
)
hint = _(b"use 'hg push %s %s'") % (publicurl, revhint)
raise error.Abort(msg, hint=hint)
# start
if date:
start_time = dateutil.parsedate(date)
else:
start_time = dateutil.makedate()
def genmsgid(id):
return _msgid(id[:20], int(start_time[0]))
# deprecated config: patchbomb.from
sender = (
opts.get(b'from')
or ui.config(b'email', b'from')
or ui.config(b'patchbomb', b'from')
or prompt(ui, b'From', ui.username())
)
if bundle:
stropts = pycompat.strkwargs(opts)
bundledata = _getbundle(repo, dest, **stropts)
bundleopts = stropts.copy()
bundleopts.pop('bundle', None) # already processed
msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts)
else:
msgs = _getpatchmsgs(repo, sender, revs, **pycompat.strkwargs(opts))
showaddrs = []
def getaddrs(header, ask=False, default=None):
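        # Descriptive note: resolution order is explicit command-line addresses
        # first, then the [email] / [patchbomb] config sections, and finally an
        # interactive prompt (or the supplied default) when `ask` is set and
        # nothing was configured.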
configkey = header.lower()
opt = header.replace(b'-', b'_').lower()
addrs = opts.get(opt)
if addrs:
showaddrs.append(b'%s: %s' % (header, b', '.join(addrs)))
return mail.addrlistencode(ui, addrs, _charsets, opts.get(b'test'))
# not on the command line: fallback to config and then maybe ask
addr = ui.config(b'email', configkey) or ui.config(
b'patchbomb', configkey
)
if not addr:
specified = ui.hasconfig(b'email', configkey) or ui.hasconfig(
b'patchbomb', configkey
)
if not specified and ask:
addr = prompt(ui, header, default=default)
if addr:
showaddrs.append(b'%s: %s' % (header, addr))
return mail.addrlistencode(ui, [addr], _charsets, opts.get(b'test'))
elif default:
return mail.addrlistencode(
ui, [default], _charsets, opts.get(b'test')
)
return []
to = getaddrs(b'To', ask=True)
if not to:
# we can get here in non-interactive mode
raise error.Abort(_(b'no recipient addresses provided'))
cc = getaddrs(b'Cc', ask=True, default=b'')
bcc = getaddrs(b'Bcc')
replyto = getaddrs(b'Reply-To')
confirm = ui.configbool(b'patchbomb', b'confirm')
confirm |= bool(opts.get(b'diffstat') or opts.get(b'confirm'))
if confirm:
ui.write(_(b'\nFinal summary:\n\n'), label=b'patchbomb.finalsummary')
ui.write((b'From: %s\n' % sender), label=b'patchbomb.from')
for addr in showaddrs:
ui.write(b'%s\n' % addr, label=b'patchbomb.to')
for m, subj, ds in msgs:
ui.write((b'Subject: %s\n' % subj), label=b'patchbomb.subject')
if ds:
ui.write(ds, label=b'patchbomb.diffstats')
ui.write(b'\n')
if ui.promptchoice(
_(b'are you sure you want to send (yn)?$$ &Yes $$ &No')
):
raise error.Abort(_(b'patchbomb canceled'))
ui.write(b'\n')
parent = opts.get(b'in_reply_to') or None
# angle brackets may be omitted, they're not semantically part of the msg-id
if parent is not None:
parent = encoding.strfromlocal(parent)
if not parent.startswith('<'):
parent = '<' + parent
if not parent.endswith('>'):
parent += '>'
sender_addr = eutil.parseaddr(encoding.strfromlocal(sender))[1]
sender = mail.addressencode(ui, sender, _charsets, opts.get(b'test'))
sendmail = None
firstpatch = None
progress = ui.makeprogress(
_(b'sending'), unit=_(b'emails'), total=len(msgs)
)
for i, (m, subj, ds) in enumerate(msgs):
try:
m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
if not firstpatch:
firstpatch = m['Message-Id']
m['X-Mercurial-Series-Id'] = firstpatch
except TypeError:
m['Message-Id'] = genmsgid('patchbomb')
if parent:
m['In-Reply-To'] = parent
m['References'] = parent
if not parent or 'X-Mercurial-Node' not in m:
parent = m['Message-Id']
m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version().decode()
m['Date'] = eutil.formatdate(start_time[0], localtime=True)
start_time = (start_time[0] + 1, start_time[1])
m['From'] = sender
m['To'] = ', '.join(to)
if cc:
m['Cc'] = ', '.join(cc)
if bcc:
m['Bcc'] = ', '.join(bcc)
if replyto:
m['Reply-To'] = ', '.join(replyto)
if opts.get(b'test'):
ui.status(_(b'displaying '), subj, b' ...\n')
ui.pager(b'email')
generator = mail.Generator(ui, mangle_from_=False)
try:
generator.flatten(m, False)
ui.write(b'\n')
except IOError as inst:
if inst.errno != errno.EPIPE:
raise
else:
if not sendmail:
sendmail = mail.connect(ui, mbox=mbox)
ui.status(_(b'sending '), subj, b' ...\n')
progress.update(i, item=subj)
if not mbox:
# Exim does not remove the Bcc field
del m['Bcc']
fp = stringio()
generator = mail.Generator(fp, mangle_from_=False)
generator.flatten(m, False)
alldests = to + bcc + cc
sendmail(sender_addr, alldests, fp.getvalue())
progress.complete()
```
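The send loop above stamps each outgoing patch with a `Message-Id` derived from the changeset node and a per-message timestamp, and threads every later patch under the intro message via `In-Reply-To`/`References`. Below is a minimal, stdlib-only sketch of that threading scheme; it is not the Mercurial API, and the subjects and node strings are hypothetical.

```python
# Minimal sketch of the Message-Id / threading scheme used by the send loop
# above; stdlib only, with hypothetical subjects and node values.
import socket
import time
from email.message import EmailMessage


def make_msgid(node, timestamp):
    # Mirrors the "<node.timestamp@hostname>" shape of _msgid()
    # (the HGHOSTNAME override is omitted in this sketch).
    return '<%s.%d@%s>' % (node, int(timestamp), socket.getfqdn())


def thread_series(subjects, nodes):
    """Return EmailMessage objects where every patch replies to the first one."""
    start = time.time()
    msgs = []
    parent = None
    for i, (subj, node) in enumerate(zip(subjects, nodes)):
        m = EmailMessage()
        m['Subject'] = subj
        m['Message-Id'] = make_msgid(node, start + i)  # dates one second apart
        if parent:
            m['In-Reply-To'] = parent
            m['References'] = parent
        else:
            parent = m['Message-Id']  # the intro anchors the thread
        msgs.append(m)
    return msgs


series = thread_series(
    ['[PATCH 0 of 2] series intro', '[PATCH 1 of 2] fix', '[PATCH 2 of 2] test'],
    ['intro', 'a' * 40, 'b' * 40],
)
```

Only the first message lacks `In-Reply-To`, so mail clients group the whole series into one thread, which is the effect the extension aims for.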
#### File: hgext/remotefilelog/basestore.py
```python
from __future__ import absolute_import
import errno
import os
import shutil
import stat
import time
from mercurial.i18n import _
from mercurial.node import bin, hex
from mercurial.pycompat import open
from mercurial import (
error,
pycompat,
util,
)
from mercurial.utils import hashutil
from . import (
constants,
shallowutil,
)
class basestore(object):
def __init__(self, repo, path, reponame, shared=False):
"""Creates a remotefilelog store object for the given repo name.
`path` - The file path where this store keeps its data
`reponame` - The name of the repo. This is used to partition data from
many repos.
`shared` - True if this store is a shared cache of data from the central
server, for many repos on this machine. False means this store is for
the local data for one repo.
"""
self.repo = repo
self.ui = repo.ui
self._path = path
self._reponame = reponame
self._shared = shared
self._uid = os.getuid() if not pycompat.iswindows else None
self._validatecachelog = self.ui.config(
b"remotefilelog", b"validatecachelog"
)
self._validatecache = self.ui.config(
b"remotefilelog", b"validatecache", b'on'
)
if self._validatecache not in (b'on', b'strict', b'off'):
self._validatecache = b'on'
if self._validatecache == b'off':
self._validatecache = False
if shared:
shallowutil.mkstickygroupdir(self.ui, path)
def getmissing(self, keys):
missing = []
for name, node in keys:
filepath = self._getfilepath(name, node)
exists = os.path.exists(filepath)
if (
exists
and self._validatecache == b'strict'
and not self._validatekey(filepath, b'contains')
):
exists = False
if not exists:
missing.append((name, node))
return missing
# BELOW THIS ARE IMPLEMENTATIONS OF REPACK SOURCE
def markledger(self, ledger, options=None):
if options and options.get(constants.OPTION_PACKSONLY):
return
if self._shared:
for filename, nodes in self._getfiles():
for node in nodes:
ledger.markdataentry(self, filename, node)
ledger.markhistoryentry(self, filename, node)
def cleanup(self, ledger):
ui = self.ui
entries = ledger.sources.get(self, [])
count = 0
progress = ui.makeprogress(
_(b"cleaning up"), unit=b"files", total=len(entries)
)
for entry in entries:
if entry.gced or (entry.datarepacked and entry.historyrepacked):
progress.update(count)
path = self._getfilepath(entry.filename, entry.node)
util.tryunlink(path)
count += 1
progress.complete()
# Clean up the repo cache directory.
self._cleanupdirectory(self._getrepocachepath())
# BELOW THIS ARE NON-STANDARD APIS
def _cleanupdirectory(self, rootdir):
"""Removes the empty directories and unnecessary files within the root
directory recursively. Note that this method does not remove the root
directory itself."""
oldfiles = set()
otherfiles = set()
# osutil.listdir returns stat information which saves some rmdir/listdir
# syscalls.
for name, mode in util.osutil.listdir(rootdir):
if stat.S_ISDIR(mode):
dirpath = os.path.join(rootdir, name)
self._cleanupdirectory(dirpath)
# Now that the directory specified by dirpath is potentially
# empty, try and remove it.
try:
os.rmdir(dirpath)
except OSError:
pass
elif stat.S_ISREG(mode):
if name.endswith(b'_old'):
oldfiles.add(name[:-4])
else:
otherfiles.add(name)
# Remove the files which end with suffix '_old' and have no
# corresponding file without the suffix '_old'. See addremotefilelognode
# method for the generation/purpose of files with '_old' suffix.
for filename in oldfiles - otherfiles:
filepath = os.path.join(rootdir, filename + b'_old')
util.tryunlink(filepath)
def _getfiles(self):
"""Return a list of (filename, [node,...]) for all the revisions that
exist in the store.
This is useful for obtaining a list of all the contents of the store
when performing a repack to another store, since the store API requires
name+node keys and not namehash+node keys.
"""
existing = {}
for filenamehash, node in self._listkeys():
existing.setdefault(filenamehash, []).append(node)
filenamemap = self._resolvefilenames(existing.keys())
for filename, sha in pycompat.iteritems(filenamemap):
yield (filename, existing[sha])
def _resolvefilenames(self, hashes):
"""Given a list of filename hashes that are present in the
remotefilelog store, return a mapping from filename->hash.
This is useful when converting remotefilelog blobs into other storage
formats.
"""
if not hashes:
return {}
filenames = {}
missingfilename = set(hashes)
# Start with a full manifest, since it'll cover the majority of files
for filename in self.repo[b'tip'].manifest():
sha = hashutil.sha1(filename).digest()
if sha in missingfilename:
filenames[filename] = sha
missingfilename.discard(sha)
# Scan the changelog until we've found every file name
cl = self.repo.unfiltered().changelog
for rev in pycompat.xrange(len(cl) - 1, -1, -1):
if not missingfilename:
break
files = cl.readfiles(cl.node(rev))
for filename in files:
sha = hashutil.sha1(filename).digest()
if sha in missingfilename:
filenames[filename] = sha
missingfilename.discard(sha)
return filenames
def _getrepocachepath(self):
return (
os.path.join(self._path, self._reponame)
if self._shared
else self._path
)
def _listkeys(self):
"""List all the remotefilelog keys that exist in the store.
        Returns an iterator of (filename hash, filecontent hash) tuples.
"""
for root, dirs, files in os.walk(self._getrepocachepath()):
for filename in files:
if len(filename) != 40:
continue
node = filename
if self._shared:
# .../1a/85ffda..be21
filenamehash = root[-41:-39] + root[-38:]
else:
filenamehash = root[-40:]
yield (bin(filenamehash), bin(node))
def _getfilepath(self, name, node):
node = hex(node)
if self._shared:
key = shallowutil.getcachekey(self._reponame, name, node)
else:
key = shallowutil.getlocalkey(name, node)
return os.path.join(self._path, key)
def _getdata(self, name, node):
filepath = self._getfilepath(name, node)
try:
data = shallowutil.readfile(filepath)
if self._validatecache and not self._validatedata(data, filepath):
if self._validatecachelog:
with open(self._validatecachelog, b'ab+') as f:
f.write(b"corrupt %s during read\n" % filepath)
os.rename(filepath, filepath + b".corrupt")
raise KeyError(b"corrupt local cache file %s" % filepath)
except IOError:
raise KeyError(
b"no file found at %s for %s:%s" % (filepath, name, hex(node))
)
return data
def addremotefilelognode(self, name, node, data):
filepath = self._getfilepath(name, node)
oldumask = os.umask(0o002)
try:
# if this node already exists, save the old version for
# recovery/debugging purposes.
if os.path.exists(filepath):
newfilename = filepath + b'_old'
# newfilename can be read-only and shutil.copy will fail.
# Delete newfilename to avoid it
if os.path.exists(newfilename):
shallowutil.unlinkfile(newfilename)
shutil.copy(filepath, newfilename)
shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath))
shallowutil.writefile(filepath, data, readonly=True)
if self._validatecache:
if not self._validatekey(filepath, b'write'):
raise error.Abort(
_(b"local cache write was corrupted %s") % filepath
)
finally:
os.umask(oldumask)
def markrepo(self, path):
"""Call this to add the given repo path to the store's list of
repositories that are using it. This is useful later when doing garbage
        collection, since it allows us to inspect the repos to see what nodes
they want to be kept alive in the store.
"""
repospath = os.path.join(self._path, b"repos")
with open(repospath, b'ab') as reposfile:
reposfile.write(os.path.dirname(path) + b"\n")
repospathstat = os.stat(repospath)
if repospathstat.st_uid == self._uid:
os.chmod(repospath, 0o0664)
def _validatekey(self, path, action):
with open(path, b'rb') as f:
data = f.read()
if self._validatedata(data, path):
return True
if self._validatecachelog:
with open(self._validatecachelog, b'ab+') as f:
f.write(b"corrupt %s during %s\n" % (path, action))
os.rename(path, path + b".corrupt")
return False
def _validatedata(self, data, path):
try:
if len(data) > 0:
# see remotefilelogserver.createfileblob for the format
offset, size, flags = shallowutil.parsesizeflags(data)
if len(data) <= size:
# it is truncated
return False
# extract the node from the metadata
offset += size
datanode = data[offset : offset + 20]
# and compare against the path
if os.path.basename(path) == hex(datanode):
# Content matches the intended path
return True
return False
except (ValueError, RuntimeError):
pass
return False
def gc(self, keepkeys):
ui = self.ui
cachepath = self._path
# prune cache
queue = pycompat.queue.PriorityQueue()
originalsize = 0
size = 0
count = 0
removed = 0
# keep files newer than a day even if they aren't needed
limit = time.time() - (60 * 60 * 24)
progress = ui.makeprogress(
_(b"removing unnecessary files"), unit=b"files"
)
progress.update(0)
for root, dirs, files in os.walk(cachepath):
for file in files:
if file == b'repos':
continue
# Don't delete pack files
if b'/packs/' in root:
continue
progress.update(count)
path = os.path.join(root, file)
key = os.path.relpath(path, cachepath)
count += 1
try:
pathstat = os.stat(path)
except OSError as e:
# errno.ENOENT = no such file or directory
if e.errno != errno.ENOENT:
raise
msg = _(
b"warning: file %s was removed by another process\n"
)
ui.warn(msg % path)
continue
originalsize += pathstat.st_size
if key in keepkeys or pathstat.st_atime > limit:
queue.put((pathstat.st_atime, path, pathstat))
size += pathstat.st_size
else:
try:
shallowutil.unlinkfile(path)
except OSError as e:
# errno.ENOENT = no such file or directory
if e.errno != errno.ENOENT:
raise
msg = _(
b"warning: file %s was removed by another "
b"process\n"
)
ui.warn(msg % path)
continue
removed += 1
progress.complete()
# remove oldest files until under limit
limit = ui.configbytes(b"remotefilelog", b"cachelimit")
if size > limit:
excess = size - limit
progress = ui.makeprogress(
_(b"enforcing cache limit"), unit=b"bytes", total=excess
)
removedexcess = 0
while queue and size > limit and size > 0:
progress.update(removedexcess)
atime, oldpath, oldpathstat = queue.get()
try:
shallowutil.unlinkfile(oldpath)
except OSError as e:
# errno.ENOENT = no such file or directory
if e.errno != errno.ENOENT:
raise
msg = _(
b"warning: file %s was removed by another process\n"
)
ui.warn(msg % oldpath)
size -= oldpathstat.st_size
removed += 1
removedexcess += oldpathstat.st_size
progress.complete()
ui.status(
_(b"finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n")
% (
removed,
count,
float(originalsize) / 1024.0 / 1024.0 / 1024.0,
float(size) / 1024.0 / 1024.0 / 1024.0,
)
)
class baseunionstore(object):
def __init__(self, *args, **kwargs):
# If one of the functions that iterates all of the stores is about to
# throw a KeyError, try this many times with a full refresh between
# attempts. A repack operation may have moved data from one store to
# another while we were running.
self.numattempts = kwargs.get('numretries', 0) + 1
# If not-None, call this function on every retry and if the attempts are
# exhausted.
self.retrylog = kwargs.get('retrylog', None)
def markforrefresh(self):
for store in self.stores:
if util.safehasattr(store, b'markforrefresh'):
store.markforrefresh()
@staticmethod
def retriable(fn):
def noop(*args):
pass
def wrapped(self, *args, **kwargs):
retrylog = self.retrylog or noop
funcname = fn.__name__
i = 0
while i < self.numattempts:
if i > 0:
retrylog(
b're-attempting (n=%d) %s\n'
% (i, pycompat.sysbytes(funcname))
)
self.markforrefresh()
i += 1
try:
return fn(self, *args, **kwargs)
except KeyError:
if i == self.numattempts:
# retries exhausted
retrylog(
b'retries exhausted in %s, raising KeyError\n'
% pycompat.sysbytes(funcname)
)
raise
return wrapped
```
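`basestore.gc()` above prunes the shared cache in two passes: files that are pinned by `keepkeys` or were accessed within the last day are kept and queued by access time, everything else is deleted, and then the oldest survivors are evicted until the cache fits under the configured byte limit. The following standalone sketch shows the same policy; it is not the remotefilelog API, and the paths, key set, and limit are whatever the caller supplies.

```python
# Standalone sketch of the two-phase pruning performed by basestore.gc() above;
# not the remotefilelog API. keepkeys holds relative paths that must survive.
import heapq
import os
import time


def prune_cache(cachepath, keepkeys, limit_bytes, keep_window=60 * 60 * 24):
    cutoff = time.time() - keep_window   # keep anything touched within a day
    survivors = []                       # min-heap of (atime, path, size)
    size = 0
    for root, dirs, files in os.walk(cachepath):
        for name in files:
            path = os.path.join(root, name)
            key = os.path.relpath(path, cachepath)
            try:
                st = os.stat(path)
            except FileNotFoundError:
                continue                 # removed by another process
            if key in keepkeys or st.st_atime > cutoff:
                heapq.heappush(survivors, (st.st_atime, path, st.st_size))
                size += st.st_size
            else:
                os.unlink(path)
    # Second pass: evict least recently used survivors until under the limit.
    while survivors and size > limit_bytes:
        atime, path, fsize = heapq.heappop(survivors)
        try:
            os.unlink(path)
        except FileNotFoundError:
            pass
        size -= fsize
    return size
```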
#### File: hgext/remotefilelog/shallowbundle.py
```python
from __future__ import absolute_import
from mercurial.i18n import _
from mercurial.node import bin, hex, nullid
from mercurial import (
bundlerepo,
changegroup,
error,
match,
mdiff,
pycompat,
)
from . import (
constants,
remotefilelog,
shallowutil,
)
NoFiles = 0
LocalFiles = 1
AllFiles = 2
def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
if not isinstance(rlog, remotefilelog.remotefilelog):
for c in super(cls, self).group(nodelist, rlog, lookup, units=units):
yield c
return
if len(nodelist) == 0:
yield self.close()
return
nodelist = shallowutil.sortnodes(nodelist, rlog.parents)
# add the parent of the first rev
p = rlog.parents(nodelist[0])[0]
nodelist.insert(0, p)
# build deltas
for i in pycompat.xrange(len(nodelist) - 1):
prev, curr = nodelist[i], nodelist[i + 1]
linknode = lookup(curr)
for c in self.nodechunk(rlog, curr, prev, linknode):
yield c
yield self.close()
class shallowcg1packer(changegroup.cgpacker):
def generate(self, commonrevs, clnodes, fastpathlinkrev, source, **kwargs):
if shallowutil.isenabled(self._repo):
fastpathlinkrev = False
return super(shallowcg1packer, self).generate(
commonrevs, clnodes, fastpathlinkrev, source, **kwargs
)
def group(self, nodelist, rlog, lookup, units=None, reorder=None):
return shallowgroup(
shallowcg1packer, self, nodelist, rlog, lookup, units=units
)
def generatefiles(self, changedfiles, *args):
try:
linknodes, commonrevs, source = args
except ValueError:
commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
if shallowutil.isenabled(self._repo):
repo = self._repo
if isinstance(repo, bundlerepo.bundlerepository):
# If the bundle contains filelogs, we can't pull from it, since
                # bundlerepo is heavily tied to revlogs. Require that
                # the user run unbundle instead.
# Force load the filelog data.
bundlerepo.bundlerepository.file(repo, b'foo')
if repo._cgfilespos:
raise error.Abort(
b"cannot pull from full bundles",
hint=b"use `hg unbundle` instead",
)
return []
filestosend = self.shouldaddfilegroups(source)
if filestosend == NoFiles:
changedfiles = list(
[f for f in changedfiles if not repo.shallowmatch(f)]
)
return super(shallowcg1packer, self).generatefiles(changedfiles, *args)
def shouldaddfilegroups(self, source):
repo = self._repo
if not shallowutil.isenabled(repo):
return AllFiles
if source == b"push" or source == b"bundle":
return AllFiles
caps = self._bundlecaps or []
if source == b"serve" or source == b"pull":
if constants.BUNDLE2_CAPABLITY in caps:
return LocalFiles
else:
# Serving to a full repo requires us to serve everything
repo.ui.warn(_(b"pulling from a shallow repo\n"))
return AllFiles
return NoFiles
def prune(self, rlog, missing, commonrevs):
if not isinstance(rlog, remotefilelog.remotefilelog):
return super(shallowcg1packer, self).prune(
rlog, missing, commonrevs
)
repo = self._repo
results = []
for fnode in missing:
fctx = repo.filectx(rlog.filename, fileid=fnode)
if fctx.linkrev() not in commonrevs:
results.append(fnode)
return results
def nodechunk(self, revlog, node, prevnode, linknode):
prefix = b''
if prevnode == nullid:
delta = revlog.rawdata(node)
prefix = mdiff.trivialdiffheader(len(delta))
else:
# Actually uses remotefilelog.revdiff which works on nodes, not revs
delta = revlog.revdiff(prevnode, node)
p1, p2 = revlog.parents(node)
flags = revlog.flags(node)
meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
meta += prefix
l = len(meta) + len(delta)
yield changegroup.chunkheader(l)
yield meta
yield delta
def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
if not shallowutil.isenabled(repo):
return orig(repo, outgoing, version, source, *args, **kwargs)
original = repo.shallowmatch
try:
        # if serving, only send files the client has patterns for
if source == b'serve':
bundlecaps = kwargs.get('bundlecaps')
includepattern = None
excludepattern = None
for cap in bundlecaps or []:
if cap.startswith(b"includepattern="):
raw = cap[len(b"includepattern=") :]
if raw:
includepattern = raw.split(b'\0')
elif cap.startswith(b"excludepattern="):
raw = cap[len(b"excludepattern=") :]
if raw:
excludepattern = raw.split(b'\0')
if includepattern or excludepattern:
repo.shallowmatch = match.match(
repo.root, b'', None, includepattern, excludepattern
)
else:
repo.shallowmatch = match.always()
return orig(repo, outgoing, version, source, *args, **kwargs)
finally:
repo.shallowmatch = original
def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
if not shallowutil.isenabled(repo):
return orig(repo, source, revmap, trp, expectedfiles, *args)
newfiles = 0
visited = set()
revisiondatas = {}
queue = []
# Normal Mercurial processes each file one at a time, adding all
# the new revisions for that file at once. In remotefilelog a file
# revision may depend on a different file's revision (in the case
# of a rename/copy), so we must lay all revisions down across all
# files in topological order.
# read all the file chunks but don't add them
progress = repo.ui.makeprogress(_(b'files'), total=expectedfiles)
while True:
chunkdata = source.filelogheader()
if not chunkdata:
break
f = chunkdata[b"filename"]
repo.ui.debug(b"adding %s revisions\n" % f)
progress.increment()
if not repo.shallowmatch(f):
fl = repo.file(f)
deltas = source.deltaiter()
fl.addgroup(deltas, revmap, trp)
continue
chain = None
while True:
# returns: (node, p1, p2, cs, deltabase, delta, flags) or None
revisiondata = source.deltachunk(chain)
if not revisiondata:
break
chain = revisiondata[0]
revisiondatas[(f, chain)] = revisiondata
queue.append((f, chain))
if f not in visited:
newfiles += 1
visited.add(f)
if chain is None:
raise error.Abort(_(b"received file revlog group is empty"))
processed = set()
def available(f, node, depf, depnode):
if depnode != nullid and (depf, depnode) not in processed:
if not (depf, depnode) in revisiondatas:
# It's not in the changegroup, assume it's already
# in the repo
return True
# re-add self to queue
queue.insert(0, (f, node))
# add dependency in front
queue.insert(0, (depf, depnode))
return False
return True
skipcount = 0
# Prefetch the non-bundled revisions that we will need
prefetchfiles = []
for f, node in queue:
revisiondata = revisiondatas[(f, node)]
# revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
for dependent in dependents:
if dependent == nullid or (f, dependent) in revisiondatas:
continue
prefetchfiles.append((f, hex(dependent)))
repo.fileservice.prefetch(prefetchfiles)
# Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
while queue:
f, node = queue.pop(0)
if (f, node) in processed:
continue
skipcount += 1
if skipcount > len(queue) + 1:
raise error.Abort(_(b"circular node dependency"))
fl = repo.file(f)
revisiondata = revisiondatas[(f, node)]
# revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
node, p1, p2, linknode, deltabase, delta, flags = revisiondata
if not available(f, node, f, deltabase):
continue
base = fl.rawdata(deltabase)
text = mdiff.patch(base, delta)
if not isinstance(text, bytes):
text = bytes(text)
meta, text = shallowutil.parsemeta(text)
if b'copy' in meta:
copyfrom = meta[b'copy']
copynode = bin(meta[b'copyrev'])
if not available(f, node, copyfrom, copynode):
continue
for p in [p1, p2]:
if p != nullid:
if not available(f, node, f, p):
continue
fl.add(text, meta, trp, linknode, p1, p2)
processed.add((f, node))
skipcount = 0
progress.complete()
return len(revisiondatas), newfiles
```
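`addchangegroupfiles()` above cannot apply file revisions in arrival order, since a revision may depend on another file's revision (renames/copies) or on a delta base that has not been laid down yet; it therefore pops work off a queue, pushes unmet dependencies to the front, re-queues the blocked item, and aborts when it stops making progress. Here is a simplified, self-contained sketch of that ordering trick; it is not the changegroup API, and the keys and the progress guard are illustrative.

```python
def apply_in_dependency_order(items, deps, apply):
    """items: keys to process; deps: key -> prerequisite keys (keys outside
    `items` are treated as already present); apply: callback run per key."""
    queue = list(items)
    pending = set(items)
    processed = set()
    skipcount = 0
    while queue:
        key = queue.pop(0)
        if key in processed:
            continue
        skipcount += 1
        if skipcount > len(pending - processed) + 1:
            raise RuntimeError('circular dependency detected')
        missing = [d for d in deps.get(key, ())
                   if d in pending and d not in processed]
        if missing:
            queue.insert(0, key)      # retry this item after its dependencies
            for d in missing:
                queue.insert(0, d)    # dependencies go to the front
            continue
        apply(key)
        processed.add(key)
        skipcount = 0
    return processed


# Hypothetical usage: write 'rename' only after its copy source 'orig'.
order = []
apply_in_dependency_order(['rename', 'orig'], {'rename': ['orig']}, order.append)
assert order == ['orig', 'rename']
```

The assertion at the end is a tiny usage check: the copy source is laid down before the revision that depends on it.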
#### File: hgext/remotefilelog/shallowverifier.py
```python
from __future__ import absolute_import
from mercurial.i18n import _
from mercurial import verify
class shallowverifier(verify.verifier):
def _verifyfiles(self, filenodes, filelinkrevs):
"""Skips files verification since repo's not guaranteed to have them"""
self.repo.ui.status(
_(b"skipping filelog check since remotefilelog is used\n")
)
return 0, 0
```
#### File: site-packages/icmplib/traceroute.py
```python
from time import sleep
from .sockets import ICMPv4Socket, ICMPv6Socket
from .models import ICMPRequest, Hop
from .exceptions import *
from .utils import PID, resolve, is_ipv6_address
def traceroute(address, count=2, interval=0.05, timeout=2, id=PID,
first_hop=1, max_hops=30, source=None, fast=False, **kwargs):
'''
Determine the route to a destination host.
The Internet is a large and complex aggregation of network hardware,
connected together by gateways. Tracking the route one's packets
follow can be difficult. This function uses the IP protocol time to
live field and attempts to elicit an ICMP Time Exceeded response
from each gateway along the path to some host.
This function requires root privileges to run.
:type address: str
:param address: The IP address, hostname or FQDN of the host to
reach. For deterministic behavior, prefer to use an IP address.
:type count: int, optional
    :param count: The number of pings to perform per hop. Default to 2.
:type interval: int or float, optional
:param interval: The interval in seconds between sending each
packet. Default to 0.05.
:type timeout: int or float, optional
:param timeout: The maximum waiting time for receiving a reply in
seconds. Default to 2.
:type id: int, optional
:param id: The identifier of ICMP requests. Used to match the
responses with requests. In practice, a unique identifier
should be used for every traceroute process. By default, the
identifier corresponds to the PID.
:type first_hop: int, optional
:param first_hop: The initial time to live value used in outgoing
probe packets. Default to 1.
:type max_hops: int, optional
:param max_hops: The maximum time to live (max number of hops) used
in outgoing probe packets. Default to 30.
:type source: str, optional
:param source: The IP address from which you want to send packets.
By default, the interface is automatically chosen according to
the specified destination.
:type fast: bool, optional
:param fast: When this option is enabled and an intermediate router
has been reached, skip to the next hop rather than perform
additional requests. The `count` parameter then becomes the
maximum number of requests in the event of no response.
Default to False.
    Advanced (**kwargs):
:type payload: bytes, optional
:param payload: The payload content in bytes. A random payload is
used by default.
:type payload_size: int, optional
:param payload_size: The payload size. Ignored when the `payload`
parameter is set. Default to 56.
:type traffic_class: int, optional
:param traffic_class: The traffic class of ICMP packets.
Provides a defined level of service to packets by setting the
DS Field (formerly TOS) or the Traffic Class field of IP
headers. Packets are delivered with the minimum priority by
default (Best-effort delivery).
Intermediate routers must be able to support this feature.
Only available on Unix systems. Ignored on Windows.
:rtype: list of Hop
:returns: A list of `Hop` objects representing the route to the
desired destination. The list is sorted in ascending order
according to the distance, in terms of hops, that separates the
remote host from the current machine. Gateways that do not
respond to requests are not added to this list.
:raises NameLookupError: If you pass a hostname or FQDN in
parameters and it does not exist or cannot be resolved.
:raises SocketPermissionError: If the privileges are insufficient
to create the socket.
:raises SocketAddressError: If the source address cannot be
assigned to the socket.
:raises ICMPSocketError: If another error occurs. See the
`ICMPv4Socket` or `ICMPv6Socket` class for details.
Usage::
>>> from icmplib import traceroute
>>> hops = traceroute('1.1.1.1')
>>> last_distance = 0
>>> for hop in hops:
... if last_distance + 1 != hop.distance:
... print('Some gateways are not responding')
...
... print(f'{hop.distance} {hop.address} {hop.avg_rtt} ms')
...
... last_distance = hop.distance
...
1 10.0.0.1 5.196 ms
2 172.16.31.10 7.552 ms
3 172.16.31.10 12.21 ms
* Some gateways are not responding
5 192.168.3.11 22.15 ms
6 1.1.1.1 13.59 ms
See the `Hop` class for details.
'''
address = resolve(address)
if is_ipv6_address(address):
sock = ICMPv6Socket(source)
else:
sock = ICMPv4Socket(source)
ttl = first_hop
host_reached = False
hops = []
while not host_reached and ttl <= max_hops:
hop_address = None
packets_sent = 0
packets_received = 0
min_rtt = float('inf')
avg_rtt = 0.0
max_rtt = 0.0
for sequence in range(count):
request = ICMPRequest(
destination=address,
id=id,
sequence=sequence,
ttl=ttl,
**kwargs)
try:
sock.send(request)
packets_sent += 1
reply = sock.receive(request, timeout)
reply.raise_for_status()
host_reached = True
except TimeExceeded:
sleep(interval)
except ICMPLibError:
continue
hop_address = reply.source
packets_received += 1
round_trip_time = (reply.time - request.time) * 1000
avg_rtt += round_trip_time
min_rtt = min(round_trip_time, min_rtt)
max_rtt = max(round_trip_time, max_rtt)
if fast:
break
if packets_received:
avg_rtt /= packets_received
hop = Hop(
address=hop_address,
min_rtt=min_rtt,
avg_rtt=avg_rtt,
max_rtt=max_rtt,
packets_sent=packets_sent,
packets_received=packets_received,
distance=ttl)
hops.append(hop)
ttl += 1
sock.close()
return hops
```
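For reference, a small usage sketch of `traceroute()` based only on what the docstring above documents (the `fast` flag, the `Hop` attributes, and the listed exceptions); the hostname is a placeholder and the call needs the root privileges noted above.

```python
# Hedged usage sketch for traceroute(); 'example.com' is a placeholder and the
# call requires root privileges, as documented in the function's docstring.
from icmplib import traceroute
from icmplib.exceptions import NameLookupError, SocketPermissionError

try:
    hops = traceroute('example.com', count=2, fast=True)
except NameLookupError:
    print('could not resolve the destination')
except SocketPermissionError:
    print('raw ICMP sockets require root privileges')
else:
    for hop in hops:
        print(hop.distance, hop.address, round(hop.avg_rtt, 3), 'ms')
```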
#### File: site-packages/mercurial/bookmarks.py
```python
from __future__ import absolute_import
import errno
import struct
from .i18n import _
from .node import (
bin,
hex,
short,
wdirid,
)
from .pycompat import getattr
from . import (
encoding,
error,
obsutil,
pycompat,
scmutil,
txnutil,
util,
)
# label constants
# until 3.5, bookmarks.current was the advertised name, not
# bookmarks.active, so we must use both to avoid breaking old
# custom styles
activebookmarklabel = b'bookmarks.active bookmarks.current'
BOOKMARKS_IN_STORE_REQUIREMENT = b'bookmarksinstore'
def bookmarksinstore(repo):
return BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements
def bookmarksvfs(repo):
return repo.svfs if bookmarksinstore(repo) else repo.vfs
def _getbkfile(repo):
"""Hook so that extensions that mess with the store can hook bm storage.
    For core, this just handles whether we should see pending
bookmarks or the committed ones. Other extensions (like share)
may need to tweak this behavior further.
"""
fp, pending = txnutil.trypending(
repo.root, bookmarksvfs(repo), b'bookmarks'
)
return fp
class bmstore(object):
r"""Storage for bookmarks.
This object should do all bookmark-related reads and writes, so
that it's fairly simple to replace the storage underlying
bookmarks without having to clone the logic surrounding
bookmarks. This type also should manage the active bookmark, if
any.
This particular bmstore implementation stores bookmarks as
{hash}\s{name}\n (the same format as localtags) in
.hg/bookmarks. The mapping is stored as {name: nodeid}.
"""
def __init__(self, repo):
self._repo = repo
self._refmap = refmap = {} # refspec: node
self._nodemap = nodemap = {} # node: sorted([refspec, ...])
self._clean = True
self._aclean = True
has_node = repo.changelog.index.has_node
tonode = bin # force local lookup
try:
with _getbkfile(repo) as bkfile:
for line in bkfile:
line = line.strip()
if not line:
continue
try:
sha, refspec = line.split(b' ', 1)
node = tonode(sha)
if has_node(node):
refspec = encoding.tolocal(refspec)
refmap[refspec] = node
nrefs = nodemap.get(node)
if nrefs is None:
nodemap[node] = [refspec]
else:
nrefs.append(refspec)
if nrefs[-2] > refspec:
# bookmarks weren't sorted before 4.5
nrefs.sort()
except (TypeError, ValueError):
# TypeError:
# - bin(...)
# ValueError:
# - node in nm, for non-20-bytes entry
# - split(...), for string without ' '
bookmarkspath = b'.hg/bookmarks'
if bookmarksinstore(repo):
bookmarkspath = b'.hg/store/bookmarks'
repo.ui.warn(
_(b'malformed line in %s: %r\n')
% (bookmarkspath, pycompat.bytestr(line))
)
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
self._active = _readactive(repo, self)
@property
def active(self):
return self._active
@active.setter
def active(self, mark):
if mark is not None and mark not in self._refmap:
raise AssertionError(b'bookmark %s does not exist!' % mark)
self._active = mark
self._aclean = False
def __len__(self):
return len(self._refmap)
def __iter__(self):
return iter(self._refmap)
def iteritems(self):
return pycompat.iteritems(self._refmap)
def items(self):
return self._refmap.items()
# TODO: maybe rename to allnames()?
def keys(self):
return self._refmap.keys()
# TODO: maybe rename to allnodes()? but nodes would have to be deduplicated
# could be self._nodemap.keys()
def values(self):
return self._refmap.values()
def __contains__(self, mark):
return mark in self._refmap
def __getitem__(self, mark):
return self._refmap[mark]
def get(self, mark, default=None):
return self._refmap.get(mark, default)
def _set(self, mark, node):
self._clean = False
if mark in self._refmap:
self._del(mark)
self._refmap[mark] = node
nrefs = self._nodemap.get(node)
if nrefs is None:
self._nodemap[node] = [mark]
else:
nrefs.append(mark)
nrefs.sort()
def _del(self, mark):
if mark not in self._refmap:
return
self._clean = False
node = self._refmap.pop(mark)
nrefs = self._nodemap[node]
if len(nrefs) == 1:
assert nrefs[0] == mark
del self._nodemap[node]
else:
nrefs.remove(mark)
def names(self, node):
"""Return a sorted list of bookmarks pointing to the specified node"""
return self._nodemap.get(node, [])
def applychanges(self, repo, tr, changes):
"""Apply a list of changes to bookmarks"""
bmchanges = tr.changes.get(b'bookmarks')
for name, node in changes:
old = self._refmap.get(name)
if node is None:
self._del(name)
else:
self._set(name, node)
if bmchanges is not None:
                # if a previous value exists, preserve the "initial" value
previous = bmchanges.get(name)
if previous is not None:
old = previous[0]
bmchanges[name] = (old, node)
self._recordchange(tr)
def _recordchange(self, tr):
"""record that bookmarks have been changed in a transaction
The transaction is then responsible for updating the file content."""
location = b'' if bookmarksinstore(self._repo) else b'plain'
tr.addfilegenerator(
b'bookmarks', (b'bookmarks',), self._write, location=location
)
tr.hookargs[b'bookmark_moved'] = b'1'
def _writerepo(self, repo):
"""Factored out for extensibility"""
rbm = repo._bookmarks
if rbm.active not in self._refmap:
rbm.active = None
rbm._writeactive()
if bookmarksinstore(repo):
vfs = repo.svfs
lock = repo.lock()
else:
vfs = repo.vfs
lock = repo.wlock()
with lock:
with vfs(b'bookmarks', b'w', atomictemp=True, checkambig=True) as f:
self._write(f)
def _writeactive(self):
if self._aclean:
return
with self._repo.wlock():
if self._active is not None:
with self._repo.vfs(
b'bookmarks.current', b'w', atomictemp=True, checkambig=True
) as f:
f.write(encoding.fromlocal(self._active))
else:
self._repo.vfs.tryunlink(b'bookmarks.current')
self._aclean = True
def _write(self, fp):
for name, node in sorted(pycompat.iteritems(self._refmap)):
fp.write(b"%s %s\n" % (hex(node), encoding.fromlocal(name)))
self._clean = True
self._repo.invalidatevolatilesets()
def expandname(self, bname):
if bname == b'.':
if self.active:
return self.active
else:
raise error.RepoLookupError(_(b"no active bookmark"))
return bname
def checkconflict(self, mark, force=False, target=None):
"""check repo for a potential clash of mark with an existing bookmark,
branch, or hash
If target is supplied, then check that we are moving the bookmark
forward.
If force is supplied, then forcibly move the bookmark to a new commit
        regardless of whether it is a move forward.
        If divergent bookmarks are to be deleted, they will be returned as a list.
"""
cur = self._repo[b'.'].node()
if mark in self._refmap and not force:
if target:
if self._refmap[mark] == target and target == cur:
# re-activating a bookmark
return []
rev = self._repo[target].rev()
anc = self._repo.changelog.ancestors([rev])
bmctx = self._repo[self[mark]]
divs = [
self._refmap[b]
for b in self._refmap
if b.split(b'@', 1)[0] == mark.split(b'@', 1)[0]
]
# allow resolving a single divergent bookmark even if moving
# the bookmark across branches when a revision is specified
# that contains a divergent bookmark
if bmctx.rev() not in anc and target in divs:
return divergent2delete(self._repo, [target], mark)
deletefrom = [
b for b in divs if self._repo[b].rev() in anc or b == target
]
delbms = divergent2delete(self._repo, deletefrom, mark)
if validdest(self._repo, bmctx, self._repo[target]):
self._repo.ui.status(
_(b"moving bookmark '%s' forward from %s\n")
% (mark, short(bmctx.node()))
)
return delbms
raise error.Abort(
_(b"bookmark '%s' already exists (use -f to force)") % mark
)
if (
mark in self._repo.branchmap()
or mark == self._repo.dirstate.branch()
) and not force:
raise error.Abort(
_(b"a bookmark cannot have the name of an existing branch")
)
if len(mark) > 3 and not force:
try:
shadowhash = scmutil.isrevsymbol(self._repo, mark)
except error.LookupError: # ambiguous identifier
shadowhash = False
if shadowhash:
self._repo.ui.warn(
_(
b"bookmark %s matches a changeset hash\n"
b"(did you leave a -r out of an 'hg bookmark' "
b"command?)\n"
)
% mark
)
return []
def _readactive(repo, marks):
"""
Get the active bookmark. We can have an active bookmark that updates
itself as we commit. This function returns the name of that bookmark.
It is stored in .hg/bookmarks.current
"""
# No readline() in osutil.posixfile, reading everything is
# cheap.
content = repo.vfs.tryread(b'bookmarks.current')
mark = encoding.tolocal((content.splitlines() or [b''])[0])
if mark == b'' or mark not in marks:
mark = None
return mark
def activate(repo, mark):
"""
Set the given bookmark to be 'active', meaning that this bookmark will
follow new commits that are made.
The name is recorded in .hg/bookmarks.current
"""
repo._bookmarks.active = mark
repo._bookmarks._writeactive()
def deactivate(repo):
"""
Unset the active bookmark in this repository.
"""
repo._bookmarks.active = None
repo._bookmarks._writeactive()
def isactivewdirparent(repo):
"""
Tell whether the 'active' bookmark (the one that follows new commits)
points to one of the parents of the current working directory (wdir).
While this is normally the case, it can on occasion be false; for example,
immediately after a pull, the active bookmark can be moved to point
to a place different than the wdir. This is solved by running `hg update`.
"""
mark = repo._activebookmark
marks = repo._bookmarks
parents = [p.node() for p in repo[None].parents()]
return mark in marks and marks[mark] in parents
def divergent2delete(repo, deletefrom, bm):
"""find divergent versions of bm on nodes in deletefrom.
the list of bookmark to delete."""
todelete = []
marks = repo._bookmarks
divergent = [
b for b in marks if b.split(b'@', 1)[0] == bm.split(b'@', 1)[0]
]
for mark in divergent:
if mark == b'@' or b'@' not in mark:
# can't be divergent by definition
continue
if mark and marks[mark] in deletefrom:
if mark != bm:
todelete.append(mark)
return todelete
def headsforactive(repo):
"""Given a repo with an active bookmark, return divergent bookmark nodes.
Args:
repo: A repository with an active bookmark.
Returns:
A list of binary node ids that is the full list of other
revisions with bookmarks divergent from the active bookmark. If
there were no divergent bookmarks, then this list will contain
only one entry.
"""
if not repo._activebookmark:
raise ValueError(
b'headsforactive() only makes sense with an active bookmark'
)
name = repo._activebookmark.split(b'@', 1)[0]
heads = []
for mark, n in pycompat.iteritems(repo._bookmarks):
if mark.split(b'@', 1)[0] == name:
heads.append(n)
return heads
def calculateupdate(ui, repo):
"""Return a tuple (activemark, movemarkfrom) indicating the active bookmark
and where to move the active bookmark from, if needed."""
checkout, movemarkfrom = None, None
activemark = repo._activebookmark
if isactivewdirparent(repo):
movemarkfrom = repo[b'.'].node()
elif activemark:
ui.status(_(b"updating to active bookmark %s\n") % activemark)
checkout = activemark
return (checkout, movemarkfrom)
def update(repo, parents, node):
deletefrom = parents
marks = repo._bookmarks
active = marks.active
if not active:
return False
bmchanges = []
if marks[active] in parents:
new = repo[node]
divs = [
repo[marks[b]]
for b in marks
if b.split(b'@', 1)[0] == active.split(b'@', 1)[0]
]
anc = repo.changelog.ancestors([new.rev()])
deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
if validdest(repo, repo[marks[active]], new):
bmchanges.append((active, new.node()))
for bm in divergent2delete(repo, deletefrom, active):
bmchanges.append((bm, None))
if bmchanges:
with repo.lock(), repo.transaction(b'bookmark') as tr:
marks.applychanges(repo, tr, bmchanges)
return bool(bmchanges)
def isdivergent(b):
return b'@' in b and not b.endswith(b'@')
def listbinbookmarks(repo):
# We may try to list bookmarks on a repo type that does not
# support it (e.g., statichttprepository).
marks = getattr(repo, '_bookmarks', {})
hasnode = repo.changelog.hasnode
for k, v in pycompat.iteritems(marks):
# don't expose local divergent bookmarks
if hasnode(v) and not isdivergent(k):
yield k, v
def listbookmarks(repo):
d = {}
for book, node in listbinbookmarks(repo):
d[book] = hex(node)
return d
def pushbookmark(repo, key, old, new):
if isdivergent(key):
return False
if bookmarksinstore(repo):
wlock = util.nullcontextmanager()
else:
wlock = repo.wlock()
with wlock, repo.lock(), repo.transaction(b'bookmarks') as tr:
marks = repo._bookmarks
existing = hex(marks.get(key, b''))
if existing != old and existing != new:
return False
if new == b'':
changes = [(key, None)]
else:
if new not in repo:
return False
changes = [(key, repo[new].node())]
marks.applychanges(repo, tr, changes)
return True
def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
"""Compare bookmarks between srcmarks and dstmarks
This returns tuple "(addsrc, adddst, advsrc, advdst, diverge,
differ, invalid)", each are list of bookmarks below:
:addsrc: added on src side (removed on dst side, perhaps)
:adddst: added on dst side (removed on src side, perhaps)
:advsrc: advanced on src side
:advdst: advanced on dst side
:diverge: diverge
    :differ: changed, but the changeset referred to on src is unknown on dst
    :invalid: unknown on both sides
    :same: same on both sides
    Each element of the lists in the result tuple is a tuple "(bookmark name,
changeset ID on source side, changeset ID on destination
side)". Each changeset ID is a binary node or None.
Changeset IDs of tuples in "addsrc", "adddst", "differ" or
"invalid" list may be unknown for repo.
If "targets" is specified, only bookmarks listed in it are
examined.
"""
if targets:
bset = set(targets)
else:
srcmarkset = set(srcmarks)
dstmarkset = set(dstmarks)
bset = srcmarkset | dstmarkset
results = ([], [], [], [], [], [], [], [])
addsrc = results[0].append
adddst = results[1].append
advsrc = results[2].append
advdst = results[3].append
diverge = results[4].append
differ = results[5].append
invalid = results[6].append
same = results[7].append
for b in sorted(bset):
if b not in srcmarks:
if b in dstmarks:
adddst((b, None, dstmarks[b]))
else:
invalid((b, None, None))
elif b not in dstmarks:
addsrc((b, srcmarks[b], None))
else:
scid = srcmarks[b]
dcid = dstmarks[b]
if scid == dcid:
same((b, scid, dcid))
elif scid in repo and dcid in repo:
sctx = repo[scid]
dctx = repo[dcid]
if sctx.rev() < dctx.rev():
if validdest(repo, sctx, dctx):
advdst((b, scid, dcid))
else:
diverge((b, scid, dcid))
else:
if validdest(repo, dctx, sctx):
advsrc((b, scid, dcid))
else:
diverge((b, scid, dcid))
else:
# it is too expensive to examine in detail, in this case
differ((b, scid, dcid))
return results
def _diverge(ui, b, path, localmarks, remotenode):
"""Return appropriate diverged bookmark for specified ``path``
    This returns None if it fails to assign any divergent bookmark name.
    This reuses an already existing one with an "@number" suffix, if it
    refers to ``remotenode``.
"""
if b == b'@':
b = b''
# try to use an @pathalias suffix
# if an @pathalias already exists, we overwrite (update) it
if path.startswith(b"file:"):
path = util.url(path).path
for p, u in ui.configitems(b"paths"):
if u.startswith(b"file:"):
u = util.url(u).path
if path == u:
return b'%s@%s' % (b, p)
# assign a unique "@number" suffix newly
for x in range(1, 100):
n = b'%s@%d' % (b, x)
if n not in localmarks or localmarks[n] == remotenode:
return n
return None
def unhexlifybookmarks(marks):
binremotemarks = {}
for name, node in marks.items():
binremotemarks[name] = bin(node)
return binremotemarks
_binaryentry = struct.Struct(b'>20sH')
def binaryencode(bookmarks):
"""encode a '(bookmark, node)' iterable into a binary stream
the binary format is:
<node><bookmark-length><bookmark-name>
:node: is a 20 bytes binary node,
:bookmark-length: an unsigned short,
:bookmark-name: the name of the bookmark (of length <bookmark-length>)
wdirid (all bits set) will be used as a special value for "missing"
"""
binarydata = []
for book, node in bookmarks:
if not node: # None or ''
node = wdirid
binarydata.append(_binaryentry.pack(node, len(book)))
binarydata.append(book)
return b''.join(binarydata)
def binarydecode(stream):
"""decode a binary stream into an '(bookmark, node)' iterable
the binary format is:
<node><bookmark-length><bookmark-name>
:node: is a 20 bytes binary node,
:bookmark-length: an unsigned short,
    :bookmark-name: the name of the bookmark (of length <bookmark-length>)
wdirid (all bits set) will be used as a special value for "missing"
"""
entrysize = _binaryentry.size
books = []
while True:
entry = stream.read(entrysize)
if len(entry) < entrysize:
if entry:
raise error.Abort(_(b'bad bookmark stream'))
break
node, length = _binaryentry.unpack(entry)
bookmark = stream.read(length)
if len(bookmark) < length:
if entry:
raise error.Abort(_(b'bad bookmark stream'))
if node == wdirid:
node = None
books.append((bookmark, node))
return books
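# Illustration of the entry layout handled by binaryencode()/binarydecode()
# above (hypothetical values): struct.Struct(b'>20sH').pack(node, len(name))
# yields a fixed 22-byte header (the 20-byte binary node followed by a
# big-endian unsigned short name length), and the bookmark name follows
# immediately after it. A missing node is written as wdirid (all bits set)
# and decoded back to None.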
def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
ui.debug(b"checking for updated bookmarks\n")
localmarks = repo._bookmarks
(
addsrc,
adddst,
advsrc,
advdst,
diverge,
differ,
invalid,
same,
) = comparebookmarks(repo, remotemarks, localmarks)
status = ui.status
warn = ui.warn
if ui.configbool(b'ui', b'quietbookmarkmove'):
status = warn = ui.debug
explicit = set(explicit)
changed = []
for b, scid, dcid in addsrc:
if scid in repo: # add remote bookmarks for changes we already have
changed.append(
(b, scid, status, _(b"adding remote bookmark %s\n") % b)
)
elif b in explicit:
explicit.remove(b)
ui.warn(
_(b"remote bookmark %s points to locally missing %s\n")
% (b, hex(scid)[:12])
)
for b, scid, dcid in advsrc:
changed.append((b, scid, status, _(b"updating bookmark %s\n") % b))
# remove normal movement from explicit set
explicit.difference_update(d[0] for d in changed)
for b, scid, dcid in diverge:
if b in explicit:
explicit.discard(b)
changed.append((b, scid, status, _(b"importing bookmark %s\n") % b))
else:
db = _diverge(ui, b, path, localmarks, scid)
if db:
changed.append(
(
db,
scid,
warn,
_(b"divergent bookmark %s stored as %s\n") % (b, db),
)
)
else:
warn(
_(
b"warning: failed to assign numbered name "
b"to divergent bookmark %s\n"
)
% b
)
for b, scid, dcid in adddst + advdst:
if b in explicit:
explicit.discard(b)
changed.append((b, scid, status, _(b"importing bookmark %s\n") % b))
for b, scid, dcid in differ:
if b in explicit:
explicit.remove(b)
ui.warn(
_(b"remote bookmark %s points to locally missing %s\n")
% (b, hex(scid)[:12])
)
if changed:
tr = trfunc()
changes = []
key = lambda t: (t[0], t[1] or b'')
for b, node, writer, msg in sorted(changed, key=key):
changes.append((b, node))
writer(msg)
localmarks.applychanges(repo, tr, changes)
def incoming(ui, repo, peer):
"""Show bookmarks incoming from other to repo"""
ui.status(_(b"searching for changed bookmarks\n"))
with peer.commandexecutor() as e:
remotemarks = unhexlifybookmarks(
e.callcommand(
b'listkeys',
{
b'namespace': b'bookmarks',
},
).result()
)
r = comparebookmarks(repo, remotemarks, repo._bookmarks)
addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
incomings = []
if ui.debugflag:
getid = lambda id: id
else:
getid = lambda id: id[:12]
if ui.verbose:
def add(b, id, st):
incomings.append(b" %-25s %s %s\n" % (b, getid(id), st))
else:
def add(b, id, st):
incomings.append(b" %-25s %s\n" % (b, getid(id)))
for b, scid, dcid in addsrc:
# i18n: "added" refers to a bookmark
add(b, hex(scid), _(b'added'))
for b, scid, dcid in advsrc:
# i18n: "advanced" refers to a bookmark
add(b, hex(scid), _(b'advanced'))
for b, scid, dcid in diverge:
# i18n: "diverged" refers to a bookmark
add(b, hex(scid), _(b'diverged'))
for b, scid, dcid in differ:
# i18n: "changed" refers to a bookmark
add(b, hex(scid), _(b'changed'))
if not incomings:
ui.status(_(b"no changed bookmarks found\n"))
return 1
for s in sorted(incomings):
ui.write(s)
return 0
def outgoing(ui, repo, other):
"""Show bookmarks outgoing from repo to other"""
ui.status(_(b"searching for changed bookmarks\n"))
remotemarks = unhexlifybookmarks(other.listkeys(b'bookmarks'))
r = comparebookmarks(repo, repo._bookmarks, remotemarks)
addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
outgoings = []
if ui.debugflag:
getid = lambda id: id
else:
getid = lambda id: id[:12]
if ui.verbose:
def add(b, id, st):
outgoings.append(b" %-25s %s %s\n" % (b, getid(id), st))
else:
def add(b, id, st):
outgoings.append(b" %-25s %s\n" % (b, getid(id)))
for b, scid, dcid in addsrc:
# i18n: "added refers to a bookmark
add(b, hex(scid), _(b'added'))
for b, scid, dcid in adddst:
# i18n: "deleted" refers to a bookmark
add(b, b' ' * 40, _(b'deleted'))
for b, scid, dcid in advsrc:
# i18n: "advanced" refers to a bookmark
add(b, hex(scid), _(b'advanced'))
for b, scid, dcid in diverge:
# i18n: "diverged" refers to a bookmark
add(b, hex(scid), _(b'diverged'))
for b, scid, dcid in differ:
# i18n: "changed" refers to a bookmark
add(b, hex(scid), _(b'changed'))
if not outgoings:
ui.status(_(b"no changed bookmarks found\n"))
return 1
for s in sorted(outgoings):
ui.write(s)
return 0
def summary(repo, peer):
"""Compare bookmarks between repo and other for "hg summary" output
This returns "(# of incoming, # of outgoing)" tuple.
"""
with peer.commandexecutor() as e:
remotemarks = unhexlifybookmarks(
e.callcommand(
b'listkeys',
{
b'namespace': b'bookmarks',
},
).result()
)
r = comparebookmarks(repo, remotemarks, repo._bookmarks)
addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
return (len(addsrc), len(adddst))
def validdest(repo, old, new):
"""Is the new bookmark destination a valid update from the old one"""
repo = repo.unfiltered()
if old == new:
# Old == new -> nothing to update.
return False
elif not old:
# old is nullrev, anything is valid.
# (new != nullrev has been excluded by the previous check)
return True
elif repo.obsstore:
return new.node() in obsutil.foreground(repo, [old.node()])
else:
# still an independent clause as it is lazier (and therefore faster)
return old.isancestorof(new)
def checkformat(repo, mark):
"""return a valid version of a potential bookmark name
Raises an abort error if the bookmark name is not valid.
"""
mark = mark.strip()
if not mark:
raise error.InputError(
_(b"bookmark names cannot consist entirely of whitespace")
)
scmutil.checknewlabel(repo, mark, b'bookmark')
return mark
def delete(repo, tr, names):
"""remove a mark from the bookmark store
Raises an abort error if mark does not exist.
"""
marks = repo._bookmarks
changes = []
for mark in names:
if mark not in marks:
raise error.InputError(_(b"bookmark '%s' does not exist") % mark)
if mark == repo._activebookmark:
deactivate(repo)
changes.append((mark, None))
marks.applychanges(repo, tr, changes)
def rename(repo, tr, old, new, force=False, inactive=False):
"""rename a bookmark from old to new
If force is specified, then the new name can overwrite an existing
bookmark.
If inactive is specified, then do not activate the new bookmark.
Raises an abort error if old is not in the bookmark store.
"""
marks = repo._bookmarks
mark = checkformat(repo, new)
if old not in marks:
raise error.InputError(_(b"bookmark '%s' does not exist") % old)
changes = []
for bm in marks.checkconflict(mark, force):
changes.append((bm, None))
changes.extend([(mark, marks[old]), (old, None)])
marks.applychanges(repo, tr, changes)
if repo._activebookmark == old and not inactive:
activate(repo, mark)
def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False):
"""add a list of bookmarks
If force is specified, then the new name can overwrite an existing
bookmark.
If inactive is specified, then do not activate any bookmark. Otherwise, the
first bookmark is activated.
Raises an abort error if old is not in the bookmark store.
"""
marks = repo._bookmarks
cur = repo[b'.'].node()
newact = None
changes = []
# unhide revs if any
if rev:
repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
ctx = scmutil.revsingle(repo, rev, None)
# bookmarking wdir means creating a bookmark on p1 and activating it
activatenew = not inactive and ctx.rev() is None
if ctx.node() is None:
ctx = ctx.p1()
tgt = ctx.node()
assert tgt
for mark in names:
mark = checkformat(repo, mark)
if newact is None:
newact = mark
if inactive and mark == repo._activebookmark:
deactivate(repo)
continue
for bm in marks.checkconflict(mark, force, tgt):
changes.append((bm, None))
changes.append((mark, tgt))
# nothing changed but for the one deactivated above
if not changes:
return
if ctx.hidden():
repo.ui.warn(_(b"bookmarking hidden changeset %s\n") % ctx.hex()[:12])
if ctx.obsolete():
msg = obsutil._getfilteredreason(repo, ctx.hex()[:12], ctx)
repo.ui.warn(b"(%s)\n" % msg)
marks.applychanges(repo, tr, changes)
if activatenew and cur == marks[newact]:
activate(repo, newact)
elif cur != tgt and newact == repo._activebookmark:
deactivate(repo)
def _printbookmarks(ui, repo, fm, bmarks):
"""private method to print bookmarks
Provides a way for extensions to control how bookmarks are printed (e.g.
prepend or postpend names)
"""
hexfn = fm.hexfunc
if len(bmarks) == 0 and fm.isplain():
ui.status(_(b"no bookmarks set\n"))
for bmark, (n, prefix, label) in sorted(pycompat.iteritems(bmarks)):
fm.startitem()
fm.context(repo=repo)
if not ui.quiet:
fm.plain(b' %s ' % prefix, label=label)
fm.write(b'bookmark', b'%s', bmark, label=label)
pad = b" " * (25 - encoding.colwidth(bmark))
fm.condwrite(
not ui.quiet,
b'rev node',
pad + b' %d:%s',
repo.changelog.rev(n),
hexfn(n),
label=label,
)
fm.data(active=(activebookmarklabel in label))
fm.plain(b'\n')
def printbookmarks(ui, repo, fm, names=None):
"""print bookmarks by the given formatter
Provides a way for extensions to control how bookmarks are printed.
"""
marks = repo._bookmarks
bmarks = {}
for bmark in names or marks:
if bmark not in marks:
raise error.InputError(_(b"bookmark '%s' does not exist") % bmark)
active = repo._activebookmark
if bmark == active:
prefix, label = b'*', activebookmarklabel
else:
prefix, label = b' ', b''
bmarks[bmark] = (marks[bmark], prefix, label)
_printbookmarks(ui, repo, fm, bmarks)
def preparehookargs(name, old, new):
if new is None:
new = b''
if old is None:
old = b''
return {b'bookmark': name, b'node': hex(new), b'oldnode': hex(old)}
```
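The `_diverge` helper above gives divergent bookmarks an `@pathalias` or `@number` suffix. The following is a minimal, self-contained sketch (not Mercurial's implementation) of the numeric-suffix selection it performs:
```python
# Reuse an existing "name@N" entry if it already points at the remote node,
# otherwise pick the first free "@N" suffix; give up after 99 attempts.
def pick_divergent_name(name, localmarks, remotenode):
    for x in range(1, 100):
        candidate = b'%s@%d' % (name, x)
        if candidate not in localmarks or localmarks[candidate] == remotenode:
            return candidate
    return None

marks = {b'feature@1': b'aaaa'}
print(pick_divergent_name(b'feature', marks, b'bbbb'))  # b'feature@2'
print(pick_divergent_name(b'feature', marks, b'aaaa'))  # b'feature@1' (reused)
```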
#### File: site-packages/mercurial/branchmap.py
```python
from __future__ import absolute_import
import struct
from .node import (
bin,
hex,
nullid,
nullrev,
)
from . import (
encoding,
error,
pycompat,
scmutil,
util,
)
from .utils import (
repoviewutil,
stringutil,
)
if pycompat.TYPE_CHECKING:
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
)
assert any(
(
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
)
)
subsettable = repoviewutil.subsettable
calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from
class BranchMapCache(object):
"""mapping of filtered views of repo with their branchcache"""
def __init__(self):
self._per_filter = {}
def __getitem__(self, repo):
self.updatecache(repo)
return self._per_filter[repo.filtername]
def updatecache(self, repo):
"""Update the cache for the given filtered view on a repository"""
# This can trigger updates for the caches for subsets of the filtered
# view, e.g. when there is no cache for this filtered view or the cache
# is stale.
cl = repo.changelog
filtername = repo.filtername
bcache = self._per_filter.get(filtername)
if bcache is None or not bcache.validfor(repo):
# cache object missing or cache object stale? Read from disk
bcache = branchcache.fromfile(repo)
revs = []
if bcache is None:
# no (fresh) cache available anymore, perhaps we can re-use
# the cache for a subset, then extend that to add info on missing
# revisions.
subsetname = subsettable.get(filtername)
if subsetname is not None:
subset = repo.filtered(subsetname)
bcache = self[subset].copy()
extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
revs.extend(r for r in extrarevs if r <= bcache.tiprev)
else:
# nothing to fall back on, start empty.
bcache = branchcache()
revs.extend(cl.revs(start=bcache.tiprev + 1))
if revs:
bcache.update(repo, revs)
assert bcache.validfor(repo), filtername
self._per_filter[repo.filtername] = bcache
def replace(self, repo, remotebranchmap):
"""Replace the branchmap cache for a repo with a branch mapping.
This is likely only called during clone with a branch map from a
remote.
"""
cl = repo.changelog
clrev = cl.rev
clbranchinfo = cl.branchinfo
rbheads = []
closed = set()
for bheads in pycompat.itervalues(remotebranchmap):
rbheads += bheads
for h in bheads:
r = clrev(h)
b, c = clbranchinfo(r)
if c:
closed.add(h)
if rbheads:
rtiprev = max((int(clrev(node)) for node in rbheads))
cache = branchcache(
remotebranchmap,
repo[rtiprev].node(),
rtiprev,
closednodes=closed,
)
        # Try to stick it as low as possible
        # filters above served are unlikely to be fetched from a clone
for candidate in (b'base', b'immutable', b'served'):
rview = repo.filtered(candidate)
if cache.validfor(rview):
self._per_filter[candidate] = cache
cache.write(rview)
return
def clear(self):
self._per_filter.clear()
def _unknownnode(node):
"""raises ValueError when branchcache found a node which does not exists"""
raise ValueError('node %s does not exist' % pycompat.sysstr(hex(node)))
def _branchcachedesc(repo):
if repo.filtername is not None:
return b'branch cache (%s)' % repo.filtername
else:
return b'branch cache'
class branchcache(object):
"""A dict like object that hold branches heads cache.
This cache is used to avoid costly computations to determine all the
branch heads of a repo.
The cache is serialized on disk in the following format:
<tip hex node> <tip rev number> [optional filtered repo hex hash]
<branch head hex node> <open/closed state> <branch name>
<branch head hex node> <open/closed state> <branch name>
...
The first line is used to check if the cache is still valid. If the
branch cache is for a filtered repo view, an optional third hash is
included that hashes the hashes of all filtered revisions.
The open/closed state is represented by a single letter 'o' or 'c'.
This field can be used to avoid changelog reads when determining if a
branch head closes a branch or not.
"""
def __init__(
self,
entries=(),
tipnode=nullid,
tiprev=nullrev,
filteredhash=None,
closednodes=None,
hasnode=None,
):
# type: (Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes, int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
"""hasnode is a function which can be used to verify whether changelog
has a given node or not. If it's not provided, we assume that every node
we have exists in changelog"""
self.tipnode = tipnode
self.tiprev = tiprev
self.filteredhash = filteredhash
# closednodes is a set of nodes that close their branch. If the branch
# cache has been updated, it may contain nodes that are no longer
# heads.
if closednodes is None:
self._closednodes = set()
else:
self._closednodes = closednodes
self._entries = dict(entries)
# whether closed nodes are verified or not
self._closedverified = False
# branches for which nodes are verified
self._verifiedbranches = set()
self._hasnode = hasnode
if self._hasnode is None:
self._hasnode = lambda x: True
def _verifyclosed(self):
""" verify the closed nodes we have """
if self._closedverified:
return
for node in self._closednodes:
if not self._hasnode(node):
_unknownnode(node)
self._closedverified = True
def _verifybranch(self, branch):
""" verify head nodes for the given branch. """
if branch not in self._entries or branch in self._verifiedbranches:
return
for n in self._entries[branch]:
if not self._hasnode(n):
_unknownnode(n)
self._verifiedbranches.add(branch)
def _verifyall(self):
""" verifies nodes of all the branches """
needverification = set(self._entries.keys()) - self._verifiedbranches
for b in needverification:
self._verifybranch(b)
def __iter__(self):
return iter(self._entries)
def __setitem__(self, key, value):
self._entries[key] = value
def __getitem__(self, key):
self._verifybranch(key)
return self._entries[key]
def __contains__(self, key):
self._verifybranch(key)
return key in self._entries
def iteritems(self):
for k, v in pycompat.iteritems(self._entries):
self._verifybranch(k)
yield k, v
items = iteritems
def hasbranch(self, label):
""" checks whether a branch of this name exists or not """
self._verifybranch(label)
return label in self._entries
@classmethod
def fromfile(cls, repo):
f = None
try:
f = repo.cachevfs(cls._filename(repo))
lineiter = iter(f)
cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
last, lrev = cachekey[:2]
last, lrev = bin(last), int(lrev)
filteredhash = None
hasnode = repo.changelog.hasnode
if len(cachekey) > 2:
filteredhash = bin(cachekey[2])
bcache = cls(
tipnode=last,
tiprev=lrev,
filteredhash=filteredhash,
hasnode=hasnode,
)
if not bcache.validfor(repo):
# invalidate the cache
raise ValueError('tip differs')
bcache.load(repo, lineiter)
except (IOError, OSError):
return None
except Exception as inst:
if repo.ui.debugflag:
msg = b'invalid %s: %s\n'
repo.ui.debug(
msg
% (
_branchcachedesc(repo),
pycompat.bytestr(
inst
), # pytype: disable=wrong-arg-types
)
)
bcache = None
finally:
if f:
f.close()
return bcache
def load(self, repo, lineiter):
"""fully loads the branchcache by reading from the file using the line
iterator passed"""
for line in lineiter:
line = line.rstrip(b'\n')
if not line:
continue
node, state, label = line.split(b" ", 2)
if state not in b'oc':
raise ValueError('invalid branch state')
label = encoding.tolocal(label.strip())
node = bin(node)
self._entries.setdefault(label, []).append(node)
if state == b'c':
self._closednodes.add(node)
@staticmethod
def _filename(repo):
"""name of a branchcache file for a given repo or repoview"""
filename = b"branch2"
if repo.filtername:
filename = b'%s-%s' % (filename, repo.filtername)
return filename
def validfor(self, repo):
"""Is the cache content valid regarding a repo
- False when cached tipnode is unknown or if we detect a strip.
- True when cache is up to date or a subset of current repo."""
try:
return (self.tipnode == repo.changelog.node(self.tiprev)) and (
self.filteredhash == scmutil.filteredhash(repo, self.tiprev)
)
except IndexError:
return False
def _branchtip(self, heads):
"""Return tuple with last open head in heads and false,
otherwise return last closed head and true."""
tip = heads[-1]
closed = True
for h in reversed(heads):
if h not in self._closednodes:
tip = h
closed = False
break
return tip, closed
def branchtip(self, branch):
"""Return the tipmost open head on branch head, otherwise return the
tipmost closed head on branch.
Raise KeyError for unknown branch."""
return self._branchtip(self[branch])[0]
def iteropen(self, nodes):
return (n for n in nodes if n not in self._closednodes)
def branchheads(self, branch, closed=False):
self._verifybranch(branch)
heads = self._entries[branch]
if not closed:
heads = list(self.iteropen(heads))
return heads
def iterbranches(self):
for bn, heads in pycompat.iteritems(self):
yield (bn, heads) + self._branchtip(heads)
def iterheads(self):
""" returns all the heads """
self._verifyall()
return pycompat.itervalues(self._entries)
def copy(self):
"""return an deep copy of the branchcache object"""
return type(self)(
self._entries,
self.tipnode,
self.tiprev,
self.filteredhash,
self._closednodes,
)
def write(self, repo):
try:
f = repo.cachevfs(self._filename(repo), b"w", atomictemp=True)
cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
if self.filteredhash is not None:
cachekey.append(hex(self.filteredhash))
f.write(b" ".join(cachekey) + b'\n')
nodecount = 0
for label, nodes in sorted(pycompat.iteritems(self._entries)):
label = encoding.fromlocal(label)
for node in nodes:
nodecount += 1
if node in self._closednodes:
state = b'c'
else:
state = b'o'
f.write(b"%s %s %s\n" % (hex(node), state, label))
f.close()
repo.ui.log(
b'branchcache',
b'wrote %s with %d labels and %d nodes\n',
_branchcachedesc(repo),
len(self._entries),
nodecount,
)
except (IOError, OSError, error.Abort) as inst:
# Abort may be raised by read only opener, so log and continue
repo.ui.debug(
b"couldn't write branch cache: %s\n"
% stringutil.forcebytestr(inst)
)
def update(self, repo, revgen):
"""Given a branchhead cache, self, that may have extra nodes or be
missing heads, and a generator of nodes that are strictly a superset of
heads missing, this function updates self to be correct.
"""
starttime = util.timer()
cl = repo.changelog
# collect new branch entries
newbranches = {}
getbranchinfo = repo.revbranchcache().branchinfo
for r in revgen:
branch, closesbranch = getbranchinfo(r)
newbranches.setdefault(branch, []).append(r)
if closesbranch:
self._closednodes.add(cl.node(r))
# new tip revision which we found after iterating items from new
# branches
ntiprev = self.tiprev
# Delay fetching the topological heads until they are needed.
        # A repository without non-continuous branches can skip this part.
topoheads = None
# If a changeset is visible, its parents must be visible too, so
# use the faster unfiltered parent accessor.
parentrevs = repo.unfiltered().changelog.parentrevs
for branch, newheadrevs in pycompat.iteritems(newbranches):
# For every branch, compute the new branchheads.
# A branchhead is a revision such that no descendant is on
# the same branch.
#
# The branchheads are computed iteratively in revision order.
# This ensures topological order, i.e. parents are processed
# before their children. Ancestors are inclusive here, i.e.
# any revision is an ancestor of itself.
#
# Core observations:
# - The current revision is always a branchhead for the
# repository up to that point.
# - It is the first revision of the branch if and only if
# there was no branchhead before. In that case, it is the
# only branchhead as there are no possible ancestors on
# the same branch.
# - If a parent is on the same branch, a branchhead can
# only be an ancestor of that parent, if it is parent
# itself. Otherwise it would have been removed as ancestor
# of that parent before.
# - Therefore, if all parents are on the same branch, they
# can just be removed from the branchhead set.
# - If one parent is on the same branch and the other is not
# and there was exactly one branchhead known, the existing
# branchhead can only be an ancestor if it is the parent.
# Otherwise it would have been removed as ancestor of
# the parent before. The other parent therefore can't have
# a branchhead as ancestor.
# - In all other cases, the parents on different branches
# could have a branchhead as ancestor. Those parents are
# kept in the "uncertain" set. If all branchheads are also
# topological heads, they can't have descendants and further
# checks can be skipped. Otherwise, the ancestors of the
# "uncertain" set are removed from branchheads.
# This computation is heavy and avoided if at all possible.
bheads = self._entries.setdefault(branch, [])
bheadset = {cl.rev(node) for node in bheads}
uncertain = set()
for newrev in sorted(newheadrevs):
if not bheadset:
bheadset.add(newrev)
continue
parents = [p for p in parentrevs(newrev) if p != nullrev]
samebranch = set()
otherbranch = set()
for p in parents:
if p in bheadset or getbranchinfo(p)[0] == branch:
samebranch.add(p)
else:
otherbranch.add(p)
if otherbranch and not (len(bheadset) == len(samebranch) == 1):
uncertain.update(otherbranch)
bheadset.difference_update(samebranch)
bheadset.add(newrev)
if uncertain:
if topoheads is None:
topoheads = set(cl.headrevs())
if bheadset - topoheads:
floorrev = min(bheadset)
ancestors = set(cl.ancestors(newheadrevs, floorrev))
bheadset -= ancestors
bheadrevs = sorted(bheadset)
self[branch] = [cl.node(rev) for rev in bheadrevs]
tiprev = bheadrevs[-1]
if tiprev > ntiprev:
ntiprev = tiprev
if ntiprev > self.tiprev:
self.tiprev = ntiprev
self.tipnode = cl.node(ntiprev)
if not self.validfor(repo):
# cache key are not valid anymore
self.tipnode = nullid
self.tiprev = nullrev
for heads in self.iterheads():
tiprev = max(cl.rev(node) for node in heads)
if tiprev > self.tiprev:
self.tipnode = cl.node(tiprev)
self.tiprev = tiprev
self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
duration = util.timer() - starttime
repo.ui.log(
b'branchcache',
b'updated %s in %.4f seconds\n',
_branchcachedesc(repo),
duration,
)
self.write(repo)
class remotebranchcache(branchcache):
"""Branchmap info for a remote connection, should not write locally"""
def write(self, repo):
pass
# Revision branch info cache
_rbcversion = b'-v1'
_rbcnames = b'rbc-names' + _rbcversion
_rbcrevs = b'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating a branch close]
_rbcrecfmt = b'>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcnodelen = 4
_rbcbranchidxmask = 0x7FFFFFFF
_rbccloseflag = 0x80000000
class revbranchcache(object):
"""Persistent cache, mapping from revision number to branch name and close.
This is a low level cache, independent of filtering.
Branch names are stored in rbc-names in internal encoding separated by 0.
rbc-names is append-only, and each branch name is only stored once and will
thus have a unique index.
The branch info for each revision is stored in rbc-revs as constant size
records. The whole file is read into memory, but it is only 'parsed' on
demand. The file is usually append-only but will be truncated if repo
modification is detected.
The record for each revision contains the first 4 bytes of the
corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs will thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contain the index of the
    branch and the last bit indicates that it is a branch close commit.
The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
and will grow with it but be 1/8th of its size.
"""
def __init__(self, repo, readonly=True):
assert repo.filtername is None
self._repo = repo
self._names = [] # branch names in local encoding with static index
self._rbcrevs = bytearray()
self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
try:
bndata = repo.cachevfs.read(_rbcnames)
self._rbcsnameslen = len(bndata) # for verification before writing
if bndata:
self._names = [
encoding.tolocal(bn) for bn in bndata.split(b'\0')
]
except (IOError, OSError):
if readonly:
# don't try to use cache - fall back to the slow path
self.branchinfo = self._branchinfo
if self._names:
try:
data = repo.cachevfs.read(_rbcrevs)
self._rbcrevs[:] = data
except (IOError, OSError) as inst:
repo.ui.debug(
b"couldn't read revision branch cache: %s\n"
% stringutil.forcebytestr(inst)
)
# remember number of good records on disk
self._rbcrevslen = min(
len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
)
if self._rbcrevslen == 0:
self._names = []
self._rbcnamescount = len(self._names) # number of names read at
# _rbcsnameslen
def _clear(self):
self._rbcsnameslen = 0
del self._names[:]
self._rbcnamescount = 0
self._rbcrevslen = len(self._repo.changelog)
self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
util.clearcachedproperty(self, b'_namesreverse')
@util.propertycache
def _namesreverse(self):
return {b: r for r, b in enumerate(self._names)}
def branchinfo(self, rev):
"""Return branch name and close flag for rev, using and updating
persistent cache."""
changelog = self._repo.changelog
rbcrevidx = rev * _rbcrecsize
# avoid negative index, changelog.read(nullrev) is fast without cache
if rev == nullrev:
return changelog.branchinfo(rev)
# if requested rev isn't allocated, grow and cache the rev info
if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
return self._branchinfo(rev)
# fast path: extract data from cache, use it if node is matching
reponode = changelog.node(rev)[:_rbcnodelen]
cachenode, branchidx = unpack_from(
_rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx
)
close = bool(branchidx & _rbccloseflag)
if close:
branchidx &= _rbcbranchidxmask
if cachenode == b'\0\0\0\0':
pass
elif cachenode == reponode:
try:
return self._names[branchidx], close
except IndexError:
# recover from invalid reference to unknown branch
self._repo.ui.debug(
b"referenced branch names not found"
b" - rebuilding revision branch cache from scratch\n"
)
self._clear()
else:
# rev/node map has changed, invalidate the cache from here up
self._repo.ui.debug(
b"history modification detected - truncating "
b"revision branch cache to revision %d\n" % rev
)
truncate = rbcrevidx + _rbcrecsize
del self._rbcrevs[truncate:]
self._rbcrevslen = min(self._rbcrevslen, truncate)
# fall back to slow path and make sure it will be written to disk
return self._branchinfo(rev)
def _branchinfo(self, rev):
"""Retrieve branch info from changelog and update _rbcrevs"""
changelog = self._repo.changelog
b, close = changelog.branchinfo(rev)
if b in self._namesreverse:
branchidx = self._namesreverse[b]
else:
branchidx = len(self._names)
self._names.append(b)
self._namesreverse[b] = branchidx
reponode = changelog.node(rev)
if close:
branchidx |= _rbccloseflag
self._setcachedata(rev, reponode, branchidx)
return b, close
def setdata(self, branch, rev, node, close):
"""add new data information to the cache"""
if branch in self._namesreverse:
branchidx = self._namesreverse[branch]
else:
branchidx = len(self._names)
self._names.append(branch)
self._namesreverse[branch] = branchidx
if close:
branchidx |= _rbccloseflag
self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (file does not exist, bad permission, etc.),
# the cache was bypassing itself by setting:
#
# self.branchinfo = self._branchinfo
#
# Since we now have data in the cache, we need to drop this bypassing.
if 'branchinfo' in vars(self):
del self.branchinfo
def _setcachedata(self, rev, node, branchidx):
"""Writes the node's branch data to the in-memory cache data."""
if rev == nullrev:
return
rbcrevidx = rev * _rbcrecsize
if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
self._rbcrevs.extend(
b'\0'
* (len(self._repo.changelog) * _rbcrecsize - len(self._rbcrevs))
)
pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
self._rbcrevslen = min(self._rbcrevslen, rev)
tr = self._repo.currenttransaction()
if tr:
tr.addfinalize(b'write-revbranchcache', self.write)
def write(self, tr=None):
"""Save branch cache if it is dirty."""
repo = self._repo
wlock = None
step = b''
try:
# write the new names
if self._rbcnamescount < len(self._names):
wlock = repo.wlock(wait=False)
step = b' names'
self._writenames(repo)
# write the new revs
start = self._rbcrevslen * _rbcrecsize
if start != len(self._rbcrevs):
step = b''
if wlock is None:
wlock = repo.wlock(wait=False)
self._writerevs(repo, start)
except (IOError, OSError, error.Abort, error.LockError) as inst:
repo.ui.debug(
b"couldn't write revision branch cache%s: %s\n"
% (step, stringutil.forcebytestr(inst))
)
finally:
if wlock is not None:
wlock.release()
def _writenames(self, repo):
""" write the new branch names to revbranchcache """
if self._rbcnamescount != 0:
f = repo.cachevfs.open(_rbcnames, b'ab')
if f.tell() == self._rbcsnameslen:
f.write(b'\0')
else:
f.close()
repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
self._rbcnamescount = 0
self._rbcrevslen = 0
if self._rbcnamescount == 0:
# before rewriting names, make sure references are removed
repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
f = repo.cachevfs.open(_rbcnames, b'wb')
f.write(
b'\0'.join(
encoding.fromlocal(b)
for b in self._names[self._rbcnamescount :]
)
)
self._rbcsnameslen = f.tell()
f.close()
self._rbcnamescount = len(self._names)
def _writerevs(self, repo, start):
""" write the new revs to revbranchcache """
revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
with repo.cachevfs.open(_rbcrevs, b'ab') as f:
if f.tell() != start:
repo.ui.debug(
b"truncating cache/%s to %d\n" % (_rbcrevs, start)
)
f.seek(start)
if f.tell() != start:
start = 0
f.seek(start)
f.truncate()
end = revs * _rbcrecsize
f.write(self._rbcrevs[start:end])
self._rbcrevslen = revs
```
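The `revbranchcache` docstring above describes the fixed-size `rbc-revs` records. Below is a minimal standalone sketch of that record layout, assuming only the `>4sI` format and the 0x80000000 close flag defined by the module constants; the helper names are hypothetical, not Mercurial APIs.
```python
import struct

RECFMT = b'>4sI'          # 4-byte node-hash prefix + 4-byte branch-name index
CLOSEFLAG = 0x80000000    # high bit marks a branch-closing commit
IDXMASK = 0x7FFFFFFF

def pack_record(nodeprefix, branchidx, closes):
    # Set the close bit when the revision closes its branch.
    if closes:
        branchidx |= CLOSEFLAG
    return struct.pack(RECFMT, nodeprefix, branchidx)

def unpack_record(record):
    nodeprefix, raw = struct.unpack(RECFMT, record)
    return nodeprefix, raw & IDXMASK, bool(raw & CLOSEFLAG)

rec = pack_record(b'\xde\xad\xbe\xef', 3, closes=True)
assert len(rec) == 8
assert unpack_record(rec) == (b'\xde\xad\xbe\xef', 3, True)
```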
#### File: site-packages/mercurial/copies.py
```python
from __future__ import absolute_import
import collections
import os
from .i18n import _
from .node import (
nullid,
nullrev,
)
from . import (
match as matchmod,
pathutil,
policy,
pycompat,
util,
)
from .utils import stringutil
from .revlogutils import (
flagutil,
sidedata as sidedatamod,
)
rustmod = policy.importrust("copy_tracing")
def _filter(src, dst, t):
"""filters out invalid copies after chaining"""
# When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid')
# with copies in 'b' (from 'mid' to 'dst'), we can get the different cases
# in the following table (not including trivial cases). For example, case 6
# is where a file existed in 'src' and remained under that name in 'mid' and
# then was renamed between 'mid' and 'dst'.
#
# case src mid dst result
# 1 x y - -
# 2 x y y x->y
# 3 x y x -
# 4 x y z x->z
# 5 - x y -
# 6 x x y x->y
#
# _chain() takes care of chaining the copies in 'a' and 'b', but it
# cannot tell the difference between cases 1 and 2, between 3 and 4, or
# between 5 and 6, so it includes all cases in its result.
# Cases 1, 3, and 5 are then removed by _filter().
for k, v in list(t.items()):
# remove copies from files that didn't exist
if v not in src:
del t[k]
# remove criss-crossed copies
elif k in src and v in dst:
del t[k]
# remove copies to files that were then removed
elif k not in dst:
del t[k]
def _chain(prefix, suffix):
"""chain two sets of copies 'prefix' and 'suffix'"""
result = prefix.copy()
for key, value in pycompat.iteritems(suffix):
result[key] = prefix.get(value, value)
return result
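# Illustrative sketch (not part of upstream Mercurial): chaining copies across
# two changesets and then filtering the result.
#
#     # src -> mid renamed a to b, mid -> dst renamed b to c
#     prefix = {b'b': b'a'}
#     suffix = {b'c': b'b'}
#     t = _chain(prefix, suffix)   # {b'b': b'a', b'c': b'a'}
#     _filter(src, dst, t)         # then drops b'b' if it no longer exists in dst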
def _tracefile(fctx, am, basemf):
"""return file context that is the ancestor of fctx present in ancestor
manifest am
    Note: we used to try and stop after a given limit, however checking if that
    limit is reached turned out to be very expensive, so we are better off
    disabling that feature.
for f in fctx.ancestors():
path = f.path()
if am.get(path, None) == f.filenode():
return path
if basemf and basemf.get(path, None) == f.filenode():
return path
def _dirstatecopies(repo, match=None):
ds = repo.dirstate
c = ds.copies().copy()
for k in list(c):
if ds[k] not in b'anm' or (match and not match(k)):
del c[k]
return c
def _computeforwardmissing(a, b, match=None):
"""Computes which files are in b but not a.
This is its own function so extensions can easily wrap this call to see what
files _forwardcopies is about to process.
"""
ma = a.manifest()
mb = b.manifest()
return mb.filesnotin(ma, match=match)
def usechangesetcentricalgo(repo):
"""Checks if we should use changeset-centric copy algorithms"""
if repo.filecopiesmode == b'changeset-sidedata':
return True
readfrom = repo.ui.config(b'experimental', b'copies.read-from')
changesetsource = (b'changeset-only', b'compatibility')
return readfrom in changesetsource
def _committedforwardcopies(a, b, base, match):
"""Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
# files might have to be traced back to the fctx parent of the last
# one-side-only changeset, but not further back than that
repo = a._repo
if usechangesetcentricalgo(repo):
return _changesetforwardcopies(a, b, match)
debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
dbg = repo.ui.debug
if debug:
dbg(b'debug.copies: looking into rename from %s to %s\n' % (a, b))
am = a.manifest()
basemf = None if base is None else base.manifest()
# find where new files came from
# we currently don't try to find where old files went, too expensive
# this means we can miss a case like 'hg rm b; hg cp a b'
cm = {}
# Computing the forward missing is quite expensive on large manifests, since
# it compares the entire manifests. We can optimize it in the common use
# case of computing what copies are in a commit versus its parent (like
# during a rebase or histedit). Note, we exclude merge commits from this
# optimization, since the ctx.files() for a merge commit is not correct for
# this comparison.
forwardmissingmatch = match
if b.p1() == a and b.p2().node() == nullid:
filesmatcher = matchmod.exact(b.files())
forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
if debug:
dbg(b'debug.copies: missing files to search: %d\n' % len(missing))
for f in sorted(missing):
if debug:
dbg(b'debug.copies: tracing file: %s\n' % f)
fctx = b[f]
fctx._ancestrycontext = ancestrycontext
if debug:
start = util.timer()
opath = _tracefile(fctx, am, basemf)
if opath:
if debug:
dbg(b'debug.copies: rename of: %s\n' % opath)
cm[f] = opath
if debug:
dbg(
b'debug.copies: time: %f seconds\n'
% (util.timer() - start)
)
return cm
def _revinfo_getter(repo, match):
"""returns a function that returns the following data given a <rev>"
* p1: revision number of first parent
* p2: revision number of first parent
* changes: a ChangingFiles object
"""
cl = repo.changelog
parents = cl.parentrevs
flags = cl.flags
HASCOPIESINFO = flagutil.REVIDX_HASCOPIESINFO
changelogrevision = cl.changelogrevision
if rustmod is not None:
def revinfo(rev):
p1, p2 = parents(rev)
if flags(rev) & HASCOPIESINFO:
raw = changelogrevision(rev)._sidedata.get(sidedatamod.SD_FILES)
else:
raw = None
return (p1, p2, raw)
else:
def revinfo(rev):
p1, p2 = parents(rev)
if flags(rev) & HASCOPIESINFO:
changes = changelogrevision(rev).changes
else:
changes = None
return (p1, p2, changes)
return revinfo
def cached_is_ancestor(is_ancestor):
"""return a cached version of is_ancestor"""
cache = {}
def _is_ancestor(anc, desc):
if anc > desc:
return False
elif anc == desc:
return True
key = (anc, desc)
ret = cache.get(key)
if ret is None:
ret = cache[key] = is_ancestor(anc, desc)
return ret
return _is_ancestor
def _changesetforwardcopies(a, b, match):
if a.rev() in (nullrev, b.rev()):
return {}
repo = a.repo().unfiltered()
children = {}
cl = repo.changelog
isancestor = cl.isancestorrev
    # To track a rename from "A" to B, we need to gather all parent → children
    # edges that are contained in `::B` but not in `::A`.
    #
    #
    # To do so, we need to gather all revisions exclusive¹ to "B" (ie¹: `::b -
    # ::a`) and also all the "root points", ie the parents of the exclusive set
    # that belong to ::a. These are exactly all the revisions needed to express
    # the parent → children edges we need to combine.
    #
    # [1] actually, we need to gather all the edges within `(::a)::b`, ie:
    # excluding paths that lead to roots that are not ancestors of `a`. We
    # keep this out of the explanation because it is hard enough without this special case.
parents = cl._uncheckedparentrevs
graph_roots = (nullrev, nullrev)
ancestors = cl.ancestors([a.rev()], inclusive=True)
revs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
roots = set()
has_graph_roots = False
# iterate over `only(B, A)`
for r in revs:
ps = parents(r)
if ps == graph_roots:
has_graph_roots = True
else:
p1, p2 = ps
# find all the "root points" (see larger comment above)
if p1 != nullrev and p1 in ancestors:
roots.add(p1)
if p2 != nullrev and p2 in ancestors:
roots.add(p2)
if not roots:
# no common revision to track copies from
return {}
if has_graph_roots:
        # this deals with the special case mentioned in the [1] footnote. We
        # must filter out revisions that lead to non-common graphroots.
roots = list(roots)
m = min(roots)
h = [b.rev()]
roots_to_head = cl.reachableroots(m, h, roots, includepath=True)
roots_to_head = set(roots_to_head)
revs = [r for r in revs if r in roots_to_head]
if repo.filecopiesmode == b'changeset-sidedata':
        # When using side-data, we will process the edges "from" the children.
        # We iterate over the children, gathering previously collected data for
        # the parents. To know when the parents' data is no longer necessary, we
        # keep a counter of how many children each revision has.
        #
        # An interesting property of `children_count` is that it only contains
        # revisions that will be relevant for an edge of the graph. So if a
        # child has a parent not in `children_count`, that edge should not be
        # processed.
children_count = dict((r, 0) for r in roots)
for r in revs:
for p in cl.parentrevs(r):
if p == nullrev:
continue
children_count[r] = 0
if p in children_count:
children_count[p] += 1
revinfo = _revinfo_getter(repo, match)
return _combine_changeset_copies(
revs, children_count, b.rev(), revinfo, match, isancestor
)
else:
# When not using side-data, we will process the edges "from" the parent.
# so we need a full mapping of the parent -> children relation.
children = dict((r, []) for r in roots)
for r in revs:
for p in cl.parentrevs(r):
if p == nullrev:
continue
children[r] = []
if p in children:
children[p].append(r)
x = revs.pop()
assert x == b.rev()
revs.extend(roots)
revs.sort()
revinfo = _revinfo_getter_extra(repo)
return _combine_changeset_copies_extra(
revs, children, b.rev(), revinfo, match, isancestor
)
def _combine_changeset_copies(
revs, children_count, targetrev, revinfo, match, isancestor
):
"""combine the copies information for each item of iterrevs
revs: sorted iterable of revision to visit
children_count: a {parent: <number-of-relevant-children>} mapping.
targetrev: the final copies destination revision (not in iterrevs)
revinfo(rev): a function that return (p1, p2, p1copies, p2copies, removed)
match: a matcher
It returns the aggregated copies information for `targetrev`.
"""
alwaysmatch = match.always()
if rustmod is not None:
final_copies = rustmod.combine_changeset_copies(
list(revs), children_count, targetrev, revinfo, isancestor
)
else:
isancestor = cached_is_ancestor(isancestor)
all_copies = {}
# iterate over all the "children" side of copy tracing "edge"
for current_rev in revs:
p1, p2, changes = revinfo(current_rev)
current_copies = None
# iterate over all parents to chain the existing data with the
# data from the parent → child edge.
for parent, parent_rev in ((1, p1), (2, p2)):
if parent_rev == nullrev:
continue
remaining_children = children_count.get(parent_rev)
if remaining_children is None:
continue
remaining_children -= 1
children_count[parent_rev] = remaining_children
if remaining_children:
copies = all_copies.get(parent_rev, None)
else:
copies = all_copies.pop(parent_rev, None)
if copies is None:
# this is a root
newcopies = copies = {}
elif remaining_children:
newcopies = copies.copy()
else:
newcopies = copies
# chain the data in the edge with the existing data
if changes is not None:
childcopies = {}
if parent == 1:
childcopies = changes.copied_from_p1
elif parent == 2:
childcopies = changes.copied_from_p2
if childcopies:
newcopies = copies.copy()
for dest, source in pycompat.iteritems(childcopies):
prev = copies.get(source)
if prev is not None and prev[1] is not None:
source = prev[1]
newcopies[dest] = (current_rev, source)
assert newcopies is not copies
if changes.removed:
for f in changes.removed:
if f in newcopies:
if newcopies is copies:
# copy on write to avoid affecting potential other
# branches. when there are no other branches, this
# could be avoided.
newcopies = copies.copy()
newcopies[f] = (current_rev, None)
# check potential need to combine the data from another parent (for
# that child). See comment below for details.
if current_copies is None:
current_copies = newcopies
else:
# we are the second parent to work on c, we need to merge our
# work with the other.
#
                    # In case of conflict, parent 1 takes precedence over parent 2.
                    # This is an arbitrary choice made anew when implementing
                    # changeset based copies. It was made without regard to
                    # potential filelog-related behavior.
assert parent == 2
current_copies = _merge_copies_dict(
newcopies, current_copies, isancestor, changes
)
all_copies[current_rev] = current_copies
# filter out internal details and return a {dest: source mapping}
final_copies = {}
for dest, (tt, source) in all_copies[targetrev].items():
if source is not None:
final_copies[dest] = source
if not alwaysmatch:
for filename in list(final_copies.keys()):
if not match(filename):
del final_copies[filename]
return final_copies
# constant to decide which side to pick with _merge_copies_dict
PICK_MINOR = 0
PICK_MAJOR = 1
PICK_EITHER = 2
def _merge_copies_dict(minor, major, isancestor, changes):
"""merge two copies-mapping together, minor and major
In case of conflict, value from "major" will be picked.
- `isancestors(low_rev, high_rev)`: callable return True if `low_rev` is an
ancestors of `high_rev`,
- `ismerged(path)`: callable return True if `path` have been merged in the
current revision,
return the resulting dict (in practice, the "minor" object, updated)
"""
for dest, value in major.items():
other = minor.get(dest)
if other is None:
minor[dest] = value
else:
pick = _compare_values(changes, isancestor, dest, other, value)
if pick == PICK_MAJOR:
minor[dest] = value
return minor
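# Illustrative sketch (not part of upstream Mercurial) of the precedence applied
# by _merge_copies_dict()/_compare_values(): with no salvage or merge
# information, the value recorded on the non-ancestor "major" side wins.
#
#     minor = {b'dst': (5, b'old-src')}
#     major = {b'dst': (7, b'new-src')}
#     # assuming rev 7 is not an ancestor of rev 5 and changes is None:
#     _merge_copies_dict(minor, major, isancestor, changes=None)
#     # minor is updated in place to {b'dst': (7, b'new-src')}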
def _compare_values(changes, isancestor, dest, minor, major):
"""compare two value within a _merge_copies_dict loop iteration"""
major_tt, major_value = major
minor_tt, minor_value = minor
# evacuate some simple case first:
if major_tt == minor_tt:
# if it comes from the same revision it must be the same value
assert major_value == minor_value
return PICK_EITHER
elif major[1] == minor[1]:
return PICK_EITHER
# actual merging needed: content from "major" wins, unless it is older than
# the branch point or there is a merge
elif changes is not None and major[1] is None and dest in changes.salvaged:
return PICK_MINOR
elif changes is not None and minor[1] is None and dest in changes.salvaged:
return PICK_MAJOR
elif changes is not None and dest in changes.merged:
return PICK_MAJOR
elif not isancestor(major_tt, minor_tt):
if major[1] is not None:
return PICK_MAJOR
elif isancestor(minor_tt, major_tt):
return PICK_MAJOR
return PICK_MINOR
def _revinfo_getter_extra(repo):
"""return a function that return multiple data given a <rev>"i
* p1: revision number of first parent
* p2: revision number of first parent
* p1copies: mapping of copies from p1
* p2copies: mapping of copies from p2
* removed: a list of removed files
* ismerged: a callback to know if file was merged in that revision
"""
cl = repo.changelog
parents = cl.parentrevs
def get_ismerged(rev):
ctx = repo[rev]
def ismerged(path):
if path not in ctx.files():
return False
fctx = ctx[path]
parents = fctx._filelog.parents(fctx._filenode)
nb_parents = 0
for n in parents:
if n != nullid:
nb_parents += 1
return nb_parents >= 2
return ismerged
def revinfo(rev):
p1, p2 = parents(rev)
ctx = repo[rev]
p1copies, p2copies = ctx._copies
removed = ctx.filesremoved()
return p1, p2, p1copies, p2copies, removed, get_ismerged(rev)
return revinfo
def _combine_changeset_copies_extra(
revs, children, targetrev, revinfo, match, isancestor
):
"""version of `_combine_changeset_copies` that works with the Google
specific "extra" based storage for copy information"""
all_copies = {}
alwaysmatch = match.always()
for r in revs:
copies = all_copies.pop(r, None)
if copies is None:
# this is a root
copies = {}
for i, c in enumerate(children[r]):
p1, p2, p1copies, p2copies, removed, ismerged = revinfo(c)
if r == p1:
parent = 1
childcopies = p1copies
else:
assert r == p2
parent = 2
childcopies = p2copies
if not alwaysmatch:
childcopies = {
dst: src for dst, src in childcopies.items() if match(dst)
}
newcopies = copies
if childcopies:
newcopies = copies.copy()
for dest, source in pycompat.iteritems(childcopies):
prev = copies.get(source)
if prev is not None and prev[1] is not None:
source = prev[1]
newcopies[dest] = (c, source)
assert newcopies is not copies
for f in removed:
if f in newcopies:
if newcopies is copies:
# copy on write to avoid affecting potential other
# branches. when there are no other branches, this
# could be avoided.
newcopies = copies.copy()
newcopies[f] = (c, None)
othercopies = all_copies.get(c)
if othercopies is None:
all_copies[c] = newcopies
else:
# we are the second parent to work on c, we need to merge our
# work with the other.
#
                # In case of conflict, parent 1 takes precedence over parent 2.
                # This is an arbitrary choice made anew when implementing
                # changeset based copies. It was made without regard to
                # potential filelog-related behavior.
if parent == 1:
_merge_copies_dict_extra(
othercopies, newcopies, isancestor, ismerged
)
else:
_merge_copies_dict_extra(
newcopies, othercopies, isancestor, ismerged
)
all_copies[c] = newcopies
final_copies = {}
for dest, (tt, source) in all_copies[targetrev].items():
if source is not None:
final_copies[dest] = source
return final_copies
def _merge_copies_dict_extra(minor, major, isancestor, ismerged):
"""version of `_merge_copies_dict` that works with the Google
specific "extra" based storage for copy information"""
for dest, value in major.items():
other = minor.get(dest)
if other is None:
minor[dest] = value
else:
new_tt = value[0]
other_tt = other[0]
if value[1] == other[1]:
continue
# content from "major" wins, unless it is older
# than the branch point or there is a merge
if (
new_tt == other_tt
or not isancestor(new_tt, other_tt)
or ismerged(dest)
):
minor[dest] = value
def _forwardcopies(a, b, base=None, match=None):
"""find {dst@b: src@a} copy mapping where a is an ancestor of b"""
if base is None:
base = a
match = a.repo().narrowmatch(match)
# check for working copy
if b.rev() is None:
cm = _committedforwardcopies(a, b.p1(), base, match)
# combine copies from dirstate if necessary
copies = _chain(cm, _dirstatecopies(b._repo, match))
else:
copies = _committedforwardcopies(a, b, base, match)
return copies
def _backwardrenames(a, b, match):
if a._repo.ui.config(b'experimental', b'copytrace') == b'off':
return {}
# Even though we're not taking copies into account, 1:n rename situations
# can still exist (e.g. hg cp a b; hg mv a c). In those cases we
# arbitrarily pick one of the renames.
# We don't want to pass in "match" here, since that would filter
# the destination by it. Since we're reversing the copies, we want
# to filter the source instead.
f = _forwardcopies(b, a)
r = {}
for k, v in sorted(pycompat.iteritems(f)):
if match and not match(v):
continue
# remove copies
if v in a:
continue
r[v] = k
return r
def pathcopies(x, y, match=None):
"""find {dst@y: src@x} copy mapping for directed compare"""
repo = x._repo
debug = repo.ui.debugflag and repo.ui.configbool(b'devel', b'debug.copies')
if debug:
repo.ui.debug(
b'debug.copies: searching copies from %s to %s\n' % (x, y)
)
if x == y or not x or not y:
return {}
if y.rev() is None and x == y.p1():
if debug:
repo.ui.debug(b'debug.copies: search mode: dirstate\n')
# short-circuit to avoid issues with merge states
return _dirstatecopies(repo, match)
a = y.ancestor(x)
if a == x:
if debug:
repo.ui.debug(b'debug.copies: search mode: forward\n')
copies = _forwardcopies(x, y, match=match)
elif a == y:
if debug:
repo.ui.debug(b'debug.copies: search mode: backward\n')
copies = _backwardrenames(x, y, match=match)
else:
if debug:
repo.ui.debug(b'debug.copies: search mode: combined\n')
base = None
if a.rev() != nullrev:
base = x
copies = _chain(
_backwardrenames(x, a, match=match),
_forwardcopies(a, y, base, match=match),
)
_filter(x, y, copies)
return copies
def mergecopies(repo, c1, c2, base):
"""
Finds moves and copies between context c1 and c2 that are relevant for
merging. 'base' will be used as the merge base.
Copytracing is used in commands like rebase, merge, unshelve, etc to merge
files that were moved/ copied in one merge parent and modified in another.
For example:
o ---> 4 another commit
|
| o ---> 3 commit that modifies a.txt
| /
o / ---> 2 commit that moves a.txt to b.txt
|/
o ---> 1 merge base
If we try to rebase revision 3 on revision 4, since there is no a.txt in
    revision 4, and if the user has copytrace disabled, we print the following
    message:
    ```other changed <file> which local deleted```
    Returns a tuple where:
    "branch_copies" is an instance of branch_copies.
"diverge" is a mapping of source name -> list of destination names
for divergent renames.
This function calls different copytracing algorithms based on config.
"""
# avoid silly behavior for update from empty dir
if not c1 or not c2 or c1 == c2:
return branch_copies(), branch_copies(), {}
narrowmatch = c1.repo().narrowmatch()
# avoid silly behavior for parent -> working dir
if c2.node() is None and c1.node() == repo.dirstate.p1():
return (
branch_copies(_dirstatecopies(repo, narrowmatch)),
branch_copies(),
{},
)
copytracing = repo.ui.config(b'experimental', b'copytrace')
if stringutil.parsebool(copytracing) is False:
        # stringutil.parsebool() returns None when it is unable to parse the
        # value, so we treat copy tracing as enabled in such cases
return branch_copies(), branch_copies(), {}
if usechangesetcentricalgo(repo):
# The heuristics don't make sense when we need changeset-centric algos
return _fullcopytracing(repo, c1, c2, base)
# Copy trace disabling is explicitly below the node == p1 logic above
# because the logic above is required for a simple copy to be kept across a
# rebase.
if copytracing == b'heuristics':
# Do full copytracing if only non-public revisions are involved as
# that will be fast enough and will also cover the copies which could
# be missed by heuristics
if _isfullcopytraceable(repo, c1, base):
return _fullcopytracing(repo, c1, c2, base)
return _heuristicscopytracing(repo, c1, c2, base)
else:
return _fullcopytracing(repo, c1, c2, base)
def _isfullcopytraceable(repo, c1, base):
"""Checks that if base, source and destination are all no-public branches,
if yes let's use the full copytrace algorithm for increased capabilities
since it will be fast enough.
`experimental.copytrace.sourcecommitlimit` can be used to set a limit for
number of changesets from c1 to base such that if number of changesets are
more than the limit, full copytracing algorithm won't be used.
"""
if c1.rev() is None:
c1 = c1.p1()
if c1.mutable() and base.mutable():
sourcecommitlimit = repo.ui.configint(
b'experimental', b'copytrace.sourcecommitlimit'
)
commits = len(repo.revs(b'%d::%d', base.rev(), c1.rev()))
return commits < sourcecommitlimit
return False
def _checksinglesidecopies(
src, dsts1, m1, m2, mb, c2, base, copy, renamedelete
):
if src not in m2:
# deleted on side 2
if src not in m1:
# renamed on side 1, deleted on side 2
renamedelete[src] = dsts1
elif src not in mb:
# Work around the "short-circuit to avoid issues with merge states"
# thing in pathcopies(): pathcopies(x, y) can return a copy where the
# destination doesn't exist in y.
pass
elif mb[src] != m2[src] and not _related(c2[src], base[src]):
return
elif mb[src] != m2[src] or mb.flags(src) != m2.flags(src):
# modified on side 2
for dst in dsts1:
copy[dst] = src
class branch_copies(object):
"""Information about copies made on one side of a merge/graft.
"copy" is a mapping from destination name -> source name,
where source is in c1 and destination is in c2 or vice-versa.
"movewithdir" is a mapping from source name -> destination name,
where the file at source present in one context but not the other
needs to be moved to destination by the merge process, because the
other context moved the directory it is in.
"renamedelete" is a mapping of source name -> list of destination
names for files deleted in c1 that were renamed in c2 or vice-versa.
"dirmove" is a mapping of detected source dir -> destination dir renames.
This is needed for handling changes to new files previously grafted into
renamed directories.
"""
def __init__(
self, copy=None, renamedelete=None, dirmove=None, movewithdir=None
):
self.copy = {} if copy is None else copy
self.renamedelete = {} if renamedelete is None else renamedelete
self.dirmove = {} if dirmove is None else dirmove
self.movewithdir = {} if movewithdir is None else movewithdir
def __repr__(self):
return '<branch_copies\n copy=%r\n renamedelete=%r\n dirmove=%r\n movewithdir=%r\n>' % (
self.copy,
self.renamedelete,
self.dirmove,
self.movewithdir,
)
def _fullcopytracing(repo, c1, c2, base):
"""The full copytracing algorithm which finds all the new files that were
added from merge base up to the top commit and for each file it checks if
this file was copied from another file.
This is pretty slow when a lot of changesets are involved but will track all
the copies.
"""
m1 = c1.manifest()
m2 = c2.manifest()
mb = base.manifest()
copies1 = pathcopies(base, c1)
copies2 = pathcopies(base, c2)
if not (copies1 or copies2):
return branch_copies(), branch_copies(), {}
inversecopies1 = {}
inversecopies2 = {}
for dst, src in copies1.items():
inversecopies1.setdefault(src, []).append(dst)
for dst, src in copies2.items():
inversecopies2.setdefault(src, []).append(dst)
copy1 = {}
copy2 = {}
diverge = {}
renamedelete1 = {}
renamedelete2 = {}
allsources = set(inversecopies1) | set(inversecopies2)
for src in allsources:
dsts1 = inversecopies1.get(src)
dsts2 = inversecopies2.get(src)
if dsts1 and dsts2:
# copied/renamed on both sides
if src not in m1 and src not in m2:
# renamed on both sides
dsts1 = set(dsts1)
dsts2 = set(dsts2)
# If there's some overlap in the rename destinations, we
# consider it not divergent. For example, if side 1 copies 'a'
# to 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c'
# and 'd' and deletes 'a'.
if dsts1 & dsts2:
for dst in dsts1 & dsts2:
copy1[dst] = src
copy2[dst] = src
else:
diverge[src] = sorted(dsts1 | dsts2)
elif src in m1 and src in m2:
# copied on both sides
dsts1 = set(dsts1)
dsts2 = set(dsts2)
for dst in dsts1 & dsts2:
copy1[dst] = src
copy2[dst] = src
# TODO: Handle cases where it was renamed on one side and copied
# on the other side
elif dsts1:
# copied/renamed only on side 1
_checksinglesidecopies(
src, dsts1, m1, m2, mb, c2, base, copy1, renamedelete1
)
elif dsts2:
# copied/renamed only on side 2
_checksinglesidecopies(
src, dsts2, m2, m1, mb, c1, base, copy2, renamedelete2
)
# find interesting file sets from manifests
cache = []
def _get_addedfiles(idx):
if not cache:
addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
u1 = sorted(addedinm1 - addedinm2)
u2 = sorted(addedinm2 - addedinm1)
cache.extend((u1, u2))
return cache[idx]
u1fn = lambda: _get_addedfiles(0)
u2fn = lambda: _get_addedfiles(1)
if repo.ui.debugflag:
u1 = u1fn()
u2 = u2fn()
header = b" unmatched files in %s"
if u1:
repo.ui.debug(
b"%s:\n %s\n" % (header % b'local', b"\n ".join(u1))
)
if u2:
repo.ui.debug(
b"%s:\n %s\n" % (header % b'other', b"\n ".join(u2))
)
renamedeleteset = set()
divergeset = set()
for dsts in diverge.values():
divergeset.update(dsts)
for dsts in renamedelete1.values():
renamedeleteset.update(dsts)
for dsts in renamedelete2.values():
renamedeleteset.update(dsts)
repo.ui.debug(
b" all copies found (* = to merge, ! = divergent, "
b"% = renamed and deleted):\n"
)
for side, copies in ((b"local", copies1), (b"remote", copies2)):
if not copies:
continue
repo.ui.debug(b" on %s side:\n" % side)
for f in sorted(copies):
note = b""
if f in copy1 or f in copy2:
note += b"*"
if f in divergeset:
note += b"!"
if f in renamedeleteset:
note += b"%"
repo.ui.debug(
b" src: '%s' -> dst: '%s' %s\n" % (copies[f], f, note)
)
del renamedeleteset
del divergeset
repo.ui.debug(b" checking for directory renames\n")
dirmove1, movewithdir2 = _dir_renames(repo, c1, copy1, copies1, u2fn)
dirmove2, movewithdir1 = _dir_renames(repo, c2, copy2, copies2, u1fn)
branch_copies1 = branch_copies(copy1, renamedelete1, dirmove1, movewithdir1)
branch_copies2 = branch_copies(copy2, renamedelete2, dirmove2, movewithdir2)
return branch_copies1, branch_copies2, diverge
def _dir_renames(repo, ctx, copy, fullcopy, addedfilesfn):
"""Finds moved directories and files that should move with them.
ctx: the context for one of the sides
copy: files copied on the same side (as ctx)
fullcopy: files copied on the same side (as ctx), including those that
merge.manifestmerge() won't care about
addedfilesfn: function returning added files on the other side (compared to
ctx)
"""
# generate a directory move map
invalid = set()
dirmove = {}
# examine each file copy for a potential directory move, which is
# when all the files in a directory are moved to a new directory
for dst, src in pycompat.iteritems(fullcopy):
dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
if dsrc in invalid:
# already seen to be uninteresting
continue
elif ctx.hasdir(dsrc) and ctx.hasdir(ddst):
# directory wasn't entirely moved locally
invalid.add(dsrc)
elif dsrc in dirmove and dirmove[dsrc] != ddst:
# files from the same directory moved to two different places
invalid.add(dsrc)
else:
# looks good so far
dirmove[dsrc] = ddst
for i in invalid:
if i in dirmove:
del dirmove[i]
del invalid
if not dirmove:
return {}, {}
dirmove = {k + b"/": v + b"/" for k, v in pycompat.iteritems(dirmove)}
for d in dirmove:
repo.ui.debug(
b" discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])
)
movewithdir = {}
# check unaccounted nonoverlapping files against directory moves
for f in addedfilesfn():
if f not in fullcopy:
for d in dirmove:
if f.startswith(d):
# new file added in a directory that was moved, move it
df = dirmove[d] + f[len(d) :]
if df not in copy:
movewithdir[f] = df
repo.ui.debug(
b" pending file src: '%s' -> dst: '%s'\n"
% (f, df)
)
break
return dirmove, movewithdir
def _heuristicscopytracing(repo, c1, c2, base):
"""Fast copytracing using filename heuristics
Assumes that moves or renames are of the following two types:
1) Inside a directory only (same directory name but different filenames)
2) Move from one directory to another
(same filenames but different directory names)
Works only when there are no merge commits in the "source branch".
The source branch is the set of commits from base up to c2, not including base.
If a merge is involved, it falls back to _fullcopytracing().
Can be used by setting the following config:
[experimental]
copytrace = heuristics
In some cases the copy/move candidates found by the heuristics can be very
numerous, which makes the algorithm slow. The number of candidates to check
can be limited with the config option
`experimental.copytrace.movecandidateslimit`, which defaults to 100.
"""
if c1.rev() is None:
c1 = c1.p1()
if c2.rev() is None:
c2 = c2.p1()
changedfiles = set()
m1 = c1.manifest()
if not repo.revs(b'%d::%d', base.rev(), c2.rev()):
# If base is not in c2 branch, we switch to fullcopytracing
repo.ui.debug(
b"switching to full copytracing as base is not "
b"an ancestor of c2\n"
)
return _fullcopytracing(repo, c1, c2, base)
ctx = c2
while ctx != base:
if len(ctx.parents()) == 2:
# To keep things simple let's not handle merges
repo.ui.debug(b"switching to full copytracing because of merges\n")
return _fullcopytracing(repo, c1, c2, base)
changedfiles.update(ctx.files())
ctx = ctx.p1()
copies2 = {}
cp = _forwardcopies(base, c2)
for dst, src in pycompat.iteritems(cp):
if src in m1:
copies2[dst] = src
# file is missing if it isn't present in the destination, but is present in
# the base and present in the source.
# Presence in the base is important to exclude added files, presence in the
# source is important to exclude removed files.
filt = lambda f: f not in m1 and f in base and f in c2
missingfiles = [f for f in changedfiles if filt(f)]
copies1 = {}
if missingfiles:
basenametofilename = collections.defaultdict(list)
dirnametofilename = collections.defaultdict(list)
for f in m1.filesnotin(base.manifest()):
basename = os.path.basename(f)
dirname = os.path.dirname(f)
basenametofilename[basename].append(f)
dirnametofilename[dirname].append(f)
for f in missingfiles:
basename = os.path.basename(f)
dirname = os.path.dirname(f)
samebasename = basenametofilename[basename]
samedirname = dirnametofilename[dirname]
movecandidates = samebasename + samedirname
# f is guaranteed to be present in c2, that's why
# c2.filectx(f) won't fail
f2 = c2.filectx(f)
# we can have a lot of candidates, which can slow down the heuristics;
# a config value limits the number of candidate moves to check
maxcandidates = repo.ui.configint(
b'experimental', b'copytrace.movecandidateslimit'
)
if len(movecandidates) > maxcandidates:
repo.ui.status(
_(
b"skipping copytracing for '%s', more "
b"candidates than the limit: %d\n"
)
% (f, len(movecandidates))
)
continue
for candidate in movecandidates:
f1 = c1.filectx(candidate)
if _related(f1, f2):
# if there are a few related copies then we'll merge
# changes into all of them. This matches the behaviour
# of upstream copytracing
copies1[candidate] = f
return branch_copies(copies1), branch_copies(copies2), {}
def _related(f1, f2):
"""return True if f1 and f2 filectx have a common ancestor
Walk back to common ancestor to see if the two files originate
from the same file. Since workingfilectx's rev() is None it messes
up the integer comparison logic, hence the pre-step check for
None (f1 and f2 can only be workingfilectx's initially).
"""
if f1 == f2:
return True # a match
g1, g2 = f1.ancestors(), f2.ancestors()
try:
f1r, f2r = f1.linkrev(), f2.linkrev()
if f1r is None:
f1 = next(g1)
if f2r is None:
f2 = next(g2)
while True:
f1r, f2r = f1.linkrev(), f2.linkrev()
if f1r > f2r:
f1 = next(g1)
elif f2r > f1r:
f2 = next(g2)
else: # f1 and f2 point to files in the same linkrev
return f1 == f2 # true if they point to the same file
except StopIteration:
return False
def graftcopies(wctx, ctx, base):
"""reproduce copies between base and ctx in the wctx
Unlike mergecopies(), this function will only consider copies between base
and ctx; it will ignore copies between base and wctx. Also unlike
mergecopies(), this function will apply copies to the working copy (instead
of just returning information about the copies). That makes it cheaper
(especially in the common case of base==ctx.p1()) and useful also when
experimental.copytrace=off.
merge.update() will have already marked most copies, but it will only
mark copies if it thinks the source files are related (see
merge._related()). It will also not mark copies if the file wasn't modified
on the local side. This function adds the copies that were "missed"
by merge.update().
"""
new_copies = pathcopies(base, ctx)
_filter(wctx.p1(), wctx, new_copies)
for dst, src in pycompat.iteritems(new_copies):
wctx[dst].markcopied(src)
```
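The divergence bookkeeping in `_fullcopytracing` above is easier to see with plain dictionaries. Below is a minimal standalone sketch of just the "renamed on both sides" branch; the function name `classify_renames`, the `in_m1`/`in_m2` sets standing in for manifests, and the toy file names are all illustrative, and nothing here uses Mercurial's real context or manifest objects.
```python
# Toy illustration of the "renamed on both sides" logic from _fullcopytracing:
# given per-side dst -> src copy maps, overlapping rename destinations are
# accepted, while disjoint destinations are reported as divergent.
def classify_renames(copies1, copies2, in_m1, in_m2):
    inverse1, inverse2 = {}, {}
    for dst, src in copies1.items():
        inverse1.setdefault(src, []).append(dst)
    for dst, src in copies2.items():
        inverse2.setdefault(src, []).append(dst)

    copy1, copy2, diverge = {}, {}, {}
    for src in set(inverse1) | set(inverse2):
        dsts1 = set(inverse1.get(src, ()))
        dsts2 = set(inverse2.get(src, ()))
        if dsts1 and dsts2 and src not in in_m1 and src not in in_m2:
            # renamed on both sides: shared destinations win, otherwise divergent
            if dsts1 & dsts2:
                for dst in dsts1 & dsts2:
                    copy1[dst] = src
                    copy2[dst] = src
            else:
                diverge[src] = sorted(dsts1 | dsts2)
    return copy1, copy2, diverge


if __name__ == '__main__':
    # side 1 renames a -> b, side 2 renames a -> c: divergent
    print(classify_renames({'b': 'a'}, {'c': 'a'}, in_m1=set(), in_m2=set()))
    # side 1 copies a -> {b, c}, side 2 copies a -> {c, d}, both delete a:
    # the shared destination c is kept and no divergence is reported
    print(classify_renames({'b': 'a', 'c': 'a'}, {'c': 'a', 'd': 'a'},
                           in_m1=set(), in_m2=set()))
```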
#### File: site-packages/mercurial/diffutil.py
```python
from __future__ import absolute_import
from .i18n import _
from . import (
mdiff,
pycompat,
)
def diffallopts(
ui, opts=None, untrusted=False, section=b'diff', configprefix=b''
):
'''return diffopts with all features supported and parsed'''
return difffeatureopts(
ui,
opts=opts,
untrusted=untrusted,
section=section,
git=True,
whitespace=True,
formatchanging=True,
configprefix=configprefix,
)
def difffeatureopts(
ui,
opts=None,
untrusted=False,
section=b'diff',
git=False,
whitespace=False,
formatchanging=False,
configprefix=b'',
):
"""return diffopts with only opted-in features parsed
Features:
- git: git-style diffs
- whitespace: whitespace options like ignoreblanklines and ignorews
- formatchanging: options that will likely break or cause correctness issues
with most diff parsers
"""
def get(key, name=None, getter=ui.configbool, forceplain=None):
if opts:
v = opts.get(key)
# diffopts flags are either None-default (which is passed
# through unchanged, so we can identify unset values), or
# some other falsey default (e.g. --unified, which defaults
# to an empty string). We only want to override the config
# entries from hgrc with command line values if they
# appear to have been set, which is any truthy value,
# True, or False.
if v or isinstance(v, bool):
return v
if forceplain is not None and ui.plain():
return forceplain
return getter(
section, configprefix + (name or key), untrusted=untrusted
)
# core options, expected to be understood by every diff parser
buildopts = {
b'nodates': get(b'nodates'),
b'showfunc': get(b'show_function', b'showfunc'),
b'context': get(b'unified', getter=ui.config),
}
buildopts[b'xdiff'] = ui.configbool(b'experimental', b'xdiff')
if git:
buildopts[b'git'] = get(b'git')
# since this is in the experimental section, we need to call
# ui.configbool directly
buildopts[b'showsimilarity'] = ui.configbool(
b'experimental', b'extendedheader.similarity'
)
# need to inspect the ui object instead of using get() since we want to
# test for an int
hconf = ui.config(b'experimental', b'extendedheader.index')
if hconf is not None:
hlen = None
try:
# the hash config could be an integer (for length of hash) or a
# word (e.g. short, full, none)
hlen = int(hconf)
if hlen < 0 or hlen > 40:
msg = _(b"invalid length for extendedheader.index: '%d'\n")
ui.warn(msg % hlen)
except ValueError:
# default value
if hconf == b'short' or hconf == b'':
hlen = 12
elif hconf == b'full':
hlen = 40
elif hconf != b'none':
msg = _(b"invalid value for extendedheader.index: '%s'\n")
ui.warn(msg % hconf)
finally:
buildopts[b'index'] = hlen
if whitespace:
buildopts[b'ignorews'] = get(b'ignore_all_space', b'ignorews')
buildopts[b'ignorewsamount'] = get(
b'ignore_space_change', b'ignorewsamount'
)
buildopts[b'ignoreblanklines'] = get(
b'ignore_blank_lines', b'ignoreblanklines'
)
buildopts[b'ignorewseol'] = get(b'ignore_space_at_eol', b'ignorewseol')
if formatchanging:
buildopts[b'text'] = opts and opts.get(b'text')
binary = None if opts is None else opts.get(b'binary')
buildopts[b'nobinary'] = (
not binary
if binary is not None
else get(b'nobinary', forceplain=False)
)
buildopts[b'noprefix'] = get(b'noprefix', forceplain=False)
buildopts[b'worddiff'] = get(
b'word_diff', b'word-diff', forceplain=False
)
return mdiff.diffopts(**pycompat.strkwargs(buildopts))
```
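The nested `get()` helper in `difffeatureopts` above encodes a precedence rule: a command-line value wins only when it looks explicitly set (truthy, or a real boolean), otherwise the config value applies. The snippet below restates that rule with a plain dict standing in for the `ui` config; `resolve_opt` and the sample keys are hypothetical names used only for illustration.
```python
# Sketch of the option-resolution order used in difffeatureopts: a command-line
# value is honoured when it looks explicitly set (truthy, or a real boolean),
# otherwise the configuration value is used.
def resolve_opt(opts, config, key, default=None):
    if opts:
        v = opts.get(key)
        if v or isinstance(v, bool):
            return v
    return config.get(key, default)


if __name__ == '__main__':
    config = {'showfunc': True, 'unified': '3'}
    # flag not passed on the command line (None) -> config wins
    print(resolve_opt({'showfunc': None}, config, 'showfunc'))   # True
    # explicit False on the command line overrides the config
    print(resolve_opt({'showfunc': False}, config, 'showfunc'))  # False
    # empty string counts as "unset" for non-boolean options like --unified
    print(resolve_opt({'unified': ''}, config, 'unified'))       # '3'
```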
#### File: site-packages/mercurial/formatter.py
```python
from __future__ import absolute_import, print_function
import contextlib
import itertools
import os
from .i18n import _
from .node import (
hex,
short,
)
from .thirdparty import attr
from . import (
error,
pycompat,
templatefilters,
templatekw,
templater,
templateutil,
util,
)
from .utils import (
cborutil,
dateutil,
stringutil,
)
pickle = util.pickle
def isprintable(obj):
"""Check if the given object can be directly passed in to formatter's
write() and data() functions
Returns False if the object is unsupported or must be pre-processed by
formatdate(), formatdict(), or formatlist().
"""
return isinstance(obj, (type(None), bool, int, pycompat.long, float, bytes))
class _nullconverter(object):
'''convert non-primitive data types to be processed by formatter'''
# set to True if context object should be stored as item
storecontext = False
@staticmethod
def wrapnested(data, tmpl, sep):
'''wrap nested data by appropriate type'''
return data
@staticmethod
def formatdate(date, fmt):
'''convert date tuple to appropriate format'''
# timestamp can be float, but the canonical form should be int
ts, tz = date
return (int(ts), tz)
@staticmethod
def formatdict(data, key, value, fmt, sep):
'''convert dict or key-value pairs to appropriate dict format'''
# use plain dict instead of util.sortdict so that data can be
# serialized as a builtin dict in pickle output
return dict(data)
@staticmethod
def formatlist(data, name, fmt, sep):
'''convert iterable to appropriate list format'''
return list(data)
class baseformatter(object):
def __init__(self, ui, topic, opts, converter):
self._ui = ui
self._topic = topic
self._opts = opts
self._converter = converter
self._item = None
# function to convert node to string suitable for this output
self.hexfunc = hex
def __enter__(self):
return self
def __exit__(self, exctype, excvalue, traceback):
if exctype is None:
self.end()
def _showitem(self):
'''show a formatted item once all data is collected'''
def startitem(self):
'''begin an item in the format list'''
if self._item is not None:
self._showitem()
self._item = {}
def formatdate(self, date, fmt=b'%a %b %d %H:%M:%S %Y %1%2'):
'''convert date tuple to appropriate format'''
return self._converter.formatdate(date, fmt)
def formatdict(self, data, key=b'key', value=b'value', fmt=None, sep=b' '):
'''convert dict or key-value pairs to appropriate dict format'''
return self._converter.formatdict(data, key, value, fmt, sep)
def formatlist(self, data, name, fmt=None, sep=b' '):
'''convert iterable to appropriate list format'''
# name is a mandatory argument for now, but it could be optional if
# we had a default template keyword, e.g. {item}
return self._converter.formatlist(data, name, fmt, sep)
def context(self, **ctxs):
'''insert context objects to be used to render template keywords'''
ctxs = pycompat.byteskwargs(ctxs)
assert all(k in {b'repo', b'ctx', b'fctx'} for k in ctxs)
if self._converter.storecontext:
# populate missing resources in fctx -> ctx -> repo order
if b'fctx' in ctxs and b'ctx' not in ctxs:
ctxs[b'ctx'] = ctxs[b'fctx'].changectx()
if b'ctx' in ctxs and b'repo' not in ctxs:
ctxs[b'repo'] = ctxs[b'ctx'].repo()
self._item.update(ctxs)
def datahint(self):
'''set of field names to be referenced'''
return set()
def data(self, **data):
'''insert data into item that's not shown in default output'''
data = pycompat.byteskwargs(data)
self._item.update(data)
def write(self, fields, deftext, *fielddata, **opts):
'''do default text output while assigning data to item'''
fieldkeys = fields.split()
assert len(fieldkeys) == len(fielddata), (fieldkeys, fielddata)
self._item.update(zip(fieldkeys, fielddata))
def condwrite(self, cond, fields, deftext, *fielddata, **opts):
'''do conditional write (primarily for plain formatter)'''
fieldkeys = fields.split()
assert len(fieldkeys) == len(fielddata)
self._item.update(zip(fieldkeys, fielddata))
def plain(self, text, **opts):
'''show raw text for non-templated mode'''
def isplain(self):
'''check for plain formatter usage'''
return False
def nested(self, field, tmpl=None, sep=b''):
'''sub formatter to store nested data in the specified field'''
data = []
self._item[field] = self._converter.wrapnested(data, tmpl, sep)
return _nestedformatter(self._ui, self._converter, data)
def end(self):
'''end output for the formatter'''
if self._item is not None:
self._showitem()
def nullformatter(ui, topic, opts):
'''formatter that prints nothing'''
return baseformatter(ui, topic, opts, converter=_nullconverter)
class _nestedformatter(baseformatter):
'''build sub items and store them in the parent formatter'''
def __init__(self, ui, converter, data):
baseformatter.__init__(
self, ui, topic=b'', opts={}, converter=converter
)
self._data = data
def _showitem(self):
self._data.append(self._item)
def _iteritems(data):
'''iterate key-value pairs in stable order'''
if isinstance(data, dict):
return sorted(pycompat.iteritems(data))
return data
class _plainconverter(object):
'''convert non-primitive data types to text'''
storecontext = False
@staticmethod
def wrapnested(data, tmpl, sep):
raise error.ProgrammingError(b'plainformatter should never be nested')
@staticmethod
def formatdate(date, fmt):
'''stringify date tuple in the given format'''
return dateutil.datestr(date, fmt)
@staticmethod
def formatdict(data, key, value, fmt, sep):
'''stringify key-value pairs separated by sep'''
prefmt = pycompat.identity
if fmt is None:
fmt = b'%s=%s'
prefmt = pycompat.bytestr
return sep.join(
fmt % (prefmt(k), prefmt(v)) for k, v in _iteritems(data)
)
@staticmethod
def formatlist(data, name, fmt, sep):
'''stringify iterable separated by sep'''
prefmt = pycompat.identity
if fmt is None:
fmt = b'%s'
prefmt = pycompat.bytestr
return sep.join(fmt % prefmt(e) for e in data)
class plainformatter(baseformatter):
'''the default text output scheme'''
def __init__(self, ui, out, topic, opts):
baseformatter.__init__(self, ui, topic, opts, _plainconverter)
if ui.debugflag:
self.hexfunc = hex
else:
self.hexfunc = short
if ui is out:
self._write = ui.write
else:
self._write = lambda s, **opts: out.write(s)
def startitem(self):
pass
def data(self, **data):
pass
def write(self, fields, deftext, *fielddata, **opts):
self._write(deftext % fielddata, **opts)
def condwrite(self, cond, fields, deftext, *fielddata, **opts):
'''do conditional write'''
if cond:
self._write(deftext % fielddata, **opts)
def plain(self, text, **opts):
self._write(text, **opts)
def isplain(self):
return True
def nested(self, field, tmpl=None, sep=b''):
# nested data will be directly written to ui
return self
def end(self):
pass
class debugformatter(baseformatter):
def __init__(self, ui, out, topic, opts):
baseformatter.__init__(self, ui, topic, opts, _nullconverter)
self._out = out
self._out.write(b"%s = [\n" % self._topic)
def _showitem(self):
self._out.write(
b' %s,\n' % stringutil.pprint(self._item, indent=4, level=1)
)
def end(self):
baseformatter.end(self)
self._out.write(b"]\n")
class pickleformatter(baseformatter):
def __init__(self, ui, out, topic, opts):
baseformatter.__init__(self, ui, topic, opts, _nullconverter)
self._out = out
self._data = []
def _showitem(self):
self._data.append(self._item)
def end(self):
baseformatter.end(self)
self._out.write(pickle.dumps(self._data))
class cborformatter(baseformatter):
'''serialize items as an indefinite-length CBOR array'''
def __init__(self, ui, out, topic, opts):
baseformatter.__init__(self, ui, topic, opts, _nullconverter)
self._out = out
self._out.write(cborutil.BEGIN_INDEFINITE_ARRAY)
def _showitem(self):
self._out.write(b''.join(cborutil.streamencode(self._item)))
def end(self):
baseformatter.end(self)
self._out.write(cborutil.BREAK)
class jsonformatter(baseformatter):
def __init__(self, ui, out, topic, opts):
baseformatter.__init__(self, ui, topic, opts, _nullconverter)
self._out = out
self._out.write(b"[")
self._first = True
def _showitem(self):
if self._first:
self._first = False
else:
self._out.write(b",")
self._out.write(b"\n {\n")
first = True
for k, v in sorted(self._item.items()):
if first:
first = False
else:
self._out.write(b",\n")
u = templatefilters.json(v, paranoid=False)
self._out.write(b' "%s": %s' % (k, u))
self._out.write(b"\n }")
def end(self):
baseformatter.end(self)
self._out.write(b"\n]\n")
class _templateconverter(object):
'''convert non-primitive data types to be processed by templater'''
storecontext = True
@staticmethod
def wrapnested(data, tmpl, sep):
'''wrap nested data by templatable type'''
return templateutil.mappinglist(data, tmpl=tmpl, sep=sep)
@staticmethod
def formatdate(date, fmt):
'''return date tuple'''
return templateutil.date(date)
@staticmethod
def formatdict(data, key, value, fmt, sep):
'''build object that can be evaluated as either plain string or dict'''
data = util.sortdict(_iteritems(data))
def f():
yield _plainconverter.formatdict(data, key, value, fmt, sep)
return templateutil.hybriddict(
data, key=key, value=value, fmt=fmt, gen=f
)
@staticmethod
def formatlist(data, name, fmt, sep):
'''build object that can be evaluated as either plain string or list'''
data = list(data)
def f():
yield _plainconverter.formatlist(data, name, fmt, sep)
return templateutil.hybridlist(data, name=name, fmt=fmt, gen=f)
class templateformatter(baseformatter):
def __init__(self, ui, out, topic, opts, spec, overridetemplates=None):
baseformatter.__init__(self, ui, topic, opts, _templateconverter)
self._out = out
self._tref = spec.ref
self._t = loadtemplater(
ui,
spec,
defaults=templatekw.keywords,
resources=templateresources(ui),
cache=templatekw.defaulttempl,
)
if overridetemplates:
self._t.cache.update(overridetemplates)
self._parts = templatepartsmap(
spec, self._t, [b'docheader', b'docfooter', b'separator']
)
self._counter = itertools.count()
self._renderitem(b'docheader', {})
def _showitem(self):
item = self._item.copy()
item[b'index'] = index = next(self._counter)
if index > 0:
self._renderitem(b'separator', {})
self._renderitem(self._tref, item)
def _renderitem(self, part, item):
if part not in self._parts:
return
ref = self._parts[part]
# None can't be put in the mapping dict since it means <unset>
for k, v in item.items():
if v is None:
item[k] = templateutil.wrappedvalue(v)
self._out.write(self._t.render(ref, item))
@util.propertycache
def _symbolsused(self):
return self._t.symbolsused(self._tref)
def datahint(self):
'''set of field names to be referenced from the template'''
return self._symbolsused[0]
def end(self):
baseformatter.end(self)
self._renderitem(b'docfooter', {})
@attr.s(frozen=True)
class templatespec(object):
ref = attr.ib()
tmpl = attr.ib()
mapfile = attr.ib()
refargs = attr.ib(default=None)
fp = attr.ib(default=None)
def empty_templatespec():
return templatespec(None, None, None)
def reference_templatespec(ref, refargs=None):
return templatespec(ref, None, None, refargs)
def literal_templatespec(tmpl):
if pycompat.ispy3:
assert not isinstance(tmpl, str), b'tmpl must not be a str'
return templatespec(b'', tmpl, None)
def mapfile_templatespec(topic, mapfile, fp=None):
return templatespec(topic, None, mapfile, fp=fp)
def lookuptemplate(ui, topic, tmpl):
"""Find the template matching the given -T/--template spec 'tmpl'
'tmpl' can be any of the following:
- a literal template (e.g. '{rev}')
- a reference to built-in template (i.e. formatter)
- a map-file name or path (e.g. 'changelog')
- a reference to [templates] in config file
- a path to raw template file
A map file defines a stand-alone template environment. If a map file is
selected, all templates defined in the file will be loaded, and the
template matching the given topic will be rendered. Aliases won't be
loaded from user config, but from the map file.
If no map file is selected, all templates in the [templates] section will be
available, as well as aliases in [templatealias].
"""
if not tmpl:
return empty_templatespec()
# looks like a literal template?
if b'{' in tmpl:
return literal_templatespec(tmpl)
# a reference to built-in (formatter) template
if tmpl in {b'cbor', b'json', b'pickle', b'debug'}:
return reference_templatespec(tmpl)
# a function-style reference to built-in template
func, fsep, ftail = tmpl.partition(b'(')
if func in {b'cbor', b'json'} and fsep and ftail.endswith(b')'):
templater.parseexpr(tmpl) # make sure syntax errors are confined
return reference_templatespec(func, refargs=ftail[:-1])
# perhaps a stock style?
if not os.path.split(tmpl)[0]:
(mapname, fp) = templater.try_open_template(
b'map-cmdline.' + tmpl
) or templater.try_open_template(tmpl)
if mapname:
return mapfile_templatespec(topic, mapname, fp)
# perhaps it's a reference to [templates]
if ui.config(b'templates', tmpl):
return reference_templatespec(tmpl)
if tmpl == b'list':
ui.write(_(b"available styles: %s\n") % templater.stylelist())
raise error.Abort(_(b"specify a template"))
# perhaps it's a path to a map or a template
if (b'/' in tmpl or b'\\' in tmpl) and os.path.isfile(tmpl):
# is it a mapfile for a style?
if os.path.basename(tmpl).startswith(b"map-"):
return mapfile_templatespec(topic, os.path.realpath(tmpl))
with util.posixfile(tmpl, b'rb') as f:
tmpl = f.read()
return literal_templatespec(tmpl)
# constant string?
return literal_templatespec(tmpl)
def templatepartsmap(spec, t, partnames):
"""Create a mapping of {part: ref}"""
partsmap = {spec.ref: spec.ref} # initial ref must exist in t
if spec.mapfile:
partsmap.update((p, p) for p in partnames if p in t)
elif spec.ref:
for part in partnames:
ref = b'%s:%s' % (spec.ref, part) # select config sub-section
if ref in t:
partsmap[part] = ref
return partsmap
def loadtemplater(ui, spec, defaults=None, resources=None, cache=None):
"""Create a templater from either a literal template or loading from
a map file"""
assert not (spec.tmpl and spec.mapfile)
if spec.mapfile:
return templater.templater.frommapfile(
spec.mapfile,
spec.fp,
defaults=defaults,
resources=resources,
cache=cache,
)
return maketemplater(
ui, spec.tmpl, defaults=defaults, resources=resources, cache=cache
)
def maketemplater(ui, tmpl, defaults=None, resources=None, cache=None):
"""Create a templater from a string template 'tmpl'"""
aliases = ui.configitems(b'templatealias')
t = templater.templater(
defaults=defaults, resources=resources, cache=cache, aliases=aliases
)
t.cache.update(
(k, templater.unquotestring(v)) for k, v in ui.configitems(b'templates')
)
if tmpl:
t.cache[b''] = tmpl
return t
# marker to denote a resource to be loaded on demand based on mapping values
# (e.g. (ctx, path) -> fctx)
_placeholder = object()
class templateresources(templater.resourcemapper):
"""Resource mapper designed for the default templatekw and function"""
def __init__(self, ui, repo=None):
self._resmap = {
b'cache': {}, # for templatekw/funcs to store reusable data
b'repo': repo,
b'ui': ui,
}
def availablekeys(self, mapping):
return {
k for k in self.knownkeys() if self._getsome(mapping, k) is not None
}
def knownkeys(self):
return {b'cache', b'ctx', b'fctx', b'repo', b'revcache', b'ui'}
def lookup(self, mapping, key):
if key not in self.knownkeys():
return None
v = self._getsome(mapping, key)
if v is _placeholder:
v = mapping[key] = self._loadermap[key](self, mapping)
return v
def populatemap(self, context, origmapping, newmapping):
mapping = {}
if self._hasnodespec(newmapping):
mapping[b'revcache'] = {} # per-ctx cache
if self._hasnodespec(origmapping) and self._hasnodespec(newmapping):
orignode = templateutil.runsymbol(context, origmapping, b'node')
mapping[b'originalnode'] = orignode
# put marker to override 'ctx'/'fctx' in mapping if any, and flag
# its existence to be reported by availablekeys()
if b'ctx' not in newmapping and self._hasliteral(newmapping, b'node'):
mapping[b'ctx'] = _placeholder
if b'fctx' not in newmapping and self._hasliteral(newmapping, b'path'):
mapping[b'fctx'] = _placeholder
return mapping
def _getsome(self, mapping, key):
v = mapping.get(key)
if v is not None:
return v
return self._resmap.get(key)
def _hasliteral(self, mapping, key):
"""Test if a literal value is set or unset in the given mapping"""
return key in mapping and not callable(mapping[key])
def _getliteral(self, mapping, key):
"""Return value of the given name if it is a literal"""
v = mapping.get(key)
if callable(v):
return None
return v
def _hasnodespec(self, mapping):
"""Test if context revision is set or unset in the given mapping"""
return b'node' in mapping or b'ctx' in mapping
def _loadctx(self, mapping):
repo = self._getsome(mapping, b'repo')
node = self._getliteral(mapping, b'node')
if repo is None or node is None:
return
try:
return repo[node]
except error.RepoLookupError:
return None # maybe hidden/non-existent node
def _loadfctx(self, mapping):
ctx = self._getsome(mapping, b'ctx')
path = self._getliteral(mapping, b'path')
if ctx is None or path is None:
return None
try:
return ctx[path]
except error.LookupError:
return None # maybe removed file?
_loadermap = {
b'ctx': _loadctx,
b'fctx': _loadfctx,
}
def _internaltemplateformatter(
ui,
out,
topic,
opts,
spec,
tmpl,
docheader=b'',
docfooter=b'',
separator=b'',
):
"""Build template formatter that handles customizable built-in templates
such as -Tjson(...)"""
templates = {spec.ref: tmpl}
if docheader:
templates[b'%s:docheader' % spec.ref] = docheader
if docfooter:
templates[b'%s:docfooter' % spec.ref] = docfooter
if separator:
templates[b'%s:separator' % spec.ref] = separator
return templateformatter(
ui, out, topic, opts, spec, overridetemplates=templates
)
def formatter(ui, out, topic, opts):
spec = lookuptemplate(ui, topic, opts.get(b'template', b''))
if spec.ref == b"cbor" and spec.refargs is not None:
return _internaltemplateformatter(
ui,
out,
topic,
opts,
spec,
tmpl=b'{dict(%s)|cbor}' % spec.refargs,
docheader=cborutil.BEGIN_INDEFINITE_ARRAY,
docfooter=cborutil.BREAK,
)
elif spec.ref == b"cbor":
return cborformatter(ui, out, topic, opts)
elif spec.ref == b"json" and spec.refargs is not None:
return _internaltemplateformatter(
ui,
out,
topic,
opts,
spec,
tmpl=b'{dict(%s)|json}' % spec.refargs,
docheader=b'[\n ',
docfooter=b'\n]\n',
separator=b',\n ',
)
elif spec.ref == b"json":
return jsonformatter(ui, out, topic, opts)
elif spec.ref == b"pickle":
assert spec.refargs is None, r'function-style not supported'
return pickleformatter(ui, out, topic, opts)
elif spec.ref == b"debug":
assert spec.refargs is None, r'function-style not supported'
return debugformatter(ui, out, topic, opts)
elif spec.ref or spec.tmpl or spec.mapfile:
assert spec.refargs is None, r'function-style not supported'
return templateformatter(ui, out, topic, opts, spec)
# developer config: ui.formatdebug
elif ui.configbool(b'ui', b'formatdebug'):
return debugformatter(ui, out, topic, opts)
# deprecated config: ui.formatjson
elif ui.configbool(b'ui', b'formatjson'):
return jsonformatter(ui, out, topic, opts)
return plainformatter(ui, out, topic, opts)
@contextlib.contextmanager
def openformatter(ui, filename, topic, opts):
"""Create a formatter that writes outputs to the specified file
Must be invoked using the 'with' statement.
"""
with util.posixfile(filename, b'wb') as out:
with formatter(ui, out, topic, opts) as fm:
yield fm
@contextlib.contextmanager
def _neverending(fm):
yield fm
def maybereopen(fm, filename):
"""Create a formatter backed by file if filename specified, else return
the given formatter
Must be invoked using the 'with' statement. This will never call fm.end()
of the given formatter.
"""
if filename:
return openformatter(fm._ui, filename, fm._topic, fm._opts)
else:
return _neverending(fm)
```
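All of the formatter classes above share one calling convention: `startitem()` opens a record, `write()`/`data()` attach fields, and `end()` flushes the result. The toy class below imitates that item-buffering pattern using only the standard library so the flow can be run in isolation; it is a sketch, not Mercurial's actual formatter API.
```python
import json
import sys

# Minimal stand-in for the startitem()/write()/end() protocol used by the
# Mercurial formatters above: items are buffered as dicts and emitted at end().
class ToyJsonFormatter:
    def __init__(self, out):
        self._out = out
        self._items = []
        self._item = None

    def startitem(self):
        # closing the previous item, if any, mirrors baseformatter.startitem()
        if self._item is not None:
            self._items.append(self._item)
        self._item = {}

    def write(self, fields, deftext, *fielddata):
        # like baseformatter.write(): zip space-separated field names with
        # values; deftext is only used by the plain-text formatter
        keys = fields.split()
        assert len(keys) == len(fielddata)
        self._item.update(zip(keys, fielddata))

    def end(self):
        if self._item is not None:
            self._items.append(self._item)
        self._out.write(json.dumps(self._items, indent=1) + "\n")


if __name__ == '__main__':
    fm = ToyJsonFormatter(sys.stdout)
    for rev, desc in [(0, "initial commit"), (1, "add feature")]:
        fm.startitem()
        fm.write("rev desc", "%d: %s\n", rev, desc)
    fm.end()
```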
#### File: mercurial/hgweb/__init__.py
```python
from __future__ import absolute_import
import os
from ..i18n import _
from .. import (
error,
pycompat,
)
from ..utils import procutil
from . import (
hgweb_mod,
hgwebdir_mod,
server,
)
def hgweb(config, name=None, baseui=None):
"""create an hgweb wsgi object
config can be one of:
- repo object (single repo view)
- path to repo (single repo view)
- path to config file (multi-repo view)
- dict of virtual:real pairs (multi-repo view)
- list of virtual:real tuples (multi-repo view)
"""
if isinstance(config, pycompat.unicode):
raise error.ProgrammingError(
b'Mercurial only supports encoded strings: %r' % config
)
if (
(isinstance(config, bytes) and not os.path.isdir(config))
or isinstance(config, dict)
or isinstance(config, list)
):
# create a multi-dir interface
return hgwebdir_mod.hgwebdir(config, baseui=baseui)
return hgweb_mod.hgweb(config, name=name, baseui=baseui)
def hgwebdir(config, baseui=None):
return hgwebdir_mod.hgwebdir(config, baseui=baseui)
class httpservice(object):
def __init__(self, ui, app, opts):
self.ui = ui
self.app = app
self.opts = opts
def init(self):
procutil.setsignalhandler()
self.httpd = server.create_server(self.ui, self.app)
if (
self.opts[b'port']
and not self.ui.verbose
and not self.opts[b'print_url']
):
return
if self.httpd.prefix:
prefix = self.httpd.prefix.strip(b'/') + b'/'
else:
prefix = b''
port = ':%d' % self.httpd.port
if port == ':80':
port = ''
bindaddr = self.httpd.addr
if bindaddr == '0.0.0.0':
bindaddr = '*'
elif ':' in bindaddr: # IPv6
bindaddr = '[%s]' % bindaddr
fqaddr = self.httpd.fqaddr
if ':' in fqaddr:
fqaddr = '[%s]' % fqaddr
url = b'http://%s%s/%s' % (
pycompat.sysbytes(fqaddr),
pycompat.sysbytes(port),
prefix,
)
if self.opts[b'print_url']:
self.ui.write(b'%s\n' % url)
else:
if self.opts[b'port']:
write = self.ui.status
else:
write = self.ui.write
write(
_(b'listening at %s (bound to %s:%d)\n')
% (url, pycompat.sysbytes(bindaddr), self.httpd.port)
)
self.ui.flush() # avoid buffering of status message
def run(self):
self.httpd.serve_forever()
def createapp(baseui, repo, webconf):
if webconf:
return hgwebdir_mod.hgwebdir(webconf, baseui=baseui)
else:
if not repo:
raise error.RepoError(
_(b"there is no Mercurial repository here (.hg not found)")
)
return hgweb_mod.hgweb(repo, baseui=baseui)
```
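The `hgweb()` factory above picks the single-repo or multi-repo application purely from the shape of `config`. The standalone sketch below restates that dispatch rule with strings standing in for the real WSGI applications; `pick_app` and the sample paths are hypothetical and only illustrate which input types land where.
```python
import os

# Re-statement of hgweb()'s dispatch: bytes that point at an existing directory
# mean a single repository, while a config-file path, dict, or list selects the
# multi-repository (hgwebdir) interface.
def pick_app(config):
    if isinstance(config, str):
        raise TypeError('only encoded (bytes) strings are accepted: %r' % config)
    if (isinstance(config, bytes) and not os.path.isdir(config)) or isinstance(
        config, (dict, list)
    ):
        return 'hgwebdir (multi-repo view)'
    return 'hgweb (single-repo view)'


if __name__ == '__main__':
    print(pick_app(b'/tmp'))                       # existing directory -> single repo
    print(pick_app(b'/etc/mercurial/hgweb.conf'))  # config file path -> multi repo
    print(pick_app({b'virtual': b'/real/path'}))   # mapping -> multi repo
    print(pick_app([(b'virtual', b'/real/path')])) # list of pairs -> multi repo
```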
#### File: mercurial/hgweb/webutil.py
```python
from __future__ import absolute_import
import copy
import difflib
import os
import re
from ..i18n import _
from ..node import hex, nullid, short
from ..pycompat import setattr
from .common import (
ErrorResponse,
HTTP_BAD_REQUEST,
HTTP_NOT_FOUND,
paritygen,
)
from .. import (
context,
diffutil,
error,
match,
mdiff,
obsutil,
patch,
pathutil,
pycompat,
scmutil,
templatefilters,
templatekw,
templateutil,
ui as uimod,
util,
)
from ..utils import stringutil
archivespecs = util.sortdict(
(
(b'zip', (b'application/zip', b'zip', b'.zip', None)),
(b'gz', (b'application/x-gzip', b'tgz', b'.tar.gz', None)),
(b'bz2', (b'application/x-bzip2', b'tbz2', b'.tar.bz2', None)),
)
)
def archivelist(ui, nodeid, url=None):
allowed = ui.configlist(b'web', b'allow-archive', untrusted=True)
archives = []
for typ, spec in pycompat.iteritems(archivespecs):
if typ in allowed or ui.configbool(
b'web', b'allow' + typ, untrusted=True
):
archives.append(
{
b'type': typ,
b'extension': spec[2],
b'node': nodeid,
b'url': url,
}
)
return templateutil.mappinglist(archives)
def up(p):
if p[0:1] != b"/":
p = b"/" + p
if p[-1:] == b"/":
p = p[:-1]
up = os.path.dirname(p)
if up == b"/":
return b"/"
return up + b"/"
def _navseq(step, firststep=None):
if firststep:
yield firststep
if firststep >= 20 and firststep <= 40:
firststep = 50
yield firststep
assert step > 0
assert firststep > 0
while step <= firststep:
step *= 10
while True:
yield 1 * step
yield 3 * step
step *= 10
class revnav(object):
def __init__(self, repo):
"""Navigation generation object
:repo: repo object we generate nav for
"""
# used for hex generation
self._revlog = repo.changelog
def __nonzero__(self):
"""return True if any revision to navigate over"""
return self._first() is not None
__bool__ = __nonzero__
def _first(self):
"""return the minimum non-filtered changeset or None"""
try:
return next(iter(self._revlog))
except StopIteration:
return None
def hex(self, rev):
return hex(self._revlog.node(rev))
def gen(self, pos, pagelen, limit):
"""computes label and revision id for navigation link
:pos: is the revision relative to which we generate navigation.
:pagelen: the size of each navigation page
:limit: how far shall we link
The return is:
- a single element mappinglist
- containing a dictionary with a `before` and `after` key
- values are dictionaries with `label` and `node` keys
"""
if not self:
# empty repo
return templateutil.mappinglist(
[
{
b'before': templateutil.mappinglist([]),
b'after': templateutil.mappinglist([]),
},
]
)
targets = []
for f in _navseq(1, pagelen):
if f > limit:
break
targets.append(pos + f)
targets.append(pos - f)
targets.sort()
first = self._first()
navbefore = [{b'label': b'(%i)' % first, b'node': self.hex(first)}]
navafter = []
for rev in targets:
if rev not in self._revlog:
continue
if pos < rev < limit:
navafter.append(
{b'label': b'+%d' % abs(rev - pos), b'node': self.hex(rev)}
)
if 0 < rev < pos:
navbefore.append(
{b'label': b'-%d' % abs(rev - pos), b'node': self.hex(rev)}
)
navafter.append({b'label': b'tip', b'node': b'tip'})
# TODO: maybe this can be a scalar object supporting tomap()
return templateutil.mappinglist(
[
{
b'before': templateutil.mappinglist(navbefore),
b'after': templateutil.mappinglist(navafter),
},
]
)
class filerevnav(revnav):
def __init__(self, repo, path):
"""Navigation generation object
:repo: repo object we generate nav for
:path: path of the file we generate nav for
"""
# used for iteration
self._changelog = repo.unfiltered().changelog
# used for hex generation
self._revlog = repo.file(path)
def hex(self, rev):
return hex(self._changelog.node(self._revlog.linkrev(rev)))
# TODO: maybe this can be a wrapper class for changectx/filectx list, which
# yields {'ctx': ctx}
def _ctxsgen(context, ctxs):
for s in ctxs:
d = {
b'node': s.hex(),
b'rev': s.rev(),
b'user': s.user(),
b'date': s.date(),
b'description': s.description(),
b'branch': s.branch(),
}
if util.safehasattr(s, b'path'):
d[b'file'] = s.path()
yield d
def _siblings(siblings=None, hiderev=None):
if siblings is None:
siblings = []
siblings = [s for s in siblings if s.node() != nullid]
if len(siblings) == 1 and siblings[0].rev() == hiderev:
siblings = []
return templateutil.mappinggenerator(_ctxsgen, args=(siblings,))
def difffeatureopts(req, ui, section):
diffopts = diffutil.difffeatureopts(
ui, untrusted=True, section=section, whitespace=True
)
for k in (
b'ignorews',
b'ignorewsamount',
b'ignorewseol',
b'ignoreblanklines',
):
v = req.qsparams.get(k)
if v is not None:
v = stringutil.parsebool(v)
setattr(diffopts, k, v if v is not None else True)
return diffopts
def annotate(req, fctx, ui):
diffopts = difffeatureopts(req, ui, b'annotate')
return fctx.annotate(follow=True, diffopts=diffopts)
def parents(ctx, hide=None):
if isinstance(ctx, context.basefilectx):
introrev = ctx.introrev()
if ctx.changectx().rev() != introrev:
return _siblings([ctx.repo()[introrev]], hide)
return _siblings(ctx.parents(), hide)
def children(ctx, hide=None):
return _siblings(ctx.children(), hide)
def renamelink(fctx):
r = fctx.renamed()
if r:
return templateutil.mappinglist([{b'file': r[0], b'node': hex(r[1])}])
return templateutil.mappinglist([])
def nodetagsdict(repo, node):
return templateutil.hybridlist(repo.nodetags(node), name=b'name')
def nodebookmarksdict(repo, node):
return templateutil.hybridlist(repo.nodebookmarks(node), name=b'name')
def nodebranchdict(repo, ctx):
branches = []
branch = ctx.branch()
# If this is an empty repo, ctx.node() == nullid,
# ctx.branch() == 'default'.
try:
branchnode = repo.branchtip(branch)
except error.RepoLookupError:
branchnode = None
if branchnode == ctx.node():
branches.append(branch)
return templateutil.hybridlist(branches, name=b'name')
def nodeinbranch(repo, ctx):
branches = []
branch = ctx.branch()
try:
branchnode = repo.branchtip(branch)
except error.RepoLookupError:
branchnode = None
if branch != b'default' and branchnode != ctx.node():
branches.append(branch)
return templateutil.hybridlist(branches, name=b'name')
def nodebranchnodefault(ctx):
branches = []
branch = ctx.branch()
if branch != b'default':
branches.append(branch)
return templateutil.hybridlist(branches, name=b'name')
def _nodenamesgen(context, f, node, name):
for t in f(node):
yield {name: t}
def showtag(repo, t1, node=nullid):
args = (repo.nodetags, node, b'tag')
return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
def showbookmark(repo, t1, node=nullid):
args = (repo.nodebookmarks, node, b'bookmark')
return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
def branchentries(repo, stripecount, limit=0):
tips = []
heads = repo.heads()
parity = paritygen(stripecount)
sortkey = lambda item: (not item[1], item[0].rev())
def entries(context):
count = 0
if not tips:
for tag, hs, tip, closed in repo.branchmap().iterbranches():
tips.append((repo[tip], closed))
for ctx, closed in sorted(tips, key=sortkey, reverse=True):
if limit > 0 and count >= limit:
return
count += 1
if closed:
status = b'closed'
elif ctx.node() not in heads:
status = b'inactive'
else:
status = b'open'
yield {
b'parity': next(parity),
b'branch': ctx.branch(),
b'status': status,
b'node': ctx.hex(),
b'date': ctx.date(),
}
return templateutil.mappinggenerator(entries)
def cleanpath(repo, path):
path = path.lstrip(b'/')
auditor = pathutil.pathauditor(repo.root, realfs=False)
return pathutil.canonpath(repo.root, b'', path, auditor=auditor)
def changectx(repo, req):
changeid = b"tip"
if b'node' in req.qsparams:
changeid = req.qsparams[b'node']
ipos = changeid.find(b':')
if ipos != -1:
changeid = changeid[(ipos + 1) :]
return scmutil.revsymbol(repo, changeid)
def basechangectx(repo, req):
if b'node' in req.qsparams:
changeid = req.qsparams[b'node']
ipos = changeid.find(b':')
if ipos != -1:
changeid = changeid[:ipos]
return scmutil.revsymbol(repo, changeid)
return None
def filectx(repo, req):
if b'file' not in req.qsparams:
raise ErrorResponse(HTTP_NOT_FOUND, b'file not given')
path = cleanpath(repo, req.qsparams[b'file'])
if b'node' in req.qsparams:
changeid = req.qsparams[b'node']
elif b'filenode' in req.qsparams:
changeid = req.qsparams[b'filenode']
else:
raise ErrorResponse(HTTP_NOT_FOUND, b'node or filenode not given')
try:
fctx = scmutil.revsymbol(repo, changeid)[path]
except error.RepoError:
fctx = repo.filectx(path, fileid=changeid)
return fctx
def linerange(req):
linerange = req.qsparams.getall(b'linerange')
if not linerange:
return None
if len(linerange) > 1:
raise ErrorResponse(HTTP_BAD_REQUEST, b'redundant linerange parameter')
try:
fromline, toline = map(int, linerange[0].split(b':', 1))
except ValueError:
raise ErrorResponse(HTTP_BAD_REQUEST, b'invalid linerange parameter')
try:
return util.processlinerange(fromline, toline)
except error.ParseError as exc:
raise ErrorResponse(HTTP_BAD_REQUEST, pycompat.bytestr(exc))
def formatlinerange(fromline, toline):
return b'%d:%d' % (fromline + 1, toline)
def _succsandmarkersgen(context, mapping):
repo = context.resource(mapping, b'repo')
itemmappings = templatekw.showsuccsandmarkers(context, mapping)
for item in itemmappings.tovalue(context, mapping):
item[b'successors'] = _siblings(
repo[successor] for successor in item[b'successors']
)
yield item
def succsandmarkers(context, mapping):
return templateutil.mappinggenerator(_succsandmarkersgen, args=(mapping,))
# tell the templater that succsandmarkers has switched to the (context, mapping) API
succsandmarkers._requires = {b'repo', b'ctx'}
def _whyunstablegen(context, mapping):
repo = context.resource(mapping, b'repo')
ctx = context.resource(mapping, b'ctx')
entries = obsutil.whyunstable(repo, ctx)
for entry in entries:
if entry.get(b'divergentnodes'):
entry[b'divergentnodes'] = _siblings(entry[b'divergentnodes'])
yield entry
def whyunstable(context, mapping):
return templateutil.mappinggenerator(_whyunstablegen, args=(mapping,))
whyunstable._requires = {b'repo', b'ctx'}
def commonentry(repo, ctx):
node = scmutil.binnode(ctx)
return {
# TODO: perhaps ctx.changectx() should be assigned if ctx is a
# filectx, but I'm not entirely sure if that would always work because
# fctx.parents() != fctx.changectx.parents() for example.
b'ctx': ctx,
b'rev': ctx.rev(),
b'node': hex(node),
b'author': ctx.user(),
b'desc': ctx.description(),
b'date': ctx.date(),
b'extra': ctx.extra(),
b'phase': ctx.phasestr(),
b'obsolete': ctx.obsolete(),
b'succsandmarkers': succsandmarkers,
b'instabilities': templateutil.hybridlist(
ctx.instabilities(), name=b'instability'
),
b'whyunstable': whyunstable,
b'branch': nodebranchnodefault(ctx),
b'inbranch': nodeinbranch(repo, ctx),
b'branches': nodebranchdict(repo, ctx),
b'tags': nodetagsdict(repo, node),
b'bookmarks': nodebookmarksdict(repo, node),
b'parent': lambda context, mapping: parents(ctx),
b'child': lambda context, mapping: children(ctx),
}
def changelistentry(web, ctx):
"""Obtain a dictionary to be used for entries in a changelist.
This function is called when producing items for the "entries" list passed
to the "shortlog" and "changelog" templates.
"""
repo = web.repo
rev = ctx.rev()
n = scmutil.binnode(ctx)
showtags = showtag(repo, b'changelogtag', n)
files = listfilediffs(ctx.files(), n, web.maxfiles)
entry = commonentry(repo, ctx)
entry.update(
{
b'allparents': lambda context, mapping: parents(ctx),
b'parent': lambda context, mapping: parents(ctx, rev - 1),
b'child': lambda context, mapping: children(ctx, rev + 1),
b'changelogtag': showtags,
b'files': files,
}
)
return entry
def changelistentries(web, revs, maxcount, parityfn):
"""Emit up to N records for an iterable of revisions."""
repo = web.repo
count = 0
for rev in revs:
if count >= maxcount:
break
count += 1
entry = changelistentry(web, repo[rev])
entry[b'parity'] = next(parityfn)
yield entry
def symrevorshortnode(req, ctx):
if b'node' in req.qsparams:
return templatefilters.revescape(req.qsparams[b'node'])
else:
return short(scmutil.binnode(ctx))
def _listfilesgen(context, ctx, stripecount):
parity = paritygen(stripecount)
filesadded = ctx.filesadded()
for blockno, f in enumerate(ctx.files()):
if f not in ctx:
status = b'removed'
elif f in filesadded:
status = b'added'
else:
status = b'modified'
template = b'filenolink' if status == b'removed' else b'filenodelink'
yield context.process(
template,
{
b'node': ctx.hex(),
b'file': f,
b'blockno': blockno + 1,
b'parity': next(parity),
b'status': status,
},
)
def changesetentry(web, ctx):
'''Obtain a dictionary to be used to render the "changeset" template.'''
showtags = showtag(web.repo, b'changesettag', scmutil.binnode(ctx))
showbookmarks = showbookmark(
web.repo, b'changesetbookmark', scmutil.binnode(ctx)
)
showbranch = nodebranchnodefault(ctx)
basectx = basechangectx(web.repo, web.req)
if basectx is None:
basectx = ctx.p1()
style = web.config(b'web', b'style')
if b'style' in web.req.qsparams:
style = web.req.qsparams[b'style']
diff = diffs(web, ctx, basectx, None, style)
parity = paritygen(web.stripecount)
diffstatsgen = diffstatgen(web.repo.ui, ctx, basectx)
diffstats = diffstat(ctx, diffstatsgen, parity)
return dict(
diff=diff,
symrev=symrevorshortnode(web.req, ctx),
basenode=basectx.hex(),
changesettag=showtags,
changesetbookmark=showbookmarks,
changesetbranch=showbranch,
files=templateutil.mappedgenerator(
_listfilesgen, args=(ctx, web.stripecount)
),
diffsummary=lambda context, mapping: diffsummary(diffstatsgen),
diffstat=diffstats,
archives=web.archivelist(ctx.hex()),
**pycompat.strkwargs(commonentry(web.repo, ctx))
)
def _listfilediffsgen(context, files, node, max):
for f in files[:max]:
yield context.process(b'filedifflink', {b'node': hex(node), b'file': f})
if len(files) > max:
yield context.process(b'fileellipses', {})
def listfilediffs(files, node, max):
return templateutil.mappedgenerator(
_listfilediffsgen, args=(files, node, max)
)
def _prettyprintdifflines(context, lines, blockno, lineidprefix):
for lineno, l in enumerate(lines, 1):
difflineno = b"%d.%d" % (blockno, lineno)
if l.startswith(b'+'):
ltype = b"difflineplus"
elif l.startswith(b'-'):
ltype = b"difflineminus"
elif l.startswith(b'@'):
ltype = b"difflineat"
else:
ltype = b"diffline"
yield context.process(
ltype,
{
b'line': l,
b'lineno': lineno,
b'lineid': lineidprefix + b"l%s" % difflineno,
b'linenumber': b"% 8s" % difflineno,
},
)
def _diffsgen(
context,
repo,
ctx,
basectx,
files,
style,
stripecount,
linerange,
lineidprefix,
):
if files:
m = match.exact(files)
else:
m = match.always()
diffopts = patch.diffopts(repo.ui, untrusted=True)
parity = paritygen(stripecount)
diffhunks = patch.diffhunks(repo, basectx, ctx, m, opts=diffopts)
for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1):
if style != b'raw':
header = header[1:]
lines = [h + b'\n' for h in header]
for hunkrange, hunklines in hunks:
if linerange is not None and hunkrange is not None:
s1, l1, s2, l2 = hunkrange
if not mdiff.hunkinrange((s2, l2), linerange):
continue
lines.extend(hunklines)
if lines:
l = templateutil.mappedgenerator(
_prettyprintdifflines, args=(lines, blockno, lineidprefix)
)
yield {
b'parity': next(parity),
b'blockno': blockno,
b'lines': l,
}
def diffs(web, ctx, basectx, files, style, linerange=None, lineidprefix=b''):
args = (
web.repo,
ctx,
basectx,
files,
style,
web.stripecount,
linerange,
lineidprefix,
)
return templateutil.mappinggenerator(
_diffsgen, args=args, name=b'diffblock'
)
def _compline(type, leftlineno, leftline, rightlineno, rightline):
lineid = leftlineno and (b"l%d" % leftlineno) or b''
lineid += rightlineno and (b"r%d" % rightlineno) or b''
llno = b'%d' % leftlineno if leftlineno else b''
rlno = b'%d' % rightlineno if rightlineno else b''
return {
b'type': type,
b'lineid': lineid,
b'leftlineno': leftlineno,
b'leftlinenumber': b"% 6s" % llno,
b'leftline': leftline or b'',
b'rightlineno': rightlineno,
b'rightlinenumber': b"% 6s" % rlno,
b'rightline': rightline or b'',
}
def _getcompblockgen(context, leftlines, rightlines, opcodes):
for type, llo, lhi, rlo, rhi in opcodes:
type = pycompat.sysbytes(type)
len1 = lhi - llo
len2 = rhi - rlo
count = min(len1, len2)
for i in pycompat.xrange(count):
yield _compline(
type=type,
leftlineno=llo + i + 1,
leftline=leftlines[llo + i],
rightlineno=rlo + i + 1,
rightline=rightlines[rlo + i],
)
if len1 > len2:
for i in pycompat.xrange(llo + count, lhi):
yield _compline(
type=type,
leftlineno=i + 1,
leftline=leftlines[i],
rightlineno=None,
rightline=None,
)
elif len2 > len1:
for i in pycompat.xrange(rlo + count, rhi):
yield _compline(
type=type,
leftlineno=None,
leftline=None,
rightlineno=i + 1,
rightline=rightlines[i],
)
def _getcompblock(leftlines, rightlines, opcodes):
args = (leftlines, rightlines, opcodes)
return templateutil.mappinggenerator(
_getcompblockgen, args=args, name=b'comparisonline'
)
def _comparegen(context, contextnum, leftlines, rightlines):
'''Generator function that provides side-by-side comparison data.'''
s = difflib.SequenceMatcher(None, leftlines, rightlines)
if contextnum < 0:
l = _getcompblock(leftlines, rightlines, s.get_opcodes())
yield {b'lines': l}
else:
for oc in s.get_grouped_opcodes(n=contextnum):
l = _getcompblock(leftlines, rightlines, oc)
yield {b'lines': l}
def compare(contextnum, leftlines, rightlines):
args = (contextnum, leftlines, rightlines)
return templateutil.mappinggenerator(
_comparegen, args=args, name=b'comparisonblock'
)
def diffstatgen(ui, ctx, basectx):
'''Generator function that provides the diffstat data.'''
diffopts = patch.diffopts(ui, {b'noprefix': False})
stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx, opts=diffopts)))
maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
while True:
yield stats, maxname, maxtotal, addtotal, removetotal, binary
def diffsummary(statgen):
'''Return a short summary of the diff.'''
stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
return _(b' %d files changed, %d insertions(+), %d deletions(-)\n') % (
len(stats),
addtotal,
removetotal,
)
def _diffstattmplgen(context, ctx, statgen, parity):
stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
files = ctx.files()
def pct(i):
if maxtotal == 0:
return 0
return (float(i) / maxtotal) * 100
fileno = 0
for filename, adds, removes, isbinary in stats:
template = b'diffstatlink' if filename in files else b'diffstatnolink'
total = adds + removes
fileno += 1
yield context.process(
template,
{
b'node': ctx.hex(),
b'file': filename,
b'fileno': fileno,
b'total': total,
b'addpct': pct(adds),
b'removepct': pct(removes),
b'parity': next(parity),
},
)
def diffstat(ctx, statgen, parity):
'''Return a diffstat template for each file in the diff.'''
args = (ctx, statgen, parity)
return templateutil.mappedgenerator(_diffstattmplgen, args=args)
class sessionvars(templateutil.wrapped):
def __init__(self, vars, start=b'?'):
self._start = start
self._vars = vars
def __getitem__(self, key):
return self._vars[key]
def __setitem__(self, key, value):
self._vars[key] = value
def __copy__(self):
return sessionvars(copy.copy(self._vars), self._start)
def contains(self, context, mapping, item):
item = templateutil.unwrapvalue(context, mapping, item)
return item in self._vars
def getmember(self, context, mapping, key):
key = templateutil.unwrapvalue(context, mapping, key)
return self._vars.get(key)
def getmin(self, context, mapping):
raise error.ParseError(_(b'not comparable'))
def getmax(self, context, mapping):
raise error.ParseError(_(b'not comparable'))
def filter(self, context, mapping, select):
# implement if necessary
raise error.ParseError(_(b'not filterable'))
def itermaps(self, context):
separator = self._start
for key, value in sorted(pycompat.iteritems(self._vars)):
yield {
b'name': key,
b'value': pycompat.bytestr(value),
b'separator': separator,
}
separator = b'&'
def join(self, context, mapping, sep):
# could be '{separator}{name}={value|urlescape}'
raise error.ParseError(_(b'not displayable without template'))
def show(self, context, mapping):
return self.join(context, mapping, b'')
def tobool(self, context, mapping):
return bool(self._vars)
def tovalue(self, context, mapping):
return self._vars
class wsgiui(uimod.ui):
# default termwidth breaks under mod_wsgi
def termwidth(self):
return 80
def getwebsubs(repo):
websubtable = []
websubdefs = repo.ui.configitems(b'websub')
# we must maintain interhg backwards compatibility
websubdefs += repo.ui.configitems(b'interhg')
for key, pattern in websubdefs:
# grab the delimiter from the character after the "s"
unesc = pattern[1:2]
delim = stringutil.reescape(unesc)
# identify portions of the pattern, taking care to avoid escaped
# delimiters. the replace format and flags are optional, but
# delimiters are required.
match = re.match(
br'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
% (delim, delim, delim),
pattern,
)
if not match:
repo.ui.warn(
_(b"websub: invalid pattern for %s: %s\n") % (key, pattern)
)
continue
# we need to unescape the delimiter for regexp and format
delim_re = re.compile(br'(?<!\\)\\%s' % delim)
regexp = delim_re.sub(unesc, match.group(1))
format = delim_re.sub(unesc, match.group(2))
# the pattern allows for 6 regexp flags, so set them if necessary
flagin = match.group(3)
flags = 0
if flagin:
for flag in pycompat.sysstr(flagin.upper()):
flags |= re.__dict__[flag]
try:
regexp = re.compile(regexp, flags)
websubtable.append((regexp, format))
except re.error:
repo.ui.warn(
_(b"websub: invalid regexp for %s: %s\n") % (key, regexp)
)
return websubtable
def getgraphnode(repo, ctx):
return templatekw.getgraphnodecurrent(
repo, ctx, {}
) + templatekw.getgraphnodesymbol(ctx)
```
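The navigation offsets used by `revnav.gen()` above come from the `_navseq` generator, which yields the page size followed by a growing 1/3/10/30/... progression. The snippet below is a standalone copy of that progression (with `firststep` made a required argument, matching how `revnav.gen()` calls it) so the generated link distances are easy to inspect; it is an illustration, not code imported from Mercurial.
```python
import itertools

# Standalone copy of the offset sequence behind the changelog navigation links:
# first the page-sized step (rounded up to 50 when it falls between 20 and 40),
# then offsets growing as 1*step, 3*step, 10*step, 30*step, ...
def navseq(step, firststep):
    yield firststep
    if 20 <= firststep <= 40:
        firststep = 50
        yield firststep
    assert step > 0 and firststep > 0
    while step <= firststep:
        step *= 10
    while True:
        yield 1 * step
        yield 3 * step
        step *= 10


if __name__ == '__main__':
    # with a page length of 60 (a typical value), offsets jump straight past it
    print(list(itertools.islice(navseq(1, 60), 8)))
    # a page length between 20 and 40 is rounded up to 50 before growing
    print(list(itertools.islice(navseq(1, 30), 8)))
```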
#### File: site-packages/mercurial/manifest.py
```python
from __future__ import absolute_import
import heapq
import itertools
import struct
import weakref
from .i18n import _
from .node import (
bin,
hex,
nullid,
nullrev,
)
from .pycompat import getattr
from . import (
encoding,
error,
match as matchmod,
mdiff,
pathutil,
policy,
pycompat,
revlog,
util,
)
from .interfaces import (
repository,
util as interfaceutil,
)
parsers = policy.importmod('parsers')
propertycache = util.propertycache
# Allow tests to more easily test the alternate path in manifestdict.fastdelta()
FASTDELTA_TEXTDIFF_THRESHOLD = 1000
def _parse(data):
# This method does a little bit of excessive-looking
# precondition checking. This is so that the behavior of this
# class exactly matches its C counterpart to try and help
# prevent surprise breakage for anyone that develops against
# the pure version.
if data and data[-1:] != b'\n':
raise ValueError(b'Manifest did not end in a newline.')
prev = None
for l in data.splitlines():
if prev is not None and prev > l:
raise ValueError(b'Manifest lines not in sorted order.')
prev = l
f, n = l.split(b'\0')
nl = len(n)
flags = n[-1:]
if flags in _manifestflags:
n = n[:-1]
nl -= 1
else:
flags = b''
if nl not in (40, 64):
raise ValueError(b'Invalid manifest line')
yield f, bin(n), flags
def _text(it):
files = []
lines = []
for f, n, fl in it:
files.append(f)
# if this is changed to support newlines in filenames,
# be sure to check the templates/ dir again (especially *-raw.tmpl)
lines.append(b"%s\0%s%s\n" % (f, hex(n), fl))
_checkforbidden(files)
return b''.join(lines)
class lazymanifestiter(object):
def __init__(self, lm):
self.pos = 0
self.lm = lm
def __iter__(self):
return self
def next(self):
try:
data, pos = self.lm._get(self.pos)
except IndexError:
raise StopIteration
if pos == -1:
self.pos += 1
return data[0]
self.pos += 1
zeropos = data.find(b'\x00', pos)
return data[pos:zeropos]
__next__ = next
class lazymanifestiterentries(object):
def __init__(self, lm):
self.lm = lm
self.pos = 0
def __iter__(self):
return self
def next(self):
try:
data, pos = self.lm._get(self.pos)
except IndexError:
raise StopIteration
if pos == -1:
self.pos += 1
return data
zeropos = data.find(b'\x00', pos)
nlpos = data.find(b'\n', pos)
if zeropos == -1 or nlpos == -1 or nlpos < zeropos:
raise error.StorageError(b'Invalid manifest line')
flags = data[nlpos - 1 : nlpos]
if flags in _manifestflags:
hlen = nlpos - zeropos - 2
else:
hlen = nlpos - zeropos - 1
flags = b''
if hlen not in (40, 64):
raise error.StorageError(b'Invalid manifest line')
hashval = unhexlify(
data, self.lm.extrainfo[self.pos], zeropos + 1, hlen
)
self.pos += 1
return (data[pos:zeropos], hashval, flags)
__next__ = next
def unhexlify(data, extra, pos, length):
s = bin(data[pos : pos + length])
if extra:
s += chr(extra & 0xFF)
return s
def _cmp(a, b):
return (a > b) - (a < b)
_manifestflags = {b'', b'l', b't', b'x'}
class _lazymanifest(object):
"""A pure python manifest backed by a byte string. It is supplimented with
internal lists as it is modified, until it is compacted back to a pure byte
string.
``data`` is the initial manifest data.
``positions`` is a list of offsets, one per manifest entry. Positive
values are offsets into ``data``, negative values are offsets into the
``extradata`` list. When an entry is removed, its entry is dropped from
``positions``. The values are encoded such that when walking the list and
indexing into ``data`` or ``extradata`` as appropriate, the entries are
sorted by filename.
``extradata`` is a list of (key, hash, flags) for entries that were added or
modified since the manifest was created or compacted.
"""
def __init__(
self,
data,
positions=None,
extrainfo=None,
extradata=None,
hasremovals=False,
):
if positions is None:
self.positions = self.findlines(data)
self.extrainfo = [0] * len(self.positions)
self.data = data
self.extradata = []
self.hasremovals = False
else:
self.positions = positions[:]
self.extrainfo = extrainfo[:]
self.extradata = extradata[:]
self.data = data
self.hasremovals = hasremovals
def findlines(self, data):
if not data:
return []
pos = data.find(b"\n")
if pos == -1 or data[-1:] != b'\n':
raise ValueError(b"Manifest did not end in a newline.")
positions = [0]
prev = data[: data.find(b'\x00')]
while pos < len(data) - 1 and pos != -1:
positions.append(pos + 1)
nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
if nexts < prev:
raise ValueError(b"Manifest lines not in sorted order.")
prev = nexts
pos = data.find(b"\n", pos + 1)
return positions
def _get(self, index):
# get the position encoded in pos:
# positive number is an index in 'data'
# negative number is in extrapieces
pos = self.positions[index]
if pos >= 0:
return self.data, pos
return self.extradata[-pos - 1], -1
def _getkey(self, pos):
if pos >= 0:
return self.data[pos : self.data.find(b'\x00', pos + 1)]
return self.extradata[-pos - 1][0]
def bsearch(self, key):
first = 0
last = len(self.positions) - 1
while first <= last:
midpoint = (first + last) // 2
nextpos = self.positions[midpoint]
candidate = self._getkey(nextpos)
r = _cmp(key, candidate)
if r == 0:
return midpoint
else:
if r < 0:
last = midpoint - 1
else:
first = midpoint + 1
return -1
def bsearch2(self, key):
# same as the above, but will always return the position
# done for performance reasons
first = 0
last = len(self.positions) - 1
while first <= last:
midpoint = (first + last) // 2
nextpos = self.positions[midpoint]
candidate = self._getkey(nextpos)
r = _cmp(key, candidate)
if r == 0:
return (midpoint, True)
else:
if r < 0:
last = midpoint - 1
else:
first = midpoint + 1
return (first, False)
def __contains__(self, key):
return self.bsearch(key) != -1
def __getitem__(self, key):
if not isinstance(key, bytes):
raise TypeError(b"getitem: manifest keys must be a bytes.")
needle = self.bsearch(key)
if needle == -1:
raise KeyError
data, pos = self._get(needle)
if pos == -1:
return (data[1], data[2])
zeropos = data.find(b'\x00', pos)
nlpos = data.find(b'\n', zeropos)
assert 0 <= needle <= len(self.positions)
assert len(self.extrainfo) == len(self.positions)
if zeropos == -1 or nlpos == -1 or nlpos < zeropos:
raise error.StorageError(b'Invalid manifest line')
hlen = nlpos - zeropos - 1
flags = data[nlpos - 1 : nlpos]
if flags in _manifestflags:
hlen -= 1
else:
flags = b''
if hlen not in (40, 64):
raise error.StorageError(b'Invalid manifest line')
hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
return (hashval, flags)
def __delitem__(self, key):
needle, found = self.bsearch2(key)
if not found:
raise KeyError
cur = self.positions[needle]
self.positions = self.positions[:needle] + self.positions[needle + 1 :]
self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
if cur >= 0:
# This does NOT unsort the list as far as the search functions are
# concerned, as they only examine lines mapped by self.positions.
self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
self.hasremovals = True
def __setitem__(self, key, value):
if not isinstance(key, bytes):
raise TypeError(b"setitem: manifest keys must be a byte string.")
if not isinstance(value, tuple) or len(value) != 2:
raise TypeError(
b"Manifest values must be a tuple of (node, flags)."
)
hashval = value[0]
if not isinstance(hashval, bytes) or len(hashval) not in (20, 32):
raise TypeError(b"node must be a 20-byte or 32-byte byte string")
flags = value[1]
if not isinstance(flags, bytes) or len(flags) > 1:
raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
needle, found = self.bsearch2(key)
if found:
# put the item
pos = self.positions[needle]
if pos < 0:
self.extradata[-pos - 1] = (key, hashval, value[1])
else:
# just don't bother
self.extradata.append((key, hashval, value[1]))
self.positions[needle] = -len(self.extradata)
else:
# not found, put it in with extra positions
self.extradata.append((key, hashval, value[1]))
self.positions = (
self.positions[:needle]
+ [-len(self.extradata)]
+ self.positions[needle:]
)
self.extrainfo = (
self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
)
def copy(self):
# XXX call _compact like in C?
return _lazymanifest(
self.data,
self.positions,
self.extrainfo,
self.extradata,
self.hasremovals,
)
def _compact(self):
# hopefully not called TOO often
if len(self.extradata) == 0 and not self.hasremovals:
return
l = []
i = 0
offset = 0
self.extrainfo = [0] * len(self.positions)
while i < len(self.positions):
if self.positions[i] >= 0:
cur = self.positions[i]
last_cut = cur
# Collect all contiguous entries in the buffer at the current
# offset, breaking out only for added/modified items held in
# extradata, or a deleted line prior to the next position.
while True:
self.positions[i] = offset
i += 1
if i == len(self.positions) or self.positions[i] < 0:
break
# A removed file has no positions[] entry, but does have an
# overwritten first byte. Break out and find the end of the
# current good entry/entries if there is a removed file
# before the next position.
if (
self.hasremovals
and self.data.find(b'\n\x00', cur, self.positions[i])
!= -1
):
break
offset += self.positions[i] - cur
cur = self.positions[i]
end_cut = self.data.find(b'\n', cur)
if end_cut != -1:
end_cut += 1
offset += end_cut - cur
l.append(self.data[last_cut:end_cut])
else:
while i < len(self.positions) and self.positions[i] < 0:
cur = self.positions[i]
t = self.extradata[-cur - 1]
l.append(self._pack(t))
self.positions[i] = offset
# Hashes are either 20 bytes (old sha1s) or 32
# bytes (new non-sha1).
hlen = 20
if len(t[1]) > 25:
hlen = 32
if len(t[1]) > hlen:
self.extrainfo[i] = ord(t[1][hlen + 1])
offset += len(l[-1])
i += 1
self.data = b''.join(l)
self.hasremovals = False
self.extradata = []
def _pack(self, d):
n = d[1]
assert len(n) in (20, 32)
return d[0] + b'\x00' + hex(n) + d[2] + b'\n'
def text(self):
self._compact()
return self.data
def diff(self, m2, clean=False):
'''Finds changes between the current manifest and m2.'''
# XXX think whether efficiency matters here
diff = {}
for fn, e1, flags in self.iterentries():
if fn not in m2:
diff[fn] = (e1, flags), (None, b'')
else:
e2 = m2[fn]
if (e1, flags) != e2:
diff[fn] = (e1, flags), e2
elif clean:
diff[fn] = None
for fn, e2, flags in m2.iterentries():
if fn not in self:
diff[fn] = (None, b''), (e2, flags)
return diff
def iterentries(self):
return lazymanifestiterentries(self)
def iterkeys(self):
return lazymanifestiter(self)
def __iter__(self):
return lazymanifestiter(self)
def __len__(self):
return len(self.positions)
def filtercopy(self, filterfn):
# XXX should be optimized
c = _lazymanifest(b'')
for f, n, fl in self.iterentries():
if filterfn(f):
c[f] = n, fl
return c
try:
_lazymanifest = parsers.lazymanifest
except AttributeError:
pass
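# Editorial note (not part of upstream manifest.py): when the C extension
# module exposes parsers.lazymanifest, it replaces the pure-Python
# _lazymanifest above; the pure version is kept as a fallback (e.g. when the
# pure-Python module policy is in effect).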
@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
def __init__(self, data=b''):
self._lm = _lazymanifest(data)
def __getitem__(self, key):
return self._lm[key][0]
def find(self, key):
return self._lm[key]
def __len__(self):
return len(self._lm)
def __nonzero__(self):
# nonzero is covered by the __len__ function, but implementing it here
# makes it easier for extensions to override.
return len(self._lm) != 0
__bool__ = __nonzero__
def __setitem__(self, key, node):
self._lm[key] = node, self.flags(key)
def __contains__(self, key):
if key is None:
return False
return key in self._lm
def __delitem__(self, key):
del self._lm[key]
def __iter__(self):
return self._lm.__iter__()
def iterkeys(self):
return self._lm.iterkeys()
def keys(self):
return list(self.iterkeys())
def filesnotin(self, m2, match=None):
'''Set of files in this manifest that are not in the other'''
if match is not None:
match = matchmod.badmatch(match, lambda path, msg: None)
sm2 = set(m2.walk(match))
return {f for f in self.walk(match) if f not in sm2}
return {f for f in self if f not in m2}
@propertycache
def _dirs(self):
return pathutil.dirs(self)
def dirs(self):
return self._dirs
def hasdir(self, dir):
return dir in self._dirs
def _filesfastpath(self, match):
"""Checks whether we can correctly and quickly iterate over matcher
files instead of over manifest files."""
files = match.files()
return len(files) < 100 and (
match.isexact()
or (match.prefix() and all(fn in self for fn in files))
)
def walk(self, match):
"""Generates matching file names.
Equivalent to manifest.matches(match).iterkeys(), but without creating
an entirely new manifest.
It also reports nonexistent files by marking them bad with match.bad().
"""
if match.always():
for f in iter(self):
yield f
return
fset = set(match.files())
# avoid the entire walk if we're only looking for specific files
if self._filesfastpath(match):
for fn in sorted(fset):
if fn in self:
yield fn
return
for fn in self:
if fn in fset:
# specified pattern is the exact name
fset.remove(fn)
if match(fn):
yield fn
# for dirstate.walk, files=[''] means "walk the whole tree".
# follow that here, too
fset.discard(b'')
for fn in sorted(fset):
if not self.hasdir(fn):
match.bad(fn, None)
def _matches(self, match):
'''generate a new manifest filtered by the match argument'''
if match.always():
return self.copy()
if self._filesfastpath(match):
m = manifestdict()
lm = self._lm
for fn in match.files():
if fn in lm:
m._lm[fn] = lm[fn]
return m
m = manifestdict()
m._lm = self._lm.filtercopy(match)
return m
def diff(self, m2, match=None, clean=False):
"""Finds changes between the current manifest and m2.
Args:
m2: the manifest to which this manifest should be compared.
clean: if true, include files unchanged between these manifests
with a None value in the returned dictionary.
The result is returned as a dict with filename as key and
values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
nodeid in the current/other manifest and fl1/fl2 is the flag
in the current/other manifest. Where the file does not exist,
the nodeid will be None and the flags will be the empty
string.
"""
if match:
m1 = self._matches(match)
m2 = m2._matches(match)
return m1.diff(m2, clean=clean)
return self._lm.diff(m2._lm, clean)
def setflag(self, key, flag):
if flag not in _manifestflags:
raise TypeError(b"Invalid manifest flag set.")
self._lm[key] = self[key], flag
def get(self, key, default=None):
try:
return self._lm[key][0]
except KeyError:
return default
def flags(self, key):
try:
return self._lm[key][1]
except KeyError:
return b''
def copy(self):
c = manifestdict()
c._lm = self._lm.copy()
return c
def items(self):
return (x[:2] for x in self._lm.iterentries())
def iteritems(self):
return (x[:2] for x in self._lm.iterentries())
def iterentries(self):
return self._lm.iterentries()
def text(self):
# most likely uses native version
return self._lm.text()
def fastdelta(self, base, changes):
"""Given a base manifest text as a bytearray and a list of changes
relative to that text, compute a delta that can be used by revlog.
"""
delta = []
dstart = None
dend = None
dline = [b""]
start = 0
# zero copy representation of base as a buffer
addbuf = util.buffer(base)
changes = list(changes)
if len(changes) < FASTDELTA_TEXTDIFF_THRESHOLD:
# start with a readonly loop that finds the offset of
# each line and creates the deltas
for f, todelete in changes:
# bs will either be the index of the item or the insert point
start, end = _msearch(addbuf, f, start)
if not todelete:
h, fl = self._lm[f]
l = b"%s\0%s%s\n" % (f, hex(h), fl)
else:
if start == end:
# item we want to delete was not found, error out
raise AssertionError(
_(b"failed to remove %s from manifest") % f
)
l = b""
if dstart is not None and dstart <= start and dend >= start:
if dend < end:
dend = end
if l:
dline.append(l)
else:
if dstart is not None:
delta.append([dstart, dend, b"".join(dline)])
dstart = start
dend = end
dline = [l]
if dstart is not None:
delta.append([dstart, dend, b"".join(dline)])
# apply the delta to the base, and get a delta for addrevision
deltatext, arraytext = _addlistdelta(base, delta)
else:
# For large changes, it's much cheaper to just build the text and
# diff it.
arraytext = bytearray(self.text())
deltatext = mdiff.textdiff(
util.buffer(base), util.buffer(arraytext)
)
return arraytext, deltatext
def _msearch(m, s, lo=0, hi=None):
"""return a tuple (start, end) that says where to find s within m.
If the string is found m[start:end] are the line containing
that string. If start == end the string was not found and
they indicate the proper sorted insertion point.
m should be a buffer, a memoryview or a byte string.
s is a byte string"""
def advance(i, c):
while i < lenm and m[i : i + 1] != c:
i += 1
return i
if not s:
return (lo, lo)
lenm = len(m)
if not hi:
hi = lenm
while lo < hi:
mid = (lo + hi) // 2
start = mid
while start > 0 and m[start - 1 : start] != b'\n':
start -= 1
end = advance(start, b'\0')
if bytes(m[start:end]) < s:
# we know that after the null there are 40 bytes of sha1
# this translates to the bisect lo = mid + 1
lo = advance(end + 40, b'\n') + 1
else:
# this translates to the bisect hi = mid
hi = start
end = advance(lo, b'\0')
found = m[lo:end]
if s == found:
# we know that after the null there are 40 bytes of sha1
end = advance(end + 40, b'\n')
return (lo, end + 1)
else:
return (lo, lo)
def _checkforbidden(l):
"""Check filenames for illegal characters."""
for f in l:
if b'\n' in f or b'\r' in f:
raise error.StorageError(
_(b"'\\n' and '\\r' disallowed in filenames: %r")
% pycompat.bytestr(f)
)
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
# for large addlist arrays, building a new array is cheaper
# than repeatedly modifying the existing one
currentposition = 0
newaddlist = bytearray()
for start, end, content in x:
newaddlist += addlist[currentposition:start]
if content:
newaddlist += bytearray(content)
currentposition = end
newaddlist += addlist[currentposition:]
deltatext = b"".join(
struct.pack(b">lll", start, end, len(content)) + content
for start, end, content in x
)
return deltatext, newaddlist
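# Editorial note (not part of upstream manifest.py): the deltatext built above
# is a concatenation of chunks, each packed as b">lll" (start, end, length)
# offsets into the base text followed by `length` bytes of replacement
# content -- the binary delta form that revlog.addrevision() can consume.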
def _splittopdir(f):
if b'/' in f:
dir, subpath = f.split(b'/', 1)
return dir + b'/', subpath
else:
return b'', f
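# Editorial note (not part of upstream manifest.py): _splittopdir peels off the
# top-level directory while keeping its trailing slash, e.g.
#   _splittopdir(b'dir/sub/f.txt') -> (b'dir/', b'sub/f.txt')
#   _splittopdir(b'f.txt')         -> (b'', b'f.txt')
# which is why treemanifest keys its _dirs dict on names ending in b'/'.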
_noop = lambda s: None
@interfaceutil.implementer(repository.imanifestdict)
class treemanifest(object):
def __init__(self, dir=b'', text=b''):
self._dir = dir
self._node = nullid
self._loadfunc = _noop
self._copyfunc = _noop
self._dirty = False
self._dirs = {}
self._lazydirs = {}
# Using _lazymanifest here is a little slower than plain old dicts
self._files = {}
self._flags = {}
if text:
def readsubtree(subdir, subm):
raise AssertionError(
b'treemanifest constructor only accepts flat manifests'
)
self.parse(text, readsubtree)
self._dirty = True # Mark flat manifest dirty after parsing
def _subpath(self, path):
return self._dir + path
def _loadalllazy(self):
selfdirs = self._dirs
subpath = self._subpath
for d, (node, readsubtree, docopy) in pycompat.iteritems(
self._lazydirs
):
if docopy:
selfdirs[d] = readsubtree(subpath(d), node).copy()
else:
selfdirs[d] = readsubtree(subpath(d), node)
self._lazydirs = {}
def _loadlazy(self, d):
v = self._lazydirs.get(d)
if v:
node, readsubtree, docopy = v
if docopy:
self._dirs[d] = readsubtree(self._subpath(d), node).copy()
else:
self._dirs[d] = readsubtree(self._subpath(d), node)
del self._lazydirs[d]
def _loadchildrensetlazy(self, visit):
if not visit:
return None
if visit == b'all' or visit == b'this':
self._loadalllazy()
return None
loadlazy = self._loadlazy
for k in visit:
loadlazy(k + b'/')
return visit
def _loaddifflazy(self, t1, t2):
"""load items in t1 and t2 if they're needed for diffing.
The criteria currently is:
- if it's not present in _lazydirs in either t1 or t2, load it in the
other (it may already be loaded or it may not exist, doesn't matter)
- if it's present in _lazydirs in both, compare the nodeid; if it
differs, load it in both
"""
toloadlazy = []
for d, v1 in pycompat.iteritems(t1._lazydirs):
v2 = t2._lazydirs.get(d)
if not v2 or v2[0] != v1[0]:
toloadlazy.append(d)
for d, v1 in pycompat.iteritems(t2._lazydirs):
if d not in t1._lazydirs:
toloadlazy.append(d)
for d in toloadlazy:
t1._loadlazy(d)
t2._loadlazy(d)
def __len__(self):
self._load()
size = len(self._files)
self._loadalllazy()
for m in self._dirs.values():
size += m.__len__()
return size
def __nonzero__(self):
# Faster than "__len() != 0" since it avoids loading sub-manifests
return not self._isempty()
__bool__ = __nonzero__
def _isempty(self):
self._load() # for consistency; already loaded by all callers
# See if we can skip loading everything.
if self._files or (
self._dirs and any(not m._isempty() for m in self._dirs.values())
):
return False
self._loadalllazy()
return not self._dirs or all(m._isempty() for m in self._dirs.values())
@encoding.strmethod
def __repr__(self):
return (
b'<treemanifest dir=%s, node=%s, loaded=%r, dirty=%r at 0x%x>'
% (
self._dir,
hex(self._node),
bool(self._loadfunc is _noop),
self._dirty,
id(self),
)
)
def dir(self):
"""The directory that this tree manifest represents, including a
trailing '/'. Empty string for the repo root directory."""
return self._dir
def node(self):
"""This node of this instance. nullid for unsaved instances. Should
be updated when the instance is read or written from a revlog.
"""
assert not self._dirty
return self._node
def setnode(self, node):
self._node = node
self._dirty = False
def iterentries(self):
self._load()
self._loadalllazy()
for p, n in sorted(
itertools.chain(self._dirs.items(), self._files.items())
):
if p in self._files:
yield self._subpath(p), n, self._flags.get(p, b'')
else:
for x in n.iterentries():
yield x
def items(self):
self._load()
self._loadalllazy()
for p, n in sorted(
itertools.chain(self._dirs.items(), self._files.items())
):
if p in self._files:
yield self._subpath(p), n
else:
for f, sn in pycompat.iteritems(n):
yield f, sn
iteritems = items
def iterkeys(self):
self._load()
self._loadalllazy()
for p in sorted(itertools.chain(self._dirs, self._files)):
if p in self._files:
yield self._subpath(p)
else:
for f in self._dirs[p]:
yield f
def keys(self):
return list(self.iterkeys())
def __iter__(self):
return self.iterkeys()
def __contains__(self, f):
if f is None:
return False
self._load()
dir, subpath = _splittopdir(f)
if dir:
self._loadlazy(dir)
if dir not in self._dirs:
return False
return self._dirs[dir].__contains__(subpath)
else:
return f in self._files
def get(self, f, default=None):
self._load()
dir, subpath = _splittopdir(f)
if dir:
self._loadlazy(dir)
if dir not in self._dirs:
return default
return self._dirs[dir].get(subpath, default)
else:
return self._files.get(f, default)
def __getitem__(self, f):
self._load()
dir, subpath = _splittopdir(f)
if dir:
self._loadlazy(dir)
return self._dirs[dir].__getitem__(subpath)
else:
return self._files[f]
def flags(self, f):
self._load()
dir, subpath = _splittopdir(f)
if dir:
self._loadlazy(dir)
if dir not in self._dirs:
return b''
return self._dirs[dir].flags(subpath)
else:
if f in self._lazydirs or f in self._dirs:
return b''
return self._flags.get(f, b'')
def find(self, f):
self._load()
dir, subpath = _splittopdir(f)
if dir:
self._loadlazy(dir)
return self._dirs[dir].find(subpath)
else:
return self._files[f], self._flags.get(f, b'')
def __delitem__(self, f):
self._load()
dir, subpath = _splittopdir(f)
if dir:
self._loadlazy(dir)
self._dirs[dir].__delitem__(subpath)
# If the directory is now empty, remove it
if self._dirs[dir]._isempty():
del self._dirs[dir]
else:
del self._files[f]
if f in self._flags:
del self._flags[f]
self._dirty = True
def __setitem__(self, f, n):
assert n is not None
self._load()
dir, subpath = _splittopdir(f)
if dir:
self._loadlazy(dir)
if dir not in self._dirs:
self._dirs[dir] = treemanifest(self._subpath(dir))
self._dirs[dir].__setitem__(subpath, n)
else:
# manifest nodes are either 20 bytes or 32 bytes,
# depending on the hash in use. Assert this as historically
# sometimes extra bytes were added.
assert len(n) in (20, 32)
self._files[f] = n
self._dirty = True
def _load(self):
if self._loadfunc is not _noop:
lf, self._loadfunc = self._loadfunc, _noop
lf(self)
elif self._copyfunc is not _noop:
cf, self._copyfunc = self._copyfunc, _noop
cf(self)
def setflag(self, f, flags):
"""Set the flags (symlink, executable) for path f."""
if flags not in _manifestflags:
raise TypeError(b"Invalid manifest flag set.")
self._load()
dir, subpath = _splittopdir(f)
if dir:
self._loadlazy(dir)
if dir not in self._dirs:
self._dirs[dir] = treemanifest(self._subpath(dir))
self._dirs[dir].setflag(subpath, flags)
else:
self._flags[f] = flags
self._dirty = True
def copy(self):
copy = treemanifest(self._dir)
copy._node = self._node
copy._dirty = self._dirty
if self._copyfunc is _noop:
def _copyfunc(s):
self._load()
s._lazydirs = {
d: (n, r, True)
for d, (n, r, c) in pycompat.iteritems(self._lazydirs)
}
sdirs = s._dirs
for d, v in pycompat.iteritems(self._dirs):
sdirs[d] = v.copy()
s._files = dict.copy(self._files)
s._flags = dict.copy(self._flags)
if self._loadfunc is _noop:
_copyfunc(copy)
else:
copy._copyfunc = _copyfunc
else:
copy._copyfunc = self._copyfunc
return copy
def filesnotin(self, m2, match=None):
'''Set of files in this manifest that are not in the other'''
if match and not match.always():
m1 = self._matches(match)
m2 = m2._matches(match)
return m1.filesnotin(m2)
files = set()
def _filesnotin(t1, t2):
if t1._node == t2._node and not t1._dirty and not t2._dirty:
return
t1._load()
t2._load()
self._loaddifflazy(t1, t2)
for d, m1 in pycompat.iteritems(t1._dirs):
if d in t2._dirs:
m2 = t2._dirs[d]
_filesnotin(m1, m2)
else:
files.update(m1.iterkeys())
for fn in t1._files:
if fn not in t2._files:
files.add(t1._subpath(fn))
_filesnotin(self, m2)
return files
@propertycache
def _alldirs(self):
return pathutil.dirs(self)
def dirs(self):
return self._alldirs
def hasdir(self, dir):
self._load()
topdir, subdir = _splittopdir(dir)
if topdir:
self._loadlazy(topdir)
if topdir in self._dirs:
return self._dirs[topdir].hasdir(subdir)
return False
dirslash = dir + b'/'
return dirslash in self._dirs or dirslash in self._lazydirs
def walk(self, match):
"""Generates matching file names.
It also reports nonexistent files by marking them bad with match.bad().
"""
if match.always():
for f in iter(self):
yield f
return
fset = set(match.files())
for fn in self._walk(match):
if fn in fset:
# specified pattern is the exact name
fset.remove(fn)
yield fn
# for dirstate.walk, files=[''] means "walk the whole tree".
# follow that here, too
fset.discard(b'')
for fn in sorted(fset):
if not self.hasdir(fn):
match.bad(fn, None)
def _walk(self, match):
'''Recursively generates matching file names for walk().'''
visit = match.visitchildrenset(self._dir[:-1])
if not visit:
return
# yield this dir's files and walk its submanifests
self._load()
visit = self._loadchildrensetlazy(visit)
for p in sorted(list(self._dirs) + list(self._files)):
if p in self._files:
fullp = self._subpath(p)
if match(fullp):
yield fullp
else:
if not visit or p[:-1] in visit:
for f in self._dirs[p]._walk(match):
yield f
def _matches(self, match):
"""recursively generate a new manifest filtered by the match argument."""
if match.always():
return self.copy()
return self._matches_inner(match)
def _matches_inner(self, match):
if match.always():
return self.copy()
visit = match.visitchildrenset(self._dir[:-1])
if visit == b'all':
return self.copy()
ret = treemanifest(self._dir)
if not visit:
return ret
self._load()
for fn in self._files:
# While visitchildrenset *usually* lists only subdirs, this is
# actually up to the matcher and may have some files in the set().
# If visit == 'this', we should obviously look at the files in this
# directory; if visit is a set, and fn is in it, we should inspect
# fn (but no need to inspect things not in the set).
if visit != b'this' and fn not in visit:
continue
fullp = self._subpath(fn)
# visitchildrenset isn't perfect, we still need to call the regular
# matcher code to further filter results.
if not match(fullp):
continue
ret._files[fn] = self._files[fn]
if fn in self._flags:
ret._flags[fn] = self._flags[fn]
visit = self._loadchildrensetlazy(visit)
for dir, subm in pycompat.iteritems(self._dirs):
if visit and dir[:-1] not in visit:
continue
m = subm._matches_inner(match)
if not m._isempty():
ret._dirs[dir] = m
if not ret._isempty():
ret._dirty = True
return ret
def fastdelta(self, base, changes):
raise FastdeltaUnavailable()
def diff(self, m2, match=None, clean=False):
"""Finds changes between the current manifest and m2.
Args:
m2: the manifest to which this manifest should be compared.
clean: if true, include files unchanged between these manifests
with a None value in the returned dictionary.
The result is returned as a dict with filename as key and
values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
nodeid in the current/other manifest and fl1/fl2 is the flag
in the current/other manifest. Where the file does not exist,
the nodeid will be None and the flags will be the empty
string.
"""
if match and not match.always():
m1 = self._matches(match)
m2 = m2._matches(match)
return m1.diff(m2, clean=clean)
result = {}
emptytree = treemanifest()
def _iterativediff(t1, t2, stack):
"""compares two tree manifests and append new tree-manifests which
needs to be compared to stack"""
if t1._node == t2._node and not t1._dirty and not t2._dirty:
return
t1._load()
t2._load()
self._loaddifflazy(t1, t2)
for d, m1 in pycompat.iteritems(t1._dirs):
m2 = t2._dirs.get(d, emptytree)
stack.append((m1, m2))
for d, m2 in pycompat.iteritems(t2._dirs):
if d not in t1._dirs:
stack.append((emptytree, m2))
for fn, n1 in pycompat.iteritems(t1._files):
fl1 = t1._flags.get(fn, b'')
n2 = t2._files.get(fn, None)
fl2 = t2._flags.get(fn, b'')
if n1 != n2 or fl1 != fl2:
result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
elif clean:
result[t1._subpath(fn)] = None
for fn, n2 in pycompat.iteritems(t2._files):
if fn not in t1._files:
fl2 = t2._flags.get(fn, b'')
result[t2._subpath(fn)] = ((None, b''), (n2, fl2))
stackls = []
_iterativediff(self, m2, stackls)
while stackls:
t1, t2 = stackls.pop()
# stackls is populated in the function call
_iterativediff(t1, t2, stackls)
return result
def unmodifiedsince(self, m2):
return not self._dirty and not m2._dirty and self._node == m2._node
def parse(self, text, readsubtree):
selflazy = self._lazydirs
for f, n, fl in _parse(text):
if fl == b't':
f = f + b'/'
# False below means "doesn't need to be copied" and can use the
# cached value from readsubtree directly.
selflazy[f] = (n, readsubtree, False)
elif b'/' in f:
# This is a flat manifest, so use __setitem__ and setflag rather
# than assigning directly to _files and _flags, so we can
# assign a path in a subdirectory, and to mark dirty (compared
# to nullid).
self[f] = n
if fl:
self.setflag(f, fl)
else:
# Assigning to _files and _flags avoids marking as dirty,
# and should be a little faster.
self._files[f] = n
if fl:
self._flags[f] = fl
def text(self):
"""Get the full data of this manifest as a bytestring."""
self._load()
return _text(self.iterentries())
def dirtext(self):
"""Get the full data of this directory as a bytestring. Make sure that
any submanifests have been written first, so their nodeids are correct.
"""
self._load()
flags = self.flags
lazydirs = [
(d[:-1], v[0], b't') for d, v in pycompat.iteritems(self._lazydirs)
]
dirs = [(d[:-1], self._dirs[d]._node, b't') for d in self._dirs]
files = [(f, self._files[f], flags(f)) for f in self._files]
return _text(sorted(dirs + files + lazydirs))
def read(self, gettext, readsubtree):
def _load_for_read(s):
s.parse(gettext(), readsubtree)
s._dirty = False
self._loadfunc = _load_for_read
def writesubtrees(self, m1, m2, writesubtree, match):
self._load() # for consistency; should never have any effect here
m1._load()
m2._load()
emptytree = treemanifest()
def getnode(m, d):
ld = m._lazydirs.get(d)
if ld:
return ld[0]
return m._dirs.get(d, emptytree)._node
# let's skip investigating things that `match` says we do not need.
visit = match.visitchildrenset(self._dir[:-1])
visit = self._loadchildrensetlazy(visit)
if visit == b'this' or visit == b'all':
visit = None
for d, subm in pycompat.iteritems(self._dirs):
if visit and d[:-1] not in visit:
continue
subp1 = getnode(m1, d)
subp2 = getnode(m2, d)
if subp1 == nullid:
subp1, subp2 = subp2, subp1
writesubtree(subm, subp1, subp2, match)
def walksubtrees(self, matcher=None):
"""Returns an iterator of the subtrees of this manifest, including this
manifest itself.
If `matcher` is provided, it only returns subtrees that match.
"""
if matcher and not matcher.visitdir(self._dir[:-1]):
return
if not matcher or matcher(self._dir[:-1]):
yield self
self._load()
# OPT: use visitchildrenset to avoid loading everything.
self._loadalllazy()
for d, subm in pycompat.iteritems(self._dirs):
for subtree in subm.walksubtrees(matcher=matcher):
yield subtree
class manifestfulltextcache(util.lrucachedict):
"""File-backed LRU cache for the manifest cache
File consists of entries, up to EOF:
- 20 bytes node, 4 bytes length, <length> manifest data
These are written in reverse cache order (oldest to newest).
"""
_file = b'manifestfulltextcache'
def __init__(self, max):
super(manifestfulltextcache, self).__init__(max)
self._dirty = False
self._read = False
self._opener = None
def read(self):
if self._read or self._opener is None:
return
try:
with self._opener(self._file) as fp:
set = super(manifestfulltextcache, self).__setitem__
# ignore trailing data, this is a cache, corruption is skipped
while True:
# TODO do we need to do work here for sha1 portability?
node = fp.read(20)
if len(node) < 20:
break
try:
size = struct.unpack(b'>L', fp.read(4))[0]
except struct.error:
break
value = bytearray(fp.read(size))
if len(value) != size:
break
set(node, value)
except IOError:
# the file is allowed to be missing
pass
self._read = True
self._dirty = False
def write(self):
if not self._dirty or self._opener is None:
return
# rotate backwards to the first used node
try:
with self._opener(
self._file, b'w', atomictemp=True, checkambig=True
) as fp:
node = self._head.prev
while True:
if node.key in self._cache:
fp.write(node.key)
fp.write(struct.pack(b'>L', len(node.value)))
fp.write(node.value)
if node is self._head:
break
node = node.prev
except IOError:
# We could not write the cache (e.g. permission error);
# the content is allowed to be missing.
#
# We could try harder and see if we could recreate a wcache
# directory where we could write to.
#
# XXX the error passes silently; having some way to issue an
# error log via `ui.log` would be nice.
pass
def __len__(self):
if not self._read:
self.read()
return super(manifestfulltextcache, self).__len__()
def __contains__(self, k):
if not self._read:
self.read()
return super(manifestfulltextcache, self).__contains__(k)
def __iter__(self):
if not self._read:
self.read()
return super(manifestfulltextcache, self).__iter__()
def __getitem__(self, k):
if not self._read:
self.read()
# the cache lru order can change on read
setdirty = self._cache.get(k) is not self._head
value = super(manifestfulltextcache, self).__getitem__(k)
if setdirty:
self._dirty = True
return value
def __setitem__(self, k, v):
if not self._read:
self.read()
super(manifestfulltextcache, self).__setitem__(k, v)
self._dirty = True
def __delitem__(self, k):
if not self._read:
self.read()
super(manifestfulltextcache, self).__delitem__(k)
self._dirty = True
def get(self, k, default=None):
if not self._read:
self.read()
return super(manifestfulltextcache, self).get(k, default=default)
def clear(self, clear_persisted_data=False):
super(manifestfulltextcache, self).clear()
if clear_persisted_data:
self._dirty = True
self.write()
self._read = False
# an upper bound of what we expect from compression
# (the real-world value seems to be 3)
MAXCOMPRESSION = 3
class FastdeltaUnavailable(Exception):
"""Exception raised when fastdelta isn't usable on a manifest."""
@interfaceutil.implementer(repository.imanifeststorage)
class manifestrevlog(object):
"""A revlog that stores manifest texts. This is responsible for caching the
full-text manifest contents.
"""
def __init__(
self,
opener,
tree=b'',
dirlogcache=None,
indexfile=None,
treemanifest=False,
):
"""Constructs a new manifest revlog
`indexfile` - used by extensions to have two manifests at once, like
when transitioning between flat manifests and tree manifests.
`treemanifest` - used to indicate this is a tree manifest revlog. Opener
options can also be used to make this a tree manifest revlog. The opener
option takes precedence, so if it is set to True, we ignore whatever
value is passed in to the constructor.
"""
# During normal operations, we expect to deal with not more than four
# revs at a time (such as during commit --amend). When rebasing large
# stacks of commits, the number can go up, hence the config knob below.
cachesize = 4
optiontreemanifest = False
opts = getattr(opener, 'options', None)
if opts is not None:
cachesize = opts.get(b'manifestcachesize', cachesize)
optiontreemanifest = opts.get(b'treemanifest', False)
self._treeondisk = optiontreemanifest or treemanifest
self._fulltextcache = manifestfulltextcache(cachesize)
if tree:
assert self._treeondisk, b'opts is %r' % opts
if indexfile is None:
indexfile = b'00manifest.i'
if tree:
indexfile = b"meta/" + tree + indexfile
self.tree = tree
# The dirlogcache is kept on the root manifest log
if tree:
self._dirlogcache = dirlogcache
else:
self._dirlogcache = {b'': self}
self._revlog = revlog.revlog(
opener,
indexfile,
# only root indexfile is cached
checkambig=not bool(tree),
mmaplargeindex=True,
upperboundcomp=MAXCOMPRESSION,
persistentnodemap=opener.options.get(b'persistent-nodemap', False),
)
self.index = self._revlog.index
self.version = self._revlog.version
self._generaldelta = self._revlog._generaldelta
def _setupmanifestcachehooks(self, repo):
"""Persist the manifestfulltextcache on lock release"""
if not util.safehasattr(repo, b'_wlockref'):
return
self._fulltextcache._opener = repo.wcachevfs
if repo._currentlock(repo._wlockref) is None:
return
reporef = weakref.ref(repo)
manifestrevlogref = weakref.ref(self)
def persistmanifestcache(success):
# Repo is in an unknown state, do not persist.
if not success:
return
repo = reporef()
self = manifestrevlogref()
if repo is None or self is None:
return
if repo.manifestlog.getstorage(b'') is not self:
# there's a different manifest in play now, abort
return
self._fulltextcache.write()
repo._afterlock(persistmanifestcache)
@property
def fulltextcache(self):
return self._fulltextcache
def clearcaches(self, clear_persisted_data=False):
self._revlog.clearcaches()
self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
self._dirlogcache = {self.tree: self}
def dirlog(self, d):
if d:
assert self._treeondisk
if d not in self._dirlogcache:
mfrevlog = manifestrevlog(
self.opener, d, self._dirlogcache, treemanifest=self._treeondisk
)
self._dirlogcache[d] = mfrevlog
return self._dirlogcache[d]
def add(
self,
m,
transaction,
link,
p1,
p2,
added,
removed,
readtree=None,
match=None,
):
"""add some manifest entry in to the manifest log
input:
m: the manifest dict we want to store
transaction: the open transaction
p1: manifest-node of p1
p2: manifest-node of p2
added: file added/changed compared to parent
removed: file removed compared to parent
tree manifest input:
readtree: a function to read a subtree
match: a filematcher for the subpart of the tree manifest
"""
try:
if p1 not in self.fulltextcache:
raise FastdeltaUnavailable()
# If our first parent is in the manifest cache, we can
# compute a delta here using properties we know about the
# manifest up-front, which may save time later for the
# revlog layer.
_checkforbidden(added)
# combine the changed lists into one sorted iterator
work = heapq.merge(
[(x, False) for x in sorted(added)],
[(x, True) for x in sorted(removed)],
)
arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
cachedelta = self._revlog.rev(p1), deltatext
text = util.buffer(arraytext)
n = self._revlog.addrevision(
text, transaction, link, p1, p2, cachedelta
)
except FastdeltaUnavailable:
# The first parent manifest isn't already loaded or the
# manifest implementation doesn't support fastdelta, so
# we'll just encode a fulltext of the manifest and pass
# that through to the revlog layer, and let it handle the
# delta process.
if self._treeondisk:
assert readtree, b"readtree must be set for treemanifest writes"
assert match, b"match must be specified for treemanifest writes"
m1 = readtree(self.tree, p1)
m2 = readtree(self.tree, p2)
n = self._addtree(
m, transaction, link, m1, m2, readtree, match=match
)
arraytext = None
else:
text = m.text()
n = self._revlog.addrevision(text, transaction, link, p1, p2)
arraytext = bytearray(text)
if arraytext is not None:
self.fulltextcache[n] = arraytext
return n
def _addtree(self, m, transaction, link, m1, m2, readtree, match):
# If the manifest is unchanged compared to one parent,
# don't write a new revision
if self.tree != b'' and (
m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
):
return m.node()
def writesubtree(subm, subp1, subp2, match):
sublog = self.dirlog(subm.dir())
sublog.add(
subm,
transaction,
link,
subp1,
subp2,
None,
None,
readtree=readtree,
match=match,
)
m.writesubtrees(m1, m2, writesubtree, match)
text = m.dirtext()
n = None
if self.tree != b'':
# Double-check whether contents are unchanged to one parent
if text == m1.dirtext():
n = m1.node()
elif text == m2.dirtext():
n = m2.node()
if not n:
n = self._revlog.addrevision(
text, transaction, link, m1.node(), m2.node()
)
# Save nodeid so parent manifest can calculate its nodeid
m.setnode(n)
return n
def __len__(self):
return len(self._revlog)
def __iter__(self):
return self._revlog.__iter__()
def rev(self, node):
return self._revlog.rev(node)
def node(self, rev):
return self._revlog.node(rev)
def lookup(self, value):
return self._revlog.lookup(value)
def parentrevs(self, rev):
return self._revlog.parentrevs(rev)
def parents(self, node):
return self._revlog.parents(node)
def linkrev(self, rev):
return self._revlog.linkrev(rev)
def checksize(self):
return self._revlog.checksize()
def revision(self, node, _df=None, raw=False):
return self._revlog.revision(node, _df=_df, raw=raw)
def rawdata(self, node, _df=None):
return self._revlog.rawdata(node, _df=_df)
def revdiff(self, rev1, rev2):
return self._revlog.revdiff(rev1, rev2)
def cmp(self, node, text):
return self._revlog.cmp(node, text)
def deltaparent(self, rev):
return self._revlog.deltaparent(rev)
def emitrevisions(
self,
nodes,
nodesorder=None,
revisiondata=False,
assumehaveparentrevisions=False,
deltamode=repository.CG_DELTAMODE_STD,
):
return self._revlog.emitrevisions(
nodes,
nodesorder=nodesorder,
revisiondata=revisiondata,
assumehaveparentrevisions=assumehaveparentrevisions,
deltamode=deltamode,
)
def addgroup(
self,
deltas,
linkmapper,
transaction,
addrevisioncb=None,
duplicaterevisioncb=None,
):
return self._revlog.addgroup(
deltas,
linkmapper,
transaction,
addrevisioncb=addrevisioncb,
duplicaterevisioncb=duplicaterevisioncb,
)
def rawsize(self, rev):
return self._revlog.rawsize(rev)
def getstrippoint(self, minlink):
return self._revlog.getstrippoint(minlink)
def strip(self, minlink, transaction):
return self._revlog.strip(minlink, transaction)
def files(self):
return self._revlog.files()
def clone(self, tr, destrevlog, **kwargs):
if not isinstance(destrevlog, manifestrevlog):
raise error.ProgrammingError(b'expected manifestrevlog to clone()')
return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
def storageinfo(
self,
exclusivefiles=False,
sharedfiles=False,
revisionscount=False,
trackedsize=False,
storedsize=False,
):
return self._revlog.storageinfo(
exclusivefiles=exclusivefiles,
sharedfiles=sharedfiles,
revisionscount=revisionscount,
trackedsize=trackedsize,
storedsize=storedsize,
)
@property
def indexfile(self):
return self._revlog.indexfile
@indexfile.setter
def indexfile(self, value):
self._revlog.indexfile = value
@property
def opener(self):
return self._revlog.opener
@opener.setter
def opener(self, value):
self._revlog.opener = value
@interfaceutil.implementer(repository.imanifestlog)
class manifestlog(object):
"""A collection class representing the collection of manifest snapshots
referenced by commits in the repository.
In this situation, 'manifest' refers to the abstract concept of a snapshot
of the list of files in the given commit. Consumers of the output of this
class do not care about the implementation details of the actual manifests
they receive (i.e. tree or flat or lazily loaded, etc)."""
def __init__(self, opener, repo, rootstore, narrowmatch):
usetreemanifest = False
cachesize = 4
opts = getattr(opener, 'options', None)
if opts is not None:
usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
cachesize = opts.get(b'manifestcachesize', cachesize)
self._treemanifests = usetreemanifest
self._rootstore = rootstore
self._rootstore._setupmanifestcachehooks(repo)
self._narrowmatch = narrowmatch
# A cache of the manifestctx or treemanifestctx for each directory
self._dirmancache = {}
self._dirmancache[b''] = util.lrucachedict(cachesize)
self._cachesize = cachesize
def __getitem__(self, node):
"""Retrieves the manifest instance for the given node. Throws a
LookupError if not found.
"""
return self.get(b'', node)
def get(self, tree, node, verify=True):
"""Retrieves the manifest instance for the given node. Throws a
LookupError if not found.
`verify` - if True an exception will be thrown if the node is not in
the revlog
"""
if node in self._dirmancache.get(tree, ()):
return self._dirmancache[tree][node]
if not self._narrowmatch.always():
if not self._narrowmatch.visitdir(tree[:-1]):
return excludeddirmanifestctx(tree, node)
if tree:
if self._rootstore._treeondisk:
if verify:
# Side-effect is LookupError is raised if node doesn't
# exist.
self.getstorage(tree).rev(node)
m = treemanifestctx(self, tree, node)
else:
raise error.Abort(
_(
b"cannot ask for manifest directory '%s' in a flat "
b"manifest"
)
% tree
)
else:
if verify:
# Side-effect is LookupError is raised if node doesn't exist.
self._rootstore.rev(node)
if self._treemanifests:
m = treemanifestctx(self, b'', node)
else:
m = manifestctx(self, node)
if node != nullid:
mancache = self._dirmancache.get(tree)
if not mancache:
mancache = util.lrucachedict(self._cachesize)
self._dirmancache[tree] = mancache
mancache[node] = m
return m
def getstorage(self, tree):
return self._rootstore.dirlog(tree)
def clearcaches(self, clear_persisted_data=False):
self._dirmancache.clear()
self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
def rev(self, node):
return self._rootstore.rev(node)
def update_caches(self, transaction):
return self._rootstore._revlog.update_caches(transaction=transaction)
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
def __init__(self, manifestlog):
self._manifestlog = manifestlog
self._manifestdict = manifestdict()
def _storage(self):
return self._manifestlog.getstorage(b'')
def copy(self):
memmf = memmanifestctx(self._manifestlog)
memmf._manifestdict = self.read().copy()
return memmf
def read(self):
return self._manifestdict
def write(self, transaction, link, p1, p2, added, removed, match=None):
return self._storage().add(
self._manifestdict,
transaction,
link,
p1,
p2,
added,
removed,
match=match,
)
@interfaceutil.implementer(repository.imanifestrevisionstored)
class manifestctx(object):
"""A class representing a single revision of a manifest, including its
contents, its parent revs, and its linkrev.
"""
def __init__(self, manifestlog, node):
self._manifestlog = manifestlog
self._data = None
self._node = node
# TODO: We eventually want p1, p2, and linkrev exposed on this class,
# but let's add it later when something needs it and we can load it
# lazily.
# self.p1, self.p2 = store.parents(node)
# rev = store.rev(node)
# self.linkrev = store.linkrev(rev)
def _storage(self):
return self._manifestlog.getstorage(b'')
def node(self):
return self._node
def copy(self):
memmf = memmanifestctx(self._manifestlog)
memmf._manifestdict = self.read().copy()
return memmf
@propertycache
def parents(self):
return self._storage().parents(self._node)
def read(self):
if self._data is None:
if self._node == nullid:
self._data = manifestdict()
else:
store = self._storage()
if self._node in store.fulltextcache:
text = pycompat.bytestr(store.fulltextcache[self._node])
else:
text = store.revision(self._node)
arraytext = bytearray(text)
store.fulltextcache[self._node] = arraytext
self._data = manifestdict(text)
return self._data
def readfast(self, shallow=False):
"""Calls either readdelta or read, based on which would be less work.
readdelta is called if the delta is against the p1, and therefore can be
read quickly.
If `shallow` is True, nothing changes since this is a flat manifest.
"""
store = self._storage()
r = store.rev(self._node)
deltaparent = store.deltaparent(r)
if deltaparent != nullrev and deltaparent in store.parentrevs(r):
return self.readdelta()
return self.read()
def readdelta(self, shallow=False):
"""Returns a manifest containing just the entries that are present
in this manifest, but not in its p1 manifest. This is efficient to read
if the revlog delta is already p1.
Changing the value of `shallow` has no effect on flat manifests.
"""
store = self._storage()
r = store.rev(self._node)
d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
return manifestdict(d)
def find(self, key):
return self.read().find(key)
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memtreemanifestctx(object):
def __init__(self, manifestlog, dir=b''):
self._manifestlog = manifestlog
self._dir = dir
self._treemanifest = treemanifest()
def _storage(self):
return self._manifestlog.getstorage(b'')
def copy(self):
memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
memmf._treemanifest = self._treemanifest.copy()
return memmf
def read(self):
return self._treemanifest
def write(self, transaction, link, p1, p2, added, removed, match=None):
def readtree(dir, node):
return self._manifestlog.get(dir, node).read()
return self._storage().add(
self._treemanifest,
transaction,
link,
p1,
p2,
added,
removed,
readtree=readtree,
match=match,
)
@interfaceutil.implementer(repository.imanifestrevisionstored)
class treemanifestctx(object):
def __init__(self, manifestlog, dir, node):
self._manifestlog = manifestlog
self._dir = dir
self._data = None
self._node = node
# TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
# we can instantiate treemanifestctx objects for directories we don't
# have on disk.
# self.p1, self.p2 = store.parents(node)
# rev = store.rev(node)
# self.linkrev = store.linkrev(rev)
def _storage(self):
narrowmatch = self._manifestlog._narrowmatch
if not narrowmatch.always():
if not narrowmatch.visitdir(self._dir[:-1]):
return excludedmanifestrevlog(self._dir)
return self._manifestlog.getstorage(self._dir)
def read(self):
if self._data is None:
store = self._storage()
if self._node == nullid:
self._data = treemanifest()
# TODO accessing non-public API
elif store._treeondisk:
m = treemanifest(dir=self._dir)
def gettext():
return store.revision(self._node)
def readsubtree(dir, subm):
# Set verify to False since we need to be able to create
# subtrees for trees that don't exist on disk.
return self._manifestlog.get(dir, subm, verify=False).read()
m.read(gettext, readsubtree)
m.setnode(self._node)
self._data = m
else:
if self._node in store.fulltextcache:
text = pycompat.bytestr(store.fulltextcache[self._node])
else:
text = store.revision(self._node)
arraytext = bytearray(text)
store.fulltextcache[self._node] = arraytext
self._data = treemanifest(dir=self._dir, text=text)
return self._data
def node(self):
return self._node
def copy(self):
memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
memmf._treemanifest = self.read().copy()
return memmf
@propertycache
def parents(self):
return self._storage().parents(self._node)
def readdelta(self, shallow=False):
"""Returns a manifest containing just the entries that are present
in this manifest, but not in its p1 manifest. This is efficient to read
if the revlog delta is already p1.
If `shallow` is True, this will read the delta for this directory,
without recursively reading subdirectory manifests. Instead, any
subdirectory entry will be reported as it appears in the manifest, i.e.
the subdirectory will be reported among files and distinguished only by
its 't' flag.
"""
store = self._storage()
if shallow:
r = store.rev(self._node)
d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
return manifestdict(d)
else:
# Need to perform a slow delta
r0 = store.deltaparent(store.rev(self._node))
m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
m1 = self.read()
md = treemanifest(dir=self._dir)
for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
if n1:
md[f] = n1
if fl1:
md.setflag(f, fl1)
return md
def readfast(self, shallow=False):
"""Calls either readdelta or read, based on which would be less work.
readdelta is called if the delta is against the p1, and therefore can be
read quickly.
If `shallow` is True, it only returns the entries from this manifest,
and not any submanifests.
"""
store = self._storage()
r = store.rev(self._node)
deltaparent = store.deltaparent(r)
if deltaparent != nullrev and deltaparent in store.parentrevs(r):
return self.readdelta(shallow=shallow)
if shallow:
return manifestdict(store.revision(self._node))
else:
return self.read()
def find(self, key):
return self.read().find(key)
class excludeddir(treemanifest):
"""Stand-in for a directory that is excluded from the repository.
With narrowing active on a repository that uses treemanifests,
some of the directory revlogs will be excluded from the resulting
clone. This is a huge storage win for clients, but means we need
some sort of pseudo-manifest to surface to internals so we can
detect a merge conflict outside the narrowspec. That's what this
class is: it stands in for a directory whose node is known, but
whose contents are unknown.
"""
def __init__(self, dir, node):
super(excludeddir, self).__init__(dir)
self._node = node
# Add an empty file, which will be included by iterators and such,
# appearing as the directory itself (i.e. something like "dir/")
self._files[b''] = node
self._flags[b''] = b't'
# Manifests outside the narrowspec should never be modified, so avoid
# copying. This makes a noticeable difference when there are very many
# directories outside the narrowspec. Also, it makes sense for the copy to
# be of the same type as the original, which would not happen with the
# super type's copy().
def copy(self):
return self
class excludeddirmanifestctx(treemanifestctx):
"""context wrapper for excludeddir - see that docstring for rationale"""
def __init__(self, dir, node):
self._dir = dir
self._node = node
def read(self):
return excludeddir(self._dir, self._node)
def readfast(self, shallow=False):
# special version of readfast since we don't have underlying storage
return self.read()
def write(self, *args):
raise error.ProgrammingError(
b'attempt to write manifest from excluded dir %s' % self._dir
)
class excludedmanifestrevlog(manifestrevlog):
"""Stand-in for excluded treemanifest revlogs.
When narrowing is active on a treemanifest repository, we'll have
references to directories we can't see due to the revlog being
skipped. This class exists to conform to the manifestrevlog
interface for those directories and proactively prevent writes to
outside the narrowspec.
"""
def __init__(self, dir):
self._dir = dir
def __len__(self):
raise error.ProgrammingError(
b'attempt to get length of excluded dir %s' % self._dir
)
def rev(self, node):
raise error.ProgrammingError(
b'attempt to get rev from excluded dir %s' % self._dir
)
def linkrev(self, node):
raise error.ProgrammingError(
b'attempt to get linkrev from excluded dir %s' % self._dir
)
def node(self, rev):
raise error.ProgrammingError(
b'attempt to get node from excluded dir %s' % self._dir
)
def add(self, *args, **kwargs):
# We should never write entries in dirlogs outside the narrow clone.
# However, the method still gets called from writesubtree() in
# _addtree(), so we need to handle it. We should possibly make that
# avoid calling add() with a clean manifest (_dirty is always False
# in excludeddir instances).
pass
```
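As a rough illustration of the flat manifest format and the `diff()` result shape documented above, the following sketch builds two small `manifestdict` objects by hand. It assumes `mercurial` is importable in the current environment; the file names and hex nodes are invented.

```python
# Hypothetical usage sketch; the manifest text and nodes below are made up.
from mercurial.manifest import manifestdict

node_a = b'a' * 40  # fake hex-encoded sha1
node_b = b'b' * 40

m1 = manifestdict(b'bar.txt\0' + node_a + b'\nfoo.py\0' + node_a + b'\n')
m2 = manifestdict(b'bar.txt\0' + node_a + b'\nfoo.py\0' + node_b + b'x\n')

# diff() maps each changed filename to ((node1, flags1), (node2, flags2)).
for fn, ((n1, fl1), (n2, fl2)) in sorted(m1.diff(m2).items()):
    print(fn, fl1, fl2)  # only b'foo.py' differs (new node, now executable)
```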
#### File: site-packages/mercurial/minifileset.py
```python
from __future__ import absolute_import
from .i18n import _
from . import (
error,
fileset,
filesetlang,
pycompat,
)
def _sizep(x):
# i18n: "size" is a keyword
expr = filesetlang.getstring(x, _(b"size requires an expression"))
return fileset.sizematcher(expr)
def _compile(tree):
if not tree:
raise error.ParseError(_(b"missing argument"))
op = tree[0]
if op == b'withstatus':
return _compile(tree[1])
elif op in {b'symbol', b'string', b'kindpat'}:
name = filesetlang.getpattern(
tree, {b'path'}, _(b'invalid file pattern')
)
if name.startswith(b'**'): # file extension test, ex. "**.tar.gz"
ext = name[2:]
for c in pycompat.bytestr(ext):
if c in b'*{}[]?/\\':
raise error.ParseError(_(b'reserved character: %s') % c)
return lambda n, s: n.endswith(ext)
elif name.startswith(b'path:'): # directory or full path test
p = name[5:] # prefix
pl = len(p)
f = lambda n, s: n.startswith(p) and (
len(n) == pl or n[pl : pl + 1] == b'/'
)
return f
raise error.ParseError(
_(b"unsupported file pattern: %s") % name,
hint=_(b'paths must be prefixed with "path:"'),
)
elif op in {b'or', b'patterns'}:
funcs = [_compile(x) for x in tree[1:]]
return lambda n, s: any(f(n, s) for f in funcs)
elif op == b'and':
func1 = _compile(tree[1])
func2 = _compile(tree[2])
return lambda n, s: func1(n, s) and func2(n, s)
elif op == b'not':
return lambda n, s: not _compile(tree[1])(n, s)
elif op == b'func':
symbols = {
b'all': lambda n, s: True,
b'none': lambda n, s: False,
b'size': lambda n, s: _sizep(tree[2])(s),
}
name = filesetlang.getsymbol(tree[1])
if name in symbols:
return symbols[name]
raise error.UnknownIdentifier(name, symbols.keys())
elif op == b'minus': # equivalent to 'x and not y'
func1 = _compile(tree[1])
func2 = _compile(tree[2])
return lambda n, s: func1(n, s) and not func2(n, s)
elif op == b'list':
raise error.ParseError(
_(b"can't use a list in this context"),
hint=_(b'see \'hg help "filesets.x or y"\''),
)
raise error.ProgrammingError(b'illegal tree: %r' % (tree,))
def compile(text):
"""generate a function (path, size) -> bool from filter specification.
"text" could contain the operators defined by the fileset language for
common logic operations, and parenthesis for grouping. The supported path
tests are '**.extname' for file extension test, and '"path:dir/subdir"'
for prefix test. The ``size()`` predicate is borrowed from filesets to test
file size. The predicates ``all()`` and ``none()`` are also supported.
'(**.php & size(">10MB")) | **.zip | (path:bin & !path:bin/README)' for
example, will catch all php files whose size is greater than 10 MB, all
files whose name ends with ".zip", and all files under "bin" in the repo
root except for "bin/README".
"""
tree = filesetlang.parse(text)
tree = filesetlang.analyze(tree)
tree = filesetlang.optimize(tree)
return _compile(tree)
```
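The `compile()` docstring above describes the supported predicates. Here is a small sketch of how the compiled `(path, size) -> bool` function might be exercised; it assumes `mercurial` is importable, and the file names and sizes are made up.

```python
# Hypothetical usage sketch; file names and sizes are invented.
from mercurial import minifileset

pred = minifileset.compile(b'**.zip | (path:bin & size(">1MB"))')

print(pred(b'dist/archive.zip', 10))       # True: matches the **.zip test
print(pred(b'bin/tool', 5 * 1024 * 1024))  # True: under bin/ and > 1MB
print(pred(b'bin/README', 10))             # False: too small for size(">1MB")
```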
#### File: site-packages/mercurial/namespaces.py
```python
from __future__ import absolute_import
from .i18n import _
from . import (
pycompat,
registrar,
templatekw,
util,
)
def tolist(val):
"""
a convenience method to return an empty list instead of None
"""
if val is None:
return []
else:
return [val]
class namespaces(object):
"""provides an interface to register and operate on multiple namespaces. See
the namespace class below for details on the namespace object.
"""
_names_version = 0
def __init__(self):
self._names = util.sortdict()
columns = templatekw.getlogcolumns()
# we need current mercurial named objects (bookmarks, tags, and
# branches) to be initialized somewhere, so that place is here
bmknames = lambda repo: repo._bookmarks.keys()
bmknamemap = lambda repo, name: tolist(repo._bookmarks.get(name))
bmknodemap = lambda repo, node: repo.nodebookmarks(node)
n = namespace(
b"bookmarks",
templatename=b"bookmark",
logfmt=columns[b'bookmark'],
listnames=bmknames,
namemap=bmknamemap,
nodemap=bmknodemap,
builtin=True,
)
self.addnamespace(n)
tagnames = lambda repo: [t for t, n in repo.tagslist()]
tagnamemap = lambda repo, name: tolist(repo._tagscache.tags.get(name))
tagnodemap = lambda repo, node: repo.nodetags(node)
n = namespace(
b"tags",
templatename=b"tag",
logfmt=columns[b'tag'],
listnames=tagnames,
namemap=tagnamemap,
nodemap=tagnodemap,
deprecated={b'tip'},
builtin=True,
)
self.addnamespace(n)
bnames = lambda repo: repo.branchmap().keys()
bnamemap = lambda repo, name: tolist(repo.branchtip(name, True))
bnodemap = lambda repo, node: [repo[node].branch()]
n = namespace(
b"branches",
templatename=b"branch",
logfmt=columns[b'branch'],
listnames=bnames,
namemap=bnamemap,
nodemap=bnodemap,
builtin=True,
)
self.addnamespace(n)
def __getitem__(self, namespace):
"""returns the namespace object"""
return self._names[namespace]
def __iter__(self):
return self._names.__iter__()
def get(self, namespace, default=None):
return self._names.get(namespace, default)
def items(self):
return pycompat.iteritems(self._names)
iteritems = items
def addnamespace(self, namespace, order=None):
"""register a namespace
namespace: the name to be registered (in plural form)
order: optional argument to specify the order of namespaces
(e.g. 'branches' should be listed before 'bookmarks')
"""
if order is not None:
self._names.insert(order, namespace.name, namespace)
else:
self._names[namespace.name] = namespace
# we only generate a template keyword if one does not already exist
if namespace.name not in templatekw.keywords:
templatekeyword = registrar.templatekeyword(templatekw.keywords)
@templatekeyword(namespace.name, requires={b'repo', b'ctx'})
def generatekw(context, mapping):
return templatekw.shownames(context, mapping, namespace.name)
def singlenode(self, repo, name):
"""
Return the 'best' node for the given name. What's best is defined
by the namespace's singlenode() function. The first match returned by
a namespace in the defined precedence order is used.
Raises a KeyError if there is no such node.
"""
for ns, v in pycompat.iteritems(self._names):
n = v.singlenode(repo, name)
if n:
return n
raise KeyError(_(b'no such name: %s') % name)
class namespace(object):
"""provides an interface to a namespace
    Namespaces are basically generic many-to-many mappings between some
(namespaced) names and nodes. The goal here is to control the pollution of
jamming things into tags or bookmarks (in extension-land) and to simplify
internal bits of mercurial: log output, tab completion, etc.
More precisely, we define a mapping of names to nodes, and a mapping from
nodes to names. Each mapping returns a list.
Furthermore, each name mapping will be passed a name to lookup which might
not be in its domain. In this case, each method should return an empty list
and not raise an error.
This namespace object will define the properties we need:
'name': the namespace (plural form)
'templatename': name to use for templating (usually the singular form
of the plural namespace name)
'listnames': list of all names in the namespace (usually the keys of a
dictionary)
'namemap': function that takes a name and returns a list of nodes
'nodemap': function that takes a node and returns a list of names
'deprecated': set of names to be masked for ordinary use
'builtin': bool indicating if this namespace is supported by core
Mercurial.
"""
def __init__(
self,
name,
templatename=None,
logname=None,
colorname=None,
logfmt=None,
listnames=None,
namemap=None,
nodemap=None,
deprecated=None,
builtin=False,
singlenode=None,
):
"""create a namespace
name: the namespace to be registered (in plural form)
templatename: the name to use for templating
logname: the name to use for log output; if not specified templatename
is used
colorname: the name to use for colored log output; if not specified
logname is used
logfmt: the format to use for (i18n-ed) log output; if not specified
it is composed from logname
listnames: function to list all names
        namemap: function that inputs a name, outputs node(s)
        nodemap: function that inputs a node, outputs name(s)
deprecated: set of names to be masked for ordinary use
builtin: whether namespace is implemented by core Mercurial
        singlenode: function that inputs a name, outputs the best node (or None)
"""
self.name = name
self.templatename = templatename
self.logname = logname
self.colorname = colorname
self.logfmt = logfmt
self.listnames = listnames
self.namemap = namemap
self.nodemap = nodemap
if singlenode:
self.singlenode = singlenode
# if logname is not specified, use the template name as backup
if self.logname is None:
self.logname = self.templatename
# if colorname is not specified, just use the logname as a backup
if self.colorname is None:
self.colorname = self.logname
# if logfmt is not specified, compose it from logname as backup
if self.logfmt is None:
# i18n: column positioning for "hg log"
self.logfmt = (b"%s:" % self.logname).ljust(13) + b"%s\n"
if deprecated is None:
self.deprecated = set()
else:
self.deprecated = deprecated
self.builtin = builtin
def names(self, repo, node):
"""method that returns a (sorted) list of names in a namespace that
match a given node"""
return sorted(self.nodemap(repo, node))
def nodes(self, repo, name):
"""method that returns a list of nodes in a namespace that
match a given name.
"""
return sorted(self.namemap(repo, name))
def singlenode(self, repo, name):
"""returns the best node for the given name
By default, the best node is the node from nodes() with the highest
        revision number. It can be overridden by the namespace."""
n = self.namemap(repo, name)
if n:
# return max revision number
if len(n) > 1:
cl = repo.changelog
maxrev = max(cl.rev(node) for node in n)
return cl.node(maxrev)
return n[0]
return None
```
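The `namespace` constructor documented above is the extension-facing API. A hedged sketch of how a third-party extension might register its own namespace follows; the `guineapigs` name and the lookup lambdas are purely hypothetical, and only `listnames`/`namemap`/`nodemap` are supplied since `logname`/`logfmt` fall back to defaults derived from `templatename`.

```python
# Hypothetical extension snippet: register a custom "guineapigs" namespace
# on the repository's namespaces registry (repo.names).
from mercurial import namespaces

def reposetup(ui, repo):
    ns = namespaces.namespace(
        b'guineapigs',
        templatename=b'guineapig',
        listnames=lambda repo: [b'fluffy', b'nibbles'],  # all names
        namemap=lambda repo, name: [],  # name -> nodes (empty in this toy case)
        nodemap=lambda repo, node: [],  # node -> names
    )
    repo.names.addnamespace(ns)
```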
#### File: site-packages/mercurial/node.py
```python
from __future__ import absolute_import
import binascii
# This ugly style has a noticeable effect in manifest parsing
hex = binascii.hexlify
# Adapt to Python 3 API changes. If this ends up showing up in
# profiles, we can use this version only on Python 3, and forward
# binascii.unhexlify like we used to on Python 2.
def bin(s):
try:
return binascii.unhexlify(s)
except binascii.Error as e:
raise TypeError(e)
nullrev = -1
# In hex, this is '0000000000000000000000000000000000000000'
nullid = b"\0" * 20
nullhex = hex(nullid)
# Phony node value to stand-in for new files in some uses of
# manifests.
# In hex, this is '2121212121212121212121212121212121212121'
newnodeid = b'!!!!!!!!!!!!!!!!!!!!'
# In hex, this is '3030303030303030303030303030306164646564'
addednodeid = b'000000000000000added'
# In hex, this is '3030303030303030303030306d6f646966696564'
modifiednodeid = b'000000000000modified'
wdirfilenodeids = {newnodeid, addednodeid, modifiednodeid}
# pseudo identifiers for working directory
# (they are experimental, so don't add too many dependencies on them)
wdirrev = 0x7FFFFFFF
# In hex, this is 'ffffffffffffffffffffffffffffffffffffffff'
wdirid = b"\xff" * 20
wdirhex = hex(wdirid)
def short(node):
return hex(node[:6])
```
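A quick illustration of the helpers defined above; `short()` renders the first six bytes of a node as twelve hex digits. This is a standalone sketch assuming `mercurial.node` is importable.

```python
# Round-trip the nullid sentinel through hex()/bin() and shorten it.
from mercurial.node import bin, hex, nullhex, nullid, short

assert hex(nullid) == nullhex == b'0' * 40
assert bin(nullhex) == nullid
assert short(nullid) == b'0' * 12  # 6 bytes -> 12 hex characters
```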
#### File: site-packages/mercurial/sslutil.py
```python
from __future__ import absolute_import
import hashlib
import os
import re
import ssl
from .i18n import _
from .pycompat import getattr
from .node import hex
from . import (
encoding,
error,
pycompat,
util,
)
from .utils import (
hashutil,
resourceutil,
stringutil,
)
# Python 2.7.9+ overhauled the built-in SSL/TLS features of Python. It added
# support for TLS 1.1, TLS 1.2, SNI, system CA stores, etc. These features are
# all exposed via the "ssl" module.
#
# We require in setup.py the presence of ssl.SSLContext, which indicates modern
# SSL/TLS support.
configprotocols = {
b'tls1.0',
b'tls1.1',
b'tls1.2',
}
hassni = getattr(ssl, 'HAS_SNI', False)
# ssl.HAS_TLSv1* are preferred to check support but they were added in Python
# 3.7. Prior to CPython commit 6e8cda91d92da72800d891b2fc2073ecbc134d98
# (backported to the 3.7 branch), ssl.PROTOCOL_TLSv1_1 / ssl.PROTOCOL_TLSv1_2
# were defined only if compiled against a OpenSSL version with TLS 1.1 / 1.2
# support. At the mentioned commit, they were unconditionally defined.
supportedprotocols = set()
if getattr(ssl, 'HAS_TLSv1', util.safehasattr(ssl, 'PROTOCOL_TLSv1')):
supportedprotocols.add(b'tls1.0')
if getattr(ssl, 'HAS_TLSv1_1', util.safehasattr(ssl, 'PROTOCOL_TLSv1_1')):
supportedprotocols.add(b'tls1.1')
if getattr(ssl, 'HAS_TLSv1_2', util.safehasattr(ssl, 'PROTOCOL_TLSv1_2')):
supportedprotocols.add(b'tls1.2')
def _hostsettings(ui, hostname):
"""Obtain security settings for a hostname.
Returns a dict of settings relevant to that hostname.
"""
bhostname = pycompat.bytesurl(hostname)
s = {
# Whether we should attempt to load default/available CA certs
# if an explicit ``cafile`` is not defined.
b'allowloaddefaultcerts': True,
# List of 2-tuple of (hash algorithm, hash).
b'certfingerprints': [],
# Path to file containing concatenated CA certs. Used by
# SSLContext.load_verify_locations().
b'cafile': None,
# Whether certificate verification should be disabled.
b'disablecertverification': False,
# Whether the legacy [hostfingerprints] section has data for this host.
b'legacyfingerprint': False,
# String representation of minimum protocol to be used for UI
# presentation.
b'minimumprotocol': None,
# ssl.CERT_* constant used by SSLContext.verify_mode.
b'verifymode': None,
# OpenSSL Cipher List to use (instead of default).
b'ciphers': None,
}
# Allow minimum TLS protocol to be specified in the config.
def validateprotocol(protocol, key):
if protocol not in configprotocols:
raise error.Abort(
_(b'unsupported protocol from hostsecurity.%s: %s')
% (key, protocol),
hint=_(b'valid protocols: %s')
% b' '.join(sorted(configprotocols)),
)
# We default to TLS 1.1+ because TLS 1.0 has known vulnerabilities (like
# BEAST and POODLE). We allow users to downgrade to TLS 1.0+ via config
# options in case a legacy server is encountered.
# setup.py checks that TLS 1.1 or TLS 1.2 is present, so the following
# assert should not fail.
assert supportedprotocols - {b'tls1.0'}
defaultminimumprotocol = b'tls1.1'
key = b'minimumprotocol'
minimumprotocol = ui.config(b'hostsecurity', key, defaultminimumprotocol)
validateprotocol(minimumprotocol, key)
key = b'%s:minimumprotocol' % bhostname
minimumprotocol = ui.config(b'hostsecurity', key, minimumprotocol)
validateprotocol(minimumprotocol, key)
# If --insecure is used, we allow the use of TLS 1.0 despite config options.
# We always print a "connection security to %s is disabled..." message when
# --insecure is used. So no need to print anything more here.
if ui.insecureconnections:
minimumprotocol = b'tls1.0'
s[b'minimumprotocol'] = minimumprotocol
ciphers = ui.config(b'hostsecurity', b'ciphers')
ciphers = ui.config(b'hostsecurity', b'%s:ciphers' % bhostname, ciphers)
s[b'ciphers'] = ciphers
# Look for fingerprints in [hostsecurity] section. Value is a list
# of <alg>:<fingerprint> strings.
fingerprints = ui.configlist(
b'hostsecurity', b'%s:fingerprints' % bhostname
)
for fingerprint in fingerprints:
if not (fingerprint.startswith((b'sha1:', b'sha256:', b'sha512:'))):
raise error.Abort(
_(b'invalid fingerprint for %s: %s') % (bhostname, fingerprint),
hint=_(b'must begin with "sha1:", "sha256:", or "sha512:"'),
)
alg, fingerprint = fingerprint.split(b':', 1)
fingerprint = fingerprint.replace(b':', b'').lower()
s[b'certfingerprints'].append((alg, fingerprint))
# Fingerprints from [hostfingerprints] are always SHA-1.
for fingerprint in ui.configlist(b'hostfingerprints', bhostname):
fingerprint = fingerprint.replace(b':', b'').lower()
s[b'certfingerprints'].append((b'sha1', fingerprint))
s[b'legacyfingerprint'] = True
# If a host cert fingerprint is defined, it is the only thing that
# matters. No need to validate CA certs.
if s[b'certfingerprints']:
s[b'verifymode'] = ssl.CERT_NONE
s[b'allowloaddefaultcerts'] = False
# If --insecure is used, don't take CAs into consideration.
elif ui.insecureconnections:
s[b'disablecertverification'] = True
s[b'verifymode'] = ssl.CERT_NONE
s[b'allowloaddefaultcerts'] = False
if ui.configbool(b'devel', b'disableloaddefaultcerts'):
s[b'allowloaddefaultcerts'] = False
# If both fingerprints and a per-host ca file are specified, issue a warning
# because users should not be surprised about what security is or isn't
# being performed.
cafile = ui.config(b'hostsecurity', b'%s:verifycertsfile' % bhostname)
if s[b'certfingerprints'] and cafile:
ui.warn(
_(
b'(hostsecurity.%s:verifycertsfile ignored when host '
b'fingerprints defined; using host fingerprints for '
b'verification)\n'
)
% bhostname
)
# Try to hook up CA certificate validation unless something above
# makes it not necessary.
if s[b'verifymode'] is None:
# Look at per-host ca file first.
if cafile:
cafile = util.expandpath(cafile)
if not os.path.exists(cafile):
raise error.Abort(
_(b'path specified by %s does not exist: %s')
% (
b'hostsecurity.%s:verifycertsfile' % (bhostname,),
cafile,
)
)
s[b'cafile'] = cafile
else:
# Find global certificates file in config.
cafile = ui.config(b'web', b'cacerts')
if cafile:
cafile = util.expandpath(cafile)
if not os.path.exists(cafile):
raise error.Abort(
_(b'could not find web.cacerts: %s') % cafile
)
elif s[b'allowloaddefaultcerts']:
# CAs not defined in config. Try to find system bundles.
cafile = _defaultcacerts(ui)
if cafile:
ui.debug(b'using %s for CA file\n' % cafile)
s[b'cafile'] = cafile
# Require certificate validation if CA certs are being loaded and
# verification hasn't been disabled above.
if cafile or s[b'allowloaddefaultcerts']:
s[b'verifymode'] = ssl.CERT_REQUIRED
else:
# At this point we don't have a fingerprint, aren't being
# explicitly insecure, and can't load CA certs. Connecting
# is insecure. We allow the connection and abort during
# validation (once we have the fingerprint to print to the
# user).
s[b'verifymode'] = ssl.CERT_NONE
assert s[b'verifymode'] is not None
return s
def commonssloptions(minimumprotocol):
"""Return SSLContext options common to servers and clients."""
if minimumprotocol not in configprotocols:
raise ValueError(b'protocol value not supported: %s' % minimumprotocol)
# SSLv2 and SSLv3 are broken. We ban them outright.
options = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
if minimumprotocol == b'tls1.0':
# Defaults above are to use TLS 1.0+
pass
elif minimumprotocol == b'tls1.1':
options |= ssl.OP_NO_TLSv1
elif minimumprotocol == b'tls1.2':
options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
else:
raise error.Abort(_(b'this should not happen'))
# Prevent CRIME.
# There is no guarantee this attribute is defined on the module.
options |= getattr(ssl, 'OP_NO_COMPRESSION', 0)
return options
def wrapsocket(sock, keyfile, certfile, ui, serverhostname=None):
"""Add SSL/TLS to a socket.
This is a glorified wrapper for ``ssl.wrap_socket()``. It makes sane
choices based on what security options are available.
In addition to the arguments supported by ``ssl.wrap_socket``, we allow
the following additional arguments:
* serverhostname - The expected hostname of the remote server. If the
server (and client) support SNI, this tells the server which certificate
to use.
"""
if not serverhostname:
raise error.Abort(_(b'serverhostname argument is required'))
if b'SSLKEYLOGFILE' in encoding.environ:
try:
import sslkeylog
sslkeylog.set_keylog(
pycompat.fsdecode(encoding.environ[b'SSLKEYLOGFILE'])
)
ui.warnnoi18n(
b'sslkeylog enabled by SSLKEYLOGFILE environment variable\n'
)
except ImportError:
ui.warnnoi18n(
b'sslkeylog module missing, '
b'but SSLKEYLOGFILE set in environment\n'
)
for f in (keyfile, certfile):
if f and not os.path.exists(f):
raise error.Abort(
_(b'certificate file (%s) does not exist; cannot connect to %s')
% (f, pycompat.bytesurl(serverhostname)),
hint=_(
b'restore missing file or fix references '
b'in Mercurial config'
),
)
settings = _hostsettings(ui, serverhostname)
# We can't use ssl.create_default_context() because it calls
# load_default_certs() unless CA arguments are passed to it. We want to
# have explicit control over CA loading because implicitly loading
# CAs may undermine the user's intent. For example, a user may define a CA
# bundle with a specific CA cert removed. If the system/default CA bundle
# is loaded and contains that removed CA, you've just undone the user's
# choice.
#
# Despite its name, PROTOCOL_SSLv23 selects the highest protocol that both
# ends support, including TLS protocols. commonssloptions() restricts the
# set of allowed protocols.
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= commonssloptions(settings[b'minimumprotocol'])
sslcontext.verify_mode = settings[b'verifymode']
if settings[b'ciphers']:
try:
sslcontext.set_ciphers(pycompat.sysstr(settings[b'ciphers']))
except ssl.SSLError as e:
raise error.Abort(
_(b'could not set ciphers: %s')
% stringutil.forcebytestr(e.args[0]),
hint=_(b'change cipher string (%s) in config')
% settings[b'ciphers'],
)
if certfile is not None:
def password():
f = keyfile or certfile
return ui.getpass(_(b'passphrase for %s: ') % f, b'')
sslcontext.load_cert_chain(certfile, keyfile, password)
if settings[b'cafile'] is not None:
try:
sslcontext.load_verify_locations(cafile=settings[b'cafile'])
except ssl.SSLError as e:
if len(e.args) == 1: # pypy has different SSLError args
msg = e.args[0]
else:
msg = e.args[1]
raise error.Abort(
_(b'error loading CA file %s: %s')
% (settings[b'cafile'], stringutil.forcebytestr(msg)),
hint=_(b'file is empty or malformed?'),
)
caloaded = True
elif settings[b'allowloaddefaultcerts']:
# This is a no-op on old Python.
sslcontext.load_default_certs()
caloaded = True
else:
caloaded = False
try:
sslsocket = sslcontext.wrap_socket(sock, server_hostname=serverhostname)
except ssl.SSLError as e:
# If we're doing certificate verification and no CA certs are loaded,
# that is almost certainly the reason why verification failed. Provide
# a hint to the user.
# The exception handler is here to handle bugs around cert attributes:
# https://bugs.python.org/issue20916#msg213479. (See issues5313.)
# When the main 20916 bug occurs, 'sslcontext.get_ca_certs()' is a
# non-empty list, but the following conditional is otherwise True.
try:
if (
caloaded
and settings[b'verifymode'] == ssl.CERT_REQUIRED
and not sslcontext.get_ca_certs()
):
ui.warn(
_(
b'(an attempt was made to load CA certificates but '
b'none were loaded; see '
b'https://mercurial-scm.org/wiki/SecureConnections '
b'for how to configure Mercurial to avoid this '
b'error)\n'
)
)
except ssl.SSLError:
pass
# Try to print more helpful error messages for known failures.
if util.safehasattr(e, b'reason'):
# This error occurs when the client and server don't share a
# common/supported SSL/TLS protocol. We've disabled SSLv2 and SSLv3
# outright. Hopefully the reason for this error is that we require
# TLS 1.1+ and the server only supports TLS 1.0. Whatever the
# reason, try to emit an actionable warning.
if e.reason == 'UNSUPPORTED_PROTOCOL':
# We attempted TLS 1.0+.
if settings[b'minimumprotocol'] == b'tls1.0':
# We support more than just TLS 1.0+. If this happens,
# the likely scenario is either the client or the server
# is really old. (e.g. server doesn't support TLS 1.0+ or
# client doesn't support modern TLS versions introduced
# several years from when this comment was written).
if supportedprotocols != {b'tls1.0'}:
ui.warn(
_(
b'(could not communicate with %s using security '
b'protocols %s; if you are using a modern Mercurial '
b'version, consider contacting the operator of this '
b'server; see '
b'https://mercurial-scm.org/wiki/SecureConnections '
b'for more info)\n'
)
% (
pycompat.bytesurl(serverhostname),
b', '.join(sorted(supportedprotocols)),
)
)
else:
ui.warn(
_(
b'(could not communicate with %s using TLS 1.0; the '
b'likely cause of this is the server no longer '
b'supports TLS 1.0 because it has known security '
b'vulnerabilities; see '
b'https://mercurial-scm.org/wiki/SecureConnections '
b'for more info)\n'
)
% pycompat.bytesurl(serverhostname)
)
else:
# We attempted TLS 1.1+. We can only get here if the client
# supports the configured protocol. So the likely reason is
# the client wants better security than the server can
# offer.
ui.warn(
_(
b'(could not negotiate a common security protocol (%s+) '
b'with %s; the likely cause is Mercurial is configured '
b'to be more secure than the server can support)\n'
)
% (
settings[b'minimumprotocol'],
pycompat.bytesurl(serverhostname),
)
)
ui.warn(
_(
b'(consider contacting the operator of this '
b'server and ask them to support modern TLS '
b'protocol versions; or, set '
b'hostsecurity.%s:minimumprotocol=tls1.0 to allow '
b'use of legacy, less secure protocols when '
b'communicating with this server)\n'
)
% pycompat.bytesurl(serverhostname)
)
ui.warn(
_(
b'(see https://mercurial-scm.org/wiki/SecureConnections '
b'for more info)\n'
)
)
elif e.reason == 'CERTIFICATE_VERIFY_FAILED' and pycompat.iswindows:
ui.warn(
_(
b'(the full certificate chain may not be available '
b'locally; see "hg help debugssl")\n'
)
)
raise
# check if wrap_socket failed silently because socket had been
# closed
# - see http://bugs.python.org/issue13721
if not sslsocket.cipher():
raise error.SecurityError(_(b'ssl connection failed'))
sslsocket._hgstate = {
b'caloaded': caloaded,
b'hostname': serverhostname,
b'settings': settings,
b'ui': ui,
}
return sslsocket
def wrapserversocket(
sock, ui, certfile=None, keyfile=None, cafile=None, requireclientcert=False
):
"""Wrap a socket for use by servers.
``certfile`` and ``keyfile`` specify the files containing the certificate's
public and private keys, respectively. Both keys can be defined in the same
file via ``certfile`` (the private key must come first in the file).
``cafile`` defines the path to certificate authorities.
``requireclientcert`` specifies whether to require client certificates.
Typically ``cafile`` is only defined if ``requireclientcert`` is true.
"""
# This function is not used much by core Mercurial, so the error messaging
# doesn't have to be as detailed as for wrapsocket().
for f in (certfile, keyfile, cafile):
if f and not os.path.exists(f):
raise error.Abort(
_(b'referenced certificate file (%s) does not exist') % f
)
# Despite its name, PROTOCOL_SSLv23 selects the highest protocol that both
# ends support, including TLS protocols. commonssloptions() restricts the
# set of allowed protocols.
protocol = ssl.PROTOCOL_SSLv23
options = commonssloptions(b'tls1.0')
# This config option is intended for use in tests only. It is a giant
# footgun to kill security. Don't define it.
exactprotocol = ui.config(b'devel', b'serverexactprotocol')
if exactprotocol == b'tls1.0':
if b'tls1.0' not in supportedprotocols:
raise error.Abort(_(b'TLS 1.0 not supported by this Python'))
protocol = ssl.PROTOCOL_TLSv1
elif exactprotocol == b'tls1.1':
if b'tls1.1' not in supportedprotocols:
raise error.Abort(_(b'TLS 1.1 not supported by this Python'))
protocol = ssl.PROTOCOL_TLSv1_1
elif exactprotocol == b'tls1.2':
if b'tls1.2' not in supportedprotocols:
raise error.Abort(_(b'TLS 1.2 not supported by this Python'))
protocol = ssl.PROTOCOL_TLSv1_2
elif exactprotocol:
raise error.Abort(
_(b'invalid value for serverexactprotocol: %s') % exactprotocol
)
# We /could/ use create_default_context() here since it doesn't load
# CAs when configured for client auth. However, it is hard-coded to
# use ssl.PROTOCOL_SSLv23 which may not be appropriate here.
sslcontext = ssl.SSLContext(protocol)
sslcontext.options |= options
# Improve forward secrecy.
sslcontext.options |= getattr(ssl, 'OP_SINGLE_DH_USE', 0)
sslcontext.options |= getattr(ssl, 'OP_SINGLE_ECDH_USE', 0)
# Use the list of more secure ciphers if found in the ssl module.
if util.safehasattr(ssl, b'_RESTRICTED_SERVER_CIPHERS'):
sslcontext.options |= getattr(ssl, 'OP_CIPHER_SERVER_PREFERENCE', 0)
sslcontext.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)
if requireclientcert:
sslcontext.verify_mode = ssl.CERT_REQUIRED
else:
sslcontext.verify_mode = ssl.CERT_NONE
if certfile or keyfile:
sslcontext.load_cert_chain(certfile=certfile, keyfile=keyfile)
if cafile:
sslcontext.load_verify_locations(cafile=cafile)
return sslcontext.wrap_socket(sock, server_side=True)
class wildcarderror(Exception):
"""Represents an error parsing wildcards in DNS name."""
def _dnsnamematch(dn, hostname, maxwildcards=1):
"""Match DNS names according RFC 6125 section 6.4.3.
This code is effectively copied from CPython's ssl._dnsname_match.
Returns a bool indicating whether the expected hostname matches
the value in ``dn``.
"""
pats = []
if not dn:
return False
dn = pycompat.bytesurl(dn)
hostname = pycompat.bytesurl(hostname)
pieces = dn.split(b'.')
leftmost = pieces[0]
remainder = pieces[1:]
wildcards = leftmost.count(b'*')
if wildcards > maxwildcards:
raise wildcarderror(
_(b'too many wildcards in certificate DNS name: %s') % dn
)
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == b'*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append(b'[^.]+')
elif leftmost.startswith(b'xn--') or hostname.startswith(b'xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(stringutil.reescape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(stringutil.reescape(leftmost).replace(br'\*', b'[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(stringutil.reescape(frag))
pat = re.compile(br'\A' + br'\.'.join(pats) + br'\Z', re.IGNORECASE)
return pat.match(hostname) is not None
def _verifycert(cert, hostname):
"""Verify that cert (in socket.getpeercert() format) matches hostname.
    CRLs are not handled.
Returns error message if any problems are found and None on success.
"""
if not cert:
return _(b'no certificate received')
dnsnames = []
san = cert.get('subjectAltName', [])
for key, value in san:
if key == 'DNS':
try:
if _dnsnamematch(value, hostname):
return
except wildcarderror as e:
return stringutil.forcebytestr(e.args[0])
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no DNS in subjectAltName.
for sub in cert.get('subject', []):
for key, value in sub:
# According to RFC 2818 the most specific Common Name must
# be used.
if key == 'commonName':
# 'subject' entries are unicode.
try:
value = value.encode('ascii')
except UnicodeEncodeError:
return _(b'IDN in certificate not supported')
try:
if _dnsnamematch(value, hostname):
return
except wildcarderror as e:
return stringutil.forcebytestr(e.args[0])
dnsnames.append(value)
dnsnames = [pycompat.bytesurl(d) for d in dnsnames]
if len(dnsnames) > 1:
return _(b'certificate is for %s') % b', '.join(dnsnames)
elif len(dnsnames) == 1:
return _(b'certificate is for %s') % dnsnames[0]
else:
return _(b'no commonName or subjectAltName found in certificate')
def _plainapplepython():
"""return true if this seems to be a pure Apple Python that
* is unfrozen and presumably has the whole mercurial module in the file
system
* presumably is an Apple Python that uses Apple OpenSSL which has patches
for using system certificate store CAs in addition to the provided
cacerts file
"""
if (
not pycompat.isdarwin
or resourceutil.mainfrozen()
or not pycompat.sysexecutable
):
return False
exe = os.path.realpath(pycompat.sysexecutable).lower()
return exe.startswith(b'/usr/bin/python') or exe.startswith(
b'/system/library/frameworks/python.framework/'
)
def _defaultcacerts(ui):
"""return path to default CA certificates or None.
It is assumed this function is called when the returned certificates
file will actually be used to validate connections. Therefore this
function may print warnings or debug messages assuming this usage.
We don't print a message when the Python is able to load default
CA certs because this scenario is detected at socket connect time.
"""
# The "certifi" Python package provides certificates. If it is installed
# and usable, assume the user intends it to be used and use it.
try:
import certifi
certs = certifi.where()
if os.path.exists(certs):
ui.debug(b'using ca certificates from certifi\n')
return pycompat.fsencode(certs)
except (ImportError, AttributeError):
pass
# Apple's OpenSSL has patches that allow a specially constructed certificate
# to load the system CA store. If we're running on Apple Python, use this
# trick.
if _plainapplepython():
dummycert = os.path.join(
os.path.dirname(pycompat.fsencode(__file__)), b'dummycert.pem'
)
if os.path.exists(dummycert):
return dummycert
return None
def validatesocket(sock):
"""Validate a socket meets security requirements.
The passed socket must have been created with ``wrapsocket()``.
"""
shost = sock._hgstate[b'hostname']
host = pycompat.bytesurl(shost)
ui = sock._hgstate[b'ui']
settings = sock._hgstate[b'settings']
try:
peercert = sock.getpeercert(True)
peercert2 = sock.getpeercert()
except AttributeError:
raise error.SecurityError(_(b'%s ssl connection error') % host)
if not peercert:
raise error.SecurityError(
_(b'%s certificate error: no certificate received') % host
)
if settings[b'disablecertverification']:
# We don't print the certificate fingerprint because it shouldn't
# be necessary: if the user requested certificate verification be
# disabled, they presumably already saw a message about the inability
# to verify the certificate and this message would have printed the
# fingerprint. So printing the fingerprint here adds little to no
# value.
ui.warn(
_(
b'warning: connection security to %s is disabled per current '
b'settings; communication is susceptible to eavesdropping '
b'and tampering\n'
)
% host
)
return
# If a certificate fingerprint is pinned, use it and only it to
# validate the remote cert.
peerfingerprints = {
b'sha1': hex(hashutil.sha1(peercert).digest()),
b'sha256': hex(hashlib.sha256(peercert).digest()),
b'sha512': hex(hashlib.sha512(peercert).digest()),
}
def fmtfingerprint(s):
return b':'.join([s[x : x + 2] for x in range(0, len(s), 2)])
nicefingerprint = b'sha256:%s' % fmtfingerprint(peerfingerprints[b'sha256'])
if settings[b'certfingerprints']:
for hash, fingerprint in settings[b'certfingerprints']:
if peerfingerprints[hash].lower() == fingerprint:
ui.debug(
b'%s certificate matched fingerprint %s:%s\n'
% (host, hash, fmtfingerprint(fingerprint))
)
if settings[b'legacyfingerprint']:
ui.warn(
_(
b'(SHA-1 fingerprint for %s found in legacy '
b'[hostfingerprints] section; '
b'if you trust this fingerprint, remove the old '
b'SHA-1 fingerprint from [hostfingerprints] and '
b'add the following entry to the new '
b'[hostsecurity] section: %s:fingerprints=%s)\n'
)
% (host, host, nicefingerprint)
)
return
# Pinned fingerprint didn't match. This is a fatal error.
if settings[b'legacyfingerprint']:
section = b'hostfingerprint'
nice = fmtfingerprint(peerfingerprints[b'sha1'])
else:
section = b'hostsecurity'
nice = b'%s:%s' % (hash, fmtfingerprint(peerfingerprints[hash]))
raise error.SecurityError(
_(b'certificate for %s has unexpected fingerprint %s')
% (host, nice),
hint=_(b'check %s configuration') % section,
)
# Security is enabled but no CAs are loaded. We can't establish trust
# for the cert so abort.
if not sock._hgstate[b'caloaded']:
raise error.SecurityError(
_(
b'unable to verify security of %s (no loaded CA certificates); '
b'refusing to connect'
)
% host,
hint=_(
b'see https://mercurial-scm.org/wiki/SecureConnections for '
b'how to configure Mercurial to avoid this error or set '
b'hostsecurity.%s:fingerprints=%s to trust this server'
)
% (host, nicefingerprint),
)
msg = _verifycert(peercert2, shost)
if msg:
raise error.SecurityError(
_(b'%s certificate error: %s') % (host, msg),
hint=_(
b'set hostsecurity.%s:certfingerprints=%s '
b'config setting or use --insecure to connect '
b'insecurely'
)
% (host, nicefingerprint),
)
```
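Most of this module is wiring between `[hostsecurity]` configuration and the `ssl` module, but `_dnsnamematch()` is a self-contained piece that is easy to sanity-check. The sketch below illustrates the RFC 6125 behaviour it implements; calling a module-private function directly like this is for illustration only and assumes `mercurial.sslutil` is importable.

```python
# Wildcards match exactly one non-empty left-most label; plain names compare
# case-insensitively.
from mercurial import sslutil

assert sslutil._dnsnamematch(b'*.example.com', b'www.example.com')
assert not sslutil._dnsnamematch(b'*.example.com', b'example.com')
assert not sslutil._dnsnamematch(b'*.example.com', b'a.b.example.com')
assert sslutil._dnsnamematch(b'Host.Example.COM', b'host.example.com')
```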
#### File: site-packages/mercurial/stack.py
```python
from __future__ import absolute_import
def getstack(repo, rev=None):
"""return a sorted smartrev of the stack containing either rev if it is
not None or the current working directory parent.
The stack will always contain all drafts changesets which are ancestors to
the revision and are not merges.
"""
if rev is None:
rev = b'.'
revspec = b'only(%s) and not public() and not ::merge()'
revisions = repo.revs(revspec, rev)
revisions.sort()
return revisions
```
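A hedged sketch of how an extension command might consume `getstack()`; the `printstack` helper and its output format are illustrative, and the `ui`/`repo` pair is assumed to come from Mercurial's command machinery.

```python
# Print the draft stack ending at the working directory parent, oldest first.
def printstack(ui, repo):
    from mercurial import stack
    for rev in stack.getstack(repo):  # rev=None -> use '.'
        ctx = repo[rev]
        firstline = ctx.description().split(b'\n', 1)[0]
        ui.write(b'%d: %s\n' % (rev, firstline))
```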
#### File: site-packages/mercurial/statprof.py
```python
from __future__ import absolute_import, division, print_function
import collections
import contextlib
import getopt
import inspect
import json
import os
import signal
import sys
import threading
import time
from .pycompat import open
from . import (
encoding,
pycompat,
)
defaultdict = collections.defaultdict
contextmanager = contextlib.contextmanager
__all__ = [b'start', b'stop', b'reset', b'display', b'profile']
skips = {
"util.py:check",
"extensions.py:closure",
"color.py:colorcmd",
"dispatch.py:checkargs",
"dispatch.py:<lambda>",
"dispatch.py:_runcatch",
"dispatch.py:_dispatch",
"dispatch.py:_runcommand",
"pager.py:pagecmd",
"dispatch.py:run",
"dispatch.py:dispatch",
"dispatch.py:runcommand",
"hg.py:<module>",
"evolve.py:warnobserrors",
}
###########################################################################
## Utils
def clock():
times = os.times()
return (times[0] + times[1], times[4])
###########################################################################
## Collection data structures
class ProfileState(object):
def __init__(self, frequency=None):
self.reset(frequency)
self.track = b'cpu'
def reset(self, frequency=None):
# total so far
self.accumulated_time = (0.0, 0.0)
# start_time when timer is active
self.last_start_time = None
# a float
if frequency:
self.sample_interval = 1.0 / frequency
elif not pycompat.hasattr(self, 'sample_interval'):
# default to 1000 Hz
self.sample_interval = 1.0 / 1000.0
else:
# leave the frequency as it was
pass
self.remaining_prof_time = None
# for user start/stop nesting
self.profile_level = 0
self.samples = []
def accumulate_time(self, stop_time):
increment = (
stop_time[0] - self.last_start_time[0],
stop_time[1] - self.last_start_time[1],
)
self.accumulated_time = (
self.accumulated_time[0] + increment[0],
self.accumulated_time[1] + increment[1],
)
def seconds_per_sample(self):
return self.accumulated_time[self.timeidx] / len(self.samples)
@property
def timeidx(self):
if self.track == b'real':
return 1
return 0
state = ProfileState()
class CodeSite(object):
cache = {}
__slots__ = ('path', 'lineno', 'function', 'source')
def __init__(self, path, lineno, function):
assert isinstance(path, bytes)
self.path = path
self.lineno = lineno
assert isinstance(function, bytes)
self.function = function
self.source = None
def __eq__(self, other):
try:
return self.lineno == other.lineno and self.path == other.path
except:
return False
def __hash__(self):
return hash((self.lineno, self.path))
@classmethod
def get(cls, path, lineno, function):
k = (path, lineno)
try:
return cls.cache[k]
except KeyError:
v = cls(path, lineno, function)
cls.cache[k] = v
return v
def getsource(self, length):
if self.source is None:
lineno = self.lineno - 1
try:
with open(self.path, b'rb') as fp:
for i, line in enumerate(fp):
if i == lineno:
self.source = line.strip()
break
except:
pass
if self.source is None:
self.source = b''
source = self.source
if len(source) > length:
source = source[: (length - 3)] + b"..."
return source
def filename(self):
return os.path.basename(self.path)
def skipname(self):
return '%s:%s' % (self.filename(), self.function)
class Sample(object):
__slots__ = ('stack', 'time')
def __init__(self, stack, time):
self.stack = stack
self.time = time
@classmethod
def from_frame(cls, frame, time):
stack = []
while frame:
stack.append(
CodeSite.get(
pycompat.sysbytes(frame.f_code.co_filename),
frame.f_lineno,
pycompat.sysbytes(frame.f_code.co_name),
)
)
frame = frame.f_back
return Sample(stack, time)
###########################################################################
## SIGPROF handler
def profile_signal_handler(signum, frame):
if state.profile_level > 0:
now = clock()
state.accumulate_time(now)
timestamp = state.accumulated_time[state.timeidx]
state.samples.append(Sample.from_frame(frame, timestamp))
signal.setitimer(signal.ITIMER_PROF, state.sample_interval, 0.0)
state.last_start_time = now
stopthread = threading.Event()
def samplerthread(tid):
while not stopthread.is_set():
now = clock()
state.accumulate_time(now)
frame = sys._current_frames()[tid]
timestamp = state.accumulated_time[state.timeidx]
state.samples.append(Sample.from_frame(frame, timestamp))
state.last_start_time = now
time.sleep(state.sample_interval)
stopthread.clear()
###########################################################################
## Profiling API
def is_active():
return state.profile_level > 0
lastmechanism = None
def start(mechanism=b'thread', track=b'cpu'):
'''Install the profiling signal handler, and start profiling.'''
state.track = track # note: nesting different mode won't work
state.profile_level += 1
if state.profile_level == 1:
state.last_start_time = clock()
rpt = state.remaining_prof_time
state.remaining_prof_time = None
global lastmechanism
lastmechanism = mechanism
if mechanism == b'signal':
signal.signal(signal.SIGPROF, profile_signal_handler)
signal.setitimer(
signal.ITIMER_PROF, rpt or state.sample_interval, 0.0
)
elif mechanism == b'thread':
frame = inspect.currentframe()
tid = [k for k, f in sys._current_frames().items() if f == frame][0]
state.thread = threading.Thread(
target=samplerthread, args=(tid,), name="samplerthread"
)
state.thread.start()
def stop():
'''Stop profiling, and uninstall the profiling signal handler.'''
state.profile_level -= 1
if state.profile_level == 0:
if lastmechanism == b'signal':
rpt = signal.setitimer(signal.ITIMER_PROF, 0.0, 0.0)
signal.signal(signal.SIGPROF, signal.SIG_IGN)
state.remaining_prof_time = rpt[0]
elif lastmechanism == b'thread':
stopthread.set()
state.thread.join()
state.accumulate_time(clock())
state.last_start_time = None
statprofpath = encoding.environ.get(b'STATPROF_DEST')
if statprofpath:
save_data(statprofpath)
return state
def save_data(path):
with open(path, b'w+') as file:
file.write(b"%f %f\n" % state.accumulated_time)
for sample in state.samples:
time = sample.time
stack = sample.stack
sites = [
b'\1'.join([s.path, b'%d' % s.lineno, s.function])
for s in stack
]
file.write(b"%d\0%s\n" % (time, b'\0'.join(sites)))
def load_data(path):
lines = open(path, b'rb').read().splitlines()
state.accumulated_time = [float(value) for value in lines[0].split()]
state.samples = []
for line in lines[1:]:
parts = line.split(b'\0')
time = float(parts[0])
rawsites = parts[1:]
sites = []
for rawsite in rawsites:
siteparts = rawsite.split(b'\1')
sites.append(
CodeSite.get(siteparts[0], int(siteparts[1]), siteparts[2])
)
state.samples.append(Sample(sites, time))
def reset(frequency=None):
"""Clear out the state of the profiler. Do not call while the
profiler is running.
The optional frequency argument specifies the number of samples to
collect per second."""
assert state.profile_level == 0, b"Can't reset() while statprof is running"
CodeSite.cache.clear()
state.reset(frequency)
@contextmanager
def profile():
start()
try:
yield
finally:
stop()
display()
###########################################################################
## Reporting API
class SiteStats(object):
def __init__(self, site):
self.site = site
self.selfcount = 0
self.totalcount = 0
def addself(self):
self.selfcount += 1
def addtotal(self):
self.totalcount += 1
def selfpercent(self):
return self.selfcount / len(state.samples) * 100
def totalpercent(self):
return self.totalcount / len(state.samples) * 100
def selfseconds(self):
return self.selfcount * state.seconds_per_sample()
def totalseconds(self):
return self.totalcount * state.seconds_per_sample()
@classmethod
def buildstats(cls, samples):
stats = {}
for sample in samples:
for i, site in enumerate(sample.stack):
sitestat = stats.get(site)
if not sitestat:
sitestat = SiteStats(site)
stats[site] = sitestat
sitestat.addtotal()
if i == 0:
sitestat.addself()
return [s for s in pycompat.itervalues(stats)]
class DisplayFormats:
ByLine = 0
ByMethod = 1
AboutMethod = 2
Hotpath = 3
FlameGraph = 4
Json = 5
Chrome = 6
def display(fp=None, format=3, data=None, **kwargs):
'''Print statistics, either to stdout or the given file object.'''
if data is None:
data = state
if fp is None:
import sys
fp = sys.stdout
if len(data.samples) == 0:
fp.write(b'No samples recorded.\n')
return
if format == DisplayFormats.ByLine:
display_by_line(data, fp)
elif format == DisplayFormats.ByMethod:
display_by_method(data, fp)
elif format == DisplayFormats.AboutMethod:
display_about_method(data, fp, **kwargs)
elif format == DisplayFormats.Hotpath:
display_hotpath(data, fp, **kwargs)
elif format == DisplayFormats.FlameGraph:
write_to_flame(data, fp, **kwargs)
elif format == DisplayFormats.Json:
write_to_json(data, fp)
elif format == DisplayFormats.Chrome:
write_to_chrome(data, fp, **kwargs)
else:
raise Exception(b"Invalid display format")
if format not in (DisplayFormats.Json, DisplayFormats.Chrome):
fp.write(b'---\n')
fp.write(b'Sample count: %d\n' % len(data.samples))
fp.write(b'Total time: %f seconds (%f wall)\n' % data.accumulated_time)
def display_by_line(data, fp):
"""Print the profiler data with each sample line represented
as one row in a table. Sorted by self-time per line."""
stats = SiteStats.buildstats(data.samples)
stats.sort(reverse=True, key=lambda x: x.selfseconds())
fp.write(
b'%5.5s %10.10s %7.7s %-8.8s\n'
% (b'% ', b'cumulative', b'self', b'')
)
fp.write(
b'%5.5s %9.9s %8.8s %-8.8s\n'
% (b"time", b"seconds", b"seconds", b"name")
)
for stat in stats:
site = stat.site
sitelabel = b'%s:%d:%s' % (site.filename(), site.lineno, site.function)
fp.write(
b'%6.2f %9.2f %9.2f %s\n'
% (
stat.selfpercent(),
stat.totalseconds(),
stat.selfseconds(),
sitelabel,
)
)
def display_by_method(data, fp):
"""Print the profiler data with each sample function represented
as one row in a table. Important lines within that function are
output as nested rows. Sorted by self-time per line."""
fp.write(
b'%5.5s %10.10s %7.7s %-8.8s\n'
% (b'% ', b'cumulative', b'self', b'')
)
fp.write(
b'%5.5s %9.9s %8.8s %-8.8s\n'
% (b"time", b"seconds", b"seconds", b"name")
)
stats = SiteStats.buildstats(data.samples)
grouped = defaultdict(list)
for stat in stats:
grouped[stat.site.filename() + b":" + stat.site.function].append(stat)
# compute sums for each function
functiondata = []
for fname, sitestats in pycompat.iteritems(grouped):
total_cum_sec = 0
total_self_sec = 0
total_percent = 0
for stat in sitestats:
total_cum_sec += stat.totalseconds()
total_self_sec += stat.selfseconds()
total_percent += stat.selfpercent()
functiondata.append(
(fname, total_cum_sec, total_self_sec, total_percent, sitestats)
)
# sort by total self sec
functiondata.sort(reverse=True, key=lambda x: x[2])
for function in functiondata:
if function[3] < 0.05:
continue
fp.write(
b'%6.2f %9.2f %9.2f %s\n'
% (
function[3], # total percent
function[1], # total cum sec
function[2], # total self sec
function[0],
)
) # file:function
function[4].sort(reverse=True, key=lambda i: i.selfseconds())
for stat in function[4]:
# only show line numbers for significant locations (>1% time spent)
if stat.selfpercent() > 1:
source = stat.site.getsource(25)
if sys.version_info.major >= 3 and not isinstance(
source, bytes
):
source = pycompat.bytestr(source)
stattuple = (
stat.selfpercent(),
stat.selfseconds(),
stat.site.lineno,
source,
)
fp.write(b'%33.0f%% %6.2f line %d: %s\n' % stattuple)
def display_about_method(data, fp, function=None, **kwargs):
if function is None:
raise Exception(b"Invalid function")
filename = None
if b':' in function:
filename, function = function.split(b':')
relevant_samples = 0
parents = {}
children = {}
for sample in data.samples:
for i, site in enumerate(sample.stack):
if site.function == function and (
not filename or site.filename() == filename
):
relevant_samples += 1
if i != len(sample.stack) - 1:
parent = sample.stack[i + 1]
if parent in parents:
parents[parent] = parents[parent] + 1
else:
parents[parent] = 1
if site in children:
children[site] = children[site] + 1
else:
children[site] = 1
parents = [(parent, count) for parent, count in pycompat.iteritems(parents)]
parents.sort(reverse=True, key=lambda x: x[1])
for parent, count in parents:
fp.write(
b'%6.2f%% %s:%s line %s: %s\n'
% (
count / relevant_samples * 100,
pycompat.fsencode(parent.filename()),
pycompat.sysbytes(parent.function),
parent.lineno,
pycompat.sysbytes(parent.getsource(50)),
)
)
stats = SiteStats.buildstats(data.samples)
stats = [
s
for s in stats
if s.site.function == function
and (not filename or s.site.filename() == filename)
]
total_cum_sec = 0
total_self_sec = 0
total_self_percent = 0
total_cum_percent = 0
for stat in stats:
total_cum_sec += stat.totalseconds()
total_self_sec += stat.selfseconds()
total_self_percent += stat.selfpercent()
total_cum_percent += stat.totalpercent()
fp.write(
b'\n %s:%s Total: %0.2fs (%0.2f%%) Self: %0.2fs (%0.2f%%)\n\n'
% (
pycompat.sysbytes(filename or b'___'),
pycompat.sysbytes(function),
total_cum_sec,
total_cum_percent,
total_self_sec,
total_self_percent,
)
)
children = [(child, count) for child, count in pycompat.iteritems(children)]
children.sort(reverse=True, key=lambda x: x[1])
for child, count in children:
fp.write(
b' %6.2f%% line %s: %s\n'
% (
count / relevant_samples * 100,
child.lineno,
pycompat.sysbytes(child.getsource(50)),
)
)
def display_hotpath(data, fp, limit=0.05, **kwargs):
class HotNode(object):
def __init__(self, site):
self.site = site
self.count = 0
self.children = {}
def add(self, stack, time):
self.count += time
site = stack[0]
child = self.children.get(site)
if not child:
child = HotNode(site)
self.children[site] = child
if len(stack) > 1:
i = 1
                # Skip boilerplate parts of the stack
while i < len(stack) and stack[i].skipname() in skips:
i += 1
if i < len(stack):
child.add(stack[i:], time)
else:
# Normally this is done by the .add() calls
child.count += time
root = HotNode(None)
lasttime = data.samples[0].time
for sample in data.samples:
root.add(sample.stack[::-1], sample.time - lasttime)
lasttime = sample.time
showtime = kwargs.get('showtime', True)
def _write(node, depth, multiple_siblings):
site = node.site
visiblechildren = [
c
for c in pycompat.itervalues(node.children)
if c.count >= (limit * root.count)
]
if site:
indent = depth * 2 - 1
filename = (site.filename() + b':').ljust(15)
function = site.function
# lots of string formatting
listpattern = (
b''.ljust(indent)
+ (b'\\' if multiple_siblings else b'|')
+ b' %4.1f%%'
+ (b' %5.2fs' % node.count if showtime else b'')
+ b' %s %s'
)
liststring = listpattern % (
node.count / root.count * 100,
filename,
function,
)
# 4 to account for the word 'line'
spacing_len = max(4, 55 - len(liststring))
prefix = b''
if spacing_len == 4:
prefix = b', '
codepattern = b'%s%s %d: %s%s'
codestring = codepattern % (
prefix,
b'line'.rjust(spacing_len),
site.lineno,
b''.ljust(max(0, 4 - len(str(site.lineno)))),
site.getsource(30),
)
finalstring = liststring + codestring
childrensamples = sum(
[c.count for c in pycompat.itervalues(node.children)]
)
# Make frames that performed more than 10% of the operation red
if node.count - childrensamples > (0.1 * root.count):
finalstring = b'\033[91m' + finalstring + b'\033[0m'
# Make frames that didn't actually perform work dark grey
elif node.count - childrensamples == 0:
finalstring = b'\033[90m' + finalstring + b'\033[0m'
fp.write(finalstring + b'\n')
newdepth = depth
if len(visiblechildren) > 1 or multiple_siblings:
newdepth += 1
visiblechildren.sort(reverse=True, key=lambda x: x.count)
for child in visiblechildren:
_write(child, newdepth, len(visiblechildren) > 1)
if root.count > 0:
_write(root, 0, False)
def write_to_flame(data, fp, scriptpath=None, outputfile=None, **kwargs):
if scriptpath is None:
scriptpath = encoding.environ[b'HOME'] + b'/flamegraph.pl'
if not os.path.exists(scriptpath):
fp.write(b'error: missing %s\n' % scriptpath)
fp.write(b'get it here: https://github.com/brendangregg/FlameGraph\n')
return
lines = {}
for sample in data.samples:
sites = [s.function for s in sample.stack]
sites.reverse()
line = b';'.join(sites)
if line in lines:
lines[line] = lines[line] + 1
else:
lines[line] = 1
fd, path = pycompat.mkstemp()
with open(path, b"w+") as file:
for line, count in pycompat.iteritems(lines):
file.write(b"%s %d\n" % (line, count))
if outputfile is None:
outputfile = b'~/flamegraph.svg'
os.system(b"perl ~/flamegraph.pl %s > %s" % (path, outputfile))
fp.write(b'Written to %s\n' % outputfile)
_pathcache = {}
def simplifypath(path):
"""Attempt to make the path to a Python module easier to read by
removing whatever part of the Python search path it was found
on."""
if path in _pathcache:
return _pathcache[path]
hgpath = encoding.__file__.rsplit(os.sep, 2)[0]
for p in [hgpath] + sys.path:
prefix = p + os.sep
if path.startswith(prefix):
path = path[len(prefix) :]
break
_pathcache[path] = path
return path
def write_to_json(data, fp):
samples = []
for sample in data.samples:
stack = []
for frame in sample.stack:
stack.append(
(
pycompat.sysstr(frame.path),
frame.lineno,
pycompat.sysstr(frame.function),
)
)
samples.append((sample.time, stack))
data = json.dumps(samples)
if not isinstance(data, bytes):
data = data.encode('utf-8')
fp.write(data)
def write_to_chrome(data, fp, minthreshold=0.005, maxthreshold=0.999):
samples = []
laststack = collections.deque()
lastseen = collections.deque()
# The Chrome tracing format allows us to use a compact stack
# representation to save space. It's fiddly but worth it.
# We maintain a bijection between stack and ID.
stack2id = {}
id2stack = [] # will eventually be rendered
def stackid(stack):
if not stack:
return
if stack in stack2id:
return stack2id[stack]
parent = stackid(stack[1:])
myid = len(stack2id)
stack2id[stack] = myid
id2stack.append(dict(category=stack[0][0], name='%s %s' % stack[0]))
if parent is not None:
id2stack[-1].update(parent=parent)
return myid
# The sampling profiler can sample multiple times without
# advancing the clock, potentially causing the Chrome trace viewer
# to render single-pixel columns that we cannot zoom in on. We
# work around this by pretending that zero-duration samples are a
# millisecond in length.
clamp = 0.001
# We provide knobs that by default attempt to filter out stack
# frames that are too noisy:
#
# * A few take almost all execution time. These are usually boring
# setup functions, giving a stack that is deep but uninformative.
#
# * Numerous samples take almost no time, but introduce lots of
# noisy, oft-deep "spines" into a rendered profile.
blacklist = set()
totaltime = data.samples[-1].time - data.samples[0].time
minthreshold = totaltime * minthreshold
maxthreshold = max(totaltime * maxthreshold, clamp)
def poplast():
oldsid = stackid(tuple(laststack))
oldcat, oldfunc = laststack.popleft()
oldtime, oldidx = lastseen.popleft()
duration = sample.time - oldtime
if minthreshold <= duration <= maxthreshold:
# ensure no zero-duration events
sampletime = max(oldtime + clamp, sample.time)
samples.append(
dict(
ph='E',
name=oldfunc,
cat=oldcat,
sf=oldsid,
ts=sampletime * 1e6,
pid=0,
)
)
else:
blacklist.add(oldidx)
# Much fiddling to synthesize correctly(ish) nested begin/end
# events given only stack snapshots.
for sample in data.samples:
stack = tuple(
(
(
'%s:%d'
% (simplifypath(pycompat.sysstr(frame.path)), frame.lineno),
pycompat.sysstr(frame.function),
)
for frame in sample.stack
)
)
qstack = collections.deque(stack)
if laststack == qstack:
continue
while laststack and qstack and laststack[-1] == qstack[-1]:
laststack.pop()
qstack.pop()
while laststack:
poplast()
for f in reversed(qstack):
lastseen.appendleft((sample.time, len(samples)))
laststack.appendleft(f)
path, name = f
sid = stackid(tuple(laststack))
samples.append(
dict(
ph='B',
name=name,
cat=path,
ts=sample.time * 1e6,
sf=sid,
pid=0,
)
)
laststack = collections.deque(stack)
while laststack:
poplast()
events = [
sample for idx, sample in enumerate(samples) if idx not in blacklist
]
frames = collections.OrderedDict(
(str(k), v) for (k, v) in enumerate(id2stack)
)
data = json.dumps(dict(traceEvents=events, stackFrames=frames), indent=1)
if not isinstance(data, bytes):
data = data.encode('utf-8')
fp.write(data)
fp.write(b'\n')
def printusage():
print(
r"""
The statprof command line allows you to inspect the last profile's results in
the following forms:
usage:
hotpath [-l --limit percent]
Shows a graph of calls with the percent of time each takes.
    Red calls take over 10% of the total time themselves.
lines
Shows the actual sampled lines.
functions
Shows the samples grouped by function.
function [filename:]functionname
Shows the callers and callees of a particular function.
flame [-s --script-path] [-o --output-file path]
Writes out a flamegraph to output-file (defaults to ~/flamegraph.svg)
Requires that ~/flamegraph.pl exist.
(Specify alternate script path with --script-path.)"""
)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
printusage()
return 0
displayargs = {}
optstart = 2
displayargs[b'function'] = None
if argv[1] == 'hotpath':
displayargs[b'format'] = DisplayFormats.Hotpath
elif argv[1] == 'lines':
displayargs[b'format'] = DisplayFormats.ByLine
elif argv[1] == 'functions':
displayargs[b'format'] = DisplayFormats.ByMethod
elif argv[1] == 'function':
displayargs[b'format'] = DisplayFormats.AboutMethod
displayargs[b'function'] = argv[2]
optstart = 3
elif argv[1] == 'flame':
displayargs[b'format'] = DisplayFormats.FlameGraph
else:
printusage()
return 0
# process options
try:
opts, args = pycompat.getoptb(
sys.argv[optstart:],
b"hl:f:o:p:",
[b"help", b"limit=", b"file=", b"output-file=", b"script-path="],
)
except getopt.error as msg:
print(msg)
printusage()
return 2
displayargs[b'limit'] = 0.05
path = None
for o, value in opts:
if o in ("-l", "--limit"):
displayargs[b'limit'] = float(value)
elif o in ("-f", "--file"):
path = value
elif o in ("-o", "--output-file"):
displayargs[b'outputfile'] = value
elif o in ("-p", "--script-path"):
displayargs[b'scriptpath'] = value
elif o in ("-h", "help"):
printusage()
return 0
else:
assert False, b"unhandled option %s" % o
if not path:
print('must specify --file to load')
return 1
load_data(path=path)
display(**pycompat.strkwargs(displayargs))
return 0
if __name__ == "__main__":
sys.exit(main())
```
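Beyond the command-line entry point in `main()`, the profiler can be driven programmatically. A minimal sketch using the thread-based sampler and the hotpath report is shown below; it assumes `mercurial.statprof` is importable and passes a binary stream explicitly because `display()` writes bytes.

```python
# Sample a small workload at ~1000 Hz and print the hotpath report.
import sys
from mercurial import statprof

statprof.reset(frequency=1000)
statprof.start(mechanism=b'thread', track=b'cpu')
total = sum(i * i for i in range(1000000))  # the workload being profiled
statprof.stop()
statprof.display(fp=sys.stdout.buffer, format=statprof.DisplayFormats.Hotpath)
```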
#### File: site-packages/mercurial/templatefilters.py
```python
from __future__ import absolute_import
import os
import re
import time
from .i18n import _
from .node import hex
from . import (
encoding,
error,
pycompat,
registrar,
smartset,
templateutil,
url,
util,
)
from .utils import (
cborutil,
dateutil,
stringutil,
)
urlerr = util.urlerr
urlreq = util.urlreq
# filters are callables like:
# fn(obj)
# with:
# obj - object to be filtered (text, date, list and so on)
filters = {}
templatefilter = registrar.templatefilter(filters)
@templatefilter(b'addbreaks', intype=bytes)
def addbreaks(text):
"""Any text. Add an XHTML "<br />" tag before the end of
every line except the last.
"""
return text.replace(b'\n', b'<br/>\n')
agescales = [
(b"year", 3600 * 24 * 365, b'Y'),
(b"month", 3600 * 24 * 30, b'M'),
(b"week", 3600 * 24 * 7, b'W'),
(b"day", 3600 * 24, b'd'),
(b"hour", 3600, b'h'),
(b"minute", 60, b'm'),
(b"second", 1, b's'),
]
@templatefilter(b'age', intype=templateutil.date)
def age(date, abbrev=False):
"""Date. Returns a human-readable date/time difference between the
given date/time and the current date/time.
"""
def plural(t, c):
if c == 1:
return t
return t + b"s"
def fmt(t, c, a):
if abbrev:
return b"%d%s" % (c, a)
return b"%d %s" % (c, plural(t, c))
now = time.time()
then = date[0]
future = False
if then > now:
future = True
delta = max(1, int(then - now))
if delta > agescales[0][1] * 30:
return b'in the distant future'
else:
delta = max(1, int(now - then))
if delta > agescales[0][1] * 2:
return dateutil.shortdate(date)
for t, s, a in agescales:
n = delta // s
if n >= 2 or s == 1:
if future:
return b'%s from now' % fmt(t, n, a)
return b'%s ago' % fmt(t, n, a)
@templatefilter(b'basename', intype=bytes)
def basename(path):
"""Any text. Treats the text as a path, and returns the last
component of the path after splitting by the path separator.
For example, "foo/bar/baz" becomes "baz" and "foo/bar//" becomes "".
"""
return os.path.basename(path)
def _tocborencodable(obj):
if isinstance(obj, smartset.abstractsmartset):
return list(obj)
return obj
@templatefilter(b'cbor')
def cbor(obj):
"""Any object. Serializes the object to CBOR bytes."""
# cborutil is stricter about type than json() filter
obj = pycompat.rapply(_tocborencodable, obj)
return b''.join(cborutil.streamencode(obj))
@templatefilter(b'commondir')
def commondir(filelist):
"""List of text. Treats each list item as file name with /
as path separator and returns the longest common directory
prefix shared by all list items.
Returns the empty string if no common prefix exists.
The list items are not normalized, i.e. "foo/../bar" is handled as
file "bar" in the directory "foo/..". Leading slashes are ignored.
For example, ["foo/bar/baz", "foo/baz/bar"] becomes "foo" and
["foo/bar", "baz"] becomes "".
"""
def common(a, b):
if len(a) > len(b):
a = b[: len(a)]
elif len(b) > len(a):
b = b[: len(a)]
if a == b:
return a
for i in pycompat.xrange(len(a)):
if a[i] != b[i]:
return a[:i]
return a
try:
if not filelist:
return b""
dirlist = [f.lstrip(b'/').split(b'/')[:-1] for f in filelist]
if len(dirlist) == 1:
return b'/'.join(dirlist[0])
a = min(dirlist)
b = max(dirlist)
# The common prefix of a and b is shared with all
# elements of the list since Python sorts lexicographical
# and [1, x] after [1].
return b'/'.join(common(a, b))
except TypeError:
raise error.ParseError(_(b'argument is not a list of text'))
@templatefilter(b'count')
def count(i):
"""List or text. Returns the length as an integer."""
try:
return len(i)
except TypeError:
raise error.ParseError(_(b'not countable'))
@templatefilter(b'dirname', intype=bytes)
def dirname(path):
"""Any text. Treats the text as a path, and strips the last
component of the path after splitting by the path separator.
"""
return os.path.dirname(path)
@templatefilter(b'domain', intype=bytes)
def domain(author):
"""Any text. Finds the first string that looks like an email
address, and extracts just the domain component. Example: ``User
<<EMAIL>>`` becomes ``example.com``.
"""
f = author.find(b'@')
if f == -1:
return b''
author = author[f + 1 :]
f = author.find(b'>')
if f >= 0:
author = author[:f]
return author
@templatefilter(b'email', intype=bytes)
def email(text):
"""Any text. Extracts the first string that looks like an email
address. Example: ``User <<EMAIL>>`` becomes
``<EMAIL>``.
"""
return stringutil.email(text)
@templatefilter(b'escape', intype=bytes)
def escape(text):
"""Any text. Replaces the special XML/XHTML characters "&", "<"
and ">" with XML entities, and filters out NUL characters.
"""
return url.escape(text.replace(b'\0', b''), True)
para_re = None
space_re = None
def fill(text, width, initindent=b'', hangindent=b''):
'''fill many paragraphs with optional indentation.'''
global para_re, space_re
if para_re is None:
para_re = re.compile(b'(\n\n|\n\\s*[-*]\\s*)', re.M)
space_re = re.compile(br' +')
def findparas():
start = 0
while True:
m = para_re.search(text, start)
if not m:
uctext = encoding.unifromlocal(text[start:])
w = len(uctext)
while w > 0 and uctext[w - 1].isspace():
w -= 1
yield (
encoding.unitolocal(uctext[:w]),
encoding.unitolocal(uctext[w:]),
)
break
yield text[start : m.start(0)], m.group(1)
start = m.end(1)
return b"".join(
[
stringutil.wrap(
space_re.sub(b' ', stringutil.wrap(para, width)),
width,
initindent,
hangindent,
)
+ rest
for para, rest in findparas()
]
)
@templatefilter(b'fill68', intype=bytes)
def fill68(text):
"""Any text. Wraps the text to fit in 68 columns."""
return fill(text, 68)
@templatefilter(b'fill76', intype=bytes)
def fill76(text):
"""Any text. Wraps the text to fit in 76 columns."""
return fill(text, 76)
@templatefilter(b'firstline', intype=bytes)
def firstline(text):
"""Any text. Returns the first line of text."""
try:
return text.splitlines(True)[0].rstrip(b'\r\n')
except IndexError:
return b''
@templatefilter(b'hex', intype=bytes)
def hexfilter(text):
"""Any text. Convert a binary Mercurial node identifier into
its long hexadecimal representation.
"""
return hex(text)
@templatefilter(b'hgdate', intype=templateutil.date)
def hgdate(text):
"""Date. Returns the date as a pair of numbers: "1157407993
25200" (Unix timestamp, timezone offset).
"""
return b"%d %d" % text
@templatefilter(b'isodate', intype=templateutil.date)
def isodate(text):
"""Date. Returns the date in ISO 8601 format: "2009-08-18 13:00
+0200".
"""
return dateutil.datestr(text, b'%Y-%m-%d %H:%M %1%2')
@templatefilter(b'isodatesec', intype=templateutil.date)
def isodatesec(text):
"""Date. Returns the date in ISO 8601 format, including
seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date
filter.
"""
return dateutil.datestr(text, b'%Y-%m-%d %H:%M:%S %1%2')
def indent(text, prefix, firstline=b''):
'''indent each non-empty line of text after first with prefix.'''
lines = text.splitlines()
num_lines = len(lines)
endswithnewline = text[-1:] == b'\n'
def indenter():
for i in pycompat.xrange(num_lines):
l = lines[i]
if l.strip():
yield prefix if i else firstline
yield l
if i < num_lines - 1 or endswithnewline:
yield b'\n'
return b"".join(indenter())
@templatefilter(b'json')
def json(obj, paranoid=True):
"""Any object. Serializes the object to a JSON formatted text."""
if obj is None:
return b'null'
elif obj is False:
return b'false'
elif obj is True:
return b'true'
elif isinstance(obj, (int, pycompat.long, float)):
return pycompat.bytestr(obj)
elif isinstance(obj, bytes):
return b'"%s"' % encoding.jsonescape(obj, paranoid=paranoid)
elif isinstance(obj, type(u'')):
raise error.ProgrammingError(
b'Mercurial only does output with bytes: %r' % obj
)
elif util.safehasattr(obj, b'keys'):
out = [
b'"%s": %s'
% (encoding.jsonescape(k, paranoid=paranoid), json(v, paranoid))
for k, v in sorted(pycompat.iteritems(obj))
]
return b'{' + b', '.join(out) + b'}'
elif util.safehasattr(obj, b'__iter__'):
out = [json(i, paranoid) for i in obj]
return b'[' + b', '.join(out) + b']'
raise error.ProgrammingError(b'cannot encode %r' % obj)
@templatefilter(b'lower', intype=bytes)
def lower(text):
"""Any text. Converts the text to lowercase."""
return encoding.lower(text)
@templatefilter(b'nonempty', intype=bytes)
def nonempty(text):
"""Any text. Returns '(none)' if the string is empty."""
return text or b"(none)"
@templatefilter(b'obfuscate', intype=bytes)
def obfuscate(text):
"""Any text. Returns the input text rendered as a sequence of
XML entities.
"""
text = pycompat.unicode(
text, pycompat.sysstr(encoding.encoding), r'replace'
)
return b''.join([b'&#%d;' % ord(c) for c in text])
@templatefilter(b'permissions', intype=bytes)
def permissions(flags):
if b"l" in flags:
return b"lrwxrwxrwx"
if b"x" in flags:
return b"-rwxr-xr-x"
return b"-rw-r--r--"
@templatefilter(b'person', intype=bytes)
def person(author):
"""Any text. Returns the name before an email address,
interpreting it as per RFC 5322.
"""
return stringutil.person(author)
@templatefilter(b'revescape', intype=bytes)
def revescape(text):
"""Any text. Escapes all "special" characters, except @.
Forward slashes are escaped twice to prevent web servers from prematurely
unescaping them. For example, "@foo bar/baz" becomes "@foo%20bar%252Fbaz".
"""
return urlreq.quote(text, safe=b'/@').replace(b'/', b'%252F')
@templatefilter(b'rfc3339date', intype=templateutil.date)
def rfc3339date(text):
"""Date. Returns a date using the Internet date format
specified in RFC 3339: "2009-08-18T13:00:13+02:00".
"""
return dateutil.datestr(text, b"%Y-%m-%dT%H:%M:%S%1:%2")
@templatefilter(b'rfc822date', intype=templateutil.date)
def rfc822date(text):
"""Date. Returns a date using the same format used in email
headers: "Tue, 18 Aug 2009 13:00:13 +0200".
"""
return dateutil.datestr(text, b"%a, %d %b %Y %H:%M:%S %1%2")
@templatefilter(b'short', intype=bytes)
def short(text):
"""Changeset hash. Returns the short form of a changeset hash,
i.e. a 12 hexadecimal digit string.
"""
return text[:12]
@templatefilter(b'shortbisect', intype=bytes)
def shortbisect(label):
"""Any text. Treats `label` as a bisection status, and
returns a single-character representing the status (G: good, B: bad,
S: skipped, U: untested, I: ignored). Returns single space if `text`
is not a valid bisection status.
"""
if label:
return label[0:1].upper()
return b' '
@templatefilter(b'shortdate', intype=templateutil.date)
def shortdate(text):
"""Date. Returns a date like "2006-09-18"."""
return dateutil.shortdate(text)
@templatefilter(b'slashpath', intype=bytes)
def slashpath(path):
"""Any text. Replaces the native path separator with slash."""
return util.pconvert(path)
@templatefilter(b'splitlines', intype=bytes)
def splitlines(text):
"""Any text. Split text into a list of lines."""
return templateutil.hybridlist(text.splitlines(), name=b'line')
@templatefilter(b'stringescape', intype=bytes)
def stringescape(text):
return stringutil.escapestr(text)
@templatefilter(b'stringify', intype=bytes)
def stringify(thing):
"""Any type. Turns the value into text by converting values into
text and concatenating them.
"""
return thing # coerced by the intype
@templatefilter(b'stripdir', intype=bytes)
def stripdir(text):
"""Treat the text as path and strip a directory level, if
possible. For example, "foo" and "foo/bar" becomes "foo".
"""
dir = os.path.dirname(text)
if dir == b"":
return os.path.basename(text)
else:
return dir
@templatefilter(b'tabindent', intype=bytes)
def tabindent(text):
"""Any text. Returns the text, with every non-empty line
except the first starting with a tab character.
"""
return indent(text, b'\t')
@templatefilter(b'upper', intype=bytes)
def upper(text):
"""Any text. Converts the text to uppercase."""
return encoding.upper(text)
@templatefilter(b'urlescape', intype=bytes)
def urlescape(text):
"""Any text. Escapes all "special" characters. For example,
"foo bar" becomes "foo%20bar".
"""
return urlreq.quote(text)
@templatefilter(b'user', intype=bytes)
def userfilter(text):
"""Any text. Returns a short representation of a user name or email
address."""
return stringutil.shortuser(text)
@templatefilter(b'emailuser', intype=bytes)
def emailuser(text):
"""Any text. Returns the user portion of an email address."""
return stringutil.emailuser(text)
@templatefilter(b'utf8', intype=bytes)
def utf8(text):
"""Any text. Converts from the local character encoding to UTF-8."""
return encoding.fromlocal(text)
@templatefilter(b'xmlescape', intype=bytes)
def xmlescape(text):
    text = (
        text.replace(b'&', b'&amp;')
        .replace(b'<', b'&lt;')
        .replace(b'>', b'&gt;')
        .replace(b'"', b'&quot;')
        .replace(b"'", b'&#39;')
    )  # &apos; invalid in HTML
return re.sub(b'[\x00-\x08\x0B\x0C\x0E-\x1F]', b' ', text)
def websub(text, websubtable):
""":websub: Any text. Only applies to hgweb. Applies the regular
expression replacements defined in the websub section.
"""
if websubtable:
for regexp, format in websubtable:
text = regexp.sub(format, text)
return text
def loadfilter(ui, extname, registrarobj):
"""Load template filter from specified registrarobj"""
for name, func in pycompat.iteritems(registrarobj._table):
filters[name] = func
# tell hggettext to extract docstrings from these functions:
i18nfunctions = filters.values()
```
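Most of these filters are pure byte-string transformations, so their behaviour can be sanity-checked without a Mercurial repository. Below is a minimal sketch that copies the `domain` logic shown above and exercises it on a made-up author string; the address used here is an illustration only, not taken from the module.
```python
# Standalone copy of the 'domain' filter body above, for illustration.
# It operates on bytes, following Mercurial's internal convention.
def domain(author):
    f = author.find(b'@')
    if f == -1:
        return b''
    author = author[f + 1:]
    f = author.find(b'>')
    if f >= 0:
        author = author[:f]
    return author


if __name__ == "__main__":
    # Made-up inputs for demonstration purposes.
    print(domain(b'A User <someone@example.com>'))  # b'example.com'
    print(domain(b'no address here'))               # b''
```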
#### File: mercurial/testing/revlog.py
```python
from __future__ import absolute_import
import unittest
# picked from test-parse-index2, copied rather than imported
# so that it stays stable even if test-parse-index2 changes or disappears.
data_non_inlined = (
b'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01D\x19'
b'\x00\x07e\x12\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff'
b'\xff\xff\xff\xff\xd1\xf4\xbb\xb0\xbe\xfc\x13\xbd\x8c\xd3\x9d'
b'\x0f\xcd\xd9;\x8c\x07\x8cJ/\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x01D\x19\x00\x00\x00\x00\x00\xdf\x00'
b'\x00\x01q\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff'
b'\xff\xff\xff\xc1\x12\xb9\x04\x96\xa4Z1t\x91\xdfsJ\x90\xf0\x9bh'
b'\x07l&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x01D\xf8\x00\x00\x00\x00\x01\x1b\x00\x00\x01\xb8\x00\x00'
b'\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\x02\n'
b'\x0e\xc6&\xa1\x92\xae6\x0b\x02i\xfe-\xe5\xbao\x05\xd1\xe7\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01F'
b'\x13\x00\x00\x00\x00\x01\xec\x00\x00\x03\x06\x00\x00\x00\x01'
b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1'
b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
)
try:
from ..cext import parsers as cparsers
except ImportError:
cparsers = None
@unittest.skipIf(
cparsers is None,
'The C version of the "parsers" module is not available. It is needed for this test.',
)
class RevlogBasedTestBase(unittest.TestCase):
def parseindex(self):
return cparsers.parse_index2(data_non_inlined, False)[0]
```
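A concrete test can build on this base class by calling `parseindex()` and asserting on the result. The sketch below is illustrative only: the test name and assertion are assumptions, not part of the module, and it assumes the mercurial package (with its C extensions) is importable.
```python
import unittest

# Hypothetical usage sketch of RevlogBasedTestBase.
from mercurial.testing.revlog import RevlogBasedTestBase


class ParseIndexSmokeTest(RevlogBasedTestBase):
    def test_index_is_not_empty(self):
        # data_non_inlined encodes several revisions, so the parsed
        # index should expose at least one entry.
        index = self.parseindex()
        self.assertGreater(len(index), 0)


if __name__ == '__main__':
    unittest.main()
```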
#### File: site-packages/mercurial/upgrade.py
```python
from __future__ import absolute_import
from .i18n import _
from . import (
error,
hg,
localrepo,
lock as lockmod,
pycompat,
requirements as requirementsmod,
scmutil,
)
from .upgrade_utils import (
actions as upgrade_actions,
engine as upgrade_engine,
)
from .utils import (
stringutil,
)
allformatvariant = upgrade_actions.allformatvariant
def upgraderepo(
ui,
repo,
run=False,
optimize=None,
backup=True,
manifest=None,
changelog=None,
filelogs=None,
):
"""Upgrade a repository in place."""
if optimize is None:
optimize = {}
repo = repo.unfiltered()
revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
specentries = (
(upgrade_engine.UPGRADE_CHANGELOG, changelog),
(upgrade_engine.UPGRADE_MANIFEST, manifest),
(upgrade_engine.UPGRADE_FILELOGS, filelogs),
)
specified = [(y, x) for (y, x) in specentries if x is not None]
if specified:
# we have some limitation on revlogs to be recloned
if any(x for y, x in specified):
revlogs = set()
for upgrade, enabled in specified:
if enabled:
revlogs.add(upgrade)
else:
# none are enabled
for upgrade, __ in specified:
revlogs.discard(upgrade)
# Ensure the repository can be upgraded.
upgrade_actions.check_source_requirements(repo)
default_options = localrepo.defaultcreateopts(repo.ui)
newreqs = localrepo.newreporequirements(repo.ui, default_options)
newreqs.update(upgrade_actions.preservedrequirements(repo))
upgrade_actions.check_requirements_changes(repo, newreqs)
# Find and validate all improvements that can be made.
alloptimizations = upgrade_actions.findoptimizations(repo)
# Apply and Validate arguments.
optimizations = []
for o in alloptimizations:
if o.name in optimize:
optimizations.append(o)
optimize.discard(o.name)
if optimize: # anything left is unknown
raise error.Abort(
_(b'unknown optimization action requested: %s')
% b', '.join(sorted(optimize)),
hint=_(b'run without arguments to see valid optimizations'),
)
format_upgrades = upgrade_actions.find_format_upgrades(repo)
up_actions = upgrade_actions.determine_upgrade_actions(
repo, format_upgrades, optimizations, repo.requirements, newreqs
)
removed_actions = upgrade_actions.find_format_downgrades(repo)
removedreqs = repo.requirements - newreqs
addedreqs = newreqs - repo.requirements
if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
removedreqs | addedreqs
)
if incompatible:
msg = _(
b'ignoring revlogs selection flags, format requirements '
b'change: %s\n'
)
ui.warn(msg % b', '.join(sorted(incompatible)))
revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
upgrade_op = upgrade_actions.UpgradeOperation(
ui,
newreqs,
repo.requirements,
up_actions,
removed_actions,
revlogs,
)
if not run:
fromconfig = []
onlydefault = []
for d in format_upgrades:
if d.fromconfig(repo):
fromconfig.append(d)
elif d.default:
onlydefault.append(d)
if fromconfig or onlydefault:
if fromconfig:
ui.status(
_(
b'repository lacks features recommended by '
b'current config options:\n\n'
)
)
for i in fromconfig:
ui.status(b'%s\n %s\n\n' % (i.name, i.description))
if onlydefault:
ui.status(
_(
b'repository lacks features used by the default '
b'config options:\n\n'
)
)
for i in onlydefault:
ui.status(b'%s\n %s\n\n' % (i.name, i.description))
ui.status(b'\n')
else:
ui.status(_(b'(no format upgrades found in existing repository)\n'))
ui.status(
_(
b'performing an upgrade with "--run" will make the following '
b'changes:\n\n'
)
)
upgrade_op.print_requirements()
upgrade_op.print_optimisations()
upgrade_op.print_upgrade_actions()
upgrade_op.print_affected_revlogs()
if upgrade_op.unused_optimizations:
ui.status(
_(
b'additional optimizations are available by specifying '
b'"--optimize <name>":\n\n'
)
)
upgrade_op.print_unused_optimizations()
return
if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
ui.status(_(b'nothing to do\n'))
return
# Else we're in the run=true case.
ui.write(_(b'upgrade will perform the following actions:\n\n'))
upgrade_op.print_requirements()
upgrade_op.print_optimisations()
upgrade_op.print_upgrade_actions()
upgrade_op.print_affected_revlogs()
ui.status(_(b'beginning upgrade...\n'))
with repo.wlock(), repo.lock():
ui.status(_(b'repository locked and read-only\n'))
# Our strategy for upgrading the repository is to create a new,
# temporary repository, write data to it, then do a swap of the
# data. There are less heavyweight ways to do this, but it is easier
# to create a new repo object than to instantiate all the components
# (like the store) separately.
tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
backuppath = None
try:
ui.status(
_(
b'creating temporary repository to stage upgraded '
b'data: %s\n'
)
% tmppath
)
# clone ui without using ui.copy because repo.ui is protected
repoui = repo.ui.__class__(repo.ui)
dstrepo = hg.repository(repoui, path=tmppath, create=True)
with dstrepo.wlock(), dstrepo.lock():
backuppath = upgrade_engine.upgrade(
ui, repo, dstrepo, upgrade_op
)
if not backup:
ui.status(
_(b'removing old repository content %s\n') % backuppath
)
repo.vfs.rmtree(backuppath, forcibly=True)
backuppath = None
finally:
ui.status(_(b'removing temporary repository %s\n') % tmppath)
repo.vfs.rmtree(tmppath, forcibly=True)
if backuppath and not ui.quiet:
ui.warn(
_(b'copy of old repository backed up at %s\n') % backuppath
)
ui.warn(
_(
b'the old repository will not be deleted; remove '
b'it to free up disk space once the upgraded '
b'repository is verified\n'
)
)
upgrade_op.print_post_op_messages()
def upgrade_share_to_safe(
ui,
hgvfs,
storevfs,
current_requirements,
mismatch_config,
mismatch_warn,
):
"""Upgrades a share to use share-safe mechanism"""
wlock = None
store_requirements = localrepo._readrequires(storevfs, False)
original_crequirements = current_requirements.copy()
# after upgrade, store requires will be shared, so lets find
# the requirements which are not present in store and
# write them to share's .hg/requires
diffrequires = current_requirements - store_requirements
# add share-safe requirement as it will mark the share as share-safe
diffrequires.add(requirementsmod.SHARESAFE_REQUIREMENT)
current_requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
# in `allow` case, we don't try to upgrade, we just respect the source
# state, update requirements and continue
if mismatch_config == b'allow':
return
try:
wlock = lockmod.trylock(ui, hgvfs, b'wlock', 0, 0)
# some process might change the requirement in between, re-read
# and update current_requirements
locked_requirements = localrepo._readrequires(hgvfs, True)
if locked_requirements != original_crequirements:
removed = current_requirements - locked_requirements
# update current_requirements in place because it's passed
# as reference
current_requirements -= removed
current_requirements |= locked_requirements
diffrequires = current_requirements - store_requirements
# add share-safe requirement as it will mark the share as share-safe
diffrequires.add(requirementsmod.SHARESAFE_REQUIREMENT)
current_requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
scmutil.writerequires(hgvfs, diffrequires)
ui.warn(_(b'repository upgraded to use share-safe mode\n'))
except error.LockError as e:
hint = _(
"see `hg help config.format.use-share-safe` for more information"
)
if mismatch_config == b'upgrade-abort':
raise error.Abort(
_(b'failed to upgrade share, got error: %s')
% stringutil.forcebytestr(e.strerror),
hint=hint,
)
elif mismatch_warn:
ui.warn(
_(b'failed to upgrade share, got error: %s\n')
% stringutil.forcebytestr(e.strerror),
hint=hint,
)
finally:
if wlock:
wlock.release()
def downgrade_share_to_non_safe(
ui,
hgvfs,
sharedvfs,
current_requirements,
mismatch_config,
mismatch_warn,
):
"""Downgrades a share which use share-safe to not use it"""
wlock = None
source_requirements = localrepo._readrequires(sharedvfs, True)
original_crequirements = current_requirements.copy()
# we cannot be 100% sure on which requirements were present in store when
# the source supported share-safe. However, we do know that working
# directory requirements were not there. Hence we remove them
source_requirements -= requirementsmod.WORKING_DIR_REQUIREMENTS
current_requirements |= source_requirements
current_requirements.remove(requirementsmod.SHARESAFE_REQUIREMENT)
if mismatch_config == b'allow':
return
try:
wlock = lockmod.trylock(ui, hgvfs, b'wlock', 0, 0)
# some process might change the requirement in between, re-read
# and update current_requirements
locked_requirements = localrepo._readrequires(hgvfs, True)
if locked_requirements != original_crequirements:
removed = current_requirements - locked_requirements
# update current_requirements in place because it's passed
# as reference
current_requirements -= removed
current_requirements |= locked_requirements
current_requirements |= source_requirements
current_requirements -= set(requirementsmod.SHARESAFE_REQUIREMENT)
scmutil.writerequires(hgvfs, current_requirements)
ui.warn(_(b'repository downgraded to not use share-safe mode\n'))
except error.LockError as e:
hint = _(
"see `hg help config.format.use-share-safe` for more information"
)
# If upgrade-abort is set, abort when upgrade fails, else let the
# process continue as `upgrade-allow` is set
if mismatch_config == b'downgrade-abort':
raise error.Abort(
_(b'failed to downgrade share, got error: %s')
% stringutil.forcebytestr(e.strerror),
hint=hint,
)
elif mismatch_warn:
ui.warn(
_(b'failed to downgrade share, got error: %s\n')
% stringutil.forcebytestr(e.strerror),
hint=hint,
)
finally:
if wlock:
wlock.release()
```
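In Mercurial itself `upgraderepo()` is normally reached through the `hg debugupgraderepo` command. A rough sketch of driving it directly for a dry run is shown below; the way the `ui` and `repo` objects are constructed here is an assumption for illustration, not something taken from this module.
```python
# Hypothetical sketch: preview the upgrade plan for a local repository.
# Passing run=False only prints the planned requirement changes and
# optimizations; no repository data is rewritten.
from mercurial import hg, ui as uimod, upgrade


def preview_upgrade(path=b'.'):
    ui = uimod.ui.load()
    repo = hg.repository(ui, path)
    upgrade.upgraderepo(ui, repo, run=False)
```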
#### File: mercurial/utils/procutil.py
```python
from __future__ import absolute_import
import contextlib
import errno
import io
import os
import signal
import subprocess
import sys
import threading
import time
from ..i18n import _
from ..pycompat import (
getattr,
open,
)
from .. import (
encoding,
error,
policy,
pycompat,
)
# Import like this to keep import-checker happy
from ..utils import resourceutil
osutil = policy.importmod('osutil')
if pycompat.iswindows:
from .. import windows as platform
else:
from .. import posix as platform
def isatty(fp):
try:
return fp.isatty()
except AttributeError:
return False
class BadFile(io.RawIOBase):
"""Dummy file object to simulate closed stdio behavior"""
def readinto(self, b):
raise IOError(errno.EBADF, 'Bad file descriptor')
def write(self, b):
raise IOError(errno.EBADF, 'Bad file descriptor')
class LineBufferedWrapper(object):
def __init__(self, orig):
self.orig = orig
def __getattr__(self, attr):
return getattr(self.orig, attr)
def write(self, s):
orig = self.orig
res = orig.write(s)
if s.endswith(b'\n'):
orig.flush()
return res
io.BufferedIOBase.register(LineBufferedWrapper)
def make_line_buffered(stream):
if pycompat.ispy3 and not isinstance(stream, io.BufferedIOBase):
# On Python 3, buffered streams can be expected to subclass
# BufferedIOBase. This is definitively the case for the streams
# initialized by the interpreter. For unbuffered streams, we don't need
# to emulate line buffering.
return stream
if isinstance(stream, LineBufferedWrapper):
return stream
return LineBufferedWrapper(stream)
def unwrap_line_buffered(stream):
if isinstance(stream, LineBufferedWrapper):
assert not isinstance(stream.orig, LineBufferedWrapper)
return stream.orig
return stream
class WriteAllWrapper(object):
def __init__(self, orig):
self.orig = orig
def __getattr__(self, attr):
return getattr(self.orig, attr)
def write(self, s):
write1 = self.orig.write
m = memoryview(s)
total_to_write = len(s)
total_written = 0
while total_written < total_to_write:
total_written += write1(m[total_written:])
return total_written
io.IOBase.register(WriteAllWrapper)
def _make_write_all(stream):
assert pycompat.ispy3
if isinstance(stream, WriteAllWrapper):
return stream
if isinstance(stream, io.BufferedIOBase):
# The io.BufferedIOBase.write() contract guarantees that all data is
# written.
return stream
# In general, the write() method of streams is free to write only part of
# the data.
return WriteAllWrapper(stream)
if pycompat.ispy3:
# Python 3 implements its own I/O streams. Unlike stdio of C library,
# sys.stdin/stdout/stderr may be None if underlying fd is closed.
# TODO: .buffer might not exist if std streams were replaced; we'll need
# a silly wrapper to make a bytes stream backed by a unicode one.
if sys.stdin is None:
stdin = BadFile()
else:
stdin = sys.stdin.buffer
if sys.stdout is None:
stdout = BadFile()
else:
stdout = _make_write_all(sys.stdout.buffer)
if sys.stderr is None:
stderr = BadFile()
else:
stderr = _make_write_all(sys.stderr.buffer)
if pycompat.iswindows:
# Work around Windows bugs.
stdout = platform.winstdout(stdout)
stderr = platform.winstdout(stderr)
if isatty(stdout):
# The standard library doesn't offer line-buffered binary streams.
stdout = make_line_buffered(stdout)
else:
# Python 2 uses the I/O streams provided by the C library.
stdin = sys.stdin
stdout = sys.stdout
stderr = sys.stderr
if pycompat.iswindows:
# Work around Windows bugs.
stdout = platform.winstdout(stdout)
stderr = platform.winstdout(stderr)
if isatty(stdout):
if pycompat.iswindows:
# The Windows C runtime library doesn't support line buffering.
stdout = make_line_buffered(stdout)
else:
# glibc determines buffering on first write to stdout - if we
# replace a TTY destined stdout with a pipe destined stdout (e.g.
# pager), we want line buffering.
stdout = os.fdopen(stdout.fileno(), 'wb', 1)
findexe = platform.findexe
_gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
hidewindow = platform.hidewindow
readpipe = platform.readpipe
setbinary = platform.setbinary
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
shellsplit = platform.shellsplit
spawndetached = platform.spawndetached
sshargs = platform.sshargs
testpid = platform.testpid
try:
setprocname = osutil.setprocname
except AttributeError:
pass
try:
unblocksignal = osutil.unblocksignal
except AttributeError:
pass
closefds = pycompat.isposix
def explainexit(code):
"""return a message describing a subprocess status
(codes from kill are negative - not os.system/wait encoding)"""
if code >= 0:
return _(b"exited with status %d") % code
return _(b"killed by signal %d") % -code
class _pfile(object):
"""File-like wrapper for a stream opened by subprocess.Popen()"""
def __init__(self, proc, fp):
self._proc = proc
self._fp = fp
def close(self):
# unlike os.popen(), this returns an integer in subprocess coding
self._fp.close()
return self._proc.wait()
def __iter__(self):
return iter(self._fp)
def __getattr__(self, attr):
return getattr(self._fp, attr)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def popen(cmd, mode=b'rb', bufsize=-1):
if mode == b'rb':
return _popenreader(cmd, bufsize)
elif mode == b'wb':
return _popenwriter(cmd, bufsize)
raise error.ProgrammingError(b'unsupported mode: %r' % mode)
def _popenreader(cmd, bufsize):
p = subprocess.Popen(
tonativestr(cmd),
shell=True,
bufsize=bufsize,
close_fds=closefds,
stdout=subprocess.PIPE,
)
return _pfile(p, p.stdout)
def _popenwriter(cmd, bufsize):
p = subprocess.Popen(
tonativestr(cmd),
shell=True,
bufsize=bufsize,
close_fds=closefds,
stdin=subprocess.PIPE,
)
return _pfile(p, p.stdin)
def popen2(cmd, env=None):
# Setting bufsize to -1 lets the system decide the buffer size.
# The default for bufsize is 0, meaning unbuffered. This leads to
# poor performance on Mac OS X: http://bugs.python.org/issue4194
p = subprocess.Popen(
tonativestr(cmd),
shell=True,
bufsize=-1,
close_fds=closefds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env=tonativeenv(env),
)
return p.stdin, p.stdout
def popen3(cmd, env=None):
stdin, stdout, stderr, p = popen4(cmd, env)
return stdin, stdout, stderr
def popen4(cmd, env=None, bufsize=-1):
p = subprocess.Popen(
tonativestr(cmd),
shell=True,
bufsize=bufsize,
close_fds=closefds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=tonativeenv(env),
)
return p.stdin, p.stdout, p.stderr, p
def pipefilter(s, cmd):
'''filter string S through command CMD, returning its output'''
p = subprocess.Popen(
tonativestr(cmd),
shell=True,
close_fds=closefds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
pout, perr = p.communicate(s)
return pout
def tempfilter(s, cmd):
"""filter string S through a pair of temporary files with CMD.
CMD is used as a template to create the real command to be run,
with the strings INFILE and OUTFILE replaced by the real names of
the temporary files generated."""
inname, outname = None, None
try:
infd, inname = pycompat.mkstemp(prefix=b'hg-filter-in-')
fp = os.fdopen(infd, 'wb')
fp.write(s)
fp.close()
outfd, outname = pycompat.mkstemp(prefix=b'hg-filter-out-')
os.close(outfd)
cmd = cmd.replace(b'INFILE', inname)
cmd = cmd.replace(b'OUTFILE', outname)
code = system(cmd)
if pycompat.sysplatform == b'OpenVMS' and code & 1:
code = 0
if code:
raise error.Abort(
_(b"command '%s' failed: %s") % (cmd, explainexit(code))
)
with open(outname, b'rb') as fp:
return fp.read()
finally:
try:
if inname:
os.unlink(inname)
except OSError:
pass
try:
if outname:
os.unlink(outname)
except OSError:
pass
_filtertable = {
b'tempfile:': tempfilter,
b'pipe:': pipefilter,
}
def filter(s, cmd):
"""filter a string through a command that transforms its input to its
output"""
for name, fn in pycompat.iteritems(_filtertable):
if cmd.startswith(name):
return fn(s, cmd[len(name) :].lstrip())
return pipefilter(s, cmd)
_hgexecutable = None
def hgexecutable():
"""return location of the 'hg' executable.
Defaults to $HG or 'hg' in the search path.
"""
if _hgexecutable is None:
hg = encoding.environ.get(b'HG')
mainmod = sys.modules['__main__']
if hg:
_sethgexecutable(hg)
elif resourceutil.mainfrozen():
if getattr(sys, 'frozen', None) == 'macosx_app':
# Env variable set by py2app
_sethgexecutable(encoding.environ[b'EXECUTABLEPATH'])
else:
_sethgexecutable(pycompat.sysexecutable)
elif (
not pycompat.iswindows
and os.path.basename(getattr(mainmod, '__file__', '')) == 'hg'
):
_sethgexecutable(pycompat.fsencode(mainmod.__file__))
else:
_sethgexecutable(
findexe(b'hg') or os.path.basename(pycompat.sysargv[0])
)
return _hgexecutable
def _sethgexecutable(path):
"""set location of the 'hg' executable"""
global _hgexecutable
_hgexecutable = path
def _testfileno(f, stdf):
fileno = getattr(f, 'fileno', None)
try:
return fileno and fileno() == stdf.fileno()
except io.UnsupportedOperation:
return False # fileno() raised UnsupportedOperation
def isstdin(f):
return _testfileno(f, sys.__stdin__)
def isstdout(f):
return _testfileno(f, sys.__stdout__)
def protectstdio(uin, uout):
"""Duplicate streams and redirect original if (uin, uout) are stdio
If uin is stdin, it's redirected to /dev/null. If uout is stdout, it's
redirected to stderr so the output is still readable.
Returns (fin, fout) which point to the original (uin, uout) fds, but
may be copy of (uin, uout). The returned streams can be considered
"owned" in that print(), exec(), etc. never reach to them.
"""
uout.flush()
fin, fout = uin, uout
if _testfileno(uin, stdin):
newfd = os.dup(uin.fileno())
nullfd = os.open(os.devnull, os.O_RDONLY)
os.dup2(nullfd, uin.fileno())
os.close(nullfd)
fin = os.fdopen(newfd, 'rb')
if _testfileno(uout, stdout):
newfd = os.dup(uout.fileno())
os.dup2(stderr.fileno(), uout.fileno())
fout = os.fdopen(newfd, 'wb')
return fin, fout
def restorestdio(uin, uout, fin, fout):
"""Restore (uin, uout) streams from possibly duplicated (fin, fout)"""
uout.flush()
for f, uif in [(fin, uin), (fout, uout)]:
if f is not uif:
os.dup2(f.fileno(), uif.fileno())
f.close()
def shellenviron(environ=None):
"""return environ with optional override, useful for shelling out"""
def py2shell(val):
"""convert python object into string that is useful to shell"""
if val is None or val is False:
return b'0'
if val is True:
return b'1'
return pycompat.bytestr(val)
env = dict(encoding.environ)
if environ:
env.update((k, py2shell(v)) for k, v in pycompat.iteritems(environ))
env[b'HG'] = hgexecutable()
return env
if pycompat.iswindows:
def shelltonative(cmd, env):
return platform.shelltocmdexe( # pytype: disable=module-attr
cmd, shellenviron(env)
)
tonativestr = encoding.strfromlocal
else:
def shelltonative(cmd, env):
return cmd
tonativestr = pycompat.identity
def tonativeenv(env):
"""convert the environment from bytes to strings suitable for Popen(), etc."""
return pycompat.rapply(tonativestr, env)
def system(cmd, environ=None, cwd=None, out=None):
"""enhanced shell command execution.
run with environment maybe modified, maybe in different dir.
if out is specified, it is assumed to be a file-like object that has a
write() method. stdout and stderr will be redirected to out."""
try:
stdout.flush()
except Exception:
pass
env = shellenviron(environ)
if out is None or isstdout(out):
rc = subprocess.call(
tonativestr(cmd),
shell=True,
close_fds=closefds,
env=tonativeenv(env),
cwd=pycompat.rapply(tonativestr, cwd),
)
else:
proc = subprocess.Popen(
tonativestr(cmd),
shell=True,
close_fds=closefds,
env=tonativeenv(env),
cwd=pycompat.rapply(tonativestr, cwd),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
for line in iter(proc.stdout.readline, b''):
out.write(line)
proc.wait()
rc = proc.returncode
if pycompat.sysplatform == b'OpenVMS' and rc & 1:
rc = 0
return rc
_is_gui = None
def _gui():
'''Are we running in a GUI?'''
if pycompat.isdarwin:
if b'SSH_CONNECTION' in encoding.environ:
# handle SSH access to a box where the user is logged in
return False
elif getattr(osutil, 'isgui', None):
# check if a CoreGraphics session is available
return osutil.isgui()
else:
# pure build; use a safe default
return True
else:
return (
pycompat.iswindows
or encoding.environ.get(b"DISPLAY")
or encoding.environ.get(b"WAYLAND_DISPLAY")
)
def gui():
global _is_gui
if _is_gui is None:
_is_gui = _gui()
return _is_gui
def hgcmd():
"""Return the command used to execute current hg
This is different from hgexecutable() because on Windows we want
to avoid things opening new shell windows like batch files, so we
get either the python call or current executable.
"""
if resourceutil.mainfrozen():
if getattr(sys, 'frozen', None) == 'macosx_app':
# Env variable set by py2app
return [encoding.environ[b'EXECUTABLEPATH']]
else:
return [pycompat.sysexecutable]
return _gethgcmd()
def rundetached(args, condfn):
"""Execute the argument list in a detached process.
condfn is a callable which is called repeatedly and should return
True once the child process is known to have started successfully.
At this point, the child process PID is returned. If the child
process fails to start or finishes before condfn() evaluates to
True, return -1.
"""
# Windows case is easier because the child process is either
# successfully starting and validating the condition or exiting
# on failure. We just poll on its PID. On Unix, if the child
# process fails to start, it will be left in a zombie state until
# the parent wait on it, which we cannot do since we expect a long
# running process on success. Instead we listen for SIGCHLD telling
# us our child process terminated.
terminated = set()
def handler(signum, frame):
terminated.add(os.wait())
prevhandler = None
SIGCHLD = getattr(signal, 'SIGCHLD', None)
if SIGCHLD is not None:
prevhandler = signal.signal(SIGCHLD, handler)
try:
pid = spawndetached(args)
while not condfn():
if (pid in terminated or not testpid(pid)) and not condfn():
return -1
time.sleep(0.1)
return pid
finally:
if prevhandler is not None:
signal.signal(signal.SIGCHLD, prevhandler)
@contextlib.contextmanager
def uninterruptible(warn):
"""Inhibit SIGINT handling on a region of code.
Note that if this is called in a non-main thread, it turns into a no-op.
Args:
warn: A callable which takes no arguments, and returns True if the
previous signal handling should be restored.
"""
oldsiginthandler = [signal.getsignal(signal.SIGINT)]
shouldbail = []
def disabledsiginthandler(*args):
if warn():
signal.signal(signal.SIGINT, oldsiginthandler[0])
del oldsiginthandler[0]
shouldbail.append(True)
try:
try:
signal.signal(signal.SIGINT, disabledsiginthandler)
except ValueError:
# wrong thread, oh well, we tried
del oldsiginthandler[0]
yield
finally:
if oldsiginthandler:
signal.signal(signal.SIGINT, oldsiginthandler[0])
if shouldbail:
raise KeyboardInterrupt
if pycompat.iswindows:
# no fork on Windows, but we can create a detached process
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx
# No stdlib constant exists for this value
DETACHED_PROCESS = 0x00000008
# Following creation flags might create a console GUI window.
# Using subprocess.CREATE_NEW_CONSOLE might help.
# See https://phab.mercurial-scm.org/D1701 for discussion
_creationflags = (
DETACHED_PROCESS
| subprocess.CREATE_NEW_PROCESS_GROUP # pytype: disable=module-attr
)
def runbgcommand(
script,
env,
shell=False,
stdout=None,
stderr=None,
ensurestart=True,
record_wait=None,
stdin_bytes=None,
):
'''Spawn a command without waiting for it to finish.'''
# we can't use close_fds *and* redirect stdin. I'm not sure that we
# need to because the detached process has no console connection.
try:
stdin = None
if stdin_bytes is not None:
stdin = pycompat.unnamedtempfile()
stdin.write(stdin_bytes)
stdin.flush()
stdin.seek(0)
p = subprocess.Popen(
pycompat.rapply(tonativestr, script),
shell=shell,
env=tonativeenv(env),
close_fds=True,
creationflags=_creationflags,
stdin=stdin,
stdout=stdout,
stderr=stderr,
)
if record_wait is not None:
record_wait(p.wait)
finally:
if stdin is not None:
stdin.close()
else:
def runbgcommand(
cmd,
env,
shell=False,
stdout=None,
stderr=None,
ensurestart=True,
record_wait=None,
stdin_bytes=None,
):
"""Spawn a command without waiting for it to finish.
When `record_wait` is not None, the spawned process will not be fully
detached and the `record_wait` argument will be called with a the
`Subprocess.wait` function for the spawned process. This is mostly
useful for developers that need to make sure the spawned process
finished before a certain point. (eg: writing test)"""
if pycompat.isdarwin:
# avoid crash in CoreFoundation in case another thread
# calls gui() while we're calling fork().
gui()
# double-fork to completely detach from the parent process
# based on http://code.activestate.com/recipes/278731
if record_wait is None:
pid = os.fork()
if pid:
if not ensurestart:
# Even though we're not waiting on the child process,
# we still must call waitpid() on it at some point so
# it's not a zombie/defunct. This is especially relevant for
# chg since the parent process won't die anytime soon.
# We use a thread to make the overhead tiny.
def _do_wait():
os.waitpid(pid, 0)
t = threading.Thread(target=_do_wait)
t.daemon = True
t.start()
return
# Parent process
(_pid, status) = os.waitpid(pid, 0)
if os.WIFEXITED(status):
returncode = os.WEXITSTATUS(status)
else:
returncode = -(os.WTERMSIG(status))
if returncode != 0:
# The child process's return code is 0 on success, an errno
# value on failure, or 255 if we don't have a valid errno
# value.
#
# (It would be slightly nicer to return the full exception info
# over a pipe as the subprocess module does. For now it
# doesn't seem worth adding that complexity here, though.)
if returncode == 255:
returncode = errno.EINVAL
raise OSError(
returncode,
b'error running %r: %s'
% (cmd, os.strerror(returncode)),
)
return
returncode = 255
try:
if record_wait is None:
# Start a new session
os.setsid()
# connect stdin to devnull to make sure the subprocess can't
# muck up that stream for mercurial.
if stdin_bytes is None:
stdin = open(os.devnull, b'r')
else:
stdin = pycompat.unnamedtempfile()
stdin.write(stdin_bytes)
stdin.flush()
stdin.seek(0)
if stdout is None:
stdout = open(os.devnull, b'w')
if stderr is None:
stderr = open(os.devnull, b'w')
p = subprocess.Popen(
cmd,
shell=shell,
env=env,
close_fds=True,
stdin=stdin,
stdout=stdout,
stderr=stderr,
)
if record_wait is not None:
record_wait(p.wait)
returncode = 0
except EnvironmentError as ex:
returncode = ex.errno & 0xFF
if returncode == 0:
# This shouldn't happen, but just in case make sure the
# return code is never 0 here.
returncode = 255
except Exception:
returncode = 255
finally:
# mission accomplished, this child needs to exit and not
# continue the hg process here.
stdin.close()
if record_wait is None:
os._exit(returncode)
```
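The `WriteAllWrapper` above exists because a raw stream's `write()` may accept only part of the data. The behaviour is easy to demonstrate against a contrived in-memory stream: the sketch below re-declares the wrapper so it runs standalone, and the `ChunkyStream` class is an invented stand-in, not part of the module.
```python
class WriteAllWrapper(object):
    """Copy of the wrapper above: keep calling write() until all bytes land."""

    def __init__(self, orig):
        self.orig = orig

    def __getattr__(self, attr):
        return getattr(self.orig, attr)

    def write(self, s):
        write1 = self.orig.write
        m = memoryview(s)
        total_to_write = len(s)
        total_written = 0
        while total_written < total_to_write:
            total_written += write1(m[total_written:])
        return total_written


class ChunkyStream(object):
    """Contrived stream that accepts at most three bytes per write() call."""

    def __init__(self):
        self.data = b''

    def write(self, b):
        chunk = bytes(b)[:3]
        self.data += chunk
        return len(chunk)


stream = ChunkyStream()
WriteAllWrapper(stream).write(b'hello world')
assert stream.data == b'hello world'  # all bytes written despite partial writes
```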
#### File: mercurial/utils/resourceutil.py
```python
from __future__ import absolute_import
import imp
import os
import sys
from .. import pycompat
def mainfrozen():
"""return True if we are a frozen executable.
The code supports py2exe (most common, Windows only) and tools/freeze
(portable, not much used).
"""
return (
pycompat.safehasattr(sys, "frozen") # new py2exe
or pycompat.safehasattr(sys, "importers") # old py2exe
or imp.is_frozen("__main__") # tools/freeze
)
# the location of data files matching the source code
if mainfrozen() and getattr(sys, "frozen", None) != "macosx_app":
# executable version (py2exe) doesn't support __file__
datapath = os.path.dirname(pycompat.sysexecutable)
_rootpath = datapath
# The installers store the files outside of library.zip, like
# C:\Program Files\Mercurial\defaultrc\*.rc. This strips the
# leading "mercurial." off of the package name, so that these
# pseudo resources are found in their directory next to the
# executable.
def _package_path(package):
dirs = package.split(b".")
assert dirs[0] == b"mercurial"
return os.path.join(_rootpath, *dirs[1:])
else:
datapath = os.path.dirname(os.path.dirname(pycompat.fsencode(__file__)))
_rootpath = os.path.dirname(datapath)
def _package_path(package):
return os.path.join(_rootpath, *package.split(b"."))
try:
# importlib.resources exists from Python 3.7; see fallback in except clause
# further down
from importlib import resources
from .. import encoding
# Force loading of the resources module
resources.open_binary # pytype: disable=module-attr
def open_resource(package, name):
return resources.open_binary( # pytype: disable=module-attr
pycompat.sysstr(package), pycompat.sysstr(name)
)
def is_resource(package, name):
return resources.is_resource(
pycompat.sysstr(package), encoding.strfromlocal(name)
)
def contents(package):
for r in resources.contents(pycompat.sysstr(package)):
yield encoding.strtolocal(r)
except (ImportError, AttributeError):
# importlib.resources was not found (almost definitely because we're on a
# Python version before 3.7)
def open_resource(package, name):
path = os.path.join(_package_path(package), name)
return open(path, "rb")
def is_resource(package, name):
path = os.path.join(_package_path(package), name)
try:
return os.path.isfile(pycompat.fsdecode(path))
except (IOError, OSError):
return False
def contents(package):
path = pycompat.fsdecode(_package_path(package))
for p in os.listdir(path):
yield pycompat.fsencode(p)
```
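Callers use `open_resource()` with a package and resource name expressed as bytes; a usage sketch follows. The package and resource names here are examples only and must actually exist inside the installed mercurial tree for the call to succeed.
```python
# Hypothetical usage sketch for open_resource(); names are illustrative.
from mercurial.utils import resourceutil


def read_help_resource(package=b"mercurial.helptext", name=b"config.txt"):
    with resourceutil.open_resource(package, name) as fp:
        return fp.read()
```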
#### File: site-packages/mercurial/worker.py
```python
from __future__ import absolute_import
import errno
import os
import signal
import sys
import threading
import time
try:
import selectors
selectors.BaseSelector
except ImportError:
from .thirdparty import selectors2 as selectors
from .i18n import _
from . import (
encoding,
error,
pycompat,
scmutil,
util,
)
def countcpus():
'''try to count the number of CPUs on the system'''
# posix
try:
n = int(os.sysconf('SC_NPROCESSORS_ONLN'))
if n > 0:
return n
except (AttributeError, ValueError):
pass
# windows
try:
n = int(encoding.environ[b'NUMBER_OF_PROCESSORS'])
if n > 0:
return n
except (KeyError, ValueError):
pass
return 1
def _numworkers(ui):
s = ui.config(b'worker', b'numcpus')
if s:
try:
n = int(s)
if n >= 1:
return n
except ValueError:
raise error.Abort(_(b'number of cpus must be an integer'))
return min(max(countcpus(), 4), 32)
if pycompat.ispy3:
def ismainthread():
return threading.current_thread() == threading.main_thread()
class _blockingreader(object):
def __init__(self, wrapped):
self._wrapped = wrapped
# Do NOT implement readinto() by making it delegate to
# _wrapped.readinto(), since that is unbuffered. The unpickler is fine
# with just read() and readline(), so we don't need to implement it.
def readline(self):
return self._wrapped.readline()
# issue multiple reads until size is fulfilled
def read(self, size=-1):
if size < 0:
return self._wrapped.readall()
buf = bytearray(size)
view = memoryview(buf)
pos = 0
while pos < size:
ret = self._wrapped.readinto(view[pos:])
if not ret:
break
pos += ret
del view
del buf[pos:]
return bytes(buf)
else:
def ismainthread():
return isinstance(threading.current_thread(), threading._MainThread)
def _blockingreader(wrapped):
return wrapped
if pycompat.isposix or pycompat.iswindows:
_STARTUP_COST = 0.01
# The Windows worker is thread based. If tasks are CPU bound, threads
# in the presence of the GIL result in excessive context switching and
# this overhead can slow down execution.
_DISALLOW_THREAD_UNSAFE = pycompat.iswindows
else:
_STARTUP_COST = 1e30
_DISALLOW_THREAD_UNSAFE = False
def worthwhile(ui, costperop, nops, threadsafe=True):
"""try to determine whether the benefit of multiple processes can
outweigh the cost of starting them"""
if not threadsafe and _DISALLOW_THREAD_UNSAFE:
return False
linear = costperop * nops
workers = _numworkers(ui)
benefit = linear - (_STARTUP_COST * workers + linear / workers)
return benefit >= 0.15
def worker(
ui, costperarg, func, staticargs, args, hasretval=False, threadsafe=True
):
"""run a function, possibly in parallel in multiple worker
processes.
returns a progress iterator
costperarg - cost of a single task
func - function to run. It is expected to return a progress iterator.
staticargs - arguments to pass to every invocation of the function
args - arguments to split into chunks, to pass to individual
workers
hasretval - when True, func and the current function return an progress
iterator then a dict (encoded as an iterator that yield many (False, ..)
then a (True, dict)). The dicts are joined in some arbitrary order, so
overlapping keys are a bad idea.
threadsafe - whether work items are thread safe and can be executed using
a thread-based worker. Should be disabled for CPU heavy tasks that don't
release the GIL.
"""
enabled = ui.configbool(b'worker', b'enabled')
if enabled and _platformworker is _posixworker and not ismainthread():
# The POSIX worker has to install a handler for SIGCHLD.
# Python up to 3.9 only allows this in the main thread.
enabled = False
if enabled and worthwhile(ui, costperarg, len(args), threadsafe=threadsafe):
return _platformworker(ui, func, staticargs, args, hasretval)
return func(*staticargs + (args,))
def _posixworker(ui, func, staticargs, args, hasretval):
workers = _numworkers(ui)
oldhandler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, signal.SIG_IGN)
pids, problem = set(), [0]
def killworkers():
# unregister SIGCHLD handler as all children will be killed. This
# function shouldn't be interrupted by another SIGCHLD; otherwise pids
# could be updated while iterating, which would cause inconsistency.
signal.signal(signal.SIGCHLD, oldchldhandler)
# if one worker bails, there's no good reason to wait for the rest
for p in pids:
try:
os.kill(p, signal.SIGTERM)
except OSError as err:
if err.errno != errno.ESRCH:
raise
def waitforworkers(blocking=True):
for pid in pids.copy():
p = st = 0
while True:
try:
p, st = os.waitpid(pid, (0 if blocking else os.WNOHANG))
break
except OSError as e:
if e.errno == errno.EINTR:
continue
elif e.errno == errno.ECHILD:
# child would already be reaped, but pids yet been
# updated (maybe interrupted just after waitpid)
pids.discard(pid)
break
else:
raise
if not p:
# skip subsequent steps, because child process should
# be still running in this case
continue
pids.discard(p)
st = _exitstatus(st)
if st and not problem[0]:
problem[0] = st
def sigchldhandler(signum, frame):
waitforworkers(blocking=False)
if problem[0]:
killworkers()
oldchldhandler = signal.signal(signal.SIGCHLD, sigchldhandler)
ui.flush()
parentpid = os.getpid()
pipes = []
retval = {}
for pargs in partition(args, min(workers, len(args))):
# Every worker gets its own pipe to send results on, so we don't have to
# implement atomic writes larger than PIPE_BUF. Each forked process has
# its own pipe's descriptors in the local variables, and the parent
# process has the full list of pipe descriptors (and it doesn't really
# care what order they're in).
rfd, wfd = os.pipe()
pipes.append((rfd, wfd))
# make sure we use os._exit in all worker code paths. otherwise the
# worker may do some clean-ups which could cause surprises like
# deadlock. see sshpeer.cleanup for example.
# override error handling *before* fork. this is necessary because
# exception (signal) may arrive after fork, before "pid =" assignment
# completes, and other exception handler (dispatch.py) can lead to
# unexpected code path without os._exit.
ret = -1
try:
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGINT, oldhandler)
signal.signal(signal.SIGCHLD, oldchldhandler)
def workerfunc():
for r, w in pipes[:-1]:
os.close(r)
os.close(w)
os.close(rfd)
for result in func(*(staticargs + (pargs,))):
os.write(wfd, util.pickle.dumps(result))
return 0
ret = scmutil.callcatch(ui, workerfunc)
except: # parent re-raises, child never returns
if os.getpid() == parentpid:
raise
exctype = sys.exc_info()[0]
force = not issubclass(exctype, KeyboardInterrupt)
ui.traceback(force=force)
finally:
if os.getpid() != parentpid:
try:
ui.flush()
except: # never returns, no re-raises
pass
finally:
os._exit(ret & 255)
pids.add(pid)
selector = selectors.DefaultSelector()
for rfd, wfd in pipes:
os.close(wfd)
selector.register(os.fdopen(rfd, 'rb', 0), selectors.EVENT_READ)
def cleanup():
signal.signal(signal.SIGINT, oldhandler)
waitforworkers()
signal.signal(signal.SIGCHLD, oldchldhandler)
selector.close()
return problem[0]
try:
openpipes = len(pipes)
while openpipes > 0:
for key, events in selector.select():
try:
res = util.pickle.load(_blockingreader(key.fileobj))
if hasretval and res[0]:
retval.update(res[1])
else:
yield res
except EOFError:
selector.unregister(key.fileobj)
key.fileobj.close()
openpipes -= 1
except IOError as e:
if e.errno == errno.EINTR:
continue
raise
except: # re-raises
killworkers()
cleanup()
raise
status = cleanup()
if status:
if status < 0:
os.kill(os.getpid(), -status)
raise error.WorkerError(status)
if hasretval:
yield True, retval
def _posixexitstatus(code):
"""convert a posix exit status into the same form returned by
os.spawnv
returns None if the process was stopped instead of exiting"""
if os.WIFEXITED(code):
return os.WEXITSTATUS(code)
elif os.WIFSIGNALED(code):
return -(os.WTERMSIG(code))
def _windowsworker(ui, func, staticargs, args, hasretval):
class Worker(threading.Thread):
def __init__(
self, taskqueue, resultqueue, func, staticargs, *args, **kwargs
):
threading.Thread.__init__(self, *args, **kwargs)
self._taskqueue = taskqueue
self._resultqueue = resultqueue
self._func = func
self._staticargs = staticargs
self._interrupted = False
self.daemon = True
self.exception = None
def interrupt(self):
self._interrupted = True
def run(self):
try:
while not self._taskqueue.empty():
try:
args = self._taskqueue.get_nowait()
for res in self._func(*self._staticargs + (args,)):
self._resultqueue.put(res)
# threading doesn't provide a native way to
# interrupt execution. handle it manually at every
# iteration.
if self._interrupted:
return
except pycompat.queue.Empty:
break
except Exception as e:
# store the exception such that the main thread can resurface
# it as if the func was running without workers.
self.exception = e
raise
threads = []
def trykillworkers():
# Allow up to 1 second to clean worker threads nicely
cleanupend = time.time() + 1
for t in threads:
t.interrupt()
for t in threads:
remainingtime = cleanupend - time.time()
t.join(remainingtime)
if t.is_alive():
# pass over the workers joining failure. it is more
# important to surface the initial exception than the
# fact that one of workers may be processing a large
# task and does not get to handle the interruption.
ui.warn(
_(
b"failed to kill worker threads while "
b"handling an exception\n"
)
)
return
workers = _numworkers(ui)
resultqueue = pycompat.queue.Queue()
taskqueue = pycompat.queue.Queue()
retval = {}
# partition work to more pieces than workers to minimize the chance
# of uneven distribution of large tasks between the workers
for pargs in partition(args, workers * 20):
taskqueue.put(pargs)
for _i in range(workers):
t = Worker(taskqueue, resultqueue, func, staticargs)
threads.append(t)
t.start()
try:
while len(threads) > 0:
while not resultqueue.empty():
res = resultqueue.get()
if hasretval and res[0]:
retval.update(res[1])
else:
yield res
threads[0].join(0.05)
finishedthreads = [_t for _t in threads if not _t.is_alive()]
for t in finishedthreads:
if t.exception is not None:
raise t.exception
threads.remove(t)
except (Exception, KeyboardInterrupt): # re-raises
trykillworkers()
raise
while not resultqueue.empty():
res = resultqueue.get()
if hasretval and res[0]:
retval.update(res[1])
else:
yield res
if hasretval:
yield True, retval
if pycompat.iswindows:
_platformworker = _windowsworker
else:
_platformworker = _posixworker
_exitstatus = _posixexitstatus
def partition(lst, nslices):
"""partition a list into N slices of roughly equal size
The current strategy takes every Nth element from the input. If
we ever write workers that need to preserve grouping in input
we should consider allowing callers to specify a partition strategy.
mpm is not a fan of this partitioning strategy when files are involved.
In his words:
Single-threaded Mercurial makes a point of creating and visiting
files in a fixed order (alphabetical). When creating files in order,
a typical filesystem is likely to allocate them on nearby regions on
disk. Thus, when revisiting in the same order, locality is maximized
and various forms of OS and disk-level caching and read-ahead get a
chance to work.
This effect can be quite significant on spinning disks. I discovered it
circa Mercurial v0.4 when revlogs were named by hashes of filenames.
Tarring a repo and copying it to another disk effectively randomized
the revlog ordering on disk by sorting the revlogs by hash and suddenly
performance of my kernel checkout benchmark dropped by ~10x because the
"working set" of sectors visited no longer fit in the drive's cache and
the workload switched from streaming to random I/O.
What we should really be doing is have workers read filenames from a
ordered queue. This preserves locality and also keeps any worker from
getting more than one file out of balance.
"""
for i in range(nslices):
yield lst[i::nslices]
```
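The interleaved slicing that `partition()` performs is easiest to see on a small list; the sketch below copies the function so it runs standalone and shows the resulting slices.
```python
# Standalone copy of partition() above, to show the every-Nth-element slicing.
def partition(lst, nslices):
    for i in range(nslices):
        yield lst[i::nslices]


print(list(partition(list(range(10)), 3)))
# [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]
```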
|
{
"source": "jessedc/cozir-python",
"score": 3
}
|
#### File: cozir-python/cozir_python/influx.py
```python
from datetime import datetime, timezone
from copy import deepcopy
co2_header = 'z'
co2f_header = 'Z'
tmp_header = 'T'
humid_header = 'H'
# https://www.influxdata.com/blog/getting-started-python-influxdb/
# {
# "measurement": "brushEvents",
# "tags": {
# "user": "Carol",
# "brushId": "6c89f539-71c6-490d-a28d-6c5d84c0ee2f"
# },
# "time": "2018-03-28T8:01:00Z",
# "fields": {
# "duration": 127
# }
# }
# Converts a line read from the wire into an array of influx DB points that can be serialised
def points_from_line(line: str):
timestamp = datetime.now(timezone.utc).astimezone().isoformat()
measure_base = {
"measurement": "co2",
"tags": {
"sensor": "cozir",
"location": "bedroom2"
},
"time": timestamp,
"fields": {}
}
measurements = []
# H 00486 T 01179 Z 01387 z 01377\r\n
splits = line.split()
i = 0
while i < (len(splits) - 1):
key = splits[i]
val = int(splits[i + 1].lstrip('0'))
i += 2
measurement = deepcopy(measure_base)
if key == tmp_header:
val = float(val - 1000) / 10.0
measurement["measurement"] = "temperature"
measurement["fields"] = {"temp": val}
elif key == humid_header:
val = float(val) / 10.0
measurement["measurement"] = "humidity"
measurement["fields"] = {"humidity": val}
elif key == co2f_header:
measurement["fields"] = {"co2": val}
else:
# ignore other fields for now
continue
measurements.append(measurement)
return measurements
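# Illustrative usage with the sample line quoted in the comment above:
# points_from_line("H 00486 T 01179 Z 01387 z 01377")
# -> three points: humidity=48.6, temperature=17.9, co2=1387 (the raw 'z' value is ignored)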
```
#### File: jessedc/cozir-python/entrypoint.py
```python
from cozir_python.linereader import LineReader
from cozir_python.influx import points_from_line
from influxdb import InfluxDBClient
# FIXME: Remove this from the global scope here
client = InfluxDBClient(host='pi.hole', port=8086)
def on_read_line_callback(line):
if len(line.split()) % 2 != 0:
print('Invalid line')
return
points = points_from_line(line)
client.write_points(points, database='airquality')
print(points)
if __name__ == "__main__":
# On newer firmware/raspbian /dev/ttyAMA0 is sym linked to /dev/serial0
reader = LineReader('/dev/serial0')
reader.read_line_callback(on_read_line_callback)
```
|
{
"source": "jessedc/raspi-temp-python",
"score": 3
}
|
#### File: raspi-temp-python/temperature/temperature.py
```python
import re
import os
def read_temperature():
return cpu_temp(), gpu_temp()
def cpu_temp():
with open('/sys/class/thermal/thermal_zone0/temp', 'r') as content_file:
content = content_file.readline().rstrip('\n')
return float(content) / 1000
# simple global regex for parsing the vcgencmd response
re_gpu_tmp = re.compile('temp=([0-9.]{3,})\'C\\n')
def gpu_temp():
return float(re_gpu_tmp.match(os.popen("/opt/vc/bin/vcgencmd measure_temp").readline()).group(1))
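# Illustrative parse (requires the Raspberry Pi vcgencmd tool): vcgencmd prints a
# line such as "temp=48.3'C\n", from which gpu_temp() returns 48.3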
```
|
{
"source": "jessedc/sds011-pm-sensor-python",
"score": 3
}
|
#### File: sds011-pm-sensor-python/sds011/influx.py
```python
from datetime import datetime, timezone
def measurement_from_data(pm25, pm100, geohash):
"""
Turn SDS011 sensor readings into an InfluxDB-compatible measurement dict
:param geohash: Geohash of the location
:param pm25: PM2.5 reading
:param pm100: PM10 reading
:return: dictionary
"""
timestamp = datetime.now(timezone.utc).astimezone().isoformat()
return {
"measurement": "pm",
"tags": {
"sensor": "sds011",
"location": "outdoors",
"geohash": geohash,
},
"time": timestamp,
"fields": {
"pm25": float(pm25),
"pm100": float(pm100)
}
}
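# Illustrative usage (hypothetical readings and geohash):
# measurement_from_data(12.4, 30.1, "u4pruydqqvj")
# -> {"measurement": "pm", ..., "fields": {"pm25": 12.4, "pm100": 30.1}}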
```
|
{
"source": "jessedc/ve.direct-python",
"score": 2
}
|
#### File: ve.direct-python/vedirect/vedirect.py
```python
import serial
"""
# VE.Direct parser inspired by https://github.com/karioja/vedirect/blob/master/vedirect.py
"""
class Vedirect:
# The error code of the device (relevant when the device is in the fault state).
#
# Error 19 can be ignored, this condition regularly occurs during start-up or shutdown of the MPPT charger.
# Since version 1.15 this error will no longer be reported.
#
# Error 21 can be ignored for 5 minutes, this condition regularly occurs during start-up or shutdown
# of the MPPT charger. Since version 1.16 this warning will no longer be reported when it is not persistent.
#
VICTRON_ERROR = {
'0': 'No error',
'2': 'Battery voltage too high',
'17': 'Charger temperature too high',
'18': 'Charger over current',
'19': 'Charger current reversed',
'20': 'Bulk time limit exceeded',
'21': 'Current sensor issue',
'26': 'Terminals overheated',
'28': 'Converter issue', # (dual converter models only)
'33': 'Input voltage too high (solar panel)',
'34': 'Input current too high (solar panel)',
'38': 'Input shutdown (excessive battery voltage)',
'39': 'Input shutdown (due to current flow during off mode)',
'65': 'Lost communication with one of devices',
'66': 'Synchronised charging device configuration issue',
'67': 'BMS connection lost',
'68': 'Network misconfigured',
'116': 'Factory calibration data lost',
'117': 'Invalid/incompatible firmware',
'119': 'User settings invalid'
}
# The state of operation
VICTRON_CS = {
'0': 'Off',
'2': 'Fault',
'3': 'Bulk',
'4': 'Absorption',
'5': 'Float',
'7': 'Equalize (manual)',
'245': 'Starting-up',
'247': 'Auto equalize / Recondition',
'252': 'External control'
}
# The possible values for the tracker operation
VICTRON_MTTP = {
'0': 'Off',
'1': 'Limited',
'2': 'Active'
}
# Off reason, this field described why a unit is switched off.
#
# Available on SmartSolar mppt chargers since firmware version v1.44 (VE.Direct models)
# and v1.03 (SmartSolar VE.Can models)
# FIXME: This might not work as a dictionary
VICTRON_OFF_REASON = {
"0x00000001": "No input power",
"0x00000002": "Switched off (power switch)",
"0x00000004": "Switched off (device mode register)",
"0x00000008": "Remote input",
"0x00000010": "Protection active",
"0x00000020": "Paygo",
"0x00000040": "BMS",
"0x00000080": "Engine shutdown detection",
"0x00000100": "Analysing input voltage"
}
def __init__(self, port='/dev/ttyAMA0', timeout=5):
"""
Initialise serial component of the Victron parser. Default value is the standard serial port on Raspberry pi
:param port:
:param timeout:
"""
self.ser = serial.Serial(port, 19200, timeout=timeout)
self.header1 = b'\r'
self.header2 = b'\n'
self.delimiter = b'\t'
self.hexmarker = b':'
self.key = bytearray()
self.value = bytearray()
self.bytes_sum = 0
self.state = self.wait_header
self.dict = {}
# Parser states, defined as class-level constants and referenced via self.<state> below
hex, wait_header, in_key, in_value, in_checksum = range(5)
def input(self, byte):
if byte == self.hexmarker and self.state != self.in_checksum:
self.state = self.hex
if self.state == self.wait_header:
self.bytes_sum += ord(byte)
if byte == self.header1:
self.state = self.wait_header
elif byte == self.header2:
self.state = self.in_key
return None
elif self.state == self.in_key:
self.bytes_sum += ord(byte)
if byte == self.delimiter:
if self.key.decode() == 'Checksum':
self.state = self.in_checksum
else:
self.state = self.in_value
else:
self.key += byte
return None
elif self.state == self.in_value:
self.bytes_sum += ord(byte)
if byte == self.header1:
self.state = self.wait_header
self.dict[self.key.decode()] = self.value.decode()
self.key = bytearray()
self.value = bytearray()
else:
self.value += byte
return None
elif self.state == self.in_checksum:
self.bytes_sum += ord(byte)
self.key = bytearray()
self.value = bytearray()
self.state = self.wait_header
if self.bytes_sum % 256 == 0:
self.bytes_sum = 0
return self.dict
else:
print('Malformed packet')
print('----------------')
for k, v in self.dict.items():
print("{} {}".format(k, v))
self.bytes_sum = 0
elif self.state == self.hex:
self.bytes_sum = 0
if byte == self.header2:
self.state = self.wait_header
else:
raise AssertionError()
def read_data_single(self):
while True:
byte = self.ser.read(1)
packet = self.input(byte)
if packet is not None:
return packet
def read_data_callback(self, callback):
while True:
byte = self.ser.read(1)
if byte:
packet = self.input(byte)
if packet is not None:
callback(packet)
else:
break
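# Illustrative usage (assumes a VE.Direct device wired to the default serial port):
# ve = Vedirect()                 # or Vedirect('/dev/serial0', timeout=10)
# ve.read_data_callback(print)    # prints one dict of decoded fields per frame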
```
|
{
"source": "jesseddeng/Machine-learning-Final-Project",
"score": 2
}
|
#### File: Machine-learning-Final-Project/app/__init__.py
```python
from flask import Flask, render_template  # render_template is needed by the 404 handler below
from sklearn.externals import joblib
app = Flask(__name__)
app.config.from_object("app.config")
estimator = joblib.load('predicted.pkl')
from .views import *
# Handle Bad Requests
@app.errorhandler(404)
def page_not_found(e):
"""Page Not Found"""
return render_template('404.html'), 404
```
|
{
"source": "jessedearing/events-kubecon-na-2019",
"score": 2
}
|
#### File: events-kubecon-na-2019/app/main.py
```python
from kubernetes import client, config
from flask import Flask, request, abort
from logging.config import dictConfig
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {'wsgi': {
'class': 'logging.StreamHandler',
'formatter': 'default'
}},
'root': {
'level': 'DEBUG',
'handlers': ['wsgi']
}
})
app = Flask(__name__) # pylint: disable=invalid-name
def deploymentApproved(namespace, deployment_name):
config.load_incluster_config()
events = client.CoreV1Api().list_namespaced_event(namespace)
if next(
filter(lambda e: e.involved_object.name == deployment_name
and e.involved_object.kind == 'Deployment'
and e.reason == 'Approval', events.items),
False):
return True
return False
@app.route('/', methods=['POST'])
def webhook():
admission = request.get_json()
if admission is None:
abort(400)
admission_request = admission.get('request', {})
uid = admission_request.get('uid')
namespace = admission_request.get('namespace')
owner_references = admission_request.get('object', {})\
.get('metadata', {}).get('ownerReferences', [])
deploy_approved = True
deployment_name = next(
filter(lambda r: r['kind'] == 'Deployment', owner_references),
{}).get('name', None)
app.logger.info(f"Checking deployment: {deployment_name}")
if deployment_name is not None:
deploy_approved = deploymentApproved(namespace, deployment_name)
resp = {
'apiVersion': 'admission.k8s.io/v1',
'kind': 'AdmissionReview',
'response': {
'uid': uid,
'allowed': deploy_approved
},
}
if deploy_approved is False:
app.logger.info("Denying deployment")
resp['response']['status'] = {'code': 403, 'message':
'Your deployment must be approved'}
else:
app.logger.info("Approving deployment")
return resp
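# Illustrative (abridged) AdmissionReview request this webhook expects; all
# field values below are hypothetical:
# {
#   "apiVersion": "admission.k8s.io/v1",
#   "kind": "AdmissionReview",
#   "request": {
#     "uid": "705ab4f5-6393-11e8-b7cc-42010a800002",
#     "namespace": "default",
#     "object": {"metadata": {"ownerReferences": [{"kind": "Deployment", "name": "my-app"}]}}
#   }
# }
# The response allows the object unless its owning Deployment lacks an 'Approval' event.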
if __name__ == "__main__":
app.run(host='127.0.0.1', debug=True, port=5000)
```
|
{
"source": "JesseDeMeulemeester/speculator",
"score": 2
}
|
#### File: speculator/scripts/post-processing.py
```python
import argparse
import logging
import os
from string import Template
from typing import List, Optional, TextIO, Tuple
import numpy as np
from scipy import stats
from tqdm import tqdm
def parse_file(filename: str,
constant_cols: Optional[List[int]] = None,
nonzero_cols: Optional[List[int]] = None,
outlier_cols: Optional[List[int]] = None
) -> Tuple[np.ndarray, np.ndarray]:
"""Parse the file and return the mean and std of each column
@param filename The name of the file to parse
@param constant_cols The columns for which a constant value is expected. Any
rows not containing this value will be discarded
@param nonzero_cols The columns for which any rows containing zero should be
removed
@param outlier_cols The columns for which any rows containing outliers
should be removed
@return mean An array containing the means of each column
@return std An array containing the standard deviations of each column
"""
# Read the file into a numpy array
data = np.genfromtxt(filename, dtype=np.uint64, delimiter="|",
skip_header=1)
# Since the last character of each row contains a delimiter, the last column
# has to be removed
data = data[:, :-1]
if constant_cols is not None:
# Find the most occurring sequence
values, counts = np.unique(data[:, constant_cols], return_counts=True)
most_occurring = values[np.argmax(counts)]
# Filter out any rows for which these rows are not constant
row_mask = (data[:, constant_cols] == most_occurring).all(axis=1)
data = data[row_mask]
logging.debug("\tFiltered %d rows containing non-constant values",
np.count_nonzero(~row_mask))
if nonzero_cols is not None:
# Remove all rows for which the value in the columns specified by
# `nonzero_cols' is zero
row_mask = (data[:, nonzero_cols] != 0).all(axis=1)
data = data[row_mask]
logging.debug("\tFiltered %d rows containing zero values",
np.count_nonzero(~row_mask))
if outlier_cols is not None:
# Remove all rows that contain outliers in the columns described by
# `outliers_cols'
z_score = np.abs(stats.zscore(data[:, outlier_cols]))
# Ignore columns where all elements are the same
# These will result in a z_score equal to NaN
z_score[np.isnan(z_score)] = 0
# Filter out any rows for which the value in one of the specified
# columns is an outlier, i.e. has a z score larger than 3
row_mask = (z_score < 3).all(axis=1)
data = data[row_mask]
logging.debug("\tFiltered %d rows containing outliers",
np.count_nonzero(~row_mask))
# Take the mean and std of each column
mean = data.mean(axis=0)
std = data.std(axis=0)
return mean, std
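# Illustrative usage (hypothetical '|'-delimited result file and column choices):
# mean, std = parse_file("results/run_0.txt",
#                        constant_cols=[0], nonzero_cols=[2], outlier_cols=[3])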
def print_result(template: TextIO, output_file: TextIO, filename: str,
mean: np.ndarray, std: np.ndarray) -> None:
"""Print the mean and std to the output file
@param template The template specifying how to format the results in the
output file
@param output_file The file to output the results to
@param filename The filename of the analyzed file
@param mean The means of the analyzed file
@param std The standard deviations of the analyzed file
"""
# Creating the mapping
mapping = dict(filename=filename)
for i in range(len(mean)):
mapping[f"mean_{i}"] = mean[i]
mapping[f"std_{i}"] = std[i]
# For each line in the template file, replace all identifiers in the
# template and write the result to the output file
template.seek(0)
for line in template:
t = Template(line)
output_file.write(t.safe_substitute(mapping))
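# Illustrative template line (hypothetical): "$filename: $mean_0 +/- $std_0"
# which safe_substitute() would render as e.g. "run_0.txt: 1234.5 +/- 6.7"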
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--location", "-l",
required=True,
help="Specify the result directory to be process")
parser.add_argument("--template", "-t", type=str,
help="Path to the template file. If not specified, "
"the default template file, located at "
"$SPEC_H/scripts/templates/final_results.txt.in, "
"will be used.")
parser.add_argument("--constant-cols", "-c", type=int, nargs="+",
help="The columns for which a constant value is "
"expected. Any rows not containing this value "
"will be discarded")
parser.add_argument("--nonzero-cols", "-n", type=int, nargs="+",
help="The columns for which any rows containing zero "
"should be removed.")
parser.add_argument("--outlier-cols", "-o", type=int, nargs="+",
help="The columns for which any rows containing "
"outliers should be removed.")
parser.add_argument("--verbose", "-v", action="store_true",
help="Print debug output.")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format="%(message)s", level=logging.DEBUG)
if args.template is None:
tmpl_filename = f"{os.environ['SPEC_H']}/scripts/templates" \
f"/final_results.txt.in"  # no trailing space; must match the template file on disk
else:
tmpl_filename = args.template
final_filename = os.path.join(args.location, "final_results.txt")
with open(final_filename, "w") as final, open(tmpl_filename, "r") as tmpl:
for dirname, _, filenames in os.walk(args.location):
for file in tqdm(sorted(filenames), disable=args.verbose):
if file == "final_results.txt":
continue
logging.debug("Considering %s", file)
f = os.path.join(dirname, file)
m, std = parse_file(f, args.constant_cols, args.nonzero_cols,
args.outlier_cols)
print_result(tmpl, final, file, m, std)
if __name__ == "__main__":
main()
```
|
{
"source": "jessedesimone/Portfolio",
"score": 3
}
|
#### File: portfolio/reader/reader.py
```python
print('******** Running reader.py ******** ')
#import packages
import os
from abc import ABC, abstractmethod
import logging
import pandas as pd
class directories:
'''Should be accessible in each module'''
BASE_DIR = '/Users/jessedesimone/DeSimone_Github/Portfolio/'
PROJ_DIR = os.path.join(BASE_DIR, 'portfolio/')
DATA_DIR = os.path.join(PROJ_DIR, 'data/')
ANALYSIS_DIR = os.path.join(PROJ_DIR, 'analysis/')
FUNCTION_DIR = os.path.join(PROJ_DIR, 'functions/')
RUNNER_DIR = os.path.join(PROJ_DIR, 'runner/')
TEST_DIR = os.path.join(PROJ_DIR, 'tests/')
OUT_DIR = os.path.join(PROJ_DIR, 'output/')
'''configure data reader'''
class InFileReader(ABC):
def __init__(self):
self._logger = logging.getLogger(__name__)
@abstractmethod
def read_data(self, filepath):
"""
Arguments:
filepath {string} -- filepath of data file
"""
class ExcelFileReader(InFileReader):
def read_data(self, filepath):
"""
Arguments:
filepath {string} -- filepath of the excel sheet
"""
try:
self._logger.info("Reading in input file: %s", filepath)
return pd.read_excel(filepath)
except FileNotFoundError:
self._logger.exception('Error trying to read file: %s', filepath)
raise
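# Illustrative usage (hypothetical file name):
# reader = ExcelFileReader()
# df = reader.read_data(os.path.join(directories.DATA_DIR, 'example.xlsx'))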
```
|
{
"source": "JesseDesjardins/SearchEngine",
"score": 2
}
|
#### File: JesseDesjardins/SearchEngine/boolean_retrieval.py
```python
from collections import deque
import re
# Import local files
from db_operations import retrieve_courses_doc_ids_not_from_set, retrieve_courses_doc_ids_from_term, retrieve_courses_all_terms, retrieve_courses_doc_ids_from_terms, retrieve_reuters_all_terms, retrieve_reuters_doc_ids_from_terms, retrieve_reuters_doc_ids_not_from_set
# execute_boolean_query inspired by https://runestone.academy/runestone/books/published/pythonds/BasicDS/InfixPrefixandPostfixExpressions.html
# TODO move a lot of the set theory directly to sql commands, most likely more efficient on a large data set
def execute_boolean_query(postfix_query_tokens, collection):
""" Get a list of doc_ids for all documents accepted by the query """
operators = ['AND', 'AND_NOT', 'OR', 'OR_NOT', 'NOT']
operand_sets = deque()
if collection == "courses":
for token in postfix_query_tokens:
if token not in operators:
if '*' in token: # Wildcard management
# TODO: replace regex with bigrams
parts = token.split('*')
pattern = re.compile('^' + parts[0] + '.*' + parts[1] + '$')
terms = []
for term in retrieve_courses_all_terms():
if bool(pattern.match(term)):
terms.append(term)
operand_sets.append(retrieve_courses_doc_ids_from_terms(terms))
else:
operand_sets.append(retrieve_courses_doc_ids_from_term(token)) # Add the current set of id's to the stack
elif token == 'NOT':
operand_set_1 = operand_sets.pop()
operand_sets.append(retrieve_courses_doc_ids_not_from_set(operand_set_1))
else:
operand_set_2 = operand_sets.pop()
operand_set_1 = operand_sets.pop()
if token == 'AND':
operand_sets.append(intersection(operand_set_1, operand_set_2))
elif token == 'AND_NOT':
operand_sets.append(intersection(operand_set_1, retrieve_courses_doc_ids_not_from_set(operand_set_2)))
elif token == 'OR':
operand_sets.append(union(operand_set_1, operand_set_2))
elif token == 'OR_NOT':
operand_sets.append(union(operand_set_1, retrieve_courses_doc_ids_not_from_set(operand_set_2)))
else: # unreachable case
None
return operand_sets.pop()
elif collection == "reuters":
for token in postfix_query_tokens:
if token not in operators:
if '*' in token: # Wildcard management
# TODO: replace regex with bigrams
parts = token.split('*')
pattern = re.compile('^' + parts[0] + '.*' + parts[1] + '$')
terms = []
for term in retrieve_reuters_all_terms():
if bool(pattern.match(term)):
terms.append(term)
operand_sets.append(retrieve_reuters_doc_ids_from_terms(terms))
else:
operand_sets.append(retrieve_reuters_doc_ids_from_terms([token])) # Add the current set of id's to the stack
elif token == 'NOT':
operand_set_1 = operand_sets.pop()
operand_sets.append(retrieve_reuters_doc_ids_not_from_set(operand_set_1))
else:
operand_set_2 = operand_sets.pop()
operand_set_1 = operand_sets.pop()
if token == 'AND':
operand_sets.append(intersection(operand_set_1, operand_set_2))
elif token == 'AND_NOT':
operand_sets.append(intersection(operand_set_1, retrieve_reuters_doc_ids_not_from_set(operand_set_2)))
elif token == 'OR':
operand_sets.append(union(operand_set_1, operand_set_2))
elif token == 'OR_NOT':
operand_sets.append(union(operand_set_1, retrieve_reuters_doc_ids_not_from_set(operand_set_2)))
else: # unreachable case
None
return operand_sets.pop() # if isinstance(final, list) else # not sure what causes it, but the result is a list in a list
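# Illustrative call: tokens must already be in postfix form, e.g. the infix query
# "python AND_NOT java" becomes ['python', 'java', 'AND_NOT']:
# execute_boolean_query(['python', 'java', 'AND_NOT'], 'courses')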
def union(lst1, lst2):
return [item for item in lst1] + [item for item in lst2]
def difference(lst1, lst2):
return [item for item in lst1 if item not in set(lst2)]
def intersection(lst1, lst2):
return [value for value in lst1 if value in set(lst2)]
if __name__ == "__main__":
print(execute_boolean_query(['l*p'], 'courses'))  # collection argument was missing; 'courses' chosen as an example
```
#### File: JesseDesjardins/SearchEngine/corpus_access.py
```python
from db_operations import retrieve_courses_documents, retrieve_reuters_documents
def retrieve_courses_docs(doc_ids):
""" Returns a list of documents """
return retrieve_courses_documents(doc_ids)
def retrieve_reuters_docs(doc_ids):
""" Returns a list of documents """
return retrieve_reuters_documents(doc_ids)
```
#### File: JesseDesjardins/SearchEngine/word_processing.py
```python
import re
from nltk.corpus import stopwords, wordnet
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.tag import pos_tag
symbols = [':', ';', '(', ')', '!', '?', '...', ',', '.', '-', '\'', '\"', '\\', '/', '$']
""" List of symbols """
def tokenize(text):
sentences = []
for sentence in sent_tokenize(text):
sentences.append(word_tokenize(sentence))
return [token for sentence in sentences for token in sentence]
def case_fold(tokens):
return [token.lower() for token in tokens]
def remove_stopwords(tokens):
return [token
for token in tokens
if token not in
set(stopwords.words('english'))]
def normalize(tokens):
for symbol in symbols:
tokens = [token.replace(symbol, '') for token in tokens]
return [token for token in tokens if token!='']
def stem(tokens):
stemmer = PorterStemmer()
return list(dict.fromkeys([stemmer.stem(token) for token in tokens])) # Removes possible duplicates after stemming
# lemmatize method inspired by https://stackoverflow.com/a/15590384/3943418
def lemmatize(tokens):
lemmatizer = WordNetLemmatizer()
tagged_tokens = pos_tag(tokens)
tagged_tokens = [(token[0], _tag_to_wordnet(token[1])) for token in tagged_tokens]
lemmatized_tokens = []
for token, tag in tagged_tokens:
if tag is None:
lemmatized_tokens.append(token)
else:
lemmatized_tokens.append(lemmatizer.lemmatize(token, tag))
return list(dict.fromkeys(lemmatized_tokens)) # Removes possible duplicates after stemming
def _tag_to_wordnet(tag):
""" Helper function for lemmatize """
if tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('V'):
return wordnet.VERB
elif tag.startswith('N'):
return wordnet.NOUN
elif tag.startswith('R'):
return wordnet.ADV
else:
return None
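# Illustrative pipeline (assumes the required NLTK corpora and taggers are installed):
# tokens = normalize(remove_stopwords(case_fold(tokenize("The quick brown foxes are running."))))
# lemmatize(tokens)  # -> lemmas such as 'fox' and 'run' (exact output depends on the POS tagger)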
```
|
{
"source": "jessedeveloperinvestor/Multiple-Jesse-Projects",
"score": 3
}
|
#### File: jessedeveloperinvestor/Multiple-Jesse-Projects/Flask.py
```python
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def hello():
return "Hello there, I am Jesse. I develop softwares in Python3, HTML, JavaScript, CSS, Angular, React, Nodejs, SQL, mySQL, postgreSQL, C#, Java, Flutter and Arduino. Check out: 'https://github.com/jessedeveloperinvestor'"
# return render_template('index.html')  # unreachable after the return above; kept as an alternative response
if __name__ == "__main__":
app.run(debug=True)
```
|
{
"source": "jessedoyle/crystal",
"score": 2
}
|
#### File: etc/lldb/crystal_formatters.py
```python
import lldb
class CrystalArraySyntheticProvider:
def __init__(self, valobj, internal_dict):
self.valobj = valobj
self.buffer = None
self.size = 0
def update(self):
if self.valobj.type.is_pointer:
self.valobj = self.valobj.Dereference()
self.size = int(self.valobj.child[0].value)
self.type = self.valobj.type
self.buffer = self.valobj.child[3]
def num_children(self):
size = 0 if self.size is None else self.size
return size
def get_child_index(self, name):
try:
return int(name.lstrip('[').rstrip(']'))
except:
return -1
def get_child_at_index(self,index):
if index >= self.size:
return None
try:
elementType = self.buffer.type.GetPointeeType()
offset = elementType.size * index
return self.buffer.CreateChildAtOffset('[' + str(index) + ']', offset, elementType)
except Exception as e:
print('Got exception %s' % (str(e)))
return None
def findType(name, module):
cachedTypes = module.GetTypes()
for idx in range(cachedTypes.GetSize()):
type = cachedTypes.GetTypeAtIndex(idx)
if type.name == name:
return type
return None
def CrystalString_SummaryProvider(value, dict):
error = lldb.SBError()
if value.TypeIsPointerType():
value = value.Dereference()
process = value.GetTarget().GetProcess()
byteSize = int(value.child[0].value)
len = int(value.child[1].value)
len = byteSize or len
strAddr = value.child[2].load_addr
val = process.ReadCStringFromMemory(strAddr, len + 1, error)
return '"%s"' % val
def __lldb_init_module(debugger, dict):
debugger.HandleCommand('type synthetic add -l crystal_formatters.CrystalArraySyntheticProvider -x "^Array\(.+\)(\s*\**)?" -w Crystal')
debugger.HandleCommand('type summary add -F crystal_formatters.CrystalString_SummaryProvider -x "^(String|\(String \| Nil\))(\s*\**)?$" -w Crystal')
debugger.HandleCommand('type category enable Crystal')
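# These formatters are typically loaded into a debug session with lldb's
# script-import command, e.g.:
# (lldb) command script import etc/lldb/crystal_formatters.py
# after which Array and String values are rendered by the providers above.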
```
|
{
"source": "jessedp/pihole5-list-tool",
"score": 3
}
|
#### File: pihole5-list-tool/ph5lt/stats.py
```python
from terminaltables import AsciiTable, SingleTable
from colors import color
from ph5lt import utils
stats = {
"total_adlist": "SELECT COUNT(*) FROM adlist",
"total_adlist_enabled": "SELECT COUNT(*) FROM adlist WHERE enabled = 1",
"total_adlist_disabled": "SELECT COUNT(*) FROM adlist WHERE enabled = 0",
"our_adlist": "SELECT COUNT(*) FROM adlist WHERE comment LIKE '%Firebog |%' OR comment LIKE '%[ph5lt]'",
"our_adlist_enabled": "SELECT COUNT(*) FROM adlist WHERE enabled = 1 AND (comment LIKE '%Firebog |%' OR comment LIKE '%[ph5lt]')",
"our_adlist_disabled": "SELECT COUNT(*) FROM adlist WHERE enabled = 0 AND (comment LIKE '%Firebog |%' OR comment LIKE '%[ph5lt]')",
"other_adlist": "SELECT COUNT(*) FROM adlist WHERE comment NOT LIKE '%Firebog |%' AND comment NOT LIKE '%[ph5lt]'",
"other_adlist_enabled": "SELECT COUNT(*) FROM adlist WHERE enabled = 1 AND comment NOT LIKE '%Firebog |%' AND comment NOT LIKE '%[ph5lt]'",
"other_adlist_disabled": "SELECT COUNT(*) FROM adlist WHERE enabled = 0 AND comment NOT LIKE '%Firebog |%' AND comment NOT LIKE '%[ph5lt]'",
"total_allow": "SELECT COUNT(*) FROM domainlist WHERE type IN (0,2)",
"total_allow_enabled": "SELECT COUNT(*) FROM domainlist WHERE type IN (0,2) AND enabled = 1",
"total_allow_disabled": "SELECT COUNT(*) FROM domainlist WHERE type IN (0,2) AND enabled = 0",
"our_allow": "SELECT COUNT(*) FROM domainlist WHERE type IN (0,2) AND comment LIKE '%AndeepND |%' OR comment LIKE '%[ph5lt]'",
"our_allow_enabled": "SELECT COUNT(*) FROM domainlist WHERE type IN (0,2) AND enabled = 1 AND (comment LIKE '%AndeepND |%' OR comment LIKE '%[ph5lt]')",
"our_allow_disabled": "SELECT COUNT(*) FROM domainlist WHERE type IN (0,2) AND enabled = 0 AND (comment LIKE '%AndeepND |%' OR comment LIKE '%[ph5lt]')",
"other_allow": "SELECT COUNT(*) FROM domainlist WHERE type IN (0,2) AND comment NOT LIKE '%AndeepND |%' AND comment NOT LIKE '%[ph5lt]'",
"other_allow_enabled": "SELECT COUNT(*) FROM domainlist WHERE type IN (0,2) AND enabled = 1 AND comment NOT LIKE '%AndeepND |%' AND comment NOT LIKE '%[ph5lt]'",
"other_allow_disabled": "SELECT COUNT(*) FROM domainlist WHERE type IN (0,2) AND enabled = 0 AND comment NOT LIKE '%AndeepND |%' AND comment NOT LIKE '%[ph5lt]'",
}
def get(cur, name):
""" get stats using prebuilt statements """
if name not in stats:
return -1
cur.execute(stats[name])
return str(cur.fetchone()[0])
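# Illustrative usage (assumes `cur` is an open sqlite3 cursor on Pi-hole's gravity.db):
# get(cur, "total_adlist")   -> e.g. "42"
# get(cur, "no_such_stat")   -> -1 (unknown stat name)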
def adlist_top3_by_comment(cur):
""" top 3 adlists by comment """
sql = "SELECT comment, count(*) FROM adlist GROUP BY comment LIMIT 3"
cur.execute(sql)
return cur.fetchall()
def allow_top3_by_comment(cur):
""" top 3 allow lists by comment """
sql = "SELECT comment, count(*) FROM domainlist WHERE type IN (0,2) GROUP BY comment LIMIT 3"
cur.execute(sql)
return cur.fetchall()
def stat_bar(cur):
""" one-liner stat bar """
# Block : All=X Ours=Y Oth=Z | Allow : All=X Ours=Y Oth=Z
data = []
data.append("Blocks Enabled: All=" + str(get(cur, "total_adlist_enabled")))
data.append("│")
data.append("Ours=" + str(get(cur, "our_adlist_enabled")))
# data.append("│")
# data.append("Other=" + str(get(cur, "other_adlist_enabled")))
data.append("│")
data.append("Allows Enabled: All=" + str(get(cur, "total_allow_enabled")))
data.append("│")
data.append("Ours=" + str(get(cur, "our_allow_enabled")))
# data.append("│")
# data.append("Other=" + str(get(cur, "other_allow_enabled")))
table = SingleTable([data])
table.inner_heading_row_border = False
table.outer_border = False
table.inner_row_border = False
table.inner_column_border = False
table.padding_left = 2
print()
print(color(table.table, bg="#505050", fg="white"))
print()
def header(cur):
""" a stats overview header """
print()
block_header(cur)
# utils.info("──────────────────────────────────────────────────────────────")
print()
allow_header(cur)
print()
def allow_header(cur):
""" allow portion of header """
block_data = [
[
"Total :",
get(cur, "total_allow_enabled") + "/" + get(cur, "total_allow"),
],
["Our Lists :", get(cur, "our_allow_enabled") + "/" + get(cur, "our_allow")],
[
"Others :",
get(cur, "other_allow_enabled") + "/" + get(cur, "other_allow"),
],
]
block_table = AsciiTable(block_data)
block_table.inner_heading_row_border = False
block_table.outer_border = False
block_table.inner_row_border = False
block_table.inner_column_border = False
rows = allow_top3_by_comment(cur)
t3_block_data = []
for row in rows:
t3_block_data.append([row[0], row[1]])
t3_block_table = AsciiTable(t3_block_data)
t3_block_table.inner_heading_row_border = False
t3_block_table.outer_border = False
t3_block_table.inner_row_border = False
t3_block_table.inner_column_border = False
table_data = [
["Allowlist Stats", "Top 3 by Comment"],
[block_table.table, t3_block_table.table],
]
table = SingleTable(table_data)
table.padding_left = 2
table.outer_border = False
utils.info(table.table)
def block_header(cur):
""" block portion of header """
block_data = [
[
"Total :",
get(cur, "total_adlist_enabled") + "/" + get(cur, "total_adlist"),
],
["Our Lists :", get(cur, "our_adlist_enabled") + "/" + get(cur, "our_adlist")],
[
"Others :",
get(cur, "other_adlist_enabled") + "/" + get(cur, "other_adlist"),
],
]
block_table = AsciiTable(block_data)
block_table.inner_heading_row_border = False
block_table.outer_border = False
block_table.inner_row_border = False
block_table.inner_column_border = False
rows = adlist_top3_by_comment(cur)
t3_block_data = []
for row in rows:
t3_block_data.append([row[0], row[1]])
t3_block_table = AsciiTable(t3_block_data)
t3_block_table.inner_heading_row_border = False
t3_block_table.outer_border = False
t3_block_table.inner_row_border = False
t3_block_table.inner_column_border = False
table_data = [
["Ad/Blocklist Stats", "Top 3 by Comment"],
[block_table.table, t3_block_table.table],
[],
]
table = SingleTable(table_data)
table.padding_left = 2
table.outer_border = False
utils.info(table.table)
```
#### File: pihole5-list-tool/tests/test_prompts.py
```python
import os
import sqlite3
from unittest.mock import patch
from ph5lt import prompts
@patch("os.path.exists")
def test_check_db_FNF(mock_exists):
# @patch passes the patched mock in as the first positional argument
mock_exists.return_value = False
result = prompts.check_db("blah")
assert result is False
@patch("sqlite3.connect")
def test_check_db_SqlDbError(mock_connect):
mock_connect.side_effect = sqlite3.DatabaseError("nope")
result = prompts.check_db("blah")
assert result is False
@patch("sqlite3.connect")
def test_check_db_OperationalError(mock_connect):
mock_connect.side_effect = sqlite3.OperationalError("nope")
result = prompts.check_db("blah")
assert result is False
def BROKEN_test_check_db_WrongDb():
""" can't get this mocked:
TypeError: catching classes that do not inherit from BaseException is not allowed
"""
# sqlite3.connect.value = True
with patch("prompts.sqlite3.connect().cursor()") as mocksql:
mocksql.side_effect = sqlite3.OperationalError
result = prompts.check_db("blah")
assert result is False
def test_check_db_SaulGoodMan():
""" Works, but really shouldn't ?? """
result = prompts.check_db("blah")
assert result is False
```
#### File: pihole5-list-tool/tests/test_utils.py
```python
import subprocess
import os
from subprocess import CalledProcessError
from unittest.mock import patch
from ph5lt import utils
class TestUtils:
def test_valid_url(self):
assert utils.valid_url("") is False
assert utils.valid_url("pihole") is False
assert utils.valid_url("pihole.net") is False
assert utils.valid_url("http://pihole.net") is True
assert utils.valid_url("http://pihole.net/v5") is True
assert utils.valid_url("http://pihole.net/v5?install=trye") is True
def test_validate_host(self):
test1 = "nope"
test2 = "nope.c"
test3 = "nope.com"
assert utils.validate_host(test1) is False
assert utils.validate_host(test2) is False
assert utils.validate_host(test3) is True
# TODO: enforce pi-url regex
def test_validate_regex(self):
assert utils.validate_regex("github") is True
def test_process_lines_empty(self):
new_list = utils.process_lines("", "", True)
assert len(new_list) == 0
def test_process_lines_full_url(self):
comment = "MyComment"
new_list = utils.process_lines(
"""
http://google.com
invalid
http://github.com
""",
comment,
True,
)
assert len(new_list) == 2
assert new_list[1]["url"] == "http://github.com"
assert new_list[1]["comment"] == comment
# TODO: Breakout host/url/regexes
def test_process_lines_any(self):
comment = "MyComment"
new_list = utils.process_lines(
"""
github
github.com
http://github.com
http://github.com/test
http://github.com/test?f08s
""",
comment,
True,
)
assert len(new_list) == 3
# assert new_list[1]["url"] == "http://github.com"
assert new_list[1]["comment"] == comment
def test_process_file(tmp_path):
comment = "MyComment"
tmpdir = tmp_path / "ph5lt"
tmpdir.mkdir()
tmpfile = tmpdir / "imports.txt"
urls = """
http://github.com
http://github.com/test
http://github.com/test?f08s
"""
tmpfile.write_text(urls)
impfile = open(tmpfile)
new_list = utils.process_lines(impfile.read(), comment, True)
assert len(new_list) == 3
# assert new_list[1]["url"] == "http://github.com"
assert new_list[1]["comment"] == comment
def test_find_docker_not_installed(fake_process):
def raise_FNF(process):
raise FileNotFoundError
fake_process.register_subprocess(
["docker", "inspect", "pihole"], stdout="not running FNF", callback=raise_FNF
)
result = utils.find_docker()
assert result == [False, None]
def raise_CPE(process):
raise CalledProcessError(returncode=1, cmd="test")
fake_process.register_subprocess(
["docker", "inspect", "pihole"], stdout="not running CPE", callback=raise_CPE
)
result = utils.find_docker()
assert result == [False, None]
def test_find_docker_image_not_running(fake_process):
fake_process.register_subprocess(
["docker", "inspect", "pihole"], stdout="not running", returncode=1
)
result = utils.find_docker()
assert result == [False, None]
def BROKEN_test_find_docker_image_not_found(fake_process):
""" Not actually launching mock process """
fake_process.register_subprocess(["docker", "inspect", "pihole"], stdout="bad json")
result = utils.find_docker()
assert result == [False, None]
@patch("os.path.exists")
def BROKEN_test_find_docker_image_found(fake_process, shared_datadir):
""" Not actually launching mock process """
path = "/home/jesse/projects/pihole/etc-pihole"
output = (shared_datadir / "docker_inspect_pihole.json").read_text().strip()
fake_process.register_subprocess(
["docker", "inspect", "pihole"], stdout=output,
)
result = utils.find_docker()
# os.path.exists.assert_called_once_with(path)
assert result == [True, path]
```
|
{
"source": "jessedp/tut",
"score": 2
}
|
#### File: jessedp/tut/config.py
```python
import sys
import os
from glob import glob
import pickle
import configparser
import logging
import logging.config
from tzlocal import get_localzone
from pytz import timezone
from pytz.exceptions import UnknownTimeZoneError
from tablo.api import Api
logger = logging.getLogger(__name__)
# For batch Api call
MAX_BATCH = 50
config = configparser.ConfigParser()
# TODO: see about using this for cleaner variable interpolation
# config = configparser.ConfigParser(
# interpolation=configparser.ExtendedInterpolation
# )
# prevent lowercasing options
config.optionxform = lambda option: option
orig_config = configparser.ConfigParser()
# built in shared options that we aren't allowing to be user-configurable
built_ins = {}
def view():
print(f"Settings from: {built_ins['config_file']}")
print("-" * 50)
# for display purposes...
orig_config['DEFAULT']['base_path'] = built_ins['base_path']
for sect in config.sections():
print(f'[{sect}]')
for item, val in config.items(sect):
ipol_disp = None
if item == 'base_path':
continue
else:
try:
test = orig_config.get(sect, item)
except configparser.NoOptionError:
test = None
def_val = f'{val} (default)'
if not test and not val:
val_disp = def_val
elif test and not val:
val_disp = f'{test} (default) '
elif val == test:
# The cheeky way I'm setting defaults means this can show
# up when it should just be "(default)"
val = config.get(sect, item)
raw_val = config.get(sect, item, raw=True)
if raw_val != val:
val_disp = f'{val} (set to default) '
ipol_disp = raw_val
else:
val_disp = f'{val} (set to default) '
else:
# print(f'{item} = {val}')
val_disp = val
pass
print('{:10}'.format(item) + " = " + val_disp)
if ipol_disp:
print('{:>10}'.format('real') + " = " + ipol_disp)
print()
print()
print("Built-in settings")
print("-" * 50)
print_dict(built_ins, '')
print()
print("Cached Devices")
print("-" * 50)
for name in glob(built_ins['db']['path'] + "device_*"):
with open(name, 'rb') as file:
device = pickle.load(file)
device.dump_info()
print()
print("Devices pre-loaded in Api")
print("-" * 50)
for device in Api.getTablos():
print(f"{device.ID} - {device.IP} - {device.modified}")
if Api.selectDevice(device.ID):
print("\tSuccessfully connected to Tablo!")
else:
print("\tUnable to connect to Tablo!")
print()
def discover(display=True):
Api.discover()
devices = Api.getTablos()
if not devices:
if display:
print("Unable to locate any Tablo devices!")
else:
for device in devices:
device.dump_info()
Api.selectDevice(device.ID)
if display:
print('srvInfo: ')
print_dict(Api.serverInfo)
print('subscription:')
print_dict(Api.subscription)
# cache the devices for later
# TODO: maybe save serverinfo and subscription if find a need
name = "device_" + device.ID
with open(built_ins['db']['path'] + name, 'wb') as file:
pickle.dump(device, file)
def setup():
# create/find what should our config file
if sys.platform == 'win32': # pragma: no cover
path = os.path.expanduser(r'~\Tablo')
else:
path = os.path.expanduser('~/Tablo')
built_ins['base_path'] = path
built_ins['config_file'] = built_ins['base_path'] + "/tablo.ini"
# this is here primarily for display order... :/
built_ins['dry_run'] = False
db_path = built_ins['base_path'] + "/db/"
built_ins['db'] = {
'path': db_path,
'guide': db_path + "guide.json",
'recordings': db_path + "recordings.json",
'recording_shows': db_path + "recording_shows.json"
}
os.makedirs(db_path, exist_ok=True)
if os.path.exists(built_ins['config_file']):
config.read(built_ins['config_file'])
else:
# write out a default config file
config.read_string(DEFAULT_CONFIG_FILE)
with open(built_ins['config_file'], 'w') as configfile:
configfile.write(DEFAULT_CONFIG_FILE)
orig_config.read_string(DEFAULT_CONFIG_FILE)
# Setup config defaults we're not configuring yet, but need
config['DEFAULT']['base_path'] = built_ins['base_path']
tz = ''
try:
tz = config.get('General', 'Timezone')
except configparser.NoSectionError:
config['General'] = {}
try:
timezone(tz)
except UnknownTimeZoneError:
if tz:
print("INVALID Timezone: '" + tz + "' - using defaults")
tz = get_localzone()
if tz:
config.set('General', 'Timezone', str(tz))
orig_config.set('General', 'Timezone', str(tz))
else:
config.set('General', 'Timezone', 'UTC')
orig_config.set('General', 'Timezone', 'UTC')
# Load cached devices so we don't *have* to discover
for name in glob(built_ins['db']['path'] + "device_*"):
with open(name, 'rb') as file:
device = pickle.load(file)
Api.add_device(device)
# if we can, go ahead and select a default device
# TODO: try to use the config ip/id here too
if Api.devices and len(Api.devices.tablos) == 1:
Api.device = Api.devices.tablos[0]
def setup_logger(level=logging.CRITICAL):
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format':
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': level,
'formatter': 'default',
'stream': 'ext://sys.stdout'
},
},
'root': {
'level': 'DEBUG',
'handlers': ['console']
},
'loggers': {
'default': {
'level': 'DEBUG',
'handlers': ['console']
}
},
})
"""
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'default',
'filename': log_path,
'maxBytes': 1024,
'backupCount': 3
}
"""
# This should be in "util". Either I'm done or python's resolving of cyclical
# imports is ... so here it lays.
def print_dict(dictionary, prefix='\t', braces=1):
""" Recursively prints nested dictionaries."""
for key, value in dictionary.items():
if isinstance(value, dict):
print()
print('%s%s%s%s' % (prefix, braces * '[', key, braces * ']'))
print_dict(value, prefix + ' ', braces + 1)
else:
width = 20 - len(prefix)
w_fmt = '{:' + str(width) + '}'
txt = prefix + w_fmt.format(key) + " = " + str(value)
print(txt)
# print( + '%s = %s' % (key, value))
DEFAULT_CONFIG_FILE = \
"""[General]
Timezone =
# Timezone: defaults to your system, then UTC
[Tablo]
# Define settings for the Tablo device you want to use. Usually only one Tablo
# exists and will be found/used by default, so there's usually no need to set
# these.
#
# The values can be found by running './tablo.py config --discover'
#
# IMPORTANT: If these are set and wrong, you'll need to remove or manually
# change them before things work.
ID =
# ID: the device ID (see above) selects a specific Tablo regardless of IP
# (great for non-reserved DHCP addresses)
IP =
# IP: the device IP address.
[Output Locations]
# The locations/paths recordings will be output to
# These will default to HOME_DIR-Tablo
TV = %(base_path)s/TV
Movies = %(base_path)s/Movies
"""
```
#### File: jessedp/tut/export.py
```python
import gevent
import os
import contextlib
import shutil
import socket
import tempfile
import logging
from tqdm import tqdm
import ffmpeg
from tinydb import TinyDB, Query
from config import built_ins
from recording import Recording
logger = logging.getLogger(__name__)
def copy(id_list, args):
total = len(id_list)
if total == 0:
print(f"Nothing to process, exiting...")
return
elif total == 1:
print(f"Processing {total} recording")
else:
print(f"Processing {total} recordings")
print("-"*50)
for id in id_list:
# TODO: put a X of Y somewhere near here
_copy(id, args)
print()
print("FINISHED")
def _copy(obj_id, args):
# TODO: Whoops, now used this twice (search.py too)
path = built_ins['db']['recordings']
rec_db = TinyDB(path)
shows = Query()
# shortcut for later
shows_qry = shows.data
# TODO: deal with pieces of the same recording (TMSID?) marked "finished"
# ie, 2 portions (non-full) of the an episode
# + just skip them (do this!)
# + differentiate on recorded at the same time
# - look at recording/show data to see what it *should* be?
# - overwrite previous portions
obj = rec_db.get(
(shows_qry.object_id == int(obj_id))
&
(shows_qry.video_details.state == 'finished')
)
if obj is None:
print(
f'ERROR: Unable to find recording with object_id == "{obj_id}", '
f'skipping...')
return
rec = Recording(obj['data'])
watch = rec.watch()
if watch.error is not None:
print(rec.get_description())
print("ERROR: Recording no longer exists, skipping!")
return
out_file = rec.get_out_path('mp4')
# TODO: this could make weird dirs?
os.makedirs(os.path.dirname(out_file), exist_ok=True)
# Display what we're working on
if built_ins['log_level'] <= logging.INFO:
rec.print()
watch.dump_info()
else:
print(rec.get_description())
print(" " * 2 + f"writing to: {out_file}")
if not args.clobber and os.path.exists(out_file):
print("File exists, skipping")
return
total_duration = float(ffmpeg.probe(
watch.playlist_url)['format']['duration'])
if built_ins['dry_run']:
# maybe do a dry run writing to a temp path and deleting so the time
# is roughly the same?
print("DRY RUN: The recording wasn't saved.")
else:
with show_progress(total_duration) as socket_filename:
try:
copier = (
ffmpeg
# this is a m3u8 playlist
.input(watch.playlist_url)
.output(out_file, codec='copy',
preset='ultrafast', loglevel='info')
.overwrite_output()
.global_args(
'-progress', 'unix://{}'.format(socket_filename)
)
)
copier.run(capture_stdout=True, capture_stderr=True)
except KeyboardInterrupt:
os.remove(out_file)
raise KeyboardInterrupt
except ffmpeg.Error as e:
logger.error(e)
# TODO: all of this should probably be somewhere else...
@contextlib.contextmanager
def _tmpdir_scope():
tmpdir = tempfile.mkdtemp()
try:
yield tmpdir
finally:
shutil.rmtree(tmpdir)
def _do_watch_progress(filename, sock, handler):
"""Function to run in a separate gevent greenlet to read progress
events from a unix-domain socket."""
connection, client_address = sock.accept()
data = b''
try:
while True:
more_data = connection.recv(16)
if not more_data:
break
data += more_data
lines = data.split(b'\n')
for line in lines[:-1]:
line = line.decode()
parts = line.split('=')
key = parts[0] if len(parts) > 0 else None
value = parts[1] if len(parts) > 1 else None
handler(key, value)
data = lines[-1]
finally:
connection.close()
@contextlib.contextmanager
def _watch_progress(handler):
"""Context manager for creating a unix-domain socket and listen for
ffmpeg progress events.
The socket filename is yielded from the context manager and the
socket is closed when the context manager is exited.
Args:
handler: a function to be called when progress events are
received; receives a ``key`` argument and ``value``
argument. (The example ``show_progress`` below uses tqdm)
Yields:
socket_filename: the name of the socket file.
"""
with _tmpdir_scope() as tmpdir:
socket_filename = os.path.join(tmpdir, 'sock')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
with contextlib.closing(sock):
sock.bind(socket_filename)
sock.listen(1)
child = gevent.spawn(_do_watch_progress,
socket_filename, sock, handler)
try:
yield socket_filename
except Exception:
gevent.kill(child)
raise
@contextlib.contextmanager
def show_progress(total_duration):
"""Create a unix-domain socket to watch progress and render tqdm
progress bar."""
with tqdm(total=round(total_duration, 2)) as bar:
def handler(key, value):
if key == 'out_time_ms':
time = round(float(value) / 1000000., 2)
bar.update(time - bar.n)
elif key == 'progress' and value == 'end':
bar.update(bar.total - bar.n)
with _watch_progress(handler) as socket_filename:
yield socket_filename
```
#### File: jessedp/tut/library.py
```python
import os
import re
import math
from datetime import timedelta
import pprint
import logging
from tinydb import TinyDB, Query
from tqdm import tqdm
from config import built_ins, MAX_BATCH
from util import chunks, file_time_str
from tablo.api import Api
from tablo.apiexception import APIError
from tablo.entities.show import Show
from recording import Recording
logger = logging.getLogger(__name__)
def view(args):
print()
path = built_ins['db']['recordings']
rec_db = TinyDB(path)
id_set = []
cnt = 0
for item in rec_db.all():
cnt += 1
if args.id_list:
obj_id = item['data']['object_id']
if obj_id not in id_set:
id_set.append(obj_id)
elif args.full:
pprint.pprint(item)
else:
Recording(item['data']).print()
if args.id_list:
print(id_set)
else:
print(f'Total recordings found: {cnt}')
def print_stats():
path = built_ins['db']['recordings']
rec_db = TinyDB(path)
shows = Query()
shows_qry = shows.data
field_title = '{:17}'
print("Overview")
print("-" * 50)
print(f"Built: {file_time_str(path)}")
cnt = len(rec_db.all())
print('{:10}'.format("Total Recordings") + ": " + f'{cnt}')
cnt = rec_db.count(shows_qry.user_info.watched == True) # noqa: E712
print('{:10}'.format("Total Watched") + ": " + f'{cnt}')
print()
print("By Current Recording State")
print("-"*50)
cnt = rec_db.count(shows_qry.video_details.state == 'finished')
print(field_title.format("Finished") + ": " + f'{cnt}')
cnt = rec_db.count(shows_qry.video_details.state == 'failed')
print(field_title.format("Failed") + ": " + f'{cnt}')
cnt = rec_db.count(shows_qry.video_details.state == 'recording')
print(field_title.format("Recording") + ": " + f'{cnt}')
print()
print("By Recording Type")
print("-" * 50)
cnt = rec_db.count(shows.path.matches(f'.*episode.*', flags=re.IGNORECASE))
print(field_title.format("Episodes/Series") + ": " + f'{cnt}')
cnt = rec_db.count(shows.path.matches(f'.*movie.*', flags=re.IGNORECASE))
print(field_title.format("Movies") + ": " + f'{cnt}')
cnt = rec_db.count(shows.path.matches(f'.*sports.*', flags=re.IGNORECASE))
print(field_title.format("Sports/Events") + ": " + f'{cnt}')
cnt = rec_db.count(
shows.path.matches(f'.*programs.*', flags=re.IGNORECASE)
)
print(field_title.format("Programs") + ": " + f'{cnt}')
print()
print("By Show")
print("-" * 50)
shows = {}
max_width = 0
for item in rec_db.all():
title = item['data']['airing_details']['show_title']
max_width = max(max_width, len(title))
key = _sortable_title(title)
if key not in shows.keys():
shows[key] = {'cnt': 1, 'title': title}
else:
shows[key]['cnt'] += 1
for key in sorted(shows.keys()):
# print(f"{shows[key]['title']} - {shows[key]['cnt']}")
print(
('{:' + str(max_width) + '}').format(shows[key]['title']) +
' - {:>2}'.format(shows[key]['cnt'])
)
def _sortable_title(title):
# toss a/an/the, force non-letters to end
articles = ['a', 'an', 'the']
word = title.split(' ', 1)[0].lower()
sort_title = title
if word in articles:
try:
sort_title = title.split(' ', 1)[1]
except Exception:
sort_title = title
if ord(sort_title[0]) not in range(ord('A'), ord('z') + 1):
sort_title = "ZZZZ" + sort_title
return sort_title
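# Illustrative behaviour (hypothetical titles):
# _sortable_title("The Office") -> "Office"
# _sortable_title("9-1-1")      -> "ZZZZ9-1-1"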
def build():
print("Building library. NO videos are being fetched.")
print("-"*50)
Api.discover()
connected = Api.selectDevice()
if not connected:
logger.exception("NOT CONNECTED")
# don't think we'll need this
# _build_guide()
_build_recordings()
def _build_guide():
guide_path = built_ins['db']['guide']
if not built_ins['dry_run']:
try:
os.unlink(guide_path)
except Exception:
pass
guide_db = {}
if not built_ins['dry_run']:
guide_db = TinyDB(guide_path)
# Load all the shows
print('Loading All Guide/Show data')
sections = Api.views('guide').shows.get()
total = sum(len(section.get('contents')) for section in sections)
print(f"Total Shows: {total}")
for section in sections:
contents = section.get('contents')
if not contents:
logger.info(f"Section {section.get('key').upper()} (0)")
continue
logger.info(f"Section {section.get('key').upper()} ({len(contents)})")
for piece in chunks(contents, MAX_BATCH):
shows = Api.batch.post(piece)
for path, data in shows.items():
show = Show.newFromData(data)
if not built_ins['dry_run']:
guide_db.insert({
'id': show.object_id,
'path': show.path,
'data': show.data,
'version': Api.device.version
})
def _build_recordings():
recs_path = built_ins['db']['recordings']
recshow_path = built_ins['db']['recording_shows']
if not built_ins['dry_run']:
try:
os.unlink(recs_path)
except Exception:
pass
try:
os.unlink(recshow_path)
except Exception:
pass
recs_db = TinyDB(recs_path)
programs = Api.recordings.airings.get()
show_paths = []
print(f"Total Recordings: {len(programs)}")
# cnt = 0
with tqdm(total=len(programs)) as pbar:
for piece in chunks(programs, MAX_BATCH):
airings = Api.batch.post(piece)
# cnt += len(airings)
# print(f"\tchunk: {cnt}/{len(programs)}")
for path, data in airings.items():
airing = Recording(data)
if airing.showPath not in show_paths:
show_paths.append(airing.showPath)
if not built_ins['dry_run']:
recs_db.insert({
'id': airing.object_id,
'path': airing.path,
'show_path': airing.showPath,
'data': airing.data,
'version': Api.device.version
})
pbar.update(1)
recshow_db = TinyDB(recshow_path)
print(f"Total Recorded Shows: {len(show_paths)}")
my_show = Query()
with tqdm(total=len(show_paths)) as pbar:
# this is silly and just to make the progress bar move :/
for piece in chunks(show_paths, math.ceil(MAX_BATCH/5)):
# not caring about progress, we'd use this:
# for piece in chunks(show_paths, MAX_BATCH):
airing_shows = Api.batch.post(piece)
for path, data in airing_shows.items():
stuff = recshow_db.search(my_show.show_path == path)
pbar.update(1)
if not stuff:
if not built_ins['dry_run']:
recshow_db.insert({
'id': data['object_id'],
'show_path': path,
'data': data,
'version': Api.device.version
})
print("Done!")
def print_dupes():
dupes = _find_dupes()
for key, data in dupes.items():
if len(data) > 1:
print(key + " = " + str(len(data)))
for item in data:
rec = Recording(item)
print("\t" + str(rec.object_id) + " | " +
rec.get_description() + " - " + rec.get_dur())
def _find_dupes():
path = built_ins['db']['recordings']
rec_db = TinyDB(path)
dupes = {}
for item in rec_db.all():
data = item['data']
if 'episode' in data.keys():
tmsid = data['episode']['tms_id']
if tmsid.startswith('SH'):
# TODO: this is easy, but wrong. SH* tms_id duplicates for
# every episode. Maybe replace with a pseudo-title?
continue
if tmsid not in dupes:
dupes[tmsid] = []
dupes[tmsid].append(data)
else:
dupes[tmsid].append(data)
return dupes
def print_incomplete(args):
# weird way I made it work...
percent = args.incomplete
if percent == -1:
percent = 100
else:
percent = min(percent, 100)
percent = max(percent, 0)
percent = percent / 100
dupes = _find_dupes()
proper_dur = 0
matched = 0
total_recs = 0
id_set = []
for key, data in dupes.items():
if key.startswith('SH'):
continue
if len(data) > 0:
sum_actual_dur = 0
recs = []
for item in data:
rec = Recording(item)
actual_dur = rec.video_details['duration']
proper_dur = rec.airing_details['duration']
sum_actual_dur += actual_dur
if proper_dur > actual_dur:
recs.append(rec)
if (proper_dur * percent) > sum_actual_dur:
matched += 1
total_recs += len(recs)
header = None
for x in recs:
if args.id_list:
if x.object_id not in id_set:
id_set.append(x.object_id)
else:
if not header:
header = x.get_description() + \
" - " + x.episode['tms_id']
print(header)
print("\t" + str(x.object_id) + " | " +
x.get_description() + " - " + x.get_dur())
if not args.id_list:
sum_txt = str(timedelta(seconds=sum_actual_dur))
total_txt = str(timedelta(seconds=proper_dur))
pct = str(round(sum_actual_dur / proper_dur * 100, 2))
print(f"\n\t{sum_txt} / {total_txt} ({pct}%)")
print()
if args.id_list:
print(id_set)
else:
print(f"Total incomplete shows less than {percent*100}% - {matched} "
f"({total_recs} items)")
def delete(id_list, args):
# TODO: add a confirmation (sans --yyyyassss)
total = len(id_list)
if total == 0:
print(f"Nothing to delete, exiting...")
return
elif total == 1:
print(f"Deleting {total} recording")
else:
print(f"Deleting {total} recordings")
print("-" * 50)
# Load all the recs
path = built_ins['db']['recordings']
rec_db = TinyDB(path)
shows = Query()
# shortcut for later
shows_qry = shows.data
recs = []
total = 0
for obj_id in id_list:
obj = rec_db.get(
(shows_qry.object_id == int(obj_id))
&
(shows_qry.video_details.state != 'recording')
)
if not obj:
print(f'ERROR: Unable to find recording with '
f'object_id == "{obj_id}", skipping...')
continue
total += 1
recs.append(
{
'doc_id': obj.doc_id,
'obj_id': obj_id,
'rec': Recording(obj['data'])
})
# TODO: don't "total" like this
if total <= 0:
print(f"No recordings found; {len(id_list)} requested.")
elif total == 1:
print(f"Deleting {total} recording...")
else:
print(f"Deleting {total} recordings...")
if total > 0:
for rec in recs:
rec = rec['rec']
print(f" - {rec.get_actual_dur()} | {rec.get_description()} ")
print("-" * 50)
if not args.yes:
print()
print('\tAdd the "--yes" flag to actually delete things...')
print()
else:
for rec in recs:
_delete(rec, rec_db)
print("\nFINISHED")
def _delete(rec, rec_db):
doc_id = rec['doc_id']
item = rec['rec']
print(f"Deleting: {item.get_description()} ({item.get_actual_dur()})")
if built_ins['dry_run']:
print("DRY RUN: would have deleted...")
else:
try:
# try to delete the full recording
item.delete()
# delete the local db record instead of REBUILDing everything
rec_db.remove(doc_ids=[doc_id])
print("\tDeleted!")
except APIError:
print("Recording no longer exists")
pass
```
#### File: jessedp/tut/search.py
```python
from pprint import pprint
import re
from tinydb import TinyDB, Query
from config import built_ins
from util import datetime_comp
from recording import Recording
def search(args):
path = built_ins['db']['recordings']
rec_db = TinyDB(path)
shows = Query()
# shortcut for later
shows_qry = shows.data
# to store all possible search options/segments
params = []
# Handle search "term"arg - this checks title and description
if args.term:
params.append(
shows_qry.airing_details.show_title.matches(
f'.*{args.term}.*', flags=re.IGNORECASE
)
|
shows_qry.episode.description.matches(
f'.*{args.term}.*', flags=re.IGNORECASE
)
# Gah, should work, always bombs. Suspect on non-episodes
# though episode.description is fine?
# |
# shows_qry.episode['title'].matches(
# f'.*{args.term}.*', flags=re.IGNORECASE
# )
)
# Handle "after" date arg
if args.after:
params.append(
shows_qry.airing_details.datetime.test(
datetime_comp, '>', args.after)
)
# Handle "before" date arg
if args.before:
params.append(
shows_qry.airing_details.datetime.test(
datetime_comp, '<', args.before)
)
# Handle recording state args
if args.state:
state_params = []
for state in args.state:
state_params.append(
shows_qry.video_details.state == state
)
state_query = None
for param in state_params:
if not state_query:
state_query = param
else:
state_query = (state_query) | (param)
params.append(state_query)
# Handle recording type args
if args.type:
type_params = []
for rec_type in args.type:
type_params.append(
shows.path.matches(
f'.*{rec_type}.*', flags=re.IGNORECASE
)
)
type_query = None
for param in type_params:
if not type_query:
type_query = param
else:
type_query = (type_query) | (param)
params.append(type_query)
# Handle watched arg
if args.watched:
params.append(
shows_qry.user_info.watched == True # noqa: E712
)
# Handle season arg
if args.season:
params.append(
shows_qry.episode.season_number == args.season
)
# Handle season arg
if args.episode:
params.append(
shows_qry.episode.number == args.episode
)
# Handle tms-id arg
if args.tms_id:
params.append(
shows_qry.episode.tms_id == args.tms_id
)
# Handle duration limit
if args.duration:
params.append(
shows_qry.video_details.duration < args.duration
)
# Handle tablo object id arg
if args.id:
params.append(
shows_qry.object_id == int(args.id)
)
# Finally, put the all the query params together and do the search
query = None
for param in params:
if not query:
query = param
else:
query = query & param
if not query:
# TODO: probably shouldn't let this happen?
results = rec_db.all()
else:
results = rec_db.search(query)
if not results:
if args.id_list:
print([])
else:
# TODO: print the criteria we tried to match
            print('No matching records found.')
else:
id_set = []
returned = 0
for item in results:
if args.id_list:
obj_id = item['data']['object_id']
if obj_id not in id_set:
id_set.append(obj_id)
elif args.full:
pprint(item)
else:
Recording(item['data']).print()
returned += 1
if args.limit and returned == args.limit:
break
if args.id_list:
print(id_set)
else:
if returned == len(results):
print(f'Total recordings found: {len(results)}')
else:
print(f'{returned}/{len(results)} total recordings displayed')
```
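For context, a minimal standalone sketch (not part of the repo) of the TinyDB query-composition pattern `search()` relies on: conditions inside one option group are OR'd together, then the groups are AND'd. The database path and field values below are illustrative.
```python
from tinydb import TinyDB, Query
db = TinyDB('example_recordings.json')  # illustrative path
shows = Query()
# OR the options within one group...
states = ['finished', 'failed']
state_query = None
for state in states:
    cond = shows.data.video_details.state == state
    state_query = cond if state_query is None else (state_query | cond)
# ...then AND the groups together, as search() does when folding params.
query = state_query & (shows.data.user_info.watched == True)  # noqa: E712
results = db.search(query)
```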
#### File: tut/tablo/compat.py
```python
import time
import datetime
try:
datetime.datetime.strptime('0', '%H')
except TypeError:
# Fix for datetime issues with XBMC/Kodi
class new_datetime(datetime.datetime):
@classmethod
def strptime(cls, dstring, dformat):
return datetime.datetime(*(time.strptime(dstring, dformat)[0:6]))
datetime.datetime = new_datetime
# This is weird, probably should be done/described better if it
# touches code that currently matters.
def timedelta_total_seconds(td):
    try:
        return td.total_seconds()
    except Exception:
        # Fallback for very old Pythons without timedelta.total_seconds():
        # replicate it, including the microsecond component.
        SECONDS_PER_DAY = 24 * 3600
        # For converting microseconds back to seconds. sigh.
        ONE_MILLION = 10 ** 6
        return (
            td.microseconds
            + (td.seconds + td.days * SECONDS_PER_DAY) * ONE_MILLION
        ) / ONE_MILLION
```
#### File: tablo/entities/channel.py
```python
class Channel(object):
def __init__(self, data):
self.path = data['path']
self.object_id = data['object_id']
self.data = data
def __getattr__(self, name):
return self.data['channel'].get(name)
```
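A hypothetical usage sketch of `Channel`: unknown attributes are resolved against the nested `channel` dict via `__getattr__`. The payload below is made up, not a real Tablo response.
```python
data = {
    'path': '/guide/channels/1',
    'object_id': 1,
    'channel': {'call_sign': 'WXYZ', 'major': 7, 'minor': 1},
}
ch = Channel(data)
print(ch.object_id)  # 1 -- a real instance attribute
print(ch.call_sign)  # 'WXYZ' -- resolved through __getattr__
```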
#### File: tablo/entities/series.py
```python
from .show import Show
from tablo.util import logger
from tablo.api import Api
from tablo.apiexception import APIError
class Series(Show):
type = 'SERIES'
airingType = 'episode'
def processData(self, data):
self.data = data['series']
def episodes(self):
return Api(self.path).episodes.get()
def seasons(self):
try:
return Api(self.path).seasons.get()
except APIError as e:
            logger.error(f'Series.seasons() failed: {e.message}')
return []
def _airings(self):
return self.episodes()
```
#### File: tut/tablo/__init__.py
```python
# from .apiexception import APIError
# from .entities.show import Show
# from .entities.movie import Movie
# from .entities.program import Program
# from .entities.series import Series
# from .entities.sport import Sport
# from .entities.channel import Channel
# from .entities.airing import Airing
# from .entities.airing import GridAiring
# from .api import TabloAPI
# from .watch import Watch
# def setUserAgent(agent):
# Api.USER_AGENT = agent
# Api = TabloAPI()
```
#### File: tut/tablo/util.py
```python
import logging
import pytz
import traceback
from tablo import compat
# TODO: remove this completely!
LOG_LEVEL = logging.DEBUG
logger = logging.getLogger(__name__)
def UTCNow():
return compat.datetime.datetime.now(pytz.timezone('UTC'))
# TODO: move processDate elsewhere (tablo.api?)
def processDate(date, format_='%Y-%m-%dT%H:%M'):
if not date:
return None
try:
from tablo.api import Api
return Api.timezone.fromutc(
compat.datetime.datetime.strptime(date.rsplit('Z', 1)[0], format_)
)
except Exception:
traceback.print_exc()
return None
def debug_log(msg):
import inspect
from os import path
func = inspect.currentframe().f_back.f_code
filename = path.basename(func.co_filename)
message = f'{filename}:{func.co_name}:{func.co_firstlineno}\n{msg}'
    # Dump the message plus the caller's file, function, and line number to the log.
logger.debug(message)
def dump_info(obj):
attrs = vars(obj)
logger.info(f'OBJECT str: {obj}')
logger.info(', '.join("%s: %s" % item for item in attrs.items()))
def print_dict(dictionary, prefix='\t', braces=1):
""" Recursively prints nested dictionaries."""
for key, value in dictionary.items():
if isinstance(value, dict):
print('%s%s%s%s' % (prefix, braces * '[', key, braces * ']'))
print_dict(value, prefix + ' ', braces + 1)
else:
print(prefix + '%s = %s' % (key, value))
```
|
{
"source": "jessedusty/Plaintext-Live-Timing",
"score": 3
}
|
#### File: jessedusty/Plaintext-Live-Timing/parse_scores.py
```python
import urllib.request
from operator import itemgetter
DNF_time = 9999
def parse_time(time):
if "DSQ" in time or "DNF" in time or "DNS" in time:
return DNF_time
if ":" in time:
parts = time.split(":")
return int(parts[0]) * 60 + float(parts[1])
return float(time)
def get_text(url):
with urllib.request.urlopen(url) as f:
return f.read().decode("UTF-8")
def print_person(person):
print(person["name"] + " #" + str(person["place"]))
print(" r1: " + person["r1"])
print(" r2: " + person["r2"])
print(" c: " + person["tt"])
def string_person(person):
a = person["name"] + " #" + str(person["place"]) + "\n"
if "r1" in person:
a += " r1: " + person["r1"] + "\n"
if "r2" in person:
a += " r2: " + person["r2"] + "\n"
if "tt" in person:
a += " c: " + person["tt"] + "\n"
return a
def retrieve_times(race_id):  # e.g. 185595
    url = "http://www.live-timing.com/includes/aj_race.php?r={}&&m=1&&u=5".format(race_id)
content = get_text(url)
racer = {"s_tt": DNF_time}
racers = []
for a in content.split("|"):
b = a.split("=")
if b[0] == "m":
if "name" in racer:
racers.append(racer)
racer = {"s_tt": DNF_time}
racer["name"] = b[1]
elif b[0] == "t":
racer["team"] = b[1]
elif b[0] == "r1":
racer["r1"] = b[1]
racer["s_r1"] = parse_time(b[1])
elif b[0] == "r2":
racer["r2"] = b[1]
racer["s_r2"] = parse_time(b[1])
elif b[0] == "tt":
racer["tt"] = b[1]
racer["s_tt"] = parse_time(b[1])
newlist = sorted(racers, key=itemgetter("s_tt"))
for i, n in enumerate(newlist):
n["place"] = i + 1
return newlist
def stringify_racers(times_list):
result = ""
for i in times_list:
result += string_person(i)
return result
def show_team(times_list, team="SIT"):
return filter(lambda x: x["team"] == team.upper(), times_list)
```
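A hedged usage sketch; the race id comes from the `# e.g. 185595` comment above and may no longer resolve on live-timing.com.
```python
if __name__ == "__main__":
    times = retrieve_times(185595)
    print(stringify_racers(show_team(times, team="SIT")))
```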
|
{
"source": "JesseEmond/csgains",
"score": 3
}
|
#### File: miner/coinslib/Command.py
```python
import json
class Command:
def __init__(self, name, args={}):
self.name = name
self.args = args
def to_json(self):
return json.dumps(self.__dict__)
```
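Illustrative usage only; the real command names and arguments are dictated by the csgains server protocol, which is not shown here.
```python
cmd = Command('get_current_challenge')  # hypothetical command name
print(cmd.to_json())  # {"name": "get_current_challenge", "args": {}}
```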
#### File: csgains/miner/cpp_solvers.py
```python
from ctypes import cdll
import ctypes
lib = cdll.LoadLibrary('./cpp/libsolvers.so')
lib.solve_list_sort.restype = ctypes.c_uint64
lib.solve_shortest_path.restype = ctypes.c_uint64
lib.unit_test()
def solve_sorted_list(target_prefix, previous_hash, nb_elements):
try:
return lib.solve_list_sort(target_prefix.encode('ascii'),
previous_hash.encode('ascii'),
nb_elements,
True)
    except Exception:
return 0
def solve_reverse_sorted_list(target_prefix, previous_hash, nb_elements):
try:
return lib.solve_list_sort(target_prefix.encode('ascii'),
previous_hash.encode('ascii'),
nb_elements,
False)
    except Exception:
return 0
def solve_shortest_path(target_prefix, previous_hash, nb_blockers, grid_size):
try:
return lib.solve_shortest_path(target_prefix.encode('ascii'),
previous_hash.encode('ascii'),
nb_blockers,
grid_size)
    except Exception:
return 0
```
|
{
"source": "jesseengel/ddsp",
"score": 2
}
|
#### File: ddsp/training/train_util.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import gin
import gin.tf
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2.summary as tf_summary
# ---------------------- Train op ----------------------------------------------
def _clip_gradients_by_norm(grads_and_vars, gradient_clip_norm):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = tf.clip_by_global_norm(gradients, gradient_clip_norm)
return list(zip(clipped_gradients, variables))
@gin.configurable
def get_train_op(loss,
learning_rate=0.001,
lr_decay_steps=10000,
lr_decay_rate=0.98,
gradient_clip_norm=3.0,
use_tpu=True,
variables=None):
"""Get training operation with gradient clipping and learning rate decay.
Distilled from tf.contrib.layers.optimize_loss().
Args:
loss: Scalar tensor of the loss function.
learning_rate: Scalar initial learning rate.
lr_decay_steps: Exponential decay timescale.
lr_decay_rate: Exponential decay magnitude.
gradient_clip_norm: Global norm by which to scale gradients.
use_tpu: Use tpu for training.
variables: List of variables to optimize. tf.trainable_variables() if None.
Returns:
train_op: Operation that runs one iteration of training.
"""
global_step = tf.train.get_or_create_global_step()
with tf.variable_scope('training', values=[loss, global_step]):
# Make sure update ops run before computing loss.
update_ops = list(set(tf.get_collection(tf.GraphKeys.UPDATE_OPS)))
with tf.control_dependencies(update_ops):
loss = tf.identity(loss)
# Learning rate variable, with decay.
learning_rate_decay_fn = functools.partial(
tf.train.exponential_decay,
decay_steps=lr_decay_steps,
decay_rate=lr_decay_rate,
staircase=True)
lr = tf.get_variable(
'learning_rate', [],
trainable=False,
initializer=tf.constant_initializer(learning_rate))
lr = learning_rate_decay_fn(lr, global_step)
# Optimizer.
opt = tf.train.AdamOptimizer(lr)
if use_tpu:
opt = tf.tpu.CrossShardOptimizer(opt)
# All trainable variables, if specific variables are not specified.
if variables is None:
variables = tf.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(
loss, variables, colocate_gradients_with_ops=False)
# Optionally clip gradients by global norm.
if isinstance(gradient_clip_norm, float):
gradients = _clip_gradients_by_norm(gradients, gradient_clip_norm)
# Create gradient updates.
grad_updates = opt.apply_gradients(
gradients, global_step=global_step, name='train')
# Ensure the train_op computes grad_updates.
with tf.control_dependencies([grad_updates]):
train_op = tf.identity(loss)
return train_op
# ---------------------- Estimators --------------------------------------------
def get_estimator_spec(loss,
mode,
model_dir,
use_tpu=True,
scaffold_fn=None,
variables_to_optimize=None,
host_call=None):
"""Get TPUEstimatorSpec depending on mode, for Model.get_model_fn()."""
train_op = get_train_op(
loss, use_tpu=use_tpu, variables=variables_to_optimize)
gin_config_saver_hook = gin.tf.GinConfigSaverHook(
model_dir, summarize_config=True)
# Train
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
training_hooks=[
gin_config_saver_hook,
],
scaffold_fn=scaffold_fn,
host_call=host_call)
# Eval
elif mode == tf.estimator.ModeKeys.EVAL:
raise ValueError('Estimator evaluation is not supported. Use ddsp_run.py '
'--mode=eval instead.')
# Predict
elif mode == tf.estimator.ModeKeys.PREDICT:
raise ValueError('Do not use estimator.predict(), which requires a flat '
'dictionary of predictions. Use model.get_outputs() and '
'model.restore() instead.')
else:
raise ValueError('Unsupported mode: %s' % mode)
def get_host_call_fn(model_dir):
"""`host_call` function for creating training summaries when using TPU."""
def host_call_fn(**kwargs):
"""Host_call_fn.
Args:
**kwargs: dict of summary name to tf.Tensor mapping. The value we see here
is the tensor across all cores, concatenated along axis 0. This function
            will make a scalar summary that is the mean of the whole tensor (as
all the values are the same - the mean, trait of
tpu.CrossShardOptimizer).
Returns:
A merged summary op.
"""
gs = kwargs.pop('global_step')[0]
with tf_summary.create_file_writer(model_dir).as_default():
with tf_summary.record_if(tf.equal(gs % 10, 0)):
for name, tensor in kwargs.items():
# Take the mean across cores.
tensor = tf.reduce_mean(tensor)
tf_summary.scalar(name, tensor, step=gs)
return tf.summary.all_v2_summary_ops()
return host_call_fn
@gin.configurable
def create_estimator(model_fn,
model_dir,
master='',
batch_size=128,
save_checkpoint_steps=300,
save_summary_steps=300,
keep_checkpoint_max=100,
warm_start_from=None,
use_tpu=True):
"""Creates an estimator."""
config = tf.estimator.tpu.RunConfig(
master=master,
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=save_checkpoint_steps),
save_summary_steps=save_summary_steps,
save_checkpoints_steps=save_checkpoint_steps,
keep_checkpoint_max=keep_checkpoint_max,
keep_checkpoint_every_n_hours=1)
params = {'model_dir': model_dir}
return tf.estimator.tpu.TPUEstimator(
model_fn=model_fn,
model_dir=model_dir,
params=params,
train_batch_size=batch_size,
eval_batch_size=batch_size,
predict_batch_size=batch_size,
config=config,
warm_start_from=warm_start_from,
use_tpu=use_tpu,
eval_on_tpu=False)
# ---------------------- Training ----------------------------------------------
@gin.configurable
def train(data_provider,
model,
model_dir='~/tmp/ddsp',
num_steps=1000000,
master='',
use_tpu=True):
"""Main training loop."""
input_fn = data_provider.get_input_fn(shuffle=True, repeats=-1)
model_fn = model.get_model_fn(use_tpu=use_tpu)
estimator = create_estimator(
model_fn=model_fn,
model_dir=os.path.expanduser(model_dir),
master=master,
use_tpu=use_tpu)
estimator.train(input_fn=input_fn, max_steps=num_steps)
```
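Because `train()`, `create_estimator()`, and `get_train_op()` are `@gin.configurable`, their keyword arguments are normally overridden through gin bindings rather than edited in code. A hedged sketch with example values (not the project defaults):
```python
import gin
gin.parse_config([
    'get_train_op.learning_rate = 1e-4',
    'get_train_op.gradient_clip_norm = 3.0',
    'create_estimator.batch_size = 64',
    'train.num_steps = 100000',
])
```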
|
{
"source": "jesseerdmann/audiobonsai",
"score": 2
}
|
#### File: jesseerdmann/audiobonsai/parse_sorting_hat.py
```python
import re
from audiobonsai import settings
from datetime import datetime, date
from datetime import timedelta
from urllib.request import urlopen
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from pprint import pprint
from sausage_grinder.models import Release, Artist
from spotify_helper.models import SpotifyUser
from spotipy import SpotifyException
from spotify_helper.helpers import get_user_conn
def handle_album_list(sp, query_list, all_eligible=False):
track_list = []
album_dets_list = sp.albums(query_list)
if album_dets_list is None:
return
for album_dets in album_dets_list[u'albums']:
if album_dets is None:
print('Unable to retrieve information on one of the provided \
albums.')
continue
try:
album = Release.objects.get(spotify_uri=album_dets[u'uri'])
except Release.MultipleObjectsReturned:
print('Repeat album? {}, SKIPPING'.format(album_dets[u'uri']))
continue
track_list += album.process(sp, album_dets, all_eligible)
# Track.objects.bulk_create(track_list)
return
def handle_albums(sp, all_eligible=False):
candidate_list = Release.objects.filter(processed=False)
print('CANDIDATE LIST LENGTH: {0:d}'.format(len(candidate_list)))
offset = 0
batch_size = 20
while offset < len(candidate_list):
sp_uri_list = [candidate.spotify_uri for candidate in
candidate_list[offset:offset + batch_size]]
handle_album_list(sp, sp_uri_list, all_eligible)
if offset % 1000 == 0:
print('{}: -> {} albums processed'.format(datetime.now(), offset))
offset += batch_size
def handle_artist_list(sp, query_list):
artist_dets_list = sp.artists(query_list)
for artist_dets in artist_dets_list[u'artists']:
if artist_dets is None:
print('Unable to retrieve information on one of the provided \
albums.')
continue
try:
artist = Artist.objects.get(spotify_uri=artist_dets[u'uri'])
artist.process(sp, artist_dets)
except Artist.DoesNotExist:
print('Artist returned not in the database already, skipping.')
continue
def handle_artists(sp):
candidate_list = Artist.objects.all()
offset = 0
batch_size = 50
while offset < len(candidate_list):
sp_uri_list = [candidate.spotify_uri for candidate in
candidate_list[offset:offset + batch_size]]
handle_artist_list(sp, sp_uri_list)
if offset % 1000 == 0:
print('{}: -> {} artists processed'.format(datetime.now(), offset))
offset += batch_size
return True
def parse_sorting_hat():
print('{}: parse_sorting_hat'.format(datetime.now()))
user = User.objects.get(username=settings.SPOTIFY_USERNAME)
spotify_user = SpotifyUser.objects.get(user=user)
sp = get_user_conn(spotify_user, '127.0.0.1:8000')
if type(sp) is HttpResponseRedirect:
print('User {} not authed'.format(settings.SPOTIFY_USERNAME))
exit(-1)
print('{}: Downloading Sorting Hat and creating \
releases'.format(datetime.now()))
response = urlopen('http://everynoise.com/spotify_new_releases.html')
html = response.read().decode("utf-8")
releases = html.split('</div><div class=')
match_string = re.compile(' title="artist rank:.*')
group_text = ' title="artist rank: ([0-9,-]+)"><a onclick=".*" '\
'href="(spotify:album:.*)"><span class=.*>.*</span> '\
'<span class=.*>.*</span></a> <span class="play trackcount" '\
'albumid=spotify:album:.* nolink=true onclick=".*">' \
'([0-9]+)</span>'
group_string = re.compile(group_text)
candidate_list = []
for release in releases:
for match in match_string.findall(release):
bits = group_string.match(match)
if bits is None:
continue
try:
candidate = Release.objects.get(spotify_uri=bits.group(2))
except Release.MultipleObjectsReturned:
pass
except Release.DoesNotExist:
candidate = Release(spotify_uri=bits.group(2),
sorting_hat_track_num=int(bits.group(3)))
if bits.group(1) != '-':
candidate.sorting_hat_rank = int(bits.group(1))
candidate_list.append(candidate)
# Shorten list for debugging
candidate_list = candidate_list[0:50]
print(candidate_list)
Release.objects.bulk_create(candidate_list)
print('{0:d} releases processed'.format(len(candidate_list)))
print('{0:d} candidate releases'.format(len(candidate_list)))
'''
done = False
while not done:
try:
print('{}: handle_albums'.format(datetime.now()))
handle_albums(sp, False)
except SpotifyException:
sp = get_user_conn(spotify_user, '127.0.0.1:8000')
continue
done = True
done = False
while not done:
try:
print('{}: delete_ineligible_releases'.format(datetime.now()))
except SpotifyException:
sp = get_user_conn(spotify_user, '127.0.0.1:8000')
continue
done = True
done = False
while not done:
try:
print('{}: handle_artists'.format(datetime.now()))
handle_artists(sp)
except SpotifyException:
sp = get_user_conn(spotify_user, '127.0.0.1:8000')
continue
print('{}: done'.format(datetime.now()))
done = True
'''
return
if __name__ == '__main__':
parse_sorting_hat()
```
|
{
"source": "JesseFarebro/dqn-ale",
"score": 2
}
|
#### File: JesseFarebro/dqn-ale/experience_replay.py
```python
import numpy as np
from config import cfg
class CircularBuffer:
def __init__(self, maxlen, shape, dtype):
self.maxlen = maxlen
self.start = 0
self.length = 0
self.data = np.empty((maxlen,) + shape, dtype=dtype)
def __len__(self):
return self.length
def __getitem__(self, idx):
if isinstance(idx, int):
if idx < 0 or idx >= self.length:
raise KeyError()
elif isinstance(idx, np.ndarray):
if (idx < 0).any() or (idx >= self.length).any():
raise KeyError()
return self.data.take(self.start + idx, mode="wrap", axis=0)
def __array__(self):
return self.data.take(
np.arange(self.start, self.start + self.length), mode="wrap", axis=0
)
def append(self, v):
if self.length < self.maxlen:
self.length += 1
elif self.length == self.maxlen:
self.start = (self.start + 1) % self.maxlen
else:
raise RuntimeError()
self.data[(self.start + self.length - 1) % self.maxlen] = v
class ExperienceReplay:
def __init__(self, size, obs_shape):
self._obs_height, self._obs_width, self._obs_channels = obs_shape
self.size = size
self.observations = CircularBuffer(size, obs_shape, np.uint8)
self.actions = CircularBuffer(size, (), np.uint8)
self.rewards = CircularBuffer(size, (), np.int32)
        self.terminals = CircularBuffer(size, (), bool)
def __len__(self):
return len(self.observations)
def _get_full_observations(self, samples, batch_size):
full_observation = np.empty(
(batch_size, self._obs_height, self._obs_width, cfg.frame_history_size),
dtype=np.uint8,
)
for batch_index, start_idx in enumerate(samples):
assert start_idx >= cfg.frame_history_size - 1
assert start_idx <= self.size - 1
start_range_idx = start_idx - (cfg.frame_history_size - 1)
end_range_idx = start_range_idx + cfg.frame_history_size
            frame_index_range = np.arange(start_range_idx, end_range_idx, dtype=int)
terminal = np.argwhere(self.terminals[frame_index_range])
assert len(frame_index_range) == cfg.frame_history_size
assert frame_index_range[cfg.frame_history_size - 1] == start_idx
assert len(terminal) <= 1
full_observation[batch_index] = np.concatenate(
self.observations[frame_index_range], axis=2
)
# Zero out frames that don't come from the current episode
if len(terminal) > 0:
full_observation[batch_index, :, :, : np.squeeze(terminal) + 1] = 0
return full_observation
def append(self, obs, action, reward, terminal):
self.observations.append(obs)
self.actions.append(action)
self.rewards.append(reward)
self.terminals.append(terminal)
def sample(self, batch_size):
assert len(self.observations) >= batch_size
samples = np.random.randint(
cfg.frame_history_size, len(self.observations), size=batch_size
)
batch_observations = self._get_full_observations(
(samples - 1) % self.size, batch_size
)
batch_rewards = self.rewards[samples]
batch_actions = self.actions[samples]
batch_next_observation = self._get_full_observations(samples, batch_size)
batch_terminals = self.terminals[samples]
return (
batch_observations,
batch_actions,
batch_rewards,
batch_next_observation,
batch_terminals,
)
```
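A standalone sketch of the `CircularBuffer` wrap-around behaviour (it needs no `cfg`); once `maxlen` is reached the oldest entries are overwritten.
```python
if __name__ == "__main__":
    buf = CircularBuffer(maxlen=3, shape=(), dtype=np.int32)
    for v in range(1, 6):
        buf.append(v)
    print(len(buf))        # 3
    print(buf[0], buf[2])  # oldest and newest surviving items: 3 5
    print(np.array(buf))   # [3 4 5]
```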
#### File: JesseFarebro/dqn-ale/main.py
```python
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from atari_environment import AtariEnvironment
from config import cfg
from dqn import DQN
from experience_replay import CircularBuffer, ExperienceReplay
def main(_):
# Reproducability
tf.reset_default_graph()
np.random.seed(cfg.random_seed)
tf.set_random_seed(cfg.random_seed)
# Logging
summary_writer = tf.contrib.summary.create_file_writer(cfg.log_dir)
summary_writer.set_as_default()
episode_ph = tf.placeholder(tf.int64, (), name="episode")
    reward_ph = tf.placeholder(tf.float32, (), name="episode/reward")
step_ph = tf.placeholder(tf.int64, (), name="episode/steps")
with tf.contrib.summary.always_record_summaries():
episode_summary = [
tf.contrib.summary.scalar("episode/reward", reward_ph, step=episode_ph),
tf.contrib.summary.scalar("episode/step", step_ph, step=episode_ph),
]
if not tf.gfile.Exists(cfg.save_dir):
tf.gfile.MakeDirs(cfg.save_dir)
episode_results_path = os.path.join(cfg.save_dir, "episodeResults.csv")
episode_results = tf.gfile.GFile(episode_results_path, "w")
episode_results.write("episode,reward,steps\n")
# Setup ALE and DQN graph
obs_shape = (84, 84, 1)
input_height, input_width, _ = obs_shape
# Log DQN summaries every n steps
with tf.contrib.summary.record_summaries_every_n_global_steps(
cfg.log_summary_every
):
dqn = DQN(input_height, input_width, AtariEnvironment.num_actions)
# Global step
global_step = tf.train.get_or_create_global_step()
increment_step = tf.assign_add(global_step, 1)
# Save all variables
vars_to_save = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=cfg.q_scope
)
saver = tf.train.Saver(var_list=vars_to_save)
# Handle loading specific variables
restoring = cfg.restore_dir is not None
vars_to_load = []
if restoring:
for scope in cfg.load_scope.split(","):
vars_to_load.extend(
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
)
loader = tf.train.Saver(var_list=vars_to_load)
tf.logging.info("Variables to save: ")
tf.logging.info(vars_to_save)
tf.logging.info("Variables to load: ")
tf.logging.info(vars_to_load)
# Setup session
def init_fn(scaffold, sess):
tf.contrib.summary.initialize(session=sess)
if restoring:
chkpt = (
tf.train.latest_checkpoint(cfg.restore_dir)
if cfg.restore_file is None
else os.path.join(cfg.restore_dir, cfg.restore_file)
)
tf.logging.info("Restoring weights from checkpoint %s" % chkpt)
loader.restore(sess, chkpt)
scaffold = tf.train.Scaffold(init_fn=init_fn)
sess = tf.train.SingularMonitoredSession(scaffold=scaffold)
sess.run(dqn.copy_to_target)
# Initialize ALE
postprocess_frame = lambda frame: sess.run(
dqn.process_frame, feed_dict={dqn.image: frame}
)
env = AtariEnvironment(obs_shape, postprocess_frame)
# Replay buffer
replay_buffer = ExperienceReplay(cfg.replay_buffer_size, obs_shape)
# Perform random policy to get some training data
with tqdm(
total=cfg.seed_frames, disable=cfg.disable_progress or cfg.evaluate
) as pbar:
seed_steps = 0
while seed_steps * cfg.frame_skip < cfg.seed_frames and (
not sess.should_stop() and not cfg.evaluate
):
action = np.random.randint(AtariEnvironment.num_actions)
reward, next_state, terminal = env.act(action)
seed_steps += 1
replay_buffer.append(
next_state[:, :, -1, np.newaxis], action, reward, terminal
)
if terminal:
pbar.update(env.episode_frames)
env.reset(inc_episode_count=False)
if cfg.evaluate:
assert cfg.max_episode_count > 0
else:
assert len(replay_buffer) >= cfg.seed_frames // cfg.frame_skip
# Main training loop
steps = tf.train.global_step(sess, global_step)
env.reset(inc_episode_count=False)
terminal = False
total = cfg.max_episode_count if cfg.evaluate else cfg.num_frames
with tqdm(total=total, disable=cfg.disable_progress) as pbar:
# Loop while we haven't observed our max frame number
# If we are at our max frame number we will finish the current episode
while (
not (
# We must be evaluating or observed the last frame
# As well as be terminal
# As well as seen the maximum episode number
(steps * cfg.frame_skip > cfg.num_frames or cfg.evaluate)
and terminal
and env.episode_count >= cfg.max_episode_count
)
and not sess.should_stop()
):
# Epsilon greedy policy with epsilon annealing
if not cfg.evaluate and steps * cfg.frame_skip < cfg.eps_anneal_over:
# Only compute epsilon step while we're still annealing epsilon
epsilon = cfg.eps_initial - steps * (
(cfg.eps_initial - cfg.eps_final) / cfg.eps_anneal_over
)
else:
epsilon = cfg.eps_final
# Epsilon greedy policy
if np.random.uniform() < epsilon:
action = np.random.randint(0, AtariEnvironment.num_actions)
else:
action = sess.run(dqn.action, feed_dict={dqn.S: [env.state]})
# Perform environment step
steps = sess.run(increment_step)
reward, next_state, terminal = env.act(action)
if not cfg.evaluate:
replay_buffer.append(
next_state[:, :, -1, np.newaxis], action, reward, terminal
)
# Sample and do gradient updates
if steps % cfg.learning_freq == 0:
placeholders = [
dqn.S,
dqn.actions,
dqn.rewards,
dqn.S_p,
dqn.terminals,
]
batch = replay_buffer.sample(cfg.batch_size)
sess.run(
[dqn.train, dqn.summary],
feed_dict=dict(zip(placeholders, batch)),
)
if steps % cfg.target_update_every == 0:
sess.run([dqn.copy_to_target])
if steps % cfg.model_chkpt_every == 0:
saver.save(
sess.raw_session(), "%s/model_epoch_%04d" % (cfg.log_dir, steps)
)
if terminal:
episode_results.write(
"%d,%d,%d\n"
% (env.episode_count, env.episode_reward, env.episode_frames)
)
episode_results.flush()
# Log episode summaries to Tensorboard
sess.run(
episode_summary,
feed_dict={
reward_ph: env.episode_reward,
step_ph: env.episode_frames // cfg.frame_skip,
episode_ph: env.episode_count,
},
)
pbar.update(env.episode_frames if not cfg.evaluate else 1)
env.reset()
episode_results.close()
tf.logging.info(
"Finished %d %s"
% (
cfg.max_episode_count if cfg.evaluate else cfg.num_frames,
"episodes" if cfg.evaluate else "frames",
)
)
if __name__ == "__main__":
tf.app.run()
```
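A hedged sketch of the epsilon schedule exactly as written in the loop above; the constants are placeholders for the values that normally come from `config.cfg`.
```python
def annealed_epsilon(steps, frame_skip=4, eps_initial=1.0,
                     eps_final=0.01, eps_anneal_over=1_000_000):
    # Linear decay from eps_initial toward eps_final while annealing,
    # then a constant floor (mirrors the condition used in main()).
    if steps * frame_skip < eps_anneal_over:
        return eps_initial - steps * ((eps_initial - eps_final) / eps_anneal_over)
    return eps_final
```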
#### File: dqn-ale/tests/test_experience_replay.py
```python
import numpy as np
import pytest
from experience_replay import ExperienceReplay
@pytest.mark.incremental
class TestExperienceReplay:
def test_observation_construction(self):
""" Tests observation construction from partial observations """
obs_shape = (84, 84, 1)
er = ExperienceReplay(5, obs_shape)
obs_ = []
obs_next_ = []
for i in range(1, 6):
partial_obs = np.ones(obs_shape) * i
if i < 5:
obs_.append(partial_obs)
if i > 1:
obs_next_.append(partial_obs)
er.append(partial_obs, 0, 0, 0)
obs_ = np.transpose(np.array(obs_), (3, 1, 2, 0))
obs_next_ = np.transpose(np.array(obs_next_), (3, 1, 2, 0))
batch = er.sample(1)
obs, rewards, actions, obs_next, terminals = batch
assert np.array_equal(obs_, obs)
assert np.array_equal(obs_next_, obs_next)
def test_observation_zeroing(self):
""" Tests zeroing out of frames not from current episode """
obs_shape = (84, 84, 1)
er = ExperienceReplay(5, obs_shape)
for terminal_idx in range(5):
obs_ = []
obs_next_ = []
for i in range(1, 6):
partial_obs = np.ones(obs_shape) * i
terminal = 1 if i == terminal_idx else 0
er.append(partial_obs, 0, 0, terminal)
if i <= terminal_idx:
partial_obs *= 0
if i < 5:
obs_.append(partial_obs)
if i > 1:
obs_next_.append(partial_obs)
obs_ = np.transpose(np.array(obs_), (3, 1, 2, 0))
obs_next_ = np.transpose(np.array(obs_next_), (3, 1, 2, 0))
batch = er.sample(1)
obs, rewards, actions, obs_next, terminals = batch
assert np.array_equal(obs_, obs)
assert np.array_equal(obs_next_, obs_next)
def test_sampling(self):
""" Tests observation construction from partial observations """
obs_shape = (84, 84, 1)
er = ExperienceReplay(5, obs_shape)
for i in range(1, 6):
partial_obs = np.ones(obs_shape) * i
er.append(partial_obs, 1, 1, 0)
batch = er.sample(1)
_, rewards, actions, _, terminals = batch
assert np.array_equal(rewards, np.array([1]))
assert np.array_equal(actions, np.array([1]))
assert np.array_equal(terminals, np.array([0]))
```
|
{
"source": "JesseFarebro/PixelCNNPP",
"score": 2
}
|
#### File: JesseFarebro/PixelCNNPP/train.py
```python
import tensorflow as tf
import gin
import logging
from tqdm import trange, tqdm
from models.PixelCNNPP import PixelCNNPP
from utils.losses import logistic_mixture_loss
@gin.configurable
def train(
strategy,
log_dir,
dataset_fn,
model_cls=PixelCNNPP,
optimizer_cls=tf.keras.optimizers.Adam,
learning_rate=0.0002,
learning_rate_decay=0.999995,
batch_size=64,
max_epoch=5000,
chkpt_to_keep=5,
images_to_log=16,
log_images_every=50,
debug=False,
**kwargs
):
logging.info("Running with %d replicas" % strategy.num_replicas_in_sync)
global_batch_size = batch_size * strategy.num_replicas_in_sync
train_dataset, eval_dataset = dataset_fn(global_batch_size)
train_len = tf.data.experimental.cardinality(train_dataset)
eval_len = tf.data.experimental.cardinality(eval_dataset)
train_iterator = strategy.experimental_distribute_dataset(train_dataset)
eval_iterator = strategy.experimental_distribute_dataset(eval_dataset)
structure = tf.data.experimental.get_structure(train_iterator)
_, width, height, channels = structure.shape.as_list()
inputs_shape = tf.TensorShape([None, width, height, channels])
learning_rate_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
learning_rate, max_epoch, learning_rate_decay
)
with strategy.scope():
model = model_cls(inputs_shape)
model.build(inputs_shape)
optimizer = optimizer_cls(learning_rate_schedule)
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
manager = tf.train.CheckpointManager(checkpoint, log_dir, chkpt_to_keep, 1)
restore_status = checkpoint.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
logging.info("Resuming from %s" % manager.latest_checkpoint)
restore_status.assert_existing_objects_matched()
with strategy.scope():
@tf.function
def train_step(batch):
def step_fn(inputs):
with tf.GradientTape() as tape:
mixture = model(inputs, training=True)
loss = logistic_mixture_loss(
inputs, mixture, num_mixtures=model.num_mixtures
)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
per_replica_loss = strategy.experimental_run_v2(step_fn, (batch,))
return strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_loss, axis=None
)
@tf.function
def eval_step(batch):
def step_fn(inputs):
mixture = model(inputs, training=False)
loss = logistic_mixture_loss(
inputs, mixture, num_mixtures=model.num_mixtures
)
return loss
per_replica_loss = strategy.experimental_run_v2(step_fn, (batch,))
return strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_loss, axis=None
)
bpd = lambda loss: loss / (
global_batch_size * tf.math.log(2.0) * width * height * channels
)
train_loss = tf.keras.metrics.Mean("train_loss")
train_bpd = tf.keras.metrics.Mean("train_bpd")
eval_loss = tf.keras.metrics.Mean("eval_loss")
eval_bpd = tf.keras.metrics.Mean("eval_bpd")
for epoch in trange(1, max_epoch + 1, initial=1):
train_loss.reset_states()
train_bpd.reset_states()
for batch in tqdm(
train_iterator,
total=train_len.numpy() if train_len > 0 else None,
desc="train",
unit="images",
unit_scale=global_batch_size,
):
aggregate_loss = train_step(batch)
train_loss.update_state(aggregate_loss)
train_bpd.update_state(bpd(aggregate_loss))
eval_loss.reset_states()
eval_bpd.reset_states()
for batch in tqdm(
eval_iterator,
total=eval_len.numpy() if eval_len > 0 else None,
desc="eval",
unit="images",
unit_scale=global_batch_size,
):
aggregate_loss = eval_step(batch)
eval_loss.update_state(aggregate_loss)
eval_bpd.update_state(bpd(aggregate_loss))
tf.summary.scalar(
"train/NegativeLogLikelihood", train_loss.result(), step=epoch
)
tf.summary.scalar("train/BitsPerDimension", train_bpd.result(), step=epoch)
tf.summary.scalar("eval/NegaitveLogLikelihood", eval_loss.result(), step=epoch)
tf.summary.scalar("eval/BitsPerDimension", eval_bpd.result(), step=epoch)
if epoch % log_images_every == 0:
samples = model.sample(images_to_log)
samples = tf.cast((samples + 1.0) * 127.5, tf.uint8)
tf.summary.image("samples", samples, step=epoch, max_outputs=images_to_log)
manager.save(epoch)
```
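The `bpd` lambda above converts a summed negative log-likelihood in nats into bits per dimension; a hedged standalone version of the same formula:
```python
import math
def bits_per_dimension(total_nll_nats, batch_size, width, height, channels):
    # nats -> bits (divide by ln 2), normalised per image and per pixel/channel.
    return total_nll_nats / (batch_size * math.log(2.0) * width * height * channels)
```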
|
{
"source": "jessefogarty/googlesearch",
"score": 3
}
|
#### File: googlesearch/googlesearch/__init__.py
```python
from bs4 import BeautifulSoup
from requests import get
from random import randint
from typing import Any, Generator
useragents = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9",
"Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246",
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/13.2b11866 Mobile/16A366 Safari/605.1.15",
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/69.0.3497.105 Mobile/15E148 Safari/605.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1",
"User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:95.0) Gecko/20100101 Firefox/95.0",
]
def search(
term: str, num_results: int = 10, lang: str = "en", proxy: Any = None
) -> list:
usr_agent = {"User-Agent": useragents[randint(0, len(useragents))]}
def fetch_results(search_term: str, number_results: int, language_code: str) -> str:
"""Search term GET request to Google.
args:
Any: supplied in outer function
returns:
            str: SERP HTML source
"""
escaped_search_term = search_term.replace(" ", "+")
google_url = "https://www.google.com/search?q={}&num={}&hl={}".format(
escaped_search_term, number_results + 1, language_code
)
proxies = None
if proxy:
if proxy[:5] == "https":
proxies = {"https": proxy}
else:
proxies = {"http": proxy}
response = get(google_url, headers=usr_agent, proxies=proxies)
response.raise_for_status()
return response.text
def parse_results(raw_html: str) -> Generator[str, None, None]:
"""Finds and returns ahref links to pages from a Google SERP html source."""
soup = BeautifulSoup(raw_html, "html.parser")
result_block = soup.find_all("div", attrs={"class": "g"})
for result in result_block:
link = result.find("a", href=True)
title = result.find("h3")
if link and title:
yield link["href"]
html = fetch_results(term, num_results, lang)
return list(parse_results(html))
```
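A hedged usage sketch; results depend on Google's live SERP markup and may be empty if the page layout changes.
```python
if __name__ == "__main__":
    for url in search("python web scraping", num_results=5):
        print(url)
```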
|
{
"source": "JesseFriedman01/linear-regression-real-estate-values",
"score": 3
}
|
#### File: JesseFriedman01/linear-regression-real-estate-values/LR_Home_values.py
```python
import quandl
import numpy as np
from sklearn.linear_model import LinearRegression
from datetime import datetime,date
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import matplotlib.ticker as mtick
from matplotlib.offsetbox import AnchoredText
########################################################################
# chartstringLR(fut_date, zip_code)
# Purpose: use linear regression based on historical real estate values
#          in zip_code to predict the future value on fut_date, and plot
#          the values/data on a graph
# Pre-conditions: fut_date is a string in yyyy-m-d format, zip_code is a
#                 valid 5 digit zip code for which historical data exists
# Post-conditions: None
########################################################################
def chartstringLR(fut_date, zip_code):
# https://www.quandl.com/data/ZILLOW-Zillow-Real-Estate-Research
# Zillow Home Value Index - All Homes - zip_code
location_str = "ZILLOW/z%s_ZHVIAH" % ( str(zip_code) )
df = quandl.get(location_str)
dateraw = df.index
datetoordinal = []
# convert dates in dateraw to list of toordinals because they're easier to work with in this format
for i in range(len(dateraw)):
strdate = dateraw[i].strftime("%m%d%y")
datetoordinal.append(datetime.strptime(strdate, '%m%d%y').toordinal())
X = np.array(datetoordinal).reshape(-1, 1)
y = np.array(df['Value']).reshape(-1, 1)
# produces y-coordinates for regression line via y = mx + b
def line_coords(slope, y_int, dates):
y_coor = []
for date in dates:
y_coor.append((float(slope[0][0]) * date) + float(y_int[0]))
return (y_coor)
X_fut = datetoordinal
# extend "future prediction line"
for i in range(100):
X_fut.append(X_fut[-1] + i)
# toordinal dates back to regular date format
X_fut_regular_dates = []
for i in range(len(X_fut)):
X_fut_regular_dates.append(date.fromordinal(X_fut[i]))
# 20% of sample reserved for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
lr = LinearRegression().fit(X_train, y_train)
a = line_coords(lr.coef_, lr.intercept_, X_fut)
#confidence=(lr.score(X_test, y_test)) # commented out but can be used as needed
pred_date = datetime.strptime(fut_date, '%Y-%m-%d').date()
pred_date = pred_date.toordinal()
    pred = lr.predict(np.array([[pred_date]]))  # sklearn expects a 2D array
pred_lr = lr.predict(X)
fig, ax = plt.subplots()
fmt = '${x:,.0f}'
tick = mtick.StrMethodFormatter(fmt)
ax.yaxis.set_major_formatter(tick)
for spine in plt.gca().spines.values():
spine.set_alpha(.1)
ax.plot(df.index, y)
ax.plot(df.index, pred_lr, '--')
ax.plot(X_fut_regular_dates, a, '--', color='g', alpha=.25)
ax.scatter(pred_date, pred, color='g', s=100)
# percent change of predicted value versus today's (linear regression) value
pct_change = (pred[0][0] - int(y[-1])) / int(y[-1]) * 100
annotate_string = "Prediction Data:\n\nDate: %s \nEst. Value: %s \nPct. Change: %s%%" % (
fut_date, '${:,.2f}'.format(pred[0][0]), round(pct_change, 2))
# rectangle with data on bottom right of chart
at = AnchoredText(annotate_string,
prop=dict(size=10, color='darkgreen'), frameon=True,
loc=4
)
at.patch.set_boxstyle("round")
at.patch.set_fc('whitesmoke')
at.patch.set_alpha(.25)
ax.add_artist(at)
plt.xlabel('Date')
plt.ylabel('Value')
plt.legend([ str(zip_code) + ' Value Index', 'Regression Line', 'Future Prediction Line'])
plt.show()
nyc = chartstringLR( '2030-1-1', 10012 )
```
|
{
"source": "JesseFriedman01/NewsAPI",
"score": 3
}
|
#### File: JesseFriedman01/NewsAPI/NewsAPI_demo.py
```python
from newsapi import NewsApiClient
from datetime import datetime
import config
def company_data (company_name, days_back):
newsapi = NewsApiClient(api_key=config.api_key)
source = "abc-news,ars-technica,associated-press,bbc-news,bloomberg,breitbart-news,business-insider,buzzfeed,cnbc,cnn,engadget," \
"espn,financial-post,financial-times,fortune,google-news,mashable,msnbc,national-geographic,nbc-news,new-york-magazine," \
"politico,reuters,techcrunch,techradar,the-economist,the-huffington-post,the-new-york-times,the-wall-street-journal," \
"the-washington-post,time,usa-today,wired"
# today's date
now_date = datetime.now().date()
# convert date to ordinal and subtract # of days to look back for article
prior_toord = datetime.now().date().toordinal() - days_back
# convert look back date from ordinal to normal date format
prior_date = datetime.fromordinal(prior_toord).date()
# there are other sort_by choices though relevancy has had the best results
top_headlines = newsapi.get_everything(q=company_name, sources=source, from_parameter=str(prior_date), to=str(now_date), language='en', sort_by='relevancy')
headline_counter = 0
news_list = []
if top_headlines['totalResults'] > 0:
for i in range(0, top_headlines['totalResults'] ):
news_list.append([
top_headlines['articles'][i]['url'],
top_headlines['articles'][i]['title'],
top_headlines['articles'][i]['source']['name'],
# just the date, not the time
top_headlines['articles'][i]['publishedAt'][0:10],
])
# although not in the API documentation, a free plan is limited to 20 articles per request
if headline_counter >= 19:
break
headline_counter+=1
else:
news_list.append(['none','none'])
return news_list
print ( company_data('microsoft', 5) )
```
|
{
"source": "jessefriend/flickrdrivepipeline",
"score": 3
}
|
#### File: jessefriend/flickrdrivepipeline/flickr.py
```python
import flickrapi
import webbrowser
import xml.etree.ElementTree as ET
import re
#SIZES ensures the photos we are getting from Flickr are not too large
SIZES = ["url_o", "url_k", "url_h", "url_l", "url_c"]
api_key = "API KEY"
api_secret = "API SECRET"
def get_photos(image_tag):
extras = ','.join(SIZES)
flickr = flickrapi.FlickrAPI(api_key, api_secret)
# Only do this if we don't have a valid token already
# if not flickr.token_valid(perms=u'read'):
#
# # Get a request token
# flickr.get_request_token(oauth_callback='oob')
#
# # Open a browser at the authentication URL. Do this however
# # you want, as long as the user visits that URL.
# authorize_url = flickr.auth_url(perms=u'read')
# webbrowser.open_new_tab(authorize_url)
#
# # Get the verifier code from the user. Do this however you
# # want, as long as the user gives the application the code.
# verifier = str(input('Verifier code: 966-106-432 '))
#
# # Trade the request token for an access token
# flickr.get_access_token(verifier)
#photos walks through Flickr based on the arguments and returns the photos as data objects
photos = flickr.walk(text=image_tag, # it will search by image title and image tags
extras=extras, # get the urls for each size we want, and only in Vienna
privacy_filter=1, # search only for public photos
per_page=50,
has_geo=1,
bbox = '16.195221,48.122134,16.578369,48.320795',
place_type_id = '11',
min_taken_date='2016-01-01',
max_taken_date='2016-12-31',
sort='relevance') # we want what we are looking for to appear first
return photos
def get_photo_info(photo):
info =['','']
for i in range(len(SIZES)):
# makes sure the loop is done in the order we want
info[0] = photo.get(SIZES[i])
info[1] = photo.get('id')
if info[0]: # if url is None try with the next size
return info
def get_info(photos):
    # Optionally limit the number of photos downloaded: uncomment the counter/max lines below
# counter=0
# max= 10
urls=[]
ids=[]
#look through photo objects and append urls, and important info to arrays
for photo in photos:
# if counter < max:
        info = get_photo_info(photo)  # get preferred size url (may be None)
        if info and info[0]:
            urls.append(info[0])
            # counter += 1
        # if no url for the desired sizes then try with the next photo
        if info and info[1]:
ids.append(info[1])
else:
break
information=[urls, ids]
return information
def get_all_info(info):
flickr = flickrapi.FlickrAPI(api_key, api_secret)
total_info = []
for photoID in info[1]:
#gather info for each photo
full_info = flickr.photos.getInfo(api_key=api_key,photo_id=photoID)
#full_info returns as an unorganized xml, so it is converted to a string, then regex applied below for information parsing
full_data = ET.tostring(full_info).decode("utf-8")
#regex for attributes
lat_find = re.compile(r'(?<=latitude=)\S+')
long_find = re.compile(r'(?<=longitude=)\S+')
desc_find = re.compile(r'(?<=description>)\S+')
title_find = re.compile(r'(?<=title>)\S+')
taken_find = re.compile(r'(?<=taken=)\S+')
realname_find = re.compile(r'(?<=realname=)\S+')
username_find = re.compile(r'(?<=username=)\S+')
#cleaning data
lat = [x.strip('"') for x in re.findall(lat_find, full_data)][0]
long = [x.strip('">') for x in re.findall(long_find, full_data)][0]
desc = [x.strip('>') for x in re.findall(desc_find, full_data)][0]
title = [x.strip('>') for x in re.findall(title_find, full_data)][0]
taken = [x.strip('"') for x in re.findall(taken_find, full_data)][0]
realname = [x.strip('"') for x in re.findall(realname_find, full_data)][0]
username = [x.strip('"') for x in re.findall(username_find, full_data)][0]
#storing data to return attribute array
photo_info=[]
photo_info.append(lat)
photo_info.append(long)
photo_info.append(photoID)
photo_info.append(desc)
photo_info.append(title)
photo_info.append(taken)
photo_info.append(realname)
photo_info.append(username)
total_info.append(photo_info)
return total_info
```
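A hedged illustration of the regex extraction above on a synthetic attribute string; the real input is the XML returned by `flickr.photos.getInfo`.
```python
if __name__ == "__main__":
    sample = 'latitude="48.2082" longitude="16.3738">'
    lat_find = re.compile(r'(?<=latitude=)\S+')
    long_find = re.compile(r'(?<=longitude=)\S+')
    lat = [x.strip('"') for x in re.findall(lat_find, sample)][0]
    lng = [x.strip('">') for x in re.findall(long_find, sample)][0]
    print(lat, lng)  # 48.2082 16.3738
```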
|
{
"source": "jessefriend/python_web_scraper",
"score": 2
}
|
#### File: jessefriend/python_web_scraper/production_roads.py
```python
import os
import logging
import requests
import urllib
from datetime import timedelta
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.exceptions import AirflowFailException, AirflowSkipException
default_args = {
"owner": "airflow",
"depends_on_past": False,
"start_date": airflow.utils.dates.days_ago(7),
# "email": ["admin email on failure and restart"],
# "email_on_failure": False,
# "email_on_retry": False,
"retries": 3,
"retry_delay": timedelta(minutes=1),
"depends_on_past": False,
"catchup_by_default": False,
# 'queue': 'building_detection_queue',
# 'pool': 'backfill',
#'priority_weight': 3,
# 'end_date': datetime(2016, 1, 1),
# 'dag': dag,
# 'sla': timedelta(hours=2),
#'execution_timeout': timedelta(seconds=10800),
# 'on_failure_callback': some_function,
# 'on_success_callback': some_other_function,
# 'on_retry_callback': another_function,
# 'trigger_rule': u'all_success'
}
def update_production_runs_request(**kwargs):
"""
Request to insert production-run entry into db table production_runs.
"""
dag_run = kwargs["dag_run"]
dag_run_conf = dag_run.conf
dag_run_id = dag_run.run_id
user_id = dag_run_conf["user_id"]
object_type = 'roads'
model_id = dag_run_conf["model_id"]
raster_id = dag_run_conf["raster_id"]
raster_path = dag_run_conf["src_img_name"]
raster_bucket = dag_run_conf["src_bucket_name"]
start_timestamp = dag_run_conf["start_timestamp"]
# save production run in db table
service_url = os.environ["DATAMANAGEMENT_URL"]
req_url = f"http://{service_url}/productionRuns/add"
if raster_id is None:
req_params = urllib.parse.urlencode(
{
"user_id": user_id,
"object_type": object_type,
"model_id": model_id,
"raster_path": raster_path,
"raster_bucket": raster_bucket,
"start_timestamp": start_timestamp,
"dag_run_id": dag_run_id,
"status": 'running'
}
)
else:
req_params = urllib.parse.urlencode(
{
"user_id": user_id,
"object_type": object_type,
"model_id": model_id,
"raster_id": raster_id,
"raster_path": raster_path,
"raster_bucket": raster_bucket,
"start_timestamp": start_timestamp,
"dag_run_id": dag_run_id,
"status": 'running'
}
)
req_url = f"{req_url}?{req_params}"
logging.info(req_url)
response = requests.post(req_url)
if response.status_code != 200:
logging.error(
f'Inserting production run entry to db table failed. Tile : {dag_run_conf["src_img_name"]}, Status code : {response.status_code}.'
)
req_url = f"http://{service_url}/productionRuns/updateStatus"
req_params = urllib.parse.urlencode(
{"dag_run_id": dag_run_id, "status": 'failed'}
)
req_url = f"{req_url}?{req_params}"
requests.post(req_url)
response.raise_for_status()
def data_ingestion_request(**kwargs):
"""
Request for calling the data ingestion and getting all parameters
for the following services
"""
dag_run = kwargs["dag_run"]
dag_run_conf = dag_run.conf
service_url = os.environ["DATAINGESTION_URL"]
req_url = f"http://{service_url}/fetch_tile_chunks"
req_params = urllib.parse.urlencode(
{
"src_bucket_name": dag_run_conf["src_bucket_name"],
"src_img_name": dag_run_conf["src_img_name"],
"pu_size": dag_run_conf["pu_size"],
"model_id": dag_run_conf["model_id"],
}
)
req_url = f"{req_url}?{req_params}"
resp = requests.post(req_url)
ti = kwargs["ti"]
if resp.status_code == 200:
dict_list = resp.json()
ti.xcom_push(key="model_id", value=dag_run_conf["model_id"])
ti.xcom_push(key="tile_list", value=dict_list)
else:
error_msg = resp.json()["message"]
logging.error(
f'Data ingestion failed. Tile : {dag_run_conf["src_img_name"]}, Status code : {resp.status_code}, Message : {error_msg}'
)
# update db entry
dag_run_id = dag_run.run_id
service_url = os.environ["DATAMANAGEMENT_URL"]
req_url = f"http://{service_url}/productionRuns/updateStatus"
req_params = urllib.parse.urlencode(
{"dag_run_id": dag_run_id, "status": 'failed'}
)
req_url = f"{req_url}?{req_params}"
requests.post(req_url)
resp.raise_for_status()
def apply_model_request(**kwargs):
"""Request for calling the apply model service"""
ti = kwargs["ti"]
dag_run = kwargs["dag_run"]
dag_run_conf = dag_run.conf
src_img_name = dag_run_conf["src_img_name"]
db_tile_basename_list = ti.xcom_pull(task_ids="data_ingestion", key="tile_list")
bucket = dag_run_conf["src_bucket_name"]
model_id = ti.xcom_pull(task_ids="data_ingestion", key="model_id")
service_url = os.environ["APPLYMODEL_URL"]
base_url = f"http://{service_url}/applyModel/predictByModelId"
logging.info(f"Processing tile : {db_tile_basename_list[0]}")
req_params = urllib.parse.urlencode(
{
"src_bucket_name": bucket,
"src_img_name": src_img_name,
"db_tile_basename": db_tile_basename_list[0]["tile_name"],
"model_id": model_id,
"dst_bucket": "predictions",
}
)
req_url = f"{base_url}?{req_params}"
try:
resp = requests.post(req_url, timeout=30*60)
out = resp.json()
status_code = resp.status_code
except Exception as e:
status_code = 400
out = {}
out["type"] = str(type(e))
out["message"] = str(e)
logging.info(f"out: {out}")
if status_code == 200:
logging.info(
f"Success. Tile : {db_tile_basename_list[0]}, Status code : {status_code}"
)
ti.xcom_push(key="img_bucket", value=out["bucket"])
ti.xcom_push(key="img_path", value=out["path"])
ti.xcom_push(
key="db_tile_basename", value=db_tile_basename_list[0]["tile_name"]
)
else:
error_type = out["type"]
error_msg = out["message"]
        logging.warning(
f"Prediction failed. Tile : {db_tile_basename_list[0]}, Status code : {status_code}, Error : {error_type}, \n Message : {error_msg}"
)
# update db entry
dag_run_id = dag_run.run_id
service_url = os.environ["DATAMANAGEMENT_URL"]
req_url = f"http://{service_url}/productionRuns/updateStatus"
req_params = urllib.parse.urlencode(
{"dag_run_id": dag_run_id, "status": 'failed'}
)
req_url = f"{req_url}?{req_params}"
requests.post(req_url)
raise AirflowFailException(f"Prediction failed. Tile : {db_tile_basename_list[0]}, Status code : {status_code}, Error : {error_type}, \n Message : {error_msg}"
)
def vectorization_request(**kwargs):
"""Request for calling road-detector"""
ti = kwargs["ti"]
dag_run = kwargs["dag_run"]
dag_run_conf = dag_run.conf
pred_bucket = ti.xcom_pull(task_ids="apply_model", key="img_bucket")
pred_img_path = ti.xcom_pull(task_ids="apply_model", key="img_path")
db_tile_basename = ti.xcom_pull(task_ids="apply_model", key="db_tile_basename")
logging.info(f"Vectorizing tile : {db_tile_basename}")
service_url = os.environ["DETECTORROADS_URL"]
base_url = f"http://{service_url}/detect_roads"
req_params = urllib.parse.urlencode(
{
"img_bucket": pred_bucket,
"img_path": pred_img_path,
"db_tile_basename": db_tile_basename,
}
)
req_url = f"{base_url}?{req_params}"
logging.info(f"req_url tile : {req_url}")
resp = requests.post(req_url)
if resp.status_code == 200:
logging.info(
f"Vectorization successful. PU basename : {db_tile_basename}, Status code : {resp.status_code}"
)
ti.xcom_push(key="db_tile_basename", value=db_tile_basename)
ti.xcom_push(key="roads_detected", value=True)
# delete prediction from s3
service_url = os.environ["DATAMANAGEMENT_URL"]
req_url = f"http://{service_url}/s3Service/deleteFile"
req_params = urllib.parse.urlencode(
{"bucket_name": pred_bucket,
"s3_path": pred_img_path}
)
req_url = f"{req_url}?{req_params}"
requests.post(req_url)
resp.raise_for_status()
elif resp.status_code == 204:
logging.info(
f"Vectorization successful, but no buildings were found.. PU basename : {db_tile_basename}, Status code : {resp.status_code}"
)
ti.xcom_push(key="db_tile_basename", value=db_tile_basename)
ti.xcom_push(key="roads_detected", value=False)
# update db entry to success
dag_run_id = dag_run.run_id
service_url = os.environ["DATAMANAGEMENT_URL"]
req_url = f"http://{service_url}/productionRuns/updateStatus"
req_params = urllib.parse.urlencode(
{"dag_run_id": dag_run_id, "status": 'success'}
)
req_url = f"{req_url}?{req_params}"
requests.post(req_url)
# delete prediction from s3
service_url = os.environ["DATAMANAGEMENT_URL"]
req_url = f"http://{service_url}/s3Service/deleteFile"
req_params = urllib.parse.urlencode(
{"bucket_name": pred_bucket,
"s3_path": pred_img_path}
)
req_url = f"{req_url}?{req_params}"
requests.post(req_url)
resp.raise_for_status()
else:
out = resp.json()
error_type = out["type"]
error_msg = out["message"]
        logging.warning(
f"Vectorization failed. PU basename : {db_tile_basename}, Status code : {resp.status_code}, Error : {error_type}, \n Message : {error_msg}"
)
# update db entry
dag_run_id = dag_run.run_id
service_url = os.environ["DATAMANAGEMENT_URL"]
req_url = f"http://{service_url}/productionRuns/updateStatus"
req_params = urllib.parse.urlencode(
{"dag_run_id": dag_run_id, "status": 'failed'}
)
req_url = f"{req_url}?{req_params}"
requests.post(req_url)
# delete prediction from s3
service_url = os.environ["DATAMANAGEMENT_URL"]
req_url = f"http://{service_url}/s3Service/deleteFile"
req_params = urllib.parse.urlencode(
{"bucket_name": pred_bucket,
"s3_path": pred_img_path}
)
req_url = f"{req_url}?{req_params}"
requests.post(req_url)
resp.raise_for_status()
def stitching_request(**kwargs):
"""Request for calling road stitching service"""
ti = kwargs["ti"]
dag_run = kwargs["dag_run"]
roads_detected = ti.xcom_pull(task_ids="road_vectorization", key="roads_detected")
if roads_detected is False:
raise AirflowSkipException
db_tile_basename = ti.xcom_pull(
task_ids="road_vectorization", key="db_tile_basename"
)
service_url = os.environ["ROADSTITCHING_URL"]
base_url = f"http://{service_url}/stitch_roads"
req_params = urllib.parse.urlencode(
{
"db_tile_basename": db_tile_basename,
}
)
req_url = f"{base_url}?{req_params}"
logging.info(f"req_url tile : {req_url}")
resp = requests.post(req_url)
out = resp.json()
logging.info(out)
if resp.status_code == 200:
logging.info(
f"Stitching successful. PU basename : {db_tile_basename}, Status code : {resp.status_code}"
)
# update db entry --> success
dag_run_id = dag_run.run_id
service_url = os.environ["DATAMANAGEMENT_URL"]
req_url = f"http://{service_url}/productionRuns/updateStatus"
req_params = urllib.parse.urlencode(
{"dag_run_id": dag_run_id, "status": 'success'}
)
req_url = f"{req_url}?{req_params}"
requests.post(req_url)
else:
error_type = out["type"]
error_msg = out["message"]
        logging.warning(
f"Stitching failed. PU basename : {db_tile_basename}, Status code : {resp.status_code}, Error : {error_type}, \n Message : {error_msg}"
)
# update db entry --> failed
dag_run_id = dag_run.run_id
service_url = os.environ["DATAMANAGEMENT_URL"]
req_url = f"http://{service_url}/productionRuns/updateStatus"
req_params = urllib.parse.urlencode(
{"dag_run_id": dag_run_id, "status": 'failed'}
)
req_url = f"{req_url}?{req_params}"
requests.post(req_url)
resp.raise_for_status()
##########################
##########################
##########################
dag = airflow.DAG(
"production_roads",
default_args=default_args,
schedule_interval=None,
)
db_update_task = PythonOperator(
task_id="update_production_runs",
python_callable=update_production_runs_request,
weight_rule="upstream",
provide_context=True,
task_concurrency=15,
dag=dag,
)
ingestion_task = PythonOperator(
task_id="data_ingestion",
python_callable=data_ingestion_request,
weight_rule="upstream",
provide_context=True,
task_concurrency=15,
retries=0,
dag=dag,
)
apply_model_task = PythonOperator(
task_id="apply_model",
python_callable=apply_model_request,
weight_rule="upstream",
provide_context=True,
task_concurrency=1,
dag=dag,
)
vectorization_task = PythonOperator(
task_id="road_vectorization",
python_callable=vectorization_request,
weight_rule="upstream",
provide_context=True,
task_concurrency=2,
dag=dag,
)
stitching_task = PythonOperator(
task_id="road_stitching",
python_callable=stitching_request,
weight_rule="upstream",
provide_context=True,
task_concurrency=1,
dag=dag,
)
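# The pipeline is a strict linear chain: each task below only runs after its
# upstream task has completed (Airflow's bitshift syntax sets the dependencies).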
db_update_task >> ingestion_task >> apply_model_task >> vectorization_task >> stitching_task
```
|
{
"source": "jessegeerts/neural-nets",
"score": 3
}
|
#### File: neural-nets/bayesnets/split_bayes_proper.py
```python
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from edward.models import Categorical, Normal
import edward as ed
import pandas as pd
from tqdm import tqdm
from mnist_loader import TrainingSet, TestSet
# Prepare Datasets
training_data_list = []
test_data_list = []
for task in range(0, 10, 2):
train = TrainingSet(one_hot=False)
test = TestSet(one_hot=False)
train.take_subset([task,task+1])
test.take_subset([task,task+1])
train.labels = train.labels - task
test.labels = test.labels - task
training_data_list.append(train)
test_data_list.append(test)
##### SETTING UP THE NEURAL NETWORK ######
ed.set_seed(314159)
N = 100 # number of images in a minibatch.
D = 784 # number of features.
K = 10 # number of classes.
n_heads = 5
head_size = int(K/n_heads)
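# Each of the 5 heads classifies one pair of digits, so head_size = K / n_heads = 2.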
# define the feedforward neural network function
def neural_network(x, W_0, W_1, b_0, b_1):
h = tf.tanh(tf.matmul(x, W_0) + b_0)
h = tf.matmul(h, W_1) + b_1
return h
def run_training_cycle(inference,batch_size,training_data,x,y_ph):
for _ in range(inference.n_iter):
X_batch, Y_batch = training_data.next_batch(batch_size,shuffle=False)
info_dict = inference.update(feed_dict={x: X_batch, y_ph: Y_batch})
inference.print_progress(info_dict)
def take_posterior_samples(n_samples,X_test,testhead,qW_0,qb_0,qW_1,qb_1):
prob_lst = []
samples = []
w_0_samples = []
b_0_samples = []
w_1_samples = []
b_1_samples = []
for _ in tqdm(range(n_samples)):
w0_samp = qW_0.sample()
b0_samp = qb_0.sample()
w1_samp = qW_1[testhead].sample()
b1_samp = qb_1[testhead].sample()
w_0_samples.append(w0_samp)
b_0_samples.append(b0_samp)
w_1_samples.append(w1_samp)
b_1_samples.append(b1_samp)
# Also compute the probability of each class for each sample.
prob = tf.nn.softmax(neural_network(X_test, w0_samp, w1_samp, b0_samp, b1_samp))
prob_lst.append(prob.eval())
sample = tf.concat([tf.reshape(w1_samp, [-1]), b1_samp], 0)
samples.append(sample.eval())
return prob_lst, samples
# Create a placeholder to hold the data (in minibatches) in a TensorFlow graph.
x = tf.placeholder(tf.float32, [N, D])
# Normal(0,1) priors for the variables. Note that the syntax assumes TensorFlow 1.1.
W_0 = Normal(loc=tf.zeros([D, 20]), scale=tf.ones([D, 20]))
b_0 = Normal(loc=tf.zeros(20), scale=tf.ones(20))
W_1 = []
b_1 = []
for head in range(5):
W_1.append(Normal(loc=tf.zeros([20, head_size]), scale=tf.ones([20, head_size])))
b_1.append(Normal(loc=tf.zeros(head_size), scale=tf.ones(head_size)))
# Categorical likelihood for classification.
y=[]
for head in range(5):
y.append(Categorical(neural_network(x, W_0, W_1[head], b_0, b_1[head])))
# Construct the q(w) and q(b). In this case we assume Normal distributions.
qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 20])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([D, 20]))))
qb_0 = Normal(loc=tf.Variable(tf.random_normal([20])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([20]))))
qW_1 = []
qb_1 = []
for head in range(5):
qW_1.append(Normal(loc=tf.Variable(tf.random_normal([20, head_size])),
                   scale=tf.nn.softplus(tf.Variable(tf.random_normal([20, head_size])))))
qb_1.append(Normal(loc=tf.Variable(tf.random_normal([head_size])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([head_size])))))
# We use a placeholder for the labels in anticipation of the training data.
y_ph = tf.placeholder(tf.int32, [N])
head = 0
accuracies = []
for head in range(5):
training_data = training_data_list[head]
# Define the VI inference technique, ie. minimise the KL divergence between q and p.
inference = ed.KLqp({W_0: qW_0, W_1[head]: qW_1[head],
b_0: qb_0, b_1[head]: qb_1[head]}, data={y[head]:y_ph})
# Initialise the inference variables
inference.initialize(n_iter=2000, n_print=100, scale={y[head]: float(training_data._num_examples) / N})
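# The scale factor reweights the minibatch log-likelihood so the variational objective
# reflects the full dataset size rather than a single batch of N examples.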
# We will use an interactive session.
sess = tf.InteractiveSession()
# Initialise all the variables in the session.
tf.global_variables_initializer().run()
# Let the training begin. We load the data in minibatches and update the VI inference using each new batch.
run_training_cycle(inference, N, training_data, x, y_ph)
for testhead in range(5):
test_data = test_data_list[testhead]
X_test = test_data.images
Y_test = test_data.labels
# Generate samples from the posterior and store them.
prob_lst, samples = take_posterior_samples(10, X_test, testhead, qW_0, qb_0, qW_1, qb_1)
# Compute the accuracy of the model.
accy_test = []
for prob in prob_lst:
y_trn_prd = np.argmax(prob,axis=1).astype(np.float32)
acc = (y_trn_prd == Y_test).mean()*100
accy_test.append(acc)
"""
plt.hist(accy_test)
plt.title("Histogram of prediction accuracies in the MNIST test data")
plt.xlabel("Accuracy")
plt.ylabel("Frequency")
"""
# Here we compute the mean of probabilities for each class over all the (w,b) samples.
Y_pred = np.argmax(np.mean(prob_lst,axis=0),axis=1)
accuracy = (Y_pred == Y_test).mean()*100
print("accuracy in predicting the test data = ", accuracy)
accuracies.append(accuracy)
plt.figure()
# Create a Pandas DataFrame of posterior samples.
samples_df = pd.DataFrame(data = samples, index=range(10))
# Now create a small subset by taking the first 5 weights, labelled as W_0, ... , W_4.
samples_5 = pd.DataFrame(data = samples_df[list(range(5))].values,columns=["W_0", "W_1", "W_2", "W_3", "W_4"])
# We use Seaborn PairGrid to make a triangle plot showing auto- and cross-correlations.
g = sns.PairGrid(samples_5, diag_sharey=False)
g.map_lower(sns.kdeplot, n_levels = 4,cmap="Blues_d")
g.map_upper(plt.scatter)
g.map_diag(sns.kdeplot,legend=False)
plt.subplots_adjust(top=0.95)
g.fig.suptitle('Joint posterior distribution of the first 5 weights')
# Load a single image from the test data and its label.
test_image = X_test[1:2]
test_label = Y_test[1]
print('truth = ',test_label)
pixels = test_image.reshape((28, 28))
plt.figure()
plt.imshow(pixels,cmap='Blues')
# Now check what the model predicts for each (w,b) sample from the posterior. This may take a few seconds...
sing_img_probs = []
n_samples = 100
for _ in tqdm(range(n_samples)):
w0_samp = qW_0.sample()
b0_samp = qb_0.sample()
w1_samp = qW_1[head].sample()
b1_samp = qb_1[head].sample()
# Also compute the probability of each class for each (w,b) sample.
prob = tf.nn.softmax(neural_network(X_test[1:2], w0_samp, w1_samp, b0_samp, b1_samp))
sing_img_probs.append(prob.eval())
# Create a histogram of these predictions.
plt.figure()
plt.hist(np.argmax(sing_img_probs,axis=2),bins=range(10))
plt.xticks(np.arange(0,K))
plt.xlim(0,K)
plt.xlabel("Accuracy of the prediction of the test digit")
plt.ylabel("Frequency")
```
|
{
"source": "Jesse-Goertzen/291-MiniProject-2",
"score": 3
}
|
#### File: Jesse-Goertzen/291-MiniProject-2/Parser.py
```python
import re
class Parser():
string = ""
def __init__(self):
pass
# returns: list of '<operator> <date>' strings (e.g. '<= 2018/11/05')
# Use findall and have operator and date in one group, then do another regex to create a tuple for each date query
def _dateQuery(self):
match = re.findall("date\s*(<|<=|>|>=|=)\s*(\d{4}/\d{2}/\d{2})", self.string)
self.string = re.sub("date\s*(<|<=|>|>=|=)\s*\d{4}/\d{2}/\d{2}", "", self.string)
if match:
ret = []
for s in match:
if s[0] == '=':
s = ("==", s[1])
ret.append(' '.join(str(c) for c in s))
return ret
else:
return []
# returns: list of '<operator> <price>' strings
def _priceQuery(self):
match = re.findall("price\s*(<|<=|>|>=|=)\s*(\d+)", self.string)
self.string = re.sub("price\s*(<|<=|>|>=|=)\s*\d+", "", self.string)
if match:
ret = []
for s in match:
if s[0] == '=':
s = ("==", s[1])
ret.append(' '.join(str(c) for c in s))
return ret
else:
return []
def _locQuery(self):
match = re.findall("location\s*=\s*([0-9a-zA-Z_-]+)", self.string)
self.string = re.sub("location\s*=\s*[0-9a-zA-Z_-]+", "", self.string)
return match if match else []
def _catQuery(self):
match = re.findall("cat\s*=\s*([0-9a-zA-Z_-]+)", self.string)
self.string = re.sub("cat\s*=\s*[0-9a-zA-Z_-]+", "", self.string)
return match if match else []
# Maybe have the other methods return the query with the sub query removed,
# then after all other methods are called the only thing left is a term query?
def _termQuery(self):
match = re.findall("[a-zA-Z0-9_-]+%?", self.string)
self.string = re.sub("[a-zA-Z0-9_-]+%?", "", self.string)
return match if match else []
def parse(self, string):
self.string = string
queries = dict()
queries["date"] = self._dateQuery()
queries["price"] = self._priceQuery()
queries["loc"] = self._locQuery()
queries["cat"] = self._catQuery()
queries["term"] = self._termQuery()
# q = {k:v for k,v in queries.items() if v is not None}
return queries
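# Illustrative sketch of the expected output (not verified against a run):
# Parser().parse("camera date<=2018/11/05 price>20") would yield roughly
# {"date": ["<= 2018/11/05"], "price": ["> 20"], "loc": [], "cat": [], "term": ["camera"]}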
if __name__ == "__main__":
p = Parser()
qs = [
"camera",
"camera%",
"date <= 2018/11/05",
"date > 2018/11/05",
"price = 20",
"price >= 20",
"location= edmonton date=2018/11/07",
"cat=art-collectibles camera",
"camera date>= 2018/11/05 date<=2018/11/07 price > 20 price < 40"
]
for q in qs:
print(p.parse(q))
```
|
{
"source": "jessegomer/Text-Date-Analyzer",
"score": 3
}
|
#### File: Text-Date-Analyzer/src/scraper.py
```python
import requests
import json
import re
import numpy as np
import shelve
class Corpus(object):
english = 15
class NgramScraper(object):
def __init__(self, n, corpus=Corpus.english, smoothing=0, start_year=1800, end_year=1999, call_limit=10):
self.start_year = start_year
self.end_year = end_year
self.years = [i for i in range(start_year, end_year+1)]
self.range = len(self.years)
self.n = n
self.corpus = corpus
self.smoothing = smoothing
self.call_limit = call_limit
def load_shelf(self):
return shelve.open("../caches/cache_corpus_{}_n_{}".format(self.corpus, self.n))
def process_raw_data(self, ngrams, raw_data):
data = []
for ngram in ngrams:
# since it is a case-insensitive search there could be multiple results per ngram
matches = [n for n in raw_data if (n["ngram"].lower() == ngram + " (all)") or
(n["type"] == "NGRAM" and n["ngram"].lower() == ngram)]
if len(matches) > 0:
array = np.float_(matches[0]['timeseries'])
else:
array = np.zeros(self.range, np.float_)
data.append(array)
return data
def make_totals(self):
raw_counts = open("../metadata/{}_counts.txt".format(self.corpus)).read()
totals = []
for year_count in raw_counts.split("\t"):
items = year_count.split(",")
if int(items[0]) in self.years:
totals.append(int(items[1]))
return np.float_(totals)
def call_api(self, ngrams, cache):
params = {"content": ",".join(ngrams),
"case_insensitive": "on",
"year_start": self.start_year,
"year_end": self.end_year,
"corpus": self.corpus,
"smoothing": self.smoothing}
req = requests.get('http://books.google.com/ngrams/graph', params=params)
req.raise_for_status()
result = re.findall('var data = (.*?);\\n', req.text)
if not result:
cache.close()
raise Exception("API response not as expected")
raw_data = json.loads(result[0])
return self.process_raw_data(ngrams, raw_data)
def call_group(self, ngrams, cache):
results = self.call_api(ngrams, cache)
for ngram, result in zip(ngrams, results):
cache[ngram] = result
return results
def get_ngram_counts(self, ngrams):
cache = self.load_shelf()
data = []
to_call = []
for ngram in ngrams:
if ngram in cache:
data.append(cache[ngram])
else:
to_call.append(ngram)
if len(to_call) == self.call_limit:
data += self.call_group(to_call, cache)
to_call = []
if len(to_call) > 0:
data += self.call_group(to_call, cache)
cache.close()
return data
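# Usage sketch (illustrative; assumes the ../caches directory exists and the
# Google Ngrams endpoint is reachable):
# scraper = NgramScraper(n=1)
# data = scraper.get_ngram_counts(["church", "railroad"])  # one per-year array per ngram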
```
|
{
"source": "jessegonzalez-life360/itsybitsy",
"score": 2
}
|
#### File: itsybitsy/itsybitsy/itsybitsy.py
```python
__version__ = "1.0.1"
import asyncio
import getpass
import logging
import os
import signal
import sys
from contextlib import contextmanager
from termcolor import colored
from typing import Dict
from . import charlotte, charlotte_web, cli_args, constants, crawl, logs, node, plugin_core, providers, renderers
from .plugins import render_json, render_ascii
# python3 check
REQUIRED_PYTHON_VERSION = (3, 8)
def tuple_join(the_tuple):
return '.'.join(str(i) for i in the_tuple)
if sys.version_info[:2] < REQUIRED_PYTHON_VERSION:
print(f"Python version {tuple_join(sys.version_info[:2])} detected. This script requires Python version >= "
f"{tuple_join(REQUIRED_PYTHON_VERSION)} available at `/usr/bin/env python3`")
sys.exit(1)
# catch ctrl-c
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
def main():
print(f"Hello, {getpass.getuser()}", file=sys.stderr)
plugin_core.import_plugin_classes()
_parse_builtin_args()
_set_debug_level()
charlotte.init()
_create_outputs_directory_if_absent()
_cli_command().exec()
print(f"\nGoodbye, {getpass.getuser()}\n", file=sys.stderr)
def _parse_builtin_args():
try:
with _suppress_console_out():
constants.ARGS, _ = cli_args.parse_args(renderers.get_renderer_refs())
except SystemExit:
# this is done in order to display custom plugin level arguments in --help script output
providers.parse_provider_args(cli_args.spider_subparser)
renderers.parse_renderer_args(cli_args.spider_subparser)
renderers.parse_renderer_args(cli_args.render_subparser)
cli_args.argparser.parse_known_args()
@contextmanager
def _suppress_console_out():
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = devnull
sys.stderr = devnull
try:
yield
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
class Command:
def exec(self):
self._initialize_plugins()
constants.ARGS = cli_args.argparser.parse_args()
tree = self._generate_tree()
_render(tree)
def _initialize_plugins(self):
raise NotImplementedError('Plugin initialization not implemented')
def _generate_tree(self) -> Dict[str, node.Node]:
raise NotImplementedError('Tree generation not implemented')
class RenderCommand(Command):
def _initialize_plugins(self):
_initialize_renderers()
def _generate_tree(self) -> Dict[str, node.Node]:
if not constants.ARGS.output:
constants.ARGS.output = ['ascii']
return render_json.load(constants.ARGS.json_file or constants.LASTRUN_FILE)
class SpiderCommand(Command):
def _initialize_plugins(self):
_initialize_renderers(True)
_initialize_providers()
def _generate_tree(self) -> Dict[str, node.Node]:
tree = asyncio.get_event_loop().run_until_complete(_crawl_water_spout())
render_json.dump(tree, constants.LASTRUN_FILE)
return tree
def _cli_command() -> Command:
if cli_args.command_render == constants.ARGS.command:
return RenderCommand()
elif cli_args.command_spider == constants.ARGS.command:
return SpiderCommand()
else:
print(colored(f"Invalid command: {constants.ARGS.command}. Please file bug with maintainer.", 'red'))
sys.exit(1)
async def _crawl_water_spout():
tree = _parse_seed_tree()
await _crawl_and_render_to_stderr_unless_quiet_is_specified(tree)
return tree
async def _crawl_and_render_to_stderr_unless_quiet_is_specified(tree: Dict[str, node.Node]):
outfile = open(os.devnull, 'w') if constants.ARGS.quiet else sys.stderr
crawl_tasks = [
crawl.crawl(tree, []),
render_ascii.render_tree(tree, [], out=outfile, print_slowly_for_humans=True)
]
await asyncio.gather(*crawl_tasks)
def _parse_seed_tree() -> Dict[str, node.Node]:
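# Seeds arrive from the CLI as 'provider:address' strings (e.g. something like 'ssh:10.0.0.1');
# each one becomes a seed Node in the tree.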
return {
f"SEED:{address}":
node.Node(
crawl_strategy=charlotte.SEED_CRAWL_STRATEGY,
protocol=charlotte_web.PROTOCOL_SEED,
protocol_mux='seed',
provider=provider,
containerized=providers.get_provider_by_ref(provider).is_container_platform(),
from_hint=False,
address=address
)
for provider, address in [seed.split(':') for seed in constants.ARGS.seeds]
}
def _initialize_renderers(parse_spider_command_args: bool = False):
renderers.parse_renderer_args(cli_args.render_subparser)
if parse_spider_command_args:
renderers.parse_renderer_args(cli_args.spider_subparser)
constants.ARGS, _ = cli_args.argparser.parse_known_args()
renderers.register_renderers()
def _initialize_providers():
providers.parse_provider_args(cli_args.spider_subparser)
constants.ARGS, _ = cli_args.argparser.parse_known_args()
providers.register_providers()
def _create_outputs_directory_if_absent():
if not os.path.exists(constants.OUTPUTS_DIR):
os.makedirs(constants.OUTPUTS_DIR)
def _render(tree: Dict[str, node.Node]) -> None:
if not constants.ARGS.output:
return
for renderer_ref in constants.ARGS.output:
renderer = renderers.get_renderer_by_ref(renderer_ref)
renderer.render(tree)
def _set_debug_level():
if constants.ARGS.debug:
logs.logger.setLevel(logging.DEBUG)
```
#### File: itsybitsy/itsybitsy/plugin_core.py
```python
from termcolor import colored
from typing import Dict, List, Optional
import configargparse
import importlib
import pkgutil
import re
import sys
import itsybitsy.plugins
def import_plugin_classes():
for _1, name, _2 in pkgutil.iter_modules(itsybitsy.plugins.__path__,
itsybitsy.plugins.__name__ + "."):
importlib.import_module(name)
class PluginArgParser:
def __init__(self, prefix: str, argparser: configargparse.ArgParser):
self._prefix = prefix
self._argparser = argparser
def add_argument(self, option_name: str, **kwargs):
"""
A wrapper method on top of the classic ArgParse::add_argument(). All keyword arguments are supported, however
only a single option_name is allowed, such as '--foo-argument'. Argument registered here will be prepended
with the ProviderInterface() ref in order to avoid namespace collisions between provider plugins. For example
'--foo-argument' registered by a ProviderInterface() with ref() = 'bar' will result in a CLI arg of
'--bar-foo-argument'.
:param option_name: such as '--foo-something'
:param kwargs: pass through kwargs for ArgParse::add_argument, such as "required", "type", "nargs", etc.
:return:
"""
option_name = f"{self._prefix}-{option_name}"
option_name_with_dashes_consolidated = re.sub('-+', '-', option_name)
option_name_with_leading_dashes = f"--{option_name_with_dashes_consolidated}"
self._argparser.add_argument(option_name_with_leading_dashes, **kwargs)
class PluginRefNotImplemented(Exception):
"""Exception thrown if provider has not implemented ref() method"""
class PluginClobberException(Exception):
"""An exception indicating a provider is clobbering the namespace of another plugin"""
class PluginInterface:
@staticmethod
def ref() -> str:
"""
Every plugin is identified by a unique "reference" or "ref" which much be declared by implemented this
public abstract method.
:return: the unique reference or "ref" of the provider.
"""
raise PluginRefNotImplemented
@staticmethod
def register_cli_args(argparser: PluginArgParser):
"""Each plugin has a chance to register custom CLI args which will be prefixed with `self.ref()` """
class PluginFamilyRegistry:
"""Registry for plugins within a plugin Family"""
def __init__(self, cls: PluginInterface, cli_args_prefix: str = ''):
self._cls: PluginInterface = cls
self._cli_args_prefix = cli_args_prefix
self._plugin_registry: Dict[str, PluginInterface] = {}
def parse_plugin_args(self, argparser: configargparse.ArgParser):
"""Plugins are given an opportunity to register custom CLI arguments"""
for plugin in self._cls.__subclasses__():
plugin: PluginInterface
prefix = f'{self._cli_args_prefix}-{plugin.ref()}' if self._cli_args_prefix else plugin.ref()
plugin_argparser = PluginArgParser(prefix, argparser)
plugin.register_cli_args(plugin_argparser)
def register_plugins(self, disabled_classes: Optional[List[str]] = None):
for plugin in [c for c in self._cls.__subclasses__() if c not in (disabled_classes or [])]:
if plugin.ref() in self._plugin_registry:
raise PluginClobberException(f"Provider {plugin.ref()} already registered!")
self._plugin_registry[plugin.ref()] = plugin()
def get_plugin(self, ref: str) -> PluginInterface:
try:
return self._plugin_registry[ref]
except KeyError as e:
print(colored(f"Attempted to load invalid plugin: {ref}", 'red'))
print(colored(str(e), 'yellow'))
sys.exit(1)
def get_registered_plugin_refs(self) -> List[str]:
return list(self._plugin_registry.keys())
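# Minimal sketch of a concrete plugin (illustrative only; the class name and flag are invented):
#
# class ExampleProvider(PluginInterface):
#     @staticmethod
#     def ref() -> str:
#         return 'example'
#
#     @staticmethod
#     def register_cli_args(argparser: PluginArgParser):
#         argparser.add_argument('--verbose', action='store_true', help='example plugin flag')
#
# With no registry prefix, parse_plugin_args() would expose this flag as '--example-verbose'.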
```
#### File: itsybitsy/plugins/provider_ssh.py
```python
import asyncio
import asyncssh
import os
import paramiko
import getpass
import sys
from asyncssh import ChannelOpenError, SSHClientConnection
from termcolor import colored
from typing import List, Optional
from itsybitsy import constants, logs
from itsybitsy.providers import ProviderInterface, TimeoutException, parse_crawl_strategy_response
from itsybitsy.plugin_core import PluginArgParser
from itsybitsy.node import NodeTransport
bastion: Optional[SSHClientConnection] = None
connect_timeout = 5
connection_semaphore = None
connection_semaphore_spaces_used = 0
connection_semaphore_spaces_min = 10
ssh_connect_args = {'known_hosts': None}
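# known_hosts=None disables asyncssh's host key verification for these connections.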
class ProviderSSH(ProviderInterface):
@staticmethod
def ref() -> str:
return 'ssh'
@staticmethod
def register_cli_args(argparser: PluginArgParser):
argparser.add_argument('--bastion-timeout', type=int, default=10, metavar='TIMEOUT',
help='Timeout in seconds to establish SSH connection to bastion (jump server)')
argparser.add_argument('--concurrency', type=int, default=10, metavar='CONCURRENCY',
help='Max number of concurrent SSH connections')
argparser.add_argument('--config-file', default="~/.ssh/config", metavar='FILE',
help='SSH config file to parse for configuring SSH sessions. '
'As in `ssh -F ~/.ssh/config`)')
argparser.add_argument('--passphrase', action='store_true',
                           help='Prompt for, and use, the specified passphrase to decrypt SSH private keys')
argparser.add_argument('--name-command', required=True, metavar='COMMAND',
help='Used by SSH Provider to determine node name')
async def open_connection(self, address: str) -> SSHClientConnection:
if not bastion:
await _configure(address)
logs.logger.debug(f"Getting asyncio SSH connection for host {address}")
async with connection_semaphore:
return await _get_connection(address)
async def lookup_name(self, address: str, connection: SSHClientConnection) -> str:
logs.logger.debug(f"Getting service name for address {address}")
node_name_command = constants.ARGS.ssh_name_command
async with connection_semaphore:
result = await connection.run(node_name_command, check=True)
node_name = result.stdout.strip()
logs.logger.debug(f"Discovered name: {node_name} for address {address}")
return node_name
async def crawl_downstream(self, address: str, connection: SSHClientConnection, **kwargs) -> List[NodeTransport]:
try:
command = kwargs['shell_command']
except KeyError as e:
print(colored(f"Crawl Strategy incorrectly configured for provider SSH. "
f"Expected **kwargs['shell_command']. Got:{str(kwargs)}", 'red'))
raise e
response = await connection.run(command)
if response.stdout.strip().startswith('ERROR:'):
raise Exception('CRAWL ERROR: ' + response.stdout.strip().replace("\n", "\t"))
return parse_crawl_strategy_response(response.stdout.strip(), address, command)
async def _get_connection(host: str, retry_num=0) -> asyncssh.SSHClientConnection:
try:
logs.logger.debug(f"Getting asyncio SSH connection for host {host}")
return await bastion.connect_ssh(host, **ssh_connect_args)
except ChannelOpenError:
raise TimeoutException(f"asyncssh.ChannelOpenError encountered opening SSH connection for {host}")
except Exception as e:
if retry_num < 3:
asyncio.ensure_future(_occupy_one_semaphore_space())
await asyncio.sleep(.1)
return await _get_connection(host, retry_num+1)
raise e
async def _occupy_one_semaphore_space() -> None:
"""Use up one spot in the SSH connection semaphore.
This is used to fine-tune whether the semaphore is configured
for too many concurrent SSH connections. It will never occupy
so many spots that fewer than {connection_semaphore_spaces_min}
remain free in the semaphore for real work.
"""
global connection_semaphore_spaces_used
if (constants.ARGS.ssh_concurrency - connection_semaphore_spaces_used) > connection_semaphore_spaces_min:
async with connection_semaphore:
connection_semaphore_spaces_used += 1
logs.logger.debug(f"Using 1 additional semaphore space, ({connection_semaphore_spaces_used} used)")
forever_in_the_context_of_this_program = 86400
await asyncio.sleep(forever_in_the_context_of_this_program)
# configuration private functions
async def _configure(address: str):
global bastion, connection_semaphore, ssh_connect_args
connection_semaphore = asyncio.BoundedSemaphore(constants.ARGS.ssh_concurrency)
ssh_config = _get_ssh_config_for_host(address)
jump_server_address = _get_jump_server_for_host(ssh_config)
ssh_connect_args['username'] = ssh_config.get('user')
if constants.ARGS.ssh_passphrase:
ssh_connect_args['passphrase'] = getpass.getpass(colored("Enter SSH key passphrase:", 'green'))
try:
bastion = await asyncio.wait_for(
asyncssh.connect(jump_server_address, **ssh_connect_args), timeout=constants.ARGS.ssh_bastion_timeout
)
except asyncio.TimeoutError:
print(colored(f"Timeout connecting to SSH bastion server: {jump_server_address}. "
f"Try turning it off and on again.", 'red'))
sys.exit(1)
except asyncssh.PermissionDenied:
print(colored(f"SSH Permission denied attempting to connect to {address}. It is possible that your SSH Key "
f"requires a passphrase. If this is the case please add either it to ssh-agent with `ssh-add` "
f"(See https://www.ssh.com/ssh/add for details on that process) or try again using the "
f"--ssh-passphrase argument. ", 'red'))
sys.exit(1)
def _get_ssh_config_for_host(host: str) -> dict:
"""Parse ssh config file to retrieve bastion address and username
:param host: (str) host to parse ssh config file for
:return: a dict of ssh config, e.g.
{
'forwardagent': 'yes',
'hostname': '10.0.0.145',
'proxycommand': 'ssh -q ops nc 10.0.0.145 22',
'serveraliveinterval': '120',
'stricthostkeychecking': 'no',
'user': 'foo',
'userknownhostsfile': '/dev/null'
}
"""
ssh_config = paramiko.SSHConfig()
user_config_file = os.path.expanduser(constants.ARGS.ssh_config_file)
try:
with open(user_config_file) as f:
ssh_config.parse(f)
except FileNotFoundError:
print("{} file could not be found. Aborting.".format(user_config_file))
sys.exit(1)
return ssh_config.lookup(host)
def _get_jump_server_for_host(config: dict) -> str:
"""
:param config: ssh config in dict format as returned by paramiko.SSHConfig().lookup()
"""
config_file_path = os.path.expanduser(constants.ARGS.ssh_config_file)
proxycommand_host = _get_proxycommand_host(config)
proxyjump_host = _get_proxyjump_host(config)
bastion_host = proxyjump_host or proxycommand_host
if not bastion_host:
print(colored(f"Required SSH directive ProxyJump (or ProxyCommand) not found (or misconfigured)"
f" in {config_file_path}... Please correct your ssh config! SSH directives found:", 'red'))
constants.PP.pprint(config)
sys.exit(1)
bastion_config = _get_ssh_config_for_host(bastion_host)
if 'hostname' not in bastion_config:
print(colored(f"{bastion_host} misconfigured in {config_file_path}... "
f"Please correct your ssh config! Contents:", 'red'))
constants.PP.pprint(config)
sys.exit(1)
return bastion_config['hostname']
def _get_proxycommand_host(config):
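# e.g. a ProxyCommand of 'ssh -q ops nc 10.0.0.145 22' has 6 tokens; the bastion alias ('ops') is the third.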
if 'proxycommand' not in config:
return None
proxycommand_columns = config['proxycommand'].split(" ")
if 6 != len(proxycommand_columns):
return None
return proxycommand_columns[2]
def _get_proxyjump_host(config):
if 'proxyjump' not in config:
return None
return config['proxyjump']
```
#### File: itsybitsy/plugins/render_json.py
```python
import json
from dataclasses import asdict, is_dataclass
from typing import Dict
from itsybitsy import constants, renderers
from itsybitsy.charlotte import CrawlStrategy
from itsybitsy.charlotte_web import Protocol
from itsybitsy.node import Node
class RendererJson(renderers.RendererInterface):
@staticmethod
def ref() -> str:
return 'json'
def render(self, tree: Dict[str, Node]):
tree_with_args = _add_cli_args_to_json_tree(tree)
print(json.dumps(tree_with_args, cls=_EnhancedJSONEncoder))
class _EnhancedJSONEncoder(json.JSONEncoder):
"""Dataclass objects to not have native support for JSON serialization. This class allows for that"""
def default(self, o): # pylint: disable=method-hidden
if is_dataclass(o):
return asdict(o)
return super().default(o)
def _deserialize_object(dct: dict):
"""Used to json deserialization to our custom classes"""
dct_type = dct.get('__type__')
if not dct_type:
return dct
if 'Node' == dct_type:
return Node(**dct)
elif 'CrawlStrategy' == dct_type:
return CrawlStrategy(**dct)
elif 'Protocol' == dct_type:
return Protocol(**dct)
raise Exception(f"Unrecognized __type__: ({dct_type}) encountered during json deserialization")
def load(file):
"""
Load the JSON rendering of the tree from `file` and restore the CLI args recorded at dump time.
:param file:
:return:
"""
with open(file) as file_handle:
    loaded = json.load(file_handle, object_hook=_deserialize_object)
constants.ARGS.max_depth = int(loaded['args']['max_depth'])
constants.ARGS.skip_nonblocking_grandchildren = loaded['args']['skip_nonblocking_grandchildren']
return loaded['tree']
def dump(tree: Dict[str, Node], file: str = None) -> None:
"""
Dump JSON of the tree to a file - includes constants.ARGS in the dump.
:param tree:
:param file:
:return:
"""
tree_with_args = _add_cli_args_to_json_tree(tree)
with open(file, 'w+') as file_handle:
json.dump(tree_with_args, file_handle, cls=_EnhancedJSONEncoder)
def _add_cli_args_to_json_tree(tree: Dict[str, Node]) -> dict:
return {
'args': vars(constants.ARGS),
'tree': tree
}
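# Round-trip sketch (illustrative file name): dump(tree, 'lastrun.json') records the tree along
# with the CLI args; load('lastrun.json') rebuilds the tree and re-applies max_depth and
# skip_nonblocking_grandchildren from those recorded args.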
```
#### File: tests/plugins/test_render_ascii.py
```python
from itsybitsy.plugins import render_ascii
from itsybitsy.node import Node
from dataclasses import replace
from typing import Dict
import asyncio
import pytest
import sys
@pytest.fixture(autouse=True)
def set_default_cli_args(cli_args_mock):
cli_args_mock.render_ascii_verbose = False
cli_args_mock.debug = False
async def _helper_render_tree_with_timeout(tree: Dict[str, Node]) -> None:
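# The 0.1s timeout keeps tests from hanging: render_tree() waits for crawl/name lookup to
# complete, and several tests below deliberately leave it incomplete and expect a TimeoutError.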
await asyncio.wait_for(render_ascii.render_tree(tree, [], sys.stdout), .1)
@pytest.mark.asyncio
async def test_render_tree_case_seed(tree_stubbed, capsys):
"""Test a single seed node is printed correctly - no errors or edge cases"""
# arrange
seed = tree_stubbed[list(tree_stubbed)[0]]
seed.children = {}
# act
await _helper_render_tree_with_timeout(tree_stubbed)
captured = capsys.readouterr()
# assert
assert f"\n{seed.service_name} [{seed.protocol_mux}]\n" == captured.out
@pytest.mark.asyncio
async def test_render_tree_case_child(tree_stubbed_with_child, capsys):
"""Test a single child node is printed correctly - no errors or edge cases"""
# arrange
seed = tree_stubbed_with_child[list(tree_stubbed_with_child)[0]]
child = seed.children[list(seed.children)[0]]
# act
await _helper_render_tree_with_timeout(tree_stubbed_with_child)
captured = capsys.readouterr()
# assert
expected = ("\n"
f"{seed.service_name} [{seed.protocol_mux}]\n"
f" └--{child.protocol.ref}--> {child.service_name} [port:{child.protocol_mux}]\n")
assert expected == captured.out
# wait_for: wait for service name to print
@pytest.mark.asyncio
async def test_render_tree_case_crawl_not_complete(tree_stubbed, capsys, mocker):
"""Render should not happen for a node unless `crawl_complete()` returns True"""
# arrange
seed = tree_stubbed[list(tree_stubbed)[0]]
mocker.patch.object(seed, 'crawl_complete', return_value=False)
# act/assert
with pytest.raises(asyncio.TimeoutError):
await _helper_render_tree_with_timeout(tree_stubbed)
captured = capsys.readouterr()
assert seed.service_name not in captured
@pytest.mark.asyncio
async def test_render_tree_case_children_namelookup_incomplete(tree_stubbed_with_child, capsys, mocker):
"""Render should not happen for any children until all children names have been looked up"""
# arrange
seed = tree_stubbed_with_child[list(tree_stubbed_with_child)[0]]
child = seed.children[list(seed.children)[0]]
another_child = replace(child, service_name='another_child')
seed.children['last_child'] = another_child
mocker.patch.object(child, 'crawl_complete', return_value=True)
mocker.patch.object(another_child, 'crawl_complete', return_value=True)
mocker.patch.object(child, 'name_lookup_complete', return_value=True)
mocker.patch.object(another_child, 'name_lookup_complete', return_value=False)
# act/assert
with pytest.raises(asyncio.TimeoutError):
await _helper_render_tree_with_timeout(tree_stubbed_with_child)
captured = capsys.readouterr()
assert seed.service_name in captured.out
assert child.service_name not in captured.out
assert another_child.service_name not in captured.out
@pytest.mark.asyncio
@pytest.mark.parametrize('error', ['NULL_ADDRESS', 'TIMEOUT', 'AWS_LOOKUP_FAILED'])
async def test_render_tree_case_child_errors(error, tree_stubbed_with_child, capsys):
"""A node with errors and no service name is displayed correctly"""
# arrange
seed = tree_stubbed_with_child[list(tree_stubbed_with_child)[0]]
child = seed.children[list(seed.children)[0]]
child.service_name = None
child.errors = {error: True}
# act
await _helper_render_tree_with_timeout(tree_stubbed_with_child)
captured = capsys.readouterr()
# assert
assert f" └--{child.protocol.ref}--? \x1b[31m{{ERR:{error}}} \x1b[0mUNKNOWN [port:{child.protocol_mux}]" \
in captured.out
@pytest.mark.asyncio
async def test_render_tree_case_child_warning_cycle(tree_stubbed_with_child, capsys):
"""A node with a CYCLE warning is displayed correctly"""
# arrange
seed = tree_stubbed_with_child[list(tree_stubbed_with_child)[0]]
child = seed.children[list(seed.children)[0]]
child.warnings = {'CYCLE': True}
# act
await _helper_render_tree_with_timeout(tree_stubbed_with_child)
captured = capsys.readouterr()
# assert
assert f" <--{child.protocol.ref}--> \x1b[33m{{WARN:CYCLE}} \x1b[0m{child.service_name}" in captured.out
@pytest.mark.asyncio
async def test_render_tree_case_child_warning_defunct(cli_args_mock, tree_stubbed_with_child, capsys):
"""A node with a DEFUNCT warning is displayed correctly"""
# arrange
cli_args_mock.hide_defunct = False
seed = tree_stubbed_with_child[list(tree_stubbed_with_child)[0]]
child = seed.children[list(seed.children)[0]]
child.warnings = {'DEFUNCT': True}
# act
await _helper_render_tree_with_timeout(tree_stubbed_with_child)
captured = capsys.readouterr()
# assert
assert f" └--{child.protocol.ref}--x \x1b[33m{{WARN:DEFUNCT}} \x1b[0m{child.service_name}" in captured.out
@pytest.mark.asyncio
async def test_render_tree_case_hide_defunct(cli_args_mock, tree_stubbed_with_child, capsys):
"""A node with a DEFUNCT warning is displayed correctly"""
# arrange
cli_args_mock.hide_defunct = True
seed = tree_stubbed_with_child[list(tree_stubbed_with_child)[0]]
child = seed.children[list(seed.children)[0]]
child.warnings = {'DEFUNCT': True}
# act
await _helper_render_tree_with_timeout(tree_stubbed_with_child)
captured = capsys.readouterr()
# assert
assert 'DEFUNCT' not in captured.out
@pytest.mark.asyncio
async def test_render_tree_case_respect_cli_max_depth(cli_args_mock, tree_stubbed_with_child, capsys):
"""--max-depth arg is respected"""
# arrange
cli_args_mock.max_depth = 0
seed = tree_stubbed_with_child[list(tree_stubbed_with_child)[0]]
child = seed.children[list(seed.children)[0]]
child.service_name = 'DEPTH_1'
# act
await _helper_render_tree_with_timeout(tree_stubbed_with_child)
captured = capsys.readouterr()
# assert
assert child.service_name not in captured.out
@pytest.mark.asyncio
async def test_render_tree_case_last_child(tree_stubbed_with_child, node_fixture, capsys):
"""A single node with multiple children, the last child printed is slightly different"""
# arrange
seed = tree_stubbed_with_child[list(tree_stubbed_with_child)[0]]
child = seed.children[list(seed.children)[0]]
last_child = replace(node_fixture, service_name='last_child_service', address='last_child_address', children={})
seed.children['last_child'] = last_child
# act
await _helper_render_tree_with_timeout(tree_stubbed_with_child)
captured = capsys.readouterr()
# assert
assert f"|--{child.protocol.ref}--> {child.service_name}" in captured.out
assert f"└--{last_child.protocol.ref}--> {last_child.service_name} " in captured.out
@pytest.mark.asyncio
async def test_render_tree_case_merged_nodes(tree_stubbed_with_child, capsys):
"""A single node with multiple children, the last child printed is slightly different"""
# arrange
seed = tree_stubbed_with_child[list(tree_stubbed_with_child)[0]]
child = seed.children[list(seed.children)[0]]
redundant_child = replace(child, protocol_mux='some_other_mux')
seed.children['redundant_child'] = redundant_child
# - we have to capture this now because render_tree will mutate these objects!
expected_merged_mux = f"{child.protocol_mux},{redundant_child.protocol_mux}"
# act
await _helper_render_tree_with_timeout(tree_stubbed_with_child)
captured = capsys.readouterr()
# assert
assert f"--> {child.service_name} [port:{expected_merged_mux}]" in captured.out
@pytest.mark.asyncio
async def test_render_tree_case_node_hint_merged(tree_named, protocol_fixture, node_fixture_factory, capsys):
"""Tests that two child nodes which are on the same protocol/mux are merged together if 1 is a hint"""
# arrange
protocol_ref, protocol_mux, error, service_name = ('FOO', 'barbaz', 'BUZZ', 'qux')
protocol_fixture = replace(protocol_fixture, ref=protocol_ref)
child_node_crawled = replace(node_fixture_factory(), service_name=None, errors={error: True})
child_node_crawled.protocol = protocol_fixture
child_node_crawled.protocol_mux = protocol_mux
child_node_hint = replace(node_fixture_factory(), service_name=service_name, from_hint=True)
child_node_hint.protocol = protocol_fixture
child_node_hint.protocol_mux = protocol_mux
tree = tree_named
list(tree.values())[0].children = {'crawled': child_node_crawled, 'hinted': child_node_hint}
# act
await _helper_render_tree_with_timeout(tree)
captured = capsys.readouterr()
# assert
assert 'UNKNOWN' not in captured.out
assert f"\x1b[36m{{INFO:FROM_HINT}} \x1b[0m\x1b[31m{{ERR:{error}}} \x1b[0m{service_name}" in captured.out
@pytest.mark.asyncio
async def test_render_tree_case_node_nonhint_not_merged(tree_named, protocol_fixture, node_fixture_factory, capsys):
"""
Ensures that 2 children on the same protocol/mux are not accidentally merged into one
Ensures that 2 children not on the same protocol/mux are not accidentally merged into one
"""
# arrange
protocol_ref, protocol_mux_1, protocol_mux_2, child_1_name, child_2_name, child_3_name = \
('FOO', 'barbaz', 'buzzqux', 'quxx', 'quz', 'clorge')
protocol_fixture = replace(protocol_fixture, ref=protocol_ref)
child_1 = replace(node_fixture_factory(), service_name=child_1_name)
child_1.protocol = protocol_fixture
child_1.protocol_mux = protocol_mux_1
child_1.children = []
child_2 = replace(node_fixture_factory(), service_name=child_2_name)
child_2.protocol = protocol_fixture
child_2.protocol_mux = protocol_mux_1
child_2.children = []
child_3 = replace(node_fixture_factory(), service_name=child_3_name)
child_3.protocol = protocol_fixture
child_3.protocol_mux = protocol_mux_2
child_3.children = []
list(tree_named.values())[0].children = {'child1': child_1, 'child2': child_2, 'child3': child_3}
# act
await _helper_render_tree_with_timeout(tree_named)
captured = capsys.readouterr()
assert child_1_name in captured.out
assert child_2_name in captured.out
assert child_3_name in captured.out
```
#### File: itsybitsy/tests/test_crawl.py
```python
from itsybitsy import crawl, node
from itsybitsy.providers import TimeoutException
import asyncio
import pytest
from unittest.mock import MagicMock
@pytest.fixture(autouse=True)
def clear_caches():
"""Clear crawl.py caches between tests - otherwise our asserts for function calls may not pass"""
crawl.service_name_cache = {}
crawl.child_cache = {}
@pytest.fixture(autouse=True)
def set_default_timeout(builtin_providers, cli_args_mock):
cli_args_mock.timeout = 30
@pytest.fixture
def mock_provider_ref() -> str:
return 'mock_provider'
@pytest.fixture
def provider_mock(mocker, mock_provider_ref) -> MagicMock:
provider_mock = mocker.patch('itsybitsy.providers.ProviderInterface', autospec=True)
provider_mock.ref.return_value = mock_provider_ref
mocker.patch('itsybitsy.providers.get_provider_by_ref', return_value=provider_mock)
return provider_mock
@pytest.fixture
def cs_mock(protocol_fixture, mocker, mock_provider_ref) -> MagicMock:
"""it is a required fixture to include, whether or not it is used explicitly, in or to mock crawl_downstream"""
cs_mock = mocker.patch('itsybitsy.charlotte.CrawlStrategy', autospec=True)
mocker.patch('itsybitsy.charlotte.crawl_strategies', [cs_mock])
cs_mock.rewrite_service_name.side_effect = lambda x, y: x
cs_mock.filter_service_name.return_value = False
cs_mock.protocol = protocol_fixture
cs_mock.provider_args = {}
cs_mock.providers = [mock_provider_ref]
return cs_mock
@pytest.fixture
def protocol_mock(mocker, dummy_protocol_ref) -> MagicMock:
protocol_mock = mocker.patch('itsybitsy.charlotte_web.Protocol')
protocol_mock.ref = dummy_protocol_ref
return protocol_mock
@pytest.fixture
def hint_mock(protocol_fixture, mocker) -> MagicMock:
hint_mock = mocker.patch('itsybitsy.charlotte_web.Hint', autospec=True)
hint_mock.instance_provider = 'dummy_hint_provider'
hint_mock.protocol = protocol_fixture
mocker.patch('itsybitsy.charlotte_web.hints', [hint_mock])
return hint_mock
@pytest.fixture(autouse=True)
def set_default_cli_args(cli_args_mock):
cli_args_mock.obfuscate = False
# helpers
async def _wait_for_all_tasks_to_complete(event_loop):
"""Wait for all tasks to complete in the event loop. Assumes that 1 task will remain incomplete - and that
is the task for the async `test_...` function itself"""
while len(asyncio.all_tasks(event_loop)) > 1:
await asyncio.sleep(0.1) # we "fire and forget" in crawl() and so have to "manually" "wait"
# Calls to ProviderInterface::open_connection
@pytest.mark.asyncio
async def test_crawl_case_connection_opened_and_passed(tree, provider_mock, cs_mock):
"""Crawling a single node tree - connection is opened and passed to both lookup_name and crawl_downstream"""
# arrange
# mock provider
stub_connection = 'foo_connection'
provider_mock.open_connection.return_value = stub_connection
provider_mock.lookup_name.return_value = 'bar_name'
# mock crawl strategy
stub_provider_args = {'baz': 'buz'}
cs_mock.provider_args = stub_provider_args
cs_mock.providers = [provider_mock.ref()]
# act
await crawl.crawl(tree, [])
# assert
provider_mock.open_connection.assert_called_once_with(list(tree.values())[0].address)
provider_mock.lookup_name.assert_called_once_with(list(tree.values())[0].address, stub_connection)
provider_mock.crawl_downstream.assert_called_once_with(list(tree.values())[0].address, stub_connection,
**stub_provider_args)
@pytest.mark.asyncio
async def test_crawl_case_open_connection_handles_skip_protocol_mux(tree, provider_mock, cs_mock, mocker):
"""If a node should be skipped due to protocol_mux, we do not even open the connection and we set an error."""
# arrange
skip_function = mocker.patch('itsybitsy.charlotte_web.skip_protocol_mux', return_value=True)
# act
await crawl.crawl(tree, [])
# assert
assert 'CONNECT_SKIPPED' in list(tree.values())[0].errors
provider_mock.open_connection.assert_not_called()
provider_mock.lookup_name.assert_not_called()
provider_mock.crawl_downstream.assert_not_called()
skip_function.assert_called_once_with(list(tree.values())[0].protocol_mux)
@pytest.mark.asyncio
async def test_crawl_case_open_connection_handles_timeout_exception(tree, provider_mock, cs_mock):
"""Respects the contractual TimeoutException or ProviderInterface. If thrown we set TIMEOUT error
but do not stop crawling"""
# arrange
provider_mock.open_connection.side_effect = TimeoutException
# act
await crawl.crawl(tree, [])
assert 'TIMEOUT' in list(tree.values())[0].errors
provider_mock.lookup_name.assert_not_called()
provider_mock.crawl_downstream.assert_not_called()
@pytest.mark.asyncio
async def test_crawl_case_open_connection_handles_timeout(tree, provider_mock, cs_mock, cli_args_mock, mocker):
"""A natural timeout during ProviderInterface::open_connections is also handled by setting TIMEOUT error"""
# arrange
cli_args_mock.timeout = .1
async def slow_open_connection(_):
await asyncio.sleep(1)
provider_mock.open_connection.side_effect = slow_open_connection
# act
await crawl.crawl(tree, [])
assert 'TIMEOUT' in list(tree.values())[0].errors
provider_mock.lookup_name.assert_not_called()
provider_mock.crawl_downstream.assert_not_called()
@pytest.mark.asyncio
async def test_crawl_case_open_connection_handles_exceptions(tree, provider_mock, cs_mock):
"""Handle any other exceptions thrown by ProviderInterface::open_connection by exiting the program"""
# arrange
provider_mock.open_connection.side_effect = Exception('BOOM')
# act/assert
with pytest.raises(SystemExit):
await crawl.crawl(tree, [])
# Calls to ProviderInterface::lookup_name
@pytest.mark.asyncio
async def test_crawl_case_lookup_name_uses_cache(tree, node_fixture_factory, provider_mock):
"""Validate the calls to lookup_name for the same address are cached"""
# arrange
address = 'use_this_address_twice'
node2 = node_fixture_factory()
node2.address = address
tree['dummy2'] = node2
list(tree.values())[0].address = address
# act
await crawl.crawl(tree, [])
# assert
provider_mock.lookup_name.assert_called_once()
@pytest.mark.asyncio
async def test_crawl_case_lookup_name_handles_timeout(tree, provider_mock, cs_mock, cli_args_mock, mocker):
"""Timeout is handled during lookup_name and results in a sys.exit"""
# arrange
cli_args_mock.timeout = .1
async def slow_lookup_name(address):
await asyncio.sleep(1)
provider_mock.lookup_name = slow_lookup_name
# act/assert
with pytest.raises(SystemExit):
await crawl.crawl(tree, [])
@pytest.mark.asyncio
async def test_crawl_case_lookup_name_handles_exceptions(tree, provider_mock, cs_mock):
"""Any exceptions thrown by lookup_name are handled by exiting the program"""
# arrange
provider_mock.lookup_name.side_effect = Exception('BOOM')
# act/assert
with pytest.raises(SystemExit):
await crawl.crawl(tree, [])
# Calls to ProviderInterface::crawl_downstream
@pytest.mark.asyncio
@pytest.mark.parametrize('name,crawl_expected,error', [(None, False, 'NAME_LOOKUP_FAILED'), ('foo', True, None)])
async def test_crawl_case_crawl_downstream_based_on_name(name, crawl_expected, error, tree, provider_mock, cs_mock):
"""Depending on whether provider.name_lookup() returns a name - we should or should not crawl_downstream()"""
# arrange
provider_mock.lookup_name.return_value = name
cs_mock.providers = [provider_mock.ref()]
# act
await crawl.crawl(tree, [])
# assert
assert provider_mock.crawl_downstream.called == crawl_expected
if error:
assert error in list(tree.values())[0].errors
@pytest.mark.asyncio
@pytest.mark.parametrize('attr', ['warnings', 'errors'])
async def test_crawl_case_do_not_crawl_downstream_node_with_warns_errors(attr, tree, provider_mock, cs_mock):
"""We should not crawl_downstream for node with any arbitrary warning or error"""
# arrange
provider_mock.lookup_name.return_value = 'dummy_name'
setattr(list(tree.values())[0], attr, {'DUMMY': True})
# act
await crawl.crawl(tree, [])
# assert
provider_mock.crawl_downstream.assert_not_called()
@pytest.mark.asyncio
async def test_crawl_case_crawl_downstream_uses_cache(tree, node_fixture_factory, provider_mock, cs_mock, event_loop):
"""Validate the calls to crawl_downstream for the same address are cached. Caching is only guaranteed for
different branches in the tree since siblings execute concurrently - and so we have to test a tree with more
depth > 1"""
# arrange
repeated_service_name = 'double_name'
singleton_service_name = 'single_name'
node2 = node_fixture_factory()
node2.address = 'foo' # must be different than list(tree.values())[0].address to avoid caching
node2_child = node.NodeTransport('foo_mux', 'bar_address')
tree['dummy2'] = node2
provider_mock.lookup_name.side_effect = [repeated_service_name, singleton_service_name, repeated_service_name]
provider_mock.crawl_downstream.side_effect = [[], [node2_child], []]
cs_mock.providers = [provider_mock.ref()]
# act
await crawl.crawl(tree, [])
await _wait_for_all_tasks_to_complete(event_loop)
# assert
assert 2 == provider_mock.crawl_downstream.call_count
@pytest.mark.asyncio
async def test_crawl_case_crawl_downstream_handles_timeout(tree, provider_mock, cs_mock, cli_args_mock, mocker):
"""Timeout is respected during crawl_downstream and results in a sys.exit"""
# arrange
cli_args_mock.timeout = .1
async def slow_crawl_downstream(address, connection):
await asyncio.sleep(1)
provider_mock.lookup_name.return_value = 'dummy'
provider_mock.crawl_downstream.side_effect = slow_crawl_downstream
cs_mock.providers = [provider_mock.ref()]
# act/assert
with pytest.raises(SystemExit) as e:
await crawl.crawl(tree, [])
assert True
@pytest.mark.asyncio
async def test_crawl_case_crawl_downstream_handles_exceptions(tree, provider_mock, cs_mock, cli_args_mock, mocker):
"""Any exceptions thrown by crawl_downstream are handled by exiting the program"""
# arrange
cli_args_mock.timeout = .1
provider_mock.lookup_name.return_value = 'dummy'
provider_mock.open_connection.side_effect = Exception('BOOM')
# act/assert
with pytest.raises(SystemExit):
await crawl.crawl(tree, [])
# handle Cycles
@pytest.mark.asyncio
async def test_crawl_case_cycle(tree, provider_mock, cs_mock):
"""Cycles should be detected, name lookup should still happen for them, but crawl_downstream should not"""
# arrange
cycle_service_name = 'foops_i_did_it_again'
provider_mock.lookup_name.return_value = cycle_service_name
# act
await crawl.crawl(tree, [cycle_service_name])
# assert
assert 'CYCLE' in list(tree.values())[0].warnings
provider_mock.lookup_name.assert_called_once()
provider_mock.crawl_downstream.assert_not_called()
@pytest.mark.asyncio
async def test_crawl_case_service_name_rewrite_cycle_detected(tree, provider_mock, cs_mock):
"""Validate cycles are detected for rewritten service names"""
# arrange
cycle_service_name = 'foops_i_did_it_again'
provider_mock.lookup_name.return_value = 'original_service_name'
list(tree.values())[0].crawl_strategy = cs_mock
cs_mock.rewrite_service_name.side_effect = None
cs_mock.rewrite_service_name.return_value = cycle_service_name
# act
await crawl.crawl(tree, [cycle_service_name])
# assert
assert 'CYCLE' in list(tree.values())[0].warnings
# Parsing of ProviderInterface::crawl_downstream
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol_mux,address,debug_identifier,num_connections,warnings,errors', [
('foo_mux', 'bar_address', 'baz_name', 100, [], []),
('foo_mux', 'bar_address', 'baz_name', None, [], []),
('foo_mux', 'bar_address', None, None, [], []),
('foo_mux', 'bar_address', 'baz_name', 0, ['DEFUNCT'], []),
('foo_mux', None, None, None, [], ['NULL_ADDRESS']),
])
async def test_crawl_case_crawl_results_parsed(protocol_mux, address, debug_identifier, num_connections, warnings, errors,
tree, provider_mock, cs_mock, event_loop):
"""Crawl results are parsed into Node objects. We detect 0 connections as a "DEFUNCT" node. `None` address
is acceptable, but is detected as a "NULL_ADDRESS" node"""
# arrange
seed = list(tree.values())[0]
child_nt = node.NodeTransport(protocol_mux, address, debug_identifier, num_connections)
provider_mock.lookup_name.side_effect = ['seed_name', 'child_name']
provider_mock.crawl_downstream.side_effect = [[child_nt], []]
cs_mock.providers = [provider_mock.ref()]
# act
await crawl.crawl(tree, [])
await _wait_for_all_tasks_to_complete(event_loop)
# assert
assert 1 == len(seed.children)
child: node.Node = seed.children[list(seed.children)[0]]
assert protocol_mux == child.protocol_mux
assert address == child.address
for warning in warnings:
assert warning in child.warnings
for error in errors:
assert error in child.errors
# Recursive calls to crawl::crawl()
@pytest.mark.asyncio
async def test_crawl_case_children_with_address_crawled(tree, provider_mock, cs_mock, event_loop, mocker):
"""Discovered children with an address are recursively crawled """
# arrange
child_nt = node.NodeTransport('dummy_protocol_mux', 'dummy_address')
provider_mock.lookup_name.side_effect = ['seed_name', 'child_name']
provider_mock.crawl_downstream.side_effect = [[child_nt], []]
cs_mock.providers = [provider_mock.ref()]
crawl_spy = mocker.patch('itsybitsy.crawl.crawl', side_effect=crawl.crawl)
# act
await crawl.crawl(tree, [])
await _wait_for_all_tasks_to_complete(event_loop)
# assert
assert 2 == crawl_spy.call_count
child_node = crawl_spy.await_args.args[0][list(crawl_spy.await_args.args[0])[0]]
assert 'dummy_address' == child_node.address
assert list(tree.values())[0].service_name == crawl_spy.await_args.args[1][0]
@pytest.mark.asyncio
async def test_crawl_case_children_without_address_not_crawled(tree, provider_mock, cs_mock, event_loop,
mocker):
"""Discovered children without an address are not recursively crawled """
# arrange
child_nt = node.NodeTransport('dummy_protocol_mux', None)
provider_mock.lookup_name.return_value = 'dummy'
provider_mock.crawl_downstream.return_value = [child_nt]
crawl_spy = mocker.patch('itsybitsy.crawl.crawl', side_effect=crawl.crawl)
# act
await crawl.crawl(tree, [])
await _wait_for_all_tasks_to_complete(event_loop)
# assert
assert 1 == crawl_spy.call_count
# Hints
@pytest.mark.asyncio
async def test_crawl_case_hint_attributes_set(tree, provider_mock, hint_mock, mocker, event_loop):
"""For hints used in crawling... attributes are correctly translated from the Hint the Node"""
# arrange
mocker.patch('itsybitsy.charlotte_web.hints', return_value=[hint_mock])
hint_nt = node.NodeTransport('dummy_protocol_mux', 'dummy_address', 'dummy_debug_id')
provider_mock.take_a_hint.return_value = [hint_nt]
provider_mock.lookup_name.side_effect = ['dummy', None]
providers_get_mock = mocker.patch('itsybitsy.providers.get_provider_by_ref', return_value=provider_mock)
# act
await crawl.crawl(tree, [])
await _wait_for_all_tasks_to_complete(event_loop)
# assert
assert list(list(tree.values())[0].children.values())[0].from_hint
assert list(list(tree.values())[0].children.values())[0].protocol == hint_mock.protocol
assert list(list(tree.values())[0].children.values())[0].service_name == hint_nt.debug_identifier
providers_get_mock.assert_any_call(hint_mock.instance_provider)
@pytest.mark.asyncio
async def test_crawl_case_hint_name_used(tree, provider_mock, hint_mock, mocker, event_loop):
"""Hint `debug_identifier` field is respected in crawling (and overwritten by new name, not overwritten by None)"""
# arrange
mocker.patch('itsybitsy.charlotte_web.hints', return_value=[hint_mock])
hint_nt = node.NodeTransport('dummy_protocol_mux', 'dummy_address', 'dummy_debug_id')
provider_mock.take_a_hint.return_value = [hint_nt]
provider_mock.lookup_name.side_effect = ['dummy', None]
# act
await crawl.crawl(tree, [])
await _wait_for_all_tasks_to_complete(event_loop)
# assert
assert list(list(tree.values())[0].children.values())[0].service_name == hint_nt.debug_identifier
# respect CLI args
@pytest.mark.asyncio
async def test_crawl_case_respect_cli_skip_protocol_mux(tree, provider_mock, cs_mock, cli_args_mock,
mocker, event_loop):
"""Children discovered on these muxes are neither included in the tree - nor crawled"""
# arrange
skip_this_protocol_mux = 'foo_mux'
cli_args_mock.skip_protocol_muxes = [skip_this_protocol_mux]
child_nt = node.NodeTransport(skip_this_protocol_mux, 'dummy_address')
provider_mock.lookup_name.return_value = 'bar_name'
provider_mock.crawl_downstream.return_value = [child_nt]
crawl_spy = mocker.patch('itsybitsy.crawl.crawl', side_effect=crawl.crawl)
# act
await crawl.crawl(tree, [])
await _wait_for_all_tasks_to_complete(event_loop)
# assert
assert 0 == len(list(tree.values())[0].children)
assert 1 == crawl_spy.call_count
@pytest.mark.asyncio
async def test_crawl_case_respect_cli_skip_protocols(tree, provider_mock, cs_mock, cli_args_mock, mocker):
"""Crawling of protocols configured to be "skipped" does not happen at all."""
# arrange
skip_this_protocol = 'FOO'
cli_args_mock.skip_protocols = [skip_this_protocol]
cs_mock.protocol = mocker.patch('itsybitsy.charlotte_web.Protocol', autospec=True)
cs_mock.protocol.ref = skip_this_protocol
provider_mock.lookup_name.return_value = 'bar_name'
# act
await crawl.crawl(tree, [])
# assert
provider_mock.crawl_downstream.assert_not_called()
@pytest.mark.asyncio
async def test_crawl_case_respect_cli_disable_providers(tree, provider_mock, cs_mock, cli_args_mock, mocker,
event_loop):
"""Children discovered which have been determined to use disabled providers - are neither included in the tree
nor crawled"""
# arrange
disable_this_provider = 'foo_provider'
cli_args_mock.disable_providers = [disable_this_provider]
child_nt = node.NodeTransport('dummy_mux', 'dummy_address')
provider_mock.lookup_name.return_value = 'bar_name'
provider_mock.crawl_downstream.return_value = [child_nt]
cs_mock.determine_child_provider.return_value = disable_this_provider
crawl_spy = mocker.patch('itsybitsy.crawl.crawl', side_effect=crawl.crawl)
# act
await crawl.crawl(tree, [])
await _wait_for_all_tasks_to_complete(event_loop)
# assert
assert 0 == len(list(tree.values())[0].children)
assert 1 == crawl_spy.call_count
@pytest.mark.asyncio
@pytest.mark.parametrize('child_blocking,grandchild_blocking,crawls_expected,downstream_crawls_expected',
[(False, False, 2, 1), (True, False, 2, 2)])
async def test_crawl_case_respect_cli_skip_nonblocking_grandchildren(child_blocking, grandchild_blocking,
crawls_expected, downstream_crawls_expected,
tree, provider_mock, protocol_mock, cs_mock,
cli_args_mock, mocker, event_loop):
"""When --skip-nonblocking-grandchildren is specified, include nonblocking children of the seed, but nowhere else"""
# arrange
cli_args_mock.skip_nonblocking_grandchildren = True
child_nt = node.NodeTransport('dummy_protocol_mux', 'dummy_address')
grandchild_nt = node.NodeTransport('dummy_protocol_mux_gc', 'dummy_address_gc')
provider_mock.lookup_name.side_effect = ['seed_name', 'child_name', 'grandchild_name']
provider_mock.crawl_downstream.side_effect = [[child_nt], [grandchild_nt], []]
type(protocol_mock).blocking = mocker.PropertyMock(side_effect=[True, child_blocking, grandchild_blocking])
cs_mock.protocol = protocol_mock
crawl_spy = mocker.patch('itsybitsy.crawl.crawl', side_effect=crawl.crawl)
# act
await crawl.crawl(tree, [])
await _wait_for_all_tasks_to_complete(event_loop)
# assert
assert crawl_spy.call_count == crawls_expected
assert provider_mock.crawl_downstream.call_count == downstream_crawls_expected
@pytest.mark.asyncio
async def test_crawl_case_respect_cli_max_depth(tree, node_fixture, provider_mock, cs_mock, cli_args_mock):
"""We should not crawl_downstream if max-depth is exceeded"""
# arrange
cli_args_mock.max_depth = 0
provider_mock.lookup_name.return_value = 'dummy_name'
# act
await crawl.crawl(tree, [])
# assert
provider_mock.crawl_downstream.assert_not_called()
@pytest.mark.asyncio
async def test_crawl_case_respect_cli_obfuscate(tree, node_fixture, cs_mock, provider_mock, cli_args_mock):
"""We need to test a child for protocol mux obfuscation since the tree is already populated with a fully hydrated
Node - which is past the point of obfuscation"""
# arrange
cli_args_mock.obfuscate = True
seed_service_name = 'actual_service_name_foo'
child_protocol_mux = 'child_actual_protocol_mux'
child_nt = node.NodeTransport(child_protocol_mux)
provider_mock.lookup_name.return_value = seed_service_name
provider_mock.lookup_name.return_value = 'dummy_service_name'
provider_mock.crawl_downstream.side_effect = [[child_nt], []]
cs_mock.providers = [provider_mock.ref()]
# act
await crawl.crawl(tree, [])
# assert
seed: node.Node = list(tree.values())[0]
child: node.Node = seed.children[list(seed.children)[0]]
assert seed.service_name != seed_service_name
assert child.protocol_mux != child_protocol_mux
# respect charlotte / charlotte_web configurations
@pytest.mark.asyncio
async def test_crawl_case_respect_cs_filter_service_name(tree, provider_mock, cs_mock):
"""We respect when a service name is configured to be skipped by a specific crawl strategy"""
# arrange
cs_mock.filter_service_name.return_value = True
provider_mock.lookup_name.return_value = 'bar_name'
# act
await crawl.crawl(tree, [])
# assert
cs_mock.filter_service_name.assert_called_once_with(list(tree.values())[0].service_name)
provider_mock.crawl_downstream.assert_not_called()
@pytest.mark.asyncio
async def test_crawl_case_respect_cs_service_name_rewrite(tree, provider_mock, cs_mock):
"""Validate service_name_rewrites are called and used"""
# arrange
service_name = 'foo_name'
rewritten_service_name = 'bar_name'
provider_mock.lookup_name.return_value = service_name
list(tree.values())[0].crawl_strategy = cs_mock
cs_mock.rewrite_service_name.side_effect = None
cs_mock.rewrite_service_name.return_value = rewritten_service_name
# act
await crawl.crawl(tree, [])
# assert
assert list(tree.values())[0].service_name == rewritten_service_name
@pytest.mark.asyncio
async def test_crawl_case_respect_charlotte_web_skip(tree, provider_mock, cs_mock, mocker):
"""Skip service name is respected for charlotte_web"""
# arrange
service_name = 'foo_name'
provider_mock.lookup_name.return_value = service_name
skip_function = mocker.patch('itsybitsy.charlotte_web.skip_service_name', return_value=True)
# act
await crawl.crawl(tree, [])
# assert
provider_mock.lookup_name.assert_called_once()
provider_mock.crawl_downstream.assert_not_called()
skip_function.assert_called_once_with(service_name)
```
|
{
"source": "jessehall3/go-camo",
"score": 3
}
|
#### File: go-camo/examples/python-hex.py
```python
import hashlib
import hmac
CAMO_HOST = 'https://img.example.com'
def camo_url(hmac_key, image_url):
if image_url.startswith("https:"):
return image_url
hexdigest = hmac.new(hmac_key, image_url, hashlib.sha1).hexdigest()
hexurl = image_url.encode('hex')
requrl = '%s/%s/%s' % (CAMO_HOST, hexdigest, hexurl)
return requrl
print camo_url("test", "http://golang.org/doc/gopher/frontpage.png")
# 'https://img.example.org/0f6def1cb147b0e84f39cbddc5ea10c80253a6f3/687474703a2f2f676f6c616e672e6f72672f646f632f676f706865722f66726f6e74706167652e706e67'
```
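The example above is Python 2 only (`print` statement, `str.encode('hex')`). A minimal Python 3 sketch of the same scheme, assuming the HMAC key is supplied as bytes:
```python
import hashlib
import hmac

CAMO_HOST = 'https://img.example.com'

def camo_url(hmac_key: bytes, image_url: str) -> str:
    # Already-secure URLs pass through untouched, as in the example above.
    if image_url.startswith("https:"):
        return image_url
    # HMAC-SHA1 digest of the URL, then the hex-encoded URL itself.
    hexdigest = hmac.new(hmac_key, image_url.encode('utf-8'), hashlib.sha1).hexdigest()
    hexurl = image_url.encode('utf-8').hex()
    return '%s/%s/%s' % (CAMO_HOST, hexdigest, hexurl)

print(camo_url(b"test", "http://golang.org/doc/gopher/frontpage.png"))
```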
|
{
"source": "jessehamner/OpenDataDay",
"score": 3
}
|
#### File: jessehamner/OpenDataDay/fixProj.py
```python
import sys
from osgeo import osr
def esriprj2standards(shapeprj_path):
prj_file = open(shapeprj_path, 'r')
prj_txt = prj_file.read()
srs = osr.SpatialReference()
srs.ImportFromESRI([prj_txt])
print 'Shape prj is: %s' % prj_txt
print 'WKT is: %s' % srs.ExportToWkt()
print 'Proj4 is: %s' % srs.ExportToProj4()
srs.AutoIdentifyEPSG()
print 'EPSG is: %s' % srs.GetAuthorityCode(None)
esriprj2standards(sys.argv[1])
```
|
{
"source": "jessehamner/WeatherWidget",
"score": 3
}
|
#### File: jessehamner/WeatherWidget/hwo.py
```python
import os
import re
import logging
import requests
from bs4 import BeautifulSoup
class HWO(object):
"""
Hazardous weather outlook (HWO) object class.
"""
def __init__(self, data, outputfile='hwo.txt'):
"""
Create an object and empty dictionary.
"""
self.data = data
self.outputfile = outputfile
self.hwo_text = ''
self.hwodict = dict(spotter=[],
dayone=[],
daystwothroughseven=[],
today_text='',
has_spotter=False
)
def get_hwo(self):
"""
Get the HTML-only Hazardous Weather Outlook. The raw text of this statement
is available inside
<body>
<div id="local"> <div id="localcontent">
<pre class="glossaryProduct">
(Text is here)
</pre>
"""
params_dict = {'site': self.data['hwo_site'],
'issuedby': self.data['nws_abbr'],
'product': 'HWO',
'format': 'txt',
'version': 1,
'glossary': 0
}
response = requests.get(self.data['defaults']['hwo_url'],
params=params_dict,
verify=False,
timeout=10)
html = response.text
soup = BeautifulSoup(html, 'html.parser')
pres = soup.body.find_all('pre')
for pretag in pres:
self.hwo_text = pretag.get_text()
if len(self.hwo_text) > 200:
cur = open(os.path.join(self.data['output_dir'], self.outputfile), 'w')
cur.write(self.hwo_text)
cur.close()
return self.hwo_text
return None
def split_hwo(self):
"""
Pull out today's hazardous weather outlook and spotter activation notice.
Return a slightly more compact text block of the two paragraphs.
"""
bodytext = self.hwo_text
logging.debug('Raw body text of HWO: \n%s', bodytext)
dayone = re.search(r'(\.DAY ONE.*?)(\.DAYS TWO THROUGH SEVEN.*?)', bodytext, re.DOTALL)
if dayone:
hwotext = re.sub(r'\n\n$', '', dayone.group(1))
hwotext = re.sub(r'\.{1,}DAY ONE[\.]{1,}', '', hwotext)
first_sentence = re.search(r'^(.*)\.', hwotext).group(1)
logging.debug('First sentence: %s', first_sentence)
hwotext = re.sub('\n', ' ', hwotext)
hwotext = nice_plumbing(hwotext)
first_info = re.sub(first_sentence, '', hwotext)
first_info = re.sub(r'^\s*\.*', '', first_info)
self.hwodict['dayone'] = [first_sentence.strip(), first_info.strip()]
daytwo = re.search('DAYS TWO THROUGH SEVEN(.*)SPOTTER', bodytext, re.DOTALL)
if daytwo:
daytwo = daytwo.group(1)
if daytwo:
logging.debug('DayTwo: %s', daytwo)
daytwo = re.sub(r'\n{1,}', ' ', daytwo)
daytwo = re.sub(r'\.{3,}\s*', ' ', daytwo)
first_sentence = re.search(r'^(.*?)\.', daytwo).group(1)
logging.debug('First sentence: %s', first_sentence)
second_info = re.sub(first_sentence, '', daytwo)
second_info = nice_plumbing(second_info)
self.hwodict['daystwothroughseven'] = [first_sentence.strip(),
second_info.strip()]
spotter = re.search(r'(\.*SPOTTER INFORMATION STATEMENT.*?)(\s*\$\$)',
bodytext, re.DOTALL)
if spotter:
spottext = nice_plumbing(spotter.group(1))
spottext = re.sub(r'SPOTTER INFORMATION STATEMENT[\.]{1,}',
'', spottext)
spottext = re.sub('\n', ' ', spottext)
self.hwodict['spotter'] = ['Spotter Information Statement',
spottext.strip()]
if spottext:
self.hwodict['today_text'] = '{0}{1}\n\n'.format(self.hwodict['dayone'][1],
spottext)
if re.search('Spotter activation is not expected at this time', spottext):
return True
self.hwodict['has_spotter'] = True
return True
def nice_plumbing(text):
"""
Try and regex/tidy some of the text.
"""
return_text = re.sub(r'^\s*\.*', '', text)
return_text = re.sub(r'\.\s+\.$', '.', return_text)
return_text = re.sub(r'\n+$', '', return_text)
return return_text
```
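A hypothetical usage sketch for the HWO class above; the station identifiers, output directory, and product URL are placeholder values inferred from the dictionary keys the class reads, not values taken from the project's real configuration:
```python
# Placeholder configuration; keys mirror what HWO.get_hwo() looks up.
data = {
    'hwo_site': 'NWS',
    'nws_abbr': 'FWD',
    'output_dir': '/tmp',
    'defaults': {'hwo_url': 'https://forecast.weather.gov/product.php'},
}

hwo = HWO(data, outputfile='hwo.txt')
if hwo.get_hwo():          # fetch and cache the raw outlook text
    hwo.split_hwo()        # populate hwo.hwodict
    print(hwo.hwodict['dayone'])
```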
#### File: jessehamner/WeatherWidget/outage.py
```python
from __future__ import print_function
import re
import logging
import datetime
import requests
from bs4 import BeautifulSoup
class Outage(object):
"""
Fan out and check for outages, then store the info and pass it back up.
"""
def __init__(self, data=''):
"""
Instantiate the Outage object and set up common variables.
"""
self.data = data
self.defaults = data['defaults']
self.ftm_params = {'site': 'NWS',
'issuedby': data['radar_station'],
'product': 'FTM',
'format': 'CI',
'version': 1,
'glossary': 0
}
self.ftm_text = ''
self.return_text = ''
def check_outage(self):
"""
Check a webpage for information about any outages at the radar site.
The product is called a 'Free Text Message' (FTM).
'https://forecast.weather.gov/product.php?site=NWS
&issuedby=FWS&product=FTM&format=CI&version=1&glossary=0'
The information is identical to the HWO call.
"""
print('ftm parameter dict: {0}'.format(self.ftm_params))
try:
response = requests.get(self.defaults['hwo_url'],
params=self.ftm_params,
verify=True, timeout=10)
except requests.exceptions.ConnectionError as exc:
print('ConnectionError: {0}'.format(exc))
return None
html = response.text
soup = BeautifulSoup(html, 'html.parser')
if not soup:
print('WARNING: no returned data from html request for outages.')
return None
try:
pres = soup.body.find_all('pre')
except TypeError:
return None
except AttributeError:
return None
for pretag in pres:
self.ftm_text = pretag.get_text()
# print('ftm_text: {0}'.format(ftm_text))
if len(self.ftm_text) > 100:
self.ftm_text = self.ftm_text.split('\n')
return True
return False
def parse_outage(self):
"""
Read the outage text, if any, and determine:
- should it be displayed (i.e. is it timely and current?)
- what text is relevant (but default to "all of the text")
"""
if not self.ftm_text:
print('No outage text seen. Returning -None-')
return None
message_date = ''
for line in self.ftm_text:
            line = line.strip()
if re.search(r'^\s*$', line):
continue
if re.search(r'^\s*FTM|^000\s*$|^NOUS', line):
continue
if re.search('MESSAGE DATE:', line, flags=re.I):
message_date = re.sub(r'MESSAGE DATE:\s+', '', line, flags=re.I)
print('Date of issue: {0}'.format(message_date))
dateobj = datetime.datetime.strptime(message_date, '%b %d %Y %H:%M:%S')
today = datetime.datetime.now()
if (today - dateobj) > datetime.timedelta(days=1):
print('Outage info is older than one day -- ignoring.')
return None
else:
self.return_text = str('{0}\nNWS FTM NOTICE:'.format(self.return_text))
else:
self.return_text = str('{0} {1}'.format(self.return_text, line))
if message_date:
self.return_text = re.sub(' ', ' ', self.return_text)
return self.return_text.strip()
return None
```
|
{
"source": "jessehon/etsy-convos",
"score": 2
}
|
#### File: etsy_convos/convos/filters.py
```python
from rest_framework import filters
class ActiveForUserFilter(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
return queryset.active_for(request.user)
class ThreadFolderFilter(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
folder = request.QUERY_PARAMS.get('folder', None)
if folder is not None:
queryset = queryset.folder_for(folder, request.user)
return queryset
```
#### File: etsy_convos/convos/serializers.py
```python
from rest_framework import serializers
from .models import *
class BaseConvoMessageSerializer(serializers.ModelSerializer):
is_read = serializers.SerializerMethodField()
class Meta:
model = ConvoMessage
def get_is_read(self, obj):
return obj.get_is_read_for(self.context['request'].user)
class ConvoMessageSerializer(BaseConvoMessageSerializer):
subject = serializers.CharField(source='thread.subject')
class Meta(BaseConvoMessageSerializer.Meta):
fields = ('id', 'thread', 'sender', 'recipient', 'subject', 'body', 'is_read',)
read_only_fields = ('thread', 'sender',)
def create(self, validated_data):
thread_data = validated_data.get('thread', None)
thread = ConvoThread.objects.create(**thread_data)
validated_data['thread'] = thread
return ConvoMessage.objects.create(**validated_data)
class ConvoMessageNestedSerializer(BaseConvoMessageSerializer):
class Meta(BaseConvoMessageSerializer.Meta):
fields = ('id', 'sender', 'recipient', 'body', 'is_read',)
read_only_fields = ('sender', 'recipient',)
class ConvoMessageNestedPreviewSerializer(BaseConvoMessageSerializer):
class Meta(BaseConvoMessageSerializer.Meta):
fields = ('id', 'sender', 'recipient', 'body_excerpt', 'is_read',)
class BaseConvoThreadSerializer(serializers.ModelSerializer):
messages = serializers.SerializerMethodField()
last_message = serializers.SerializerMethodField()
class Meta:
model = ConvoThread
def get_messages(self, obj):
messages = obj.get_messages_for(self.context['request'].user)
return ConvoMessageNestedPreviewSerializer(instance=messages, context=self.context, many=True).data
def get_last_message(self, obj):
message = obj.get_last_message_for(self.context['request'].user)
return ConvoMessageNestedPreviewSerializer(instance=message, context=self.context).data
class ConvoThreadSerializer(BaseConvoThreadSerializer):
class Meta(BaseConvoThreadSerializer.Meta):
fields = ('id', 'subject', 'messages')
read_only_fields = ('messages')
class ConvoThreadPreviewSerializer(BaseConvoThreadSerializer):
class Meta(BaseConvoThreadSerializer.Meta):
fields = ('id', 'subject', 'last_message')
```
|
{
"source": "jessehood/djorg",
"score": 2
}
|
#### File: djorg/bookmarks/views.py
```python
from django.shortcuts import render, get_object_or_404
from django.views.decorators.http import require_http_methods, require_POST, require_GET
from django.views.generic.edit import UpdateView
from django.urls import reverse, reverse_lazy
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from .models import Bookmark
from .forms import BookmarkForm
def index(request):
if request.user.is_anonymous:
return HttpResponseRedirect(reverse('index'))
context = {'bookmarks': Bookmark.objects.filter(user=request.user), 'form': BookmarkForm()}
return render(request, 'bookmarks/index.html', context)
@require_POST
def create(request):
"""Takes the bookmark form data and saves it to the database."""
form = BookmarkForm(request.POST)
if form.is_valid():
bookmark = form.save(commit=False)
bookmark.user = request.user
bookmark.save()
return HttpResponseRedirect(reverse('index'))
class BookmarkUpdate(UpdateView):
model = Bookmark
fields = ['name', 'url', 'notes']
template_name = 'bookmarks/edit.html'
success_url = reverse_lazy('index')
@require_GET
def delete(request, pk):
bookmark = get_object_or_404(Bookmark, pk=pk)
bookmark.delete()
return HttpResponseRedirect(reverse('index'))
```
#### File: djorg/notes/schema.py
```python
from graphene_django import DjangoObjectType
import graphene
from .models import Note as NoteModel
from djorg.settings import DEBUG
class Note(DjangoObjectType):
"""Transform data to Graphene representation"""
class Meta:
model = NoteModel
interfaces = (graphene.relay.Node, )
class Query(graphene.ObjectType):
"""Expose data results."""
notes = graphene.List(Note)
def resolve_notes(self, info):
user = info.context.user
if DEBUG:
return NoteModel.objects.all()
if user.is_anonymous:
return NoteModel.objects.none()
else:
return NoteModel.objects.filter(user=user)
schema = graphene.Schema(query=Query)
```
|
{
"source": "jessehorne/anonchat.io",
"score": 3
}
|
#### File: anonchat.io/lib/ConfParser.py
```python
from ConfigParser import ConfigParser
def parse(filename):
config_dir = ""
parser = ConfigParser()
parser.read(config_dir + filename + ".conf")
return parser._sections[filename]
```
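A hypothetical example of how parse() is meant to be called. It assumes a config file named after its own section, e.g. a server.conf containing a [server] section; the file name and keys below are illustrative only:
```python
# server.conf (illustrative):
#   [server]
#   host = 0.0.0.0
#   port = 5555

settings = parse("server")                 # returns the [server] section as a dict-like mapping
print(settings["host"], settings["port"])
```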
|
{
"source": "jessehorne/life",
"score": 3
}
|
#### File: jessehorne/life/life.py
```python
import math
import random
import pygame
from pygame.locals import *
class Life:
def __init__(self):
self._running = True
self._display_surf = None
# life init
self.cells_x = 30
self.cells_y = 30
self.cells_size = 1000/self.cells_x
self.bg_color = (20,11,34)
self.line_color = (17,29,50)
self.cells_color = (42,42,126)
self.cells_color_dark = (42,100,126)
self.life_mode = "plant"
self.nodes = []
for y in xrange(self.cells_y):
for x in xrange(self.cells_x):
node = {}
node["start_x"] = (x * self.cells_size)
node["start_y"] = (y * self.cells_size)
node["alive"] = False
self.nodes.append(node)
# keys to change self.life_mode
self.plant_key = pygame.K_ESCAPE
self.play_key = pygame.K_p
self.life_time_interval = .1 # seconds in between each generation
self.life_time_clock = 0
# end life init
self.getTicksLastFrame = 0
self.size = self.width, self.height = (self.cells_x*self.cells_size), (self.cells_y*self.cells_size)
def find_node(self, start_x, start_y):
for node in self.nodes:
if node["start_x"] == start_x and node["start_y"] == start_y:
return node
break
return False
# will calculate 'gens' generations
def calc_generation(self, gens):
for i in xrange(gens):
alives = []
deads = []
for node in self.nodes:
neighbor_count = 0 # number of alive neighbors for alive cells_y
# up-left
up_left = self.find_node(node["start_x"] - self.cells_size, node["start_y"] - self.cells_size)
if up_left != False:
if up_left["alive"] == True:
neighbor_count += 1
# up
up = self.find_node(node["start_x"], node["start_y"] - self.cells_size)
if up != False:
if up["alive"] == True:
neighbor_count += 1
# up-right
up_right = self.find_node(node["start_x"] + self.cells_size, node["start_y"] - self.cells_size)
if up_right != False:
if up_right["alive"] == True:
neighbor_count += 1
# left
left = self.find_node(node["start_x"] - self.cells_size, node["start_y"])
if left != False:
if left["alive"] == True:
neighbor_count += 1
# right
right = self.find_node(node["start_x"] + self.cells_size, node["start_y"])
if right != False:
if right["alive"] == True:
neighbor_count += 1
# down-left
down_left = self.find_node(node["start_x"] - self.cells_size, node["start_y"] + self.cells_size)
if down_left != False:
if down_left["alive"] == True:
neighbor_count += 1
# down
down = self.find_node(node["start_x"], node["start_y"] + self.cells_size)
if down != False:
if down["alive"] == True:
neighbor_count += 1
# down-right
down_right = self.find_node(node["start_x"] + self.cells_size, node["start_y"] + self.cells_size)
if down_right != False:
if down_right["alive"] == True:
neighbor_count += 1
if node["alive"]:
if neighbor_count < 2:
deads.append(node)
if neighbor_count == 2 or neighbor_count == 3:
alives.append(node)
if neighbor_count > 3:
deads.append(node)
else:
if neighbor_count == 3:
alives.append(node)
for node in self.nodes:
for alive in alives:
if alive == node:
node["alive"] = True
for dead in deads:
if dead == node:
node["alive"] = False
def on_init(self):
pygame.init()
self._display_surf = pygame.display.set_mode(self.size, pygame.HWSURFACE | pygame.DOUBLEBUF)
pygame.display.set_caption("Life")
self._running = True
def on_event(self, event):
if event.type == pygame.QUIT:
self._running = False
elif event.type == pygame.MOUSEBUTTONUP:
if self.life_mode == "plant":
start_x = math.floor(self.mouse_x/self.cells_size*self.cells_size)
start_y = math.floor(self.mouse_y/self.cells_size*self.cells_size)
for node in self.nodes:
if node["start_x"] == start_x and node["start_y"] == start_y:
node["alive"] = not node["alive"]
break
elif event.type == pygame.KEYUP:
if event.key == self.plant_key:
self.life_mode = "plant"
elif event.key == self.play_key:
self.life_mode = "play"
elif event.key == pygame.K_MINUS:
if self.life_time_interval > 0.0:
self.life_time_interval -= 0.1
elif event.key == pygame.K_EQUALS:
self.life_time_interval += 0.1
elif event.key == pygame.K_c:
for node in self.nodes:
node["alive"] = False
self.font = pygame.font.SysFont("monospace", 15)
def on_loop(self):
self.mouse_x, self.mouse_y = pygame.mouse.get_pos()
t = pygame.time.get_ticks()
# deltaTime in seconds.
deltaTime = (t - self.getTicksLastFrame) / 1000.0
self.getTicksLastFrame = t
if self.life_mode == "play":
self.life_time_clock += deltaTime
# generation clock
if self.life_time_clock > self.life_time_interval:
self.calc_generation(1)
self.life_time_clock = 0
def on_render(self):
# background color
self._display_surf.fill(self.bg_color)
# draw mouse square
if self.life_mode == "plant":
start_x = math.floor(self.mouse_x/self.cells_size)*self.cells_size
start_y = math.floor(self.mouse_y/self.cells_size)*self.cells_size
end_x = self.cells_size
end_y = self.cells_size
pygame.draw.rect(self._display_surf, self.cells_color_dark, (start_x, start_y, end_x, end_y))
# draw nodes
for node in self.nodes:
if node["alive"]:
pygame.draw.rect(self._display_surf, self.cells_color, (node["start_x"], node["start_y"], self.cells_size, self.cells_size))
# grid lines
for y in xrange(0, self.cells_y-1):
start_x = 0
start_y = y * self.cells_size + self.cells_size
end_x = self.height
end_y = y * self.cells_size + self.cells_size
pygame.draw.line(self._display_surf, self.line_color, (start_x, start_y), (end_x, end_y), 1)
for x in xrange(0, self.cells_x-1):
start_x = x * self.cells_size + self.cells_size
start_y = 0
end_x = x * self.cells_size + self.cells_size
end_y = self.width
pygame.draw.line(self._display_surf, self.line_color, (start_x, start_y), (end_x, end_y), 1)
# print details to screen
speed_label = self.font.render("[-, +] Speed = {} seconds".format(self.life_time_interval), 1, (255, 255, 255))
self._display_surf.blit(speed_label, (20, 20))
mode_label = self.font.render("['p' for Play, 'escape' for 'Plant'] Mode = {}".format(self.life_mode.capitalize()), 1, (255, 255, 255))
self._display_surf.blit(mode_label, (20, 40))
clear_label = self.font.render("['c' to clear]", 1, (255, 255, 255))
self._display_surf.blit(clear_label, (20, 60))
# update display
pygame.display.update()
def on_cleanup(self):
pygame.quit()
def on_execute(self):
if self.on_init() == False:
self._running = False
while self._running:
for event in pygame.event.get():
self.on_event(event)
self.on_loop()
self.on_render()
self.on_cleanup()
if __name__ == "__main__":
life = Life()
life.on_execute()
```
|
{
"source": "jessehu312/NeighbourNetwork",
"score": 2
}
|
#### File: app/controllers/home.py
```python
from flask import Blueprint, send_from_directory, render_template, request
from firebase_admin import auth
from app.models.user import User
from sqlalchemy import func, distinct
from app.database import db
blueprint = Blueprint('home', __name__)
@blueprint.route('/')
def root():
user = None
try:
id_token = request.cookies.get('id_token')
decoded_token = auth.verify_id_token(id_token)
user = User.query.get(decoded_token['uid'])
except:
pass
# this user is not logged in or session has expired
content = 'null'
if user:
content = user.to_dict()
community_count = 0
try:
community_count = db.session.query(func.count(distinct(User.zip_code)))[0][0]
except:
pass
return render_template('index.html', **{'content':content, 'community_count': community_count})
@blueprint.route('/<path:filepath>')
def serve(filepath):
return send_from_directory('./app/static', filename=filepath)
```
|
{
"source": "jessehub/integrations-core",
"score": 2
}
|
#### File: datadog_checks/vault/vault.py
```python
from time import time as timestamp
import requests
from simplejson import JSONDecodeError
from datadog_checks.checks import AgentCheck
from datadog_checks.config import is_affirmative
from datadog_checks.utils.containers import hash_mutable
from .errors import ApiUnreachable
class Vault(AgentCheck):
CHECK_NAME = 'vault'
DEFAULT_API_VERSION = '1'
EVENT_LEADER_CHANGE = 'vault.leader_change'
SERVICE_CHECK_CONNECT = 'vault.can_connect'
SERVICE_CHECK_UNSEALED = 'vault.unsealed'
SERVICE_CHECK_INITIALIZED = 'vault.initialized'
HTTP_CONFIG_REMAPPER = {
'ssl_verify': {'name': 'tls_verify'},
'ssl_cert': {'name': 'tls_cert'},
'ssl_private_key': {'name': 'tls_private_key'},
'ssl_ca_cert': {'name': 'tls_ca_cert'},
'ssl_ignore_warning': {'name': 'tls_ignore_warning'},
}
def __init__(self, name, init_config, instances):
super(Vault, self).__init__(name, init_config, instances)
self.api_versions = {
'1': {'functions': {'check_leader': self.check_leader_v1, 'check_health': self.check_health_v1}}
}
self.config = {}
if 'client_token' in self.instance:
self.http.options['headers']['X-Vault-Token'] = self.instance['client_token']
def check(self, instance):
config = self.get_config(instance)
if config is None:
return
api = config['api']
tags = list(config['tags'])
# We access the version of the Vault API corresponding to each instance's `api_url`.
try:
api['check_leader'](config, tags)
api['check_health'](config, tags)
except ApiUnreachable:
raise
self.service_check(self.SERVICE_CHECK_CONNECT, AgentCheck.OK, tags=tags)
def check_leader_v1(self, config, tags):
url = config['api_url'] + '/sys/leader'
leader_data = self.access_api(url, tags)
is_leader = is_affirmative(leader_data.get('is_self'))
tags.append('is_leader:{}'.format('true' if is_leader else 'false'))
self.gauge('vault.is_leader', int(is_leader), tags=tags)
current_leader = leader_data.get('leader_address')
previous_leader = config['leader']
if config['detect_leader'] and current_leader:
if previous_leader is not None and current_leader != previous_leader:
self.event(
{
'timestamp': timestamp(),
'event_type': self.EVENT_LEADER_CHANGE,
'msg_title': 'Leader change',
'msg_text': 'Leader changed from `{}` to `{}`.'.format(previous_leader, current_leader),
'alert_type': 'info',
'source_type_name': self.CHECK_NAME,
'host': self.hostname,
'tags': tags,
}
)
config['leader'] = current_leader
def check_health_v1(self, config, tags):
url = config['api_url'] + '/sys/health'
health_params = {'standbyok': True, 'perfstandbyok': True}
health_data = self.access_api(url, tags, params=health_params)
cluster_name = health_data.get('cluster_name')
if cluster_name:
tags.append('cluster_name:{}'.format(cluster_name))
vault_version = health_data.get('version')
if vault_version:
tags.append('vault_version:{}'.format(vault_version))
unsealed = not is_affirmative(health_data.get('sealed'))
if unsealed:
self.service_check(self.SERVICE_CHECK_UNSEALED, AgentCheck.OK, tags=tags)
else:
self.service_check(self.SERVICE_CHECK_UNSEALED, AgentCheck.CRITICAL, tags=tags)
initialized = is_affirmative(health_data.get('initialized'))
if initialized:
self.service_check(self.SERVICE_CHECK_INITIALIZED, AgentCheck.OK, tags=tags)
else:
self.service_check(self.SERVICE_CHECK_INITIALIZED, AgentCheck.CRITICAL, tags=tags)
def get_config(self, instance):
instance_id = hash_mutable(instance)
config = self.config.get(instance_id)
if config is None:
config = {}
try:
api_url = instance['api_url']
api_version = api_url[-1]
if api_version not in self.api_versions:
self.log.warning(
'Unknown Vault API version `{}`, using version '
'`{}`'.format(api_version, self.DEFAULT_API_VERSION)
)
api_url = api_url[:-1] + self.DEFAULT_API_VERSION
api_version = self.DEFAULT_API_VERSION
config['api_url'] = api_url
config['api'] = self.api_versions[api_version]['functions']
except KeyError:
self.log.error('Vault configuration setting `api_url` is required')
return
config['tags'] = instance.get('tags', [])
# Keep track of the previous cluster leader to detect changes.
config['leader'] = None
config['detect_leader'] = is_affirmative(instance.get('detect_leader'))
self.config[instance_id] = config
return config
def access_api(self, url, tags, params=None):
try:
response = self.http.get(url, params=params)
response.raise_for_status()
json_data = response.json()
except requests.exceptions.HTTPError:
msg = 'The Vault endpoint `{}` returned {}.'.format(url, response.status_code)
self.service_check(self.SERVICE_CHECK_CONNECT, AgentCheck.CRITICAL, message=msg, tags=tags)
self.log.exception(msg)
raise ApiUnreachable
except JSONDecodeError:
msg = 'The Vault endpoint `{}` returned invalid json data.'.format(url)
self.service_check(self.SERVICE_CHECK_CONNECT, AgentCheck.CRITICAL, message=msg, tags=tags)
self.log.exception(msg)
raise ApiUnreachable
except requests.exceptions.Timeout:
msg = 'Vault endpoint `{}` timed out after {} seconds'.format(url, self.http.options['timeout'])
self.service_check(self.SERVICE_CHECK_CONNECT, AgentCheck.CRITICAL, message=msg, tags=tags)
self.log.exception(msg)
raise ApiUnreachable
except (requests.exceptions.RequestException, requests.exceptions.ConnectionError):
msg = 'Error accessing Vault endpoint `{}`'.format(url)
self.service_check(self.SERVICE_CHECK_CONNECT, AgentCheck.CRITICAL, message=msg, tags=tags)
self.log.exception(msg)
raise ApiUnreachable
return json_data
```
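For reference, a hedged sketch of the per-instance options this check reads in get_config() and check(); in a real Agent these normally come from the integration's YAML configuration, and the values below are placeholders:
```python
# Placeholder instance configuration exercising the keys read above.
instance = {
    'api_url': 'http://localhost:8200/v1',  # trailing '1' selects the v1 API functions
    'tags': ['env:dev'],
    'detect_leader': True,
    # 'client_token': '...',                # optional; sent as the X-Vault-Token header
}
```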
|
{
"source": "jessehylton/Podrum",
"score": 2
}
|
#### File: src/podrum/Player.py
```python
from podrum.network.PacketPool import PacketPool
class Player:
connection = None
server = None
logger = None
    address = None
name = None
locale = None
randomId = None
uuid = None
xuid = None
skin = None
viewDistance = None
gamemode = 0
pitch = 0
yaw = 0
headYaw = 0
onGround = False
platformChatId = ''
deviceOS = None
deviceModel = None
    deviceId = None
def __init__(self, connection, address, logger, server):
self.connection = connection
self.address = address
self.logger = logger
self.server = server
```
#### File: podrum/utils/Logger.py
```python
from datetime import datetime
from podrum.utils.TextFormat import TextFormat
TextFormat = TextFormat()
class Logger:
def log(type_, content):
time = datetime.now()
if type_ == 'info':
print(f'{TextFormat.BLUE}[INFO: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}')
elif type_ == 'warn':
print(f'{TextFormat.YELLOW}[WARNING: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}')
elif type_ == 'error':
print(f'{TextFormat.RED}[ERROR: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}')
elif type_ == 'success':
print(f'{TextFormat.GREEN}[SUCCESS: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}')
elif type_ == "emergency":
print(f'{TextFormat.GOLD}[EMERGENCY: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}')
elif type_ == "alert":
print(f'{TextFormat.PURPLE}[ALERT: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}')
elif type_ == "notice":
print(f'{TextFormat.AQUA}[NOTICE: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}')
elif type_ == "critical":
print(f'{TextFormat.RED}[CRITICAL: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}')
elif type_ == "debug":
            print(f'{TextFormat.GRAY}[DEBUG: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}')
else:
print(f'[{type_.upper()}: {time.strftime("%H:%M")}]{content}')
```
#### File: podrum/utils/Utils.py
```python
import base64
import binascii
import json
import os
import signal
import sys
import socket
import time
import urllib.request
import hmac
import hashlib
import math
class Utils:
def getOS():
if sys.platform == 'linux' or sys.platform == 'linux2':
return 'linux'
elif sys.platform == 'darwin':
return 'osx'
elif sys.platform == 'win32' or sys.platform == 'win64':
return 'windows'
def killServer():
os.kill(os.getpid(), signal.SIGTERM)
def getPrivateIpAddress():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
return ip
def getPublicIpAddress():
ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
return ip
def microtime(get_as_float = False) :
if get_as_float:
return time.time()
else:
return '%f %d' % math.modf(time.time())
def substr(string, start, length = None):
if start < 0:
start = start + len(string)
if not length:
return string[start:]
elif length > 0:
return string[start:start + length]
else:
return string[start:length]
def hex2bin(hexdec):
if hexdec == 'x':
return False
if hexdec == '':
return False
dec = int(hexdec, 16)
b = binascii.unhexlify('%x' % dec)
return b
def binToHex(b):
return binascii.hexlify(b)
def HMACSHA256(data, secret):
encodedData = data.encode()
byteSecret = secret.encode()
return hmac.new(byteSecret, encodedData, hashlib.sha256).hexdigest().upper()
def base64UrlEncode(data):
return base64.urlsafe_b64encode(data.encode()).replace(b"=", b"").decode()
def base64UrlDecode(data):
        # restore stripped base64 padding (JWT segments omit it) before decoding
        return base64.urlsafe_b64decode(data + "=" * (-len(data) % 4)).decode()
def encodeJWT(header, payload, secret):
body = Utils.base64UrlEncode(json.dumps(header)) + "." + Utils.base64UrlEncode(json.dumps(payload))
secret = Utils.HMACSHA256(body, secret)
return body + "." + Utils.base64UrlEncode(secret)
def decodeJWT(token: str):
[headB64, payloadB64, sigB64] = token.split(".")
rawPayloadJSON = Utils.base64UrlDecode(payloadB64)
if rawPayloadJSON == False:
raise Exception("Payload base64 is invalid and cannot be decoded")
decodedPayload = json.loads(rawPayloadJSON)
if isinstance(decodedPayload, str):
decodedPayload = json.loads(decodedPayload)
if not isinstance(decodedPayload, dict):
raise Exception("Decoded payload should be dict, " + str(type(decodedPayload).__name__) + " received")
return decodedPayload
```
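A quick, hypothetical round trip through the JWT helpers above (run alongside the Utils class; header, payload, and secret are illustrative values only):
```python
header = {"alg": "HS256", "typ": "JWT"}
payload = {"name": "podrum"}

token = Utils.encodeJWT(header, payload, "not-a-real-secret")
print(Utils.decodeJWT(token))   # -> {'name': 'podrum'}
```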
#### File: podrum/wizard/Parser.py
```python
import os
from podrum.lang import Base
class Parser:
def checkYesNo(str):
str = str.lower()
if str == 'y' or str == 'yes':
return True
elif str == 'n' or str == 'no':
return False
else:
return
def checkIfLangExists(str):
path = os.getcwd() + '/src/podrum/lang/'
allLangs = Base.Base.getLangNames(path)
if(str in allLangs):
return True
else:
return False
```
#### File: pyraklib/server/ServerHandler.py
```python
from ..Binary import Binary
from ..PyRakLib import PyRakLib
from ..protocol import EncapsulatedPacket
import time
from ..server import PyRakLibServer, ServerInstance
class ServerHandler:
server = None
instance = None
def __init__(self, server: PyRakLibServer, instance: ServerInstance):
self.server = server
self.instance = instance
def sendEncapsulated(self, identifier: str, packet: bytearray, flags: int = PyRakLib.PRIORITY_NORMAL):
buffer = ""
buffer += chr(PyRakLib.PACKET_ENCAPSULATED)
buffer += chr(len(identifier))
buffer += identifier
buffer += chr(flags)
buffer += packet.toBinary(True)
self.server.pushMainToThreadPacket(buffer)
def sendRaw(self, address: str, port: int, payload: bytearray):
buffer = chr(PyRakLib.PACKET_RAW) + chr(len(address)) + address + str(Binary.writeShort(port)) + payload
self.server.pushMainToThreadPacket(buffer)
def closeSession(self, identifier: str, reason: str):
buffer = chr(PyRakLib.PACKET_CLOSE_SESSION) + chr(len(identifier)) + identifier + chr(len(reason)) + reason
self.server.pushMainToThreadPacket(buffer)
def sendOption(self, name: str, value: str):
buffer = chr(PyRakLib.PACKET_SET_OPTION) + chr(len(name)) + name + value
self.server.pushMainToThreadPacket(buffer)
def blockAddress(self, address: str, timeout: int):
buffer = chr(PyRakLib.PACKET_BLOCK_ADDRESS) + chr(len(address)) + address + str(Binary.writeInt(timeout))
self.server.pushMainToThreadPacket(buffer)
def shutdown(self):
self.server.shutdown()
buffer = chr(PyRakLib.PACKET_SHUTDOWN)
self.server.pushMainToThreadPacket(buffer)
time.sleep(50000 / 1000000.0) # Sleep for 1 tick
def emergencyShutdown(self):
self.server.shutdown()
self.server.pushMainToThreadPacket("\x7f") # Emergency Shutdown
def invalidSession(self, identifier):
buffer = chr(PyRakLib.PACKET_INVALID_SESSION) + chr(len(identifier)) + identifier
self.server.pushMainToThreadPacket(buffer)
def handlePacket(self):
packet = self.server.readThreadToMainPacket()
if packet == None:
return
if len(packet) > 0:
id = ord(packet[0])
offset = 1
if id == PyRakLib.PACKET_ENCAPSULATED:
offset += 1
length = ord(packet[offset])
identifier = packet[offset:offset+length]
offset += length + 1
flags = ord(packet[offset])
buffer = packet[offset:]
self.instance.handleEncapsulated(identifier, EncapsulatedPacket.fromBinary(buffer, True), flags)
elif id == PyRakLib.PACKET_RAW:
length = ord(packet[offset])
offset += 1
address = packet[offset:offset+length]
offset += length
port = Binary.readShort(packet[offset:offset+2])
offset += 2
payload = packet[offset:]
self.instance.handleRaw(address, port, payload)
elif id == PyRakLib.PACKET_SET_OPTION:
length = ord(packet[offset])
offset += 1
name = packet[offset:offset+length]
offset += length
value = packet[offset:]
self.instance.handleOption(name, value)
elif id == PyRakLib.PACKET_OPEN_SESSION:
offset += 1
length = ord(packet[offset])
identifier = packet[offset:offset+length]
offset += length + 1
length = ord(packet[offset])
address = packet[offset:offset+length]
                offset += length
port = Binary.readShort(packet[offset:offset+2])
offset += 2
clientID = Binary.readLong(packet[offset:offset+8])
self.instance.openSession(identifier, address, port, clientID)
elif id == PyRakLib.PACKET_CLOSE_SESSION:
length = ord(packet[offset])
offset += 1
identifier = packet[offset:offset+length]
offset += length
length = ord(packet[offset])
offset += 1
reason = packet[offset:offset+length]
self.instance.closeSession(identifier, reason)
elif id == PyRakLib.PACKET_INVALID_SESSION:
offset += 1
length = ord(packet[offset])
identifier = packet[offset:offset+length]
self.instance.closeSession(identifier, "Invalid session")
elif id == PyRakLib.PACKET_ACK_NOTIFICATION:
offset += 1
length = ord(packet[offset])
identifier = packet[offset:offset+length]
offset += length
identifierACK = Binary.readInt(packet[offset:offset+4])
self.instance.notifyACK(identifier, identifierACK)
return True
return False
```
|
{
"source": "jesseinit/feather-insure",
"score": 2
}
|
#### File: feather-insure/admin/admin_model.py
```python
from app import admin, db
from uuid import uuid4
from sqlalchemy.dialects.postgresql import UUID
from utils.model_utils import UtilityMixin, PlansView
from sqlalchemy import func
class Plans(UtilityMixin, db.Model): # type: ignore
""" Plans model for storing various insurance plans """
id = db.Column(
UUID(as_uuid=True),
unique=True,
nullable=False,
default=lambda: uuid4().hex,
primary_key=True,
)
plan_name = db.Column(db.String(100), unique=True, index=True, nullable=False)
currency = db.Column(db.String(3), nullable=False)
price = db.Column(db.Float(), nullable=False)
payment_frequency = db.Column(db.String(20), nullable=False)
created_on = db.Column(db.DateTime, server_default=func.now())
def __init__(self, **kwargs):
for field in list(kwargs.keys()):
setattr(self, field, kwargs[field])
def __repr__(self):
return f"<Plans >>> {self.plan_name}>"
admin.add_view(PlansView(Plans, db.session))
```
#### File: migrations/versions/41642e3152e0_add_plans_model_and_user_model.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '4<PASSWORD>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('plans',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('plan_name', sa.String(length=100), nullable=False),
sa.Column('currency', sa.String(length=3), nullable=False),
sa.Column('price', sa.Float(), nullable=False),
sa.Column('payment_frequency', sa.String(length=20), nullable=False),
sa.Column('created_on', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id'),
sa.UniqueConstraint('plan_name')
)
op.create_table('user',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('email', sa.String(length=100), nullable=False),
sa.Column('first_name', sa.String(length=255), nullable=False),
sa.Column('last_name', sa.String(length=255), nullable=False),
sa.Column('password', sa.String(length=255), nullable=False),
sa.Column('created_on', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
op.drop_table('plans')
# ### end Alembic commands ###
```
#### File: tests/user_entity_tests/test_user_login.py
```python
import json
class TestUserResourceEndpoints:
def test_user_login_successfull(self, client, new_user):
""" Test for a successful user login """
response = client.post(
"/api/v1/user/login",
data=json.dumps(
{
"email": new_user.email,
"password": "<PASSWORD>",
}
),
)
resp = response.get_json()
assert response.status_code == 200
assert resp["message"] == "Login was successfull"
assert "token" in resp["data"].keys()
def test_user_login_unsuccessfull_wrong_password(self, client, new_user):
""" Test for an unsuccessful user login with a bad password """
response = client.post(
"/api/v1/user/login",
data=json.dumps(
{
"email": new_user.email,
"password": "<PASSWORD>",
}
),
)
resp = response.get_json()
assert response.status_code == 401
assert resp["message"] == "Your email or password is not correct"
assert resp["status"] == "failed"
def test_user_login_unsuccessfull(self, client):
""" Test for an unsuccessful user login for a non-existing user """
response = client.post(
"/api/v1/user/login",
data=json.dumps(
{
"email": "<EMAIL>",
"password": "<PASSWORD>",
}
),
)
resp = response.get_json()
assert response.status_code == 401
assert resp["message"] == "Your email or password is not correct"
assert resp["status"] == "failed"
```
#### File: feather-insure/user/user_schema.py
```python
from app import ma
from utils.base_schema import BaseSchema
from marshmallow import fields, validate, pre_dump
from user.user_model import User
class RegisterSchema(BaseSchema):
first_name = fields.Str(
required=True,
validate=validate.Length(
min=2, max=50, error="First name should contain 2 to 50 characters"
),
error_messages={"required": "You've not entered your First Name"},
)
last_name = fields.Str(
required=True,
validate=validate.Length(
min=2, max=50, error="Last name should contain 2 to 50 characters"
),
error_messages={"required": "You've not entered your Last Name"},
)
email = fields.Email(
required=True,
error_messages={
"required": "You've not entered your Email Address",
"invalid": "Please enter a valid email address",
},
)
password = fields.Str(
required=True,
validate=validate.Length(
min=6, max=50, error="Password should contain 6 to 50 characters"
),
error_messages={"required": "You've not entered your password"},
)
@pre_dump
def preprocess(self, data, **kwargs):
data["email"] = data["email"].lower()
data["first_name"] = data["first_name"].title()
data["last_name"] = data["last_name"].title()
return data
class LoginSchema(BaseSchema):
email = fields.Email(
required=True,
error_messages={
"required": "You've not entered your Email Address",
"invalid": "Please enter a valid email address",
},
)
password = fields.Str(
required=True,
validate=validate.Length(
min=6, max=50, error="Password should contain 6 to 50 characters"
),
error_messages={"required": "You've not entered your password"},
)
@pre_dump
def preprocess(self, data, **kwargs):
data["email"] = data["email"].lower()
return data
class UserProfileSchema(ma.SQLAlchemyAutoSchema): # type: ignore
class Meta:
model = User
```
|
{
"source": "Jesse-jApps/pg8000",
"score": 2
}
|
#### File: test/native/test_dbapi.py
```python
import os
import time
import pytest
@pytest.fixture
def has_tzset():
# Neither Windows nor Jython 2.5.3 have a time.tzset() so skip
if hasattr(time, "tzset"):
os.environ["TZ"] = "UTC"
time.tzset()
return True
return False
# DBAPI compatible interface tests
@pytest.fixture
def db_table(con, has_tzset):
con.run("START TRANSACTION")
con.run(
"CREATE TEMPORARY TABLE t1 "
"(f1 int primary key, f2 int not null, f3 varchar(50) null) "
"ON COMMIT DROP"
)
con.run("INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=1, v2=1, v3=None)
con.run("INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=2, v2=10, v3=None)
con.run("INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=3, v2=100, v3=None)
con.run(
"INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=4, v2=1000, v3=None
)
con.run(
"INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=5, v2=10000, v3=None
)
return con
def test_named(db_table):
res = db_table.run("SELECT f1, f2, f3 FROM t1 WHERE f1 > :f1", f1=3)
for row in res:
f1, f2, f3 = row
def test_row_count(db_table):
db_table.run("SELECT * FROM t1")
assert 5 == db_table.row_count
db_table.run("UPDATE t1 SET f3 = :v WHERE f2 > 101", v="Hello!")
assert 2 == db_table.row_count
db_table.run("DELETE FROM t1")
assert 5 == db_table.row_count
def test_prepared_statement(con):
con.run("PREPARE gen_series AS SELECT generate_series(1, 10);")
con.run("EXECUTE gen_series")
```
|
{
"source": "JesseJMa/data-structure-and-algorithm",
"score": 4
}
|
#### File: data-structure/Heap/Heap.py
```python
class Heap:
def __init__(self, capacity: int):
self._capacity = capacity # allocate capacity for heap
self._data = [0] * (capacity + 1)
self._count = 0
@classmethod
def _parent(cls, child_index: int) -> int:
return child_index // 2
@classmethod
def _left(cls, parent_index: int) -> int:
"""The left child index."""
        return parent_index * 2  # 1-indexed heap: left child of i is 2*i
@classmethod
def _right(cls, parent_index: int) -> int:
return parent_index * 2 + 1
def _shiftUp(self) -> None:
i, parent = self._count, Heap._parent(self._count)
while parent and self._data[parent] < self._data[i]:
tmp = self._data[parent]
self._data[parent] = self._data[i]
self._data[i] = tmp
i = parent
parent = Heap._parent(parent)
def _insert(self, value: int) -> None:
if self._count > self._capacity: return
self._count += 1
self._data[self._count] = value
self._shiftUp()
```
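A minimal sketch of using the heap above; index 0 of the backing list is unused because the implementation is 1-indexed:
```python
heap = Heap(capacity=10)
for value in (3, 9, 5, 1):
    heap._insert(value)

print(heap._data[1])                    # 9 -- the largest value sits at the root
print(heap._data[1:heap._count + 1])    # current heap contents, root first
```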
#### File: data-structure-and-algorithm/Heap/PriorityQueue.py
```python
import math
class QueueNode:
def __init__(self, priority, data=None):
assert type(priority) is int and priority >= 0
self.data = data
self.priority = priority
class PriorityQueue:
def __init__(self, capacity=100):
self._q = []
self._capacity = capacity
self._length = 0
def enqueue(self, data, priority):
if self._length > self._capacity:
return False
newNode = QueueNode(priority, data)
self._q.append(newNode)
self._length += 1
nn = self._length - 1
while nn > 0:
            lp = (nn - 1) // 2  # parent index of the newly appended node (0-indexed list)
if self._q[nn].priority > self._q[lp].priority:
self._q[nn], self._q[lp] = self._q[lp], self._q[nn]
nn = lp
else:
break
return True
```
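A small usage sketch for the priority queue above. It only exercises enqueue, since no dequeue is defined in this snippet; after the sift-up, the highest-priority node sits at the front of the internal list:
```python
pq = PriorityQueue()
pq.enqueue("low", 1)
pq.enqueue("high", 9)
pq.enqueue("mid", 5)

print(pq._q[0].data)    # 'high' -- priority 9 bubbled up to the root
```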
#### File: data-structure-and-algorithm/leetcode/0001_two-sum.py
```python
def twoSum(nums, target):
hashed = {}
for idx, ele in enumerate(nums):
if ele in hashed:
return [hashed[ele], idx]
hashed[target - ele] = idx
print(twoSum([2,1,3, 7], 9))
```
#### File: sorts/quickSort/insertion_sort.py
```python
def insertion_sort(arr, l, r):
for i in range(l + 1, r + 1):
temp = arr[i]
index = i
        while index > l and arr[index - 1] > temp:  # stay within the [l, r] slice
arr[index] = arr[index - 1]
index -= 1
arr[index] = temp
# arr = [2,4, 6, 8]
# insertion_sort(arr, 0, 2)
# print(arr)
```
#### File: sorts/quickSort/quickSort.py
```python
import random
def qsort(arr):
if len(arr) < 2:
return arr
pivot_element = random.choice(arr)
small = [i for i in arr if i < pivot_element]
medium = [i for i in arr if i == pivot_element]
big = [i for i in arr if i > pivot_element]
return qsort(small) + medium + qsort(big)
if __name__ == '__main__':
arr = [3,5,6,1,8,7,7,2,0,1]
print(qsort(arr))
```
|
{
"source": "jessejohn01/CSCI446PA2",
"score": 3
}
|
#### File: jessejohn01/CSCI446PA2/rdt_3_0.py
```python
import network_3_0
import argparse
import time
from time import sleep
import hashlib
class Packet:
## the number of bytes used to store packet length
seq_num_S_length = 10
length_S_length = 10
## length of md5 checksum in hex
checksum_length = 32
def __init__(self, seq_num, msg_S):
self.seq_num = seq_num
self.msg_S = msg_S
@classmethod
def from_byte_S(self, byte_S):
if Packet.corrupt(byte_S):
raise RuntimeError('Cannot initialize Packet: byte_S is corrupt')
#extract the fields
seq_num = int(byte_S[Packet.length_S_length : Packet.length_S_length+Packet.seq_num_S_length])
msg_S = byte_S[Packet.length_S_length+Packet.seq_num_S_length+Packet.checksum_length :]
return self(seq_num, msg_S)
def get_byte_S(self):
#convert sequence number of a byte field of seq_num_S_length bytes
seq_num_S = str(self.seq_num).zfill(self.seq_num_S_length)
#convert length to a byte field of length_S_length bytes
length_S = str(self.length_S_length + len(seq_num_S) + self.checksum_length + len(self.msg_S)).zfill(self.length_S_length)
#compute the checksum
checksum = hashlib.md5((length_S+seq_num_S+self.msg_S).encode('utf-8'))
checksum_S = checksum.hexdigest()
#compile into a string
return length_S + seq_num_S + checksum_S + self.msg_S
@staticmethod
def corrupt(byte_S):
#extract the fields
length_S = byte_S[0:Packet.length_S_length]
seq_num_S = byte_S[Packet.length_S_length : Packet.seq_num_S_length+Packet.seq_num_S_length]
checksum_S = byte_S[Packet.seq_num_S_length+Packet.seq_num_S_length : Packet.seq_num_S_length+Packet.length_S_length+Packet.checksum_length]
msg_S = byte_S[Packet.seq_num_S_length+Packet.seq_num_S_length+Packet.checksum_length :]
#compute the checksum locally
checksum = hashlib.md5(str(length_S+seq_num_S+msg_S).encode('utf-8'))
computed_checksum_S = checksum.hexdigest()
#and check if the same
return checksum_S != computed_checksum_S
class RDT:
## latest sequence number used in a packet
seq_num = 1
## buffer of bytes read from network
byte_buffer = ''
def __init__(self, role_S, server_S, port):
self.network = network_3_0.NetworkLayer(role_S, server_S, port)
def disconnect(self):
self.network.disconnect()
def rdt_1_0_send(self, msg_S):
p = Packet(self.seq_num, msg_S)
self.seq_num += 1
self.network.udt_send(p.get_byte_S())
def rdt_1_0_receive(self):
ret_S = None
byte_S = self.network.udt_receive()
self.byte_buffer += byte_S
#keep extracting packets - if reordered, could get more than one
while True:
#check if we have received enough bytes
if(len(self.byte_buffer) < Packet.length_S_length):
return ret_S #not enough bytes to read packet length
#extract length of packet
length = int(self.byte_buffer[:Packet.length_S_length])
if len(self.byte_buffer) < length:
return ret_S #not enough bytes to read the whole packet
#create packet from buffer content and add to return string
p = Packet.from_byte_S(self.byte_buffer[0:length])
ret_S = p.msg_S if (ret_S is None) else ret_S + p.msg_S
#remove the packet bytes from the buffer
self.byte_buffer = self.byte_buffer[length:]
#if this was the last packet, will return on the next iteration
def waitForACK(self,p): #Wait for an ACK Packet. Basically listening for a packet.
byte_S = self.network.udt_receive()
self.byte_buffer += byte_S
while True: #Keep grabbing bytes.
byte_S = self.network.udt_receive()
self.byte_buffer += byte_S
if(len(self.byte_buffer) >= Packet.length_S_length): # Check to make sure we have enough bytes for a packet.
length = int(self.byte_buffer[:Packet.length_S_length])
if(len(self.byte_buffer) >= length): # Check our bytes are the right length
if(Packet.corrupt(self.byte_buffer[0:length])): #Check for corruption
self.byte_buffer = self.byte_buffer[length:]
self.network.udt_send(p.get_byte_S()) #If not resend.
else:
receivedPacket = Packet.from_byte_S(self.byte_buffer[0:length])
self.byte_buffer = self.byte_buffer[length:]
if(receivedPacket.msg_S == 'ACK' and receivedPacket.seq_num >= self.seq_num):#Check if ACK packet.
self.seq_num = self.seq_num + 1
return
else:
self.network.udt_send(p.get_byte_S())
def waitForMore(self, ack): #Method for making sure there is no resends. Wait for .1 seconds
end = time.time() + .1
byte_buffer2 = ''
while (time.time() < end):
isDuplicate = False
bytes2 = self.network.udt_receive()
byte_buffer2 += bytes2
if (len(byte_buffer2) < Packet.length_S_length): #restarts if not enough bytes
continue #restart loop
length = int(byte_buffer2[:Packet.length_S_length])
if (len(byte_buffer2) < length): #Restart if not matching length
continue #restart
if (Packet.corrupt(byte_buffer2[0:length])): #Is the packet corrupt?
nack = Packet(self.seq_num, 'NACK') #Create NACK packet.
self.network.udt_send(nack.get_byte_S()) #Send
byte_buffer2 = '' #Empty the buffer.
if (isDuplicate): #Checks for duplicates and adds more time
end = end + .1
continue
else: # Time expired
p2 = Packet.from_byte_S(byte_buffer2[0:length])
if (p2.seq_num == self.seq_num - 1): #Check if it was a different packet.
isDuplicate = True
end = end + .1
self.network.udt_send(ack.get_byte_S()) #We don't have to wait anymore send ACK.
byte_buffer2 = ''
else:
nack = Packet(self.seq_num, 'NACK')
self.network.udt_send(nack.get_byte_S())
break
def rdt_3_0_send(self, msg_S):
p = Packet(self.seq_num, msg_S)
self.network.udt_send(p.get_byte_S())
byte_S = self.network.udt_receive()
self.byte_buffer += byte_S
while True: # Keep checking for packets.
timeoutTime = time.time() + .05 #Timer for lost packets.
while(time.time() < timeoutTime):
byte_S = self.network.udt_receive()
self.byte_buffer += byte_S
if(len(self.byte_buffer) >= Packet.length_S_length):
length = int(self.byte_buffer[:Packet.length_S_length])
if(len(self.byte_buffer) >= length): #Make sure packet is right length.
if(Packet.corrupt(self.byte_buffer[0:length])): #Check to make sure not corrupt.
self.byte_buffer = self.byte_buffer[length:]
break
else:
receivedPacket = Packet.from_byte_S(self.byte_buffer[0:length])
self.byte_buffer = self.byte_buffer[length:]
if(receivedPacket.msg_S == 'ACK' and receivedPacket.seq_num >= self.seq_num): #Check if right ACK packet for one we sent.
self.seq_num = self.seq_num + 1
self.byte_buffer = self.byte_buffer[length:]
return
else:
break #Break out the timer somethings wrong.
self.network.udt_send(p.get_byte_S())
def rdt_3_0_receive(self):
ret_S = None
byte_S = self.network.udt_receive()
self.byte_buffer += byte_S
while True: #Keep checking for packets.
if (len(self.byte_buffer) < Packet.length_S_length): #Is packet right length
return ret_S
length = int(self.byte_buffer[:Packet.length_S_length])
if (len(self.byte_buffer) < length):
return ret_S
if(Packet.corrupt(self.byte_buffer[0:length])): # Check for corrupt packets.
nack = Packet(self.seq_num, 'NACK')
self.network.udt_send(nack.get_byte_S())
self.byte_buffer = self.byte_buffer[length:]
else:
p = Packet.from_byte_S(self.byte_buffer[0:length])
if (p.seq_num <= self.seq_num): #Is packet right sequence number.
ret_S = p.msg_S if (ret_S is None) else ret_S + p.msg_S
self.seq_num = self.seq_num + 1
ack = Packet(p.seq_num, 'ACK')
self.network.udt_send(ack.get_byte_S())
end = time.time() + .2
byte_buffer2 = ''
while(time.time() < end):
isDuplicate = False
bytes2 = self.network.udt_receive()
byte_buffer2 += bytes2
try:
if (len(byte_buffer2) < Packet.length_S_length):
continue
except ValueError:
continue
length = int(byte_buffer2[:Packet.length_S_length])
if (len(byte_buffer2) < length):
continue
if(Packet.corrupt(byte_buffer2[0:length])):
nack = Packet(self.seq_num, 'NACK')
self.network.udt_send(nack.get_byte_S())
byte_buffer2 = ''
if(isDuplicate):
end = end + .2
continue
else:
p2 = Packet.from_byte_S(byte_buffer2[0:length])
if (p2.seq_num <= self.seq_num-1):
isDuplicate = True
end = end + .2
ack1 = Packet(p2.seq_num, 'ACK')
self.network.udt_send(ack1.get_byte_S())
byte_buffer2 = ''
else:
nack = Packet(self.seq_num, 'NACK')
self.network.udt_send(nack.get_byte_S())
break
else:
nack = Packet(self.seq_num, 'NACK')
self.network.udt_send(nack.get_byte_S())
self.byte_buffer = self.byte_buffer[length:]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='RDT implementation.')
parser.add_argument('role', help='Role is either client or server.', choices=['client', 'server'])
parser.add_argument('server', help='Server.')
parser.add_argument('port', help='Port.', type=int)
args = parser.parse_args()
rdt = RDT(args.role, args.server, args.port)
if args.role == 'client':
rdt.rdt_1_0_send('MSG_FROM_CLIENT')
sleep(2)
print(rdt.rdt_1_0_receive())
rdt.disconnect()
else:
sleep(1)
print(rdt.rdt_1_0_receive())
rdt.rdt_1_0_send('MSG_FROM_SERVER')
rdt.disconnect()
```
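A quick sketch of the wire format produced by the Packet class above: a 10-character length field, a 10-character sequence number, a 32-character MD5 checksum, then the payload (run alongside the class definition):
```python
p = Packet(1, 'hello')
wire = p.get_byte_S()

print(len(wire))                         # 57 == 10 + 10 + 32 + len('hello')
print(Packet.corrupt(wire))              # False -- checksum verifies
print(Packet.from_byte_S(wire).msg_S)    # 'hello'
```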
|
{
"source": "jessekl/twiliochallenge",
"score": 3
}
|
#### File: modules/admin/forms.py
```python
from flask.ext.wtf import Form
from wtforms import (HiddenField, SubmitField, RadioField, FileField, DateField)
from wtforms.validators import AnyOf
from fbone.extensions import db
from fbone.modules.user import USER_ROLE, USER_STATUS
class UserForm(Form):
next = HiddenField()
role_code = RadioField(u"Role", [AnyOf([str(val) for val in USER_ROLE.keys()])],
choices=[(str(val), label) for val, label in USER_ROLE.items()])
status_code = RadioField(u"Status", [AnyOf([str(val) for val in USER_STATUS.keys()])],
choices=[(str(val), label) for val, label in USER_STATUS.items()])
# A demo of datepicker.
created_time = DateField(u'Created time')
submit = SubmitField(u'Save')
def save(self, user):
self.populate_obj(user)
db.session.add(user)
db.session.commit()
class EditTranslationForm(Form):
multipart = True
file = FileField(u"Upload Translation File")
language = HiddenField()
submit = SubmitField(u'Save')
class UploadLogoForm(Form):
multipart = True
file = FileField(u"Upload Logo File")
submit = SubmitField(u'Save')
```
#### File: modules/movies/models.py
```python
from sqlalchemy import Column, func
from fbone.modules.base import Base
from fbone.extensions import db
from fbone.utils import get_current_time, STRING_LEN
class Movie(Base):
name = Column(db.String(STRING_LEN))
release_date = Column(db.String(STRING_LEN))
poster_url = Column(db.String(STRING_LEN))
def to_dict(self):
return {
'id' : self.id,
'name' : self.name,
'release_date': self.release_date,
'poster_url': self.poster_url
}
```
#### File: modules/settings/views.py
```python
from flask import Blueprint, render_template, request, flash
from flask.ext.login import login_required, current_user
from fbone.modules.user import User
from .forms import ProfileForm, PasswordForm
settings = Blueprint('settings', __name__, url_prefix='/settings')
@settings.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
user = User.query.filter_by(name=current_user.name).first_or_404()
form = ProfileForm(obj=user.user_detail,
email=current_user.email,
role_code=current_user.role_code,
status_code=current_user.status_code,
next=request.args.get('next'))
if form.validate_on_submit():
form.create_profile(request, user)
flash('Public profile updated.', 'success')
return render_template('settings/profile.html', user=user,
active="profile", form=form)
@settings.route('/password', methods=['GET', 'POST'])
@login_required
def password():
user = User.query.filter_by(name=current_user.name).first_or_404()
form = PasswordForm(next=request.args.get('next'))
if form.validate_on_submit():
form.update_password(user)
flash('Password updated.', 'success')
return render_template('settings/password.html', user=user,
active="password", form=form)
```
#### File: jessekl/twiliochallenge/manage.py
```python
import os
from flask.ext.script import Manager
from flask.ext.migrate import MigrateCommand
from fbone import create_app
from fbone.extensions import db
from fbone.utils import PROJECT_PATH, MALE
from fbone.modules.user import User, ADMIN, ACTIVE
from fbone.modules.movies import Movie
from fbone.modules.user.commands import CreateUserCommand, DeleteUserCommand, ListUsersCommand
app = create_app()
manager = Manager(create_app)
manager.add_option('-c', '--config', dest='config', required=False)
manager.add_command('create_user', CreateUserCommand())
manager.add_command('delete_user', DeleteUserCommand())
manager.add_command('list_users', ListUsersCommand())
manager.add_command('db', MigrateCommand)
@manager.command
def initdb():
"""Init/reset database."""
db.drop_all()
db.create_all()
admin = User(
name=u'admin',
fullname=u'<NAME>',
email=u'<EMAIL>',
password=u'<PASSWORD>',
role_code=ADMIN,
status_code=ACTIVE,
gender_code=MALE,
bio=u'FSU Grad. Go Noles!')
db.session.add(admin)
db.session.commit()
@manager.command
def tests():
"""Run the tests."""
import pytest
exit_code = pytest.main([os.path.join(PROJECT_PATH, 'tests'), '--verbose'])
return exit_code
if __name__ == "__main__":
manager.run()
```
#### File: twiliochallenge/tests/test_config.py
```python
from fbone.factory import create_app
from fbone.config import TestConfig
def test_default_config():
app = create_app()
assert app.config['DEBUG'] is True
assert app.config['TESTING'] is False
def test_test_config():
app = create_app()
app.config.from_object(TestConfig)
assert app.config['TESTING'] is True
```
#### File: twiliochallenge/tests/test_models.py
```python
import pytest
from fbone.modules.user import User, USER
from .factories import UserFactory
@pytest.mark.usefixtures('session')
class TestUser:
def test_get_by_id(self):
user = User(name='bar', email='<EMAIL>', fullname='bar')
User().save(user)
retrieved = User().get_by_id(user.id)
assert retrieved == user
# def test_created_at_defaults_to_datetime(self):
# user = User(name='qux', email='<EMAIL>')
def test_password_is_nullable(self):
user = User(name='zap', email='<EMAIL>')
assert user.password is None
def test_factory(self, db):
user = UserFactory(password="<PASSWORD>")
db.session.commit()
assert bool(user.name)
assert bool(user.email)
assert user.is_admin() is False
assert user.check_password('<PASSWORD>')
# def test_check_password(self):
# user = User.create(username="foo", email="<EMAIL>",
# password="<PASSWORD>")
# assert user.check_password('<PASSWORD>') is True
# assert user.check_password("<PASSWORD>") is False
# def test_full_name(self):
# user = UserFactory(first_name="Foo", last_name="Bar")
# assert user.full_name == "Foo Bar"
def test_roles(self):
u = User(name='qux', email='<EMAIL>', fullname='qux')
User().save(u)
assert u.role_code == USER
```
#### File: alembic/testing/env.py
```python
import os
import shutil
import textwrap
from alembic.compat import u
from alembic.script import Script, ScriptDirectory
from alembic import util
from . import engines
from . import provision
def _get_staging_directory():
if provision.FOLLOWER_IDENT:
return "scratch_%s" % provision.FOLLOWER_IDENT
else:
return 'scratch'
def staging_env(create=True, template="generic", sourceless=False):
from alembic import command, script
cfg = _testing_config()
if create:
path = os.path.join(_get_staging_directory(), 'scripts')
if os.path.exists(path):
shutil.rmtree(path)
command.init(cfg, path)
if sourceless:
try:
# do an import so that a .pyc/.pyo is generated.
util.load_python_file(path, 'env.py')
except AttributeError:
# we don't have the migration context set up yet
# so running the .env py throws this exception.
# theoretically we could be using py_compiler here to
# generate .pyc/.pyo without importing but not really
# worth it.
pass
make_sourceless(os.path.join(path, "env.py"))
sc = script.ScriptDirectory.from_config(cfg)
return sc
def clear_staging_env():
shutil.rmtree(_get_staging_directory(), True)
def script_file_fixture(txt):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
path = os.path.join(dir_, "script.py.mako")
with open(path, 'w') as f:
f.write(txt)
def env_file_fixture(txt):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
txt = """
from alembic import context
config = context.config
""" + txt
path = os.path.join(dir_, "env.py")
pyc_path = util.pyc_file_from_path(path)
if os.access(pyc_path, os.F_OK):
os.unlink(pyc_path)
with open(path, 'w') as f:
f.write(txt)
def _sqlite_file_db():
dir_ = os.path.join(_get_staging_directory(), 'scripts')
url = "sqlite:///%s/foo.db" % dir_
return engines.testing_engine(url=url)
def _sqlite_testing_config(sourceless=False):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
url = "sqlite:///%s/foo.db" % dir_
return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s
sourceless = %s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, url, "true" if sourceless else "false"))
def _multi_dir_testing_config(sourceless=False):
dir_ = os.path.join(_get_staging_directory(), 'scripts')
url = "sqlite:///%s/foo.db" % dir_
return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s
sourceless = %s
version_locations = %%(here)s/model1/ %%(here)s/model2/ %%(here)s/model3/
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, url, "true" if sourceless else "false"))
def _no_sql_testing_config(dialect="postgresql", directives=""):
"""use a postgresql url with no host so that
    connections are guaranteed to fail"""
dir_ = os.path.join(_get_staging_directory(), 'scripts')
return _write_config_file("""
[alembic]
script_location = %s
sqlalchemy.url = %s://
%s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
""" % (dir_, dialect, directives))
def _write_config_file(text):
cfg = _testing_config()
with open(cfg.config_file_name, 'w') as f:
f.write(text)
return cfg
def _testing_config():
from alembic.config import Config
if not os.access(_get_staging_directory(), os.F_OK):
os.mkdir(_get_staging_directory())
return Config(os.path.join(_get_staging_directory(), 'test_alembic.ini'))
def write_script(
scriptdir, rev_id, content, encoding='ascii', sourceless=False):
old = scriptdir.revision_map.get_revision(rev_id)
path = old.path
content = textwrap.dedent(content)
if encoding:
content = content.encode(encoding)
with open(path, 'wb') as fp:
fp.write(content)
pyc_path = util.pyc_file_from_path(path)
if os.access(pyc_path, os.F_OK):
os.unlink(pyc_path)
script = Script._from_path(scriptdir, path)
old = scriptdir.revision_map.get_revision(script.revision)
if old.down_revision != script.down_revision:
raise Exception("Can't change down_revision "
"on a refresh operation.")
scriptdir.revision_map.add_revision(script, _replace=True)
if sourceless:
make_sourceless(path)
def make_sourceless(path):
# note that if -O is set, you'd see pyo files here,
# the pyc util function looks at sys.flags.optimize to handle this
pyc_path = util.pyc_file_from_path(path)
assert os.access(pyc_path, os.F_OK)
# look for a non-pep3147 path here.
# if not present, need to copy from __pycache__
simple_pyc_path = util.simple_pyc_file_from_path(path)
if not os.access(simple_pyc_path, os.F_OK):
shutil.copyfile(pyc_path, simple_pyc_path)
os.unlink(path)
def three_rev_fixture(cfg):
a = util.rev_id()
b = util.rev_id()
c = util.rev_id()
script = ScriptDirectory.from_config(cfg)
script.generate_revision(a, "revision a", refresh=True)
write_script(script, a, """\
"Rev A"
revision = '%s'
down_revision = None
from alembic import op
def upgrade():
op.execute("CREATE STEP 1")
def downgrade():
op.execute("DROP STEP 1")
""" % a)
script.generate_revision(b, "revision b", refresh=True)
write_script(script, b, u("""# coding: utf-8
"Rev B, méil"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 2")
def downgrade():
op.execute("DROP STEP 2")
""") % (b, a), encoding="utf-8")
script.generate_revision(c, "revision c", refresh=True)
write_script(script, c, """\
"Rev C"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 3")
def downgrade():
op.execute("DROP STEP 3")
""" % (c, b))
return a, b, c
```
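These fixtures are designed to be composed by alembic's own test suite. The sketch below shows one plausible flow using only the functions defined above; it is illustrative rather than taken from the original repository.
```python
# Illustrative composition of the staging fixtures above (not from the original file).
from alembic.testing.env import (
    staging_env, _sqlite_testing_config, three_rev_fixture, clear_staging_env)

env = staging_env()               # creates <scratch>/scripts via `alembic init`
cfg = _sqlite_testing_config()    # writes <scratch>/test_alembic.ini pointing at a sqlite file
a, b, c = three_rev_fixture(cfg)  # writes three chained revision scripts, returns their ids
clear_staging_env()               # removes the scratch directory again
```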
#### File: alembic/testing/requirements.py
```python
from alembic import util
from . import exclusions
if util.sqla_094:
from sqlalchemy.testing.requirements import Requirements
else:
class Requirements(object):
pass
class SuiteRequirements(Requirements):
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.open()
@property
def unique_constraint_reflection(self):
return exclusions.skip_if(
lambda config: not util.sqla_084,
"SQLAlchemy 0.8.4 or greater required"
)
@property
def foreign_key_match(self):
return exclusions.fails_if(
lambda config: not util.sqla_08,
"MATCH for foreign keys added in SQLAlchemy 0.8.0"
)
@property
def fail_before_sqla_079(self):
return exclusions.fails_if(
lambda config: not util.sqla_079,
"SQLAlchemy 0.7.9 or greater required"
)
@property
def fail_before_sqla_080(self):
return exclusions.fails_if(
lambda config: not util.sqla_08,
"SQLAlchemy 0.8.0 or greater required"
)
@property
def fail_before_sqla_083(self):
return exclusions.fails_if(
lambda config: not util.sqla_083,
"SQLAlchemy 0.8.3 or greater required"
)
@property
def fail_before_sqla_084(self):
return exclusions.fails_if(
lambda config: not util.sqla_084,
"SQLAlchemy 0.8.4 or greater required"
)
@property
def fail_before_sqla_09(self):
return exclusions.fails_if(
lambda config: not util.sqla_09,
"SQLAlchemy 0.9.0 or greater required"
)
@property
def fail_before_sqla_099(self):
return exclusions.fails_if(
lambda config: not util.sqla_099,
"SQLAlchemy 0.9.9 or greater required"
)
@property
def sqlalchemy_08(self):
return exclusions.skip_if(
lambda config: not util.sqla_08,
"SQLAlchemy 0.8.0b2 or greater required"
)
@property
def sqlalchemy_09(self):
return exclusions.skip_if(
lambda config: not util.sqla_09,
"SQLAlchemy 0.9.0 or greater required"
)
@property
def sqlalchemy_092(self):
return exclusions.skip_if(
lambda config: not util.sqla_092,
"SQLAlchemy 0.9.2 or greater required"
)
@property
def sqlalchemy_094(self):
return exclusions.skip_if(
lambda config: not util.sqla_094,
"SQLAlchemy 0.9.4 or greater required"
)
```
#### File: site-packages/flasky/flasky.py
```python
from flask import Flask
import inspect
class Flasky(object):
"""
Flasky - Lazy man's Flask Application
Convert your class into a flask app.
"""
def __init__(self, rule=None):
self.app = Flask(__name__)
if rule:
self.rule = rule
else:
self.rule = {
'ignore': ['__init__'],
'map': {'index': '/'},
}
for name, func in inspect.getmembers(self, inspect.ismethod):
if name in self.rule['ignore']:
continue
elif name in self.rule['map']:
self.app.add_url_rule(self.rule['map'][name], view_func=func)
continue
url_prefix = [''] + name.split('_')
args = inspect.getargspec(func).args
for x in xrange(len(args), 0, -1):
url = '/'.join(url_prefix + ['<%s>' % _ for _ in args[1:x]])
self.app.add_url_rule(url, view_func=func)
if not func.func_defaults:
break
elif x <= len(args) - len(func.func_defaults):
break
```
#### File: flasky/tests/flasky_test.py
```python
from flasky import Flasky
import unittest
class MyGorgeousWebSpace(Flasky):
def __init__(self):
rule = {
"ignore": ["__init__", "private"],
"map": {"index": "/"},
}
super(MyGorgeousWebSpace, self).__init__(rule)
def index(self):
return "Hello, lazy man :p"
def private(self):
return "Hey!"
def status(self):
return "I'm ok."
def status_detail(self):
return "I'm ok, seriously. (sigh)"
def author(self, name):
return "Yeah, I heard about %s, a little bit." % name
def post(self, slug=None):
if slug:
return "I didn't have chance to write about %s." % slug
else:
return "What did you expect from me? Ummmm... like a list of blog posts?"
class FlaskyTestCase(unittest.TestCase):
def setUp(self):
self.app = MyGorgeousWebSpace().app.test_client()
def test_mapped(self):
rv = self.app.get('/')
assert rv.data == "Hello, lazy man :p"
def test_ignore(self):
rv = self.app.get('/private')
assert rv.status_code == 404
def test_simple(self):
rv = self.app.get('/status')
assert rv.data == "I'm ok."
def test_autoslash(self):
rv = self.app.get('/status/detail')
assert rv.data == "I'm ok, seriously. (sigh)"
def test_parameter_101(self):
rv = self.app.get('/author/lqez')
assert rv.data == "Yeah, I heard about lqez, a little bit."
def test_parameter_102(self):
rv = self.app.get('/author')
assert rv.status_code == 404
def test_parameter_103(self):
rv = self.app.get('/author/lqez/idiot')
assert rv.status_code == 404
def test_parameter_201(self):
rv = self.app.get('/post')
assert rv.data == "What did you expect from me? Ummmm... like a list of blog posts?"
def test_parameter_202(self):
rv = self.app.get('/post/django')
assert rv.data == "I didn't have chance to write about django."
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jessekrubin/lager",
"score": 3
}
|
#### File: src/lager/core.py
```python
import asyncio
from functools import wraps
from time import time
from typing import Union
from loguru import logger
from lager.const import LOG_LEVELS
__all__ = ['loglevel', 'flog', 'handlers', 'logger', 'log', 'LOG', 'ln', 'LN']
logger.t = logger.trace
logger.d = logger.debug
logger.i = logger.info
logger.s = logger.success
logger.w = logger.warning
logger.e = logger.error
logger.c = logger.critical
# commonly used dgpy aliases
log = logger
LOG = logger
# ln => natural log
ln = logger
LN = logger
def loglevel(level: Union[str, int]) -> str:
"""Convert log-level abrev to a valid loguru log level"""
return LOG_LEVELS[str(level).strip("'").strip('"').lower()]
def flog(funk=None, level="debug", enter=True, exit=True):
"""Log function (sync/async) enter and exit using this decorator
Args:
funk (Callable): Function to decorate
level (Union[int, str]): Log level
enter (bool): Log function entry if True
        exit (bool): Log function exit if True
Returns:
A wrapped function that now has logging!
Usage:
# SYNC
@flog
def add(a, b):
return a + b
add(1, 4)
# ASYNC
@flog
async def add_async(a, b):
return a + b
import asyncio
asyncio.run(add_async(1, 4))
"""
def _flog(funk):
name = funk.__name__
@wraps(funk)
def _flog_decorator(*args, **kwargs):
logger_ = logger.opt(depth=1)
if enter:
logger_.log(
loglevel(level),
"FLOG-ENTER > '{}' (args={}, kwargs={})",
name,
args,
kwargs,
)
ti = time()
result = funk(*args, **kwargs)
tf = time()
if exit:
logger_.log(
loglevel(level),
"FLOG-EXIT < '{}' (return={}, dt_sec={})",
name,
result,
tf - ti,
)
return result
@wraps(funk)
async def _flog_decorator_async(*args, **kwargs):
logger_ = logger.opt(depth=7)
if enter:
logger_.log(
loglevel(level),
"FLOG-ENTER > '{}' (args={}, kwargs={})",
name,
args,
kwargs,
)
ti = time()
result = await funk(*args, **kwargs)
tf = time()
if exit:
logger_.log(
loglevel(level),
"FLOG-EXIT < '{}' (return={}, dt_sec={})",
name,
result,
tf - ti,
)
return result
if asyncio.iscoroutinefunction(funk) or asyncio.iscoroutine(funk):
return _flog_decorator_async
return _flog_decorator
return _flog(funk) if funk else _flog
def handlers():
"""Return all handlers"""
return logger._core.handlers
```
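A short, hedged sketch of how the helpers above compose: `loglevel` normalizes a level string, `flog` wraps sync or async callables with enter/exit logging, and `handlers()` exposes the registered loguru handlers. The example values are illustrative and not from the original repository.
```python
# Illustrative usage of the helpers defined above.
from lager.core import flog, loglevel, logger, handlers

# loglevel lowercases and strips quotes, so these hit the same LOG_LEVELS key
# (assumes 'info' is a key in lager.const.LOG_LEVELS).
assert loglevel("INFO") == loglevel("info")

@flog(level="info", exit=False)  # bare @flog also works; defaults to the debug level
def scale(x, factor=2):
    return x * factor

scale(21)  # logs "FLOG-ENTER > 'scale' (args=(21,), kwargs={})" at INFO
logger.i("registered handlers: {}", len(handlers()))
```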
#### File: lager/tests/test_version.py
```python
from os import path
from lager import __version__
PWD = path.split(path.realpath(__file__))[0]
def _get_version() -> str:
_dirpath = PWD
version = "UNKNOWN???"
for i in range(3):
_filepath = path.join(_dirpath, "pyproject.toml")
if path.exists(_filepath):
version = (
[l for l in open(_filepath).read().split("\n") if "version" in l][0]
.replace("version = ", "")
.strip('"')
)
return version
_dirpath = path.split(_dirpath)[0]
return version
def test_version() -> None:
pyproject_version: str = _get_version()
assert __version__ == pyproject_version
```
|
{
"source": "jessekrubin/pup",
"score": 4
}
|
#### File: pup/pupy/cheese.py
```python
def string_score(strang: str) -> int:
"""Sum of letter values where a==1 and z == 26
:param strang: string to be scored
:type strang: str
    :returns: score of the string
:rtype: int
.. doctest:: python
>>> string_score('me')
18
>>> string_score('poooood')
95
>>> string_score('gregory')
95
"""
return sum((ord(character) - 96 for character in strang.lower()))
def is_palindrome(string: str) -> bool:
"""True a string is a palindrome; False if string is not a palindrome.
:param string:
.. doctest::python
>>> is_palindrome("racecar")
True
>>> is_palindrome("greg")
False
"""
return all(
character == string[-index - 1] for index, character in enumerate(string)
)
if __name__ == "__main__":
from doctest import testmod
testmod()
```
#### File: pup/pupy/fmt.py
```python
from binascii import hexlify
from math import ceil
from os import path
from os import stat
from os import urandom
from re import compile as _compile
from re import sub
from shutil import get_terminal_size
from string import printable
from typing import Any
from typing import Iterator
from typing import List
from typing import Optional
from pupy._typing import Flint
def nbytes(num: Flint) -> str:
"""
    Convert a number of bytes into a human-readable string (bytes, KB, MB, GB, TB).
.. doctest:: python
>>> nbytes(100)
'100.0 bytes'
>>> nbytes(1000)
'1000.0 bytes'
>>> nbytes(10000)
'9.8 KB'
>>> nbytes(100000)
'97.7 KB'
>>> nbytes(1000000)
'976.6 KB'
>>> nbytes(10000000)
'9.5 MB'
>>> nbytes(100000000)
'95.4 MB'
>>> nbytes(1000000000)
'953.7 MB'
>>> nbytes(10000000000)
'9.3 GB'
>>> nbytes(100000000000)
'93.1 GB'
>>> nbytes(1000000000000)
'931.3 GB'
>>> nbytes(10000000000000)
'9.1 TB'
>>> nbytes(100000000000000)
'90.9 TB'
"""
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def filesize(filepath: str) -> str:
"""this function will return the file size
:param filepath:
:return:
"""
if path.isfile(filepath):
file_info = stat(filepath)
return nbytes(file_info.st_size)
def nseconds(t1: float, t2: Optional[float] = None) -> str:
"""Formats time string
Formats t1 if t2 is None as a string; Calculates the time and formats
the time t2-t1 if t2 is not None.
:param t1: time 1/initial in seconds
:type t1: double
:param t2: time 2 (Default value = None)
:type t2: None or double
    :returns: formatted string of t2 - t1, or of t1 if t2 is None
:rtype: str
"""
if t2 is not None:
return nseconds((t2 - t1))
elif t1 == 0.0:
return "0 sec"
elif 0.000001 > t1 >= 0.000000001:
return "%.3f ns" % ((10 ** 9) * t1)
elif 0.001 > t1 >= 0.000001:
return "%.3f μs" % ((10 ** 6) * t1)
elif 1 > t1 >= 0.001:
return "%.3f ms" % ((10 ** 3) * t1)
return "%.3f sec" % t1
def term_table(
strings: List[str], row_wise: bool = False, filler: str = "~"
) -> Iterator[Any]:
"""
:param strings:
:param row_wise:
:param filler:
:return:
"""
max_str_len = max(len(str) for str in strings) + 5
terminal_cols = get_terminal_size((80, 20)).columns
n_cols = terminal_cols // max_str_len
n_rows = int(ceil(len(strings) / n_cols))
spaces = " " * ((terminal_cols - (max_str_len * n_cols)) // n_cols)
size_string = "{:<" + str(max_str_len) + "}" + spaces
fmtstring = size_string * (n_cols - 1) + "{:<}"
strings.extend(filler for _ in range(n_rows * n_cols - len(strings)))
if row_wise:
line_iter = zip(*(strings[i::n_cols] for i in range(n_cols)))
else:
line_iter = (strings[i::n_rows] for i in range(n_rows))
return (fmtstring.format(*row) for row in line_iter)
def bytes2str(bites: bytes, encoding: str = "utf-8") -> str:
"""Convert bytes to a string
:param bites: bytes
:type bites: bytes
:param encoding: encoding of the string (default is utf-8)
:type encoding: str
:return: converted bytes
:rtype: str
.. doctest:: python
>>> a = b'abcdefg'
>>> type(a)
<class 'bytes'>
>>> bytes2str(a)
'abcdefg'
>>> type(bytes2str(a))
<class 'str'>
"""
return bites.decode(encoding)
def binary_string(number: int) -> str:
"""Number to binary string
:param number: some number (an integer) to turn into a binary string
:return: Some string which is the binary string
:rtype: str
.. doctest:: python
>>> binary_string(200)
'11001000'
>>> binary_string(10)
'1010'
"""
return bin(number)[2:]
def strip_comments(string: str) -> str:
"""
:param string:
:return:
"""
filelines = string.splitlines(keepends=False)
r = _compile(r'(?:"(?:[^"\\]|\\.)*"|[^"#])*(#|$)')
return "\n".join((line[: r.match(line).start(1)] for line in filelines))
def strip_ascii(s: str) -> str:
"""Remove all ascii characters from a string
:param s: string with non-ascii characters
:type s: string
:return: string of only the non-ascii characters
.. doctest::
>>> string_w_non_ascii_chars = 'Three fourths: ¾'
>>> strip_ascii(string_w_non_ascii_chars)
'¾'
"""
return "".join(sc for sc in (str(c) for c in s) if sc not in printable)
def no_b(string: str) -> str:
"""Removes the b'' from binary strings and sub-strings that contain b''
:param string: A string surrounded by b'' or a sub-string with b''
:return: A string without binary b'' quotes surround it
.. doctest::
>>> no_b("b'a_string'")
'a_string'
"""
return sub("b'([^']*)'", r"\1", string)
def no_u(string: str) -> str:
"""Removes the u'' from unicode strings and sub-strings that contain u''
:param string: A string surrounded by u'' or a sub-string with u''
:return: A string without unicode u'' quotes surround it
.. doctest:: python
>>> a = "u'a_string'"
>>> no_u(a)
'a_string'
"""
return sub("u'([^']*)'", r"\1", string)
def rhex_str(length: int = 4) -> str:
"""Returns a random hex string
:param length: length of random bytes to turn into hex (defaults to 4)
:type length: int
:return: random hexadecimal string
:rtype: str
.. doctest:: python
>>> a = rhex_str()
>>> isinstance(a, str)
True
>>> len(a) == 8
True
"""
return bytes2str(hexlify(urandom(length)))
```
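`term_table` and `strip_comments` above have no doctests, so here is a small, hedged usage sketch (not part of the original file); the exact table layout depends on the width of your terminal.
```python
# Illustrative usage of term_table, strip_comments, and nbytes from the module above.
from pupy.fmt import term_table, strip_comments, nbytes

names = ["ada", "grace", "alan", "edsger", "barbara", "donald"]
for line in term_table(names, row_wise=True, filler="~"):
    print(line)  # names laid out in as many columns as fit the terminal

src = "x = 1  # set x\n# a full-line comment\ny = 2"
print(strip_comments(src))  # comments are dropped, code and blank lines remain

print(nbytes(123456789))  # '117.7 MB'
```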
#### File: pup/pupy/utils.py
```python
from contextlib import contextmanager
from datetime import datetime
from inspect import stack
from os import environ
from os import makedirs
from os import path
from shutil import rmtree
from tempfile import mkdtemp
from time import time
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from pupy._alias import pp
from pupy._typing import F
from pupy.sh import cd
from pupy.sh import link_dirs
from pupy.sh import link_files
from pupy.sh import unlink_dirs
from pupy.sh import unlink_files
def timestamp(ts: Optional[Union[float, datetime]] = None) -> str:
"""Time stamp string w/ format yyyymmdd-HHMMSS
:return: timestamp string
.. doctest:: python
>>> from datetime import datetime
>>> stamps = ['20190225-161151', '20190225-081151']
>>> timestamp(1551111111.111111) in stamps
True
>>> datetime.now().strftime("%Y%m%d-%H%M%S") == timestamp()
True
>>> timestamp(datetime.now()) == timestamp()
True
"""
if ts is None:
return datetime.now().strftime("%Y%m%d-%H%M%S")
elif isinstance(ts, float):
return datetime.fromtimestamp(ts).strftime("%Y%m%d-%H%M%S")
elif isinstance(ts, datetime):
return ts.strftime("%Y%m%d-%H%M%S")
def environ_dict() -> Dict[str, str]:
"""
:return:
"""
return {k: environ[k] for k in environ}
@contextmanager
def linked_tmp_dir(
suffix: Optional[str] = None,
prefix: Optional[str] = None,
dir: Optional[str] = None,
mkdirs: Optional[List[str]] = None,
lndirs: Optional[List[Tuple[str, str]]] = None,
lnfiles: Optional[List[Tuple[str, str]]] = None,
) -> Any:
"""
:param suffix:
:param prefix:
:param dir:
:param mkdirs:
:param lndirs:
:param lnfiles:
"""
if lndirs is None:
lndirs = []
if lnfiles is None:
lnfiles = []
if mkdirs is None:
mkdirs = []
temp_dir = mkdtemp(suffix, prefix, dir)
lnfiles = [
(path.join(temp_dir, _rel_link), target) for _rel_link, target in lnfiles
]
lndirs = [(path.join(temp_dir, _rel_link), target) for _rel_link, target in lndirs]
# print(mkdirs)
_dirs2make = [
path.join(temp_dir, e)
for e in (
dirpath if isinstance(dirpath, str) else path.join(*dirpath)
for dirpath in mkdirs
)
]
_dirs2make.extend((path.split(link)[0] for link, target in lnfiles))
_dirs2make.extend((path.split(link)[0] for link, target in lndirs))
for dirpath_route in _dirs2make:
# print("mkingdir", dirpath_route)
makedirs(path.join(temp_dir, dirpath_route), exist_ok=True)
link_files(lnfiles)
link_dirs(lndirs)
# from pupy.foreign import files_gen, dirs_gen
# from pprint import pprint
# pprint(list(files_gen(temp_dir)))
# pprint(list(dirs_gen(temp_dir)))
# try:
# lndirs = (
# (path.join(temp_dir, _rel_link), target) for _rel_link, target in lndirs
# )
# except TypeError as e:
# pass
try:
yield temp_dir
finally:
try:
unlink_files(lnfiles)
except Exception as e:
pass
try:
unlink_dirs(lndirs)
except Exception as e:
pass
try:
rmtree(temp_dir)
except PermissionError:
# sleep(3)
# print(pwd())
# print(temp_dir)
cd("..")
# print(pwd())
rmtree(temp_dir)
def prinfo(obj: Any) -> None:
"""
:param obj:
"""
try:
pp({"object": obj, "type": obj})
except:
print("object:\n{}".format(obj))
print("type:\n{}".format(type(obj)))
def pyfilepath(split: bool = False) -> str:
"""
:param split:
:return:
"""
_filepath = path.abspath(stack()[1][1])
if split:
return path.split(_filepath)
return _filepath
def time_funk(funk: F, *args, **kwargs):
"""
:param funk:
:param args:
:param kwargs:
:return:
"""
ti = time()
_ret = funk(*args, **kwargs)
tf = time()
return _ret, tf - ti
def cmp_funks(
f1: F, f2: F, runs: int, *args, **kwargs
) -> Dict[str, Union[str, float, int]]:
"""
:param f1:
:param f2:
:param runs:
:param args:
:param kwargs:
:return:
"""
f1_time = 0
f2_time = 0
for i in range(runs):
r1, f1t = time_funk(f1, *args, **kwargs)
f1_time += f1t
r2, f2t = time_funk(f2, *args, **kwargs)
f2_time += f2t
f1_time_avg: float = f1_time / runs
f2_time_avg: float = f2_time / runs
return {
"f1": str(f1.__name__),
"f2": str(f2.__name__),
"f1-time": f1_time_avg,
"f2-time": f2_time_avg,
"f1/f2": f1_time_avg / f2_time_avg,
"runs": runs,
}
```
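A hedged sketch of the timing helpers and `linked_tmp_dir` defined above; the two compared functions are invented for the example and are not part of the original repository.
```python
# Illustrative usage of time_funk, cmp_funks, timestamp, and linked_tmp_dir.
from pupy.utils import cmp_funks, linked_tmp_dir, time_funk, timestamp

def sum_loop(n):
    total = 0
    for i in range(n):
        total += i
    return total

def sum_builtin(n):
    return sum(range(n))

result, seconds = time_funk(sum_builtin, 1_000_000)
report = cmp_funks(sum_loop, sum_builtin, 5, 1_000_000)
print(timestamp(), report["f1/f2"])  # ratio > 1 means sum_loop was slower on average

with linked_tmp_dir(mkdirs=[("a",), ("a", "b")]) as tmp:
    print(tmp)  # temporary tree containing a/ and a/b/, removed on exit
```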
#### File: pup/scripts/innout.py
```python
__version__ = "0.0.0"
import sys
from argparse import ArgumentParser
from argparse import FileType
from os import listdir
from os import path
# from pupy import ljson #$# load json from filepath (filepath)
# from pupy import lstr #$# load str from filepath (filepath)
# from pupy import sjson #$# save json to filepath (filepath, data)
# from pupy import sstr #$# save str to filepath (filepath, string)
# from pupy import files_gen #$# gen filepaths below directory (dirpath)
# from pupy import dirs_gen #$# gen dirpaths below directory (dirpath)
PARSER = ArgumentParser(description="python scripty.py < stdin > stdout")
_INS = PARSER.add_mutually_exclusive_group()
_INS.add_argument(
"-i",
"--input",
type=FileType("r"),
default=sys.stdin,
metavar="IN",
help="Input file name; or stdin.",
)
_INS.add_argument(
"strinput", nargs="?", type=str, metavar="STDIN", help="Input string via"
)
PARSER.add_argument(
"-o",
"--output",
type=FileType("w"),
help="Output file name (defaults to STDOUT)",
default=sys.stdout,
)
def main():
ARGV = PARSER.parse_args()
input_str = ARGV.strinput or ARGV.input.read()
ARGV.output.write(input_str)
ARGV.output.write("OUTPUT\n")
if __name__ == "__main__":
main()
```
#### File: jessekrubin/pup/sync.py
```python
import asyncio
from concurrent.futures import ThreadPoolExecutor
from os import lstat
from os import mkdir
from time import time
import aiofiles
from pupy import aio
from pupy import sh
from pupy.foreign import dirs_gen
from pupy.foreign import files_gen
async def lstat_async(filepath):
    # os.lstat is a blocking call and not awaitable; run it in the default executor.
    loop = asyncio.get_event_loop()
    return await loop.run_in_executor(None, lstat, filepath)
async def read_in_chunks(file_object, chunk_size=4096):
"""Lazy function (generator) to read a file piece by piece.
    Default chunk size: 4096 bytes."""
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
async def _cp_files(src_filepath, dest_filepath):
# f = open(src_filepath, 'rb')
# async with aiofiles.open('filename', mode='r') as f:
# contents = await f.read()
# with open(dest_filepath, 'wb') as df:
# async for piece in read_in_chunks(f):
# df.write(piece)
async with aiofiles.open(src_filepath, 'rb') as sf:
async with aiofiles.open(dest_filepath, 'wb') as df:
            # Copy in fixed-size chunks until the source file is exhausted.
            while True:
                chunk = await sf.read(2048)
                if not chunk:
                    break
                await df.write(chunk)
# async for piece in read_in_chunks(f):
# df.write(piece)
async def _sync_files(src_filepath, dest_filepath):
print('----------')
print('src', src_filepath)
print('dst', dest_filepath)
# _cp_files(src_filepath, dest_filepath)
print(lstat(src_filepath))
try:
dest_lstat = lstat(dest_filepath)
except FileNotFoundError:
await _cp_files(src_filepath, dest_filepath)
return
src_lstat = lstat(src_filepath)
print(dest_lstat, src_lstat)
async def _sync(src, dest):
dirs = ((dirpath, dirpath.replace(src, dest))
for dirpath in dirs_gen(src, abspath=True))
for srcdirpath, destdirpath in dirs:
try:
mkdir(destdirpath)
except FileExistsError:
pass
filepaths = ((filepath, filepath.replace(src, dest))
for filepath in files_gen(src, abspath=True))
for src_filepath, dest_filepath in filepaths:
print(src_filepath)
await _sync_files(src_filepath, dest_filepath)
def sync(src, dest):
loop = asyncio.get_event_loop()
    p = ThreadPoolExecutor(4) # Create a ThreadPoolExecutor with 4 worker threads
loop.run_until_complete(_sync(src, dest))
def dir_diff(src, dest):
loop = asyncio.get_event_loop()
    p = ThreadPoolExecutor(4) # Create a ThreadPoolExecutor with 4 worker threads
loop.run_until_complete(_sync(src, dest))
# ta = time()
# sync('./docs', './docs_2')
# tb = time()
# print("done uno", tb - ta)
# tc = time()
# sh.LIN.sync('./docs', './docs_3')
# td = time()
# print(td - tc)
if __name__ == "__main__":
pass
# from doctest import testmod
# testmod()
```
#### File: pup/tests/test_fmt.py
```python
from os import path
from pupy import sstr
from pupy.fmt import filesize
from pupy.fmt import nseconds
from pupy.fmt import strip_comments
from pupy.fmt import term_table
TOP_BABY_NAMES = [
"Aaliyah",
"Aaron",
"Abigail",
"Adam",
"Addison",
"Adeline",
"Adrian",
"Aiden",
"Alexa",
"Alexander",
"Alice",
"Allison",
"Amelia",
"Andrew",
"Angel",
"Anna",
"Anthony",
"Aria",
"Ariana",
"Arianna",
"Asher",
"Aubree",
"Aubrey",
"Audrey",
"Aurora",
"Austin",
"Autumn",
"Ava",
"Avery",
"Ayden",
"Bella",
"Benjamin",
"Brayden",
"Brooklyn",
"Bryson",
"Caleb",
"Cameron",
"Camila",
"Caroline",
"Carson",
"Carter",
"Charles",
"Charlotte",
"Chase",
"Chloe",
"Christian",
"Christopher",
"Claire",
"Clara",
"Colton",
"Connor",
"Cooper",
"Cora",
"Daniel",
"David",
"Dominic",
"Dylan",
"Easton",
"Eleanor",
"Elena",
"Eli",
"Eliana",
"Elias",
"Elijah",
"Elizabeth",
"Ella",
"Ellie",
"Emilia",
"Emily",
"Emma",
"Ethan",
"Eva",
"Evan",
"Evelyn",
"Everly",
"Ezekiel",
"Ezra",
"Gabriel",
"Gabriella",
"Gavin",
"Genesis",
"Gianna",
"Grace",
"Grayson",
"Greyson",
"Hailey",
"Hannah",
"Harper",
"Hazel",
"Henry",
"Hudson",
"Hunter",
"Ian",
"Isaac",
"Isabella",
"Isabelle",
"Isaiah",
"Jace",
"Jack",
"Jackson",
"Jacob",
"James",
"Jameson",
"Jason",
"Jaxon",
"Jaxson",
"Jayden",
"Jeremiah",
"John",
"Jonathan",
"Jordan",
"Jose",
"Joseph",
"Joshua",
"Josiah",
"Julia",
"Julian",
"Kayden",
"Kaylee",
"Kennedy",
"Kinsley",
"Landon",
"Layla",
"Leah",
"Leo",
"Leonardo",
"Levi",
"Liam",
"Lillian",
"Lily",
"Lincoln",
"Logan",
"Lucas",
"Lucy",
"Luke",
"Luna",
"Lydia",
"Mackenzie",
"Madeline",
"Madelyn",
"Madison",
"Mason",
"Mateo",
"Matthew",
"Maverick",
"Maya",
"Melanie",
"Mia",
"Michael",
"Mila",
"Naomi",
"Natalie",
"Nathan",
"Nevaeh",
"Nicholas",
"Noah",
"Nolan",
"Nora",
"Nova",
"Oliver",
"Olivia",
"Owen",
"Paisley",
"Parker",
"Penelope",
"Peyton",
"Piper",
"Quinn",
"Reagan",
"Riley",
"Robert",
"Roman",
"Ruby",
"Ryan",
"Sadie",
"Samantha",
"Samuel",
"Santiago",
"Sarah",
"Savannah",
"Sawyer",
"Scarlett",
"Sebastian",
"Serenity",
"Skylar",
"Sofia",
"Sophia",
"Stella",
"Theodore",
"Thomas",
"Valentina",
"Victoria",
"Violet",
"Vivian",
"William",
"Willow",
"Wyatt",
"Xavier",
"Zoe",
"Zoey",
]
def test_term_table_col_wise_1():
expected = [
"Aaliyah",
"Carter",
"Genesis",
"Kinsley",
"Olivia",
"Aaron",
"Charles",
"Gianna",
"Landon",
"Owen",
"Abigail",
"Charlotte",
"Grace",
"Layla",
"Paisley",
"Adam",
"Chase",
"Grayson",
"Leah",
"Parker",
"Addison",
"Chloe",
"Greyson",
"Leo",
"Penelope",
"Adeline",
"Christian",
"Hailey",
"Leonardo",
"Peyton",
"Adrian",
"Christopher",
"Hannah",
"Levi",
"Piper",
"Aiden",
"Claire",
"Harper",
"Liam",
"Quinn",
"Alexa",
"Clara",
"Hazel",
"Lillian",
"Reagan",
"Alexander",
"Colton",
"Henry",
"Lily",
"Riley",
"Alice",
"Connor",
"Hudson",
"Lincoln",
"Robert",
"Allison",
"Cooper",
"Hunter",
"Logan",
"Roman",
"Amelia",
"Cora",
"Ian",
"Lucas",
"Ruby",
"Andrew",
"Daniel",
"Isaac",
"Lucy",
"Ryan",
"Angel",
"David",
"Isabella",
"Luke",
"Sadie",
"Anna",
"Dominic",
"Isabelle",
"Luna",
"Samantha",
"Anthony",
"Dylan",
"Isaiah",
"Lydia",
"Samuel",
"Aria",
"Easton",
"Jace",
"Mackenzie",
"Santiago",
"Ariana",
"Eleanor",
"Jack",
"Madeline",
"Sarah",
"Arianna",
"Elena",
"Jackson",
"Madelyn",
"Savannah",
"Asher",
"Eli",
"Jacob",
"Madison",
"Sawyer",
"Aubree",
"Eliana",
"James",
"Mason",
"Scarlett",
"Aubrey",
"Elias",
"Jameson",
"Mateo",
"Sebastian",
"Audrey",
"Elijah",
"Jason",
"Matthew",
"Serenity",
"Aurora",
"Elizabeth",
"Jaxon",
"Maverick",
"Skylar",
"Austin",
"Ella",
"Jaxson",
"Maya",
"Sofia",
"Autumn",
"Ellie",
"Jayden",
"Melanie",
"Sophia",
"Ava",
"Emilia",
"Jeremiah",
"Mia",
"Stella",
"Avery",
"Emily",
"John",
"Michael",
"Theodore",
"Ayden",
"Emma",
"Jonathan",
"Mila",
"Thomas",
"Bella",
"Ethan",
"Jordan",
"Naomi",
"Valentina",
"Benjamin",
"Eva",
"Jose",
"Natalie",
"Victoria",
"Brayden",
"Evan",
"Joseph",
"Nathan",
"Violet",
"Brooklyn",
"Evelyn",
"Joshua",
"Nevaeh",
"Vivian",
"Bryson",
"Everly",
"Josiah",
"Nicholas",
"William",
"Caleb",
"Ezekiel",
"Julia",
"Noah",
"Willow",
"Cameron",
"Ezra",
"Julian",
"Nolan",
"Wyatt",
"Camila",
"Gabriel",
"Kayden",
"Nora",
"Xavier",
"Caroline",
"Gabriella",
"Kaylee",
"Nova",
"Zoe",
"Carson",
"Gavin",
"Kennedy",
"Oliver",
"Zoey",
]
line_vals = []
for l in term_table(TOP_BABY_NAMES, row_wise=False):
line_vals.extend(s for s in l.split(" ") if s != "~" and s != "")
assert expected == line_vals
def test_term_table_row_wise_1():
expected = [
"Aaliyah",
"Aaron",
"Abigail",
"Adam",
"Addison",
"Adeline",
"Adrian",
"Aiden",
"Alexa",
"Alexander",
"Alice",
"Allison",
"Amelia",
"Andrew",
"Angel",
"Anna",
"Anthony",
"Aria",
"Ariana",
"Arianna",
"Asher",
"Aubree",
"Aubrey",
"Audrey",
"Aurora",
"Austin",
"Autumn",
"Ava",
"Avery",
"Ayden",
"Bella",
"Benjamin",
"Brayden",
"Brooklyn",
"Bryson",
"Caleb",
"Cameron",
"Camila",
"Caroline",
"Carson",
"Carter",
"Charles",
"Charlotte",
"Chase",
"Chloe",
"Christian",
"Christopher",
"Claire",
"Clara",
"Colton",
"Connor",
"Cooper",
"Cora",
"Daniel",
"David",
"Dominic",
"Dylan",
"Easton",
"Eleanor",
"Elena",
"Eli",
"Eliana",
"Elias",
"Elijah",
"Elizabeth",
"Ella",
"Ellie",
"Emilia",
"Emily",
"Emma",
"Ethan",
"Eva",
"Evan",
"Evelyn",
"Everly",
"Ezekiel",
"Ezra",
"Gabriel",
"Gabriella",
"Gavin",
"Genesis",
"Gianna",
"Grace",
"Grayson",
"Greyson",
"Hailey",
"Hannah",
"Harper",
"Hazel",
"Henry",
"Hudson",
"Hunter",
"Ian",
"Isaac",
"Isabella",
"Isabelle",
"Isaiah",
"Jace",
"Jack",
"Jackson",
"Jacob",
"James",
"Jameson",
"Jason",
"Jaxon",
"Jaxson",
"Jayden",
"Jeremiah",
"John",
"Jonathan",
"Jordan",
"Jose",
"Joseph",
"Joshua",
"Josiah",
"Julia",
"Julian",
"Kayden",
"Kaylee",
"Kennedy",
"Kinsley",
"Landon",
"Layla",
"Leah",
"Leo",
"Leonardo",
"Levi",
"Liam",
"Lillian",
"Lily",
"Lincoln",
"Logan",
"Lucas",
"Lucy",
"Luke",
"Luna",
"Lydia",
"Mackenzie",
"Madeline",
"Madelyn",
"Madison",
"Mason",
"Mateo",
"Matthew",
"Maverick",
"Maya",
"Melanie",
"Mia",
"Michael",
"Mila",
"Naomi",
"Natalie",
"Nathan",
"Nevaeh",
"Nicholas",
"Noah",
"Nolan",
"Nora",
"Nova",
"Oliver",
"Olivia",
"Owen",
"Paisley",
"Parker",
"Penelope",
"Peyton",
"Piper",
"Quinn",
"Reagan",
"Riley",
"Robert",
"Roman",
"Ruby",
"Ryan",
"Sadie",
"Samantha",
"Samuel",
"Santiago",
"Sarah",
"Savannah",
"Sawyer",
"Scarlett",
"Sebastian",
"Serenity",
"Skylar",
"Sofia",
"Sophia",
"Stella",
"Theodore",
"Thomas",
"Valentina",
"Victoria",
"Violet",
"Vivian",
"William",
"Willow",
"Wyatt",
"Xavier",
"Zoe",
"Zoey",
]
line_vals = []
for l in term_table(TOP_BABY_NAMES, row_wise=True):
line_vals.extend(s for s in l.split(" ") if s != "~" and s != "")
assert line_vals == expected
def test_term_table_row_wise_2():
expected = [
"Aaliyah",
"Aaron",
"Abigail",
"Adam",
"Addison",
"Adeline",
"Adrian",
"Aiden",
"Alexa",
"Alexander",
"Alice",
"Allison",
"Amelia",
"Andrew",
"Angel",
"Anna",
"Anthony",
"Aria",
"Ariana",
"Arianna",
"Asher",
"Aubree",
"Aubrey",
"Audrey",
"Aurora",
"Austin",
"Autumn",
"Ava",
"Avery",
"Ayden",
"Bella",
"Benjamin",
"Brayden",
"Brooklyn",
"Bryson",
"Caleb",
"Cameron",
]
line_vals = []
for l in term_table(TOP_BABY_NAMES[:37], row_wise=True):
line_vals.extend(s for s in l.split(" ") if s != "~" and s != "")
assert expected == line_vals
def test_term_table_col_wise_2():
expected = [
"Aaliyah",
"Alexa",
"Anthony",
"Aurora",
"Brayden",
"Aaron",
"Alexander",
"Aria",
"Austin",
"Brooklyn",
"Abigail",
"Alice",
"Ariana",
"Autumn",
"Bryson",
"Adam",
"Allison",
"Arianna",
"Ava",
"Caleb",
"Addison",
"Amelia",
"Asher",
"Avery",
"Cameron",
"Adeline",
"Andrew",
"Aubree",
"Ayden",
"Adrian",
"Angel",
"Aubrey",
"Bella",
"Aiden",
"Anna",
"Audrey",
"Benjamin",
]
line_vals = []
for l in term_table(TOP_BABY_NAMES[:37], row_wise=False):
line_vals.extend(s for s in l.split(" ") if s != "~" and s != "")
assert line_vals == expected
def test_ftime_0seconds():
"""
"""
ti = 5.4321
tf = 5.4321
assert nseconds(ti, tf) == "0 sec"
def test_ftime_seconds():
"""
"""
ti = 1.2345
tf = 5.4321
assert nseconds(ti, tf) == "4.198 sec"
def test_ftime_milliseconds():
"""
"""
ti = 1.2345 * (10 ** (-3))
tf = 5.4321 * (10 ** (-3))
assert nseconds(ti, tf) == "4.198 ms"
def test_ftime_microseconds():
"""
"""
ti = 1.2345 * (10 ** (-6))
tf = 5.4321 * (10 ** (-6))
assert nseconds(ti, tf) == "4.198 μs"
def test_ftime_nanoseconds():
"""
"""
ti = 1.2345 * (10 ** (-9))
tf = 5.4321 * (10 ** (-9))
assert nseconds(ti, tf) == "4.198 ns"
def test_filesize(tmpdir):
"""
"""
filepath = path.join(tmpdir, "somefile.txt")
sstr(path.join(tmpdir, "somefile.txt"), "12342312312")
assert filesize(filepath) == "11.0 bytes"
something = """
# this func does a thing
def thisfunc():
pass # and here we have a comment
# another comment
a = 2.3+4
b = 'pood'
"""
something_no_comments: str = """
def thisfunc():
pass
a = 2.3+4
b = 'pood'
"""
def test_strip_comments():
no_comments = strip_comments(something)
assert no_comments == something_no_comments
```
#### File: pup/tests/test_linked_tmp_dir.py
```python
import os
from os import chdir
from os import path
from os import sep
import pupy.utils
from pupy import dirs_gen
from pupy import files_gen
PWD = path.split(path.realpath(__file__))[0]
def test_mkdirs():
dirs = [("something",), ("something", "else")]
expected = [path.join(*route) for route in dirs]
with pupy.utils.linked_tmp_dir(dir=PWD, mkdirs=dirs) as tmpdir:
dirs = sorted(
dirpath
for dirpath in (
tmp_subdir.replace(tmpdir, "").strip(sep)
for tmp_subdir in dirs_gen(tmpdir)
)
if dirpath != ""
)
assert set(dirs) == set(expected)
assert all(not path.exists(d) for d in dirs)
def test_linkin():
tdata = [
["dummy_dir", "a_file.txt"],
["dummy_dir", "b_file.txt"],
["dummy_dir", "a_dir", "c_file.txt"],
["dummy_dir", "a_dir", "a_a_dir", "d_file.txt"],
["dummy_dir", "b_dir", "e_file.txt"],
["dummy_dir", "b_dir", "f_file.txt"],
]
chdir(PWD)
lnfiles = [(path.join(*route), path.join(PWD, *route)) for route in tdata]
print(lnfiles)
dirs = [path.join(PWD, *route[:-1]) for route in tdata]
for thingy in set(dirs):
os.makedirs(thingy, exist_ok=True)
print(dirs)
# for uno, dos in lnfiles:
# touch(uno)
tmp_dirpath = None
with pupy.utils.linked_tmp_dir(lnfiles=lnfiles) as tmpdir:
tmp_dirpath = tmpdir
linkedfiles = sorted(
dirpath
for dirpath in (
tmp_subdir.replace(tmpdir, "").strip(sep)
for tmp_subdir in files_gen(tmpdir)
)
if dirpath != ""
)
# print(list(files_gen(tmpdir)))
# print(tmpdir)
# print(os.listdir(tmpdir))
lnfiles_links = [link for link, target in lnfiles]
assert set(lnfiles_links) == set(linkedfiles)
assert not path.exists(tmp_dirpath)
for link, target in lnfiles:
assert path.exists(target)
# try:
# rmtree(path.join(PWD, 'dummy_dir'))
# except:
# pass
if __name__ == "__main__":
pass
```
#### File: pup/tests/test_pytriples_gen.py
```python
from pupy.maths import pytriple_gen
lt100 = {
(3, 4, 5),
(5, 12, 13),
(8, 15, 17),
(7, 24, 25),
(20, 21, 29),
(9, 40, 41),
(12, 35, 37),
(11, 60, 61),
(28, 45, 53),
(33, 56, 65),
(13, 84, 85),
(16, 63, 65),
(48, 55, 73),
(39, 80, 89),
(36, 77, 85),
(65, 72, 97),
}
gt100_lt300 = {
(20, 99, 101),
(60, 91, 109),
(15, 112, 113),
(44, 117, 125),
(88, 105, 137),
(17, 144, 145),
(24, 143, 145),
(51, 140, 149),
(85, 132, 157),
(119, 120, 169),
(52, 165, 173),
(19, 180, 181),
(57, 176, 185),
(104, 153, 185),
(95, 168, 193),
(28, 195, 197),
(84, 187, 205),
(133, 156, 205),
(21, 220, 221),
(140, 171, 221),
(60, 221, 229),
(105, 208, 233),
(120, 209, 241),
(32, 255, 257),
(23, 264, 265),
(96, 247, 265),
(69, 260, 269),
(115, 252, 277),
(160, 231, 281),
(161, 240, 289),
(68, 285, 293),
}
lt300 = set.union(lt100, gt100_lt300)
def test_pytriplets_c_lt100():
"""Testing pytriples with c values less than 100"""
assert {t for t in pytriple_gen(100)} == lt100
def test_pytriplets_c_lt300():
"""Testing pytriples with c values less than 100"""
p_set = {t for t in pytriple_gen(300)}
assert lt300 == p_set
def test_pytriplets_2_c_lt100():
"""Testing pytriples with c values less than 100"""
assert lt100 == set(pytriple_gen(100))
def test_pytriplets_2_c_lt300():
"""Testing pytriples with c values less than 100"""
vals = set()
gen = pytriple_gen(300)
while len(vals) < len(lt300):
vals.add(next(gen))
assert vals == lt300
# test_pytriplets_2_c_lt100()
# test_pytriplets_2_c_lt300()
```
#### File: pup/tests/test_sh.py
```python
import os
from os import mkdir
from os import path
from os import sep
import pytest
from pupy import files_gen
from pupy.sh import cd
from pupy.sh import cp
from pupy.sh import export
from pupy.sh import mv
from pupy.sh import rm
from pupy.sh import touch
PWD = path.split(path.realpath(__file__))[0]
def test_mv_uno(tmpdir):
filepath_parts = [
("dir", "file1.txt"),
("dir", "file2.txt"),
("dir", "file3.txt"),
("dir", "dir2", "file1.txt"),
("dir", "dir2", "file2.txt"),
("dir", "dir2", "file3.txt"),
("dir", "dir2a", "file1.txt"),
("dir", "dir2a", "file2.txt"),
("dir", "dir2a", "file3.txt"),
]
for f in filepath_parts:
filepath = path.join(tmpdir, *f)
touch(filepath)
files = list(sorted(files_gen(tmpdir)))
print(files)
cd(tmpdir)
mkdir("out")
mv("dir", "out")
files = list(
sorted((e.replace(str(tmpdir), "").strip(sep) for e in files_gen(tmpdir)))
)
print(files)
expected = set(path.join("out", *f) for f in filepath_parts)
got = set(files)
assert expected == got
def test_mv_multi(tmpdir):
filepath_parts = [
("dir", "file1.txt"),
("dir", "file2.txt"),
("dir", "file3.txt"),
("dir", "dir2", "file1.txt"),
("dir", "dir2", "file2.txt"),
("dir", "dir2", "file3.txt"),
("dir", "dir2a", "file1.txt"),
("dir", "dir2a", "file2.txt"),
("dir", "dir2a", "file3.txt"),
]
for f in filepath_parts:
filepath = path.join(tmpdir, *f)
touch(filepath)
files = list(sorted(files_gen(tmpdir)))
# print(files)
cd(tmpdir)
mkdir("out")
mv("dir/*", "out")
files = list(
sorted((e.replace(str(tmpdir), "").strip(sep) for e in files_gen(tmpdir)))
)
# print(files)
expected = set(
path.join("out", *f).replace(sep + "dir" + sep, sep) for f in filepath_parts
)
got = set(files)
# print(expected)
# print(got)
assert expected == got
def test_export_single_key():
key = "HERM=pood"
from os import environ
assert "HERM" not in environ
export(key)
assert "HERM" in environ
assert environ["HERM"] == "pood"
del environ["HERM"]
def test_export_key_val():
key, val = "HERM", "pood"
from os import environ
assert "HERM" not in environ
export(key, val)
assert "HERM" in environ
@pytest.fixture(
params=[
"file.txt",
path.join("dir", "file.txt"),
path.join("dir1", "dir2", "file.txt"),
path.join("dir1", "dir2", "dir3", "file.txt"),
path.join("dir1", "dir2", "dir3", "dir4", "file.txt"),
]
)
def dummy_filepath(request):
return request.param
def test_touch(dummy_filepath, tmpdir):
fdpath = path.join(tmpdir, dummy_filepath)
assert not path.exists(fdpath)
touch(fdpath)
assert path.exists(fdpath)
def test_rm_multi(tmpdir):
os.chdir(tmpdir)
test_files = ["q", "w", "e", "r", "t", "y", "u", "i", "o", "a", "s", "d"]
mkdir("test_env")
cd("test_env")
test_files = [x + ".txt" for x in test_files]
for x in test_files:
with open(x, "w") as f:
f.write(" ")
expected = []
cd(tmpdir)
rm("test_env/*.txt")
actual = os.listdir("test_env")
assert expected == actual
def test_rm_para(tmpdir):
os.chdir(tmpdir)
test_files = ["q", "w", "e"]
mkdir("test_env")
cd("test_env")
test_files = [x + ".txt" for x in test_files]
for x in test_files:
with open(x, "w") as f:
f.write(" ")
expected = []
cd(tmpdir)
actual = os.listdir("test_env")
rm("test_env", r=True)
assert not os.path.exists("test_env")
def test_cp(tmpdir):
os.chdir(tmpdir)
test_files = ["q", "w", "e", "r", "t", "y", "u", "i", "o", "a", "s", "d"]
mkdir("test_env")
cd("test_env")
test_files = [x + ".txt" for x in test_files]
for x in test_files:
with open(x, "w") as f:
f.write(" ")
cd(tmpdir)
os.mkdir("cp_dir")
print(os.listdir("."))
cp("test_env/*.txt", "cp_dir")
actual = os.listdir("cp_dir")
assert set(test_files) == set(actual)
def test_cp_dir(tmpdir):
os.chdir(tmpdir)
test_files = ["q", "w", "e", "r", "t", "y", "u", "i", "o", "a", "s", "d"]
mkdir("test_env")
cd("test_env")
test_files = [x + ".txt" for x in test_files]
for x in test_files:
with open(x, "w") as f:
f.write(" ")
cd(tmpdir)
print(os.listdir("."))
cp("test_env", "cp_dir", r=True)
actual = os.listdir("cp_dir")
assert set(test_files) == set(actual)
```
#### File: pup/tests/test_trigon.py
```python
from pupy.maths import Trigon
from pupy.maths import Vuple
from pupy.maths import pytriple_gen
class TestTrigon(object):
def test_triangle_area_half(self):
"""
"""
t2 = [(1, 0), (0, 1), (0, 0)]
assert 0.5 == Trigon(*t2).area()
def test_origin_in_triangle(self):
"""
"""
pts = [(-340, 495), (-153, -910), (835, -947)]
tri = Trigon.from_points(pts)
assert (0, 0) in tri
assert tri.contains_origin()
def test_point_on_perimeter(self):
"""
"""
pts = [(-340, 495), (-153, -910), (835, -947)]
tri = Trigon.from_points(pts)
assert tri.is_perimeter_point(pts[0])
def test_origin_not_in_triangle(self):
"""
"""
tri = Trigon((-175, 41), (-421, -714), (574, -645))
assert Vuple((0, 0)) not in tri
assert not tri.contains_origin()
class TestPytriplesGen(object):
    # primitives less than 100
lt100 = {
(3, 4, 5),
(5, 12, 13),
(8, 15, 17),
(7, 24, 25),
(20, 21, 29),
(9, 40, 41),
(12, 35, 37),
(11, 60, 61),
(28, 45, 53),
(33, 56, 65),
(13, 84, 85),
(16, 63, 65),
(48, 55, 73),
(39, 80, 89),
(36, 77, 85),
(65, 72, 97),
}
lt300 = {
(3, 4, 5),
(5, 12, 13),
(8, 15, 17),
(7, 24, 25),
(20, 21, 29),
(9, 40, 41),
(12, 35, 37),
(11, 60, 61),
(28, 45, 53),
(33, 56, 65),
(13, 84, 85),
(16, 63, 65),
(48, 55, 73),
(39, 80, 89),
(36, 77, 85),
(65, 72, 97),
(20, 99, 101),
(60, 91, 109),
(15, 112, 113),
(44, 117, 125),
(88, 105, 137),
(17, 144, 145),
(24, 143, 145),
(51, 140, 149),
(85, 132, 157),
(119, 120, 169),
(52, 165, 173),
(19, 180, 181),
(57, 176, 185),
(104, 153, 185),
(95, 168, 193),
(28, 195, 197),
(84, 187, 205),
(133, 156, 205),
(21, 220, 221),
(140, 171, 221),
(60, 221, 229),
(105, 208, 233),
(120, 209, 241),
(32, 255, 257),
(23, 264, 265),
(96, 247, 265),
(69, 260, 269),
(115, 252, 277),
(160, 231, 281),
(161, 240, 289),
(68, 285, 293),
}
def test_pytriplets_c_lt100(self):
"""
        Testing the primitive pytriplet generator
"""
p_set = {t for t in pytriple_gen(100)}
assert self.lt100 == p_set
def test_pytriplets_c_lt300(self):
"""
"""
p_set = {t for t in pytriple_gen(300)}
assert self.lt300 == p_set
```
#### File: pup/tests/test_utils.py
```python
from os import path
from pupy.utils import pyfilepath
def test_pyfilepath_filepath():
a = pyfilepath()
assert a.endswith("test_utils.py")
def test_pyfilepath_split():
filepath = pyfilepath()
dirpath, filename = pyfilepath(split=True)
assert path.join(dirpath, filename) == filepath
```
#### File: pup/tests/test_vuple.py
```python
from __future__ import division
from pupy.maths import Vuple
class Test_Vuple(object):
def test_gt(self):
"""
"""
a = Vuple((12, 3))
b = Vuple((7, 5))
assert a > b
def test_equal(self):
"""
"""
a = Vuple((12, 3))
b = Vuple((12, 3))
assert a == b
b = Vuple((3, 4))
assert a != b
def test_add(self):
"""
"""
a = Vuple((12, 3))
b = Vuple((7, 5))
assert Vuple((19, 8)) == a + b
def test_add_scalar(self):
"""
"""
a = Vuple((12, 3))
assert Vuple((16, 7)) == a + 4
def test_sub(self):
"""
"""
a = Vuple((12, 3))
b = Vuple((7, 5))
assert Vuple((5, -2)) == a - b
assert Vuple((-5, 2)) == b - a
def test_mag(self):
"""
"""
assert 5.0 == Vuple.mag((3, 4))
assert 5 == Vuple((3, 4)).get_mag()
def test_mul_scalar(self):
"""
"""
v = Vuple((3, 4))
v = v * 2
assert (6, 8) == v
def test_imul_scalar(self):
"""
"""
v = Vuple((3, 4))
v *= 2
assert (6, 8) == v
def test_div_scalar(self):
"""
"""
v = Vuple((6, 8)) / 2
assert (3, 4) == v
def test_idiv_scalar(self):
"""
"""
v = Vuple((6, 8))
v /= 2
assert (3, 4) == v
def test_unit_vuple(self):
"""
"""
v = Vuple((3, 4))
assert (0.6, 0.8) == Vuple.unit_vuple(v)
v = Vuple((3, 4))
assert (0.6, 0.8) == v.normalize()
def test_angle_radians(self):
"""
"""
v1 = Vuple((10, 10))
v2 = Vuple((1, 0))
assert 180 == int(round((4 * Vuple.angle(v1, v2))))
def test_angle_degrees(self):
"""
"""
v1 = Vuple((10, 10))
v2 = Vuple((1, 0))
assert 45 == round(Vuple.angle(v1, v2, radians=False))
```
|
{
"source": "jesselangdon/steamm",
"score": 2
}
|
#### File: STeAMM/lib/gdal2xyz.py
```python
try:
from osgeo import gdal
except ImportError:
import gdal
import sys
try:
import numpy as Numeric
except ImportError:
import Numeric
def main(srcfile, dstfile, arg = '-csv' ):
srcwin = None
skip = 1
delim = ' '
band_nums = [1]
# Open source file.
srcds = gdal.Open(srcfile)
if srcds is None:
print('Could not open %s.' % srcfile)
sys.exit( 1 )
bands = []
for band_num in band_nums:
band = srcds.GetRasterBand(band_num)
if band is None:
print('Could not get band %d' % band_num)
sys.exit( 1 )
bands.append(band)
gt = srcds.GetGeoTransform()
# Collect information on all the source files.
if srcwin is None:
srcwin = (0,0,srcds.RasterXSize,srcds.RasterYSize)
# Open the output file.
if dstfile is not None:
dst_fh = open(dstfile,'wt')
else:
dst_fh = sys.stdout
band_format = (("%g" + delim) * len(bands)).rstrip(delim) + '\n'
# Setup an appropriate print format.
if abs(gt[0]) < 180 and abs(gt[3]) < 180 \
and abs(srcds.RasterXSize * gt[1]) < 180 \
and abs(srcds.RasterYSize * gt[5]) < 180:
format = '%.10g' + delim + '%.10g' + delim + '%s'
else:
format = '%.3f' + delim + '%.3f' + delim + '%s'
# Loop emitting data.
for y in range(srcwin[1],srcwin[1]+srcwin[3],skip):
data = []
for band in bands:
band_data = band.ReadAsArray( srcwin[0], y, srcwin[2], 1 )
band_data = Numeric.reshape( band_data, (srcwin[2],) )
data.append(band_data)
for x_i in range(0,srcwin[2],skip):
x = x_i + srcwin[0]
geo_x = gt[0] + (x+0.5) * gt[1] + (y+0.5) * gt[2]
geo_y = gt[3] + (x+0.5) * gt[4] + (y+0.5) * gt[5]
x_i_data = []
for i in range(len(bands)):
x_i_data.append(data[i][x_i])
band_str = band_format % tuple(x_i_data)
line = format % (float(geo_x),float(geo_y), band_str)
dst_fh.write( line )
return
```
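`main` reads band 1 of the source raster and writes one space-delimited `x y value` line per pixel. A hypothetical call is sketched below; the import path and file names are placeholders, not from the original repository.
```python
# Hypothetical call into the module above; paths are placeholders.
import gdal2xyz  # assumes lib/ is on sys.path

gdal2xyz.main("dem.tif", "dem.xyz")  # dumps band 1 of dem.tif as "x y value" lines
```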
|
{
"source": "jesselegg/python-social-auth",
"score": 2
}
|
#### File: social/tests/pipeline.py
```python
from social.pipeline.partial import partial
def ask_for_password(strategy, *args, **kwargs):
if strategy.session_get('password'):
return {'password': strategy.session_get('password')}
else:
return strategy.redirect(strategy.build_absolute_uri('/password'))
@partial
def ask_for_slug(strategy, *args, **kwargs):
if strategy.session_get('slug'):
return {'slug': strategy.session_get('slug')}
else:
return strategy.redirect(strategy.build_absolute_uri('/slug'))
def set_password(strategy, user, *args, **kwargs):
user.set_password(kwargs['password'])
def set_slug(strategy, user, *args, **kwargs):
user.slug = kwargs['slug']
def remove_user(strategy, user, *args, **kwargs):
return {'user': None}
@partial
def set_user_from_kwargs(strategy, *args, **kwargs):
if strategy.session_get('attribute'):
kwargs['user'].id
else:
return strategy.redirect(strategy.build_absolute_uri('/attribute'))
@partial
def set_user_from_args(strategy, user, *args, **kwargs):
if strategy.session_get('attribute'):
user.id
else:
return strategy.redirect(strategy.build_absolute_uri('/attribute'))
```
|
{
"source": "jesseli2002/python-skyfield",
"score": 2
}
|
#### File: skyfield/tests/test_constellations.py
```python
from skyfield.constellationlib import load_constellation_lookup
def test_constellations():
lookup = load_constellation_lookup()
assert lookup(24, -90) == 'Oct'
assert lookup(0, 0) == 'Psc'
assert lookup(4.65, 0) == 'Ori'
assert lookup(10, 90) == 'UMi'
assert (lookup([4.65, 10], [0, 90]) == ['Ori', 'UMi']).all()
```
|
{
"source": "JesseLivezey/DynamicalComponentsAnalysis",
"score": 3
}
|
#### File: DynamicalComponentsAnalysis/dca/cov_util.py
```python
import logging
import numpy as np
import scipy as sp
import collections
import torch
import functools
from numpy.lib.stride_tricks import as_strided
from sklearn.utils.extmath import randomized_svd
from sklearn.utils import check_random_state
logging.basicConfig()
def form_lag_matrix(X, T, stride=1, stride_tricks=True, rng=None, writeable=False):
"""Form the data matrix with `T` lags.
Parameters
----------
X : ndarray (n_time, N)
Timeseries with no lags.
T : int
Number of lags.
stride : int or float
If stride is an `int`, it defines the stride between lagged samples used
to estimate the cross covariance matrix. Setting stride > 1 can speed up the
calculation, but may lead to a loss in accuracy. Setting stride to a `float`
        greater than 0 and less than 1 will randomly subselect samples.
rng : NumPy random state
Only used if `stride` is a float.
stride_tricks : bool
Whether to use numpy stride tricks to form the lagged matrix or create
        a new array. Using numpy stride tricks can lower memory usage, especially for
large `T`. If `False`, a new array is created.
writeable : bool
For testing. You should not need to set this to True. This function uses stride tricks
to form the lag matrix which means writing to the array will have confusing behavior.
If `stride_tricks` is `False`, this flag does nothing.
Returns
-------
X_with_lags : ndarray (n_lagged_time, N * T)
Timeseries with lags.
"""
if not isinstance(stride, int) or stride < 1:
if not isinstance(stride, float) or stride <= 0. or stride >= 1.:
raise ValueError('stride should be an int and greater than or equal to 1 or a float ' +
'between 0 and 1.')
N = X.shape[1]
frac = None
if isinstance(stride, float):
frac = stride
stride = 1
n_lagged_samples = (len(X) - T) // stride + 1
if n_lagged_samples < 1:
raise ValueError('T is too long for a timeseries of length {}.'.format(len(X)))
if stride_tricks:
X = np.asarray(X, dtype=float, order='C')
shape = (n_lagged_samples, N * T)
strides = (X.strides[0] * stride,) + (X.strides[-1],)
X_with_lags = as_strided(X, shape=shape, strides=strides, writeable=writeable)
else:
X_with_lags = np.zeros((n_lagged_samples, T * N))
for i in range(n_lagged_samples):
X_with_lags[i, :] = X[i * stride:i * stride + T, :].flatten()
if frac is not None:
rng = check_random_state(rng)
idxs = np.sort(rng.choice(n_lagged_samples, size=int(np.ceil(n_lagged_samples * frac)),
replace=False))
X_with_lags = X_with_lags[idxs]
return X_with_lags
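def _example_form_lag_matrix():
    # Illustrative usage sketch (helper added for exposition; not part of the
    # original module). Each row of the lag matrix concatenates T consecutive
    # samples, so a (100, 5) timeseries with T=3 and stride=1 gives a
    # ((100 - 3) // 1 + 1, 5 * 3) = (98, 15) array.
    X = np.random.randn(100, 5)
    X_with_lags = form_lag_matrix(X, T=3)
    assert X_with_lags.shape == (98, 15)
    assert np.allclose(X_with_lags[0], X[:3].ravel())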
def rectify_spectrum(cov, epsilon=1e-6, logger=None):
"""Rectify the spectrum of a covariance matrix.
Parameters
----------
cov : ndarray
Covariance matrix
epsilon : float
Minimum eigenvalue for the rectified spectrum.
    logger : logging.Logger, optional
        Logger used to report when the spectrum needs to be rectified.
"""
eigvals = sp.linalg.eigvalsh(cov)
n_neg = np.sum(eigvals <= 0.)
if n_neg > 0:
cov += (-np.min(eigvals) + epsilon) * np.eye(cov.shape[0])
if logger is not None:
string = 'Non-PSD matrix, {} of {} eigenvalues were not positive.'
logger.info(string.format(n_neg, eigvals.size))
def toeplitzify(cov, T, N, symmetrize=True):
"""Make a matrix block-Toeplitz by averaging along the block diagonal.
Parameters
----------
cov : ndarray (T*N, T*N)
Covariance matrix to make block toeplitz.
T : int
Number of blocks.
N : int
Number of features per block.
symmetrize : bool
Whether to ensure that the whole matrix is symmetric.
Optional (default=True).
Returns
-------
cov_toep : ndarray (T*N, T*N)
Toeplitzified matrix.
"""
cov_toep = np.zeros((T * N, T * N))
for delta_t in range(T):
to_avg_lower = np.zeros((T - delta_t, N, N))
to_avg_upper = np.zeros((T - delta_t, N, N))
for i in range(T - delta_t):
to_avg_lower[i] = cov[(delta_t + i) * N:(delta_t + i + 1) * N, i * N:(i + 1) * N]
to_avg_upper[i] = cov[i * N:(i + 1) * N, (delta_t + i) * N:(delta_t + i + 1) * N]
avg_lower = np.mean(to_avg_lower, axis=0)
avg_upper = np.mean(to_avg_upper, axis=0)
if symmetrize:
avg_lower = 0.5 * (avg_lower + avg_upper.T)
avg_upper = avg_lower.T
for i in range(T - delta_t):
cov_toep[(delta_t + i) * N:(delta_t + i + 1) * N, i * N:(i + 1) * N] = avg_lower
cov_toep[i * N:(i + 1) * N, (delta_t + i) * N:(delta_t + i + 1) * N] = avg_upper
return cov_toep
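def _example_toeplitzify():
    # Illustrative sketch (helper added for exposition; not part of the original
    # module). After averaging along the block diagonals, every block at a fixed
    # lag |t1 - t2| is identical, which enforces temporal stationarity.
    T, N = 4, 3
    A = np.random.randn(T * N, T * N)
    cov = A.dot(A.T)  # a symmetric matrix to Toeplitzify
    cov_toep = toeplitzify(cov, T, N)
    assert np.allclose(cov_toep[:N, :N], cov_toep[N:2 * N, N:2 * N])
    assert np.allclose(cov_toep[N:2 * N, :N], cov_toep[2 * N:3 * N, N:2 * N])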
def calc_chunked_cov(X, T, stride, chunks, cov_est=None, rng=None, stride_tricks=True):
"""Calculate an unormalized (by sample count) lagged covariance matrix
in chunks to save memory.
Parameters
----------
X : np.ndarray, shape (# time-steps, N)
The N-dimensional time series data from which the cross-covariance
matrices are computed.
T : int
The number of time lags.
stride : int
The number of time-points to skip between samples.
chunks : int
Number of chunks to break the data into when calculating the lagged cross
        covariance. More chunks will mean less memory used.
cov_est : ndarray
Current estimate of unnormalized cov_est to be added to.
    Returns
    -------
    cov_est : ndarray
        Current covariance estimate.
    n_samples : int
        How many samples were used.
"""
if cov_est is None:
cov_est = 0.
n_samples = 0
if X.shape[0] < T * chunks:
raise ValueError('Time series is too short to chunk for cov estimation.')
ends = np.linspace(0, X.shape[0], chunks + 1, dtype=int)[1:]
start = 0
for chunk in range(chunks):
X_with_lags = form_lag_matrix(X[start:ends[chunk]], T, stride=stride,
rng=rng, stride_tricks=stride_tricks)
start = ends[chunk] - T + 1
ni_samples = X_with_lags.shape[0]
cov_est += np.dot(X_with_lags.T, X_with_lags)
n_samples += ni_samples
return cov_est, n_samples
def calc_cross_cov_mats_from_data(X, T, mean=None, chunks=None, stride=1,
rng=None, regularization=None, reg_ops=None,
stride_tricks=True, logger=None):
"""Compute the N-by-N cross-covariance matrix, where N is the data dimensionality,
for each time lag up to T-1.
Parameters
----------
X : np.ndarray, shape (# time-steps, N)
The N-dimensional time series data from which the cross-covariance
matrices are computed.
T : int
The number of time lags.
chunks : int
Number of chunks to break the data into when calculating the lagged cross
        covariance. More chunks will mean less memory used.
stride : int or float
If stride is an `int`, it defines the stride between lagged samples used
to estimate the cross covariance matrix. Setting stride > 1 can speed up the
calculation, but may lead to a loss in accuracy. Setting stride to a `float`
        greater than 0 and less than 1 will randomly subselect samples.
rng : NumPy random state
Only used if `stride` is a float.
regularization : string
Regularization method for computing the spatiotemporal covariance matrix.
reg_ops : dict
        Parameters for regularization.
stride_tricks : bool
Whether to use numpy stride tricks in form_lag_matrix. True will use less
memory for large T.
Returns
-------
cross_cov_mats : np.ndarray, shape (T, N, N), float
Cross-covariance matrices. cross_cov_mats[dt] is the cross-covariance between
X(t) and X(t+dt), where X(t) is an N-dimensional vector.
"""
if reg_ops is None:
reg_ops = dict()
if chunks is not None and regularization is not None:
raise NotImplementedError
if isinstance(X, list) or X.ndim == 3:
for Xi in X:
if len(Xi) <= T:
raise ValueError('T must be shorter than the length of the shortest ' +
'timeseries. If you are using the DCA model, 2 * DCA.T must be ' +
'shorter than the shortest timeseries.')
if mean is None:
mean = np.concatenate(X).mean(axis=0, keepdims=True)
X = [Xi - mean for Xi in X]
N = X[0].shape[-1]
if chunks is None:
cov_est = np.zeros((N * T, N * T))
n_samples = 0
for Xi in X:
X_with_lags = form_lag_matrix(Xi, T, stride=stride, stride_tricks=stride_tricks,
rng=rng)
cov_est += np.dot(X_with_lags.T, X_with_lags)
n_samples += len(X_with_lags)
cov_est /= (n_samples - 1.)
else:
n_samples = 0
cov_est = np.zeros((N * T, N * T))
for Xi in X:
cov_est, ni_samples = calc_chunked_cov(Xi, T, stride, chunks, cov_est=cov_est,
stride_tricks=stride_tricks, rng=rng)
n_samples += ni_samples
cov_est /= (n_samples - 1.)
else:
if len(X) <= T:
raise ValueError('T must be shorter than the length of the shortest ' +
'timeseries. If you are using the DCA model, 2 * DCA.T must be ' +
'shorter than the shortest timeseries.')
if mean is None:
mean = X.mean(axis=0, keepdims=True)
X = X - mean
N = X.shape[-1]
if chunks is None:
X_with_lags = form_lag_matrix(X, T, stride=stride, stride_tricks=stride_tricks,
rng=rng)
cov_est = np.cov(X_with_lags, rowvar=False)
else:
cov_est, n_samples = calc_chunked_cov(X, T, stride, chunks,
stride_tricks=stride_tricks, rng=rng)
cov_est /= (n_samples - 1.)
if regularization is None:
cov_est = toeplitzify(cov_est, T, N)
elif regularization == 'kron':
num_folds = reg_ops.get('num_folds', 5)
r_vals = np.arange(1, min(2 * T, N**2 + 1))
sigma_vals = np.concatenate([np.linspace(1, 4 * T + 1, 10), [100. * T]])
alpha_vals = np.concatenate([[0.], np.logspace(-2, -1, 10)])
ll_vals, opt_idx = cv_toeplitz(X_with_lags, T, N, r_vals, sigma_vals, alpha_vals,
num_folds=num_folds)
ri, si, ai = opt_idx
cov = np.cov(X_with_lags, rowvar=False)
cov_est = toeplitz_reg_taper_shrink(cov, T, N, r_vals[ri], sigma_vals[si], alpha_vals[ai])
else:
raise ValueError
rectify_spectrum(cov_est, logger=logger)
cross_cov_mats = calc_cross_cov_mats_from_cov(cov_est, T, N)
return cross_cov_mats
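def _example_calc_cross_cov_mats_from_data():
    # Illustrative sketch (helper added for exposition; not part of the original
    # module). For a single (n_time, N) timeseries the function returns T
    # cross-covariance matrices, one per time lag.
    X = np.random.randn(1000, 4)
    cross_cov_mats = calc_cross_cov_mats_from_data(X, T=5)
    assert cross_cov_mats.shape == (5, 4, 4)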
def calc_cross_cov_mats_from_cov(cov, T, N):
"""Calculates T N-by-N cross-covariance matrices given
a N*T-by-N*T spatiotemporal covariance matrix by
averaging over off-diagonal cross-covariance blocks with
constant `|t1-t2|`.
Parameters
----------
N : int
        Number of spatial dimensions.
    T : int
Number of time-lags.
cov : np.ndarray, shape (N*T, N*T)
Spatiotemporal covariance matrix.
Returns
-------
cross_cov_mats : np.ndarray, shape (T, N, N)
Cross-covariance matrices.
"""
use_torch = isinstance(cov, torch.Tensor)
if use_torch:
cross_cov_mats = torch.zeros((T, N, N))
else:
cross_cov_mats = np.zeros((T, N, N))
for delta_t in range(T):
if use_torch:
to_avg_lower = torch.zeros((T - delta_t, N, N))
to_avg_upper = torch.zeros((T - delta_t, N, N))
else:
to_avg_lower = np.zeros((T - delta_t, N, N))
to_avg_upper = np.zeros((T - delta_t, N, N))
for i in range(T - delta_t):
to_avg_lower[i, :, :] = cov[(delta_t + i) * N:(delta_t + i + 1) * N, i * N:(i + 1) * N]
to_avg_upper[i, :, :] = cov[i * N:(i + 1) * N, (delta_t + i) * N:(delta_t + i + 1) * N]
avg_lower = to_avg_lower.mean(axis=0)
avg_upper = to_avg_upper.mean(axis=0)
if use_torch:
cross_cov_mats[delta_t, :, :] = 0.5 * (avg_lower + avg_upper.t())
else:
cross_cov_mats[delta_t, :, :] = 0.5 * (avg_lower + avg_upper.T)
return cross_cov_mats
def calc_cov_from_cross_cov_mats(cross_cov_mats):
"""Calculates the N*T-by-N*T spatiotemporal covariance matrix based on
T N-by-N cross-covariance matrices.
Parameters
----------
cross_cov_mats : np.ndarray, shape (T, N, N)
Cross-covariance matrices: cross_cov_mats[dt] is the
cross-covariance between X(t) and X(t+dt), where each
        of X(t) and X(t+dt) is an N-dimensional vector.
Returns
-------
cov : np.ndarray, shape (N*T, N*T)
Big covariance matrix, stationary in time by construction.
"""
N = cross_cov_mats.shape[1]
T = len(cross_cov_mats)
use_torch = isinstance(cross_cov_mats, torch.Tensor)
cross_cov_mats_repeated = []
for i in range(T):
for j in range(T):
if i > j:
cross_cov_mats_repeated.append(cross_cov_mats[abs(i - j)])
else:
if use_torch:
cross_cov_mats_repeated.append(cross_cov_mats[abs(i - j)].t())
else:
cross_cov_mats_repeated.append(cross_cov_mats[abs(i - j)].T)
if use_torch:
cov_tensor = torch.reshape(torch.stack(cross_cov_mats_repeated), (T, T, N, N))
cov = torch.cat([torch.cat([cov_ii_jj for cov_ii_jj in cov_ii], dim=1)
for cov_ii in cov_tensor])
else:
cov_tensor = np.reshape(np.stack(cross_cov_mats_repeated), (T, T, N, N))
cov = np.concatenate([np.concatenate([cov_ii_jj for cov_ii_jj in cov_ii], axis=1)
for cov_ii in cov_tensor])
return cov
def calc_pi_from_data(X, T, proj=None, stride=1, rng=None):
"""Calculates the Gaussian Predictive Information between variables
    {1,...,T_pi} and {T_pi+1,...,2*T_pi}.
Parameters
----------
X : ndarray or torch tensor (time, features) or (batches, time, features)
Data used to calculate the PI.
T : int
This T should be 2 * T_pi. This T sets the joint window length not the
past or future window length.
proj : ndarray or torch tensor
Projection matrix for data (optional). If `proj` is not given, the PI of
the dataset is given.
stride : int or float
If stride is an `int`, it defines the stride between lagged samples used
to estimate the cross covariance matrix. Setting stride > 1 can speed up the
calculation, but may lead to a loss in accuracy. Setting stride to a `float`
        greater than 0 and less than 1 will randomly subselect samples.
rng : NumPy random state
Only used if `stride` is a float.
Returns
-------
PI : float
Mutual information in nats.
"""
ccms = calc_cross_cov_mats_from_data(X, T, stride=stride, rng=rng)
return calc_pi_from_cross_cov_mats(ccms, proj=proj)
def calc_pi_from_cov(cov_2_T_pi):
"""Calculates the Gaussian Predictive Information between variables
{1,...,T_pi} and {T_pi+1,...,2*T_pi} with covariance matrix cov_2_T_pi.
Parameters
----------
cov_2_T_pi : np.ndarray, shape (2*T_pi, 2*T_pi)
Covariance matrix.
Returns
-------
PI : float
Mutual information in nats.
"""
T_pi = cov_2_T_pi.shape[0] // 2
use_torch = isinstance(cov_2_T_pi, torch.Tensor)
cov_T_pi = cov_2_T_pi[:T_pi, :T_pi]
if use_torch:
logdet_T_pi = torch.slogdet(cov_T_pi)[1]
logdet_2T_pi = torch.slogdet(cov_2_T_pi)[1]
else:
logdet_T_pi = np.linalg.slogdet(cov_T_pi)[1]
logdet_2T_pi = np.linalg.slogdet(cov_2_T_pi)[1]
PI = logdet_T_pi - .5 * logdet_2T_pi
return PI
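def _example_calc_pi_from_cov():
    # Illustrative sketch (helper added for exposition; not part of the original
    # module). With an identity joint covariance the past and future halves are
    # independent, so the predictive information is zero.
    cov_2_T_pi = np.eye(6)
    assert np.isclose(calc_pi_from_cov(cov_2_T_pi), 0.)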
def project_cross_cov_mats(cross_cov_mats, proj):
"""Projects the cross covariance matrices.
Parameters
----------
cross_cov_mats : np.ndarray, shape (T, N, N)
Cross-covariance matrices: cross_cov_mats[dt] is the
cross-covariance between X(t) and X(t+dt), where each
        of X(t) and X(t+dt) is an N-dimensional vector.
proj: np.ndarray, shape (N, d), optional
If provided, the N-dimensional data are projected onto a d-dimensional
basis given by the columns of proj. Then, the mutual information is
computed for this d-dimensional timeseries.
Returns
-------
cross_cov_mats_proj : ndarray, shape (T, d, d)
        Projected cross-covariance matrices.
"""
if isinstance(cross_cov_mats, torch.Tensor):
use_torch = True
elif isinstance(cross_cov_mats[0], torch.Tensor):
cross_cov_mats = torch.stack(cross_cov_mats)
use_torch = True
else:
use_torch = False
if use_torch and isinstance(proj, np.ndarray):
proj = torch.tensor(proj, device=cross_cov_mats.device, dtype=cross_cov_mats.dtype)
T = cross_cov_mats.shape[0] // 2
if use_torch:
cross_cov_mats_proj = torch.matmul(proj.t().unsqueeze(0),
torch.matmul(cross_cov_mats,
proj.unsqueeze(0)))
else:
cross_cov_mats_proj = []
for i in range(2 * T):
cross_cov = cross_cov_mats[i]
cross_cov_proj = np.dot(proj.T, np.dot(cross_cov, proj))
cross_cov_mats_proj.append(cross_cov_proj)
cross_cov_mats_proj = np.stack(cross_cov_mats_proj)
return cross_cov_mats_proj
def calc_pi_from_cross_cov_mats(cross_cov_mats, proj=None):
"""Calculates predictive information for a spatiotemporal Gaussian
process with T-1 N-by-N cross-covariance matrices.
Parameters
----------
cross_cov_mats : np.ndarray, shape (T, N, N)
Cross-covariance matrices: cross_cov_mats[dt] is the
cross-covariance between X(t) and X(t+dt), where each
        of X(t) and X(t+dt) is an N-dimensional vector.
proj: np.ndarray, shape (N, d), optional
If provided, the N-dimensional data are projected onto a d-dimensional
basis given by the columns of proj. Then, the mutual information is
computed for this d-dimensional timeseries.
Returns
-------
PI : float
Mutual information in nats.
"""
if proj is not None:
cross_cov_mats_proj = project_cross_cov_mats(cross_cov_mats, proj)
else:
cross_cov_mats_proj = cross_cov_mats
cov_2_T_pi = calc_cov_from_cross_cov_mats(cross_cov_mats_proj)
PI = calc_pi_from_cov(cov_2_T_pi)
return PI
def calc_block_toeplitz_logdets(cross_cov_mats, proj=None):
"""Calculates logdets which can be used to calculate predictive information or entropy
for a spatiotemporal Gaussian process with T N-by-N cross-covariance matrices using
the block-Toeplitz algorithm.
Based on:
<NAME>. "A decomposition of block toeplitz matrices with applications
to vector time series." 1989a). Unpublished manuscript (1989).
Parameters
----------
cross_cov_mats : np.ndarray, shape (T, N, N)
Cross-covariance matrices: cross_cov_mats[dt] is the
cross-covariance between X(t) and X(t+dt), where each
        of X(t) and X(t+dt) is an N-dimensional vector.
proj: np.ndarray, shape (N, d), optional
If provided, the N-dimensional data are projected onto a d-dimensional
basis given by the columns of proj. Then, the mutual information is
computed for this d-dimensional timeseries.
Returns
-------
    logdets : list
T logdets.
"""
use_torch = isinstance(cross_cov_mats, torch.Tensor)
if proj is not None:
ccms = project_cross_cov_mats(cross_cov_mats, proj)
else:
ccms = cross_cov_mats
T, d, d = ccms.shape
A = dict()
Ab = dict()
if use_torch:
v = ccms[0]
vb = [ccms[0]]
D = ccms[1]
for ii in range(1, T):
if ii > 1:
As = torch.stack([A[ii - 2, ii - jj - 1] for jj in range(1, ii)])
D = ccms[ii] - torch.matmul(As, ccms[1:ii]).sum(dim=0)
A[(ii - 1, ii - 1)] = torch.solve(D.t(), vb[ii - 1].t())[0].t()
Ab[(ii - 1, ii - 1)] = torch.solve(D, v.t())[0].t()
for kk in range(1, ii):
A[(ii - 1, kk - 1)] = (A[(ii - 2, kk - 1)]
- A[(ii - 1, ii - 1)].mm(Ab[(ii - 2, ii - kk - 1)]))
Ab[(ii - 1, kk - 1)] = (Ab[(ii - 2, kk - 1)]
- Ab[(ii - 1, ii - 1)].mm(A[(ii - 2, ii - kk - 1)]))
if ii < T - 1:
As = torch.stack([A[(ii - 1, jj - 1)] for jj in range(1, ii + 1)])
if ii == 1:
cs = ccms[[1]]
else:
cs = ccms[1: ii + 1]
v = ccms[0] - torch.matmul(As, torch.transpose(cs, 1, 2)).sum(dim=0)
Abs = torch.stack([Ab[(ii - 1, jj - 1)] for jj in range(1, ii + 1)])
if ii == 1:
cs = ccms[[1]]
else:
cs = ccms[1: ii + 1]
vb.append(ccms[0] - torch.matmul(Abs, cs).sum(dim=0))
logdets = [torch.slogdet(vb[ii])[1] for ii in range(T)]
else:
vb = np.zeros((T, d, d))
v = ccms[0]
vb[0] = ccms[0]
D = ccms[1]
for ii in range(1, T):
if ii > 1:
D = ccms[ii] - sum([A[ii - 2, ii - jj - 1].dot(ccms[jj])
for jj in range(1, ii)])
A[(ii - 1, ii - 1)] = np.linalg.solve(vb[ii - 1].T, D.T).T
Ab[(ii - 1, ii - 1)] = np.linalg.solve(v.T, D).T
for kk in range(1, ii):
if ii < T - 1:
A[(ii - 1, kk - 1)] = (A[(ii - 2, kk - 1)]
- A[(ii - 1, ii - 1)].dot(Ab[(ii - 2, ii - kk - 1)]))
Ab[(ii - 1, kk - 1)] = (Ab[(ii - 2, kk - 1)]
- Ab[(ii - 1, ii - 1)].dot(A[(ii - 2, ii - kk - 1)]))
if ii < T - 1:
v = ccms[0] - sum([A[(ii - 1, jj - 1)].dot(ccms[jj].T) for jj in range(1, ii + 1)])
vb[ii] = ccms[0] - sum([Ab[(ii - 1, jj - 1)].dot(ccms[jj]) for jj in range(1, ii + 1)])
logdets = [np.linalg.slogdet(vb[ii])[1] for ii in range(T)]
return logdets
def calc_pi_from_cross_cov_mats_block_toeplitz(cross_cov_mats, proj=None):
"""Calculates predictive information for a spatiotemporal Gaussian
process with T-1 N-by-N cross-covariance matrices using the block-Toeplitz
algorithm.
Based on:
<NAME>. "A decomposition of block toeplitz matrices with applications
to vector time series." 1989a). Unpublished manuscript (1989).
Parameters
----------
cross_cov_mats : np.ndarray, shape (T, N, N)
Cross-covariance matrices: cross_cov_mats[dt] is the
cross-covariance between X(t) and X(t+dt), where each
        of X(t) and X(t+dt) is an N-dimensional vector.
proj: np.ndarray, shape (N, d), optional
If provided, the N-dimensional data are projected onto a d-dimensional
basis given by the columns of proj. Then, the mutual information is
computed for this d-dimensional timeseries.
Returns
-------
PI : float
Mutual information in nats.
"""
T = cross_cov_mats.shape[0]
logdets = calc_block_toeplitz_logdets(cross_cov_mats, proj)
return sum(logdets[:T // 2]) - 0.5 * sum(logdets)
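def _example_block_toeplitz_pi():
    # Illustrative sketch (helper added for exposition; not part of the original
    # module). For cross-covariances estimated from data, the block-Toeplitz
    # recursion should agree with the direct log-determinant computation up to
    # numerical error.
    X = np.random.randn(2000, 3)
    ccms = calc_cross_cov_mats_from_data(X, T=4)
    pi_direct = calc_pi_from_cross_cov_mats(ccms)
    pi_block = calc_pi_from_cross_cov_mats_block_toeplitz(ccms)
    assert np.isclose(pi_direct, pi_block)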
"""
====================================================================================================
====================================================================================================
=================================== ===============================
=================================== KronPCA-related methods ===============================
=================================== ===============================
====================================================================================================
====================================================================================================
"""
class memoized(object):
"""Decorator for memoization.
From: https://wiki.python.org/moin/PythonDecoratorLibrary.
Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.abc.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
# Return the function's docstring.
return self.func.__doc__
def __get__(self, obj, objtype):
# Support instance methods.
return functools.partial(self.__call__, obj)
@memoized
def pv_permutation(T, N):
    A = np.arange((T * N)**2, dtype=int).reshape((T * N, T * N))
    A_perm = np.zeros((T**2, N**2), dtype=int)
for i in range(T):
for j in range(T):
row_idx = i * T + j
A_block = A[i * N:(i + 1) * N, j * N:(j + 1) * N]
A_perm[row_idx, :] = A_block.T.reshape((N**2,)) # equivalent to I_block.vectorize
perm = A_perm.ravel()
perm_inv = perm.argsort()
return perm, perm_inv
def pv_rearrange(C, T, N):
perm, _ = pv_permutation(T, N)
C_prime = C.ravel()[perm].reshape((T**2, N**2))
return C_prime
def pv_rearrange_inv(C, T, N):
_, perm_inv = pv_permutation(T, N)
C_prime = C.ravel()[perm_inv].reshape((T * N, T * N))
return C_prime
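def _example_pv_rearrange_roundtrip():
    # Illustrative sketch (helper added for exposition; not part of the original
    # module). The rearrangement maps a (T*N, T*N) matrix to a (T**2, N**2)
    # matrix of vectorized blocks, and pv_rearrange_inv undoes it exactly.
    T, N = 3, 2
    C = np.random.randn(T * N, T * N)
    C_r = pv_rearrange(C, T, N)
    assert C_r.shape == (T**2, N**2)
    assert np.allclose(pv_rearrange_inv(C_r, T, N), C)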
def build_P(T):
P = np.zeros((2 * T - 1, T**2))
idx = np.arange(T**2).reshape((T, T)).T + 1
for offset in range(-T + 1, T):
diag_idx = np.diagonal(idx, offset=offset)
P[offset + T - 1, diag_idx - 1] = 1. / np.sqrt(T - np.abs(offset))
return P
def toeplitz_reg(cov, T, N, r):
R_C = pv_rearrange(cov, T, N)
P = build_P(T)
to_svd = P.dot(R_C)
U, s, Vt = randomized_svd(to_svd, n_components=r + 1, n_iter=40, random_state=42)
trunc_svd = U[:, :-1].dot(np.diag(s[:-1] - s[-1])).dot(Vt[:-1, :])
cov_reg = pv_rearrange_inv(P.T.dot(trunc_svd), T, N)
return cov_reg
def non_toeplitz_reg(cov, T, N, r):
R_C = pv_rearrange(cov, T, N)
U, s, Vt = randomized_svd(R_C, n_components=r + 1, n_iter=40, random_state=42)
trunc_svd = U[:, :-1].dot(np.diag(s[:-1] - s[-1])).dot(Vt[:-1, :])
cov_reg = pv_rearrange_inv(trunc_svd, T, N)
return cov_reg
def toeplitz_reg_taper_shrink(cov, T, N, r, sigma, alpha):
cov_reg = toeplitz_reg(cov, T, N, r)
cov_reg_taper = taper_cov(cov_reg, T, N, sigma)
cov_reg_taper_shrink = (1. - alpha) * cov_reg_taper + alpha * np.eye(T * N)
return cov_reg_taper_shrink
def gaussian_log_likelihood(cov, sample_cov, num_samples):
to_trace = np.linalg.solve(cov, sample_cov)
log_det_cov = np.linalg.slogdet(cov)[1]
d = cov.shape[1]
log_likelihood = -0.5 * num_samples * (d * np.log(2. * np.pi) +
log_det_cov + np.trace(to_trace))
return log_likelihood
def taper_cov(cov, T, N, sigma):
t = np.arange(T).reshape((T, 1))
delta_t = t - t.T
temporal_kernel = np.exp(-(delta_t / sigma)**2)
full_kernel = np.kron(temporal_kernel, np.ones((N, N)))
result = full_kernel * cov
return result
def cv_toeplitz(X_with_lags, T, N, r_vals, sigma_vals, alpha_vals, num_folds=10, verbose=False):
fold_size = int(np.floor(len(X_with_lags) / num_folds))
P = build_P(T)
ll_vals = np.zeros((num_folds, len(r_vals), len(sigma_vals), len(alpha_vals)))
for cv_iter in range(num_folds):
if verbose:
print("fold =", cv_iter + 1)
X_train = np.concatenate((X_with_lags[:cv_iter * fold_size],
X_with_lags[(cv_iter + 1) * fold_size:]), axis=0)
X_test = X_with_lags[cv_iter * fold_size:(cv_iter + 1) * fold_size]
num_samples = len(X_test)
cov_train, cov_test = np.cov(X_train.T), np.cov(X_test.T)
cov_train, cov_test = toeplitzify(cov_train, T, N), toeplitzify(cov_test, T, N)
rectify_spectrum(cov_train)
rectify_spectrum(cov_test)
R_C = pv_rearrange(cov_train, T, N)
to_svd = P.dot(R_C)
U, s, Vt = randomized_svd(to_svd, n_components=np.max(r_vals), n_iter=40, random_state=42)
for r_idx in range(len(r_vals)):
r = r_vals[r_idx]
if verbose:
print("r =", r)
if r_idx == len(r_vals) - 1:
trunc_svd = to_svd
else:
trunc_svd = U[:, :r].dot(np.diag(s[:r] - s[r])).dot(Vt[:r, :])
cov_kron = pv_rearrange_inv(P.T.dot(trunc_svd), T, N)
for sigma_idx in range(len(sigma_vals)):
sigma = sigma_vals[sigma_idx]
cov_kron_taper = taper_cov(cov_kron, T, N, sigma)
for alpha_idx in range(len(alpha_vals)):
alpha = alpha_vals[alpha_idx]
cov_kron_taper_shrunk = ((1. - alpha) * cov_kron_taper + alpha * np.eye(T * N))
ll = gaussian_log_likelihood(cov_kron_taper_shrunk, cov_test, num_samples)
ll_vals[cv_iter, r_idx, sigma_idx, alpha_idx] = ll
opt_idx = np.unravel_index(ll_vals.mean(axis=0).argmax(), ll_vals.shape[1:])
return ll_vals, opt_idx
```
#### File: DynamicalComponentsAnalysis/dca/data_util.py
```python
import h5py, pickle
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.signal import resample
from scipy.ndimage import convolve1d
from .cov_util import form_lag_matrix # noqa:F401
def sum_over_chunks(X, stride):
X_trunc = X[:len(X) - (len(X) % stride)]
reshaped = X_trunc.reshape((len(X_trunc) // stride, stride, X.shape[1]))
summed = reshaped.sum(axis=1)
return summed
def moving_center(X, n, axis=0):
if n % 2 == 0:
n += 1
w = -np.ones(n) / n
w[n // 2] += 1
X_ctd = convolve1d(X, w, axis=axis)
return X_ctd
def calc_autocorr_fns(X, T):
autocorr_fns = np.zeros((X.shape[1], T))
for dt in range(T):
autocorr_fns[:, dt] = np.sum((X[dt:] * X[:len(X) - dt]), axis=0) / (len(X) - dt)
return autocorr_fns
def load_kording_paper_data(filename, bin_width_s=0.05, min_spike_count=10, preprocess=True):
with open(filename, "rb") as fname:
data = pickle.load(fname)
X, Y = data[0], data[1]
    good_X_idx = (1 - (np.isnan(X[:, 0]) + np.isnan(X[:, 1]))).astype(bool)
    good_Y_idx = (1 - (np.isnan(Y[:, 0]) + np.isnan(Y[:, 1]))).astype(bool)
good_idx = good_X_idx * good_Y_idx
X, Y = X[good_idx], Y[good_idx]
chunk_size = int(np.round(bin_width_s / 0.05)) # 50 ms default bin width
X, Y = sum_over_chunks(X, chunk_size), sum_over_chunks(Y, chunk_size) / chunk_size
X = X[:, np.sum(X, axis=0) > min_spike_count]
if preprocess:
X = np.sqrt(X)
X = moving_center(X, n=600)
Y -= Y.mean(axis=0, keepdims=True)
Y /= Y.std(axis=0, keepdims=True)
return {'neural': X, 'loc': Y}
def load_weather_data(filename):
df = pd.read_csv(filename)
df['datetime'] = pd.to_datetime(df['datetime'])
df.set_index('datetime', inplace=True)
df = df[['Vancouver', 'Portland', 'San Francisco', 'Seattle',
'Los Angeles', 'San Diego', 'Las Vegas', 'Phoenix', 'Albuquerque',
'Denver', 'San Antonio', 'Dallas', 'Houston', 'Kansas City',
'Minneapolis', 'Saint Louis', 'Chicago', 'Nashville', 'Indianapolis',
'Atlanta', 'Detroit', 'Jacksonville', 'Charlotte', 'Miami',
'Pittsburgh', 'Toronto', 'Philadelphia', 'New York', 'Montreal',
'Boston']]
df = df.dropna(axis=0, how='any')
dts = (df.index[1:] - df.index[:-1]).to_numpy()
df = df.iloc[np.nonzero(dts > dts.min())[0].max() + 1:]
Xfs = df.values.copy()
ds_factor = 24
X = resample(Xfs, Xfs.shape[0] // ds_factor, axis=0)
return X
"""
Download .mat files from
https://zenodo.org/record/583331#.XNtzE5NKjys
Longest session (only has M1): indy_20160627_01.mat
TODO: use downsampling w/ scipy.signal instead of decimation
"""
def load_sabes_data(filename, bin_width_s=.05, preprocess=True):
# Load MATLAB file
with h5py.File(filename, "r") as f:
# Get channel names (e.g. M1 001 or S1 001)
n_channels = f['chan_names'].shape[1]
chan_names = []
for i in range(n_channels):
chan_names.append(f[f['chan_names'][0, i]][()].tobytes()[::2].decode())
# Get M1 and S1 indices
M1_indices = [i for i in range(n_channels) if chan_names[i].split(' ')[0] == 'M1']
S1_indices = [i for i in range(n_channels) if chan_names[i].split(' ')[0] == 'S1']
# Get time
t = f['t'][0, :]
# Individually process M1 and S1 indices
result = {}
for indices in (M1_indices, S1_indices):
if len(indices) == 0:
continue
# Get region (M1 or S1)
region = chan_names[indices[0]].split(" ")[0]
# Perform binning
n_channels = len(indices)
n_sorted_units = f["spikes"].shape[0] - 1 # The FIRST one is the 'hash' -- ignore!
d = n_channels * n_sorted_units
max_t = t[-1]
n_bins = int(np.floor((max_t - t[0]) / bin_width_s))
            binned_spikes = np.zeros((n_bins, d), dtype=int)
for chan_idx in indices:
for unit_idx in range(1, n_sorted_units): # ignore hash!
spike_times = f[f["spikes"][unit_idx, chan_idx]][()]
if spike_times.shape == (2,):
# ignore this case (no data)
continue
spike_times = spike_times[0, :]
# get rid of extraneous t vals
spike_times = spike_times[spike_times - t[0] < n_bins * bin_width_s]
                    bin_idx = np.floor((spike_times - t[0]) / bin_width_s).astype(int)
unique_idxs, counts = np.unique(bin_idx, return_counts=True)
# make sure to ignore the hash here...
binned_spikes[unique_idxs, chan_idx * n_sorted_units + unit_idx - 1] += counts
binned_spikes = binned_spikes[:, binned_spikes.sum(axis=0) > 0]
if preprocess:
binned_spikes = binned_spikes[:, binned_spikes.sum(axis=0) > 5000]
binned_spikes = np.sqrt(binned_spikes)
binned_spikes = moving_center(binned_spikes, n=600)
result[region] = binned_spikes
# Get cursor position
cursor_pos = f["cursor_pos"][:].T
# Line up the binned spikes with the cursor data
t_mid_bin = np.arange(len(binned_spikes)) * bin_width_s + bin_width_s / 2
cursor_pos_interp = interp1d(t - t[0], cursor_pos, axis=0)
cursor_interp = cursor_pos_interp(t_mid_bin)
if preprocess:
cursor_interp -= cursor_interp.mean(axis=0, keepdims=True)
cursor_interp /= cursor_interp.std(axis=0, keepdims=True)
result["cursor"] = cursor_interp
return result
def load_accel_data(filename, preprocess=True):
df = pd.read_csv(filename)
X = df.values[:, 1:]
if preprocess:
X -= X.mean(axis=0, keepdims=True)
X /= X.std(axis=0, keepdims=True)
return X
class CrossValidate:
def __init__(self, X, Y, num_folds, stack=True):
self.X, self.Y = X, Y
self.num_folds = num_folds
self.idxs = np.array_split(np.arange(len(X)), num_folds)
self.stack = stack
def __iter__(self):
self.fold_idx = 0
return self
def __next__(self):
fold_idx = self.fold_idx
if fold_idx == self.num_folds:
raise StopIteration
test_idxs = self.idxs[fold_idx]
train_idxs = []
if fold_idx > 0:
train_idxs.append(np.concatenate([self.idxs[ii] for ii in range(fold_idx)]))
if fold_idx < self.num_folds - 1:
train_idxs.append(np.concatenate([self.idxs[ii]
for ii in range(fold_idx + 1, self.num_folds)]))
X, Y = self.X, self.Y
X_test = X[test_idxs]
Y_test = Y[test_idxs]
if self.stack:
X_train = np.concatenate([X[idxs] for idxs in train_idxs])
Y_train = np.concatenate([Y[idxs] for idxs in train_idxs])
else:
X_train = [X[idxs] for idxs in train_idxs]
Y_train = [Y[idxs] for idxs in train_idxs]
self.fold_idx += 1
return X_train, X_test, Y_train, Y_test, fold_idx
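def _example_cross_validate():
    # Illustrative sketch (helper added for exposition; not part of the original
    # module). Iterating over CrossValidate yields stacked train/test splits plus
    # the fold index.
    X = np.random.randn(100, 4)
    Y = np.random.randn(100, 2)
    for X_train, X_test, Y_train, Y_test, fold_idx in CrossValidate(X, Y, num_folds=5):
        # With stack=True (the default) the training folds are concatenated.
        assert len(X_train) + len(X_test) == len(X)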
```
|
{
"source": "JesseLivezey/gabor_fit",
"score": 3
}
|
#### File: gabor_fit/gabor_fit/fit.py
```python
import theano
import theano.tensor as T
from scipy.optimize import minimize
from scipy.ndimage.filters import gaussian_filter as gf
from numpy import fft
import numpy as np
def setup_graph():
"""Setup the theano graph for all possible operations."""
n_x = T.lscalar('n_x')
n_y = T.lscalar('n_y')
pos_x = T.arange(n_x).dimshuffle(0, 'x', 'x')
pos_y = T.arange(n_y).dimshuffle('x', 0, 'x')
params = T.dvector('params')
s_params = split_params(params)
x, y, theta, phi, lkx, lvx, lvy = s_params
xp = x.dimshuffle('x', 'x', 0)
yp = y.dimshuffle('x', 'x', 0)
thetap = theta.dimshuffle('x', 'x', 0)
phip = phi.dimshuffle('x', 'x', 0)
lkxp = lkx.dimshuffle('x', 'x', 0)
lkxp = 2. * np.pi / (2. + T.exp(lkxp))
lvxp = lvx.dimshuffle('x', 'x', 0)
lvyp = lvy.dimshuffle('x', 'x', 0)
x_prime = T.cos(theta)*(pos_x-x) -T.sin(theta)*(pos_y-y)
y_prime = T.sin(theta)*(pos_x-x) +T.cos(theta)*(pos_y-y)
envelope = T.exp(-x_prime**2/T.exp(lvxp)/2.-y_prime**2/T.exp(lvyp)/2.)
phase = T.sin(lkxp * x_prime+phip)
gabor = envelope * phase
gabor_norm = T.sqrt((gabor**2).sum(axis=(0, 1), keepdims=True))
envelope_norm = T.sqrt((envelope**2).sum(axis=(0, 1), keepdims=True))
phase_norm = T.sqrt((phase**2).sum(axis=(0, 1), keepdims=True))
gabor = gabor/gabor_norm
envelope = envelope/envelope_norm
phase = phase/phase_norm
return params, s_params, n_x, n_y, gabor, envelope, phase
def fit_lvx_lvy_function(data):
params, s_params, n_x, n_y, gabor, envelope, phase = setup_graph()
x, y, theta, phi, lkx, lvx, lvy = s_params
se = ((data-gabor)**2).sum(axis=(0, 1))
mse = se.mean().astype('float64')
grad = T.grad(mse, params, consider_constant=(x, y, theta, phi, lkx))
return params, mse, se, grad, gabor, n_x, n_y
def fit_theta_phi_lkx_function(data):
params, s_params, n_x, n_y, gabor, envelope, phase = setup_graph()
x, y, theta, phi, lkx, lvx, lvy = s_params
se = ((data-gabor)**2).sum(axis=(0, 1))
mse = se.mean().astype('float64')
grad = T.grad(mse, params, consider_constant=(x, y, lvx, lvy))
return params, mse, se, grad, gabor, n_x, n_y
def fit_theta_phi_function(data):
params, s_params, n_x, n_y, gabor, envelope, phase = setup_graph()
x, y, theta, phi, lkx, lvx, lvy = s_params
se = ((data-phase)**2).sum(axis=(0, 1))
mse = se.mean().astype('float64')
grad = T.grad(mse, params, consider_constant=[lkx])
return params, mse, se, grad, phase, n_x, n_y
def fit_only_envelope_function(data):
params, s_params, n_x, n_y, gabor, envelope, phase = setup_graph()
se = ((data-envelope)**2).sum(axis=(0, 1))
mse = se.mean().astype('float64')
grad = T.grad(mse, params)
return params, mse, se, grad, envelope, n_x, n_y
def fit_x_y_function(data):
params, s_params, n_x, n_y, gabor, envelope, phase = setup_graph()
x, y, theta, phi, lkx, lvx, lvy = s_params
se = ((data-envelope)**2).sum(axis=(0, 1))
mse = se.mean().astype('float64')
grad = T.grad(mse, params, consider_constant=[theta, lvx, lvy])
return params, mse, se, grad, gabor, n_x, n_y
def fit_phi_x_y_function(data):
params, s_params, n_x, n_y, gabor, envelope, phase = setup_graph()
x, y, theta, phi, lkx, lvx, lvy = s_params
se = ((data-gabor)**2).sum(axis=(0, 1))
mse = se.mean().astype('float64')
grad = T.grad(mse, params, consider_constant=[theta, lkx, lvx, lvy])
return params, mse, se, grad, gabor, n_x, n_y
def fit_envelope_function(data):
params, s_params, n_x, n_y, gabor, envelope, phase = setup_graph()
x, y, theta, phi, lkx, lvx, lvy = s_params
se = ((data-gabor)**2).sum(axis=(0, 1))
mse = se.mean().astype('float64')
grad = T.grad(mse, params, consider_constant=[theta, lkx])
return params, mse, se, grad, gabor, n_x, n_y
def fit_all_function(data):
params, s_params, n_x, n_y, gabor, envelope, phase = setup_graph()
se = ((data-gabor)**2).sum(axis=(0, 1))
mse = se.mean().astype('float64')
grad = T.grad(mse, params)
return params, mse, se, grad, gabor, n_x, n_y
def combine_params(x, y, theta, phi, lkx, lvx, lvy):
"""Turns individual parameter vectors into a parameter array."""
if isinstance(x, theano.tensor.TensorVariable):
rval = T.concatenate([x, y, theta, phi, lkx, lvx, lvy])
else:
rval = np.concatenate([x, y, theta, phi, lkx, lvx, lvy])
return rval
def split_params(params):
"""Splits a parameter vector for a batch of gabors into individual parameter
vectors."""
n_samples = params.shape[0]//7
x = params[:n_samples].astype('float32')
y = params[n_samples:2*n_samples].astype('float32')
theta = params[2*n_samples:3*n_samples].astype('float32')
phi = params[3*n_samples:4*n_samples].astype('float32')
lkx = params[4*n_samples:5*n_samples].astype('float32')
lvx = params[5*n_samples:6*n_samples].astype('float32')
lvy = params[6*n_samples:].astype('float32')
return x, y, theta, phi, lkx, lvx, lvy
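def _example_param_roundtrip():
    # Illustrative sketch (helper added for exposition; not part of the original
    # module). combine_params packs the seven per-Gabor parameter vectors into a
    # single flat vector and split_params recovers them (cast to float32).
    n_samples = 3
    parts = [np.random.randn(n_samples).astype('float32') for _ in range(7)]
    packed = combine_params(*parts)
    unpacked = split_params(packed)
    for original, recovered in zip(parts, unpacked):
        assert np.allclose(original, recovered)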
def standardize_params(*params):
"""Convert parameters from internal representation to standard Gabor
parameters.
Parameters
----------
x, y, theta, phi, lkx, lvx, lvy
        Either a combined vector or split parameters.
Returns
-------
x : float
Center of the Gabor in the x direction in pixels.
y : float
Center of the Gabor in the y direction in pixels.
theta : float
Rotation of the Gabor in the plane.
phi : float
Phase of the Gabor.
kx : float
Wavevector of Gabor (2*pi/lambda).
vx : float
        Variance of the Gabor along the oscillation direction.
    vy : float
        Variance of the Gabor perpendicular to the oscillation direction.
"""
combine = False
if len(params) == 1:
x, y, theta, phi, lkx, lvx, lvy = split_params(*params)
combine = True
else:
x, y, theta, phi, lkx, lvx, lvy = params
if isinstance(x, theano.tensor.TensorVariable):
kx = 2.*np.pi / (2.*np.sqrt(2)+T.exp(lkx))
rval = x, y, theta, phi, kx, T.exp(lvx), T.exp(lvy)
else:
kx = 2.*np.pi / (2.*np.sqrt(2)+np.exp(lkx))
rval = x, y, theta, phi, kx, np.exp(lvx), np.exp(lvy)
if combine:
rval = combine_params(*rval)
return rval
class GaborFit(object):
"""Fit Gabor parameters to patches and visualize Gabors."""
def __init__(self):
self.data = theano.shared(np.empty((1,1,1), dtype='float32'))
(params, mse, se, grad, gabor,
n_x_s, n_y_s) = fit_x_y_function(self.data)
self._fit_x_y = theano.function([params], [mse, grad],
givens={n_x_s: self.data.shape[0],
n_y_s: self.data.shape[1]})
self._fit_x_y_se = theano.function([params], se,
givens={n_x_s: self.data.shape[0],
n_y_s: self.data.shape[1]})
(params, mse, se, grad, gabor,
n_x_s, n_y_s) = fit_phi_x_y_function(self.data)
self._fit_phi_x_y = theano.function([params], [mse, grad],
givens={n_x_s: self.data.shape[0],
n_y_s: self.data.shape[1]})
self._fit_phi_x_y_se = theano.function([params], se,
givens={n_x_s: self.data.shape[0],
n_y_s: self.data.shape[1]})
(params, mse, se, grad, gabor,
n_x_s, n_y_s) = fit_theta_phi_lkx_function(self.data)
self._fit_theta_phi_lkx = theano.function([params], [mse, grad],
givens={n_x_s: self.data.shape[0],
n_y_s: self.data.shape[1]})
self._fit_theta_phi_lkx_se = theano.function([params], se,
givens={n_x_s: self.data.shape[0],
n_y_s: self.data.shape[1]})
(params, mse, se, grad, gabor,
n_x_s, n_y_s) = fit_all_function(self.data)
self._fit_all = theano.function([params], [mse, grad],
givens={n_x_s: self.data.shape[0],
n_y_s: self.data.shape[1]})
self._fit_all_se = theano.function([params], se,
givens={n_x_s: self.data.shape[0],
n_y_s: self.data.shape[1]})
(params, mse, se, grad, gabor,
n_x_s, n_y_s) = fit_lvx_lvy_function(self.data)
self._fit_lvx_lvy = theano.function([params], [mse, grad],
givens={n_x_s: self.data.shape[0],
n_y_s: self.data.shape[1]})
self._fit_lvx_lvy_se = theano.function([params], se,
givens={n_x_s: self.data.shape[0],
n_y_s: self.data.shape[1]})
params_s, s_params, n_x_s, n_y_s, gabor, envelope, phase = setup_graph()
self._make_gabor = theano.function([params_s, n_x_s, n_y_s], gabor)
self._make_phase = theano.function([params_s, n_x_s, n_y_s], phase)
self._make_envelope = theano.function([params_s, n_x_s, n_y_s], envelope)
def fit(self, X, var_init=.05):
"""Given image patches, find best-fit Gabor parameters.
Parameters
----------
X : ndarray (n_x, n_y, n_batch)
Image patches for fitting.
var_init : float
Ballpark variance initialization scaled by dim**2.
Returns
-------
x : list
        List of all parameter settings during fitting.
best_params : ndarray
Internal parameter vector for best parameters.
best_se : ndarray
        Squared-error for the best parameter settings for each element of the
batch.
"""
# Calculate different versions of the data
n_x, n_y, n_samples = X.shape
init = np.zeros(7*n_samples)
X_norm = np.sqrt((X**2).sum(axis=(0, 1), keepdims=True))
X = X/X_norm
fao = np.array([gf(abs(xi), 2, mode='constant', cval=0.)
for xi in X.transpose(2, 0, 1)]).transpose(1, 2, 0).astype('float32')
fao_norm = np.sqrt((fao**2).sum(axis=(0, 1), keepdims=True))
fao = fao/fao_norm
aps = abs(fft.fft2(X, axes=(0, 1)))
aps_norm = np.sqrt((aps**2).sum(axis=(0, 1), keepdims=True))
aps = aps/aps_norm
freqs = fft.fftfreq(n_x)[:, np.newaxis] + 1j*fft.fftfreq(n_y)[np.newaxis, :]
thetas = np.linspace(0., np.pi, 8)
kx_min = 2.*np.pi/np.sqrt(n_x**2+n_y**2)
kx_max = 2.*np.pi/2./np.sqrt(2.)
kxs = np.linspace(kx_min, kx_max, 20, endpoint=True)
lkxs = np.log(2.*np.pi/kxs + 2.*np.sqrt(2))
def choose_best(best_se, best_params, se, params):
compare = se < best_se
best_params = best_params.reshape(7, -1)
params = params.reshape(7, -1)
best_se[compare] = se[compare]
best_params[:, compare] = params[:, compare]
return best_se, best_params.ravel()
best_se = np.inf*np.ones(n_samples)
best_params = np.zeros(7*n_samples)
x = []
for vi in [var_init/2., var_init, 2.*var_init]:
init = np.zeros(7*n_samples)
init[:n_samples] = n_x/2.
init[n_samples:2*n_samples] = n_y/2.
init[4*n_samples:5*n_samples] = lkxs[0]
init[5*n_samples:6*n_samples] = np.log(vi*(n_x)**2)
init[6*n_samples:7*n_samples] = np.log(vi*(n_y)**2)
# Fit envelope mean
func = self._fit_x_y
self.data.set_value(fao.astype('float32'))
res = minimize(func, init, method='L-BFGS-B', jac=True)
x.append(res.x)
params = res.x
x.append(best_params)
self.data.set_value(X.astype('float32'))
func = self._fit_theta_phi_lkx
func_se = self._fit_theta_phi_lkx_se
for theta in thetas:
for lkx in lkxs:
init = params.copy()
init[2*n_samples:3*n_samples] = theta
init[4*n_samples:5*n_samples] = lkx
res = minimize(func, init, method='L-BFGS-B', jac=True)
params = res.x
se = func_se(params)
best_se, best_params = choose_best(best_se, best_params, se, params)
x.append(best_params)
#print theta, k, se.mean(), best_se.mean()
# Fit envelope widths
func = self._fit_lvx_lvy
res = minimize(func, best_params, method='L-BFGS-B', jac=True)
params = res.x
se = self._fit_lvx_lvy_se(params)
best_se, best_params = choose_best(best_se, best_params, se, params)
x.append(best_params)
# Fit envelope center and phase
func = self._fit_phi_x_y
res = minimize(func, best_params, method='L-BFGS-B', jac=True)
params = res.x
se = self._fit_phi_x_y_se(params)
best_se, best_params = choose_best(best_se, best_params, se, params)
x.append(best_params)
        # Fit all parameters jointly
func = self._fit_all
res = minimize(func, best_params, method='L-BFGS-B', jac=True)
params = res.x
se = self._fit_all_se(params)
best_se, best_params = choose_best(best_se, best_params, se, params)
x.append(best_params)
return x, split_params(best_params), best_se
def make_gabor(self, params, n_x, n_y):
return self._make_gabor(params, n_x, n_y)
def make_phase(self, params, n_x, n_y):
return self._make_phase(params, n_x, n_y)
def make_envelope(self, params, n_x, n_y):
return self._make_envelope(params, n_x, n_y)
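def _example_gabor_fit():
    # Illustrative usage sketch (helper added for exposition; not part of the
    # original module). Building a GaborFit compiles several Theano functions,
    # so construction is slow; the patch array shape follows the fit docstring,
    # (n_x, n_y, n_batch).
    patches = np.random.randn(16, 16, 5).astype('float32')
    fitter = GaborFit()
    history, best_params, best_se = fitter.fit(patches)
    # history[-1] is the combined internal parameter vector of the best fit,
    # which can be rendered back into image space.
    gabors = fitter.make_gabor(history[-1], 16, 16)
    assert gabors.shape == (16, 16, 5)
    return gabors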
```
|
{
"source": "JesseLivezey/mixture_models",
"score": 2
}
|
#### File: JesseLivezey/mixture_models/models.py
```python
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
floatX = theano.config.floatX
def log_gaussian_symbolic(X, alpha, beta):
alpha = alpha.dimshuffle('x', 0)
X = X.dimshuffle(0, 'x')
sqrt2pi = np.sqrt(2. * np.pi).astype(floatX)
return ((alpha * X) + (beta * X**2) +
(.5 * T.log(-2. * beta)) +
(.25 * alpha**2 / beta) - np.log(sqrt2pi))
def gaussian_symbolic(X, alpha, beta):
lg = log_gaussian_symbolic(X, alpha, beta)
return T.exp(lg)
def mog_posterior_symbolic(X, alpha, beta, pi):
gs = gaussian_symbolic(X, alpha, beta)
numerator = pi.dimshuffle('x', 0) * gs
return numerator / numerator.sum(axis=1, keepdims=True)
def mog_em_objective_symbolic(X, alpha, beta, pi, posterior):
lg = log_gaussian_symbolic(X, alpha, beta)
return (posterior * lg).sum()
class MixtureModel(object):
"""
Mixture model base class.
Parameters
----------
n_mixtures : int
Number of mixtures.
seed : int (optional)
Random seed.
"""
def __init__(self, n_mixtures, seed=20161119):
self.rng = np.random.RandomState(seed)
self.n_mixtures = n_mixtures
self._setup()
def _setup(self):
raise NotImplementedError
def _update_X(self, X):
self._update_X_theano(X.astype(floatX))
def fit(self, X, n_steps=10):
self._update_X(X)
for ii in range(n_steps):
            print(self.em_objective(X))
self._update_params()
def posterior(self, X):
return self._posterior(X.astype(floatX))
def em_objective(self, X):
return self._em_objective(X.astype(floatX))
class GaussianMixture(MixtureModel):
def _setup(self):
pi = self.rng.rand(self.n_mixtures)
pi /= pi.sum()
self.pi = theano.shared(pi.astype(floatX), 'pi')
alpha = self.rng.randn(self.n_mixtures)
self.alpha = theano.shared(alpha.astype(floatX), 'alpha')
beta = -1. * np.ones(self.n_mixtures)
self.beta = theano.shared(beta.astype(floatX), 'beta')
self.X = theano.shared(np.ones(1).astype(floatX))
X = T.vector('X')
updates = OrderedDict()
updates[self.X] = X
self._update_X_theano = theano.function([X], [], updates=updates)
# Setup posterior symbolic and theano function with input X
posterior_in = mog_posterior_symbolic(X, self.alpha,
self.beta, self.pi)
self._posterior = theano.function([X], posterior_in)
# Setup EM objective symbolic and theano function
em_objective = mog_em_objective_symbolic(X, self.alpha,
self.beta, self.pi,
posterior_in)
self._em_objective = theano.function([X], em_objective)
# Setup posterior symbolic with shared X for fitting
posterior_sh = mog_posterior_symbolic(self.X, self.alpha,
self.beta, self.pi)
em_objective = mog_em_objective_symbolic(self.X, self.alpha,
self.beta, self.pi,
posterior_sh)
# Setup EM fit function
updates = OrderedDict()
weight_x = posterior_sh.T.dot(self.X)
weight_x2 = posterior_sh.T.dot(self.X**2)
weight = posterior_sh.sum(axis=0)
pi_update = posterior_sh.mean(axis=0)
pi_update = T.switch(pi_update > 0., pi_update, 0.)
pi_update = pi_update / pi_update.sum()
        alpha_update = -2. * self.beta * weight_x / weight
a = weight_x2
b = weight / 2.
c = -weight * self.alpha**2 / 4.
beta_update = (-b - T.sqrt(b**2 - 4. * a * c)) / (2. * a)
updates[self.pi] = pi_update
updates[self.alpha] = alpha_update
updates[self.beta] = beta_update
self._update_params = theano.function([], [em_objective], updates=updates)
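def _example_gaussian_mixture():
    # Illustrative usage sketch (helper added for exposition; not part of the
    # original module). Fit a two-component Gaussian mixture to a bimodal 1-D
    # sample and inspect the posterior responsibilities.
    rng = np.random.RandomState(0)
    X = np.concatenate([rng.randn(200) - 3., rng.randn(200) + 3.])
    model = GaussianMixture(n_mixtures=2)
    model.fit(X, n_steps=20)
    responsibilities = model.posterior(X)
    assert responsibilities.shape == (400, 2)
    return responsibilities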
class RayleighMixture(MixtureModel):
def _setup(self):
pi = self.rng.rand(self.n_mixtures)
pi /= pi.sum()
self.pi = theano.shared(pi.astype(floatX))
neg_log_beta = self.rng.randn(self.n_mixtures)
self.neg_log_beta = theano.shared(neg_log_beta.astype(floatX))
```
|